Dataset columns: query (string, 9 to 9.05k characters), document (string, 10 to 222k characters), negatives (list of 19 to 20 items), metadata (dict).
Function for loading the features and labels associated with the training dataset.
def _loadTrain(self, features, labels):
    self.trainX_, self.trainY_, self.trainLabel_ = self.__load(features, labels)
[ "def load_features_and_labels(features_dir, labels_dir):\n # load labels\n labels = pd.read_csv(glob.glob('{}/*.csv'.format(labels_dir))[0])\n labels = labels.sort_values(by='bookingID')\n\n # load features\n features = load_features(features_dir)\n\n return features, labels", "def load_features_and_labels(train_partition, test_partition, training_feature_file,\n test_feature_file, preprocessor='tokenized', vectorizer=None, \n feature_outfile_name=None):\n train_labels_path = \"{script_dir}/../data/labels/{train}/labels.{train}.csv\".format(train=train_partition, script_dir=SCRIPT_DIR)\n train_data_path = \"{script_dir}/../data/essays/{}/tokenized/\".format(train_partition, script_dir=SCRIPT_DIR)\n test_labels_path = \"{script_dir}/../data/labels/{test}/labels.{test}.csv\".format(test=test_partition, script_dir=SCRIPT_DIR)\n test_data_path = \"{script_dir}/../data/essays/{}/tokenized\".format(test_partition, script_dir=SCRIPT_DIR)\n\n path_and_descriptor_list = [(train_labels_path, \"training labels file\"),\n (train_data_path, \"training data directory\"),\n (test_labels_path, \"testing labels file\"),\n (test_data_path, \"testing data directory\")]\n for path_, path_descriptor in path_and_descriptor_list:\n if not os.path.exists(path_):\n raise Exception(\"Could not find {desc}: {pth}\".format(desc=path_descriptor, pth=path_))\n #\n # Read labels files. If feature files provided, `training_files` and `test_files` below will be ignored\n # \n with open(train_labels_path) as train_labels_f, open(test_labels_path) as test_labels_f:\n essay_path_train = '{script_dir}/../data/essays/{train}/{preproc}'.format(script_dir=SCRIPT_DIR, train=train_partition, preproc=preprocessor)\n essay_path_test = '{script_dir}/../data/essays/{test}/{preproc}'.format(script_dir=SCRIPT_DIR, test=test_partition, preproc=preprocessor)\n\n training_files, training_labels, training_prompts = zip(*[(os.path.join(essay_path_train, row['test_taker_id'] + '.txt'), row['L1'], row['essay_prompt'])\n for row in csv.DictReader(train_labels_f)])\n\n test_files, test_labels, test_prompts = zip(*[(os.path.join(essay_path_test, row['test_taker_id'] + '.txt'), row['L1'], row['essay_prompt'])\n for row in csv.DictReader(test_labels_f)])\n \n #\n # Verify that either both or neither of training/test feature files are provided\n #\n if bool(training_feature_file) != bool(test_feature_file):\n print(\"Feature files were not provided for both test and train partitions. 
\"\n \"Generating default unigram features now.\")\n \n #\n # If feature files provided, get features and labels from them\n # \n elif training_feature_file and test_feature_file:\n training_matrix, encoded_training_labels = load_svmlight_file(training_feature_file)\n original_training_labels = tuple([CLASS_LABELS[int(i)] for i in encoded_training_labels])\n \n if original_training_labels != training_labels:\n raise Exception(\"Training labels in feature file do not match those in the labels file.\")\n\n test_matrix, encoded_test_labels = load_svmlight_file(test_feature_file)\n original_test_labels = tuple([CLASS_LABELS[int(i)] for i in encoded_test_labels])\n if original_test_labels != test_labels:\n raise Exception(\"Test labels in feature file do not match those in the labels file.\")\n\n return [(training_matrix, encoded_training_labels, original_training_labels),\n (test_matrix, encoded_test_labels, original_test_labels)]\n \n # \n # If no feature files provided, create feature matrix from the data files\n #\n print(\"Found {} text files in {} and {} in {}\"\n .format(len(training_files), train_partition, len(test_files), test_partition))\n print(\"Loading training and testing data from {} & {}\".format(train_partition, test_partition))\n\n training_data, test_data = [],[]\n for f in training_files:\n with open(f) as doc:\n training_data.append(doc.read())\n\n for f in test_files:\n with open(f) as doc:\n test_data.append(doc.read())\n\n features = FeatureUnion([\n #('word_skipgrams', SkipgramVectorizer(n=2, k=2, base_analyzer='word', binary=True, min_df=5)),\n ('char_ngrams', TfidfVectorizer(ngram_range=(1,11), analyzer=\"char\", binary=True))\n #('char_ngrams', CountVectorizer(analyzer=\"word\"))\n #('char_ngrams', TfidfVectorizer(ngram_range=(1,9),analyzer=\"char\", binary=True))\n #('prompt_ngrams', PromptWordVectorizer(ngram_range=(1, 9), analyzer=\"char\", binary=True))\n #('char_ngrams', TfidfVectorizer(analyzer=\"word\", binary=True))\n #('misspellings', MisspellingVectorizer(ngram_range=(1, 9), analyzer=\"char\", binary=True))\n #('ipa_ngrams', IPAVectorizer(ngram_range=(1, 3), analyzer=\"word\", binary=False)),\n #('pos_ngrams', POSVectorizer(ngram_range=(1, 4), analyzer=\"word\")),\n #('average_word_length', AverageWordLength())\n #('final_letter', FinalLetter(analyzer=\"char\")),\n \n ])\n\n features.fit(training_data)\n\n training_matrix, encoded_training_labels, vectorizer = transform_data(training_data, training_labels, features)\n test_matrix, encoded_test_labels, _ = transform_data(test_data, test_labels, features)\n\n\n #\n # Write features to feature files\n # No need to have different names for train/test since they each have their own directory.\n outfile_name = (strftime(\"{}-%Y-%m-%d-%H.%M.%S.features\".format(train_partition))\n if feature_outfile_name is None \n else \"{}-{}\".format(train_partition, feature_outfile_name))\n\n outfile = strftime(\"{script_dir}/../data/features/essays/{train}/{outfile_name}\"\n .format(script_dir=SCRIPT_DIR, train=train_partition, outfile_name=outfile_name))\n dump_svmlight_file(training_matrix, encoded_training_labels, outfile)\n print(\"Wrote training features to\", outfile.replace(SCRIPT_DIR, '')[1:]) # prints file path relative to script location\n \n outfile_name = (strftime(\"{}-%Y-%m-%d-%H.%M.%S.features\".format(test_partition))\n if feature_outfile_name is None\n else \"{}-{}\".format(test_partition, feature_outfile_name))\n \n outfile = (\"{script_dir}/../data/features/essays/{test}/{outfile_name}\"\n 
.format(script_dir=SCRIPT_DIR, test=test_partition, outfile_name=outfile_name))\n dump_svmlight_file(test_matrix, encoded_test_labels, outfile)\n print(\"Wrote testing features to\", outfile.replace(SCRIPT_DIR, '')[1:]) # prints file path relative to script location\n\n return [(training_matrix, encoded_training_labels, training_labels, training_prompts, training_files),\n (test_matrix, encoded_test_labels, test_labels, test_prompts, test_files)]", "def load_features(self, features):\n pass\n # self.features = features", "def load_training_set(self, features, labels=None, feature_id_col_name=None, metadata_col_names=None):\n self._training = self._learner.convert_data_to_format(features, labels, feature_id_col_name, metadata_col_names)", "def read_data(feature_file, label_file):", "def load_training_data(self):\n self.flux_data = pd.read_csv(settings['RAW_TRAINING_PATH'])\n self.meta_data = pd.read_csv(settings[\"RAW_TRAINING_METADATA_PATH\"])\n\n # Label folds\n y = self.meta_data['target']\n folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=1)\n kfold_indices = -1*np.ones(len(y))\n for idx, (fold_train, fold_val) in enumerate(folds.split(y, y)):\n kfold_indices[fold_val] = idx\n self.meta_data['fold'] = kfold_indices\n\n self.dataset_name = 'train'", "def loadTrainingData():\n summaryFile = '../Summarization/Data/training_data_arabic_features.dat'\n dataFile = '../Summarization/Data/dataset_info_arabic.dat'\n with open(summaryFile, 'rb') as sumFile:\n summaries = pickle.load(sumFile, encoding='bytes')\n with open(dataFile, 'rb') as datFile:\n data = pickle.load(datFile, encoding='bytes')\n return summaries, data", "def _loadTest(self, features, labels):\n\t\tself.testX_, self.testY_, self.testLabel_ = self.__load(features, labels)", "def load_data(self):\n sets = ['train', 'val']\n images = []\n labels = []\n self.labels_dic = {}\n file = open(self.path + 'wnids.txt')\n train_labels = file.read().split()\n if self.train:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n for i in os.listdir(self.path + 'train/' + f + '/images/'):\n images.append(Image.open(self.path + 'train/' + f + '/images/' + i))\n labels.append(f)\n #image label n link to folder names of TinyImageNet\n self.labels_dic[f] = fn\n\n else:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n self.labels_dic[f] = fn\n file_val = open(self.path + 'val/val_annotations.txt')\n val_labels = file_val.read().split('\\n')\n for im in val_labels:\n im_data = im.split(\"\t\")[:2]\n if len(im_data) < 2:\n continue\n if im_data[1] in self.labels_dic:\n images.append(Image.open(self.path + 'val/images/' + im_data[0]))\n labels.append(im_data[1])\n\n self.images = images\n self.labels = labels", "def load_data(path):\n\n\t# Create a list of all files ending in .jpg\n\tim_list = list_images(path, '.jpg')\n\n\t# Create labels\n\tlabels = [int(im_name.split('/')[-1][0]) for im_name in im_list]\n\tfeatures = []\n\n\t# Create features from the images\n\t# TOD.O: iterate over images paths\n\tfor im_path in im_list:\n\t\t# TOD.O: load image as a gray level image\n\t\tim = np.array(Image.open(im_path).convert('L'))\n\t\t# TOD.O: process the image to remove borders and resize\n\t\tim = process_image(im)\n\t\t# TOD.O: append extracted features to the a list\n\t\tfeatures.append(extract_features(im))\n\n\t# TOD.O: return features, and labels\n\treturn features, labels", "def create_train_and_label_sets(self):\r\n\t\ttraining_set = pd.read_csv(self.training_f_name)\r\n\t\tlabels = training_set.label.values 
#.apply(lambda x: str(x)).values\r\n\t\ttraining_set.drop(\"label\", inplace=True, axis=1)\r\n\t\ttrain_data = training_set.values\r\n\t\tself.dtrain = xgb.DMatrix(train_data, label=labels)\r\n\t\tprint(\"training set loaded\")", "def load_data_from_folder(folder_path, label_file=\"labels.txt\"):\n\n with open(os.path.join(folder_path, label_file), \"r\") as f:\n lines = f.readlines()\n\n features = []\n classes = []\n int_to_class = {}\n\n for line in lines:\n line = line.strip(\"\\n\").strip(\" \")\n img_path, char_class = line.split(\" \")\n\n number = int(img_path.split(\".\")[0])\n int_to_class.update({number: char_class})\n\n # read the character image as gray and reshape into a vector\n img = cv2.imread(os.path.join(folder_path, img_path), cv2.IMREAD_GRAYSCALE)\n img = cv2.threshold(img, 0, 255, cv2.THRESH_OTSU)[1]\n\n # cv2.imshow(\"train char\", img)\n # cv2.waitKey(0)\n\n features.append(img.flatten())\n classes.append(char_class)\n\n int_classes = list(int_to_class.keys())\n features = np.array(features)\n\n return features, int_classes, int_to_class", "def train( self, trainingData, trainingLabels):\n\n self.features = trainingData[0].keys()\n # \"*** YOUR CODE HERE ***\"\n # util.raiseNotDefined()\n n = len(trainingData)\n fulldata = zip(trainingData,trainingLabels)\n\n # print len(fulldata[0])\n distribution = list([1])*n\n m = int(n*self.ratio)\n\n for i in range(self.num_classifiers):\n samples = util.nSample(distribution,fulldata,m)\n RandtarinData = [x for (x,v) in samples]\n Randlabels = [y for (x,y) in samples] \n \n self.classifiers[i].train(RandtarinData,Randlabels)", "def read_all_feature_data(feats, mode='train', label_name='age'):\n if mode == 'train':\n if not os.path.exists(os.path.join(cfg.data_path, 'train_log_NN_v2.pkl')):\n train_log = preprocess(log_path='train_log_time_click_time_sequence.pkl')\n\n # add w2v features\n for multi_feat in feats.multi_features:\n dense_feats = [x for x in feats.dense_features if multi_feat and 'w2v' in x]\n print('read %s w2v embedding' % multi_feat)\n train_log = read_embedding_data(train_log, multi_feat, dense_feats,\n os.path.join(cfg.data_path, 'user_id_%s_test_w2v_128.pkl' % multi_feat))\n\n # add tfidf features\n for multi_feat in feats.multi_features:\n print('read %s tfidf embedding' % multi_feat)\n tfidf_df = pd.read_pickle(os.path.join(cfg.data_path, 'user_id_%s_test_tfidf.pkl' % multi_feat))\n train_log = pd.merge(train_log, tfidf_df, on='user_id')\n\n # process label\n print('read label')\n if label_name == 'age':\n train_log = read_labels_v2(train_log, feats.label_features)\n elif label_name == 'gender':\n train_log = read_labels_v3(train_log, feats.label_features)\n\n # save data\n print('save data')\n train_log.to_pickle(os.path.join(cfg.data_path, 'train_log_NN_v2.pkl'))\n\n train_log = train_log.fillna(0)\n else:\n print('read all data')\n train_log = pd.read_pickle(os.path.join(cfg.data_path, 'train_log_NN_v2.pkl'))\n train_log = train_log.fillna(0)\n\n print('read label')\n if label_name == 'age':\n train_log = read_labels_v2(train_log, feats.label_features)\n elif label_name == 'gender':\n train_log = read_labels_v3(train_log, feats.label_features)\n\n return train_log\n\n elif mode == 'test':\n if not os.path.exists(os.path.join(cfg.data_path, 'test_log_NN_v2.pkl')):\n test_log = preprocess(is_train=False, log_path='test_log_time_click_time_sequence.pkl')\n\n # add w2v features\n for multi_feat in feats.multi_features:\n dense_feats = [x for x in feats.dense_features if multi_feat and 'w2v' in x]\n 
print('read %s w2v embedding' % multi_feat)\n test_log = read_embedding_data(test_log, multi_feat, dense_feats,\n os.path.join(cfg.data_path, 'user_id_%s_test_w2v_128.pkl' % multi_feat))\n\n # add tfidf features\n for multi_feat in feats.multi_features:\n print('read %s tfidf embedding' % multi_feat)\n tfidf_df = pd.read_pickle(os.path.join(cfg.data_path, 'user_id_%s_test_tfidf.pkl' % multi_feat))\n test_log = pd.merge(test_log, tfidf_df, on='user_id')\n\n # save data\n print('save data')\n test_log.to_pickle(os.path.join(cfg.data_path, 'test_log_NN_v2.pkl'))\n\n test_log = test_log.fillna(0)\n else:\n print('read all data')\n test_log = pd.read_pickle(os.path.join(cfg.data_path, 'test_log_NN_v2.pkl'))\n test_log = test_log.fillna(0)\n\n return test_log\n\n elif mode == 'val':\n if not os.path.exists(os.path.join(cfg.data_path, 'train_train_log_NN_v2.pkl')):\n train_log, val_log = preprocess(log_path='train_log_time_click_time_sequence.pkl', is_split=True)\n\n # add w2v features\n for multi_feat in feats.multi_features:\n dense_feats = [x for x in feats.dense_features if multi_feat in x]\n print('read %s w2v embedding' % multi_feat)\n train_log = read_embedding_data(train_log, multi_feat, dense_feats,\n os.path.join(cfg.data_path, 'user_id_%s_val_w2v_128.pkl' % multi_feat))\n val_log = read_embedding_data(val_log, multi_feat, dense_feats,\n os.path.join(cfg.data_path, 'user_id_%s_val_w2v_128.pkl' % multi_feat))\n\n # add tfidf features\n for multi_feat in feats.multi_features:\n print('read %s tfidf embedding' % multi_feat)\n tfidf_df = pd.read_pickle(os.path.join(cfg.data_path, 'user_id_%s_val_tfidf.pkl' % multi_feat))\n train_log = pd.merge(train_log, tfidf_df, on='user_id')\n tfidf_df = pd.read_pickle(os.path.join(cfg.data_path, 'user_id_%s_val_tfidf.pkl' % multi_feat))\n val_log = pd.merge(val_log, tfidf_df, on='user_id')\n\n # process label\n print('read label')\n if label_name == 'age':\n train_log = read_labels_v2(train_log, feats.label_features)\n val_log = read_labels_v2(val_log, feats.label_features)\n elif label_name == 'gender':\n train_log = read_labels_v3(train_log, feats.label_features)\n val_log = read_labels_v3(val_log, feats.label_features)\n\n # save data\n print('save data')\n train_log.to_pickle(os.path.join(cfg.data_path, 'train_train_log_NN_v2.pkl'))\n val_log.to_pickle(os.path.join(cfg.data_path, 'train_val_log_NN_v2.pkl'))\n\n train_log = train_log.fillna(0)\n val_log = val_log.fillna(0)\n else:\n print('read all data')\n train_log = pd.read_pickle(os.path.join(cfg.data_path, 'train_train_log_NN_v2.pkl'))\n val_log = pd.read_pickle(os.path.join(cfg.data_path, 'train_val_log_NN_v2.pkl'))\n\n print('read label')\n if label_name == 'age':\n train_log = read_labels_v2(train_log, feats.label_features)\n val_log = read_labels_v2(val_log, feats.label_features)\n elif label_name == 'gender':\n train_log = read_labels_v3(train_log, feats.label_features)\n val_log = read_labels_v3(val_log, feats.label_features)\n\n train_log = train_log.fillna(0)\n val_log = val_log.fillna(0)\n\n return train_log, val_log\n\n else:\n raise Exception('[!] 
Such mode not available')", "def load_data_for_statistics_features(self):\n\t\t#load feature type\n\t\tcategorical_feature,numerical_feature=self.load_feature_type()\n\t\t#load train and predict\n\t\treader_category_train,reader_numeric_train=self.load_train_X()\n\t\treader_category_predict,reader_numeric_predict=self.load_predict_X()\n\t\ty=self.load_train_y()\n\n\t\treader_category_train = pd.merge(reader_category_train,y,on='uid')\n\t\treader_numeric_train = pd.merge(reader_numeric_train,y,on='uid')\n\n\t\treader_category_predict['y'] = [-99999 for i in range(len(reader_category_predict))]\n\t\treader_numeric_predict['y'] = [-99999 for i in range(len(reader_numeric_predict))]\n\n\t\t#merge data\n\t\treader_category=pd.concat([reader_category_train,reader_category_predict],ignore_index=True)\n\t\treader_numeric=pd.concat([reader_numeric_train,reader_numeric_predict],ignore_index=True)\n\n\t\treturn categorical_feature,numerical_feature,reader_category,reader_numeric", "def load_data_and_labels():\n # Load data from files\n positive_examples = []\n for file in os.listdir('with_datarace'):\n filename = os.fsdecode(file)\n ast_file = open('with_datarace\\\\' + filename, 'r')\n token_vector = ast_file.read()\n positive_examples.append(token_vector)\n file_names.append(filename)\n\n negative_examples = []\n for file in os.listdir('without_datarace\\\\'):\n filename = os.fsdecode(file)\n ast_file = open('without_datarace\\\\' + filename, 'r')\n token_vector = ast_file.read()\n negative_examples.append(token_vector) # List of lists\n file_names.append(filename)\n\n positive_examples = [s.strip() for s in positive_examples]\n negative_examples = [s.strip() for s in negative_examples]\n\n # Split by words\n x_text = positive_examples + negative_examples # why we didn't cobine it from the beginning?\n x_text = [clean_str(sent) for sent in x_text]\n x_text = [s.split(\" \") for s in x_text]\n\n # Generate labels\n positive_labels = [[0, 1] for _ in positive_examples]\n negative_labels = [[1, 0] for _ in negative_examples]\n y = np.concatenate([positive_labels, negative_labels], 0)\n\n return [x_text, y]", "def _load_training_and_test_sets(normalize):\n class_labels = []\n test_labels = []\n norm = None\n if normalize == True:\n norm = loading.get_normalize_vector()\n\n for i in range(0, 10):\n [training, test] = loading.load_number_set(i, 0.7, norm_vector=norm)\n labels = [str(i)] * training.shape[0]\n tlabels = [str(i)] * test.shape[0]\n if i == 0:\n train_points = training\n test_points = test\n else:\n train_points = np.concatenate((train_points, training), axis = 0)\n test_points = np.concatenate((test_points, test), axis = 0)\n class_labels.extend(labels)\n test_labels.extend(tlabels)\n\n return train_points, test_points, class_labels, test_labels", "def load_labels():\n labels = pd.read_csv(PATH_DATA_RAW / LABEL_PATH)\n labels = labels.iloc[:, 1:4].to_numpy()\n return labels", "def load_data():\n # Load and preprocess data\n sentences, labels = load_data_and_labels()\n sentences_padded = pad_sentences(sentences)\n vocabulary, vocabulary_inv = build_vocab(sentences_padded)\n x, y = build_input_data(sentences_padded, labels, vocabulary)\n return [x, y, vocabulary, vocabulary_inv]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function for loading the features and labels associated with the testing dataset.
def _loadTest(self, features, labels):
    self.testX_, self.testY_, self.testLabel_ = self.__load(features, labels)
[ "def load_features_and_labels(train_partition, test_partition, training_feature_file,\n test_feature_file, preprocessor='tokenized', vectorizer=None, \n feature_outfile_name=None):\n train_labels_path = \"{script_dir}/../data/labels/{train}/labels.{train}.csv\".format(train=train_partition, script_dir=SCRIPT_DIR)\n train_data_path = \"{script_dir}/../data/essays/{}/tokenized/\".format(train_partition, script_dir=SCRIPT_DIR)\n test_labels_path = \"{script_dir}/../data/labels/{test}/labels.{test}.csv\".format(test=test_partition, script_dir=SCRIPT_DIR)\n test_data_path = \"{script_dir}/../data/essays/{}/tokenized\".format(test_partition, script_dir=SCRIPT_DIR)\n\n path_and_descriptor_list = [(train_labels_path, \"training labels file\"),\n (train_data_path, \"training data directory\"),\n (test_labels_path, \"testing labels file\"),\n (test_data_path, \"testing data directory\")]\n for path_, path_descriptor in path_and_descriptor_list:\n if not os.path.exists(path_):\n raise Exception(\"Could not find {desc}: {pth}\".format(desc=path_descriptor, pth=path_))\n #\n # Read labels files. If feature files provided, `training_files` and `test_files` below will be ignored\n # \n with open(train_labels_path) as train_labels_f, open(test_labels_path) as test_labels_f:\n essay_path_train = '{script_dir}/../data/essays/{train}/{preproc}'.format(script_dir=SCRIPT_DIR, train=train_partition, preproc=preprocessor)\n essay_path_test = '{script_dir}/../data/essays/{test}/{preproc}'.format(script_dir=SCRIPT_DIR, test=test_partition, preproc=preprocessor)\n\n training_files, training_labels, training_prompts = zip(*[(os.path.join(essay_path_train, row['test_taker_id'] + '.txt'), row['L1'], row['essay_prompt'])\n for row in csv.DictReader(train_labels_f)])\n\n test_files, test_labels, test_prompts = zip(*[(os.path.join(essay_path_test, row['test_taker_id'] + '.txt'), row['L1'], row['essay_prompt'])\n for row in csv.DictReader(test_labels_f)])\n \n #\n # Verify that either both or neither of training/test feature files are provided\n #\n if bool(training_feature_file) != bool(test_feature_file):\n print(\"Feature files were not provided for both test and train partitions. 
\"\n \"Generating default unigram features now.\")\n \n #\n # If feature files provided, get features and labels from them\n # \n elif training_feature_file and test_feature_file:\n training_matrix, encoded_training_labels = load_svmlight_file(training_feature_file)\n original_training_labels = tuple([CLASS_LABELS[int(i)] for i in encoded_training_labels])\n \n if original_training_labels != training_labels:\n raise Exception(\"Training labels in feature file do not match those in the labels file.\")\n\n test_matrix, encoded_test_labels = load_svmlight_file(test_feature_file)\n original_test_labels = tuple([CLASS_LABELS[int(i)] for i in encoded_test_labels])\n if original_test_labels != test_labels:\n raise Exception(\"Test labels in feature file do not match those in the labels file.\")\n\n return [(training_matrix, encoded_training_labels, original_training_labels),\n (test_matrix, encoded_test_labels, original_test_labels)]\n \n # \n # If no feature files provided, create feature matrix from the data files\n #\n print(\"Found {} text files in {} and {} in {}\"\n .format(len(training_files), train_partition, len(test_files), test_partition))\n print(\"Loading training and testing data from {} & {}\".format(train_partition, test_partition))\n\n training_data, test_data = [],[]\n for f in training_files:\n with open(f) as doc:\n training_data.append(doc.read())\n\n for f in test_files:\n with open(f) as doc:\n test_data.append(doc.read())\n\n features = FeatureUnion([\n #('word_skipgrams', SkipgramVectorizer(n=2, k=2, base_analyzer='word', binary=True, min_df=5)),\n ('char_ngrams', TfidfVectorizer(ngram_range=(1,11), analyzer=\"char\", binary=True))\n #('char_ngrams', CountVectorizer(analyzer=\"word\"))\n #('char_ngrams', TfidfVectorizer(ngram_range=(1,9),analyzer=\"char\", binary=True))\n #('prompt_ngrams', PromptWordVectorizer(ngram_range=(1, 9), analyzer=\"char\", binary=True))\n #('char_ngrams', TfidfVectorizer(analyzer=\"word\", binary=True))\n #('misspellings', MisspellingVectorizer(ngram_range=(1, 9), analyzer=\"char\", binary=True))\n #('ipa_ngrams', IPAVectorizer(ngram_range=(1, 3), analyzer=\"word\", binary=False)),\n #('pos_ngrams', POSVectorizer(ngram_range=(1, 4), analyzer=\"word\")),\n #('average_word_length', AverageWordLength())\n #('final_letter', FinalLetter(analyzer=\"char\")),\n \n ])\n\n features.fit(training_data)\n\n training_matrix, encoded_training_labels, vectorizer = transform_data(training_data, training_labels, features)\n test_matrix, encoded_test_labels, _ = transform_data(test_data, test_labels, features)\n\n\n #\n # Write features to feature files\n # No need to have different names for train/test since they each have their own directory.\n outfile_name = (strftime(\"{}-%Y-%m-%d-%H.%M.%S.features\".format(train_partition))\n if feature_outfile_name is None \n else \"{}-{}\".format(train_partition, feature_outfile_name))\n\n outfile = strftime(\"{script_dir}/../data/features/essays/{train}/{outfile_name}\"\n .format(script_dir=SCRIPT_DIR, train=train_partition, outfile_name=outfile_name))\n dump_svmlight_file(training_matrix, encoded_training_labels, outfile)\n print(\"Wrote training features to\", outfile.replace(SCRIPT_DIR, '')[1:]) # prints file path relative to script location\n \n outfile_name = (strftime(\"{}-%Y-%m-%d-%H.%M.%S.features\".format(test_partition))\n if feature_outfile_name is None\n else \"{}-{}\".format(test_partition, feature_outfile_name))\n \n outfile = (\"{script_dir}/../data/features/essays/{test}/{outfile_name}\"\n 
.format(script_dir=SCRIPT_DIR, test=test_partition, outfile_name=outfile_name))\n dump_svmlight_file(test_matrix, encoded_test_labels, outfile)\n print(\"Wrote testing features to\", outfile.replace(SCRIPT_DIR, '')[1:]) # prints file path relative to script location\n\n return [(training_matrix, encoded_training_labels, training_labels, training_prompts, training_files),\n (test_matrix, encoded_test_labels, test_labels, test_prompts, test_files)]", "def load_test_data():\n print(\"Loading test data...\")\n parser = MyHTMLParser()\n parser.feed(codecs.open(test_data_with_label, \"r\", \"utf-8\").read())\n test_list = data_list[1:]\n data_list.clear()\n label = np.array(label_list)\n test_examples = [[item for item in jieba.cut(s, cut_all=False)] for s in test_list]\n data_analysis(test_examples, 'test')\n return test_examples, label", "def load_test_set(self, features, labels=None, feature_id_col_name=None, metadata_col_names=None):\n self._test = self._learner.convert_data_to_format(features, labels, feature_id_col_name, metadata_col_names)", "def load_mnist_test():\n # Load training data\n print(\"Loading testing data...\")\n data, label = load_csv('./data/mnist_test.csv')\n assert len(data) == len(label)\n print(\"Testing data loaded with {count} images\".format(count=len(data)))\n\n return data, label", "def read_data(feature_file, label_file):", "def _loadTrain(self, features, labels):\n\t\tself.trainX_, self.trainY_, self.trainLabel_ = self.__load(features, labels)", "def load_test_data():\n\n images, cls = _load_data(filename=\"test_batch\")\n\n return images, cls, one_hot_encoded(class_numbers=cls, num_classes=num_classes)", "def load_features_and_labels(features_dir, labels_dir):\n # load labels\n labels = pd.read_csv(glob.glob('{}/*.csv'.format(labels_dir))[0])\n labels = labels.sort_values(by='bookingID')\n\n # load features\n features = load_features(features_dir)\n\n return features, labels", "def load_training_test_sets():\n train = pickle.load(open('./data/tweets/train.p', 'rb'))\n test = pickle.load(open('./data/tweets/test.p', 'rb'))\n return train, test", "def Load_test_file(self):\n image_path = os.path.join(self.test_file_path,\n 'labels_test_images.npy')\n labels_path = os.path.join(self.test_file_path,\n 'labels_test_labels.npy')\n image = np.load(image_path)\n labels = np.load(labels_path)\n self.N = image.shape[0]\n return image, labels", "def load_data_test(self):\n data_set = list(open(self.DATA_DIR + 'TREC_10.label', encoding='utf-8', errors='replace').readlines())\n data_set_cleaned = [self.clean_str(sent) for sent in data_set]\n Y_Test = [s.split(' ')[0].split(':')[0] for s in data_set_cleaned]\n X_Test = [s.split(\" \")[1:] for s in data_set_cleaned]\n return X_Test, Y_Test", "def load_features(self, features):\n pass\n # self.features = features", "def ucf_read_train_test_split(self, path):\n # get the test train split txt file\n train = []\n test = []\n for (dirpath, dirnames, filenames) in os.walk(path):\n train += [os.path.join(path, file) for file in filenames if file.startswith('trainlist')]\n test += [os.path.join(path, file) for file in filenames if file.startswith('testlist')]\n train.sort()\n test.sort()\n\n # read test train data name and label from the txt file\n train_data_labels = []\n test_data_labels = []\n for tra, test in zip(train, test):\n with open(tra) as f:\n names_labels = f.readlines()\n data = [line.split(' ')[0].split('/')[-1].split('.')[0] for line in names_labels]\n label = [line.split(' ')[0].split('/')[0] for line in 
names_labels]\n train_data_labels.append({'data': data, 'label': label})\n with open(test) as f:\n names_labels = f.readlines()\n data = [line.split('/')[-1].split('.')[0] for line in names_labels]\n label = [line.split('/')[0] for line in names_labels]\n test_data_labels.append({'data': data, 'label': label})\n return train_data_labels, test_data_labels", "def load_test_dataset(self):\n test_data_path = \"testdata\"\n root = Path(test_data_path)\n classes = sorted([j.name.split('/')[-1] for j in root.iterdir()])\n print(classes)\n\n transform = transforms.Compose([\n transforms.Resize(300),\n transforms.RandomHorizontalFlip(),\n transforms.CenterCrop(250),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.6071, 0.4828, 0.3934], std=[0.2845, 0.3187, 0.3240])\n ])\n\n dataset = datasets.ImageFolder(test_data_path, transform=transform)\n testloader = DataLoader(dataset, batch_size=4, shuffle=True)\n print(\"Loaded data\")\n return testloader", "def test_load(self):\n classifier_load_test = Classifier(\"data/train\",\"data/test\",\"test\")\n try:\n classifier_load_test.load(\"test\")\n self.assert_(True)\n except Exception as fallo_abrir:\n self.assert_(False)", "def load_test_data(self):\n return self.load_annotations_set(is_test=True)", "def load_data(train_file, test_file):\n\n # load train and test data\n data_train = pd.read_csv(train_file)\n data_test = pd.read_csv(test_file)\n\n # concat and label\n data_out = pd.concat([data_train, data_test], keys=['train', 'test'])\n\n return data_out", "def load_datasets(training_path, testing_path):\n\n with h5py.File(training_path, \"r\") as h5_file_ptr:\n x_train = np.array(h5_file_ptr[\"data\"])\n y_train = np.array(h5_file_ptr[\"label\"])\n\n with h5py.File(testing_path, \"r\") as h5_file_ptr:\n x_test = np.array(h5_file_ptr[\"data\"])\n y_test = np.array(h5_file_ptr[\"label\"])\n\n return x_train, x_test, y_train, y_test", "def test_get_training_datasets_for_featurestore(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function for loading the features and labels associated with the validation dataset.
def _loadValid(self, features, labels):
    self.validX_, self.validY_, self.validLabel_ = self.__load(features, labels)
[ "def load_features_and_labels(train_partition, test_partition, training_feature_file,\n test_feature_file, preprocessor='tokenized', vectorizer=None, \n feature_outfile_name=None):\n train_labels_path = \"{script_dir}/../data/labels/{train}/labels.{train}.csv\".format(train=train_partition, script_dir=SCRIPT_DIR)\n train_data_path = \"{script_dir}/../data/essays/{}/tokenized/\".format(train_partition, script_dir=SCRIPT_DIR)\n test_labels_path = \"{script_dir}/../data/labels/{test}/labels.{test}.csv\".format(test=test_partition, script_dir=SCRIPT_DIR)\n test_data_path = \"{script_dir}/../data/essays/{}/tokenized\".format(test_partition, script_dir=SCRIPT_DIR)\n\n path_and_descriptor_list = [(train_labels_path, \"training labels file\"),\n (train_data_path, \"training data directory\"),\n (test_labels_path, \"testing labels file\"),\n (test_data_path, \"testing data directory\")]\n for path_, path_descriptor in path_and_descriptor_list:\n if not os.path.exists(path_):\n raise Exception(\"Could not find {desc}: {pth}\".format(desc=path_descriptor, pth=path_))\n #\n # Read labels files. If feature files provided, `training_files` and `test_files` below will be ignored\n # \n with open(train_labels_path) as train_labels_f, open(test_labels_path) as test_labels_f:\n essay_path_train = '{script_dir}/../data/essays/{train}/{preproc}'.format(script_dir=SCRIPT_DIR, train=train_partition, preproc=preprocessor)\n essay_path_test = '{script_dir}/../data/essays/{test}/{preproc}'.format(script_dir=SCRIPT_DIR, test=test_partition, preproc=preprocessor)\n\n training_files, training_labels, training_prompts = zip(*[(os.path.join(essay_path_train, row['test_taker_id'] + '.txt'), row['L1'], row['essay_prompt'])\n for row in csv.DictReader(train_labels_f)])\n\n test_files, test_labels, test_prompts = zip(*[(os.path.join(essay_path_test, row['test_taker_id'] + '.txt'), row['L1'], row['essay_prompt'])\n for row in csv.DictReader(test_labels_f)])\n \n #\n # Verify that either both or neither of training/test feature files are provided\n #\n if bool(training_feature_file) != bool(test_feature_file):\n print(\"Feature files were not provided for both test and train partitions. 
\"\n \"Generating default unigram features now.\")\n \n #\n # If feature files provided, get features and labels from them\n # \n elif training_feature_file and test_feature_file:\n training_matrix, encoded_training_labels = load_svmlight_file(training_feature_file)\n original_training_labels = tuple([CLASS_LABELS[int(i)] for i in encoded_training_labels])\n \n if original_training_labels != training_labels:\n raise Exception(\"Training labels in feature file do not match those in the labels file.\")\n\n test_matrix, encoded_test_labels = load_svmlight_file(test_feature_file)\n original_test_labels = tuple([CLASS_LABELS[int(i)] for i in encoded_test_labels])\n if original_test_labels != test_labels:\n raise Exception(\"Test labels in feature file do not match those in the labels file.\")\n\n return [(training_matrix, encoded_training_labels, original_training_labels),\n (test_matrix, encoded_test_labels, original_test_labels)]\n \n # \n # If no feature files provided, create feature matrix from the data files\n #\n print(\"Found {} text files in {} and {} in {}\"\n .format(len(training_files), train_partition, len(test_files), test_partition))\n print(\"Loading training and testing data from {} & {}\".format(train_partition, test_partition))\n\n training_data, test_data = [],[]\n for f in training_files:\n with open(f) as doc:\n training_data.append(doc.read())\n\n for f in test_files:\n with open(f) as doc:\n test_data.append(doc.read())\n\n features = FeatureUnion([\n #('word_skipgrams', SkipgramVectorizer(n=2, k=2, base_analyzer='word', binary=True, min_df=5)),\n ('char_ngrams', TfidfVectorizer(ngram_range=(1,11), analyzer=\"char\", binary=True))\n #('char_ngrams', CountVectorizer(analyzer=\"word\"))\n #('char_ngrams', TfidfVectorizer(ngram_range=(1,9),analyzer=\"char\", binary=True))\n #('prompt_ngrams', PromptWordVectorizer(ngram_range=(1, 9), analyzer=\"char\", binary=True))\n #('char_ngrams', TfidfVectorizer(analyzer=\"word\", binary=True))\n #('misspellings', MisspellingVectorizer(ngram_range=(1, 9), analyzer=\"char\", binary=True))\n #('ipa_ngrams', IPAVectorizer(ngram_range=(1, 3), analyzer=\"word\", binary=False)),\n #('pos_ngrams', POSVectorizer(ngram_range=(1, 4), analyzer=\"word\")),\n #('average_word_length', AverageWordLength())\n #('final_letter', FinalLetter(analyzer=\"char\")),\n \n ])\n\n features.fit(training_data)\n\n training_matrix, encoded_training_labels, vectorizer = transform_data(training_data, training_labels, features)\n test_matrix, encoded_test_labels, _ = transform_data(test_data, test_labels, features)\n\n\n #\n # Write features to feature files\n # No need to have different names for train/test since they each have their own directory.\n outfile_name = (strftime(\"{}-%Y-%m-%d-%H.%M.%S.features\".format(train_partition))\n if feature_outfile_name is None \n else \"{}-{}\".format(train_partition, feature_outfile_name))\n\n outfile = strftime(\"{script_dir}/../data/features/essays/{train}/{outfile_name}\"\n .format(script_dir=SCRIPT_DIR, train=train_partition, outfile_name=outfile_name))\n dump_svmlight_file(training_matrix, encoded_training_labels, outfile)\n print(\"Wrote training features to\", outfile.replace(SCRIPT_DIR, '')[1:]) # prints file path relative to script location\n \n outfile_name = (strftime(\"{}-%Y-%m-%d-%H.%M.%S.features\".format(test_partition))\n if feature_outfile_name is None\n else \"{}-{}\".format(test_partition, feature_outfile_name))\n \n outfile = (\"{script_dir}/../data/features/essays/{test}/{outfile_name}\"\n 
.format(script_dir=SCRIPT_DIR, test=test_partition, outfile_name=outfile_name))\n dump_svmlight_file(test_matrix, encoded_test_labels, outfile)\n print(\"Wrote testing features to\", outfile.replace(SCRIPT_DIR, '')[1:]) # prints file path relative to script location\n\n return [(training_matrix, encoded_training_labels, training_labels, training_prompts, training_files),\n (test_matrix, encoded_test_labels, test_labels, test_prompts, test_files)]", "def _loadTrain(self, features, labels):\n\t\tself.trainX_, self.trainY_, self.trainLabel_ = self.__load(features, labels)", "def load_data(self):\n sets = ['train', 'val']\n images = []\n labels = []\n self.labels_dic = {}\n file = open(self.path + 'wnids.txt')\n train_labels = file.read().split()\n if self.train:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n for i in os.listdir(self.path + 'train/' + f + '/images/'):\n images.append(Image.open(self.path + 'train/' + f + '/images/' + i))\n labels.append(f)\n #image label n link to folder names of TinyImageNet\n self.labels_dic[f] = fn\n\n else:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n self.labels_dic[f] = fn\n file_val = open(self.path + 'val/val_annotations.txt')\n val_labels = file_val.read().split('\\n')\n for im in val_labels:\n im_data = im.split(\"\t\")[:2]\n if len(im_data) < 2:\n continue\n if im_data[1] in self.labels_dic:\n images.append(Image.open(self.path + 'val/images/' + im_data[0]))\n labels.append(im_data[1])\n\n self.images = images\n self.labels = labels", "def _load_validation_data():\n\n print \"\\tLoading validation data...\"\n input_vectors = []\n expected_targets = []\n\n db = plyvel.DB(constants.VALIDATION_FILE)\n for key, value in db:\n datum = Datum()\n datum.ParseFromString(value)\n\n data = np.fromstring(datum.data, dtype=np.uint8)\n data = np.reshape(data, (3, constants.HEIGHT, constants.WIDTH))\n # Move the color channel to the end to match what Caffe wants.\n data = np.swapaxes(data, 0, 2) # Swap channel with width.\n data = np.swapaxes(data, 0, 1) # Swap width with height, to yield final h x w x channel.\n\n input_vectors.append(data)\n expected_targets.append(datum.label)\n\n db.close()\n\n print \"\\t\\tValidation data has %d images\" % len(input_vectors)\n\n return {\n \"input_vectors\": np.asarray(input_vectors),\n \"expected_targets\": np.asarray(expected_targets)\n }", "def _loadTest(self, features, labels):\n\t\tself.testX_, self.testY_, self.testLabel_ = self.__load(features, labels)", "def load_features_and_labels(features_dir, labels_dir):\n # load labels\n labels = pd.read_csv(glob.glob('{}/*.csv'.format(labels_dir))[0])\n labels = labels.sort_values(by='bookingID')\n\n # load features\n features = load_features(features_dir)\n\n return features, labels", "def loadTrainingData():\n summaryFile = '../Summarization/Data/training_data_arabic_features.dat'\n dataFile = '../Summarization/Data/dataset_info_arabic.dat'\n with open(summaryFile, 'rb') as sumFile:\n summaries = pickle.load(sumFile, encoding='bytes')\n with open(dataFile, 'rb') as datFile:\n data = pickle.load(datFile, encoding='bytes')\n return summaries, data", "def load_training_data(self):\n self.flux_data = pd.read_csv(settings['RAW_TRAINING_PATH'])\n self.meta_data = pd.read_csv(settings[\"RAW_TRAINING_METADATA_PATH\"])\n\n # Label folds\n y = self.meta_data['target']\n folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=1)\n kfold_indices = -1*np.ones(len(y))\n for idx, (fold_train, fold_val) in enumerate(folds.split(y, y)):\n 
kfold_indices[fold_val] = idx\n self.meta_data['fold'] = kfold_indices\n\n self.dataset_name = 'train'", "def load_data_and_labels(self):\n gen = image.ImageDataGenerator()\n target_size = (224,224)\n if self.preprocess:\n print('Preprocessing data...')\n if not os.path.isdir(self.pproc_dir()):\n os.mkdir(self.pproc_dir())\n \n batch_arr = []\n for ld,segment in [(self.train_dir(), 'train'),\n (self.valid_dir(), 'valid')]:\n # TODO(ness): segment = os.basename(ld)\n flowgen = gen.flow_from_directory(\n ld,\n target_size=target_size,\n shuffle=False,\n class_mode=None,\n batch_size=1)\n # Save the batches using method defined in utils.py\n data = np.concatenate([flowgen.next() for i in range(flowgen.n)])\n batches_dir = self.pproc_dir() + segment + '-bc'\n save_array(batches_dir, data)\n \n # Save the classes.\n cls_dir = self.pproc_dir() + segment + '-cl'\n save_array(cls_dir, flowgen.classes)\n \n batch_arr.append((data, flowgen.classes, flowgen.class_indices))\n \n # Set the data.\n self.training_data = batch_arr[0][0]\n self.validation_data = batch_arr[1][0]\n \n # Classes are zero-indexed and represent a category in\n # numerical form. So if the classes are 'dog' and 'cat',\n # the possible class values will be 0 and 1.\n self.trn_classes = batch_arr[0][1]\n self.val_classes = batch_arr[1][1]\n \n # Labels are the one-hot encoded (i.e. categorical)\n # version of the classes. In other words, if there are\n # 5 classes and an element belongs to class 2,\n # its label will be [0,0,1,0,0] (index 1).\n self.training_labels = to_categorical(batch_arr[0][1])\n self.validation_labels = to_categorical(batch_arr[1][1])\n \n # Class indices are dictionaries of the form\n # {'category_name': 0, 'category_name_2: 1}. They\n # make the mapping between numerical class indices and\n # a human-readable category name. They are (should be...)\n # the same for validation and training, so only load them\n # once, after sanity checking.\n self.cindices = batch_arr[0][2]\n print('Done preprocessing.')\n else:\n print('Loading data...')\n # Load the pre-saved data using methods defined in utils.py. See\n # preprocessing branch for the meaning of the data.\n self.training_data = load_array(self.pproc_dir() + 'train-bc')\n self.validation_data = load_array(self.pproc_dir() + 'valid-bc')\n self.trn_classes = load_array(self.pproc_dir() + 'train-cl')\n self.val_classes = load_array(self.pproc_dir() + 'valid-cl')\n self.training_labels = to_categorical(self.trn_classes)\n self.validation_labels = to_categorical(self.val_classes)\n \n # To get the class indices, we create the generator. 
It's cheap to\n # run since it doesn't actually load all the data.\n flowgen = gen.flow_from_directory(\n self.train_dir(),\n target_size=target_size,\n shuffle=False,\n class_mode=None,\n batch_size=1) \n self.cindices = flowgen.class_indices\n print('Done loading.')", "def read_data(feature_file, label_file):", "def load_data_and_labels():\n # Load data from files\n positive_examples = []\n for file in os.listdir('with_datarace'):\n filename = os.fsdecode(file)\n ast_file = open('with_datarace\\\\' + filename, 'r')\n token_vector = ast_file.read()\n positive_examples.append(token_vector)\n file_names.append(filename)\n\n negative_examples = []\n for file in os.listdir('without_datarace\\\\'):\n filename = os.fsdecode(file)\n ast_file = open('without_datarace\\\\' + filename, 'r')\n token_vector = ast_file.read()\n negative_examples.append(token_vector) # List of lists\n file_names.append(filename)\n\n positive_examples = [s.strip() for s in positive_examples]\n negative_examples = [s.strip() for s in negative_examples]\n\n # Split by words\n x_text = positive_examples + negative_examples # why we didn't cobine it from the beginning?\n x_text = [clean_str(sent) for sent in x_text]\n x_text = [s.split(\" \") for s in x_text]\n\n # Generate labels\n positive_labels = [[0, 1] for _ in positive_examples]\n negative_labels = [[1, 0] for _ in negative_examples]\n y = np.concatenate([positive_labels, negative_labels], 0)\n\n return [x_text, y]", "def load_data_and_labels():\n # Load data from files\n positive_examples = list(open(\"./data/rt-polaritydata/rt-polarity.pos\", \"r\").readlines())\n positive_examples = [s.strip() for s in positive_examples]\n negative_examples = list(open(\"./data/rt-polaritydata/rt-polarity.neg\", \"r\").readlines())\n negative_examples = [s.strip() for s in negative_examples]\n # Split by words\n x_text = positive_examples + negative_examples\n x_text = [clean_str(sent) for sent in x_text]\n # pos/neg label is 0 or 1\n pos_label = np.zeros(len(positive_examples))\n neg_label = np.ones(len(negative_examples))\n labels = np.concatenate((pos_label, neg_label))\n # Generate labels is [0,1] or [1,0]\n positive_labels = [[0,1] for _ in positive_examples]\n negative_labels = [[1,0] for _ in negative_examples] \n y = np.concatenate([positive_labels, negative_labels], 0)\n build_word2vec_vocabulary('data/',x_text, labels,'data/word2vec.cpkl')\n \n return x_text, y", "def _load_training_and_test_sets(normalize):\n class_labels = []\n test_labels = []\n norm = None\n if normalize == True:\n norm = loading.get_normalize_vector()\n\n for i in range(0, 10):\n [training, test] = loading.load_number_set(i, 0.7, norm_vector=norm)\n labels = [str(i)] * training.shape[0]\n tlabels = [str(i)] * test.shape[0]\n if i == 0:\n train_points = training\n test_points = test\n else:\n train_points = np.concatenate((train_points, training), axis = 0)\n test_points = np.concatenate((test_points, test), axis = 0)\n class_labels.extend(labels)\n test_labels.extend(tlabels)\n\n return train_points, test_points, class_labels, test_labels", "def load_data_and_labels():\n # Load data from files\n positive_examples = list(open(\"/Users/guo/TrainData/rt-polaritydata/rt-polarity.pos\").readlines())\n positive_examples = [s.strip() for s in positive_examples]\n negative_examples = list(open(\"/Users/guo/TrainData/rt-polaritydata/rt-polarity.neg\").readlines())\n negative_examples = [s.strip() for s in negative_examples]\n # Split by words\n x_text = positive_examples + negative_examples\n x_text = 
[clean_str(sent) for sent in x_text]\n x_text = [s.split(\" \") for s in x_text]\n # Generate labels\n positive_labels = [1 for _ in positive_examples]\n negative_labels = [0 for _ in negative_examples]\n y = np.concatenate([positive_labels, negative_labels], 0)\n return [x_text, y]", "def load_dataset():\n\n train_dataset = h5py.File('datasets/train_catvnoncat.h5', \"r\")\n train_set_x_orig = np.array(train_dataset[\"train_set_x\"][:]) # your train set features\n train_set_y_orig = np.array(train_dataset[\"train_set_y\"][:]) # your train set labels\n\n test_dataset = h5py.File('datasets/test_catvnoncat.h5', \"r\")\n test_set_x_orig = np.array(test_dataset[\"test_set_x\"][:]) # your test set features\n test_set_y_orig = np.array(test_dataset[\"test_set_y\"][:]) # your test set labels\n\n classes = np.array(test_dataset[\"list_classes\"][:]) # the list of classes\n \n train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))\n test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))\n \n return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes", "def create_train_and_label_sets(self):\r\n\t\ttraining_set = pd.read_csv(self.training_f_name)\r\n\t\tlabels = training_set.label.values #.apply(lambda x: str(x)).values\r\n\t\ttraining_set.drop(\"label\", inplace=True, axis=1)\r\n\t\ttrain_data = training_set.values\r\n\t\tself.dtrain = xgb.DMatrix(train_data, label=labels)\r\n\t\tprint(\"training set loaded\")", "def load_data(training_file):\r\n\r\n f = open(training_file, \"r\")\r\n lines = f.readlines()\r\n number_of_feature_vectors = len(lines)\r\n number_of_training_vectors = math.floor(number_of_feature_vectors * 0.8)\r\n \r\n trainset = []\r\n trainset_label = []\r\n testset = []\r\n testset_label = []\r\n \r\n if number_of_feature_vectors == 0:\r\n return (None, None, None, None)\r\n if number_of_training_vectors == 0:\r\n return (None, None, None, None)\r\n \r\n \r\n (_arg0, _arg1, dimension) = parse_line(lines[0])\r\n count = 0\r\n for line in lines:\r\n (_label, _feature_vector, _dimension) = parse_line(line)\r\n if _dimension != dimension:\r\n print \"[Error] Inconsistent dimension found in the training data\"\r\n return (None, None, None, None)\r\n if count < number_of_training_vectors:\r\n trainset.append(_feature_vector)\r\n trainset_label.append(_label)\r\n else:\r\n testset.append(_feature_vector)\r\n testset_label.append(_label)\r\n count = count + 1\r\n \r\n print \"len(trainset): \" + str(len(trainset))\r\n print \"len(testset): \" + str(len(testset))\r\n \r\n trainset_ndarray = np.array(trainset)\r\n trainset_labels_ndarray = np.array(trainset_label)\r\n training_data_list = [trainset_ndarray, trainset_labels_ndarray]\r\n training_data = tuple(training_data_list)\r\n \r\n testset_ndarray = np.array(testset)\r\n testset_labels_ndarray = np.array(testset_label)\r\n test_data_list = [testset_ndarray, testset_labels_ndarray]\r\n test_data = tuple(test_data_list)\r\n \r\n validation_data = training_data\r\n test_data = test_data\r\n \r\n # print training_data\r\n\r\n return (training_data, validation_data, test_data, dimension)\r\n\r\n #for feature_vector in features_ndarray:\r\n # print \"feature_vector: \" + str(feature_vector)\r\n #array_type = type(features_ndarray)\r\n #print \"array_type: \" + str(array_type)\r\n \r\n # Not implemented yet\r\n #f = gzip.open('../data/mnist.pkl.gz', 'rb')\r\n #training_data, validation_data, test_data = cPickle.load(f)\r\n #f.close()\r\n #return (training_data, validation_data, 
test_data)\r", "def _load_validation_data(validation_leveldb, width, height):\n\n print \"\\tLoading validation data...\"\n input_vectors = []\n expected_targets = []\n\n db = plyvel.DB(validation_leveldb)\n for key, value in db:\n datum = Datum()\n datum.ParseFromString(value)\n\n data = np.fromstring(datum.data, dtype=np.uint8)\n data = np.reshape(data, (3, height, width))\n # Move the color channel to the end to match what Caffe wants.\n data = np.swapaxes(data, 0, 2) # Swap channel with width.\n data = np.swapaxes(data, 0, 1) # Swap width with height, to yield final h x w x channel.\n\n input_vectors.append(data)\n expected_targets.append(datum.label)\n\n db.close()\n\n print \"\\t\\tValidation data has %d images\" % len(input_vectors)\n\n return {\n \"input_vectors\": np.asarray(input_vectors),\n \"expected_targets\": np.asarray(expected_targets)\n }", "def load_mnist_test():\n # Load training data\n print(\"Loading testing data...\")\n data, label = load_csv('./data/mnist_test.csv')\n assert len(data) == len(label)\n print(\"Testing data loaded with {count} images\".format(count=len(data)))\n\n return data, label" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handles displaying recipe categories
def categories():
    return render_template('categories.html', recipe_categories=USERS[session['username']].recipe_categories)
[ "def render_categories(self, categories):", "def category(request):\n\n return render(request, \"core/category_list.html\", {\n \"category_list\": Category.objects.all()\n })", "def our_categories(request):\n\n # Render the HTML template categories.html with the data in the context variable.\n return render(\n request,\n 'categories.html',\n context={},\n )", "def all_categories(request, slug=None):\n c = {\"categories\": Node.objects.filter(kind=\"C\")}\n return render_to_response(\"categories.html\", c)", "def recipes(category):\n # if statements to display the recipes base on category name\n if category == \"Pre Workout Meal\":\n recipe = mongo.db.recipes.find({\"category_name\": \"Pre Workout Meal\"})\n elif category == \"Post Workout Meal\":\n recipe = mongo.db.recipes.find({\"category_name\": \"Post Workout Meal\"})\n else:\n recipe = mongo.db.recipes.find()\n\n return render_template('pages/allrecipe.html', recipe=recipe, category_title=category, recipes=mongo.db.recipes.find(), isFooter=True)", "def show_categories(self):\n cat_model = TreeModel(('Categories', ))\n self.categoriesView.setModel(cat_model)\n\n categories = self.orm.fetch_parents()\n for category in categories:\n item = TreeItem(category, cat_model.rootItem)\n cat_model.rootItem.appendChild(item)\n\n subs = self.orm.fetch_subcategories_for_parent(category)\n\n for sub in subs:\n sub_item = TreeItem(sub, item)\n item.appendChild(sub_item)\n\n self.categoriesView.expandAll()", "def category_choice(self):\n self.leave_category_choice = 1\n while self.leave_category_choice:\n print(fr.FR[15])\n for element in config.CATEGORIES:\n print(str(config.CATEGORIES.index(element)+1)\n + \" : \" + element)\n self.category_choice_input()", "def category(request, slug):\n categry = get_object_or_404(Category,slug=slug)\n story_list = Story.objects.filter(category=category)\n heading = \"Category: %s\" % category.label\n return render_to_response('cms/story_list.html', locals())", "def category(category, term):", "def show_categories():\n for category in NEWS_CATEGORIES:\n print(category)", "def list_categories(self):\n raise NotImplementedError()", "def find_recipes_by_category(category):\n return render_template(\"recipes.html\", recipes=mongo.db.recipes.find({\"category_name\":category}), categories=mongo.db.categories.find())", "def categories(request):\n \n # query for all active listings and initialize an empty list\n listing = Auction_listing.objects.filter(active=True)\n categories = []\n # loop over all listings, if category of current listing is not yet present in the categories list, add it there\n for lis in listing:\n if lis.category not in categories and lis.category != '':\n categories.append(lis.category)\n \n return render(request, 'auctions/categories.html', {\n 'categories': categories\n })", "def getcategory(self):\n\n response = requests.get(\"https://fr.openfoodfacts.org/categories.json\")\n\n data = response.json()\n\n self.rawcategorydata = data", "def product_category_menu():\n return {\n 'category_list': ProductCategory.objects.all()\n }", "def menu_categories(self, app: object) -> None:\n while True:\n if self.back:\n break\n else:\n self.cmd_categories = app.view_cat()\n rand_cat = random.sample(list(self.cmd_categories), 10)\n print(\"-\" * 50)\n for x in rand_cat:\n print(f\"{x} : {self.cmd_categories[x]}\")\n entry = input(\n \"\\nEntrer un chiffre pour sélectionner la catégorie correspondante : \"\n )\n if entry in self.cmd_categories:\n if entry == \"0\":\n break\n else:\n self.menu_products(app, 
entry)\n else:\n print(\"\\nCommande incorrecte\")", "def scrap_recipes_basic_info_by_category(self, category: str) -> list:\n\n self.driver.get(f'{self.BASE_URL}/{category}')\n\n try:\n button_xpath = '//*[@id=\"react-root\"]/div[2]/button'\n button = self.driver.find_element_by_xpath(button_xpath)\n\n while button:\n button.click()\n button = self.driver.find_element_by_xpath(button_xpath)\n sleep(1)\n except Exception:\n pass\n\n ul = self.driver.find_element_by_xpath(\n '//*[@id=\"react-root\"]/div[2]/div/div/ul')\n ul_li = ul.find_elements_by_tag_name('li')\n recipes = []\n names = set()\n\n for li in ul_li:\n url = li.find_element_by_tag_name('a').get_attribute('href')\n name = li.find_element_by_class_name(\n 'MediaCard__Title-zlkxh-3').text\n image_url = self.get_url_from_selenium_element(li)\n\n # avoid duplicates\n if name in names:\n continue\n names.add(name)\n\n recipes.append({\n 'url': url,\n 'name': name,\n 'category': category.split('-')[-1],\n 'image_url': image_url\n })\n\n return recipes", "def insert_recipe_category():\r\n\r\n # validates request form\r\n form = request.form\r\n error_list = validate_form(form, 'recipe_category')\r\n\r\n if error_list == []:\r\n # validates image URL\r\n image_URL = validate_image(form['img_link'])\r\n\r\n # inserts recipe category\r\n recipe_category = {\r\n 'name': request.form.get('name'),\r\n 'img_link': image_URL,\r\n 'number_of_recipes': 0\r\n }\r\n mongo.db.recipe_categories.insert_one(recipe_category)\r\n\r\n # redirects to the recipe category search\r\n return redirect(url_for(\r\n 'search',\r\n collection='recipe_categories')\r\n )\r\n\r\n else:\r\n # initializes page title and header\r\n page_title = 'Add recipe category'\r\n page_header = 'Add a new recipe category:'\r\n\r\n # sends error list back to the form to correct mistakes\r\n return render_template(\r\n 'add_form.html',\r\n errors=error_list,\r\n form=form,\r\n page_title=page_title,\r\n page_header=page_header\r\n )", "def categories_menu():\n categories = ['EU-affairs', 'Economy', 'Security', 'Society', 'World']\n\n for category in categories:\n url = build_url({'mode': 'Topic', 'foldername': category, 'page': 1})\n li = xbmcgui.ListItem(category, iconImage='DefaultFolder.png')\n xbmcplugin.addDirectoryItem(handle=ADDON_HANDLE, url=url, listitem=li, isFolder=True)\n\n xbmcplugin.endOfDirectory(ADDON_HANDLE)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Inserts a book into the book table
def insert_book(self, title, author, year, isbn):
    self.cursor.execute("INSERT INTO Book VALUES(NULL, ?, ?, ?, ?)", (title, author, year, isbn))
    self.connection.commit()
[ "def insert_book(title, author, year, isbn):\n connection = sqlite3.connect(\"books.db\")\n cursor = connection.cursor()\n cursor.execute(\"INSERT INTO Book VALUES(NULL, ?, ?, ?, ?)\", (title, author, year, isbn))\n connection.commit()\n connection.close()", "def insert_books():\n for _, row in books.iterrows():\n book = Book(book_id=row['book_id'],\n authors=row['authors'],\n year=row['original_publication_year'],\n title=row['title'],\n language=row['language_code'])\n book.insert()", "def add_book(self, book):\n\n try:\n with self._db as db:\n cur = db.cursor()\n cur.execute('INSERT INTO books values (?, ?, ?)', (book.title, book.author, book.read))\n book.id = cur.lastrowid\n except sqlite3.IntegrityError:\n raise BookError('This book is already in the database')\n except sqlite3.Error as e:\n raise BookError(f'Error adding book {book}') from e", "def insert_book():\n books = mongo.db.books\n books.insert_one(request.form.to_dict())\n return redirect(url_for('books'))", "def create(self, book):\n db = self.getBookDB()\n value = self.getValueFromBook(book)\n db.execute('INSERT INTO books VALUES (?,?,?,?,?,?)', value)\n self.conn.commit()\n return book.get('_id')", "def add_to_database(self):\r\n url = root_url + \"/book/b/\"\r\n barcode = self.add_barcode()\r\n book_title = input(\"Please enter book title: \")\r\n book_author = input(\"Please enter book author: \")\r\n book_published = input(\"Please enter book publish date: \")\r\n send_data = {\"Title\":book_title,\"Author\":book_author,\"PublishedDate\":book_published,\"BarcodeData\":barcode}\r\n requests.put(url,data=json.dumps(send_data),headers={'Content-Type': 'application/json'})\r\n print(\"Successfully added this book!\")", "def insert_book():\n book = mongo.db.book\n book.insert_one(request.form.to_dict())\n return redirect(url_for(\"get_book\"))", "def insert_record(self, record):\n positional_record = (\n record['Book Title'].strip(),\n record['First Name'].strip(),\n record['Last Name'].strip(),\n record['Book Publication Date'].strip()\n )\n self.cursor.execute(self.INSERT_BOOK_SQL, positional_record)", "def save_book(self):\n db.session.add(self)\n db.session.commit()", "def insert(self):\n database.insert(self.title_box.get(), \\\n self.author_box.get(), \\\n self.year_box.get(), \\\n self.isbn_box.get())\n self.view_box.delete(0,END)\n self.view_box.insert(END,(self.title_value.get(), \\\n self.author_value.get(), \\\n self.year_value.get(), \\\n self.isbn_value.get()))\n messagebox.showinfo(\"Saved Successfully\", \"The book was saved successfully\")", "def post_book():\n\n book_info = request.get_json()\n if (len(book_info) != 1) or (book_info is None):\n return \"Bad request.Please enter only one valid book information\", 400\n\n else:\n db.books.insert_one(book_info[0])\n return \"Successfully added one book.\", 201", "def create_and_add(self, title, author):\n book = Book.query.filter_by(title=title, author=author).first()\n self.add(book or Book(title, author))", "def create_book():\n Book.objects.create(book_id=\"test_id\",\n title=\"test_title\",\n authors=\"test_author\",\n published_date=\"2021\",\n categories=[\"test_category\"],\n average_rating=5,\n ratings_count=5,\n thumbnail=\"http://books.google.com/books/test\"\n )", "def insert_books_data():\n # Get data from csv file\n print(\"Getting data from csv..\")\n file = open(\"books.csv\")\n reader = csv.reader(file)\n\n # Insert csv data into table\n print(\"Inserting data into 'books' table..\")\n for isbn, title, author, year in reader:\n try:\n 
db.execute(\"INSERT INTO books (isbn, title, author, year)\\\n VALUES (:isbn, :title, :author, :year)\", {\n \"isbn\": isbn, \"title\": title, \"author\": author, \"year\": year })\n except exc.DataError as err:\n print(\"Invalid entry in csv file\")\n db.commit()\n print(\"Data inserted\")", "def register_book(self, title: str, author: str, price: float, barcode: str, stock=0):\n try:\n if not self.verify_register(barcode):\n self.db.cursor.execute('INSERT INTO books (title, author, price, bar_code, stock) VALUES (%s, %s, %s, '\n '%s, %s)', (title, author, round(price, 2), barcode, stock))\n self.db.con.commit()\n self.db.con.close()\n print('Registered Successfully!')\n else:\n print('Book already registered!')\n except Exception as error:\n print(error)", "def create_book(title, author, completion):\n return Book.objects.create(title=title, author=author, completion=completion)", "def create_book(self, name, author):\n\n # Validates type\n if not TypeUtils.all_of_type(name, author, var_type=str):\n print(ErrorMsgUtils.type_error(name, author, var_type=str))\n return\n\n # Validates existence\n book_to_check = self.find_by_name(name)\n if book_to_check is not None:\n print(ErrorMsgUtils.already_exists(name))\n return\n\n # SQL & Execution\n sql = \"INSERT INTO BOOKS (NAME, AUTHOR) VALUES (%s, %s)\"\n values = (f\"{name}\", f\"{author}\")\n try:\n self.cursor.execute(sql, values)\n self.db.commit()\n except mysql.connector.Error as error:\n print(ErrorMsgUtils.display_error(error))\n # Return None\n return None\n else:\n print(self.cursor.rowcount, \"was inserted.\")\n print(\"ID: \", self.cursor.lastrowid)\n # Return the Book\n return Book(name=name, author=author, id=self.cursor.lastrowid)", "def add_book():\n errors = check_library_keys(request)\n if errors:\n return raise_error(400, \"Invalid {} key\".format(', '.join(errors)))\n details = request.get_json()\n admission_no = details['admission_no']\n title = details['title']\n author = details['author']\n book_no = details['book_no']\n if UsersModel().get_user_by_admission(admission_no):\n response = LibraryModel(admission_no,\n title,\n author,\n book_no).save()\n return Serializer.serialize(response, 201, \"Book added successfully\")\n return raise_error(404, \"Student not found\")", "def add(self, path, title, author):\n path = path.decode('utf8')\n title = title.decode('utf8')\n author = author.decode('utf8')\n cursor = self._dbcon.cursor()\n filename = os.path.basename(path)\n dirname = os.path.dirname(path)\n t = (title, author, filename, dirname)\n sql = u\"insert into books values (?, ?, ?, ?)\"\n cursor.execute(sql, t)\n self._dbcon.commit()\n cursor.close()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates a book in the book database
def update(self, id, title, author, year, isbn):
    self.cursor.execute("UPDATE Book SET Title = ?, Author = ?, Year = ?, \
                         ISBN = ? WHERE Id = ?", (title, author, year, isbn, id))
    self.connection.commit()
[ "def updateBook():\n id = request.form.get(\"id\")\n addBookForm = AddEditBookForm(request.form)\n newISBN = addBookForm.book_ISBN.data\n newTitle = addBookForm.book_title.data\n newAuthor = addBookForm.book_author.data\n requests.put(API_IP + \"book/\"+id,\n json={\"ISBN\": newISBN,\n \"Title\": newTitle,\n \"Author\": newAuthor})\n return redirect(\"/booklist\")", "def put(self, book_id):\n a_book = query_book_by_id(book_id)\n if a_book is None:\n return 'Book does not exit', 404\n body = request.get_json()\n a_book.parse_body(body)\n db.session.add(a_book)\n db.session.commit()\n return a_book.serialize(), 200", "def update_audiobook(_id, _title_of_the_audiobook, _author_of_the_title, _narrator,\r\n _duration_in_number_of_seconds):\r\n audiobook_to_update = Audiobook.query.filter_by(id=_id).first()\r\n audiobook_to_update.title_of_the_audiobook = _title_of_the_audiobook\r\n audiobook_to_update.author_of_the_title = _author_of_the_title\r\n audiobook_to_update.narrator = _narrator\r\n audiobook_to_update.duration_in_number_of_seconds = _duration_in_number_of_seconds\r\n db.session.commit()", "def update(self):\n self.view_box.delete(0,END)\n database.update(self.selected_tuple[0], \\\n self.title_value.get(), \\\n self.author_value.get(), \\\n self.year_value.get(), \\\n self.isbn_value.get())\n self.view_box.insert(END,(self.title_value.get(), \\\n self.author_value.get(), \\\n self.year_value.get(), \\\n self.isbn_value.get()))\n messagebox.showinfo(\"Update Successfully\", \"The book was updated successfully\")", "def editBookByID(id: int, title: str, yearOfPublication: str, genre: str, authorID: int):\n if not id:\n abort(400)\n book = Book.query.get(id)\n if not book:\n abort(404, \"Book is not found\")\n if title:\n book.title = title\n if yearOfPublication:\n book.yearOfPublication = yearOfPublication\n if genre:\n book.genre = genre\n if authorID:\n book.authorID = authorID\n db.session.commit()\n app.logger.info(f\"The book {id} has been edited\")", "def update(self, book_info, destroy):\n self.connect()\n is_issue = len(book_info) == 2\n\n bid = book_info[0].get()\n if is_issue:\n issue_to = book_info[1].get()\n\n if is_issue:\n extract_bid = f\"select bid from {self.book_table}\"\n else:\n extract_bid = f\"select bid from {self.issued_table}\"\n\n status = False\n try:\n self.cur.execute(extract_bid)\n self.con.commit()\n for i in self.cur:\n self.all_bid.append(i[0])\n\n if bid in self.all_bid:\n check_avail = f\"select status from {self.book_table} where \" \\\n f\"bid = '{bid}'\"\n self.cur.execute(check_avail)\n self.con.commit()\n check = None\n for i in self.cur:\n check = i[0]\n\n if (is_issue and check == 'avail'\n or not is_issue and check == 'issued'):\n status = True\n else:\n status = False\n else:\n messagebox.showinfo(\"Error\", \"Book ID not present\")\n except MySQLError as err:\n messagebox.showinfo(\"Error\", \"Can't fetch Book IDs\")\n print(err)\n\n if is_issue:\n issue_sql = f\"insert into {self.issued_table} values ('{bid}',\" \\\n f\"'{issue_to}')\"\n up_status = f\"update {self.book_table} set status = 'issued' \" \\\n f\"where bid = '{bid}'\"\n else:\n issue_sql = f\"delete from {self.issued_table} where bid = '{bid}'\"\n up_status = f\"update {self.book_table} set status = 'avail' \" \\\n f\"where bid = '{bid}'\"\n\n try:\n if bid in self.all_bid and status:\n self.cur.execute(issue_sql)\n self.con.commit()\n self.cur.execute(up_status)\n self.con.commit()\n if is_issue:\n msg = \"Book Issued Successfully\"\n else:\n msg = \"Book Returned 
Successfully\"\n state = 'Success'\n else:\n if is_issue:\n msg = \"Book Already Issued\"\n else:\n msg = \"Please check the book ID\"\n state = \"Message\"\n messagebox.showinfo(state, msg)\n except MySQLError as err:\n messagebox.showinfo(\n \"Search Error\", \"The value entered is wrong, Try again\"\n )\n print(err)\n self.all_bid.clear()\n destroy()", "def update_by_id(cls, id, name, author_id):\n\t\tbook = Book.query.get(id)\n\t\tbook.name = name\n\t\tbook.authors_id = author_id\n\t\tdb.session.commit()", "def edit_book(id):\n flash('Thank you, Thumbs Up Added!')\n the_book = NewBook.query.get_or_404(id)\n the_book.google_id = request.form[\"id\"]\n the_book.title = request.form[\"title\"]\n the_book.author = request.form[\"author\"]\n the_book.thumbs_up = request.form[\"thumbs_up\"]\n the_book.isbn = request.form[\"isbn\"]\n\n db.session.add(the_book)\n db.session.commit()\n\n return redirect(f'/book_details/{the_book.google_id}')", "def test_api_can_update_book(self):\n\n\t\t# create book\n\t\tadd_book = {\n\t\t\t'title': 'Hello Books',\n\t\t\t'isbn': '5698745124'\n\t\t}\n\t\tlogin_data = self.login_test_user()\n\t\ttoken = login_data['auth_token']\n\t\tres = self.client.post(\n\t\t\tf'{URL_BOOKS}',\n\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\tcontent_type='application/json',\n\t\t\tdata=json.dumps(add_book)\n\t\t)\n\n\t\t# update book\n\t\tbook = self.client.put(\n\t\t\tf'{URL_BOOKS}/1',\n\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\tcontent_type='application/json',\n\t\t\tdata=json.dumps(\n\t\t\t\tdict(\n\t\t\t\t\ttitle='updated book'\n\t\t\t\t)\n\t\t\t)\n\t\t)\n\n\t\tbook_res = json.loads(book.data.decode())\n\t\tself.assertTrue(book_res['title'] == 'updated book')", "def test_update_book_details(self):\n\n first_book_list = BookList()\n first_book = Book()\n\n first_book.create_book({\n \"title\": \"First Man\",\n \"author\": \"James R. Hansen\",\n \"year\": 2005,\n \"publisher_name\": \"Simon & Schuster\",\n \"publication_date\": \"01/01/2018\",\n \"num_copies\": 1\n })\n\n first_book_list.add_book(first_book)\n\n new_book_details = {\n \"title\": \"First Man\",\n \"author\": \"James Hansen\",\n \"year\": 2018,\n \"publisher_name\": \"Simon & Schuster\",\n \"publication_date\": \"01/01/2018\",\n \"num_copies\": 5\n }\n\n assert first_book_list.update_book_details(new_book_details) == True\n assert first_book_list.find_book(\"First Man\") == True\n\n for book in first_book_list.show_all():\n assert book.get(\"title\") == \"First Man\"\n assert book.set(\"title\", \"First Man: The Life of Neil A. Armstrong\") == True\n\n assert first_book_list.find_book(\"First Man: The Life of Neil A. 
Armstrong\") == True", "def update_price_books(self, barcode, new_price):\n try:\n self.db.cursor.execute('UPDATE books SET price = %s where id_books = %s', (round(new_price, 2), barcode))\n except Exception as error:\n print(error)\n else:\n self.db.con.commit()\n self.db.con.close()\n print('Updated Successfully!')", "def update_book():\n try:\n key = list(request.args.keys())[0]\n val = request.args[key].strip('\"')\n data = request.get_json()\n filter = {key: val}\n except IndexError:\n queryVal = request.form.to_dict()\n filter_val, change_to_val = parse_filter_newValue(queryVal)\n filter = {filter_val[0]: filter_val[1]}\n data = {change_to_val[0]: change_to_val[1]}\n if all(value == '' for value in data.values()) or all(value == '' for value in filter.values()):\n print('here tho')\n return render_template('error.html', message=\"Please enter both fields\"), 400\n new_values = {\"$set\": data}\n mongo.db.Books.update_one(filter, new_values, upsert=False)\n\n return render_template(\"updated_book.html\", message=\"Book Has been updated\"), 200\n # return jsonify({'result': \"Successfully Updated\"}), 200", "def edit_book(book_id):\n the_book = mongo.db.book.find_one({\"_id\": ObjectId(book_id)})\n return render_template(\"editbook.html\", book=the_book)", "def update_command(self):\n self.database.update(selected_tuple[0], self.title.get(), self.author.get(),\n self.year.get(), self.isbn.get())", "def update_command():\n AppbookstoredbBACKEND.update_data(selected_tuple[0],title_text.get(),author_text.get(),year_text.get(),isbn_text.get())", "def add_book(self, book):\n\n try:\n with self._db as db:\n cur = db.cursor()\n cur.execute('INSERT INTO books values (?, ?, ?)', (book.title, book.author, book.read))\n book.id = cur.lastrowid\n except sqlite3.IntegrityError:\n raise BookError('This book is already in the database')\n except sqlite3.Error as e:\n raise BookError(f'Error adding book {book}') from e", "def test_partly_update_book(self):\n data = {'isbn':'96712116-2'}\n response = self.client.patch(self.book.get_absolute_url(), data, format='json', content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n response = self.client.get(self.book.get_absolute_url())\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertContains(response, '96712116-2')", "def edit_book(book_id):\n book = mongo.db.books.find_one({\"_id\": ObjectId(book_id)})\n # if edit_book form submitted get information from form\n if request.method == \"POST\":\n # if admin get title from form otherwise from the book document\n if admin():\n title = request.form.get(\"title\").lower()\n else:\n title = mongo.db.books.find_one(\n {\"_id\": ObjectId(book_id)})[\"title\"]\n # update document with information from form, display message and\n # redirect to library\n is_series = \"Yes\" if request.form.get(\"series\") else \"No\"\n update = {\n \"title\": title,\n \"author\": request.form.get(\"author\").lower(),\n \"synopsis\": request.form.get(\"synopsis\"),\n \"series\": is_series,\n \"series_name\": request.form.get(\"series_name\").lower(),\n \"genre\": request.form.get(\"genre\"),\n \"cover_image\": request.form.get(\"cover_image\"),\n \"rating\": int(request.form.get(\"rating\")),\n \"review\": request.form.get(\"review\"),\n }\n mongo.db.books.update({\"_id\": ObjectId(book_id)}, {\"$set\": update})\n flash(f\"Thankyou {title.title()} has been updated\")\n return redirect(url_for(\"library\"))\n # Check user is logged in before using edit_book template and 
pass\n # in genre, book and admin variables\n elif \"user\" in session:\n if admin():\n return render_template(\n \"edit-book.html\", genres=genres(), book=book, admin=admin)\n elif session[\"user\"] == book[\"added_by\"]:\n return render_template(\n \"edit-book.html\", genres=genres(), book=book)\n else:\n flash(\"You can't edit that\")\n return redirect(url_for(\"library\"))\n else:\n flash(\"You need to be logged in to do that!\")\n return redirect(url_for(\"sign_in\"))", "def add_to_database(self):\r\n url = root_url + \"/book/b/\"\r\n barcode = self.add_barcode()\r\n book_title = input(\"Please enter book title: \")\r\n book_author = input(\"Please enter book author: \")\r\n book_published = input(\"Please enter book publish date: \")\r\n send_data = {\"Title\":book_title,\"Author\":book_author,\"PublishedDate\":book_published,\"BarcodeData\":barcode}\r\n requests.put(url,data=json.dumps(send_data),headers={'Content-Type': 'application/json'})\r\n print(\"Successfully added this book!\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a UI from a SQL table and return a ``Panel``
def create_tables(name, role, doc, options, connection):
    if role == 'data_samples':
        print(f'create data_samples={name}')
        data_table = TabsDynamicData(doc, options, connection, name, role, creator_fn=process_data_samples)
        panel = Panel(child=data_table.get_ui(), title=name, name=f'data_samples_{name}_main_panel')
    elif role == 'data_tabular':
        print(f'create data_tabular={name}')
        data_table = TabsDynamicData(doc, options, connection, name, role, creator_fn=process_data_tabular)
        panel = data_table.get_ui()
    elif role == 'data_graph':
        print(f'create data_graph={name}')
        data_table = TabsDynamicData(doc, options, connection, name, role, creator_fn=process_data_graph)
        panel = data_table.get_ui()
    else:
        raise NotImplementedError(f'role not implemented={role}')

    assert isinstance(panel, Panel), f'unexpected type={type(panel)}'

    # handle the table preamble here
    metadata_name = get_metadata_name(name)
    metadata = get_table_data(connection, metadata_name)
    preamble = metadata.get('table_preamble')
    if preamble is not None and len(preamble[0]) > 0:
        # we have a preamble for our table. Add a ``Div`` widget to y_axis the preamble
        child = column(Div(text=preamble[0]), panel.child, sizing_mode='stretch_both')
        panel.update(child=child)

    print(f'table={name} created!')
    return panel
[ "def makeTableWidget(self):\n from collective.table.browser.table import TableWidget\n context = self.portal.table\n widget = TableWidget(context, None)\n widget.fieldName = 'table'\n return widget", "def create_layout(datatable, table_df):\n return html.Div(\n id='database-table-container',\n children=[dcc.Dropdown(\n id='type-dropdown',\n options=[{'label': i, 'value': i} for i in table_df.type.unique() if i],\n multi=True,\n placeholder='Filter commands by type'\n ),\n datatable,\n html.Div(id='callback-container'),\n html.Div(id='container-button-basic',\n children=[\n html.Div(id='save-status')\n ]),\n ])", "def create_panel(self):\n return\n # return Panel(self)", "def create_widget(self):\n self.widget = QTableView(self.parent_widget())\n self.widget.setAttribute(Qt.WA_StaticContents, True)", "def setUpContainer(self):\n return DynamicTable(name='electrodes', description='a placeholder table')", "def setUpContainer(self):\n return DynamicTable(name='trials', description='a placeholder table') # this will get ignored", "def create_table(self) -> NoReturn:\n # If table don't exist, Create.\n if not self.table_exists():\n # Create a table with the appropriate Columns\n db.Table(self._WIDGET_DB_TABLE_NAME, db.MetaData(bind=db.engine),\n db.Column('id', db.Integer, primary_key=True),\n db.Column('user_id', db.Integer, nullable=False),\n db.Column('data', JSON, nullable=False),\n db.Column('layout_id', db.Integer,\n db.ForeignKey('layouts.id')),\n db.relationship('Layouts',\n backref=db.backref('layouts', lazy=True)),\n schema=None).create()", "def __init__(self, rowStyles=None, containerIndex=1, **kwargs):\n \n if rowStyles is None:\n rowStyles = self.DEFAULT_ROW_STYLENAMES\n\n if kwargs.has_key('Element'):\n self.table = kwargs.pop('Element')\n fc = DOM.getFirstChild(self.table)\n if fc:\n self.tbody = fc\n else:\n self.tbody = DOM.createTBody()\n DOM.appendChild(self.table, self.tbody)\n else:\n # Add a tbody\n self.table = DOM.createTable()\n self.tbody = DOM.createTBody()\n DOM.appendChild(self.table, self.tbody)\n DOM.setAttribute(self.table, \"cellSpacing\", \"0\")\n DOM.setAttribute(self.table, \"cellPadding\", \"0\")\n\n if not kwargs.has_key('StyleName'): kwargs['StyleName']=self.DEFAULT_STYLENAME\n SimplePanel.__init__(self, self.table, **kwargs)\n\n # Add each row\n for i in range(len(rowStyles)): \n row = self.createTR(rowStyles[i])\n DOM.appendChild(self.tbody, row)\n if i == containerIndex:\n self.containerElem = DOM.getFirstChild(DOM.getChild(row, 1))", "def create_table():\n # print(schdl)\n # quedarse solo con un dia!!\n # hacer la traspuesta\n\n # Añadir a la tabla de botones\n\n schdl = aulas.get_aula()\n\n keyboard = []\n for aula in schdl:\n # print(aula)\n row = []\n for h in aula:\n row.append(InlineKeyboardButton(h, callback_data='1'))\n # print(h)\n print(\"====\")\n keyboard.append(row)\n\n return InlineKeyboardMarkup(keyboard)", "def generate_panel(self):\r\n \r\n self.PanelData = self.RawData.filter(['ID', 'X', 'Z', 'W', 'R', 'β', 'LFP', 'H'], axis=1)", "def __init__(self, window, database):\r\n super().__init__()\r\n self.window = window\r\n self.mainLayout = QGridLayout()\r\n # Creating the Widgets\r\n self.createLabels(database)\r\n self.createLineWidgets()\r\n self.createButtons()\r\n # Adding the Table widget in function of the database name\r\n self.addTable(database)\r\n # Connecting the buttons with their function\r\n self.buttons[\"REFRESH TABLE\"].clicked.connect(\r\n partial(self.addTable, database))\r\n self.buttons[\"SAVE\"].clicked.connect(\r\n 
partial(self.addItem, database))\r\n self.buttons[\"DELETE\"].clicked.connect(\r\n partial(self.deleteRow, database))\r\n self.buttons[\"ADD NULL DAY\"].clicked.connect(\r\n partial(self.addingNull, database))", "def getDataPanel(self, onlyactive = False):\n trdatadict = {tr.getSettings():tr.data for tr in self.getTestRuns(onlyactive)}\n return pd.Panel(trdatadict)", "def setUpContainer(self):\n return DynamicTable(name='units', description='a placeholder table')", "def build_ui(self):\n\n pass", "def create_widgets(self):", "def select_table(self):\n\n selected = self.mylist.selection_get()\n data = self.read_table(selected)\n db_frame = self.db_frame\n\n db_frame.pack(side=\"left\", fill=\"both\")\n col_names = tuple((\"heading%d\" % i for i in range(len(data[0]))))\n if not self.Tree:\n self.Tree = Treeview(db_frame, columns=col_names)\n else:\n self.Tree.destroy()\n self.scrollbarY.destroy()\n self.scrollbarX.destroy()\n self.Tree = Treeview(db_frame, columns=col_names)\n self.scrollbarY = Scrollbar(db_frame)\n self.scrollbarX = Scrollbar(db_frame, orient=HORIZONTAL)\n self.Tree.config(yscrollcommand=self.scrollbarY.set,\n xscrollcommand=self.scrollbarX.set)\n\n for x in data:\n self.Tree.insert('', 'end', values=x)\n for col in col_names:\n self.Tree.heading(col, text=col)\n self.scrollbarY.config(command=self.Tree.yview)\n self.scrollbarY.pack(side='right', fill=Y)\n self.scrollbarX.config(command=self.Tree.xview)\n self.scrollbarX.pack(side='bottom', fill=X)\n self.Tree.pack(side='left', fill='both')", "def get_widget(id):\n query = \"select * from widget where id=?\"\n db = get_db()\n db.execute(query, (id))\n rows = db.fetchall()\n\n if len(rows) != 1:\n raise (\"unexpected number of results\")\n\n id, name, parts, created, updated = rows[0]\n w = Widget()\n w.id = id\n w.name = name\n w.parts = parts\n w.created = created\n w.updated = updated\n return w", "def _make_sliders_and_tables(self, df):\n if not len(df):\n raise ValueError('DataFrame must be at least one entry long.')\n\n self.evt_sel_slid = pn.widgets.IntSlider(value=0,\n start=0,\n end=len(df))\n self._make_tables(df)\n\n # Define callbacks for tables:\n self.evt_sel_slid.param.watch(lambda event: table_callback(self.time_table,\n self.df_event_time,\n event, True),\n 'value')\n self.evt_sel_slid.param.watch(lambda event: table_callback(self.prop_table,\n self.df_event_properties,\n event),\n 'value')\n self.evt_sel_slid.param.watch(lambda event: table_callback(self.pos_table,\n self.df_event_position,\n event),\n 'value')\n\n # Now make title and also define callback:\n title = self._make_title(self.evt_sel_slid.value)\n self.title_panel = pn.panel(title, sizing_mode='scale_width')\n\n def title_callback(event):\n self.title_panel.object = self._make_title(event.new)\n\n self.evt_sel_slid.param.watch(title_callback, 'value')", "def TableFieldWidget(field, request):\n return widget.FieldWidget(field, TableWidget(request))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the next update from the queue. If no update is found, block the process until one is received. If a stop signal is sent, try to gracefully stop the thread.
def __receive_next_update(self) -> telegram.Update:
    # Pop data from the queue
    try:
        data = self.queue.get(timeout=self.cfg.telegram["conversation_timeout"])
    except queuem.Empty:
        # If the conversation times out, gracefully stop the thread
        self.__graceful_stop(StopSignal("timeout"))
    # Check if the data is a stop signal instance
    if isinstance(data, StopSignal):
        # Gracefully stop the process
        log.debug("Waiting for a specific message...")
        self.__graceful_stop(data)
    # Return the received update
    return data
[ "def receive_next_update(self) -> telegram.Update:\n # Pop data from the queue\n data = \"\"\n try:\n data = self.queue.get(timeout=self.cfg.telegram[\"conversation_timeout\"])\n except queuem.Empty:\n # If the conversation times out, gracefully stop the thread\n self.__graceful_stop(StopSignal(\"timeout\"))\n # Check if the data is a stop signal instance\n if isinstance(data, StopSignal):\n # Gracefully stop the process\n log.debug(\"Waiting for a specific message...\")\n self.__graceful_stop(data)\n # Return the received update\n return data", "def next_ele(self):\n\t\ttry:\n\t\t\tret = self._queue.get(block = True, timeout=0.5)\n\t\t\tself._queue.task_done()\n\t\t\treturn ret\n\t\texcept queue.Empty:\n\t\t\tif not self.is_running():\n\t\t\t\traise\n\t\t\telse:\n\t\t\t\treturn None", "def get_updates(self):\n if update_queue:\n return update_queue.pop()", "def next_ele(self):\n\t\ttry:\n\t\t\tret = self._queue.get(block=True, timeout=0.5)\n\t\t\tself._queue.task_done()\n\t\t\treturn ret\n\t\texcept queue.Empty:\n\t\t\tif not self.isAlive():\n\t\t\t\traise\n\t\t\telse:\n\t\t\t\treturn None", "def _thread_worker(self):\n while self._running:\n # Retrieve next cmd, or block\n packet = self._queue.get(True)\n if isinstance(packet, dict) and QS_CMD in packet:\n try:\n self._callback_listen(packet)\n except Exception as err: # pylint: disable=broad-except\n _LOGGER.error(\"Exception in callback\\nType: %s: %s\",\n type(err), err)\n self._queue.task_done()", "def next(self):\n while True: # waiting\n item = self.get_next_if_any()\n if item is not None: # feature: value None is filtered out\n return item\n\n if self.nomore: # if nothing else is coming\n break # stop waiting\n\n time.sleep(0.1) # wait before checking again\n\n raise StopIteration() # tell next worker nothing else is coming", "def read_updates(self):\n while True:\n try:\n # Wait for an update on the event queue.\n _log.debug(\"Reading from event queue\")\n update = self._event_queue.get(block=True)\n event_type, resource_type, resource = update\n self._event_queue.task_done()\n\n # We've recieved an update - process it.\n _log.debug(\"Read event: %s, %s, %s\",\n event_type,\n resource_type,\n json.dumps(resource, indent=2))\n self._process_update(event_type,\n resource_type,\n resource)\n except KeyError:\n # We'll hit this if we fail to parse an invalid update.\n _log.exception(\"Invalid update: %s\", update)", "def run(self):\n while True:\n # Check to see if we should stop\n if self._stop.isSet():\n logger.debug(\"Worker thread stopping.\")\n break\n\n # Try to pull from the queue\n try:\n func, args, kwargs = self.queue.get_nowait()\n func(*args, **kwargs)\n except Queue.Empty:\n time.sleep(5)\n continue\n except Exception as e:\n logger.exception(e)", "def _process_event_queue(self):\n while not self._event_queue.empty():\n event = self._event_queue.get()\n self._process_event(event)\n return None", "def wait_for_update(self):\n original_time = self._read('/_updated')\n new_time = original_time\n\n while new_time == original_time:\n try:\n new_time = self._read('/_updated', wait=True)\n except etcd.EtcdWatchTimedOut:\n new_time = self._read('/_updated')\n time.sleep(10)", "def run(self) -> None:\n\n while True:\n try:\n input_element = self.input_queue.get_nowait()\n self.process(input_element)\n except Empty:\n return", "def updater(response_q, storage):\n while True:\n resp = response_q.get()\n if resp == 'stop':\n break\n storage.update(resp)", "async def get_next(self) -> Probe:\n schedule: Optional[Schedule] = None\n 
while schedule is None:\n try:\n # Try to get the earliest scheduled probe\n schedule = self.queue[0]\n except IndexError:\n # If there is none, wait for a change\n async with self.queue_changed:\n await self.queue_changed.wait()\n else:\n # Wait until it's time to run the scheduled probe\n with trio.move_on_at(schedule.next_time):\n # However, if the queue changes before it's time to run,\n # we forget the selected schedule to re-elect a new one.\n async with self.queue_changed:\n await self.queue_changed.wait()\n schedule = None\n # Just before running it, check if it's not actually removed\n if schedule is not None and schedule.removed:\n heapq.heappop(self.queue)\n schedule = None\n # Immediately reschedule the next run of the selected probe\n schedule.advance()\n heapq.heapreplace(self.queue, schedule)\n # Then let the caller actually run the elected probe\n return schedule.probe", "def _get_next_minibatch(self):\n return self._blob_queue.get()", "def run(self):\n while self._update_func():\n self.update_signal.emit(None)", "def non_blocking_get(self):\n try:\n return self.q.get(block=False)\n except queue.Empty:\n time.sleep(0)\n return None", "def _queue_thread(self):\n while self.running:\n try:\n msg = self.q.get(True, max(self.blocktime / 1000, 1))\n self.busy = True\n self.send(msg)\n self.update()\n except Empty:\n self.busy = False\n pass\n\n # Prune the events list of dead events\n self.events_lock.acquire()\n self.events = filter(lambda t: t.is_alive(), self.events)\n self.events_lock.release()", "def get_next_submission(self):\n try:\n return self.submission_list.get(block=True, timeout=1)\n except queue.Empty:\n return None", "def __suggestion_thread_func(self):\n last_typed_text = None\n text = None\n self.__stop_suggestion_thread = False\n\n try:\n while not self.__stop_suggestion_thread:\n # Act only on valid text.\n if not text:\n try:\n # Blocking wait for first update request\n text = self._update_queue.get(block=True)\n finally:\n self._update_queue.task_done()\n # text == None is issued only on thread stop request in order to wakeup the initial\n # blocking queue.get().\n if text is None:\n continue\n if text != last_typed_text:\n # If text differs, get the suggestions from webservice\n suggestions = self.__suggest(text)\n last_typed_text = text\n if suggestions:\n self.__last_text_for_suggestions = text\n # If we got any suggestions, update the command parameter suggestions\n self.caller.setParameterSuggestions(suggestions)\n text = None\n # Wait for any additional requests to accumulate before updating\n # Loop for 100ms and collect the most recently typed text\n started = datetime.datetime.now()\n elapsed_ms = 0\n while elapsed_ms < self.polling_interval:\n time_to_wait = self.polling_interval - elapsed_ms\n if time_to_wait > 0:\n with suppress(Empty):\n text = self._update_queue.get(block=True, timeout=time_to_wait / 1000)\n self._update_queue.task_done()\n tdiff = datetime.datetime.now() - started\n elapsed_ms = (tdiff.days * 24 * 60 * 60 + tdiff.seconds) * 1000 + tdiff.microseconds / 1000.0\n else:\n pass\n finally:\n self.__suggestion_thread = None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the next update from the queue. If no update is found, block the process until one is received. If a stop signal is sent, try to gracefully stop the thread.
def receive_next_update(self) -> telegram.Update:
    # Pop data from the queue
    data = ""
    try:
        data = self.queue.get(timeout=self.cfg.telegram["conversation_timeout"])
    except queuem.Empty:
        # If the conversation times out, gracefully stop the thread
        self.__graceful_stop(StopSignal("timeout"))
    # Check if the data is a stop signal instance
    if isinstance(data, StopSignal):
        # Gracefully stop the process
        log.debug("Waiting for a specific message...")
        self.__graceful_stop(data)
    # Return the received update
    return data
[ "def __receive_next_update(self) -> telegram.Update:\n # Pop data from the queue\n try:\n data = self.queue.get(timeout=self.cfg.telegram[\"conversation_timeout\"])\n except queuem.Empty:\n # If the conversation times out, gracefully stop the thread\n self.__graceful_stop(StopSignal(\"timeout\"))\n # Check if the data is a stop signal instance\n if isinstance(data, StopSignal):\n # Gracefully stop the process\n log.debug(\"Waiting for a specific message...\")\n self.__graceful_stop(data)\n # Return the received update\n return data", "def next_ele(self):\n\t\ttry:\n\t\t\tret = self._queue.get(block = True, timeout=0.5)\n\t\t\tself._queue.task_done()\n\t\t\treturn ret\n\t\texcept queue.Empty:\n\t\t\tif not self.is_running():\n\t\t\t\traise\n\t\t\telse:\n\t\t\t\treturn None", "def get_updates(self):\n if update_queue:\n return update_queue.pop()", "def next_ele(self):\n\t\ttry:\n\t\t\tret = self._queue.get(block=True, timeout=0.5)\n\t\t\tself._queue.task_done()\n\t\t\treturn ret\n\t\texcept queue.Empty:\n\t\t\tif not self.isAlive():\n\t\t\t\traise\n\t\t\telse:\n\t\t\t\treturn None", "def _thread_worker(self):\n while self._running:\n # Retrieve next cmd, or block\n packet = self._queue.get(True)\n if isinstance(packet, dict) and QS_CMD in packet:\n try:\n self._callback_listen(packet)\n except Exception as err: # pylint: disable=broad-except\n _LOGGER.error(\"Exception in callback\\nType: %s: %s\",\n type(err), err)\n self._queue.task_done()", "def next(self):\n while True: # waiting\n item = self.get_next_if_any()\n if item is not None: # feature: value None is filtered out\n return item\n\n if self.nomore: # if nothing else is coming\n break # stop waiting\n\n time.sleep(0.1) # wait before checking again\n\n raise StopIteration() # tell next worker nothing else is coming", "def read_updates(self):\n while True:\n try:\n # Wait for an update on the event queue.\n _log.debug(\"Reading from event queue\")\n update = self._event_queue.get(block=True)\n event_type, resource_type, resource = update\n self._event_queue.task_done()\n\n # We've recieved an update - process it.\n _log.debug(\"Read event: %s, %s, %s\",\n event_type,\n resource_type,\n json.dumps(resource, indent=2))\n self._process_update(event_type,\n resource_type,\n resource)\n except KeyError:\n # We'll hit this if we fail to parse an invalid update.\n _log.exception(\"Invalid update: %s\", update)", "def run(self):\n while True:\n # Check to see if we should stop\n if self._stop.isSet():\n logger.debug(\"Worker thread stopping.\")\n break\n\n # Try to pull from the queue\n try:\n func, args, kwargs = self.queue.get_nowait()\n func(*args, **kwargs)\n except Queue.Empty:\n time.sleep(5)\n continue\n except Exception as e:\n logger.exception(e)", "def _process_event_queue(self):\n while not self._event_queue.empty():\n event = self._event_queue.get()\n self._process_event(event)\n return None", "def wait_for_update(self):\n original_time = self._read('/_updated')\n new_time = original_time\n\n while new_time == original_time:\n try:\n new_time = self._read('/_updated', wait=True)\n except etcd.EtcdWatchTimedOut:\n new_time = self._read('/_updated')\n time.sleep(10)", "def run(self) -> None:\n\n while True:\n try:\n input_element = self.input_queue.get_nowait()\n self.process(input_element)\n except Empty:\n return", "def updater(response_q, storage):\n while True:\n resp = response_q.get()\n if resp == 'stop':\n break\n storage.update(resp)", "async def get_next(self) -> Probe:\n schedule: Optional[Schedule] = None\n while 
schedule is None:\n try:\n # Try to get the earliest scheduled probe\n schedule = self.queue[0]\n except IndexError:\n # If there is none, wait for a change\n async with self.queue_changed:\n await self.queue_changed.wait()\n else:\n # Wait until it's time to run the scheduled probe\n with trio.move_on_at(schedule.next_time):\n # However, if the queue changes before it's time to run,\n # we forget the selected schedule to re-elect a new one.\n async with self.queue_changed:\n await self.queue_changed.wait()\n schedule = None\n # Just before running it, check if it's not actually removed\n if schedule is not None and schedule.removed:\n heapq.heappop(self.queue)\n schedule = None\n # Immediately reschedule the next run of the selected probe\n schedule.advance()\n heapq.heapreplace(self.queue, schedule)\n # Then let the caller actually run the elected probe\n return schedule.probe", "def _get_next_minibatch(self):\n return self._blob_queue.get()", "def run(self):\n while self._update_func():\n self.update_signal.emit(None)", "def non_blocking_get(self):\n try:\n return self.q.get(block=False)\n except queue.Empty:\n time.sleep(0)\n return None", "def _queue_thread(self):\n while self.running:\n try:\n msg = self.q.get(True, max(self.blocktime / 1000, 1))\n self.busy = True\n self.send(msg)\n self.update()\n except Empty:\n self.busy = False\n pass\n\n # Prune the events list of dead events\n self.events_lock.acquire()\n self.events = filter(lambda t: t.is_alive(), self.events)\n self.events_lock.release()", "def get_next_submission(self):\n try:\n return self.submission_list.get(block=True, timeout=1)\n except queue.Empty:\n return None", "def __suggestion_thread_func(self):\n last_typed_text = None\n text = None\n self.__stop_suggestion_thread = False\n\n try:\n while not self.__stop_suggestion_thread:\n # Act only on valid text.\n if not text:\n try:\n # Blocking wait for first update request\n text = self._update_queue.get(block=True)\n finally:\n self._update_queue.task_done()\n # text == None is issued only on thread stop request in order to wakeup the initial\n # blocking queue.get().\n if text is None:\n continue\n if text != last_typed_text:\n # If text differs, get the suggestions from webservice\n suggestions = self.__suggest(text)\n last_typed_text = text\n if suggestions:\n self.__last_text_for_suggestions = text\n # If we got any suggestions, update the command parameter suggestions\n self.caller.setParameterSuggestions(suggestions)\n text = None\n # Wait for any additional requests to accumulate before updating\n # Loop for 100ms and collect the most recently typed text\n started = datetime.datetime.now()\n elapsed_ms = 0\n while elapsed_ms < self.polling_interval:\n time_to_wait = self.polling_interval - elapsed_ms\n if time_to_wait > 0:\n with suppress(Empty):\n text = self._update_queue.get(block=True, timeout=time_to_wait / 1000)\n self._update_queue.task_done()\n tdiff = datetime.datetime.now() - started\n elapsed_ms = (tdiff.days * 24 * 60 * 60 + tdiff.seconds) * 1000 + tdiff.microseconds / 1000.0\n else:\n pass\n finally:\n self.__suggestion_thread = None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Continue getting updates until the regex finds a match in a message, then return the first capture group.
def __wait_for_regex(self, regex: str, cancellable: bool = False) -> Union[str, CancelSignal]:
    log.debug("Waiting for a regex...")
    while True:
        # Get the next update
        update = self.__receive_next_update()
        # If a CancelSignal is received...
        if isinstance(update, CancelSignal):
            # And the wait is cancellable...
            if cancellable:
                # Return the CancelSignal
                return update
            else:
                # Ignore the signal
                continue
        # Ensure the update contains a message
        if update.message is None:
            continue
        # Ensure the message contains text
        if update.message.text is None:
            continue
        # Try to match the regex with the received message
        match = re.search(regex, update.message.text)
        # Ensure there is a match
        if match is None:
            continue
        # Return the first capture group
        return match.group(1)
[ "def _recvregex(self, regex, exact=False, group=None, **kwargs):\n\n if isinstance(regex, (str, unicode)):\n regex = re.compile(regex)\n\n if exact:\n pred = regex.match\n else:\n pred = regex.search\n\n data = self.recvpred(pred, **kwargs)\n if group is None:\n return data\n match = pred(data)\n if hasattr(group, '__iter__'):\n return match.group(*group)\n return match.group(group)", "def _recvline_regex(self, regex, exact=False, group=None, **kwargs):\n\n if isinstance(regex, (str, unicode)):\n regex = re.compile(regex)\n\n if exact:\n pred = regex.match\n else:\n pred = regex.search\n\n data = self.recvline_pred(pred, **kwargs)\n if group is None:\n return data\n match = pred(data)\n if hasattr(group, '__iter__'):\n return match.group(*group)\n return match.group(group)", "def getMatch(reMatch,group=0):\n if reMatch: return reMatch.group(group)\n else: return ''", "def _consume(self, pattern):\n if self.is_finished:\n raise StopIteration()\n found = re.match(pattern, self.text[self.pos:])\n if found is None:\n return None\n self.pos += found.end()\n return found.group()", "def message_matches(cls, msg, regex):\n m = regex.match(msg.text)\n if m:\n return m.groups()\n return None", "def read_until_re(self, r, flags=0, timeout=None):\n match = self.read_cond(lambda x: re.search(r, x.buf, flags=flags), timeout)\n self.buf = self.buf[match.end():]\n return match if len(match.groups()) > 1 else match.group(len(match.groups()))", "def _search_regex(pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):\n if isinstance(pattern, str):\n mobj = re.search(pattern, string, flags)\n else:\n for p in pattern:\n mobj = re.search(p, string, flags)\n if mobj:\n break\n\n _name = name\n\n if mobj:\n if group is None:\n # return the first matching group\n return next(g for g in mobj.groups() if g is not None)\n else:\n return mobj.group(group)\n elif default is not NO_DEFAULT:\n return default\n elif fatal:\n print('[-] Unable to extract %s' % _name)\n else:\n print('[-] unable to extract %s' % _name)\n return None", "def read_until_regex(self, regex):\n with self.reading:\n while True:\n data = self.read_buffer.slice()\n match = regex.search(data)\n if match:\n break\n self.read_buffer.enqueue((yield self.base.read(self.bufsize)))\n do_return((self.read_buffer.dequeue(match.end()), match))", "def GetRegexGroupMatches(pattern, text, groupNum):\n try:\n result = []\n mi = re.finditer(pattern, text, re.MULTILINE)\n for matchnum, match in enumerate(mi):\n # regex group 1 contains the connection remote address\n result.append(match.group(groupNum))\n return result\n except :\n return None", "async def readuntil_re(self, regex, start=0):\n self.logger.debug(\"readuntil_re: %s\", regex)\n\n try:\n match = await self.wait_for(lambda data: regex.search(data, start))\n\n m_beg, m_end = match.span()\n # We are matching against the data stored stored in bytebuffer\n # The bytebuffer is manipulated in place. After we read the data\n # the buffer may get overwritten. The match object seems to be\n # directly referring the data in bytebuffer. 
This causes a problem\n # when we try to find the matched groups in match object.\n #\n # In [38]: data = bytearray(b\"localhost login:\")\n #\n # In [39]: rex = re.compile(b'(?P<login>.*((?<!Last ).ogin|.sername):)|(?P<passwd>\\n.*assword:)|(?P<prompt>\\n.*[%#>])|(?P<ignore>( to cli \\\\])|(who is on this device.\\\\]\\r\\n)|(Press R\n # ...: ETURN to get started\\r\\n))\\\\s*$')\n #\n # In [40]: m = rex.search(data)\n #\n # In [41]: m.groupdict()\n # Out[41]: {'ignore': None, 'login': b'localhost login:', 'passwd': None, 'prompt': None}\n #\n # In [42]: data[:]=b'overwrite'\n #\n # In [43]: m.groupdict()\n # Out[43]: {'ignore': None, 'login': b'overwrite', 'passwd': None, 'prompt': None}\n #\n groupdict = match.groupdict()\n rdata = await self.read(m_end)\n data = rdata[:m_beg] # Data before the regex match\n matched = rdata[m_beg:m_end] # portion that matched regex\n except AssertionError:\n if self._eof:\n # We are at the EOF. Read the whole buffer and send it back\n data = await self.read(len(self._buffer))\n matched = b\"\"\n match = None\n groupdict = None\n else:\n # re-raise the exception\n raise\n\n return ResponseMatch(data, matched, groupdict, match)", "def last_match_and_replace(self, txt, pat):\n m = None\n for m in pat.finditer(txt):\n pass\n\n if m:\n marker = self.findmarker(txt)\n txt = pat.sub(marker, txt)\n return (txt, m.groupdict())\n else:\n return (txt, None)", "def search_within_stream(input_stream, pattern, default=None):\n pattern_object = re.compile(pattern)\n for line in input_stream:\n match = pattern_object.search(line)\n if match:\n return match.group('return')\n return default", "def find_matches_to_message(\n self, message: str\n ) -> Tuple[Optional[str], Optional[Module]]:\n processed_message = message.lower()\n for _, module in self.modules.get_modules():\n if not module.is_loaded:\n continue\n for func_name, reg_list in module.module_settings.templates.items():\n for reg in reg_list:\n find_match = re.findall(reg, processed_message)\n if find_match:\n return (func_name, module)\n\n return (None, None)", "def parse_message(self, it, line):\n match = self.message_line_re.match(line)\n if match is None:\n return None, next(it)\n\n file_path = os.path.normpath(\n os.path.join(os.path.dirname(self.analyzer_result),\n match.group('path')))\n\n message = Message(\n file_path,\n int(match.group('line')),\n 0,\n match.group('message').strip(),\n match.group('checker').strip())\n\n try:\n return message, next(it)\n except StopIteration:\n return message, ''", "def skip_until_re(self, r, flags=0, timeout=None):\n match = self.read_cond(lambda x: re.search(r, x.buf, flags=flags), timeout)\n self.buf = self.buf[match.start():]\n return match if len(match.groups()) > 1 else match.group(len(match.groups()))", "def getMatch(self, text, pattern):\n \n return re.search(pattern,text,re.MULTILINE + re.DOTALL)", "def regex_groups(regex, line, on_error):\n if match := re.search(regex, line):\n return match.groups()\n else:\n if on_error == 'ignore':\n return match # None will be returned when re.search fails.\n else: # on_error == 'raise'\n raise ValueError(f'Regex failed to match: {line}')", "def process_regex(_data):\n _tmp = {}\n if _data is not None and len(_data.groups()) > 0:\n for _key in (\"head\", \"func\", \"file\", \"line\", \"tail\"):\n try:\n _val = _data.group(_key)\n if _val:\n _tmp[_key] = _val\n except Exception:\n pass\n return _tmp if _tmp else None", "def group(self, group_num):\n if self.rematch is None:\n return None\n else:\n return 
self.rematch.group(group_num)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Continue getting updates until a precheckoutquery is received. The payload is checked by the core before forwarding the message.
def __wait_for_precheckoutquery(self, cancellable: bool = False) -> Union[telegram.PreCheckoutQuery, CancelSignal]:
    log.debug("Waiting for a PreCheckoutQuery...")
    while True:
        # Get the next update
        update = self.__receive_next_update()
        # If a CancelSignal is received...
        if isinstance(update, CancelSignal):
            # And the wait is cancellable...
            if cancellable:
                # Return the CancelSignal
                return update
            else:
                # Ignore the signal
                continue
        # Ensure the update contains a precheckoutquery
        if update.pre_checkout_query is None:
            continue
        # Return the precheckoutquery
        return update.pre_checkout_query
[ "async def answer_pre_checkout_query(\n self,\n pre_checkout_query_id: int,\n error_message: str,\n *,\n request_id: str = None,\n request_timeout: int = None,\n skip_validation: bool = False\n ) -> Ok:\n _constructor = AnswerPreCheckoutQuery.construct if skip_validation else AnswerPreCheckoutQuery\n\n return await self.client.request(\n _constructor(\n pre_checkout_query_id=pre_checkout_query_id,\n error_message=error_message,\n ),\n request_id=request_id,\n request_timeout=request_timeout,\n )", "async def answer_pre_checkout_query(self, pre_checkout_query_id: base.String, ok: base.Boolean,\n error_message: typing.Union[base.String, None] = None) -> base.Boolean:\n payload = generate_payload(**locals())\n result = await self.request(api.Methods.ANSWER_PRE_CHECKOUT_QUERY, payload)\n\n return result", "def run(self):\n while True:\n self.check_inventory_change()\n self.confirm_new_orders()\n sleep(settings.CHECK_BLOCKCHAIN_EVERY)", "def test_contract_pre_exploit(self):\n print(\"PRE EXPLOIT TEST RUNNING...\")\n self.receiverContract.executeFlashLoan(10, {\"from\": self.some_user})", "def check_in(self):\n etree = self._encapsulate_request(self._generate_ping())\n self.zmq_scheduler_request_queue.put_nowait(etree)", "def preQuery(self):\n self.request_url = self.url\n pass", "def m_req_Update(self, sender, e):\r\n if e.Instrument != None and e.Error == None:\r\n # Instrument was found\r\n print(\"Found: {0}\".format(e.Instrument.Name))\r\n # Subscribe for Inside Market Data\r\n self.m_ps = ttapi.PriceSubscription(e.Instrument, ttapi.Dispatcher.Current)\r\n self.m_ps.Settings = ttapi.PriceSubscriptionSettings(ttapi.PriceSubscriptionType.InsideMarket)\r\n self.m_ps.FieldsUpdated += self.m_ps_FieldsUpdated\r\n self.m_ps.Start()\r\n elif e.IsFinal:\r\n # Instrument was not found and TT API has given up looking for it\r\n print(\"Cannot find instrument: {0}\".format(e.Error.Message))\r\n self.Dispose()", "def stock_processor(id, price, title, remaining, totalPackCount, preorder, start, proxy, headers):\n\n r = request_pack_stock(proxy, headers)\n packs = r['data']['searchPackListings']['data']['searchSummary']['data']['data']\n\n for pack in packs:\n item = [pack['id'], pack['title'], pack['price'], pack['remaining'], pack['totalPackCount'], pack['preorder']]\n #print(f'\\n\\nITEM:{item}\\n\\n')\n if pack['remaining'] == remaining: #change back to !=\n # Checks if it already exists in our instock\n if checker(item):\n pass\n else:\n # Add to instock dict\n INSTOCK.append(item)\n print(f'\\n\\nINSTOCK:{INSTOCK}\\n\\n')\n # Send a notification to the discord webhook with the in-stock product\n if start == 0:\n print('Sending new Notification')\n print(item)\n discord_webhook(item)\n logging.info(msg='Successfully sent Discord notification')\n\n else:\n if checker(item):\n INSTOCK.remove(item)", "def awaiting_payment(self):", "def pre_step(self) -> 'outputs.ExecStepResponse':\n return pulumi.get(self, \"pre_step\")", "def pre_poll(self):\n pass", "def acknowledge_prepayment(self):\n self.acknowledge_payment()", "def preloop(self):\r\n\r\n if self.reader is None:\r\n print('\\nError: Unable to connect to card reader\\n')\r\n sys.exit()\r\n\r\n self.postcmd(None, None)", "def update_order_webhook(self):\n res, instance, odoo_webhook = self.get_basic_info(\n route=\"shopify_odoo_webhook_for_orders_partially_updated\")\n # When the webhook is not active then it will skip the process.\n if not instance.active or not odoo_webhook.state == 'active':\n _logger.info(\n \"The method is skipped. 
It appears the instance:{0} is not active or that the \"\n \"webhook{1} is not active.\"\n \"\".format(instance.name, odoo_webhook.webhook_name))\n return\n _logger.info(\"UPDATE ORDER WEBHOOK call for order: {0}\".format(res.get('name')))\n\n if request.env[\"sale.order\"].sudo().search_read([\n (\"shopify_instance_id\", \"=\", instance.id),\n (\"shopify_order_id\", \"=\", res.get(\"id\")),\n (\"shopify_order_number\", \"=\", res.get(\"order_number\"))], [\"id\"]):\n request.env[\"sale.order\"].sudo().process_shopify_order_via_webhook(res, instance, True)\n elif not res.get(\"fulfillment_status\"):\n res.update({'fulfillment_status': 'unshipped'})\n if res.get(\"fulfillment_status\") in instance.import_shopify_order_status_ids.mapped(\n \"status\"):\n request.env[\"sale.order\"].sudo().process_shopify_order_via_webhook(res, instance)\n return", "def step(self):\n self.fetch()\n self.execute()", "def continue_checking(self):\n pass", "def process_reenable_request(self, config_settings, core_state_content):\n self.logger.log(\"This is the same request as the previous patch operation. Checking previous request's status\")\n if core_state_content.__getattribute__(self.core_state_fields.completed).lower() == 'false':\n running_process_ids = self.process_handler.identify_running_processes(core_state_content.__getattribute__(self.core_state_fields.process_ids))\n if len(running_process_ids) == 0:\n self.logger.log(\"Re-triggering the patch operation as the previous patch operation was not running and hadn't marked completion either.\")\n self.utility.delete_file(self.core_state_handler.dir_path, self.core_state_handler.file)\n self.launch_new_process(config_settings, create_status_output_file=False)\n else:\n self.logger.log(\"Patch operation is in progress from the previous request. [Operation={0}]\".format(config_settings.__getattribute__(self.config_public_settings.operation)))\n exit(Constants.ExitCode.Okay)\n\n else:\n self.logger.log(\"Patch operation already completed in the previous request. [Operation={0}]\".format(config_settings.__getattribute__(self.config_public_settings.operation)))\n exit(Constants.ExitCode.Okay)", "def handleMsgs(self):\n\n force_sheep_check = self.changed_last_step\n self.changed_last_step = False\n if not self.queue:\n return\n\n need_to_check = False\n for msg in self.popMsg(): # Receive message(s) from queue.\n if msg.type == Type.BLOCK:\n new_tx = msg.content\n if new_tx.hash in self.seen_tx:\n continue\n need_to_check = True\n self.changed_last_step = True\n self.handleNewTx(new_tx, msg.sender)\n elif msg.type == Type.REQUEST: # Requests are issued by other miners.\n target_hash = msg.content\n assert target_hash in self.seen_tx # I should never get a request for a tx I haven't seen.\n requestedTx = self.seen_tx[target_hash]\n self.sendMsg(msg.sender, Message(self.id, Type.BLOCK, requestedTx))\n if need_to_check or (self.hasSheep() and force_sheep_check): # Have to check every time if has sheep.\n self.checkAllTx()", "def ask_for_state_update(self):\n command_tuple = self.command_parser.get_command_tuple(\"common\", \"Common\", \"AllStates\")\n return self.drone_connection.send_noparam_command_packet_ack(command_tuple)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Continue getting updates until a successfulpayment is received.
def __wait_for_successfulpayment(self, cancellable: bool = False) -> Union[telegram.SuccessfulPayment, CancelSignal]:
    log.debug("Waiting for a SuccessfulPayment...")
    while True:
        # Get the next update
        update = self.__receive_next_update()
        # If a CancelSignal is received...
        if isinstance(update, CancelSignal):
            # And the wait is cancellable...
            if cancellable:
                # Return the CancelSignal
                return update
            else:
                # Ignore the signal
                continue
        # Ensure the update contains a message
        if update.message is None:
            continue
        # Ensure the message is a successfulpayment
        if update.message.successful_payment is None:
            continue
        # Return the successfulpayment
        return update.message.successful_payment
[ "def awaiting_payment(self):", "def do_payment(self, *args):\n print('do_payment function called')\n\n # scans a qr-code and returns either the qr-code or False\n qrcode = qr.QrCode.scan()\n\n # takes the qr-code and checks if a lightning invoice is in it\n # if this is the case the invoice variable from shared_values is set\n shared_values.SharedValues.INVOICE = lnd_rest.LndRest.evaluate_scan(qrcode)\n\n # creates a counter for the while loop\n self.counter = 0\n\n # if no invoice was presented do it again\n while shared_values.SharedValues.INVOICE is False:\n print(f'Round: {self.counter + 1}')\n\n # same as above\n qrcode = qr.QrCode.scan()\n\n # same as above\n shared_values.SharedValues.INVOICE = lnd_rest.LndRest.evaluate_scan(qrcode)\n\n # increases the counter\n self.counter += 1\n\n # if the counter reaches the number 3 (the process was called 3 times), the while loop ends\n if self.counter == 3:\n self.label_main.text = f\"Maybe next time\"\n\n # stops all processes for 2 seconds\n time.sleep(2)\n break\n\n # if a valid qr-code was presented and a invoice was detected the handling invoice method is called\n if self.counter < 3:\n\n # if the handling invoice method was successful the value True is presented and the process ends\n if lnd_rest.LndRest.handle_invoice():\n\n # updates main label\n self.label_main.text = \"Thank you!\"\n\n # the handling invoice method presented the False value\n else:\n # updates main label\n self.label_main.text = \"Error during decoding\"\n\n # initiates clean up of all important parameters\n # a scheduler was chosen to see the 'thank you' displayed\n Clock.schedule_once(self.soft_reset, 10)\n\n # if no qr-code was presented and scanning fails go back to MainPage\n else:\n # initiates clean up of all important parameters\n self.soft_reset()", "def _onSuccess(self, controller):\r\n if controller.order.paid_in_full:\r\n controller.cart.empty()\r\n for item in controller.order.orderitem_set.all():\r\n if item.product.is_subscription:\r\n item.completed = True\r\n item.save()\r\n try:\r\n curr_status = controller.order.orderstatus_set.latest() \r\n except OrderStatus.DoesNotExist:\r\n curr_status = None\r\n \r\n if (curr_status is None) or (curr_status.notes and curr_status.status == \"New\"):\r\n controller.order.add_status(status='New', notes = \"Order successfully submitted\")\r\n else:\r\n # otherwise just update and save\r\n if not curr_status.notes:\r\n curr_status.notes = _(\"Order successfully submitted\")\r\n curr_status.save() \r\n\r\n #Redirect to the success page\r\n url = controller.lookup_url('satchmo_checkout-success')\r\n return HttpResponseRedirect(url) \r\n\r\n else:\r\n log.debug('Order #%i not paid in full, sending to pay rest of balance', controller.order.id)\r\n #url = controller.order.get_balance_remaining_url()\r\n url = reverse('satchmo_balance_remaining')\r\n return HttpResponseRedirect(url)", "def test_update_payment(self):\n pass", "def wait_for_funds(self) -> None:\n pass", "async def process_payment_status(order_id, payment_id, calls=3):\n await asyncio.sleep(10)\n if await is_paid(payment_id):\n await db.orders.update({'order_id': order_id, 'status': 'PAID'})\n return True\n if calls:\n loop.create_task(process_payment_status(order_id, payment_id, calls - 1))\n else:\n return await db.process_order(order_id, True)", "def run_now(request):\n rp_id = request.POST.get('rp_id')\n rp = get_object_or_404(RecurringPayment, pk=rp_id)\n\n result_data = {}\n result_data['processed'] = 'false'\n result_data['reason'] = 'done'\n\n 
payment_profiles = PaymentProfile.objects.filter(\n customer_profile_id=rp.customer_profile_id,\n status=True,\n status_detail='active'\n ).order_by('-update_dt')\n if not payment_profiles:\n valid_cpp_ids, invalid_cpp_ids = rp.populate_payment_profile()\n #print valid_cpp_ids, invalid_cpp_ids\n\n if valid_cpp_ids:\n payment_profiles = PaymentProfile.objects.filter(\n customer_profile_id=valid_cpp_ids[0])\n\n if not payment_profiles:\n result_data['reason'] = 'not setup'\n else:\n if rp.status_detail == 'active':\n num_processed = run_a_recurring_payment(rp)\n if num_processed:\n result_data['processed'] = 'true'\n result_data['reason'] = 'processed'\n # get total_paid and balance for this rp\n result_data['total_paid'] = str(rp.total_paid)\n result_data['balance'] = str(rp.get_outstanding_balance())\n\n # get total amount received for all rps\n d = RecurringPaymentInvoice.objects.filter(\n invoice__balance=0,\n ).aggregate(total_amount_received=Sum('invoice__total'))\n result_data['total_amount_received'] = d['total_amount_received']\n if not result_data['total_amount_received']:\n result_data['total_amount_received'] = 0\n result_data['total_amount_received'] = tcurrency(result_data['total_amount_received'])\n\n return HttpResponse(simplejson.dumps(result_data))", "def _simulate_payment(self):\n post = self.ipayment_backend.get_hidden_context(self.order)\n post['advanced_strict_id_check'] = 0 # disabled for testing only\n # (see ipayment_Technik-Handbuch.pdf page 32)\n if settings.IPAYMENT['useSessionId']:\n post['ipayment_session_id'] = self.ipayment_backend.get_session_id(self.request, self.order)\n else:\n post.update(self.ipayment_backend.get_sessionless_context(self.request, self.order))\n post['trx_securityhash'] = self.ipayment_backend._calc_trx_security_hash(post)\n post.update({\n 'addr_name': 'John Doe',\n 'cc_number': '4012888888881881', # Visa test credit card number\n 'cc_checkcode': '123',\n 'cc_expdate_month': '12',\n 'cc_expdate_year': '2029',\n })\n ipayment_uri = '/merchant/%s/processor/2.0/' % settings.IPAYMENT['accountId']\n headers = {\n \"Content-type\": \"application/x-www-form-urlencoded\",\n \"Accept\": \"text/plain\"\n }\n conn = httplib.HTTPSConnection('ipayment.de')\n conn.request(\"POST\", ipayment_uri, urllib.urlencode(post), headers)\n httpresp = conn.getresponse()\n self.assertEqual(httpresp.status, 302, 'Expected to be redirected back from IPayment')\n redir_url = urlparse.urlparse(httpresp.getheader('location'))\n query_params = urlparse.parse_qs(redir_url.query)\n redir_uri = redir_url.path + '?' + redir_url.query\n conn.close()\n self.assertEqual(query_params['ret_status'][0], 'SUCCESS', 'IPayment reported: ' + redir_uri)\n\n # IPayent redirected the customer onto 'redir_uri'. 
Continue to complete the order.\n response = self.client.get(redir_uri, follow=True)\n self.assertEqual(len(response.redirect_chain), 1, '')\n urlobj = urlparse.urlparse(response.redirect_chain[0][0])\n self.assertEqual(resolve(urlobj.path).url_name, 'thank_you_for_your_order')\n self.assertEqual(response.status_code, 200)\n order = Order.objects.get(pk=self.order.id)\n self.assertEqual(order.status, Order.COMPLETED)\n confirmation = Confirmation.objects.get(shopper_id=self.order.id)\n self.assertEqual(confirmation.ret_status, 'SUCCESS')", "def payment_success(request):\r\n\tsecret_key = settings.SELLER_KEY\r\n\tpid = request.GET['pid']\r\n\tref = request.GET['ref']\r\n\tresult = request.GET['result']\r\n\t# Retrieve the cheksum value and validate it\r\n\tchecksumstr = \"pid={}&ref={}&result={}&token={}\".format(pid, ref, result, secret_key)\r\n\tm = md5(checksumstr.encode(\"ascii\"))\r\n\tchecksum = m.hexdigest()\r\n\tmalformed = False\r\n\tprint(\"calculated: \" + checksum)\r\n\tprint(\"received: \" + request.GET['checksum'] )\r\n\tif (checksum == request.GET['checksum'] ):\r\n\t\ttransaction = Transaction.objects.get(pk=pid)\r\n\t\ttransaction.state = Transaction.CONFIRMED\r\n\t\ttransaction.reference = ref\r\n\t\tgame = Game.objects.get(id = transaction.game.id)\r\n\t\ttransaction.save()\r\n\t\tinc_purchase = game.purchase_number + 1\r\n\t\tgame.purchase_number = inc_purchase\r\n\t\tgame.save()\r\n\t\tprint(\"about to call success\")\r\n\t\treturn render(request, 'success.html', {'game': game, 'MEDIA_URL': settings.MEDIA_URL, 'malformed': malformed})\r\n\telse:\r\n\t\ttransaction = Transaction.objects.get(pk=pid)\r\n\t\ttransaction.delete()\r\n\t\tmalformed = True\r\n\t\treturn render(request, 'success.html', {\"malformed\": malformed})", "def handle_payment_intent_succeeded(self, event):\n intent = event.data.object\n pid = intent.id\n bag = intent.metadata.bag\n\n billing_details = intent.charges.data[0].billing_details\n grand_total = round(intent.charges.data[0].amount / 100, 2)\n\n order_exists = False\n attempt = 1\n while attempt <= 5:\n try:\n order = Order.objects.get(\n full_name__iexact=billing_details.name,\n email__iexact=billing_details.email,\n phone_number__iexact=billing_details.phone,\n street_address1__iexact=(\n billing_details.address.line1),\n street_address2__iexact=(\n billing_details.address.line2),\n town_or_city__iexact=billing_details.address.city,\n county__iexact=billing_details.address.state,\n country__iexact=billing_details.address.country,\n grand_total=grand_total,\n original_bag=bag,\n stripe_pid=pid,\n )\n order_exists = True\n break\n except Order.DoesNotExist:\n attempt += 1\n time.sleep(1)\n\n if order_exists:\n return HttpResponse(\n content=f'Webhook received: ({event[\"type\"]}'\n '| SUCCESS: Verified order already in database',\n status=200)\n else:\n order = None\n try:\n order = Order.objects.create(\n full_name=billing_details.name,\n email=billing_details.email,\n phone_number=billing_details.phone,\n street_address1=billing_details.address.line1,\n street_address2=billing_details.address.line2,\n town_or_city=billing_details.address.city,\n county=billing_details.state,\n country=billing_details.country,\n original_bag=bag,\n stripe_pid=pid,\n )\n for workshop_id, quantity in json.loads(bag).items():\n workshop = Workshop.objects.get(id=workshop_id)\n if isinstance(quantity, int):\n order_line_item = OrderLineItem(\n order=order,\n workshop=workshop,\n quantity=quantity,\n )\n order_line_item.save()\n except Exception as e:\n if order:\n 
order.delete()\n return HttpResponse(\n content=f'Webhook received: {event[\"type\"]} | ERROR: {e}',\n status=500)\n return HttpResponse(\n content=f'Webhook received: {event[\"type\"]}'\n '| SUCCESS: Created order in webhook',\n status=200)", "def request_renew_accommodation(self):\n self.write({'state':'waiting'})\n return True", "def update_payment(self):\r\n update_payment_to_db(self.__payment_id__, self.__camper_id__, self.__camp_id__, self.__payment_date__, self.__paid_amount__)", "def update(self):\n try:\n\n # 1 --> Get all the NewValidTransaction(s)\n new_valid_txns = self.frame.get(NewValidTransaction)\n\n for txn in new_valid_txns:\n\n # 2 --> Update the BankingRecord corresponding to Customer that initiated it\n self.atm.update_banking_record(txn, self.frame)\n\n # 3 --> Process the Transaction \n self.atm.process_transaction(txn, self.frame)\n\n # ~ Print based on a cycle count (optional functionality)\n self.atm.print_using_base(10, self.frame, Customer)\n #self.atm.print_using_base(10, self.frame, BankingRecord)\n\n except Exception:\n logger.exception(\"Error: \")", "def proceed_to_checkout_and_payment(self):\r\n # 1- summary\r\n logger.info('starting wizard with summary')\r\n self.automation.wait.until(\r\n EC.presence_of_element_located((By.CSS_SELECTOR, '.cart_navigation a.standard-checkout')))\r\n self.automation.driver.execute_script(\"document.querySelectorAll('.cart_navigation a.standard-checkout')[0]\"\r\n \".click()\")\r\n\r\n # 2-sign in & 3-address\r\n logger.info('2-sign in & 3-address')\r\n self.automation.wait.until(\r\n EC.presence_of_element_located((By.CSS_SELECTOR, 'button[name=\"processAddress\"]')))\r\n\r\n self.automation.driver.find_element_by_css_selector('button[name=\"processAddress\"]').click()\r\n\r\n # 4- shipping\r\n logger.info('4- shipping')\r\n self.automation.wait.until(\r\n EC.presence_of_element_located((By.CSS_SELECTOR, '#uniform-cgv span')))\r\n\r\n is_checked = self.automation.driver.find_element_by_css_selector('#uniform-cgv span').get_attribute('class')\r\n if not is_checked: # agree\r\n self.automation.driver.execute_script(\"document.querySelectorAll('#cgv')[0].click()\")\r\n\r\n self.automation.driver.find_element_by_css_selector('button[name=processCarrier]').click()\r\n logger.info('agree and confirmed')\r\n\r\n # pay by bank wire\r\n logger.info('pay by bank wire')\r\n self.automation.wait.until(\r\n EC.presence_of_element_located((By.CSS_SELECTOR, '.payment_module a')))\r\n\r\n self.automation.driver.find_element_by_css_selector('.payment_module a').click()\r\n\r\n # 5- payment and confirm\r\n logger.info('5- payment and confirm')\r\n self.automation.wait.until(\r\n EC.presence_of_element_located((By.CSS_SELECTOR, '#cart_navigation button')))\r\n self.automation.driver.find_element_by_css_selector('#cart_navigation button').click()\r\n\r\n # back to orders\r\n logger.info('back to orders')\r\n self.automation.wait.until(\r\n EC.presence_of_element_located((By.CSS_SELECTOR, 'p.cart_navigation .button-exclusive.btn')))\r\n self.automation.driver.find_element_by_css_selector('p.cart_navigation .button-exclusive.btn').click()\r\n\r\n # how many items do you have\r\n time.sleep(1.5)\r\n self.automation.wait.until(\r\n EC.presence_of_element_located((By.CSS_SELECTOR, '#order-list tbody tr')))\r\n items = self.automation.driver.find_elements_by_css_selector('#order-list tbody tr')\r\n logger.info(f'You have \"{len(items)}\" at your order')", "def __call__(self):\n adapted = IMollieIdealMultiplePayments(self.context)\n 
received_transaction_id = self.request.form.get('transaction_id')\n\n try:\n transaction = adapted.get_transaction(received_transaction_id)\n except UnknownTransactionError:\n transaction = None\n if (not received_transaction_id or transaction is None):\n message = 'Wrong or missing transaction ID'\n self.request.response.setStatus(403, message)\n return message\n\n adapted.get_payment_status(received_transaction_id)\n notify(MollieIdealPaymentEvent(self.context, self.request,\n received_transaction_id))\n self.request.response.setStatus(200)\n return 'OK'", "def acknowledge_prepayment(self):\n self.acknowledge_payment()", "def checkout_paid(request):\n\n if request.method == \"POST\":\n order_form = OrderForm(request.POST)\n payment_form = MakePaymentForm(request.POST)\n\n if payment_form.is_valid():\n card_error = False\n\n cart = request.session.get('cart', {})\n total = 0\n for id, quantity in cart.items():\n product = get_object_or_404(Product, pk=id)\n total += quantity * product.price\n\n try:\n stripe.Charge.create(\n amount=int(total * 100),\n currency=\"GBP\",\n description=request.user.email,\n card=payment_form.cleaned_data['stripe_id'],\n )\n except stripe.error.CardError:\n messages.error(request, \"Your card was declined!\")\n card_error = True\n except Exception:\n messages.error(request, \"Sorry, a payment error occurred so we couldn't take payment.\")\n card_error = True\n else:\n print(payment_form.errors)\n messages.error(request, \"We were unable to take a payment with that card!\")\n card_error = True\n\n if not card_error:\n if order_form.is_valid():\n order = order_form.save(commit=False)\n order.date = timezone.now()\n order.save()\n\n cart = request.session.get('cart', {})\n for id, quantity in cart.items():\n product = get_object_or_404(Product, pk=id)\n order_line_item = OrderLineItem(\n order=order,\n product=product,\n quantity=quantity\n )\n order_line_item.save()\n\n do_subscription_tasks(request, order)\n\n messages.error(request, \"You have successfully paid. Thank you for your order!\")\n\n # Empty the Cart now that the subscription has been successfully taken out.\n request.session['cart'] = {}\n return redirect(reverse('all_products'))\n else:\n print(order_form.errors)\n messages.error(request, \"Sorry, there is an unexpected technical problem with your order. \"\n \"Please contact us on 0000 1234567 so we can take your order manually.\")\n else:\n messages.error(request, \"Please check your card details below are correct. If you still cannot make a \"\n \"payment then contact us on 0000 1234567 so we can take your order manually.\")\n\n else:\n order_form = OrderForm()\n payment_form = MakePaymentForm()\n\n return render(request, 'checkout.html', {'order_form': order_form, 'payment_form': payment_form,\n 'publishable': settings.STRIPE_PUBLISHABLE})", "def notify_payment_success(self, **kwargs):\n return self.notify(\"notify_payment_success\", **kwargs)", "def run(self):\n while True:\n self.check_inventory_change()\n self.confirm_new_orders()\n sleep(settings.CHECK_BLOCKCHAIN_EVERY)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Continue getting updates until a photo is received, then return it.
def __wait_for_photo(self, cancellable: bool = False) -> Union[List[telegram.PhotoSize], CancelSignal]:
    log.debug("Waiting for a photo...")
    while True:
        # Get the next update
        update = self.__receive_next_update()
        # If a CancelSignal is received...
        if isinstance(update, CancelSignal):
            # And the wait is cancellable...
            if cancellable:
                # Return the CancelSignal
                return update
            else:
                # Ignore the signal
                continue
        # Ensure the update contains a message
        if update.message is None:
            continue
        # Ensure the message contains a photo
        if update.message.photo is None:
            continue
        # Return the photo array
        return update.message.photo
[ "def photo_info(self) -> List[protocol.PhotoInfo]:\n if not self._photo_info:\n self.logger.debug(\"Photo list was empty. Lazy-loading photo list now.\")\n result = self.load_photo_info()\n if isinstance(result, concurrent.futures.Future):\n result.result()\n return self._photo_info", "def fetch_photo(self, photo_flickr_id):\n self.log(2, \"Fetching Photo ID %s\" % photo_flickr_id)\n photo_result = self.flickr.photos_getInfo(photo_id = photo_flickr_id)\n return self._fetch_photo(photo_result)", "def take_photo(self):\n\n status = self.camera.status()\n if status['mode'] != 'still':\n # place camera in snapshot mode\n self.camera.command('mode', 'still')\n\n photo_successful = self.camera.command('record', 'on')\n\n if photo_successful:\n\n # sleep for two seconds so the camera can process\n # and serve the new photo via http\n\n retrieved = False\n while not retrieved:\n print(\"Waiting for image to be served.\")\n time.sleep(2)\n retrieved = self.get_photos_from_device()\n\n print(\"Image got served.\")\n return True\n\n else:\n return False", "def on_photo(self, update, context):\n user = update.effective_user\n photo_count = len(update.message.photo)\n log.info(\n \"PIC from %s, %s, @%s, #%i\",\n user.username,\n user.full_name,\n update.effective_chat.id,\n photo_count,\n )\n\n if context.user_data[\"state\"] != c.State.EXPECTING_RECEIPT:\n # Got an image from someone we weren't expecting to send any. We log this, and TODO decide what\n log.debug(\"Got image when I was not expecting one\")\n return\n\n # Process each photo\n for entry in update.message.photo:\n raw_image = entry.get_file().download_as_bytearray()\n\n # At this point the image is in the memory\n with NamedTemporaryFile(delete=False, prefix=str(update.effective_chat.id)) as pic:\n pic.write(raw_image)\n log.debug(\"Image written to %s\", pic.name)\n\n # Note: you can disable this line when testing locally, if you don't have an actual backend that will\n # serve this request\n self.backend.upload_shopping_receipt(raw_image, context.user_data[\"current_request\"])\n\n # if we got this far it means that we're ready to proceed to the exit survey and ask some additional questions\n # about this request\n self.send_exit_survey(update, context)\n context.user_data[\"state\"] = c.State.EXPECTING_EXIT_SURVEY", "def update_photo(self) -> Callable[\n [rpcmessages.UpdatePhotoRequest],\n resources.Photo]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if 'update_photo' not in self._stubs:\n self._stubs['update_photo'] = self.grpc_channel.unary_unary(\n '/google.streetview.publish.v1.StreetViewPublishService/UpdatePhoto',\n request_serializer=rpcmessages.UpdatePhotoRequest.serialize,\n response_deserializer=resources.Photo.deserialize,\n )\n return self._stubs['update_photo']", "def get_latest_image(self):\n self.log.debug('Got latest image request')\n assert self.is_running, 'Camera must be running'\n return self._image_data", "def get_latest_image(self):\n self._log_debug('Got latest image request')\n assert self.is_running, 'Camera must be running'\n return self._image_data", "def photo():\n\n def put():\n \"\"\"\n Immediately finish this request, no need for the client to wait for\n backend communication.\n \"\"\"\n\n if request.data:\n app._media_backend.show_picture(request.data)\n\n put()\n return ''", "def take_photo(update, context):\n user_str = user_data(update)\n 
logger.info(f\"[{user_str}] sent a photo\")\n\n if check_image_restrictions(update):\n image_filename = get_image(update.message.photo[-1], context, \"jpg\", user_str)\n caption = update.message.caption or \"\"\n pipe(image_filename, update, context, caption)", "def get_photo(self) -> Callable[\n [rpcmessages.GetPhotoRequest],\n resources.Photo]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if 'get_photo' not in self._stubs:\n self._stubs['get_photo'] = self.grpc_channel.unary_unary(\n '/google.streetview.publish.v1.StreetViewPublishService/GetPhoto',\n request_serializer=rpcmessages.GetPhotoRequest.serialize,\n response_deserializer=resources.Photo.deserialize,\n )\n return self._stubs['get_photo']", "def get_photo(self, photo_id):\n uri = 'photos/' + photo_id\n return self.make_request(uri)", "def get_info(self) -> ImageData:\n user_photo = self.open_photo(self.message)\n image = self.image_handler(self.user, user_photo)\n return image.get_image_info()", "def get_next_image(self, timeout=10):\n # self.log.debug('Got next image request')\n assert self.is_init, 'Camera must be initialised'\n if not self.is_running:\n self.log.debug('Camera was not running, start and grab the first image')\n self._got_image_event.clear()\n self.start()\n if not self._got_image_event.wait(timeout):\n raise TimeoutError('Getting image timed out')\n img = self._image_data\n self.stop()\n else:\n # self.log.debug('Camera running, grab the first image to show up')\n self._got_image_event.clear()\n if not self._got_image_event.wait(timeout):\n raise TimeoutError('Getting image timed out')\n img = self._image_data\n return img", "async def _async_update_media_image_hash(self):\n file = self._currentsong.get(\"file\")\n\n if file == self._media_image_file:\n return\n\n if (\n file is not None\n and (response := await self._async_get_file_image_response(file))\n is not None\n ):\n self._media_image_hash = hashlib.sha256(\n bytes(response[\"binary\"])\n ).hexdigest()[:16]\n else:\n # If there is no image, this hash has to be None, else the media player component\n # assumes there is an image and returns an error trying to load it and the\n # frontend media control card breaks.\n self._media_image_hash = None\n\n self._media_image_file = file", "def get_next_image(self, timeout=10):\n self._log_debug('Got next image request')\n assert self.is_init, 'Camera must be initialised'\n if not self.is_running:\n self._log_debug('Camera was not running, start and grab the first image')\n self._got_image_event.clear()\n self.start()\n if not self._got_image_event.wait(timeout):\n raise TimeoutError('Getting image timed out')\n img = self._image_data\n self.stop()\n else:\n self._log_debug('Camera running, grab the first image to show up')\n self._got_image_event.clear()\n if not self._got_image_event.wait(timeout):\n raise TimeoutError('Getting image timed out')\n img = self._image_data\n return img", "def __photo_handler(self, update, context):\n trigger = Constructor.PHOTO_TRIGGER\n self.__handler(context, update, trigger)", "def get_photo(self):\n if photo_source == \"S3\":\n self._retrieve_random_file_from_s3()\n\n # Pick a random photo from the local folder\n files = [\n f\n for f in os.listdir(self._file_folder)\n if f.endswith(\"jpg\") or f.endswith(\"jpeg\")\n ]\n photo = files[random.randint(0, len(files) - 1)]\n\n # TODO: raise exception when there're no photos in the 
folder\n self._photo = PhotoWithBenefits(\n photo_path=os.path.join(self._file_folder, photo),\n throwback_thursday=self.throwback_thursday,\n )\n return self._photo", "async def _request_and_handle_images(self) -> None:\n try:\n req = protocol.CameraFeedRequest()\n async for evt in self.grpc_interface.CameraFeed(req):\n # If the camera feed is disabled after stream is setup, exit the stream\n # (the camera feed on the robot is disabled internally on stream exit)\n if not self._enabled:\n self.logger.warning('Camera feed has been disabled. Enable the feed to start/continue receiving camera feed data')\n return\n self._unpack_image(evt)\n except CancelledError:\n self.logger.debug('Camera feed task was cancelled. This is expected during disconnection.')", "def grab_image(self):\n _, camera_image = self.camera.read()\n with self.lock:\n self.image = camera_image" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Continue getting updates until an inline keyboard callback is received, then return it.
def __wait_for_inlinekeyboard_callback(self, cancellable: bool = False) \
        -> Union[telegram.CallbackQuery, CancelSignal]:
    log.debug("Waiting for a CallbackQuery...")
    while True:
        # Get the next update
        update = self.__receive_next_update()
        # If a CancelSignal is received...
        if isinstance(update, CancelSignal):
            # And the wait is cancellable...
            if cancellable:
                # Return the CancelSignal
                return update
            else:
                # Ignore the signal
                continue
        # Ensure the update is a CallbackQuery
        if update.callback_query is None:
            continue
        # Answer the callbackquery
        self.bot.answer_callback_query(update.callback_query.id)
        # Return the callbackquery
        return update.callback_query
[ "def keyboard_input(self) -> None:\n with Listener(on_press=self.press, on_release=self.release) as listener: # set keys to be read immediately\n listener.join()", "def get_input(self, timeout):\n while True:\n if self.led_callback != None:\n self.led_callback.update()\n time.sleep(0.05)\n else:\n time.sleep(0.1)\n (now, _press, _release) = self.api.read_keypad()\n if ord(now) != 0:\n return (True, ord(now))\n timeout -= 1\n if timeout < 1:\n return (False, None)", "def whileKeyPressed(self, code, keys=None):", "def on_keyboard(self, event):", "def key_wait():\n while 1:\n for event in get():\n if event.type == 'KEYDOWN':\n return event\n if event.type == 'QUIT':\n # convert QUIT into alt+F4\n return KeyDown('F4', '', True, False, True, False, False)\n _time.sleep(.001)", "def keypress(self):\n k = self.__screen.getch()\n ret = None\n if k == curses.KEY_ENTER or (k < 256 and chr(k) == '\\n'):\n ret = self.__textPad.gather()\n self.__textWin.clear()\n else:\n self.__textPad.do_command(k)\n\n self.__update()\n return ret", "def handle_keyboard_data(data):\n pass", "def get_key():\n\tinput_key: str = \"\"\n\ttry:\n\t\twhile not False:\n\t\t\twith Raw(sys.stdin):\n\t\t\t\tif not select([sys.stdin], [], [], 0.1)[0]: #* Wait 100ms for input on stdin then restart loop to check for stop flag\n\t\t\t\t\tcontinue\n\t\t\t\tinput_key += sys.stdin.read(1) #* Read 1 key safely with blocking on\n\t\t\t\tif input_key == \"\\033\": #* If first character is a escape sequence keep reading\n\t\t\t\t\twith Nonblocking(sys.stdin): #* Set non blocking to prevent read stall\n\t\t\t\t\t\tinput_key += sys.stdin.read(20)\n\t\t\t\t\t\tif input_key.startswith(\"\\033[<\"):\n\t\t\t\t\t\t\t_ = sys.stdin.read(1000)\n\t\t\t\tprint(\"INPUT: \"+input_key.replace(\"\\033\",\"<ESC>\"))\n\t\t\t\tif input_key == \"\\033\" or input_key == \"q\": #* Key is \"escape\" key if only containing \\033\n\t\t\t\t\tbreak\n\t\t\t\telif input_key.startswith((\"\\033[<0;\", \"\\033[<35;\", \"\\033[<64;\", \"\\033[<65;\")): #* Detected mouse event\n\t\t\t\t\ttry:\n\t\t\t\t\t\tprint((int(input_key.split(\";\")[1]), int(input_key.split(\";\")[2].rstrip(\"mM\"))))\n\t\t\t\t\texcept:\n\t\t\t\t\t\tpass\n\t\t\t\t\telse:\n\t\t\t\t\t\tif input_key.startswith(\"\\033[<35;\"):\n\t\t\t\t\t\t\tprint(\"mouse Move\") #* Detected mouse move in mouse direct mode\n\t\t\t\t\t\telif input_key.startswith(\"\\033[<64;\"):\n\t\t\t\t\t\t\tprint(\"mouse Scroll UP\") #* Detected mouse scroll up\n\t\t\t\t\t\telif input_key.startswith(\"\\033[<65;\"):\n\t\t\t\t\t\t\tprint(\"mouse Scroll DOWN\") #* Detected mouse scroll down\n\t\t\t\t\t\telif input_key.startswith(\"\\033[<0;\") and input_key.endswith(\"m\"):\n\t\t\t\t\t\t\tprint(\"mouse Click Release\") #* Detected mouse click release\n\t\t\t\tinput_key = \"\"\n\texcept Exception as e:\n\t\tprint(f'EXCEPTION: Input thread failed with exception: {e}')", "def sleep_wait_for_input(self):\n while True:\n if self.led_callback != None:\n self.led_callback.update()\n time.sleep(.24)\n else:\n time.sleep(.25)\n (now, _press, _release) = self.api.read_keypad()\n if ord(now) != 0:\n return True", "def get_next_signal(self):\n keypress = None\n\n while not keypress:\n #While no keypress received\n self.do_polling()\n if self.stream:\n keypress = self.get_stream()[0]\n time.sleep(0.01)\n\n return keypress", "def run(self):\n while(True):\n time.sleep(1)\n new_input = self.voiceProc.stdout.readline() if not self.voiceProc.stdout.closed else None\n if new_input:\n new_input = self.match_input(new_input)\n if new_input:\n #print 
new_input\n wx.PostEvent(self._notify_window, VoiceInputEvent(new_input))\n else:\n print \"No input\"\n return", "def getkey(self):\n userinput = self.peekinput()\n if type(userinput) == str:\n return self.getinput()\n else:\n return self.doc.cancelkey", "def getkey():\n\tglobal _s\n\twhile True:\n\t\te = _s.wait_event()\n\t\tif e.type == pygame.KEYDOWN and len(e.dict[\"unicode\"]) > 0:\n\t\t\treturn e.dict[\"unicode\"]", "def keypress() :\n last = 0\n tty.setcbreak(sys.stdin)\n\n try :\n while True :\n code = ord(sys.stdin.read(1)) \n if (code== 53 and last==91 or code==68) : \n key = \"left\"\n elif (code==98 or code==66) :\n key = \"down\"\n elif (code==54 or code==67) :\n key = \"right\"\n elif (code==49 or code==65 or code==69) :\n key = \"up\"\n else :\n key = None\n last=code\n if not(key is None) :\n yield key\n\n finally :\n #turn echo back on \n fd = sys.stdin.fileno()\n old = termios.tcgetattr(fd)\n old[3] = old[3] | termios.ECHO\n termios.tcsetattr(fd, termios.TCSADRAIN, old)", "def get_anykey(self):\n return input('Press --> ENTER<-- to move forward.')", "def onKeyPress(self):\n ch = read(fd, 4)\n if ch == '\\033': # escape\n self.pause()\n elif '\\033' in ch:\n return\n elif '\\t' in ch: # tab\n return\n elif len(self.user_input) >= 80: # too long\n self.user_input[:80]\n return\n elif ch == '\\r': # return\n if self.user_input == \"\":\n return\n command = command_list.match(self.user_input)\n if not command:\n pass\n elif command.group(1):\n self._save(0)\n elif command.group(2):\n self._save()\n elif command.group(3):\n self._save(command.group(4))\n link = self.links.match(self.user_input.lower())\n if link:\n self.reset(link.group(0))\n self.user_input = \"\"\n self.locked += 1\n print '\\033[0m'\n print_loc(' '*80, self.y+5, self.x+2)\n #print_loc(' '*80, self.y+6, 0)\n self.locked -= 1\n elif ch == '\\x7f': # backspace\n if self.user_input == \"\":\n return\n self.user_input = self.user_input[:-1]\n elif ch == ' ': # space\n if self.user_input == \"\":\n return\n elif self.user_input[-1] == ' ':\n return\n self.user_input += ' '\n else: # all else\n self.user_input += ch\n self.locked += 1\n # Highlight valid user input\n if self.links.match(self.user_input.lower()):\n print '\\033[0;96;4m'\n print_loc(self.user_input+'\\033[0;1m < \\033[0m ', self.y + 5, self.x)\n elif command_list.match(self.user_input):\n print '\\033[0;1;92m'\n print_loc(self.user_input+'\\033[0;1m < \\033[0m ', self.y + 5, self.x)\n else:\n print '\\033[0m'\n # Display new user input line\n print_loc(self.user_input+'\\033[0;7m \\033[0m ', self.y + 5, self.x)\n self.locked -= 1", "def wait_for_input():\n # Flag showing whether the input was found.\n found_input = False\n # Loop until receive a message.\n while not found_input:\n print(\"Waiting for an input.\")\n # Check for a new message from the redis queue.\n task = redis_connection.blpop([products_input_queue], 0)\n received_queue = task[0].decode(\"utf-8\")\n received_input = task[1].decode(\"utf-8\")\n print(f\"Received message from {received_queue}\")\n found_input = True\n return received_input", "def call_get_user_input_event(self):\n ...", "def wait_keydown(self):\n while True:\n self.clock.tick(self.fps)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.running = False\n return\n if event.type == pygame.KEYDOWN:\n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Select a user from the ones in the database.
def __user_select(self) -> Union[db.User, CancelSignal]:
    log.debug("Waiting for a user selection...")
    # Find all the users in the database
    users = self.session.query(db.User).order_by(db.User.user_id).all()
    # Create a list containing all the keyboard button strings
    keyboard_buttons = [[self.loc.get("menu_all_cancel")]]
    # Add to the list all the users
    for user in users:
        keyboard_buttons.append([user.identifiable_str()])
    # Create the keyboard
    keyboard = telegram.ReplyKeyboardMarkup(keyboard_buttons, one_time_keyboard=True)
    # Keep asking until a result is returned
    while True:
        # Send the keyboard
        self.bot.send_message(self.chat.id, self.loc.get("conversation_admin_select_user"), reply_markup=keyboard)
        # Wait for a reply
        reply = self.__wait_for_regex("user_([0-9]+)", cancellable=True)
        # Propagate CancelSignals
        if isinstance(reply, CancelSignal):
            return reply
        # Find the user in the database
        user = self.session.query(db.User).filter_by(user_id=int(reply)).one_or_none()
        # Ensure the user exists
        if not user:
            self.bot.send_message(self.chat.id, self.loc.get("error_user_does_not_exist"))
            continue
        return user
[ "def select_user(user_id):\n return session.query(User).filter(User.id == user_id).first()", "def get_user(identifier, value):\n user = get_db().execute(\n 'SELECT * FROM user WHERE {} = ?'.format(identifier),\n (value,)\n ).fetchone()\n\n return user", "def select_user(self):\n\t\tself.window.title('Select User')\n\t\tself._input_credentials()\n\t\tif self.status is True:\n\t\t\tif self._user_authorisation() is True:\n\t\t\t\t# Initialize contact management module and transfer control\n\t\t\t\ttablename = helper.scrub('contacts_' + self.username)\n\t\t\t\tcontact = contacts.ContactsManagement(self.window, self.frame, tablename)\n\t\t\telse:\n\t\t\t\ttk.messagebox.showerror(title='Failed to select user', message='The user does not exist!')\n\t\tself.draw_user_menu()", "def _select_user(self, **kwargs):\n def where(item):\n for k, v in kwargs.items():\n if item[k] != v:\n return False\n\n return True\n\n return random.choice(filter(where, self.accounts))", "def get_user_by_id(cur, id) -> str:\n cur.execute(f'''\n SELECT name FROM user WHERE id = {id} ''')\n return cur.fetchone()[0]", "def get_user(self, **kwargs):\n if not self.user_id:\n return (\n self.db_session.query(User)\n .filter(*[getattr(User, key) == value for key, value in kwargs.items()])\n .first()\n )\n\n return self.db_session.query(User).filter(User.id == self.user_id).first()", "def retrieve_user_from_db_by_username(username):\n # Query db for user with those params\n query = \"\"\"\n SELECT user_id, username, email, password FROM users\n WHERE users.username = '{}'\"\"\".format(username)\n\n return database.select_from_db(query)", "def retrieveUser(userName):\n \n all_reg_users = db.GqlQuery(\"SELECT * FROM RegisteredUsers ORDER BY created DESC\")\n\n if all_reg_users:\n for user in all_reg_users:\n if user.name == userName:\n return user\n return None", "def query_users_table_by_id(khoros_object, select_fields, user_id, first_item=False):\n if type(select_fields) == tuple or type(select_fields) == list or type(select_fields) == set:\n select_fields = ','.join(select_fields)\n liql_query = f\"select {select_fields} from users where id = '{user_id}'\"\n api_response = liql.perform_query(khoros_object, liql_query=liql_query, verify_success=True)\n if first_item:\n api_response = api_response['data']['items'][0]\n return api_response", "def getUserByuID(self, uID):\n cursor = self.conn.cursor()\n query = \"SELECT ufirstname, ulastname, udescription, urole, uclassification, email, pin \" \\\n \"FROM Users natural inner join Credential \" \\\n \"WHERE uID= %s;\"\n cursor.execute(query, (uID,))\n result = cursor.fetchone()\n return result", "def get_user_data(cursor, user_name: str):\n query = \"\"\"\n SELECT * FROM users\n WHERE user_name = %(user_name)s\n \"\"\"\n cursor.execute(query, {'user_name': user_name})\n\n return cursor.fetchone()", "def get_user_by_id(id):\n\n\treturn User.query.get(id)", "def get_user_by_name(cls, username):\n a = db.GqlQuery(\"select * from Users where username=:1\", username)\n return a", "def search_user_by_id(self,id, cursor):\n sql = \"SELECT * FROM users WHERE userid = %s\"\n cursor.execute(sql, (id,))\n return cursor", "def query_users_table_by_id(self, select_fields, user_id):\n return objects_module.users.query_users_table_by_id(self.khoros_object, select_fields, user_id)", "def get_user_by_phone(self, phone):\n sql = 'select id ,first_name' \\\n ',last_name' \\\n ',password' \\\n ',phone ' \\\n 'from account_user ' \\\n 'where phone = %s'\n user = User.objects.raw(sql, [phone])[0];\n 
return user", "def get_single_user(self, id):\n for user in self.users:\n if user['id'] == id:\n return user", "def test_get_user_by_name(self):\n\n # Select on empty set\n selected = self.user_api.get_user_by_name(MAGEN_USER['username'])\n self.assertFalse(selected.success)\n self.assertIsNone(selected.documents)\n\n # Insert user\n inserted = self.user_api.insert_user(MAGEN_USER)\n self.assertTrue(inserted.success)\n\n # Select user by username\n selected = self.user_api.get_user_by_name(MAGEN_USER['username'])\n self.assertTrue(selected.success)\n self.assertEqual(selected.documents, MAGEN_USER)", "def selectUserInfo(self, userID):\n try:\n self._select(USER_TABLE.table,\n self.__columns + (USER_TABLE.isAdmin, ),\n self.__idCondition, (userID, ))\n except Exception:\n print('failed to select user info')\n raise" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
User menu to order products from the shop.
def __order_menu(self):
    log.debug("Displaying __order_menu")
    # Get the products list from the db
    products = self.session.query(db.Product).filter_by(deleted=False).all()
    # Create a dict to be used as 'cart'
    # The key is the message id of the product list
    cart: Dict[List[db.Product, int]] = {}
    # Initialize the products list
    for product in products:
        # If the product is not for sale, don't display it
        if product.price is None:
            continue
        # Send the message without the keyboard to get the message id
        message = product.send_as_message(w=self, chat_id=self.chat.id)
        # Add the product to the cart
        cart[message['result']['message_id']] = [product, 0]
        # Create the inline keyboard to add the product to the cart
        inline_keyboard = telegram.InlineKeyboardMarkup(
            [[telegram.InlineKeyboardButton(self.loc.get("menu_add_to_cart"), callback_data="cart_add")]]
        )
        # Edit the sent message and add the inline keyboard
        if product.image is None:
            self.bot.edit_message_text(chat_id=self.chat.id,
                                       message_id=message['result']['message_id'],
                                       text=product.text(w=self),
                                       reply_markup=inline_keyboard)
        else:
            self.bot.edit_message_caption(chat_id=self.chat.id,
                                          message_id=message['result']['message_id'],
                                          caption=product.text(w=self),
                                          reply_markup=inline_keyboard)
    # Create the keyboard with the cancel button
    inline_keyboard = telegram.InlineKeyboardMarkup(
        [[telegram.InlineKeyboardButton(self.loc.get("menu_all_cancel"), callback_data="cart_cancel")]])
    # Send a message containing the button to cancel or pay
    final_msg = self.bot.send_message(self.chat.id,
                                      self.loc.get("conversation_cart_actions"),
                                      reply_markup=inline_keyboard)
    # Wait for user input
    while True:
        callback = self.__wait_for_inlinekeyboard_callback()
        # React to the user input
        # If the cancel button has been pressed...
        if callback.data == "cart_cancel":
            # Stop waiting for user input and go back to the previous menu
            return
        # If an Add to Cart button has been pressed...
        elif callback.data == "cart_add":
            # Get the selected product, ensuring it exists
            p = cart.get(callback.message.message_id)
            if p is None:
                continue
            product = p[0]
            # Add 1 copy to the cart
            cart[callback.message.message_id][1] += 1
            # Create the product inline keyboard
            product_inline_keyboard = telegram.InlineKeyboardMarkup(
                [
                    [telegram.InlineKeyboardButton(self.loc.get("menu_add_to_cart"), callback_data="cart_add"),
                     telegram.InlineKeyboardButton(self.loc.get("menu_remove_from_cart"),
                                                   callback_data="cart_remove")]
                ])
            # Create the final inline keyboard
            final_inline_keyboard = telegram.InlineKeyboardMarkup(
                [
                    [telegram.InlineKeyboardButton(self.loc.get("menu_all_cancel"), callback_data="cart_cancel")],
                    [telegram.InlineKeyboardButton(self.loc.get("menu_done"), callback_data="cart_done")]
                ])
            # Edit both the product and the final message
            if product.image is None:
                self.bot.edit_message_text(chat_id=self.chat.id,
                                           message_id=callback.message.message_id,
                                           text=product.text(w=self, cart_qty=cart[callback.message.message_id][1]),
                                           reply_markup=product_inline_keyboard)
            else:
                self.bot.edit_message_caption(chat_id=self.chat.id,
                                              message_id=callback.message.message_id,
                                              caption=product.text(w=self,
                                                                   cart_qty=cart[callback.message.message_id][1]),
                                              reply_markup=product_inline_keyboard)
            self.bot.edit_message_text(
                chat_id=self.chat.id,
                message_id=final_msg.message_id,
                text=self.loc.get("conversation_confirm_cart",
                                  product_list=self.__get_cart_summary(cart),
                                  total_cost=str(self.__get_cart_value(cart))),
                reply_markup=final_inline_keyboard)
        # If the Remove from cart button has been pressed...
        elif callback.data == "cart_remove":
            # Get the selected product, ensuring it exists
            p = cart.get(callback.message.message_id)
            if p is None:
                continue
            product = p[0]
            # Remove 1 copy from the cart
            if cart[callback.message.message_id][1] > 0:
                cart[callback.message.message_id][1] -= 1
            else:
                continue
            # Create the product inline keyboard
            product_inline_list = [[telegram.InlineKeyboardButton(self.loc.get("menu_add_to_cart"),
                                                                  callback_data="cart_add")]]
            if cart[callback.message.message_id][1] > 0:
                product_inline_list[0].append(telegram.InlineKeyboardButton(self.loc.get("menu_remove_from_cart"),
                                                                            callback_data="cart_remove"))
            product_inline_keyboard = telegram.InlineKeyboardMarkup(product_inline_list)
            # Create the final inline keyboard
            final_inline_list = [[telegram.InlineKeyboardButton(self.loc.get("menu_all_cancel"),
                                                                callback_data="cart_cancel")]]
            for product_id in cart:
                if cart[product_id][1] > 0:
                    final_inline_list.append([telegram.InlineKeyboardButton(self.loc.get("menu_done"),
                                                                            callback_data="cart_done")])
                    break
            final_inline_keyboard = telegram.InlineKeyboardMarkup(final_inline_list)
            # Edit the product message
            if product.image is None:
                self.bot.edit_message_text(chat_id=self.chat.id,
                                           message_id=callback.message.message_id,
                                           text=product.text(w=self, cart_qty=cart[callback.message.message_id][1]),
                                           reply_markup=product_inline_keyboard)
            else:
                self.bot.edit_message_caption(chat_id=self.chat.id,
                                              message_id=callback.message.message_id,
                                              caption=product.text(w=self,
                                                                   cart_qty=cart[callback.message.message_id][1]),
                                              reply_markup=product_inline_keyboard)
            self.bot.edit_message_text(
                chat_id=self.chat.id,
                message_id=final_msg.message_id,
                text=self.loc.get("conversation_confirm_cart",
                                  product_list=self.__get_cart_summary(cart),
                                  total_cost=str(self.__get_cart_value(cart))),
                reply_markup=final_inline_keyboard)
        # If the done button has been pressed...
        elif callback.data == "cart_done":
            # End the loop
            break
    # Create an inline keyboard with a single skip button
    cancel = telegram.InlineKeyboardMarkup([[telegram.InlineKeyboardButton(self.loc.get("menu_skip"),
                                                                           callback_data="cmd_cancel")]])
    # Ask if the user wants to add notes to the order
    self.bot.send_message(self.chat.id, self.loc.get("ask_order_notes"), reply_markup=cancel)
    # Wait for user input
    notes = self.__wait_for_regex(r"(.*)", cancellable=True)
    # Create a new Order
    order = db.Order(user=self.user,
                     creation_date=datetime.datetime.now(),
                     notes=notes if not isinstance(notes, CancelSignal) else "")
    # Add the record to the session and get an ID
    self.session.add(order)
    self.session.flush()
    # For each product added to the cart, create a new OrderItem
    for product in cart:
        # Create {quantity} new OrderItems
        for i in range(0, cart[product][1]):
            order_item = db.OrderItem(product=cart[product][0],
                                      order_id=order.order_id)
            self.session.add(order_item)
    # Ensure the user has enough credit to make the purchase
    credit_required = self.__get_cart_value(cart) - self.user.credit
    # Notify user in case of insufficient credit
    if credit_required > 0:
        self.bot.send_message(self.chat.id, self.loc.get("error_not_enough_credit"))
        # Suggest payment for missing credit value if configuration allows refill
        if self.cfg.ccard["credit_card_token"] != "" \
                and self.cfg.appearance["refill_on_checkout"] \
                and self.Price(self.cfg.ccard["min_amount"]) <= \
                credit_required <= \
                self.Price(self.cfg.ccard["max_amount"]):
            self.__make_payment(self.Price(credit_required))
    # If after the requested payment the credit is still insufficient (either payment failure or cancel)
    if self.user.credit < self.__get_cart_value(cart):
        # Rollback all the changes
        self.session.rollback()
    else:
        # User has credit and valid order, perform transaction now
        self.__order_transaction(order=order, value=-int(self.__get_cart_value(cart)))
[ "def __products_menu(self):\n log.debug(\"Displaying __products_menu\")\n # Get the products list from the db\n products = self.session.query(db.Product).filter_by(deleted=False).all()\n # Create a list of product names\n product_names = [product.name for product in products]\n # Insert at the start of the list the add product option, the remove product option and the Cancel option\n product_names.insert(0, self.loc.get(\"menu_all_cancel\"))\n product_names.insert(1, self.loc.get(\"menu_add_product\"))\n product_names.insert(2, self.loc.get(\"menu_delete_product\"))\n # Create a keyboard using the product names\n keyboard = [[telegram.KeyboardButton(product_name)] for product_name in product_names]\n # Send the previously created keyboard to the user (ensuring it can be clicked only 1 time)\n self.bot.send_message(self.chat.id, self.loc.get(\"conversation_admin_select_product\"),\n reply_markup=telegram.ReplyKeyboardMarkup(keyboard, one_time_keyboard=True))\n # Wait for a reply from the user\n selection = self.__wait_for_specific_message(product_names, cancellable=True)\n # If the user has selected the Cancel option...\n if isinstance(selection, CancelSignal):\n # Exit the menu\n return\n # If the user has selected the Add Product option...\n elif selection == self.loc.get(\"menu_add_product\"):\n # Open the add product menu\n self.__edit_product_menu()\n # If the user has selected the Remove Product option...\n elif selection == self.loc.get(\"menu_delete_product\"):\n # Open the delete product menu\n self.__delete_product_menu()\n # If the user has selected a product\n else:\n # Find the selected product\n product = self.session.query(db.Product).filter_by(name=selection, deleted=False).one()\n # Open the edit menu for that specific product\n self.__edit_product_menu(product=product)", "def open_products_page(catalog_menu):\n catalog_menu.open_products_page()", "def menu(request):\n cart = cartData(request)\n cart_items = cart['cart_items']\n # order = cart['order']\n # items = cart['items']\n # Get all our object\n products = BobaProduct.objects.all()\n # Dictionary to hold our products\n context = {\"products\": products, \"cart_items\": cart_items}\n return render(request, 'store/menu.html', context)", "def product_menu(request):\n\n brands = Brand.objects.all().order_by('brand')\n categories = Category.objects.all().order_by('name')\n\n context = {\n 'brands': brands,\n 'categories': categories\n\n }\n\n return context", "def products_menu():\n \n product_menu_options = \"\"\"\n Welcome to the products menu, your options are:\n 0) Return to main menu\n 1) Show current products list\n 2) Add a new item to the products list\n 3) Edit an existing item from the products list\n 4) Delete an item from the products list\n \"\"\"\n print(product_menu_options)", "def main_menu(self):\n action = ''\n while action != 'q':\n os.system('cls')\n self.header(\"Orders\")\n print(\"You can do the following: \")\n print(\"1. Rent a car\")\n print(\"2. Return car\")\n print(\"3. Current orders\")\n print(\"4. Completed orders\")\n print(\"5. Revoke order\")\n print(\"6. Edit order\")\n print(\"7. List order history of car\")\n print(\"8. 
List order history of customer\")\n print(\"\\n\"\"\\33[;31mPress q to go back \\33[;0m\")\n\n action = input(\"\\nChoose an option: \")\n if action == '1':\n self.rent_car()\n\n elif action == '2':\n self.return_car()\n\n elif action == '3':\n self.header(\"Current orders\")\n orders = self.__order_service.get_orders()\n if orders:\n self.__order_service.print_current_orders(orders)\n else:\n print(\"\\nNo orders\\n\")\n input(\"\\33[;32mPress enter to continue \\33[;0m\")\n\n elif action == '4':\n self.completed_orders()\n\n elif action == '5':\n self.revoke_order()\n\n elif action == '6':\n self.edit_current_order()\n\n elif action == \"7\":\n self.history_of_car()\n\n elif action == '8':\n self.get_order_history_of_customer()", "def products(request):\n\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n products = Product.objects.all()\n template = \"auctionsmng/products.html\"\n\n context = {\n 'products': products\n }\n\n return render(request, template, context)", "def sellMenu(userid, args):\r\n buildSellMenu(userid)", "def setup_products(self, *args, **kwargs):", "def open_cart(self):\n self.__CART_BUTTON.click()", "def products():\n\n\treturn render_template(\"products.html\")", "def handle_order():\n\torder=input(\"> \").lower()\n\tif order != \"quit\":\n\t\tif order in available_menu:\n\t\t\t\n\t\t\torder_list.append(order)\n\t\t\tif order in order_list:\n\t\t\t\tcount=0\n\t\t\t\tfor i in order_list:\n\t\t\t\t\tif i==order:\n\t\t\t\t\t\tcount+=1\n\t\t\t\tprint(f\"** {count} order of {order} have been added to your meal **\")\n\t\t\t\thandle_order()\n\t\t\telse:\n\t\t\t\torder_list.append(order)\n\t\t\t\tprint(f\"** 1 order of {order} have been added to your meal **\")\n\t\t\t\thandle_order()\t\n\n\t\telse:\n\t\t\tprint(f\"please choose item form the menu or type quit to quit\")\n\t\t\thandle_order()\t\n\telse:\n\t\treturn", "def main():\n store = Store()\n store.user_menu()", "def click_procesos(self):\n self.button.click(menu_catalog.M06_PROCESOS)", "def make_purchase(self):\n choice = int(input('Enter the vinyl ID of what Vinyl you want to buy'))\n self.cart.append(choice)", "def test_ordering_product_on_swaglabs_and_getting_confirmation(browser):", "def browse_all_products(request):\n if login_required_if_login_only_mode(request):\n return redirect('%s?next=%s' % (settings.LOGIN_URL, request.path))\n\n return render(request, \"productdb/browse/view_products.html\", context={})", "def test_add_product_to_cart(self, driver):\n logging.info(\"Start test case: Continue Shop\")\n data = self.test_data[\"Continue Shop\"][\"Products\"][0]\n logging.info(f\"Test data: [{data}]\")\n product_name = data[\"Product Name\"]\n\n select_product(driver, data[\"Page\"], product_name)\n add_product_to_cart(driver, data[\"Size\"], data[\"Color\"], data[\"Quantity\"])\n assert is_product_in_cart(driver, product_name)\n continue_shopping_from_order_summary(driver)\n assert verify_current_page_is_home(driver)", "def add_to_basket(self):\n button_add_to_basket = self.browser.find_element(*ProductPageLocators.basket_btn_loc)\n button_add_to_basket.click()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Display the status of the sent orders.
def __order_status(self):
    log.debug("Displaying __order_status")
    # Find the latest orders
    orders = self.session.query(db.Order) \
        .filter(db.Order.user == self.user) \
        .order_by(db.Order.creation_date.desc()) \
        .limit(20) \
        .all()
    # Ensure there is at least one order to display
    if len(orders) == 0:
        self.bot.send_message(self.chat.id, self.loc.get("error_no_orders"))
    # Display the order status to the user
    for order in orders:
        self.bot.send_message(self.chat.id, order.text(w=self, session=self.session, user=True))
    # TODO: maybe add a page displayer instead of showing the latest 5 orders
[ "def show_orders(self):\n\n data = cur.execute(\"\"\"SELECT * FROM orders\"\"\").fetchall()\n print(tabulate(data, headers=[\"Order ID\", \"Status\", \"Customer\", \"Address\", \"Delivery Method\"]))", "def order_update_status():\n result = order_obj.order_update_status(request.forms) \n return result", "def get_order_status(self, uuid):\n\t\torder = self.my_bittrex.get_order(uuid)[\"result\"]\n\t\treturn f'Order {order[\"OrderUuid\"]}\\n\\n{order[\"Exchange\"]}\\nType: {order[\"Type\"]}\\nQuantity: {order[\"Quantity\"]}\\nPrice: {order[\"Limit\"]}\\nBTC total: {order[\"Reserved\"]}\\n\\nOpen: {order[\"IsOpen\"]}'", "def export_order_status_button(cls, store_views):\n pass", "def printStatus(self):\n\n print(\"\\nSubscriptions: \")\n subs = {k: v for k, v in self.data.items() if \"subscription\" in v.keys()}\n if not subs: print(\"\\tNone\")\n else:\n for key, value in subs.items():\n print(\"\\tID: {}. Name: {}\".format(key, value[\"subscription\"][0]))\n\n print(\"Streaming Data:\")\n streams = {k: v for k, v in self.data.items() if \"price\" in v.keys()}\n if not streams: print(\"\\tNone\")\n else:\n for key, value in streams.items():\n print(\"\\t{}: {}\".format(value[\"subscription\"][0], value[\"price\"]))\n\n print(\"Positions:\")\n positions = self.logic.account.positions #pylint: disable=no-member\n if not positions: print(\"\\tNone\")\n else:\n for key, value in positions.items():\n print(\"\\t{}: #Contracts: {}\".format(key, value[1]))\n\n print(\"Orders:\")\n orders = self.logic.account.openOrders #pylint: disable=no-member\n if not orders: print(\"\\tNone\")\n else:\n for key, value in orders.items():\n print(\"\\tID {}: Contract: {}. Order: {}. State: {}\".format(\n key, value[0].localSymbol, value[1], value[2]))", "def order_status(self, obj):\n return obj.order.all()[0].status", "def print_status(self):\n\n packages = self.packages[\"Delivered\"]\n\n for packageID, package in packages.items():\n package.status = \"Not Delivered\"\n\n next_delivery_time = None\n while next_delivery_time != self.end_time:\n next_delivery_time = self.end_time\n for packageID, package in packages.items():\n if package.status == \"Not Delivered\" and package.delivery_time < next_delivery_time:\n next_delivery_time = package.delivery_time\n\n for packageID, package in packages.items():\n if package.delivery_time <= next_delivery_time:\n package.status = \"Delivered\"\n\n\n print(next_delivery_time)\n pprint([{packageID: package.status} for packageID, package in self.packages[\"Delivered\"].items()])\n print(\"\\n\")", "async def update_order_status():\n symbol = App.config[\"symbol\"]\n\n # Get currently active order and id (if any)\n order = App.order\n order_id = order.get(\"orderId\", 0) if order else 0\n if not order_id:\n log.error(f\"Wrong state or use: check order status cannot find the order id.\")\n return None\n\n # -----\n # Retrieve order from the server\n try:\n new_order = App.client.get_order(symbol=symbol, orderId=order_id)\n except Exception as e:\n log.error(f\"Binance exception in 'get_order' {e}\")\n return\n\n # Impose and overwrite the new order information\n if new_order:\n order.update(new_order)\n else:\n return None\n\n # Now order[\"status\"] contains the latest status of the order\n return order[\"status\"]", "def print_status(self):\n statuses = self.status_dict()\n for k, v in statuses.items():\n print(f'\\nLine: {k}\\nStatus: {v}\\n')", "def order_status(self, order_status):\n\n self._order_status = order_status", "def format_status(self) -> str:\n if not 
self.ready_to_trade:\n return \"Market connectors are not ready.\"\n lines = []\n\n balance_df = self.get_balance_df()\n lines.extend([\"\", \" Balances:\"] + [\" \" + line for line in balance_df.to_string(index=False).split(\"\\n\")])\n\n exchanges_df = self.exchanges_df()\n lines.extend([\"\", \" Exchanges:\"] + [\" \" + line for line in exchanges_df.to_string(index=False).split(\"\\n\")])\n\n try:\n orders_df = self.active_orders_df()\n lines.extend([\"\", \" Active Orders:\"] + [\" \" + line for line in orders_df.to_string(index=False).split(\"\\n\")])\n except ValueError:\n lines.extend([\"\", \" No active maker orders.\"])\n\n return \"\\n\".join(lines)", "def status_view(self):\n return self.post(action=\"status_trn\")", "def statuses(self):\n big = BigCommerceAPI()\n response = big.get('orderstatuses')\n return response.text", "def show_sent_message(sent_messages):\n print(\"\\nThese are the printed messages.\")\n for sent_message in sent_messages:\n print(sent_message)", "def format_status(self) -> str:\n if not self.ready_to_trade:\n return \"Market connectors are not ready.\"\n lines = []\n warning_lines = []\n warning_lines.extend(self.network_warning(self.get_market_trading_pair_tuples()))\n\n balance_df = self.get_balance_df()\n lines.extend([\"\", \" Balances:\"] + [\" \" + line for line in balance_df.to_string(index=False).split(\"\\n\")])\n\n try:\n df = self.active_orders_df()\n lines.extend([\"\", \" Orders:\"] + [\" \" + line for line in df.to_string(index=False).split(\"\\n\")])\n except ValueError:\n lines.extend([\"\", \" No active maker orders.\"])\n\n warning_lines.extend(self.balance_warning(self.get_market_trading_pair_tuples()))\n if len(warning_lines) > 0:\n lines.extend([\"\", \"*** WARNINGS ***\"] + warning_lines)\n return \"\\n\".join(lines)", "def ConfirmedTradeStatus():\n return 'FO Confirmed'", "def sendStatus(msg, arg=None):\n\t\tprint(\"STATUS\", msg, arg, sep='\\t')", "def test_order_status_list(self):\n pass", "def order_confirmation():\n print(\"order_confirmation()\")\n message = \"\"\n\n response = make_response(render_template(\"order_confirmation.html\", app_config=config.app, message=message))\n\n return response" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Send information about the bot.
def __bot_info(self):
    log.debug("Displaying __bot_info")
    self.bot.send_message(self.chat.id, self.loc.get("bot_info"))
[ "async def _botinfo(self, ctx):\n embed = discord.Embed(title=\"Bot Information\", color=discord.Color.green(),\n description=\"\")\n embed.add_field(name=\"Creation Date\",\n value=\"%s\" % discord.utils.snowflake_time(ctx.bot.user.id).strftime(\n \"%Y-%m-%d %H:%M:%S\"), inline=True)\n embed.add_field(name=\"Guilds\", value=\"%s\" % len(self.bot.guilds), inline=True)\n embed.add_field(name=\"Gods\", value=\"%s\" % str(database.getGodsGlobalCount()), inline=True)\n embed.add_field(name=\"Believers\", value=\"%s\" % str(database.getBelieversGlobalCount()), inline=True)\n embed.set_footer(text=\"%s\" % ctx.author.name,\n icon_url=ctx.author.avatar_url)\n await ctx.send(embed=embed)", "def send_info(self):\r\n pass", "async def info(self, ctx: commands.Context) -> None:\n\n # change permissions integer here if need be\n auth_url = 'https://discordapp.com/api/oauth2/authorize?client_id={}&permissions={}&scope=bot'.format(\n self.bot.user.id,\n 473300048\n )\n embed = c.crimbed(\n title='crimsoBOT info!',\n descr='crimsoBOT was born of boredom and is maintained from love.\\n',\n thumb_name='pfp',\n footer='Thanks for using crimsoBOT!'\n )\n embed.add_field(\n name=\"crimsoBOT's Discord server\",\n value='https://discord.gg/Kj3WNHX',\n inline=False\n )\n embed.add_field(\n name='Invite crimsoBOT to your server!',\n value=auth_url,\n inline=False\n )\n embed.add_field(\n name='Support crimsoBOT server time, get a sticker!',\n value='https://www.patreon.com/crimso',\n inline=False\n )\n embed.add_field(\n name='Buy stickers and more *a la carte*!',\n value='https://crimsobot-store.square.site/',\n inline=False\n )\n\n await ctx.send(embed=embed)", "async def botinfo(ctx):\n colour = ''.join([random.choice('0123456789ABCDEF') for x in range(6)])\n colour = int(colour, 16)\n embed = discord.Embed(colour = discord.Colour(value = colour), timestamp = datetime.datetime.utcnow())\n embed.add_field(name='Bot Info', value = \"I'm made with the library Discord.py Async.\"\n \" I'm developed by Shutdown.py#2406. 
\"\n \"If you need any help with me, Join my [devs' server](https://discord.gg/X4CJdEM).\"\n \"Send feedback using the feedback command\")\n embed.add_field(name='Total Commands', value=(len(bot.commands)))\n embed.add_field(name = 'Invite Me!', value = '[Invite](https://discordbots.org/bot/399115688792424448)')\n embed.set_footer(text= \"{} | Requested by: {} at\".format(version, ctx.message.author))\n await bot.say(embed = embed)", "async def botinfo(ctx, bot: typing.Union[discord.Member, discord.User]):\n if not bot.bot:\n return await r(ctx, 'Not a bot.')\n\n data = await make_request(\"https://www.motiondevelopment.top/api/v1.2/bots/\", bot.id)\n \n e = discord.Embed(\n title=f'Available bot info for {bot}',\n color=0xfecdea,\n description=f\"**Short Bot Description:** (do `uwu desc [bot]` for big description)\\n\\n*{data['Small_desc']}*\"\n )\n\n if data[\"bot_status\"] == \"online\":\n status = '<:online:805576670353948702> Online'\n elif data[\"bot_status\"] == \"idle\":\n status = '<:idle:805855470778056725> Idle'\n elif data[\"bot_status\"] == \"offline\":\n status = '<:offline:805576352450871346> Offline'\n elif data[\"bot_status\"] == \"dnd\":\n status = '<:dnd:819964146317393990> Do Not Disturb'\n\n listed_at = datetime.datetime.strptime(data[\"list_date\"], '%Y-%m-%d')\n\n e.add_field(\n name='Owner:', value=f'**{data[\"owner_name\"]}**\\n({data[\"owner_id\"]})', inline=False)\n e.add_field(name='Tags:', value=', '.join(data[\"tops\"]))\n e.add_field(name='Vanity URL:', value=data[\"vanity_url\"]\n if data[\"vanity_url\"] != '' else 'No vanity URL set.', inline=False)\n e.add_field(name='Bot Status:', value=status)\n e.add_field(name='Invites:',\n value=f'[Bot Invite]({data[\"invite\"]})\\n[Bot Support Server](https://discord.gg/{data[\"discord\"]})', inline=False)\n e.add_field(name='Other Bot Info:', value=f'''\n **Prefix:** `{data[\"prefix\"]}`\n **Site:** {data[\"site\"] if data[\"site\"] != '' else \"No sites.\"}\n **Library:** {data[\"lib\"]}\n **Listed at:** {listed_at}\n **Server Count:** {data[\"servers\"] if data[\"servers\"] != 'None' else '*Not set up!*'}''', inline=False)\n e.set_thumbnail(url=f'https://cdn.discordapp.com/avatars/{data[\"id\"]}/{data[\"avatar\"]}')\n await em(ctx, embed=e)", "def details(update, context):\n context.bot.send_message(chat_id=update.effective_chat.id, text=str(owner))", "async def info(self, ctx):\n if ctx.guild is not None:\n await ctx.reply(\"This command can only be used in DMs, because of privacy reasons.\")\n raise commands.CommandError(\"Invoker not in DMs.\")\n\n if not is_linked(ctx.author.id):\n await ctx.reply(f\"You don't have a Spotify account linked. 
Please link one using \"\n f\"`{self.bot_config['prefix']}link`.\")\n raise commands.CommandError(\"User has no spotify account linked.\")\n\n sp = init_spotify(ctx.author.id)\n result = sp.me()\n msg_embed = Embed()\n msg_embed.title = \"Linked Spotify account\"\n msg_embed.url = result['external_urls'].get('spotify', None)\n if len(result['images']) > 0:\n msg_embed.set_image(url=result['images'][0]['url'])\n msg_embed.add_field(name=\"Display name\", value=result['display_name'])\n msg_embed.add_field(name=\"Subscription type\", value=result.get('product', 'free'))\n if result.get('product', None) != \"premium\":\n msg_embed.add_field(name=\"Warning!\",\n value=\"Only accounts with Spotify Premium can use this bot!\",\n inline=False)\n await ctx.reply(embed=msg_embed)", "def info(bot, update):\n\tchat_id = update.message.chat_id\n\tif update.message.chat_id == cfg.FANTA_ID:\n\t\treturn bot.send_message(chat_id=chat_id,\n\t\t text='Utilizza la chat privata')\n\n\tg = open('info.txt', 'r')\n\tcontent = g.readlines()\n\tg.close()\n\n\tmessage = ''\n\tfor row in content:\n\t\trow = row.replace('xx\\n', ' ')\n\t\tmessage += row\n\n\tlg.logger.info(f'/INFO - {select_user(update, )}')\n\n\treturn bot.send_message(parse_mode='HTML', chat_id=chat_id, text=message)", "async def server_info(self, ctx: commands.context.Context) -> None:\n server = ctx.message.guild\n info = \"name: {}\\nuser count: {}\\ncreation date: {}\".format(\n server.name,\n server.member_count,\n server.created_at)\n em = discord.Embed(title='server info',\n description=info, colour=col.green())\n em.set_thumbnail(url=server.icon_url)\n await ctx.message.channel.send(embed=em)", "def Dciaosoho(self):\n self.bot.send_message(241317532,\"ciao osho\")", "def handle(bot, update):\n print(update.message.text)\n bot.send_message(chat_id=update.message.chat_id,\n text='Hey! 
I\\'m Meditech Bot')", "async def info(ctx, message):\n if ctx.args.command == None:\n embed = discord.Embed()\n embed.add_field(name=\"Profile\", value=ctx.profile.name)\n embed.add_field(name=\"Mode\", value=ctx.profile.mode)\n embed.set_author(name=ctx.user.name, icon_url=ctx.user.avatar_url)\n await message.channel.send(embed=embed)\n else:\n embed = discord.Embed(\n title=\"{0.profile.prefix}{0.args.command.name} {0.args.command.usage}\".format(ctx),\n description=ctx.args.command.help,\n url=\"https://github.com/Synixe/Bot/blame/master/{0}#L{1.start}L{1.end}\".format(ctx.args.command.file.replace(os.getcwd(), \"\"), ctx.args.command)\n )\n embed.set_footer(text=ctx.args.command.extension.fullname + \".\" + ctx.args.command.name)\n await message.channel.send(embed=embed)", "def send_game_info( game, client_key, from_name, send_message_func ): # TODO: change game to lobby?\n\n game_info = message.Message( client_key, 'd' )\n new_message = game_info.new_message(from_name, game.game.game_name, game.get_player_names(),\n game.game.min_players, game.game.max_players, game.get_time_till_start())\n game_info.message = new_message\n game_info.to_clients = [ client_key ]\n\n send_message_func( game_info )", "async def info(self,ctx):\n avatar=self.bot.user.avatar_url_as(format=None,static_format='png',size=1024)\n repo=discord.Embed(color=embedColour)\n repo.set_author(name=self.bot.user.name,icon_url=avatar)\n repo.set_thumbnail(url=avatar)\n repo.add_field(name=\"Hva?\",value=\"Ein bot laga av MarlinMr.\")\n repo.add_field(name=\"Kildekode\",value=\"[Gitlab](https://gitlab.com/MarlinMr/trollbot).\",inline=True)\n await ctx.send(embed=repo)", "async def roominfo(self, ctx: Message):\n\t\tawait self.send(\n\t\t f\"Name: {self.room.name} • Description: {self.room.description} • ID: {self.room.id} • Member Count: {self.room.count} • Created at: {self.room.created_at} • Is Private?: {self.room.is_private}\"\n\t\t)", "async def info(self, ctx, trigger_name: str):\n trigger = self.get_trigger_by_name(trigger_name)\n if trigger:\n msg = \"Name: {}\\n\".format(trigger.name)\n owner_name = discord.utils.get(self.bot.get_all_members(), id=trigger.owner)\n owner_name = owner_name if owner_name is not None else \"not found\"\n msg += \"Owner: {} ({})\\n\".format(owner_name, trigger.owner)\n trigger_type = \"all responses\" if trigger.type == \"all\" else \"random response\"\n msg += \"Type: {}\\n\".format(trigger_type)\n influence = \"server\" if trigger.server is not None else \"global\"\n msg += \"Influence: {}\\n\".format(influence)\n cs = \"yes\" if trigger.case_sensitive else \"no\"\n msg += \"Case Sensitive: {}\\n\".format(cs)\n regex = \"yes\" if trigger.regex else \"no\"\n msg += \"Regex: {}\\n\".format(regex)\n msg += \"Channels: (In this server)\\n\"\n if trigger.server not in (None, ctx.guild.id):\n msg+=\" None (Not enabled in server)\\n\"\n elif not trigger.channels.get(str(ctx.guild.id)):\n msg+=\" All\\n\"\n else:\n for channel in trigger.channels[str(ctx.guild.id)]:\n try:\n c=self.bot.get_channel(channel)\n if c.guild.id == ctx.guild.id:\n msg+=\" #{}\\n\".format(c.name)\n except Exception as e:\n msg+=\" {} (unknown)\\n\".format(channel)\n msg += \"Cooldown: {} seconds\\n\".format(trigger.cooldown)\n msg += \"Triggered By: \\\"{}\\\"\\n\".format(trigger.triggered_by.replace(\"`\", \"\\\\`\"))\n msg += \"Payload: {} responses\\n\".format(len(trigger.responses))\n msg += \"Triggered: {} times\\n\".format(trigger.triggered)\n await ctx.send(box(msg))\n else:\n await ctx.send(\"There is no 
trigger with that name.\")", "def update_info(self):\n\n r = requests.get(self.url + 'getMe')\n if r.status_code == 200:\n response = json.loads(r.text)\n if response['ok']:\n bot_info = response['result']\n self.user_id = bot_info['id']\n self.first_name = bot_info['first_name']\n if 'last_name' in bot_info:\n self.last_name = bot_info['last_name']\n if 'username' in bot_info:\n self.username = bot_info['username']\n else:\n raise TelegramError('The result was not \"ok\"')\n else:\n raise TelegramError('Did not get a 200 response', r.status_code)", "async def bot_info(self, id: int) -> dict:\n return await self._do_request(\"get\", botinfo_address, self._user_auth,\n params={\"id\": id})", "async def hello(self):\t# << This is the actual command, or input # << Info\r\n\r\n await self.bot.say(\"Hi there!\")\t# << This is the output\r" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Display the admin menu to select a product to edit.
def __products_menu(self):
    log.debug("Displaying __products_menu")
    # Get the products list from the db
    products = self.session.query(db.Product).filter_by(deleted=False).all()
    # Create a list of product names
    product_names = [product.name for product in products]
    # Insert at the start of the list the add product option, the remove product option and the Cancel option
    product_names.insert(0, self.loc.get("menu_all_cancel"))
    product_names.insert(1, self.loc.get("menu_add_product"))
    product_names.insert(2, self.loc.get("menu_delete_product"))
    # Create a keyboard using the product names
    keyboard = [[telegram.KeyboardButton(product_name)] for product_name in product_names]
    # Send the previously created keyboard to the user (ensuring it can be clicked only 1 time)
    self.bot.send_message(self.chat.id, self.loc.get("conversation_admin_select_product"),
                          reply_markup=telegram.ReplyKeyboardMarkup(keyboard, one_time_keyboard=True))
    # Wait for a reply from the user
    selection = self.__wait_for_specific_message(product_names, cancellable=True)
    # If the user has selected the Cancel option...
    if isinstance(selection, CancelSignal):
        # Exit the menu
        return
    # If the user has selected the Add Product option...
    elif selection == self.loc.get("menu_add_product"):
        # Open the add product menu
        self.__edit_product_menu()
    # If the user has selected the Remove Product option...
    elif selection == self.loc.get("menu_delete_product"):
        # Open the delete product menu
        self.__delete_product_menu()
    # If the user has selected a product
    else:
        # Find the selected product
        product = self.session.query(db.Product).filter_by(name=selection, deleted=False).one()
        # Open the edit menu for that specific product
        self.__edit_product_menu(product=product)
[ "def edit_product(request, product_id):\n # only allow super user access\n if not request.user.is_superuser:\n messages.error(\n request,\n \"You must be logged in as KOR admin to do this\"\n )\n return redirect(reverse('home'))\n\n product = get_object_or_404(Product, pk=product_id)\n if request.method == 'POST':\n # validate, save and redirect user to product page\n form = ProductForm(request.POST, instance=product)\n if form.is_valid():\n form.save()\n messages.info(request, f\"{product.name} successfully updated\")\n return redirect(reverse('product_detail', args=[product.id]))\n else:\n messages.error(\n request,\n 'Failed to edit product. Double check the form is valid.'\n )\n else:\n form = ProductForm(instance=product)\n context = {\n 'form': form,\n 'product': product,\n }\n return render(request, 'products/edit_product.html', context)", "def products_menu():\n \n product_menu_options = \"\"\"\n Welcome to the products menu, your options are:\n 0) Return to main menu\n 1) Show current products list\n 2) Add a new item to the products list\n 3) Edit an existing item from the products list\n 4) Delete an item from the products list\n \"\"\"\n print(product_menu_options)", "def enable_product_admin_panels():", "def admin(self):\n self.salir_a_modulo(MODULO_MENU)", "def edit_product(request, pk):\n\n products = get_object_or_404(Product, pk=pk)\n if request.method == 'POST':\n form = ProductPostForm(request.POST, instance=products)\n if form.is_valid():\n product = form.save()\n return redirect(product_details, product.pk)\n else:\n form = ProductPostForm(instance=products)\n return render(request, 'editproduct.html', {'form': form})", "def show_admin_edit_admins():\n return render_admin_page(\"admin-ea.html\")", "def access_catalogue(self):\n self.catalogue.display_catalogue_menu()", "def show_admin_edit_users():\n return render_admin_page(\"admin-eu.html\")", "def change_product(uuid: str):\n product_: 'Products' = services.products.get_by_uuid(uuid)\n\n form: 'AddProductForm' = create_change_form(product_)\n\n return render_template('change_product.html', form=form)", "def edit_product(request, slug, product_id):\n product = get_object_or_404(Product, slug=slug, pk=product_id)\n # check if product is active\n if product.active:\n # make sure user is the product owner\n if request.user.id == product.seller_id:\n if request.method == \"POST\":\n # Create instance of ProductForm & bind file data and form data\n form = ProductForm(\n request.POST, request.FILES, instance=product)\n if form.is_valid():\n product = form.save(commit=False)\n product.seller = request.user\n product.save()\n messages.success(\n request, 'You have successfully updated your product')\n return redirect(Product.get_absolute_url(product))\n else:\n # Render the edited product\n form = ProductForm(instance=product)\n\n context = {'form': form, 'product': product}\n return render(request, 'product_form_edit.html', context)\n\n else:\n # if not product owner, raise 403 forbidden exception and render\n # 403.html template\n messages.error(request, 'You cannot edit this product')\n raise PermissionDenied\n\n else:\n messages.error(request, 'Product is no longer available')\n return redirect('products_list')", "def open_products_page(catalog_menu):\n catalog_menu.open_products_page()", "def edit_products_field(self):\n text = '<table style=\"padding:5px;\">'\n subscription_products = SubscriptionProduct.objects.filter(subscription=self)\n for sp in subscription_products:\n text += (\n '<tr style=\"padding:5px;\"><td 
style=\"padding:5px;\">{}</td><td style=\"padding:5px;\">{} un.</td>'\n '<td style=\"padding:5px;\">{}</td></tr>'.format(\n sp.product.name, sp.copies, sp.address\n )\n )\n text += \"</table>\"\n text += (\n \"<a href='/admin/core/subscription/{}/' target='_blank'>Edit</a>\".format(\n self.id\n )\n )\n return mark_safe(text)", "def editar_prod(self):\n\t codigo=self.ui.codigo_prod.text()\n\t\tnombre=self.ui.nombre_prod.text()\n\t\tdescripcion=self.ui.descripcion_prod.text()\n\t\tmarca=self.ui.marca_prod.text()\n\t\tcolor=self.ui.color_prod.text()\n\t\tresultado=controller.editar_producto(codigo,nombre,descripcion,marca,color)\n\t\tif resultado:\n\t\t self.reject()", "def product_detail(product_id):\n product_id = Product.get_by_id(product_id)\n return render_template(\"product.html\", product_id=product_id)", "def action_options_menu(self, *args):\n\t\tself.options_manager.toggle_menu()", "def display(auth_context):\n\n products = product_catalog.list_products()\n # Get promoted products recommended by the AutoML model.\n promos = product_catalog.get_promos()\n return render_template('product_catalog.html',\n products=products,\n promos=promos,\n auth_context=auth_context,\n bucket=product_catalog.BUCKET)", "def as_admin_link(self):\r\n\r\n try:\r\n # Assume we're dealing with a nice version of django that can handle\r\n # reversing admin urls.\r\n info = (self._meta.app_label,\r\n self._meta.module_name\r\n )\r\n url_name = 'admin:%s_%s_change' % info\r\n url = reverse(url_name, args=[self.pk])\r\n except NoReverseMatch, e:\r\n # If not then this function is designed to run on the url level just below\r\n # edit anyway so return the url as a relative one pointing to the item id.\r\n url = '%s/' % self.pk\r\n string_params = { 'url': url,\r\n 'title' : _(self.alt_text),\r\n 'link_text': _(self.name),\r\n }\r\n return MenuOption.link_template % string_params", "def search_products_as_admin_single_page(self, **kwargs):\n return slurp(\n 'search_products_as_admin',\n self.search_products_as_admin,\n 'ProductViewDetails',\n **kwargs\n )", "def edit_self(self):\n if self.id:\n return '<a href=\"%s\">' \\\n 'Details</a>' % admin_url(self.__class__, \"change\",\n self.id)\n return ''" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Display the latest transactions, in pages.
def __transaction_pages(self):
    log.debug("Displaying __transaction_pages")
    # Page number
    page = 0
    # Create and send a placeholder message to be populated
    message = self.bot.send_message(self.chat.id, self.loc.get("loading_transactions"))
    # Loop used to move between pages
    while True:
        # Retrieve the 10 transactions in that page
        transactions = self.session.query(db.Transaction) \
            .order_by(db.Transaction.transaction_id.desc()) \
            .limit(10) \
            .offset(10 * page) \
            .all()
        # Create a list to be converted in inline keyboard markup
        inline_keyboard_list = [[]]
        # Don't add a previous page button if this is the first page
        if page != 0:
            # Add a previous page button
            inline_keyboard_list[0].append(
                telegram.InlineKeyboardButton(self.loc.get("menu_previous"), callback_data="cmd_previous")
            )
        # Don't add a next page button if this is the last page
        if len(transactions) == 10:
            # Add a next page button
            inline_keyboard_list[0].append(
                telegram.InlineKeyboardButton(self.loc.get("menu_next"), callback_data="cmd_next")
            )
        # Add a Done button
        inline_keyboard_list.append(
            [telegram.InlineKeyboardButton(self.loc.get("menu_done"), callback_data="cmd_done")])
        # Create the inline keyboard markup
        inline_keyboard = telegram.InlineKeyboardMarkup(inline_keyboard_list)
        # Create the message text
        transactions_string = "\n".join([transaction.text(w=self) for transaction in transactions])
        text = self.loc.get("transactions_page", page=page + 1, transactions=transactions_string)
        # Update the previously sent message
        self.bot.edit_message_text(chat_id=self.chat.id, message_id=message.message_id, text=text,
                                   reply_markup=inline_keyboard)
        # Wait for user input
        selection = self.__wait_for_inlinekeyboard_callback()
        # If Previous was selected...
        if selection.data == "cmd_previous" and page != 0:
            # Go back one page
            page -= 1
        # If Next was selected...
        elif selection.data == "cmd_next" and len(transactions) == 10:
            # Go to the next page
            page += 1
        # If Done was selected...
        elif selection.data == "cmd_done":
            # Break the loop
            break
[ "def show_transactions():\n\n if not g.user:\n flash(\"Access unauthorized.\", \"danger\")\n return redirect(\"/\")\n\n user = g.user\n all_transactions = Transaction.query.filter(Transaction.user_id == user.id).all()\n return render_template('transactions_list.html', all_transactions=all_transactions)", "def history():\n transactions = db.execute(\"select * from transactions where id = %s order by datetime desc\",\n session[\"user_id\"])\n return render_template(\"history.html\", transactions=transactions)", "def history():\n\n #Query transactions by user id\n trans = Transactions.query.filter_by(owner=session['user_id']).all()\n\n #Convert Price to US Dollars and format transaction time\n for t in trans:\n t.price = usd(t.price)\n t.transacted = t.transacted.strftime('%Y-%m-%d %H:%M:%S')\n\n #Return history.html\n return render_template('history.html', trans=trans)", "def get_latest_transactions(self):", "def history():\n transactions = Transaction.query.filter(Transaction.user_id == session.get('user_id')).all()\n\n return render_template(\"history.html\", transactions=transactions)", "def main_page():\n pages=get_accounts()\n return render_template('disp.html',pages=pages)", "def committees():\n\n logger.debug(\"committees page called\")\n committee_list = load_from_api('committee', return_everything=True)\n committees = committee_list['results']\n return render_template('committee_list.html', committees=committees, )", "def index(request):\n\n queryset_list = Todo.objects.all() #.order_by(\"-timestamp\")\n page = request.GET.get('page', 1)\n\n paginator = Paginator(queryset_list, 2)\n try:\n queryset = paginator.page(page)\n except PageNotAnInteger:\n queryset = paginator.page(1)\n except EmptyPage:\n queryset = paginator.page(paginator.num_pages)\n\n context = {\n \"taskli\": queryset, \n }\n return render(request, \"lists/task_list.html\", context)", "def test_transaction_list(self):\n response = self.client.get(reverse('transaction_list_all'))\n\n self.assertVisitedPageWithExpectedText(response, 'web/transaction/transaction_list.html')", "def history():\n transactions = db.execute(\"SELECT Symbol, Shares, Transacted FROM cash WHERE id=:id\", id=session[\"user_id\"])\n return render_template(\"history.html\", transactions=transactions)", "def get(self):\n accounts = self.get_account_data()\n transactions = self.get_transaction_data()\n return render_template(\n \"index.html\", page_name=\"Main\", accounts=accounts, transactions=transactions\n )", "def public_timeline():\n return render_template('timeline.html', messages=query_db('''\n select message.*, user.* from message, user\n where message.author_id = user.user_id\n order by message.pub_date desc limit ?''', [PER_PAGE]))\n # 메세지 쓴 날짜를 내림차순으로 limit 수만큼 message테이블과 user테이블을 조인하여 전체를 가져온다.", "def showMain():\n categories = session.query(Category).order_by(Category.name).all()\n latestItems = session.query(Item).order_by(Item.id.desc()).limit(10)\n return render_template('main.html',\n categories=categories, latestItems=latestItems)", "def list(request):\n # Access restrictions\n if not request.user.is_authenticated:\n messages.error(request, \"Du musst angemeldet sein, um diese Seite sehen zu können.\")\n return redirect(\"/\")\n if not request.user.is_superuser:\n messages.error(request, \"Du musst Admin sein, um diese Seite aufrufen zu können.\")\n return redirect(\"/mitglieder\")\n\n # Fetch all entries\n mitglieder = Mitglied.history.all()\n mitgliederMails = MitgliedMail.history.all()\n mitgliederAemter = 
MitgliedAmt.history.all()\n\n referate = Organisationseinheit.history.all()\n unterbereiche = Unterbereich.history.all()\n aemter = Funktion.history.all()\n rechte = Recht.history.all()\n aemterRechte = FunktionRecht.history.all()\n\n checklisten = Checkliste.history.all()\n checklistenRechte = ChecklisteRecht.history.all()\n checklistenAufgaben = ChecklisteAufgabe.history.all()\n\n users = User.history.all()\n\n # Paginate results\n page_number = 1\n\n mitgliederPaginator = Paginator(mitglieder, 15)\n mitgliederMailsPaginator = Paginator(mitgliederMails, 15)\n mitgliederAemterPaginator = Paginator(mitgliederAemter, 15)\n\n referatePaginator = Paginator(referate, 15)\n unterbereichePaginator = Paginator(unterbereiche, 15)\n aemterPaginator = Paginator(aemter, 15)\n rechtePaginator = Paginator(rechte, 15)\n aemterRechtePaginator = Paginator(aemterRechte, 15)\n\n checklistenPaginator = Paginator(checklisten, 15)\n checklistenRechtePaginator = Paginator(checklistenRechte, 15)\n checklistenAufgabenPaginator = Paginator(checklistenAufgaben, 15)\n\n usersPaginator = Paginator(users, 15)\n\n # Get first page for each tab\n mitgliederPage = mitgliederPaginator.get_page(page_number)\n mitgliederMailsPage = mitgliederMailsPaginator.get_page(page_number)\n mitgliederAemterPage = mitgliederAemterPaginator.get_page(page_number)\n\n referatePage = referatePaginator.get_page(page_number)\n unterbereichePage = unterbereichePaginator.get_page(page_number)\n aemterPage = aemterPaginator.get_page(page_number)\n rechtePage = rechtePaginator.get_page(page_number)\n aemterRechtePage = aemterRechtePaginator.get_page(page_number)\n\n checklistenPage = checklistenPaginator.get_page(page_number)\n checklistenRechtePage = checklistenRechtePaginator.get_page(page_number)\n checklistenAufgabenPage = checklistenAufgabenPaginator.get_page(page_number)\n\n usersPage = usersPaginator.get_page(page_number)\n\n return render(request=request,\n template_name=\"historie/list.html\",\n context={\"mitglieder\": mitgliederPage,\n \"mitgliederMails\": mitgliederMailsPage,\n \"mitgliederAemter\": mitgliederAemterPage,\n \"referate\": referatePage,\n \"unterbereiche\": unterbereichePage,\n \"aemter\": aemterPage,\n \"rechte\": rechtePage,\n \"aemterRechte\": aemterRechtePage,\n \"checklisten\": checklistenPage,\n \"checklistenRechte\": checklistenRechtePage,\n \"checklistenAufgaben\": checklistenAufgabenPage,\n \"users\": usersPage})", "def get(self, request):\n pages = self.get_queryset().all()\n return render(request, 'list.html', {\n 'pages': pages\n })", "def view_transactions(self) -> None:\n user_choice = Menu.prompt_view_transactions()\n if user_choice == 5:\n print(\"Returning to main menu...\")\n return\n\n budget_category = BudgetManager.category_mapping[user_choice]\n print(f\"\\nTransactions in the {budget_category.value} \"\n f\"category: \")\n for tx in self.user.tx_manager:\n if tx.budget_category == user_choice:\n print(f\"\\n{tx}\")", "def entries_index(request):\n blog_entries = Entry.objects.filter(status=2).order_by('-pub_date')\n paginator = Paginator(blog_entries, 4)#4 posts/page\n try:\n page = int(request.GET.get('page','1'))\n except ValueError:\n page = 1\n try:\n entries = paginator.page(page)\n except (EmptyPage, InvalidPage):\n entries = paginator.page(paginator.num_pages)\n return render_to_response('blog/blog.html', {'entries':entries}, RequestContext(request))", "def serve_listing(self, request):\n pages = self.children\n\n # Pagination\n page = request.GET.get('page')\n paginator = Paginator(pages, 
settings.ITEMS_PER_PAGE)\n\n try:\n pages = paginator.page(page)\n except EmptyPage:\n pages = paginator.page(paginator.num_pages)\n except PageNotAnInteger:\n pages = paginator.page(1)\n\n return render(request, self.get_template(request),\n {'self': self, 'pages': pages})", "def transactions():\n # this redirects user to login screen if not logged in\n if \"user\" not in session:\n return redirect(url_for(\"login\"))\n\n if session[\"user\"]:\n # retrieve user's transaction history\n user_transactions = mongo.db.transactions.find(\n {\"email\": session[\"user\"]})\n\n return render_template(\n \"transactions.html\", user_transactions=user_transactions)\n\n return redirect(url_for(\"login\"))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a .csv file containing the list of all transactions.
def __transactions_file(self):
    log.debug("Generating __transaction_file")
    # Retrieve all the transactions
    transactions = self.session.query(db.Transaction).order_by(db.Transaction.transaction_id.asc()).all()
    # Create the file if it doesn't exists
    try:
        with open(f"transactions_{self.chat.id}.csv", "x"):
            pass
    except IOError:
        pass
    # Write on the previously created file
    with open(f"transactions_{self.chat.id}.csv", "w") as file:
        # Write an header line
        file.write(f"UserID;"
                   f"TransactionValue;"
                   f"TransactionNotes;"
                   f"Provider;"
                   f"ChargeID;"
                   f"SpecifiedName;"
                   f"SpecifiedPhone;"
                   f"SpecifiedEmail;"
                   f"Refunded?\n")
        # For each transaction; write a new line on file
        for transaction in transactions:
            file.write(f"{transaction.user_id if transaction.user_id is not None else ''};"
                       f"{transaction.value if transaction.value is not None else ''};"
                       f"{transaction.notes if transaction.notes is not None else ''};"
                       f"{transaction.provider if transaction.provider is not None else ''};"
                       f"{transaction.provider_charge_id if transaction.provider_charge_id is not None else ''};"
                       f"{transaction.payment_name if transaction.payment_name is not None else ''};"
                       f"{transaction.payment_phone if transaction.payment_phone is not None else ''};"
                       f"{transaction.payment_email if transaction.payment_email is not None else ''};"
                       f"{transaction.refunded if transaction.refunded is not None else ''}\n")
    # Describe the file to the user
    self.bot.send_message(self.chat.id, self.loc.get("csv_caption"))
    # Reopen the file for reading
    with open(f"transactions_{self.chat.id}.csv") as file:
        # Send the file via a manual request to Telegram
        requests.post(f"https://api.telegram.org/bot{self.cfg.telegram['token']}/sendDocument",
                      files={"document": file},
                      params={"chat_id": self.chat.id,
                              "parse_mode": "HTML"})
    # Delete the created file
    os.remove(f"transactions_{self.chat.id}.csv")
[ "def makeCSV(self):\r\n with open(\"upload.csv\", \"w\") as f:\r\n f.write(self.makeString() + \"\\n\" )\r\n for student in self.studentList:\r\n f.write(student.makeString() + \"\\n\")", "def get_transactions_csv(self, include_investment=False):\n\n # Specifying accountId=0 causes Mint to return investment\n # transactions as well. Otherwise they are skipped by\n # default.\n result = self.request_and_check(\n '{}/transactionDownload.event'.format(MINT_ROOT_URL) +\n ('?accountId=0' if include_investment else ''),\n headers=self.headers,\n expected_content_type='text/csv')\n return result.content", "def print_customers(self):\n output = ''\n for i in range(len(self.customers)):\n output += f'Customer no. {self.customers[i].id} is in {self.customers[i].state[0]} section\\n'\n #print(output)\n with open('oneday.csv','a') as outfile:\n for i in range(len(self.customers)):\n outfile.write(f'{self.get_time()};{self.customers[i].id};{self.customers[i].state[0]}\\n')", "def gen_csv(self, show_headers=True, show_tags=True):\n class TextOut:\n \"\"\"Simple string output source to capture CSV\"\"\"\n def __init__(self):\n self.data = ''\n def write(self, s):\n self.data += s\n def get(self):\n data = self.data\n self.data = ''\n return data\n output = TextOut()\n writer = csv.writer(output)\n for raw in self.gen_raw(show_headers, show_tags):\n writer.writerow(raw)\n yield output.get()", "def writeFoundationTransactions():\n\n df = collectFoundationTransactions(Foundation1)\n outDf = pd.DataFrame(columns=columnNames+[\"type\"])\n for index, row in df.iterrows():\n method = row[13][0:10]\n if (method in RelevantTransactionTypes):\n series = row.append(pd.Series(data=[FoundationTransactionTypes[method]]\n ,index=[\"type\"]))\n outDf = outDf.append(series,ignore_index=True)\n else:\n continue\n outDf.to_csv(\"../Data/Raw/FoundationTransactions.csv\",index=False)", "def output_csv(infos):\n logging.debug(\"Beginning output_csv\")\n\n for info in infos:\n print \"{};{};{};{};{};{};{};{};{}\".format(\n info[\"backup_label\"]\n ,info[\"backup_type\"]\n ,info[\"backup_timestamp_start_ts\"]\n ,info[\"backup_timestamp_stop_ts\"]\n ,info[\"backup_size\"]\n ,info[\"partial_backup_size\"]\n ,info[\"original_size\"]\n ,info[\"partial_original_size\"]\n ,info[\"backup_items\"])\n logging.debug(\"End output_csv\")", "def get(self, request):\n csv_response = HttpResponse(content_type=\"text/csv\")\n csv_response[\"Content-Disposition\"] = 'attachment; filename=\"export.csv\"'\n all_records = Journal.objects.filter(login=request.user)\n\n for item in all_records:\n one_row_tab = [\n item.id,\n item.login.username,\n item.date,\n item.value,\n item.category.category,\n item.description,\n ]\n writer = csv.writer(csv_response)\n writer.writerow(one_row_tab)\n\n return csv_response", "def generate_csv(self, lista):\r\n\t\ts = ''\r\n\t\tsalida = self.get_rel_path() + \"/\" + \"tree_names.csv\"\r\n\t\tfor i in lista:\r\n\t\t\t#st = i[2].split('/')\r\n\t\t\t#newpath = os.path.join(i[1],st)\r\n\t\t\thash = str(i[0])\r\n\t\t\tname_path = str(i[1] + \"/\" + i[2])\r\n\t\t\t#s = s + str(i[0]) + \";\" + i[1] + \"/\" + i[2] + \"\\n\"\r\n\t\t\tself.copy_file(hash,name_path)\r\n\t\t\ts = s + str(hash + \";\" + name_path + \"\\n\")\r\n\r\n\t\tf = open(salida,\"w\")\r\n\t\tf.write(s)\r\n\t\treturn salida", "def generate_csv_output(payslip_data):\n payslip_output = StringIO(newline=None)\n csvFileWriter = csv.writer(payslip_output, delimiter=',')\n\n data = [['Full Name', 'Payment Period', 'Gross Income',\n 'Income Tax', 'Net 
Income', 'Super']]\n\n for employee in payslip_data:\n data.append([\n employee['full_name'],\n employee['payment_period'],\n str(employee['gross_income']),\n str(employee['income_tax']),\n str(employee['net_income']),\n str(employee['super_amount'])\n ])\n\n csvFileWriter.writerows(data)\n\n return payslip_output", "def to_csv(self):\n tables = self._table_names()\n files = []\n pbar = tqdm(tables)\n for t in pbar:\n if t != '':\n # Dump each table as a CSV file using \"mdb-export\",\n contents = subprocess.Popen([\"mdb-export\", self.mdb,\n str(t, encoding='utf8').replace(\" \", \"_\")],\n stdout=subprocess.PIPE).communicate()[0]\n if len(contents) != 0:\n filename = str(t, encoding='utf8').replace(\" \", \"_\") + str(\".csv\")\n file = open('./data/csv/' + filename, 'w+')\n pbar.set_description('Dumping {}'.format(filename))\n file.write(str(contents, encoding='utf8'))\n files.append(os.path.abspath(file.name))\n file.close()\n else:\n raise FileNotFoundError('{} is null'.format(t))\n print('Successfully dump csv files:', files)\n return files", "def generate_csv(project):\n mem_file = io.BytesIO()\n with zipfile.ZipFile(mem_file, \"w\") as zip_file:\n attributes = Attribute.objects.filter(project=project)\n nodes = Node.objects.filter(project=project)\n field_names = ['latitude', 'longitude'] + [attribute.name for attribute in attributes] + ['picture']\n csv = write_csv_row('', field_names)\n for node in nodes:\n row = [node.latitude, node.longitude] + [data.value for data in Data.objects.filter(node=node)]\n if node.picture:\n zip_file.write(settings.MEDIA_URL[1:] + node.picture.name, node.picture.name)\n row.append(node.picture.name)\n else:\n row.append('Null')\n csv = write_csv_row(csv, row)\n zip_file.writestr(\"{}.csv\".format(project.name), csv)\n return mem_file.getvalue()", "def create_csv(request):\n date_dict = income_date_parser(request)\n\n income_history = get_incomes_funds_ids(user_id=date_dict['user_id'],\n date_start=date_dict['start_date'],\n date_end=date_dict['finish_date'],\n time_diff=date_dict['utc_difference'])\n del income_history[-1]\n\n output = io.StringIO()\n\n headers = []\n if income_history:\n for i in income_history[0]:\n if i != 'income_history_id':\n headers.append(i)\n\n writer = csv.DictWriter(output, dialect='excel', quoting=csv.QUOTE_ALL, fieldnames=headers)\n writer.writeheader()\n\n if income_history:\n for entry in income_history:\n del entry['income_history_id']\n writer.writerow(entry)\n\n response = file_streaming_response('text/csv', 'income_history.csv', output)\n return response", "def create_csv():\n row = [\"price\", \"dif_sar\", \"klinger\", \"rsi\"]\n if not os.path.exists(\"csv.csv\"):\n with open('csv.csv', 'w', newline=\"\") as write_file:\n writer = csv.writer(write_file)\n writer.writerow(row)", "def csv_basic_output(self):\n\n # determine the file name\n csv_filename = \"query-%s-%siter-%s-%s.csv\" % (self.querytype,\n self.iterations,\n self.chart_type.lower(),\n self.testdatetime)\n\n # initialize the csv file\n csvfile_stream = open(csv_filename, \"w\")\n csvfile_writer = csv.writer(csvfile_stream, delimiter=',', quoting=csv.QUOTE_MINIMAL)\n\n # iterate over the SIBs\n for sib in self.results.keys(): \n \n row = [sib]\n \n # add all the times\n for value in self.results[sib]:\n row.append(value)\n\n # add the mean value of the times to the row\n row.append(round(mean(self.results[sib]),3)) \n\n # write the row\n csvfile_writer.writerow(row)\n \n # close the csv file\n csvfile_stream.close()", "def main(dbName):\n import 
DAOClass\n d = DAOClass.DAO(dbName)\n a = d.queryTCVR(\"*\",'','','','')\n # open and write header\n fh = open('out.csv','w') # open and clear\n fh.write ('ID,T,C,V,R \\n')\n for rx in range(len(a)):\n ls = ''\n for cx in range(len(a[0])):\n if (cx ==0):\n ls += a[rx][cx].__str__()\n else:\n ls += ' , ' + a[rx][cx].__str__()\n #endif\n #end for cx\n ls += '\\n'\n fh.write(ls)\n #endfor rx\n fh.close()\n print(\"file out.csv created \")", "def output_all_lines_to_csv(self):\n with open(cfg.OUTPUT_LOC+\"\\\\all_lines.csv\", \"wb\") as f:\n writer = csv.writer(f)\n writer.writerows(self.all_lines)", "def generate_data():\n with open('../data/exercise.csv', 'w') as file:\n for index in range(1, 1000001):\n guid = FAKE.uuid4()\n ccnumber = FAKE.credit_card_number()\n date = FAKE.date(pattern='%m/%d/%Y')\n text = FAKE.text().replace('\\n', ' ')\n file.write(\n f\"{index},{guid},{index},{index},{ccnumber},{date},{text}\\n\")", "def showTransactions(self):\n self.scanTransactions()\n txns = []\n\n # Summarize the stats\n for x in range(len(self._trans)):\n stats = self._trans[x]\n trans_time = 0\n remote_calls = 0\n for name, stat in stats:\n trans_time += stat.total_tt\n remote_calls += 1\n txns.append((x, trans_time, remote_calls))\n\n results = [\"TX#\\tTime\\tCalls\",\n \"=\" * 22]\n\n for item in txns:\n results.append(\"%3d\\t%4f\\t%5d\" % item)\n \n return \"\\n\".join(results)", "def create_final_transaction_list(new_file):\n filename = \"final_transactions.csv\"\n if not os.path.isfile(filename):\n old_file = []\n else:\n old_file = filename\n final_transactions = _merge_transactions(old_file, new_file)\n if final_transactions:\n with open(filename, \"w\", newline='') as file:\n headers = list(final_transactions[0].keys())\n headers.sort()\n writer = csv.DictWriter(file, fieldnames=headers)\n writer.writeheader()\n for transaction in final_transactions:\n writer.writerow(transaction)\n _set_categories(filename)\n return final_transactions" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add an administrator to the bot.
def __add_admin(self):
    log.debug("Displaying __add_admin")
    # Let the admin select an administrator to promote
    user = self.__user_select()
    # Allow the cancellation of the operation
    if isinstance(user, CancelSignal):
        return
    # Check if the user is already an administrator
    admin = self.session.query(db.Admin).filter_by(user_id=user.user_id).one_or_none()
    if admin is None:
        # Create the keyboard to be sent
        keyboard = telegram.ReplyKeyboardMarkup([[self.loc.get("emoji_yes"), self.loc.get("emoji_no")]],
                                                one_time_keyboard=True)
        # Ask for confirmation
        self.bot.send_message(self.chat.id, self.loc.get("conversation_confirm_admin_promotion"),
                              reply_markup=keyboard)
        # Wait for an answer
        selection = self.__wait_for_specific_message([self.loc.get("emoji_yes"), self.loc.get("emoji_no")])
        # Proceed only if the answer is yes
        if selection == self.loc.get("emoji_no"):
            return
        # Create a new admin
        admin = db.Admin(user=user,
                         edit_products=False,
                         receive_orders=False,
                         create_transactions=False,
                         is_owner=False,
                         display_on_help=False)
        self.session.add(admin)
    # Send the empty admin message and record the id
    message = self.bot.send_message(self.chat.id, self.loc.get("admin_properties", name=str(admin.user)))
    # Start accepting edits
    while True:
        # Create the inline keyboard with the admin status
        inline_keyboard = telegram.InlineKeyboardMarkup([
            [telegram.InlineKeyboardButton(
                f"{self.loc.boolmoji(admin.edit_products)} {self.loc.get('prop_edit_products')}",
                callback_data="toggle_edit_products"
            )],
            [telegram.InlineKeyboardButton(
                f"{self.loc.boolmoji(admin.receive_orders)} {self.loc.get('prop_receive_orders')}",
                callback_data="toggle_receive_orders"
            )],
            [telegram.InlineKeyboardButton(
                f"{self.loc.boolmoji(admin.create_transactions)} {self.loc.get('prop_create_transactions')}",
                callback_data="toggle_create_transactions"
            )],
            [telegram.InlineKeyboardButton(
                f"{self.loc.boolmoji(admin.display_on_help)} {self.loc.get('prop_display_on_help')}",
                callback_data="toggle_display_on_help"
            )],
            [telegram.InlineKeyboardButton(
                self.loc.get('menu_done'),
                callback_data="cmd_done"
            )]
        ])
        # Update the inline keyboard
        self.bot.edit_message_reply_markup(message_id=message.message_id,
                                           chat_id=self.chat.id,
                                           reply_markup=inline_keyboard)
        # Wait for an user answer
        callback = self.__wait_for_inlinekeyboard_callback()
        # Toggle the correct property
        if callback.data == "toggle_edit_products":
            admin.edit_products = not admin.edit_products
        elif callback.data == "toggle_receive_orders":
            admin.receive_orders = not admin.receive_orders
        elif callback.data == "toggle_create_transactions":
            admin.create_transactions = not admin.create_transactions
        elif callback.data == "toggle_display_on_help":
            admin.display_on_help = not admin.display_on_help
        elif callback.data == "cmd_done":
            break
    self.session.commit()
[ "async def add_admin(ctx: dc.Context):\n if not ctx.message.mentions:\n await ctx.message.channel.send(\n embed=dc.ErrorEmbed(\n \"Argument Error\",\n \"No mentions found.\",\n \"Syntax: [add-admin] @user_1 @user_2 .. @user_n\",\n ctx.message\n )\n )\n return\n user_names = [i.name for i in ctx.message.mentions]\n for user in ctx.message.mentions:\n result = admin.add_admin(str(user.id))\n if result:\n await ctx.message.channel.send(\n embed=dc.CommandSuccessListEmbed(\n \"Successfuly added user(s) to admin group\",\n targets=user_names,\n list_title=\"User(s) added:\"\n )\n )", "def administrator(self, administrator):\n\n self._administrator = administrator", "def add_admin(user):\n _add_owner(\n _lookup_user(user).biv_id,\n _add_model(pam.Admin())\n )", "def add_administrator(self, project_id, name, email):\n self._run(\n url_path=\"contributors/add\",\n id=project_id,\n name=name,\n email=email,\n admin=True\n )\n return True", "def add_admin():\n email = Config.SITE_ADMIN\n password = input('Enter Admin Password >>> ')\n name = input('Enter Display Name >>> ')\n\n user = User(email, password, name)\n user.confirmed = True\n db.session.add(user)\n db.session.commit()\n print(\"%s has been added to the system as Admin\" % user.username)", "def add_admin(self, data):\n tagged = set(data['mentionedPeople']) - {self.api.people.me().id}\n\n if not tagged:\n self.create_message(\n \"Nobody was tagged to be added. Please tag who you would like to add\",\n roomId=data['roomId']\n )\n elif len(tagged) > 1:\n self.create_message(\n \"Too many people were tagged. Please only tag one person at a time to be added\",\n roomId=data['roomId']\n )\n else:\n person = self.api.people.get(tagged.pop())\n if person and person.id not in self.admins.get_admins():\n self.admins.add_admin(person.id)\n self.create_message(\n \"Added '\" + str(person.displayName) + \"' as an admin.\",\n roomId=data['roomId']\n )\n self.show_admins(data)\n elif person:\n self.create_message(\n \"'\" + str(person.displayName) + \"' is already an admin on this project\",\n roomId=data['roomId']\n )\n else:\n # This line should be unreachable.\n self.create_message(\n \"No person with id '\" + str(person.id) + \"' exists.\",\n roomId=data['roomId']\n )", "async def admin_add(self, ctx: MyContext, wormhole: str, user: discord.User):\n if not self.check_wh_exists(wormhole):\n await ctx.send(\n await self.bot._(\n ctx.guild.id, \"wormhole.error.not-exists\", name=wormhole\n )\n )\n return\n if not self.check_is_admin(wormhole, ctx.author.id):\n await ctx.send(await self.bot._(ctx.guild.id, \"wormhole.error.not-admin\"))\n return\n query = \"SELECT 1 FROM wormhole_admin WHERE name = ? 
AND admin = ?\"\n isAlready = len(self.bot.db_query(query, (wormhole, user.id))) > 0\n if not isAlready:\n query = \"INSERT INTO wormhole_admin (name, admin) VALUES (?, ?)\"\n self.bot.db_query(query, (wormhole, user.id))\n await ctx.send(\n await self.bot._(ctx.guild.id, \"wormhole.success.admin-added\")\n )\n else:\n await ctx.send(\n await self.bot._(\n ctx.guild.id, \"wormhole.error.already-admin\", user=user.name\n )\n )", "def add_admin():\n admin_role = Role.query.filter_by(permissions=0xFF).first()\n admin = User.query.filter_by(email=current_app.config['PILI_ADMIN']).first()\n if not admin:\n admin_user = User(\n email=current_app.config['PILI_ADMIN'],\n username=current_app.config['PILI_ADMIN_NAME'],\n password=generate_password(10),\n role=admin_role,\n confirmed=True,\n )\n db.session.add(admin_user)\n db.session.commit()", "def add_admin(self, user_id: Union[str, int]):\n insert_admin_sql = \"\"\" INSERT INTO admins (user_id) VALUES (?)\"\"\"\n with db_conn(self.db_file) as c:\n c.execute(insert_admin_sql, [user_id])", "def add_admin_to_members(sender, instance, created, **kwargs):\n\n if created and instance.admin:\n instance.members.add(instance.admin)", "def make_admin(self):\n user_datastore = SQLAlchemyUserDatastore(db, User, Role)\n user_datastore.add_role_to_user(self, 'admin')\n db.session.commit()", "def add_administrator_interface(self, udp_port: int, login :str, password: str):\n self.administrator_cfg = AdministratorCfg(udp_port=udp_port,\n login=login,\n password=password)\n return self", "def set_admin(self, admins):\n self.set_group(self._gp_admin_name, admins)", "def create_admin():\n db.session.add(User(email='ad@min.com', password='admin', admin=True))\n db.session.commit()", "def __addNewAdminQuery(self,admin_id,username,password,name,comment,creator_id):\n return ibs_db.createInsertQuery(\"admins\",{\"admin_id\":admin_id,\n \"username\":dbText(username),\n \"password\":dbText(password),\n \"name\":dbText(name.strip()),\n \"comment\":dbText(comment.strip()),\n \"creator_id\":dbText(creator_id),\n \"deposit\":0,\n \"due\":0\n })", "def add_admin(user_id=None):\r\n try:\r\n if user_id:\r\n user = db.session.query(model.user.User)\\\r\n .get(user_id)\r\n require.user.update(user)\r\n if user:\r\n user.admin = True\r\n db.session.commit()\r\n return redirect(url_for(\".users\"))\r\n else:\r\n msg = \"User not found\"\r\n return format_error(msg, 404)\r\n except Exception as e: # pragma: no cover\r\n current_app.logger.error(e)\r\n return abort(500)", "async def insert_administrator(self, role_id: int) -> None:\r\n\r\n query = \"\"\"\r\n INSERT INTO administrators (role)\r\n VALUES (%s)\r\n \"\"\"\r\n async with self.conn.cursor() as cur:\r\n await cur.execute(query, (role_id,))", "def admin_edit_admins():\n return user_management_handler(\"show_admin_edit_admins\", \"new_admins\", True)", "def addAdmin(username, sshId, user, identity):\n if identity:\n env.key_filename = identity\n if user:\n env.user = user\n sudo('adduser --disabled-password --gecos \",,,\" %s' % username)\n sudo('usermod -p \"\" %s' % username)\n sudo('chage -d 0 %s' % username)\n sudo('gpasswd --add %s admin' % username)\n authorizeSshKey(username, sshId)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a frame from the camera and a destination, figure out which direction to take next
def get_next_direction(current_frame, scanner, code):
    # ### thresholding. susceptible to glare, solve with masking tape?
    thresh = cv2.cvtColor(current_frame, cv2.COLOR_BGR2GRAY)
    # success, thresh = cv2.threshold(thresh, BW_THRESHOLD, 255, cv2.THRESH_BINARY)
    # if not success:
    #     print "Could not threshold frame, skipping."
    #     # Okay to return 'STRAIGHT' here because the thresholding error will cause the
    #     # speed calculator to bail out and we'll skip the frame.
    #     return 'STRAIGHT'

    pil_image = Image.fromarray(thresh, 'L')
    width, height = pil_image.size
    raw = pil_image.tostring()

    # wrap image data
    image = zbar.Image(width, height, 'Y800', raw)

    # scan the image for barcodes
    scanResult = scanner.scan(image)

    if scanResult:
        for symbol in image:
            # do something useful with results
            print 'decoded', symbol.type, 'symbol', '"%s"' % symbol.data
            report_data_to_webserver(symbol.data)
            if symbol.data == code:
                return 'STOP'  # if QR code found, and QR code text is the desired destination, return stop

    return 'STRAIGHT'  # Can be one of STRAIGHT, STOP.
[ "def get_direction(self, destination:int):\n if int(destination) - int(self.position) > 0 :\n return \"forward\"\n if int(destination) - int(self.position) < 0 :\n return \"reverse\"\n return \"stop\"", "def choose_direction(self):\n\n # for getting the neighbours in the front 180 degrees within vision range; for calc_utility\n peds_in_180 = self.pedestrians_in_field(180)\n\n # Loop over the possible directions\n pos_directions = self.possible_directions()\n max_util = list(self.calc_utility(pos_directions[0], peds_in_180))+[pos_directions[0]]\n max_util[0]+=random.gauss(0, self.model.stoch_variable)\n\n for direction in pos_directions[1:]:\n # Calculate utility for every possible direction\n util, next_pos = self.calc_utility(direction, peds_in_180)\n util+= random.gauss(0, self.model.stoch_variable)\n # Check if this utility is higher than the previous\n if util > max_util[0]:\n max_util = [util, next_pos, direction]\n\n # Return next position and the direction\n return max_util[1], max_util[2]", "def desired_directions(state):\n destination_vectors = state[:, 6:8] - state[:, 0:2]\n\n # support for prediction without given destination:\n # \"desired_direction\" is in the direction of the current velocity\n invalid_destination = torch.isnan(destination_vectors[:, 0])\n destination_vectors[invalid_destination] = state[invalid_destination, 2:4]\n\n norm_factors = torch.linalg.norm(destination_vectors, ord=2, dim=-1)\n norm_factors[norm_factors == 0.0] = 1.0\n return destination_vectors / norm_factors.unsqueeze(-1)", "def get_direction(position, next_position):\n x, y = position\n next_x, next_y = next_position\n if x == next_x:\n if y < next_y:\n return constants.Action.Right\n else:\n return constants.Action.Left\n elif y == next_y:\n if x < next_x:\n return constants.Action.Down\n else:\n return constants.Action.Up\n raise constants.InvalidAction(\"We did not receive a valid position transition.\")", "def step_towards(self, x, y, target_x, target_y):\n path = libtcod.path.new_using_map(self.fov_map)\n libtcod.path.compute(path, x, y, target_x, target_y)\n (t_x, t_y) = libtcod.path.walk(path, False)\n if t_x is None:\n return None, None\n else:\n return t_x - x, t_y - y", "def next_location(x, y, direction):\n new_x = x\n new_y = y\n\n if direction == \"W\":\n new_x = new_x - 1\n elif direction == \"S\":\n new_y = new_y - 1\n elif direction == \"E\":\n new_x = new_x + 1\n else: # going north\n new_y = new_y + 1\n\n return new_x, new_y", "def _calculate_direction(source, target):\n direction = []\n x_direction = target.x - source.x\n y_direction = target.y - source.y\n # print(\"source:\", source.id, \"target:\", target.id, \"x\", x_direction, \"y\", y_direction)\n\n if x_direction < 0:\n direction.append('4')\n elif x_direction > 0:\n direction.append('3')\n\n if y_direction < 0:\n direction.append('1')\n elif y_direction > 0:\n direction.append('2')\n\n return tuple(direction)", "def move(direction):\n if direction == 'r':\n return 1\n if direction == 'l':\n return -1", "def next_waypoint_from_direction(direction, current_pose):\n\n wp = Point(current_pose.x, current_pose.y, None)\n if direction == 'up':\n wp.y = np.ceil(wp.y) + 0.5\n elif direction == 'down':\n wp.y = np.floor(wp.y) - 0.5\n elif direction == 'left':\n wp.x = np.floor(wp.x) - 0.5\n elif direction == 'right':\n wp.x = np.ceil(wp.x) + 0.5\n else:\n err_msg = \"The direction value {} is not valid\".format(direction)\n rospy.logerr(err_msg)\n sys.exit()\n\n return wp", "def march(self, src):\n target = 
self.find_nearest_target(src)\n if not target:\n direction = STILL\n else:\n direction = self.find_direction(src, target)\n # don't commit suicide\n dest = self.game_map.getSite(src, direction)\n if not dest.is_friend() and not self.can_capture(src, dest):\n direction = STILL\n return Move(src, direction)", "def moves_to(self, destination):\n head = self.head()\n diff = head.sub(destination)\n moves = []\n\n if diff.x < 0:\n moves.append(RIGHT)\n elif diff.x > 0:\n moves.append(LEFT)\n\n if diff.y > 0:\n moves.append(UP)\n elif diff.y < 0:\n moves.append(DOWN)\n\n return moves", "def __next_direction(xy):\n p, q = tuple(xy)\n nbh = [[-q, p], [p, q], [q, -p]]\n return nbh[npr.randint(3)]", "def goInDirection (self, d, dist, steps=10): \n initTransform = self.arm.manip.GetEndEffectorTransform()\n initOrigin = initTransform[0:3,3]\n \n if d == 'f':\n dirVec = initTransform[0:3,2]\n elif d == 'b': \n dirVec = -1*initTransform[0:3,2]\n elif d == 'u':\n dirVec = initTransform[0:3,1]\n elif d == 'd':\n dirVec = -1*initTransform[0:3,1]\n elif d == 'l':\n dirVec = initTransform[0:3,0]\n elif d == 'r':\n dirVec = -1*initTransform[0:3,0]\n else:\n rospy.ERROR(\"Invalid direction: \" + d)\n \n endOffset = dirVec*float(dist)\n \n transforms = []\n \n for step in range(steps+1):\n currVec = initOrigin + float(step)/steps*endOffset\n \n newTfm = initTransform.copy()\n newTfm[0:3,3] = currVec\n \n transforms.append(newTfm)\n \n return self.smoothPlan(transforms)", "def move_to_next_pixel(fdr, row, col):\n # get the fdr pixel value (x,y)\n value = fdr[row, col]\n\n # Update the row, col based on the flow direction\n if value == 1:\n col += 1\n elif value == 2:\n col += 1\n row += 1\n elif value == 4:\n row += 1\n elif value == 8:\n row += 1\n col -= 1\n elif value == 16:\n col -= 1\n elif value == 32:\n row -= 1\n col -= 1\n elif value == 64:\n row -= 1\n elif value == 128:\n row -= 1\n col += 1\n else:\n # Indetermine flow direction, sink. 
Do not move.\n row = row\n col = col\n return (row, col)", "def get_starting_direction_vector(self):\n\n total_length = len(self.pixel_list)\n\n if total_length < 2:\n return None\n elif total_length < 15:\n delta_x = self.pixel_list[-1].x - self.pixel_list[0].x\n delta_y = self.pixel_list[-1].y - self.pixel_list[0].y\n return delta_y, delta_x\n else:\n delta_x = self.pixel_list[-15].x - self.pixel_list[0].x\n delta_y = self.pixel_list[-15].y - self.pixel_list[0].y\n return delta_y, delta_x", "def faceTowards(self, target):\n current_tile = self.current_tile()\n if(target and current_tile):\n x_dist = target.coordinates()[0] - current_tile.coordinates()[0]\n if x_dist == 0: return\n self.direction_val = x_dist/abs(x_dist)\n #TEMP\n if self.direction_val == -1:\n self.direction_id = 'left'\n if self.direction_val == 1:\n self.direction_id = 'right'", "def get_direction(self, agent):\n x, y = self.state\n a_x, a_y = agent.state\n if a_x == x and a_y == y:\n return 0\n elif a_x == x and a_y > y:\n return 1\n elif a_x > x and a_y > y:\n return 2\n elif a_x > x and a_y == y:\n return 3\n elif a_x > x and a_y < y:\n return 4\n elif a_x == x and a_y < y:\n return 5\n elif a_x < x and a_y < y:\n return 6\n elif a_x < x and a_y == y:\n return 7\n elif a_x < x and a_y > y:\n return 8", "def get_direction(event):\n return event['result']['parameters']['direction']", "def _next_step(position, direction):\n # Access the values stored in the pairs\n (row, column), (row_step, column_step) = position, direction\n # Calculate the new position\n new_position = (new_row, new_column) = (row + row_step, column + column_step)\n # Check whether the new position is still on the board.\n if 0 <= new_column < 8 and 0 <= new_row < 8:\n return new_position\n else:\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a frame from the camera, figure out the line error
def get_line_error(im):
    ### Crop the picture
    height = len(im)
    width = len(im[0])
    im = im[height/CROP_RATIO:-height/CROP_RATIO, width/CROP_RATIO:-width/CROP_RATIO]

    ### thresholding. susceptible to glare, solve with masking tape?
    thresh = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    success, thresh = cv2.threshold(thresh, BW_THRESHOLD, 255, cv2.THRESH_BINARY)
    if not success:
        print "Could not threshold frame, skipping."
        return None

    ### edge detection. constants here are magic
    canny = cv2.Canny(thresh, 180, 220, apertureSize = 3)

    ### contour detection
    contours, _ = cv2.findContours(canny,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
    if len(contours) < 1:
        return None
    sorted_contours = sorted(contours, key=lambda x:cv2.arcLength(x,False), reverse=True)

    ## JUST FOR TESTING
    # longest contours
    if DEBUG_MODE:
        cv2.drawContours(im,sorted_contours[0:2],-1,(0,255,0),3)  # draw longest contour
        cv2.imshow('lines',im)
        k = cv2.waitKey(5)
        if k == 27:
            cv2.destroyAllWindows()
            return None

    ### Find x coordinates of endpoints
    if len(sorted_contours) == 0:
        print "No contours found, skipping"
        return None

    # get points for the longest contours
    mask = numpy.zeros(im.shape,numpy.uint8)
    cv2.drawContours(mask,[sorted_contours[0]],0,255,-1)
    pixelpoints = numpy.transpose(numpy.nonzero(mask))
    xTop_one = pixelpoints[0][1]  # IMPORTANT: pixelpoints is returned in row, column format
    xBottom_one = pixelpoints[-1][1]  ## IMPORTANT TODO: assumes points are returned sorted, need to verify

    if len(sorted_contours) > 1:  # we have more than one contour
        mask = numpy.zeros(im.shape,numpy.uint8)
        cv2.drawContours(mask,[sorted_contours[1]],0,255,-1)
        pixelpoints = numpy.transpose(numpy.nonzero(mask))
        xTop_two = pixelpoints[0][1]  # IMPORTANT: pixelpoints is returned in row, column format
        xBottom_two = pixelpoints[-1][1]  ## IMPORTANT TODO: assumes points are returned sorted, need to verify

    # average two longest contours if available
    if len(sorted_contours) == 1:
        xTop = xTop_one
        xBottom = xBottom_one
    else:
        xTop = (xTop_one + xTop_two) / 2
        xBottom = (xBottom_one + xBottom_two) / 2

    ### Calculate offset to return
    ### (XTop - XBottom) + (XTop - CENTER)
    ### CENTER = TRUE_CENTER - CENTER_OFFSET
    MOST_POSITIVE_VAL = 3*len(im[0])/2 + CENTER_OFFSET
    MOST_NEGATIVE_VAL = -3*len(im[0])/2 + CENTER_OFFSET
    adjusted_midpoint = len(im[0])/2 - CENTER_OFFSET
    #unscaled_error = xTop - xBottom + 2*(xTop - adjusted_midpoint)
    unscaled_error = xTop - adjusted_midpoint
    if unscaled_error == 0:
        return 0.0
    if unscaled_error > 0:
        scaled_error = float(unscaled_error)/MOST_POSITIVE_VAL
        if abs(scaled_error) > 1.0:
            print "Warning: scaled_error value greater than 1.0: " + scaled_error
        return min(scaled_error, 1.0)
    else:
        scaled_error = float(unscaled_error)/abs(MOST_NEGATIVE_VAL)
        if abs(scaled_error) > 1.0:
            print "Warning: scaled_error value less than -1.0: " + scaled_error
        return max(scaled_error, -1.0)
[ "def error_check(frame_tp):\n deriv_frame_tp = np.diff(frame_tp)\n error_len_th = np.mean(deriv_frame_tp)+np.std(deriv_frame_tp)*6\n\n error_frames = np.abs(deriv_frame_tp)>error_len_th\n if np.any(error_frames):\n print(\"Error in timepoints detected in frames\", np.where(error_frames)[0],\n \"at timepoint\", frame_tp[np.where(error_frames)[0]])", "def error(line, data):\n\t# Metric: Sum of squared Y-axis difference\n\terr = np.sum((data[:, 1] - (line[0] * data[:, 0] + line[1])) ** 2)\n\treturn err", "def line_error(params, args):\n x, y = args\n m, b = params[0:2]\n y_star = m * x + b\n\n return y - y_star", "def get_linear_track_error(self):\r\n return self._arm.get_linear_track_error()", "def error(line, data): #error function for a line\n \n #Metric: Sum of squared Y-axis differences, y2 - c0.x1 + c1\n err = np.sum((data[:,1] - line[0]*data[:, 0] + line[1])**2)\n return err", "def findLine(self, pose, visualize=False):\n address = 0 # Store the address of the black points\n num = 0 # Store the number of black points find\n read = self.readLine(pose, visualize) # read the line from the pose of the model\n for i in range(self.L - 1): # run over the read array\n if (read[i] < 150): # if the pixel value < 150 it is black\n address += i\n num += 1\n\n # determine the dif between the center fo the camera and the centroid\n # of the black pixels\n # If the line is by the rigth of the center the error is negative\n if (num < 1):\n self.error = self.error\n else:\n centroid = address / num # find the centroid of the black pixels\n self.error = (self.L / 2) - centroid\n return self.error / float(self.L / 2)", "def error(line, data): # error function\n # Metric: Sum of squared Y-axis differences\n err = np.sum((data[:, 1] - (line[0] * data[:, 0] + line[1])) ** 2)\n return err", "def error(self) -> Sequence[float]:\n errors = []\n for line, sign in zip(self.marker_lines, (-1, 1)):\n if self._orientation == Orientation.UP_DOWN:\n picket_pos = self._fit(line.center.y)\n mlc_pos = line.center.x\n else:\n picket_pos = self._fit(line.center.x)\n mlc_pos = line.center.y\n if (\n self._separate_leaves\n ): # offset the picket position by the DLG and nominal gap\n mag_factor = self._image.sid / 1000\n picket_pos += (\n sign * self._nominal_gap_mm * mag_factor / 2 * self._image.dpmm\n )\n errors.append((mlc_pos - picket_pos) / self._image.dpmm)\n return errors", "def error(line, data):\r\n \"\"\" If we sum up the squares of the residuals of all the points \r\n from the line we get a measure of the\r\n fitness of the line. 
Our aim should be to minimize this value.\r\n equation is y =mx + c so the error or residual \r\n so the residual = y - (mx=c) , sum of error = (y -(mx+c))^2\r\n data[:,1] = y value , data[:,0] = x value\r\n \"\"\"\r\n err = np.sum((data[:,1] - (line[0]*data[:,0]+line[1]))**2)\r\n \r\n return err", "def test_get_line_error_neg(self) -> None:\n test_model = line2d.Line2D(slope=-3, y_int=3, x_int=1)\n test_point = line2d.Point2D(6, 1)\n\n error = test_model.calc_error(point=test_point)\n\n self.assertAlmostEqual(error, 8 * math.sqrt(10) / 5)", "def compute_speed_and_line_error(current_frame, scanner, code): \n next_direction = get_next_direction(current_frame, scanner, code)\n if next_direction == 'STOP':\n return STOP\n\n line_error = get_line_error(current_frame)\n if line_error is None:\n return None\n\n return (2, line_error)", "def get_error(self, camera: int = 0) -> str:\n return self.sources[camera].getError()", "def process_pipeline(frame, keep_state=True):\n\n global line_lt, line_rt, processed_frames\n\n # undistort the image using coefficients found in calibration\n undistorted_img = undistort(frame, mtx, dist)\n\n # binarize the frame and highlight lane lines\n binarized_img = binarize(undistorted_img)\n\n # perspective transform to obtain bird's eye view\n birdeye_img, matrix, inversed_matrix = birdeye(binarized_img, visualise=False)\n\n # 2 order polynomial curve fit onto lane lines found\n if processed_frames > 0 and keep_state and line_lt.detected and line_rt.detected:\n find_lane_by_previous_fits(birdeye_img, line_lt, line_rt, visualise=False)\n else:\n find_lane_by_sliding_windows(birdeye_img, line_lt, line_rt, n_windows=9, visualise=False)\n\n # compute offset in meter from center of the lane\n offset_meter = offset_from_lane_center(line_lt, line_rt, frame_width=frame.shape[1])\n\n # draw the surface enclosed by lane lines back onto the original frame\n blend_on_road = draw_back_onto_the_road(undistorted_img, inversed_matrix, line_lt, line_rt, keep_state)\n mean_curvature_meter = np.mean([line_lt.curvature_meter, line_rt.curvature_meter])\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(blend_on_road, 'Curvature radius: {:.02f}m'.format(mean_curvature_meter), (60, 60), font, 1,\n (255, 255, 255), 2)\n cv2.putText(blend_on_road, 'Offset from center: {:.02f}m'.format(offset_meter), (60, 90), font, 1,\n (255, 255, 255), 2)\n\n processed_frames += 1\n\n return blend_on_road", "def testErrors(self):\n factor = 100\n before = fitLine(self.spectrum, int(self.center), int(self.rmsSize), 0, self.fittingRadius)\n\n spectrum = makeSpectrum(self.length, self.center, factor*self.amplitude, self.rmsSize,\n self.bgConst, self.bgSlope)\n after = fitLine(spectrum, int(self.center), int(self.rmsSize), 0, self.fittingRadius)\n\n # The amplitude and background errors doesn't change, because they are related to the variance,\n # which we aren't using.\n self.assertFloatsAlmostEqual(after.amplitudeErr, before.amplitudeErr, atol=1.0e-5)\n self.assertFloatsAlmostEqual(after.bg0Err, before.bg0Err, atol=1.0e-2)\n self.assertFloatsAlmostEqual(after.bg1Err, before.bg1Err, atol=1.0e-5)\n # The center and rmsSize errors decrease proportionally with the line flux\n # (they scale inversely with the S/N, and the noise is constant)\n self.assertFloatsAlmostEqual(after.centerErr, before.centerErr/factor, atol=1.0e-5)\n self.assertFloatsAlmostEqual(after.rmsSizeErr, before.rmsSizeErr/factor, atol=1.0e-5)", "def _get_rx_frame_error_cnt(self):\n return self.__rx_frame_error_cnt", "def error(line, data): # error 
function\n # sum of squared error\n err = np.sum((data[:,1]-(line[0] * data[:,0] + line[1]))** 2)\n return err", "def test_get_error_vertical(self) -> None:\n test_model = line2d.Line2D(slope=math.nan, y_int=math.nan, x_int=3)\n test_point = line2d.Point2D(1, 2)\n\n error = test_model.calc_error(point=test_point)\n\n self.assertEqual(error, 2)", "def accr_lum(L_line, tracer, L_line_err = 0*u.W):\n \n a, a_err, b, b_err = rel['a'][tracer],rel['a_err'][tracer],rel['b'][tracer],rel['b_err'][tracer]\n \n log_L_acc = b + a * log10(L_line*u.W/L_sun)\n \n L_acc = 10**log_L_acc*L_sun/u.W\n \n #error propagation\n \n #c_err = (L_line_err)/(log(10) * L_line)\n #ac_err = a * log10(L_line/L_sun) * ((a_err/a)**2 + (c_err/log10(L_line/L_sun))**2)**0.5\n #log_L_acc_err = (b_err**2 + ac_err**2)**0.5\n #L_acc_err = L_acc * log(10) * log_L_acc_err\n\n return L_acc", "def extract_line_col(self, error):\n _, _, line, column, _, _, _, _, _ = error\n return line, column" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a frame from the camera, figure out the desired speed and current line error
def compute_speed_and_line_error(current_frame, scanner, code):\n    next_direction = get_next_direction(current_frame, scanner, code)\n    if next_direction == 'STOP':\n        return STOP\n\n    line_error = get_line_error(current_frame)\n    if line_error is None:\n        return None\n\n    return (2, line_error)
[ "def calc_acc_frame(velocity, step_size, frame, vel_start_frame):\n #The offset required due to the velocities starting a vel_start_frame\n acc_offset = frame - vel_start_frame + 1\n if ((acc_offset) < step_size):\n raise IndexError(\"Acceleration cannot be calculated for this frame\")\n else:\n try:\n acc = (velocity[acc_offset - 1] - velocity[acc_offset - 1 - step_size]) / step_size\n return acc\n #return round(acc,2)\n except IndexError:\n print(\"Frame or step_size out of bounds\")", "def calc_vel_frame(position, step_size, frame):\n if (frame < step_size):\n raise IndexError(\"Frame must be greater than step size\")\n else:\n try:\n vel = (position[frame - 1] - position[frame - 1 - step_size]) / step_size\n return vel\n except IndexError:\n print(\"Frame or step_size out of bounds\")", "def process(self, frame):\n guide_x = self.find_guide_pos(frame)\n # print('guide_x = ', guide_x) # DEBUG\n if guide_x is None: # can't see the track guideline\n angle = self.current_angle\n else:\n angle = (guide_x - CENTER_X) / CENTER_X\n angle *= ANGLE_EXAGGERATION # force steeper wheel angles\n\n # slow down on steeper turns\n # when the angle is zero, then go full straight speed\n # when the angle is one (either full left or full right), then go at the lower turn speed\n speed = interpolate_01(STRAIGHT_SPEED, TURN_SPEED, 1-abs(angle))\n\n self.current_speed = speed\n self.current_angle = angle\n return speed, angle", "def computeSteeringAngle(frame, laneLines):\n if len(laneLines) == 0:\n print('No lane lines detected, do nothing')\n #MAKE CAR STOP?\n return -90\n\n # Get middle line in case of detecting single lane\n height, width, _ = frame.shape\n if len(laneLines) == 1:\n print('Only detected one lane line, just follow it. ', laneLines[0])\n x1, _, x2, _ = laneLines[0][0]\n x_offset = x2 - x1\n else: # get middle line in case of detecting two lanes\n _, _, left_x2, _ = laneLines[0][0]\n _, _, right_x2, _ = laneLines[1][0]\n cameraMidOffsetPercent = 0.00 # 0.0 means car pointing to center, -0.03: car is centered to left, +0.03 means car pointing to right\n mid = int(width / 2 * (1 + cameraMidOffsetPercent))\n x_offset = (left_x2 + right_x2) / 2 - mid\n\n # find the steering angle, which is angle between navigation direction to end of center line\n y_offset = int(height / 2)\n\n angleToMidRadian = math.atan(x_offset / y_offset) # angle (in radian) to center vertical line\n angleToMidDeg = int(angleToMidRadian * 180.0 / math.pi) # angle (in degrees) to center vertical line\n steeringAngle = angleToMidDeg + 90 # this is the steering angle needed by picar front wheel\n\n print('new steering angle: ', steeringAngle)\n return steeringAngle", "def process_pipeline(frame, keep_state=True):\n\n global line_lt, line_rt, processed_frames\n\n # undistort the image using coefficients found in calibration\n undistorted_img = undistort(frame, mtx, dist)\n\n # binarize the frame and highlight lane lines\n binarized_img = binarize(undistorted_img)\n\n # perspective transform to obtain bird's eye view\n birdeye_img, matrix, inversed_matrix = birdeye(binarized_img, visualise=False)\n\n # 2 order polynomial curve fit onto lane lines found\n if processed_frames > 0 and keep_state and line_lt.detected and line_rt.detected:\n find_lane_by_previous_fits(birdeye_img, line_lt, line_rt, visualise=False)\n else:\n find_lane_by_sliding_windows(birdeye_img, line_lt, line_rt, n_windows=9, visualise=False)\n\n # compute offset in meter from center of the lane\n offset_meter = offset_from_lane_center(line_lt, line_rt, 
frame_width=frame.shape[1])\n\n # draw the surface enclosed by lane lines back onto the original frame\n blend_on_road = draw_back_onto_the_road(undistorted_img, inversed_matrix, line_lt, line_rt, keep_state)\n mean_curvature_meter = np.mean([line_lt.curvature_meter, line_rt.curvature_meter])\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(blend_on_road, 'Curvature radius: {:.02f}m'.format(mean_curvature_meter), (60, 60), font, 1,\n (255, 255, 255), 2)\n cv2.putText(blend_on_road, 'Offset from center: {:.02f}m'.format(offset_meter), (60, 90), font, 1,\n (255, 255, 255), 2)\n\n processed_frames += 1\n\n return blend_on_road", "def compute_steering_angle(frame, lane_lines):\r\n if len(lane_lines) == 0:\r\n print(\"No lane lines detected, do nothing\")\r\n return -90\r\n\r\n height, width, _ = frame.shape\r\n if len(lane_lines) == 1:\r\n print(\"Only detected one lane line, just follow it\", lane_lines[0])\r\n x1, _, x2, _ = lane_lines[0][0]\r\n x_offset = x2 - x1\r\n else:\r\n left_x2= lane_lines[0][0][2]\r\n right_x2 = lane_lines[1][0][2]\r\n camera_mid_offset_percent = 0.02 # 0.0 means car pointing to center, -0.03: car is centered to left, +0.03 means car pointing to right\r\n mid = int(width / 2 * (1 + camera_mid_offset_percent))\r\n x_offset = (left_x2 + right_x2) / 2 - mid\r\n\r\n # find the steering angle, which is angle between navigation direction to end of center line\r\n y_offset = int(height / 2)\r\n\r\n angle_to_mid_radian = math.atan(x_offset / y_offset) # angle (in radian) to center vertical line\r\n angle_to_mid_deg = int(angle_to_mid_radian * 180.0 / math.pi) # angle (in degrees) to center vertical line\r\n steering_angle = angle_to_mid_deg + 90 # this is the steering angle needed by picar front wheel\r\n\r\n return steering_angle", "def calc_avg_vel_frame(position, step_size, frame, avg_quantity):\n avg_disp = int(math.floor(avg_quantity / 2))\n\n if (frame < (step_size + avg_disp)):\n raise IndexError(\"Can not calculate for this frame\")\n else:\n try:\n position_avg = 0\n for i in range(frame - 1 - avg_disp, frame + avg_disp):\n position_avg += position[i]\n position_1 = position_avg / (avg_disp * 2 + 1)\n \n position_avg = 0\n for i in range(frame - 1 - avg_disp - step_size, frame + avg_disp - step_size):\n position_avg += position[i]\n position_2 = position_avg / (avg_disp * 2 + 1)\n\n vel = (position_1 - position_2) / step_size\n return vel\n #return round(vel, 2)\n except IndexError:\n print(\"Frame or step_size out of bounds\")", "def fps(self):\n\t\treturn float(len(self.buf)) / (self.buf[-1][0] - self.buf[0][0])", "def frame(step):\n\n camera = Camera('location', [0, 7, -200], 'look_at', [0, 0, 0])\n lights = [LightSource([0, -10, -60], 0.5),\n LightSource([0, -50, -60], 0.5),\n ]\n\n receptor = make_receptor([0, 0, -2], 5)\n membrane = make_membrane([0, 0, 0], 10, 5)\n tyrine = make_tyrine([0, 0, -2], 5)\n alphact_stage_one_sliced, alphact_stage_two_sliced = slice_alphact()\n\n seconds = step / 30\n if seconds < 1: # Frame 0 -> 30\n return Scene(camera,\n objects=[models.default_light] + tyrine + membrane + receptor + tyrine + lights)\n\n elif seconds < 4: # Frame 30 -> 120\n insuline_schematic = bind_schematic(step, 5)\n return Scene(camera,\n objects=[models.default_light] + tyrine + membrane + receptor + tyrine + lights + insuline_schematic)\n\n elif seconds < 6: # Frame 120 -> 180\n insuline_schematic = bind_schematic(step, 5)\n camera = move_camera(step, 60, [0, 7, -200], [-20, 20, 3], 120)\n return Scene(camera,\n objects=[models.default_light] + tyrine + 
membrane + receptor + tyrine + lights + insuline_schematic)\n\n elif seconds < 8: # Frame 180 -> 240\n camera = Camera('location', [0, 0, -300], 'look_at', [0, 0, 0])\n INSULIN_RECEPTOR, insulin, light = bind_insuline_complete_ectodomain(step)\n return Scene(camera,\n objects=[light] + INSULIN_RECEPTOR.povray_molecule + insulin.povray_molecule)\n\n elif seconds < 9: # Frame 240 -> 270\n camera = Camera('location', [0, 0, -300], 'look_at', [0, 0, 0])\n INSULIN_RECEPTOR, light = insulin_bonded_to_ectodomain(step)\n return Scene(camera,\n objects=[light] + INSULIN_RECEPTOR.povray_molecule)\n \n elif seconds < 11: #Frame 270 -> 330\n camera = Camera('location', [0, 0, -300], 'look_at', [0, 0, 0])\n light = LightSource([0, 0, -100], 'color', [1, 1, 1])\n alphact_stage_one_sliced_mol, insulin_alpha = alphact_conformational_change(step, alphact_stage_one_sliced, alphact_stage_two_sliced)\n return Scene(camera,\n objects=[light] + alphact_stage_one_sliced_mol.povray_molecule + insulin_alpha.povray_molecule )\n\n elif seconds < 13: #Frame 330 -> 390\n camera = Camera('location', [0, 0, -300], 'look_at', [0, 0, 0])\n light = LightSource([0, 0, -100], 'color', [1, 1, 1])\n alphact_stage_two_sliced_mol, insulin_alpha = alphains_binding_alphact(step, alphact_stage_two_sliced)\n return Scene(camera,\n objects=[light] + alphact_stage_two_sliced_mol.povray_molecule + insulin_alpha.povray_molecule )\n \n elif seconds < 14: #Frame 390 -> 420\n camera = Camera('location', [0, 0, -300], 'look_at', [0, 0, 0])\n light = LightSource([0, 0, -100], 'color', [1, 1, 1])\n alphact_complex_insulinalpha_mol = alphains_bonded_to_alphact(step, alphact_stage_two_sliced)\n return Scene(camera,\n objects=[light] + alphact_complex_insulinalpha_mol.povray_molecule )\n\n elif seconds < 16: #Frame 420 -> 480\n if seconds < 14.7: #Frame 420 -> 441\n camera = move_camera(step, 21, [0, 0, -300], [0, 0, 3], 420)\n light = LightSource([0, 0, -100], 'color', [1, 1, 1])\n alphact_complex_insulinalpha_mol = alphains_bonded_to_alphact(step, alphact_stage_two_sliced)\n return Scene(camera,\n objects=[light] + alphact_complex_insulinalpha_mol.povray_molecule)\n else: #Frame 441 -> 480\n camera = move_camera(step, 39, [0, 0, 3], [0, 7, -200], 441)\n insuline_schematic = bind_schematic(step, 5)\n return Scene(camera,\n objects=[models.default_light] + tyrine + membrane + receptor + tyrine + lights + insuline_schematic)\n \n elif seconds < 19: #Frame 480 -> 570\n insuline_schematic = bind_schematic(step, 5)\n phosphorus = bind_phosphorus(step, 5)\n return Scene(camera,\n objects=[models.default_light] + tyrine + membrane + receptor + tyrine + lights + insuline_schematic + phosphorus)\n \n elif seconds < 21: #Frame 570 -> 630\n insuline_schematic = bind_schematic(step, 5)\n phosphorus = bind_phosphorus(step, 5)\n IRS = bind_IRS(step, 5)\n return Scene(camera,\n objects=[models.default_light] + tyrine + membrane + receptor + tyrine + lights + insuline_schematic + phosphorus + IRS)\n\n insuline_schematic = bind_schematic(step, 5)\n phosphorus = bind_phosphorus(step, 5)\n IRS = bind_IRS(step, 5)\n return Scene(camera,\n objects=[models.default_light] + tyrine + membrane + receptor + tyrine + lights + insuline_schematic + phosphorus + IRS)", "def calculate_fps(self):\n time_difference = self.time_array[-1] - self.time_array[0]\n time_difference_in_seconds = time_difference.to_sec()\n if time_difference_in_seconds == 0:\n pass\n self.fps = self.buffer_size / time_difference_in_seconds\n rospy.loginfo(\"[EulerianMotionMagnification] Estimated FPS: 
\" + str(self.fps) + \" (Measured timespan: \" + str(time_difference_in_seconds) + \"s)\")\n rospy.loginfo(\"[EulerianMotionMagnification] Video array length: \" + str(len(self.video_array)))", "def frame(step):\n\n curr_time = step / eval(SETTINGS.NumberFrames) * eval(SETTINGS.FrameTime)\n logger.info(\" @Time: %.3fs, Step: %d\", curr_time, step)\n\n nframes = eval(SETTINGS.NumberFrames)\n\n style = Texture(Pigment('color', [0.80, 0.00, 1.00], 'filter', 0.7),\n Finish('phong', 0.6, 'reflection', 0.4))\n\n cylinder = Cylinder([-6, -1, 4], [-6, 7, 4], 3, style)\n sphere = Sphere([6, 2, -2], 3, style)\n leg = legend([-15, 0, 0], 5)\n radius = 25\n z_start = 0\n x_start = 0\n\n alpha = (-pi/2) + (step * 2 * pi / nframes)\n # For each step, de difference in the x and z positions is equal to the radius time the sin and cos of alpha.\n x_coord = radius * cos(alpha)\n z_coord = radius * sin(alpha)\n # Adding or subtracting the difference of the position for each step from the original camera position.\n x = x_start + x_coord\n z = z_start - z_coord\n return Scene(Camera('location', [x, 8, z], 'look_at', [0, 0, 0]),\n objects=[models.checkered_ground, models.default_light, cylinder, sphere] + leg)", "def get_speed(self) -> float: \r\n if self.distance < self.distance_stop:\r\n print(\"STOP: Obstacle detected ({} cm)\".format(self.distance))\r\n return 0\r\n elif self.distance < self.distance_slow: \r\n return self.speed * 0.8\r\n else:\r\n return self.speed", "def get_speed(self):\r\n\t\tvel = self.vehicle.get_velocity()\r\n\t\treturn 3.6 * math.sqrt(vel.x ** 2 + vel.y ** 2 + vel.z ** 2)", "def test_h_decreases_speed_in_direction_opposite_to_banana(self):\n # using y and h keys basically let you switch from 2.3 to 2.4\n # in a continuous fashion. Works only from 2.3\n training = 2.3\n self.tb.set_level_variables(training)\n self.tb.restart_bananas()\n # check initial speed\n #print('wrong speed', self.tb.wrong_speed)\n # first two frames get messed up for timing, so go two steps\n #print self.tb.free_move\n taskMgr.step()\n taskMgr.step()\n messenger.send('x_axis', [2 * -self.tb.multiplier])\n camera_h = self.tb.base.camera.getH()\n #print camera_h\n # go a few steps, see how long it takes\n start = time.time()\n for i in range(30):\n #print self.tb.x_mag\n #print self.tb.speed\n taskMgr.step()\n first_time = time.time() - start\n #print('time', first_time)\n first_dist = camera_h - self.tb.base.camera.getH()\n #print('dist', first_dist)\n first_speed = abs(first_dist/first_time)\n # now change speed\n messenger.send('h')\n # have to reset for it to go into effect\n self.tb.restart_bananas()\n #print('wrong speed', self.tb.wrong_speed)\n taskMgr.step()\n taskMgr.step()\n messenger.send('x_axis', [2 * -self.tb.multiplier])\n avatar_h = self.tb.base.camera.getH()\n #print avatar_h\n start = time.time()\n for i in range(30):\n #print self.tb.x_mag\n #print self.tb.speed\n taskMgr.step()\n second_time = time.time() - start\n #print('time', second_time)\n #print self.tb.base.camera.getH()\n second_dist = avatar_h - self.tb.base.camera.getH()\n #print('dist', second_dist)\n second_speed = abs(second_dist / second_time)\n #print('first', first_speed)\n #print('second', second_speed)\n self.assertTrue(first_speed > second_speed)", "def calculate_framerate(self):\n # The \"StatFrameRate\" attribute always reads 0.0.\n # Called preiodically from \"resume\".\n from numpy import argsort,array as a,nan\n if len(self.Frames) < 2: return nan\n # Find the last two image based on their frame count.\n counts = 
a([self.Frames[i].frame.FrameCount for i in range(0,len(self.Frames))])\n times = a([self.frame_timestamp(i) for i in range(0,len(self.Frames))])\n order = argsort(counts)\n count1,count2 = counts[order][-2:]\n time1,time2 = times[order][-2:]\n if count1 == 0 or count2 == 0: return nan # not enough valid images.\n # Calculate the frame rate based on the last two images.\n if time2 == time1: return nan \n self.framerate = (count2-count1)/(time2-time1)", "def get_incremental_distance():\n return current_speed/float(FPS)", "def ComputeLightTravelTime(Det1Pos, Det2Pos):\n\n # Get relative position vector\n Det21Pos = Det2Pos - Det1Pos\n \n # Dot difference vector into itself to get magnitude of detector separation\n dist = np.sqrt(np.dot(Det21Pos,Det21Pos))\n\n # Normalise with speed of light\n travelTime = dist/c\n\n return travelTime", "def get_linear_track_error(self):\r\n return self._arm.get_linear_track_error()", "def timeForOneFrame(self):\n return self.timeLineTimeDuration/self.timeLineNumberOfFrames" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize a Student instance with the given name and the number of scores to associate with the given Student.
def __init__(self, name: str, number: int):\n    self._name = name\n    self._scores = [0] * number
[ "def __init__(self, name, surname):\n\t\t\n\t\tself.grades = {}\n\t\tself.attendance = 0\n\t\t\n\t\tif not (isinstance(name, str) and isinstance(surname, str)):\n\t\t\tname, surname = \"None\", \"None\"\n\t\tself.name, self.surname = name, surname", "def __init__(self, name, skill):\n \n super(Student, self).__init__(name)\n self.grades = []\n self.skill = skill", "def __init__(self, name, grade):\n self.student_info = {'name': name, 'grade': grade}", "def __init__(self, name, grade):\n self.student_info = [name, grade]", "def __init__(self, name: str):\n assert name in Card._map_names_to_scores\n self.name = name\n self.value = self._map_names_to_scores[name]", "def __init__(self, student_id, student_name, grad_year, classes_taken):\n self.student_name = student_name\n self.student_id = student_id\n self.grad_year = grad_year\n self.classes_taken = classes_taken", "def __init__(self, student, exam):\n\n self.student = student\n self.exam = exam\n self.score = None", "def number_of_students(self, number_of_students):\n\n self._number_of_students = number_of_students", "def add_student(self, name: str, grade: int) -> None:\n school_grade = self.students.setdefault(grade, [])\n school_grade.append(name)\n school_grade.sort()", "def __init__(self, name, ssn, address=\"\"):\n self.name = name\n self._ssn = ssn\n self.set_address(address)", "def __init__(self, \n student_id=0,\n # - Arguments from Person\n given_name=None, initials=None, family_name=None, \n email_address=None,\n # - Other student-specific arguments\n major=None, minor=None\n ):\n # - We can use super() to call the parent class' __init__ \n # because there's only one parent class...\n super().__init__(\n given_name, initials, family_name, email_address\n )\n # - But we ALSO need to initialize properties that are \n # members of THIS class\n self.student_id = student_id\n self.major = major\n self.minor = minor", "def add_student(student_name):\n students = load_students()\n students.append({'name': student_name, 'words': [], 'review words': [], 'reading strategy': [], 'books' : [], 'group': \"\"})\n print \"{} has been added\".format(student_name)\n save_students(students)", "def __init__(self,student_id,lname,fname, major='Computer Science',gpa='0.0'):\n super().__init__(lname,fname) # Call init on parent class\n self._student_id = student_id\n self._major = major\n self._gpa = gpa", "def add_student_to_course(self, course_name):\n self._course_student[course_name] += 1", "def _create_students(self, num_students):\r\n return [self.create_student('robot%d' % i) for i in xrange(num_students)]", "def test_student_init(self):\n\n student1 = Students('10440989', 'Mrunal, S', 'SFEN')\n\n self.assertEqual(student1.name, 'Mrunal, S')\n self.assertEqual(student1.major, 'SFEN')\n self.assertEqual(student1.cwid, '10440989')\n self.assertEqual(type(student1.courses), type(defaultdict()))\n self.assertEqual(type(student1.remaining_required), type(list()))\n self.assertEqual(type(student1.remaining_electives), type(list()))\n \n student2 = Students('10345678', 'Anirudha, P', 'CS')\n self.assertEqual(student2.name, 'Anirudha, P')\n self.assertEqual(student2.major, 'CS')\n self.assertEqual(student2.cwid, '10345678')\n self.assertEqual(type(student2.courses), type(defaultdict()))\n self.assertEqual(type(student2.remaining_required), type(list()))\n self.assertEqual(type(student2.remaining_electives), type(list()))", "def __init__(self, N, S, students, leaders):\n self.N = N\n self.S = S\n self.G = int(math.ceil(N/S))\n self.partitions = []\n 
self.students = students\n self.leaders = leaders", "def __init__(self):\n self.students = {}", "def _create_students_with_state(self, num_students, state=None, grade=0, max_grade=1):\r\n self.define_option_problem(PROBLEM_URL_NAME)\r\n students = [\r\n UserFactory.create(username='robot%d' % i, email='robot+test+%d@edx.org' % i)\r\n for i in xrange(num_students)\r\n ]\r\n for student in students:\r\n CourseEnrollmentFactory.create(course_id=self.course.id, user=student)\r\n StudentModuleFactory.create(course_id=self.course.id,\r\n module_state_key=self.location,\r\n student=student,\r\n grade=grade,\r\n max_grade=max_grade,\r\n state=state)\r\n return students" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the average score associated with this Student.
def get_average(self) -> float: return sum(self._scores) / len(self._scores)
[ "def avg_score(self):\r\n return np.average(self.scores)", "def getAverage(self):\n return sum(self.scores) / len(self.scores)", "def average_scorecons(self):\n return statistics.mean(self.scores)", "def get_avg_score(self):\n return self.evaluations.aggregate(Avg('score'))", "def average_grade(self):\n count = len(self._subjects)\n total = sum(e.average_score() for e in self._subjects.values())\n return total / count", "def calc_average_score(self):\n all_parameters = [self.temp_score, self.wind_score, self.precipitation_score]\n return sum(all_parameters) / len(all_parameters)", "def get_average(student):\n return (average(student[\"homework\"]) * 0.10 + average(student[\"quizzes\"]) * 0.30 + average(student[\"tests\"]) * 0.60 )", "def get_mean_score(rating_scores):\n return sum(rating_scores) / len(rating_scores)", "def average_rating(self):\n ratings = Rating.objects.filter(game=self)\n\n total_rating = 0\n for rating in ratings:\n total_rating += rating.score\n\n average = 0\n if (len(ratings)):\n average = total_rating / len(ratings)\n return average", "def _find_average_score(self, sentenceValue):\n sumValues = 0\n for entry in sentenceValue: \n sumValues += sentenceValue[entry]\n \n try:\n average = (sumValues / len(sentenceValue))\n except:\n average = 0\n return average", "def average(self):\n\t\treturn sum(self) / len(self)", "def average_grade(self):\n grade_sum = 0\n grades_length = 0\n for c in self.courses_grades:\n if c[1] != \"-\":\n grade_sum += int(c[1])\n grades_length += 1\n average = grade_sum / grades_length\n return average", "def calc_mean_score(movies):\r\n return round(sum([movie.score for movie in movies]) / len(movies), 1)", "def calc_mean_score(movies):\n return round(sum([movie.score for movie in movies]) / len(movies), 1)", "def get_mean(self):\n for grade in self.grade_list:\n self.total = self.total + grade\n self.mean = self.total / len(self.grade_list)", "def averaged_risk(self):\n return self._averaged_risk", "def average_grade(self):\n if not self.exam_results or not self._contains_graded_courses():\n return None\n return round(self._raw_average_grade(), 1)", "def get_avg_score(stu_obj, cls_obj):\r\n # 获取该班级的所有上课记录\r\n course_record_list = models.CourseRecord.objects.filter(class_grade=cls_obj)\r\n stu_study_score_list = models.StudyRecord.objects.filter(\r\n student=stu_obj, course_record__in=course_record_list\r\n ).values('score')\r\n\r\n score_sum = sum(map(lambda x: x['score'], stu_study_score_list))\r\n return '%.2f' %(score_sum/len(course_record_list), )", "def calc_mean_score(movies: List[Movie]) -> float:\n return round(sum([m.score for m in movies]) / len(movies), 1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Constructor creates a number with the given numerator and denominator and reduces it to lowest terms.
def __init__(self, numerator: int, denominator: int):\n    self._numerator = numerator\n    self._denominator = denominator\n    self._reduce()
[ "def __init__(self, numerator, denominator=1):\n gcd1 = math.gcd(numerator, denominator)\n\n if denominator < 0:\n self.numerator = -(int(numerator/gcd1))\n self.denominator = abs(int(denominator/gcd1))\n elif denominator == 0:\n self.numerator = 1\n self.denominator = 0\n else:\n self.numerator = int(numerator/gcd1)\n self.denominator = int(denominator/gcd1)", "def __init__(self, numerator, denominator=1):\n self.numerator = numerator\n self.denominator = denominator\n if self.denominator is not 0 :\n gcd = math.gcd(self.numerator,self.denominator)\n self.numerator = self.numerator/gcd\n self.denominator = self.denominator/gcd", "def __init__(self, numerator=0, denominator=1):\n if isinstance(numerator, Rational):\n denominator *= numerator.getDenominator()\n numerator = numerator.getNumerator()\n if isinstance(denominator,Rational):\n numerator *= denominator.getDenominator()\n denominator = denominator.getNumerator()\n \n if isinstance(numerator, (int,long)) and isinstance(denominator, (int,long)):\n if denominator == 0: # fraction is undefined\n self._numer = 0\n self._denom = 0\n else:\n factor = gcd( abs(numerator), abs(denominator) )\n if denominator < 0: # want to divide through by negated factor\n factor = -factor\n self._numer = numerator // factor\n self._denom = denominator // factor\n else:\n raise TypeError('numerator and denominator for rational must be integral.')", "def __init__(self, numerator, denominator):\n if denominator == 0:\n raise ValueError(\"Denominator must be non-zero\")\n gcd = self.__gcd(numerator, denominator)\n self.numerator = numerator / gcd\n self.denominator = denominator / gcd", "def __init__ (self,numerator,denominator=1):\n self.debug = False\n if (self.debug): print(f'enter fraction.__init__ with {numerator}, {denominator}')\n sign = int(numerator * denominator / abs(numerator * denominator))\n if (self.debug): print(f'enter sign is {sign}')\n self.value=(sign * abs(numerator),abs(denominator))\n self.simplify()", "def __init__(self, numerator, denominator=1, deadtime=0):\n self.numerator = numpy.poly1d(numerator)\n self.denominator = numpy.poly1d(denominator)\n self.simplify()\n self.deadtime = deadtime", "def __init__(self, numerator, denominator):\n \n # isinstance is a standard function which can be used to check if\n # a value is an object of a certain class. 
Remember, in Python\n # all the data types are implemented as classes.\n # ``isinstance(a, b´´) means more or less the same as ``type(a) is b´´\n # So, the following test checks that both parameters are ints as\n # they should be in a valid fraction.\n if not isinstance(numerator, int) or not isinstance(denominator, int):\n raise TypeError\n \n # Denominator can't be zero, not in mathematics, and not here either.\n elif denominator == 0:\n raise ValueError\n \n self.__numerator = numerator\n self.__denominator = denominator", "def __reduce(self):\n if self.denominator <0:\n self.denominator *= -1\n self.numerator *= -1\n gcd = math.gcd(int(self.denominator),int(self.numerator))\n if self.denominator != 0 and self.numerator!= 0:\n if gcd > 0:\n self.denominator /= gcd\n self.numerator /= gcd\n self.numerator = int(self.numerator)\n self.denominator = int(self.denominator)", "def __make_denominator_integer(self):\n while self.denominator % 1 !=0:\n self.denominator *=10\n self.numerator *=10", "def __init__(self, num, denom):\n assert type(num) == int and type(denom) == int, \"ints not used\"\n self.num = num\n self.denom = denom\n def simplify(x, y):\n \"\"\" Simplifies a fraction \"\"\"\n if x % 2 > 0:\n if y % x > 0:\n # Check Prime\n prime = check_prime(x, y)\n if prime == 0:\n return str(int(x)) + \"/\" + str(int(y))\n else:\n return simplify ((x / prime), (y / prime))\n else:\n return str(int(x/x)) + \"/\" + str(int(y/x))\n else:\n return simplify ((x / 2), (y / 2))\n def check_prime(x, y):\n \"\"\" Function used by simplify to check prime number division of num and denom \"\"\"\n pri = (3,5,7,11,13,17,19,23)\n for i in pri:\n if (x % i == 0) and (y % i == 0):\n return i\n return 0", "def __new__(cls, numerator=0, denominator=None, _normalize=True):\n self = super(Fraction, cls).__new__(cls)\n\n if denominator is None:\n if type(numerator) is int:\n self._numerator = numerator\n self._denominator = 1\n return self\n\n elif isinstance(numerator, numbers.Rational):\n self._numerator = numerator.numerator\n self._denominator = numerator.denominator\n return self\n\n elif isinstance(numerator, float):\n # Exact conversion from float\n value = Fraction.from_float(numerator)\n self._numerator = value._numerator\n self._denominator = value._denominator\n return self\n\n elif isinstance(numerator, Decimal):\n value = Fraction.from_decimal(numerator)\n self._numerator = value._numerator\n self._denominator = value._denominator\n return self\n\n elif isinstance(numerator, str):\n # Handle construction from strings.\n m = _RATIONAL_FORMAT.match(numerator)\n if m is None:\n raise ValueError('Invalid literal for Fraction: %r' %\n numerator)\n numerator = int(m.group('num') or '0')\n denom = m.group('denom')\n if denom:\n denominator = int(denom)\n else:\n denominator = 1\n decimal = m.group('decimal')\n if decimal:\n scale = 10**len(decimal)\n numerator = numerator * scale + int(decimal)\n denominator *= scale\n exp = m.group('exp')\n if exp:\n exp = int(exp)\n if exp >= 0:\n numerator *= 10**exp\n else:\n denominator *= 10**-exp\n if m.group('sign') == '-':\n numerator = -numerator\n\n else:\n raise TypeError(\"argument should be a string \"\n \"or a Rational instance\")\n\n elif type(numerator) is int is type(denominator):\n pass # *very* normal case\n\n elif (isinstance(numerator, numbers.Rational) and\n isinstance(denominator, numbers.Rational)):\n numerator, denominator = (\n numerator.numerator * denominator.denominator,\n denominator.numerator * numerator.denominator\n )\n else:\n raise 
TypeError(\"both arguments should be \"\n \"Rational instances\")\n\n if denominator == 0:\n raise ZeroDivisionError('Fraction(%s, 0)' % numerator)\n if _normalize:\n if type(numerator) is int is type(denominator):\n # *very* normal case\n g = math.gcd(numerator, denominator)\n if denominator < 0:\n g = -g\n else:\n g = _gcd(numerator, denominator)\n numerator //= g\n denominator //= g\n self._numerator = numerator\n self._denominator = denominator\n return self", "def __init__(self, numerator, denominator):\n self.num = numerator\n self.den = denominator\n\n\n if self.den == 0:\n raise ValueError(\"Error! The denominator of a fraction cannot be zero!\")", "def setNumerator(self , numerator):\n self.numerator = int(numerator)\n self.result = \"(\" + str(self.numerator) + \"/\" + str(self.denominator) + \")\"", "def simplify(self):\n\n if self.numerator == 0:\n self.denominator = 1\n if self.numerator < 0 and self.denominator < 0:\n self.numerator, self.denominator = -self.numerator, -self.denominator\n else:\n gcd_simple = gcd(self.numerator, self.denominator)\n\n self.numerator //= gcd_simple\n self.denominator //= gcd_simple", "def reduce(self):\n import math\n g = math.gcd(self.num, self.den)\n return Fraction(self.num//g, self.den//g)", "def __div__(self, other):\n if isinstance(other,(int,long)):\n other = Rational(other)\n return Rational(self._numer * other._denom, self._denom * other._numer)", "def reduce(self):\n # assign absolute value of numerator and denominator to a new variable\n abs_num = abs(self.num)\n abs_denom = abs(self.denom)\n\n # get a gcd\n GCD = 1\n i = 1\n while i <= min(abs_num, abs_denom):\n if (abs_num % i == 0) and (abs_denom % i == 0):\n GCD = i\n i = i + 1\n if self.num < 0 and self.denom < 0:\n self.num = abs_num // GCD\n self.denom = abs_denom // GCD\n else:\n self.num = self.num // GCD\n self.denom = self.denom // GCD", "def reduce_rational(self):\n print('in reduce')\n # find the gcd and then divide numerator and denominator by gcd\n the_gcd = gcd(self.numer,self.denom)\n return Rational(self.numer//the_gcd, self.denom//the_gcd)", "def __init__( self, *arg ):\r\n\t\tif ( len( arg ) == 2 ):\r\n\t\t\t# if one of the arguments is a fraction:\r\n\t\t\tif ( type( self ) in ( type( arg[ 0 ] ), type( arg[ 1 ] ) ) ):\r\n\t\t\t\t# future division is broken, so simple division doesn't work here.\r\n\t\t\t\tif ( type( self ) == type( arg[ 0 ] ) == type( arg[ 1 ] ) ):\r\n\t\t\t\t\t self.numerator = arg[ 0 ].numerator * arg[ 1 ].denominator\r\n\t\t\t\t\t self.denominator = arg[ 0 ].denominator * arg[ 1 ].numerator\r\n\t\t\t\telif ( type( self ) == type( arg[ 0 ] ) ):\r\n\t\t\t\t\tself.numerator = arg[ 0 ].numerator\r\n\t\t\t\t\tself.denominator = arg[ 0 ].denominator * arg[ 1 ]\r\n\t\t\t\telse: #( type( self ) == type( arg[ 1 ] ) ):\r\n\t\t\t\t\tself.numerator = arg[ 0 ] * arg[ 1 ].denominator\r\n\t\t\t\t\tself.denominator = arg[ 1 ].numerator\r\n\t\t\t\t\t\r\n\t\t\telif ( type( arg[ 0 ] ) in FRACTION_VALID_TYPES ) and ( type( arg[ 1 ] ) in FRACTION_VALID_TYPES ):\r\n\t\t\t\tself.numerator = arg[ 0 ]\r\n\t\t\t\tif ( arg[ 1 ] ):\r\n\t\t\t\t\tself.denominator = arg[ 1 ]\r\n\t\t\t\telse:\r\n\t\t\t\t\traise ZeroDivisionError( \"Denominator of a fraction cannot be 0\" )\r\n\t\t\telse:\r\n\t\t\t\traise TypeError( \"Invalid type for Fraction Constructor\" )\r\n\r\n\t\telif ( len( arg ) == 1 ):\r\n\t\t\tif ( type( arg[ 0 ] ) in FRACTION_VALID_TYPES ):\r\n\t\t\t\t self.numerator = arg[ 0 ]\r\n\t\t\t\t self.denominator = 1\r\n\t\t\telif ( type( arg[ 0 ] ) == type( self ) ): # 
if the argument is a fraction, copy it.\r\n\t\t\t\tself.numerator = arg[ 0 ].numerator\r\n\t\t\t\tself.denominator = arg[ 0 ].denominator\r\n\t\t\telse:\r\n\t\t\t\ttry: # check to see if the object has a __fraction__ method that returns a fraction. If not, raise an error.\r\n\t\t\t\t\tf = arg[ 0 ].__fraction__( )\r\n\t\t\t\texcept AttributeError:\r\n\t\t\t\t\traise TypeError( \"Invalid type for fraction constructor\" )\r\n\t\t\t\tif ( type( f ) == type( self ) ):\r\n\t\t\t\t\tself = f\r\n\t\t\t\telse:\r\n\t\t\t\t\traise TypeError( \"__fraction__( ) method returns incorrect data type for fraction constructor\" )\r\n\t\telif not len( arg ):\r\n\t\t\tself.numerator = 0\r\n\t\t\tself.denominator = 1\r\n\t\telse:\r\n\t\t\traise TypeError( \"fraction constructor takes at most 2 arguments (%d given)\" % len( arg ) )\r\n\r\n\t\t#eliminate any float values, we don't need floats in a fraction.\r\n\t\tif ( types.FloatType in ( type( self.numerator ), type( self.denominator ) ) ):\r\n\t\t\tself.numerator, self.denominator = self._noFloats( self.numerator, self.denominator )\r\n\t\t\t\r\n\t\tself._reduce( )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the current pin.
def get_pin(self) -> str: return self._pin
[ "def getPin(self):\n\t\treturn self.pin", "def getPin(self):\r\n return self.pin", "def pin(self):\n return self.__pin", "def pin(self):\n return self._pin_num", "def pin(self) -> int:", "def pin_name(self):\n return self.__pinName", "async def get_pin_thread(self) -> int:\n return await self.AD.threading.get_pin_thread(self.name)", "def as_pin_in( self ) -> \"pin_in\":\n return self._as_pin_in( self )", "def read_pin(self, pin):\n tmp = self.read()\n return (tmp >> pin) & 0x1", "async def get_app_pin(self) -> bool:\n return await self.AD.threading.get_app_pin(self.name)", "def _getCurrentPoint(self):\n return self.__currentPoint", "def GetPinCode(self):\n result = self.SerialSendReceive(self.CMD_GET_PIN_CODE,\n msg='getting pin code')\n return result", "def get_location(self):\n\t\treturn self.current_coordinate", "def get(self, pin):\n\t\treturn self.accounts.get(pin, None)", "def current_address(self):\n\t\treturn core.BNLowLevelILGetCurrentAddress(self.handle)", "def pin_state(self, pin):\n port_num = self._convert_pin_port(pin)\n if port_num:\n value = gpio.input(port_num)\n return value", "def current_instruction(self):\n return self.instruction_at(self.pc)", "def getIP(self):\n\treturn self.ipEntry.get()", "def read(self,pin):\n\t\treturn GPIO.input(pin)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If the amount is valid, subtracts it from the balance and returns None; otherwise, returns an error message.
def withdraw(self, amount):\n    if amount < 0:\n        return "Amount must be >= 0"\n    elif self._balance < amount:\n        return "Insufficient funds"\n    else:\n        self._balance -= amount\n        return None
[ "def test_fail_balance_negative(self):\n self.bundle.transactions[3].value -= 1\n\n validator = BundleValidator(self.bundle)\n\n self.assertFalse(validator.is_valid())\n\n self.assertListEqual(\n validator.errors,\n\n [\n 'Bundle has invalid balance (expected 0, actual -1).',\n ],\n )", "def negative_balance_error(root_in: str):\r\n\r\n import pandas as pd\r\n \r\n transactions = pd.read_excel(root_in)\r\n\r\n income = transactions[transactions['category'] == 'charge']['cost for each unit'].sum()\r\n expenses = transactions[transactions['category'] != 'charge']['cost for each unit'].sum()\r\n total_balance = income - expenses\r\n \r\n if total_balance >= 0:\r\n \r\n return ('Your total balance is' + str(total_balance)+ ', All expenses have been paid.')\r\n \r\n else:\r\n \r\n return ('Warning! You have negative total balance. You need at least' + str(abs(total_balance)))", "def test_clean_amount_insufficient_balance(self):\n # This test returns a user who have a credit of 100.00, so we will try\n # here to pass that.\n self.test_data['amount'] = 200.0\n self.form.cleaned_data = self.test_data\n\n self.assertRaisesMessage(\n forms.ValidationError,\n _(u'You do not have sufficient credit to complete the payment'),\n self.form.clean_amount\n )", "def check_amount_validity(self, amount):\r\n\r\n alert = \"Not a valid amount. Please try again!\"\r\n\r\n if type(amount) == int or type(amount) == float:\r\n return amount\r\n else:\r\n return alert", "def check_balance(self):\n if self.amount > self.account.balance:\n raise InsuficientFunds", "def deposit(self, amount):\n message = self.account.deposit(float(amount))\n if message:\n return message\n else:\n self.myView.displayAccount()\n return \"success\"", "def test_fail_balance_positive(self):\n self.bundle.transactions[0].value += 1\n\n validator = BundleValidator(self.bundle)\n\n self.assertFalse(validator.is_valid())\n\n self.assertListEqual(\n validator.errors,\n\n [\n 'Bundle has invalid balance (expected 0, actual 1).',\n ],\n )", "def check_balance(self, data):\n remain = sum([pack['amt_eur'] * OPERATIONS_DIRECTION[pack['method']] for pack in self.all_packages\n if pack['account'] == data['account'] and\n data['method'] in ['transfer', 'deposit', 'withdrawal']])\n if remain + OPERATIONS_DIRECTION[data['method']] * data['amt_eur'] < 0:\n raise OperationException('You havent money to perform this operation')", "def clean_amount(self):\n\n amount = self.cleaned_data[\"amount\"]\n listing_obj = Auction.objects.get(pk=self.listing)\n\n if listing_obj.auction_bids.last() == None:\n if amount < listing_obj.starting_bid:\n raise ValidationError(\"Error: Bid is lower than the starting bid.\")\n else:\n if amount <= listing_obj.auction_bids.last().amount:\n raise ValidationError(\"Error: Bid must be higher than the previous bids.\")\n\n return amount", "def withdrawMoney(self, withdraw_amount):\r\n if (self.balance_amt - withdraw_amount) > 0:\r\n self.balance_amt = self.balance_amt - withdraw_amount\r\n else:\r\n raise WithdrawError #Exception('Overdraft withdrawal Error. 
Cannot withdraw more than amount in account balance: {}'.format(self.balance_amt))\r", "def withdraw(self, amount: int) -> int:\n if self.balance - amount >= 0:\n self.balance = self.balance - amount\n self.transactions.append(f'Withdrew from Account: -{amount}')\n return self.balance\n pass", "def HandleWithdrawl(balance):\r\n WithdrawMoney= input(\"Enter amount to withdrawl : \")\r\n while not WithdrawMoney.isdigit() or int(WithdrawMoney) <= 0 or int(WithdrawMoney)%10 != 0:\r\n print(\"Withdrawl amount must be only numbers and needs to be multiplication of 10. For Eg: 10,20,30 etc.\")\r\n WithdrawMoney= input(\"Enter amount to withdrawl : \") \r\n WithdrawMoney = int(WithdrawMoney)\r\n if WithdrawMoney <= balance:\r\n balance -= WithdrawMoney\r\n\r\n else:\r\n print(\"\\n{}{:.2f}\\n\".format(\"Overdraft is not allowed.Your Balance is $\",balanceNow))\r\n \r\n return balance", "def test_payment_neg_amount(self):\n data = {\n \"from_account\": self.bob_account.id,\n \"to_account\": self.bob_account.id,\n \"amount\": -100\n }\n resp = self.client.post(self.url, data=data, format=\"json\")\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n resp_data = resp.data\n self.assertIn(\"non_field_errors\", resp_data)\n self.assertEqual(len(resp_data[\"non_field_errors\"]), 1)\n self.assertEqual(\n resp_data[\"non_field_errors\"][0], \"Amount must be positive value\")", "def validate_deposit(amt):\n \n if type(amt) == float and amt > 0:\n return True\n else:\n return False", "def withdraw(self,amount):\n self.amount = amount\n self.balance -= self.amount\n\n if self.balance <= 0:\n print(f\"Balance: {self.balance}\")\n self.balance -= 10\n return \"Insufficient funds. Balance Charged $10.\\n\"\n else:\n print(f\"Amount Withdrawn: ${self.amount}.\")\n return \"\\n\"", "def validate(self):\n if self.amount > 0:\n return True\n return False", "def withdraw(self, amount):\n\n if self.balance < Decimal('0.00'):\n raise ValueError('You can not overdraft your account')\n\n # if withdraw is less than balance, subtract amount from balance\n self.balance -= amount", "def withdraw(self, amount):\n if amount > self.balance:\n raise ValueError('insufficient funds to withdraw $%.2f' % amount)\n self.balance -= amount\n return self.balance", "def check_withdrawal(self, amount: int) -> bool:\n if self.balance - amount >= 0:\n self.transactions.append(f'Checked Withdrawal: -{amount} from {self.balance}')\n return True\n else:\n self.transactions.append(f'Checked Withdrawal: -{amount} from {self.balance}')\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes, deposits, and returns the interest.
def compute_interest(self) -> float:\n    interest = self._balance * SavingsAccount.RATE\n    self.deposit(interest)\n    return interest
[ "def computeInterest(self):\r\n interest = self.balance * SavingsAccount.RATE\r\n self.deposit(interest)\r\n return interest", "def computeInterest(self):\n\t\tinterest = self.balance * savingAccount.RATE\n\t\tself.deposit(interest)\n\t\treturn interest", "def computeInterest(self):\r\n total = 0.0\r\n for account in self.accounts.values():\r\n total += account.computeInterest()\r\n return total", "def computeInterest(self):\n\t\ttotal = 0.0\n\t\tfor account in self.accounts.values(:\n\t\t\ttotal += account.computeInterest()\n\t\treturn total", "def interest(self):\n return self._interest", "def interest_rate(self):\n return 0.00299", "def total_interest(self):\n return sum(self.table[\"interest\"])", "def calcMonthlyInterest(self):\n imensual=self.ianual/12.\n self.deposit(imensual*self.balance)", "def calcInterest(principal, apr, numOfDays):\n return principal * ((apr/100.00) / 365.00 ) * numOfDays;", "def investment(principal, interest):\r\n while True:\r\n principal *= (1 + interest)\r\n yield principal", "def __calculate_monthly_interest(self):\n return self.__percentage_interest / 12", "def effecticeInterestRate():\n rate = float(input(\"What is your interest rate:\\n\"))\n compound = int(input(\"How many times in a year you give interest:\\n\"))\n\n EIR = (1 + ((rate/100)/compound))**compound - 1\n eir = EIR*100\n return \"Your effective interest rate is: %.3f\" % eir", "def nominalInterest(n, cont, PV, PMT, FV, CF, PF, x):\n\n if PMT == 0:\n ieff = (FV / PV) ** (1.0 / n) - 1.0\n else:\n \"\"\"Solve iteratively for ieff using Newton-Raphson method\"\"\"\n \"\"\" Calculate initial guess \"\"\"\n if PMT * FV >= 0:\n icurr = fabs((n * PMT + PV + FV) / (n * PV))\n else:\n if PV == 0:\n icurr = fabs((FV + n * PMT) /\n 3 * (PMT * (n - 1) ** 2 + PV - FV))\n else:\n icurr = fabs((FV - n * PMT) / 3 *\n (PMT * (n - 1) ** 2 + PV - FV))\n\n \"\"\"Calculate using Newton-Raphson method\"\"\"\n while True:\n diff = equation(n, icurr, cont, PV, PMT, FV, CF, PF, x)\\\n / derivative(n, icurr, cont, PV, PMT, FV, CF, PF, x)\n inext = icurr - diff\n\n if fabs(diff) < 10 ** -10:\n break\n else:\n icurr = inext\n\n ieff = inext\n\n \"\"\"Calculate nominal interest rate\"\"\"\n i = toNominalRate(ieff, cont, CF, PF)\n\n return i", "def debt_servicing_cost_interest_amount(self):\n return self._debt_servicing_cost_interest_amount", "def interest_to_principle(self) -> float:\n return float(round(self.total_interest / self.total_principal * 100, 1))", "def total_interest(self, time):\r\n return self._initial * self._rate * time", "def internal_interest_rate_update(self):\n self.verify_internal(sp.unit)\n\n last_cycle = sp.as_nat(self.data.last_update_timestamp -\n sp.timestamp(0))//Constants.REFERENCE_INTEREST_UPDATE_INTERVAL\n current_cycle = sp.as_nat(\n sp.now-sp.timestamp(0))//Constants.REFERENCE_INTEREST_UPDATE_INTERVAL\n\n sp.verify(current_cycle > last_cycle, message=Errors.TOO_EARLY)\n\n price_difference = sp.local(\"price_difference\", self.data.observed_price - self.data.target_price)\n stable_token_difference = sp.min(\n abs(price_difference.value), self.data.target_price >> Constants.MAX_STABLE_TOKEN_BITSHIFT)\n normalised_stable_token_difference = (\n stable_token_difference*Constants.FX_MULTIPLIER)/self.data.target_price\n target_step = sp.local(\"target_step\", (sp.as_nat((1 << (normalised_stable_token_difference >>\n Constants.SCALING_FACTOR_ONE))-1)*Constants.PRECISION_FACTOR) >> Constants.SCALING_FACTOR_TWO)\n\n self.update_accrual(sp.unit)\n\n with sp.if_(price_difference.value > 0):\n 
self.data.reference_interest_rate = sp.as_nat(\n sp.max(self.data.reference_interest_rate-target_step.value, Constants.SECONDS_INTEREST_MINIMUM))\n with sp.else_():\n self.data.reference_interest_rate = sp.min(\n self.data.reference_interest_rate+target_step.value, Constants.SECONDS_INTEREST_MAXIMUM)\n\n self.data.last_update_timestamp = sp.now", "def calculate_profit(self):", "def compound_interest(principal, interest, compound_times, years_to_grow):\n # convert annual interest into a fraction form\n interest = interest / 100\n\n # calculate the amount of money in the account after the specified number of years\n result = principal * ((1 + (interest / compound_times)) ** (compound_times * years_to_grow))\n\n # round result to two decimal places\n amount_of_money = round(result, 2)\n return amount_of_money" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This request returns all the colors in an image as RGB values.
def color(self, image):\n    response = self._send_request("color", files=dict(image=image))\n    return response[self._layer]['colors']
[ "def get_colors(img):\n return [ c[1] for c in img.getcolors(img.size[0]*img.size[1]) ]", "def get_colors(self, url):\n fd = urlopen(url)\n f = io.BytesIO(fd.read())\n im = Image.open(f)\n palette = im.quantize(colors=len(self.lights)).getpalette()\n return self.extract_colors(palette, len(self.lights))", "def _colored_img_to_arr(image, verbose=False):\n height, width = image.size\n arr = np.array(image.getdata())\n arr = arr.reshape(3, height, width)\n r = arr[0]\n g = arr[1]\n b = arr[2]\n return r, g, b", "def __retrieve_colours(self) -> list:\n\n cur = self.con.cursor()\n cur.execute('SELECT id, color FROM public.app_coloract')\n result = cur.fetchall()\n return result", "def getColors(self):\n _val = self._color\n _r = (_val & 0xff0000) >> 16\n _g = (_val & 0xff00) >> 8\n _b = (_val & 0xff)\n return _r, _g, _b", "def rgb(self):\n return [self.__r, self.__g, self.__b]", "def get_all_rgb_values(self):\n\n rgb_values = []\n response = self._table.scan()\n for item in response['Items']:\n rgb_values.append(self._convert_rgb_string_to_tuple(item['rgb_values']))\n\n return rgb_values", "def generate_rgb(image, colors):\n color_list = []\n for color in colorgram.extract(image, colors):\n rgb = color.rgb\n color_rgb = []\n for i in rgb:\n color_rgb.append(i)\n color_list.append(tuple(color_rgb))\n\n return color_list", "def get_red_pixels(image):\n red_image = {'height' : image['height'], 'width' : image['width'], 'pixels' : image['pixels'][:]}\n for x in range(image['width']):\n for y in range(image['height']):\n red_pixel = get_pixel(image, x, y)[0] #We index into each tuple to its first element, which will be the red pixel.\n set_pixel(red_image, x, y, red_pixel)\n return red_image", "def _get_colors_analysis(json, list):\n for color in json.get(\"colors\"):\n list.append(_hex_to_decimal(_rgb_to_hex(color).replace(\"#\",\"\")))", "def getPaletteInRgb(img):\n assert img.mode == 'P', \"image should be palette mode\"\n pal = img.getpalette()\n colors = chunk(pal, 3, False)\n return colors", "def get_color(im_obj):\n #im = Image.open(path, 'r')\n x, y = im_obj.size\n\n r, g, b = 0, 0, 0\n for i in xrange(x):\n for j in xrange(y):\n color_px = im_obj.getpixel((i, j))\n #print color_px\n r += color_px[0]\n g += color_px[1]\n b += color_px[2]\n\n r = r / (x * y)\n g = g / (x * y)\n b = b / (x * y)\n return (r, g, b)", "def rgb_colors() -> Iterator[Tuple[int, int, int]]:\n\n while True:\n rgb = randrange(0, 256**3) # nosec\n rg, b = divmod(rgb, 256)\n r, g = divmod(rg, 256)\n yield (r, g, b)", "def colors(self):\n\t\treturn [(0, 30, 255),(0, 30, 120)]", "def colors(self):\r\n\t\treturn self._colors", "def rgb_color(self) -> tuple[int, int, int]:\n rgb = self.device.color\n return rgb[\"r\"], rgb[\"g\"], rgb[\"b\"]", "def read_rgb():\n lib.iw_rgb.turn_led_on()\n time.sleep(2)\n rgb_values = lib.iw_rgb.get_rgb()\n log_iw(\"Red: \" + str(rgb_values[0]))\n log_iw(\"Green: \" + str(rgb_values[1]))\n log_iw(\"Blue: \" + str(rgb_values[2]))\n time.sleep(2)\n lib.iw_rgb.turn_led_off()\n return rgb_values", "def list_colors():\n clrs = [color for color, value in colors.items()]\n return clrs", "def color(self):\n return tuple(self._to_255(v) for v in self.value)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create instances of each available layer.
def __init__(self):\n    for layer in self._layer_class_map:\n        setattr(self, layer, self._layer_class_map[layer]())
[ "def init_layers(self):\n\n # get caching layers activated\n caching_layers = G3WCachingLayer.objects.all()\n for caching_layer in caching_layers:\n self.add_layer(str(caching_layer), caching_layer)", "def _create_layers(self):\n assert not self._locked, \"ObservationEncoder: layers have already been created\"\n\n for k in self.obs_shapes:\n if self.obs_nets_classes[k] is not None:\n # create net to process this modality\n self.obs_nets[k] = self.obs_nets_classes[k](**self.obs_nets_kwargs[k])\n elif self.obs_share_mods[k] is not None:\n # make sure net is shared with another modality\n self.obs_nets[k] = self.obs_nets[self.obs_share_mods[k]]\n\n self.activation = None\n if self.feature_activation is not None:\n self.activation = self.feature_activation()", "def create_netlist(self):\n self.add_modules()\n self.add_pins()\n self.create_instances()", "def create_all(self, registry):\n for cls in registry.values():\n self.create_class(cls)", "def create(self):\n\n if rs.IsLayer(self.name):\n\n return self\n\n mom = \"\"\n \n for s in self.path:\n \n son = s if (mom == \"\") else (mom + \"::\" + s)\n\n mommy = None if mom == \"\" else mom\n\n if not rs.IsLayer(son):\n\n rs.AddLayer(s, color = None, visible = True, locked = False, parent = mommy)\n\n mom = son\n \n return self", "def build_layers(self):\n raise NotImplementedError", "def initialize(self):\n for layer in self._layers:\n layer.initialize()", "def create_netlist(self):\n # This will create a default set of bitline/wordline names\n self.create_all_wordline_names()\n self.create_all_bitline_names()\n\n self.add_modules()\n self.add_pins()\n self.create_instances()", "def declare_layers(self, names):\n for name in names:\n self[name]\n return self", "def initializeLayers(self):\n\n # first delete any current display layers\n layers = cmds.ls( type='displayLayer')\n for layer in layers:\n if (layer.endswith(self.LAYER_SUFFIX)):\n cmds.delete(layer)\n\n # get the current UID list so we can extract the site, year, sector, and\n # level information\n uidList = self.getUidList()\n\n if not uidList:\n return\n\n # we'll use the first track as representative of the site and level,\n # presumed to be in common with all tracks in this scene.\n track = self.getTrackByUid(uidList[0])\n trackwayNames = self.getTrackwayNames(track.site, track.level)\n\n # then for each trackway name (such as 'S1') create a corresponding\n # layer with the LAYER_SUFFIX, then populate it with the track nodes of\n # the tracks comprising that trackway\n for trackwayName in trackwayNames:\n layer = '%s%s' % (trackwayName, self.LAYER_SUFFIX)\n\n # then make the layer\n self.createLayer(layer)\n\n # get a list of tracks for this trackway (filtering on site, level,\n # sector and name)\n trackway = self.getTrackway(trackwayName, uidList)\n if trackway and len(trackway) > 0:\n self.addTrackwayToLayer(layer, trackway)\n\n return trackwayNames", "def create_instances(self):\n self.create_dffs()\n self.create_clk_buf_row()\n self.create_gated_clk_bar_row()\n self.create_gated_clk_buf_row()\n self.create_wlen_row()\n if (self.port_type == \"rw\") or (self.port_type == \"w\"):\n self.create_rbl_delay_row()\n self.create_wen_row()\n if (self.port_type == \"rw\") or (self.port_type == \"r\"):\n self.create_sen_row()\n self.create_delay()\n self.create_pen_row()", "def __createLayers(self, layer_sizes, backward = False) :\n layers = []\n for i in six.moves.range(len(layer_sizes) - 1) :\n if not backward :\n in_size = layer_sizes[i]\n out_size = layer_sizes[i + 1]\n else :\n 
in_size = layer_sizes[i + 1]\n out_size = layer_sizes[i]\n layers.append(F.Linear(in_size, out_size))\n\n return layers", "def create_networks(num_networks, input_shape, network_shape, base_scope):\r\n return [Network(input_shape, network_shape,\r\n scope='{}{}'.format(base_scope, i))\r\n for i in range(num_networks)]", "def __init__(self, layers):\r\n self.pnetwork = []\r\n for layer in layers:\r\n self.pnetwork.append(perceptronlayer.Perceptronlayer(layer))", "def __initAvailableLayerTypes(self):\n from backend.caffe.path_loader import PathLoader\n caffe = PathLoader().importCaffe()\n layerNameMainParts = list(caffe.layer_type_list())\n\n res = {}\n paramsPerLayerType = {}\n\n # calculate common parameters of all layer types\n # by removing all which will be used for one specific layer type only\n # also keep in mind which ones have been removed to readd them to specific layers\n commonParams = self._availableParameterGroupDescriptors[\"LayerParameter\"].parameter() #use .parameter() on purpose\n layerSpecificParameters = set()\n for nameMainPart in layerNameMainParts:\n specificParamsName = [nameMainPart + \"Parameter\"]\n if moreLayerNameParameter.has_key(nameMainPart):\n specificParamsName.append( moreLayerNameParameter[nameMainPart])\n paramsPerLayerType[nameMainPart] = {}\n for key, value in commonParams.items():\n if value.isParameterGroup() and value.parameterName() in specificParamsName:\n paramsPerLayerType[nameMainPart][key] = value\n layerSpecificParameters.add(key)\n\n\n # special case: shared params for loss layers\n key = \"loss_param\"\n value = commonParams[key]\n del commonParams[key]\n for nameMainPart in layerNameMainParts:\n if LayerType.getCategoryByName(nameMainPart) == LayerType.CATEGORY_LOSS:\n paramsPerLayerType[nameMainPart][key] = value\n\n # TODO is there a special case for the TransformationParameter?\n\n # create each layer type after one another\n for nameMainPart in layerNameMainParts:\n\n # add common params to the specific ones\n layerTypeParam = paramsPerLayerType[nameMainPart].keys()\n paramsPerLayerType[nameMainPart].update(commonParams)\n\n irrelevant = layerSpecificParameters.difference(layerTypeParam)\n res[nameMainPart] = LayerType(nameMainPart, paramsPerLayerType[nameMainPart], layerTypeParam, irrelevant)\n\n self._commonParams = commonParams\n self._availableLayerTypes = res", "def setup_networks(self):\n for name in self.network_names:\n self.create_network(name)", "def make_instances():\n body = request.json\n return create_instances(\n flavor=body.get(\"flavor\"),\n name=body.get(\"name\"),\n network_name=body.get(\"network_name\"),\n )", "def _initLayers(self):\n if isinstance(self.in_names, list):\n src_ds = gdal.Open(self.in_names[0])\n else:\n raise Exception(\"The input value should be a list of HDF files\")\n layers = src_ds.GetSubDatasets()\n self.layers = OrderedDict()\n n = 0\n if not self.subset:\n self.subset = [1 for i in range(len(layers))]\n for i in self.subset:\n if str(i) == '1':\n name = layers[n][0].split(':')[-1]\n self.layers[name] = list()\n n = n + 1", "def _duplicate_layer_stack(self, layer_stack):\n layer_objs = []\n for i_ly, ly in enumerate(layer_stack):\n layer_type = type(ly)\n layer_kwargs = ly.get_params()\n\n # Construct a parallel network for inference. Tie the weights to the training network.\n layer_kwargs.update(layer_stack[i_ly].get_trainable_params())\n layer_objs.append(layer_type(rng=self.rng, theano_rng=self.theano_rng, **layer_kwargs))\n\n return layer_objs" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set a value in the tensor at the given indices.
def set_value(self, indices, val): assert len(indices) == 3, indices if self.model_tensor is None: raise ValueError("Please set the tensor") self.model_tensor[indices[0], indices[1], indices[2]] = val return val
[ "def set_node_value(node: Node, value: np.ndarray):\n if node.type != 'Const':\n raise Exception('Can\\'t set value for non-constant node {}'.format(node.name))\n data_type = np.float32\n if node.out_port(0).is_data_type_defined():\n data_type = node.out_port(0).get_data_type()\n node.out_port(0).data.set_value(np.array(value).astype(data_type))", "def share(self, value):\n self._tensor = value", "def execute(self):\n self.tensor.fill(self.value)", "def set(self, value: T) -> None:\n with self.__lock:\n self.__value = value", "def setTensor(self, tensor):\t\t\n\t\tself.cur_tensor = tensor\n\t\tif tensor is not None:\n\t\t\tself.output_shape[self.cur_id] = self.cur_tensor.size()\n\t\telse:\n\t\t\tself.output_shape[self.cur_id] = None", "def transform(self, value):\n if self._variable:\n # anything that caches anything that relies on self.tensor needs\n # to clear its cache.\n self.clear_all_ancestor_caches()\n old_value = self.value\n if '_Param__tensor' in self._shared.cache:\n del self._shared.cache['_Param__tensor']\n self._shared.transform = value\n self.value = old_value\n else:\n self._shared.transform = value", "def SetNodeValue(self, p_int, p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=...):\n ...", "def set_value( self, row, col, value ):\n self.theMaze[row][col]=value", "def assign(self, val):\n\n if isinstance(val, array):\n err_code = _cudanet.copy_on_device(val.p_mat, self.p_mat)\n elif isinstance(val, (np.int32, np.float32, int, float)):\n err_code = _cudanet.assign_scalar(self.p_mat, ct.c_float(val))\n else:\n raise ValueError(\"Assigned value must be of type CUDAMatrix, int, or float.\")\n \n if err_code:\n raise generate_exception(err_code)\n\n return self", "def set_value(self, position, value):\n\n self.grid[position] = value", "def set_value ( self, object, value ):\n object[ self.index ] = value", "def assign(self, val):\n\n if isinstance(val, CUDAMatrix):\n err_code = _cudanet.copy_on_device(val.p_mat, self.p_mat)\n elif isinstance(val, (np.int32, np.float32, int, float)):\n err_code = _cudanet.assign_scalar(self.p_mat, ct.c_float(val))\n else:\n raise ValueError(\"Assigned value must be of type CUDAMatrix, int, or float.\")\n \n if err_code:\n raise generate_exception(err_code)\n\n return self", "def set_node(self, n, value):\n node = self.get_node(n)\n if node:\n node.value = value", "def set_value(self, m: int, n: int, value: int) -> None:\n\t\tself.matrix[m][n] = value", "def set(self, x: int, y: int, value: Union[int, float]) -> None:\n self.values[x][y] = value", "def assign(self, value: int) -> None:\n self._value = value\n self._row.add(value)\n self._column.add(value)\n self._box.add(value)", "def ggml_metal_set_tensor(ctx: ffi.CData, t: ffi.CData) -> None:\n ...", "def set_value(self, var_value):\n pass", "def set_value(self, value, borrow=False):\r\n if not borrow:\r\n #TODO: check for cuda_ndarray type\r\n if not isinstance(value, numpy.ndarray):\r\n # in case this is a cuda_ndarray, we copy it\r\n value = copy.deepcopy(value)\r\n self.container.value = value # this will copy a numpy ndarray\r" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a list of indices (with possible repeats), run optimization and return stats.
def best_value_many_indices(self, indices_list, **kwargs): indices_list = [tuple(x) for x in indices_list] stats = {indices: [] for indices in set(indices_list)} for indices in indices_list: stats[indices].append(self.best_value_indices(indices=indices, **kwargs)) return stats
[ "def scipy_optimize_from_indices(\n muygps: MuyGPS,\n batch_indices: np.ndarray,\n batch_nn_indices: np.ndarray,\n test: np.ndarray,\n train: np.ndarray,\n train_targets: np.ndarray,\n loss_method: str = \"mse\",\n verbose: bool = False,\n) -> np.ndarray:\n crosswise_dists = crosswise_distances(\n test,\n train,\n batch_indices,\n batch_nn_indices,\n metric=muygps.kernel.metric,\n )\n pairwise_dists = pairwise_distances(\n train, batch_nn_indices, metric=muygps.kernel.metric\n )\n return scipy_optimize_from_tensors(\n muygps,\n batch_indices,\n batch_nn_indices,\n crosswise_dists,\n pairwise_dists,\n train_targets,\n loss_method=loss_method,\n verbose=verbose,\n )", "def _sample_experts(self, indices, n=1):\n\n initialized = False\n num_tasks = len(indices)\n for idx in indices:\n batch_initialized = False\n for _ in range(n):\n path = self._expert_traj_buffers[idx].sample_path()\n\n no1 = np.roll(path['observations'], -1)\n no1[:, -1] = 0.0\n path['next_observation'] = no1\n\n if not batch_initialized:\n o = path['observations'][np.newaxis]\n a = path['actions'][np.newaxis]\n r = path['rewards'][np.newaxis]\n #s = path['success'][np.newaxis]\n #l = path['length'][np.newaxis]\n no = path['next_observation'][np.newaxis]\n batch_initialized = True\n else:\n o = np.vstack((o, path['observations'][np.newaxis]))\n no = np.vstack((no, path['next_observation'][np.newaxis]))\n a = np.vstack((a, path['actions'][np.newaxis]))\n r = np.vstack((r, path['rewards'][np.newaxis]))\n #s = np.vstack((s, path['success'][np.newaxis]))\n #l = np.vstack((l, path['length'][np.newaxis]))\n\n if not initialized:\n all_o = o[np.newaxis]\n all_no = no[np.newaxis]\n all_a = a[np.newaxis]\n all_r = r[np.newaxis]\n #all_s = s[np.newaxis]\n #all_l = l[np.newaxis]\n initialized = True\n else:\n all_o = np.vstack((all_o, o[np.newaxis]))\n all_no = np.vstack((all_no, no[np.newaxis]))\n all_a = np.vstack((all_a, a[np.newaxis]))\n all_r = np.vstack((all_r, r[np.newaxis]))\n #all_s = np.vstack((all_s, s[np.newaxis]))\n #all_l = np.vstack((all_l, l[np.newaxis]))\n\n observations = torch.as_tensor(\n all_o, device=global_device()).float().view(num_tasks, -1, self.T, self.O)\n next_observations = torch.as_tensor(\n all_no, device=global_device()).float().view(num_tasks, -1, self.T, self.O)\n actions = torch.as_tensor(\n all_a, device=global_device()).float().view(num_tasks, -1, self.T, self.A)\n rewards = torch.as_tensor(\n all_r, device=global_device()).float().view(num_tasks, -1, self.T, 1)\n #success = torch.as_tensor(\n # all_s, device=global_device()).view(num_tasks, -1, self.T, 1)\n #lengths = torch.as_tensor(\n # all_l, device=global_device()).int().view(num_tasks, -1, 1)\n\n return observations, next_observations, actions, rewards#, success, lengths", "def _do_test(self, indices, animated):\n # final_returns = []\n online_returns = []\n cnt=0\n for idx in indices:\n # runs, all_rets = [], []\n print(idx)\n # better do several times\n all_rets = [self.online_test_paths_exp(idx, deterministic=True, animated=animated, causal_update=True) for _ in range(3)]\n all_rets = np.mean(np.stack(all_rets), axis=0)\n # a list of n_trial, in each trial : is a list of trajs, most often 1 for a single testing traj.\n # final_returns.append(all_rets[-1])\n online_returns.append(all_rets)\n cnt += 1\n online_returns = np.mean(np.stack(online_returns), axis=0)\n return online_returns", "def _integrate(index, models):\n def index2(X, grad=False):\n indices = [index(X, grad, model) for model in models]\n if grad:\n return tuple([np.sum(_, 
axis=0) for _ in zip(*indices)])\n else:\n return np.sum(indices, axis=0)\n return index2", "def multi_run(replications: int, iters: List, n: int):\n global call_count\n kwargs = {\n # 'alpha': 0.75,\n # 'rho': 'VaR',\n 'alpha': 0.75,\n 'rho': 'CVaR',\n 'x0': 2,\n 'n0': n,\n 'mu_1': -15,\n 'mu_2': 10,\n 'sigma_1': 4,\n 'sigma_2': 2\n }\n\n out_dict = {\n 'SA': dict(),\n 'SA_SAA': dict(),\n 'NM': dict(),\n 'NM_SAA': dict(),\n 'LBFGS': dict(),\n 'LBFGS_SAA': dict(),\n 'EI': dict(),\n 'EI_SAA': dict()\n }\n total_calls = dict()\n for key in out_dict.keys():\n total_calls[key] = dict()\n for it_count in iters:\n kwargs['iter_count'] = it_count\n for key in out_dict.keys():\n out_dict[key][it_count] = dict()\n total_calls[key][it_count] = 0\n i = 0\n while i < replications:\n try:\n out_dict['SA'][it_count][i] = SA_run(seed=i, **kwargs)\n total_calls['SA'][it_count] += call_count\n call_count = 0\n out_dict['SA_SAA'][it_count][i] = SA_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['SA_SAA'][it_count] += call_count\n call_count = 0\n out_dict['NM'][it_count][i] = NM_run(seed=i, **kwargs)\n total_calls['NM'][it_count] += call_count\n call_count = 0\n out_dict['NM_SAA'][it_count][i] = NM_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['NM_SAA'][it_count] += call_count\n call_count = 0\n out_dict['LBFGS'][it_count][i] = LBFGS_run(seed=i, **kwargs)\n total_calls['LBFGS'][it_count] += call_count\n call_count = 0\n out_dict['LBFGS_SAA'][it_count][i] = LBFGS_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['LBFGS_SAA'][it_count] += call_count\n call_count = 0\n out_dict['EI'][it_count][i] = EI_run(seed=i, **kwargs)\n total_calls['EI'][it_count] += call_count\n call_count = 0\n out_dict['EI_SAA'][it_count][i] = EI_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['EI_SAA'][it_count] += call_count\n call_count = 0\n i += 1\n except:\n continue\n np.save('call_counts_cvar_%d.npy' % n, total_calls)\n evaluate(out_dict, n)", "def parallel_put_by_advanced_index(X, advanced_indices, values):\n x_indices, y_indices = advanced_indices\n for i in numba.prange(x_indices.shape[0]):\n x = x_indices[i]\n y = y_indices[i]\n X[x, y] = values[i]\n return X", "def process_profile_indices(\n results: Sequence[Result],\n profile_indices: Sequence[int],\n profile_list_ids: Union[int, Sequence[int]],\n):\n # get all parameter indices, for which profiles were computed\n plottable_indices = set()\n for result in results:\n for profile_list_id in profile_list_ids:\n # get parameter indices, for which profiles were computed\n if profile_list_id < len(result.profile_result.list):\n tmp_indices = [\n par_id\n for par_id, prof in enumerate(\n result.profile_result.list[profile_list_id]\n )\n if prof is not None\n ]\n # profile_indices should contain all parameter indices,\n # for which in at least one of the results a profile exists\n plottable_indices.update(tmp_indices)\n plottable_indices = sorted(plottable_indices)\n\n # get the profiles, which should be plotted and sanitize, if not plottable\n if profile_indices is None:\n profile_indices = list(plottable_indices)\n else:\n for ind in profile_indices:\n if ind not in plottable_indices:\n profile_indices.remove(ind)\n warn(\n 'Requested to plot profile for parameter index %i, '\n 'but profile has not been computed.' 
% ind\n )\n\n return profile_indices", "def indexTargetsProcesses(targets):\n print '\\nProcessing targets with faidx'\n # Initialise the args list\n indexArgs = []\n if __name__ == '__main__':\n # Initialise the pool of processes - it defaults to the number of processors\n indexPool = Pool()\n # for target in targets:\n # indexArgs.append(target)\n indexPool.map(indexTargets, targets)", "def sample_task_ind(self, inds: List[int]):\n for i in range(len(self.data)):\n all_targets = self.data[i].targets\n new_targets = [all_targets[ind] for ind in inds]\n self.data[i].targets = new_targets", "def getResult(targets, i=None):", "def compute_tuning_objective(results_list, hparams, trial_name, num_trials):\n found_solution = [r['found_solution'] for r in results_list]\n successful_program_counts = [\n r['npe'] for r in results_list if r['found_solution']]\n\n success_rate = sum(found_solution) / float(len(results_list))\n\n max_programs = FLAGS.max_npe # Per run.\n all_program_counts = [\n r['npe'] if r['found_solution'] else max_programs\n for r in results_list]\n programs_seen_fraction = (\n float(sum(all_program_counts))\n / (max_programs * len(all_program_counts)))\n\n # min/max/avg stats are over successful runs.\n metrics = {\n 'num_runs': len(results_list),\n 'num_succeeded': sum(found_solution),\n 'success_rate': success_rate,\n 'programs_seen_fraction': programs_seen_fraction,\n 'avg_programs': np.mean(successful_program_counts),\n 'max_possible_programs_per_run': max_programs,\n 'global_step': sum([r['num_batches'] for r in results_list]),\n 'hparams': hparams.values(),\n 'trial_name': trial_name,\n 'num_trials': num_trials}\n\n # Report stats per tasks.\n tasks = [r['task'] for r in results_list]\n for task in set(tasks):\n task_list = [r for r in results_list if r['task'] == task]\n found_solution = [r['found_solution'] for r in task_list]\n successful_rewards = [\n r['best_reward'] for r in task_list\n if r['found_solution']]\n successful_num_batches = [\n r['num_batches']\n for r in task_list if r['found_solution']]\n successful_program_counts = [\n r['npe'] for r in task_list if r['found_solution']]\n metrics_append = {\n task + '__num_runs': len(task_list),\n task + '__num_succeeded': sum(found_solution),\n task + '__success_rate': (\n sum(found_solution) / float(len(task_list)))}\n metrics.update(metrics_append)\n if any(found_solution):\n metrics_append = {\n task + '__min_reward': min(successful_rewards),\n task + '__max_reward': max(successful_rewards),\n task + '__avg_reward': np.median(successful_rewards),\n task + '__min_programs': min(successful_program_counts),\n task + '__max_programs': max(successful_program_counts),\n task + '__avg_programs': np.mean(successful_program_counts),\n task + '__min_batches': min(successful_num_batches),\n task + '__max_batches': max(successful_num_batches),\n task + '__avg_batches': np.mean(successful_num_batches)}\n metrics.update(metrics_append)\n\n # Objective will be maximized.\n # Maximize success rate, minimize num programs seen.\n # Max objective is always 1.\n weight = FLAGS.success_rate_objective_weight\n objective = (\n weight * success_rate\n + (1 - weight) * (1 - programs_seen_fraction))\n metrics['objective'] = objective\n\n return objective, metrics", "def computeAllIndex(self):\n for arm in range(self.nbArms):\n self.index[arm] = self.computeIndex(arm)", "def foreach_evaluator_with_index(self, func):\n\n local_result = [func(self.local_evaluator, 0)]\n remote_results = ray.get([\n ev.apply.remote(func, i + 1)\n for i, ev in 
enumerate(self.remote_evaluators)\n ])\n return local_result + remote_results", "def test_get_indices_several_existing_items(self):\r\n control_ids = ['PC.354', 'PC.355', 'PC.356', 'PC.481', 'PC.593']\r\n exp_control_indices = [0, 1, 2, 3, 4]\r\n\r\n fast_ids = ['PC.607', 'PC.634', 'PC.635', 'PC.636']\r\n exp_fast_indices = [5, 6, 7, 8]\r\n\r\n obs_control = _get_indices(self.dist_matrix_header, control_ids)\r\n self.assertEqual(obs_control, exp_control_indices)\r\n\r\n obs_fast = _get_indices(self.dist_matrix_header, fast_ids)\r\n self.assertEqual(obs_fast, exp_fast_indices)", "def run_several_iterations(iterations, means, horizon):\n\n # Initializing the results vector.\n results = [0]*horizon\n\n for iteration in range(iterations):\n\n # The current cumulative regret.\n results = np.add(results, run_sparring_algorithm(means[:, iteration], horizon))\n\n # Returning the average cumulative regret.\n return results/(iterations +.0)", "def start(self, start_idx=0, stop_idx=-1):\n if stop_idx == -1:\n stop_idx = len(self._generator_object)\n self._log_idx = start_idx\n self._tqdm = tqdm.tqdm(total=stop_idx-start_idx)\n for i, experiment in enumerate(self._generator_object.experiment_instances()):\n if start_idx <= i <= stop_idx:\n self._semaphore.acquire()\n opti = self._generator_object.experiment_class(**experiment)\n self._pool.apply_async(opti.optimize,callback=self._callback_success, error_callback=self._callback_fail)", "def special_Dijk(A1,A2,A3,A4,i,j,k):\n\n sum_terms = special_indices(i,j,k)\n start = time.time()\n total = 0\n for sum_index in sum_terms:\n total += term(A1,A2,A3,A4,sum_index,i,j,k)\n end = time.time()\n print('Sum time:', (end - start)/60)\n return(total*32/2304)", "def indices(\n index_group: Literal[\"all\"] | str | IndexGroup | Sequence[str],\n ignore_error: bool = False,\n **kwargs,\n) -> Dataset:\n indices = _get_indices_of_group(index_group)\n out = None\n if \"out_file\" in kwargs.keys():\n out = kwargs[\"out_file\"]\n del kwargs[\"out_file\"]\n acc = []\n for i in indices:\n log.info(f\"Computing index '{i.short_name}'\")\n kwargs[\"index_name\"] = i.short_name\n if ignore_error:\n try:\n res = index(**kwargs)\n if \"percentiles\" in res.coords:\n res = res.rename({\"percentiles\": i.short_name + \"_percentiles\"})\n if \"thresholds\" in res.coords:\n res = res.rename({\"thresholds\": i.short_name + \"_thresholds\"})\n acc.append(res)\n except Exception:\n warn(f\"Could not compute {i.short_name}.\")\n else:\n res = index(**kwargs)\n if \"percentiles\" in res.coords:\n res = res.rename({\"percentiles\": i.short_name + \"_percentiles\"})\n if \"thresholds\" in res.coords:\n res = res.rename({\"thresholds\": i.short_name + \"_thresholds\"})\n acc.append(res)\n ds: Dataset = xr.merge(acc)\n if out is not None:\n _write_output_file(\n result_ds=ds,\n input_time_encoding=ds.time.encoding,\n netcdf_version=kwargs.get(\"netcdf_version\", NetcdfVersionRegistry.NETCDF4),\n file_path=out,\n )\n return ds", "def run_qae_optimization(training_states, n_repetitions, exact=no_noise, noisy=gate_error):\n result_list = []\n def proxy(params, training_states, n_repetitions, exact=no_noise, noisy=gate_error):\n \"\"\"Embedded function version\n \"\"\"\n input_list = fix_list(params, all_param_array=all_param, var_param_array=var_param, fixed_vals_array=fixed_vals)\n fidelities = []\n for training_state in training_states:\n fid = cusp_stage2.compute_stage2_cost_function(*input_list, alpha=training_state, n_repetitions=n_repetitions,\n exact=exact, noisy=noisy)\n 
fidelities.append(fid)\n avg_fid = np.mean(fidelities)\n result_list.append(1-avg_fid)\n print(1-avg_fid)\n return 1. - avg_fid\n\n \n # Initialize parameters\n half_turn_min = 0\n half_turn_max = 2\n init_params = np.random.uniform(low=half_turn_min, high=half_turn_max,\n size=num_param)\n\n # Optimization using Nelder-Mead.\n h2_qae_wrap = lambda params: proxy(params, training_states=training_states,\n n_repetitions=n_repetitions, exact=exact, noisy=noisy)\n \n if noisy:\n maxiter = 60\n else:\n maxiter = None\n \n res = minimize(h2_qae_wrap, init_params, args=(),\n method='Nelder-Mead', tol=None, \n options={'disp': False, 'maxiter': maxiter, 'xatol': 0.001,\n 'return_all': False, 'fatol': 0.001})\n np.savetxt('stage2_data.csv',result_list, delimiter=',')\n return res.x" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute an online update given a precomputed minibatch of data and a model tensor.
def compute_online_update(rating_value, mb_np_orig, model_tensor_orig, idx_set, users_get_value=None, n_repeats=1, hotfix_update_hypers=None, plot_charts=False, pbars=None, cancel_updates=False, **kwargs): # internal object IDs to update obj1 = mb_np_orig['objects_rating_v1'][idx_set] obj2 = mb_np_orig['objects_rating_v2'][idx_set] # copying the minibatch and the model tensor (will be updated) mb_np_copy = deepcopy(mb_np_orig) model_tensor_copy = deepcopy(model_tensor_orig) if not cancel_updates: # SETTING THE RATING VALUE mb_np_copy['cmp'][idx_set, 0] = rating_value # creating the updater online = FeaturelessOnlineUpdater() online.hypers['aggregate_index'] = -1 # hotfix parameter updates if hotfix_update_hypers is not None: for key, value in hotfix_update_hypers.items(): online.hypers[key] = value for key, value in kwargs.items(): online.golden_params[key] = value # setting data online.set_minibatch(mb_np_copy) online.set_model_tensor(model_tensor_copy) online.set_subtract() online.silent = True # CONFIGURATION FOR INDICES indices_lst = [] for i in range(model_tensor_orig.shape[0]): indices_lst.append((i, obj1, 0)) indices_lst.append((i, obj2, 0)) indices_lst *= n_repeats # initial value for the loss/index initial_value = {ind: online.get_closure_loss(ind)(online.get_value(ind)) for ind in set(indices_lst)} if not cancel_updates: # RUNNING OPTIMIZATION with GOLDEN RATIO result = online.best_value_many_indices(indices_lst, assign_at_end=True) # plotting if plot_charts: visualize_result_loss(result, indices_lst) visualize_byindex(result, indices_lst, initial_value) else: result = None if pbars is not None: if 'comparison' in pbars: assert len(users_get_value) == len(pbars['comparison']) for user, pbar in zip(users_get_value, pbars['comparison']): # obtaining model scores score1 = online.get_value((user, obj1, 0)) score2 = online.get_value((user, obj2, 0)) # computing the comparison comparison = 1 / (1 + np.exp(score1 - score2)) * MAX_VALUE pbar.value = comparison if 'v1' in pbars: assert len(users_get_value) == len(pbars['v1']) for user, pbar in zip(users_get_value, pbars['v1']): # obtaining model scores score1 = online.get_value((user, obj1, 0)) pbar.value = score1 if 'v2' in pbars: assert len(users_get_value) == len(pbars['v2']) for user, pbar in zip(users_get_value, pbars['v2']): # obtaining model scores score1 = online.get_value((user, obj2, 0)) pbar.value = score1 return None else: return { 'new_model_tensor': model_tensor_copy, 'new_minibatch': mb_np_copy, 'online_learner': online, 'indices_lst': indices_lst, 'result': result, }
[ "def create_client_update_fn():\n\n @tf.function\n def client_update(model,\n dataset,\n initial_weights,\n client_optimizer,\n client_weight_fn=None):\n \"\"\"Updates client model.\n\n Args:\n model: A `tff.learning.Model`.\n dataset: A 'tf.data.Dataset'.\n initial_weights: A `tff.learning.ModelWeights` from server.\n client_optimizer: A `tf.keras.optimizer.Optimizer` object.\n client_weight_fn: Optional function that takes the output of\n `model.report_local_unfinalized_metrics` and returns a tensor that\n provides the weight in the federated average of model deltas. If not\n provided, the default is the total number of examples processed on\n device.\n\n Returns:\n A 'ClientOutput`.\n \"\"\"\n\n model_weights = _get_weights(model)\n tf.nest.map_structure(lambda v, t: v.assign(t), model_weights,\n initial_weights)\n num_examples = tf.constant(0, dtype=tf.int32)\n\n # Initialize local states for local and global corrections\n avg_local_states = tf.nest.map_structure(tf.zeros_like,\n model_weights.trainable)\n cum_local_states = tf.nest.map_structure(tf.zeros_like,\n model_weights.trainable)\n for batch in iter(dataset):\n with tf.GradientTape() as tape:\n output = model.forward_pass(batch)\n grads = tape.gradient(output.loss, model_weights.trainable)\n grads_and_vars = zip(grads, model_weights.trainable)\n client_optimizer.apply_gradients(grads_and_vars)\n num_examples += tf.shape(output.predictions)[0]\n\n # Get momentum factor and preconditioner to update local states\n client_opt_beta = _get_optimizer_momentum_beta(client_optimizer)\n client_opt_preconditioner = _get_optimizer_preconditioner(\n client_optimizer, model_weights)\n avg_local_states = tf.nest.map_structure(\n lambda m, p, b=client_opt_beta: b * m + (1 - b) * p,\n avg_local_states,\n client_opt_preconditioner)\n cum_local_states = tf.nest.map_structure(lambda m, n: m + n,\n avg_local_states,\n cum_local_states)\n\n aggregated_outputs = model.report_local_unfinalized_metrics()\n weights_delta = tf.nest.map_structure(\n lambda a, b, c: tf.math.divide_no_nan(a - b, c),\n model_weights.trainable, initial_weights.trainable, cum_local_states)\n local_cor_states = tf.nest.map_structure(\n lambda a: tf.math.divide_no_nan(1.0, a), cum_local_states)\n\n weights_delta, has_non_finite_weight = (\n tensor_utils.zero_all_if_any_non_finite(weights_delta))\n\n if has_non_finite_weight > 0:\n client_weight = tf.constant(0, dtype=tf.float32)\n elif client_weight_fn is None:\n client_weight = tf.cast(num_examples, dtype=tf.float32)\n else:\n client_weight = client_weight_fn(aggregated_outputs)\n\n return ClientOutput(weights_delta, client_weight, aggregated_outputs,\n local_cor_states)\n\n return client_update", "def inject(self, model):\n if not hasattr(model, 'train_function'):\n raise RuntimeError('You must compile your model before using it.')\n\n model._check_trainable_weights_consistency()\n\n if model.train_function is None:\n inputs = (model._feed_inputs +\n model._feed_targets +\n model._feed_sample_weights)\n if model._uses_dynamic_learning_phase():\n inputs += [K.learning_phase()]\n fast_params = model._collected_trainable_weights\n\n with K.name_scope('training'):\n with K.name_scope(model.optimizer.__class__.__name__):\n training_updates = model.optimizer.get_updates(\n params=fast_params,\n loss=model.total_loss)\n slow_params = [K.variable(p) for p in fast_params]\n fast_updates = (model.updates +\n training_updates +\n model.metrics_updates)\n\n slow_updates, copy_updates = [], []\n for p, q in zip(fast_params, 
slow_params):\n slow_updates.append(K.update(q, q + self.alpha * (p - q)))\n copy_updates.append(K.update(p, q))\n\n # Gets loss and metrics. Updates weights at each call.\n fast_train_function = K.function(\n inputs,\n [model.total_loss] + model.metrics_tensors,\n updates=fast_updates,\n name='fast_train_function',\n **model._function_kwargs)\n\n def F(inputs):\n self.count += 1\n R = fast_train_function(inputs)\n if self.count % self.k == 0:\n K.batch_get_value(slow_updates)\n K.batch_get_value(copy_updates)\n return R\n\n model.train_function = F", "def inject(self, model):\n if not hasattr(model, 'train_function'):\n raise RuntimeError('You must compile your model before using it.')\n\n model._check_trainable_weights_consistency()\n\n if model.train_function is None:\n inputs = (model._feed_inputs +\n model._feed_targets +\n model._feed_sample_weights)\n if model._uses_dynamic_learning_phase():\n inputs += [K.learning_phase()]\n fast_params = model._collected_trainable_weights\n\n with K.name_scope('training'):\n with K.name_scope(model.optimizer.__class__.__name__):\n training_updates = model.optimizer.get_updates(\n params=fast_params,\n loss=model.total_loss)\n slow_params = [K.variable(p) for p in fast_params]\n fast_updates = (model.updates +\n training_updates +\n model.metrics_updates)\n\n slow_updates, copy_updates = [], []\n for p, q in zip(fast_params, slow_params):\n slow_updates.append(K.update(q, q + self.alpha * (p - q)))\n copy_updates.append(K.update(p, q))\n\n # Gets loss and metrics. Updates weights at each call.\n fast_train_function = K.function(\n inputs,\n [model.total_loss] + model.metrics_tensors,\n updates=fast_updates,\n name='fast_train_function',\n **model._function_kwargs)\n\n def F(inputs):\n self.count += 1\n R = fast_train_function(inputs)\n if self.count % self.k == 0:\n K.batch_get_value(slow_updates)\n K.batch_get_value(copy_updates)\n return R\n\n #### REM : C'est pas super propre ca comme manière de faire\n #### Tu rompts l'encapsulation de la classe \n model.train_function = F", "def inject(self, model):\n if not hasattr(model, 'train_function'):\n raise RuntimeError('You must compile your model before using it.')\n\n model._check_trainable_weights_consistency()\n\n if model.train_function is None:\n inputs = (model._feed_inputs +\n model._feed_targets +\n model._feed_sample_weights)\n if model._uses_dynamic_learning_phase():\n inputs += [K.learning_phase()]\n fast_params = model._collected_trainable_weights\n\n with K.name_scope('training'):\n with K.name_scope(model.optimizer.__class__.__name__):\n training_updates = model.optimizer.get_updates(\n params=fast_params,\n loss=model.total_loss)\n slow_params = [K.variable(p) for p in fast_params]\n fast_updates = (model.updates +\n training_updates +\n model.metrics_updates)\n\n slow_updates, copy_updates = [], []\n for p, q in zip(fast_params, slow_params):\n slow_updates.append(K.update(q, q + self.alpha * (p - q)))\n copy_updates.append(K.update(p, q))\n\n # Gets loss and metrics. 
Updates weights at each call.\n fast_train_function = K.function(\n inputs,\n [model.total_loss] + model.metrics_tensors,\n updates=fast_updates,\n name='fast_train_function',\n **model._function_kwargs)\n\n def F(inputs):\n self.count += 1\n R = fast_train_function(inputs)\n if self.count % self.k == 0:\n K.batch_get_value(slow_updates)\n K.batch_get_value(copy_updates)\n return R\n \n model.train_function = F", "async def update_model(model_updates):\n async for model_update in model_updates:\n model_location = model_update['model_location']\n print(f\"Updating model to: {model_location}\")\n\n # using incrementing version number to keep track of live model\n # but obviously doesnt work for a real distributed system \n model_table['live_version'] += 1\n model_table['model_location'] = model_location", "def _compute_raw_update(self):\n\n self.print(\"SGD with Momentum: Computing raw update...\", line_above=True)\n # Read task toml\n\n iteration_number = self.task_dict[\"iteration_number\"] + 1\n\n indices = self.get_parameter_indices(self.raw_gradient_path)\n # scale the gradients, because they can be tiny and this leads to issues\n g_t = self.get_h5_data(self.raw_gradient_path) * self.grad_scaling_fac\n\n if np.sum(np.isnan(g_t)) > 1:\n raise Exception(\n \"NaNs were found in the raw gradient.\" \"Something must be wrong.\"\n )\n\n if iteration_number == 1: # Initialize moments if needed\n shutil.copy(self.raw_gradient_path, self.moment_path)\n write_xdmf(self.moment_path)\n\n with h5py.File(self.moment_path, \"r+\") as h5:\n data = h5[\"MODEL/data\"]\n\n # initialize with zeros\n for i in indices:\n data[:, i, :] = np.zeros_like(data[:, i, :])\n\n v_t = self.beta * self.get_h5_data(self.moment_path) + (1 - self.beta) * g_t\n\n # Store first moment\n shutil.copy(\n self.moment_path,\n self._get_path_for_iteration(self.iteration_number + 1, self.moment_path),\n )\n self.set_h5_data(\n self._get_path_for_iteration(self.iteration_number + 1, self.moment_path),\n v_t,\n )\n\n # Correct bias\n v_t = v_t / (1 - self.beta ** (self.iteration_number + 1))\n update = self.alpha * v_t\n\n if np.sum(np.isnan(update)) > 1:\n raise Exception(\n \"NaNs were found in the raw update.\"\n \"Check if the gradient is not excessively small\"\n )\n\n # Write raw update to file for smoothing\n shutil.copy(self.raw_gradient_path, self.raw_update_path)\n self.set_h5_data(self.raw_update_path, update)", "def create_client_update_no_kmeans_fn():\n @tf.function\n def client_update(\n model,\n dataset,\n initial_weights,\n client_optimizer,\n client_weight_fn=None,\n ):\n \"\"\"Updates client model.\n\n Args:\n model: A `tff.learning.Model`.\n dataset: A 'tf.data.Dataset'.\n initial_weights: A `tff.learning.ModelWeights` from server.\n client_optimizer: A `tf.keras.optimizer.Optimizer` object.\n client_weight_fn: Optional function that takes the output of\n `model.report_local_unfinalized_metrics` and returns a tensor that\n provides the weight in the federated average of model deltas. 
If not\n provided, the default is the total number of examples processed on\n device.\n\n Returns:\n A 'ClientOutput`.\n \"\"\"\n model_weights = _get_weights(model)\n tf.nest.map_structure(lambda v, t: v.assign(t), model_weights,\n initial_weights)\n\n # The training loop\n num_examples = tf.constant(0, dtype=tf.int32)\n for batch in dataset:\n with tf.GradientTape() as tape:\n output = model.forward_pass(batch)\n grads = tape.gradient(output.loss, model_weights.trainable)\n grads_and_vars = zip(grads, model_weights.trainable)\n client_optimizer.apply_gradients(grads_and_vars)\n num_examples += tf.shape(output.predictions)[0]\n\n aggregated_outputs = model.report_local_unfinalized_metrics()\n weights_delta = tf.nest.map_structure(lambda a, b: a - b,\n model_weights.trainable,\n initial_weights.trainable)\n weights_delta, has_non_finite_weight = (\n tensor_utils.zero_all_if_any_non_finite(weights_delta))\n\n if has_non_finite_weight > 0:\n client_weight = tf.constant(0, dtype=tf.float32)\n elif client_weight_fn is None:\n client_weight = tf.cast(num_examples, dtype=tf.float32)\n else:\n client_weight = client_weight_fn(aggregated_outputs)\n\n return ClientOutputNoKmeans(\n weights_delta, client_weight, aggregated_outputs,\n collections.OrderedDict([('num_examples', num_examples)]))\n\n return client_update", "def eval_minibatch(model, images, labels, mean, std, loss_func):\n model.eval()\n count = 0\n # input images are normalized to [0,1]. After normalization with \n # mean(per channel)=0.5, std(per channel)=0.5, x_norm lies in the range [-1,1]\n x_norm = normalization_function(images, mean, std) \n with torch.no_grad():\n if model.module.type=='SNN':\n output, _, _ = model(x_norm, 0, False)\n output = output/model.module.timesteps\n elif model.module.type =='ANN':\n output = model(x_norm) \n count = Accuracy(output, labels)\n loss = loss_func(output, labels)\n return count, loss", "def soft_update(local_model, target_model, tau):\n for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):\n target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)", "def update_epoch(self, epoch, sess):", "def soft_update(self, local_model, target_model, tau):\n for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):\n target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)", "def update(self, data: Tensor) -> None:\n if self.updateType == UpdateType.ALL:\n self.fit(data)\n else:\n self.fitLatents(data)", "def update_train(self, batch):\n\n online, target = batch\n online_ids, online_masks = online\n target_ids, target_masks = target\n\n input_ids_view_1 = online_ids.to(device)\n input_ids_view_2 = target_ids.to(device)\n masked_indexes_view_1 = online_masks.to(device)\n masked_indexes_view_2 = target_masks.to(device)\n # compute query feature\n predictions_from_view_1 = self.predictor(\n self.online_network(\n input_ids=input_ids_view_1,\n masked_index=masked_indexes_view_1,\n output_hidden_states=True,\n output_attentions=True,\n )[1]\n )\n\n predictions_from_view_2 = self.predictor(\n self.online_network(\n input_ids=input_ids_view_2,\n masked_index=masked_indexes_view_2,\n output_hidden_states=True,\n output_attentions=True,\n )[1]\n )\n\n # compute key features\n with torch.no_grad():\n targets_to_view_2 = self.target_network(\n input_ids=input_ids_view_1,\n masked_index=masked_indexes_view_1,\n output_hidden_states=True,\n output_attentions=True,\n )[1]\n\n targets_to_view_1 = 
self.target_network(\n input_ids=input_ids_view_2,\n masked_index=masked_indexes_view_2,\n output_hidden_states=True,\n output_attentions=True,\n )[1]\n\n loss = self.boyl_loss(predictions_from_view_1, targets_to_view_1)\n loss += self.boyl_loss(predictions_from_view_2, targets_to_view_2)\n return loss.mean()", "def update(self, x_train_single, updated_h):\n x_row = x_train_single.toarray()\n for i in range(self.num_models):\n self.models[i].partial_fit(x_row, [updated_h[i]])", "def update_local_model(self, fore_gradient, data_inst, coef, **training_info):\n pass", "def mamlplus_task_update_step(model, data, task_lr, criterion, args,\n step_idx, clone_params, epoch_idx=0,\n objs=None, top1=None, top5=None):\n\n input, target = data\n n = input.size(0)\n loss, logits, aux_logits = nasbench_model_forward(model, input, target, criterion)\n named_parameters = dict(model.module.named_parameters())\n use_second_order = args.maml_second_order and epoch_idx > args.mamlplus_first_order_to_second_order_epoch\n grads = torch.autograd.grad(loss, named_parameters.values(),\n create_graph=use_second_order, allow_unused=True)\n named_grads = dict(zip(named_parameters.keys(), grads))\n update_clone_params = dict()\n if not args.mamlplus_dynamic_lr_relu:\n lr_fn = lambda x: x\n else:\n lr_fn = lambda x: torch.nn.functional.relu(x) + args.mamlplus_dynamic_lr_min\n\n for m_name, module_params in clone_params.items():\n update_clone_params[m_name] = dict()\n for p_name in module_params.keys():\n grad = named_grads[m_name + '.' + p_name]\n if grad is not None:\n g_d = -grad.data * lr_fn(task_lr[m_name][p_name][str(step_idx)])\n update_clone_params[m_name][p_name] = clone_params[m_name][p_name] + g_d\n prec1, prec5 = accuracy(logits, target, topk=(1, 5))\n if objs:\n objs.update(loss.item(), n)\n if top1:\n top1.update(prec1.item(), n)\n if top5:\n top5.update(prec5.item(), n)\n return update_clone_params", "def testUpdateImproveStatistics(self):\n\n _, _, inputs = self._get_inputs()\n\n # Use small decay_rate to update faster.\n bn = snt.BatchNorm(offset=False, scale=False, decay_rate=0.1)\n out1 = bn(inputs, is_training=False, test_local_stats=False)\n\n # Build the update ops.\n bn(inputs, is_training=True)\n\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n out_v = sess.run(out1)\n\n # Before updating the moving_mean the results are off.\n self.assertBetween(np.max(np.abs(np.zeros([7, 6]) - out_v)), 2, 5)\n\n sess.run(tuple(tf.get_collection(tf.GraphKeys.UPDATE_OPS)))\n\n # After updating the moving_mean the results are better.\n out_v = sess.run(out1)\n self.assertBetween(np.max(np.abs(np.zeros([7, 6]) - out_v)), 1, 2)", "def _update(self, datapoints: Tensor, **kwargs) -> None:\n\n xtol = 1e-6 if self._xtol is None else self._xtol\n maxfev = 100 if self._maxfev is None else self._maxfev\n\n # Using the latest param for covariance before calculating f_map\n self._update_covar(datapoints)\n\n # scipy newton raphson\n with torch.no_grad():\n # warm start\n init_x0_size = self.batch_shape + torch.Size([self.n])\n if self._x0 is None or torch.Size(self._x0.shape) != init_x0_size:\n sqrt_scale = (\n self.covar_module.outputscale.sqrt()\n .unsqueeze(-1)\n .detach()\n .cpu()\n .numpy()\n )\n # Heuristic intialization using winning count with perturbation\n # to avoid extreme or unprobable likelihood values\n win_count = self.D.sum(dim=-2).detach().cpu().numpy()\n wc_mean, wc_std = (\n win_count.mean(axis=-1, keepdims=True),\n win_count.std(axis=-1, keepdims=True).clip(min=1e-6),\n 
)\n x0 = (win_count - wc_mean) / wc_std\n # adding random perturbation to in case get stuck at strange init values\n x0 = x0 + 0.05 * np.random.standard_normal(init_x0_size)\n # scale x0 to be on roughly the right scale\n x0 = x0 * sqrt_scale\n else:\n x0 = self._x0\n\n if len(self.batch_shape) > 0:\n # batch mode, do optimize.fsolve sequentially on CPU\n # TODO: enable vectorization/parallelization here\n x0 = x0.reshape(-1, self.n)\n dp_v = datapoints.view(-1, self.n, self.dim).cpu()\n D_v = self.D.view(-1, self.m, self.n).cpu()\n DT_v = self.DT.view(-1, self.n, self.m).cpu()\n ch_v = self.covar_chol.view(-1, self.n, self.n).cpu()\n ci_v = self.covar_inv.view(-1, self.n, self.n).cpu()\n x = np.empty(x0.shape)\n for i in range(x0.shape[0]):\n fsolve_args = (dp_v[i], D_v[i], DT_v[i], ch_v[i], ci_v[i], True)\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n x[i] = optimize.fsolve(\n x0=x0[i],\n func=self._grad_posterior_f,\n fprime=self._hess_posterior_f,\n xtol=xtol,\n maxfev=maxfev,\n args=fsolve_args,\n **kwargs,\n )\n x = x.reshape(*init_x0_size)\n else:\n # fsolve only works on CPU\n fsolve_args = (\n datapoints.cpu(),\n self.D.cpu(),\n self.DT.cpu(),\n self.covar_chol.cpu(),\n self.covar_inv.cpu(),\n True,\n )\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n x = optimize.fsolve(\n x0=x0,\n func=self._grad_posterior_f,\n fprime=self._hess_posterior_f,\n xtol=xtol,\n maxfev=maxfev,\n args=fsolve_args,\n **kwargs,\n )\n\n self._x0 = x.copy() # save for warm-starting\n f = torch.tensor(x, dtype=datapoints.dtype, device=datapoints.device)\n\n # To perform hyperparameter optimization, this needs to be recalculated\n # when calling forward() in order to obtain correct gradients\n # self.likelihood_hess is updated here is for the rare case where we\n # do not want to call forward()\n self.likelihood_hess = self.likelihood.negative_log_hessian_sum(\n utility=f, D=self.D\n )\n\n # Lazy update hlcov_eye, which is used in calculating posterior during training\n self.pred_cov_fac_need_update = True\n # fill in dummy values for hlcov_eye so that load_state_dict can function\n hlcov_eye_size = torch.Size((*self.likelihood_hess.shape[:-2], self.n, self.n))\n self.hlcov_eye = torch.empty(hlcov_eye_size)\n\n # Take two newton step on the posterior MAP point to fill\n # in gradients for pytorch. Using 2 instead of 1 since empirically sometimes\n # the first step results in gradients in the order of 1e-7 while the 2nd step\n # allows it go down further to the order of 1e-12 and stay there.\n self.utility = self._util_newton_updates(\n datapoints, f.clone().requires_grad_(True), max_iter=2\n )", "def _momentum_update(self):\n for param_ol, param_tgt in zip(self.online_net.parameters(),\n self.target_net.parameters()):\n param_tgt.data = param_tgt.data * self.momentum + \\\n param_ol.data * (1. - self.momentum)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Seeds the given output Image with random pixels from the source Image.
def __seed_output_image(self, src_image: Image, out_image: Image) -> None: src_pixel_array = src_image[:, :].reshape((src_image.area, 3)) src_index_array = np.random.choice(np.arange(src_image.area), out_image.area) out_image[:, :] = np.take(src_pixel_array, src_index_array, axis=0).reshape(out_image.shape)
[ "def random_image(x, y, out):\n\n pixels = []\n for pixel in range(0, x*y):\n pixels.append(random_pixel())\n\n new_image(x, y, out, pixels)", "def randomize_pixels(image):\n shape_ = image.size()\n image_flat = image.view(-1, image.size(-1))\n shuffled_image = shuffle(image_flat)\n return shuffled_image.view(shape_)", "def _get_seed_img(self, seed_img):\n samples, c, w, h = utils.get_img_shape(self.img)\n if seed_img is None:\n seed_img = utils.generate_rand_img(c, w, h)\n else:\n if K.image_dim_ordering() == 'th':\n seed_img = seed_img.transpose(2, 0, 1)\n\n # Convert to image tensor containing samples.\n seed_img = np.array([seed_img], dtype=np.float32)\n return seed_img", "def genrandimg(args) -> None:\n\n size = (int(args.x), int(args.y))\n fp = Image.new(\"RGB\", size)\n data = []\n\n if not args.c: # If color\n for i in range(size[0]*size[1]):\n r = random.choice([0x00, 0xff])\n data.append((r, r, r)) # Each RGB value is the same random value\n else: # Else black-and-white\n for i in range(size[0]*size[1]):\n r = [random.choice(range(0, 256)) for _ in range(0, 3)]\n r = (r[0], r[1], r[2]) # Choose 3 random numbers for different RGB values\n data.append(r)\n\n fp.putdata(data)\n print(\"Saving to %s...\" % args.o)\n fp.save(args.o)\n fp.close()", "def shuffle_pixels(\n image: Union[str, Image.Image],\n output_path: Optional[str] = None,\n factor: float = 1.0,\n seed: int = 10,\n metadata: Optional[List[Dict[str, Any]]] = None,\n) -> Image.Image:\n np.random.seed(seed)\n\n image = imutils.validate_and_load_image(image)\n\n assert 0.0 <= factor <= 1.0, \"'factor' must be a value in range [0, 1]\"\n\n func_kwargs = imutils.get_func_kwargs(metadata, locals())\n\n if factor == 0.0:\n aug_image = image\n else:\n aug_image = np.asarray(image, dtype=int)\n height, width = aug_image.shape[:2]\n number_of_channels = aug_image.size // (height * width)\n\n number_of_pixels = height * width\n aug_image = np.reshape(aug_image, (number_of_pixels, number_of_channels))\n\n mask = np.random.choice(\n number_of_pixels, size=int(factor * number_of_pixels), replace=False\n )\n pixels_to_be_shuffled = aug_image[mask]\n\n np.random.shuffle(pixels_to_be_shuffled)\n aug_image[mask] = pixels_to_be_shuffled\n\n aug_image = np.reshape(aug_image, (height, width, number_of_channels))\n aug_image = np.squeeze(aug_image)\n\n aug_image = Image.fromarray(aug_image.astype(\"uint8\"))\n\n imutils.get_metadata(\n metadata=metadata,\n function_name=\"shuffle_pixels\",\n aug_image=aug_image,\n **func_kwargs,\n )\n\n return imutils.ret_and_save_image(aug_image, output_path)", "def seed_images(image_set):\n\tseed_set = []\n\trandom_seed = lambda image,target: random.choice(list(zip(*np.where(image == target))))\n\tfor image in image_set:\n\t\tbseed = random_seed(image, BLACK)\n\t\twseed = random_seed(image, WHITE)\n\t\tseed_set.append((bseed, wseed))\n\treturn seed_set", "def test_image(filename, x_size=350, y_size=350):\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap(i, 0, x_size, -1, 1)\n y = remap(j, 0, y_size, -1, 1)\n pixels[i, j] = (random.randint(0, 255), # Red channel\n random.randint(0, 255), # Green channel\n random.randint(0, 255)) # Blue channel\n\n im.save(filename)", "def addMaskImage(img):\r\n [h, w, c] = img.shape\r\n h_start = np.random.randint(h/2,h-1)\r\n w_start = np.random.randint(w/2, w-1)\r\n img[h_start:h-1, :,0]= np.random.randint(0,120)\r\n img[h_start:h-1, :,1]= 
np.random.randint(0,120) \r\n img[h_start:h-1, :,2]= np.random.randint(0,120) \r\n img[:,w_start:w-1,0]= np.random.randint(0,120)\r\n img[:,w_start:w-1,1]= np.random.randint(0,120) \r\n img[:,w_start:w-1,2]= np.random.randint(0,120) \r\n img = np.uint8(img)\r\n return img, h_start, w_start", "def random(cls, resolution):\n # This will create the correct base array, which only contains regular 8 bit pixel values (range 0 to 256)\n random_array = np.random.randint(0, 256, resolution)\n # Creating a new PhantomImage object from this array and then returning the Image object\n return cls(random_array)", "def applyShotNoise(self):\n\n self.image = np.random.poisson(self.image).astype(np.float64)", "def distort_image(images, seed):\n color_ordering = contrib_stateless.stateless_random_normal(\n shape=images.shape[0:1],\n seed=tf.cast(tf.stack([0, seed]), tf.int32),\n dtype=tf.float32)\n\n # random flip doesn't work on a batch, and running it inside of a map_fn\n # triggers a memory error; skip it for now: we could alternatively move it\n # to the CPU.\n #\n # image = tf.image.random_flip_left_right(image)\n\n with tf.name_scope(\"distort_color\", values=[images]):\n def _a(image):\n image = tf.image.random_brightness(image, max_delta=32. / 255.)\n image = tf.image.random_saturation(image, lower=0.5, upper=1.5)\n image = tf.image.random_hue(image, max_delta=0.032)\n image = tf.image.random_contrast(image, lower=0.5, upper=1.5)\n return image\n\n def _b(image):\n image = tf.image.random_brightness(image, max_delta=32. / 255.)\n image = tf.image.random_contrast(image, lower=0.5, upper=1.5)\n image = tf.image.random_saturation(image, lower=0.5, upper=1.5)\n image = tf.image.random_hue(image, max_delta=0.032)\n return image\n\n images = tf.where(\n tf.less(color_ordering, 0),\n _a(images),\n _b(images)\n # tf.map_fn(_a, images),\n # tf.map_fn(_b, images)\n )\n\n # The random_* ops do not necessarily clamp.\n images = tf.clip_by_value(images, 0.0, 1.0)\n\n return images", "def test_image(filename, x_size=350, y_size=350):\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n pixels[i, j] = (random.randint(0, 255), # Red channel\n random.randint(0, 255), # Green channel\n random.randint(0, 255)) # Blue channel\n im.save(filename)\n return 'saved'", "def distort_image(self, image):\n image_size = 32\n image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n print(\"Apply random cropping\")\n image = tf.image.resize_image_with_crop_or_pad(image, image_size + 4, image_size + 4)\n image = tf.random_crop(image, [image_size, image_size, 3])\n print(\"Apply random flipping\")\n image = tf.image.random_flip_left_right(image)\n return image", "def generate_images(self, num_images, device=None):\n if device is None:\n device = self.device\n\n noise = torch.randn((num_images, self.nz), device=device)\n fake_images = self.forward(noise)\n\n return fake_images", "def random_crop(self, img, output_img_h = 0.5, output_img_w = 0.5, p = 0.5):\n if self.decision(p):\n height, width, channels = img.shape\n new_height = random.randint(int(height * output_img_h), height)\n new_width = random.randint(int(width * output_img_w), width)\n y = random.randint(0, height - new_height)\n x = random.randint(0, width - new_width)\n roi = img[y:y + new_height, x:x + new_width]\n # check if cut is ahve to much dark pixels, more then 20 %\n non_zeros = np.count_nonzero(roi)\n non_zeros_procent = non_zeros / roi.size\n if 
non_zeros_procent < 0.8:\n pass\n else:\n img = roi\n return img", "def generateRandomMask(size, p=0.5):\n mask_array = (np.random.random(size) > p).astype(int)\n mask = sitk.GetImageFromArray(mask_array) \n return mask", "def gen_random(output_dir):\n with h5py.File(os.path.join(output_dir, 'random.hdf5'), 'w') as hdf5_fh:\n # Note: We don't store labels. Image at index idx has label (idx % 10).\n\n train_set = (np.random.rand(NUM_TRAIN, NUM_CHANNELS, IMG_SIZE, IMG_SIZE) * 255).astype(np.uint8)\n hdf5_fh.create_dataset('train', data=train_set)\n\n test_set = (np.random.rand(NUM_TEST, NUM_CHANNELS, IMG_SIZE, IMG_SIZE) * 255).astype(np.uint8)\n hdf5_fh.create_dataset('test', data=test_set)", "def sample_damaging(image):\r\n return crease_image(blotch_image(image, 100, True), 10, False)", "def generate_dest_image(self):\n self.dest_image = np.zeros((self.dest_height, self.dest_width, 3))\n for h_index in xrange(self.dest_height):\n for w_index in xrange(self.dest_width):\n b, g, r, c = self.current_total_rgb_counter[h_index, w_index]\n if c != 0:\n b, g, r = map(\n lambda x: int(x/c),\n [b, g, r]\n )\n self.dest_image[h_index, w_index] = (b, g, r)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Renders the given pixel in the specified layer of the output Pyramid using the colour of a pixel from the source Pyramid with the closest neighbourhood to the output pixel.
def __render_output_pixel(self, src_pyramid: Pyramid, out_pyramid: Pyramid, level: int, out_point: Point) -> None:
    if level == self.__levels - 1:
        distances = self.__make_distance_matrix(src_pyramid[level], out_pyramid[level], self.__neighbourhood_padding[level], out_point, True)
    else:
        prev_distances = self.__make_distance_matrix(src_pyramid[level + 1], out_pyramid[level + 1], self.__neighbourhood_padding[level + 1], out_point // 2, False)
        next_distances = self.__make_distance_matrix(src_pyramid[level], out_pyramid[level], self.__neighbourhood_padding[level], out_point, True)
        distances = next_distances + np.kron(prev_distances, np.ones((2, 2)))
    candidate = np.unravel_index(np.argmin(distances), distances.shape)
    out_pyramid[level][out_point] = src_pyramid[level][candidate]
[ "def render_pixel(self, x, y, colour):\n # can't use the below as too slow\n self.renderer.fill((5 * x, 5 * y, 4, 4), sdl2.ext.Color(*colour))", "def render(self, config, input_rgba, coord):\n stack_rgba = [numpy.zeros(chan.shape, chan.dtype) for chan in input_rgba]\n \n for layer in self.layers:\n try:\n if layer.in_zoom(coord.zoom):\n stack_rgba = layer.render(config, stack_rgba, coord)\n\n except IOError:\n # Be permissive of I/O errors getting sub-layers, for example if a\n # proxy layer referenced here doesn't have an image for a zoom level.\n # TODO: regret this later.\n pass\n\n return blend_images(input_rgba, stack_rgba[:3], stack_rgba[3], 1, None)", "def render_pixel(game_map: PreGameMap, coord: Coordinate):\n if not((0 <= coord[0] < game_map.size[0]) and (0 <= coord[1] < game_map.size[1])):\n return\n terrain = TERRAIN.get(coord, None)\n if terrain == 'sea':\n game_map.display_coord(coord, 'blue')\n return\n if terrain is None:\n game_map.display_coord(coord, 'black')\n return\n value = FOREST.get(coord, 0)\n game_map.display_coord(coord, '#' + str(min(value, 9)) + '00000')", "def apply_color_to_layer(\n image: typing.ImageThreeChannel, color: typing.Color, layer=constants.Color.WHITE\n) -> typing.ImageThreeChannel:\n return np.where(image == layer, color, image)", "def intermediate_pixel(alpha, source_RGB, target_RGB):\n return int((1-alpha)*source_RGB+alpha*target_RGB)", "def render_network_output(self, rgb_density, ray_points):\n rgb, density = tf.split(rgb_density, [3, 1], axis=-1)\n rgb = tf.sigmoid(rgb)\n density = tf.nn.relu(density)\n rgb_density = tf.concat([rgb, density], axis=-1)\n\n dists = utils.get_distances_between_points(ray_points)\n rgb_render, a_render, weights = ray_radiance.compute_radiance(rgb_density,\n dists)\n if self.white_background:\n rgb_render = rgb_render + 1 - a_render\n return rgb_render, weights", "def _plotPixel(pic, x, y, color):\n if (0 <= x) and (x < getWidth(pic)) and (0 <= y) and (y < getHeight(pic)):\n pix = getPixel(pic, x, y)\n setColor(pix, color)", "def pixel(image, pos, color):\n r,g,b = color\n x,y = pos\n image.put(\"#%02x%02x%02x\" % (r,g,b), (x, y))", "def pixel(image, pos, color):\r\n r,g,b = color\r\n x,y = pos\r\n image.put(\"#%02x%02x%02x\" % (r,g,b), (x, y))", "def draw(self, layer: Layer) -> None:\r\n if layer and layer.layer_index >= self.num_layers:\r\n return\r\n\r\n pyxel.bltm(layer.offset.x, layer.offset.y, self.tilemap_id + layer.layer_index,\r\n self.rect_uv.x, self.rect_uv.y, self.rect_uv.w, self.rect_uv.h,\r\n colkey=layer.transparency_color)", "def write_pixel(color, img_size, location, image, scale_factor):\n x_location = scale(location.item(0), scale_factor)\n y_location = scale(location.item(1), scale_factor)\n\n img_cont = int(img_size/100)\n if img_cont == 0:\n image.putpixel((x_location, y_location), color)\n else:\n write_to_range(x_location-img_cont, x_location+img_cont, y_location-img_cont, y_location+img_cont, color, image, img_size)", "def rf_render_png(red_tile_col, green_tile_col, blue_tile_col):\n return _apply_column_function('rf_render_png', red_tile_col, green_tile_col, blue_tile_col)", "def doPixelDraw(self, x, y):\r\n self.parent.pixelChange(x, y, self.set_colour)", "def draw_to_buffer(self, col, row, color):\n self.gb_screen[col + (row * 160)] = color", "def color_right_side(gray: np.array, new_rgb: np.array, representative_colors: np.array,\n pixel_color_array: np.array) -> np.array:\n print(\"starting right side coloring\")\n\n ###BEGINNING OF GETTING RIGHT HAND GRAY SCORES###\n 
left_gray_scores_dict = {}\n\n # Iterate through left half of gray, skipping edges, use 499 as # pixels - 1, use 249 as # pixels /2 - 1\n for i in range(1, 499):\n for j in range(1, 249):\n \n #Calculates the gray score of a given 3x3 patch, the sum of the gray values of each pixel in the grid\n score = 0\n \n for x in range(-1, 2):\n for y in range(-1, 2):\n score += int(gray[i + x][j + y])\n \n left_gray_score = int(score / 9) \n\n if left_gray_score in left_gray_scores_dict:\n left_gray_scores_dict.get(left_gray_score).append((i, j))\n left_gray_scores_dict.update({left_gray_score: left_gray_scores_dict.get(left_gray_score)})\n else:\n left_gray_scores_dict[left_gray_score] = [(i, j)]\n \n print(\"This is the left gray dictionary\")\n print(left_gray_scores_dict.keys())\n \n ###END OF GETTING LEFT HAND GRAY SCORES###\n\n\n # Fill in right half of new_rgb with new colors, ignoring edges\n for i in range(1, 499):\n for j in range(250, 499):\n \n # Retrieve most 6 most similar patch centers and their similarity scores\n score = 0\n \n for x in range(-1, 2):\n for y in range(-1, 2):\n score += int(gray[i + x][j + y])\n \n #Calculate right gray score\n right_gray_score = int(score / 9) \n \n \n new_score_key_up = right_gray_score\n new_score_key_down = right_gray_score\n \n # Retrieve 6 similar patches from dictionary, the sum of the gray values of each pixel in the grid\n similar_gray_patch_coordinates = left_gray_scores_dict.get(right_gray_score)\n if similar_gray_patch_coordinates is None:\n similar_gray_patch_coordinates = []\n similar_gray_patches = [(0, x) for x in similar_gray_patch_coordinates]\n # print(\"similar patches\", len(similar_gray_patches), (right_i, right_j))\n \n # Add more coordinates until you hit 6\n while len(similar_gray_patches) < 6:\n \n new_score_key_up += 1\n new_score_key_down -= 1\n \n # Go one key up\n similar_gray_patch_up_coordinates = left_gray_scores_dict.get(new_score_key_up)\n if similar_gray_patch_up_coordinates is None:\n similar_gray_patch_up_coordinates = []\n similar_gray_patches_up = [(new_score_key_up - right_gray_score, x) for x in similar_gray_patch_up_coordinates]\n similar_gray_patches.extend(similar_gray_patches_up)\n \n # Go one key down\n similar_gray_patch_down_coordinates = left_gray_scores_dict.get(new_score_key_down)\n if similar_gray_patch_down_coordinates is None:\n similar_gray_patch_down_coordinates = []\n similar_gray_patches_down = [(right_gray_score - new_score_key_down, x) for x in\n similar_gray_patch_down_coordinates]\n similar_gray_patches.extend(similar_gray_patches_down)\n # print(\"papa\")\n \n similar_gray_patches = similar_gray_patches[:6]\n \n \n # print(\"retrieving 6 patches\")\n # print(similar_gray_patches)\n\n # Find representative color for each patch and add patch to\n patches_for_each_color = [[] for _ in range(5)]\n \n max_color_frequency = 0\n for patch in similar_gray_patches:\n color_index = int(pixel_color_array[(patch[1][0] * 500 + patch[1][1])][3])\n # color_index = new_rgb[patch[1][0]][patch[1][1]][3]\n # color_counts[color_index] += 1\n\n patches_for_each_color[color_index].append(patch)\n\n if len(patches_for_each_color[color_index]) > max_color_frequency:\n max_color_frequency = len(patches_for_each_color[color_index])\n\n # print(\"patches for each color\", patches_for_each_color, (i, j))\n # Retrieve all color indices with that max frequency\n most_frequent_color_indices = []\n for x in range(len(patches_for_each_color)):\n if len(patches_for_each_color[x]) == max_color_frequency:\n 
most_frequent_color_indices.append(x)\n\n # If there is a most represented color, make the corresponding pixel that color\n if len(most_frequent_color_indices) == 1:\n new_rgb[i][j] = representative_colors[most_frequent_color_indices[0]]\n # print(\"no tie\", (i, j), representative_colors[most_frequent_color_indices[0]])\n\n # Otherwise, break ties based on similarity score\n else:\n # print(\"there is a tie\", (i, j))\n potential_patches = []\n\n # Put all patches that map to the most represented colors into potential_patches\n for x in range(len(patches_for_each_color)):\n if len(patches_for_each_color[x]) == max_color_frequency:\n for patch in patches_for_each_color[x]:\n potential_patches.append(patch)\n\n # Select the color that is mapped to the most similar patch\n best_similarity_score = 100000\n most_similar_patch = None\n for patch in potential_patches:\n if patch[0] < best_similarity_score:\n best_similarity_score = patch[0]\n most_similar_patch = patch\n\n # Make the original pixel the same color as that of the most similar patch\n new_rgb[i][j] = new_rgb[most_similar_patch[1][0]][most_similar_patch[1][1]]\n\n return new_rgb", "def drawOutline(x, y, compIndices):\r\n lumin = luminMap[x][y]\r\n lumin_sum = 0\r\n for (dX, dY) in compIndices:\r\n lumin_sum += abs(lumin - luminMap[x + dX][y + dY])\r\n\r\n averageLumin = lumin_sum / len(compIndices)\r\n # The sqrt function will make the larger differences more prominent\r\n grayValue = int((1 - math.sqrt(averageLumin)) * 255)\r\n\r\n newImage.putpixel((x, y), (grayValue, grayValue, grayValue))", "def color_case(self, coordonate, color): \n x, y = coordonate[0], coordonate[1]\n self.render[x*self.unit_size+1:(x+1)*self.unit_size,\n y*self.unit_size+1:(y+1)*self.unit_size]=color", "def draw(self, color = Color.GREEN):\n self.image[self.x, self.y] = color", "def draw( self, x, y, color=0 ):\n if x >= 0 and y >= 0 and x < self.width and y < self.height:\n self.img.putpixel( ( x, y ), color )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a matrix containing the weighted squared difference of the pixel values between each window in the source Image and the window extracted from the output Image at the specified Point with the given padding.
def __make_distance_matrix(self, src_image: Image, out_image: Image, padding: int, out_point: Point, causal: bool) -> np.ndarray:
    # Extract the reference window and fill mask for the neighbourhood matching.
    out_window = out_image.extract(out_point, padding, 'wrap')
    out_filled = out_image.filled(out_point, padding, 'wrap', causal)
    # Construct a 2D Gaussian kernel that matches the padding size.
    gaussian_1D = signal.gaussian(2 * padding + 1, std=padding)
    gaussian_2D = np.outer(gaussian_1D, gaussian_1D)
    gaussian_2X = np.extract(out_filled, gaussian_2D)
    # Return the weighted squared difference of each neighbourhood in the
    # source Image with respect to the reference window.
    return self._apply_distance_filter(src_image, out_window, out_filled, gaussian_2X)
[ "def padding(self):\n pad = self.ntiles - self.windowsize\n return (int((pad - 1)/2.), int((pad + 1)/2.))", "def weighted_sum_extraction(cutout, trace, psf, ron = 12, gain = 1.2):\n ###NEW VERSION BELOW\n # width = len(cutout[0]) #we have square cutout\n # #buffer area on either ends of the trace\n # buffer = int(round(0.85*slit_length/2)) #imported from constant\n\n #width = len(cutout[0])\n spec = []\n var = []\n for i in range(len(trace)): #loop through x\n #print(i)\n #put psf at this location\n dim = np.array(cutout.shape) + np.array(psf.shape)\n #print(dim)\n weight = np.zeros( dim) #padded, to be cropped later\n\n #case where trace i is not in the image\n if trace[i] < 0 or trace[i] > cutout.shape[0]:\n spec += [0]\n var += [0]\n else:\n x = i + int(psf.shape[1]//2)\n #print(trace[i], psf.shape[0]//2)\n y = int(trace[i] + psf.shape[0]//2)\n #print(i, x, y - int(psf.shape[0]//2), y + int(psf.shape[0]//2)+1, np.shape(weight[y - int(psf.shape[0]//2): y + int(psf.shape[0]//2)+1, x - int(psf.shape[1]//2): x + int(psf.shape[1]//2)+1]))\n weight[y - int(psf.shape[0]//2): y + int(psf.shape[0]//2)+1, x - int(psf.shape[1]//2): x + int(psf.shape[1]//2)+1] = psf\n weight = weight[ int(psf.shape[0]//2): int(-psf.shape[0]//2), int(psf.shape[1]//2): int(-psf.shape[1]//2)]\n #print(weight.shape, cutout.shape)\n #plt.imshow(weight*cutout,origin = 'lower')\n #plt.show()\n\n spec += [np.sum(weight * cutout)/np.sum(weight)]\n #TODO: Is the variance calculation correct? Might need another weighted term. \n var += [np.sum(weight * (cutout/gain + (ron/gain)**2))] #variance assuming readout noise and photon noise\n\n return np.array(spec[::-1]), np.array(var[::-1]) #flip so long wavelenght is to the right", "def pad_edges(self, pad):\n weights=[]\n for dim, xy in zip([0, 1], [self.x, self.y]):\n xy0 = np.mean(xy)\n W = xy[-1]-xy[0]\n dist = np.abs(xy-xy0)\n wt=np.ones_like(dist)\n wt[ dist >= W/2 - pad] = 0\n weights += [wt]\n self.weight *= weights[0][:,None].dot(weights[1][None,:])", "def deconv_output_padding(input_shape, output_shape, kernel_size, stride=1, padding=0, dilation=1):\n output_shape_no_padding = convTranspose2d_output_shape(input_shape, kernel_size, stride, padding, dilation)\n return tuple(output_shape[i] - output_shape_no_padding[i] for i in (0,1))", "def windowing(input):\n return input * hamming(input.shape[1], sym=0)", "def get_pad_derivs(self, input_layer, delta_layer, layer_idx):\n \"\"\" Set local variables \"\"\"\n if (self.layer_names[layer_idx] == \"conv\"):\n filter_size = [self.kernel_layers[layer_idx].shape[1], self.kernel_layers[layer_idx].shape[2]]\n elif (self.layer_names[layer_idx] == \"pool\"):\n filter_size = self.pool_sizes[layer_idx]\n stride_size = self.stride_sizes[layer_idx]\n padding_fn = self.pad_fns[layer_idx]\n\n \"\"\" Start getting d pre-pad aˡ w.r.t δˡ⁺¹ \"\"\"\n num_inputs, input_rows, input_cols, num_channels = input_layer.shape[0], input_layer.shape[1], input_layer.shape[2], input_layer.shape[3]\n filter_rows, filter_cols, stride_rows, stride_cols = filter_size[0], filter_size[1], stride_size[0], stride_size[1]\n if (padding_fn == \"same\" and input_layer.shape != delta_layer.shape):\n width_padding = (input_cols * stride_cols) + filter_cols - input_cols - stride_cols\n height_padding = (input_rows * stride_rows) + filter_rows - input_rows - stride_rows\n row_padding_right = int(np.ceil(width_padding / 2))\n row_padding_left = int(np.floor(width_padding / 2))\n col_padding_bottom = int(np.ceil(height_padding / 2))\n col_padding_top = 
int(np.floor(height_padding / 2))\n padded_inputs_derivs = delta_layer[:, col_padding_top:(-col_padding_bottom if col_padding_bottom != 0 else None), row_padding_left:(-row_padding_right if row_padding_right != 0 else None),:]\n elif (padding_fn == \"valid\" or input_layer.shape == delta_layer.shape):\n max_num_rows = (int)(((input_rows - filter_rows) / stride_rows) + 1)\n max_num_cols = (int)(((input_cols - filter_cols) / stride_cols) + 1)\n cut_bottom_rows = input_rows - (filter_rows + (stride_rows * (max_num_rows - 1)))\n cut_right_cols = input_cols - (filter_cols + (stride_cols * (max_num_cols - 1)))\n padded_inputs_derivs = np.pad(delta_layer, [(0,0), (0, cut_bottom_rows), (0, cut_right_cols), (0, 0)], mode='constant')\n return padded_inputs_derivs", "def get_pad_derivs(orig_input_layer, delta_layer, layer_idx):\n\n \"\"\" Set local variables \"\"\"\n if (self.layer_names[layer_idx] == \"conv\"):\n filter_size = [self.kernel_layers[layer_idx].shape[1], self.kernel_layers[layer_idx].shape[2]]\n elif (self.layer_names[layer_idx] == \"pool\"):\n filter_size = self.pool_sizes[layer_idx]\n stride_size = self.stride_sizes[layer_idx]\n padding_fn = self.padding_fns[layer_idx]\n\n \"\"\" Start getting dδˡ⁺¹ w.r.t orig_input_layerˡ \"\"\"\n num_inputs, input_rows, input_cols, num_channels = orig_input_layer.shape[0], orig_input_layer.shape[1], orig_input_layer.shape[2], orig_input_layer.shape[3]\n filter_rows, filter_cols, stride_rows, stride_cols = filter_size[0], filter_size[1], stride_size[0], stride_size[1]\n if (padding_fn == \"same\"):\n width_padding = (input_cols * stride_cols) + filter_cols - input_cols - stride_cols\n height_padding = (input_rows * stride_rows) + filter_rows - input_rows - stride_rows\n row_padding_right = int(np.ceil(width_padding / 2))\n row_padding_left = int(np.floor(width_padding / 2))\n col_padding_bottom = int(np.ceil(height_padding / 2))\n col_padding_top = int(np.floor(height_padding / 2))\n padded_inputs_derivs = delta_layer[:, col_padding_top:-col_padding_bottom, row_padding_left:-row_padding_right, :]\n elif (padding_fn == \"valid\"):\n max_num_rows = (int)((input_rows - filter_rows) / stride_rows) + 1\n max_num_cols = (int)((input_cols - filter_cols) / stride_cols) + 1\n cut_bottom_rows = input_rows - (filter_rows + (stride_rows * (max_num_rows - 1)))\n cut_right_cols = input_cols - (filter_cols + (stride_cols * (max_num_cols - 1)))\n padded_inputs_derivs = np.pad(delta_layer, [(0,0), (0, cut_bottom_rows), (0, cut_right_cols), (0, 0)], mode='constant')\n return padded_inputs_derivs", "def dwindow(window):\r\n \r\n h=window\r\n nh=len(h)\r\n lh=(nh-1)/2\r\n stepheight=(h[0]+h[-1])/2.\r\n ramp=float((h[-1]-h[0]))/nh\r\n h2=np.zeros(nh+2)\r\n h2[1:nh+1]=h-stepheight-ramp*np.arange(start=-lh,stop=lh+1,step=1)\r\n \r\n dwin=(h2[2:nh+2]-h2[0:nh])/2.+ramp\r\n dwin[0]=dwin[0]+stepheight\r\n dwin[-1]=dwin[-1]-stepheight\r\n \r\n return dwin", "def conv2d_backward(d_top, x, w, b, pad, stride):\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################\n (B, new_height, new_width, NF) = d_top.shape # get shape values that matter\n (_, H, W, C) = x.shape\n (_, FH, FW, _) = w.shape\n \n dx = np.zeros(x.shape)\n dw = np.zeros(w.shape)\n db = np.zeros(b.shape)\n \n for n in range(B): # for each sample in the batch\n d_padded = np.pad(dx[n,:,:,:], ((pad,pad),(pad,pad),(0,0)),'constant') # set the padded derivative\n padded = 
np.pad(x[n,:,:,:], ((pad,pad),(pad,pad),(0,0)),'constant') # and the padded balues\n for f in range(NF):\n for h in range(new_height): # for each pixel\n for c in range(new_width):\n row_start = h*stride # find the start and end points\n row_end = h*stride + FH\n col_start = c*stride\n col_end = c*stride + FW\n \n # the derivative is just the weight times the upper derivative\n d_padded[row_start:row_end,col_start:col_end,:] += w[f,:,:,:] * d_top[n,new_height-1,new_width-1,f]\n # dw is the padded values times the above derivative\n dw[n,:,:,:] += padded[row_start:row_end,col_start:col_end,:] * d_top[n,new_height-1,new_width-1,f]\n # db is just based on the upper derivative\n db[n] += d_top[n,new_height-1,new_width-1,f]\n # reshape the derivative to the dx shape to get dx\n dx[n,:,:,:] = d_padded[1:-1,1:-1,:]\n \n return dw,db", "def get_win_from_roi(roi_tuple, win_size, stride):\n\n res = []\n\n roi_img = roi_tuple[0]\n roi_ltx, roi_lty = roi_tuple[1]\n roi_h, roi_w, _ = roi_img.shape\n\n target_w, target_h = win_size\n stride_w, stride_h = stride\n\n # print(target_w, roi_w, target_h, roi_h)\n # assert target_w<=roi_w and target_h<=roi_h\n\n if target_w > roi_w or target_h > roi_h:\n pad_w = 0\n pad_h = 0\n if target_w > roi_w:\n pad_w = target_w - roi_w\n roi_w = target_w\n if target_h > roi_h:\n pad_h = target_h - roi_h\n roi_h = target_h\n roi_img = cv2.copyMakeBorder(roi_img, 0, pad_h, 0, pad_w, cv2.BORDER_CONSTANT, 0)\n\n new_w = target_w + np.ceil((roi_w - target_w) / stride_w) * stride_w\n new_h = target_h + np.ceil((roi_h - target_h) / stride_h) * stride_h\n\n stride_w_num = int((new_w - target_w) // stride_w + 1)\n stride_h_num = int((new_h - target_h) // stride_h + 1)\n\n # padding the img at right bottom\n # print(\"Before copyMakeBorder\")\n # print(new_h, roi_h, new_w, roi_w, roi_img.shape, type(roi_img[0,0,0]))\n roi_img_pad = cv2.copyMakeBorder(roi_img,0,int(new_h-roi_h),0,int(new_w-roi_w),cv2.BORDER_CONSTANT,0)\n # print(\"After copyMakeBorder\")\n\n\n # pdb.set_trace()\n\n for i in range(stride_w_num):\n for j in range(stride_h_num):\n win_topleft_x, win_topleft_y = i*stride_w, j*stride_h\n win_img = roi_img_pad[win_topleft_y: win_topleft_y+target_h, \\\n win_topleft_x: win_topleft_x+target_w, \\\n :]\n res.append((win_img.copy(), (roi_ltx+win_topleft_x, roi_lty+win_topleft_y)))\n\n return res", "def ret_values(data, window=3):\n em = np.zeros((data.shape[0] * data.shape[1], window ** 2))\n d_pad = np.pad(data, (1, 1), mode=\"symmetric\")\n col = 0\n for i in np.arange(1, d_pad.shape[0] - 1):\n for j in np.arange(1, d_pad.shape[1] - 1):\n fil_ = ret_neigh(i, j, window)\n em[col][:] = [d_pad[fil_[c][0]][fil_[c][1]] for c in range(window ** 2)]\n col += 1\n return em", "def pad(input_layer, layer_idx):\n\n \"\"\" Set local variables \"\"\"\n if (self.layer_names[layer_idx] == \"conv\"):\n filter_size = [self.kernel_layers[layer_idx].shape[1], self.kernel_layers[layer_idx].shape[2]]\n elif (self.layer_names[layer_idx] == \"pool\"):\n filter_size = self.pool_sizes[layer_idx]\n stride_size = self.stride_sizes[layer_idx]\n padding_fn = self.padding_fns[layer_idx]\n \n \"\"\" Start padding input layer \"\"\"\n num_inputs, input_rows, input_cols, num_channels = input_layer.shape[0], input_layer.shape[1], input_layer.shape[2], input_layer.shape[3]\n filter_rows, filter_cols, stride_rows, stride_cols = filter_size[0], filter_size[1], stride_size[0], stride_size[1]\n if (padding_fn == \"same\"):\n width_padding = (input_cols * stride_cols) + filter_cols - input_cols - stride_cols\n 
height_padding = (input_rows * stride_rows) + filter_rows - input_rows - stride_rows\n row_padding_right = int(np.ceil(width_padding / 2))\n row_padding_left = int(np.floor(width_padding / 2))\n col_padding_bottom = int(np.ceil(height_padding / 2))\n col_padding_top = int(np.floor(height_padding / 2))\n padded_inputs = np.pad(input_layer, [(0,0), (col_padding_top, col_padding_bottom), (row_padding_left, row_padding_right), (0,0)], mode='constant')\n elif (padding_fn == \"valid\"):\n max_num_rows = (int)((input_rows - filter_rows) / stride_rows) + 1\n max_num_cols = (int)((input_cols - filter_cols) / stride_cols) + 1\n padded_inputs = input_layer[:, :(filter_rows + (stride_rows * (max_num_rows - 1))), :(kernel_cols + (stride_cols * (max_num_cols - 1))), :]\n return padded_inputs", "def calculate_weights(self):\n weights = []\n for x in range(frame.rad, frame.window_x, frame.dx):\n for y in range(frame.rad, frame.window_y, frame.dy):\n obj = new_frame.create_general_object(x,y)\n hist = new_frame.create_general_hist(obj)\n # compare histograms to find weight\n weight = cv2.compareHist(frame.hist, hist, method=cv2.cv.CV_COMP_CORREL)\n # find distance away from old point, and normalize by max distance\n max_distance = float(self.find_hypotenuse(frame.window_x, frame.window_y))\n distance = self.find_hypotenuse(x-frame.x, y-frame.y) / max_distance\n # subtract normalized distance from weight\n weight = weight - distance\n # make sure no weights are negative\n if weight < 0:\n weight = 0\n # append weights to array\n weights.append(weight)\n self.weights = np.array(weights)", "def depthwise_conv2d_python_nchw(input_np, filter_np, stride, padding):\n batch, in_channel, in_height, in_width = input_np.shape\n _, channel_multiplier, filter_height, filter_width = filter_np.shape\n if isinstance(stride, int):\n stride_h = stride_w = stride\n else:\n stride_h, stride_w = stride\n\n # calculate output shape\n if padding == 'VALID':\n out_channel = in_channel * channel_multiplier\n out_height = (in_height - filter_height) // stride_h + 1\n out_width = (in_width - filter_width) // stride_w + 1\n output_np = np.zeros((batch, out_channel, out_height, out_width))\n for i in range(batch):\n for j in range(out_channel):\n output_np[i, j, :, :] = signal.convolve2d(input_np[i, j//channel_multiplier, :, :], \\\n np.rot90(filter_np[j//channel_multiplier, j%channel_multiplier, :, :], 2), \\\n mode='valid')[0:(in_height - filter_height + 1):stride_h, 0:(in_width - filter_height + 1):stride_w]\n if padding == 'SAME':\n out_channel = in_channel * channel_multiplier\n out_height = np.int(np.ceil(float(in_height) / float(stride_h)))\n out_width = np.int(np.ceil(float(in_width) / float(stride_w)))\n output_np = np.zeros((batch, out_channel, out_height, out_width))\n pad_along_height = np.int(np.max((out_height - 1) * stride_h + filter_height - in_height, 0))\n pad_along_width = np.int(np.max((out_width - 1) * stride_w + filter_width - in_width, 0))\n pad_top_tvm = np.int(np.ceil(float(pad_along_height) / 2))\n pad_left_tvm = np.int(np.ceil(float(pad_along_width) / 2))\n pad_top_scipy = np.int(np.ceil(float(filter_height - 1) / 2))\n pad_left_scipy = np.int(np.ceil(float(filter_width - 1) / 2))\n index_h = pad_top_scipy - pad_top_tvm\n index_w = pad_left_scipy - pad_left_tvm\n for i in range(batch):\n for j in range(out_channel):\n output_np[i, j, :, :] = signal.convolve2d(input_np[i, j//channel_multiplier, :, :], \\\n np.rot90(filter_np[j//channel_multiplier, j%channel_multiplier, :, :], 2), \\\n 
mode='same')[index_h:in_height:stride_h, index_w:in_width:stride_w]\n\n return output_np", "def find_w_size(self, name, output_width=30, min_w=10, max_w=75, step=5, offset=0, \n sum_=False, window=None):\n \n loss_arr = []\n val_loss_arr = []\n \n print(\"Finding best window size..\")\n print(f\"Model: {name}, output size: {output_width}\\n\")\n \n if window:\n \n self.create_train_test(name=name, f_size=window, offset=offset, output_width=output_width, sum_=sum_)\n model, loss, val_loss = get_model(name, self.trainX, self.trainY)\n \n else:\n \n for i in range(min_w, max_w, step):\n \n self.create_train_test(name=name, f_size=i, offset=offset, output_width=output_width, sum_=sum_)\n model, loss, val_loss = get_model(name, self.trainX, self.trainY)\n \n print(f\"For window of {i} values, MAPE = {loss}\")\n loss_arr.append(loss)\n val_loss_arr.append(val_loss)\n \n temp = np.insert(val_loss_arr, 0, val_loss_arr[0])\n temp = np.append(temp, val_loss_arr[-1])\n \n smooth = np.convolve(temp, [1, 2, 1], mode='valid')\n \n if (len(smooth)-np.argmin(smooth)) > 4:\n break\n \n print(\"Done\")\n \n val_loss_arr = np.insert(val_loss_arr, 0, val_loss_arr[0])\n val_loss_arr = np.append(val_loss_arr, val_loss_arr[-1])\n val_loss_arr_smooth = np.convolve(val_loss_arr, [1, 2, 1], mode='valid') \n \n idx = np.argmin(val_loss_arr_smooth)\n \n window_size = range(min_w, max_w, step)[idx]\n \n range_ = range(min_w, max_w, step)[:len(loss_arr)]\n plt.plot(range_, loss_arr, label=\"loss\", color=\"#33638DFF\")\n plt.plot(range_, val_loss_arr[1:-1], label=\"val_loss\", color=\"#3CBB75FF\")\n plt.plot(range_, val_loss_arr_smooth/4, \n label=\"smooth_val_loss\", color=\"#d18756\")\n \n plt.axvline(x=window_size, linestyle=\"--\", c=\"black\", lw=1)\n plt.legend()\n plt.title(name + \" model\")\n plt.xlabel(\"window size\")\n plt.ylabel(\"loss\")\n plt.show()\n \n print(f\"Best window size for {name} is {window_size}\\n\")\n\n return window_size", "def __padding(self, image, boxes, height, width):\n temp = boxes[:, :4].astype(np.int)\n y1 = np.where(temp[:, 0] < 0)[0]\n if len(y1) > 0:\n temp[y1, 0] = 0\n x1 = np.where(temp[:, 1] < 0)[0]\n if len(x1) > 0:\n temp[x1, 0] = 0\n y2 = np.where(temp[:, 2] > image.shape[0] - 1)[0]\n if len(y2) > 0:\n temp[y2, 0] = image.shape[0] - 1\n x2 = np.where(temp[:, 3] > image.shape[1] - 1)[0]\n if len(x2) > 0:\n temp[x2, 0] = image.shape[1] - 1\n pad_top = np.abs(temp[:, 0] - boxes[:, 0]).astype(np.int)\n pad_left = np.abs(temp[:, 1] - boxes[:, 1]).astype(np.int)\n pad_bottom = np.abs(temp[:, 2] - boxes[:, 2]).astype(np.int)\n pad_right = np.abs(temp[:, 3] - boxes[:, 3]).astype(np.int)\n input_data = np.empty([boxes.shape[0], 3, height, width], dtype=np.float32)\n for i in range(boxes.shape[0]):\n crop_img = image[temp[i, 0]:temp[i, 2] + 1, temp[i, 1]:temp[i, 3] + 1, :]\n crop_img = cv2.copyMakeBorder(crop_img, pad_top[i], pad_bottom[i], \\\n pad_left[i], pad_right[i], cv2.BORDER_CONSTANT, value=0)\n if crop_img is None:\n continue\n crop_img = cv2.resize(crop_img, (width, height)).astype(np.float32)\n crop_img[:, :, 0] -= self.mean[0]\n crop_img[:, :, 1] -= self.mean[1]\n crop_img[:, :, 2] -= self.mean[2]\n crop_img *= self.scale_factor\n crop_img = np.transpose(crop_img, (2, 0, 1))\n input_data[i] = crop_img.copy()\n return input_data", "def align(src, dst, window = 15):\n displacements = []\n best_d = np.inf\n for y in range(-window, window, 1): # For each row in the window,\n for x in range(-window, window, 1): # For each column in the window,\n d = displacement(src, dst, x, 
y, cc_func=ssd)\n if d < best_d:\n best_d = d\n best_x = x\n best_y = y\n displacements.append(d)\n return best_x, best_y, displacements", "def pad(self, input_layer, layer_idx):\n \"\"\" Set local variables \"\"\"\n if (self.layer_names[layer_idx] == \"conv\"):\n filter_size = [self.kernel_layers[layer_idx].shape[1], self.kernel_layers[layer_idx].shape[2]]\n elif (self.layer_names[layer_idx] == \"pool\"):\n filter_size = self.pool_sizes[layer_idx]\n stride_size = self.stride_sizes[layer_idx]\n padding_fn = self.pad_fns[layer_idx]\n \n \"\"\" Start padding input layer \"\"\"\n num_inputs, input_rows, input_cols, num_channels = input_layer.shape[0], input_layer.shape[1], input_layer.shape[2], input_layer.shape[3]\n filter_rows, filter_cols, stride_rows, stride_cols = filter_size[0], filter_size[1], stride_size[0], stride_size[1]\n if (padding_fn == \"same\"):\n width_padding = (input_cols * stride_cols) + filter_cols - input_cols - stride_cols\n height_padding = (input_rows * stride_rows) + filter_rows - input_rows - stride_rows\n row_padding_right = int(np.ceil(width_padding / 2))\n row_padding_left = int(np.floor(width_padding / 2))\n col_padding_bottom = int(np.ceil(height_padding / 2))\n col_padding_top = int(np.floor(height_padding / 2))\n padded_inputs = np.pad(input_layer, [(0,0), (col_padding_top, col_padding_bottom), (row_padding_left, row_padding_right), (0,0)], mode='constant')\n elif (padding_fn == \"valid\"):\n max_num_rows = (int)(((input_rows - filter_rows) / stride_rows) + 1)\n max_num_cols = (int)(((input_cols - filter_cols) / stride_cols) + 1)\n padded_inputs = input_layer[:, :(filter_rows + (stride_rows * (max_num_rows - 1))), :(filter_cols + (stride_cols * (max_num_cols - 1))), :]\n return padded_inputs", "def pad_input(self, input, window):\n batch_size, nsample = input.shape\n stride = window // 2\n rest = window - (stride + nsample % window) % window\n if rest > 0:\n pad = torch.zeros(batch_size, rest).type(input.type())\n input = torch.cat([input, pad], 1)\n pad_aux = torch.zeros(batch_size, stride).type(input.type())\n input = torch.cat([pad_aux, input, pad_aux], 1)\n return input, rest" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates the mouse_over variable and returns the button's action value when clicked.
def update(self, mouse_pos, mouse_up):
    if self.rect.collidepoint(mouse_pos):
        self.mouse_over = True
        if mouse_up:
            return self.action
    else:
        self.mouse_over = False
[ "def button_hovered(self):\n\n mouse_pos = self.mouse_pos()\n return self.option_buttons.get_clicked(mouse_pos)", "def update_button_hover_status(self):\n for button in self.playing_buttons:\n button.update(self.mousePos)", "def mouse_press(self, btn, x, y, modifiers):", "def hover(self, mousepos: Tuple[int, int]) -> None:\n if self.rect.collidepoint(mousepos):\n # Become darker when mouse is hovering over button\n self.color = tuple([self.color[i] - 2 if self.color[i] > 20 else self.color[i] for i in range(3)])\n else:\n # Become lighter when no mouse is hovering over button\n self.color = tuple([self.color[i] + 2 if self.color[i] < 40 else self.color[i] for i in range(3)])", "def buttons_mouse_over_internal(window):\r\n\r\n # reset flags\r\n window.reset_mouse_over_flags()\r\n\r\n # no click detection if window is not visible\r\n if not window.is_visible:\r\n return 0\r\n\r\n if adjusted_mouse_rect_collision(window, window.border_rect):\r\n window.m_border_rect = True\r\n\r\n # minimize button clicked\r\n if window.can_be_minimized and adjusted_mouse_rect_collision(window, window.minimize_button_rect):\r\n window.m_minimize_button = True\r\n\r\n if mouse0_cd():\r\n\r\n if window.is_minimized:\r\n maximize(window)\r\n\r\n elif not window.is_minimized:\r\n minimize(window)\r\n\r\n # post event that pywindowframes caught the mouse click\r\n post_event((window, \"pywindowframes_clicked\"))\r\n\r\n # close button clicked\r\n elif adjusted_mouse_rect_collision(window, window.close_button_rect):\r\n\r\n window.m_close_button = True\r\n\r\n if mouse0_cd():\r\n\r\n window.close()\r\n\r\n # post event that pywindowframes caught the mouse click\r\n post_event((window, \"pywindowframes_clicked\"))\r\n\r\n # top border is clicked but no button in top border\r\n else:\r\n if pg.mouse.get_pressed(num_buttons=3)[0]:\r\n\r\n window.is_dragged = True\r\n window.focus_window()\r\n\r\n # post event that pywindowframes caught the mouse click\r\n post_event((window, \"pywindowframes_clicked\"))\r\n\r\n # reset mouse rel (because it returns mouse rel since it was last _called_\r\n if not window.is_dragged:\r\n pg.mouse.get_rel()\r\n\r\n # window rect collision\r\n if adjusted_mouse_rect_collision(window, window.rect):\r\n\r\n window.m_window_rect = True\r\n\r\n if not window.is_minimized:\r\n if mouse0_cd():\r\n # print(\"Window clicked\")\r\n window.focus_window()\r\n\r\n # post event that pywindowframes caught the mouse click\r\n post_event((window, \"pywindowframes_clicked\"))\r\n\r\n else:\r\n window.m_window_rect = False", "def update(self, mouse_pos, mouse_press, button_to_be_activated_by=(1, 0, 0)):\r\n mx, my = mouse_pos\r\n self.hover = self.rect.collidepoint(mx, my)\r\n\r\n if self.hover and mouse_press == (1, 0, 0):\r\n self.pressed = True\r\n return True\r\n else:\r\n self.pressed = False\r\n return False", "def check_button_hover(coord, play_button, high_scores_button):\r\n x = coord[0]\r\n y = coord[1]\r\n play_x = (play_button.rect.x <= x <= play_button.rect.x + play_button.width)\r\n play_y = (play_button.rect.y <= y <= play_button.rect.y + play_button.height)\r\n scores_x = (high_scores_button.rect.x <= x <= high_scores_button.rect.x + high_scores_button.width)\r\n scores_y = (high_scores_button.rect.y <= y <= high_scores_button.rect.y + high_scores_button.height)\r\n if play_x and play_y:\r\n play_button.text_color = (0, 255, 0)\r\n else:\r\n play_button.text_color = (255, 255, 255)\r\n\r\n play_button.prep_msg()\r\n play_button.draw_button()\r\n\r\n if scores_x and scores_y:\r\n 
high_scores_button.text_color = (0, 255, 0)\r\n else:\r\n high_scores_button.text_color = (255, 255, 255)\r\n\r\n high_scores_button.prep_msg()\r\n high_scores_button.draw_button()", "def on_hover(self) -> None:", "def UpdateButtonOnScreen(self, button_ui_part, event):\r\n\r\n hit_test = self.HitTest(*event.GetPosition())\r\n\r\n if not hit_test or not button_ui_part:\r\n return\r\n \r\n state = AUI_BUTTON_STATE_NORMAL\r\n \r\n if hit_test == button_ui_part:\r\n if event.LeftDown():\r\n state = AUI_BUTTON_STATE_PRESSED\r\n else:\r\n state = AUI_BUTTON_STATE_HOVER\r\n else:\r\n if event.LeftDown():\r\n state = AUI_BUTTON_STATE_HOVER\r\n \r\n # now repaint the button with hover state\r\n cdc = wx.ClientDC(self._frame)\r\n\r\n # if the frame has a toolbar, the client area\r\n # origin will not be (0,0).\r\n pt = self._frame.GetClientAreaOrigin()\r\n if pt.x != 0 or pt.y != 0:\r\n cdc.SetDeviceOrigin(pt.x, pt.y)\r\n\r\n if hit_test.pane: \r\n self._art.DrawPaneButton(cdc, self._frame,\r\n button_ui_part.button.button_id,\r\n state,\r\n button_ui_part.rect, hit_test.pane)", "def button(self):\n return self.__button", "def update_action_tooltip(*args):\n return _ida_kernwin.update_action_tooltip(*args)", "def GetButton(*args, **kwargs):\n return _core_.MouseEvent_GetButton(*args, **kwargs)", "def on_mouse_release(self, x, y, button):\n pass", "def on_mouse_press(self, x, y, button):\n\n pass", "def on_mouse(self, event):", "def MouseOverItem(self,item):\r\n pass", "def getMouseFunc(self):\n return self.mousefnc.get(int(self.mod),{}).get(self.button,None)", "def hover(self, message='', update=True):\n\n if update:\n self._update_reference()\n\n self.source.mouse = webdriver.ActionChains(self.source.browser)\n self.utils.console(\"Hovering over {} {}...\".format(self.item_type, self.name) if message == '' else message)\n self.source.mouse.move_to_element(self._reference).perform()", "def _control_action(self, control, button):\n if button:\n if self._control_events == 1:\n button, value = self._controls[control]._action(button)\n return button, value\n else:\n for evt in range(self._control_events):\n button, value = self._controls[control]._action(button)\n self._control_events = 1\n return button, value\n else:\n return None, None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Draws element onto a surface
def draw(self, surface):
    surface.blit(self.image, self.rect)
[ "def draw(self,surface):\n if self.form==0:\n pygame.draw.circle(surface,self.color,self.position,self.radius,self.width)\n if self.form==1:\n pygame.draw.rect(surface,self.color,list(self.position)+list(self.size),self.width)\n if self.form==2:\n pygame.draw.line(surface,self.color,self.position,self.size,self.width)\n if self.form==3:\n if len(self.points)>1:\n pygame.draw.lines(surface,self.color,self.connect,self.points,self.width)", "def draw(self, surface):\n\t\tblit_position = self.position - Vector2(self.radius) # Subtracting vector to get top-left corner start\n\t\tsurface.blit(self.sprite, blit_position)", "def render(self, surface):\r\n surface.blit(self._image, self._rect)", "def draw(self, surface):\n # Clean before drawing\n self.surface.fill((*self.color, self.alpha))\n \n # Check if has sub objects and if so then draw them\n if self._sub_objects:\n\n for obj in self._sub_objects:\n # Draw subobjects and their subobjects to their surface\n obj.draw(self.surface)\n\n # Blit main surface to given surface\n surface.blit(self.surface, self.position)", "def draw(self, surface):\n # Clear before drawing\n self.surface.fill((0, 0, 0, 0))\n\n # Check if has sub objects and if so then draw them\n if self._sub_objects:\n\n for obj in self._sub_objects:\n # Draw subobjects and their subobjects to their surface\n obj.draw(self.surface)\n\n # Blit main surface to given surface\n surface.blit(self.surface, self.position)", "def show_surface(self):", "def draw(self, surface):\n temp = pygame.Surface(self.renderer.pixel_size)\n self.renderer.render_map(temp)\n pygame.transform.smoothscale(temp, surface.get_size(), surface)", "def draw(self, surface):\n color = pygame.Color(255, 255, 255)\n pygame.draw.circle(surface, color, self.position, Molecule.radius, 2)", "def draw(self, surface):\n checked_color = (0, 196, 0) if self.checked else pg.Color(\"white\")\n surface.fill(pg.Color(\"black\"), self.rect)\n surface.fill(self.color, self.rect.inflate(-2,-2))\n surface.fill(pg.Color(\"white\"), self.rect.inflate(-6,-6))\n surface.fill((205,205,205), self.rect.inflate(-8,-8))\n surface.fill(checked_color, self.select_rect)", "def draw_piece(self):\n self.screen.blit(self.image, self.rect)", "def draw(self, surface):\n\t\tangle = self.direction.angle_to(UP) # Translates spaceship's direction into rotation angle in degrees\n\t\trotated_surface = rotozoom(self.sprite, angle, 1.0) # Rotates the sprite. 
Last arg is scale change, hence 1.0\n\t\trotated_surface_size = Vector2(rotated_surface.get_size())\n\t\tblit_position = self.position - rotated_surface_size * 0.5 # Blit position calculated based on rotated surface size, which differs from original size\n\t\tsurface.blit(rotated_surface, blit_position)", "def draw(self, surface):\n for molecule in self.molecules:\n molecule.draw(surface)", "def draw(self, surface, offset=(0,0)):\n for button in self.buttons:\n button.draw(surface, offset)", "def draw(self, surface):\n self.__draw_background(surface)\n if self.grid_draw:\n self.__draw_grid(surface)\n\n for sprite in self.sprites():\n if sprite.visible:\n transformed_rect = sprite.rect.move(self.cam)\n if not self.view_rect.colliderect(transformed_rect):\n # Ignore sprites that are outside of the view rectangle\n if sprite.kill_when_off_screen:\n self.kill_sprite(sprite)\n else:\n surface.blit(sprite.image, transformed_rect)", "def draw(self, container): \r\n pass", "def draw_on(self, surface):\n for x, y in self.alive_cells():\n #size = (self.box_size, self.box_size)\n #position = (x * self.box_size, y * self.box_size)\n #thickness = 1\n pygame.draw.rect(surface, DARK_RED, (x * self.box_size, y * self.box_size,self.box_size, self.box_size ))", "def draw(self, surface):\n surface.blit(self.base, self.base_rect)\n surface.blit(self.barrel, self.rect)", "def draw(self):\n if not self.visible:\n return\n\n if self.isEnabled:\n \n # Draw the dragger's current appearance to the window.\n if self.dragging:\n self.window.blit(self.surfaceDown, self.rect)\n else: # mouse is up\n if self.mouseOver:\n self.window.blit(self.surfaceOver, self.rect)\n else:\n self.window.blit(self.surfaceUp, self.rect)\n else:\n self.window.blit(self.surfaceDisabled, self.rect)", "def draw(self):\r\n if not self.visible:\r\n return\r\n\r\n if self.isEnabled:\r\n \r\n # Draw the dragger's current appearance to the window.\r\n if self.dragging:\r\n self.window.blit(self.surfaceDown, self.rect)\r\n else: # mouse is up\r\n if self.mouseOver:\r\n self.window.blit(self.surfaceOver, self.rect)\r\n else:\r\n self.window.blit(self.surfaceUp, self.rect)\r\n else:\r\n self.window.blit(self.surfaceDisabled, self.rect)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handles the game loop until an action is returned by a button in the buttons sprite renderer.
def game_loop(screen, buttons):
    while True:
        mouse_up = False
        for event in pygame.event.get():
            if event.type == pygame.MOUSEBUTTONUP and event.button == 1:
                mouse_up = True
        screen.fill(BLACK)
        for button in buttons:
            ui_action = button.update(pygame.mouse.get_pos(), mouse_up)
            if ui_action is not None:
                return ui_action
        screen.blit(img, imgrect)
        buttons.draw(screen)
        pygame.display.flip()
[ "def _game_loop(self):\n self._keyboard_pressing()\n self._ship_action()\n self._torpedos_action()\n self._asteroids_action()\n self._check_lost_game()\n self._check_won_game()\n self._check_quit_game()", "def callback_game_loop(self) -> None:\n self._goal_generate()\n self._update()\n self.reset()\n\n while self._player != self._goal:\n self._update()\n action = self._action_callback(\n self._player.np,\n self._goal.np,\n *self._action_callback_args,\n )\n if action == \"QUIT\":\n break\n self._player_erase()\n self.FUNCMAP[action]()\n self._update()\n\n if self._display:\n time.sleep(0.1)\n try:\n if chr(cv2.waitKey(5)) in self.KEYMAP[\"QUIT\"]:\n break\n except ValueError:\n pass\n\n if self._display:\n print(f\"Steps taken: {self._routes[self._current_route_key]}\")\n\n if self._display:\n cv2.waitKey(0)", "def wait_for_buttons(self, threaded=True):\n\t\tRPIO.wait_for_interrupts(threaded)", "def GAME_LOOP():\n pass", "def loop(self):\r\n while self.dispatch(True) is not QUIT:\r\n pass", "def GAMEOVER_LOOP():\n pass", "def main_loop(self) -> None:\n while True:\n player = self._players[self._current_player]\n hit = True\n while hit:\n self.select_square(player)\n if self.menu_called: # go to menu\n self.menu_called = False\n return\n hit = player.shoot()\n if player.has_won():\n self.display_manager.display_end_game_message(player)\n self.game_over = True\n return\n self._current_player = (self._current_player + 1) % len(self._players)", "def handle_left_click(self):\n if not self.game_in_progress:\n return\n if self.first_click:\n self.first_click = False\n self.timer.start(1000)\n sender = self.sender()\n row = 0\n col = 0\n for row in range(self.rows):\n for col in range(self.cols):\n if self.button_array[row][col] == sender:\n break\n else:\n continue\n break\n # print 'Received left click:', row, ',', col\n celllist = self.board.opencell(row, col)\n if celllist == []:\n return\n for cell in celllist:\n row = cell[0]\n col = cell[1]\n cell_property = self.board.getcellproperty(row, col)\n if cell_property == CellProperty.Empty:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/OpenedSquare.png\"))\n elif cell_property == CellProperty.Mine:\n # Game over\n for row in range(self.rows):\n for col in range(self.cols):\n cell_property = self.board.getcellproperty(row, col)\n if cell_property == CellProperty.Mine:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/mine.ico\"))\n self.status_button.setIcon(QtGui.QIcon(\"icons/smiley3.ico\"))\n self.game_in_progress = False\n self.timer.stop()\n return\n elif cell_property == CellProperty.MineCountOne:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/1.png\"))\n elif cell_property == CellProperty.MineCountTwo:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/2.png\"))\n elif cell_property == CellProperty.MineCountThree:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/3.png\"))\n elif cell_property == CellProperty.MineCountFour:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/4.png\"))\n elif cell_property == CellProperty.MineCountFive:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/5.png\"))\n elif cell_property == CellProperty.MineCountSix:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/6.png\"))\n elif cell_property == CellProperty.MineCountSeven:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/7.png\"))\n elif cell_property == CellProperty.MineCountEight:\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/8.png\"))\n\n game_status = 
self.board.continuegame()\n print 'Game Status:', game_status\n if game_status == GameStatus.GameWon:\n self.timer.stop()\n self.game_in_progress = False\n player_name = QtGui.QInputDialog.getText(self, \"Name Please !!\",\\\n \"Enter your name for leader board:\")\n # TODO: Replace 1 with the time taken by the end user.\n LeaderBoard.insertnewscore(CURRENT_GAME_LEVEL, player_name[0], self.time)\n self.status_button.setIcon(QtGui.QIcon(\"icons/smiley.ico\"))\n print \"You have won the game\"", "def wait_for_button(self, block=True):\n # Loop until all buttons are released (if currently pressed)\n while self.enter.value() == 0 or self.page.value() == 0:\n pass\n\n # Wait for first button press\n checks = 0\n while self.enter.value() == 1 and self.page.value() == 1:\n checks += 1\n if not block and checks > NONBLOCKING_CHECKS:\n break\n\n if self.enter.value() == 0:\n # Wait for release\n while self.enter.value() == 0:\n pass\n return BUTTON_ENTER\n\n if self.page.value() == 0:\n # Wait for release\n while self.page.value() == 0:\n pass\n return BUTTON_PAGE\n return None", "def wait_keydown(self):\n while True:\n self.clock.tick(self.fps)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.running = False\n return\n if event.type == pygame.KEYDOWN:\n return", "def _do_loop(self):\r\n # You don't need to change this method!\r\n self._game_loop()\r\n # Set the timer to go off again\r\n self._screen.update()\r\n self._screen.ontimer(self._do_loop, 5)", "def buttonPress():\n for i in range (3):\n print(\"Button Works!~\")", "def loop(self):\n while self.dispatch(True) is not QUIT:\n pass", "def stop_btn_handler():\r\n timer.stop()\r\n if game_is_running:\r\n register_attempt()\r\n set_game_running(False)", "def on_next_turn_click(self, button):\n if self.referee.is_game_over():\n Gtk.main_quit()\n else:\n self.do_next_turn(button)\n # if the game is over after this turn, we will shutdown on the next click,\n # so visually alert the player with the button label\n if self.referee.is_game_over():\n button.set_label(GAME_OVER_MSG)", "def start_btn_handler():\r\n timer.start()\r\n set_game_running(True)", "def test_button(self):\n callback = CallbackCounter()\n display = get_display(0)\n button = FakeButton()\n display.register_onpress(button, callback)\n assert callback == 0\n display.read()\n assert callback == 0\n button.value = True\n display.read()\n assert callback == 1\n for i in range(200):\n display.read()\n assert callback == 1", "def func(self):\n # pause a little between each message.\n self.caller.msg(\"You reach out to press the big red button ...\")\n yield (2) # pause 2s before next message\n self.caller.msg(\"\\n\\n|wBOOOOM! A bright light blinds you!|n\")\n yield (1) # pause 1s before next message\n self.caller.msg(\"\\n\\n|xThe world goes dark ...|n\")\n\n name = self.caller.name\n self.caller.location.msg_contents(\n f\"{name} presses the button. BOOM! 
{name} is blinded by a flash!\", exclude=self.caller\n )\n self.obj.blind_target(self.caller)", "def run(self):\n #Runs the program until the button is pressed\n while(GPIO.input(self.button)==1):\n \n #Turn on the LED\n GPIO.output(self.green_led,GPIO.HIGH)\n \n #Collect delivered payload from the reciever\n (payload) = self.rcvr.slave(payload_fmt)\n \n #Ensures that if no payload is received then xdirection and ydirection still have a value\n if payload is None:\n xdirection = 0\n ydirection = 0\n \n else:\n xdirection = payload[0]\n ydirection = payload[1]\n \n \n if xdirection == 1\n self.turn_right()\n \n if xdirection == 2:\n self.turn_left()\n \n if xdirection == 3:\n self.go_straight()\n \n if ydirection == 2:\n self.forward()\n \n if ydirection == 1:\n self.backward()\n \n if ydirection == 3:\n self.motor_stop()\n\n time.sleep(0.2)\n \n #Begins cleanup function after the button is pressed\n self.cleanup()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
handle a keypress

space > take a screen shot
tab > start/stop recording a screencast
escape > quit
def onKeypress(self, keycode):
    # space
    if keycode == 32:
        self._captureManager.writeImage('screenshot.png')
    # tab
    elif keycode == 9:
        if not self._captureManager.isWritingVideo:
            self._captureManager.startWritingVideo('screencast.avi')
        else:
            self._captureManager.stopWritingVideo()
    # escape
    elif keycode == 27:
        self._windowManager.destroyWindow()
[ "def onKeypress(self,keycode):\n if keycode==32:#space\n self._captureManager.writeImage('screennshot.png')\n elif keycode==9:#tab\n if not self._captureManager.isWritingVideo:\n self._captureManager.startWritingVideo('scrrencast.avi')\n else:\n self._captureManager.stopWritingVideo()\n elif keycode==27:#escape\n self._windowManager.detroyWindow()", "def onKeypress(self, keycode):\n if keycode == 32: # space\n self._captureManager.writeImage('screenshot.png')\n\n elif keycode == 9: # tab\n if not self._captureManager.isWritingVideo:\n self._captureManager.startWritingVideo(\n 'screencast.avi')\n else:\n self._captureManager.stopWritingVideo()\n\n elif keycode == 27: # escape\n self._windowManager.destroyAllWindow()\n \n elif keycode == 115: # s\n print('\\npress s -- show : show data')\n self._show_flag = False\n \n elif keycode == 116: # t\n self._count += 1\n if self._count%2 == 1:\n print('\\npress t -- test : switch to test model')\n self._test_flag = True\n\n else : \n print('\\npress t -- test : switch to work model')\n self._test_flag = False\n\n if self._count == 10:\n self._count = 0\n\n\n\n\n elif keycode == 110: # n\n pass\n\n elif keycode == 109: # m \n pass", "def onKeypress(self, keycode):\n if keycode == 32: # space\n print \"Space down\"\n fileCount = self._captureManager.getSreenShotCount()\n self._captureManager.writeImage(\"Test/regist_orignal_\" + fileCount + \".jpg\")\n elif keycode == 9: # tab\n print \"Tab Down\"\n if not self._captureManager.isWritingVideo:\n self._captureManager.startWritingVideo(\n 'test06081.avi')\n else:\n self._captureManager.stopWritingVideo()\n elif keycode == 27: # escape\n self._windowManager.destroyWindow()", "def _handle_key_down_events(self, event):\n\n if event.key == pygame.K_q:\n pygame.quit()\n sys.exit(0)\n # Press space for pause/play\n elif event.key == pygame.K_SPACE:\n self.paused = True if not self.paused else False\n # Press 'C' to click a screenshot\n elif event.key == pygame.K_c:\n pygame.image.save(self.screen,\n f\"screenshot{self.settings.s_shot}.png\")\n self.settings.s_shot += 1", "def play(self):\n os.system(\"xdotool key KP_Space\")", "def start_record(cr):\r\n \"\"\"Emulate the keyboard \"\"\"\r\n _player = input_playback.InputPlayback()\r\n _player.emulate(input_type='keyboard')\r\n _player.find_connected_inputs()\r\n \"\"\"To get list of UI elements\"\"\"\r\n ui = ui_utils.UI_Handler()\r\n ui.start_ui_root(cr)\r\n list=ui.get_name_role_list()\r\n \"\"\"To Open status tray and click on Screen Recording option\"\"\"\r\n logging.info(\"Opening status tray\")\r\n ui.doDefault_on_obj(STATUS_TRAY_REGEXP, True, role='button')\r\n time.sleep(WAIT)\r\n ui.doDefault_on_obj('/Close/i', True, role='button')\r\n ui.doDefault_on_obj('/Screen capture/i', True, role='button')\r\n ui.doDefault_on_obj('/Screen record/i', True,role='toggleButton')\r\n ui.doDefault_on_obj('/Record full screen/i', True,role='toggleButton')\r\n _player.blocking_playback_of_default_file(input_type='keyboard', filename='keyboard_enter')\r\n \"\"\"To open Chrome Page\"\"\"\r\n _player.blocking_playback_of_default_file(input_type='keyboard', filename='keyboard_ctrl+t')\r\n time.sleep(WAIT)\r\n logging.info(\"Recording Started\")\r\n return ui", "def __time_key_release_event(self, event):\n\t\tif event.key() == QtCore.Qt.Key_Space:\n\t\t\tself._player.stop() if self._player.is_playing else _player._video.play()", "def capture_key_press(self, event):\n\n file_object = open(log,'a')\n file_object.write(event.Key)\n file_object.write('\\n')\n \n if 
event.Ascii==94:\n file_object.close()\n self.listener.cancel()", "def _on_screengrab(self):\n self._bundle.log_debug(\"Prompting for screenshot...\")\n\n self.window().hide()\n try:\n pixmap = screen_grab.ScreenGrabber.screen_capture()\n finally:\n self.window().show()\n\n if pixmap:\n self._bundle.log_debug(\n \"Got screenshot %sx%s\" % (pixmap.width(), pixmap.height())\n )\n self._multiple_values = False\n self._set_screenshot_pixmap(pixmap)\n self.screen_grabbed.emit(pixmap)", "def _D(stdscr):\n curses.nocbreak()\n stdscr.keypad(0)\n curses.echo()\n curses.endwin()\n import pdb; pdb.set_trace()", "def start(self):\n keyboard.on_release(self.record_press)\n # starts record_press() when true\n keyboard.wait(hotkey=\"esc\")\n # The wait() keeps the listener active", "def _on_key_press(self, key):\n if key is self.TRIGGER_KEY and not self.do_record:\n print(\"Start Recording...\")\n self.do_record = True", "def break_stimulus(win,break_stim):\n #start core clock\n clock = core.Clock()\n\n #while space bar is not pressed continue to show break stimulus\n #if 50 seconds pass, then quit experiment\n break_stim.setAutoDraw(True)\n while not event.getKeys(['space']):\n win.flip()\n if int(clock.getTime()) > 50:\n core.quit\n break_stim.setAutoDraw(False)", "def _on_key_press(self, event):", "def pause_handler(term):\n inp = None\n while inp not in (\"p\", \"P\", \"q\", \"Q\"):\n print(term.home + term.clear + term.move_y(term.height // 2))\n print(term.black_on_white(term.center(\"press P to continue.\")))\n\n inp = term.inkey(timeout=10)", "def vim():\n keepgoing = True\n textmode = False\n tmpstr = ''\n mm = 0\n while keepgoing:\n input = click.termui.getchar()\n if textmode:\n if 13 == ord(input):\n # hit enter. send it then reset us\n roku_master.literal(tmpstr)\n tmpstr = ''\n textmode = False\n click.echo('')\n else:\n click.echo(input, nl=False)\n tmpstr += input\n elif 'q' == input:\n keepgoing = False\n elif 'b' == input:\n mm = do_x('back')\n elif 'j' == input:\n mm = do_x('down', mm)\n elif 'k' == input:\n mm = do_x('up', mm)\n elif 'h' == input:\n mm = do_x('left', mm)\n elif 'l' == input:\n mm = do_x('right', mm)\n elif 'f' == input:\n mm = do_x('forward', mm)\n elif 'r' == input:\n mm = do_x('reverse', mm)\n elif 'p' == input:\n mm = do_x('play', 1)\n elif 13 == ord(input): # enter\n mm = do_x('select')\n elif input in '123456789':\n mm = int(input)\n elif '*' == input:\n mm = do_x('info', 1)\n elif 'i' == input:\n textmode = True", "def end_stimulus(win,end_stim):\n #start core clock\n clock = core.Clock()\n\n #while space bar is not pressed continue to show end stimulus\n #if 50 seconds pass, then stop showing end stimulus\n end_stim.setAutoDraw(True)\n while not event.getKeys(['space']):\n win.flip()\n if int(clock.getTime()) > 50:\n break\n end_stim.setAutoDraw(False)", "def keystroke(key):\r\n keyboardPress(key)\r\n keyboardRelease(key)", "def onKeyPress(self):\n ch = read(fd, 4)\n if ch == '\\033': # escape\n self.pause()\n elif '\\033' in ch:\n return\n elif '\\t' in ch: # tab\n return\n elif len(self.user_input) >= 80: # too long\n self.user_input[:80]\n return\n elif ch == '\\r': # return\n if self.user_input == \"\":\n return\n command = command_list.match(self.user_input)\n if not command:\n pass\n elif command.group(1):\n self._save(0)\n elif command.group(2):\n self._save()\n elif command.group(3):\n self._save(command.group(4))\n link = self.links.match(self.user_input.lower())\n if link:\n self.reset(link.group(0))\n self.user_input = \"\"\n self.locked += 1\n print 
'\\033[0m'\n print_loc(' '*80, self.y+5, self.x+2)\n #print_loc(' '*80, self.y+6, 0)\n self.locked -= 1\n elif ch == '\\x7f': # backspace\n if self.user_input == \"\":\n return\n self.user_input = self.user_input[:-1]\n elif ch == ' ': # space\n if self.user_input == \"\":\n return\n elif self.user_input[-1] == ' ':\n return\n self.user_input += ' '\n else: # all else\n self.user_input += ch\n self.locked += 1\n # Highlight valid user input\n if self.links.match(self.user_input.lower()):\n print '\\033[0;96;4m'\n print_loc(self.user_input+'\\033[0;1m < \\033[0m ', self.y + 5, self.x)\n elif command_list.match(self.user_input):\n print '\\033[0;1;92m'\n print_loc(self.user_input+'\\033[0;1m < \\033[0m ', self.y + 5, self.x)\n else:\n print '\\033[0m'\n # Display new user input line\n print_loc(self.user_input+'\\033[0;7m \\033[0m ', self.y + 5, self.x)\n self.locked -= 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert an octal string to a binary string.
def oct2bin(x): return bin(int(x, 8))[2:]
[ "def test_binary_string_to_octal_string():\n obj = pmisc.binary_string_to_octal_string\n if sys.hexversion < 0x03000000:\n ref = (\n \"\\\\1\\\\0\\\\2\\\\0\\\\3\\\\0\\\\4\\\\0\\\\5\\\\0\\\\6\\\\0\\\\a\\\\0\"\n \"\\\\b\\\\0\\\\t\\\\0\\\\n\\\\0\\\\v\\\\0\\\\f\\\\0\\\\r\\\\0\\\\16\\\\0\"\n )\n actual = obj(\"\".join([struct.pack(\"h\", num) for num in range(1, 15)]))\n assert ref == actual\n else:\n ref = r\"\\o1\\0\\o2\\0\\o3\\0\\o4\\0\\o5\\0\\o6\\0\\a\\0\" r\"\\b\\0\\t\\0\\n\\0\\v\\0\\f\\0\\r\\0\\o16\\0\"\n code = lambda x: struct.pack(\"h\", x).decode(\"ascii\")\n actual = obj(\"\".join([code(num) for num in range(1, 15)]))\n assert ref == actual", "def str_to_binary(string_in: str) -> str:\n bin_chars = [int_to_binary(ord(c)) for c in string_in]\n bin_str: str = \"\".join(bin_chars)\n return bin_str", "def to_octal(number: int) -> str:\n return oct(number).replace(\"0o\", \"\")", "def ascii_to_binary(string):\r\n\tbin_string = \"\"\r\n\tfor i in range(0,len(string)):\r\n\t\tbin_string += conversions.decimal_to_binary(search(alphabet, string[i])+32)\r\n\treturn bin_string", "def str_to_bin(string):\n binary_letters = list(map(lambda letter: bin(ord(letter))[2:], string))\n return ''.join(map(lambda s: '0' * (8 - len(s)) + s, binary_letters))", "def octal_transform(self,string):\n octa_rx = r'(((?<=[+/%*^])|(?<=\\s)|(?<=^))[-]?0+[1-9][0-9]*)'\n while re.search(octa_rx,string):\n inst = next(re.finditer(octa_rx,string))\n start = inst.start()\n end = inst.end()\n number = inst.group()\n conversion = self.octal_to_decimal(number)\n string = string[:start] + conversion + string[end:]\n return string", "def hex_to_binary(s):\n return ''.join([bin(int(i, 16))[2:].zfill(4) for i in s])", "def Binary(x):\n return str(x)", "def binstr(x):\n xs = binary_repr(x)\n\n outstr = xs;\n for i in range(8 - len(xs)):\n outstr = '0' + outstr\n return outstr", "async def binstr(self, ctx, *, input_binary = None):\r\n\t\tif input_binary is None:\r\n\t\t\treturn await ctx.send(\"Usage: `{}binstr [input_binary]`\".format(ctx.prefix))\r\n\t\t# Clean the string\r\n\t\tnew_bin = \"\"\r\n\t\tfor char in input_binary:\r\n\t\t\tif char == \"0\" or char == \"1\":\r\n\t\t\t\tnew_bin += char\r\n\t\tif not len(new_bin):\r\n\t\t\treturn await ctx.send(\"Usage: `{}binstr [input_binary]`\".format(ctx.prefix))\r\n\t\tmsg = ''.join(chr(int(new_bin[i:i+8], 2)) for i in range(0, len(new_bin), 8))\r\n\t\tawait ctx.send(Nullify.escape_all(msg))", "def int2bin(n: int) -> str:", "def _binary(n):\n return '{0:b}'.format(n)", "def decimal2binary(self, n):\n\n octet = [\"0\", \"0\", \"0\", \"0\", \"0\", \"0\", \"0\", \"0\"]\n index = 0\n if n < 0 or n > 255:\n raise ValueError, \"Octet value must be between [0-255]\"\n if n == 0: \n return \"\".join(octet)\n while n > 0:\n octet[index] = str((n % 2))\n index += 1\n n = n >> 1\n octet.reverse()\n return \"\".join(octet)", "def _dec_to_oct(ip):\n return oct(ip)", "def str_to_bin(message):\n res = array('B')\n for i in message:\n res.append(ord(i))\n return res", "def int_to_binary(int_in: int) -> str:\n bin_out: str = bin(int_in)[2:].zfill(8)\n return bin_out", "def ascii_to_binary(ascii_code):\n return bin(ascii_code).replace(\"0b\", \"\")", "def convert_to_binary(num):\n return '{0:b}'.format(num)", "def hex2oct(x):\n return oct(int(x, 16))[_oct_index:]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert an octal string to a decimal number.
def oct2dec(x): return int(x, 8)
[ "def octal_transform(self,string):\n octa_rx = r'(((?<=[+/%*^])|(?<=\\s)|(?<=^))[-]?0+[1-9][0-9]*)'\n while re.search(octa_rx,string):\n inst = next(re.finditer(octa_rx,string))\n start = inst.start()\n end = inst.end()\n number = inst.group()\n conversion = self.octal_to_decimal(number)\n string = string[:start] + conversion + string[end:]\n return string", "def dec2int(r: str) -> int:", "def octal_frac_to_decimal(octal_frac_string):\n result = 0.0\n for place, digit in enumerate(octal_frac_string, start=1):\n result += int(digit) * (8 ** -place)\n\n return result", "def strToDec(string):\n\tstring = string.lstrip(\"0\")\n\tif len(string) == 0:\n\t\treturn 0\n\telse:\n\t\treturn eval(string)", "def _dec_to_oct(ip):\n return oct(ip)", "def hex2int(r: str) -> int:", "def to_octal(number: int) -> str:\n return oct(number).replace(\"0o\", \"\")", "def bin2int(r: str) -> int:", "def binary_to_decimal(str_val): # convert binary string to decimal\n result = 0\n base = 1\n binary_len = len(str_val)\n for i in range(binary_len - 1, -1, -1):\n if(str_val[i] == \"1\"):\n result += base\n base *= 2\n return result # return int", "def int2dec(n: int) -> str:", "def CharToDecimal(char):\n c=char\n return ord(c)", "def bin_to_dec(bit_string):\n return int(bit_string, 2)", "def str_to_int(string):\n n = len(string)\n zero_unicode = ord('0')\n\n def recurse(idx):\n if idx == n:\n return 0 # Base case\n int_val = ord(string[idx]) - zero_unicode\n return int_val * 10 ** (n - 1 - idx) + recurse(idx + 1)\n\n return recurse(0)", "def test_binary_string_to_octal_string():\n obj = pmisc.binary_string_to_octal_string\n if sys.hexversion < 0x03000000:\n ref = (\n \"\\\\1\\\\0\\\\2\\\\0\\\\3\\\\0\\\\4\\\\0\\\\5\\\\0\\\\6\\\\0\\\\a\\\\0\"\n \"\\\\b\\\\0\\\\t\\\\0\\\\n\\\\0\\\\v\\\\0\\\\f\\\\0\\\\r\\\\0\\\\16\\\\0\"\n )\n actual = obj(\"\".join([struct.pack(\"h\", num) for num in range(1, 15)]))\n assert ref == actual\n else:\n ref = r\"\\o1\\0\\o2\\0\\o3\\0\\o4\\0\\o5\\0\\o6\\0\\a\\0\" r\"\\b\\0\\t\\0\\n\\0\\v\\0\\f\\0\\r\\0\\o16\\0\"\n code = lambda x: struct.pack(\"h\", x).decode(\"ascii\")\n actual = obj(\"\".join([code(num) for num in range(1, 15)]))\n assert ref == actual", "def hex2oct(x):\n return oct(int(x, 16))[_oct_index:]", "def string_to_int(s):\n if not isinstance(s, str):\n raise TypeError('Not a string')\n digits_list = [char_to_int(i) for i in s]\n num = 0\n for i in range(len(digits_list)):\n num += digits_list[i] * (10 ** (len(digits_list) - i - 1))\n return num", "def str2int(s):\r\n idx, sgn = (1, -1) if '-' == s[0] else (0, 1)\r\n ans = 0\r\n\r\n for i in range(idx, len(s)):\r\n ans = ans*10 + int(s[i])\r\n\r\n return ans*sgn", "def oct2bin(x):\n return bin(int(x, 8))[2:]", "def de_octal(msg):\n try:\n msglist = msg.split(' ')\n characters = []\n for octal in msglist:\n n = int(octal, base=8)\n characters.append(n)\n d_msg = ''\n for c in characters:\n d_msg += ''.join(chr(c))\n return d_msg\n except ValueError:\n print('Invalid octal-encoded message')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert an octal string to a hexadecimal string.
def oct2hex(x): return hex(int(x, 8))[2:]
[ "def int2hex(n: int) -> str:", "def ascii_to_hex(input_string: str) -> str:\n return input_string.encode().hex()", "def hexify(s):\n return (\"%02x\"*len(s)) % tuple(map(ord, s))", "def print_as_hex(s):\n print(\":\".join(\"{0:x}\".format(ord(c)) for c in s))", "def octal(self):\r\n return \"%s%s\" % (\"#\", oct(int(self.hex_color[1:], 16))[2:])", "def hex(string):\n return string.encode('hex')", "def str2hex(string):\n return ''.join('%02x' % c for c in map(ord, string))", "def int_to_hex(n):\r\n #return \"0x%X\" % n\r\n return hex(n)", "def hex_str(an_int):\n ...", "def convert_to_hex(input_string: str):\n return \" \".join([hex(ord(ch))[2:] for ch in input_string])", "def hex2oct(x):\n return oct(int(x, 16))[_oct_index:]", "def hex_str(an_int):\n return '{0:#x}'.format(an_int)", "def octal_transform(self,string):\n octa_rx = r'(((?<=[+/%*^])|(?<=\\s)|(?<=^))[-]?0+[1-9][0-9]*)'\n while re.search(octa_rx,string):\n inst = next(re.finditer(octa_rx,string))\n start = inst.start()\n end = inst.end()\n number = inst.group()\n conversion = self.octal_to_decimal(number)\n string = string[:start] + conversion + string[end:]\n return string", "def bitstr_to_hex(a):\n return hex(bitstr_to_int(a))", "def ascii_string_to_hex(input_string):\n\t\tascii_hex = [hex(ord(char))[2:] for char in input_string]\n\t\toutput_string = ' '.join(ascii_hex)\n\t\treturn output_string", "def hex_string(s, n=32):\n # take first n characters, reverse them and get ascii codes with ord()\n return 'X\"{0:>0{1}}\"'.format(''.join(['{0:x}'.format(ord(c)) for c in s[:n][::-1]]), n * 2)", "def hex(cls, x):\n return c_hex(x)", "def to_octal(number: int) -> str:\n return oct(number).replace(\"0o\", \"\")", "def bytes_to_hex(s):\n\n return s.encode(\"hex\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert a hexadecimal string to an octal string.
def hex2oct(x): return oct(int(x, 16))[_oct_index:]
[ "def oct2hex(x):\n return hex(int(x, 8))[2:]", "def octal_transform(self,string):\n octa_rx = r'(((?<=[+/%*^])|(?<=\\s)|(?<=^))[-]?0+[1-9][0-9]*)'\n while re.search(octa_rx,string):\n inst = next(re.finditer(octa_rx,string))\n start = inst.start()\n end = inst.end()\n number = inst.group()\n conversion = self.octal_to_decimal(number)\n string = string[:start] + conversion + string[end:]\n return string", "def octal(self):\r\n return \"%s%s\" % (\"#\", oct(int(self.hex_color[1:], 16))[2:])", "def to_octal(number: int) -> str:\n return oct(number).replace(\"0o\", \"\")", "def oct(number): # real signature unknown; restored from __doc__\n return \"\"", "def test_binary_string_to_octal_string():\n obj = pmisc.binary_string_to_octal_string\n if sys.hexversion < 0x03000000:\n ref = (\n \"\\\\1\\\\0\\\\2\\\\0\\\\3\\\\0\\\\4\\\\0\\\\5\\\\0\\\\6\\\\0\\\\a\\\\0\"\n \"\\\\b\\\\0\\\\t\\\\0\\\\n\\\\0\\\\v\\\\0\\\\f\\\\0\\\\r\\\\0\\\\16\\\\0\"\n )\n actual = obj(\"\".join([struct.pack(\"h\", num) for num in range(1, 15)]))\n assert ref == actual\n else:\n ref = r\"\\o1\\0\\o2\\0\\o3\\0\\o4\\0\\o5\\0\\o6\\0\\a\\0\" r\"\\b\\0\\t\\0\\n\\0\\v\\0\\f\\0\\r\\0\\o16\\0\"\n code = lambda x: struct.pack(\"h\", x).decode(\"ascii\")\n actual = obj(\"\".join([code(num) for num in range(1, 15)]))\n assert ref == actual", "def ascii_to_hex(input_string: str) -> str:\n return input_string.encode().hex()", "def int2hex(n: int) -> str:", "def hex_str(an_int):\n ...", "def _dec_to_oct(ip):\n return oct(ip)", "def hex(string):\n return string.encode('hex')", "def de_octal(msg):\n try:\n msglist = msg.split(' ')\n characters = []\n for octal in msglist:\n n = int(octal, base=8)\n characters.append(n)\n d_msg = ''\n for c in characters:\n d_msg += ''.join(chr(c))\n return d_msg\n except ValueError:\n print('Invalid octal-encoded message')", "def hexify(s):\n return (\"%02x\"*len(s)) % tuple(map(ord, s))", "def oct(self):\n assert self.width % 3 == 0\n width = (self.width // 3) + 2\n return format(self.val, '0=#{}o'.format(width))", "def oct2bin(x):\n return bin(int(x, 8))[2:]", "def print_as_hex(s):\n print(\":\".join(\"{0:x}\".format(ord(c)) for c in s))", "def hex_str(an_int):\n return '{0:#x}'.format(an_int)", "def f_hex_to_ascii(self):\n return self.input.decode('hex')", "def oct2dec(x):\n return int(x, 8)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the first identifier for the next month.
def _next_yymm_id(self, identifier: Identifier) -> Optional[Identifier]: next_yymm_id = None if identifier.year is not None and \ identifier.month is not None: new_year = identifier.year new_month = identifier.month + 1 new_num = 1 if new_month > 12: new_month = 1 new_year = new_year + 1 if identifier.is_old_id: next_yymm_id = '{}/{:02d}{:02d}{:03d}'.format( identifier.archive, new_year % 100, new_month, new_num) elif new_year >= 2015: next_yymm_id = '{:02d}{:02d}.{:05d}'.format( new_year % 100, new_month, new_num) else: next_yymm_id = '{:02d}{:02d}.{:04d}'.format( new_year % 100, new_month, new_num) try: return Identifier(arxiv_id=next_yymm_id) except IdentifierException: return None else: return None
[ "def _get_nextMonth(self):\n return self + (self.monthDays - self.day + 1)", "def next_month(self):\n year = self.year\n month = self.month\n if month == 12:\n year += 1\n month = 1\n else:\n month += 1\n return AgiloCalendar(month=month, year=year)", "def get_next_month(prev=False)->datetime.date:\n today = datetime.date.today()\n if not prev:\n first_day = today.replace(day=1, month=today.month+1) \n else:\n first_day = today.replace(day=1, month=today.month-1)\n return first_day", "def firstOfMonth(self):\n return datetime.date(self.year, self.month, 1)", "def getNextMonth(self, month, year):\n month = int(month)\n year = int(year)\n\n if month == 12:\n month, year = 1, year + 1\n else:\n month += 1\n\n return DateTime(year, month, 1)", "def get_next_month(date):\n original_month = date.month\n while date.month == original_month:\n date = date + timedelta(days=1)\n return date", "def next_month(year, month):\n if month < 12:\n month += 1\n else:\n month = 1\n year += 1\n return year, month", "def next_month(date):\n\n return date + datetime.timedelta(days=calendar.monthrange(date.year, date.month)[1])", "def showNextMonth(self):\n pass", "def _get_monthStart(self):\n return self + (1 - self.day) # Keep integer calculation combined by brackets", "def first_day_of_month():\n first_object = datetime.utcnow()\n first_string = first_object.strftime('%m/01/%Y')\n return first_string", "def _next_month(self):\n self._canvas.place_forget()\n\n year, month = self._date.year, self._date.month\n self._date = self._date + self.timedelta(\n days=calendar.monthrange(year, month)[1] + 1)\n self._date = self.datetime(self._date.year, self._date.month, 1)\n self._build_calendar() # reconstruct calendar", "def _next_month(self):\n self._canvas.place_forget()\n\n year, month = self._date.year, self._date.month\n self._date = self._date + self.timedelta(\n days=calendar.monthrange(year, month)[1] + 1)\n self._date = self.datetime(self._date.year, self._date.month, 1)\n self._build_calendar() # reconstruct calendar", "def next_identity(self) -> OrganisationId:\n ...", "def next_identity(self) -> PublicationId:\n ...", "def _next_month(self):\r\n self._canvas.place_forget()\r\n\r\n year, month = self._date.year, self._date.month\r\n self._date = self._date + self.timedelta(\r\n days=calendar.monthrange(year, month)[1] + 1)\r\n self._date = self.datetime(self._date.year, self._date.month, 1)\r\n self._build_calendar() # reconstruct calendar\r", "def __next_due_policy_date(self, last_sync_date):\n\n month = last_sync_date.month + 1\n\n if month > 12:\n return date(last_sync_date.year+1, month-12, 1)\n\n return date(last_sync_date.year, month, 1)", "def get_first_date(in_month=1):\n\n from_date = (today-relativedelta(months=in_month)).replace(day=1)\n \n return from_date", "def get_starting_day(self):\n while self.date < self.start_date:\n self.increment_month()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the previous consecutive Identifier relative to the provided Identifier.
def _previous_id(self, identifier: Identifier) -> Optional['Identifier']: previous_id = None if identifier.year is not None and \ identifier.month is not None and \ identifier.num is not None: new_year = identifier.year new_month = identifier.month new_num = identifier.num - 1 if new_num == 0: new_month = new_month - 1 if new_month == 0: new_month = 12 new_year = new_year - 1 if identifier.is_old_id: if new_num == 0: new_num = 999 previous_id = '{}/{:02d}{:02d}{:03d}'.format( identifier.archive, new_year % 100, new_month, new_num) else: if new_year >= 2015: if new_num == 0: new_num = 99999 previous_id = '{:02d}{:02d}.{:05d}'.format( new_year % 100, new_month, new_num) else: if new_num == 0: new_num = 9999 previous_id = '{:02d}{:02d}.{:04d}'.format( new_year % 100, new_month, new_num) try: return Identifier(arxiv_id=previous_id) except IdentifierException: return None else: return None
[ "def getPreviousElement(self,currentId):\n\tids = self.getObjectIds()\n\tpreviousId = None\n\tfor id in ids:\n\t if id == currentId:\n\t\treturn previousId\n\t else:\n\t\tpreviousId = id\n\treturn None", "def previous(self):\n if self.letter == ALPHABET[0]:\n return None\n i = ALPHABET.index(self.letter)\n return Letter(ALPHABET[i-1])", "def previous(self):\n return self.__entries[1:]", "def previous(self) -> Token:\n return self.tokens[self.current - 1]", "def _previous(self):\n return self.token_list[self._current - 1]", "def get_previous_snp(self):\n return SNP.get_by(id = self.previous_snp)", "def get_previous(self, filename: Path) -> Path:\n try:\n current_index = self.get_file_index(filename)\n\n logger.debug('Current index: %s', current_index)\n return self.file_list[current_index + 1]\n except IndexError:\n logger.debug('Current index not found.')\n # Check there are elements in the list\n if self.file_list:\n # The last item is the oldest\n return self.file_list[-1]\n # return entry for today\n return self.get_entry()", "def previous(self):\n\t\tbond = self.start().bond\n\t\tif bond is None:\n\t\t\treturn None\n\t\telse:\n\t\t\treturn Traversal(bond.segment, not bond.left)", "def previous_current_next_frame():\n current_frame = nuke.frame()\n previous_frame = current_frame - 1\n next_frame = current_frame + 1\n return previous_frame, current_frame, next_frame", "def get_previous_step(self):\n return self.get_step_by_index(-2)", "def prev(self):\n return self.from_date(self.date_a - datetime.timedelta(1))", "def get_previous(self):\n return self.previous", "def get_previous(self, known_node):\n return known_node.last_node", "def DecodePreviousInstruction(ea):\n insn = ida_ua.insn_t()\n prev_addr = ida_ua.decode_prev_insn(insn, ea)\n return insn if prev_addr != ida_idaapi.BADADDR else None", "def getPreviousLine(self, line):\n previous = sublime.Region(line.a - 1, line.a - 1)\n line = self.getLine(previous)\n\n if (line.a <= 0):\n return None\n\n return line", "def get_previous_line_number(self):\n if len(self.token_data) > 0 and self.pos > 1:\n return self.token_data[self.pos-2][2]\n return -1", "def prev(self):\n\n # prevent negative outcomes\n if self.index != 0:\n self.index -= 1\n\n return self.collection[self.index]", "def get_prev_seg(*args):\n return _ida_segment.get_prev_seg(*args)", "def previous_id(self):\n try:\n return Report.objects.filter(id__lt=self.id).order_by(\"-id\").first().id\n except Exception:\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse arXiv .abs file.
def parse_abs_file(filename: str) -> DocMetadata: try: with open(filename, mode='r', encoding='latin-1') as absf: raw = absf.read() except FileNotFoundError: raise AbsNotFoundException except UnicodeDecodeError as e: # TODO: log this raise AbsParsingException( f'Failed to decode .abs file "{filename}": {e}') # TODO: clean up modified = datetime.fromtimestamp( os.path.getmtime(filename), tz=gettz('US/Eastern')) modified = modified.astimezone(tz=tzutc()) # there are two main components to an .abs file that contain data, # but the split must always return four components components = RE_ABS_COMPONENTS.split(raw) if len(components) > 4: components = alt_component_split(components) if not len(components) == 4: raise AbsParsingException( 'Unexpected number of components parsed from .abs.') # everything else is in the second main component prehistory, misc_fields = re.split(r'\n\n', components[1]) fields: Dict[str, Any] = \ AbsMetaSession._parse_metadata_fields(key_value_block=misc_fields) # abstract is the first main component fields['abstract'] = components[2] id_match = RE_ARXIV_ID_FROM_PREHISTORY.match(prehistory) if not id_match: raise AbsParsingException( 'Could not extract arXiv ID from prehistory component.') arxiv_id = id_match.group('arxiv_id') prehistory = re.sub(r'^.*\n', '', prehistory) parsed_version_entries = re.split(r'\n', prehistory) # submitter data from_match = RE_FROM_FIELD.match(parsed_version_entries.pop(0)) if not from_match: raise AbsParsingException('Could not extract submitter data.') name = from_match.group('name') if name is not None: name = name.rstrip() email = from_match.group('email') # get the version history for this particular version of the document if not len(parsed_version_entries) >= 1: raise AbsParsingException('At least one version entry expected.') (version, version_history, arxiv_id_v) \ = AbsMetaSession._parse_version_entries( arxiv_id=arxiv_id, version_entry_list=parsed_version_entries) arxiv_identifier = Identifier(arxiv_id=arxiv_id) # named (key-value) fields if not all(rf in fields for rf in REQUIRED_FIELDS): raise AbsParsingException(f'missing required field(s)') # some transformations category_list: List[str] = [] primary_category = None if 'categories' in fields and fields['categories']: category_list = fields['categories'].split() if category_list[0] in taxonomy.CATEGORIES: primary_category = Category(category_list[0]) primary_archive = \ Archive( taxonomy.CATEGORIES[primary_category.id]['in_archive']) elif arxiv_identifier.is_old_id: primary_archive = Archive(arxiv_identifier.archive) elif arxiv_identifier.is_old_id: primary_archive = Archive(arxiv_identifier.archive) else: raise AbsException('Cannot infer archive from identifier.') doc_license: License = \ License() if 'license' not in fields else License( recorded_uri=fields['license']) raw_safe = re.sub(RE_FROM_FIELD, r'\g<from>\g<name>', raw, 1) return DocMetadata( raw_safe=raw_safe, arxiv_id=arxiv_id, arxiv_id_v=arxiv_id_v, arxiv_identifier=Identifier(arxiv_id=arxiv_id), title=fields['title'], abstract=fields['abstract'], authors=AuthorList(fields['authors']), submitter=Submitter(name=name, email=email), categories=fields['categories'] if 'categories' in fields else None, primary_category=primary_category, primary_archive=primary_archive, primary_group=Group( taxonomy.ARCHIVES[primary_archive.id]['in_group']), secondary_categories=[ Category(x) for x in category_list[1:] if (category_list and len(category_list) > 1) ], journal_ref=None if 'journal_ref' not in fields else 
fields['journal_ref'], report_num=None if 'report_num' not in fields else fields['report_num'], doi=None if 'doi' not in fields else fields['doi'], acm_class=None if 'acm_class' not in fields else fields['acm_class'], msc_class=None if 'msc_class' not in fields else fields['msc_class'], proxy=None if 'proxy' not in fields else fields['proxy'], comments=fields['comments'] if 'comments' in fields else None, version=version, license=doc_license, version_history=version_history, modified=modified # private=private # TODO, not implemented )
[ "def load(filename):\n o = open(filename)\n s = o.read()\n a = ArffFile.parse(s)\n o.close()\n return a", "def parser(path):\n\t\n\tdata = Arff()\n\tdata.read_arff(path)\n\t\n\treturn data", "def read_abinit(filename='abinit.in'):\n\n from ase import Atoms, units\n\n if isinstance(filename, str):\n f = open(filename)\n else: # Assume it's a file-like object\n f = filename\n\n lines = f.readlines()\n if type(filename) == str:\n f.close()\n\n full_file = ''\n for line in lines:\n if '#' in line:\n meat, comment = line.split('#')\n else:\n meat = line\n full_file = full_file + meat + ' '\n\n full_file.strip()\n tokens = full_file.lower().split()\n\n # note that the file can not be scanned sequentially\n\n index = tokens.index(\"acell\")\n unit = 1.0\n if(tokens[index+4].lower()[:3] != 'ang'):\n unit = units.Bohr\n acell = [unit*float(tokens[index+1]),\n unit*float(tokens[index+2]),\n unit*float(tokens[index+3])]\n\n index = tokens.index(\"natom\")\n natom = int(tokens[index+1])\n\n index = tokens.index(\"ntypat\")\n ntypat = int(tokens[index+1])\n\n index = tokens.index(\"typat\")\n typat = []\n for i in range(natom):\n typat.append(int(tokens[index+1+i]))\n\n index = tokens.index(\"znucl\")\n znucl = []\n for i in range(ntypat):\n znucl.append(int(tokens[index+1+i]))\n\n index = tokens.index(\"rprim\")\n rprim = []\n for i in range(3):\n rprim.append([acell[i]*float(tokens[index+3*i+1]),\n acell[i]*float(tokens[index+3*i+2]),\n acell[i]*float(tokens[index+3*i+3])])\n\n # create a list with the atomic numbers\n numbers = []\n for i in range(natom):\n ii = typat[i] - 1\n numbers.append(znucl[ii])\n\n # now the positions of the atoms\n if \"xred\" in tokens:\n index = tokens.index(\"xred\")\n xred = []\n for i in range(natom):\n xred.append([float(tokens[index+3*i+1]),\n float(tokens[index+3*i+2]),\n float(tokens[index+3*i+3])])\n atoms = Atoms(cell=rprim, scaled_positions=xred, numbers=numbers,\n pbc=True)\n else:\n if \"xcart\" in tokens:\n index = tokens.index(\"xcart\")\n unit = units.Bohr\n elif \"xangst\" in tokens:\n unit = 1.0\n index = tokens.index(\"xangst\")\n else:\n raise IOError(\n \"No xred, xcart, or xangs keyword in abinit input file\")\n\n xangs = []\n for i in range(natom):\n xangs.append([unit*float(tokens[index+3*i+1]),\n unit*float(tokens[index+3*i+2]),\n unit*float(tokens[index+3*i+3])])\n atoms = Atoms(cell=rprim, positions=xangs, numbers=numbers, pbc=True)\n \n try:\n i = tokens.index('nsppol')\n except ValueError:\n nsppol = None\n else:\n nsppol = int(tokens[i + 1])\n\n if nsppol == 2:\n index = tokens.index('spinat')\n magmoms = [float(tokens[index + 3 * i + 3]) for i in range(natom)]\n atoms.set_initial_magnetic_moments(magmoms)\n\n return atoms", "def test_parse_xyz_from_file(self):\n path1 = os.path.join(ARC_PATH, 'arc', 'testing', 'xyz', 'CH3C(O)O.gjf')\n path2 = os.path.join(ARC_PATH, 'arc', 'testing', 'xyz', 'CH3C(O)O.xyz')\n path3 = os.path.join(ARC_PATH, 'arc', 'testing', 'xyz', 'AIBN.gjf')\n path4 = os.path.join(ARC_PATH, 'arc', 'testing', 'xyz', 'molpro.in')\n path5 = os.path.join(ARC_PATH, 'arc', 'testing', 'xyz', 'qchem.in')\n path6 = os.path.join(ARC_PATH, 'arc', 'testing', 'xyz', 'qchem_output.out')\n path7 = os.path.join(ARC_PATH, 'arc', 'testing', 'xyz', 'TS.gjf')\n path8 = os.path.join(ARC_PATH, 'arc', 'testing', 'xyz', 'formaldehyde_coords.xyz')\n path9 = os.path.join(ARC_PATH, 'arc', 'testing', 'xyz', 'optim_traj_terachem.xyz') # test trajectories\n path10 = os.path.join(ARC_PATH, 'arc', 'testing', 'xyz', 'ethane_minimize_terachem_output.out')\n 
path11 = os.path.join(ARC_PATH, 'arc', 'testing', 'orca_example_opt.log')\n path12 = os.path.join(ARC_PATH, 'arc', 'testing', 'tani_output.yml')\n\n xyz1 = parser.parse_xyz_from_file(path1)\n xyz2 = parser.parse_xyz_from_file(path2)\n xyz3 = parser.parse_xyz_from_file(path3)\n xyz4 = parser.parse_xyz_from_file(path4)\n xyz5 = parser.parse_xyz_from_file(path5)\n xyz6 = parser.parse_xyz_from_file(path6)\n xyz7 = parser.parse_xyz_from_file(path7)\n xyz8 = parser.parse_xyz_from_file(path8)\n xyz9 = parser.parse_xyz_from_file(path9)\n xyz10 = parser.parse_xyz_from_file(path10)\n xyz11 = parser.parse_xyz_from_file(path11)\n xyz12 = parser.parse_xyz_from_file(path12)\n\n self.assertEqual(xyz1, xyz2)\n xyz1_str = xyz_to_str(xyz1)\n xyz2_str = xyz_to_str(xyz2)\n xyz3_str = xyz_to_str(xyz3)\n xyz4_str = xyz_to_str(xyz4)\n xyz5_str = xyz_to_str(xyz5)\n xyz6_str = xyz_to_str(xyz6)\n xyz9_str = xyz_to_str(xyz9)\n xyz11_str = xyz_to_str(xyz11)\n\n self.assertTrue('C 1.40511900 0.21728200 0.07675200' in xyz1_str)\n self.assertTrue('O -0.79314200 1.04818800 0.18134200' in xyz1_str)\n self.assertTrue('H -0.43701200 -1.34990600 0.92900600' in xyz2_str)\n self.assertTrue('C 2.12217963 -0.66843078 1.04808732' in xyz3_str)\n self.assertTrue('N 2.41731872 -1.07916417 2.08039935' in xyz3_str)\n spc3 = ARCSpecies(label='AIBN', xyz=xyz3)\n self.assertEqual(len(spc3.mol.atoms), 24)\n self.assertTrue('S -0.42046822 -0.39099498 0.02453521' in xyz4_str)\n self.assertTrue('N -1.99742564 0.38106573 0.09139807' in xyz5_str)\n self.assertTrue('N -1.17538406 0.34366165 0.03265021' in xyz6_str)\n self.assertEqual(len(xyz7['symbols']), 34)\n self.assertEqual(len(xyz8['symbols']), 4)\n expected_xyz_9 = \"\"\"N -0.67665958 0.74524340 -0.41319355\nH -1.26179357 1.52577220 -0.13687665\nH 0.28392722 1.06723640 -0.44163375\nN -0.75345799 -0.33268278 0.51180786\nH -0.97153041 -0.02416219 1.45398654\nH -1.48669570 -0.95874053 0.20627423\nN 2.28178508 -0.42455356 0.14404399\nH 1.32677989 -0.80557411 0.33156013\"\"\"\n self.assertEqual(xyz9_str, expected_xyz_9)\n self.assertIsNone(xyz10)\n expected_xyz_11 = \"\"\"C 0.00917900 -0.00000000 -0.00000000\nO 1.20814900 -0.00000000 0.00000000\nH -0.59436200 0.94730400 0.00000000\nH -0.59436200 -0.94730400 0.00000000\"\"\"\n self.assertEqual(xyz11_str, expected_xyz_11)\n expected_xyz_12 = \"\"\"\nC 0.76543810 1.12187162 0.30492610\nC 1.35782656 -0.27242561 0.13987256\nO 0.40260198 -1.25859876 0.48175081\nC -0.76543825 -1.12187192 -0.30492599\nC -1.35782634 0.27242561 -0.13987266\nO -0.40260197 1.25859858 -0.48175076\nH 1.46909034 1.88883246 -0.03480069\nH 0.53541546 1.29972688 1.36777761\nH 1.69381294 -0.40788846 -0.90078084\nH 2.21458405 -0.41511654 0.80648738\nH -1.46909026 -1.88883253 0.03480063\nH -0.53541537 -1.29972706 -1.36777773\nH -2.21458420 0.41511639 -0.80648746\nH -1.69381305 0.40788834 0.90078104\"\"\"\n self.assertTrue(almost_equal_coords(xyz12, str_to_xyz(expected_xyz_12)))", "def no_pyIdlak_parse_arkfile(fname):\n ark = collections.OrderedDict()\n arkfile = open(fname).read()\n\n if arkfile.find('[') == -1:\n # Vector of vectors version\n try:\n vector_id = False\n for vector in arkfile.split(';'):\n values = vector.strip().split()\n if not values:\n continue\n try:\n float(values[0])\n except ValueError:\n # switching to new ID\n vector_id = values[0]\n ark[vector_id] = [[]]\n else:\n # switching to next vector\n ark[vector_id].append([float(values[0])])\n\n if len(values) > 1:\n for v in values[1:]:\n try:\n float(v)\n except ValueError:\n vector_id = values[0]\n 
ark[vector_id] = [[]]\n else:\n ark[vector_id][-1].append(float(v))\n except KeyError:\n raise IOError('Ark is not correctly formated')\n\n else:\n # Matrix version\n repat = re.compile('(?P<id>[a-zA-Z0-9]+)\\s*\\[(?P<mat>.*?)\\]\\s*', re.S)\n for m in re.finditer(repat, arkfile):\n ark[m.group('id')] = [\n list(map(float, s.split())) for s in m.group('mat').split('\\n') if len(s.strip())\n ]\n if not ark:\n raise IOError('Ark file is empty')\n return ark", "def atmparamread(filename):\n f = open(filename, 'r')\n f.readline()\n line = f.readline()\n #Td = float(line.split()[0])\n #Pd = float(line.split()[1])\n #Mc = float(line.split()[2])\n #rc = float(line.split()[3])\n n = int(line.split()[0])\n f.readline()\n atm = 0*numpy.ndarray(shape=(n, ncol), dtype=float)\n S = 0*numpy.ndarray(shape=(n), dtype=float)\n for i in range(n):\n line = f.readline()\n S[i] = float(line.split()[0])\n for j in range(ncol ):\n atm[i, j] = float(line.split()[j+1])\n f.close()\n return atm, S", "def parse(self):\n try:\n self.validate()\n except Exception as e:\n raise AssetmapError(e)\n\n tree = ET.parse(self.path)\n root = tree.getroot()\n # ElementTree prepends the namespace to all elements, so we need to extract\n # it so that we can perform sensible searching on elements.\n assetmap_ns = get_namespace(root.tag)\n\n self.id = get_element_text(root, \"Id\", assetmap_ns).split(\":\")[2]\n self.annotation_text = get_element_text(root, \"AnnotationText\", assetmap_ns)\n self.volume_count = int(get_element_text(root, \"VolumeCount\", assetmap_ns))\n self.issue_date = parse_date(get_element_text(root, \"IssueDate\", assetmap_ns))\n self.issuer = get_element_text(root, \"Issuer\", assetmap_ns)\n self.creator = get_element_text(root, \"Creator\", assetmap_ns)\n\n asset_list = get_element(root, \"AssetList\", assetmap_ns)\n # Get the data from the ASSETMAP file\n for asset in asset_list.getchildren():\n asset_id = get_element_text(asset, \"Id\", assetmap_ns).split(\":\")[2]\n for chunklist in get_element_iterator(asset, \"ChunkList\", assetmap_ns):\n \"\"\"\n The code below assumes that there will only ever be one chunk in a chunklist. 
Chunking is\n used to split files up into smaller parts, usually in order to provide compatability with older\n filesystems, which is not applicable for our uses.\n \"\"\"\n for chunk in chunklist.getchildren():\n v = get_element_text(chunk, \"VolumeIndex\", assetmap_ns)\n o = get_element_text(chunk, \"Offset\", assetmap_ns)\n l = get_element_text(chunk, \"Length\", assetmap_ns)\n\n a = {\n \"path\": get_element_text(chunk, \"Path\", assetmap_ns),\n \"volume_index\": int(v) if v is not None else v,\n \"offset\": int(o) if o is not None else o,\n \"length\": int(l) if l is not None else l\n }\n\n self.assets[asset_id] = AssetData(**a)", "def _read_arf(file):\n with fits.open(file) as hdul:\n data = hdul[1].data\n\n return data['energ_lo'], data['energ_hi'], data['specresp']", "def parseFile(file):\n c = ARMv8Core()\n return coreParseFile(c, file)", "def _parse(filename: str, rec: Type[Record], quiet: bool = True):\n if not os.path.exists(filename):\n from urllib.request import urlretrieve\n\n url = (\n \"ftp://ftp.genome.jp/pub/db/community/aaindex/\" + os.path.split(filename)[1]\n )\n logger.debug(f'Downloading \"{url}\"')\n filename = urlretrieve(url, filename)[0]\n logger.debug(f'Saved to \"{filename}\"')\n f = open(filename)\n\n current = rec()\n lastkey = None\n for line in f:\n key = line[0:2]\n if key[0] == \" \":\n key = lastkey # type: ignore\n if key == \"//\":\n _aaindex[current.key] = current\n current = rec()\n elif key == \"H \":\n current.key = line[2:].strip()\n elif key == \"R \":\n current.ref += line[2:]\n elif key == \"D \":\n current.desc += line[2:]\n elif key == \"A \":\n current.authors += line[2:]\n elif key == \"T \":\n current.title += line[2:]\n elif key == \"J \":\n current.journal += line[2:]\n elif key == \"* \":\n current.comment += line[2:]\n elif key == \"C \":\n a = line[2:].split()\n for i in range(0, len(a), 2):\n current.correlated[a[i]] = float(a[i + 1])\n elif key == \"I \":\n a = line[1:].split()\n if a[0] != \"A/L\":\n current.extend([_float_or_None(el) for el in a])\n elif list(Record.aakeys) != [i[0] for i in a] + [i[-1] for i in a]:\n print(\"Warning: wrong amino acid sequence for\", current.key)\n else:\n try:\n assert list(Record.aakeys[:10]) == [i[0] for i in a]\n assert list(Record.aakeys[10:]) == [i[2] for i in a]\n except Exception as e:\n logger.debug(e)\n print(\"Warning: wrong amino acid sequence for\", current.key)\n elif key == \"M \":\n current = cast(MatrixRecord, current) # TODO: is this guaranteed?\n a = line[2:].split()\n if a[0] == \"rows\":\n if a[4] == \"rows\":\n a.pop(4)\n assert a[3] == \"cols\" and len(a) == 6\n i = 0\n for aa in a[2]:\n current.rows[aa] = i\n i += 1\n i = 0\n for aa in a[5]:\n current.cols[aa] = i\n i += 1\n else:\n current.extend([_float_or_None(el) for el in a])\n elif not quiet:\n print('Warning: line starts with \"%s\"' % (key))\n lastkey = key\n f.close()", "def parse_elamx_file(filename):\n laminate_list = []\n lamina_dict = {}\n tree = ET.parse(filename)\n root = tree.getroot()\n laminates = root.find('laminates') # laminates in the file\n materials = root.find('materials') # laminae in the file\n for material in materials:\n lamina_dict[material.attrib['uuid']] = parse_elamx_material(material)\n for laminate in laminates:\n laminate_list.append(parse_elamx_laminate(laminate, lamina_dict))\n return laminate_list", "def parse_raw(self, filename):\n raise NotImplementedError", "def extractAnimationsFromXar( strFilename ):\n print( \"INF: extractAnimationFromXar: parsing '%s'\" % strFilename );\n 
allAnims = dict();\n xar = xml.dom.minidom.parse( strFilename );\n choregrapheNode = xar.childNodes[0]; # first is \"ChoregrapheProject\"\n strXarVersion = choregrapheNode.getAttribute( \"xar_version\" );\n print( \"strXarVersion: %s\" % strXarVersion );\n #~ print( domNodeToString( choregrapheNode ) );\n # look for root box\n for node in choregrapheNode.childNodes:\n if( node.nodeType != xml.dom.minidom.Node.TEXT_NODE and node.hasAttribute( \"name\" ) ):\n if( node.getAttribute( \"name\" ) == \"root\" ):\n break;\n else:\n return False;\n rootNode = node;\n #~ print( domNodeToString( rootNode ) );\n listNodesBox = findElementByName( rootNode, \"Box\" ); # find all elements with a specific name, and return them in an array\n print( \"listNodesBox found: %d\" % len( listNodesBox ) );\n #~ print( domNodeToString( listNodesBox[8] ) );\n for node in listNodesBox:\n strAnimationName = node.getAttribute( \"name\" );\n strAnimationName = strAnimationName.replace( \" \", \"_\" );\n listTimeline = findElementByName( node, \"Timeline\" );\n #~ print( domNodeToString( listTimeline[0] ) );\n listNames = [];\n listTimes = [];\n listPositions = []; \n for timeline in listTimeline:\n if( len(listTimeline) > 1 ):\n print( \"ERR: more than one timeline in a box: not handled case! (strAnimationName:%s)\" % strAnimationName );\n return;\n #~ print( str( timeline.attributes ) );\n #~ print( domNodeToString( timeline ) );\n #~ print( domAttributesToString( timeline ) );\n nFps = int( timeline.getAttribute( \"fps\" ) );\n #~ print( \"fps: %d\" % nFps );\n listActuator = findElementByName( timeline, \"ActuatorCurve\" );\n for actuator in listActuator:\n strActuatorName = str(actuator.getAttribute( \"actuator\" )); # str => remove unicode\n listNames.append( strActuatorName );\n listKey = findElementByName( actuator, \"Key\" );\n keyTimes = [];\n keyPositions = [];\n if( len(listKey) < 1 ):\n print( \"WRN: extractAnimationFromXar: in the box %s, the joint %s is used but no keys are defined for it, removing it from the used joint list...\" % ( strAnimationName, strActuatorName ) );\n del listNames[-1];\n continue;\n for key in listKey:\n rKeyNumber = float( key.getAttribute( \"frame\" ) );\n rKeyVal = float( key.getAttribute( \"value\" ) ) * math.pi/180;\n keyTimes.append( rKeyNumber / nFps );\n listTangent = findElementByName( actuator, \"Tangent\" );\n if( len( listTangent ) == 0 ):\n keyPositions.append( rKeyVal ); # no splines there\n else:\n keyPositions.append( [rKeyVal] ); # prepare for appending spline info\n for tangent in listTangent:\n #~ print( domAttributesToString( tangent ) );\n strInterpType=tangent.getAttribute( \"interpType\" );\n strSide=tangent.getAttribute( \"strSide\" );\n rAbscissaParam=float( tangent.getAttribute( \"abscissaParam\" ) )/nFps;\n rOrdinateParam=float( tangent.getAttribute( \"ordinateParam\" ) ) * math.pi/180;\n if( strInterpType == \"linear\" ):\n keyPositions[-1].append( [1,rAbscissaParam,rOrdinateParam] ); # todo, validate parameters!\n elif( strInterpType == \"bezier\" ):\n keyPositions[-1].append( [2,rAbscissaParam,rOrdinateParam] ); # todo, validate parameters!\n else:\n print( \"ERR: extractAnimationFromXar: this type isn't handled: '%s'\" % strInterpType );\n listTimes.append( keyTimes );\n listPositions.append( keyPositions );\n # for actuator\n allAnims[strAnimationName] = [listNames,listTimes,listPositions];\n # for timeline \n # for node\n print( \"INF: extractAnimationFromXar: exiting with %d anim(s)\" % len(allAnims) );\n return allAnims;", "def parse_file(self, 
filename):\n with open(filename, 'rb') as file:\n self.parse(SWFStream(file))", "def parse(self, infile):\r\n raise NotImplementedError()", "def parse_xml(filename):\n with open(filename, 'r') as f:\n tree = ET.parse(filename)\n root = tree.getroot()\n\n vza = []\n vaa = []\n for child in root:\n for x in child.findall(\"Tile_Angles\"):\n for y in x.find(\"Mean_Sun_Angle\"):\n if y.tag == \"ZENITH_ANGLE\":\n sza = float(y.text)\n elif y.tag == \"AZIMUTH_ANGLE\":\n saa = float(y.text)\n for s in x.find(\"Mean_Viewing_Incidence_Angle_List\"):\n for r in s:\n if r.tag == \"ZENITH_ANGLE\":\n vza.append(float(r.text))\n \n elif r.tag == \"AZIMUTH_ANGLE\":\n vaa.append(float(r.text))\n \n return sza, saa, np.mean(vza), np.mean(vaa)", "def atmprofileread(filename):\n f = open(filename, 'r')\n line1 = f.readline()\n Nst = int(line1.split()[-1])\n line = f.readline()\n Np = int(line.split()[1])\n atm = 0*numpy.ndarray(shape=(Nst, Np, 5), dtype=float)\n S = 0*numpy.ndarray(shape=(Nst), dtype=float)\n f = open(filename, 'r')\n f.readline()\n for i in range(Nst):\n line = f.readline()\n S[i] = float(line.split()[0])\n for j in range(Np):\n line = f.readline()\n for k in range(numpy.shape(atm)[-1]):\n atm[i, j, k] = float(line.split()[k])\n f.close()\n return atm, S", "def parse_file(axmlfile, **kwargs):\n adm = ADM()\n from .common_definitions import load_common_definitions\n load_common_definitions(adm)\n load_axml_file(adm, axmlfile, **kwargs)\n return adm", "def parse_exiobase22(path, charact = None, iosystem = None, \n version = 'exiobase 2.2', popvector = 'exio2' ):\n path = path.rstrip('\\\\')\n path = os.path.abspath(path)\n\n # standard file names in exiobase\n files_exio = dict(\n\n # exiobase 2.2\n Z = 'mrIot_version2.2.0.txt',\n Y = 'mrFinalDemand_version2.2.0.txt',\n F_fac = 'mrFactorInputs_version2.2.0.txt',\n F_emissions = 'mrEmissions_version2.2.0.txt',\n F_materials = 'mrMaterials_version2.2.0.txt',\n F_resources = 'mrResources_version2.2.0.txt',\n FY_emissions = 'mrFDEmissions_version2.2.0.txt',\n FY_materials = 'mrFDMaterials_version2.2.0.txt',\n\n # old exiobase 2.1 filenames\n #Z = 'mrIot.txt',\n #Y = 'mrFinalDemand.txt',\n #F_fac = 'mrFactorInputs.txt',\n #F_emissions = 'mrEmissions.txt',\n #F_materials = 'mrMaterials.txt',\n #F_resources = 'mrResources.txt',\n #FY_emissions = 'mrFDEmissions.txt',\n #FY_materials = 'mrFDMaterials.txt',\n )\n\n # check if source exiobase is complete\n _intersect = [val for val in files_exio.values() \n if val in os.listdir(path)]\n if len(_intersect) != len(files_exio.values()):\n raise pymrio.core.EXIOError('EXIOBASE files missing')\n\n # number of row and column headers in EXIOBASE \n head_col = dict()\n head_row = dict()\n head_col['Z'] = 3 # number of cols containing row headers at the beginning\n head_row['Z'] = 2 # number of rows containing col headers at the top\n head_col['Y'] = 3\n head_row['Y'] = 2\n head_col['F_fac'] = 2\n head_row['F_fac'] = 2\n head_col['F_emissions'] = 3\n head_row['F_emissions'] = 2\n head_col['F_materials'] = 2\n head_row['F_materials'] = 2\n head_col['F_resources'] = 3\n head_row['F_resources'] = 2\n head_col['FY_emissions'] = 3\n head_row['FY_emissions'] = 2\n head_col['FY_materials'] = 2\n head_row['FY_materials'] = 2\n\n # read the data into pandas\n logging.info('Read exiobase2 from {}'.format(path))\n data = {key:pd.read_table(os.path.join(path, files_exio[key]), \n index_col = list(range(head_col[key])), \n header = list(range(head_row[key]))) \n for key in files_exio}\n \n # refine multiindex and save 
units\n data['Z'].index.names = ['region', 'sector', 'unit']\n data['Z'].columns.names = ['region', 'sector']\n data['unit'] = pd.DataFrame(data['Z'].iloc[:, 0].\n reset_index(level='unit').unit)\n data['Z'].reset_index(level='unit', drop=True, inplace=True)\n data['Y'].index.names = ['region', 'sector', 'unit']\n data['Y'].columns.names = ['region', 'category']\n data['Y'].reset_index(level='unit', drop=True, inplace=True)\n ext_unit = dict()\n for key in ['F_fac', 'F_emissions', 'F_materials', \n 'F_resources', 'FY_emissions', 'FY_materials']:\n if head_col[key] == 3:\n data[key].index.names = ['stressor', 'compartment', 'unit']\n if head_col[key] == 2:\n data[key].index.names = ['stressor', 'unit']\n if 'FY' in key:\n data[key].columns.names = ['region', 'category']\n data[key].reset_index(level='unit', drop=True, inplace=True)\n else:\n data[key].columns.names = ['region', 'sector']\n ext_unit[key] = pd.DataFrame(data[key].iloc[:, 0].\n reset_index(level='unit').unit)\n data[key].reset_index(level='unit', drop=True, inplace=True)\n if key is 'F_resources': \n data[key].reset_index(level='compartment', \n drop=True, inplace=True)\n ext_unit[key].reset_index(level='compartment', \n drop=True, inplace=True)\n\n # build the extensions\n ext=dict()\n ext['factor_inputs'] = {'F':data['F_fac'], \n 'unit':ext_unit['F_fac'], 'name':'factor input'}\n ext['emissions'] = {'F':data['F_emissions'], 'FY':data['FY_emissions'], \n 'unit':ext_unit['F_emissions'], 'name':'emissons'}\n ext['materials'] = {'F':data['F_materials'], 'FY':data['FY_materials'], \n 'unit':ext_unit['F_materials'], \n 'name':'material extraction'}\n ext['resources'] = {'F':data['F_resources'], \n 'unit':ext_unit['F_resources'], 'name':'resources'}\n\n # read the characterisation matrices if available\n # and build one extension with the impacts\n if charact:\n # dict with correspondence to the extensions\n Qsheets = {'Q_factorinputs':'factor_inputs', \n 'Q_emission':'emissions', \n 'Q_materials':'materials', \n 'Q_resources':'resources'}\n Q_head_col = dict()\n Q_head_row = dict()\n Q_head_col_rowname = dict()\n Q_head_col_rowunit= dict()\n Q_head_col_metadata= dict()\n # number of cols containing row headers at the beginning\n Q_head_col['Q_emission'] = 4 \n # number of rows containing col headers at the top - this will be\n # skipped\n Q_head_row['Q_emission'] = 3 \n # assuming the same classification as in the extensions\n Q_head_col['Q_factorinputs'] = 2 \n Q_head_row['Q_factorinputs'] = 2 \n Q_head_col['Q_resources'] = 2 \n Q_head_row['Q_resources'] = 3 \n Q_head_col['Q_materials'] = 2 \n Q_head_row['Q_materials'] = 2\n\n # column to use as name for the rows\n Q_head_col_rowname['Q_emission'] = 1 \n Q_head_col_rowname['Q_factorinputs'] = 0 \n Q_head_col_rowname['Q_resources'] = 0 \n Q_head_col_rowname['Q_materials'] = 0 \n\n # column to use as unit for the rows which gives also the last column\n # before the data\n Q_head_col_rowunit['Q_emission'] = 3 \n Q_head_col_rowunit['Q_factorinputs'] = 1\n Q_head_col_rowunit['Q_resources'] = 1 \n Q_head_col_rowunit['Q_materials'] = 1 \n\n charac_data = {Qname:pd.read_excel(charact, \n sheetname = Qname, \n skiprows = list(range(0, Q_head_row[Qname])), \n header=None) \n for Qname in Qsheets} \n\n _units = dict()\n # temp for the calculated impacts which than \n # get summarized in the 'impact'\n _impact = dict() \n impact = dict()\n for Qname in Qsheets:\n # unfortunately the names in Q_emissions are \n # not completely unique - fix that\n _index = 
charac_data[Qname][Q_head_col_rowname[Qname]]\n if Qname is 'Q_emission':\n _index[42] = _index[42] + ' 2008'\n _index[43] = _index[43] + ' 2008'\n _index[44] = _index[44] + ' 2010'\n _index[45] = _index[45] + ' 2010'\n charac_data[Qname].index = (\n charac_data[Qname][Q_head_col_rowname[Qname]])\n\n _units[Qname] = pd.DataFrame(\n charac_data[Qname].iloc[:, Q_head_col_rowunit[Qname]])\n _units[Qname].columns = ['unit']\n _units[Qname].index.name = 'impact'\n charac_data[Qname] = charac_data[Qname].ix[:, \n Q_head_col_rowunit[Qname]+1:]\n charac_data[Qname].index.name = 'impact'\n \n if 'FY' in ext[Qsheets[Qname]]:\n _FY = ext[Qsheets[Qname]]['FY'].values\n else:\n _FY = np.zeros([ext[Qsheets[Qname]]['F'].shape[0], \n data['Y'].shape[1]])\n _impact[Qname] = {'F':charac_data[Qname].dot(\n ext[Qsheets[Qname]]['F'].values),\n 'FY':charac_data[Qname].dot(_FY),\n 'unit':_units[Qname]\n }\n\n impact['F'] = (_impact['Q_factorinputs']['F']\n .append(_impact['Q_emission']['F'])\n .append(_impact['Q_materials']['F'])\n .append(_impact['Q_resources']['F']))\n impact['FY'] = (_impact['Q_factorinputs']['FY']\n .append(_impact['Q_emission']['FY'])\n .append(_impact['Q_materials']['FY'])\n .append(_impact['Q_resources']['FY']))\n impact['F'].columns = ext['emissions']['F'].columns \n impact['FY'].columns = ext['emissions']['FY'].columns \n impact['unit'] = (_impact['Q_factorinputs']['unit']\n .append(_impact['Q_emission']['unit'])\n .append(_impact['Q_materials']['unit'])\n .append(_impact['Q_resources']['unit']))\n impact['name'] = 'impact'\n ext['impacts'] = impact\n \n if popvector is 'exio2':\n popdata = pd.read_table(os.path.join(PYMRIO_PATH['exio20'], \n './misc/population.txt'), \n index_col=0).astype(float)\n else:\n popdata = popvector\n\n return IOSystem( Z = data['Z'], Y = data['Y'], unit = data['unit'], population = popdata, **ext)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a specific version of a paper's abstract metadata.
def _get_version(self, identifier: Identifier, version: Optional[int] = None) -> DocMetadata: parent_path = self._get_parent_path(identifier=identifier, version=version) path = os.path.join(parent_path, (f'{identifier.filename}.abs' if not version else f'{identifier.filename}v{version}.abs')) return self.parse_abs_file(filename=path)
[ "def get_abstract(paper):\n link = paper[\"link\"]\n try:\n abstract = (\n BeautifulSoup(requests.get(link).text, features=\"html.parser\")\n .find(\"div\", {\"id\": \"Abs1-content\"})\n .text\n )\n paper[\"abstract\"] = abstract\n except Exception as E:\n print(E, \" - \", paper[\"title\"])\n return paper", "def extract_meta_from_remote(paper_id):\n # $ Query Paper\n paper = arxiv.query(id_list=[paper_id])[0]\n # $ Set the Arxiv Object to ensure Proper extraction\n return ArxivIdentity.from_arxiv_response(paper),paper", "def get_version_meta(version, verbose):\n if version == \"20w14~\":\n # April fools snapshot, labeled 20w14~ ingame but 20w14infinite in the launcher\n version = \"20w14infinite\"\n\n if version in _cached_version_metas:\n return _cached_version_metas[version]\n\n version_manifest = get_version_manifest()\n for version_info in version_manifest[\"versions\"]:\n if version_info[\"id\"] == version:\n address = version_info[\"url\"]\n break\n else:\n if verbose:\n print(\"Failed to find %s in the main version manifest; using legacy site\" % version)\n address = LEGACY_VERSION_META % {'version': version}\n if verbose:\n print(\"Loading version manifest for %s from %s\" % (version, address))\n meta = _load_json(address)\n\n _cached_version_metas[version] = meta\n return meta", "def _get_version(name):\n from mne.datasets._fetch import fetch_dataset\n\n if not has_dataset(name):\n return None\n dataset_params = MNE_DATASETS[name]\n dataset_params[\"dataset_name\"] = name\n config_key = MNE_DATASETS[name][\"config_key\"]\n\n # get download path for specific dataset\n path = _get_path(path=None, key=config_key, name=name)\n\n return fetch_dataset(dataset_params, path=path, return_version=True)[1]", "def query_metadata(paper_id):\n dynamodb = boto3.client('dynamodb', region_name=MAIN_TABLE_ARN.region)\n key = {\n 'Partition': {\n 'S': 'metadata:%s' % paper_id\n }\n }\n response = dynamodb.get_item(\n TableName=MAIN_TABLE_ARN.resource,\n Key=key)\n return parse_metadata(response['Item'])", "def get_paper_abstract(tree):\n\tpath = '//h2[text() = \"Abstract\"]/following-sibling::p/text()'\n\tabstract = tree.xpath(path)\n\t# If paper page contains the abstract, xpath returns a list with single string element\n\t# Access list to get the abstract string to return\n\tif abstract and abstract[0] != \"No abstract available.\":\n\t\tabstract = abstract[0]\n\t\n\treturn abstract", "def GetMetadata(self):\n return self.dict['meta']", "def Abstract(self, default=None):\n tmp = self.data.get('metadata', {}).get('abstracts', [{}])[0]\n return tmp.get('value', default)", "def readMetadata(repo, version):\n if repo.type == 'android':\n command = ['hg', '-R', repo.repository, 'id', '-r', version, '-n']\n result = subprocess.check_output(command)\n revision = re.sub(r'\\D', '', result)\n\n return {\n 'revision': revision,\n 'version': version,\n 'minSdkVersion': get_min_sdk_version(repo, version),\n 'basename': os.path.basename(repo.repository),\n }\n elif repo.type == 'safari':\n metadata = repo.readMetadata(version)\n certs = read_certificates_and_key(repo.keyFile)[0]\n\n return {\n 'certificateID': get_developer_identifier(certs),\n 'version': version,\n 'shortVersion': version,\n 'basename': metadata.get('general', 'basename'),\n 'updatedFromGallery': True,\n }\n elif repo.type == 'ie':\n return {\n 'version': version,\n 'basename': os.path.basename(repo.repository),\n }\n else:\n raise Exception('unknown repository type %r' % repo.type)", "def get_abstract(doi):\n xml = download_article(doi)\n 
et = ET.fromstring(xml)\n coredata = et.find('article:coredata', elsevier_ns)\n abstract = coredata.find('dc:description', elsevier_ns)\n abs_text = abstract.text\n return abs_text", "def _get_paper(paper_entry: dict, publication: Publication) -> Paper:\n\n paper_title = paper_entry.get('title', None)\n\n if paper_title is None or len(paper_title) == 0:\n return None\n\n paper_publication_date = paper_entry.get('publication_date', None)\n paper_doi = paper_entry.get('doi', None)\n paper_citations = paper_entry.get('citing_paper_count', None)\n paper_abstract = paper_entry.get('abstract', None)\n paper_urls = {paper_entry.get('pdf_url')}\n paper_pages = None\n paper_number_of_pages = None\n\n try:\n paper_keywords = set([ x.strip() for x in paper_entry.get(\n 'index_terms').get('author_terms').get('terms')])\n except Exception as e:\n paper_keywords = set()\n\n if paper_publication_date is not None:\n try:\n paper_publication_date_split = paper_publication_date.split(' ')\n day = int(paper_publication_date_split[0].split('-')[0])\n month = int(common_util.get_numeric_month_by_string(\n paper_publication_date_split[1]))\n year = int(paper_publication_date_split[2])\n\n paper_publication_date = datetime.date(year, month, day)\n except Exception as e:\n pass\n\n if not isinstance(paper_publication_date, datetime.date):\n paper_publication_date = datetime.date(\n paper_entry.get('publication_year'), 1, 1)\n\n if paper_publication_date is None:\n return None\n\n paper_authors = []\n for author in paper_entry.get('authors').get('authors'):\n paper_authors.append(author.get('full_name'))\n\n start_page = paper_entry.get('start_page', None)\n end_page = paper_entry.get('end_page', None)\n \n\n if start_page is not None and end_page is not None:\n try:\n paper_pages = f\"{paper_entry.get('start_page')}-{paper_entry.get('end_page')}\"\n paper_number_of_pages = abs(\n int(paper_entry.get('start_page'))-int(paper_entry.get('end_page')))+1\n except Exception: # pragma: no cover\n pass\n\n paper = Paper(paper_title, paper_abstract, paper_authors, publication,\n paper_publication_date, paper_urls, paper_doi, paper_citations, \n paper_keywords, None, paper_number_of_pages, paper_pages)\n\n return paper", "def meta_version(self):\n return self._meta_version", "def _getAllMeta(self):\n try:\n metadata = pyexiv2.ImageMetadata(self.imagePath)\n metadata.read()\n return metadata\n except:\n print 'error reading meta data'\n return None", "def view_specific_paper_version():\n paper = db.paper(request.args(0))\n if paper is None:\n session.flash = T('No such paper')\n redirect(URL('default', 'index'))\n form = SQLFORM(db.paper, record=paper, readonly=True)\n all_versions_link = A('All versions', _href=URL('default', 'view_paper_versions', args=[paper.paper_id]))\n return dict(form=form,\n all_versions_link=all_versions_link)", "def meta(self):\n return self.spec.meta", "def readMetadata(repo, version):\n if repo.type == 'android':\n command = ['hg', '-R', repo.repository, 'id', '-r', version, '-n']\n result = subprocess.check_output(command)\n revision = re.sub(r'\\D', '', result)\n\n command = ['hg', '-R', repo.repository, 'cat', '-r', version, os.path.join(repo.repository, 'AndroidManifest.xml')]\n result = subprocess.check_output(command)\n manifest = dom.parseString(result)\n usesSdk = manifest.getElementsByTagName('uses-sdk')[0]\n\n return {\n 'revision': revision,\n 'minSdkVersion': usesSdk.attributes[\"android:minSdkVersion\"].value,\n }\n else:\n files = subprocess.check_output(['hg', '-R', 
repo.repository, 'locate', '-r', version]).splitlines()\n if 'metadata.%s' % repo.type in files:\n command = ['hg', '-R', repo.repository, 'cat', '-r', version, os.path.join(repo.repository, 'metadata.%s' % repo.type)]\n result = subprocess.check_output(command)\n else:\n # Fall back to platform-independent metadata file for now\n command = ['hg', '-R', repo.repository, 'cat', '-r', version, os.path.join(repo.repository, 'metadata')]\n result = subprocess.check_output(command)\n\n parser = SafeConfigParser()\n parser.readfp(StringIO(result))\n\n result = {\n 'extensionID': parser.get('general', 'id'),\n 'version': version,\n 'compat': []\n }\n for key, value in KNOWN_APPS.iteritems():\n if parser.has_option('compat', key):\n minVersion, maxVersion = parser.get('compat', key).split('/')\n result['compat'].append({'id': value, 'minVersion': minVersion, 'maxVersion': maxVersion})\n return result", "def get_paper(self, query):\n if isinstance(query, str):\n query = self.interpret(query)\n if query.startswith(\"Ti=\"):\n paper = self.evaluate(query, attribs=self.ATTRIBS)\n elif query.startswith(\"AA.\"):\n print('query is an author')\n return NotImplementedError\n else:\n return NotImplementedError\n elif isinstance(query, int):\n return self.evaluate(f'Id={query}', attribs=self.ATTRIBS)[0]\n else:\n return None", "def itemmeta(self, kind):\n response = self.get(\"%s/_new\" % self._kindmap[kind])\n content = _load_atom(response, MATCH_ENTRY_CONTENT)\n return _parse_atom_metadata(content)", "def get_pmid_metadata(pmid):\n query = \"ext_id:{} src:med\".format(str(pmid))\n try:\n result = search(query, result_type=\"core\")\n except NoDataError:\n raise\n else:\n result.meta = result.json()[\"resultList\"][\"result\"][0]\n return result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the absolute parent path of the provided identifier.
def _get_parent_path(self, identifier: Identifier, version: Optional[int] = None) -> str: parent_path = os.path.join( (self.latest_versions_path if not version else self.original_versions_path), ('arxiv' if not identifier.is_old_id or identifier.archive is None else identifier.archive), 'papers', identifier.yymm, ) return parent_path
[ "def get_parent_url(self, identifier, node_details=None):\n return structures_module.nodes.get_parent_url(self.khoros_object, identifier, node_details)", "def get_parent(file_path: str):\n return str(Path(file_path).parent)", "def get_parent_path(self):\n return os.path.abspath(os.path.join(self.current_path, os.pardir))", "def get_parent_url(self, identifier=None, category_details=None):\n return structures_module.categories.get_parent_url(self.khoros_object, identifier, category_details)", "def get_parent_id_from_trace_id():\n trace_id = get_trace_id()\n return trace_id.parent_id", "def get_parent(self, the_id: str) -> str:\n\n parents = self.parent_types[the_id]\n return parents[1] if len(parents) > 1 else ''", "def get_parent_id(self, identifier=None, category_details=None):\n return structures_module.categories.get_parent_id(self.khoros_object, identifier, category_details)", "def parent(self):\n if self._path == sep:\n return None\n elif self._parent is None:\n self._parent = Path(first(split(self._path)))\n return self._parent\n else:\n return self._parent", "def parent_dir(path):\n parent = os.path.dirname(os.path.dirname(os.path.join(path, \"\")))\n return parent", "def _extract_parent_path(node_path):\n path_parts = node_path.strip('/').split('/')\n parent_path = '/' + '/'.join(path_parts[:-1])\n return parent_path", "def parent_dir(self):\n parent = os.path.dirname(self.dirn)\n if self.is_subdir:\n parent = os.path.basename(parent)\n else:\n if self.platform is not None and parent.endswith(self.platform):\n parent = parent[:-len(self.platform)].rstrip(os.sep)\n if self.year is not None and parent.endswith(str(year)):\n parent = parent[:-len(str(year))].rstrip(os.sep)\n return parent", "def get_parent_dir(path):\n return os.path.dirname(path)", "def get_parent(self):\n if not self._elements:\n return None\n return Path(self._elements[:-1])", "def get_parent_directory(src: str) -> str:\n return src[: src.rfind(os.path.sep)]", "def get_parent_dir(path):\n\n return os.path.abspath(os.path.join(path, os.pardir))", "def parent_address(self):\n address = self.address\n if address.startswith(\"/\"):\n address = address[1:]\n if address.endswith(\"/\"):\n address = address[:-1]\n\n if \"/\" in address:\n # Return everything before the last / sign\n return address.rsplit(\"/\", 1)[0]\n elif address:\n return \"\"\n else:\n return None", "def _parent_path(pkg, pkg_path):\n parent = pkg_path[: -len(pkg)] if pkg_path.endswith(pkg) else pkg_path\n return parent.rstrip(\"/\" + os.sep)", "def get_testcase_parent(testcase):\n\n result = ''\n\n basename = path.basename(testcase)\n\n # Separate the ids and the file extension\n ext = get_extension(basename)\n clean_name = basename.replace(ext, '')\n\n ids = re.split(',|\\.', clean_name)\n \n # Collect the delimiters:\n delims = []\n for char in clean_name:\n if char in ['.', ',']:\n delims.append(char)\n\n # Count the number of ids\n id_cnt = len(ids)\n\n # If there are more than 1 ids, set the result value\n if id_cnt > 1:\n parent_ids = ids[:-1]\n\n # Reconstruct the name with the delimiters and the parent ids\n result = parent_ids[0]\n iter = 0\n for elem in parent_ids[1:]:\n result += delims[iter] + elem\n iter += 1\n result += ext\n\n return result", "def parent_dir(current_dir):\n return os.path.abspath(os.path.join(current_dir, os.pardir))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse the version entries from the arXiv .abs file.
def _parse_version_entries(arxiv_id: str, version_entry_list: List) \ -> Tuple[int, List[VersionEntry], str]: version_count = 0 version_entries = list() for parsed_version_entry in version_entry_list: version_count += 1 date_match = RE_DATE_COMPONENTS.match(parsed_version_entry) if not date_match: raise AbsParsingException( 'Could not extract date components from date line.') try: sd = date_match.group('date') submitted_date = parser.parse(date_match.group('date')) except (ValueError, TypeError): raise AbsParsingException( f'Could not parse submitted date {sd} as datetime') source_type = SourceType(code=date_match.group('source_type')) ve = VersionEntry( raw=date_match.group(0), source_type=source_type, size_kilobytes=int(date_match.group('size_kilobytes')), submitted_date=submitted_date, version=version_count ) version_entries.append(ve) return ( version_count, version_entries, f"{arxiv_id}v" f"{version_entries[-1].version}")
[ "def parse_abs_file(filename: str) -> DocMetadata:\n try:\n with open(filename, mode='r', encoding='latin-1') as absf:\n raw = absf.read()\n except FileNotFoundError:\n raise AbsNotFoundException\n except UnicodeDecodeError as e:\n # TODO: log this\n raise AbsParsingException(\n f'Failed to decode .abs file \"{filename}\": {e}')\n\n # TODO: clean up\n modified = datetime.fromtimestamp(\n os.path.getmtime(filename), tz=gettz('US/Eastern'))\n modified = modified.astimezone(tz=tzutc())\n\n # there are two main components to an .abs file that contain data,\n # but the split must always return four components\n components = RE_ABS_COMPONENTS.split(raw)\n if len(components) > 4:\n components = alt_component_split(components)\n if not len(components) == 4:\n raise AbsParsingException(\n 'Unexpected number of components parsed from .abs.')\n\n # everything else is in the second main component\n prehistory, misc_fields = re.split(r'\\n\\n', components[1])\n\n fields: Dict[str, Any] = \\\n AbsMetaSession._parse_metadata_fields(key_value_block=misc_fields)\n\n # abstract is the first main component\n fields['abstract'] = components[2]\n\n id_match = RE_ARXIV_ID_FROM_PREHISTORY.match(prehistory)\n\n if not id_match:\n raise AbsParsingException(\n 'Could not extract arXiv ID from prehistory component.')\n\n arxiv_id = id_match.group('arxiv_id')\n\n prehistory = re.sub(r'^.*\\n', '', prehistory)\n parsed_version_entries = re.split(r'\\n', prehistory)\n\n # submitter data\n from_match = RE_FROM_FIELD.match(parsed_version_entries.pop(0))\n if not from_match:\n raise AbsParsingException('Could not extract submitter data.')\n name = from_match.group('name')\n if name is not None:\n name = name.rstrip()\n email = from_match.group('email')\n\n # get the version history for this particular version of the document\n if not len(parsed_version_entries) >= 1:\n raise AbsParsingException('At least one version entry expected.')\n\n (version, version_history, arxiv_id_v) \\\n = AbsMetaSession._parse_version_entries(\n arxiv_id=arxiv_id,\n version_entry_list=parsed_version_entries)\n\n arxiv_identifier = Identifier(arxiv_id=arxiv_id)\n\n # named (key-value) fields\n if not all(rf in fields for rf in REQUIRED_FIELDS):\n raise AbsParsingException(f'missing required field(s)')\n\n # some transformations\n category_list: List[str] = []\n primary_category = None\n\n if 'categories' in fields and fields['categories']:\n category_list = fields['categories'].split()\n if category_list[0] in taxonomy.CATEGORIES:\n primary_category = Category(category_list[0])\n primary_archive = \\\n Archive(\n taxonomy.CATEGORIES[primary_category.id]['in_archive'])\n elif arxiv_identifier.is_old_id:\n primary_archive = Archive(arxiv_identifier.archive)\n elif arxiv_identifier.is_old_id:\n primary_archive = Archive(arxiv_identifier.archive)\n else:\n raise AbsException('Cannot infer archive from identifier.')\n\n doc_license: License = \\\n License() if 'license' not in fields else License(\n recorded_uri=fields['license'])\n raw_safe = re.sub(RE_FROM_FIELD, r'\\g<from>\\g<name>', raw, 1)\n\n return DocMetadata(\n raw_safe=raw_safe,\n arxiv_id=arxiv_id,\n arxiv_id_v=arxiv_id_v,\n arxiv_identifier=Identifier(arxiv_id=arxiv_id),\n title=fields['title'],\n abstract=fields['abstract'],\n authors=AuthorList(fields['authors']),\n submitter=Submitter(name=name, email=email),\n categories=fields['categories'] if 'categories' in fields else None,\n primary_category=primary_category,\n primary_archive=primary_archive,\n primary_group=Group(\n 
taxonomy.ARCHIVES[primary_archive.id]['in_group']),\n secondary_categories=[\n Category(x) for x in category_list[1:]\n if (category_list and len(category_list) > 1)\n ],\n journal_ref=None if 'journal_ref' not in fields\n else fields['journal_ref'],\n report_num=None if 'report_num' not in fields\n else fields['report_num'],\n doi=None if 'doi' not in fields else fields['doi'],\n acm_class=None if 'acm_class' not in fields else\n fields['acm_class'],\n msc_class=None if 'msc_class' not in fields else\n fields['msc_class'],\n proxy=None if 'proxy' not in fields else fields['proxy'],\n comments=fields['comments'] if 'comments' in fields else None,\n version=version,\n license=doc_license,\n version_history=version_history,\n modified=modified\n # private=private # TODO, not implemented\n )", "def no_pyIdlak_parse_arkfile(fname):\n ark = collections.OrderedDict()\n arkfile = open(fname).read()\n\n if arkfile.find('[') == -1:\n # Vector of vectors version\n try:\n vector_id = False\n for vector in arkfile.split(';'):\n values = vector.strip().split()\n if not values:\n continue\n try:\n float(values[0])\n except ValueError:\n # switching to new ID\n vector_id = values[0]\n ark[vector_id] = [[]]\n else:\n # switching to next vector\n ark[vector_id].append([float(values[0])])\n\n if len(values) > 1:\n for v in values[1:]:\n try:\n float(v)\n except ValueError:\n vector_id = values[0]\n ark[vector_id] = [[]]\n else:\n ark[vector_id][-1].append(float(v))\n except KeyError:\n raise IOError('Ark is not correctly formated')\n\n else:\n # Matrix version\n repat = re.compile('(?P<id>[a-zA-Z0-9]+)\\s*\\[(?P<mat>.*?)\\]\\s*', re.S)\n for m in re.finditer(repat, arkfile):\n ark[m.group('id')] = [\n list(map(float, s.split())) for s in m.group('mat').split('\\n') if len(s.strip())\n ]\n if not ark:\n raise IOError('Ark file is empty')\n return ark", "def test_version(self):\n k, v = self.base.parse_ver('51')\n self.assertEqual(k, 'fileversion')\n self.assertEqual(v, '51')", "def test_full_info_all_versions(self):\n expect = (\n (\"v2014.1.4.1rc3-n/a-abcdefff\", (2014, 1, 4, 1, \"rc\", 3, -1, \"abcdefff\")),\n (\"v3.4.1.1\", (3, 4, 1, 1, \"\", 0, 0, None)),\n (\"v3000\", (3000, None, None, 0, \"\", 0, 0, None)),\n (\"v3000.0\", (3000, 0, None, 0, \"\", 0, 0, None)),\n (\"v4518.1\", (4518, 1, None, 0, \"\", 0, 0, None)),\n (\"v3000rc1\", (3000, None, None, 0, \"rc\", 2, 0, None)),\n (\"v3000rc1-n/a-abcdefff\", (3000, None, None, 0, \"rc\", 1, -1, \"abcdefff\")),\n )\n\n for vstr, full_info in expect:\n saltstack_version = SaltStackVersion.parse(vstr)\n assert saltstack_version.full_info_all_versions, full_info\n assert len(saltstack_version.full_info_all_versions) == len(full_info)", "def load_version_information() -> None:\n to_update = {\"VERSION_MAJOR\", \"VERSION_MINOR\", \"VERSION_PATCH\", \"VERSION_SUFFIX\"}\n with VERSION_FILE.open(\"r\", encoding=\"utf-8\") as fp:\n for line in fp:\n name, _, value = line.strip().partition(\"=\")\n # Don't overwrite random variables by trusting an external file.\n var = name.strip()\n if var in to_update:\n globals()[var] = value.strip()", "def extract_version_info():\n version = None\n if os.path.exists('.version'):\n with open('.version') as f:\n line = f.read().rstrip()\n log.info('.version contains \"%s\"', line)\n if line.startswith('openafs-'):\n # Extract version from the git tag name.\n version = re.sub('openafs-[^-]*-', '', line).replace('_', '.')\n elif line.startswith('BP-'):\n # Branch point tags do not contain the version number.\n log.info('.version 
file has old branch point tag name.')\n else:\n # Use the given version string.\n version = line\n if not version:\n # Unable to lookup version from the .version file, try to extract the\n # version from the source directory name.\n root = os.path.basename(os.path.abspath('.'))\n m = re.match(r'openafs-(.*)', root)\n if m:\n version = m.group(1)\n if not version:\n module.fail_json(msg='Unable to determine version.')\n\n # Determine package version and release from the OpenAFS version.\n m1 = re.match(r'(.*)(pre[0-9]+)', version) # prerelease\n m2 = re.match(r'(.*)dev', version) # development\n m3 = re.match(r'(.*)-([0-9]+)-(g[a-f0-9]+)$', version) # development\n m4 = re.match(r'(.*)-([a-z]+)([0-9]+)', version) # custom\n if m1:\n v = m1.group(1)\n r = \"0.{0}\".format(m1.group(2))\n elif m2:\n v = m2.group(1)\n r = \"0.dev\"\n elif m3:\n v = m3.group(1)\n r = \"{0}.{1}\".format(m3.group(2), m3.group(3))\n elif m4:\n v = m4.group(1).replace('-', '')\n r = \"1.2.{0}.{1}\".format(m4.group(3), m4.group(2))\n else:\n v = version # standard release\n r = \"1\" # increment when repackaging this version\n # '-' are used as delimiters by rpm.\n v = v.replace('-', '_')\n r = r.replace('-', '_')\n return dict(openafs_version=version, package_version=v, package_release=r)", "def parse_version(version):\n return [int(num) for num in version.split('.')]", "def _parse_version(self):\n self.version = {\n \"release\": None,\n \"major\": None,\n \"minor\": None\n }\n self.version.update(self.data[\"version\"])", "def read_rd_file( filename, version ):\n global data_parcels\n\n bunpat_bits_per_word = 16\n\n parse_begin = time.time()\n\n file = open(filename, 'r')\n \n fdata = file_data()\n fdata.name = os.path.split( filename )[1]\n\n fheader = file_header()\n fdata.fheader = deepcopy( fheader )\n del fheader\n\n cheader = cesr_header()\n fdata.cheader = deepcopy( cheader )\n del cheader\n\n EOF_found = False;\n\n # Read and store supported version-specific file header values\n (line, EOF_found) = populate_attribs_from_file(\n header_field_sets[version],\n custom_field_reader_sets[version],\n fdata.fheader,\n file)\n\n\n # Process the hex-word representation of th ebunch pattern\n # and store it as a list of the integers 0 & 1.\n if version >= 3:\n fdata.fheader.bunch_pattern_hex.reverse()\n for word in fdata.fheader.bunch_pattern_hex:\n unpadded = str(bin(word)[2:])\n padding = bunpat_bits_per_word - (len(unpadded))\n value = ''\n for padval in range(0,padding):\n value = value + '0'\n value = value + unpadded\n # Reverse each word's bit string for proper ordering of\n # bunch bits the same as list indexing.\n value = value[::-1]\n for char in value:\n fdata.fheader.bunch_pattern.append( int(char) )\n\n\n #print fdata.fheader.bunch_pattern_hex\n #print \"\"\n #print fdata.fheader.bunch_pattern\n \n\n## # Read and store supported CESR header values\n## FIXME (Breaks reading of files lower than V3) FIXME\n if version >= 3:\n (line, EOF_found) = populate_attribs_from_file(\n cesr_field_sets[version],\n custom_field_writer_sets[version],\n fdata.cheader,\n file)\n\n \n # Process each instrument header TBT data (sub)section\n inst_idx = 0\n\n while line != \"\":\n\n #sys.stdout.write(\"read_rd_file LINE: \" + line)\n \n iheader = inst_header()\n\n #print \"read_rd_file INST HEADER READ\"\n # Store supported instrument header values\n (line, EOF_found) = populate_attribs_from_file(\n inst_field_sets[version],\n custom_field_reader_sets[version],\n iheader,\n file)\n\n # If attribute populator returned EOF, 
parsing is done; break out.\n if EOF_found == True:\n #print \"read_rd_file BREAKING!\"\n break\n\n\n idata = inst_data()\n fdata.insts.append( deepcopy(idata) )\n del idata\n\n fdata.insts[inst_idx].iheader = deepcopy( iheader )\n del iheader\n\n if version != 1 and version != 1.01:\n num_turns = fdata.insts[inst_idx].iheader.num_turns\n else:\n num_turns = -1\n\n ##print \"DATABLOCK read\"\n for method in datablock_read_methods[version]:\n method( fdata, inst_idx, file)\n\n inst_idx = inst_idx + 1 \n\n parse_end = time.time()\n\n fdata.read_time_s = (parse_end - parse_begin)\n\n data_parcels.append( deepcopy(fdata) )\n \n del fdata\n elapsed = (parse_end - parse_begin)\n return elapsed", "def read_abinit(filename='abinit.in'):\n\n from ase import Atoms, units\n\n if isinstance(filename, str):\n f = open(filename)\n else: # Assume it's a file-like object\n f = filename\n\n lines = f.readlines()\n if type(filename) == str:\n f.close()\n\n full_file = ''\n for line in lines:\n if '#' in line:\n meat, comment = line.split('#')\n else:\n meat = line\n full_file = full_file + meat + ' '\n\n full_file.strip()\n tokens = full_file.lower().split()\n\n # note that the file can not be scanned sequentially\n\n index = tokens.index(\"acell\")\n unit = 1.0\n if(tokens[index+4].lower()[:3] != 'ang'):\n unit = units.Bohr\n acell = [unit*float(tokens[index+1]),\n unit*float(tokens[index+2]),\n unit*float(tokens[index+3])]\n\n index = tokens.index(\"natom\")\n natom = int(tokens[index+1])\n\n index = tokens.index(\"ntypat\")\n ntypat = int(tokens[index+1])\n\n index = tokens.index(\"typat\")\n typat = []\n for i in range(natom):\n typat.append(int(tokens[index+1+i]))\n\n index = tokens.index(\"znucl\")\n znucl = []\n for i in range(ntypat):\n znucl.append(int(tokens[index+1+i]))\n\n index = tokens.index(\"rprim\")\n rprim = []\n for i in range(3):\n rprim.append([acell[i]*float(tokens[index+3*i+1]),\n acell[i]*float(tokens[index+3*i+2]),\n acell[i]*float(tokens[index+3*i+3])])\n\n # create a list with the atomic numbers\n numbers = []\n for i in range(natom):\n ii = typat[i] - 1\n numbers.append(znucl[ii])\n\n # now the positions of the atoms\n if \"xred\" in tokens:\n index = tokens.index(\"xred\")\n xred = []\n for i in range(natom):\n xred.append([float(tokens[index+3*i+1]),\n float(tokens[index+3*i+2]),\n float(tokens[index+3*i+3])])\n atoms = Atoms(cell=rprim, scaled_positions=xred, numbers=numbers,\n pbc=True)\n else:\n if \"xcart\" in tokens:\n index = tokens.index(\"xcart\")\n unit = units.Bohr\n elif \"xangst\" in tokens:\n unit = 1.0\n index = tokens.index(\"xangst\")\n else:\n raise IOError(\n \"No xred, xcart, or xangs keyword in abinit input file\")\n\n xangs = []\n for i in range(natom):\n xangs.append([unit*float(tokens[index+3*i+1]),\n unit*float(tokens[index+3*i+2]),\n unit*float(tokens[index+3*i+3])])\n atoms = Atoms(cell=rprim, positions=xangs, numbers=numbers, pbc=True)\n \n try:\n i = tokens.index('nsppol')\n except ValueError:\n nsppol = None\n else:\n nsppol = int(tokens[i + 1])\n\n if nsppol == 2:\n index = tokens.index('spinat')\n magmoms = [float(tokens[index + 3 * i + 3]) for i in range(natom)]\n atoms.set_initial_magnetic_moments(magmoms)\n\n return atoms", "def parse(self):\n self._version = self.data", "def read_inversion_info(file_dic):\n #print_file_test = open('file_test.txt','w')\n\n if not ( check_inversion_files(file_dic) ):\n print 'error(read_inversion_info): problem with lenstool file names'\n return 0\n \n file_generate_arcs = file_dic['file_generate_arcs']\n 
info_input_lens = fc.extract_second_identifiers( file_generate_arcs, \\\n 'potential' )\n#-------------------------------------------------------------------------------\n\n file_source = file_dic['file_source']\n info_src = np.loadtxt(file_source, unpack=False)\n if len(info_src) == 8 and np.isscalar(info_src[0]):\n #FIXME - check if the second condition is all we need\n info_src = [info_src]\n#-------------------------------------------------------------------------------\n\n file_make_inversion = file_dic['file_make_inversion']\n info_fited_param = fc.extract_second_identifiers( file_make_inversion, \\\n 'limit' )\n info_forme = fc.extract_parameter(file_make_inversion, 'forme')[0][0]\n\n#-------------------------------------------------------------------------------\n\n file_best_fit = file_dic['file_best_fit']\n info_best_fit = fc.extract_second_identifiers( file_best_fit, \\\n 'potentiel' )\n\n info_xi2 = fc.extract_parameter(file_best_fit, '#Chi2pos:')\n\n#-------------------------------------------------------------------------------\n file_chires = file_dic['file_chires']\n\n info_chires = extract_parameter(file_chires, '0')\n rmss_mean = [0.0, 0.0]\n rmsi_mean = [0.0, 0.0]\n for i in info_chires:\n if i[0] != 'A':\n rmss_mean[0] = rmss_mean[0] + float(i[7])\n rmss_mean[1] = rmss_mean[1] + 1.0\n \n rmsi_mean[0] = rmsi_mean[0] + float(i[8])\n rmsi_mean[1] = rmsi_mean[1] + 1.0\n\n rmss_mean = rmss_mean[0]/rmss_mean[1]\n rmsi_mean = rmsi_mean[0]/rmsi_mean[1]\n#-------------------------------------------------------------------------------\n out_dict = { 'xi2' : float(info_xi2[0][0]), \\\n 'best_fit_lens' : info_best_fit, \\\n 'rmsi_mean' : rmsi_mean, \\\n 'rmss_mean' : rmss_mean, \\\n 'fited_parameters' : info_fited_param[0].keys(), \\\n 'input_lens' : info_input_lens[len(info_input_lens) - 1], \\\n 'forme' : info_forme \\\n }\n #for i in out_dict.keys():\n # print i, out_dict[i]\n return out_dict", "def _parseK3AFile(self, path):\n print(u'[Parse] K3A project file: %s' % path)\n tree = ET.parse(path)\n for name in tree.getiterator('ProjectName'):\n self._name = name.text\n break;\n for version in tree.getiterator('Version'):\n self._version = int(version.text)\n break;\n\n if self._version not in __SUPPORTED_K3A_VERSIONS__:\n print(u'WARNING: Project version (%s) do not match supported parser versions (%s), parse may be incorrect' % (self._version, __SUPPORTED_K3A_VERSIONS__))", "def parse_version_info(ymlPath):\n\n if not os.path.exists(ymlPath):\n log_utils.warning(\"the apktool.yml is not exists \" + ymlPath)\n return (\"0\", \"1.0.0\")\n\n ymlFile = open(ymlPath, 'r')\n lines = ymlFile.readlines()\n ymlFile.close()\n\n versionCode = \"0\"\n versionName = \"1.0.0\"\n\n for line in lines:\n if 'versionCode' in line:\n versionCode = line.replace('versionCode:', '').strip().replace(\"'\", \"\")\n\n elif 'versionName' in line:\n versionName = line.replace('versionName:', '').strip().replace(\"'\", \"\")\n\n return (versionCode, versionName)", "def rd_file_version( filename ):\n version = 0\n \n file = open(filename, 'r')\n line = file.readline()\n line = file.readline()\n if line.find(\"Command_ID\") != -1:\n version = 3\n return version\n \n if line.find(\"RAW DATA - EXTENDED\") != -1:\n version = 2\n return version\n \n if line.find(\"RAW DATA\") != -1:\n for lnum in range(1, 14):\n line = file.readline()\n if line.find(\"#----\") != -1:\n version = 1\n else:\n version = 1.01\n # Close and reopen file to reset input\n file.close()\n file = open(filename, 'r')\n line = 
file.readline()\n line = file.readline()\n return version\n \n return version", "def test_arp_v_centos_7_7(self):\n self.assertEqual(jc.parsers.arp.parse(self.centos_7_7_arp_v, quiet=True), self.centos_7_7_arp_v_json)", "def _parse_version(version_string):\n result = []\n for part in version_string.split(\".\"):\n m = re.match(\"^([0-9]*)(.*)$\", part)\n if m.group(1):\n result.append(int(m.group(1)))\n if m.group(2):\n result.append(m.group(2))\n return tuple(result)", "def parse_version(self, version):\n major, minor, update = version.split('.')\n modification, fix = update[:2], update[2:]\n\n # remove leading zeros from raw version parts\n return {\n 'major': str(int(major)),\n 'minor': str(int(minor)),\n 'mod': str(int(modification)),\n 'fix': str(int(fix)),\n }", "def parse_version(header, data):\n log = unpack('<I', data)\n game, save = unpack('<7sxf', header)\n if save == -1:\n save = unpack('<I', header)\n if save == 37:\n save = 37.0\n else:\n save /= (1<<16)\n version = get_version(game.decode('ascii'), round(save, 2), log)\n return version, game.decode('ascii'), round(save, 2), log" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get list of dissemination formats.
def get_dissemination_formats(docmeta: DocMetadata, format_pref: Optional[str] = None, add_sciencewise: bool = False) -> List: return current_session().get_dissemination_formats(docmeta, format_pref, add_sciencewise)
[ "def formats(self):\n logger.debug(\"Get formats\")\n return self._raw_api.formats.get()", "def get_download_formats(self):\r\n return self.get_all_formats()", "def list_formats(cls):\n\n return [x.name for x in pkg_resources.iter_entry_points('iotile_analytics.save_format')]", "def get_export_formats(self):\n return [f for f in self.formats if f().can_export()]", "def export_formats(self):\n return list(self._export_formats)", "def get_all_formats(self):\r\n result = []\r\n for fmt in settings.MEDIA_PREFERENCE:\r\n url = getattr(self, 'video_%s_url' % fmt, None)\r\n\r\n # skip empty urls and unsupported formats\r\n if not url:\r\n continue\r\n\r\n try:\r\n mime_type = MIMETYPES_MAP[fmt]\r\n except KeyError:\r\n raise LookupError('No mimetype registered for \"%s\"' % fmt)\r\n\r\n result.append({\r\n 'url': url,\r\n 'length': getattr(self, 'video_%s_length' % fmt),\r\n 'display': mime_type.split('/')[1],\r\n 'mime_type': mime_type,\r\n 'download_only': getattr(self, 'video_%s_download_only' % fmt),\r\n })\r\n\r\n return result", "def get_formats(cls):\n return cls._class_registry.keys()", "def get_archive_formats():\n formats = [(name, registry[2]) for name, registry in\n _ARCHIVE_FORMATS.items()]\n formats.sort()\n return formats", "def get_feed_formats(self):\r\n fmts = self.get_all_formats()\r\n if self.is_youtube():\r\n fmts.append({\r\n 'url': self.source_url,\r\n 'mime_type': 'video/flv',\r\n })\r\n return fmts", "def get_import_formats(self):\n return [f for f in self.formats if f().can_import()]", "def list_metadata_formats(url):\n return sickle_operations.sickle_list_metadata_formats(url)", "def _detect_formats(self) -> Set[str]:\n format_ = []\n try:\n format_ = self.schema.get(\"format\", self.schema[\"type\"])\n if not isinstance(format_, List):\n format_ = [format_]\n except KeyError:\n pass\n return set(format_)", "def current_legal_formats(self, card):\n # formats = FormatBasecard.objects.filter(basecard__id=card.basecard.id,\n # format__start_date__lte=datetime.today(),\n # format__end_date__gte=datetime.today())\n formats = Format.objects.filter(formatbasecard__basecard_id=card.basecard.id,\n start_date__lte=timezone.now(),\n end_date__gt=timezone.now())\n return formats", "def all_formats_by_repeater(cls, repeater_class, for_domain=None):\n return cls.get_collection(repeater_class).get_all_formats(for_domain=for_domain)", "def prepare_formats(self, obj):\n if hasattr(obj, 'metadata'):\n return [format.value for format in obj.metadata.formats.all()]\n else:\n return []", "def get_unpack_formats():\n formats = [(name, info[0], info[3]) for name, info in\n _UNPACK_FORMATS.items()]\n formats.sort()\n return formats", "def show_formats(self):\n\n # the link below is Ginger by Wizkid, you can replace it with any other\n # Youtube link\n # demo link 'https://www.youtube.com/watch?v=YSy2lBZ1QrA'\n # self.url = sys.argv[1]\n\n self.ydl = youtube_dl.YoutubeDL()\n # uses the youtube_dl as a context manager\n with self.ydl:\n self.result = self.ydl.extract_info(\n self.url, extra_info={'listformats': True}, download=False)\n for format in self.result['formats']:\n format_id = format['format_id']\n filesize = size(format['filesize']\n ) if format['filesize'] else 0\n if format['ext'] == 'mp4':\n ext = format['ext']\n else:\n continue\n format_note = format['format_note']\n full_info = ' '.join([str('id=' + format_id), str(format_note),\n str(ext), str(filesize)])\n print(full_info)\n print()\n print(f\"Pick a format to download \\n {self.result['title']}\")\n\n self.request_id()", 
"def formats():\n if PIL_ENABLED:\n return 'BMP', 'EPS', 'GIF', 'JPEG', 'MSP', 'PCX', 'PNG', 'SVG', 'TIFF', 'XBM'\n else:\n return 'EPS', 'SVG'", "def get_formats(pub):\n formats = {}\n if 'rel_abs' in pub:\n formats['abstract'] = pub['rel_abs']\n # The publication JSON does not contain enough information generally\n # to identify the URL for the various formats. Therefore we have to\n # load the landing page for the article and parse out various URLs\n # to reliably get to the desired content.\n landing_page_res = requests.get(pub['rel_link'])\n\n # The URL for the full PDF and XML is often different in format than\n # the rel_site URL so we need to get the link to it from the content\n # of the landing page. The XML URL doesn't explicitly appear in the\n # page content therefore we work with the citation_pdf_url and get\n # URLs for both the PDF and the XML.\n pdf_xml_url_base = get_pdf_xml_url_base(landing_page_res.text)\n if pdf_xml_url_base:\n formats['pdf'] = pdf_xml_url_base + '.full.pdf'\n formats['xml'] = pdf_xml_url_base + '.source.xml'\n text_url_base = get_text_url_base(landing_page_res.text)\n if text_url_base:\n formats['txt'] = text_url_base + 'txt'\n return formats" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get the closest mesh triangle ids from a transform position
def closestTriangleToTransform(transform, meshName): faceVertices, points = meshData.getMeshData(meshName) vertexFaces = meshData.getMeshVertexFaces(faceVertices) point = np.array(cmds.xform(transform, q=1, ws=1, t=1), dtype=np.double) return meshData.getClosestTriangle(point, points, vertexFaces, faceVertices)
[ "def closest_points_on_mesh(self, points):\n dist, ind_mesh = trimesh.proximity.ProximityQuery(self.mesh).vertex(points)\n self.log.debug('Distance to mesh {}'.format(dist))\n ind_coord = self.trimesh_to_coord[ind_mesh]\n assert (self.coords[ind_coord] == self.mesh.vertices[ind_mesh]).all()\n return ind_coord", "def MeshVtxAdjacentVtxs (strMesh, index, blnAbsolutConnections=False, blnCreate=False):\n \"\"\"custom function\"\"\"\n #-----------------------------------------------------------------------------------------------------------------------------------------\n def CullDuplicates(seq, idfun=None): \n # order preserving \n if idfun is None: \n def idfun(x): return x \n seen = {} \n result = [] \n for item in seq: \n marker = idfun(item) \n if marker in seen: continue \n seen[marker] = 1 \n result.append(item) \n return result\n #-----------------------------------------------------------------------------------------------------------------------------------------\n MeshVtxAdjacentVtxs = []\n if rs.IsMesh(strMesh)==False : \n print \"strMesh is not an mesh\"\n return None\n if type(index)==type(\"string\"):\n print \"index is not an integer\"\n return None\n if type(index)==type(0.1): index = int(index)\n\n arrVertices = rs.MeshVertices (strMesh)\n arrFaceVertices = rs.MeshFaceVertices(strMesh)\n\n intCount = 0\n arrAdjacentVtxs = []\n for arrFace in arrFaceVertices:\n blnIsAdjacent = False\n for arrVtxIndex in arrFace:\n if arrVtxIndex == index :\n blnIsAdjacent = True\n if blnIsAdjacent :\n if blnAbsolutConnections :\n if arrFace[2]==arrFace[3] :\n for arrVtxIndex in arrFace :\n if arrVtxIndex != index :\n arrAdjacentVtxs.append( arrVtxIndex)\n else :\n if index == arrFace[0] :\n arrAdjacentVtxs.append( arrFace[3] )\n arrAdjacentVtxs.append( arrFace[1] )\n elif index == arrFace[1] :\n arrAdjacentVtxs.append( arrFace[0] )\n arrAdjacentVtxs.append( arrFace[2] )\n elif index == arrFace[2] :\n arrAdjacentVtxs.append( arrFace[1] )\n arrAdjacentVtxs.append( arrFace[3] )\n elif index == arrFace(3) :\n arrAdjacentVtxs.append( arrFace[2] )\n arrAdjacentVtxs.append( arrFace[0] )\n else :\n for arrVtxIndex in arrFace :\n if arrVtxIndex != index :\n arrAdjacentVtxs.append( arrVtxIndex )\n if type(arrAdjacentVtxs) != type([]) : return None\n arrOrderAdjacentVtxs = CullDuplicates(arrAdjacentVtxs)\n if blnCreate :\n arrStrPts = []\n for arrVtxIndex in arrOrderAdjacentVtxs:\n rs.AddPoint ( arrVertices[arrVtxIndex] )\n arrStrPts.append( arrVertices[arrVtxIndex] )\n return arrStrPts\n else :\n return arrOrderAdjacentVtxs", "def get_triu_indices(num_nodes):\n ones = torch.ones(num_nodes, num_nodes)\n eye = torch.eye(num_nodes, num_nodes)\n triu_indices = (ones.triu() - eye).nonzero().t()\n triu_indices = triu_indices[0] * num_nodes + triu_indices[1]\n return triu_indices", "def getProximity(tuples):\n\t\t\tsortedIndices = [indices for indices in tuples]\n\t\t\t#return abs(sortedIndices[0][1] - sortedIndices[-1][0])\n\t\t\treturn sortedIndices[-1][0] - sortedIndices[0][1]", "def getVertexPositions(transform_name):\n positions = pm.xform('{}.vtx[*]'.format(transform_name), q=True, ws=True, t=True)\n return zip(positions[0::3], positions[1::3], positions[2::3])", "def getClosestVtxFromMeshComponent(mesh, objArray=None):\n mesh = pm.PyNode(mesh)\n meshShp = mesh.getShape()\n mySets = []\n\n if objArray is None:\n objArray = pm.selected()\n\n cPnt=pm.createNode('closestPointOnMesh')\n pm.connectAttr (meshShp+'.outMesh', cPnt+'.inMesh')\n\n pm.select(cl=1)\n for obj in objArray:\n objShp = 
obj.getShape()\n setName = '%s_vtxSet'%obj\n\n if pm.objExists(setName):\n pm.delete(setName)\n\n pm.sets(n = setName)\n\n vtxs=[]\n if objShp.type()=='mesh':\n c = 1\n for i in range(0, objShp.numVertices()):\n cPnt.inPosition.set( objShp.getPoint(i, space='world') )\n\n myVtx = pm.PyNode( '%s.vtx[%s]'%(mesh, cPnt.closestVertexIndex.get()) )\n\n if myVtx not in vtxs:\n vtxs.append(myVtx)\n\n pm.select(myVtx)\n pm.sets(setName, add=pm.selected())\n\n pm.delete(cPnt)\n\n pm.select(cl=1)\n pm.select(mySets)", "def find_triangle(self):\r\n vertexes = []\r\n for v in self.sides:\r\n # проходим по всем сторонам и берем за стартовую координату начало стороны - вершину\r\n fix_x, fix_y = v[0]\r\n\r\n # перебираем все координаты других сторон\r\n for side in self.sides:\r\n for i in range(len(side)):\r\n # текущая координата - сх, су\r\n cx, cy = side[i]\r\n if cx == fix_x and cy == fix_y:\r\n continue\r\n\r\n res = self.find_vertex(fix_x, fix_y, cx, cy)\r\n # если результат False, значит, вершинка не нашлась\r\n if not res:\r\n continue\r\n\r\n # сразу считаем длину стороны\r\n cl = self.get_length(fix_x, fix_y, cx, cy)\r\n # если мы до этого не находили вершин, закинем текущий сет как решение\r\n if not vertexes:\r\n vertexes = [(fix_x, fix_y), (cx, cy), (res[0], res[1]), cl]\r\n\r\n else:\r\n # иначе, будем проверять до длине стороны. Если найденная больше - заменяем\r\n if vertexes[3] < cl:\r\n vertexes = [(fix_x, fix_y), (cx, cy), (res[0], res[1]), cl]\r\n\r\n return vertexes[:-1]", "def Triangulate(self, p_int, vtkIdList, vtkPoints):\n ...", "def compute_coord_from_triangule_locate_eqs(p,pts_world_csys, pts_dist):\n x, y, z = p\n n = pts_world_csys.shape[1] \n result = []\n for i in range(n):\n delta = np.linalg.norm(np.array([x, y, z]) - pts_world_csys[:,i])\n result.append(delta - pts_dist[i])\n \n return result", "def exportTriangles(self):\n return [(a-4, b-4, c-4)\n for (a, b, c) in self.triangles if a > 3 and b > 3 and c > 3]", "def closest_block():\n data = rospy.wait_for_message(\"fiducial_transforms\", FiducialTransformArray)\n if len(data.transforms) >= 1:\n # Store an array and check the closest to the robot\n for i in range(len(data.transforms)):\n x_array = [0 for i in range(len(data.transforms))]\n x_array[i] = data.transforms[i].transform.translation.y\n max_value = max(x_array)\n max_index = x_array.index(max_value)\n print(max_index)\n x = data.transforms[max_index].transform.translation.x\n y = data.transforms[max_index].transform.translation.y\n z = data.transforms[max_index].transform.translation.z\n x_q = data.transforms[max_index].transform.rotation.x\n y_q = data.transforms[max_index].transform.rotation.y\n z_q = data.transforms[max_index].transform.rotation.z\n w_q = data.transforms[max_index].transform.rotation.w\n return x, y, z, x_q, y_q, z_q, w_q", "def peer_indices(i):\n return (set.union(set(row_indices(i)),\n set(column_indices(i)),\n set(box_indices(i))\n ) - {i})", "def point_location(tri, p): \n simplex_index = tri.find_simplex(p)\n bc = []\n for id_, point in zip(simplex_index, p):\n # Calculate the two first barycentric coordinates for the relevant\n # simplex\n b = tri.transform[id_, :2].dot(point-tri.transform[id_, 2])\n bc.append(np.c_[np.atleast_2d(b), 1-b.sum()])\n # Create the full array and squeeze the shit out of it\n bc = np.array(bc).squeeze()\n return simplex_index, bc", "def FindClosestNPoints(self, p_int, , vtkIdList):\n ...", "def _sort_closest_triangles(surf, electrode, intersval):\n dvect = norm(electrode - surf['pos'], axis=1)\n closevert 
= where(dvect < intersval)[0]\n dvecti = argsort(dvect[closevert])\n sortvert = closevert[dvecti]\n\n # l. 176-192\n sorttri = []\n tri = surf['tri'].copy()\n for cv in sortvert:\n rows = concatenate([where(cv == tri[:, i])[0] for i in range(3)])\n tri[rows, :] = 0\n sorttri.extend(rows.tolist())\n\n return sorttri", "def verts_packed_to_mesh_idx(self):\n self._compute_packed()\n return self._verts_packed_to_mesh_idx", "def calculationTriangles(locations):\n # triangulation\n triangulation = scipy.spatial.Delaunay(locations.T)\n triangles = triangulation.simplices.copy()\n return triangulation, triangles", "def posRel(face):\r\n\r\n\tif face==0:\r\n\t\treturn [[4,0,3,6],[1,0,3,6],[5,6,3,0],[3,2,5,8],2]\r\n\telif face==1:\r\n\t\treturn [[4,6,7,8],[2,0,3,6],[5,0,1,2],[0,2,5,8],3]\r\n\telif face==2:\r\n\t\treturn [[4,8,5,2],[3,0,3,6],[5,2,5,8],[1,2,5,8],0]\r\n\telif face==3:\r\n\t\treturn [[4,2,1,0],[0,0,3,6],[5,8,7,6],[2,2,5,8],1]\r\n\telif face==4:\r\n\t\treturn [[3,2,1,0],[2,2,1,0],[1,0,1,2],[0,0,1,2],5]\r\n\telif face==5:\r\n\t\treturn [[1,6,7,8],[2,6,7,8],[3,8,7,6],[0,8,7,6],4]", "def map_lines_to_tris(line_cells, tri_cells):\n tri_cells = np.asarray(tri_cells)\n line_cells = np.asarray(line_cells)\n line_cells.sort(axis=1)\n\n nlines = line_cells.shape[0]\n\n # Check triangles, element node indices always monotonic\n assert tri_cells.shape[1] == 3\n assert np.all(tri_cells[:, 0] < tri_cells[:, 1])\n assert np.all(tri_cells[:, 1] < tri_cells[:, 2])\n\n elem_nos = np.zeros(nlines, dtype=np.int64)\n local_idx = np.zeros(nlines, dtype=np.int64)\n\n # Lines are number by the vertex they *don't* contain\n idx_map = [(0, 1, 2),\n (0, 2, 1),\n (1, 2, 0)]\n\n # Where do we find this line in the tris?\n # NB: FEniCS has a mesh.topology() tool but I wasn't able\n # to achieve the same result, so rolled my own.\n for i in range(nlines):\n for j in range(3):\n result = np.where((tri_cells[:, idx_map[j][:2]] ==\n line_cells[i, :]).all(axis=1))[0]\n if len(result) > 0:\n elem_nos[i] = result[0]\n local_idx[i] = idx_map[j][2]\n break\n\n return elem_nos, local_idx" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get the skin weights from the skinCluster, create a skinTransforms node and connect it to drive the transforms
def createSkinTansformNode(skinCluster, transforms): node = cmds.createNode('skinTransforms') influences = cmds.listConnections('{}.matrix'.format(skinCluster), s=1, d=0) for i, jnt in enumerate(influences): cmds.connectAttr('{}.worldMatrix[0]'.format(jnt), '{}.matrix[{}]'.format(node,i)) m = cmds.getAttr('{}.wim[0]'.format(jnt)) cmds.setAttr ('{}.bindPreMatrix[{}]'.format(node, i), m, type="matrix") mesh = cmds.deformer(skinCluster, q=1, g=1)[0] for i, each in enumerate(transforms): triangle = closestTriangleToTransform(each, mesh) weights=list() positions=list() for vtx in triangle: vtxName = '{}.vtx[{}]'.format(mesh, vtx) weights.extend(cmds.skinPercent(skinCluster, vtxName, query=True, value=True )) positions.extend(cmds.xform(vtxName, q=1, ws=1, t=1)) cmds.setAttr('{}.weightsList[{}].weights'.format(node, i), weights, type="doubleArray") cmds.setAttr('{}.weightsList[{}].points'.format(node, i), positions, type="doubleArray") m = cmds.getAttr('{}.wm[0]'.format(each)); cmds.setAttr('{}.weightsList[{}].pbm'.format(node, i), m, type="matrix") for i, loc in enumerate(transforms): dec = cmds.createNode('decomposeMatrix') cmds.connectAttr('{}.outputs[{}]'.format(node, i), '{}.inputMatrix'.format(dec)) cmds.connectAttr('{}.outputTranslate'.format(dec), '{}.translate'.format(loc)) cmds.connectAttr('{}.outputRotate'.format(dec), '{}.rotate'.format(loc)) cmds.connectAttr('{}.outputScale'.format(dec), '{}.scale'.format(loc)) return node
[ "def get_skin_data(skin_cluster):\n # Create skin cluster function set\n skin_cluster_node = to_m_object(skin_cluster)\n skin_cluster_fn = OpenMayaAnim.MFnSkinCluster(skin_cluster_node)\n \n # Get MPlugs for weights\n weight_list_plug = skin_cluster_fn.findPlug(\"weightList\")\n weights_plug = skin_cluster_fn.findPlug(\"weights\")\n weight_list_obj = weight_list_plug.attribute()\n weight_obj = weights_plug.attribute()\n weight_inf_ids = OpenMaya.MIntArray()\n \n skin_weights = {}\n \n # Get current ids\n inf_ids = get_influence_ids(skin_cluster)\n vert_count = weight_list_plug.numElements()\n \n for vert_index in range(vert_count):\n data = {}\n \n # Get inf indexes of non-zero weights\n weights_plug.selectAncestorLogicalIndex(vert_index, weight_list_obj)\n weights_plug.getExistingArrayAttributeIndices(weight_inf_ids)\n \n # {inf_name:weight_value...}\n vert_weights = {}\n inf_plug = OpenMaya.MPlug(weights_plug)\n \n for inf_id in weight_inf_ids:\n inf_plug.selectAncestorLogicalIndex(inf_id, weight_obj)\n \n try:\n inf_name = inf_ids[inf_id]\n vert_weights[inf_name] = inf_plug.asDouble()\n except KeyError:\n pass\n \n data[\"weights\"] = vert_weights\n \n dq_value = cmds.getAttr(\"{0}.bw[{1}]\".format(skin_cluster, vert_index) )\n data[\"dq\"] = dq_value\n \n skin_weights[vert_index] = data\n \n return skin_weights", "def copy_cluster_weights(shape, weight_file, method=\"bilinear\"):\n\n # gets the temporary folder path\n temp_path = get_temp_folder()\n short_name = get_prefix_less_name(shape)\n\n for node in weight_file:\n if not weight_file[node]:\n continue\n cmds.deformerWeights(weight_file[node], im=True, shape=short_name,\n deformer=node, path=temp_path, method=method,\n vertexConnections=True)", "def ImportSkinWeights(self):\n\n # Get directory to save to\n filename = QFileDialog.getOpenFileName(self, \"Import Skin Weights Data\", dir=self._filename, filter=(\"JSON (*.json)\"))[0]\n # Validate directory\n if len(self._filename) < 1:\n print(\"File invalid\")\n ret = QMessageBox.critical(self, self.tr(\"Warning\"),\n self.tr(\"File invalid.\"),\n QMessageBox.Ok)\n return\n else:\n self._filename = filename\n\n # Init data\n data = []\n # Import data from file\n with open(self._filename) as inFile:\n data = json.load(inFile)\n\n cmds.undoInfo(openChunk=True)\n cmds.select(cl=True)\n for d in data or []:\n s = d[\"object\"]\n skin = mel.eval(\"findRelatedSkinCluster {}\".format(s))\n\n if skin != None:\n for idx, vertex in enumerate(d[\"vertices\"] or []):\n influences = []\n for inf in vertex:\n joint = inf[0]\n weight = inf[1]\n influences.append( (joint, weight) )\n\n #if unicode(joint) not in cmds.skinCluster(skin, query=True, inf=True):\n #cmds.select(s, replace=True)\n #cmds.skinCluster(skin, edit=True, addInfluence=unicode(joint))#, normalizeWeights=True, forceNormalizeWeights=True)\n\n\n vtx = s + \".vtx[{}]\".format(idx)\n cmds.skinPercent(skin, vtx, transformValue=influences)\n\n cmds.select(s)\n\n cmds.undoInfo(closeChunk=True)\n\n # Success message\n msgStr = \"Imported skin weights from '{}'\".format(self._filename)\n print(msgStr)\n message = QMessageBox()\n message.setText(msgStr)\n message.setStandardButtons(QMessageBox.Ok)\n message.exec_()", "def export_skinweights(node, path=DEFAULT_PATH, export=True):\n json_data = {}\n\n # return here if node doesn't have a skincluster\n if not bgdev.utils.skincluster.get_skincluster(node):\n return json_data\n\n modules = check_libraries()\n\n # create the folder if doesn't exist\n if not os.path.exists(path):\n 
os.makedirs(path)\n\n # export skinweights\n init_layers = initialize_layers(node)\n ng_data = modules[\"importExport\"].LayerData()\n ng_data.loadFrom(node)\n\n # convert longName to short (for hierarchy changes)\n for each in ng_data.influences:\n each.path = each.path.rpartition(\"|\")[-1]\n\n for layer in ng_data.layers:\n for each in layer.influences:\n each.influenceName = each.influenceName.rpartition(\"|\")[-1]\n\n exporter = modules[\"importExport\"].JsonExporter()\n json_data = exporter.process(ng_data)\n if export:\n export_layer_data(node, json_data, path)\n LOG.info(\"Saving %r weights...\", str(node))\n\n if init_layers:\n remove_layers(node)\n\n return json_data", "def get_skin_weights(skin_cluster, mesh_shape_name):\n\n if python.is_string(skin_cluster):\n skin_cluster, _ = get_skin_cluster(skin_cluster)\n if not skin_cluster:\n return None\n\n mesh_path, mesh_components = mesh.get_mesh_path_and_components(mesh_shape_name)\n if not mesh_path or not mesh_components:\n return None\n\n influences_array = maya.api.OpenMaya.MIntArray()\n path_array = skin_cluster.influenceObjects()\n influences_count = len(path_array)\n for i in range(influences_count):\n influences_array.append(skin_cluster.indexForInfluenceObject(path_array[i]))\n\n weights = skin_cluster.getWeights(mesh_path, mesh_components, influences_array)\n\n return weights", "def skinCluster(*args, addInfluence: Union[AnyStr, List[AnyStr]]=\"\", addToSelection: bool=True,\n after: bool=True, afterReference: bool=True, baseShape: Union[AnyStr,\n List[AnyStr]]=\"\", before: bool=True, bindMethod: Union[int, bool]=0,\n deformerTools: bool=True, dropoffRate: Union[float, bool]=0.0, exclusive:\n Union[AnyStr, bool]=\"\", forceNormalizeWeights: bool=True, frontOfChain:\n bool=True, geometry: Union[AnyStr, List[AnyStr], bool]=\"\", geometryIndices:\n bool=True, heatmapFalloff: float=0.0, ignoreBindPose: bool=True,\n ignoreHierarchy: bool=True, ignoreSelected: bool=True, includeHiddenSelections:\n bool=False, influence: Union[AnyStr, bool]=\"\", lockWeights: bool=True,\n maximumInfluences: Union[int, bool]=0, moveJointsMode: bool=True, name:\n AnyStr=\"\", normalizeWeights: Union[int, bool]=0, nurbsSamples: int=0,\n obeyMaxInfluences: bool=True, parallel: bool=True, polySmoothness: float=0.0,\n prune: bool=True, recacheBindMatrices: bool=True, remove: Union[bool,\n List[bool]]=True, removeFromSelection: bool=True, removeInfluence: Union[AnyStr,\n List[AnyStr]]=\"\", removeUnusedInfluence: bool=True, selectInfluenceVerts:\n AnyStr=\"\", skinMethod: Union[int, bool]=1, smoothWeights: float=0.0,\n smoothWeightsMaxIterations: int=2, split: bool=True, toSelectedBones: bool=True,\n toSkeletonAndTransforms: bool=True, unbind: bool=True, unbindKeepHistory:\n bool=True, useGeometry: bool=True, volumeBind: float=0.0, volumeType: int=0,\n weight: float=0.0, weightDistribution: Union[int, bool]=1, weightedInfluence:\n bool=True, q=True, query=True, e=True, edit=True, **kwargs)->Union[AnyStr,\n Any]:\n pass", "def getSkinnedMeshes(skin_clusters):\n skin_info = {}\n for skin_cluster in skin_clusters:\n joints = skin_cluster.influenceObjects()\n root_joint = getRootParent(joints[0])\n geometry = set(skin_cluster.getGeometry())\n skin_info[root_joint] = skin_info[root_joint] | geometry if root_joint in skin_info else geometry\n\n return skin_info", "def create_skincluster_backup(shape, skin_node):\n\n logger.info(\"Creating skin backup for {}\".format(skin_node))\n\n # gets the skin cluster influences\n influences = 
cmds.listConnections(\"{}.matrix\".format(skin_node))\n\n # creates a duplicate shape of the given shape\n holder_name = \"{}_flex_skin_shape_holder\".format(\n get_prefix_less_name(shape))\n shape_duplicate = create_duplicate(shape, holder_name)\n\n # creates new skin cluster node on duplicate\n skin_holder = cmds.skinCluster(influences, shape_duplicate, bindMethod=0,\n obeyMaxInfluences=False, skinMethod=0,\n weightDistribution=0, normalizeWeights=1,\n removeUnusedInfluence=False, name=\"{}_SKN\"\n .format(holder_name))\n\n # copy the given skin node weights to back up shape\n copy_skin_weights(skin_node, skin_holder[0])\n\n return [\"{}\".format(skin_holder[0])]", "def copy_skin_weights(source_skin, target_skin):\n\n # gets the shape back from the source_skin and target_skin\n # need to do this as providing the sourceSkin and destinationSkin arguments\n # to the copySkinWeights command does not update correctly the shapes\n\n source_shape = cmds.ls(cmds.listHistory(\"{}.outputGeometry\".format(\n source_skin), pdo=False, future=True), dag=True,\n noIntermediate=True)\n target_shape = cmds.ls(cmds.listHistory(\n \"{}.outputGeometry\".format(target_skin),\n pdo=False, future=True), dag=True,\n noIntermediate=True)\n\n # checks if source and target shapes list are bigger than 1\n if len(source_shape) > 1:\n source_shape = source_shape[0]\n if len(target_shape) > 1:\n target_shape = target_shape[0]\n\n cmds.select(source_shape, target_shape)\n\n # copy skin command\n cmds.copySkinWeights(surfaceAssociation=\"closestPoint\", noMirror=True,\n influenceAssociation=(\"label\",\n \"closestJoint\",\n \"oneToOne\"))\n\n # forces refresh\n cmds.refresh()", "def getSkinCluster(_transform):\n result = []\n if not (pm.objExists(_transform)):\n return result\n validList = mel.eval('findRelatedDeformer(\"' + str(_transform) + '\")')\n if validList is None:\n return result\n for elem in validList:\n if pm.nodeType(elem) == 'skinCluster':\n result.append(elem)\n pm.select(result, r=True)\n result_node = pm.selected()\n \n if len(result_node) > 1:\n return result_node\n else:\n try:\n return result_node[0]\n except IndexError:\n return False", "def createAndImport(cls, filePath=None, shape=None):\n\n if not shape:\n try:\n shape = cmds.ls(sl=1)[0]\n\n except:\n raise RuntimeError('No shape selected')\n\n if filePath == None:\n startDir = cmds.workspace(q=1, rootDirectory=1)\n filePath = cmds.fileDialog2(dialogStyle=2, fileMode=1, startingDirectory=startDir,\n fileFilter='Skin Files (*%s)' % SkinCluster.kFileExtension)\n\n if not filePath:\n return\n if not isinstance(filePath, basestring):\n filePath = filePath[0]\n\n # Read the data from the file\n fh = open(filePath, 'rb')\n data = pickle.load(fh)\n fh.close()\n\n # Make sure the vertex count is the same\n meshVertices = cmds.polyEvaluate(shape, vertex=1)\n\n importedVertices = len(data['blendWeights'])\n if meshVertices != importedVertices:\n raise RuntimeError('Vertex counts do not match. 
%d != %d' % (meshVertices, importedVertices))\n\n\n # check if the shape already has a skinCluster\n if SkinCluster.getSkinCluster(shape):\n skinCluster = SkinCluster(shape)\n else:\n # create a new skinCluster\n joints = data['weights'].keys()\n\n # Make sure all the joints exist\n\n unusedImports = []\n # Create a set for get which joint in the scene doesn't have weights\n noMatch = set([SkinCluster.removeNamespaceFromString(x) for x in cmds.ls(type='joint')])\n\n for j in joints:\n if j in noMatch:\n noMatch.remove(j)\n else:\n unusedImports.append(j)\n\n # Remapping the joints\n # if there were unmapped influences ask the user to map them\n if unusedImports and noMatch:\n\n mappingDialog = WeightRemapDialog(getMayaWindow())\n mappingDialog.setInfluences(unusedImports, noMatch)\n mappingDialog.exec_()\n\n for src, dst in mappingDialog.mapping.items():\n # swap the mapping\n data['weights'][dst] = data['weights'][src]\n del data['weights'][src]\n\n # Create the skinCluster with post normalization so setting the weights does not\n # normalize all weights\n joints = data['weights'].keys()\n\n skinCluster = cmds.skinCluster(joints, shape, tsb=1, nw=2, n=data['name'])\n skinCluster = SkinCluster(shape)\n\n skinCluster.setData(data)\n print \"Imported %s\" % filePath", "def set_skin_weights(obj, skin_data, vert_indexes, normalize=False):\n # Get skin cluster\n skin_cluster = get_skin_cluster(obj)\n if skin_cluster is None:\n OpenMaya.MGlobal.displayError(\"Unable to detect a skinCluster on {0}.\".format(obj))\n return\n\n # Get influence info to map with\n inf_data = get_influence_ids(skin_cluster)\n inf_ids = list(inf_data.keys())\n inf_names = list(inf_data.values())\n \n # Remove all existing weights\n if is_curve(obj):\n plug = \"{0}.cv\".format(obj)\n else:\n plug = \"{0}.vtx\".format(obj)\n\n selected_vertexes = [\n \"{0}[{1}]\".format(plug, index)\n for index in vert_indexes\n ]\n\n cmds.setAttr(\"{0}.nw\".format(skin_cluster), 0)\n cmds.skinPercent(skin_cluster, selected_vertexes, prw=100, nrm=0)\n \n # Apply weights per vert\n for vert_index in vert_indexes:\n weight_list_attr = \"{0}.weightList[{1}]\".format(skin_cluster, vert_index)\n for inf_name, weight_value in skin_data[vert_index][\"weights\"].items():\n index = inf_names.index(inf_name)\n weight_attr = \".weights[{0}]\".format(inf_ids[index])\n cmds.setAttr(\"{0}{1}\".format(weight_list_attr, weight_attr), weight_value)\n \n # Apply dual-quarternions\n dq_value = skin_data[vert_index][\"dq\"]\n cmds.setAttr(\"{0}.bw[{1}]\".format(skin_cluster, vert_index), dq_value)\n \n # Re-enable weights normalizing\n cmds.setAttr(\"{0}.nw\".format(skin_cluster), 1)\n \n if normalize:\n cmds.skinCluster(skin_cluster, e=True, forceNormalizeWeights=True)", "def getSkinCluster(src):\n\n if cmds.nodeType(src) == \"skinCluster\":\n srcSkin = src\n else:\n srcSkin = mel.eval('findRelatedSkinCluster(\"' + src + '\")')\n\n return srcSkin", "def ExportSkinWeights(self):\n\n # Get objects from table\n selected = self.objectTable.GetObjects()\n\n # Warning & early-out\n if len(selected) < 1:\n print(\"No objects to export\")\n ret = QMessageBox.warning(self, self.tr(\"Warning\"),\n self.tr(\"No objects to export.\"),\n QMessageBox.Ok)\n return\n\n # Get directory to save to\n filename = QFileDialog.getSaveFileName(self, \"Export Skin Weights Data\", dir=self._filename, filter=(\"JSON (*.json)\"))[0]\n # Validate directory\n if len(self._filename) < 1:\n print(\"File invalid\")\n ret = QMessageBox.critical(self, self.tr(\"Warning\"),\n self.tr(\"File 
invalid.\"),\n QMessageBox.Ok)\n return\n else:\n self._filename = filename\n\n # Init data\n data = []\n\n # For each selected object\n for s in selected:\n skin = mel.eval(\"findRelatedSkinCluster {}\".format(s))\n\n numVertices = cmds.polyEvaluate(v=True)\n d = {\"object\": s, \"numVertices\": numVertices, \"skinCluster\": skin, \"vertices\": []}\n if skin:\n influences = cmds.skinCluster(skin, query=True, inf=True)\n for i in range(0, numVertices):\n vtx = (s + \".vtx[{}]\").format(i)\n\n newData = []\n joints = cmds.skinPercent(skin, vtx, query=True, transform=None) or []\n weights = cmds.skinPercent(skin, vtx, query=True, v=True) or []\n for idx, j in enumerate(joints):\n newData.append( (j, weights[idx]) )\n d[\"vertices\"].append(newData)\n data.append(d)\n\n\n with open(self._filename, \"w\") as outFile:\n json.dump(data, outFile, indent=4)\n\n print(\"Exported skin weights to '{}'\".format(file))\n message = QMessageBox()\n message.setText(\"Successfully exported skin weights\")\n message.setStandardButtons(QMessageBox.Ok)\n message.exec_()", "def extractWeight(self, skinCluster_index= 0, target_mesh = None, _surfaceAssociation='closestPoint', _influenceAssociation=['oneToOne', 'oneToOne']):\n \n if target_mesh == None:\n _target_mesh = self.cleanDuplicate()\n else:\n _target_mesh = target_mesh\n\n \n skincluster = self.all_skinCluster[skinCluster_index]\n destination_skin = pm.duplicate(skincluster, ic=1)[0]\n destination_skin.outputGeometry[0] >> _target_mesh.inMesh\n return(_target_mesh, destination_skin)", "def getSkinCluster(self):\n result = []\n if not (pm.objExists(self.final_mesh)):\n return result\n validList = mel.eval('findRelatedDeformer(\"' + str(self.final_mesh) + '\")') \n if validList == None:\n return result \n for elem in validList:\n if pm.nodeType(elem) == 'skinCluster':\n result.append(elem)\n pm.select(result, r = True)\n\n result_node = pm.selected()\n if result_node == []:\n return []\n elif len(result_node) > 1:\n return result_node or []\n else:\n return result_node", "def apply_weight_sharing(model, bits=5):\n for p in model.parameters():\n # if 'weight' not in name:\n # continue\n data = p.data\n if data.numel() < 2**bits:\n continue\n weight = data.cpu().numpy()\n shape = weight.shape\n # print(shape)\n mat = weight.reshape(-1,1)\n mat = csc_matrix(mat)\n min_ = min(mat.data)\n max_ = max(mat.data)\n space = np.linspace(min_, max_, num=2**bits)\n kmeans = KMeans(n_clusters=len(space), init=space.reshape(-1,1), n_init=1, precompute_distances=True, algorithm=\"full\")\n kmeans.fit(mat.data.reshape(-1,1))\n new_weight = kmeans.cluster_centers_[kmeans.labels_].reshape(-1)\n mat.data = new_weight\n mat_n = mat.toarray()\n p.data = torch.from_numpy(mat_n.reshape(shape)).to('cuda')", "def export_multiple_skinweights(nodes, path=DEFAULT_PATH):\n data = {}\n for each in sorted(nodes):\n weights = export_skinweights(each)\n if weights:\n data[each] = weights\n\n # create parent folder if doesn't exists\n parent = os.path.dirname(path)\n if not os.path.exists(parent):\n os.makedirs(parent)\n\n # export combined weights data\n with open(path, \"w\") as stream:\n json.dump(data, stream, indent=4)\n LOG.info(\"Weights successfully exported!\")", "def get_skin_cluster(dag_path=None):\n\n if not dag_path:\n return None, None\n\n if not python.is_string(dag_path):\n dag_path = dag_path.fullPathName()\n\n skin_cluster = maya.cmds.ls(maya.cmds.listHistory(dag_path), type='skinCluster')\n if not skin_cluster:\n return None, None\n\n skin_name = skin_cluster[0]\n 
selection_list = api.SelectionList()\n selection_list.create_by_name(skin_name)\n\n skin_node = selection_list.get_depend_node(0)\n skin_node = maya.api.OpenMayaAnim.MFnSkinCluster(skin_node)\n\n return skin_node, skin_name" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Changes the value of var to value, in target card json target
def doEdit(var, value, target):
    currentValue = target.get(var, "")
    newValue = Simplifier.simplify(str(value).replace(f"{{{var}}}", str(currentValue)))
    target[var] = newValue
[ "def set_value_directly(self, client, var_value):\n pass", "def set_data_value(var, value):\n\n substitute(data, var, value)", "def set_value(self, var_value):\n pass", "def update_card(card, card_data):\n card.name = card_data[\"name\"]\n card.hero = card_data[\"playerClass\"]\n card.img_url = card_data[\"img\"]\n card.dbfId = card_data[\"dbfId\"]\n card.set = card_data[\"cardSet\"]\n card.rarity = card_data[\"rarity\"]\n card.cost = card_data[\"cost\"]\n card.save()", "def set(self, path: str, value: Any) -> None:\n pydash.set_(self.json, path, value)", "def set_var(self,variable,value):\n self.template=self.template.replace(\"@{}@\".format(variable),value)", "def set_variable(self, request, context):\n response = SetVariableResponse()\n value = decode(request.value)\n self._delegator.set_variable(request.component, request.variable, value)\n return response", "def set_card (self, card):\n\t\tif ((card == 1) or (card == 2)):\n\t\t\tself._card = card\n\t\telse:\n\t\t\tsys.stderr.write(\"\\nERROR : %s card number must be 1 or 2 so it can't be %s !\\n\" % (self._target_id, card))\n\t\t\tsys.exit(1)", "def bcp_player_variable(self, name, value, prev_value, change, **kwargs):\n\n if self.player:\n self.player[name] = value", "def set_custom_variable(self, key, value):\n self.logger.info(\"Set custom variable : %s:%s\" % (key, value))\n\n try:\n if 'custom_variables' not in self._answer_payload:\n self._answer_payload['custom_variables'] = {}\n self._answer_payload['custom_variables'][key] = value\n except Exception as e:\n self.logger.error(\"Error on set custom variables : %s\" % e)", "def set_custom_value(self, value):\n self.logger.info(\"Set custom value : %s\" % value)\n\n try:\n self._answer_payload['custom_value'] = value\n except Exception as e:\n self.logger.error(\"Error on set custom variables : %s\" % e)", "def set(self,obj,value):\r\n\t\tvalue = self.parse(value)\r\n\t\tsetattr(obj,self.name,value)", "def set_field_value(json_data, path, value):\n # call parse_json to get the complete format string\n complete_path = get_complete_path(json_data, path)\n\n if not complete_path:\n raise Exception(\"path error: %s\" % path)\n\n formats = complete_path.split(\"_\")\n obj, size = json_data, len(formats)\n\n for index in range(size):\n try:\n field = formats[index]\n field = int(field)\n except ValueError as e:\n pass\n\n if index == size - 1:\n obj[field] = value\n else:\n obj = obj[field]\n\n return json_data", "def test_set_value():\n accessories = Accessories.from_file(\"tests/fixtures/koogeek_ls1.json\")\n\n on_char = accessories.aid(1).characteristics.iid(8)\n assert on_char.value is False\n\n on_char.value = True\n\n assert on_char.value is True", "def _set_variable_into_namespace(namespace, key, value):\n namespace.set_dynamic_variable(key, value)", "def var_value(self, var_value):\n self._var_value = var_value", "def set_value(self, name, test, path):\n self.value = test.params.get(name, path, self.default)", "def update_pref(self, req, trigger, target, key, opts, pref):", "def set(cls, name, value):\n\t\t\n\t\t# --- Identify frame ---\n\t\tframe = cls.__identifyFrame(name)\n\t\t\n\t\t# --- Remove frame prefix ---\n\t\tname = name[3:]\n\t\t\n\t\t# --- Check if exists ---\n\t\tif name not in frame:\n\t\t\tError.exit(Error.varExistence, \"Couldn't set value to non-existing variable '{0}'\".format(name))\n\t\t\n\t\t# --- Get actual value ---\n\t\tif type(value) == var:\t# If trying to add var (e.g. 
MOVE GF@aaa GF@bbb)\n\t\t\tvalue = value.getValue()\t# Save its value not whole object\n\t\t\t\n\t\t# --- Save value to frame ---\n\t\tframe[name] = value;" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a numeric PIN with length digits
def get_pin(length=6):
    pin = str(random.sample(range(10 ** (length - 1), 10 ** length), 1)[0])
    print("pin "+pin)
    return pin
[ "def __generate_pin(cls) -> str:\n return str(randbelow(10 ** cls.PIN_DIGITS)).zfill(cls.PIN_DIGITS)", "def randomPin(length):\n return ''.join(secrets.choice(string.digits) for i in range(length))", "def _get_pin(self, length=5):\n return str(random.sample(range(10**(length-1), 10**length), 1)[0])", "def get_pin_digits(pin):\n digits = []\n for i in range(1, 5):\n digit = pin % 10\n pin = int(pin / 10)\n digits = [digit] + digits\n return digits", "def numeric_password(settings):\n return \"1\" * settings.MIN_PASSWORD_LENGTH", "def password_digit():\n return cinco0_md5()[5]", "def safe_number(self):\n mask = '*' * (len(self.account_number) - 4)\n return '{0}{1}'.format(mask, self.account_number[-4:])", "def generate_password(length=20):\r\n # type: (int) -> str\r\n return ('%0'+str(length)+'x') % random.randrange(16 ** length)", "def safe_number(self):\n mask = '*' * (len(self.card_number) - 4)\n return '{0}{1}'.format(mask, self.card_number[-4:])", "def genRingSetting(self):\n num = random.randrange(0,25) #Generates a random number from 0 to 25\n if num < 10: #If the number is a single digit\n num = str(num) #Turn it into a string\n num = '0' + num #Add a 0 before it\n return str(num) #Return the string of the number to the user in double digit format", "def get_passport_number():\n func = WorkerApi.rand_int_str\n return f'{func(2)} {func(2)} {func(6)}'", "def encrypt_PIN(self, PIN):\n assert self.is_number_of_max_digits(PIN, 4)\n return self.encrypt_number(self.rsa_pin, PIN)", "def num_string(length):\n import math\n base = 10\n lines = []\n for ndigit in range(int(math.log10(length)) + 1):\n interval = int(math.pow(base, ndigit))\n lines.append(''.join(\n (str(n % base) + (' ' * (interval-1)))\n for n in range((length - 1) // interval + 1)))\n return '\\n'.join(lines)", "def _format_int(n, length=20):\n return f'{n: >{length}}'", "def get_random_string(length):\n return \"{0:0{1}x}\".format(random.getrandbits(length * 4), length)", "def getpassword(minlen=8,maxlen=8):\n\n pwdlen=random.randint(minlen,maxlen)\n zeichensatz=string.lowercase+string.uppercase+string.digits\n passwort=\"\"\n\n for zeichen in range(pwdlen):\n zufallszeichen=random.randint(0, 61)\n passwort+=zeichensatz[zufallszeichen]\n\n return passwort", "def luhn_check_num(digits):\n return str(Bank.luhn_control(digits) * 9 % 10)", "def lsdigit(number, index):\n return int(number % (10**(index + 1)) / (10**index))", "def int_to_address(n, length):\n return \"{0:b}\".format(n).zfill(length)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fixture for DataFrame of ints which are constant per column
def int_frame_const_col():
    df = DataFrame(
        np.tile(np.arange(3, dtype="int64"), 6).reshape(6, -1) + 1,
        columns=["A", "B", "C"],
    )
    return df
[ "def fixture_int_dataframe() -> pd.DataFrame:\n return pd.DataFrame({\"a\": [-1, 0, 1]})", "def test_get_integer_dataframe():\n dfg = DataFrameGenerator()\n df = dfg.get_integer_dataframe(10, 5)\n assert df.shape == (10, 5), \"Wrong size dataframe generated\"\n assert list(df.columns) == [\"Integer0\", \"Integer1\", \"Integer2\", \"Integer3\", \"Integer4\"], \"Wrong column names\"\n assert df.dtypes.values.all() == int, \"Wrong data types\"", "def _to_constant_df(self, num):\n if isinstance(num, pd.DataFrame):\n# pdb.set_trace()\n return num\n else:\n return self.data['ones'].copy() * num", "def test_df():\n return pd.DataFrame({\n 'intcol': [1, 2, 3],\n 'strcol': ['four', 'five', 'six'],\n 'floatcol': [7.0, 8.0, 9.0]\n })", "def add_integer_column(df, colname, minval, maxval):\n values = np.random.randint(minval, maxval, len(df.index))\n df[colname] = values\n return df", "def test_integer_col_na_values(self):\n x = map(str, range(20))\n x[13] = \"\"\n df = pd.DataFrame.from_dict(dict(a=x, b=x))\n specs = dict(delimiter=',', dtypes={'a': int, 'b': int})\n schema = dict(data=specs)\n with DummyProjectFactory(schema, df) as project:\n df = project.load_dataset(\"data\")\n self.assertEqual(df['a'].dtype, float)\n self.assertEqual(df['b'].dtype, float)", "def generate_uniform_data():\n data = pd.DataFrame()\n x = np.array([])\n y = np.array([])\n\n for xval in np.arange(0, 1, 1/100):\n for yval in np.arange(0, 1, 1/100):\n x = np.append(x, xval)\n y = np.append(y, yval)\n data['x'] = x\n data['y'] = y\n comp = discriminator(data['x'])\n data['realclass'] = None\n data.loc[data['y'] > comp, 'realclass'] = CLASS_A\n data.loc[data['y'] < comp, 'realclass'] = CLASS_B\n\n return data", "def create_dummies(df):", "def test_int_data(self):\n self.assert_dim_type_supported({\"x\": \"uniform(0, 5000, discrete=True)\"})", "def test_num_cols(self):\n def num_cols(nvals, base):\n \"\"\"Returns the number of columns output for a given number of distinct input values\"\"\"\n vals = [str(i) for i in range(nvals)]\n df = pd.DataFrame({'vals': vals})\n encoder = encoders.BaseNEncoder(base=base)\n encoder.fit(df)\n return len(list(encoder.transform(df)))\n\n self.assertEqual(num_cols(1, 2), 1)\n self.assertEqual(num_cols(2, 2), 2)\n self.assertEqual(num_cols(3, 2), 2)\n self.assertEqual(num_cols(4, 2), 3)\n self.assertEqual(num_cols(7, 2), 3)\n self.assertEqual(num_cols(8, 2), 4)\n self.assertEqual(num_cols(62, 2), 6)\n self.assertEqual(num_cols(63, 2), 6)\n self.assertEqual(num_cols(64, 2), 7)\n self.assertEqual(num_cols(65, 2), 7)\n\n # nvals = 0 returns the original dataframe unchanged, so it still has 1 column even though\n # logically there should be zero.\n self.assertEqual(num_cols(0, 2), 1)\n\n self.assertEqual(num_cols(55, 7), 3)", "def df_numeric_column(min_value=0, max_value=1, num_rows=100):\n # Generate numeric column\n return pd.Series(np.random.uniform(min_value, max_value, num_rows))", "def test_column_index(self):\n c = Column('foo', range(3))\n self.assertEqual(c[0], 0)\n self.assertEqual(c[1], 1)\n self.assertEqual(c[2], 2)", "def test_sample_constant_column(self):\n # Setup\n instance = GaussianMultivariate()\n X = np.array([\n [1.0, 2.0],\n [1.0, 3.0],\n [1.0, 4.0],\n [1.0, 5.0]\n ])\n instance.fit(X)\n\n # Run\n result = instance.sample(5)\n\n # Check\n assert result.shape == (5, 2)\n results = result[~result.isna()].all()\n assert results.all()\n assert result.loc[:, 0].equals(pd.Series([1.0, 1.0, 1.0, 1.0, 1.0], name=0))\n\n # This is to check that the samples on the non constant 
column are not constant too.\n assert len(result.loc[:, 1].unique()) > 1\n\n covariance = instance.covariance\n assert (~pd.isna(covariance)).all().all()", "def testExampleDataFrameGeneration(self):\n df = generate_dataframe()\n columns = self.all_fields_except(['random'])\n self.assertDataFrameCorrect(df, 'dataframe_result.csv',\n check_data=columns, check_types=columns)", "def generate_fake_data(column_type):\n return faker_for_columntype[type(column_type)]()", "def mixed_type_frame() -> DataFrame:\n return DataFrame(\n {\n \"a\": 1.0,\n \"b\": 2,\n \"c\": \"foo\",\n \"float32\": np.array([1.0] * 10, dtype=\"float32\"),\n \"int32\": np.array([1] * 10, dtype=\"int32\"),\n },\n index=np.arange(10),\n )", "def test_factorize_columns_invalid_input(dataframe):\n with pytest.raises(NotImplementedError):\n dataframe.factorize_columns(1)", "def test_init_from_dataframe():\n df = pd.read_csv(\"tests/data/dataset/dataset_dataframe.csv\")\n ds = Dataset(df)\n assert len(ds.dimensions) == 2\n assert ds.dimension(\"gender\")\n assert ds.dimension(\"region\")\n assert ds.json[\"size\"] == [2,2]", "def test_real_data(self):\n self.assert_dim_type_supported({\"x\": \"uniform(0, 5)\"})" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Logs a message, preserving the progress bar correct output format.
def log_message(self, message: str) -> None:
    from tqdm import tqdm
    tqdm.write(message, file=self.tqdm_kwargs.get("file", None))
[ "def progress(message):\n\tprint_(message, 3, inline=True)", "def log(self, msg):\n\n print(f'{self.prefix} - {msg}')", "def log(self, level: int, message: str):\n\n log.log(level, message)\n\n if self.command:\n self.command.write(level, text=message)", "def log_message(self, *args, **kwargs):\n pass", "def show_progress(self, message=None):\r\n if self.in_progress_hanging:\r\n if message is None:\r\n sys.stdout.write('.')\r\n sys.stdout.flush()\r\n else:\r\n if self.last_message:\r\n padding = ' ' * max(0, len(self.last_message)-len(message))\r\n else:\r\n padding = ''\r\n sys.stdout.write('\\r%s%s%s%s' % (' '*self.indent, self.in_progress, message, padding))\r\n sys.stdout.flush()\r\n self.last_message = message", "def _progress(self, msg, *a, **k):\n if self.verbosity >= 1:\n logger.info(msg, *a, **k)", "def _print(self, message):\n if self.log is not None:\n self.log.info(message)", "def _log(self, msg):\n self.telegram_queue.put(f\"{__name__.split('.')[-1]}: {msg}\")", "def log(self, level, message):\n if level <= self.verbosity:\n print(message)", "def log_output(self, prefix, message):\n\t\tsys.stdout.write(\"%s %s\\n\" % (prefix, message))", "def log(self, message):\n #logs.logger.debug(\"asyncore log: %s\" % message)", "def log(self, message):\n #logs.logger.debug(\"asyncore log: %s\" % message)\n pass", "def log(message):\n if not options[\"quiet\"]:\n print message", "def log(message):\n stderr.write(\"%s\\n\" % message)", "def log(self, level, message):\n if int(level) <= self.verbosity:\n print message", "def log(self, msg):\n current_datetime = self.get_date_time()\n self.file.write(\"%s %s\\n\" % (current_datetime, msg))", "def log(self, msg, level=LOG_INFO):\n self.send_command('log', {\n 'msg': msg,\n 'level': level,\n })", "def write(message):\n demisto.info(message)", "def log(self, message):\n val = \"{}\".format(message)\n # self.vim.command('redraws!')\n self.vim.out_write(val + '\\n')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Encode the sequence as a list of floats using the provided vocab.
def encode_dna_as_floats(sequence: Iterable[str],
                         vocab: str = dc_constants.VOCAB,
                         offset: int = 0) -> Optional[Iterable[float]]:
  ids = []
  for base in sequence:
    if base not in vocab:
      return None
    base_id = float(vocab.index(base) + offset)
    ids.append(base_id)
  return ids
[ "def __data_to_vector(vocab_list, input_set):\n out = [0] * len(vocab_list)\n\n for word in input_set:\n if word in vocab_list:\n # Calculate word frequency\n out[vocab_list.index(word)] += 1\n return out", "def savefloatlist(doc, pnode, name, value):\n subnode = ET.SubElement(pnode, name)\n subnode.text = ','.join([str(x) for x in value])\n return subnode", "def put_float_list_to_feature(seq_example: tf.train.SequenceExample,\n value: Sequence[Sequence[float]], key: str):\n for s in value:\n seq_example.feature_lists.feature_list.get_or_create(\n key).feature.add().float_list.value[:] = s", "def encode_sequence(text: List[str], vocab: Dict) -> Sequence[int]:\n return [vocab[ngram] if ngram in vocab else vocab[\"<UNK>\"] for ngram in text]", "def sequence_to_list_ids(sequence, vocab):\n pass", "def getfloatlist(node):\n try:\n return [float(x) for x in node.text.split(\",\")]\n except ValueError:\n raise XMLError(\"Invalid float list for \" + node.tag)", "def floats(self) -> List[NumericType]:\n return [float(v) for v in self._record]", "def toFloatList(values):\n\treturn list(map(lambda va: float(va), values))", "def get_tf(word_lst, vocab):\n count_of_each_word = Counter(word_lst)\n doc_word_count = len(word_lst)\n return np.array([count_of_each_word[v] / doc_word_count if v in count_of_each_word else 0 for v in vocab])", "def Convert(known_face_encodings): # , face_to_compare):\r\n print('Convert')\r\n lst = ''\r\n lst = known_face_encodings.split(',')\r\n arr = np.array([], dtype='float64')\r\n for word in lst:\r\n n = float(word)\r\n arr = np.append(arr, n)\r\n print(arr)\r\n return arr", "def add_float_feature(val):\n return tf.train.Feature(float_list=tf.train.FloatList(value=[val]))", "def dump_vocab(vocab, path, encoding=\"Utf-8\"):\n with open(path, \"w\", encoding=encoding) as fout:\n for word, freq in vocab:\n fout.write(\"%s\\t%d\\n\" % (word, freq))", "def _float_feature(list_of_floats):\n return tf.train.Feature(float_list=tf.train.FloatList(value=list_of_floats))", "def encode_and_flatten(\n raw_text_iter: IterableDataset[str],\n tokenizer: Callable[[str], list[str]],\n vocab: Vocab,\n) -> torch.Tensor:\n data = [\n torch.tensor(vocab(tokenizer(item)), dtype=torch.long)\n for item in raw_text_iter\n ]\n return torch.cat(tuple(filter(lambda t: t.numel() > 0, data)))", "def to_context_vec( iterable, context=FloatContext ):\n to_float = context.from_int\n return [to_float(x) for x in iterable]", "def data_process(vocab):\n book = open('../goblet_book.txt', 'r')\n\n data = [torch.tensor([vocab[char] for char in list(line)],\n dtype=torch.long) for line in book.readlines()]\n book.close()\n\n return torch.cat(tuple(filter(lambda t: t.numel() > 0, data)))", "def floats(float_list):\n return [ float(number) for number in float_list ]", "def transform_frequencies(texts: list):\n tokenized = [text.split() for text in texts]\n words = sum(tokenized, [])\n vocabulary = dict(Counter(words))\n converted_texts = np.zeros((len(texts), len(vocabulary)))\n for i, text in enumerate(texts):\n converted_texts[i] = [vocabulary[word] if word in text else 0 for word in vocabulary]\n\n return converted_texts", "def test_asfloats(self):\n input = dict(counts=[[0,1], [2,3], [4,5], [6,7], [8,9]])\n expect = numpy.array(input['counts'], dtype=float)\n coll = RegionCollection(**input)\n float_coll = coll.asfloats()\n self.assertEqual(float_coll.counts, coll.counts)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the sequence with GAP_OR_PAD and GAP_OR_PAD tokens removed.
def get_sequence_without_gaps_or_padding(sequence: str) -> str:
  return sequence.replace(dc_constants.GAP_OR_PAD, '').replace(dc_constants.GAP_OR_PAD, '')
[ "def ungapped(self):\n s = self.sequence\n for sGapChar in GAP_CHARACTERS:\n s = s.replace(sGapChar, '')\n return s", "def cleangaps(self):\n \n \n if self.type == 'prot':\n gapchars = 'x-?'\n elif self.type == 'dna':\n gapchars = 'n-?' \n else:\n raise SeqError('Can only remove gaps from DNA and protein sequences!')\n \n seqlist = []\n for char in self.sequence:\n if char.lower() not in gapchars:\n seqlist.append(char)\n \n self.sequence = ''.join(seqlist)", "def omit_gap_pos(self, allowed_gap_frac=1 - eps, motif_length=1):\n is_array = isinstance(self, ArrayAlignment)\n try:\n alpha = self.moltype.alphabets.degen_gapped\n except:\n alpha = self.moltype.alphabet\n\n gaps = list(self.moltype.gaps)\n if is_array:\n gaps = list(map(alpha.index, gaps))\n\n gaps_ok = GapsOk(\n gaps, allowed_gap_frac, is_array=is_array, motif_length=motif_length\n )\n # if we're not deleting the 'naughty' seqs that contribute to the\n # gaps, it's easy...\n result = self.filtered(gaps_ok, motif_length=motif_length)\n return result", "def omit_gap_seqs(self, allowed_gap_frac=0):\n gaps_ok = GapsOk(list(self.moltype.gaps), allowed_frac=allowed_gap_frac)\n\n return self.take_seqs_if(gaps_ok)", "def omit_gap_sequences(self, maximum_gap_frequency):\n # handle empty Alignment case\n if self.is_empty():\n return self.__class__([])\n\n base_frequencies = self.kmer_frequencies(k=1, relative=True)\n gap_chars = self[0].gap_chars\n seqs_to_keep = []\n for seq, f in zip(self, base_frequencies):\n gap_frequency = sum([f[c] if c in f else 0.0 for c in gap_chars])\n if gap_frequency <= maximum_gap_frequency:\n seqs_to_keep.append(seq.metadata['id'])\n return self.subalignment(seqs_to_keep=seqs_to_keep)", "def remove_pad(x, pad_remover, mode):\n # Concatenate all tokens (without padding)\n x = flatten_all_but_last(x)\n\n # Remove padding for training and eval\n if mode != ModeKeys.PREDICT:\n # This is a hack to allows inference when the <go> token\n # is detected as padding and removed. This works for now because there is\n # no padding at inference.\n x = pad_remover.remove(x)\n\n x = tf.expand_dims(x, axis=0) # Now batch_size=1\n return x", "def degap(self):\n gaps = self.gap_alphabet()\n indices = [i for i, e in enumerate(self) if e not in gaps]\n return self[indices]", "def remove_tokens(self, text):\r\n\r\n return text.replace(self.PAD_TK, \"\").replace(self.UNK_TK, \"\")", "def omit_gap_runs(self, allowed_run=1):\n ok_gap_run = GapsOk(self.moltype.gaps, gap_run=True, allowed_run=allowed_run)\n\n return self.take_seqs_if(ok_gap_run)", "def trimSequences(sequences):\n starts = []\n ends = []\n for s in sequences:\n starts.append(re.search('[atcgATCG]', str(s.seq)).start())\n ends.append(len(s) - re.search('[atcgATCG]', str(s.seq)[::-1]).end())\n res = []\n for s in sequences:\n res.append(s[max(starts):min(ends)])\n return(res)", "def omit_gap_positions(self, maximum_gap_frequency):\n # handle empty Alignment case\n if self.is_empty():\n return self.__class__([])\n\n position_frequencies = self.position_frequencies()\n gap_chars = self[0].gap_chars\n\n positions_to_keep = []\n for i, f in enumerate(position_frequencies):\n gap_frequency = sum([f[c] if c in f else 0.0 for c in gap_chars])\n if gap_frequency <= maximum_gap_frequency:\n positions_to_keep.append(i)\n return self.subalignment(positions_to_keep=positions_to_keep)", "def test_consistent_gap_degen_handling(self):\n # the degen character '?' 
can be a gap, so when we strip either gaps or\n # degen characters it should be gone too\n raw_seq = \"---??-??TC-GGCG-GCA-G-GC-?-C-TAN-GCGC-CCTC-AGGA?-???-??--\"\n raw_ungapped = re.sub(\"[-?]\", \"\", raw_seq)\n raw_no_ambigs = re.sub(\"[N?]+\", \"\", raw_seq)\n dna = self.DNA(raw_seq)\n self.assertEqual(dna.degap(), raw_ungapped)\n self.assertEqual(dna.strip_degenerate(), raw_no_ambigs)\n self.assertEqual(dna.strip_bad_and_gaps(), raw_ungapped)", "def gyration_orbits(self):\n ASMs = list(self)\n perm = Permutation([ASMs.index(asm.gyration())+1 for asm in ASMs])\n return tuple([tuple([ASMs[i-1] for i in cyc])\n for cyc in perm.cycle_tuples()])", "def degap(self):\n return SequenceCollection([seq.degap() for seq in self])", "def removePolyA(seq):\n i = len(seq)-1\n while i >= 0:\n if seq[i].lower() != \"a\":\n break\n i -= 1\n nonAPos = i\n if len(seq)-nonAPos >= 7:\n seq = seq[0:nonAPos+1]\n return seq", "def remove_every_other(seq):\n\n # print(seq[::2])\n return seq[::2]", "def _chop_excess_tokens(self, sequence: List, total_len: int):\n excess = total_len - self.model_max_sequence_length\n del sequence[-1 * excess :]", "def strip_seqs(self):\n # nxt: points to first character beyond current escape sequence.\n # width: currently estimated display length.\n input = self.padd()\n outp = u''\n nxt = 0\n for idx in range(0, len(input)):\n if idx == nxt:\n # at sequence, point beyond it,\n nxt = idx + measure_length(input[idx:], self._term)\n if nxt <= idx:\n # append non-sequence to outp,\n outp += input[idx]\n # point beyond next sequence, if any,\n # otherwise point to next character\n nxt = idx + measure_length(input[idx:], self._term) + 1\n return outp", "def remove_EOS_PAD(long_phrase):\n i=0\n phrase= []\n\n while len(long_phrase+[0])>i and (long_phrase+[0])[i] not in (0,1):\n phrase.append(long_phrase[i])\n i+=1\n return phrase" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns start and end coordinates of label in the reference genome. Querying the reference genome for these coordinates will produce the label sequence. We need to add 1 to either start or end depending on the orientation of the reference.
def get_label_start_end(
    label_base_positions: Iterable[int],
    strand: bed_pb2.BedRecord.Strand) -> Tuple[Optional[int], Optional[int]]:
  # Gap and padding tokens may have a position of -1, since they are not
  # actually present in the reference. Remove all instances of -1, since we do
  # not want to consider it when computing min/max position.
  valid_label_base_positions = set(label_base_positions)
  valid_label_base_positions.discard(-1)
  if not valid_label_base_positions:
    return None, None
  start = min(valid_label_base_positions)
  end = max(valid_label_base_positions)
  if strand == bed_pb2.BedRecord.Strand.FORWARD_STRAND:
    end += 1
  elif strand == bed_pb2.BedRecord.Strand.REVERSE_STRAND:
    start -= 1
  else:
    raise ValueError('Strand must be set.')
  return start, end
[ "def get_refseq_pos(self, preyid):\n\n prey_start_aa = string.atoi(self.ivv_to_refseq.val_accord_hd(\n preyid, \"Prey Region Start (AA)\"))\n prey_end_aa = string.atoi(self.ivv_to_refseq.val_accord_hd(\n preyid, \"Prey Region End (AA)\"))\n refseqid = self.get_refseq(preyid)\n \n cds_start, cds_end = self.get_refseq_CDS(refseqid)\n return (refseqid,\n cds_start + (prey_start_aa - 1)*3,\n cds_start + (prey_end_aa - 1)*3 + 2)", "def labelpos(self):\n return self._labelpos", "def get_label_range(self):\n bbox = calc_bbox3d(self.label_image > 0)\n slice_range = bbox[-1]\n return slice_range.start, slice_range.stop", "def getStartAndEndCoordinates(alignedSegment):\n return alignedSegment.reference_start, getFirstNonClippedPositionInRead(alignedSegment, readSeq), \\\n alignedSegment.reference_end-1, getLastNonClippedPositionInRead(alignedSegment, readSeq)", "def getLabelPos(self, label):\n return [self.rect[0]+self.rect[2]//2-label.get_rect().width//2, self.rect[1]+self.rect[3]//2-20]", "def get_label_position(self) -> LabelPositionStr:\n return LABEL_POSITION.inverse[self.labelPosition()]", "def get_feature_start_end(feature_record):\n return (feature_record.location.start.position+1, feature_record.location.end.position)", "def get_xref_pos(self):\n\n ret = 0\n prev_line = None\n\n for line in self._reverse_lines():\n if line == '':\n continue\n elif line == 'startxref':\n break\n\n prev_line = line\n\n if prev_line is None:\n logger.error('Can not find startxref')\n return ret\n\n prev_line = self._strip_comment(prev_line)\n\n if prev_line.isdigit():\n ret = int(prev_line)\n logger.debug('startxref = %s', ret)\n\n return ret", "def extract_pos(self, value_start, value_end, strand):\r\n start = int(value_start)\r\n if value_start == value_end:\r\n end = start + consts.TARGET_LEN - 1\r\n else:\r\n end = int(value_end)\r\n return start, end", "def LabelStart(self):\n return self._get_attribute('labelStart')", "def get_timerange(self, label_metadata):\n\n label_metadata = label_metadata.sort_values(0)\n start = dt.datetime.fromtimestamp(int(label_metadata.iloc[0,1]))\n end = dt.datetime.fromtimestamp(int(label_metadata.iloc[-1,1]))\n return start,end", "def translate_to_genomic_coords(start, end, frame, genome_size):\n nucleic_start = start * 3\n nucleic_end = end * 3\n if frame == 1:\n genomic_start = nucleic_start - 2\n genomic_end = nucleic_end - 2\n if frame == 2:\n genomic_start = nucleic_start - 1\n genomic_end = nucleic_end - 1\n if frame == 3:\n genomic_start = nucleic_start\n genomic_end = nucleic_end\n if frame == 4:\n genomic_start = genome_size - (nucleic_start - 2)\n genomic_end = genome_size - (nucleic_end - 2)\n if frame == 5:\n genomic_start = genome_size - (nucleic_start - 1)\n genomic_end = genome_size - (nucleic_end -1)\n if frame == 6:\n genomic_start = genome_size - nucleic_start\n genomic_end = genome_size - nucleic_end\n\n if frame in [1,2,3]:\n strand = '+'\n elif frame in [4,5,6]:\n strand = '-'\n else:\n raise ValueError(\"frame should be one of 1,2,3,4,5,6\")\n\n return genomic_start, genomic_end, strand", "def get_start_end_xy(self,i):\n\t direction_inc=[[-1,0],[-1,1],[0,1],[1,1],[1,0],[1,-1],[0,-1],[-1,-1]]\n\t x_start=self.position[i][1]\n\t y_start=self.position[i][0]\n\t x_end=x_start+(len(self.words[i])-1)*direction_inc[self.direction[i]][1]\n\t y_end=y_start+(len(self.words[i])-1)*direction_inc[self.direction[i]][0]\n\t return (x_start,y_start),(x_end,y_end)", "def get_reference_position(self):\n return self.get_num(self.query(\"REFP?\"))", "def 
get_ref_seq(self):\n s = []\n\n reflen = len(self.msa.loc[self.reference])\n\n for i in range(reflen):\n reference_res = str(self.msa.iloc[:, i].loc[self.reference])\n\n if reference_res not in \"-.\":\n s.append(reference_res)\n return \"\".join(s)", "def get_ref_pos(self):\n out = []\n reflen = len(self.msa.loc[self.reference])\n for i in range(reflen):\n if list(self.msa.iloc[:, i].loc[self.reference])[0] not in \".-\":\n out.append(i)\n return out", "def retrieve_nuc_seq_pos( self, pos_start, pos_end, str_form = False ):\n\n if not pos_start in self.arr_genome_pos or not pos_end in self.arr_genome_pos:\n return None\n \n #retrieve the index of the genomic positions as this is needed to retrieve the nucleotide position\n i_pos_start = self.arr_genome_pos.index( pos_start )\n i_pos_end = self.arr_genome_pos.index( pos_end )\n #retrieve nucleotide sequence before pos_oi\n return self.retrieve_nuc_seq( i_pos_start, i_pos_end, str_form )", "def _get_start(self, variant, reference_start, cigar, ignore_softclip=False):\n indels = get_indel_from_cigar(cigar, ignore_softclip)\n start = variant.POS - reference_start - 1\n # for pos, val in indels.iteritems(): # python2\n for pos, val in indels.items():\n if pos > start:\n break\n if val[0] == 'I':\n start += val[1]\n elif val[0] == 'D':\n start -= val[1]\n return start", "def _get_label_offset(dataframe, offset=0.01):\n\n x_offset = (dataframe.iloc[:, 0].max() - dataframe.iloc[:, 0].min()) * offset\n y_offset = (dataframe.iloc[:, 1].max() - dataframe.iloc[:, 1].min()) * offset\n\n return x_offset, y_offset" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets subreads/encoded field from example as a string.
def get_encoded_subreads_from_example(example):
  return example.features.feature['subreads/encoded'].bytes_list.value[0]
[ "def get_field(self, bib_entry, field):\n output = bib_entry.fields[field] if field in bib_entry.fields else \"\"\n return self.strip_braces(output)", "def _get_field_for_example(data, example_id, field):\n matches_id = data[\"cifar10_test_test_idx\"] == example_id\n return data[matches_id][field].values", "def get_field(self, field_name, unused_args, unused_kwargs):\n attributes = field_name.split('.')\n name = attributes.pop(0)\n if attributes and attributes[0] == 'content':\n value = self.content.get(name)\n attributes.pop(0)\n if not attributes:\n attributes = ['default', 'line']\n else:\n value = self.__dict__.get(name, '')\n if not value:\n return '', name\n label = True\n parts = []\n for attr in attributes:\n if attr == 'line':\n parts.append('\\n')\n elif attr == 'key':\n parts.append(name)\n parts.append('=')\n parts.append(unicode(value))\n value = None\n elif attr == 'value':\n label = False\n elif attr in ('default', 'flattened', 'json', 'yaml'):\n buf = StringIO.StringIO()\n buf.write('\\n')\n if label:\n value = {name: value}\n resource_printer.Print(value, attr, out=buf, single=True)\n value = buf.getvalue()\n if value.endswith('\\n'):\n value = value[:-1]\n if value:\n parts.append(unicode(value))\n return ''.join(parts), name", "def get_encoded_label_from_example(example):\n return example.features.feature['label/encoded'].bytes_list.value[0]", "def get_encoded_deepconsensus_input_from_example(example):\n return example.features.feature[\n 'deepconsensus_input/encoded'].bytes_list.value[0]", "def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title}: \" + s\n\n return s", "def get_str(self, name):\n return str(self.field(name).toString())", "def _nested_lookup(doc, field):\n value = doc\n keys = field.split(\".\")\n try:\n for k in keys:\n value = value[k]\n except KeyError:\n return None\n\n return str(value)", "def get_ontology_field(sample, biosample_name, is_list=False):\n if is_list:\n tmp = list()\n if biosample_name in sample['characteristics']:\n for item in sample['characteristics'][biosample_name]:\n tmp.append(item['ontologyTerms'][0])\n return tmp\n else:\n if biosample_name in sample['characteristics']:\n return sample['characteristics'][biosample_name][0][\n 'ontologyTerms'][0]\n else:\n return ''", "def get_field(key, field, decode=False, pipe=client):\n redis_key = create_key(key, field)\n return native_str(pipe.get(redis_key)) if decode else pipe.get(redis_key)", "def getFormatFieldExtraText(self, formatName, fieldName):\n nodeFormat = (globalref.mainControl.activeControl.model.\n formats[formatName])\n field = nodeFormat.fieldDict[fieldName]\n return (field.prefix, field.suffix)", "def get_sequence_string(seq):\n if type(seq) == Bio.SeqRecord:\n seqstr = seq.seq.tostring()\n elif type(seq) == Bio.Seq.Seq:\n seqstr = seq.tostring()\n else:\n seqstr = seq\n return seqstr", "def Value(self) -> str:", "def get_subfield(cls, event, field_whole):\n field_split = field_whole.split('.')\n event_field = event\n for field in field_split:\n if len(event_field) > 0:\n event_field = event_field[field]\n else:\n return None\n return event_field", "def string(self): \n return self.__string", "def subfield():\n return Subfield()", "def __str__(self):\n # self._examples.values\n string = \"\"\n for e in self._examples:\n for i, v in enumerate(e.values):\n if self._attributes[i].type == 'Nominal':\n string = string + self._attributes[i].domain[v]\n else:\n 
string = string + v\n if i == len(e.values) - 1:\n string = string + \"\\n\"\n else:\n string = string + \" \"\n return string", "def get_bytestring(self, eieio_type):", "def base_field(self):\n return self.field[1]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the subreads/shape field from example as a list of int64.
def get_subreads_shape_from_example(example):
  assert len(example.features.feature['subreads/shape'].int64_list.value) == 3
  return example.features.feature['subreads/shape'].int64_list.value[:]
[ "def get_int64_list(feature_name,\n example):\n return get_feature(feature_name, example).int64_list", "def get_encoded_subreads_from_example(example):\n return example.features.feature['subreads/encoded'].bytes_list.value[0]", "def __getAllFieldIDsFromFieldIDAndSizeAsIntList(self, fieldID, sx, sy):\n sFields = self.__getAllFieldIDsFromFieldIDAndSizeAsString(fieldID, sx, sy)\n listFields = sFields.split(',') #Stringarray\n \n for i in range(0, len(listFields)):\n listFields[i] = int(listFields[i])\n \n return listFields", "def get_list_of_int2(self):\n pass", "def field_ids(self):\n if hasattr(self, '_m_field_ids'):\n return self._m_field_ids if hasattr(self, '_m_field_ids') else None\n\n _pos = self._io.pos()\n self._io.seek(self.header.field_ids_off)\n self._debug['_m_field_ids']['start'] = self._io.pos()\n self._m_field_ids = [None] * (self.header.field_ids_size)\n for i in range(self.header.field_ids_size):\n if not 'arr' in self._debug['_m_field_ids']:\n self._debug['_m_field_ids']['arr'] = []\n self._debug['_m_field_ids']['arr'].append({'start': self._io.pos()})\n _t__m_field_ids = Dex.FieldIdItem(self._io, self, self._root)\n _t__m_field_ids._read()\n self._m_field_ids[i] = _t__m_field_ids\n self._debug['_m_field_ids']['arr'][i]['end'] = self._io.pos()\n\n self._debug['_m_field_ids']['end'] = self._io.pos()\n self._io.seek(_pos)\n return self._m_field_ids if hasattr(self, '_m_field_ids') else None", "def read_ints(self, dtype='i4'):\n return self.read_record(dtype)", "def fields(self):\n assert self.is_block()\n assert self.tag () != OCamlValue.DOUBLE_ARRAY_TAG # FIXME not implemented\n\n words = self.size_words()\n if words is None:\n return [None]\n\n a = []\n for i in range(int(words)):\n field = self._unsafe_field(i)\n a.append(field)\n if field is None:\n break # Append a single invalid value to indicate out-of-bounds to the user\n return a", "def get_reads(self, roi):\n reads, _ = self.get_reads_and_counts(roi)\n return reads", "def convert_to_array(self): \n self.reads = np.asarray(self.reads, dtype=\"int64\")\n self.sampling=True", "def read_numbers(self):\n image = self.read_resize()\n return self.process_image(image)", "def get_label_shape_from_example(example):\n assert len(example.features.feature['label/shape'].int64_list.value) == 1\n return example.features.feature['label/shape'].int64_list.value[:]", "def get_num_passes_from_example(example):\n assert len(\n example.features.feature['subreads/num_passes'].int64_list.value) == 1\n return example.features.feature['subreads/num_passes'].int64_list.value[0]", "def IntegerList(self):\n return self.list", "def getRefReads(self):# -> int\n return self.refReads", "def convert_to_list(self): \n self.reads = list(self.reads)\n self.sampling = False", "def get_coords(self, field):\n return_list = []\n\n coords = self.coords()\n for i, coord in enumerate(self[field].dims):\n if coord in coords:\n return_list.append(self[coord])\n else:\n return_list.append(np.arange(len(self[field].shape[i])))\n\n return return_list", "def get_srid_list(self):\n # Pull the meta data for the model\n opts = self.__class__._meta\n \n # Filter the field set down to the polygon fields\n fields = [i.name for i in opts.fields if i.name.startswith('polygon_')]\n \n # Return the SRID number that comes after the underscore.\n return [int(i.split('_')[1]) for i in fields]", "def readArrayList(self, loader):\n size = self.readInt()\n if size < 0:\n return\n return map(lambda x: self.readValue(loader), range(size))", "def get_ids_as_slice_or_list(self):\n 
return slice(self._lo_atom, self._lo_atom + self._n_atoms)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the subreads/num_passes field from example as a list of int64.
def get_num_passes_from_example(example):
  assert len(
      example.features.feature['subreads/num_passes'].int64_list.value) == 1
  return example.features.feature['subreads/num_passes'].int64_list.value[0]
[ "def getRefReads(self):# -> int\n return self.refReads", "def list_of_runnums (ins, exp) :\n try : expruns = experiment_info.experiment_runs(ins, exp)\n #if exp == 'xcs83814' : return []\n except : return []\n\n return [int(rec['num']) for rec in expruns]\n #runs = experiment_info.experiment_runs(ins, exp)\n #lst = []\n #for rec in runs :\n # lst.append( int(rec['num']) )\n #return lst", "def get_subreads_shape_from_example(example):\n assert len(example.features.feature['subreads/shape'].int64_list.value) == 3\n return example.features.feature['subreads/shape'].int64_list.value[:]", "def f_get_seq_len_list(self):\n return [x.seq_length() for x in self.m_seq_info]", "def get_int64_list(feature_name,\n example):\n return get_feature(feature_name, example).int64_list", "def _get_samples(self) -> int:\n return self._samples", "def get_run_numbers(self) -> List[Tuple]:\n return self.run_numbers", "def _expected_reads(run_info_file):\n reads = []\n if os.path.exists(run_info_file):\n tree = ElementTree()\n tree.parse(run_info_file)\n read_elem = tree.find(\"Run/Reads\")\n reads = read_elem.findall(\"Read\")\n return len(reads)", "def gatherReadCounts(samplesList, scriptsDir, threads, alignmentPath, outRoot, stype, mode):\n reads = 0\n ext = \".pruned.bam\"\n if mode == \"all_reads\":\n ext = \".bam\"\n for i in range(len(samplesList)):\n bam = os.path.join(alignmentPath, outRoot) + \".\" + stype + \".\" + str(i) + ext\n reads += int(subprocess.run([os.path.join(scriptsDir, \"get_readcount.sh\"), bam, str(threads)], capture_output=True, text=True).stdout.strip(\"\\n\"))\n return reads", "def get_encoded_subreads_from_example(example):\n return example.features.feature['subreads/encoded'].bytes_list.value[0]", "def getAltReads(self):# -> int\n return self.altReads", "def calculate_total_number_of_reads(conversionResults, numOfLanes, numOfSamples):\n total_number_of_reads = 0\n for lane in range(numOfLanes):\n for sample in range(numOfSamples):\n numberOfReads = int(conversionResults[lane][\"DemuxResults\"][sample][\"NumberReads\"])\n total_number_of_reads += numberOfReads\n return total_number_of_reads", "def get_reads(self, roi):\n reads, _ = self.get_reads_and_counts(roi)\n return reads", "def get_number_of_loadable_samples(self):\n return sum([loader.get_number_of_loadable_samples() for loader in self.loaders])", "def sum_reads(self, sample):\n total_reads = 0.0\n arts = lims.get_artifacts(samplelimsid = sample.id, process_type = self.process_types)\n for art in arts:\n if art.qc_flag == 'PASSED' and '# Reads' in art.udf:\n total_reads += float(art.udf.get('# Reads'))\n return total_reads/1000000", "def get_runs(self) -> int:", "def _get_run_length_ac(self):\n self._run_length_ac = []\n for block in self.data:\n self._run_length_ac.extend(\n encode_run_length(tuple(iter_zig_zag(block))[1:])\n )", "def samples(self) -> List[int]:\n return self.shadow_tree.get_node_samples()[self.id]", "def getRuns():\r\n \r\n total=0\r\n try:\r\n total = int(runs.get())\r\n except:\r\n return\r\n return(total)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets label/encoded field from example as a string.
def get_encoded_label_from_example(example):
  return example.features.feature['label/encoded'].bytes_list.value[0]
[ "def label_from_example(example):\n val = example.features.feature['label'].int64_list.value\n if val:\n return int(val[0])\n else:\n return None", "def get_label_field(self):\n\n return self.label_field", "def get_label(issue):\n\t# if exists then return else return empty string\n\tlabels = issue.get('fields', {}).get('labels')\n\tif labels:\n\t\treturn ' '.join(labels)\n\telse:\n\t\t''", "def field_label(self):\n return self._field_label", "def get_label(self) -> str:\n pass", "def label(field):\n if hasattr(field,'long_name'):\n return field.long_name\n elif hasattr(field,'units'):\n return \"%s (%s)\"%(field.nxname,field.units)\n else:\n return field.nxname", "def label_text(boundfield):\r\n return boundfield.label", "def get_label_member(self):\n\n return self.label_field", "def get_field(self, field_name, unused_args, unused_kwargs):\n attributes = field_name.split('.')\n name = attributes.pop(0)\n if attributes and attributes[0] == 'content':\n value = self.content.get(name)\n attributes.pop(0)\n if not attributes:\n attributes = ['default', 'line']\n else:\n value = self.__dict__.get(name, '')\n if not value:\n return '', name\n label = True\n parts = []\n for attr in attributes:\n if attr == 'line':\n parts.append('\\n')\n elif attr == 'key':\n parts.append(name)\n parts.append('=')\n parts.append(unicode(value))\n value = None\n elif attr == 'value':\n label = False\n elif attr in ('default', 'flattened', 'json', 'yaml'):\n buf = StringIO.StringIO()\n buf.write('\\n')\n if label:\n value = {name: value}\n resource_printer.Print(value, attr, out=buf, single=True)\n value = buf.getvalue()\n if value.endswith('\\n'):\n value = value[:-1]\n if value:\n parts.append(unicode(value))\n return ''.join(parts), name", "def getLabel(self):\n result = self.content[:12]\n if result == \"\":\n if self.tags:\n result = str(self.tags.first)\n return result", "def get_label(self, label):\n return self.labels[label]", "def get_label(self):\n\n return str(self.lbl_framesource.text())", "def get_label_text(self, label) -> str:\n return self.labelText(LABELS.inv[label])", "def get_label(self) -> str:\n if self.found:\n return self.detail['label']\n else:\n logger.warning(\"Return empty for label as fail to find ontology on OLS for term \"+self.short_term)\n return \"\"", "def label(self):\n # type: () -> str\n labels = self.__class__.__labels__\n return force_str(labels.get(self.value, self.name))", "def get_label(self, ):\n return self.attrs.get(self.AttributeNames.LABEL, None)", "def getLabel(self):\n return self.content[:12]", "def label_to_name(self, label):\r\n return self.labels[label]", "def serialize_example(snippet, lab):\n feature = {\n 'snippet': _bytes_feature(snippet),\n 'label': _int64_feature(lab) \n }\n \n # Create a Features message using tf.train.Example.\n example_proto = tf.train.Example(features=tf.train.Features(feature=feature))\n return example_proto.SerializeToString()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the label/shape field from example as a list of int64.
def get_label_shape_from_example(example):
  assert len(example.features.feature['label/shape'].int64_list.value) == 1
  return example.features.feature['label/shape'].int64_list.value[:]
[ "def get_int64_list(feature_name,\n example):\n return get_feature(feature_name, example).int64_list", "def label_from_example(example):\n val = example.features.feature['label'].int64_list.value\n if val:\n return int(val[0])\n else:\n return None", "def provide_label(self):\n return [(k, v.shape) for k, v in self.label]", "def provide_label(self):\n res = [(k, tuple(list(v.shape[0:]))) for k, v in self.label]\n print(\"label : \" + str(res))\n return res", "def get_labels(self) -> List[int]:\n return [self.dataset[i][1] for i in range(self.num_samples())]", "def provide_label(self):\n return [(k, tuple([self.batch_size] + list(v.shape[1:]))) for k, v in self.label]", "def _get_label(self, label_buf):\n label_buf_len = self.data_size * self.LABEL_SIZE + 8\n label_offset = 8\n label_arr = []\n while label_offset < label_buf_len:\n temp = struct.unpack_from(self._LABEL_SIZE_FMT, label_buf, label_offset)[0]\n if self.one_hot:\n vec = np.zeros(10)\n vec[temp] = 1\n label_arr.append(vec)\n else:\n label_arr.append(temp)\n label_offset += self.LABEL_SIZE\n return label_arr", "def labels(self) -> t.List[int]:\n return [p.label for p in self.partials]", "def get_labels(self) -> List[str]:\n return [\"0\", \"1\"]", "def get_labels(self) -> np.ndarray:\n return self._dataset.get_labels()[self._ids]", "def _generate_elements(example, label):\n\n class_label = None\n parsed = tf.train.Example.FromString(example.numpy())\n if parsed.features.feature[label].int64_list.value:\n val = parsed.features.feature[label].int64_list.value\n if len(val) > 0:\n class_label = val[0]\n else:\n val = parsed.features.feature[label].bytes_list.value\n if len(val) > 0:\n class_label = val[0].decode()\n return (class_label, parsed)", "def _int64_list_feature(values):\n if not isinstance(values, collections.Iterable):\n values = [values]\n\n return tf.train.Feature(int64_list=tf.train.Int64List(value=values))", "def label_ids(self):\n return self._label_ids", "def __get_ohe_label__(self, label_idx) -> List[int]:\n\n label = [0] * self.n_classes\n label[label_idx] = 1\n\n return label", "def get_subreads_shape_from_example(example):\n assert len(example.features.feature['subreads/shape'].int64_list.value) == 3\n return example.features.feature['subreads/shape'].int64_list.value[:]", "def labels(self):\n return np.array(self._label)", "def sample_ids_dtype(self):\n # Copied from the abstract seq2seq.CustomHelper class.\n return tf.int32", "def label_list(self):\n return self._label_list", "def format_for_scikit(labels, dataset):\n nd = []\n l = [int(lab) for lab in labels]\n for i in dataset:\n tmp = [int(v) for v in i.values()]\n nd.append(tmp)\n return l,nd" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets deepconsensus_input/encoded field from example as a string.
def get_encoded_deepconsensus_input_from_example(example):
  return example.features.feature[
      'deepconsensus_input/encoded'].bytes_list.value[0]
[ "def get_encoded_label_from_example(example):\n return example.features.feature['label/encoded'].bytes_list.value[0]", "def get_encoded_subreads_from_example(example):\n return example.features.feature['subreads/encoded'].bytes_list.value[0]", "def deepconsensus_input_to_example(\n deepconsensus_input: deepconsensus_pb2.DeepConsensusInput,\n example_height: int,\n inference: bool,\n counters: Optional[Dict[str, metrics.Metrics.counter]] = None,\n) -> Optional[tf.train.Example]:\n if not deepconsensus_input.subreads:\n if counters and counters['examples_no_subreads_counter']:\n counters['examples_no_subreads_counter'].inc()\n return\n\n # Get the example_width from the first subreads.\n example_width = len(deepconsensus_input.subreads[0].bases)\n\n # The full example will include 4 rows for the signal to noise ratio (sn)\n # values. The remaining rows will contain three sets of per-base values:\n # the base, pulse width (pw), and interpulse distance (ip). Some models\n # may use only a subset of this information downstream.\n per_base_rows = get_per_base_rows(example_height)\n if per_base_rows < 0 or per_base_rows % 4 != 0:\n raise ValueError('example_height - 5 must be non-negative, and divisible '\n 'by four.')\n max_passes = get_max_passes(example_height)\n\n if len(deepconsensus_input.subreads) > max_passes:\n # Increment a counter if the number of subreads from the\n # deepconsensus_input is more than the `max_passes` derived from the\n # input `example_height`.\n # But still continue.\n if counters and counters['examples_with_discarded_subreads']:\n counters['examples_with_discarded_subreads'].inc()\n\n example = tf.train.Example()\n features = example.features\n data = np.zeros(\n shape=(example_height, example_width, 1), dtype=dc_constants.NP_DATA_TYPE)\n data += dc_constants.GAP_OR_PAD_INT\n\n # Number of subreads is capped at num_subreads. 
In the cases of fewer\n # subreads, rows are left empty.\n kept_subreads = 0\n # Add extra dimension so that shape is (example_width, 1).\n base_indices, pw_indices, ip_indices, strand_indices, ccs_indices, sn_indices = get_indices(\n max_passes)\n for i in range(min(len(deepconsensus_input.subreads), max_passes)):\n subread = deepconsensus_input.subreads[i]\n # Each tuple should already be padded to the appropriate length.\n assert len(subread.bases) == example_width\n\n encoded_bases = encode_dna_as_floats(subread.bases) # pytype: disable=wrong-arg-types\n assert encoded_bases is not None\n data[base_indices[0] + i] += np.expand_dims(np.array(encoded_bases), -1)\n data[pw_indices[0] + i] += np.expand_dims(np.array(subread.pw), -1)\n data[ip_indices[0] + i] += np.expand_dims(np.array(subread.ip), -1)\n data[strand_indices[0] + i] += np.expand_dims(\n np.expand_dims(np.array(subread.subread_strand), -1), -1)\n kept_subreads += 1\n\n if kept_subreads == 0:\n if counters and counters['examples_no_subreads_counter']:\n counters['examples_no_subreads_counter'].inc()\n return\n\n if deepconsensus_input.ccs_sequence:\n encoded_ccs_bases = encode_dna_as_floats(deepconsensus_input.ccs_sequence) # pytype: disable=wrong-arg-types\n data[slice(*ccs_indices)] += np.expand_dims(np.array(encoded_ccs_bases), -1)\n\n data[slice(*sn_indices)] += np.expand_dims(\n np.expand_dims(np.array(deepconsensus_input.sn), -1), -1)\n\n features.feature['subreads/encoded'].bytes_list.value.append(data.tostring())\n features.feature['subreads/shape'].int64_list.value.extend(data.shape)\n features.feature['subreads/num_passes'].int64_list.value.append(kept_subreads)\n\n if not inference:\n label_bases_list = encode_dna_as_floats(deepconsensus_input.label.bases) # pytype: disable=wrong-arg-types\n assert label_bases_list is not None\n # Final shape of label should be (example_width, ).\n label_matrix = np.array(label_bases_list).astype(dc_constants.NP_DATA_TYPE)\n features.feature['label/encoded'].bytes_list.value.append(\n label_matrix.tostring())\n features.feature['label/shape'].int64_list.value.extend(label_matrix.shape)\n features.feature['deepconsensus_input/encoded'].bytes_list.value.append(\n deepconsensus_input.SerializeToString())\n return example", "def label_from_example(example):\n val = example.features.feature['label'].int64_list.value\n if val:\n return int(val[0])\n else:\n return None", "def variant_from_example(example):\n features = example.features.feature\n var_string = features['variant/encoded'].bytes_list.value[0]\n return variants_pb2.Variant.FromString(var_string)", "def example_to_data(self, example):\n raise NotImplementedError", "def serialize_example(snippet, lab):\n feature = {\n 'snippet': _bytes_feature(snippet),\n 'label': _int64_feature(lab) \n }\n \n # Create a Features message using tf.train.Example.\n example_proto = tf.train.Example(features=tf.train.Features(feature=feature))\n return example_proto.SerializeToString()", "def _get_encoding_form(self, input):\n if self.inference_procedure == 'direct':\n return input\n else:\n raise NotImplementedError", "def get_llvm_str(self):", "def _create_serialized_example(predecessor, current, successor, vocab):\n example = tf.train.Example(features=tf.train.Features(feature={\n \"decode_pre\": _int64_feature(_sentence_to_ids(predecessor, vocab)),\n \"encode\": _int64_feature(_sentence_to_ids(current, vocab)),\n \"decode_post\": _int64_feature(_sentence_to_ids(successor, vocab)),\n }))\n\n return example.SerializeToString()", "def 
CSourceToInput(source: str) -> str:\n bytecode = CSourceToBytecode(source)\n return bytecode", "def process_example(example_string: tf.train.Example,\n schema: tfgnn.GraphSchema):\n spec = tfgnn.create_graph_spec_from_schema_pb(schema)\n graph = tfgnn.parse_single_example(spec, example_string)\n\n # Note: the output tags cannot be structured; they must be single string\n # objects.\n for key, tensor in iter_stats_graph(graph):\n if isinstance(tensor, tf.RaggedTensor):\n tensor = tensor.flat_values\n for value in tensor.numpy().flat:\n yield beam.pvalue.TaggedOutput(key, value)", "def parse_preprocessed_example(example_proto):\n features = {\n 'spec': tf.VarLenFeature(dtype=tf.float32),\n 'spectrogram_hash': tf.FixedLenFeature(shape=(), dtype=tf.int64),\n 'labels': tf.VarLenFeature(dtype=tf.float32),\n 'label_weights': tf.VarLenFeature(dtype=tf.float32),\n 'length': tf.FixedLenFeature(shape=(), dtype=tf.int64),\n 'onsets': tf.VarLenFeature(dtype=tf.float32),\n 'offsets': tf.VarLenFeature(dtype=tf.float32),\n 'velocities': tf.VarLenFeature(dtype=tf.float32),\n 'sequence_id': tf.FixedLenFeature(shape=(), dtype=tf.string),\n 'note_sequence': tf.FixedLenFeature(shape=(), dtype=tf.string),\n }\n record = tf.parse_single_example(example_proto, features)\n input_tensors = InputTensors(\n spec=tf.sparse.to_dense(record['spec']),\n spectrogram_hash=record['spectrogram_hash'],\n labels=tf.sparse.to_dense(record['labels']),\n label_weights=tf.sparse.to_dense(record['label_weights']),\n length=record['length'],\n onsets=tf.sparse.to_dense(record['onsets']),\n offsets=tf.sparse.to_dense(record['offsets']),\n velocities=tf.sparse.to_dense(record['velocities']),\n sequence_id=record['sequence_id'],\n note_sequence=record['note_sequence'])\n return input_tensors", "def instruction_to_string(self, instruction):\r\n if isinstance(instruction, dict):\r\n return instruction['description']\r\n elif self.allowed_states:\r\n return str(self.allowed_states[instruction])\r\n else:\r\n return str(instruction)", "def _get_field_for_example(data, example_id, field):\n matches_id = data[\"cifar10_test_test_idx\"] == example_id\n return data[matches_id][field].values", "def get_example(example_id=None):\n # This is all local, requires no external GPT3 calls\n # Return all examples\n if not example_id:\n return json.dumps(gpt.get_all_examples())\n\n example = gpt.get_example(example_id)\n if not example:\n return error(\"id not found\", HTTPStatus.NOT_FOUND)\n return json.dumps(example.as_dict())", "def get_example_from_prop_spec(self, prop_spec):\n if 'example' in prop_spec.keys() and self.use_example: # From example\n return prop_spec['example']\n elif 'default' in prop_spec.keys(): # From default\n return prop_spec['default']\n elif 'enum' in prop_spec.keys(): # From enum\n return prop_spec['enum'][0]\n elif '$ref' in prop_spec.keys(): # From definition\n return self._example_from_definition(prop_spec)\n elif 'type' not in prop_spec: # Complex type\n return self._example_from_complex_def(prop_spec)\n elif prop_spec['type'] == 'array': # Array\n return self._example_from_array_spec(prop_spec)\n elif prop_spec['type'] == 'file': # File\n return (StringIO('my file contents'), 'hello world.txt')\n else: # Basic types\n if 'format' in prop_spec.keys() and prop_spec['format'] == 'date-time':\n return self._get_example_from_basic_type('datetime')[0]\n elif isinstance(prop_spec['type'], list): # Type is a list\n return self._get_example_from_basic_type(prop_spec['type'][0])[0]\n else:\n return 
self._get_example_from_basic_type(prop_spec['type'])[0]", "def resolve_input(event, config):\n write_error('Starting resolve_input')\n if ('cumulus_message' in config and 'input' in config['cumulus_message']):\n input_path = config['cumulus_message']['input']\n return resolve_path_str(event, input_path)\n write_error('End resolve_input')\n return event.get('payload')", "def get_encoded(self):\n return self.key" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns tf.Example created from the given DeepConsensusInput proto.
def deepconsensus_input_to_example( deepconsensus_input: deepconsensus_pb2.DeepConsensusInput, example_height: int, inference: bool, counters: Optional[Dict[str, metrics.Metrics.counter]] = None, ) -> Optional[tf.train.Example]: if not deepconsensus_input.subreads: if counters and counters['examples_no_subreads_counter']: counters['examples_no_subreads_counter'].inc() return # Get the example_width from the first subreads. example_width = len(deepconsensus_input.subreads[0].bases) # The full example will include 4 rows for the signal to noise ratio (sn) # values. The remaining rows will contain three sets of per-base values: # the base, pulse width (pw), and interpulse distance (ip). Some models # may use only a subset of this information downstream. per_base_rows = get_per_base_rows(example_height) if per_base_rows < 0 or per_base_rows % 4 != 0: raise ValueError('example_height - 5 must be non-negative, and divisible ' 'by four.') max_passes = get_max_passes(example_height) if len(deepconsensus_input.subreads) > max_passes: # Increment a counter if the number of subreads from the # deepconsensus_input is more than the `max_passes` derived from the # input `example_height`. # But still continue. if counters and counters['examples_with_discarded_subreads']: counters['examples_with_discarded_subreads'].inc() example = tf.train.Example() features = example.features data = np.zeros( shape=(example_height, example_width, 1), dtype=dc_constants.NP_DATA_TYPE) data += dc_constants.GAP_OR_PAD_INT # Number of subreads is capped at num_subreads. In the cases of fewer # subreads, rows are left empty. kept_subreads = 0 # Add extra dimension so that shape is (example_width, 1). base_indices, pw_indices, ip_indices, strand_indices, ccs_indices, sn_indices = get_indices( max_passes) for i in range(min(len(deepconsensus_input.subreads), max_passes)): subread = deepconsensus_input.subreads[i] # Each tuple should already be padded to the appropriate length. assert len(subread.bases) == example_width encoded_bases = encode_dna_as_floats(subread.bases) # pytype: disable=wrong-arg-types assert encoded_bases is not None data[base_indices[0] + i] += np.expand_dims(np.array(encoded_bases), -1) data[pw_indices[0] + i] += np.expand_dims(np.array(subread.pw), -1) data[ip_indices[0] + i] += np.expand_dims(np.array(subread.ip), -1) data[strand_indices[0] + i] += np.expand_dims( np.expand_dims(np.array(subread.subread_strand), -1), -1) kept_subreads += 1 if kept_subreads == 0: if counters and counters['examples_no_subreads_counter']: counters['examples_no_subreads_counter'].inc() return if deepconsensus_input.ccs_sequence: encoded_ccs_bases = encode_dna_as_floats(deepconsensus_input.ccs_sequence) # pytype: disable=wrong-arg-types data[slice(*ccs_indices)] += np.expand_dims(np.array(encoded_ccs_bases), -1) data[slice(*sn_indices)] += np.expand_dims( np.expand_dims(np.array(deepconsensus_input.sn), -1), -1) features.feature['subreads/encoded'].bytes_list.value.append(data.tostring()) features.feature['subreads/shape'].int64_list.value.extend(data.shape) features.feature['subreads/num_passes'].int64_list.value.append(kept_subreads) if not inference: label_bases_list = encode_dna_as_floats(deepconsensus_input.label.bases) # pytype: disable=wrong-arg-types assert label_bases_list is not None # Final shape of label should be (example_width, ). 
label_matrix = np.array(label_bases_list).astype(dc_constants.NP_DATA_TYPE) features.feature['label/encoded'].bytes_list.value.append( label_matrix.tostring()) features.feature['label/shape'].int64_list.value.extend(label_matrix.shape) features.feature['deepconsensus_input/encoded'].bytes_list.value.append( deepconsensus_input.SerializeToString()) return example
[ "def get_encoded_deepconsensus_input_from_example(example):\n return example.features.feature[\n 'deepconsensus_input/encoded'].bytes_list.value[0]", "def parse_preprocessed_example(example_proto):\n features = {\n 'spec': tf.VarLenFeature(dtype=tf.float32),\n 'spectrogram_hash': tf.FixedLenFeature(shape=(), dtype=tf.int64),\n 'labels': tf.VarLenFeature(dtype=tf.float32),\n 'label_weights': tf.VarLenFeature(dtype=tf.float32),\n 'length': tf.FixedLenFeature(shape=(), dtype=tf.int64),\n 'onsets': tf.VarLenFeature(dtype=tf.float32),\n 'offsets': tf.VarLenFeature(dtype=tf.float32),\n 'velocities': tf.VarLenFeature(dtype=tf.float32),\n 'sequence_id': tf.FixedLenFeature(shape=(), dtype=tf.string),\n 'note_sequence': tf.FixedLenFeature(shape=(), dtype=tf.string),\n }\n record = tf.parse_single_example(example_proto, features)\n input_tensors = InputTensors(\n spec=tf.sparse.to_dense(record['spec']),\n spectrogram_hash=record['spectrogram_hash'],\n labels=tf.sparse.to_dense(record['labels']),\n label_weights=tf.sparse.to_dense(record['label_weights']),\n length=record['length'],\n onsets=tf.sparse.to_dense(record['onsets']),\n offsets=tf.sparse.to_dense(record['offsets']),\n velocities=tf.sparse.to_dense(record['velocities']),\n sequence_id=record['sequence_id'],\n note_sequence=record['note_sequence'])\n return input_tensors", "def _create_serialized_example(input1, input2, label):\n input1_str = _string_feature(input1)\n input2_str = _string_feature(input2)\n label_str = _label_feature(label)\n example = tf.train.Example(features=tf.train.Features(feature={\n \"input1\": input1_str,\n \"input2\": input2_str,\n \"labels\": label_str,\n }))\n return example.SerializeToString()", "def fromTFExample(bytestr):\n import tensorflow as tf\n example = tf.train.Example()\n example.ParseFromString(bytestr)\n return example", "def input_tensors_to_example(inputs, hparams):\n del hparams\n\n feature = {\n 'spec': tf.train.Feature(\n float_list=tf.train.FloatList(value=inputs.spec.flatten())),\n 'spectrogram_hash': tf.train.Feature(\n int64_list=tf.train.Int64List(value=[inputs.spectrogram_hash])),\n 'labels': tf.train.Feature(\n float_list=tf.train.FloatList(value=inputs.labels.flatten())),\n 'label_weights': tf.train.Feature(\n float_list=tf.train.FloatList(value=inputs.label_weights.flatten())),\n 'length': tf.train.Feature(\n int64_list=tf.train.Int64List(value=[inputs.length])),\n 'onsets': tf.train.Feature(\n float_list=tf.train.FloatList(value=inputs.onsets.flatten())),\n 'offsets': tf.train.Feature(\n float_list=tf.train.FloatList(value=inputs.offsets.flatten())),\n 'velocities': tf.train.Feature(\n float_list=tf.train.FloatList(value=inputs.velocities.flatten())),\n 'sequence_id': tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[inputs.sequence_id])),\n 'note_sequence': tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[inputs.note_sequence])),\n }\n\n return tf.train.Example(features=tf.train.Features(feature=feature))", "def _make_example(X, y, n, target_type='int'):\n\n feature = {}\n feature['X'] = tf.train.Feature(\n float_list=tf.train.FloatList(value=X.flatten()))\n feature['n'] = tf.train.Feature(\n int64_list=tf.train.Int64List(value=n.flatten()))\n\n if target_type == 'int':\n feature['y'] = tf.train.Feature(\n int64_list=tf.train.Int64List(value=y.flatten()))\n elif target_type in ['float', 'signal']:\n y = y.astype(np.float32)\n feature['y'] = tf.train.Feature(\n float_list=tf.train.FloatList(value=y.flatten()))\n else:\n raise ValueError('Invalid target type.')\n\n # 
Construct the Example proto object\n example = tf.train.Example(features=tf.train.Features(feature=feature))\n return example", "def _create_serialized_example(predecessor, current, successor, vocab):\n example = tf.train.Example(features=tf.train.Features(feature={\n \"decode_pre\": _int64_feature(_sentence_to_ids(predecessor, vocab)),\n \"encode\": _int64_feature(_sentence_to_ids(current, vocab)),\n \"decode_post\": _int64_feature(_sentence_to_ids(successor, vocab)),\n }))\n\n return example.SerializeToString()", "def build(input_reader_config):\n if not isinstance(input_reader_config, input_reader_pb2.InputReader):\n raise ValueError('input_reader_config not of type '\n 'input_reader_pb2.InputReader.')\n\n if input_reader_config.WhichOneof('input_reader') == 'tf_record_input_reader':\n config = input_reader_config.tf_record_input_reader\n _, string_tensor = parallel_reader.parallel_read(\n config.input_path,\n reader_class=tf.TFRecordReader,\n num_epochs=(input_reader_config.num_epochs\n if input_reader_config.num_epochs else None),\n num_readers=input_reader_config.num_readers,\n shuffle=input_reader_config.shuffle,\n dtypes=[tf.string, tf.string],\n capacity=input_reader_config.queue_capacity,\n min_after_dequeue=input_reader_config.min_after_dequeue)\n\n return tf_example_decoder.TfExampleDecoder().decode(string_tensor)\n\n raise ValueError('Unsupported input_reader_config.')", "def create_example(vector, label, record_id):\n features = {\n 'id': _bytes_feature(str(record_id)),\n 'waves': _float_feature(np.asarray(vector)),\n 'label': _int64_feature(np.asarray(label)),\n }\n return tf.train.Example(features=tf.train.Features(feature=features))", "def create_example_train(row, vocab):\n context, utterance, label = row\n context_transformed = transform_sentence(context, vocab)\n utterance_transformed = transform_sentence(utterance, vocab)\n context_len = len(next(vocab._tokenizer([context])))\n utterance_len = len(next(vocab._tokenizer([utterance])))\n label = int(float(label))\n\n # New Example\n example = tf.train.Example()\n example.features.feature[\"context\"].int64_list.value.extend(context_transformed)\n example.features.feature[\"utterance\"].int64_list.value.extend(utterance_transformed)\n example.features.feature[\"context_len\"].int64_list.value.extend([context_len])\n example.features.feature[\"utterance_len\"].int64_list.value.extend([utterance_len])\n example.features.feature[\"label\"].int64_list.value.extend([label])\n return example", "def to_tfexample(self):\n features = dict(\n structure=tf.train.Feature(\n int64_list=tf.train.Int64List(value=self.structure)),\n reward=tf.train.Feature(\n float_list=tf.train.FloatList(value=[self.reward])),\n batch_index=tf.train.Feature(\n int64_list=tf.train.Int64List(value=[self.batch_index])))\n return tf.train.Example(features=tf.train.Features(feature=features))", "def convert_to_tf_example(\n patient_data: Tuple[str, Dict[str, object]]\n) -> tf.train.Example:\n try:\n data = patient_data[1]\n patient = data[\"patient\"][0]\n studies = data[\"studies\"][0]\n \n features = convert_patient_to_feature(patient)\n for study_id, study in studies:\n study_data = convert_study_to_feature(study)\n for feature in study_data:\n features.update(feature)\n return tf.train.Example(features=tf.train.Features(feature=features),)\n except Exception as e:\n _logger.error(\n f\"Error occurred when creating a TFRecord. patient_data: {data.get('patient', data)}. 
Error: {e}.\"\n )\n return tf.train.Example(features=tf.train.Features(feature={}),)", "def parser(record):\n record_spec = {\n \"input\": tf.FixedLenFeature([seq_len], tf.int64),\n \"labels\": tf.FixedLenFeature([tgt_len], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_len],tf.float32),\n \"target_mask\": tf.FixedLenFeature([tgt_len],tf.float32)\n }\n\n # retrieve serialized example\n example = tf.parse_single_example(\n serialized=record,\n features=record_spec)\n\n _convert_example(example, use_bfloat16)\n\n for k, v in example.items():\n tf.logging.info(\"%s: %s\", k, v)\n\n return example", "def convert_single_example(ex_index, example, label_list, max_seq_length,\n tokenizer):\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n input_ids, segment_ids, input_mask = \\\n tokenizer.encode_text(text_a=example.text_a,\n text_b=example.text_b,\n max_seq_length=max_seq_length)\n\n label_id = label_map[example.label]\n\n # here we disable the verbose printing of the data\n if ex_index < 0:\n logging.info(\"*** Example ***\")\n logging.info(\"guid: %s\", example.guid)\n logging.info(\"input_ids: %s\", \" \".join([str(x) for x in input_ids]))\n logging.info(\"input_ids length: %d\", len(input_ids))\n logging.info(\"input_mask: %s\", \" \".join([str(x) for x in input_mask]))\n logging.info(\"segment_ids: %s\", \" \".join([str(x) for x in segment_ids]))\n logging.info(\"label: %s (id = %d)\", example.label, label_id)\n\n feature = InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id)\n return feature", "def create_example_albert(example: Example, tokenizer: Tokenizer) -> Example:\n # substitute [MASK]\n text_input = re.sub(r'( \\[MASK\\])|(\\[MASK\\])', tokenizer.mask_token,\n example['text'])\n\n # get BatchEncoding\n instance = tokenizer(text=text_input,\n padding='do_not_pad',\n add_special_tokens=True)\n # locate pad token\n target_tok_idx = try_get_index(instance.tokens(), tokenizer.mask_token)\n\n # add example-general info\n instance['target_tok_idx'] = target_tok_idx\n # get color input_ids\n option_encodings = get_option_encodings(COLORS, tokenizer, True)\n option_input_ids = [o.input_ids[0] for o in option_encodings]\n instance['option_input_ids'] = option_input_ids\n\n return instance", "def check_example(self, example, example_height, example_width, inference):\n\n encoded_subreads = tf_example_utils.get_encoded_subreads_from_example(\n example)\n subreads_shape = tf_example_utils.get_subreads_shape_from_example(example)\n num_passes = tf_example_utils.get_num_passes_from_example(example)\n encoded_deepconsensus_input = tf_example_utils.get_encoded_deepconsensus_input_from_example(\n example)\n deepconsensus_input = deepconsensus_pb2.DeepConsensusInput.FromString(\n encoded_deepconsensus_input)\n\n # Sanity check the DeepConsensusInput proto and num_passes.\n self.assertGreater(num_passes, 0)\n self.assertLessEqual(num_passes, len(deepconsensus_input.subreads))\n self.assertNotEmpty(deepconsensus_input.subreads)\n\n # Check that saved shapes are correct and that arrays have the correct\n # number of elements.\n self.assertEqual(subreads_shape, [example_height, example_width, 1])\n self.assertEqual(\n np.fromstring(encoded_subreads, dc_constants.NP_DATA_TYPE).size,\n np.prod(subreads_shape))\n if not inference:\n encoded_label = tf_example_utils.get_encoded_label_from_example(example)\n label_shape = tf_example_utils.get_label_shape_from_example(example)\n self.assertEqual(label_shape, 
[example_width])\n self.assertEqual(\n np.fromstring(encoded_label, dc_constants.NP_DATA_TYPE).size,\n np.prod(label_shape))", "def create_example_test(row, vocab):\n context, utterance = row[:2]\n distractors = row[2:]\n context_len = len(next(vocab._tokenizer([context])))\n utterance_len = len(next(vocab._tokenizer([utterance])))\n context_transformed = transform_sentence(context, vocab)\n utterance_transformed = transform_sentence(utterance, vocab)\n\n # New Example\n example = tf.train.Example()\n example.features.feature[\"context\"].int64_list.value.extend(context_transformed)\n example.features.feature[\"utterance\"].int64_list.value.extend(utterance_transformed)\n example.features.feature[\"context_len\"].int64_list.value.extend([context_len])\n example.features.feature[\"utterance_len\"].int64_list.value.extend([utterance_len])\n\n # Distractor sequences\n for i, distractor in enumerate(distractors):\n dis_key = \"distractor_{}\".format(i)\n dis_len_key = \"distractor_{}_len\".format(i)\n # Distractor Length Feature\n dis_len = len(next(vocab._tokenizer([distractor])))\n example.features.feature[dis_len_key].int64_list.value.extend([dis_len])\n # Distractor Text Feature\n dis_transformed = transform_sentence(distractor, vocab)\n example.features.feature[dis_key].int64_list.value.extend(dis_transformed)\n return example", "def parse_serialized_simulation_example(example_proto, metadata):\n if 'context_mean' in metadata:\n feature_description = _FEATURE_DESCRIPTION_WITH_GLOBAL_CONTEXT\n else:\n feature_description = _FEATURE_DESCRIPTION\n context, parsed_features = tf.io.parse_single_sequence_example(\n example_proto,\n context_features=_CONTEXT_FEATURES,\n sequence_features=feature_description)\n for feature_key, item in parsed_features.items():\n convert_fn = functools.partial(\n convert_to_tensor, encoded_dtype=_FEATURE_DTYPES[feature_key]['in'])\n parsed_features[feature_key] = tf.py_function(\n convert_fn, inp=[item.values], Tout=_FEATURE_DTYPES[feature_key]['out'])\n\n # There is an extra frame at the beginning so we can calculate pos change\n # for all frames used in the paper.\n position_shape = [metadata['sequence_length'] + 1, -1, metadata['dim']]\n\n # Reshape positions to correct dim:\n parsed_features['position'] = tf.reshape(parsed_features['position'],\n position_shape)\n # Set correct shapes of the remaining tensors.\n sequence_length = metadata['sequence_length'] + 1\n if 'context_mean' in metadata:\n context_feat_len = len(metadata['context_mean'])\n parsed_features['step_context'] = tf.reshape(\n parsed_features['step_context'],\n [sequence_length, context_feat_len])\n # Decode particle type explicitly\n context['particle_type'] = tf.py_function(\n functools.partial(convert_fn, encoded_dtype=np.int64),\n inp=[context['particle_type'].values],\n Tout=[tf.int64])\n context['particle_type'] = tf.reshape(context['particle_type'], [-1])\n return context, parsed_features", "def convert_instance_to_tf_example(tokenizer, instance, max_seq_length):\n\n input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)\n input_mask = [1] * len(input_ids)\n segment_ids = list(instance.segment_ids)\n assert len(input_ids) <= max_seq_length\n\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n features = collections.OrderedDict()\n features[\"input_ids\"] = create_int_feature(input_ids)\n 
features[\"input_mask\"] = create_int_feature(input_mask)\n features[\"segment_ids\"] = create_int_feature(segment_ids)\n\n # These are not needed but are made to fit the BERT api\n masked_lm_positions = [0] * max_seq_length\n masked_lm_ids = [0] * max_seq_length\n masked_lm_weights = [1.0] * len(masked_lm_ids)\n features[\"masked_lm_positions\"] = create_int_feature(masked_lm_positions)\n features[\"masked_lm_ids\"] = create_int_feature(masked_lm_ids)\n features[\"masked_lm_weights\"] = create_float_feature(masked_lm_weights)\n if isinstance(instance.label, list):\n features[\"order\"] = create_int_feature(instance.label)\n else:\n features[\"next_sentence_labels\"] = create_int_feature([instance.label])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n return tf_example, features" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add external padding to bases, PW, IP, and cigar.
def pad_bases_pw_ip_cigar(read: deepconsensus_pb2.Subread, padded_len: int) -> None: pad_amt = padded_len - len(read.bases) if pad_amt > 0: str_padding = dc_constants.GAP_OR_PAD * pad_amt list_padding = [dc_constants.GAP_OR_PAD_INT] * pad_amt read.bases = read.bases + str_padding read.pw[:] = list(read.pw) + list_padding read.ip[:] = list(read.ip) + list_padding read.expanded_cigar = read.expanded_cigar + str_padding
[ "def _add_padding(input_str):\r\n padding_len = AES.block_size - len(input_str) % AES.block_size\r\n return input_str + padding_len * chr(padding_len)", "def add_padding(im, pad):\n\n return np.pad(im, pad_width=((pad, pad), (pad, pad), (0, 0)), mode='symmetric')", "def _pad(payload):\n\t\tlength = AES.block_size - (len(payload) % AES.block_size)\n\t\tif length == AES.block_size:\n\t\t\treturn payload #no padding required\n\t\tpadding = chr(length)*length\n\t\treturn payload + padding", "def pad(data):\r\n bytes_to_pad = AES.block_size - len(data) % AES.block_size\r\n return data + (bytes_to_pad * chr(bytes_to_pad))", "def _compute_causal_padding(self):\n left_pad = self.dilation_rate[0] * (self.kernel_size[0] - 1)\n if self.data_format == \"channels_last\":\n causal_padding = [[0, 0], [left_pad, 0], [0, 0]]\n else:\n causal_padding = [[0, 0], [0, 0], [left_pad, 0]]\n return causal_padding", "def _pad(data, pad_with=PADDING):\n return data + (BLOCK_SIZE - len(data) % BLOCK_SIZE) * PADDING", "def padding(self, text):\n add = 16 - (len(text) % 16)\n return text + ('\\0' * add)", "def pad_pw_ip(subreads: List[reads_pb2.Read], max_length: int) -> None:\n for read in subreads:\n pw = struct_utils.get_int_field(read.info, 'pw')\n ip = struct_utils.get_int_field(read.info, 'ip')\n assert len(pw) == len(ip)\n pad_length = max_length - len(pw)\n pw_ip_padding = [dc_constants.GAP_OR_PAD_INT] * pad_length\n struct_utils.set_int_field(read.info, 'pw', pw + pw_ip_padding)\n struct_utils.set_int_field(read.info, 'ip', ip + pw_ip_padding)", "def pkcs5_pad(self,s):\n return s + (self.BLOCK_SIZE - len(s) % self.BLOCK_SIZE) * chr(self.BLOCK_SIZE - len(s) % self.BLOCK_SIZE)", "def pad(data):\r\n length = 16 - (len(data) % 16)\r\n return data + bytes([length])*length", "def add_padding(message: str) -> str:\n\n padding = len(message)\n while padding % 16 != 0:\n padding += 1\n padding -= len(message)\n return message + '\\0' * padding", "def pad_base64_str(str):\n missing_padding = len(str) % 4\n if missing_padding != 0:\n str += '=' * (4 - missing_padding)\n return str", "def _compute_causal_padding(self):\n left_pad = self.dilation_rate[0] * (self.kernel_size[0] - 1)\n\n if self.data_format == 'channels_last':\n if self.rank == 1:\n causal_padding = [[0, 0], [left_pad, 0], [0, 0]]\n elif self.rank == 2:\n causal_padding = [[0, 0], [left_pad, 0], [0, 0], [0, 0]]\n elif self.rank == 3:\n causal_padding = [[0, 0], [left_pad, 0], [0, 0], [0, 0], [0, 0]]\n else:\n raise ValueError()\n return causal_padding\n else:\n raise ValueError('No support for NCHW yet')", "def ab_pad(string, block_size=16, random_generator=random_util.sort_of_random_bytes):\n bytes_to_pad = block_size - (len(string) % block_size)\n padding = random_generator(bytes_to_pad)\n return padding + string", "def set_padding(self, pad=(0, 0, 0, 0)):\n self._static_padding = make_padding(pad)\n self._static_pos = self._get_static_point()", "def _derive_padding_crypto(self, seed, pad_string): # XXX consider secret_seed\n secret = self.mac(pad_string,\n seed,\n self.shared_secret)\n return aes.AES_CTR_128(secret[:KEYLEN], secret[KEYLEN:])", "def _create_bbox_padding_map(self):\n for i, img_coco_info in enumerate(self._coco_gt.dataset['images']):\n h_pad, w_pad, scaling = compute_padding_values(img_coco_info['height'], img_coco_info['width'],\n self._image_height, self._image_width)\n self._bbox_pad_mapping[img_coco_info['id']] = PadInfo(h_pad, w_pad, scaling)", "def _derive_padding_crypto(self, seed, pad_string): # XXX consider secret_seed\n secret 
= self.mac(pad_string,\n seed,\n self.shared_secret)\n return aes.AES_CTR_128(secret[:KEYLEN], secret[KEYLEN:],\n counter_wraparound=True)", "def _dynamic_padding(self, batch_data, pad_id = 0 ):\n #print 'dynamic _padding...'\n #print 'pad_id' + str(pad_id)\n max_p_len = 1000\n max_q_len =1000\n pad_p_len = min(max_p_len, max(batch_data['passage_length']))+1\n #print 'pad_p_len' + str(pad_p_len)\n pad_q_len = min(max_q_len, max(batch_data['question_length']))\n #print 'pad_q_len' + str(pad_q_len)\n #for ids in batch_data['passage_token_ids'] :\n #print 'padding: '\n #print (ids + [pad_id] * (pad_p_len - len(ids)))[: pad_p_len]\n batch_data['passage_token_ids'] = [(ids + [pad_id] * (pad_p_len - len(ids)))[: pad_p_len]\n for ids in batch_data['passage_token_ids']]\n batch_data['question_token_ids'] = [(ids + [pad_id] * (pad_q_len - len(ids)))[: pad_q_len]\n for ids in batch_data['question_token_ids']]\n return batch_data, pad_p_len, pad_q_len" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the max passes for bases/PW/IP.
def get_max_passes(example_height: int) -> int: return (example_height - 5) // 4
[ "def most_secure_password():\n passwords = []\n secure = {}\n query = Login.select(Login)\n for row in query:\n passwords.append(row.password)\n\n one_small_char_pattern = \"[a-z]+\" #1\n one_big_char_pattern = \"[A-Z]+\" #2\n one_digit_pattern = \"\\d+\" #1 \n eights_digits_pattern = \"\\w{8}\" #5\n special_charachter_pattern = \"[|\\^&+\\-%*/=!>]{1}\" #3\n\n points = 0\n for i in passwords:\n points = 0\n if(re.search(one_small_char_pattern,i)):\n points += 1\n\n if(re.search(one_big_char_pattern,i)):\n points += 2\n\n if(re.search(one_digit_pattern,i)):\n points += 1\n\n if(re.search(eights_digits_pattern,i)):\n points += 5\n\n if(re.search(special_charachter_pattern,i)):\n points += 3\n\n secure.update({i:points})\n\n max_val = max(secure.values())\n max_key = [i for i,j in secure.items() if j == max_val]\n click.echo(str(max_key) + \" most secure password, get's the max point \" + str(max_val))", "def get_max_attempts(self):\n\t\treturn self._max_attempts", "def calculate_maxmium_number_of_turns(self):\n self.number_of_turns_max = self.copper_fill_factor*self.core.A_w/self.A_cu", "def get_max_gains(self):\n return tuple([lib.is_SetHWGainFactor(self.hcam,0x800c+i,100)/100 for i in range(4)])", "def get_max_control_iterations():\n\treturn dsslib.SolutionI(ctypes.c_int32(24), ctypes.c_int32(0))", "def part_2(distances: Distances) -> int:\n\n result, _ = max(generate_routes(distances))\n print(f\"part 2: longest route has distance {result}\")\n return result", "def maxloginattempts(self) :\n\t\ttry :\n\t\t\treturn self._maxloginattempts\n\t\texcept Exception as e:\n\t\t\traise e", "def __find_max_distance(self):\n return utils.find_max_distance(self.__game)", "def count_routes_max_stops(self, src, dest, max_stops):\n\n criteria = lambda stops, distance: stops <= max_stops # inconsistent max, per test cases\n return len(self.routes_with_criteria(src, dest, criteria))", "def fail_max(self) -> int:\n return self._fail_max", "def max_creds(self) -> int:\n return self._max_creds", "def max_chain_height(self):\n return self.client.call('GET',\n self.name + 'active-peers/max-chain-height')", "def max_primer_length(tm_max):\n return int((float(tm_max) + 7.5) / 2.5)", "def part_2(password: str) -> int:\n steps = len(find_longest_path(password))\n print(f\"part 2: longest path to reach the vault is {steps} steps long\")\n return steps", "def get_max_iterations():\n\treturn dsslib.SolutionI(ctypes.c_int32(8), ctypes.c_int32(0))", "def max_attempts(self):\n return 1", "def password_count(self) -> int:\n return pulumi.get(self, \"password_count\")", "def get_max_id() -> int:\n passes = get_boarding_passes()\n row_ids = [parse_boarding_pass(row_id) for row_id in passes]\n return max(row_ids)", "def get_max_seat_id(boarding_passes: list) -> int:\n return max(get_seat_id(boarding_pass) for boarding_pass in boarding_passes)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
How many IPs are allowed by the blacklist? >>> example_blacklist = RangeSet.from_file(data_path(__file__, 'example.txt')) >>> print(example_blacklist) 0-2, 4-8 >>> part_2(example_blacklist, total_ips_count=10)
def part_2(ranges: 'RangeSet', total_ips_count: int = 1 << 32) -> int: allowed_count = total_ips_count - len(ranges) print(f"part 2: there are total {allowed_count} allowed IPs") return allowed_count
[ "def loadMaxIPlist(self, filename):\r\n #I need to put this in a try/catch block later \r\n \r\n maxIPlist=10\r\n linecount=0 \r\n iplist=[]\r\n with open(filename, 'r') as infile:\r\n element = infile.readline()\r\n while element:\r\n \r\n linecount +=1\r\n if linecount < maxIPlist:\r\n iplist.append(element)\r\n element = infile.readline()\r\n \r\n self.objdict['IPADDRESS']=iplist\r\n print(\"Loaded \", linecount, \" ip addresses\")\r\n\r\n return(linecount)", "def ips(self):\n return 0", "def getNumRanges(self) -> int:\n ...", "def ip_count(self) -> int:\n return pulumi.get(self, \"ip_count\")", "def main():\n\n insert_blacklist_rules(get_blacklisted_ipaddresses())", "def getNumberOfIPs(self):\n cnt=0\n for node in self.traverseTrees():\n ipb=node.value\n cnt+=len(ipb)\n return cnt", "def getMaxTaskCount():", "def read_and_count(counter, filename, max_lines):\n # list and counter for baskets\n baskets = []\n n_baskets = 0\n # open file and read lines\n with open(filename) as fp:\n # loop through every line in file\n for line in fp:\n # break if reached % of file needed\n if n_baskets >= max_lines:\n break\n # remove trailing char and split by space to get list of numbers\n line = line.rstrip().split(\" \")\n # create basket from line\n basket = MiningLibrary.basket_to_int(line)\n # add basket to list\n baskets.append(basket)\n # update counter occurence for count of single items\n counter = MiningLibrary.count_single(counter, basket)\n # increment counter\n n_baskets += 1\n # return the baskets and updated counter\n return (counter, n_baskets, baskets)", "def n_available_tasks(app_id, user_id=None, user_ip=None):\r\n\r\n if user_id and not user_ip:\r\n query = text('''SELECT COUNT(id) AS n_tasks FROM task WHERE NOT EXISTS\r\n (SELECT task_id FROM task_run WHERE\r\n app_id=:app_id AND user_id=:user_id AND task_id=task.id)\r\n AND app_id=:app_id AND state !='completed';''')\r\n result = db.engine.execute(query, app_id=app_id, user_id=user_id)\r\n else:\r\n if not user_ip:\r\n user_ip = '127.0.0.1'\r\n query = text('''SELECT COUNT(id) AS n_tasks FROM task WHERE NOT EXISTS\r\n (SELECT task_id FROM task_run WHERE\r\n app_id=:app_id AND user_ip=:user_ip AND task_id=task.id)\r\n AND app_id=:app_id AND state !='completed';''')\r\n result = db.engine.execute(query, app_id=app_id, user_ip=user_ip)\r\n n_tasks = 0\r\n for row in result:\r\n n_tasks = row.n_tasks\r\n return n_tasks", "def dont_give_me_five(start, end):\r\n my_list = [a for a in range(start, end + 1)]\r\n result = []\r\n for n in my_list:\r\n if list(str(n)).count('5') == 0:\r\n result.append(n)\r\n return len(result)", "def count_banlist():\n cwd = \"/tmp\"\n p1 = subprocess.Popen(\"varnishadm ban.list\" ,shell=True , stdout=subprocess.PIPE)\n p2 = subprocess.Popen(\"wc -l\" , shell=True , stdin=p1.stdout , stdout=subprocess.PIPE)\n out , err = p2.communicate()\n\n result = out.splitlines()[0]\n return result", "def count_spf_ips(domain, domain_name, spf_record_text):\n try:\n count = 0\n ips = find_spf_ips(domain, domain_name, spf_record_text)\n for ip in ips:\n if '/' in ip:\n bits = 32 - int(ip[(ip.index('/') + 1):])\n count += pow(2,bits)\n else:\n count += 1\n domain.spf_ips = ips\n domain.spf_count_ips = count\n except Exception as error:\n handle_error(\"[SPF IPs]\", domain, error)\n return", "def secondary_private_ip_address_count(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"secondary_private_ip_address_count\")", "def get_count_rem_local_ips(self):\n return len(self.remaining_local_ips)", "def 
part_two(rucksacks: list) -> int:\n summ = 0\n for i in range(0, len(rucksacks), 3):\n first_group = set(rucksacks[i])\n second_group = set(rucksacks[i + 1])\n third_group = set(rucksacks[i + 2])\n badge = first_group.intersection(second_group).intersection(third_group)\n badge = list(badge)[0] # extract item id from set\n summ += PRIORITY.get(badge, 0)\n return summ", "def get_num_of_baskets(self):\n return self.num_of_baskets", "def translate_ip_counts():\n # %%\n # red = Redis(\"localhost\")\n\n keys = red.keys( '/ip/*' )\n\n for k in keys:\n v = red.get(k)\n ip = k.split( '/')[2]\n red.hset( '/count_by_ip', ip, v )\n # %%", "def Count():\n return CheckForError(lib.LineSpacings_Get_Count())", "def get_total_shareholders() -> int:\n return len(balance_of)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Logical disjunction of two ranges. Contains items present in either. However if the two ranges are disjunct (no common items), `None` is returned. >>> Range(10, 20) | Range(1, 3) >>> Range(10, 20) | Range(1, 9) Range(1, 20) >>> Range(10, 20) | Range(1, 10) Range(1, 20) >>> Range(10, 20) | Range(1, 14) Range(1, 20) >>> Range(10, 20) | Range(1, 20) Range(1, 20) >>> Range(10, 20) | Range(1, 25) Range(1, 25) >>> Range(10, 20) | Range(9, 9) Range(9, 20) >>> Range(10, 20) | Range(9, 15) Range(9, 20) >>> Range(10, 20) | Range(9, 22) Range(9, 22) >>> Range(10, 20) | Range(10, 10) Range(10, 20) >>> Range(10, 20) | Range(10, 20) Range(10, 20) >>> Range(10, 20) | Range(10, 22) Range(10, 22) >>> Range(10, 20) | Range(14, 17) Range(10, 20) >>> Range(10, 20) | Range(14, 20) Range(10, 20) >>> Range(10, 20) | Range(14, 23) Range(10, 23) >>> Range(10, 20) | Range(20, 21) Range(10, 21) >>> Range(10, 20) | Range(21, 21) Range(10, 21) >>> Range(10, 20) | Range(21, 24) Range(10, 24) >>> Range(10, 20) | Range(22, 22) >>> Range(10, 20) | Range(22, 24)
def __or__(self, other): if not isinstance(other, Range): raise TypeError( f"unsupported operand types for |: " f"{type(self).__name__!r} and {type(other).__name__!r}" ) if self == other: return Range(self.vmin, self.vmax) elif self.vmax < other.vmin - 1: return None elif self.vmin > other.vmax + 1: return None return Range( vmin=min(self.vmin, other.vmin), vmax=max(self.vmax, other.vmax) )
[ "def ranges_overlap(start1, end1, start2, end2):\n return start1 <= end2 and end1 >= start2", "def overlap(start1, end1, start2, end2):\n return not (end1 < start2 or end2 < start1)", "def isdisjoint(self, other: Union[Rangelike, Iterable[Rangelike]]) -> bool:\n # convert to RangeSet\n other = RangeSet._to_rangeset(other)\n # O(n^2) comparison\n # TODO improve efficiency by mergesort/short-circuiting\n return all(rng1.isdisjoint(rng2) for rng1 in self._ranges for rng2 in other._ranges)", "def union(self, other):\n\n # Consider empty ranges\n if not self:\n return other\n elif not other:\n return self\n elif not self.overlap(other) and not self.adjacent(other):\n raise ValueError(\"Ranges must be either adjacent or overlapping\")\n\n if self.lower == other.lower:\n lower = self.lower\n lower_inc = self.lower_inc or other.lower_inc\n elif self.lower < other.lower:\n lower = self.lower\n lower_inc = self.lower_inc\n else:\n lower = other.lower\n lower_inc = other.lower_inc\n\n if self.upper == other.upper:\n upper = self.upper\n upper_inc = self.upper_inc or other.upper_inc\n elif self.upper < other.upper:\n upper = other.upper\n upper_inc = other.upper_inc\n else:\n upper = self.upper\n upper_inc = self.upper_inc\n\n return self.__class__(lower, upper, lower_inc, upper_inc)", "def overlaps(self, other):\n this = set(range(self.start, self.end + 1))\n that = set(range(other.start, other.end + 1))\n return not this.isdisjoint(that)", "def range_overlap(range1, range2):\n return range(max(range1[0], range2[0]), min(range1[1], range2[1]))", "def _resolveRangeOverlap(self, rng1, rng2):\r\n begin1,end1,step1 = rng1\r\n begin2,end2,step2 = rng2\r\n \r\n # No overlap? Then don't modify anything\r\n if end1 is not None and begin2>end1:\r\n return None\r\n \r\n # The ranges overlap...\r\n \r\n # Remove all initial values from rng2 that are also part of rng1.\r\n # First check if begin2 is part of rng1\r\n if (begin2-begin1)%step1==0:\r\n # Does rng2 use a step size that is a multiple of the step size of rng1?\r\n if step2%step1==0: #step1==step2:\r\n # Does rng2 completely lie within rng1? Then just ignore rng2\r\n if end1 is None or (end2 is not None and end2<=end1):\r\n return [rng1]\r\n else:\r\n # Set the begin of rng2 to the first value behind the end of rng1\r\n n = ((end1-begin2)//step2)+1\r\n begin2 += n*step2\r\n # Different steps, so only the first value is identical\r\n else:\r\n # Is rng2 just one single value? Then we can ignore rng2\r\n # (because this value is also part of rng1)\r\n if begin2==end2:\r\n return [rng1]\r\n else:\r\n begin2 += step2\r\n\r\n # If the ranges don't overlap anymore, then we are done.\r\n if end1 is not None and begin2>end1:\r\n return [rng1,(begin2,end2,step2)]\r\n\r\n \r\n # At this point, it is guaranteed that...\r\n # - ...rng1 and rng2 don't begin with the same value (i.e. begin1<begin2 is always true)\r\n # - ...begin2 is not part of rng1\r\n # - ...rng1 and the adjusted rng2 still overlap\r\n \r\n res = []\r\n # Split off the first part of rng1 (everything that is before rng2)\r\n # -> adjust rng1 so that it only contains the remaining range\r\n n = (begin2-begin1-1)//step1\r\n e1 = begin1+n*step1\r\n res.append((begin1,e1,step1))\r\n begin1 = e1+step1\r\n\r\n # begin1 is now greater than begin2 (they can't be equal because we know\r\n # that begin2 is not part of the initial rng1)\r\n\r\n res.append((begin2,end2,step2))\r\n res.append((begin1,end1,step1))\r\n \r\n # res now contains 3 ranges. 
The first one is guaranteed to be unique\r\n # and doesn't overlap anymore. The other two may still overlap but\r\n # this is dealt with in a subsequent iteration.\r\n \r\n return res", "def overlapping_ranges(\n ranges_1: Sequence[Tuple[int, int]],\n ranges_2: Sequence[Tuple[int, int]],\n) -> List[Tuple[int, int]]:\n return [\n (max(first[0], second[0]), min(first[1], second[1]))\n for first in ranges_1\n for second in ranges_2\n if max(first[0], second[0]) < min(first[1], second[1])\n ]", "def overlap(range1, range2):\n if range1[0] <= range2[1] and range2[0] <= range1[1]:\n return True\n return False", "def is_overlapped(self, another: 'Range') -> bool:\n a, b = self, another\n if a.start > b.start:\n a, b = b, a\n\n return a.end > b.start", "def overlap(start1, end1, start2, end2):\r\n return end1 >= start2 and end2 >= start1", "def overlaps(self, other):\n if not self.is_numeric_range() == other.is_numeric_range():\n # Comparing numbers and strings may give unexpected behavior. Conceptually\n # they *don't* overlap, but in Python they do; better to leave it to the user.\n raise TypeError(\"Only DataRanges of the same type (numeric or lexicographic)\"\n \" can be tested for overlap.\")\n # We standardize our logic by figuring out which is the \"lesser\" (leftmost on numberline)\n if not self.min_is_finite() or (other.min_is_finite() and self.min < other.min):\n lesser, greater = self, other\n else:\n lesser, greater = other, self\n return (lesser.max > greater.min or\n (lesser.max == greater.min and (lesser.max_inclusive or greater.min_inclusive)))", "def overlap(self,qrange, retranges=False):\n \"\"\" ERROR: after a couple hours work, I realized this isn't what\n we want.\"\"\"\n totaloverlap = 0\n if retranges:\n ranges = []\n for r in self.rangelist:\n overlap = r.overlap(qrange)\n totaloverlap += overlap\n if overlap > 0 and retranges:\n ranges.append(r)\n if retranges:\n return totaloverlap, ranges\n else:\n return totaloverlap", "def overlap(start1, end1, start2, end2):\n return (end1 >= start2) and (end2 >= start1)", "def _compare_ranges(spec1, spec2, drop_prerelease1=False, drop_prerelease2=False): # noqa\n # Test for overlapping semver ranges.\n r1 = Range(spec1, True)\n r2 = Range(spec2, True)\n\n # If either range is empty, we cannot verify.\n if not r1.range or not r2.range:\n return\n\n # Set return_value to a sentinel value\n return_value = False\n\n # r1.set may be a list of ranges if the range involved an ||, so we need to test for overlaps between each pair.\n for r1set, r2set in itertools.product(r1.set, r2.set):\n x1 = r1set[0].semver\n x2 = r1set[-1].semver\n y1 = r2set[0].semver\n y2 = r2set[-1].semver\n\n if x1.prerelease and drop_prerelease1:\n x1 = x1.inc(\"patch\")\n\n if y1.prerelease and drop_prerelease2:\n y1 = y1.inc(\"patch\")\n\n o1 = r1set[0].operator\n o2 = r2set[0].operator\n\n # We do not handle (<) specifiers.\n if o1.startswith(\"<\") or o2.startswith(\"<\"):\n continue\n\n # Handle single value specifiers.\n lx = lte if x1 == x2 else lt\n ly = lte if y1 == y2 else lt\n gx = gte if x1 == x2 else gt\n gy = gte if x1 == x2 else gt\n\n # Handle unbounded (>) specifiers.\n def noop(x, y, z):\n return True\n\n if x1 == x2 and o1.startswith(\">\"):\n lx = noop\n if y1 == y2 and o2.startswith(\">\"):\n ly = noop\n\n # Check for overlap.\n if (\n gte(x1, y1, True)\n and ly(x1, y2, True)\n or gy(x2, y1, True)\n and ly(x2, y2, True)\n or gte(y1, x1, True)\n and lx(y1, x2, True)\n or gx(y2, x1, True)\n and lx(y2, x2, True)\n ):\n # if we ever find an overlap, 
we can return immediately\n return 0\n\n if gte(y1, x2, True):\n if return_value is False:\n # We can possibly return 1\n return_value = 1\n elif return_value == -1:\n # conflicting information, so we must return None\n return_value = None\n continue\n\n if gte(x1, y2, True):\n if return_value is False:\n return_value = -1\n elif return_value == 1:\n # conflicting information, so we must return None\n return_value = None\n continue\n\n msg = \"Unexpected case comparing version ranges\"\n raise AssertionError(msg)\n\n if return_value is False:\n return_value = None\n return return_value", "def select_overlap(self, other):\n a1, b1 = self.select_start(), self.select_end()\n a2, b2 = other.select_start(), other.select_end()\n return (a1 < a2 < b1) or (a1 < b2 < b1)", "def is_conflicting(range_start, range_end, event_start, event_end):\n return (range_start <= event_start < range_end or\n range_start < event_end <= range_end or\n (event_start < range_start and event_end > range_end))", "def range_union(ranges):\n union = []\n for r in sorted(ranges, key=lambda r: r.start):\n if len(union) > 0 and union[-1].stop >= r.start:\n union[-1] = range(union[-1].start, max(union[-1].stop, r.stop))\n else:\n union.append(r)\n return union", "def dates_intervals_are_overlapped(start_1, end_1, start_2, end_2):\n return end_1 >= start_2 and end_2 >= start_1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the donor_participant of this AllOfCreateClaimResponseClaim.
def donor_participant(self) -> Object: return self._donor_participant
[ "def donor_participant(self, donor_participant: Object):\n\n self._donor_participant = donor_participant", "def participant(self) -> AllOfCancelClaimRequestParticipant:\n return self._participant", "def participant(self) -> AllOfAcknowledgeClaimRequestParticipant:\n return self._participant", "def participant(self) -> AllOfCreateCidSetFileRequestParticipant:\n return self._participant", "def participant(self):\n return self._participant", "def project_participant(self):\n return self._project_participant", "def get_donor(self, name):\n return self.donors.get(name, Donor(name))", "def find_donor(self, existing_donor):\n donor_object = None\n for donor in self.donor_list:\n if existing_donor == donor._full_name:\n donor_object = donor\n break\n return donor_object", "def get_thank_you(self, donor):\r\n donor_dict = {'name': donor.name, 'donation': donor.donations[-1],\r\n 'num_donations': len(donor.donations)}\r\n donor_dict['multiple'] = 's' if len(donor.donations) > 1 else ''\r\n\r\n thankyou = ('Dear {name}:\\n'\r\n 'Thank you for your generous donation of '\r\n '${donation:.2f}.\\nI really appreciate your '\r\n '{num_donations}\\ndonation{multiple} to our '\r\n 'organization.\\nI assure you that your contributions '\r\n 'will be put to\\ngood use!\\n\\n'\r\n 'Regards,\\nBen').format(**donor_dict)\r\n return thankyou", "def participant_id(self):\n return self.data[\"id\"]", "def donor_count(self):\n return self.donor_set.count()", "def get_murderer (self):\n return self.get_suspect(self.murderer)", "def claim_id(self) -> str:\n return self._claim_id", "def get_participant_for_user(self, user):\n participant = None\n\n if user.is_anonymous():\n return None\n\n if self.participation_mode == ContestParticipationMode.Team:\n participant = self.participants.filter(teamparticipant__team__members=user).first()\n if self.participation_mode == ContestParticipationMode.Individual:\n participant = self.participants.filter(individualparticipant__user=user).first()\n\n return participant", "def participante(self) -> Participante:\n return self._participante", "def get_patron_id(self):\r\n return self._patron_id", "def get_donor_email(self):\n input_name = self.get_donor()\n if input_name in self.all_donors:\n print(self.r.hget(input_name, 'email'))", "def generate_thank_you_note(self):\n donor_name = self.name\n donation = self.donations[len(self.donations)-1]\n txt = f'Dear {donor_name},\\n Thank you for your generous donation of ${donation}'\n return txt", "def participant(self, participant: AllOfCancelClaimRequestParticipant):\n if participant is None:\n raise ValueError(\"Invalid value for `participant`, must not be `None`\") # noqa: E501\n\n self._participant = participant" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the donor_participant of this AllOfCreateClaimResponseClaim.
def donor_participant(self, donor_participant: Object): self._donor_participant = donor_participant
[ "def participant(self, participant: AllOfCancelClaimRequestParticipant):\n if participant is None:\n raise ValueError(\"Invalid value for `participant`, must not be `None`\") # noqa: E501\n\n self._participant = participant", "def donor_participant(self) -> Object:\n return self._donor_participant", "def participant(self, participant: AllOfCreateCidSetFileRequestParticipant):\n if participant is None:\n raise ValueError(\"Invalid value for `participant`, must not be `None`\") # noqa: E501\n\n self._participant = participant", "def project_participant(self, project_participant):\n\n self._project_participant = project_participant", "def participant(self, participant: AllOfAcknowledgeClaimRequestParticipant):\n if participant is None:\n raise ValueError(\"Invalid value for `participant`, must not be `None`\") # noqa: E501\n\n self._participant = participant", "def participant(self, participant):\n\n self._participant = participant", "def owner_participant_uuid(self, owner_participant_uuid):\n\n self._owner_participant_uuid = owner_participant_uuid", "def conversation_participant_arn(self, conversation_participant_arn):\n\n self._conversation_participant_arn = conversation_participant_arn", "def add_donor(self, donor):\n if donor in self:\n self._donors[donor.key] += donor\n else:\n self._donors[donor.key] = donor", "def add_donor(self, donor):\n self.donor_list.append(donor)\n for donation in donor.donations:\n self.donation_log.append(donation)", "def add(self, donor):\n self.donors[donor.name] = donor", "def conversation_participant_uuid(self, conversation_participant_uuid):\n\n self._conversation_participant_uuid = conversation_participant_uuid", "def payer_principal(self, payer_principal):\n\n self._payer_principal = payer_principal", "def generate_donor_files(self, dir_path, donor=None):\n donors = self if donor is None else (donor, )\n for donor in donors:\n with open(dir_path / donor.file_name, 'w') as f:\n f.write(donor.get_thank_you_text())", "def add_donor_object(self, donor_object):\n self.donors.append(donor_object)", "def participant(self) -> AllOfCancelClaimRequestParticipant:\n return self._participant", "def set_known_creator(self, target_item, creator_Q, reference):\n creator_item = self.wd.QtoItemPage(creator_Q)\n self.wd.addNewClaim(\n u'P170',\n WD.Statement(creator_item),\n target_item,\n reference)", "def participant(self) -> AllOfCreateCidSetFileRequestParticipant:\n return self._participant", "def contributor(self, contributor):\n\n self._contributor = contributor" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the resolution_period_end of this AllOfCreateClaimResponseClaim.
def resolution_period_end(self) -> datetime: return self._resolution_period_end
[ "def resolution_period_end(self, resolution_period_end: datetime):\n if resolution_period_end is None:\n raise ValueError(\"Invalid value for `resolution_period_end`, must not be `None`\") # noqa: E501\n\n self._resolution_period_end = resolution_period_end", "def period_end(self):\n return self._period_end", "def completion_period_end(self) -> datetime:\n return self._completion_period_end", "def period_end(fact):\n period = fact.period_aspect_value\n if period.period_type == xbrl.PeriodType.START_END:\n return period.end\n elif period.period_type == xbrl.PeriodType.INSTANT:\n return period.instant\n else:\n return datetime.datetime.max", "def expected_last_period_end(self):\n return self._expected_last_period_end", "def reporting_end_date(self):\n return self._reportingEndDate", "def end_date(self):\n if not self.intervals:\n return None\n return self.intervals[-1].end.date()", "def roa_validity_end_date(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"roa_validity_end_date\")", "def get_end_time(self):\n if self.invalid():\n return None\n interval = self.get_interval(ARBITRARY_DATE)\n return interval.end.time()", "def review_history_period_end_date_time(self) -> Optional[str]:\n return pulumi.get(self, \"review_history_period_end_date_time\")", "def maxend (self):\n\n maxend = self.list[0][\"period\"].end\n for actor in self.list:\n if maxend < actor[\"period\"].end:\n maxend = actor[\"period\"].end\n return maxend", "def completion_period_end(self, completion_period_end: datetime):\n\n self._completion_period_end = completion_period_end", "def end_time(self):\n return RPR.GetAudioAccessorEndTime(self.id)", "def end_date(self):\n return self._end_date", "def cal_end(self):\n return self.datetime_end", "def reporting_period_ends(instance,dei_namespace):\n\n reporting_period_end_for_legal_entity = {}\n\n dim_LegalEntityAxis = instance.dts.resolve_concept(xml.QName('LegalEntityAxis',dei_namespace))\n concept_DocumentPeriodEndDate = instance.dts.resolve_concept(xml.QName('DocumentPeriodEndDate',dei_namespace))\n for fact in instance.facts.filter(concept_DocumentPeriodEndDate):\n # Amendment: Use the period end date of the context and not the DocumentPeriodEndDate value! \n end_date = fact.period_aspect_value.end\n\n legal_entity = dimension_value(fact,dim_LegalEntityAxis)\n if legal_entity not in reporting_period_end_for_legal_entity or reporting_period_end_for_legal_entity[legal_entity][1] < end_date:\n reporting_period_end_for_legal_entity[legal_entity] = (fact,end_date)\n\n return reporting_period_end_for_legal_entity", "def scheduled_end_date_time(self):\n if \"scheduledEndDateTime\" in self._prop_dict:\n if isinstance(self._prop_dict[\"scheduledEndDateTime\"], OneDriveObjectBase):\n return self._prop_dict[\"scheduledEndDateTime\"]\n else :\n self._prop_dict[\"scheduledEndDateTime\"] = DateTimeTimeZone(self._prop_dict[\"scheduledEndDateTime\"])\n return self._prop_dict[\"scheduledEndDateTime\"]\n\n return None", "def end_time(self):\n return from_ts(self.end_time_ms)", "def get_end_date_long(self):\n return self.end_date_long" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the resolution_period_end of this AllOfCreateClaimResponseClaim.
def resolution_period_end(self, resolution_period_end: datetime): if resolution_period_end is None: raise ValueError("Invalid value for `resolution_period_end`, must not be `None`") # noqa: E501 self._resolution_period_end = resolution_period_end
[ "def completion_period_end(self, completion_period_end: datetime):\n\n self._completion_period_end = completion_period_end", "def period_end(self, period_end):\n\n self._period_end = period_end", "def resolution_period_end(self) -> datetime:\n return self._resolution_period_end", "def expected_last_period_end(self, expected_last_period_end):\n\n self._expected_last_period_end = expected_last_period_end", "def end_fact(self, end_fact):\n self._end_fact = end_fact", "def end_date_time(self, end_date_time):\n\n self._end_date_time = end_date_time", "def end_date(self, end_date):\n\n self._end_date = end_date", "def end_date(self, end_date):\n self._end_date = end_date", "def period_end(fact):\n period = fact.period_aspect_value\n if period.period_type == xbrl.PeriodType.START_END:\n return period.end\n elif period.period_type == xbrl.PeriodType.INSTANT:\n return period.instant\n else:\n return datetime.datetime.max", "def quota_end_date(self, quota_end_date):\n\n self._quota_end_date = quota_end_date", "def evaluation_period(self, evaluation_period):\n\n self._evaluation_period = evaluation_period", "def end_ms(self, end_ms):\n if self._configuration.client_side_validation and end_ms is None:\n raise ValueError(\"Invalid value for `end_ms`, must not be `None`\") # noqa: E501\n\n self._end_ms = end_ms", "def set_end_date_long(self, end_date_long):\n self.end_date_long = end_date_long", "def end_ms(self, end_ms):\n\n self._end_ms = end_ms", "def reporting_period_ends(instance,dei_namespace):\n\n reporting_period_end_for_legal_entity = {}\n\n dim_LegalEntityAxis = instance.dts.resolve_concept(xml.QName('LegalEntityAxis',dei_namespace))\n concept_DocumentPeriodEndDate = instance.dts.resolve_concept(xml.QName('DocumentPeriodEndDate',dei_namespace))\n for fact in instance.facts.filter(concept_DocumentPeriodEndDate):\n # Amendment: Use the period end date of the context and not the DocumentPeriodEndDate value! \n end_date = fact.period_aspect_value.end\n\n legal_entity = dimension_value(fact,dim_LegalEntityAxis)\n if legal_entity not in reporting_period_end_for_legal_entity or reporting_period_end_for_legal_entity[legal_entity][1] < end_date:\n reporting_period_end_for_legal_entity[legal_entity] = (fact,end_date)\n\n return reporting_period_end_for_legal_entity", "def end_times(self, end_times):\n\n self._end_times = end_times", "def end_date(self, end_date):\n if end_date is None:\n end_date = datetime.utcnow()\n\n self._end_date = dt_utils.parse_date(end_date)", "def completion_period_end(self) -> datetime:\n return self._completion_period_end", "def end_time(self, end_time):\n\n self._end_time = end_time" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the completion_period_end of this AllOfCreateClaimResponseClaim.
def completion_period_end(self) -> datetime: return self._completion_period_end
[ "def completion_period_end(self, completion_period_end: datetime):\n\n self._completion_period_end = completion_period_end", "def period_end(self):\n return self._period_end", "def resolution_period_end(self) -> datetime:\n return self._resolution_period_end", "def expected_last_period_end(self):\n return self._expected_last_period_end", "def acquisition_end_time(self):\n return self._acquisition_end_time", "def end_time(self):\n return RPR.GetAudioAccessorEndTime(self.id)", "def get_end(self):\n return self.start + timedelta(minutes=self.duration)", "def end_time(self):\n return self._end_time", "def completed(self):\n if not self.completion_ts:\n return None\n return datetime.utcfromtimestamp(self.completion_ts)", "def get_end_time(self):\n if self.invalid():\n return None\n interval = self.get_interval(ARBITRARY_DATE)\n return interval.end.time()", "def completion_time(self) -> datetime:\n return self._completion_time", "def cal_end(self):\n return self.datetime_end", "def end_date(self):\n return self._end_date", "def end_date(self):\n if not self.intervals:\n return None\n return self.intervals[-1].end.date()", "def last(self) -> 'outputs.CommitmentPeriodResponse':\n return pulumi.get(self, \"last\")", "def end_date(self):\n return self.__end_date", "def end_date(self) -> date:\n return self._end_date", "def get_bundle_end_date(self):\n if not self._bundle_end_date:\n max_date = self.engine.execute(\n \"\"\"\n SELECT\n MAX(end_date)\n FROM (\n SELECT\n end_date\n FROM\n equities\n UNION\n SELECT\n end_date\n FROM\n futures_contracts\n )\n \"\"\"\n ).scalar()\n self._bundle_end_date = pd.Timestamp(max_date, tz=\"UTC\")\n\n return self._bundle_end_date", "def period_end(fact):\n period = fact.period_aspect_value\n if period.period_type == xbrl.PeriodType.START_END:\n return period.end\n elif period.period_type == xbrl.PeriodType.INSTANT:\n return period.instant\n else:\n return datetime.datetime.max" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the completion_period_end of this AllOfCreateClaimResponseClaim.
def completion_period_end(self, completion_period_end: datetime): self._completion_period_end = completion_period_end
[ "def period_end(self, period_end):\n\n self._period_end = period_end", "def completion_period_end(self) -> datetime:\n return self._completion_period_end", "def resolution_period_end(self, resolution_period_end: datetime):\n if resolution_period_end is None:\n raise ValueError(\"Invalid value for `resolution_period_end`, must not be `None`\") # noqa: E501\n\n self._resolution_period_end = resolution_period_end", "def expected_last_period_end(self, expected_last_period_end):\n\n self._expected_last_period_end = expected_last_period_end", "def end_date(self, end_date):\n\n self._end_date = end_date", "def end_date_time(self, end_date_time):\n\n self._end_date_time = end_date_time", "def end_date(self, end_date):\n self._end_date = end_date", "def end_date(self, end_date):\n if end_date is None:\n end_date = datetime.utcnow()\n\n self._end_date = dt_utils.parse_date(end_date)", "def final_verification_complete(self, final_verification_complete):\n self._final_verification_complete = final_verification_complete", "def quota_end_date(self, quota_end_date):\n\n self._quota_end_date = quota_end_date", "def end_time(self, end_time):\n\n self._end_time = end_time", "def completion_time(self, completion_time: datetime):\n\n self._completion_time = completion_time", "def end_timestamp(self, end_timestamp):\n\n self._end_timestamp = end_timestamp", "def completion_time(self, completion_time):\n if completion_time is None:\n raise ValueError(\"Invalid value for `completion_time`, must not be `None`\")\n\n self._completion_time = completion_time", "def completion_timeout_in_minutes(self, completion_timeout_in_minutes):\n\n self._completion_timeout_in_minutes = completion_timeout_in_minutes", "def end_duration_secs_or_frames(self, end_duration_secs_or_frames):\n\n self._end_duration_secs_or_frames = end_duration_secs_or_frames", "def end_times(self, end_times):\n\n self._end_times = end_times", "def end_time_stamp(self, end_time_stamp):\n\n self._end_time_stamp = end_time_stamp", "def set_end_date_long(self, end_date_long):\n self.end_date_long = end_date_long" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the last_modified of this AllOfCreateClaimResponseClaim.
def last_modified(self) -> datetime:
    return self._last_modified
[ "def last_modified_time(self) -> str:\n return pulumi.get(self, \"last_modified_time\")", "def last_modified_date_time(self):\n if \"lastModifiedDateTime\" in self._prop_dict:\n return datetime.strptime(self._prop_dict[\"lastModifiedDateTime\"].replace(\"Z\", \"\"), \"%Y-%m-%dT%H:%M:%S.%f\")\n else:\n return None", "def last_modified_by(self):\n if \"lastModifiedBy\" in self._prop_dict:\n if isinstance(self._prop_dict[\"lastModifiedBy\"], OneDriveObjectBase):\n return self._prop_dict[\"lastModifiedBy\"]\n else :\n self._prop_dict[\"lastModifiedBy\"] = IdentitySet(self._prop_dict[\"lastModifiedBy\"])\n return self._prop_dict[\"lastModifiedBy\"]\n\n return None", "def last_modified(self):\n last_changed_file = Session.query(sa.func.max(Entity.last_modified_date)).filter_by(project=self).first()[0]\n\n if last_changed_file:\n return max(self.last_modified_date, last_changed_file)\n \n return self.last_modified_date", "def get_last_modified_date(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetLastModifiedDate', self.handle)", "def last_modified_time(self) -> float:\n return self._last_modified_time", "def get_last_modified(self, request, obj):\n if self.last_modified_field:\n return getattr(obj, self.last_modified_field)\n\n return None", "def get_last_modified_date(self):\n\t\treturn call_sdk_function('PrlFsEntry_GetLastModifiedDate', self.handle)", "def last_modify_time(self):\n return self._last_modify_time", "def last_modified(self):\n\n if not self._absuri:\n self._absuri = self._getcell('URI')\n\n if self._absuri is None:\n raise DataError('Cannot get file: does not exists')\n\n info = self._intf._get_head(self._absuri)\n return info['last-modified']", "def last_modified_by(self):\n return self._last_modified_by", "def date_modified(self):\n return self._date_modified", "def last_modified(self):\n uri = '%s/subjects?columns=last_modified' % self._uri\n\n return dict(JsonTable(self._intf._get_json(uri),\n order_by=['ID', 'last_modified']\n ).select(['ID', 'last_modified']\n ).items()\n )", "def claim_response_with_last_updated(self):\n self.run_task_by_parameters(\n base_path=\"/v2/fhir/ClaimResponse\",\n params={\"mbi\": next(self.pac_hashed_mbis)} | self.PAC_LAST_UPDATED,\n name=(\n \"/v2/fhir/claimResponse search by mbi hash /\"\n f\" {params_to_str(self.PAC_LAST_UPDATED)}\"\n ),\n )", "def date_modified(self) -> datetime:\n return self._date_modified", "def last_modify_time(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"last_modify_time\")", "def last_modified_at(self):\n return self.viztrail.last_modified_at", "def last_modified_by(self) -> str:\n return pulumi.get(self, \"last_modified_by\")", "def get_last_modified() -> str:\n service = get_authenticated_service(\"drive\", \"v3\")\n response = (\n service.files().get(fileId=SPREADSHEET_ID, fields=\"modifiedTime\").execute()\n )\n return response[\"modifiedTime\"]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the last_modified of this AllOfCreateClaimResponseClaim.
def last_modified(self, last_modified: datetime):
    if last_modified is None:
        raise ValueError("Invalid value for `last_modified`, must not be `None`")  # noqa: E501

    self._last_modified = last_modified
[ "def last_modified(self, last_modified):\n\n self._last_modified = last_modified", "def last_modified(self, last_modified):\n self._last_modified = last_modified", "def last_modified_by(self, last_modified_by):\n\n self._last_modified_by = last_modified_by", "def last_modified_date(self, last_modified_date):\n\n self._last_modified_date = last_modified_date", "def last_modified_time(self, last_modified_time):\n\n self._last_modified_time = last_modified_time", "def set_last_modification_date(metadata: types.Metadata) -> None:\n metadata['meta_modify_date'] = str(datetime.datetime.utcnow())", "def last_modified_time(self, last_modified_time: float):\n\n self._last_modified_time = last_modified_time", "def claim_response_with_last_updated(self):\n self.run_task_by_parameters(\n base_path=\"/v2/fhir/ClaimResponse\",\n params={\"mbi\": next(self.pac_hashed_mbis)} | self.PAC_LAST_UPDATED,\n name=(\n \"/v2/fhir/claimResponse search by mbi hash /\"\n f\" {params_to_str(self.PAC_LAST_UPDATED)}\"\n ),\n )", "def last_modified_date_time(self, last_modified_date_time):\n\n self._last_modified_date_time = last_modified_date_time", "def setLastModified(when):", "def last_modified_on(self, last_modified_on):\n\n self._last_modified_on = last_modified_on", "def last_modified_user(self, last_modified_user):\n\n self._last_modified_user = last_modified_user", "def last_modified_dts(self, last_modified_dts):\n\n self._last_modified_dts = last_modified_dts", "def last_modification(self, last_modification):\n\n self._last_modification = last_modification", "def last_modified_by(self):\n if \"lastModifiedBy\" in self._prop_dict:\n if isinstance(self._prop_dict[\"lastModifiedBy\"], OneDriveObjectBase):\n return self._prop_dict[\"lastModifiedBy\"]\n else :\n self._prop_dict[\"lastModifiedBy\"] = IdentitySet(self._prop_dict[\"lastModifiedBy\"])\n return self._prop_dict[\"lastModifiedBy\"]\n\n return None", "def last_modification_date(self, last_modification_date):\n\n self._last_modification_date = last_modification_date", "def claim_response_with_service_date_and_last_updated(self):\n self.run_task_by_parameters(\n base_path=\"/v2/fhir/ClaimResponse\",\n params={\"mbi\": next(self.pac_hashed_mbis)} | self.PAC_SERVICE_DATE_LAST_UPDATED,\n name=(\n \"/v2/fhir/claimResponse search by mbi hash /\"\n f\" {params_to_str(self.PAC_SERVICE_DATE_LAST_UPDATED)}\"\n ),\n )", "def claim_with_last_updated(self):\n self.run_task_by_parameters(\n base_path=\"/v2/fhir/Claim\",\n params={\"mbi\": next(self.pac_hashed_mbis)} | self.PAC_LAST_UPDATED,\n name=f\"/v2/fhir/claim search by mbi hash / {params_to_str(self.PAC_LAST_UPDATED)}\",\n )", "def date_modified(self, date_modified):\n \n self._date_modified = date_modified" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes an annotation to file according to the darkflow format.
def write_as_frcnn(annotation: pd.DataFrame, path_to_annotations: str, image_id: str):
    annotation.to_csv(os.path.join(path_to_annotations, '..', 'annotations.txt'),
                      header=None, index=None, mode='a', sep=',')
[ "def writeAnnotations(b, f, ld, n=0):\n \n if args.format.lower() == 'kitti':\n writeKitty(b, os.path.join(ld, \"%06d.txt\" % n))\n elif args.format.lower() == 'voc':\n writeVOC(b, ld, f)\n elif args.format.lower() == 'darknet':\n writeDarknet(b, os.path.join(ld, \"%06d.txt\" % n))", "def write_annotation(self, ann_file, img_path, new_img_name):\n if self.type == \"imagenet\":\n label = self.in_annotations[img_path]\n logger.debug(f\"Img {img_path}, imagenet label {label}\")\n ann_file.write(str(label) + \"\\n\")\n elif self.type == \"coco\":\n ann_file.write(\"detection_results {\\n\")\n for obj in self.in_annotations[img_path].keys():\n ann_file.write(\" objects {\\n\")\n ann_file.write(f\" class_id: {self.in_annotations[img_path][obj]['label']}\\n\")\n ann_file.write(\" bounding_box {\\n\")\n ann_file.write(f\" normalized_top: {self.in_annotations[img_path][obj]['normalized_bbox'][0]}\\n\")\n ann_file.write(f\" normalized_bottom: {self.in_annotations[img_path][obj]['normalized_bbox'][1]}\\n\")\n ann_file.write(f\" normalized_left: {self.in_annotations[img_path][obj]['normalized_bbox'][2]}\\n\")\n ann_file.write(f\" normalized_right: {self.in_annotations[img_path][obj]['normalized_bbox'][3]}\\n\")\n ann_file.write(\" }\\n\")\n ann_file.write(\" }\\n\")\n ann_file.write(f' image_name: \"{new_img_name}\"\\n')\n ann_file.write(f' image_id: {int(new_img_name.split(\".\")[0])}\\n')\n ann_file.write(\"}\\n\")", "def write_annotations(self):\n self.annotation_file.parent.mkdir(exist_ok=True, parents=True)\n self.annotation_file.write_bytes(pickle.dumps((self.annotations, self._timepoint_annotations)))", "def _save_annotation(annotation, filename):\n\n pil_image = Image.fromarray(annotation.astype(dtype=np.uint8))\n '''\n with tf.io.gfile.GFile(filename, mode='w') as f:\n #with open(filename, mode='w') as f:\n print(f)\n pil_image.save(f, 'PNG')\n '''\n pil_image.save(filename)", "def dump_annotations(self):\n fname = 'annotations'\n if self.split is not None:\n fname = 'annotations_{}'.format(self.split)\n fname = os.path.join(self.dest_folder, '{}.json'.format(fname))\n self.save(self.dataset, fname, \"annotations\")", "def convert_to_annotation(file, output):\n resource = parse_bel_resource(file)\n\n write_annotation(\n keyword=resource['Namespace']['Keyword'],\n values={k: '' for k in resource['Values']},\n citation_name=resource['Citation']['NameString'],\n description=resource['Namespace']['DescriptionString'],\n file=output,\n )", "def write_annotations(self, file, seqrecords):\n\n file.write('!gaf-version: {}\\n'.format(GAF_VERSION))\n file.write('!Project Name: OMA Fast Function Projection\\n')\n file.write('!Date created: {}\\n'.format(time.strftime(\"%c\")))\n file.write('!Contact Email: contact@omabrowser.org\\n')\n for anno in self.iter_projected_goannotations(seqrecords):\n GOA.writerec(anno, file, GOA.GAF20FIELDS)", "def write_annotations(self, output_file):\n logging.info(self._header)\n np.savetxt(output_file, self._zeroes, header=\" \".join(self._header),fmt='%i',comments='')", "def write_annotations(annotations, filename):\n\n with open(filename, \"w\") as file:\n for img in annotations:\n line = img + \" \"\n\n for obj in annotations[img]:\n for bounding_box in annotations[img][obj]:\n for coord in bounding_box:\n line += str(int(coord)) + \",\"\n line += str(obj) + \" \"\n\n file.write(line + \"\\n\")\n\n print(\"Wrote annotations to {}\".format(filename))", "def convert_to_annotation(file, output):\n from pybel.resources.definitions import write_annotation\n\n resource = 
parse_bel_resource(file)\n\n write_annotation(\n keyword=resource['Namespace']['Keyword'],\n values={k: '' for k in resource['Values']},\n citation_name=resource['Citation']['NameString'],\n description=resource['Namespace']['DescriptionString'],\n file=output\n )", "def write_flow(flow, filename):\n f = open(filename, 'wb')\n magic = numpy.array([202021.25], dtype=numpy.float32)\n (height, width) = flow.shape[0:2]\n w = numpy.array([width], dtype=numpy.int32)\n h = numpy.array([height], dtype=numpy.int32)\n magic.tofile(f)\n w.tofile(f)\n h.tofile(f)\n flow.tofile(f)\n f.close()", "def write_annot(filepath, labels, ctab, names, fill_ctab=True):\n with open(filepath, \"wb\") as fobj:\n dt = _ANNOT_DT\n vnum = len(labels)\n\n def write(num, dtype=dt):\n np.array([num]).astype(dtype).tofile(fobj)\n\n def write_string(s):\n s = (s if isinstance(s, bytes) else s.encode()) + b'\\x00'\n write(len(s))\n write(s, dtype='|S%d' % len(s))\n\n # Generate annotation values for each ctab entry\n if fill_ctab:\n ctab = np.hstack((ctab[:, :4], _pack_rgb(ctab[:, :3])))\n elif not np.array_equal(ctab[:, [4]], _pack_rgb(ctab[:, :3])):\n warnings.warn('Annotation values in {} will be incorrect'.format(\n filepath))\n\n # vtxct\n write(vnum)\n\n # convert labels into coded CLUT values\n clut_labels = ctab[:, -1][labels]\n clut_labels[np.where(labels == -1)] = 0\n\n # vno, label\n data = np.vstack((np.array(range(vnum)),\n clut_labels)).T.astype(dt)\n data.tofile(fobj)\n\n # tag\n write(1)\n\n # ctabversion\n write(-2)\n\n # maxstruc\n write(np.max(labels) + 1)\n\n # File of LUT is unknown.\n write_string('NOFILE')\n\n # num_entries\n write(ctab.shape[0])\n\n for ind, (clu, name) in enumerate(zip(ctab, names)):\n write(ind)\n write_string(name)\n for val in clu[:-1]:\n write(val)", "def write_annotations_tsv_file(file_path: Path, annotations: List[\"Annotation\"],\n missing_ids: List[UniqueId]):\n logger.info(\"Saving annotations to TSV file: {}.\".format(file_path))\n Destination = traffic_annotation.TrafficSemantics.Destination\n CookiesAllowed = traffic_annotation.TrafficPolicy.CookiesAllowed\n\n lines = []\n title = \"Unique ID\\tLast Update\\tSender\\tDescription\\tTrigger\\tData\\t\" + \\\n \"Destination\\tCookies Allowed\\tCookies Store\\tSetting\\tChrome Policy\\t\" + \\\n \"Comments\\tSource File\"\n\n column_count = title.count(\"\\t\")\n for missing_id in missing_ids:\n lines.append(missing_id + \"\\t\" * column_count)\n\n for annotation in annotations:\n if annotation.type.value != \"definition\":\n continue\n\n # TODO(nicolaso): Use StringIO for faster concatenation.\n\n line = annotation.proto.unique_id\n # Placeholder for Last Update Date, will be updated in the scripts.\n line += \"\\t\"\n\n # Semantics.\n semantics = annotation.proto.semantics\n semantics_list = [\n semantics.sender,\n escape_for_tsv(semantics.description),\n escape_for_tsv(semantics.trigger),\n escape_for_tsv(semantics.data),\n ]\n\n for semantic_info in semantics_list:\n line += \"\\t{}\".format(semantic_info)\n\n destination_names = {\n Destination.WEBSITE: \"Website\",\n Destination.GOOGLE_OWNED_SERVICE: \"Google\",\n Destination.LOCAL: \"Local\",\n Destination.PROXIED_GOOGLE_OWNED_SERVICE: \"Proxied to Google\",\n Destination.OTHER: \"Other\",\n }\n if (semantics.destination == Destination.OTHER\n and semantics.destination_other):\n line += \"\\tOther: {}\".format(semantics.destination_other)\n elif semantics.destination in destination_names:\n line += \"\\t{}\".format(destination_names[semantics.destination])\n else:\n 
raise ValueError(\n \"Invalid value for the semantics.destination field: {}\".format(\n semantics.destination))\n\n # Policy.\n policy = annotation.proto.policy\n if annotation.proto.policy.cookies_allowed == CookiesAllowed.YES:\n line += \"\\tYes\"\n else:\n line += \"\\tNo\"\n\n line += \"\\t{}\".format(escape_for_tsv(policy.cookies_store))\n line += \"\\t{}\".format(escape_for_tsv(policy.setting))\n\n # Chrome policies.\n if annotation.has_policy():\n policies_text = policy_to_text(\n chain(policy.chrome_policy, policy.chrome_device_policy))\n else:\n policies_text = policy.policy_exception_justification\n line += \"\\t{}\".format(escape_for_tsv(policies_text))\n\n # Comments.\n line += \"\\t{}\".format(escape_for_tsv(annotation.proto.comments))\n # Source.\n source = annotation.proto.source\n code_search_link = \"https://cs.chromium.org/chromium/src/\"\n line += \"\\t{}{}?l={}\".format(code_search_link, source.file, source.line)\n lines.append(line)\n\n lines.sort()\n lines.insert(0, title)\n report = \"\\n\".join(lines) + \"\\n\"\n\n file_path.write_text(report, encoding=\"utf-8\")", "def save_annotated_image(self, file: Path) -> None:\n pass", "def dump_cvat_task_annotations(db_task, db_user, scheme, host, format_name=None):\n timestamp = datetime.now().strftime('%Y_%m_%d_%H_%M_%S')\n\n if format_name is None:\n if db_task.mode == 'annotation':\n format_name = \"CVAT for images 1.1\"\n else:\n format_name = \"CVAT for video 1.1\"\n\n output_file_path = os.path.join(\n db_task.get_task_dirname(),\n '{}.{}.{}.zip'.format(db_task.id, db_user.username, timestamp)\n )\n\n cvat_export_task(\n task_id=db_task.id,\n dst_file=output_file_path,\n format_name=format_name,\n server_url=scheme + host,\n save_images=True,\n )\n return output_file_path", "def write_flo(flow, filename):\n flow = flow[0, :, :, :]\n f = open(filename, 'wb')\n magic = np.array([202021.25], dtype=np.float32)\n height, width = flow.shape[:2]\n magic.tofile(f)\n np.int32(width).tofile(f)\n np.int32(height).tofile(f)\n data = np.float32(flow).flatten()\n data.tofile(f)\n f.close()", "def writeAnnotationsToFile(self, annotatedRegions):\n\n\t\t#The writeToCsv does not seem to work somehow, what if we do this by hand? Can we then write to file?\n\t\twriteToCsvManual(sys.argv[2], annotatedRegions)\n\t\t\t\n\t\t#write the merged dictionary to csv, the order of the annotations and regions should column-wise be the same. \n\t\t#writeToCsv('test.csv', annotatedRegions, False)\t", "def _export_annotations_as_dicom(self):\n self.overwrite_annotations()\n self.save_dicom(os.path.join(os.path.dirname(self.dicomdir_path), self.ANNOTATED_DICOM_DIRECTORY))", "def save_annotations(self):\n r = requests.get(\n f'{self.api_host}/v1/entity-annotations?'\n f'annotation_type=Source reliability (binary)&size=100',\n headers=self.get_request_headers()\n )\n\n entity_annotations = r.json().get('entity_annotations')\n\n for annotation in entity_annotations:\n annotation_id = annotation.get('entity_id')\n with open(\n f'{self.data_folder}/annotations/{annotation_id}.json',\n 'w'\n ) as f:\n json.dump(annotation, f)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given an offset, plaintext, and oracle, forges a block with the proper padding.
def forge_block(offset, plaintext, oracle):
    b_size, _, _ = challenge_12.determine_block_stats(oracle)
    new_padding = b"A" * (b_size - offset)
    payload = new_padding + challenge_09.pkcs7(plaintext, b_size)
    ciphertext = oracle(payload)

    return challenge_07.as_blocks(ciphertext, b_size)[1]
[ "def forge_padding_block(oracle):\n b_size, pt_size, padding = challenge_12.determine_block_stats(oracle)\n new_padding = b\"A\" * padding\n\n return challenge_07.as_blocks(oracle(new_padding), b_size)[-1]", "def pad(plain_text):\n number_of_bytes_to_pad = block_size - len(plain_text) % block_size\n ascii_string = chr(number_of_bytes_to_pad)\n padding_str = number_of_bytes_to_pad * ascii_string\n padded_plain_text = plain_text + padding_str\n return padded_plain_text", "def find_block_length(encryption_oracle):\n my_text = ''\n ciphertext = encryption_oracle(my_text)\n initial_len = len(ciphertext)\n new_len = initial_len\n\n while new_len == initial_len:\n my_text += 'A'\n ciphertext = encryption_oracle(my_text)\n new_len = len(ciphertext)\n\n return new_len - initial_len", "def break_aes_using_padding_leak(cipher, init_vector, has_valid_padding):\n block_size = 16\n mutate = lambda text, i, c: text[:i] + c + text[i+1:]\n # get padding size\n pad_size = 0\n # second_last block index\n sl_block = len(cipher) - block_size*2\n for i in range(block_size):\n # check if pad_size is block_size - i\n change = 'b' if cipher[sl_block+i] == 'a' else 'a'\n if not has_valid_padding(\n mutate(cipher, sl_block+i, change), init_vector):\n pad_size = block_size - i\n break\n\n # we know pad size which means we know last pad_size bytes of result.\n prexor = FrequencyAnalyzer.get_repeating_xor(\n chr(pad_size)*pad_size, cipher[-pad_size-block_size:-block_size])\n iv_and_cipher = init_vector + cipher\n for i in range(len(prexor), len(cipher)):\n pad_size = (len(prexor) % block_size) + 1\n # decrypt byte at target_index in this iteration.\n target_index = len(cipher) - len(prexor) - 1\n for char in range(256):\n # temper iv_and_cipher\n attack = mutate(iv_and_cipher, target_index, chr(char))\n xor = FrequencyAnalyzer.get_repeating_xor(\n chr(pad_size)*(pad_size-1), prexor[:pad_size-1])\n attack = attack[:target_index+1] + xor\n # add next block\n attack = (attack +\n iv_and_cipher[len(attack):len(attack)+block_size])\n flipped_iv = attack[:block_size]\n flipped_cipher = attack[block_size:]\n if has_valid_padding(flipped_cipher, flipped_iv):\n prexor = chr(pad_size^char) + prexor\n break\n blocks = zip(\n Crypto.get_blocks(iv_and_cipher), Crypto.get_blocks(prexor))\n return Crypto.unpad_pkcs7(''.join(\n [FrequencyAnalyzer.get_repeating_xor(a, b) for a, b in blocks]))", "def _oracle_says_padding_correct(self, ciphertext: bytes) -> bool:\n raise NotImplementedError", "def ecb_cut_and_paste(encryption_oracle):\n\n # The first plaintext that will be encrypted is:\n # block 1: block 2 (pkcs7 padded): and (omitting the padding):\n # email=xxxxxxxxxx admin\\x0b\\x0b\\x0b\\x0b\\x0b\\x0b\\x0b\\x0b\\x0b\\x0b\\x0b &uid=10&role=user\n prefix_len = AES.block_size - len(\"email=\")\n suffix_len = AES.block_size - len(\"admin\")\n email1 = 'x' * prefix_len + \"admin\" + (chr(suffix_len) * suffix_len)\n encrypted1 = encryption_oracle.encrypt(email1)\n\n # The second plaintext that will be encrypted is:\n # block 1: block 2: block 3\n # email=master@me. com&uid=10&role= user\\x0c\\x0c\\x0c\\x0c\\x0c\\x0c\\x0c\\x0c\\x0c\\x0c\\x0c\\x0c\n email2 = \"master@me.com\"\n encrypted2 = encryption_oracle.encrypt(email2)\n\n # The forced ciphertext will cut and paste the previous ciphertexts to be decrypted as:\n # block 1: block 2: block 3:\n # email=master@me. 
com&uid=10&role= admin\\x0b\\x0b\\x0b\\x0b\\x0b\\x0b\\x0b\\x0b\\x0b\\x0b\\x0b\n forced = encrypted2[:32] + encrypted1[16:32]\n\n return forced", "def pad_plain_txt_raw(self):\n l, q = len(self.plain_txt_raw_orgnl), self.taille_bloc\n self.nb_oct_pad = (((l // q) + 2) * q) - l\n ol = [0] * self.nb_oct_pad\n self.plain_txt_raw = self.plain_txt_raw_orgnl + ol\n f = self.fission_octet_fus(self.nb_oct_pad, 1, self.taille_bloc)\n self.plain_txt_raw[-self.taille_bloc:] = f", "def _PadText(self, text):\r\n if len(text) in (16, 24, 32):\r\n return text\r\n return text + (SecretsManager.BLOCK_SIZE -\r\n len(text) % SecretsManager.BLOCK_SIZE) * SecretsManager.PADDING", "def pad_to_block_size(self, text, block_size, bZeroPad):\n text_length = len(text)\n amount_to_pad = block_size - (text_length % block_size)\n if (amount_to_pad == block_size):\n if (bZeroPad == 0):\n amount_to_pad = 0\n for i in range(0, amount_to_pad, 1):\n text += bytes(chr(amount_to_pad), 'ascii')\n return text", "def __init__(self, algo, mode=blockalgo.MODE_ECB):\n self.mode = mode\n self.algo = algo\n block_size = self.algo.block_size\n self.pad = lambda s: s + (block_size - len(s) % block_size) * \\\n chr(block_size - len(s) % block_size)\n self.unpad = lambda s: s[:-ord(s[len(s) - 1:])]", "def add_padding(self, text):\n\n for word in text.split(' '):\n # 5 character blocks added straight\n if len(word) == 5:\n self.output += word + ' '\n # calling the helper method to fill the blocks\n elif len(word) < 5:\n self._helper(word)\n # split the block up into 5 letter chunks\n elif len(word) > 5:\n block = ''\n for letter in word:\n block += letter\n if len(block) == 5:\n # append the chunk to output\n self.output += block + ' '\n block = ''\n self._helper(block)\n\n return self.output.upper()", "def ab_pad(string, block_size=16, random_generator=random_util.sort_of_random_bytes):\n bytes_to_pad = block_size - (len(string) % block_size)\n padding = random_generator(bytes_to_pad)\n return padding + string", "def padding_found(original_block, modified_block):\n global current_padding\n global plaintext_string\n global intermediate_string\n index = BLOCK_SIZE - ord(current_padding)\n\n intermediate_state = ord(modified_block[index]) ^ ord(current_padding)\n plaintext_value = intermediate_state ^ ord(original_block[index])\n\n intermediate_string = chr(intermediate_state) + intermediate_string\n plaintext_string = chr(plaintext_value) + plaintext_string\n # Used when the first actual padding byte has been found to skip the rest of them\n if len(plaintext_string) == 1:\n skip_plaintext_padding(plaintext_value)\n return plaintext_value\n\n old_padding = current_padding\n current_padding = chr((ord(current_padding) % BLOCK_SIZE) + 1)\n\n new_padding_chars = \"\"\n i = (BLOCK_SIZE - ARRAY_INDEX)\n while i >= index:\n intermediate_state = ord(modified_block[i]) ^ ord(old_padding)\n updated_padding = chr(ord(current_padding) ^ intermediate_state)\n new_padding_chars = updated_padding + new_padding_chars\n i -= 1\n return new_padding_chars", "def _pad(payload):\n\t\tlength = AES.block_size - (len(payload) % AES.block_size)\n\t\tif length == AES.block_size:\n\t\t\treturn payload #no padding required\n\t\tpadding = chr(length)*length\n\t\treturn payload + padding", "def skip_plaintext_padding(plaintext_padding):\n global intermediate_string\n global modified_blocks\n global plaintext_string\n global current_padding\n cur_block_index = (len(BLOCKS) - ARRAY_INDEX) - 1\n new_padding = plaintext_padding + 1\n new_padding_chars = 
chr(ord(intermediate_string) ^ new_padding)\n cur_char = (BLOCK_SIZE - ARRAY_INDEX) - ord(current_padding)\n while cur_char >= (BLOCK_SIZE - plaintext_padding):\n intermediate_state = chr(ord(BLOCKS[cur_block_index][cur_char]) ^ plaintext_padding)\n plaintext_string = chr(plaintext_padding) + plaintext_string\n intermediate_string = intermediate_state + intermediate_string\n new_padding_chars = chr(ord(intermediate_state) ^ new_padding) + new_padding_chars\n cur_char -= 1\n non_padding_char = BLOCK_SIZE - plaintext_padding\n modified_blocks[cur_block_index] = modified_blocks[cur_block_index][:non_padding_char] + new_padding_chars\n current_padding = chr(new_padding)\n return", "def _pad(data, pad_with=PADDING):\n return data + (BLOCK_SIZE - len(data) % BLOCK_SIZE) * PADDING", "def pad(octet_string, block_size=16):\n pad_length = block_size - len(octet_string)\n return octet_string + '\\x80' + '\\x00'*(pad_length-1)", "def pad_pkcs7(text, block_size=16):\n # Important: padding is added even if message length is multiple of\n # block size.\n pad_size = block_size - len(text) % block_size\n pad_char = chr(pad_size)\n return text + pad_char*pad_size", "def padding(self, text):\n add = 16 - (len(text) % 16)\n return text + ('\\0' * add)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given an oracle, forges a block with all PKCS7 padding (which occurs when the length of a plaintext is an integer multiple of the block size)
def forge_padding_block(oracle):
    b_size, pt_size, padding = challenge_12.determine_block_stats(oracle)
    new_padding = b"A" * padding

    return challenge_07.as_blocks(oracle(new_padding), b_size)[-1]
[ "def forge_block(offset, plaintext, oracle):\n b_size, _, _ = challenge_12.determine_block_stats(oracle)\n new_padding = b\"A\" * (b_size - offset)\n payload = new_padding + challenge_09.pkcs7(plaintext, b_size)\n ciphertext = oracle(payload)\n\n return challenge_07.as_blocks(ciphertext, b_size)[1]", "def pad_pkcs7(text, block_size=16):\n # Important: padding is added even if message length is multiple of\n # block size.\n pad_size = block_size - len(text) % block_size\n pad_char = chr(pad_size)\n return text + pad_char*pad_size", "def find_block_length(encryption_oracle):\n my_text = ''\n ciphertext = encryption_oracle(my_text)\n initial_len = len(ciphertext)\n new_len = initial_len\n\n while new_len == initial_len:\n my_text += 'A'\n ciphertext = encryption_oracle(my_text)\n new_len = len(ciphertext)\n\n return new_len - initial_len", "def execute_pkcs7_padding(b, sz):\n\n # Just use utils function.\n return utils.pkcs7_pad(b, sz)", "def unpad_pkcs7(text, block_size=16):\n if len(text) == 0 or len(text) % block_size != 0:\n raise ValueError(\"Input text length is invalid %s\" % len(text))\n pad_size = ord(text[-1:])\n padding = chr(pad_size)*pad_size\n if padding != text[-pad_size:]:\n raise ValueError(\"Invalid Padding.\")\n return text[:-pad_size]", "def pkcs7_pad_bytes(input_bytes, block_size):\r\n return pad(input_bytes, block_size)", "def _oracle_says_padding_correct(self, ciphertext: bytes) -> bool:\n raise NotImplementedError", "def break_aes_using_padding_leak(cipher, init_vector, has_valid_padding):\n block_size = 16\n mutate = lambda text, i, c: text[:i] + c + text[i+1:]\n # get padding size\n pad_size = 0\n # second_last block index\n sl_block = len(cipher) - block_size*2\n for i in range(block_size):\n # check if pad_size is block_size - i\n change = 'b' if cipher[sl_block+i] == 'a' else 'a'\n if not has_valid_padding(\n mutate(cipher, sl_block+i, change), init_vector):\n pad_size = block_size - i\n break\n\n # we know pad size which means we know last pad_size bytes of result.\n prexor = FrequencyAnalyzer.get_repeating_xor(\n chr(pad_size)*pad_size, cipher[-pad_size-block_size:-block_size])\n iv_and_cipher = init_vector + cipher\n for i in range(len(prexor), len(cipher)):\n pad_size = (len(prexor) % block_size) + 1\n # decrypt byte at target_index in this iteration.\n target_index = len(cipher) - len(prexor) - 1\n for char in range(256):\n # temper iv_and_cipher\n attack = mutate(iv_and_cipher, target_index, chr(char))\n xor = FrequencyAnalyzer.get_repeating_xor(\n chr(pad_size)*(pad_size-1), prexor[:pad_size-1])\n attack = attack[:target_index+1] + xor\n # add next block\n attack = (attack +\n iv_and_cipher[len(attack):len(attack)+block_size])\n flipped_iv = attack[:block_size]\n flipped_cipher = attack[block_size:]\n if has_valid_padding(flipped_cipher, flipped_iv):\n prexor = chr(pad_size^char) + prexor\n break\n blocks = zip(\n Crypto.get_blocks(iv_and_cipher), Crypto.get_blocks(prexor))\n return Crypto.unpad_pkcs7(''.join(\n [FrequencyAnalyzer.get_repeating_xor(a, b) for a, b in blocks]))", "def pkcs7_pad(message, block_size):\n\n # If the length of the given message is already equal to the block size, there is no need to pad\n if len(message) == block_size:\n return message\n\n # Otherwise compute the padding byte and return the padded message\n ch = block_size - len(message) % block_size\n return message + bytes([ch] * ch)", "def pad(plain_text):\n number_of_bytes_to_pad = block_size - len(plain_text) % block_size\n ascii_string = chr(number_of_bytes_to_pad)\n 
padding_str = number_of_bytes_to_pad * ascii_string\n padded_plain_text = plain_text + padding_str\n return padded_plain_text", "def strip_pkcs7(plaintext, blocksize=16):\n lastblock = plaintext[-16:]\n end = lastblock[-1]\n if end > blocksize:\n raise ValueError(\"PKCS7 ERROR: Padding byte is larger than blocksize\")\n\n text, padding = lastblock[:-end], lastblock[-end:]\n padsize = blocksize - len(text)\n\n if len(padding) != padsize:\n raise ValueError(\"PKCS7 ERROR: Incorrect amount of bytes in padding\")\n\n for b in padding:\n if b != padsize:\n raise ValueError(\"PKCS7 ERROR: Padding byte is incorrect value\")\n\n return text", "def _pad_bytes(data):\r\n\tpadder = symmetric_padding.PKCS7(algorithms.AES.block_size).padder()\r\n\tpadded_data = padder.update(data)\r\n\tpadded_data += padder.finalize()\r\n\treturn padded_data", "def ecb_cut_and_paste(encryption_oracle):\n\n # The first plaintext that will be encrypted is:\n # block 1: block 2 (pkcs7 padded): and (omitting the padding):\n # email=xxxxxxxxxx admin\\x0b\\x0b\\x0b\\x0b\\x0b\\x0b\\x0b\\x0b\\x0b\\x0b\\x0b &uid=10&role=user\n prefix_len = AES.block_size - len(\"email=\")\n suffix_len = AES.block_size - len(\"admin\")\n email1 = 'x' * prefix_len + \"admin\" + (chr(suffix_len) * suffix_len)\n encrypted1 = encryption_oracle.encrypt(email1)\n\n # The second plaintext that will be encrypted is:\n # block 1: block 2: block 3\n # email=master@me. com&uid=10&role= user\\x0c\\x0c\\x0c\\x0c\\x0c\\x0c\\x0c\\x0c\\x0c\\x0c\\x0c\\x0c\n email2 = \"master@me.com\"\n encrypted2 = encryption_oracle.encrypt(email2)\n\n # The forced ciphertext will cut and paste the previous ciphertexts to be decrypted as:\n # block 1: block 2: block 3:\n # email=master@me. com&uid=10&role= admin\\x0b\\x0b\\x0b\\x0b\\x0b\\x0b\\x0b\\x0b\\x0b\\x0b\\x0b\n forced = encrypted2[:32] + encrypted1[16:32]\n\n return forced", "def detection_oracle(cyphertext: bytes) -> AES.MODE_ECB | AES.MODE_CBC:\n for i in range(0, len(cyphertext), 16): # iterating over every block\n if cyphertext[i:(i+16)] in cyphertext[(i+16):]: # if the first block was ever repeated, likely ECB\n return AES.MODE_ECB\n return AES.MODE_CBC", "def _pad(payload):\n\t\tlength = AES.block_size - (len(payload) % AES.block_size)\n\t\tif length == AES.block_size:\n\t\t\treturn payload #no padding required\n\t\tpadding = chr(length)*length\n\t\treturn payload + padding", "def pad(octet_string, block_size=16):\n pad_length = block_size - len(octet_string)\n return octet_string + '\\x80' + '\\x00'*(pad_length-1)", "def test_pad(content):\n padded = Cryptographer._pad(content)\n assert len(padded) % AES.block_size == 0", "def find_padding(known, iter_len=1):\n pad = None\n starting_length = oracle(known)\n for i in range(32):\n test_pad = random_nonb64_string(i)\n padded_length = oracle(known + test_pad)\n if padded_length != starting_length:\n pad = test_pad[:-iter_len]\n break\n return pad", "def recover(n):\n recovered = b\"\"\n b_size = get_blocksize() \n rep = (n//b_size)+1\n for j in range(n):\n app = b\"A\"*((rep*b_size -(len(recovered)+1))) + recovered\n first = encrypt_oracle(b\"A\"*(rep*b_size - (len(recovered)+1)))[(rep-1)*b_size:rep*b_size]\n i = 0\n while i< 256:\n if first == encrypt_oracle(app + bytes(chr(i),'utf-8'))[(rep-1)*b_size:rep*b_size]:\n recovered += bytes(chr(i), 'utf-8')\n i = 256\n i+=1\n return recovered" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads the file from fin and saves the file in wav format to fout
def convert_to_wav(fin, fout):
    temp = subprocess.run(["ffmpeg", "-i", fin, fout], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
[ "def decode_to_wav(self, filename):\n pass", "def addWav(self, fp):\n assert os.path.exists(fp) == True\n self.wav = fp", "def convert_wav(src_wav, dst_wav, subtype='PCM_16'):\n assert os.path.exists(src_wav), \"{} not exists!\".format(src_wav)\n data, sr = soundfile.read(src_wav)\n soundfile.write(dst_wav, data, sr, subtype=subtype)", "def _convert_to_wav(self):\n cmd = ' '.join(['ffmpeg -i', self.music_file.name, \n '-y -acodec pcm_s16le -ac 1 -ar 44100', self.wave_file.name])\n self._seek_all()\n ret = call(cmd, shell=True)\n return ret", "def save_wav(file_name, signal, fs):\n wavfile.write(file_name, fs, np.int16(signal/np.max(np.abs(signal)) * (2**(16)/2-1)))", "def create_wav_file(self, ):\n\n f_out = open(self.wav_file, 'w')\n u_utt2spk = open(self.utt2spk, 'w')\n for file in glob.glob(self.wav_folder+'/*.wav'):\n base = os.path.basename(file).split('.')[0]\n # write to scp file\n f_out.write(base + '\\t' + file + '\\n')\n u_utt2spk.write(base + '\\t' + 'tts' + '\\n')", "def save_audio_file(self):\n\n # has not recorded audio\n if not self.is_audio_record:\n print(\"***you did not set the record flag!\")\n return\n\n import soundfile\n\n # save audio\n soundfile.write('{}out_audio.wav'.format(self.mic_params['plot_path']), self.collector.x_all, self.feature_params['fs'], subtype=None, endian=None, format=None, closefd=True)", "def save(self):\r\n self.__ensure_dir__(self.dir)\r\n wavfile.write(os.path.join(self.dir, self.filename), self.fs, self.data)", "def write_wav(contin, filename):\n print filename\n if contin.domain_samples.dimensionality != U_.sec.dimensionality:\n raise NotImplementedError\n\n else:\n with open(filename, 'wb') as wav_file:\n sp.io.wavfile.write(\n wav_file,\n rate=contin.sample_rate.to(U_.Hz).magnitude,\n data=contin.values.magnitude)", "def open_wav_f(fiel):\n return wave.open(fiel, 'rb')", "def _create_wave_file(self):\n is_wave_open = False\n try:\n wv = wave.open(self.audio_file_name, mode='wb')\n is_wave_open = True\n wv.setparams((1, # 1 channel (mono)\n 2, # 2 bytes per sample * 1 channel\n self.sample_rate,\n 0, # Initial number of samples.\n 'NONE',\n 'not compressed'))\n wv.writeframes(self.sample_buffer)\n except:\n print('Error creating audio file')\n if is_wave_open:\n wv.close()", "def _init_wav_file(self):\n self.stringio = cStringIO.StringIO()\n self.wavefile = wave.open(self.stringio, 'wb')\n self.wavefile.setnchannels(self.channels)\n self.wavefile.setsampwidth(\n self.pyaudio.get_sample_size(pyaudio.paInt16))\n self.wavefile.setframerate(self.rate)", "def read_sound(self, inFile):\n\n # Python can natively only read \"wav\" files. To be flexible, use \"ffmpeg\" for conversion for other formats\n if not os.path.exists(inFile):\n print('{0} does not exist!'.format(inFile))\n raise FileNotFoundError\n \n (root, ext) = os.path.splitext(inFile)\n if ext[1:].lower() != 'wav':\n if self.ffmpeg_info.ffmpeg == None:\n print('Sorry, need FFMPEG for non-WAV files!')\n self.rate = None\n self.data = None\n raise NoFFMPEG_Error\n \n outFile = root + '.wav'\n cmd = [self.ffmpeg_info.ffmpeg, '-i', inFile, outFile, '-y']\n subprocess.run(cmd)\n print('Infile converted from ' + ext + ' to \".wav\"')\n \n inFile = outFile\n self.source = outFile\n\n self.rate, self.data = read(inFile)\n \n # Set the filename\n self.source = inFile\n \n # Make sure that the data are in some integer format\n # Otherwise, e.g. 
Windows has difficulty playing the sound\n # Note that \"self.source\" is set to \"None\", in order to\n # play the correct, converted file with \"play\"\n if not np.issubdtype(self.data.dtype, np.integer):\n self.generate_sound(self.data, self.rate)\n \n self._setInfo()\n print('data read in!')", "def read_wav_file(wave_file):\n return wavfile.read(wave_file)", "def load_librosa (self):\r\n for index, data in tqdm(enumerate (self.file_list)):\r\n sound, sampling_rate = librosa.load(data, sr = self.target_sampling)\r\n librosa.output.write_wav(self.save_file_path + \"/\" + \"{:07d}\".format(index+1) + \".wav\", sound, self.target_sampling)", "def wavwrite(fname, Fs, xt):\n # convert to np.int16 data type\n xt = np.array((2**15-1)*xt, np.int16)\n sio_wav.write(fname, Fs, xt)", "def read_wav(fileName):\r\n samplingRate, data = wf.read(fileName)\r\n if data.dtype == 'int16':\r\n data = data/(2**15)\r\n if data.dtype == 'int32':\r\n data = data/(2**31)\r\n signal = SignalObj(data, 'time', samplingRate=samplingRate)\r\n return signal", "def mp3_to_wav(self, file):\n sound = AudioSegment.from_mp3(file)\n file = file[:-4]\n sound.export(file+\".wav\", format=\"wav\")", "def load_wav(file_name):\n fs, signal = wavfile.read(file_name)\n signal = np.float32(signal) / (2**(16)/2-1)\n return fs, signal" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the message data business_id to a specific value
def step_impl_the_ru_is_set_to(context, business_id):
    context.bdd_helper.message_data["business_id"] = business_id
[ "def business_id(self, business_id):\n self._business_id = business_id", "def business_id(self, business_id):\n\n self._business_id = business_id", "def business_phone(self, business_phone):\n\n self._business_phone = business_phone", "def business_key(self, business_key):\n\n self._business_key = business_key", "def business_phone_number(self, business_phone_number):\n\n self._business_phone_number = business_phone_number", "def business_id(self):\n return self._business_id", "def get_business_id(self):\n rest_dic = self.get_restaurant()\n business_id = rest_dic[\"business_id\"]\n return business_id", "def bus_ob_id(self, bus_ob_id):\n\n self._bus_ob_id = bus_ob_id", "def _set_id(self, value):\n pass", "def business_email(self, business_email):\n\n self._business_email = business_email", "def mailing_id(self, val: str):\n self._mailing_id = val", "def foreign_data_id(self, foreign_data_id):\n\n self._foreign_data_id = foreign_data_id", "def business_model(self, business_model):\n\n self._business_model = business_model", "def message_id(self, val: str):\n self._message_id = val", "def business_account(self, business_account):\n\n self._business_account = business_account", "def business_unit(self, business_unit):\n\n self._business_unit = business_unit", "def set_company_id_value(self, company_id_value):\n self.company_id_value = company_id_value", "def business_identification_no(self, business_identification_no):\n if business_identification_no is not None and len(business_identification_no) > 25:\n raise ValueError(\"Invalid value for `business_identification_no`, length must be less than or equal to `25`\")\n\n self._business_identification_no = business_identification_no", "def set(self, value):\n value = utils.data_factory(value)\n self._message.set_body_value(value)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Import a submission from deviantArt. Ignores flash content. Uses a combination of the DA backend and HTML scraping.
def import_submission(self, submission: praw.objects.Submission) -> dict:
    try:
        if self.regex_direct.match(urlsplit(submission.url).netloc):
            r = requests.head(submission.url, headers=self.headers)
            mime_text = r.headers.get('Content-Type')
            mime = mimeparse.parse_mime_type(mime_text)
            if mime[0] == 'image':
                self.log.debug('DA link is a direct image')
                data = {'author': 'An unknown DA author',
                        'source': submission.url,
                        'import_urls': [submission.url],
                        'importer_display':
                            {'header': 'Mirrored deviantArt image '
                                       'by an unknown author:\n\n'}}
                return data

        if not self.regex.match(urlsplit(submission.url).netloc):
            return None

        query_url = 'http://backend.deviantart.com/oembed?{}'.format(
            urlencode({'format': 'json', 'url': submission.url}))
        self.log.debug('%s is valid DA url.', submission.url)
        self.log.debug('Querying DA API %s', query_url)

        response = json.loads(self.read_url(query_url))
        if response['type'] not in ('link', 'photo'):
            self.log.debug('Response is not link or photo')
            return None

        self.log.debug('Author name: %s', response['author_name'])

        # Using the official DA API
        data = {'author': response['author_name'],
                'source': submission.url,
                'importer_display':
                    {'header': 'Mirrored deviantArt image by the author "{}":\n\n'.format(
                        response['author_name'])}}

        if response['type'] == 'link':
            data['import_urls'] = [response['fullsize_url']]
            self.log.debug('Found DA API url %s', data['import_urls'])

        try:
            # Trying to scrape manually
            bs = BeautifulSoup(self.read_url(submission.url))
            # Checking for flash animation, because mirroring a preview
            # for a flash animation is stupid
            is_flash = bool(bs.select('iframe[class~=flashtime]'))
            is_madefire = bool(bs.select('iframe[class~=madefire-player]'))
            if is_flash or is_madefire:
                self.log.info('DA url is flash, no preview needed.')
                return None

            # Seems to alternate between the two
            full_view = (bs.select('img[class~=fullview]') or
                         bs.select('img[class~=dev-content-full]'))
            if full_view:
                full_url = full_view[0]['src']
                self.log.debug('Found full DA image url: %s', full_url)
                data['import_urls'] = [full_url]
        except Exception as e:
            self.log.error(traceback.format_exc())

        if 'import_urls' not in data:
            self.log.debug('No url found for DA image.')
            return None

        return data
    except Exception as e:
        self.log.error('Deviantart Error: %s', traceback.format_exc())
        return None
[ "def scrape_submission(submission_url):\n\n\t'''\n\tScrape Data\n\t'''\n\n\t# Get submission dict\n\tsubmission_dict = reddit.extract_post_data(submission_url=submission_url)\n\n\t# Get list of comments_dicts\n\tsubmission_object = submission_dict.get('submission_object')\n\tcomments_dict = reddit.extract_post_comments_data(submission_object)\n\n\t'''\n\tExit if no comments were extracted from the submission\n\t'''\n\n\tif not len(comments_dict.get('data')) > 0:\n\t\tlogger.info('Data extraction yielded zero comments. Aborting sentiment analysis and database insertion.')\n\t\treturn\n\n\t'''\n\tAnalyze Sentiment\n\t'''\n\n\t# Call sentimentanalysis to analyze the comments and append the dicts\n\tSentimentAnalysis.list_parser(comments_dict)\n\n\t'''\n\tInsert to Database\n\t'''\n\n\t# Create instance of database_manager\n\tdatabase_manager = DatabaseManager()\n\n\t# Check if submission exists\n\tif database_manager.check_submission_exists(submission_dict):\n\t\t# Delete the submission and associated data if exists\n\t\tdatabase_manager.delete_submission(submission_dict)\n\n\t# Insert new submission info into database\n\tnew_submission = database_manager.insert_submission(submission_dict)\n\n\t# Insert comments if submission inserted successfully\n\tif new_submission is not None:\n\t\tdatabase_manager.insert_comments(comments_dict, new_submission)\n\t\tdatabase_manager.insert_sentiment(comments_dict)\n\n\t# Returns submission_id\n\treturn submission_dict.get('id')", "def submitData():\n\n # Get the action to be performed\n action = request.post_vars.action\n if action == \"vulnerability\":\n return import_vul_ui()\n elif action == \"vulnerability_part1\":\n return import_vul_csv_part1()\n elif action == \"vulnerability_part2\":\n return import_vul_csv_part2()\n elif action in (\"map\", \"image\", \"other\", \"vca\"):\n return import_document(action)\n elif action == \"demographics\":\n return import_demo_ui()\n elif action == \"demographics_part1\":\n return import_demo_csv_part1()\n elif action == \"demographics_part2\":\n return import_demo_csv_part2()", "def import_submission(self, submission: praw.objects.Submission) -> dict:\n try:\n if not self.regex.match(urlsplit(submission.url).netloc):\n return None\n data = {'author': 'a gyazo.com user',\n 'source': submission.url,\n 'importer_display':\n {'header': 'Imported gyazo.com image:\\n\\n'}}\n r = requests.head(submission.url, headers=self.headers)\n if r.status_code == 301:\n return None\n\n mime_text = r.headers.get('Content-Type')\n mime = mimeparse.parse_mime_type(mime_text)\n # If we're already given an image...\n if mime[0] == 'image':\n # Use the already given URL\n image_url = submission.url\n else:\n # Otherwise, use the gyazo oEmbed API.\n response = requests.get(\n 'https://api.gyazo.com/api/oembed/',\n {'url': submission.url},\n headers=self.headers).json()\n if response.get('type') == 'photo':\n image_url = response.get('url')\n else:\n # This is something that is not a photo. 
Do not scrape.\n return None\n\n assert image_url\n data['import_urls'] = [image_url]\n return data\n except Exception:\n self.log.error('Could not import gyazo URL %s (%s)',\n submission.url, traceback.format_exc())\n return None", "def submitData():\n\n # Get the action to be performed\n action = request.vars.action\n if action == \"vulnerability\":\n return import_vul_create()\n elif action == \"vulnerability_part1\":\n return import_vul_part1()\n elif action == \"vulnerability_part2\":\n return import_vul_part2()\n elif action == \"map\":\n return import_image(action)\n elif action == \"image\":\n return import_image(action)\n elif action == \"other\":\n return import_image(action)\n elif action == \"vca\":\n return import_image(action)\n elif action == \"demographics\":\n return import_demo_create()\n elif action == \"demographics_part1\":\n return import_demo_part1()\n elif action == \"demographics_part2\":\n return import_demo_part2()", "def send_submission(url, payload, pre_submission, row_id, survey_dict):\n response = post(url, data=payload, cookies=pre_submission.cookies)\n filename = '%s/submissions/%s--%s.html' % (\n survey_dict['name'], survey_dict['filename_prefix'], row_id)\n database.save_file(response.content, name=filename)\n tree = html.fromstring(response.content)\n success_response = get_success_response(tree)\n return success_response", "def submission():\n\n # @ToDo: Something better than this crude check\n if not auth.s3_logged_in():\n auth.permission.fail()\n\n from io import StringIO\n import cgi\n from lxml import etree\n\n source = request.post_vars.get(\"xml_submission_file\", None)\n if isinstance(source, cgi.FieldStorage):\n if source.filename:\n xmlinput = source.file\n else:\n xmlinput = source.value\n\n if isinstance(xmlinput, str):\n xmlinput = StringIO(xmlinput)\n elif request.env.request_method == \"HEAD\":\n raise HTTP(204)\n else:\n raise HTTP(400, \"Invalid Request: Expected an XForm\")\n\n tree = etree.parse(xmlinput)\n tablename = tree.getroot().tag\n\n resource = s3db.resource(tablename)\n\n stylesheet = os.path.join(request.folder, \"static\", \"formats\", \"odk\",\n \"import.xsl\")\n\n try:\n result = resource.import_xml(source=tree, stylesheet=stylesheet)\n except (IOError, SyntaxError):\n raise HTTP(500, \"Internal server error\")\n\n # Parse response\n status = json.loads(result)[\"statuscode\"]\n\n if status == \"200\":\n r = HTTP(201, \"Saved\") # ODK Collect only accepts 201\n r.headers[\"Location\"] = request.env.http_host\n raise r\n else:\n raise HTTP(status, result)", "def extract_src(session, file_name, submission_num):\n # Gets the HTML page for the submission page\n response = session.get(\"https://dmoj.ca/src/\" + submission_num + \"/raw\")\n with open(file_name, \"w\") as f:\n f.write(response.text)", "def post(self):\n if (not self.request.get(\"file\") or\n not self.request.get(\"name\") or\n not self.request.get(\"format\")):\n self.redirect(\"/import?msg=REQUIRED_FIELD\")\n return\n snapshot = model.Snapshot()\n snapshot.type = \"import\"\n snapshot.user = users.get_current_user()\n snapshot.status = \"building\"\n snapshot.put()\n\n logging.info(snapshot.key().id())\n\n try:\n taskqueue.add(url=\"/worker/import\",\n params={\"file\": self.request.get(\"file\"),\n \"name\": self.request.get(\"name\"),\n \"format\": self.request.get(\"format\"),\n \"id\": snapshot.key().id()})\n except taskqueue.TaskTooLargeError, e:\n logging.info(e, exc_info=True)\n self.redirect(\"/import?msg=FILE_TOO_LARGE\")\n return\n\n 
self.redirect(\"/import\")", "def import_submission_for_form(request, username, id_string, service):\n return PROVIDERS.get(service.lower(), {}) \\\n .get('imp_form', unknown_service)(request, username, None)", "def experimentImport(request):\n if request.method == 'POST':\n form = ImportForm(request.POST, request.FILES)\n if form.is_valid():\n json_data = request.FILES['import_file'].read()\n ExperimentAdmin.importFromJSON(request, json_data)\n return redirect('/admin/experiments/experiment')\n form = ImportForm()\n return render(request, 'admin/experiments/import_form.html', {'form': form})", "def import_entry(self, entry, feed_obj):\n from djangofeeds.models import BlacklistedDomain\n self.logger.debug(\"Importing entry %s...\" % feed_obj.feed_url)\n\n fields = self.post_fields_parsed(entry, feed_obj)\n \n # Extract link from summary instead of link field if pattern specified.\n if feed_obj.summary_detail_link_regex:\n if 'summary_detail' in entry:\n # Old Reddit RSS format.\n link_matches = re.findall(\n feed_obj.summary_detail_link_regex,\n entry['summary_detail']['value'],\n re.I|re.DOTALL)\n self.logger.debug('Summary detail links: '+str(link_matches))\n if link_matches:\n fields['link'] = link_matches[0].strip()\n elif 'summary' in entry:\n # New Reddit RSS format.\n link_matches = re.findall(\n feed_obj.summary_detail_link_regex,\n entry['summary'],\n re.I|re.DOTALL)\n self.logger.debug('Summary detail links v2: '+str(link_matches))\n if link_matches:\n fields['link'] = link_matches[0].strip()\n \n if conf.LINK_URL_REGEXES:\n for pattern in conf.LINK_URL_REGEXES:\n _matches = pattern.findall(fields['link'])\n if _matches:\n fields['link'] = _matches[0]\n \n # Check to see if domain has been blacklisted.\n if BlacklistedDomain.is_blacklisted(fields['link']):\n self.logger.info(\"Ignoring blacklisted URL: %s\" % (\n fields['link']))\n return\n \n post = self.post_model.objects.update_or_create(feed_obj, **fields)\n if not post:\n self.logger.debug(\"Unable to update or create post from entry: %s\" % (\n entry))\n return\n\n if not post.article_content and conf.GET_ARTICLE_CONTENT:\n if webarticle2text:\n self.logger.debug('Download article content from %s...' % post.link)\n try:\n post.retrieve_article_content()\n #except urllib2.HTTPError, e:\n except Exception, e:\n self.logger.error('Error: Unable to retrieve %s: %s' % (post.link, e))\n else:\n self.logger.warn('Unable to download article content. 
GET_ARTICLE_CONTENT = True but webarticle2text not installed.')\n\n if self.include_enclosures:\n post.enclosures.add(*(self.get_enclosures(entry) or []))\n if self.include_categories:\n post.categories.add(*(self.get_categories(entry) or []))\n\n self.logger.debug(\"ie: %s Post successfully imported...\" % (\n feed_obj.feed_url))\n\n return post", "def questions_import():\n if request.method == \"POST\":\n file = request.form['questions']\n for item in json.loads(file):\n try:\n models.Questions.add(questionid=item['Id'], userid=1000,\n question=item['Question'],\n answer=item['Answer'])\n except:\n models.Questions.add(questionid=item['id'], userid=1000,\n question=item['question'],\n answer=item['answer'])\n return redirect(url_for('index'))\n return render_template('questionimported.html')", "def import_submission(conn, submission):\n with conn.begin():\n mbids = []\n if submission['mbid']:\n mbids.append(submission['mbid'])\n if submission['puid']:\n min_duration = submission['length'] - 15\n max_duration = submission['length'] + 15\n mbids.extend(find_puid_mbids(conn, submission['puid'], min_duration, max_duration))\n logger.info(\"Importing submission %d with MBIDs %s\",\n submission['id'], ', '.join(mbids))\n matches = lookup_fingerprint(conn,\n submission['fingerprint'], submission['length'],\n TRACK_MERGE_TRESHOLD, FINGERPRINT_MERGE_TRESHOLD, fast=True)\n fingerprint = {\n 'id': None,\n 'track_id': None,\n 'fingerprint': submission['fingerprint'],\n 'length': submission['length'],\n 'bitrate': submission['bitrate'],\n 'source_id': submission['source_id'],\n 'format_id': submission['format_id'],\n }\n if matches:\n match = matches[0]\n logger.debug(\"Matches %d results, the top result (%s) is %d%% similar\",\n len(matches), match['id'], match['score'] * 100)\n fingerprint['track_id'] = match['track_id']\n if match['score'] > FINGERPRINT_MERGE_TRESHOLD:\n fingerprint['id'] = match['id']\n if not fingerprint['track_id']:\n fingerprint['track_id'] = insert_track(conn)\n logger.info('Added new track %d', fingerprint['track_id'])\n if not fingerprint['id']:\n fingerprint['id'] = insert_fingerprint(conn, fingerprint)\n logger.info('Added new fingerprint %d', fingerprint['id'])\n for mbid in mbids:\n if insert_mbid(conn, fingerprint['track_id'], mbid):\n logger.info('Added MBID %s to track %d', mbid, fingerprint['track_id'])\n update_stmt = schema.submission.update().where(\n schema.submission.c.id == submission['id'])\n conn.execute(update_stmt.values(handled=True))\n return fingerprint", "def test_72_bulk_epicollect_import_non_html(self, Mock, mock):\r\n html_request = FakeRequest('Not an application/json', 200,\r\n {'content-type': 'text/html'})\r\n Mock.return_value = html_request\r\n self.register()\r\n self.new_application()\r\n app = db.session.query(App).first()\r\n url = '/app/%s/tasks/import?template=csv' % (app.short_name)\r\n res = self.app.post(url, data={'epicollect_project': 'fakeproject',\r\n 'epicollect_form': 'fakeform',\r\n 'formtype': 'json'},\r\n follow_redirects=True)\r\n msg = \"Oops! 
That project and form do not look like the right one.\"\r\n assert msg in res.data", "def strip_praw_submission(cls, sub):\n\n reddit_link = re.compile(\n r'https?://(www\\.)?(np\\.)?redd(it\\.com|\\.it)/r/.*')\n author = getattr(sub, 'author', '[deleted]')\n name = getattr(author, 'name', '[deleted]')\n flair = getattr(sub, 'link_flair_text', '')\n\n data = {}\n data['object'] = sub\n data['type'] = 'Submission'\n data['title'] = sub.title\n data['text'] = sub.selftext\n data['html'] = sub.selftext_html or ''\n data['created'] = cls.humanize_timestamp(sub.created_utc)\n data['created_long'] = cls.humanize_timestamp(sub.created_utc, True)\n data['comments'] = '{0} comments'.format(sub.num_comments)\n data['score'] = '{0} pts'.format('-' if sub.hide_score else sub.score)\n data['author'] = name\n data['permalink'] = sub.permalink\n data['subreddit'] = six.text_type(sub.subreddit)\n data['flair'] = '[{0}]'.format(flair.strip(' []')) if flair else ''\n data['url_full'] = sub.url\n data['likes'] = sub.likes\n data['gold'] = sub.gilded\n data['nsfw'] = sub.over_18\n data['stickied'] = sub.stickied\n data['hidden'] = sub.hidden\n data['xpost_subreddit'] = None\n data['index'] = None # This is filled in later by the method caller\n data['saved'] = sub.saved\n if sub.edited:\n data['edited'] = '(edit {})'.format(\n cls.humanize_timestamp(sub.edited))\n data['edited_long'] = '(edit {})'.format(\n cls.humanize_timestamp(sub.edited, True))\n else:\n data['edited'] = ''\n data['edited_long'] = ''\n\n if sub.url.split('/r/')[-1] == sub.permalink.split('/r/')[-1]:\n data['url'] = 'self.{0}'.format(data['subreddit'])\n data['url_type'] = 'selfpost'\n elif reddit_link.match(sub.url):\n # Strip the subreddit name from the permalink to avoid having\n # submission.subreddit.url make a separate API call\n url_parts = sub.url.split('/')\n data['xpost_subreddit'] = url_parts[4]\n data['url'] = 'self.{0}'.format(url_parts[4])\n if 'comments' in url_parts:\n data['url_type'] = 'x-post submission'\n else:\n data['url_type'] = 'x-post subreddit'\n else:\n data['url'] = sub.url\n data['url_type'] = 'external'\n\n return data", "def query_edgar_for_submission_text(filing_href: str) -> Report:\n\n def _submission_text_href_from_filing_html(html) -> str:\n path1 = (\n \"//table[@summary = 'Document Format Files']\"\n \"//td[contains(text(), 'submission')]\"\n \"//following-sibling::td\"\n \"/a[contains(@href, '.txt')]/@href\"\n )\n return _extract_href_from_filing_html(html, \"submission txt\", path1)\n\n return _query_edgar_for_filing_document(filing_href, _submission_text_href_from_filing_html)", "def process(submitted_doc):\n\n submitted_data = submitted_doc.data\n url = submitted_data.get('originalUrl')\n if not url:\n raise ProcessingInputError('POST[\"data\"] does not have \"originalUrl\" set!')\n url = url.strip()\n\n doc_file = download(url)\n if not doc_file.tika_data:\n with doc_file.open() as f:\n doc_file.tika_data = tika_parse(f)\n doc_file.save()\n\n data = dict(submitted_data.items())\n data.update(doc_file.tika_data)\n\n doc, created = models.Document.objects.update_or_create(\n file_url=url,\n parsed=data,\n submit=submitted_doc,\n file=doc_file,\n )\n\n return doc, created", "def extract_submission_data(submission_id):\n\n try:\n submission = Submission.objects.get(id=submission_id)\n except Submission.DoesNotExist:\n logger.critical(\n \"{} Submission {} does not exist\".format(\n SUBMISSION_LOGS_PREFIX, submission_id\n )\n )\n traceback.print_exc()\n # return from here so that the message can be 
acked\n # This also indicates that we don't want to take action\n # for message corresponding to which submission entry\n # does not exist\n return None\n # Ignore submissions with status cancelled\n if submission.status == Submission.CANCELLED:\n logger.info(\n \"{} Submission {} was cancelled by the user\".format(\n SUBMISSION_LOGS_PREFIX, submission_id\n )\n )\n return None\n\n if submission.challenge_phase.challenge.is_static_dataset_code_upload:\n input_file = submission.submission_input_file\n else:\n input_file = submission.input_file\n submission_input_file = input_file.url\n submission_input_file = return_file_url_per_environment(\n submission_input_file\n )\n\n submission_data_directory = SUBMISSION_DATA_DIR.format(\n submission_id=submission.id\n )\n submission_input_file_name = os.path.basename(input_file.name)\n submission_input_file_path = SUBMISSION_INPUT_FILE_PATH.format(\n submission_id=submission.id, input_file=submission_input_file_name\n )\n # create submission directory\n create_dir_as_python_package(submission_data_directory)\n\n download_and_extract_file(\n submission_input_file, submission_input_file_path\n )\n\n return submission", "def process_submission(self, submission):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Post event to all rulesets
def all_events_request():
    result = []
    message = json.loads(request.stream.read().decode('utf-8'))
    for ruleset_name in host.list_rulesets():
        result.append(host.post(ruleset_name, message))
    return jsonify(result)
[ "def post_events(ruleset_name):\n message = json.loads(request.stream.read().decode('utf-8'))\n result = host.post(ruleset_name, message)\n return jsonify(result)", "def register_rules_set(self, rules_set):\n self.rules_set = rules_set", "def update_rule_list(self):\n self.rules = self.rule_manager.get_all_rules()", "def _do_rule_processing(self, line, events):\n\n for rule in self.rules:\n match = rule.regexp.search(line)\n if match:\n events.append(Event(self, rule.handler, LogMatch(line, match)))\n if rule.quick:\n break", "def rule(self, rules):\n\n if not isinstance(rules, list):\n rules = [rules]\n\n for rule in rules:\n self.__addRule(rule)", "def save_formset(self, request, form, formset, change):\n # Save inline Rulesets\n formset.save()\n # Save Group instance and update permissions\n form.instance.save(update_fields=['name'])", "def apply_ruleset(self, ruleset):\n updates = [self._get_lexicon_update(ruleset['lexicon'])]\n updates += ruleset['rules']\n self.apply_updates(updates)", "def apply_rules(self):\n if len(self.rules) == 0:\n return\n for gene in self.population:\n for rule in self.rules:\n if rule.type == \"gene\":\n rule.check_and_apply(gene)", "def rule(self, rule):\n self._rule = rule", "def rulesetsRefreshed(self):\n self.remoteBots.allowSync = True\n self.remoteBots.syncRequests()", "def rule(self, rule: List[Rule]):\n\n self._rule = rule", "def register_rules(self, **kwargs):\n for rule in self.rules:\n r = rule.format(slug=self.slug, **kwargs)\n with self.app.app_context():\n self.app.add_url_rule(r, endpoint=self.slug)\n self.rules_set = True\n return self", "def add_rule(self, rule):\n self.rules.append(rule)", "def check_rules(self, event):\n logger.debug(\"Checking rules\")\n # Iterate through rules and try to apply them\n for rule in RippleConfig().rules[:]:\n event_type = event['type']\n if self.match_condition(event_type, rule):\n # Currently putting in pathname as key, need to\n # think of a better way to handle \"other\" information\n send_event = {'event': {\n 'type': event_type,\n 'size': event['bytes'],\n 'key': event['key'],\n 'pathname': event['key'],\n 'path': event['key'],\n 'name': event['key'],\n 'shmid': event['shmid'],\n 'perms': event['perms'],\n 'owner': event['owner'],\n 'status': event['status'],\n 'uuid': str(uuid.uuid4()),\n 'hash': 'hashvalue'\n }\n }\n print (\"Sending event: %s\" % send_event)\n send_event.update(rule)\n\n # Now push it down the queue\n message = json.dumps(send_event)\n RippleConfig().queue.put(message)\n logger.debug(\"Sent data to queue\")\n\n return None", "def add_rule(self, rule):\n \n self.rules.append(rule)", "def add_rules(self, *rules):\n for r in rules:\n self.add_rule(r)", "def add_rules(self, rules):\n if self.rule_book is not None:\n self.rule_book.add_rules(rules)", "def test_post_entry_groups(self):\r\n # This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "def add_rule(self, rule):\n self.__rules__.append(rule)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set ruleset state sid
def set_state_sid_request(ruleset_name, sid):
    message = json.loads(request.stream.read().decode('utf-8'))
    message['sid'] = sid
    result = host.patch_state(ruleset_name, message)
    return jsonify(result)
[ "def sid(self, sid):\n self._sid = sid", "def set_state(self, niface, state):\n self.d.set_state(niface, state)", "def state_id(self, state_id):\n self._state_id = state_id", "def state_id(self, state_id):\n\n self._state_id = state_id", "def state_id(self, s):\n pass", "def set_state(self, state):\n set_state(\n state.format(relation_name=self.relation_name),\n {'relation': self.relation_name})", "def set_domain_sid(self, sid):\n dsdb._samdb_set_domain_sid(self, sid)", "def set_outlet_state(self, outlet_id, outlet_state):\n raise NotImplementedError", "def set_state(self,state):\n self.__state = state", "def rule_id(self, rule_id):\n self._rule_id = rule_id", "def set_state(self, state):\n self.state = state", "def setIdentity(self) -> None:\n ...", "def set_state(self, outlet_state):\n self.bank.set_outlet_state(self.identifier, outlet_state)", "def set_state( state_dic, session_id, state ):\n try:\n state_dic[ session_id ][\"state\"] = state\n except:\n state_dic[ session_id ]= {\"state\" : state,\"waiting\" : { }}", "def set_id(self, ssc_id):\r\n self.ssc_id = ssc_id", "def psid(self, psid):\n\n self._psid = psid", "def set_state(self, state=0):\r\n return self._arm.set_state(state=state)", "def rule_id(self, rule_id):\n\n self._rule_id = rule_id", "def assign_state(self,state_obj):\r\n self.room_state=state_obj" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get ruleset state sid
def get_state_sid_request(ruleset_name, sid):
    result = host.get_state(ruleset_name, sid)
    return jsonify(result)
[ "def survey_state_id(self):\n return self._get('survey_state')", "def state_id(self, s):\n pass", "def state_id(self):\n return self._state_id", "def get_sid(self):\n return self.sid", "def sid(self):\n return self._sid", "def sid(self):\n return self.data[''].sid", "def sbml_id(self) -> sp.Symbol:\n return self._sbml_id", "def get_sid(self):\n resdat = self.req().read() #phew, that was easy :)\n print resdat\n resdat = self.parse_response(resdat)\n if (resdat[0][1][0] != \"c\"):\n return None\n sid = resdat[0][1][1]\n return sid", "def getSymbolicState(self):\n return self._stateSymb", "def get_sid(self):\n path = self.get_file_path()\n cur_sid = Sid(path=path)\n return cur_sid", "def sid(self): # pylint: disable=invalid-name\n\n return 's{}'.format(self._id)", "def SRID():\r\n return SurveyPointMixin._SRID", "def set_state_sid_request(ruleset_name, sid):\n message = json.loads(request.stream.read().decode('utf-8'))\n message['sid'] = sid\n result = host.patch_state(ruleset_name, message)\n return jsonify(result)", "def getId(self):\n return _libsbml.Rule_getId(self)", "def config_rule_state(self) -> str:\n return pulumi.get(self, \"config_rule_state\")", "def rule_id(self):\n return self._rule_id", "def stream_id(self):\n return self.__sid", "def sis_source_id(self):\r\n return self._sis_source_id", "def _get_session_state(self):\n r = self._http_client.get(\"/sessions\", [200])\n sessions = r.json()[\"sessions\"]\n filtered_sessions = [s for s in sessions if s[\"id\"] == int(self._id)]\n \n if len(filtered_sessions) != 1:\n raise AssertionError(\"Expected one session of id {} but got {} sessions.\"\n .format(self._id, len(filtered_sessions)))\n \n session = filtered_sessions[0]\n return session['state']" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Post events to the ruleset
def post_events(ruleset_name):
    message = json.loads(request.stream.read().decode('utf-8'))
    result = host.post(ruleset_name, message)
    return jsonify(result)
[ "def _do_rule_processing(self, line, events):\n\n for rule in self.rules:\n match = rule.regexp.search(line)\n if match:\n events.append(Event(self, rule.handler, LogMatch(line, match)))\n if rule.quick:\n break", "def PostEvent(*args, **kwargs):\n return _core_.PostEvent(*args, **kwargs)", "def all_events_request():\n result = []\n message = json.loads(request.stream.read().decode('utf-8'))\n for ruleset_name in host.list_rulesets():\n result.append(host.post(ruleset_name, message))\n return jsonify(result)", "def test_post_add_log_event(self):\n pass", "def check_rules(self, event):\n logger.debug(\"Checking rules\")\n # Iterate through rules and try to apply them\n for rule in RippleConfig().rules[:]:\n event_type = event['type']\n if self.match_condition(event_type, rule):\n # Currently putting in pathname as key, need to\n # think of a better way to handle \"other\" information\n send_event = {'event': {\n 'type': event_type,\n 'size': event['bytes'],\n 'key': event['key'],\n 'pathname': event['key'],\n 'path': event['key'],\n 'name': event['key'],\n 'shmid': event['shmid'],\n 'perms': event['perms'],\n 'owner': event['owner'],\n 'status': event['status'],\n 'uuid': str(uuid.uuid4()),\n 'hash': 'hashvalue'\n }\n }\n print (\"Sending event: %s\" % send_event)\n send_event.update(rule)\n\n # Now push it down the queue\n message = json.dumps(send_event)\n RippleConfig().queue.put(message)\n logger.debug(\"Sent data to queue\")\n\n return None", "def post(self, resource, data):\n url = \"{}/{}/{}/events/\".format(self.base_url, self.api_version,\n event.collection_name)\n headers = {\"Content-Type\": \"application/json\", \"X-Tastemakers-Key\": self.api_key}\n payload = json.dumps(data)\n response = requests.post(url, data=payload, headers=headers)\n if response.status_code != 201:\n error = response.json\n raise exceptions.TastemakersApiError(error)", "def add_events(self, *events):\n for event in events:\n self.event(event)", "def do_group(self, arg):\n group_events = graph.request('{}/events'.format(arg))\n try:\n for events in group_events['data']:\n event_data = loads(json.dumps(events))\n result = self.posts.insert_one(event_data)\n print(\"**** Events from {} group page have been added! ****\". format(arg))\n except:\n print(\"**** No souch group ID exists! 
****\")", "def process(self, event):\n pass", "def record_event(event):\n events.append(event)", "def send(self, events, validation_hit=False, postpone=False, date=None):\n\n # check for any missing or invalid parameters among automatically collected and recommended event types\n self._check_params(events)\n self._check_date_not_in_future(date)\n\n if postpone is True:\n # build event list to send later\n for event in events:\n event[\"_timestamp_micros\"] = self._get_timestamp(time.time())\n self._event_list.append(event)\n else:\n # batch events into sets of 25 events, the maximum allowed.\n batched_event_list = [\n events[event : event + 25] for event in range(0, len(events), 25)\n ]\n # send http post request\n self._http_post(\n batched_event_list, validation_hit=validation_hit, date=date\n )", "def post(self, request, *args, **kwargs):\n self.request = request\n self.event = self.get_other_object()\n return super().post(request, *args, **kwargs)", "def add_cron_events(cron_events):", "def onEvent(self, event):", "def post_event(self, event_name, run_time=0):\n self.machine.events.post(event_name)\n self.advance_time_and_run(run_time)", "def post(self):\n # se captura y se parsea a json el body del request recibido por el\n # webhook\n request_body = json.loads(self.request.body)\n\n for body in request_body:\n \"\"\" Evaluar el tipo de evento ya que trae campos diferentes \"\"\"\n logging.info(request_body)\n\n event = str(body['event'])\n correo = str(body['email'])\n numero_folio = str(body['numero_folio'])\n tipo_dte = str(body['tipo_dte'])\n\n logging.info(event)\n\n if event and correo and numero_folio and tipo_dte:\n\n if event == 'processed':\n email_model = EmailModel.search_email(correo, numero_folio, tipo_dte)\n if not email_model == None:\n email_model.smtp_id = body['smtp-id']\n email_model.processed_date = datetime.datetime.fromtimestamp(body['timestamp'])\n email_model.processed_event = event\n email_model.processed_sg_event_id = body['sg_event_id']\n email_model.processed_sg_message_id = body['sg_message_id']\n email_model.correo = str(body['email'])\n email_model.numero_folio = str(body['numero_folio'])\n email_model.tipo_dte = str(body['tipo_dte'])\n email_model.put()\n else:\n e = EmailModel()\n e.smtp_id = body['smtp-id']\n e.processed_date = datetime.datetime.fromtimestamp(body['timestamp'])\n e.processed_event = event\n e.processed_sg_event_id = body['sg_event_id']\n e.processed_sg_message_id = body['sg_message_id']\n e.correo = str(body['email'])\n e.numero_folio = str(body['numero_folio'])\n e.tipo_dte = str(body['tipo_dte'])\n e.put()\n\n elif event == 'delivered':\n email_model = EmailModel.search_email(correo, numero_folio, tipo_dte)\n if not email_model == None:\n email_model.smtp_id = body['smtp-id']\n email_model.delivered_date = datetime.datetime.fromtimestamp(body['timestamp'])\n email_model.delivered_event = event\n email_model.delivered_sg_event_id = body['sg_event_id']\n email_model.delivered_sg_message_id = body['sg_message_id']\n email_model.delivered_response = body['response']\n email_model.correo = str(body['email'])\n email_model.numero_folio = str(body['numero_folio'])\n email_model.tipo_dte = str(body['tipo_dte'])\n email_model.put()\n else:\n e = EmailModel()\n e.smtp_id = body['smtp-id']\n e.delivered_date = datetime.datetime.fromtimestamp(body['timestamp'])\n e.delivered_event = event\n e.delivered_sg_event_id = body['sg_event_id']\n e.delivered_sg_message_id = body['sg_message_id']\n e.delivered_response = body['response']\n e.correo = 
str(body['email'])\n e.numero_folio = str(body['numero_folio'])\n e.tipo_dte = str(body['tipo_dte'])\n e.put()\n\n elif event == 'open':\n model = EmailModel()\n email_model = EmailModel.search_email(correo, numero_folio, tipo_dte)\n if not email_model == None:\n if email_model.opened_first_date == None:\n email_model.opened_first_date = datetime.datetime.fromtimestamp(body['timestamp'])\n email_model.opened_last_date = datetime.datetime.fromtimestamp(body['timestamp'])\n email_model.opened_event = event\n email_model.opened_ip = body['ip']\n email_model.opened_user_agent = body['useragent']\n email_model.opened_sg_event_id = body['sg_event_id']\n email_model.opened_sg_message_id = body['sg_message_id']\n model.email_add_count(email_model)\n email_model.correo = str(body['email'])\n email_model.numero_folio = str(body['numero_folio'])\n email_model.tipo_dte = str(body['tipo_dte'])\n email_model.put()\n else:\n e = EmailModel()\n if e.opened_first_date == None:\n e.opened_first_date = datetime.datetime.fromtimestamp(body['timestamp'])\n e.opened_last_date = datetime.datetime.fromtimestamp(body['timestamp'])\n e.opened_event = event\n e.opened_ip = body['ip']\n e.opened_user_agent = body['useragent']\n e.opened_sg_event_id = body['sg_event_id']\n e.opened_sg_message_id = body['sg_message_id']\n e.email_add_count(e)\n e.correo = str(body['email'])\n e.numero_folio = str(body['numero_folio'])\n e.tipo_dte = str(body['tipo_dte'])\n e.put()\n\n elif event == 'dropped':\n email_model = EmailModel.search_email(correo, numero_folio, tipo_dte)\n if not email_model == None:\n email_model.smtp_id = body['smtp-id']\n email_model.dropped_date = datetime.datetime.fromtimestamp(body['timestamp'])\n email_model.dropped_sg_event_id = body['sg_event_id']\n email_model.dropped_sg_message_id = body['sg_message_id']\n email_model.dropped_reason = body['reason']\n email_model.dropped_event = event\n email_model.correo = str(body['email'])\n email_model.numero_folio = str(body['numero_folio'])\n email_model.tipo_dte = str(body['tipo_dte'])\n email_model.put()\n else:\n e = EmailModel()\n e.smtp_id = body['smtp-id']\n e.dropped_date = datetime.datetime.fromtimestamp(body['timestamp'])\n e.dropped_sg_event_id = body['sg_event_id']\n e.dropped_sg_message_id = body['sg_message_id']\n e.dropped_reason = body['reason']\n e.dropped_event = event\n e.correo = str(body['email'])\n e.numero_folio = str(body['numero_folio'])\n e.tipo_dte = str(body['tipo_dte'])\n e.put()\n\n elif event == 'bounce':\n email_model = EmailModel.search_email(correo, numero_folio, tipo_dte)\n if not email_model == None:\n email_model.bounce_date = datetime.datetime.fromtimestamp(body['timestamp'])\n email_model.bounce_event = event\n email_model.bounce_sg_event_id = body['sg_event_id']\n email_model.bounce_sg_message_id = body['sg_message_id']\n email_model.bounce_reason = body['reason']\n email_model.bounce_status = body['status']\n email_model.bounce_type = body['type']\n email_model.correo = str(body['email'])\n email_model.numero_folio = str(body['numero_folio'])\n email_model.tipo_dte = str(body['tipo_dte'])\n email_model.put()\n else:\n e = EmailModel()\n e.bounce_date = datetime.datetime.fromtimestamp(body['timestamp'])\n e.bounce_event = event\n e.bounce_sg_event_id = body['sg_event_id']\n e.bounce_sg_message_id = body['sg_message_id']\n e.bounce_reason = str(body['reason']).decode(\"utf-8\")\n e.bounce_status = body['status']\n e.bounce_type = body['type']\n e.correo = str(body['email'])\n e.numero_folio = str(body['numero_folio'])\n 
e.tipo_dte = str(body['tipo_dte'])\n e.put()\n\n elif event == 'unsubscribe':\n email_model = EmailModel.search_email(correo, numero_folio, tipo_dte)\n if not email_model == None:\n email_model.unsubscribe_date = datetime.datetime.fromtimestamp(body['timestamp'])\n email_model.unsubscribe_uid = body['uid']\n email_model.unsubscribe_purchase = body['purchase']\n email_model.unsubscribe_id = body['id']\n email_model.unsubscribe_event = body['event']\n email_model.correo = str(body['email'])\n email_model.numero_folio = str(body['numero_folio'])\n email_model.tipo_dte = str(body['tipo_dte'])\n email_model.put()\n else:\n e = EmailModel()\n e.unsubscribe_date = datetime.datetime.fromtimestamp(body['timestamp'])\n e.unsubscribe_uid = body['uid']\n e.unsubscribe_purchase = body['purchase']\n e.unsubscribe_id = body['id']\n e.unsubscribe_event = body['event']\n e.correo = str(body['email'])\n e.numero_folio = str(body['numero_folio'])\n e.tipo_dte = str(body['tipo_dte'])\n e.put()\n else:\n logging.info('body con campos vacios')", "def write_event(self, event):\n self.events_written.append(event)", "def _emit(self, event):\n receivers = [task for task in self._tasks if task is not self]\n for task in receivers:\n task.post(event)\n return None", "def subscribe(self, evts):\n for evt in evts:\n dispatcher.subscribe(evt, self.post)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Post sid events to the ruleset
def post_sid_events(ruleset_name, sid):
    message = json.loads(request.stream.read().decode('utf-8'))
    message['sid'] = sid
    result = host.post(ruleset_name, message)
    return jsonify(result)
[ "def post_events(ruleset_name):\n message = json.loads(request.stream.read().decode('utf-8'))\n result = host.post(ruleset_name, message)\n return jsonify(result)", "def _srsp_event_set(self, zpi_cmd):\r\n if zpi_cmd not in self._srsp_events:\r\n self._srsp_events[zpi_cmd] = threading.Event()\r\n\r\n self._srsp_events[zpi_cmd].set()", "def set_state_sid_request(ruleset_name, sid):\n message = json.loads(request.stream.read().decode('utf-8'))\n message['sid'] = sid\n result = host.patch_state(ruleset_name, message)\n return jsonify(result)", "def sid(self, sid):\n self._sid = sid", "def do_group(self, arg):\n group_events = graph.request('{}/events'.format(arg))\n try:\n for events in group_events['data']:\n event_data = loads(json.dumps(events))\n result = self.posts.insert_one(event_data)\n print(\"**** Events from {} group page have been added! ****\". format(arg))\n except:\n print(\"**** No souch group ID exists! ****\")", "def informed_consent_on_post_save(sender, instance, raw, created, **kwargs):\n if not raw:\n if created:\n pass\n # instance.registration_update_or_create()\n # update_model_fields(instance=instance,\n # model_cls=['subject_identifier', instance.subject_identifier])\n try:\n OnSchedule.objects.get(\n subject_identifier=instance.subject_identifier, )\n except OnSchedule.DoesNotExist:\n onschedule_model = 'training_subject.onschedule'\n put_on_schedule(schedule_name='training_subject_visit_schedule', instance=instance, onschedule_model=onschedule_model)", "def _store_event(server, headers, events):\n batch = list()\n for event in events:\n batch.append({\"method\": \"events.create\", \"params\": event})\n pryv.batch_call(server, headers, batch)", "def post(self, rule_id):\n\n user = g.get('user', None)\n\n # convert DateTime object from client to matching database format\n start_time = datetime.datetime.strptime(self._args[\"start_time\"].dt_format, \"%Y-%m-%dT%H:%M:%S%f\")\n\n # create subscription in DB\n self._db.create_subscription(user[\"user_id\"], rule_id, start_time, self._args[\"interval\"])\n\n return self._db.get_rule(rule_id), 201", "def process_event(self, st):\n self.server.process_event(st)", "def setFilterOnRule(request):\n\t\n\tlogger = logging.getLogger(__name__)\n\t\n\t# Get some initial post values for processing.\n\truleIds = request.POST.getlist('id')\n\tsensors = request.POST.getlist('sensors')\n\tcommentString = request.POST['comment']\n\tforce = request.POST['force']\n\tresponse = []\n\t\n\t# If the ruleIds list is empty, it means a SID has been entered manually.\n\tif len(ruleIds) == 0:\n\t\t# Grab the value from the POST.\n\t\truleSID = request.POST['sid']\n\t\t\n\t\t# Match the GID:SID pattern, if its not there, throw exception.\n\t\ttry:\n\t\t\tmatchPattern = r\"(\\d+):(\\d+)\"\n\t\t\tpattern = re.compile(matchPattern)\n\t\t\tresult = pattern.match(ruleSID)\n\t\t\t\n\t\t\truleGID = result.group(1)\n\t\t\truleSID = result.group(2)\n\t\texcept:\n\t\t\tresponse.append({'response': 'invalidGIDSIDFormat', 'text': 'Please format in the GID:SID syntax.'})\n\t\t\tlogger.warning(\"Invalid GID:SID syntax provided: \"+str(ruleSID)+\".\")\n\t\t\treturn HttpResponse(json.dumps(response))\n\t\t\n\t\t# Try to find a generator object with the GID supplied, if it doesnt exist, throw exception.\n\t\ttry:\n\t\t\tg = Generator.objects.filter(GID=ruleGID).count() # There might be more than one.\n\t\t\tif g == 0:\n\t\t\t\tresponse.append({'response': 'gidDoesNotExist', 'text': 'GID '+ruleGID+' does not exist.'})\n\t\t\t\tlogger.warning(\"'GID 
\"+str(ruleGID)+\" could not be found.\")\n\t\t\t\treturn HttpResponse(json.dumps(response))\n\t\texcept Generator.DoesNotExist:\n\t\t\tresponse.append({'response': 'gidDoesNotExist', 'text': 'GID '+ruleGID+' does not exist.'})\n\t\t\tlogger.warning(\"'GID \"+str(ruleGID)+\" could not be found.\")\n\t\t\treturn HttpResponse(json.dumps(response))\n\t\t\n\t\t# Try to find a rule object with the SID supplied, if it doesnt exist, throw exception.\n\t\ttry:\n\t\t\truleIds.append(Rule.objects.get(SID=ruleSID).id)\n\t\texcept Rule.DoesNotExist:\n\t\t\tresponse.append({'response': 'sidDoesNotExist', 'text': 'SID '+ruleSID+' does not exist.'})\n\t\t\tlogger.warning(\"'SID \"+str(ruleSID)+\" could not be found.\")\n\t\t\treturn HttpResponse(json.dumps(response))\n\t\t\n\t# If force is false, it means we have to check everything.\t\t\t\t\n\tif force == \"False\":\n\t\t\n\t\tfor sensor in sensors:\n\t\t\ttry:\n\t\t\t\tSensor.objects.get(id=int(sensor))\n\t\t\texcept Sensor.DoesNotExist:\n\t\t\t\tresponse.append({'response': 'sensorDoesNotExist', 'text': 'Sensor with DB ID '+sensor+' does not exist.'})\n\t\t\t\tlogger.warning(\"Sensor with DB ID \"+str(sensor)+\" could not be found.\")\n\t\t\t\treturn HttpResponse(json.dumps(response))\t\t\t\n\t\t\n\t\t# We iterate through all selected sensors and rules to see if a threshold already exists.\n\t\t# We warn the user if there are thresholds. We also check to see if the rule objects selected exist. \t\n\t\tfor sensor in sensors:\n\t\t\ts = Sensor.objects.get(id=sensor)\n\n\t\t\tfor ruleId in ruleIds:\n\t\t\t\ttry:\n\t\t\t\t\tr = Rule.objects.get(id=ruleId)\n\t\t\t\t\tif r.eventFilters.filter(sensor=s).count() > 0:\n\t\t\t\t\t\tif len(response) == 0:\n\t\t\t\t\t\t\tresponse.append({'response': 'thresholdExists', 'text': 'Thresholds already exists, do you want to overwrite?.', 'sids': []})\n\t\t\t\t\t\tresponse[0]['sids'].append(r.SID)\n\t\t\t\t\t\tresponse[0]['sids']=list(set(response[0]['sids']))\n\t\t\t\texcept Rule.DoesNotExist:\n\t\t\t\t\tresponse.append({'response': 'ruleDoesNotExist', 'text': 'Rule with DB ID '+ruleId+' does not exist.'})\n\t\t\t\t\tlogger.warning(\"Rule with DB ID \"+str(ruleId)+\" could not be found.\")\n\t\t\t\t\treturn HttpResponse(json.dumps(response))\n\t\t\t\n\t\t# Warn the user if the comment string is empty.\n\t\tif commentString == \"\":\n\t\t\tresponse.append({'response': 'noComment', 'text': 'You have not set any comments on this action, are you sure you want to proceed?.'})\n\t\t\n\t\t# Warn the user since all sensors is default.\n\t\tif \"1\" in sensors:\n\t\t\tresponse.append({'response': 'allSensors', 'text': 'You are setting this threshold on all sensors, are you sure you want to do that?.'})\n\t\t\n\t\t# If any responses were triggered, return them. 
Else, we set force to true and implement the threshold.\n\t\tif len(response) > 0:\n\t\t\treturn HttpResponse(json.dumps(response))\n\t\telse:\n\t\t\tforce=\"True\"\n\t\n\t# The user either wants us to continue or there were no warnings.\n\tif force == \"True\":\n\t\tfilterType = request.POST['filterType']\n\t\ttcount = int(request.POST['count'])\n\t\ttseconds = int(request.POST['seconds'])\n\t\t\n\t\tif filterType == 'eventFilter':\n\t\t\tttype = int(request.POST['type'])\n\t\t\n\t\t\t# We make sure type is in the correct range.\n\t\t\tif ttype not in range(1,4):\n\t\t\t\tresponse.append({'response': 'typeOutOfRange', 'text': 'Type value out of range.'})\n\t\t\t\tlogger.warning(\"Type value out of range: \"+str(ttype)+\".\")\n\t\t\t\treturn HttpResponse(json.dumps(response))\n\t\n\t\tttrack = int(request.POST['track'])\n\t\t\n\t\t# We make sure track is in the correct range.\n\t\tif ttrack not in range(1,3):\n\t\t\tresponse.append({'response': 'trackOutOfRange', 'text': 'Track value out of range.'})\n\t\t\tlogger.warning(\"Track value out of range: \"+str(ttrack)+\".\")\n\t\t\treturn HttpResponse(json.dumps(response))\n\t\t\n\t\t# If this is an edit, we have to do some things with the comment object.\n\t\tif request.POST.get('edit'):\n\t\t\teditid = int(request.POST['edit'])\n\t\t\tif filterType == 'eventFilter':\n\t\t\t\ttry:\n\t\t\t\t\t# Grab the object and delete its comment object.\n\t\t\t\t\teFilter = EventFilter.objects.get(id=editid)\n\t\t\t\t\tif eFilter.comment is not None:\n\t\t\t\t\t\tcomment = Comment.objects.get(id=eFilter.comment.id)\n\t\t\t\t\t\tcomment.delete()\n\t\t\t\t\t\n\t\t\t\texcept Comment.DoesNotExist:\n\t\t\t\t\tlogger.warning(\"Could not find Comment with id \"+str(eFilter.comment.id)+\".\")\n\t\t\t\texcept EventFilter.DoesNotExist:\n\t\t\t\t\tlogger.warning(\"Could not find EventFilter with id \"+str(editid)+\".\")\n\t\t\t\t\t\n\t\t\telif filterType == 'detectionFilter':\n\t\t\t\ttry:\n\t\t\t\t\t# Grab the object and delete its comment object.\n\t\t\t\t\tdFilter = DetectionFilter.objects.get(id=editid)\n\t\t\t\t\tif dFilter.comment is not None:\n\t\t\t\t\t\tcomment = Comment.objects.get(id=dFilter.comment.id)\n\t\t\t\t\t\tcomment.delete()\n\t\t\t\t\t\t\n\t\t\t\texcept Comment.DoesNotExist:\n\t\t\t\t\tlogger.warning(\"Could not find Comment with id \"+str(dFilter.comment.id)+\".\")\n\t\t\t\texcept DetectionFilter.DoesNotExist:\n\t\t\t\t\tlogger.warning(\"Could not find DetecionFilter with id \"+str(editid)+\".\")\n\t\t\n\t\t# We iterate over all the rules and sensors to implement the threshold.\n\t\ttry:\n\t\t\tfor ruleId in ruleIds:\n\t\t\t\tfor sensorId in sensors:\n\t\t\t\t\ttrule = Rule.objects.get(id=ruleId)\n\t\t\t\t\ttsensor = Sensor.objects.get(id=int(sensorId))\n\t\t\t\t\t\n\t\t\t\t\ttry:\n\t\t\t\t\t\tif filterType == 'eventFilter':\n\t\t\t\t\t\t\tcomment = Comment.objects.create(user=request.user,comment=commentString, type=\"newEventFilter\")\n\t\t\t\t\t\t\targuments = {'rule':trule, 'sensor':tsensor, 'comment':comment, 'eventFilterType':ttype, 'track':ttrack, 'count':tcount, 'seconds':tseconds}\n\t\t\t\t\t\t\tfilterObject = EventFilter.objects.get(rule=trule, sensor=tsensor)\n\t\t\t\t\t\t\tfilterObject.eventFilterType = ttype\n\t\t\t\t\t\telif filterType == 'detectionFilter':\n\t\t\t\t\t\t\tcomment = Comment.objects.create(user=request.user,comment=commentString, type=\"newDetectionFilter\")\n\t\t\t\t\t\t\targuments = {'rule':trule, 'sensor':tsensor, 'comment':comment, 'track':ttrack, 'count':tcount, 'seconds':tseconds}\n\t\t\t\t\t\t\tfilterObject = 
DetectionFilter.objects.get(rule=trule, sensor=tsensor)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\traise InvalidValueError(filterType+\" is not a valid filter type!\")\n\t\t\t\t\t\t\n\t\t\t\t\t\tfilterObject.track = ttrack\n\t\t\t\t\t\tfilterObject.count = tcount\n\t\t\t\t\t\tfilterObject.seconds = tseconds\n\t\t\t\t\t\tfilterObject.comment = comment\n\t\t\t\t\t\tfilterObject.save()\n\t\t\t\t\t\tlogger.info(\"EventFilter successfully updated on rule: \"+str(trule)+\".\")\n\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\texcept EventFilter.DoesNotExist:\n\t\t\t\t\t\tfilterObject = EventFilter.objects.create(**arguments)\n\t\t\t\t\t\tfilterObject.save()\n\t\t\t\t\t\tlogger.info(\"event_filter successfully added to rule: \"+str(trule)+\".\")\n\t\t\t\t\texcept DetectionFilter.DoesNotExist:\n\t\t\t\t\t\tfilterObject = DetectionFilter.objects.create(**arguments)\n\t\t\t\t\t\tfilterObject.save()\n\t\t\t\t\t\tlogger.info(\"detection_filter successfully added to rule: \"+str(trule)+\".\")\n\t\t\t\n\t\t\tresponse.append({'response': 'filterAdded', 'text': filterType+' successfully added.'})\n\t\t\n\t\t\treturn HttpResponse(json.dumps(response))\n\t\texcept Exception as e: # Something went wrong.\n\t\t\tresponse.append({'response': 'addFilterFailure', 'text': 'Failed when trying to add filter.'})\n\t\t\tlogger.error(\"Failed when trying to add filter: \"+e.message)\n\t\t\treturn HttpResponse(json.dumps(response))", "def post(self, resource, data):\n url = \"{}/{}/{}/events/\".format(self.base_url, self.api_version,\n event.collection_name)\n headers = {\"Content-Type\": \"application/json\", \"X-Tastemakers-Key\": self.api_key}\n payload = json.dumps(data)\n response = requests.post(url, data=payload, headers=headers)\n if response.status_code != 201:\n error = response.json\n raise exceptions.TastemakersApiError(error)", "def record_event(event):\n events.append(event)", "def put(self, rule_id):\n\n user = g.get('user', None)\n\n # convert DateTime object from client to matching database format\n start_time = datetime.datetime.strptime(self._args[\"start_time\"].dt_format, \"%Y-%m-%dT%H:%M:%S%f\")\n\n # create subscription in DB\n self._db.update_subscription(user[\"user_id\"], rule_id, start_time, self._args[\"interval\"])\n\n return resource_helper.empty_response(204)", "def listen(self, ids):", "def _push(self, server):\n defns = [self.get_id(ident) for ident in list(self.ids)]\n #for ident in list(self.ids):\n # defn = self.get_id(ident)\n if len(defns) == 0:\n return\n self.app.logger.info(f\"Updating {server} with {len(defns)} records\")\n url = f\"{server}/add_record\"\n try:\n resp = requests.post(url, json=defns)\n except Exception as e:\n self.app.logger.error(str(e))\n return\n if not resp.ok:\n self.app.logger.error(f\"{resp.reason} {resp.content}\")\n return\n self._server_updated[server] = True", "def add(userid, event_name):", "def write_event(self, event):\n self.events_written.append(event)", "def save_event(self, data):\n rdb.table(self.rdb_table).insert(data)", "def publishEvent(eventName,publisher, msg):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function populates an instance of DeadlineTab with the UI controls that make up the submission dialog. This tab is instantiated by Katana every time the user selects "Tabs > Thinkbox > Submit to Deadline" from the menu bar in Katana. Essentially, this function serves as a deferred __init__ implementation for the tab class that can be easily updated via the Deadline repository.
def PopulateSubmitter( gui ):
    global submissionInfo

    print( "Grabbing submitter info..." )
    try:
        stringSubInfo = CallDeadlineCommand( [
            "-prettyJSON",
            "-GetSubmissionInfo",
            "Pools",
            "Groups",
            "MaxPriority",
            "UserHomeDir",
            "RepoDir:submission/Katana/Main",
            "RepoDir:submission/Integration/Main",
        ], useDeadlineBg=True )
        output = json.loads( stringSubInfo, encoding="utf-8" )
    except:
        print( "Unable to get submitter info from Deadline:\n\n" + traceback.format_exc() )
        raise

    if output[ "ok" ]:
        submissionInfo = output[ "result" ]
    else:
        print( "DeadlineCommand returned a bad result and was unable to grab the submitter info.\n\n" + output[ "result" ] )
        raise ValueError( output[ "result" ] )

    # Create a widget with a vertical box layout as a container for widgets to include in the tab
    scrollWidget = QWidget()
    scrollLayout = QGridLayout(scrollWidget)
    scrollLayout.setSpacing(4)
    scrollLayout.setContentsMargins(4, 4, 4, 4)

    buttonLayout = QHBoxLayout()

    # First layout: General options
    scrollLayout.addWidget(CreateSeparator( "Job Description" ),0,0,1,3)

    jobNameLabel = QLabel( "Job Name" )
    jobNameLabel.setToolTip("The name of your job. This is optional, and if left blank, it will default to 'Untitled'.")
    scrollLayout.addWidget(jobNameLabel,1,0)
    gui.jobNameWidget = QLineEdit( os.path.basename(FarmAPI.GetKatanaFileName()).split('.')[0] )
    scrollLayout.addWidget(gui.jobNameWidget, 1, 1, 1, 1 )

    commentLabel = QLabel( "Comment" )
    commentLabel.setToolTip("A simple description of your job. This is optional and can be left blank.")
    scrollLayout.addWidget(commentLabel,2,0)
    gui.commentWidget = QLineEdit( "" )
    scrollLayout.addWidget(gui.commentWidget, 2, 1, 1, 1 )

    departmentLabel = QLabel( "Department" )
    departmentLabel.setToolTip( "The department you belong to. This is optional and can be left blank." )
    scrollLayout.addWidget(departmentLabel, 3, 0)
    gui.departmentWidget = QLineEdit( "" )
    scrollLayout.addWidget(gui.departmentWidget, 3, 1, 1, 1 )

    # Second layout: Job options
    scrollLayout.addWidget(CreateSeparator( "Job Options" ),4,0,1,3)

    pools = submissionInfo["Pools"]
    poolLabel = QLabel( "Pool" )
    poolLabel.setToolTip( "The pool that your job will be submitted to." )
    scrollLayout.addWidget(poolLabel, 5, 0)
    gui.poolsWidget = QComboBox()
    gui.poolsWidget.addItems(pools)
    scrollLayout.addWidget(gui.poolsWidget, 5, 1 )

    secondPoolLabel = QLabel( "Secondary Pool" )
    secondPoolLabel.setToolTip( "The secondary pool lets you specify a pool to use if the primary pool does not have any available Slaves." )
    scrollLayout.addWidget(secondPoolLabel, 6, 0 )
    gui.secondPoolsWidget = QComboBox()
    gui.secondPoolsWidget.addItems(pools)
    scrollLayout.addWidget(gui.secondPoolsWidget, 6, 1 )

    groups = submissionInfo[ "Groups" ]
    groupLabel = QLabel( "Group" )
    groupLabel.setToolTip( "The group that your job will be submitted to." )
    scrollLayout.addWidget(groupLabel, 7, 0)
    gui.groupWidget = QComboBox()
    gui.groupWidget.addItems(groups)
    scrollLayout.addWidget(gui.groupWidget, 7, 1)

    priorityLabel = QLabel( "Priority" )
    priorityLabel.setToolTip( "A job can have a numeric priority from 0 to 100, where 0 is the lowest priority and 100 is the highest." )
    scrollLayout.addWidget(priorityLabel, 8, 0)
    maxPriority = submissionInfo["MaxPriority"]
    gui.priorityBox = QSpinBox()
    gui.priorityBox.setMinimum(0)
    gui.priorityBox.setMaximum( maxPriority )
    scrollLayout.addWidget(gui.priorityBox, 8, 1)

    taskTimeoutLabel = QLabel( "Task Timeout" )
    taskTimeoutLabel.setToolTip( "The number of minutes a Slave has to render a task for this job before it requeues it. Specify 0 for no limit." )
    scrollLayout.addWidget(taskTimeoutLabel, 9, 0)
    gui.taskTimeoutBox = QSpinBox()
    gui.taskTimeoutBox.setMinimum(0)
    gui.taskTimeoutBox.setMaximum(10000)
    scrollLayout.addWidget(gui.taskTimeoutBox, 9, 1)

    concurrentTasksLabel = QLabel( "Concurrent Tasks" )
    concurrentTasksLabel.setToolTip("The number of tasks that can render concurrently on a single Slave. This is useful if the rendering application only uses one thread to render and your Slaves have multiple CPUs.")
    scrollLayout.addWidget(concurrentTasksLabel, 10, 0 )
    gui.concurrentTasksWidget = QSpinBox( )
    scrollLayout.addWidget(gui.concurrentTasksWidget, 10, 1)
    gui.concurrentTasksWidget.setMinimum(1)
    gui.concurrentTasksWidget.setMaximum(16)
    gui.limitTasksSlaveLimit = QCheckBox( "Limit Tasks To Slave's Task Limit" )
    gui.limitTasksSlaveLimit.setToolTip( "If you limit the tasks to a Slave's task limit, then by default, the Slave won't dequeue more tasks then it has CPUs. This task limit can be overridden for individual Slaves by an administrator." )
    scrollLayout.addWidget(gui.limitTasksSlaveLimit, 10, 2)

    machineLimitLabel = QLabel( "Machine Limit" )
    machineLimitLabel.setToolTip("Use the Machine Limit to specify the maximum number of machines that can render your job at one time. Specify 0 for no limit.")
    scrollLayout.addWidget( machineLimitLabel, 11, 0 )
    gui.machineLimitWidget = QSpinBox()
    scrollLayout.addWidget(gui.machineLimitWidget, 11, 1)
    gui.isBlackListWidget = QCheckBox( "Machine List Is Blacklist" )
    gui.isBlackListWidget.setToolTip("You can force the job to render on specific machines by using a whitelist, or you can avoid specific machines by using a blacklist.")
    scrollLayout.addWidget(gui.isBlackListWidget, 11, 2)

    machineListLabel = QLabel( "Machine List" )
    machineListLabel.setToolTip("The whitelisted or blacklisted list of machines.")
    scrollLayout.addWidget( machineListLabel, 12, 0 )
    machineListLayout = QHBoxLayout()
    gui.machineListWidget = QLineEdit( "" )
    machineListLayout.addWidget(gui.machineListWidget)
    getMachineListWidget = QPushButton( "..." )
    getMachineListWidget.pressed.connect( lambda: BrowseMachineList(gui.machineListWidget) )
    machineListLayout.addWidget(getMachineListWidget)
    scrollLayout.addLayout( machineListLayout, 12, 1, 1, 2 )

    limitsLabel = QLabel( "Limits" )
    limitsLabel.setToolTip("The Limits that your job requires.")
    scrollLayout.addWidget( limitsLabel, 13, 0 )
    limitsLayout = QHBoxLayout()
    gui.limitsWidget = QLineEdit( "" )
    limitsLayout.addWidget(gui.limitsWidget)
    getLimitsWidget = QPushButton( "..." )
    getLimitsWidget.pressed.connect( lambda: BrowseLimitList(gui.limitsWidget) )
    limitsLayout.addWidget(getLimitsWidget)
    scrollLayout.addLayout( limitsLayout, 13, 1, 1, 2 )

    dependenciesLabel = QLabel( "Dependencies" )
    dependenciesLabel.setToolTip("Specify existing jobs that this job will be dependent on. This job will not start until the specified dependencies finish rendering.")
    scrollLayout.addWidget( dependenciesLabel, 14, 0 )
    dependenciesLayout = QHBoxLayout()
    gui.dependenciesWidget = QLineEdit( "" )
    dependenciesLayout.addWidget(gui.dependenciesWidget)
    getDependenciesWidget = QPushButton( "..." )
    getDependenciesWidget.pressed.connect( lambda: BrowseDependencyList(gui.dependenciesWidget) )
    dependenciesLayout.addWidget(getDependenciesWidget)
    scrollLayout.addLayout( dependenciesLayout, 14, 1, 1, 2 )

    onJobCompleteLabel = QLabel( "On Job Complete" )
    onJobCompleteLabel.setToolTip("If desired, you can automatically archive or delete the job when it completes.")
    scrollLayout.addWidget( onJobCompleteLabel, 15, 0 )
    gui.onJobCompleteWidget = QComboBox( )
    gui.onJobCompleteWidget.addItems(["Nothing", "Archive", "Delete"])
    scrollLayout.addWidget(gui.onJobCompleteWidget, 15, 1)
    gui.submitSuspendedWidget = QCheckBox( "Submit Job as Suspended" )
    gui.submitSuspendedWidget.setToolTip( "If enabled, the job will submit in the suspended state. This is useful if you don't want the job to start rendering right away. Just resume it from the Monitor when you want it to render.")
    scrollLayout.addWidget(gui.submitSuspendedWidget, 15, 2)

    # Third layout: Katana options
    scrollLayout.addWidget(CreateSeparator( "Katana Options" ),16,0,1,3)

    frameRangeLabel = QLabel( "Frame Range" )
    frameRangeLabel.setToolTip("The list of frames to render.")
    scrollLayout.addWidget( frameRangeLabel, 17, 0 )
    gui.frameRangeWidget = QLineEdit( "" )  # Populate based on frame range
    scrollLayout.addWidget( gui.frameRangeWidget, 17, 1, 1, 1 )
    frameRange = FarmAPI.GetSceneFrameRange()
    gui.frameRangeWidget.setText( str(frameRange['start']) + "-" + str(frameRange['end']) )

    gui.submitSceneBox = QCheckBox( "Submit Katana Scene File" )
    gui.submitSceneBox.setToolTip( "If this option is enabled, the scene file will be submitted with the job, and then copied locally to the Slave machine during rendering." )
    scrollLayout.addWidget(gui.submitSceneBox, 17, 2 )

    framesPerTaskLabel = QLabel( "Frames Per Task" )
    framesPerTaskLabel.setToolTip( "This is the number of frames that will be rendered at a time for each job task." )
    scrollLayout.addWidget( framesPerTaskLabel, 18, 0 )
    gui.framesPerTaskWidget = QSpinBox( )
    gui.framesPerTaskWidget.setMinimum(1)
    scrollLayout.addWidget( gui.framesPerTaskWidget, 18, 1, 1, 1 )
    gui.useWorkingDirectory = QCheckBox( "Use Working Directory" )
    gui.useWorkingDirectory.setToolTip( "If enabled, the current working directory will be used during rendering. This is required if your Katana project file contains relative paths." )
    gui.useWorkingDirectory.setChecked(True)
    scrollLayout.addWidget( gui.useWorkingDirectory, 18, 2 )

    renderNodeSelectLabel = QLabel( "Render Node Submission" )
    renderNodeSelectLabel.setToolTip( "Choose to render the whole scene, render all nodes as separate jobs, or render separate nodes" )
    scrollLayout.addWidget( renderNodeSelectLabel, 19, 0 )
    gui.renderSelectBox = QComboBox()
    gui.renderSelectBox.addItems( ["Submit All Render Nodes As Separate Jobs", "Select Render Node"] )
    scrollLayout.addWidget( gui.renderSelectBox, 19, 1 )
    gui.includeImageWrite = QCheckBox( "Include ImageWrite Nodes" )
    gui.includeImageWrite.setToolTip( "If enabled, ImageWrite nodes will be included for submission." )
    scrollLayout.addWidget( gui.includeImageWrite, 19, 2 )

    renderNodeLabel = QLabel( "Render Node" )
    renderNodeLabel.setToolTip( "Set the render node to render with, or leave blank to use the node already set." )
    scrollLayout.addWidget( renderNodeLabel, 20, 0 )
    gui.frameDependent = QCheckBox( "Submit Jobs As Frame Dependent" )
    gui.frameDependent.setToolTip( "If enabled, the Katana Job(s) will have Frame Dependencies. If your scene contains static content, do not use!" )
    scrollLayout.addWidget( gui.frameDependent, 20, 2 )

    gui.renderNodeBox = QComboBox()
    gui.renderSelectBox.currentIndexChanged.connect( lambda: RenderSelectionChanged( gui.renderSelectBox, gui.renderNodeBox ) )
    scrollLayout.addWidget( gui.renderNodeBox, 20, 1)
    gui.renderNodeBox.setDisabled(True)

    # Submit button
    buttonLayoutSpacer = QSpacerItem( 0, 0, QSizePolicy.MinimumExpanding, QSizePolicy.Minimum )
    buttonLayout.addItem( buttonLayoutSpacer )

    gui.pipelineToolStatusLabel = QLabel( "No Pipeline Tools Set" )
    gui.pipelineToolStatusLabel.setAlignment( QtCore.Qt.AlignCenter )
    buttonLayout.addWidget( gui.pipelineToolStatusLabel )

    pipelineToolsButton = QPushButton( "Pipeline Tools" )
    pipelineToolsButton.pressed.connect( lambda: PipelineToolsClicked( gui ) )
    buttonLayout.addWidget( pipelineToolsButton )

    submitButton = QPushButton( "Submit" )
    submitButton.pressed.connect( lambda: SubmitPressed(gui) )
    buttonLayout.addWidget( submitButton )

    scrollLayout.addLayout( buttonLayout,21,0,1,3 )

    verticalStretchLayout = QVBoxLayout()
    verticalStretchLayout.addStretch()
    scrollLayout.addLayout( verticalStretchLayout, 22, 0 )

    scrollArea = QScrollArea()
    scrollArea.setWidget(scrollWidget)
    scrollArea.setWidgetResizable(True)
    scrollArea.setFrameStyle(QFrame.NoFrame + QFrame.Plain)

    vLayout = QVBoxLayout()
    vLayout.setObjectName('vLayout')
    vLayout.addWidget(scrollArea)

    gui.setLayout(vLayout)

    LoadStickySettings( gui )

    try:
        pipelineToolStatusMessage = RetrievePipelineToolStatus( raiseOnExitCode=True )
    except subprocess.CalledProcessError as e:
        pipelineToolStatusMessage = HandlePipelineToolsCalledProcessError( e )
    UpdatePipelineToolStatusLabel( gui, pipelineToolStatusMessage )

    # Populate the render node drop down based on the effective check state
    # of the "Include ImageWrite Nodes" checkbox after sticky settings are applied
    PopulateRenderNodeDropDown(gui.includeImageWrite.isChecked(), gui.renderNodeBox)

    # We delay wiring up this signal handler until after the sticky settings are applied to avoid
    # rebuilding the drop-down list multiple times unnecessarily
    gui.includeImageWrite.stateChanged.connect(lambda checked: PopulateRenderNodeDropDown(checked, gui.renderNodeBox))

    # Check if this tab is part of a pane in the main window, or if it is contained in a floating pane
    if gui.window() != UI4.App.MainWindow.CurrentMainWindow():
        # Resize the floating pane's window to accommodate the tab's widgets
        requiredSize = scrollWidget.sizeHint()
        gui.window().resize(max(requiredSize.width() + 20, 200), min(requiredSize.height() + 40, 1000))
[ "def _create_tabbed_pages(self):\n\n notebook = self.top.get_object('notebook')\n notebook_ref = self.top.get_object('notebook_ref')\n\n self._add_tab(notebook, self.primtab)\n self._add_tab(notebook_ref, self.reftab)\n self.track_ref_for_deletion(\"primtab\")\n self.track_ref_for_deletion(\"reftab\")\n\n self.srcref_list = CitationEmbedList(self.dbstate,\n self.uistate,\n self.track,\n self.source.get_citation_list())\n self._add_tab(notebook, self.srcref_list)\n self.track_ref_for_deletion(\"srcref_list\")\n\n self.attr_list = AttrEmbedList(self.dbstate,\n self.uistate,\n self.track,\n self.source.get_attribute_list())\n self._add_tab(notebook, self.attr_list)\n self.track_ref_for_deletion(\"attr_list\")\n\n self.note_tab = NoteTab(self.dbstate,\n self.uistate,\n self.track,\n self.source.get_note_list(),\n notetype=NoteType.EVENT)\n self._add_tab(notebook, self.note_tab)\n self.track_ref_for_deletion(\"note_tab\")\n \n self.note_ref_tab = NoteTab(self.dbstate,\n self.uistate,\n self.track,\n self.source_ref.get_note_list(),\n notetype=NoteType.EVENTREF)\n self._add_tab(notebook_ref, self.note_ref_tab)\n self.track_ref_for_deletion(\"note_ref_tab\")\n \n self.gallery_tab = GalleryTab(self.dbstate,\n self.uistate,\n self.track,\n self.source.get_media_list())\n self._add_tab(notebook, self.gallery_tab)\n self.track_ref_for_deletion(\"gallery_tab\")\n\n self.backref_tab = EventBackRefList(self.dbstate,\n self.uistate,\n self.track,\n self.db.find_backlink_handles(self.source.handle),\n self.enable_warnbox)\n self._add_tab(notebook, self.backref_tab)\n self.track_ref_for_deletion(\"backref_tab\")\n\n self.attr_ref_list = AttrEmbedList(self.dbstate,\n self.uistate,\n self.track,\n self.source_ref.get_attribute_list())\n self._add_tab(notebook_ref, self.attr_ref_list)\n self.track_ref_for_deletion(\"attr_ref_list\")\n\n self._setup_notebook_tabs( notebook)\n self._setup_notebook_tabs( notebook_ref)", "def __init__(self):\r\n super().__init__()\r\n self._setupTab1()", "def _init_tabs(self):\n\n # Add tab_widget and widgets for the different analysis steps\n self.tab_order = ('Files', 'Setup', 'Noisy Pixel', 'Clustering', 'Pre-alignment', 'Track finding',\n 'Alignment', 'Track fitting', 'Residuals', 'Efficiency')\n\n # Add QTabWidget for tab_widget\n self.tabs = QtWidgets.QTabWidget()\n\n # Initialize each tab\n for name in self.tab_order:\n if name == 'Files':\n widget = FilesTab(parent=self.tabs)\n else:\n # Add dummy widget\n widget = QtWidgets.QWidget(parent=self.tabs)\n\n self.tw[name] = widget\n self.tabs.addTab(self.tw[name], name)\n\n # Disable all tabs but FilesTab. 
Enable tabs later via self.enable_tabs()\n if not _DEBUG:\n self.handle_tabs(enable=False)\n else:\n self.handle_tabs(enable=True)\n\n # Add to main layout\n self.main_splitter.addWidget(self.tabs)", "def initTabs(self):\n # Create a tab widget\n tabWidget = QtWidgets.QTabWidget(self)\n\n # Create the configuration tab\n self.confTab = ConfigurationTab(self)\n # Create the input files tab\n self.inputTab = InputFilesTab(self)\n # Create the transcode tab\n self.transcodeTab = TranscodeTab(self)\n\n tabWidget.addTab(self.confTab, \"&Output Configuration\")\n tabWidget.addTab(self.inputTab, \"&Input files\")\n tabWidget.addTab(self.transcodeTab, \"&Transcoding\")\n\n # Connect signals\n self.transcodeTab.launchTranscoding.connect(self.launchTranscoding)\n\n return tabWidget", "def controls_setup(self):\n\n self.inbox = element.NavigationTab(self, css_selector='.messaging a.nav-inbox', alias='Inbox Tab')\n self.sent = element.NavigationTab(self, css_selector='.messaging a.nav-sent', alias='Sent Tab')\n self.write = element.NavigationTab(self, css_selector='.messaging a.nav-write', alias='Write Tab')\n self.archives = element.NavigationTab(self, css_selector='.messaging a.nav-archive', alias='Archives Tab')\n self.trash = element.NavigationTab(self, css_selector='.messaging a.nav-trash', alias='Trash Tab')", "def init_tab(self):\n pass", "def createTabs(self):\r\n self.tab1 = QWidget()\r\n self.tab2 = QWidget()\r\n self.tab3 = QWidget()\r\n self.tab4 = QWidget()\r\n self.tab5 = QWidget()\r\n self.tab6 = QWidget()\r\n self.tab7 = QWidget()\r\n self.tab8 = QWidget()\r\n self.addTab(self.tab1, \"Registro\")\r\n self.addTab(self.tab2, \"Base de Datos\")\r\n self.addTab(self.tab3, \"Ingresos\")\r\n self.addTab(self.tab4, \"Compras\")\r\n self.addTab(self.tab5, \"Gastos\")\r\n self.addTab(self.tab6, \"Res. Diarios\")\r\n self.addTab(self.tab7, \"Res. Mensuales\")\r\n self.addTab(self.tab8, \"Res. 
Anuales\")", "def _create_data_tabs(self):\n self.tab_ctrl.tab.children = []\n for name, (ctrl_cls, args) in self._get_tab_definitions().items():\n ctrl = ctrl_cls(*args)\n # add to tabs\n self.tab_ctrl.add_tab(name, control=ctrl)\n # Set these controls as named attributes on the object\n setattr(self, name.replace(\" \", \"_\"), ctrl)", "def __init__(self, execution):\n self.execution = execution\n\n self.form = {}\n if len(PARAMS) == 0:\n input_boxes = [] #[urwid.Text('Changing the default parameters not allowed')]\n else:\n input_boxes = [urwid.Text('Change the default parameters for the jobs:')]\n for k, v in PARAMS.items():\n edit_box = urwid.Edit(('edittxt', v + ': '), str(self.execution.job_params.get(k, PARAM_DEFAULT_VALUE)))\n input_boxes.append(urwid.AttrMap(edit_box, 'editbx', 'editfc'))\n self.form[k] = edit_box\n\n input_boxes.append(create_button('Change', self.resubmit))\n\n self.widget = urwid.Padding(urwid.Pile(input_boxes), align='center')\n\n BaseTimedWidgetWrap.__init__(self, self.widget)", "def CreateTabs(self):\r\n self.tabs = QtGui.QTabWidget()\r\n self.tabs.addTab(self.fileTab, \"Rinex file\")\r\n self.tabs.addTab(self.navPlotTab, \"Orbit parameter plot\")\r\n self.tabs.addTab(self.obsPlotTab, \"Multipath plot\")", "def _create_tab(tabtype, name, *args, **kwargs):\n\ttab = tabtype(name, *args, **kwargs)\n\n\ttab.window = builder.get_new_output_window()\n\ttab.window.show_all()\n\n\tmgmt.set_font(tab.window.textview, mgmt.get_font())\n\n\tconnect_tab_callbacks(tab, (\"new_message\",\"new_name\",\n\t\t\"server_connected\",\"new_markup\"))\n\n\ttab.input_history = InputHistory(\n\t\ttext_callback = widgets.get_object(\"input_entry\").get_text)\n\n\treturn tab", "def _tabs(self):\n selenium_utils.scroll_into_view(self._driver, self.container_element)\n return {\n self._elements.RELATED_ASMTS_TAB: AssessmentRelatedAsmtsTable,\n self._elements.RELATED_ISSUES_TAB: AssessmentRelatedIssuesTable,\n self._elements.CHANGE_LOG_TAB: self._log_tab_validate}", "def __init__(self, parent=None):\n super().__init__(parent);\n tabBar=EditableTabBar(parent);\n self.setTabBar(tabBar);", "def create_tab(self, type):\n tab = Tabs(self.builder.get_object('window1'), type)\n label_widget = tab.get_label_widget()\n\n # connect label_widget's close button to close_tab()\n label_widget.get_children()[-1].connect('clicked', self.close_tab)\n label_widget.show_all()\n\n # set save, run, terminal button active if not\n save_button = self.builder.get_object('save')\n run_button = self.builder.get_object('run')\n terminal_button = self.builder.get_object('terminal')\n\n for button in [save_button, run_button, terminal_button]:\n button.set_sensitive(True)\n\n return tab, label_widget", "def create_live_tab(self):\n\n self.liveLayout = QGridLayout()\n self.textButton = QPushButton(\"Start live observation\")\n self.textButton.setMinimumHeight(60)\n self.textButton.clicked.connect(self.start_live_observation)\n self.liveLayout.addWidget(self.textButton)\n self.lbTimeLive = QLabel()\n self.lbTimeLive.setAlignment(Qt.AlignCenter)\n\n font = QFont(\"Monospace\")\n font.setPointSize(48)\n self.lbTimeLive.setFont(font)\n if self.timeFormat == HHMMSS:\n self.lbTimeLive.setText(\"00:00:00.000\")\n if self.timeFormat == S:\n self.lbTimeLive.setText(\"0.000\")\n\n self.liveLayout.addWidget(self.lbTimeLive)\n self.liveTab = QWidget()\n self.liveTab.setLayout(self.liveLayout)\n self.toolBox.insertItem(2, self.liveTab, \"Live\")", "def setupTabs(self, elmerDefs, Section, ID):\n self.ID = ID\n 
self.qhash.clear()\n\n layout = self.layout()\n if(layout is not None):\n item = layout.takeAt(0)\n while(item != 0):\n item = None\n if(self.tabWidget is not None):\n self.tabWidget.clear()\n self.tabWidget = None\n item = layout.takeAt(0)\n self.layout = None\n\n # get root element\n self._root = elmerDefs.documentElement()\n\n self.tabWidget = QtGui.QTabWidget()\n self.tabWidget.setUsesScrollButtons(True)\n self.tabWidget.setElideMode(QtCore.Qt.ElideNone)\n\n self._all_stuff = self._root.firstChildElement(\"ALL\")\n self._element = self._root.firstChildElement(\"PDE\")\n\n self.tabs = 0\n\n while(self._element.isNull() is False):\n self._name = self._element.firstChildElement(\"Name\")\n grid = QtGui.QGridLayout()\n params = 0\n for x in range(0, 2):\n if(x == 0):\n if(str(self._name.text()).strip() == \"General\"):\n continue\n self._section = self._all_stuff.firstChildElement(Section)\n else:\n self._section = self._element.firstChildElement(Section)\n\n self._param = self._section.firstChildElement(\"Parameter\")\n\n while(self._param.isNull() is False):\n h = hash_entry_t()\n # label\n widget_type = self._param.attribute(\"Widget\", \"Edit\")\n widget_enabled = self._param.attribute(\"Enabled\", \"True\")\n widget_visible = self._param.attribute(\"Visible\", \"True\")\n paramType = str(self._param.firstChildElement(\"Type\").text()).strip()\n labelName = str(self._param.firstChildElement(\"Name\").text()).strip()\n sifName = str(self._param.firstChildElement(\"SifName\").text()).strip()\n if(sifName == \"\"):\n sifName = labelName\n paramDefault = str(self._param.firstChildElement(\"DefaultValue\").text()).strip()\n whatis = str(self._param.firstChildElement(\"Whatis\").text()).strip()\n statusTip = str(self._param.firstChildElement(\"StatusTip\").text()).strip()\n fullName = \"/\" + str(self._name.text()).strip() + \"/\"\n fullName = fullName + Section + \"/\" + labelName + \"/\" + str(ID)\n h.widget = None\n if(widget_type == \"Edit\"):\n edit = DynLineEdit()\n h.widget = edit.lineEdit\n edit.lineEdit.setText(paramDefault)\n edit.name = fullName\n edit.lineEdit.returnPressed.connect(edit.editSlot)\n edit.lineEdit.textChanged.connect(self._textChangedSlot)\n\n elif(widget_type == \"TextEdit\"):\n textEdit = QtGui.QTextEdit()\n currentFont = textEdit.currentFont()\n fontMetrics = QFontMetrics(currentFont)\n fontHeight = fontMetrics.height()\n textEdit.setMinimumHeight(5*fontHeight)\n textEdit.setMaximumHeight(8*fontHeight)\n h.widget = textEdit\n\n elif(widget_type == \"Combo\"):\n combo = QtGui.QComboBox()\n h.widget = combo\n count = 0\n active = 0\n item = self._param.firstChildElement(\"Item\")\n while (item.isNull() is False):\n itemType = item.attribute(\"Type\", \"\")\n if(itemType == \"Active\"):\n active = count\n itemName = item.firstChildElement(\"Name\")\n count += 1\n combo.insertItem(count,str(itemName.text()).strip())\n item = item.nextSiblingElement(\"Item\")\n combo.setCurrentIndex(active)\n combo.currentIndexChanged.connect(self._comboSlot)\n\n elif(widget_type == \"CheckBox\"):\n l = QtGui.QCheckBox()\n h.widget = l\n l.setText(\"\")\n l.setChecked(False)\n if(paramDefault == \"True\"):\n l.setChecked(True)\n l.stateChanged.connect(self._lSlot)\n\n elif(widget_type == \"Label\"):\n label = QtGui.QLabel()\n font = QFont()\n font.setBold(True)\n font.setUnderline(True)\n label.setFont(font)\n label.setText(labelName)\n h.widget = label\n\n if(h.widget):\n h.widget.setWhatsThis(whatis)\n h.widget.setStatusTip(statusTip)\n h.widget.setProperty(\"dom address\", 
fullName)\n h.elem = self._param\n if(widget_enabled == \"False\"):\n h.widget.setEnabled(False)\n if(widget_type != \"TextEdit\"):\n h.widget.setFixedHeight(18)\n if(widget_type == \"TextEdit\"):\n textEditLabel = QtGui.QLabel()\n textEditLabel.setText(labelName)\n h.label = textEditLabel\n grid.addWidget(h.widget, params, 0, 1, 2)\n\n if(widget_visible == \"False\"):\n h.label.hide()\n h.widget.hide()\n\n elif(widget_type != \"Label\"):\n label = QtGui.QLabel()\n label.setText(labelName)\n h.label = label\n grid.addWidget(h.label, params, 0)\n grid.addWidget(h.widget, params, 1)\n\n if(widget_visible == \"False\"):\n h.label.hide()\n h.widget.hide()\n else:\n h.label = None\n grid.addWidget(h.widget, params, 0)\n self.qhash.update({fullName: h})\n\n self._param = self._param.nextSiblingElement(\"Parameter\")\n params += 1\n\n dummyWidget = QtGui.QWidget()\n grid.addWidget(dummyWidget, params, 0)\n grid.setRowStretch(params, 1)\n\n frmWidget = QtGui.QWidget()\n frmWidget.setLayout(grid)\n\n src = QtGui.QScrollArea()\n src.setWidget(frmWidget)\n src.setMinimumHeight(300)\n src.setWidgetResizable(True)\n\n if(params > 0):\n self.tabWidget.addTab(src, str(self._name.text()).strip())\n\n self.tabs += 1\n self._element = self._element.nextSiblingElement(\"PDE\")\n\n # Buttons:\n lbl = QtGui.QLabel()\n lbl.setText(\"Name:\")\n self.nameEdit = QtGui.QLineEdit()\n self.nameEdit.setText(Section + \" \" + str(ID+1))\n\n self.applyButton = QtGui.QPushButton(\"&Apply\")\n # applyButton.setIcon(addIcon)\n self.applyButton.clicked.connect(self._applyButtonClicked)\n\n self.discardButton = QtGui.QPushButton(\"&Remove\")\n # discardButton.setIcon(removeIcon)\n self.discardButton.clicked.connect(self._discardButtonClicked)\n\n self.okButton = QtGui.QPushButton(\"&OK\")\n # okButton.setIcon(okIcon)\n self.okButton.clicked.connect(self._okButtonClicked)\n\n self.newButton = QtGui.QPushButton(\"&New\")\n # self.newButton.setIcon(newIcon)\n self.newButton.clicked.connect(self._newButtonClicked)\n\n nameLayout = QtGui.QHBoxLayout()\n nameLayout.addWidget(lbl)\n nameLayout.addWidget(self.nameEdit)\n\n buttonLayout = QtGui.QHBoxLayout()\n buttonLayout.addWidget(self.newButton)\n buttonLayout.addWidget(self.applyButton)\n buttonLayout.addWidget(self.okButton)\n buttonLayout.addWidget(self.discardButton)\n\n spareButtonLayout = QtGui.QHBoxLayout()\n self.spareButton = QtGui.QPushButton(\"SpareButton\")\n self.spareButton.setVisible(False)\n spareButtonLayout.addWidget(self.spareButton)\n self.spareButton.clicked.connect(self._spareButtonClicked)\n\n self.spareScroll = QtGui.QScrollArea()\n self.spareScroll.hide()\n\n mainLayout = QtGui.QVBoxLayout()\n mainLayout.addWidget(self.tabWidget)\n mainLayout.addWidget(self.spareScroll)\n mainLayout.addLayout(spareButtonLayout)\n mainLayout.addLayout(nameLayout)\n mainLayout.addLayout(buttonLayout)\n self.setLayout(mainLayout)\n\n self.setWindowTitle(Section)", "def show_create(self):\n\t\t# Get a rectangle with amargin.\n\t\trect = self.renderer._get_rect()\n\t\trect = (rect[0] + 16, rect[1] + 16, rect[2] - 16, rect[3] - 16)\n\n\t\tself.f_tab = ow.Table(4, 2)\n\t\tself.f_tab.topleft = (rect[0], rect[1])\n\n\t\t# Name of the game textbox.\n\t\tself.e_gamename = ow.Entry(\"Ship Wreckyard\")\n\t\tself.l_gamename = ow.Label(\"Name of the game: \")\n\t\tself.f_tab.add_child(0, 0, self.l_gamename)\n\t\tself.f_tab.add_child(0, 1, self.e_gamename)\n\n\t\t# Number of players.\n\t\tself.e_players = ow.Entry(\"2\")\n\t\tself.l_players = ow.Label(\"Number of players: 
\")\n\t\tself.f_tab.add_child(1, 0, self.l_players)\n\t\tself.f_tab.add_child(1, 1, self.e_players)\n\n\t\t# Board size.\n\t\tself.l_boardw = ow.Label(\"Board width: \")\n\t\tself.e_boardw = ow.Entry(\"10\")\n\t\tself.l_boardh = ow.Label(\"Board height: \")\n\t\tself.e_boardh = ow.Entry(\"10\")\n\t\tself.f_tab.add_child(2, 0, self.l_boardw)\n\t\tself.f_tab.add_child(2, 1, self.e_boardw)\n\t\tself.f_tab.add_child(3, 0, self.l_boardh)\n\t\tself.f_tab.add_child(3, 1, self.e_boardh)\n\n\t\t# Create Game button.\n\t\tself.b_cancel = ow.Button(\"Cancel\")\n\t\tself.b_cancel.topleft = (rect[2] - self.b_cancel.width - 100, rect[3] - self.b_cancel.height)\n\t\tself.b_cancel.connect_signal(oc.SIG_CLICKED, self.do_lobby)\n\n\t\t# Cancel button.\n\t\tself.b_create = ow.Button(\"Start Game\")\n\t\tself.b_create.topleft = (rect[2] - self.b_create.width, rect[3] - self.b_create.height)\n\t\tself.b_create.connect_signal(oc.SIG_CLICKED, self.do_start_hosted)\n\n\t\t# Add all the widgets.\n\t\tself.renderer.add_widget(self.f_tab)\n\t\tself.renderer.add_widget(self.b_create)\n\t\tself.renderer.add_widget(self.b_cancel)", "def __init__(self, **kwargs):\n TabBar.__init__(self, **kwargs)", "def create_tab(self, tab, text):\n widget = tab.get_widget()\n self.tab_bar.add(widget, text=text)\n self.tabs[tab] = '.' + widget.winfo_name()\n\n # Select the newly created tab\n last_index = self.tab_bar.index(tk.END)\n self.tab_bar.select(last_index - 1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Augments a staged job info submission file with the appropriate properties for the Pipeline Tool settings.
def ConcatenatePipelineSettingsToJob( jobInfoPath, batchName ): global submissionInfo jobWriterPath = os.path.join( submissionInfo["RepoDirs"]["submission/Integration/Main"], "JobWriter.py" ) scenePath = NodegraphAPI.GetSourceFile() argArray = ["-ExecuteScript", jobWriterPath, "Katana", "--write", "--scene-path", scenePath, "--job-path", jobInfoPath, "--batch-name", batchName] CallDeadlineCommand( argArray, False )
[ "def generate_job_info_file(**kwargs):\n\tif kwargs['renderLayer']:\n\t\tjob_info_file_suffix = \"_%s_deadlineJobInfo.txt\" % kwargs['renderLayer']\n\telse:\n\t\tjob_info_file_suffix = \"_deadlineJobInfo.txt\"\n\tjob_info_file = temp_file(kwargs['scene'], suffix=job_info_file_suffix)\n\n\twith open(job_info_file, 'w') as fh:\n\t\tfh.write(\"Plugin=%s\\n\" % kwargs['plugin'])\n\n\t\tif kwargs['renderLayer']:\n\t\t\tfh.write(\"Name=%s - %s\\n\" % (kwargs['jobName'], kwargs['renderLayer']))\n\t\t\tfh.write(\"BatchName=%s\\n\" % kwargs['jobName'])\n\t\telse:\n\t\t\tfh.write(\"Name=%s\\n\" % kwargs['jobName'])\n\n\t\tfh.write(\"Comment=%s\\n\" % kwargs['comment'])\n\t\tfh.write(\"Frames=%s\\n\" % kwargs['frames'])\n\t\tfh.write(\"ChunkSize=%s\\n\" % kwargs['taskSize'])\n\t\tfh.write(\"Pool=%s\\n\" % kwargs['pool'])\n\t\tfh.write(\"SecondaryPool=%s\\n\" % kwargs['secondaryPool'])\n\t\tfh.write(\"Group=%s\\n\" % kwargs['group'])\n\t\tfh.write(\"Priority=%s\\n\" % kwargs['priority'])\n\t\tfh.write(\"UserName=%s\\n\" % kwargs['username'])\n\t\tif kwargs['priority'] == 0:\n\t\t\tfh.write(\"InitialStatus=Suspended\\n\")\n\n\t\ttry:\n\t\t\tif kwargs['renderLayer']: # Single layer output\n\t\t\t\toutput_path = kwargs['output'][kwargs['renderLayer']]\n\t\t\t\tfh.write(\"OutputDirectory0=%s\\n\" % output_path[0])\n\t\t\t\tfh.write(\"OutputFilename0=%s\\n\" % output_path[1])\n\t\t\telse: # Multiple layer outputs\n\t\t\t\tfor i, layer in enumerate(kwargs['output']):\n\t\t\t\t\toutput_path = kwargs['output'][layer]\n\t\t\t\t\tfh.write(\"OutputDirectory%d=%s\\n\" % (i, output_path[0]))\n\t\t\t\t\tfh.write(\"OutputFilename%d=%s\\n\" % (i, output_path[1]))\n\t\texcept:\n\t\t\tverbose.warning(\"Could not determine render output path(s).\")\n\n\t\tfor i, key in enumerate(kwargs['envVars']):\n\t\t\ttry:\n\t\t\t\tfh.write(\"EnvironmentKeyValue%d=%s=%s\\n\" % (i, key, os.environ[key]))\n\t\t\texcept KeyError:\n\t\t\t\tverbose.warning(\"Environment variable '%s' not set.\" % key)\n\n\t\ttry:\n\t\t\tfh.write(\"ExtraInfo0=%s\\n\" % os.environ['RQ_JOB'])\n\t\t\tfh.write(\"ExtraInfo1=%s\\n\" % os.environ['RQ_SHOT'])\n\t\texcept KeyError:\n\t\t\tpass\n\n\t\tfh.write(\"ExtraInfo2=%s\\n\" % kwargs['submitter'])\n\n\treturn job_info_file", "def _edit_job_file(self, edits):\n for key in edits:\n self.json_dict[key] = edits[key]", "def submission_upload_info(vmcfg, courseId, assignment, account, isTeamAccount, isGraded):\n\n vmpaths = paths.VmcheckerPaths(vmcfg.root_path())\n sbroot = vmpaths.dir_cur_submission_root(assignment, account)\n grade_file = paths.submission_results_grade(sbroot)\n sbcfg = paths.submission_config_file(sbroot)\n if not os.path.exists(sbcfg):\n return _(\"No submission exists for this assignment\")\n\n late_penalty = update_db.compute_late_penalty(assignment, account, vmcfg)\n ta_penalty = update_db.compute_TA_penalty(grade_file)\n deadline_str = vmcfg.assignments().get(assignment, 'Deadline')\n total_points = int(vmcfg.assignments().get(assignment, 'TotalPoints'))\n deadline_struct = time.strptime(vmcfg.assignments().get(assignment, 'Deadline'),\n penalty.DATE_FORMAT)\n sss = submissions.Submissions(vmpaths)\n upload_time_str = sss.get_upload_time_str(assignment, account)\n upload_time_struct = sss.get_upload_time_struct(assignment, account)\n\n deadline_explanation = penalty.verbose_time_difference(upload_time_struct, deadline_struct)\n\n submitter_explanation = None\n if isTeamAccount:\n submitting_user = sss.get_submitting_user(assignment, account)\n if submitting_user is not None:\n 
submitter_explanation = _(\"Submitted by\") + \": \" + submitting_user\n\n max_line_width = 0\n rows_to_print = []\n\n if submitter_explanation is not None:\n rows_to_print += [\n [ submitter_explanation ],\n [ '' ]\n ]\n\n rows_to_print += [\n [ _(\"Submission date\"), upload_time_str ],\n [ _(\"Assignment deadline\"), deadline_str ],\n [ deadline_explanation ]\n ]\n\n if isGraded or not vmcfg.assignments().is_deadline_hard(assignment):\n rows_to_print += [\n [ '' ]\n ]\n\n if not vmcfg.assignments().is_deadline_hard(assignment):\n rows_to_print += [\n [ _(\"Penalty (late submission)\"), str(late_penalty) ],\n ]\n\n if isGraded:\n rows_to_print += [\n [ _(\"Penalty (grading)\"), str(ta_penalty) ],\n [ _(\"Penalty (total)\"), str(ta_penalty + late_penalty) ],\n [ '' ],\n [ _(\"Grade\"), str(total_points + ta_penalty + late_penalty) ]\n ]\n\n for row in rows_to_print:\n row[0] = row[0].decode(\"utf-8\")\n if len(row) == 2 and len(row[0]) > max_line_width:\n max_line_width = len(row[0])\n\n if isGraded:\n # Put a dashed line just above the 'Grade' line\n rows_to_print[len(rows_to_print) - 2][0] = '-' * max_line_width\n\n ret = u\"\"\n for row in rows_to_print:\n if len(row) == 1:\n ret += row[0] + \"\\n\"\n elif len(row) == 2:\n ret += unicode(\"{0[0]:<\" + str(max_line_width) + \"} : {0[1]}\\n\").format(row)\n\n ret += \"\\n\"\n\n return ret", "def update_metadata(self):\n self.open_metadata()\n submitted = {\n 'file_name' : self.file_name,\n 'study_id' : self.study_id,\n 'pub_id' : self.pub_id,\n 'species' : self.species,\n 'cell_type' : self.cell_type,\n 'protocol' : self.protocol,\n 'entry_count' : self.entry_count,\n 'processed_date' : self.processed_date\n }\n self.metadata.append(submitted, ignore_index=True)", "def _add_filename_metadata(self, extra_metadata): \n \n # Make sure product_info section exists\n extra_metadata.setdefault('product_info', {})\n \n file_name = os.path.basename(self.fname)\n fn_comps = file_name.split(\"_\")\n \n if self.__class__ == SAFESentinel1:\n component = fn_comps[2]\n if len(component) < 4: \n resolution = 'N/A'\n else:\n resolution = component[-1]\n \n extra_metadata['product_info']['Resolution'] = resolution\n \n # Add file/scan name \n extra_metadata['product_info']['Name'] = os.path.splitext(file_name)[0]\n \n # Add Satellite and Mission from the file path\n comp_1 = fn_comps[0].upper()\n extra_metadata['platform']['Mission'] = \"Sentinel-%s\" % comp_1[1]\n extra_metadata['platform']['Satellite'] = \"Sentinel-%s\" % comp_1[1:]", "def submit_pipeline_jobs(pipeline_file,args):\r\n\r\n\ttempDict = {}\t\r\n\ttempDict['project_name'] = str(args.jid).split(\".py\")[0][:10] ## project name too long. 
Job not submitted.\r\n\targsDict = vars(args)\r\n\t# print argsDict\r\n\tmyList,myJobs = parse_LSF_job_specification(pipeline_file)\r\n\tmyJobID_list = [] ## for backtrack if any job failed to submit\r\n\tfor x in myList:\r\n\t\t### x = job_name\r\n\t\t\r\n\t\tcurrent_command = \"\\n\".join(myJobs[x][2])\r\n\t\tcurrent_command = multireplace(current_command,argsDict)\r\n\t\tnumber_cores = myJobs[x][3]\r\n\t\tif number_cores == \"\":\r\n\t\t\tnumber_cores = 1\r\n\t\tmemory_request = myJobs[x][4]\r\n\t\tbqueue = myJobs[x][6]\r\n\t\tif memory_request == \"\":\r\n\t\t\tmemory_request = 4000\r\n\t\tif bqueue == \"\":\r\n\t\t\tbqueue = 'standard'\r\n\t\tmyDict = dp(tempDict)\r\n\t\tmyDict['job_name'] = x\r\n\t\tmyDict['jid'] = args.jid\r\n\t\t\r\n\t\tmyDict['number_cores'] = number_cores\r\n\t\tmyDict['bqueue'] = bqueue\r\n\t\t# myDict['memory_request'] = memory_request\r\n\t\t# print memory_request\r\n\t\tmyDict['memory_request'] = multireplace(memory_request,argsDict)\r\n\t\tmyDict['bqueue'] = multireplace(bqueue,argsDict)\r\n\t\ttry:\r\n\t\t\tmyDict['sample_list'] = argsDict[myJobs[x][5]]\r\n\t\t\tdos2unix(myDict['sample_list'])\r\n\t\texcept:\r\n\t\t\tpass\r\n\t\ttry:\r\n\t\t\tmyDict['sample_list'] = argsDict[myJobs[x][5]]\r\n\t\t\tmyDict['number_lines'] = get_number_lines(myDict['sample_list'])\r\n\t\texcept:\r\n\t\t\tmyDict['sample_list'] = p_dir+\"../share/misc/sample.tsv\"\r\n\t\t\tmyDict['number_lines'] = 1\r\n\t\tif myJobs[x][1] == \"\":\r\n\t\t\tmyDict['dependencies'] = \"\"\r\n\t\telse:\r\n\t\t\t## two types:\r\n\t\t\tdep_job_name = myJobs[x][1]\r\n\t\t\t# print (\"###DEBUG##\",myJobs)\r\n\t\t\t# print (\"###DEBUG##\",myJobs[x])\r\n\t\t\tif dep_job_name==\"all\":\r\n\t\t\t\tdep_all_list = [\"ended(%s)\"%(var) for var in myJobID_list]\r\n\t\t\t\tmyDict['dependencies'] = '#BSUB -w \"%s\"'%(\" && \".join(dep_all_list))\r\n\t\t\telif \"[*]\" in dep_job_name:\r\n\t\t\t\tmyDict['dependencies'] = lsf_job_dep_template%(myJobs[dep_job_name.replace(\"[*]\",\"\")][-1]+\"[*]\")\r\n\t\t\telse:\r\n\t\t\t\tmyDict['dependencies'] = lsf_job_dep_template%(myJobs[dep_job_name][-1])\r\n\t\tmyDict['commands'] = current_command\r\n\t\ttry:\r\n\t\t\tjobID = submit_job(myDict)\r\n\t\t\t# os.system(\"mv %s.lsf %s\"%(x,args.jid))\r\n\t\t\tif not \"skip\" in jobID:\r\n\t\t\t\tmyJobID_list.append(jobID)\r\n\t\texcept:\r\n\t\t\tlogging.error(\"%s is failed to submit. The failing is likely to be caused by incorrect input format.\"%(x))\r\n\t\t\tfor k in myDict:\r\n\t\t\t\tprint (k,myDict[k])\r\n\t\t\tfor k in myJobs:\r\n\t\t\t\tprint (k,myJobs[k])\r\n\t\t\t# print myDict['sample_list'],(get_number_lines(myDict['sample_list']))\r\n\t\t\tprint (\"-------------------see info above-----------------\")\r\n\t\t\tfor j in myJobID_list:\r\n\t\t\t\tos.system(\"bkill %s\"%(j))\r\n\t\t\tlogging.error(\"Program exiting! 
Please check input!\")\r\n\t\t\tsys.exit(1)\r\n\t\tmyJobs[x].append(jobID)\r\n\t\tlogging.info(\"%s has been submitted; JobID: %s\"%(x,jobID))\r\n\tsend_user_command(getpass.getuser(),args.jid)", "def modify_job(self, job: Dict) -> Dict:\n\n self.logging_actor.info.remote(self.id, \"modify_job\", time.asctime())\n if \"jobPars\" not in job:\n return job\n cmd = job[\"jobPars\"]\n inputEVNTFile = re.findall(r\"\\-\\-inputEVNTFile=([\\w\\.\\,]*) \\-\", cmd)\n if len(inputEVNTFile) != 1:\n return job\n inFiles = [os.path.join(os.path.expandvars(self.config.harvester['endpoint']), x) for x in inputEVNTFile[0].split(\",\")]\n inFiles = \",\".join(inFiles[0:1])\n self.logging_actor.info.remote(self.id, f\"inFiles: {inFiles}\", time.asctime())\n cmd = re.sub(r\"\\-\\-inputEVNTFile=([\\w\\.\\,]*) \\-\", f\"--inputEVNTFile={inFiles} -\", cmd)\n self.logging_actor.info.remote(self.id, f\"cmd: {cmd}\", time.asctime())\n job[\"jobPars\"] = cmd\n return job", "def _augment_pipeline_cfg(self):", "def extend(args):\n # load workflows\n workflow_orig = read_workflow(args.orig_wf)\n workflow_extend = read_workflow(args.extend_wf)\n\n # extend\n workflow_orig.extend(workflow_extend)\n\n # dump in new file\n filename = args.output if args.output else args.orig_wf\n dump_workflow(workflow_orig, filename)", "def replace_submission(template_job, name, counter, filename):\n template_job=template_job.replace('<jobname>',name+\"_\"+str(counter))\n template_job=template_job.replace('<outfile>',filename)\n job_out=open('job.sh','w')\n job_out.write(template_job)\n job_out.close()", "def add_merge_job(dax, final_name, chunk, level, job_number, final):\n j = Job(name=\"merge.sh\")\n out_file_name = final_name + \"-%d-%d.tar.gz\" %(level, job_number)\n out_file = File(out_file_name)\n if final:\n out_file_name = final_name\n out_file = File(final_name)\n j.uses(out_file, link=Link.OUTPUT, transfer=final)\n j.addArguments(out_file)\n for f in chunk:\n flfn = File(f)\n j.uses(flfn, link=Link.INPUT)\n j.addArguments(flfn)\n j.addProfile(Profile(Namespace.CONDOR, 'request_disk', '100 GB'))\n dax.addJob(j)\n return out_file_name", "def ingest_specific_aw1_manifest(self, filename):\n try:\n self.ingester = GenomicFileIngester(job_id=self.job_id,\n job_run_id=self.job_run.id,\n bucket=self.bucket_name,\n target_file=filename,\n _controller=self)\n\n self.job_result = self.ingester.generate_file_queue_and_do_ingestion()\n except RuntimeError:\n self.job_result = GenomicSubProcessResult.ERROR", "def format_preparation_files(run_dir, sample_sheet, output_dir, pipeline,\n verbose):\n sample_sheet = KLSampleSheet(sample_sheet)\n df_sheet = sample_sheet_to_dataframe(sample_sheet)\n\n if pipeline == 'atropos-and-bowtie2':\n click.echo('Stats collection is not supported for pipeline '\n 'atropos-and-bowtie2')\n else:\n stats = run_counts(run_dir, sample_sheet)\n\n stats['sample_name'] = \\\n df_sheet.set_index('lane', append=True)['sample_name']\n\n # returns a map of (run, project_name, lane) -> preparation frame\n preps = preparations_for_run(run_dir, df_sheet, pipeline=pipeline)\n\n os.makedirs(output_dir, exist_ok=True)\n\n for (run, project, lane), df in preps.items():\n fp = os.path.join(output_dir, f'{run}.{project}.{lane}.tsv')\n\n if pipeline == 'fastp-and-minimap2':\n # stats are indexed by sample name and lane, lane is the first\n # level index. 
When merging, make sure to select the lane subset\n # that we care about, otherwise we'll end up with repeated rows\n df = df.merge(stats.xs(lane, level=1), how='left',\n on='sample_name')\n\n # strip qiita_id from project names in sample_project column\n df['sample_project'] = df['sample_project'].map(\n lambda x: re.sub(r'_\\d+$', r'', x))\n\n # center_project_name is a legacy column that should mirror\n # the values for sample_project.\n df['center_project_name'] = df['sample_project']\n\n df.to_csv(fp, sep='\\t', index=False)\n\n if verbose:\n project_name = remove_qiita_id(project)\n # assume qiita_id is extractable and is an integer, given that\n # we have already passed error-checking.\n qiita_id = project.replace(project_name + '_', '')\n print(\"%s\\t%s\" % (qiita_id, abspath(fp)))", "def edit_job_file(job_file_name, out_file_name, edits):\n o_job = JsonJobsFile()\n o_job._raw_read(job_file_name)\n o_job._edit_job_file(edits)\n o_job.write_as(out_file_name)", "def upload_input_files_metadata(self):\n if os.environ.get('TEST_POSTJOB_NO_STATUS_UPDATE', False):\n return\n temp_storage_site = self.executed_site\n if self.job_report.get('temp_storage_site', 'unknown') != 'unknown':\n temp_storage_site = self.job_report.get('temp_storage_site')\n if not 'source' in self.job_report.get('steps', {}).get('cmsRun', {}).get('input', {}):\n self.logger.info(\"Skipping input filemetadata upload as no inputs were found\")\n return\n direct_stageout = int(self.job_report.get('direct_stageout', 0))\n for ifile in self.job_report['steps']['cmsRun']['input']['source']:\n if ifile['input_source_class'] != 'PoolSource' or ifile.get('input_type', '') != \"primaryFiles\":\n continue\n ## Many of these parameters are not needed and are using fake/defined values\n if not ifile['lfn']: # there are valid use case with no input LFN but we need to count files for crab report\n lfn = '/store/user/dummy/DummyLFN'\n else:\n lfn = ifile['lfn']\n # beware the case where cmsRun was only given a PFN to run on and still we get it in the LFN field\n try:\n Lexicon.lfn(lfn) # will raise if testLfn is not a valid lfn\n except AssertionError:\n lfn = '/store/user/dummy/DummyLFN'\n\n lfn = lfn + \"_\" + str(self.job_id) ## jobs can analyze the same input\n configreq = {\"taskname\" : self.job_ad['CRAB_ReqName'],\n \"globalTag\" : \"None\",\n \"jobid\" : self.job_id,\n \"outsize\" : \"0\",\n \"publishdataname\" : self.publish_name,\n \"appver\" : self.job_ad['CRAB_JobSW'],\n \"outtype\" : \"POOLIN\", ## file['input_source_class'],\n \"checksummd5\" : \"0\",\n \"checksumcksum\" : \"0\",\n \"checksumadler32\" : \"0\",\n \"outlocation\" : self.job_ad['CRAB_AsyncDest'],\n \"outtmplocation\" : temp_storage_site,\n \"acquisitionera\" : \"null\", ## Not implemented\n \"outlfn\" : lfn,\n \"outtmplfn\" : self.source_dir, ## does not have sense for input files\n \"events\" : ifile.get('events', 0),\n \"outdatasetname\" : G_FAKE_OUTDATASET,\n \"directstageout\" : direct_stageout\n }\n #TODO: there could be a better py3 way to get lists of outfileruns/lumis\n outfileruns = []\n outfilelumis = []\n for run, lumis in ifile['runs'].items():\n outfileruns.append(str(run))\n outfilelumis.append(','.join(map(str, lumis)))\n\n configreq = [item for item in configreq.items()] # make a real list of (k,v) pairs as rest_api requires\n #configreq['outfileruns'] = [run for run in outfileruns]\n #configreq['outfilelumis'] = [lumis for lumis in outfilelumis]\n for run in outfileruns:\n configreq.append((\"outfileruns\", run))\n for lumis in 
outfilelumis:\n configreq.append((\"outfilelumis\", lumis))\n\n rest_api = 'filemetadata'\n msg = \"Uploading input metadata for %s to https://%s: %s\" % (lfn, self.rest_url+rest_api, configreq)\n self.logger.debug(msg)\n try:\n self.crabserver.put(api=rest_api, data=encodeRequest(configreq))\n except HTTPException as hte:\n msg = \"Error uploading input file metadata: %s\" % (str(hte.headers))\n self.logger.error(msg)\n raise", "def prepare_fasta(self):\n self.sname_dict , self.sheader_dict , self.only_sname_dict , self.only_header_dict = self.map_submission_names()\n alpha_tuple_list = [ (i.split(\":\")[0] , i.split(\":\")[1]) for i in self.sname_dict.keys() ] \n self.alpha_file_list = [x[1] for x in alpha_tuple_list]\n\n self.existing_seq = self.get_fasta_header(self.SUBMITTED_SEQ)\n self.existing_snames = [ name.split(\":\")[1] for name, rname in self.sname_dict.items() if rname in self.existing_seq ]\n self.alpha_file_list = [e for i, e in enumerate(self.alpha_file_list) if e not in self.existing_snames]\n alpha_file_path_list = [ self.SEQ_PATH + \"/\" + r + \"/\" + self.FASTA_PATH + \"/\" + s for (r,s) in alpha_tuple_list if s in self.alpha_file_list ] \n alpha_file_path_list = self.get_file_list(alpha_file_path_list, self.CONSENSUS_FASTA)\n \n\n self.outseq = self.ALPHA_PREFIX + \"-jhu_sequences.fasta\"\n self.all_outseq = self.ALPHA_PREFIX + \"-all_local_jhu_sequences.fasta\"\n filt_fnames = self.concat_fasta_files(alpha_file_path_list,self.outseq)\n \n ori_headers = self.get_fasta_header(self.outseq)\n #new_headers = [ i.rsplit(\"/\",3)[1].split(\".nanopolish\")[0] for i in ori_headers ] \n new_headers = {}\n for header in ori_headers:\n m = re.search(self.header_pat,header)\n new_head = m.group(0)\n if header not in new_headers:\n new_headers[header] = new_head\n \n #new_headers = { i : re.search(self.header_pat,i).group(0) for i in ori_headers } \n #new_headers = [ i.split(\"4-draft-consensus/\")[1] for i in new_headers ] \n self.rename_submission_fasta(self.outseq,new_headers) \n # Rename header if not in self.only_header_dict\n ### This part of the code is to deal with names of format MDHP-18 to be renamed to MDHP-00018\n #for k ,i in new_headers.items():\n # if i not in self.only_header_dict:\n # parts = i.split(\"-\")\n # if len(parts[1].split(\"_\")[0]) < 5:\n # new_value = \"-\".join([parts[0],parts[1].split(\"_\")[0].zfill(5)+\"_\"+parts[1].split(\"_\")[1]])\n # new_headers[k] = new_value\n \n rename_dict = { key : self.only_header_dict[key] if key in self.only_header_dict else key for key in new_headers.values() }\n self.rename_submission_fasta(self.outseq,rename_dict) \n self.all_jhu_list = [self.SUBMITTED_SEQ ,self.outseq ]\n self.concat_fasta_files(self.all_jhu_list,self.all_outseq)\n return ( self.outseq , self.all_outseq )\n # return final fasta", "def patch(self):\n self.rename_source()\n add_extra_files(self, self.common, self.assets)\n\n # Avoid WM_PROJECT_INST_DIR for ThirdParty, site or jobControl.\n # Use openfoam-site.patch to handle jobControl, site.\n #\n # Filtering: bashrc,cshrc (using a patch is less flexible)\n edits = {\n \"WM_THIRD_PARTY_DIR\": r\"$WM_PROJECT_DIR/ThirdParty #SPACK: No separate third-party\",\n \"WM_VERSION\": str(self.version), # consistency\n \"FOAMY_HEX_MESH\": \"\", # This is horrible (unset variable?)\n }\n rewrite_environ_files( # Adjust etc/bashrc and etc/cshrc\n edits, posix=join_path(\"etc\", \"bashrc\"), cshell=join_path(\"etc\", \"cshrc\")\n )", "def append_job(self, job_specific):\n\n\t\tself.modified_time = 
time.time()\n\n\t\tnew_job = None\n\n\t\t## Make the new job name and path\n\t\tnew_job_name = 'job{0}'.format(str(len(self)).zfill(4))\n\t\tpath = os.path.join(self.path, '{0}_v0'.format(new_job_name))\n\n\t\t## Collect outputs of the previous job in the chain\n\t\tinputs = [submission for submission in self[-1].get_latest()]\n\t\tjob_specific['datasets'] = inputs\n\t\tinput_file_path = None\n\n\t\t## Steer job creation by job type\n\t\tjob_type = job_specific['job_type']\n\n\t\tif job_type == 'prun':\n\t\t\tnew_job = JobPrun(new_job_name, self, path, input_file_path, job_specific)\n\n\t\tif job_type == 'taskid':\n\t\t\tnew_job = JobTaskID(new_job_name, self, path, input_file_path, job_specific)\n\n\t\tif job_type == 'pathena-trf':\n\t\t\tnew_job = JobPathenaTrf(new_job_name, self, path, input_file_path, job_specific)\n\n\t\tif job_type == 'pathena-algo':\n\t\t\tnew_job = JobPathenaAlgo(new_job_name, self, path, input_file_path, job_specific)\n\n\t\tif job_type == 'eventloop':\n\t\t\tnew_job = JobEventLoop(new_job_name, self, path, input_file_path, job_specific)\n\n\t\tnew_job.index = len(self)\n\t\tself.append(new_job)", "def rewrite_info_file(self):\n now = datetime.datetime.now()\n file_text = self.run_info_dict['file_header']+'\\n' + \\\n '! File Last Updated on ' + str(now.year)+'.'+str(now.month)+'.'+ str(now.day)+ \\\n ' at ' + str(now.hour)+':'+str(now.minute)+':'+ str(now.second) +'\\n\\n'\n for keys in self.run_info_dict.keys():\n if keys != 'file_header':\n file_text = file_text+str(keys)+' '+str(self.run_info_dict[keys])+'\\n'\n\n f = open(self.run_info_file,'w')\n f.write(file_text)\n f.close()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Grabs a status message from the JobWriter that indicates which pipeline tools have settings enabled for the current scene.
def RetrievePipelineToolStatus( raiseOnExitCode=False ): global submissionInfo scenePath = NodegraphAPI.GetSourceFile() jobWriterPath = os.path.join(submissionInfo["RepoDirs"]["submission/Integration/Main"], "JobWriter.py") argArray = ["-ExecuteScript", jobWriterPath, "Katana", "--status", "--scene-path", scenePath] statusMessage = CallDeadlineCommand(argArray, hideWindow=False, raiseOnExitCode=raiseOnExitCode) return statusMessage
[ "def settings_status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"settings_status\")", "def get_status(self) -> Text:\n store = self.metadata_store\n return store.get_pipeline_status(self)", "def SubToolSetStatus(SubtoolIndex, Value) -> int:\n pass", "def runner_status():\n print(\"Runner status: {}\".format(RunnerInstance.instance().status()))", "def pipeline_status(self):\n response = self._session.get(\n urlparse.urljoin(self._pipeline, 'status'),\n )\n try:\n return response.json()\n except ValueError:\n logger.warning('Failed to decode JSON from response: %s' % response.text)\n return response.text", "def get_jobconfig_status(self):\n return self.response_json[\"success\"]", "def stage_status(self) -> str:\n return pulumi.get(self, \"stage_status\")", "def pipeline_status_path(self):\n return '/_ah/pipeline/status?root=%s&auto=false' % self.root_pipeline_id", "def UpdatePipelineToolStatusLabel( gui, statusMessage ):\n gui.pipelineToolStatusLabel.setText( statusMessage )", "def set_statusbar(self):\n\n status_text = \"\"\n\n # Tell if input images are loaded.\n if self.status_list[self.status_pointer[\"initialized\"]]:\n status_text += \"Process initialized\"\n\n # Tell if input images are loaded.\n if self.status_list[self.status_pointer[\"bw_loaded\"]]:\n if self.status_list[self.status_pointer[\"color_loaded\"]]:\n status_text += \", B/W reference and color frames loaded\"\n else:\n status_text += \", B/W reference frame loaded\"\n\n # Tell if rigid transformation is done.\n if not self.configuration.skip_rigid_transformation:\n if self.status_list[self.status_pointer[\"rigid_transformed\"]]:\n status_text += \", rigid transformation computed\"\n # Tell if optical flow has been computed.\n if not self.configuration.skip_optical_flow:\n if self.status_list[self.status_pointer[\"optical_flow_computed\"]]:\n status_text += \", images pixel-wise aligned\"\n\n # Tell if the LRGB image is computed.\n if self.status_list[self.status_pointer[\"lrgb_computed\"]]:\n status_text += \", LRGB image computed\"\n\n # Tell if results are written to disk.\n if self.status_list[self.status_pointer[\"results_saved\"]]:\n status_text += \", results written to disk\"\n\n # Tell if the workflow thread is busy at this point.\n if self.status_busy:\n status_text += \", busy\"\n\n # Write the complete message to the status bar.\n self.ui.statusbar.showMessage(status_text)", "def pipeline_status_path(self):\n return '%s/status?root=%s&auto=false' % (self.base_path, self.pipeline_id)", "def status(self):\n return self.getall(WMSCOMMAND.STATUS_WMS)", "def getPanelStatus(self) -> dict:\r\n if self.visprotocol is not None:\r\n return self.visprotocol.getPanelStatus()\r\n return {}", "def maybePrintTagsToStatusBar(self):\n \n scene = self.scene()\n\n if scene != None and self.artifact != None:\n \n # Only act if there are tags for this item artifact.\n tags = self.artifact.getTags()\n \n if len(tags) > 0:\n \n statusStr = \"Tags: [\"\n \n for tag in tags:\n statusStr += tag + \", \"\n \n if statusStr.endswith(\", \"):\n statusStr = statusStr[:-2]\n \n statusStr += \"]\"\n \n scene.statusMessageUpdate.emit(statusStr)", "def amtool_status(self, mess, args):\n self.log.info(\"Current config {0}\".format(self.config))\n self.log.info(\n \"Alertmanager @ {0}\".format(self.config['server_address']))\n helper = AmtoolHelper(\n alertmanager_address=self.config['server_address'])\n result = helper.get_status()\n return result", "def check_flags(self):\n\n resp = self.db.get_collector_detail(self.project_id, 
self.collector_id)\n collector = resp['collector']\n\n return {\n 'run': collector['collector']['run'],\n 'collect': collector['collector']['collect'],\n 'update': collector['collector']['update'],\n 'active': collector['active']\n }", "def getWorkerStatus(self):\n url = core.makeurl(self.apiurl, ['build', '_workerstatus'])\n f = core.http_GET(url)\n tree = ElementTree.parse(f).getroot()\n workerstatus = []\n for worker in tree.findall('building'):\n d = {'id': worker.get('workerid'),\n 'status': 'building'}\n for attr in ('hostarch', 'project', 'package', 'starttime'):\n d[attr] = worker.get(attr)\n d['target'] = '/'.join((worker.get('repository'), worker.get('arch')))\n d['started'] = time.asctime(time.localtime(float(worker.get('starttime'))))\n workerstatus.append(d)\n for worker in tree.findall('idle'):\n d = {'id': worker.get('workerid'),\n 'hostarch': worker.get('hostarch'),\n 'status': 'idle'}\n workerstatus.append(d)\n return workerstatus", "def _get_status(self):\n held_msg=\"\"\n return u'%s%s' % (self.get_status_display(), held_msg)", "def status(self):\n stat = self.run_status.get()\n return stat" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Modifies the Pipeline Tool status label UI element with the supplied message.
def UpdatePipelineToolStatusLabel( gui, statusMessage ): gui.pipelineToolStatusLabel.setText( statusMessage )
[ "def set_status_label(self, msg):\n self.status_label.setText('<b>{}</b>'.format(msg))", "def updateStatus(self, message):\n self.statusArea.setText(message + \"\\n\")", "def set_status_text(self, message, field=0):\n logging.info(\"setting field: %s to string: %s\" % (field, message))\n self.statusbar.SetStatusText(message, field)", "def set_status_text(self, value):\n self.status_bar.SetStatusText(value)", "def show_status(self, status, type):\n if type == \"Positive\":\n self.label.setStyleSheet(\"\"\"color: green;\n font: 12pt \\\"Segoe Print\\\"; \"\"\")\n else:\n self.label.setStyleSheet(\"\"\"color: black;\n font: 12pt \\\"Segoe Print\\\"; \"\"\")\n self.label.setText(status)", "def set_status(self, text):\n self.statusBar().showMessage(text)", "def showStatus(self, message):\n self.status_bar.showMessage(message)", "def bs_displayMessage(status, message):\n # exit if this function run in batch mode.\n if pm.about(batch=True):\n return False\n # Base color Text Color.\n statusColors = {\n 'error': ((255, 40, 20), (0, 0, 0)),\n 'warning': ((255, 177, 86), (0, 0, 0)),\n 'success': ((140, 230, 140), (0, 0, 0))}\n # commandLine1 will be unique in maya in all cases.\n commandLinePtr = omui.MQtUtil.findControl('commandLine1')\n commandLine = wrapInstance(long(commandLinePtr), QtGui.QWidget)\n # get result Line.\n resultLine = commandLine.findChildren(QtGui.QLineEdit)[0]\n palette = resultLine.palette()\n palette.setBrush(QtGui.QPalette.Base, QtGui.QColor(*statusColors[status][0]))\n palette.setColor(QtGui.QPalette.Text, QtGui.QColor(*statusColors[status][1]))\n resultLine.setPalette(palette)\n resultLine.setText('[ ' + status + ' ] ' + message)\n pm.refresh()", "def show_status_message(msg, is_ok = None, priority = 0):\n status_message_manager.add(StatusMessage.status(msg, priority = priority, is_ok = is_ok))", "async def update_status_message(self):\n embed, components = self.get_status_embed_and_components()\n await self.client.message_edit(self.status_message, embed = embed, components = components)", "def _ShowStatus(self, status):\n self._status.setText(status)", "def display_status(self, message):\n raise NotImplementedError()", "def display_message(self, message):\n context_id = self.status_bar.get_context_id(\"\")\n self.status_bar.show()\n self.status_bar.push(context_id, message)", "def custom_status_label(self, custom_status_label):\n\n self._custom_status_label = custom_status_label", "def update_status(self, tag):\n status = 'Processing ' + tag\n idc.msg('\\n%-35s' % status)\n ida_kernwin.hide_wait_box()\n ida_kernwin.show_wait_box(status)", "def __set_status_label_data(self, control, colour, text, *args):\n args_len = len(args)\n if args_len > 1:\n params = args\n elif args_len == 1:\n params = args[0]\n else:\n params = ()\n control.SetLabel(text % params)\n control.SetForegroundColour(colour)", "def set_explorer_status_label(self, incoming_message2):\n incoming_message2 += \" Files Found\"\n self.explorer_label2.config(text=incoming_message2)", "def ar_displayMessage(status, message):\n # exit if this function run in batch mode.\n if pm.about(batch=True):\n return False\n # Base color Text Color.\n statusColors = {\n 'error': ((255, 40, 20), (0, 0, 0)),\n 'warning': ((255, 177, 86), (0, 0, 0)),\n 'success': ((140, 230, 140), (0, 0, 0))}\n # commandLine1 will be unique in maya in all cases.\n commandLinePtr = omui.MQtUtil.findControl('commandLine1')\n commandLine = wrapInstance(long(commandLinePtr), QtGui.QWidget)\n # get result Line.\n resultLine = 
commandLine.findChildren(QtGui.QLineEdit)[0]\n palette = resultLine.palette()\n palette.setBrush(QtGui.QPalette.Base, QtGui.QColor(*statusColors[status][0]))\n palette.setColor(QtGui.QPalette.Text, QtGui.QColor(*statusColors[status][1]))\n resultLine.setPalette(palette)\n resultLine.setText('[ ' + status + ' ] ' + message)\n pm.refresh()", "def update_status(self, msg):\n self.status = self._parse_status(msg)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generic error handling for when a pipeline tools script run via deadline command returns a non-zero exit code. Generates a technical error message for a given subprocess.CalledProcessError instance and displays it in the Katana console. Similarly, a human-readable error message is presented to the user in a modal dialog. The technical error message contains the full command-line arguments, exit code, and standard output from the called process. Returns a user-friendly error message that can be presented to the user in the pipeline tools status label.
def HandlePipelineToolsCalledProcessError( exc ): errorMsg = StringIO() errorMsg.write( "Pipeline Tools encountered an error - the command:" ) errorMsg.write( os.linesep * 2 ) errorMsg.write( exc.cmd ) errorMsg.write( os.linesep * 2 ) errorMsg.write( "return a non-zero (%d) exit code" % exc.returncode ) if exc.output: errorMsg.write( " and the following output:" ) errorMsg.write( os.linesep * 2 ) errorMsg.write( exc.output ) errorMsg = errorMsg.getvalue() # On Windows, print statements output to the console window that is created minimized when Katana launches print( errorMsg ) # Display a human-readable generic error message ShowModalDialog( "Pipeline Tools Error", "Pipeline Tools encountered an error. Check the Katana console for more detailed information." ) return "Pipeline Tools Error"
[ "def handle_build_error(error):\n sys.stderr.write('Error running command `%s`. Returned %s.\\n' % (\n ' '.join(error.argv), str(error.error_code)))", "def print_unable_to_run(exc: \"CalledProcessError\"):\n _print(str(exc), level=MessageLevel.QUIET)", "def set_launch_failed(self):\n self.diagnostics = textwrap.dedent(\n f\"\"\"\n Application {self.app_id} failed 1 times (global limit =2; local limit is =1) due to AM Container for appattempt_1670152552564_21143_000001 exited with exitCode: 7\n Failing this attempt.Diagnostics: [2022-12-14 10:27:49.976]Exception from container-launch.\n Container id: container_e5070_1670152552564_21143_01_000001\n Exit code: 7\n Exception message: Launch container failed\n Shell error output: Unable to find image 'test-repository/test-image:1234' locally\n docker: Error response from daemon: <some error response here, left it out>\n See 'docker run --help'.\n \"\"\"\n )\n return self.set_failed()", "def error_exit(self, msg):\n wrappedmsg = textwrap.fill(msg, 78)\n fullmsg = \"%s\\n%s\" % (wrappedmsg, self.get_usage_command())\n raise SBToolError(fullmsg, True)", "def _error(msg, status=codes.EXIT_FAILURE):\n sys.stderr.write(\"[!] %s\\n\" % str(msg))\n sys.exit(status)", "def error_exit():\n print(\"Invalid arguments!\")\n print(\"Type -h to get help.\")\n exit(0)", "def displayError(*args, **kwargs):\n \n pass", "def _raise_command_exception(args, returncode, output):\n message = ('Command failed with status {}: {}\\n'\n 'Output:-----------------------------------------\\n{}\\n'\n '------------------------------------------------\\n').format(\n returncode, args, output)\n raise Exception(message)", "def StandViz_ReportError( errorobj, args, Header = None ): # error reporting and traceback function\n (MyPath, MyFile) = os.path.split( args[0] ) # retrieve filename and path of running python script\n (MyBaseName, MyExt) = os.path.splitext( MyFile ) # separate basefilename from extension\n errorfilename = \"{}.txt\".format(MyBaseName) # create new error filename based on base of script filename\n ERRFILE = open( errorfilename, 'w' ) # open text file for writting\n if( Header != None ): ERRFILE.write( '%s\\n' % Header ) # if Header defined, write Header to file\n ERRFILE.write( \"Error running '{}'\\n\".format(MyFile) ) # write error message with filename\n MyTrace = errorobj[2] # retrieve error object\n while( MyTrace != None ): # loop through stack trace\n (line, file, name) = ( MyTrace.tb_lineno, MyTrace.tb_frame.f_code.co_filename, MyTrace.tb_frame.f_code.co_name ) # extract line, file, and error name\n F = open( file, 'r' ) # open source file of Python script\n L = F.readlines() # read scripot source into memory\n F.close() # close script file\n code = L[line-1].strip() # extract line of source code that caused error\n ERRFILE.write( \" File '{}', line {}, in {}\\n {}\\n\".format(file, line, name, code) ) # write filename, source code line, error name, and error code\n MyTrace = MyTrace.tb_next # step to next level of call stack trace\n ERRFILE.write( \"errorobj: {}\\n\".format(errorobj) ) # write error object and arguments for call\n ERRFILE.write( \"Calling Argument Vector: {}\\n\".format(args) ) # write calling arguments\n ERRFILE.close() # close text file with error stack trace\n os.system( \"notepad.exe {}\".format(errorfilename) ) # display error log file with notepad.exe", "def _message_failed_job(self):\n self.ensure_one()\n return _(\"Something bad happened during the execution of the job. 
\"\n \"More details in the 'Exception Information' section.\")", "def error(message):\n sys.exit(\"Error: {}\".format(message))", "def user_error(string):\n if not __name__ == '__main__':\n raise UserError(string)\n sys.stderr.write(\"%s: %s\\n\" % (options.script_name, string))\n sys.exit(1)", "def vpython_error_message():\n error_message = (\n \"<p>&#9888; Sorry, spacesimmer! OrbitX has crashed for \"\n \"some reason.</p>\"\n\n \"<p>Any information that OrbitX has on the crash has \"\n \"been saved to a logfile. If you want to get this problem fixed,\"\n \" send the contents of the log file \"\n \"<blockquote>\" +\n logs.logfile_name.replace('\\\\', '\\\\\\\\') +\n \"</blockquote> \"\n \"to Patrick Melanson along with a description of what was \"\n \"happening in the program when it crashed.</p>\"\n\n \"<p>Again, thank you for using OrbitX!</p>\"\n )\n vpython.canvas.get_selected().append_to_caption(f\"\"\"<script>\n if (document.querySelector('div.error') == null) {{\n error_div = document.createElement('div');\n error_div.className = 'error';\n error_div.innerHTML = \"{error_message}\";\n document.querySelector('body').prepend(error_div);\n }}\n </script>\"\"\")\n vpython.canvas.get_selected().append_to_caption(\"\"\"<style>\n .error {\n color: #D8000C !important;\n background-color: #FFBABA;\n margin: 10px 0;\n padding: 10px;\n border-radius: 5px 5px 5px 5px;\n width: 700px;\n }\n span.code {\n color: #D8000C !important;\n font-family: monospace;\n }\n blockquote {\n font-family: monospace;\n }\n </style>\"\"\")\n\n time.sleep(0.1) # Let vpython send out this update", "def error(message):\n print(message)\n exit(-1)", "def command_failed_error(cmd):\n\n output_1 = colored(' - Error: Failed to run command ', 'red')\n output_2 = command(cmd)\n return output_1 + output_2 + '\\n'", "def CallDeadlineCommand(arguments, hideWindow=True, useArgFile=False, useDeadlineBg=False, raiseOnExitCode=False):\n deadlineCommand = GetDeadlineCommand( useDeadlineBg )\n tmpdir = None\n\n if useArgFile or useDeadlineBg:\n tmpdir = tempfile.mkdtemp()\n\n if useDeadlineBg:\n arguments = [ \"-outputfiles\", os.path.join( tmpdir, \"dlout.txt\" ), os.path.join( tmpdir, \"dlexit.txt\" ) ] + arguments\n\n startupinfo = None\n creationflags = 0\n\n if os.name == 'nt':\n if hideWindow:\n # Python 2.6 has subprocess.STARTF_USESHOWWINDOW, and Python 2.7 has subprocess._subprocess.STARTF_USESHOWWINDOW, so check for both.\n if hasattr( subprocess, '_subprocess' ) and hasattr( subprocess._subprocess, 'STARTF_USESHOWWINDOW' ):\n startupinfo = subprocess.STARTUPINFO()\n startupinfo.dwFlags |= subprocess._subprocess.STARTF_USESHOWWINDOW\n elif hasattr( subprocess, 'STARTF_USESHOWWINDOW' ):\n startupinfo = subprocess.STARTUPINFO()\n startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW\n else:\n # still show top-level windows, but don't show a console window\n CREATE_NO_WINDOW = 0x08000000 # MSDN process creation flag\n creationflags = CREATE_NO_WINDOW\n\n if useArgFile:\n arguments = [ CreateArgFile( arguments, tmpdir ) ]\n\n arguments.insert( 0, deadlineCommand )\n\n # Specifying PIPE for all handles to workaround a Python bug on Windows. 
The unused handles are then closed immediatley afterwards.\n proc = subprocess.Popen( arguments, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, startupinfo=startupinfo, creationflags=creationflags )\n output, errors = proc.communicate()\n\n if raiseOnExitCode and proc.returncode != 0:\n try:\n # The quote function was moved to shutil in python 3\n from shutil import quote as shell_quote\n except ImportError:\n # In python 2, quote lived in the pipes module\n from pipes import quote as shell_quote\n cmd = ' '.join([shell_quote(arg) for arg in arguments])\n raise subprocess.CalledProcessError(proc.returncode, cmd, output)\n\n if useDeadlineBg:\n with io.open( os.path.join( tmpdir, \"dlout.txt\" ), 'r', encoding='utf-8' ) as fileHandle:\n output = fileHandle.read()\n\n if tmpdir:\n try:\n shutil.rmtree( tmpdir )\n except:\n print( 'Failed to remove temp directory: \"%s\"' % tmpdir )\n\n return output.strip()", "def called_process_error2exit_decorator(func):\n @functools.wraps(func)\n def func_wrapper(*args, **kwargs):\n try:\n func(*args, **kwargs)\n except subprocess.CalledProcessError as e:\n print(\"{err}:\\n{msg}\".format(err=str(e), msg=e.output))\n sys.exit(1)\n return func_wrapper", "def inputErrExit(self):\n print (self.msg)\n self.help()\n sys.exit(0)", "def check_returncode(self):\n if self.returncode:\n raise CalledProcessError(self.returncode, self.args, self.stdout,\n self.stderr)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Opens a dialog for viewing and modifying the job's pipeline tool settings. The dialog is launched in a deadline command subprocess. All settings are maintained by the JobWriter using a combination of the application name and the scene path.
def OpenIntegrationWindow( raiseOnExitCode=False ): global submissionInfo integrationPath = os.path.join( submissionInfo["RepoDirs"]["submission/Integration/Main"], "IntegrationUIStandAlone.py" ) scenePath = NodegraphAPI.GetSourceFile() if not scenePath: raise SceneNotSavedError() argArray = ["-ExecuteScript", integrationPath, "-v", "2", "-d", "Katana", "Draft", "Shotgun", "FTrack", "--path", scenePath] try: pipelineToolStatus = CallDeadlineCommand(argArray, hideWindow=False, raiseOnExitCode=True) except subprocess.CalledProcessError as e: pipelineToolStatus = HandlePipelineToolsCalledProcessError( e ) return pipelineToolStatus
[ "def shotWinUI(*args):\n### ---------- should check for current project\n if cmds.window(\"shotWin\", exists = True):\n cmds.deleteUI(\"shotWin\")\n\n widgets[\"win\"] = cmds.window(\"shotWin\", t= \"Charlex Shot Manager\", w=1000, h=560, s=False)\n widgets[\"mainCLO\"] = cmds.columnLayout(w=1000, h=560)\n\n #######################\n #top bar layout\n #######################\n\n #rowlayout\n widgets[\"bannerFLO\"] = cmds.formLayout(w=1000, h=50, bgc=(.300,.3,.300))\n widgets[\"bannerImage\"] = cmds.image(image=\"{0}/banner_shotWin.png\".format(pi.images))\n widgets[\"spotImage\"] = cmds.iconTextButton(style=\"iconOnly\", image = \"{0}/defaultSpotImage.jpg\".format(pi.images), w=50, h=50, ann=ann[\"spotIcon\"], c=changeSpotIcon)\n widgets[\"projectText\"] = cmds.text(l=\"Project Name: Spot Name\", font = \"boldLabelFont\")\n widgets[\"sceneText\"] = cmds.text(l=\"Current Scene\") \n widgets[\"projectButton\"] = cmds.button(l=\"Change Job\", w = 100, h= 40, bgc= (.5,.5,.5), ann = ann[\"proj\"], c=setProject)\n widgets[\"refreshButton\"] = cmds.button(l=\"Refresh\", w = 60, h= 40, bgc= (.2,.2,.2), c = populateWindow)\n widgets[\"exploreButton\"] = cmds.button(l=\"Explore\\nReference\", w = 60, h= 40, bgc= (.7,.5,.3), c=exploreReference)\n\n cmds.formLayout(widgets[\"bannerFLO\"], e=True, af = [(widgets[\"bannerImage\"], \"top\", 0),\n (widgets[\"bannerImage\"], \"left\", 0),\n (widgets[\"projectText\"], \"left\", 400),\n (widgets[\"projectText\"], \"top\", 5),\n (widgets[\"sceneText\"], \"top\", 25),\n (widgets[\"spotImage\"], \"left\", 335), \n (widgets[\"sceneText\"], \"left\", 400),\n (widgets[\"projectButton\"], \"left\", 740),\n (widgets[\"projectButton\"], \"top\", 5),\n (widgets[\"refreshButton\"], \"left\", 850),\n (widgets[\"refreshButton\"], \"top\", 5),\n (widgets[\"exploreButton\"], \"left\", 920),\n (widgets[\"exploreButton\"], \"top\", 5), \n ])\n\n ######################\n #bottom layout\n ########################\n cmds.setParent(widgets[\"mainCLO\"])\n widgets[\"lowFLO\"] = cmds.formLayout()\n widgets[\"lowTLO\"] = cmds.tabLayout(bgc = (.2, .2, .2 ))\n\n ################\n #shots tab\n ################\n cmds.setParent(widgets[\"lowTLO\"])\n widgets[\"shotsFLO\"] = cmds.formLayout(\"Shots - Anim, Light and FX\",w=1000, h=500, bgc = (.4,.4,.4))\n \n ##############\n #shot asset List layout\n ###############\n widgets[\"shotAssListCLO\"] = cmds.columnLayout(w=240, bgc = (.5, .5,.5))\n widgets[\"shotAssListFLO\"] = cmds.formLayout(w=240, h= 500)\n widgets[\"shotAssListTSL\"] = cmds.textScrollList(w=240, h=465, ams=True) \n\n widgets[\"shotAssListTitleText\"] = cmds.text(l=\"Referenced Assets In Current Scene\", font = \"boldLabelFont\", al=\"center\", ann=ann[\"reffedAssets\"])\n\n cmds.formLayout(widgets[\"shotAssListFLO\"], e=True, af = [\n (widgets[\"shotAssListTSL\"], \"top\", 35),\n (widgets[\"shotAssListTSL\"], \"left\", 0),\n \n (widgets[\"shotAssListTitleText\"], \"top\", 5),\n (widgets[\"shotAssListTitleText\"], \"left\", 20),\n ])\n\n ##############\n #shot List layout\n ###############\n cmds.setParent(widgets[\"shotsFLO\"])\n widgets[\"shotListCLO\"] = cmds.columnLayout(w=130, bgc = (.5, .5, .5))\n widgets[\"shotListFLO\"] = cmds.formLayout(w=130, h= 500)\n widgets[\"shotListTSL\"] = cmds.textScrollList(w=130, h=460)\n widgets[\"shotListTitleText\"] = cmds.text(l=\"Shot List\", font = \"boldLabelFont\", ann=ann[\"shotList\"])\n widgets[\"shotListCharText\"] = cmds.text(l=\"Shots\")\n\n cmds.formLayout(widgets[\"shotListFLO\"], e=True, af = [\n 
(widgets[\"shotListTSL\"], \"top\", 40), \n (widgets[\"shotListTSL\"], \"left\", 0),\n (widgets[\"shotListTitleText\"], \"top\", 5),\n (widgets[\"shotListTitleText\"], \"left\", 30),\n (widgets[\"shotListCharText\"], \"top\", 25),\n (widgets[\"shotListCharText\"], \"left\", 5),\n ])\n\n ##############\n #shot Status layout\n ############### \n cmds.setParent(widgets[\"shotsFLO\"])\n widgets[\"shotInfoAssListTLO\"] = cmds.tabLayout(w=200, h=500)\n widgets[\"shotInfoFLO\"] = cmds.formLayout(\"ShotInfo\", w=200, h=500, bgc= (.5, .5, .5))\n widgets[\"shotInfoTitleText\"] = cmds.text(l=\"Shot Information\", font = \"boldLabelFont\")\n widgets[\"shotInfoNameText\"] = cmds.text(l=\"<Shot Name>\", font = \"boldLabelFont\", al=\"center\", w=200)\n widgets[\"shotInfoVariantText\"] = cmds.text(l=\"<Var Name>\", font = \"boldLabelFont\", al=\"center\", w=200) \n widgets[\"shotInfoPic\"] = cmds.image(image = \"{0}/kitten-photo-632-3.jpg\".format(pi.images), w= 154, h=154)\n widgets[\"shotAnnCB\"] = cmds.checkBox(l=\"Tooltips popups?\", value=tooltips, changeCommand=tooltipSet)\n\n cmds.formLayout(widgets[\"shotInfoFLO\"], e=True, af =[\n (widgets[\"shotInfoNameText\"], \"top\", 60),\n (widgets[\"shotInfoNameText\"], \"left\", 0),\n (widgets[\"shotInfoVariantText\"], \"top\", 80),\n (widgets[\"shotInfoVariantText\"], \"left\", 0), \n (widgets[\"shotInfoPic\"], \"top\", 110),\n (widgets[\"shotInfoPic\"], \"left\", 23),\n (widgets[\"shotInfoTitleText\"], \"top\", 5),\n (widgets[\"shotInfoTitleText\"], \"left\", 35),\n (widgets[\"shotAnnCB\"], \"top\", 420),\n (widgets[\"shotAnnCB\"], \"left\", 50), \n ])\n\n cmds.setParent(widgets[\"shotInfoAssListTLO\"])\n widgets[\"shotAssRigListTLO\"] = cmds.tabLayout(\"ProjAssets\", w=200, h=500) \n widgets[\"shotAssRigCharListCLO\"] = cmds.columnLayout(\"Chars\", w=200, h=500)\n widgets[\"shotAssRigCharListTSL\"] = cmds.textScrollList(w=200, h=450) \n cmds.setParent(widgets[\"shotAssRigListTLO\"])\n widgets[\"shotAssRigPropListCLO\"] = cmds.columnLayout(\"Props\", w=200, h=500)\n widgets[\"shotAssRigPropListTSL\"] = cmds.textScrollList(w=200, h=450) \n cmds.setParent(widgets[\"shotAssRigListTLO\"])\n widgets[\"shotAssRigSetListCLO\"] = cmds.columnLayout(\"Sets\", w=200, h=500)\n widgets[\"shotAssRigSetListTSL\"] = cmds.textScrollList(w=200, h=450) \n cmds.setParent(widgets[\"shotAssRigListTLO\"])\n widgets[\"shotAnmMstListCLO\"] = cmds.columnLayout(\"Anm\", w=200, h=500)\n widgets[\"shotAnmMstListTSL\"] = cmds.textScrollList(w=200, h=450) \n ###############\n #Shot Action layout\n ################\n cmds.setParent(widgets[\"shotsFLO\"])\n widgets[\"shotActionFLO\"] = cmds.formLayout(w=150, h=500, bgc =(.5, .5, .5))\n widgets[\"shotActionRefAssBut\"] = cmds.button(l=\"-> Ref Asset In ->\", w=130, h=20, bgc = (.7,.7,.7), c=referenceAsset, ann=ann[\"refAsset\"]) \n widgets[\"shotActionReplaceBut\"] = cmds.button(l=\"-> Replace Reference ->\", w=130, h=20, en=True, bgc = (.7,.7,.7), ann=ann[\"replace\"], c=replaceReference)\n widgets[\"shotActionRefMultBut\"] = cmds.button(l=\"-> Ref Multiple ->\", w=100, h=20, en=True, bgc = (.7,.7,.7), ann=ann[\"refMult\"], c=referenceMultiple)\n widgets[\"shotActionRefMultIFG\"] = cmds.intFieldGrp(w=20, v1=1)\n widgets[\"shotActionReloadBut\"] = cmds.button(l=\"Reload Reference ->\", w=130, h=20, bgc = (.7,.7,.7), c=reloadReference, ann=ann[\"reload\"])\n widgets[\"shotActionUnloadBut\"] = cmds.button(l=\"Unload Reference ->\", w=130, h=20, bgc = (.7,.7,.7), c=unloadReference, ann=ann[\"unload\"])\n widgets[\"shotActionRemoveBut\"] = 
cmds.button(l=\"Remove Reference ->\", w=130, h=20, bgc = (.7,.7,.7), c=removeReference, ann=ann[\"remove\"])\n widgets[\"shotActionQIncrBut\"] = cmds.button(l=\"Quick Increment\", w=130, h=20, en=True, bgc = (.7,.7,.7), c=quickIncrement, ann=ann[\"qkIncr\"])\n widgets[\"shotActionNewShotBut\"] = cmds.button(l=\"Create new shot\", en=True, w=130, h=20, bgc = (.7,.7,.7), c=createNewShot, ann=ann[\"crtShot\"]) \n widgets[\"shotActionTitle\"] = cmds.text(l=\"Shot Actions\", font = \"boldLabelFont\")\n\n # create an embedded tab layout for each type of button!\n widgets[\"shotActionTypeTLO\"] = cmds.tabLayout(\"Specific Actions\", w=150, h=180, bgc=(.2,.2,.2))\n\n widgets[\"shotActionTypeAnmSLO\"] = cmds.scrollLayout(\"Anm\", w=150, h=180, verticalScrollBarThickness=5) \n widgets[\"shotActionTypeAnmFLO\"] = cmds.formLayout(w=150,h=240, bgc=(.4, .45, .4))\n widgets[\"shotActionExpAnimBut\"] = cmds.button(l=\"Export Anim ->\", w=130, h=20, en=True, bgc=(.7,.7,.7), c=exportAnimation, ann=ann[\"expAnim\"])\n widgets[\"shotActionImpAnimBut\"] = cmds.button(l=\"Import Anim ->\", w=130, h=20, en=True, bgc=(.7,.7,.7), c=importAnimation, ann=ann[\"impAnim\"])\n widgets[\"shotActionRefToBut\"] = cmds.button(l=\"-> Reference To\", w=130, h=20, en=True, bgc=(.7,.7,.7), c=referenceTo, ann=ann[\"refTo\"])\n widgets[\"shotActionCtrlMkBut\"] = cmds.button(l=\"Ctrl On Selection\", w=130, h=20, en=True, bgc=(.7,.7,.7), c=controlMaker, ann=ann[\"ctrlMk\"])\n\n cmds.setParent(widgets[\"shotActionTypeTLO\"])\n widgets[\"shotActionTypeLgtSLO\"] = cmds.scrollLayout(\"Lgt\", w=150, h=180, verticalScrollBarThickness=5) \n widgets[\"shotActionTypeLgtFLO\"] = cmds.formLayout(w=150,h=240, bgc=(.4, .4, .45))\n widgets[\"shotActionGenericBut\"] = cmds.button(l=\"Render Setup\", w=130, h=20, en=True, bgc = (.7,.7,.7), c=renderSetup, ann=ann[\"rendGen\"])\n\n widgets[\"shotActionMtlBut\"] = cmds.button(l=\"-> Apply Mtl To Sel ->\", w=130, h=20, en=False, bgc = (.7,.7,.7), ann=ann[\"mtlApply\"])\n\n cmds.setParent(widgets[\"shotActionTypeTLO\"])\n widgets[\"shotActionTypeFxSLO\"] = cmds.scrollLayout(\"Fx\", w=150, h=240, verticalScrollBarThickness=5) \n widgets[\"shotActionTypeFxFLO\"] = cmds.formLayout(w=150,h=180, bgc=(.45, .4, .4))\n \n\n#---------------- add any fx buttons here and then postion them below \n\n cmds.formLayout(widgets[\"shotActionTypeLgtFLO\"], e=True, af = [\n (widgets[\"shotActionGenericBut\"], \"top\", 10),\n (widgets[\"shotActionGenericBut\"], \"left\", 2),\n (widgets[\"shotActionMtlBut\"], \"top\", 40),\n (widgets[\"shotActionMtlBut\"], \"left\", 2) \n ])\n\n cmds.formLayout(widgets[\"shotActionTypeAnmFLO\"], e=True, af = [\n (widgets[\"shotActionExpAnimBut\"], \"top\", 10),\n (widgets[\"shotActionExpAnimBut\"], \"left\", 2),\n (widgets[\"shotActionImpAnimBut\"], \"top\", 40),\n (widgets[\"shotActionImpAnimBut\"], \"left\", 2),\n (widgets[\"shotActionRefToBut\"], \"top\", 70),\n (widgets[\"shotActionRefToBut\"], \"left\", 2),\n (widgets[\"shotActionCtrlMkBut\"], \"top\", 100),\n (widgets[\"shotActionCtrlMkBut\"], \"left\", 2) \n ])\n\n cmds.formLayout(widgets[\"shotActionFLO\"], e=True, af = [\n (widgets[\"shotActionTitle\"], \"top\", 5),\n (widgets[\"shotActionTitle\"], \"left\", 35),\n (widgets[\"shotActionRefAssBut\"], \"top\", 30),\n (widgets[\"shotActionRefAssBut\"], \"left\", 10),\n (widgets[\"shotActionRefMultBut\"], \"top\", 60),\n (widgets[\"shotActionRefMultBut\"], \"left\", 10),\n (widgets[\"shotActionRefMultIFG\"], \"top\", 60),\n (widgets[\"shotActionRefMultIFG\"], \"left\", 110),\n 
(widgets[\"shotActionReloadBut\"], \"top\", 90),\n (widgets[\"shotActionReloadBut\"], \"left\", 10),\n (widgets[\"shotActionUnloadBut\"], \"top\", 120),\n (widgets[\"shotActionUnloadBut\"], \"left\", 10),\n (widgets[\"shotActionRemoveBut\"], \"top\", 150),\n (widgets[\"shotActionRemoveBut\"], \"left\", 10),\n (widgets[\"shotActionReplaceBut\"], \"top\", 180),\n (widgets[\"shotActionReplaceBut\"], \"left\", 10),\n (widgets[\"shotActionQIncrBut\"], \"top\", 210),\n (widgets[\"shotActionQIncrBut\"], \"left\", 10),\n (widgets[\"shotActionTypeTLO\"], \"top\", 270),\n (widgets[\"shotActionTypeTLO\"], \"left\", 0), \n (widgets[\"shotActionNewShotBut\"], \"top\", 470),\n (widgets[\"shotActionNewShotBut\"], \"left\", 10), \n ])\n\n ###############\n #Shot anmLgt tab layout\n ################\n cmds.setParent(widgets[\"shotsFLO\"])\n widgets[\"anmLgtFLO\"] = cmds.formLayout(w=250, h=500, bgc = (.4, .4, .4))\n widgets[\"anmLgtTLO\"] = cmds.tabLayout(w=250, h=500, bgc = (.4,.4,.4), changeCommand = varTabChange)\n ###############\n #shot anm tab layout\n ###############\n widgets[\"anmTabCLO\"] = cmds.columnLayout(\"ANM\", w=250, bgc = (.4, .45, .4))\n #################\n #anm info frame and column layouts\n ################# \n cmds.separator(h=5)\n widgets[\"anmVariationsTSL\"] = cmds.textScrollList(w=250, h=90)\n widgets[\"anmLastWSTFG\"] = cmds.textFieldGrp(l=\"Latest WS: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False)\n widgets[\"anmLastMasterTFG\"] = cmds.textFieldGrp(l=\"Master: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False)\n cmds.separator(h=5)\n\n #################\n #anm 'workshop' frame and column layouts\n #################\n cmds.setParent(widgets[\"anmTabCLO\"])\n widgets[\"anmWSFLO\"] = cmds.frameLayout(\"Animation Workshop\", w=250, h=165, bgc= (.3, .3, .3))\n widgets[\"anmWSFoLO\"] = cmds.formLayout(w=250, h=165, bgc = (.4,.45,.4))\n\n widgets[\"anmWSOpenBut\"] = cmds.button(l=\"Open Latest\\nAnim\\nWorkshop\", w=70, h=50, en=False, bgc = (.4,.5,.8), ann=ann[\"openWS\"])\n widgets[\"anmWSIncrBut\"] = cmds.button(l=\"Increment Anim Workshop\", w=160, h=50, en=True, bgc = (.7,.6,.4), ann=ann[\"incrWS\"], c = partial(incrementWorkshop, \"anm\"))\n widgets[\"anmWSPrevBut\"] = cmds.button(l=\"Previous Anim Workshops\", w=160, bgc = (.7,.7,.7), en=False, ann=ann[\"prevWS\"])\n widgets[\"anmWSInfoBut\"] = cmds.button(l=\"WS Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"WSInfo\"]) \n widgets[\"anmWSNewVarBut\"] = cmds.button(l=\"Create New Variant\", w=160, h=30, bgc = (.2,.2,.2), c=partial(createVariant, \"anm\"), ann=ann[\"crtVariant\"])\n widgets[\"anmVarIconBut\"] = cmds.button(l=\"Create Var\\nIcon\", w=70, h=30, bgc = (.7,.7,.7), en=False, c=createShotIcon, ann=ann[\"crtIcon\"]) \n\n cmds.formLayout(widgets[\"anmWSFoLO\"], e=True, af = [\n (widgets[\"anmWSOpenBut\"], \"left\", 5),\n (widgets[\"anmWSOpenBut\"], \"top\", 10),\n (widgets[\"anmWSIncrBut\"], \"left\", 80),\n (widgets[\"anmWSIncrBut\"], \"top\", 10),\n (widgets[\"anmWSInfoBut\"], \"left\", 5),\n (widgets[\"anmWSInfoBut\"], \"top\", 65),\n (widgets[\"anmWSPrevBut\"], \"left\", 80),\n (widgets[\"anmWSPrevBut\"], \"top\", 65),\n (widgets[\"anmWSNewVarBut\"], \"left\", 5),\n (widgets[\"anmWSNewVarBut\"], \"top\", 105),\n (widgets[\"anmVarIconBut\"], \"left\", 170),\n (widgets[\"anmVarIconBut\"], \"top\", 105), \n ])\n #################\n #anm 'master' frame and column layouts\n #################\n cmds.setParent(widgets[\"anmTabCLO\"])\n 
widgets[\"anmMstFLO\"] = cmds.frameLayout(\"Animation Master\", w=250, h=200, bgc= (.3, .3, .3))\n widgets[\"anmMstFoLO\"] = cmds.formLayout(w=250, h=200, bgc = (.4,.45,.4))\n widgets[\"anmMstOpenBut\"] = cmds.button(l=\"Open Anim\\nMaster\", w=70, h=50, en=False, bgc = (.5,.7,.5), ann=ann[\"openMst\"])\n widgets[\"anmMstIncrBut\"] = cmds.button(l=\"Publish Anim Master\\n(Import Refs)\", w=160, h=50, en=False, bgc = (.7,.5,.5), ann=ann[\"pubRefMst\"])\n widgets[\"anmMstBgIncrBut\"] = cmds.button(l=\"BG Publish Anim Master (Import Refs)\", w=235, en=False, bgc = (.3,.3,.3), ann=ann[\"pubBGMst\"])\n widgets[\"anmMstPrevBut\"] = cmds.button(l=\"Previous Anim Masters\", w=160, en=False, bgc = (.7,.7,.7), ann=ann[\"prevMst\"])\n widgets[\"anmMstInfoBut\"] = cmds.button(l=\"Mst Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"MstInfo\"])\n\n\n \n cmds.formLayout(widgets[\"anmMstFoLO\"], e=True, af = [\n (widgets[\"anmMstOpenBut\"], \"left\", 5),\n (widgets[\"anmMstOpenBut\"], \"top\", 10),\n (widgets[\"anmMstIncrBut\"], \"left\", 80),\n (widgets[\"anmMstIncrBut\"], \"top\", 10),\n (widgets[\"anmMstBgIncrBut\"], \"left\", 5),\n (widgets[\"anmMstBgIncrBut\"], \"top\", 65), \n (widgets[\"anmMstInfoBut\"], \"left\", 5),\n (widgets[\"anmMstInfoBut\"], \"top\", 95), \n (widgets[\"anmMstPrevBut\"], \"left\", 80),\n (widgets[\"anmMstPrevBut\"], \"top\", 95), \n \n ])\n ###############\n #shot Lgt tab layout\n ################ \n cmds.setParent(widgets[\"anmLgtTLO\"]) \n widgets[\"lgtTabCLO\"] = cmds.columnLayout(\"LGT\", w=250, bgc = (.4,.4,.45))\n #################\n #lgt info frame and column layouts\n ################# \n cmds.separator(h=5)\n widgets[\"lgtVariationsTSL\"] = cmds.textScrollList(w=250, h=90)\n widgets[\"lgtLastWSTFG\"] = cmds.textFieldGrp(l=\"Latest WS: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False)\n widgets[\"lgtLastMasterTFG\"] = cmds.textFieldGrp(l=\"Master: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False) \n cmds.separator(h=5)\n #################\n #lgt 'workshop' frame and column layouts\n #################\n cmds.setParent(widgets[\"lgtTabCLO\"])\n widgets[\"lgtWSFLO\"] = cmds.frameLayout(\"Lighting Workshop\", w=250, h=165, bgc= (.3, .3, .3))\n widgets[\"lgtWSFoLO\"] = cmds.formLayout(w=250, h=165, bgc = (.4,.4,.45))\n\n widgets[\"lgtWSOpenBut\"] = cmds.button(l=\"Open Latest\\nLight\\nWorkshop\", w=70, h=50, en=False, bgc = (.4,.5,.8), ann=ann[\"openWS\"])\n widgets[\"lgtWSIncrBut\"] = cmds.button(l=\"Increment Light Workshop\", w=160, h=50, en=True, bgc = (.7,.6,.4), c = partial(incrementWorkshop, \"lgt\"), ann=ann[\"incrWS\"])\n widgets[\"lgtWSInfoBut\"] = cmds.button(l=\"WS Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"WSInfo\"])\n widgets[\"lgtWSPrevBut\"] = cmds.button(l=\"Previous Light Workshops\", w=160, en=False, bgc = (.7,.7,.7), ann=ann[\"prevWS\"])\n widgets[\"lgtWSNewVarBut\"] = cmds.button(l=\"Create New Variant\", w=160, h=30, bgc = (.2,.2,.2), c=partial(createVariant, \"lgt\"), ann=ann[\"crtVariant\"]) \n widgets[\"lgtVarIconBut\"] = cmds.button(l=\"Create Var\\nIcon\", w=70, h=30, en=False, bgc = (.7,.7,.7), c=createShotIcon, ann=ann[\"crtIcon\"])\n\n cmds.formLayout(widgets[\"lgtWSFoLO\"], e=True, af = [\n (widgets[\"lgtWSOpenBut\"], \"left\", 5),\n (widgets[\"lgtWSOpenBut\"], \"top\", 10),\n (widgets[\"lgtWSIncrBut\"], \"left\", 80),\n (widgets[\"lgtWSIncrBut\"], \"top\", 10),\n (widgets[\"lgtWSInfoBut\"], \"left\", 5),\n (widgets[\"lgtWSInfoBut\"], \"top\", 65),\n 
(widgets[\"lgtWSPrevBut\"], \"left\", 80),\n (widgets[\"lgtWSPrevBut\"], \"top\", 65),\n (widgets[\"lgtWSNewVarBut\"], \"left\", 5),\n (widgets[\"lgtWSNewVarBut\"], \"top\", 105),\n (widgets[\"lgtVarIconBut\"], \"left\", 170),\n (widgets[\"lgtVarIconBut\"], \"top\", 105), \n ]) \n #################\n #lgt 'master' frame and column layouts\n #################\n cmds.setParent(widgets[\"lgtTabCLO\"])\n widgets[\"lgtMstFLO\"] = cmds.frameLayout(\"Lighting Master\", w=250, h=200, bgc= (.3, .3, .3))\n widgets[\"lgtMstFoLO\"] = cmds.formLayout(w=250, h=200, bgc = (.4,.4,.45))\n widgets[\"lgtMstOpenBut\"] = cmds.button(l=\"Open\\nLight Master\", w=70, h=50, en=True, bgc = (.5,.7,.5), c=partial(openShotMaster, \"lgt\"), ann=ann[\"openMst\"])\n widgets[\"lgtMstIncrBut\"] = cmds.button(l=\"Publish Light Master\\n(Keep Refs)\", w=160, h=50, en=False, bgc = (.7,.5,.5), ann=ann[\"pubRefMst\"])\n widgets[\"lgtMstInfoBut\"] = cmds.button(l=\"Mst Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"MstInfo\"]) \n widgets[\"lgtMstPrevBut\"] = cmds.button(l=\"Previous Light Masters\", w=160, en=False, bgc = (.7,.7,.7), ann=ann[\"prevMst\"])\n widgets[\"lgtMstBgIncrBut\"] = cmds.button(l=\" BG Publish Light Master (Import Refs)\", w=235, en=False, bgc = (.3,.3,.3), ann=ann[\"pubBGMst\"]) \n\n cmds.formLayout(widgets[\"lgtMstFoLO\"], e=True, af = [\n (widgets[\"lgtMstOpenBut\"], \"left\", 5),\n (widgets[\"lgtMstOpenBut\"], \"top\", 10),\n (widgets[\"lgtMstIncrBut\"], \"left\", 80),\n (widgets[\"lgtMstIncrBut\"], \"top\", 10),\n (widgets[\"lgtMstBgIncrBut\"], \"left\", 5),\n (widgets[\"lgtMstBgIncrBut\"], \"top\", 65), \n (widgets[\"lgtMstInfoBut\"], \"left\", 5),\n (widgets[\"lgtMstInfoBut\"], \"top\", 95),\n (widgets[\"lgtMstPrevBut\"], \"left\", 80),\n (widgets[\"lgtMstPrevBut\"], \"top\", 95),\n \n ]) \n\n ###############\n #shot anm tab layout\n ###############\n cmds.setParent(widgets[\"anmLgtTLO\"])\n widgets[\"fxTabCLO\"] = cmds.columnLayout(\"FX\", w=250, bgc = (.45, .4, .4))\n #################\n #fx info frame and column layouts\n ################# \n cmds.separator(h=5)\n widgets[\"fxVariationsTSL\"] = cmds.textScrollList(w=250, h=90)\n widgets[\"fxLastWSTFG\"] = cmds.textFieldGrp(l=\"Latest WS: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False)\n widgets[\"fxLastMasterTFG\"] = cmds.textFieldGrp(l=\"Master: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False) \n cmds.separator(h=5)\n #################\n #lgt 'workshop' frame and column layouts\n #################\n cmds.setParent(widgets[\"fxTabCLO\"])\n widgets[\"fxWSFLO\"] = cmds.frameLayout(\"FX Workshop\", w=250, h=165, bgc= (.3, .3, .3))\n widgets[\"fxWSFoLO\"] = cmds.formLayout(w=250, h=165, bgc = (.45,.4,.4))\n\n widgets[\"fxWSOpenBut\"] = cmds.button(l=\"Open Latest\\nFX\\nWorkshop\", w=70, h=50, en=False, bgc = (.4,.5,.8), ann=ann[\"openWS\"])\n widgets[\"fxWSIncrBut\"] = cmds.button(l=\"Increment FX Workshop\", w=160, h=50, en=True, bgc = (.7,.6,.4), c = partial(incrementWorkshop, \"fx\"), ann=ann[\"incrWS\"])\n widgets[\"fxWSInfoBut\"] = cmds.button(l=\"WS Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"WSInfo\"]) \n widgets[\"fxWSPrevBut\"] = cmds.button(l=\"Previous FX Workshops\", w=160, en=False, bgc = (.7,.7,.7), ann=ann[\"prevWS\"])\n widgets[\"fxWSNewVarBut\"] = cmds.button(l=\"Create New Variant\", w=160, h=30, bgc = (.2,.2,.2), c=partial(createVariant, \"fx\"), ann=ann[\"crtVariant\"])\n widgets[\"fxVarIconBut\"] = cmds.button(l=\"Create Var\\nIcon\", w=70, h=30, 
en=False, bgc = (.7,.7,.7), c=createShotIcon, ann=ann[\"crtIcon\"]) \n \n cmds.formLayout(widgets[\"fxWSFoLO\"], e=True, af = [\n (widgets[\"fxWSOpenBut\"], \"left\", 5),\n (widgets[\"fxWSOpenBut\"], \"top\", 10),\n (widgets[\"fxWSIncrBut\"], \"left\", 80),\n (widgets[\"fxWSIncrBut\"], \"top\", 10),\n (widgets[\"fxWSInfoBut\"], \"left\", 5),\n (widgets[\"fxWSInfoBut\"], \"top\", 65),\n (widgets[\"fxWSPrevBut\"], \"left\", 80),\n (widgets[\"fxWSPrevBut\"], \"top\", 65),\n (widgets[\"fxWSNewVarBut\"], \"left\", 5),\n (widgets[\"fxWSNewVarBut\"], \"top\", 105),\n (widgets[\"fxVarIconBut\"], \"left\", 170),\n (widgets[\"fxVarIconBut\"], \"top\", 105), \n ]) \n #################\n #lgt 'master' frame and column layouts\n #################\n cmds.setParent(widgets[\"fxTabCLO\"])\n widgets[\"fxMstFLO\"] = cmds.frameLayout(\"FX Master\", w=250, h=200, bgc= (.3, .3, .3))\n widgets[\"fxMstFoLO\"] = cmds.formLayout(w=250, h=200, bgc = (.45,.4,.4))\n widgets[\"fxMstOpenBut\"] = cmds.button(l=\"Open\\nFX Master\", w=70, h=50, en=False, bgc = (.5,.7,.5), ann=ann[\"openMst\"])\n widgets[\"fxMstIncrBut\"] = cmds.button(l=\"Publish FX Master\\n(Import Refs)\", w=160, h=50, en=False, bgc = (.7,.5,.5), ann=ann[\"pubRefMst\"])\n widgets[\"fxMstInfoBut\"] = cmds.button(l=\"Mst Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"MstInfo\"]) \n widgets[\"fxMstPrevBut\"] = cmds.button(l=\"Previous FX Masters\", w=160, en=False, bgc = (.7,.7,.7), ann=ann[\"prevMst\"])\n widgets[\"fxMstBgIncrBut\"] = cmds.button(l=\" BG Publish FX Master (Import Refs)\", w=235, en=False, bgc = (.3,.3,.3), ann=ann[\"pubBGMst\"]) \n\n cmds.formLayout(widgets[\"fxMstFoLO\"], e=True, af = [\n (widgets[\"fxMstOpenBut\"], \"left\", 5),\n (widgets[\"fxMstOpenBut\"], \"top\", 10),\n (widgets[\"fxMstIncrBut\"], \"left\", 80),\n (widgets[\"fxMstIncrBut\"], \"top\", 10),\n (widgets[\"fxMstBgIncrBut\"], \"left\", 5),\n (widgets[\"fxMstBgIncrBut\"], \"top\", 65), \n (widgets[\"fxMstInfoBut\"], \"left\", 5),\n (widgets[\"fxMstInfoBut\"], \"top\", 95),\n (widgets[\"fxMstPrevBut\"], \"left\", 80),\n (widgets[\"fxMstPrevBut\"], \"top\", 95),\n \n ]) \n\n\n cmds.setParent(widgets[\"anmLgtFLO\"])\n widgets[\"anmLgtTitleText\"] = cmds.text(l=\"Variant Files\", font = \"boldLabelFont\", ann=ann[\"varFile\"]) \n\n cmds.formLayout(widgets[\"anmLgtFLO\"], e=True, af = [(widgets[\"anmLgtTitleText\"], \"top\", 5), (widgets[\"anmLgtTitleText\"], \"left\", 135)])\n\n ###################\n # - -- Shot Tab form setup\n ##################\n cmds.formLayout(widgets[\"shotsFLO\"], e=True, af = [\n (widgets[\"shotListCLO\"], \"left\", 0),\n (widgets[\"shotListCLO\"], \"top\", 0),\n (widgets[\"anmLgtFLO\"], \"left\", 134),\n (widgets[\"anmLgtFLO\"], \"top\", 0), \n (widgets[\"shotInfoAssListTLO\"], \"left\", 387),\n (widgets[\"shotInfoAssListTLO\"], \"top\", 0),\n (widgets[\"shotActionFLO\"], \"top\", 0),\n (widgets[\"shotActionFLO\"], \"left\", 594),\n (widgets[\"shotAssListCLO\"], \"top\", 0),\n (widgets[\"shotAssListCLO\"], \"left\", 752)\n ])\n\n ################\n #Misc tab\n ################\n cmds.setParent(widgets[\"lowTLO\"])\n widgets[\"miscFLO\"] = cmds.formLayout(\"Other Shot Tools\",width=1000, height=500, backgroundColor = (.4,.4,.4))\n\n widgets[\"animationTLO\"] = cmds.tabLayout(width=500, height=250, backgroundColor = (.3, .35, .3))\n widgets[\"animationRCLO\"] = cmds.rowColumnLayout(\"animation\", numberOfColumns = 4, columnSpacing=[(1, 0), (2,5), (3,5), (4,5)], rowSpacing=[1,5])\n\n cmds.setParent(widgets[\"miscFLO\"])\n widgets[\"lightingTLO\"] 
= cmds.tabLayout(width=500, height=250, backgroundColor = (.3, .32, .35))\n widgets[\"lightingRCLO\"] = cmds.rowColumnLayout(\"lighting\", numberOfColumns = 4, columnSpacing=[(1, 0), (2,5), (3,5), (4,5)], rowSpacing=[1,5]) \n\n cmds.setParent(widgets[\"miscFLO\"])\n widgets[\"fxTLO\"] = cmds.tabLayout(width=500, height=250, backgroundColor = (.35, .3, .3))\n widgets[\"fxRCLO\"] = cmds.rowColumnLayout(\"fx\", numberOfColumns = 4, columnSpacing=[(1, 0), (2,5), (3,5), (4,5)], rowSpacing=[1,5])\n\n cmds.setParent(widgets[\"miscFLO\"])\n widgets[\"charlexTLO\"] = cmds.tabLayout(width=500, height=250, backgroundColor = (.55, .55, .55))\n widgets[\"charlexRCLO\"] = cmds.rowColumnLayout(\"charlex_general\", numberOfColumns = 4, columnSpacing=[(1, 0), (2,5), (3,5), (4,5)], rowSpacing=[1,5])\n\n cmds.formLayout(widgets[\"miscFLO\"], e=True, af=[\n (widgets[\"charlexTLO\"], \"top\", 0),\n (widgets[\"charlexTLO\"], \"left\", 0),\n (widgets[\"animationTLO\"], \"top\", 0),\n (widgets[\"animationTLO\"], \"left\", 500),\n (widgets[\"lightingTLO\"], \"top\", 250),\n (widgets[\"lightingTLO\"], \"left\", 0),\n (widgets[\"fxTLO\"], \"top\", 250),\n (widgets[\"fxTLO\"], \"left\", 500) \n ])\n\n # get the dictionary of scripts, calls and annotations from the database\n dbPath =os.path.join(os.getenv(\"MAYA_ROOT\"), \"scripts\", \"chrlx_pipe\", \"chrlxScriptList.json\")\n with open(dbPath, \"r\") as f:\n scriptList = json.load(f)\n\n # populate the row column layouts with buttons and funcs from the database\n btl.buttonsToLayout(widgets[\"animationRCLO\"], scriptList[\"shot\"][\"animation\"], width=117, height=40, color=(.38, .3, .38))\n btl.buttonsToLayout(widgets[\"lightingRCLO\"], scriptList[\"shot\"][\"lighting\"], width=117, height=40, color=(.37,.34, .3))\n btl.buttonsToLayout(widgets[\"fxRCLO\"], scriptList[\"shot\"][\"fx\"], width=117, height=40, color=(.35, .3, .3))\n btl.buttonsToLayout(widgets[\"charlexRCLO\"], scriptList[\"shot\"][\"charlex\"], width=117, height=40, color=(.3, .3, .3))\n\n # widgets[\"miscCLO\"] = cmds.columnLayout(\"Other Pipeline Tools\",w=1000, h=500, bgc = (.4,.4,.4))\n # cmds.text(l=\"------ANIM STUFF-------\")\n # cmds.text(l=\"TODO - export cam(s) for nuke, etc\")\n # cmds.text(l=\"TODO - create a new prop from selected geo (propify)\") \n # cmds.text(l=\"TODO - blasting, rendering stuff?\")\n # cmds.text(l=\"TODO - export data (text file of scene locations?)\")\n # cmds.text(l=\"TODO - create render cam? Should this be in the main anim increment? (probably both)\")\n\n # cmds.text(l=\"------LGT STUFF--------\")\n # cmds.text(l=\"TODO - set up current scene for maxwell, arnold\")\n # cmds.text(l=\"TODO - convert an external image to icon (char or project)\")\n # cmds.text(l=\"TODO - revert ['ROLL BACK'] to master version? (replaces master and grabs that workshop\")\n # cmds.text(l=\"TODO - function to add your folder to the WIP folder in this project - save current to WIP folder\")\n # cmds.text(l=\"TODO - explore various frame (render) folders in explorer\")\n # cmds.text(l=\"TODO - various preset light setups/rigs? 
\")\n\n\n ######################\n #show window\n ######################\n cmds.window(widgets[\"win\"], e=True, w=1000, h=580)\n cmds.showWindow(widgets[\"win\"])\n\n #start us off\n populateWindow()", "def create_job(self):\n job = Job()\n process = Process()\n process.process_graph = {\"load_collection1\": {\"process_id\": \"load_collection\", \"arguments\": {}}}\n\n job.process = process\n\n self.dlg = JobAdaptDialog(iface=self.iface, job=job, backend=self.backend, main_dia=self)\n self.dlg.manualButton.setIcon(QIcon(os.path.join(os.path.dirname(__file__),\n 'images/info_icon.png')))\n self.dlg.setWindowFlags(Qt.WindowStaysOnTopHint)\n self.dlg.show()", "def on_miProjectSettings(self):\n self.log.detail(\">>> Launch 'Project Settings' ...\")\n #-- Check User Grade --#\n if not self.foundation.userGroups._user.grade <= 3:\n mess = \"Your grade does not allow you to edit project settings !\"\n self.log.error(mess)\n pQt.errorDialog(mess, self)\n raise UserWarning(mess)\n #-- Check Project --#\n if self.foundation.project.project is None:\n mess = \"!!! No project loaded. Load project to edit its settings !!!\"\n pQt.errorDialog(mess, self)\n raise IOError(mess)\n #-- Launch Dialog --#\n self.dial_projectSettings = dialogsUi.ProjectSettings(parent=self)\n self.dial_projectSettings.exec_()", "def optionsWindow():\n\t# create the main interface\n\tif cmds.window(kSetupOptionsWindow, q=True, ex=True):\n\t\tcmds.deleteUI(kSetupOptionsWindow)\n\tmainWindow = cmds.window(kSetupOptionsWindow, title='%s Options'%kToolName, menuBar=True, wh=(545,350))\n\t\n\t# build the menu bar\n\tcmds.menu(label='Help')\n\tamui.helpMenuItem(kToolName, __file__)\n\tamui.aboutMenuItem(kToolName, kVersionNumber, kVersionDate)\n\t\n\tmainForm = cmds.formLayout(nd=100)\n\t\n\t# build the section to get information about the new twist joints\n\tif_suffixName = cmds.textFieldGrp(text='_Twist', label='Suffix of New Twist Joints:')\n\tif_numberTwistJoints = cmds.intSliderGrp(v=3, min=1, max=10, fmn=1, fmx=100, label='Number of Twist Joints:', field=True)\n\t\n\t# position the input fields for the twist joints\n\tcmds.formLayout(mainForm, edit=True, attachForm=[(if_suffixName, 'left', 30), (if_suffixName, 'top', 5)], attachNone=[(if_suffixName, 'right'), (if_suffixName, 'bottom')])\n\tcmds.formLayout(mainForm, edit=True, attachForm=[(if_numberTwistJoints, 'left', 30)], attachNone=[(if_numberTwistJoints, 'right'), (if_numberTwistJoints, 'bottom')], attachControl=[(if_numberTwistJoints, 'top', 5, if_suffixName)])\n\t\n\t# build the section to get information for the hip constraint\n\tconstraintFrame = eval('cmds.frameLayout(collapsable=True, label=\"Hip Constraint Options:\" %s)'%amui.__frameAlignCenter__)\n\tconstraintForm = cmds.formLayout(nd=100)\n\t\n\t# attempt to guess what the pelvis is if there is a selection when the GUI is created\n\tpelvisText = 'CenterRoot'\n\tsel = cmds.ls(sl=True, l=True, type='transform')\n\tif sel and len(sel) > 0: # BUG: in Maya 8.5, a selection of length 0 returns None rather than an empty list\n\t\ttry:\n\t\t\thip = cmds.listRelatives(sel[0], p=True, f=True) # just use the first knee in the selection\n\t\t\tpelvis = cmds.listRelatives(hip[0], p=True, f=True)\n\t\t\tpelvisText = pelvis[0]\n\t\texcept: pass\n\t\t\n\tif_pelvis = cmds.textFieldGrp(label='Pelvis Object:', tx=pelvisText)\n\tif_hipAimAxis = cmds.floatFieldGrp(v1=1, v2=0, v3=0, nf=3, pre=4, label='Hip Aim Axis:')\n\tif_hipFrontAxis = cmds.floatFieldGrp(v1=0, v2=0, v3=1, nf=3, pre=4, label='Hip Front Axis:')\n\tif_pelvisAimAxis = 
cmds.floatFieldGrp(v1=0, v2=1, v3=0, nf=3, pre=4, label='Pelvis Aim Axis:')\n\tif_pelvisFrontAxis = cmds.floatFieldGrp(v1=0, v2=0, v3=1, nf=3, pre=4, label='Pelvis Front Axis:')\n\t\n\t# position the input fields for the hip constraint\n\tcmds.formLayout(constraintForm, edit=True, attachForm=[(if_pelvis, 'left', 30), (if_pelvis, 'top', 5)], attachNone=[(if_pelvis, 'right'), (if_pelvis, 'bottom')])\n\tcmds.formLayout(constraintForm, edit=True, attachForm=[(if_hipAimAxis, 'left', 30)], attachNone=[(if_hipAimAxis, 'right'), (if_hipAimAxis, 'bottom')], attachControl=[(if_hipAimAxis, 'top', 5, if_pelvis)])\n\tcmds.formLayout(constraintForm, edit=True, attachForm=[(if_hipFrontAxis, 'left', 30)], attachNone=[(if_hipFrontAxis, 'right'), (if_hipFrontAxis, 'bottom')], attachControl=[(if_hipFrontAxis, 'top', 5, if_hipAimAxis)])\n\tcmds.formLayout(constraintForm, edit=True, attachForm=[(if_pelvisAimAxis, 'left', 30)], attachNone=[(if_pelvisAimAxis, 'right'), (if_pelvisAimAxis, 'bottom')], attachControl=[(if_pelvisAimAxis, 'top', 5, if_hipFrontAxis)])\n\tcmds.formLayout(constraintForm, edit=True, attachForm=[(if_pelvisFrontAxis, 'left', 30)], attachNone=[(if_pelvisFrontAxis, 'right'), (if_pelvisFrontAxis, 'bottom')], attachControl=[(if_pelvisFrontAxis, 'top', 5, if_pelvisAimAxis)])\n\t\n\tcmds.setParent('..') # go up to constraintForm\n\tcmds.setParent('..') # go up to mainForm\n\t\n\t# position the frame for the hip constraint\n\tcmds.formLayout(mainForm, edit=True, attachPosition=[(constraintFrame, 'left', -1, 0), (constraintFrame, 'right', -1, 100)], attachControl=[(constraintFrame, 'top', 5, if_numberTwistJoints)], attachNone=[(constraintFrame, 'bottom')])\n\t\n\t# create the buttons to execute the script\n\tcmd_create='amTools.rigging.hipSetup.doOptions (\"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\")'%(\n\t\tif_suffixName, \n\t\tif_numberTwistJoints, \n\t\tif_pelvis, \n\t\tif_hipAimAxis, \n\t\tif_hipFrontAxis, \n\t\tif_pelvisAimAxis, \n\t\tif_pelvisFrontAxis)\n\tutils.ui.threeButtonLayout(mainForm, mainWindow, cmd_create)\n\t\n\tcmds.showWindow(mainWindow)", "def openTB4Settings(self):\n self.TB4_Window = QtWidgets.QDialog()\n self.TB4_ui = Ui_robotFourConfig()\n self.TB4_ui.setupUi(self.TB4_Window)\n self.TB4_Window.show()", "def openTB1Settings(self):\n self.TB1_Window = QtWidgets.QDialog()\n self.TB1_ui = Ui_robotOneConfig()\n self.TB1_ui.setupUi(self.TB1_Window)\n self.TB1_Window.show()", "def openTB3Settings(self):\n self.TB3_Window = QtWidgets.QDialog()\n self.TB3_ui = Ui_robotThreeConfig()\n self.TB3_ui.setupUi(self.TB3_Window)\n self.TB3_Window.show()", "def demo_launch():\n import acm\n shell = acm.UX().SessionManager().Shell()\n dialog = TextObjectEditDialog(\"pb_funds\", editable=True)\n builder = dialog.create_layout()\n acm.UX().Dialogs().ShowCustomDialogModal(shell, builder, dialog)", "def _display_job_menu(self):\n self.job_menu = self.window.add.options(*CHARACTER_JOB_OBJECTS.keys())\n self.job_menu.config(width=15)\n self.job_menu.grid(\n column=self.window.get_middle_column(),\n row=self.window.get_middle_row(),\n )\n\n self.job_menu_label = self.window.add.label(text=\"Select your job\", bg=\"#2C3E50\", fg=\"white\")\n self.job_menu_label.grid(column=self.window.get_middle_column(), row=self.window.get_middle_row() - 1)", "def openRocConfig(self):\n self.rocConfig_Window = QtWidgets.QDialog()\n self.rocConfig_ui = Ui_rocConfigure()\n self.rocConfig_ui.setupUi(self.rocConfig_Window)\n self.rocConfig_Window.show()", "def _launch(self, *args, **kwargs):\n\n 
self._stats.start_time = time.time()\n exc_type, exc_value, exc_tb = None, None, None\n try:\n kwargs['settings'] = self._settings\n kwargs['config'] = self._config\n kwargs['dev'] = self._dev\n tool_data = self.launch(*args, **kwargs)\n if tool_data and tool_data.get('tool') is not None:\n tool_data['tool'].ID = self.ID\n tool_data['tool'].PACKAGE = self.PACKAGE\n if self._settings.get('dockable', False):\n uid = None\n # TODO: Add option in settings to check if a tool can be opened multiple times or not\n # TODO: Make this piece of code DCC agnostic\n # if multiple_tools:\n # uid = \"{0} [{1}]\".format(self.uiData[\"label\"], str(uuid.uuid4()))\n ui_label = self._config.get('name', default='')\n ui_icon = self._config.get('icon', default='tpdcc')\n if dcc.is_maya():\n from tpDcc.dccs.maya.ui import window\n bootstrap_widget = window.BootStrapWidget(\n tool_data['tool'], title=ui_label, icon=resources.icon(ui_icon), uid=uid)\n tool_data['bootstrap'] = bootstrap_widget\n tool_data['bootstrap'].show(\n retain=False, dockable=True, tabToControl=('AttributeEditor', -1), floating=False)\n self._bootstrap.append(bootstrap_widget)\n except Exception:\n exc_type, exc_value, exc_tb = sys.exc_info()\n traceback.print_exception(exc_type, exc_value, exc_tb)\n raise\n finally:\n tb = None\n if exc_type and exc_value and exc_tb:\n tb = traceback.format_exception(exc_type, exc_value, exc_tb)\n self._stats.finish(tb)\n\n return tool_data", "def interactiveMode(self):\n if self.temp_configuration.check('showSplash'):\n self.splashScreen.finish(self.builderWindow)\n debug.DebugPrint.getInstance().register_splash(None)\n self.splashScreen = None\n # self.builderWindow.modulePalette.updateFromModuleRegistry()\n # self.builderWindow.modulePalette.connect_registry_signals()\n self.builderWindow.link_registry()\n \n self.process_interactive_input()\n if not self.temp_configuration.showSpreadsheetOnly:\n self.showBuilderWindow()\n else:\n self.builderWindow.hide()\n self.builderWindow.create_first_vistrail()\n self.builderWindow.check_running_jobs()", "def optionsWindow():\n\t# create the main interface\n\tif cmds.window(kMakeRibbonsOptionsWindow, q=True, ex=True):\n\t\tcmds.deleteUI(kMakeRibbonsOptionsWindow)\n\tmainWindow = cmds.window(kMakeRibbonsOptionsWindow, title='%s Options'%kToolName, menuBar=True, wh=(545,465))\n\t\n\t# build the menu bar\n\tcmds.menu(label='Help')\n\tamui.helpMenuItem(kToolName, __file__)\n\tamui.aboutMenuItem(kToolName, kVersionNumber, kVersionDate)\n\t\n\tmainForm = cmds.formLayout(nd=100)\n\t\n\t# build the section to get information about the new ribbons\n\tif_width = cmds.floatSliderGrp(v=3.0, min=0.0, max=100, fmn=0.0, fmx=100, label='Width:', field=True)\n\tif_divisionsType = cmds.radioButtonGrp(label='Compute Divisions:', labelArray2=['Statically', 'Dynamically'], numberOfRadioButtons=2, select=2)\n\tif_divisions = cmds.intSliderGrp(v=1, min=1, max=100, fmn=0, fmx=100, label='Divisions:', field=True, en=False)\n\tif_divisionsPerUnit = cmds.floatSliderGrp(v=0.5, min=0.0, max=10.0, fmn=0.0, fmx=100, label='Divisions per Unit:', field=True)\n\tcmds.radioButtonGrp(if_divisionsType, e=True, \n\t\tcc=('amTools.modeling.makeRibbons.doRadioDivisions(\"%s\", \"%s\", \"%s\")'%(if_divisionsType, if_divisions, if_divisionsPerUnit)))\n\tif_taper = cmds.floatSliderGrp(v=1.0, min=0.0, max=15.0, fmn=0.0, fmx=15.0, label='Taper:', field=True)\n\tif_frontTwist = cmds.floatSliderGrp(v=0.0, min=-180.0, max=180.0, fmn=-360.0, fmx=360.0, label='Front Twist:', field=True)\n\tif_lengthTwist = 
cmds.floatSliderGrp(v=0.0, min=-180.0, max=180.0, fmn=-180.0, fmx=180.0, label='Length Twist:', field=True)\n\tif_upVector = cmds.floatFieldGrp(v1=0, v2=1, v3=0, nf=3, pre=4, label='Base Up Vector:')\n\t\n\t# position the input fields for the new parents\n\tcmds.formLayout(mainForm, edit=True, attachForm=[(if_width, 'left', 30), (if_width, 'top', 5)], attachNone=[(if_width, 'right'), (if_width, 'bottom')])\n\tcmds.formLayout(mainForm, edit=True, attachForm=[(if_divisionsType, 'left', 30)], attachNone=[(if_divisionsType, 'right'), (if_divisionsType, 'bottom')], attachControl=[(if_divisionsType, 'top', 5, if_width)])\n\tcmds.formLayout(mainForm, edit=True, attachForm=[(if_divisions, 'left', 30)], attachNone=[(if_divisions, 'right'), (if_divisions, 'bottom')], attachControl=[(if_divisions, 'top', 5, if_divisionsType)])\n\tcmds.formLayout(mainForm, edit=True, attachForm=[(if_divisionsPerUnit, 'left', 30)], attachNone=[(if_divisionsPerUnit, 'right'), (if_divisionsPerUnit, 'bottom')], attachControl=[(if_divisionsPerUnit, 'top', 5, if_divisions)])\n\tcmds.formLayout(mainForm, edit=True, attachForm=[(if_taper, 'left', 30)], attachNone=[(if_taper, 'right'), (if_taper, 'bottom')], attachControl=[(if_taper, 'top', 5, if_divisionsPerUnit)])\n\tcmds.formLayout(mainForm, edit=True, attachForm=[(if_frontTwist, 'left', 30)], attachNone=[(if_frontTwist, 'right'), (if_frontTwist, 'bottom')], attachControl=[(if_frontTwist, 'top', 5, if_taper)])\n\tcmds.formLayout(mainForm, edit=True, attachForm=[(if_lengthTwist, 'left', 30)], attachNone=[(if_lengthTwist, 'right'), (if_lengthTwist, 'bottom')], attachControl=[(if_lengthTwist, 'top', 5, if_frontTwist)])\n\tcmds.formLayout(mainForm, edit=True, attachForm=[(if_upVector, 'left', 30)], attachNone=[(if_upVector, 'right'), (if_upVector, 'bottom')], attachControl=[(if_upVector, 'top', 5, if_lengthTwist)])\n\t\n\t# build the section to get information for the uv generation options\n\tuvFrame = eval('cmds.frameLayout(collapsable=True, label=\"UV Generation Options:\" %s)'%amui.__frameAlignCenter__)\n\tuvForm = cmds.formLayout(nd=100)\n\t\n\tif_uvScale = cmds.floatSliderGrp(v=1.0, min=0.0, max=1.0, fmn=-1.0, fmx=1.0, label='UV Scale:', field=True, en=False)\n\tif_uvScaleType = cmds.radioButtonGrp(label='UV Normalization:', labelArray3=['None', 'Longest', 'Shortest'], numberOfRadioButtons=3, select=2)\n\tcmds.radioButtonGrp(if_uvScaleType, e=True, cc=('amTools.modeling.makeRibbons.doRadioUVs(\"%s\", \"%s\")'%(if_uvScaleType, if_uvScale)))\n\tif_uvPinLocation = cmds.radioButtonGrp(label='Pin UVs:', labelArray2=['Top', 'Bottom'], numberOfRadioButtons=2, select=2)\n\t\n\t# position the input fields for the uv generation options\n\tcmds.formLayout(uvForm, edit=True, attachForm=[(if_uvScale, 'left', 30), (if_uvScale, 'top', 5)], attachNone=[(if_uvScale, 'right'), (if_uvScale, 'bottom')])\n\tcmds.formLayout(uvForm, edit=True, attachForm=[(if_uvScaleType, 'left', 30)], attachNone=[(if_uvScaleType, 'right'), (if_uvScaleType, 'bottom')], attachControl=[(if_uvScaleType, 'top', 5, if_uvScale)])\n\tcmds.formLayout(uvForm, edit=True, attachForm=[(if_uvPinLocation, 'left', 30)], attachNone=[(if_uvPinLocation, 'right'), (if_uvPinLocation, 'bottom')], attachControl=[(if_uvPinLocation, 'top', 5, if_uvScaleType)])\n\t\n\tcmds.setParent('..') # Go up to uvForm\n\tcmds.setParent('..') # Go up to mainForm\n\t\n\t# position the frame for the uv generation options\n\tcmds.formLayout(mainForm, edit=True, attachPosition=[(uvFrame, 'left', -1, 0), (uvFrame, 'right', -1, 100)], 
attachControl=[(uvFrame, 'top', 5, if_upVector)], attachNone=[(uvFrame, 'bottom')])\n\t\n\t# create the buttons to execute the script\n\tcmd_create = 'amTools.modeling.makeRibbons.doOptions (\"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\")'%(if_width, if_divisionsType, if_divisions, if_divisionsPerUnit, if_taper, if_frontTwist, if_lengthTwist, if_upVector, if_uvScale, if_uvScaleType, if_uvPinLocation)\n\tamui.threeButtonLayout(mainForm, mainWindow, cmd_create)\n\t\n\tcmds.showWindow(mainWindow)", "def openTB2Settings(self):\n self.TB2_Window = QtWidgets.QDialog()\n self.TB2_ui = Ui_robotTwoConfig()\n self.TB2_ui.setupUi(self.TB2_Window)\n self.TB2_Window.show()", "def openGameTools(*args):\n pyqt.showDialog(gameTools)", "def PopulateSubmitter( gui ):\n global submissionInfo\n print( \"Grabbing submitter info...\" )\n try:\n stringSubInfo = CallDeadlineCommand( [ \"-prettyJSON\", \"-GetSubmissionInfo\", \"Pools\", \"Groups\", \"MaxPriority\", \"UserHomeDir\", \"RepoDir:submission/Katana/Main\", \"RepoDir:submission/Integration/Main\", ], useDeadlineBg=True )\n output = json.loads( stringSubInfo, encoding=\"utf-8\" )\n except:\n print( \"Unable to get submitter info from Deadline:\\n\\n\" + traceback.format_exc() )\n raise\n if output[ \"ok\" ]:\n submissionInfo = output[ \"result\" ]\n else:\n print( \"DeadlineCommand returned a bad result and was unable to grab the submitter info.\\n\\n\" + output[ \"result\" ] )\n raise ValueError( output[ \"result\" ] )\n # Create a widget with a vertical box layout as a container for widgets to include in the tab\n scrollWidget = QWidget()\n scrollLayout = QGridLayout(scrollWidget)\n scrollLayout.setSpacing(4)\n scrollLayout.setContentsMargins(4, 4, 4, 4)\n\n buttonLayout = QHBoxLayout()\n\n # First layout: General options\n scrollLayout.addWidget(CreateSeparator( \"Job Description\" ),0,0,1,3)\n\n jobNameLabel = QLabel( \"Job Name\" )\n jobNameLabel.setToolTip(\"The name of your job. This is optional, and if left blank, it will default to 'Untitled'.\")\n scrollLayout.addWidget(jobNameLabel,1,0)\n gui.jobNameWidget = QLineEdit( os.path.basename(FarmAPI.GetKatanaFileName()).split('.')[0] )\n scrollLayout.addWidget(gui.jobNameWidget, 1, 1, 1, 1 )\n\n commentLabel = QLabel( \"Comment\" )\n commentLabel.setToolTip(\"A simple description of your job. This is optional and can be left blank.\")\n scrollLayout.addWidget(commentLabel,2,0)\n gui.commentWidget = QLineEdit( \"\" )\n scrollLayout.addWidget(gui.commentWidget, 2, 1, 1, 1 )\n\n departmentLabel = QLabel( \"Department\" )\n departmentLabel.setToolTip( \"The department you belong to. 
This is optional and can be left blank.\" )\n scrollLayout.addWidget(departmentLabel, 3, 0)\n gui.departmentWidget = QLineEdit( \"\" )\n scrollLayout.addWidget(gui.departmentWidget, 3, 1, 1, 1 )\n\n # Second layout: Job options\n scrollLayout.addWidget(CreateSeparator( \"Job Options\" ),4,0,1,3)\n\n pools = submissionInfo[\"Pools\"]\n poolLabel = QLabel( \"Pool\" )\n poolLabel.setToolTip( \"The pool that your job will be submitted to.\" )\n scrollLayout.addWidget(poolLabel, 5, 0)\n\n gui.poolsWidget = QComboBox()\n gui.poolsWidget.addItems(pools)\n scrollLayout.addWidget(gui.poolsWidget, 5, 1 )\n\n secondPoolLabel = QLabel( \"Secondary Pool\" )\n secondPoolLabel.setToolTip( \"The secondary pool lets you specify a pool to use if the primary pool does not have any available Slaves.\" )\n scrollLayout.addWidget(secondPoolLabel, 6, 0 )\n\n gui.secondPoolsWidget = QComboBox()\n gui.secondPoolsWidget.addItems(pools)\n scrollLayout.addWidget(gui.secondPoolsWidget, 6, 1 )\n\n groups = submissionInfo[ \"Groups\" ]\n groupLabel = QLabel( \"Group\" )\n groupLabel.setToolTip( \"The group that your job will be submitted to.\" )\n scrollLayout.addWidget(groupLabel, 7, 0)\n\n gui.groupWidget = QComboBox()\n gui.groupWidget.addItems(groups)\n scrollLayout.addWidget(gui.groupWidget, 7, 1)\n\n priorityLabel = QLabel( \"Priority\" )\n priorityLabel.setToolTip( \"A job can have a numeric priority from 0 to 100, where 0 is the lowest priority and 100 is the highest.\" )\n scrollLayout.addWidget(priorityLabel, 8, 0)\n\n maxPriority = submissionInfo[\"MaxPriority\"]\n\n gui.priorityBox = QSpinBox()\n gui.priorityBox.setMinimum(0)\n gui.priorityBox.setMaximum( maxPriority )\n scrollLayout.addWidget(gui.priorityBox, 8, 1)\n\n taskTimeoutLabel = QLabel( \"Task Timeout\" )\n taskTimeoutLabel.setToolTip( \"The number of minutes a Slave has to render a task for this job before it requeues it. Specify 0 for no limit.\" )\n scrollLayout.addWidget(taskTimeoutLabel, 9, 0)\n\n gui.taskTimeoutBox = QSpinBox()\n gui.taskTimeoutBox.setMinimum(0)\n gui.taskTimeoutBox.setMaximum(10000)\n scrollLayout.addWidget(gui.taskTimeoutBox, 9, 1)\n\n concurrentTasksLabel = QLabel( \"Concurrent Tasks\" )\n concurrentTasksLabel.setToolTip(\"The number of tasks that can render concurrently on a single Slave. This is useful if the rendering application only uses one thread to render and your Slaves have multiple CPUs.\")\n scrollLayout.addWidget(concurrentTasksLabel, 10, 0 )\n gui.concurrentTasksWidget = QSpinBox( )\n scrollLayout.addWidget(gui.concurrentTasksWidget, 10, 1)\n gui.concurrentTasksWidget.setMinimum(1)\n gui.concurrentTasksWidget.setMaximum(16)\n gui.limitTasksSlaveLimit = QCheckBox( \"Limit Tasks To Slave's Task Limit\" )\n gui.limitTasksSlaveLimit.setToolTip( \"If you limit the tasks to a Slave's task limit, then by default, the Slave won't dequeue more tasks then it has CPUs. This task limit can be overridden for individual Slaves by an administrator.\" )\n scrollLayout.addWidget(gui.limitTasksSlaveLimit, 10, 2)\n\n machineLimitLabel = QLabel( \"Machine Limit\" )\n machineLimitLabel.setToolTip(\"Use the Machine Limit to specify the maximum number of machines that can render your job at one time. 
Specify 0 for no limit.\")\n scrollLayout.addWidget( machineLimitLabel, 11, 0 )\n\n gui.machineLimitWidget = QSpinBox()\n scrollLayout.addWidget(gui.machineLimitWidget, 11, 1)\n gui.isBlackListWidget = QCheckBox( \"Machine List Is Blacklist\" )\n gui.isBlackListWidget.setToolTip(\"You can force the job to render on specific machines by using a whitelist, or you can avoid specific machines by using a blacklist.\")\n scrollLayout.addWidget(gui.isBlackListWidget, 11, 2)\n\n machineListLabel = QLabel( \"Machine List\" )\n machineListLabel.setToolTip(\"The whitelisted or blacklisted list of machines.\")\n scrollLayout.addWidget( machineListLabel, 12, 0 )\n\n machineListLayout = QHBoxLayout()\n gui.machineListWidget = QLineEdit( \"\" )\n machineListLayout.addWidget(gui.machineListWidget)\n getMachineListWidget = QPushButton( \"...\" )\n getMachineListWidget.pressed.connect( lambda: BrowseMachineList(gui.machineListWidget) )\n machineListLayout.addWidget(getMachineListWidget)\n scrollLayout.addLayout( machineListLayout, 12, 1, 1, 2 )\n\n limitsLabel = QLabel( \"Limits\" )\n limitsLabel.setToolTip(\"The Limits that your job requires.\")\n scrollLayout.addWidget( limitsLabel, 13, 0 )\n limitsLayout = QHBoxLayout()\n gui.limitsWidget = QLineEdit( \"\" )\n limitsLayout.addWidget(gui.limitsWidget)\n getLimitsWidget = QPushButton( \"...\" )\n getLimitsWidget.pressed.connect( lambda: BrowseLimitList(gui.limitsWidget) )\n limitsLayout.addWidget(getLimitsWidget)\n scrollLayout.addLayout( limitsLayout, 13, 1, 1, 2 )\n\n dependenciesLabel = QLabel( \"Dependencies\" )\n dependenciesLabel.setToolTip(\"Specify existing jobs that this job will be dependent on. This job will not start until the specified dependencies finish rendering.\")\n scrollLayout.addWidget( dependenciesLabel, 14, 0 )\n dependenciesLayout = QHBoxLayout()\n gui.dependenciesWidget = QLineEdit( \"\" )\n dependenciesLayout.addWidget(gui.dependenciesWidget)\n getDependenciesWidget = QPushButton( \"...\" )\n getDependenciesWidget.pressed.connect( lambda: BrowseDependencyList(gui.dependenciesWidget) )\n dependenciesLayout.addWidget(getDependenciesWidget)\n scrollLayout.addLayout( dependenciesLayout, 14, 1, 1, 2 )\n\n onJobCompleteLabel = QLabel( \"On Job Complete\" )\n onJobCompleteLabel.setToolTip(\"If desired, you can automatically archive or delete the job when it completes.\")\n scrollLayout.addWidget( onJobCompleteLabel, 15, 0 )\n gui.onJobCompleteWidget = QComboBox( )\n gui.onJobCompleteWidget.addItems([\"Nothing\", \"Archive\", \"Delete\"])\n scrollLayout.addWidget(gui.onJobCompleteWidget, 15, 1)\n gui.submitSuspendedWidget = QCheckBox( \"Submit Job as Suspended\" )\n gui.submitSuspendedWidget.setToolTip( \"If enabled, the job will submit in the suspended state. This is useful if you don't want the job to start rendering right away. 
Just resume it from the Monitor when you want it to render.\")\n scrollLayout.addWidget(gui.submitSuspendedWidget, 15, 2)\n\n # Third layout: Katana options\n scrollLayout.addWidget(CreateSeparator( \"Katana Options\" ),16,0,1,3)\n\n frameRangeLabel = QLabel( \"Frame Range\" )\n frameRangeLabel.setToolTip(\"The list of frames to render.\")\n scrollLayout.addWidget( frameRangeLabel, 17, 0 )\n gui.frameRangeWidget = QLineEdit( \"\" ) # Populate based on frame range\n scrollLayout.addWidget( gui.frameRangeWidget, 17, 1, 1, 1 )\n\n frameRange = FarmAPI.GetSceneFrameRange()\n gui.frameRangeWidget.setText( str(frameRange['start']) + \"-\" + str(frameRange['end']) )\n\n gui.submitSceneBox = QCheckBox( \"Submit Katana Scene File\" )\n gui.submitSceneBox.setToolTip( \"If this option is enabled, the scene file will be submitted with the job, and then copied locally to the Slave machine during rendering.\" )\n scrollLayout.addWidget(gui.submitSceneBox, 17, 2 )\n\n framesPerTaskLabel = QLabel( \"Frames Per Task\" )\n framesPerTaskLabel.setToolTip( \"This is the number of frames that will be rendered at a time for each job task.\" )\n scrollLayout.addWidget( framesPerTaskLabel, 18, 0 )\n gui.framesPerTaskWidget = QSpinBox( )\n gui.framesPerTaskWidget.setMinimum(1)\n scrollLayout.addWidget( gui.framesPerTaskWidget, 18, 1, 1, 1 )\n\n gui.useWorkingDirectory = QCheckBox( \"Use Working Directory\" )\n gui.useWorkingDirectory.setToolTip( \"If enabled, the current working directory will be used during rendering. This is required if your Katana project file contains relative paths.\" )\n gui.useWorkingDirectory.setChecked(True)\n scrollLayout.addWidget( gui.useWorkingDirectory, 18, 2 )\n\n renderNodeSelectLabel = QLabel( \"Render Node Submission\" )\n renderNodeSelectLabel.setToolTip( \"Choose to render the whole scene, render all nodes as separate jobs, or render separate nodes\" )\n scrollLayout.addWidget( renderNodeSelectLabel, 19, 0 )\n\n gui.renderSelectBox = QComboBox()\n gui.renderSelectBox.addItems( [\"Submit All Render Nodes As Separate Jobs\", \"Select Render Node\"] )\n scrollLayout.addWidget( gui.renderSelectBox, 19, 1 )\n\n gui.includeImageWrite = QCheckBox( \"Include ImageWrite Nodes\" )\n gui.includeImageWrite.setToolTip( \"If enabled, ImageWrite nodes will be included for submission.\" )\n scrollLayout.addWidget( gui.includeImageWrite, 19, 2 )\n\n renderNodeLabel = QLabel( \"Render Node\" )\n renderNodeLabel.setToolTip( \"Set the render node to render with, or leave blank to use the node already set.\" )\n scrollLayout.addWidget( renderNodeLabel, 20, 0 )\n\n gui.frameDependent = QCheckBox( \"Submit Jobs As Frame Dependent\" )\n gui.frameDependent.setToolTip( \"If enabled, the Katana Job(s) will have Frame Dependencies. 
If your scene contains static content, do not use!\" )\n scrollLayout.addWidget( gui.frameDependent, 20, 2 )\n\n gui.renderNodeBox = QComboBox()\n gui.renderSelectBox.currentIndexChanged.connect( lambda: RenderSelectionChanged( gui.renderSelectBox, gui.renderNodeBox ) )\n scrollLayout.addWidget( gui.renderNodeBox, 20, 1)\n gui.renderNodeBox.setDisabled(True)\n # Submit button\n buttonLayoutSpacer = QSpacerItem( 0, 0, QSizePolicy.MinimumExpanding, QSizePolicy.Minimum )\n buttonLayout.addItem( buttonLayoutSpacer )\n\n gui.pipelineToolStatusLabel = QLabel( \"No Pipeline Tools Set\" )\n gui.pipelineToolStatusLabel.setAlignment( QtCore.Qt.AlignCenter )\n buttonLayout.addWidget( gui.pipelineToolStatusLabel )\n pipelineToolsButton = QPushButton( \"Pipeline Tools\" )\n pipelineToolsButton.pressed.connect( lambda: PipelineToolsClicked( gui ) )\n buttonLayout.addWidget( pipelineToolsButton )\n\n submitButton = QPushButton( \"Submit\" )\n submitButton.pressed.connect( lambda: SubmitPressed(gui) )\n buttonLayout.addWidget( submitButton )\n\n scrollLayout.addLayout( buttonLayout,21,0,1,3 )\n\n verticalStretchLayout = QVBoxLayout()\n verticalStretchLayout.addStretch()\n scrollLayout.addLayout( verticalStretchLayout, 22, 0 )\n\n scrollArea = QScrollArea()\n scrollArea.setWidget(scrollWidget)\n scrollArea.setWidgetResizable(True)\n scrollArea.setFrameStyle(QFrame.NoFrame + QFrame.Plain)\n\n vLayout = QVBoxLayout()\n vLayout.setObjectName('vLayout')\n vLayout.addWidget(scrollArea)\n\n gui.setLayout(vLayout)\n\n LoadStickySettings( gui )\n try:\n pipelineToolStatusMessage = RetrievePipelineToolStatus( raiseOnExitCode=True )\n except subprocess.CalledProcessError as e:\n pipelineToolStatusMessage = HandlePipelineToolsCalledProcessError( e )\n UpdatePipelineToolStatusLabel( gui, pipelineToolStatusMessage )\n\n # Populate the render node drop down based on the effective check state\n # of the \"Include ImageWrite Nodes\" checkbox after sticky settings are applied\n PopulateRenderNodeDropDown(gui.includeImageWrite.isChecked(), gui.renderNodeBox)\n # We delay wiring up this signal handler until after the sticky settings are applied to avoid\n # rebuilding the drop-down list multiple times unnecessarily\n gui.includeImageWrite.stateChanged.connect(lambda checked: PopulateRenderNodeDropDown(checked, gui.renderNodeBox))\n\n # Check if this tab is part of a pane in the main window, or if it is contained in a floating pane\n if gui.window() != UI4.App.MainWindow.CurrentMainWindow():\n # Resize the floating pane's window to accommodate the tab's widgets\n requiredSize = scrollWidget.sizeHint()\n gui.window().resize(max(requiredSize.width() + 20, 200), min(requiredSize.height() + 40, 1000))", "def launch_reporteditor():\r\n import sys\r\n from PyQt4 import QtGui\r\n from freeseer.frontend.reporteditor.reporteditor import ReportEditorApp\r\n\r\n profile = settings.profile_manager.get()\r\n config = profile.get_config('freeseer.conf', settings.FreeseerConfig,\r\n storage_args=['Global'], read_only=True)\r\n db = profile.get_database()\r\n\r\n app = QtGui.QApplication(sys.argv)\r\n main = ReportEditorApp(config, db)\r\n main.show()\r\n sys.exit(app.exec_())", "def build_gui():\n\n form = sg.FlexForm('Script runner')\n layout = [\n [sg.Text(\"Do you want to run the script?\", size=(35, 1))],\n [sg.Yes(), sg.No()]\n ]\n\n button, values = sg.Window('Script runner', layout, auto_close=True,\n auto_close_duration=4).Read()\n\n if button == 'Yes':\n run()\n else:\n form3 = sg.FlexForm(\"Bye\")\n ha_det = [\n [sg.Text(f\"OK. 
Bye!\", size=(17, 1))]\n ]\n sg.Window(\"Bye\", ha_det, auto_close=True, auto_close_duration=2).Read()", "def open_options_window(self):\r\n\r\n window_options = OptionsWindow(self.master)\r\n window_options.lift()\r\n window_options.grab_set()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the path to DeadlineCommand.
def GetDeadlineCommand( useDeadlineBg=False ):
    deadlineBin = ""
    try:
        deadlineBin = os.environ[ 'DEADLINE_PATH' ]
    except KeyError:
        # if the error is a key error it means that DEADLINE_PATH is not set. however Deadline command may be in the PATH or on OSX it could be in the file /Users/Shared/Thinkbox/DEADLINE_PATH
        pass

    # On OSX, we look for the DEADLINE_PATH file if the environment variable does not exist.
    if deadlineBin == "" and os.path.exists( "/Users/Shared/Thinkbox/DEADLINE_PATH" ):
        with io.open( "/Users/Shared/Thinkbox/DEADLINE_PATH", encoding="utf-8" ) as f:
            deadlineBin = f.read().strip()

    exeName = "deadlinecommand"
    if useDeadlineBg:
        exeName += "bg"

    deadlineCommand = os.path.join( deadlineBin, exeName )

    return deadlineCommand
[ "def _get_cmd_line_file_path(self):\n return join(self._active_output_dir, \"cmd_line\")", "def command_path(\n sibling_ctx: click.core.Context,\n command: click.core.Command,\n) -> str:\n command_path_list = sibling_ctx.command_path.split()\n command_path_list[-1] = command.name\n return ' '.join(command_path_list)", "def rel_command(self):\n return self.command.lstrip('/')", "def get_command_name(self):\n return self.__module__.rsplit('.', 1)[1]", "def command_directory_name(self):\n return self._command_directory_name", "def command(self):\n if self._command is None:\n if '/' in self.suffix:\n self._command = self.suffix[:self.suffix.find('/')]\n else:\n self._command = self.suffix\n return self._command", "def get_command(self):\n return 'date && cd ' + \\\n os.path.join(ChronosJob.cloud_path_dict[self.cloud], \\\n 'userfiles', self.job_dir_relative_path) + \\\n ' && python3 /home/src/gene_prioritization.py ' + \\\n ' -run_directory ./' + \\\n ' -run_file run.yml' + \\\n ' && date;'", "def GetActiveToolPath() -> str:\n pass", "def get_exec_command(self):\n if not 'exec_path' in self.config:\n msg = \"\"\"\nError: self.exec_path has not been set yet.\nYou should write a path to the PMD executable in {0}.\nIt should be in JSON format like,\n::\n\n {\n exec_path: /home/username/bin/pmd\n }\n\n\n\"\"\".format(get_conf_path())\n raise RuntimeError(msg)\n\n text = self.config['exec_path']\n # text = 'mpirun -np {{NPARA}} {path}'.format(path=self.config['exec_path']) \\\n # +' > out.pmd 2>&1'\n return text", "def nice_path(self, env=None):\n\t\treturn self.path_from(self.ctx.launch_node())", "def get_command_name(self) -> str:\n pass", "def get_command_path(command):\n def excutable(command_path):\n return os.path.isfile(command_path) and os.access(command_path, os.X_OK)\n\n for path in os.environ[\"PATH\"].split(os.pathsep):\n command_path = os.path.join(path, command)\n if excutable(command_path):\n return command_path\n\n return None", "def get_command(self):\n return self.command", "def real_path(self):\n\t\treturn self.args[0]", "def getCommandLine():\n import sys, os\n cmdline = os.path.abspath(sys.argv[0])\n for elem in sys.argv[1:]:\n cmdline += ' ' + ecohydrolib.util.getAbsolutePathOfItem(elem)\n return cmdline", "def get_command(self):\n\n if self.pc >= len(self.commands):\n raise ValueError('unexpected end of program')\n return self.commands[self.pc]", "def path(self):\n return pjoin(self._dj._jobsdir, self._status, self.full_name())", "def runner_path():\n git_base = os.popen('git rev-parse --show-toplevel').read().strip()\n return os.path.join(git_base, RUNNER_SCRIPT_BASENAME)", "def executable_path(self):\n prepend = self._active_environment(ActiveEnvironment).prepend\n return prepend.get(\"PATH\", \"\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }