| query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | negatives (list, lengths 19–20) | metadata (dict) |
|---|---|---|---|
Return RMSLE from the prediction and the expected answer.
|
def get_RMSLE(pred, truth):
assert len(pred) == len(truth)
diff_vect = np.log(pred + 1) - np.log(truth + 1)
diff_sum = np.sum(np.power(diff_vect, 2))
return np.sqrt(diff_sum / len(pred))
|
[
"def calc_rmsle(y: np.ndarray, y_hat: np.ndarray) -> float:\n pass",
"def reserrorcalc(test_set, model):\n # Extracting X\n X = test_set[:,:-1]\n\n # Extracting labels\n Y = test_set[:,-1]\n residual_err = sum((model.predict(X) - Y) ** 2)\n return residual_err",
"def RMSE(prediction, actual):\n #subtract = np.multiply(actual - prediction, abs(np.sign(actual)))\n #N = np.sum(abs(np.sign(actual)))\n #MSE = np.sum(np.square(np.multiply(actual - prediction, abs(np.sign(actual)))))/(np.sum(abs(np.sign(actual))))\n RMSE = np.sqrt(np.sum(np.square(np.multiply(actual - prediction, abs(np.sign(actual)))))/(np.sum(abs(np.sign(actual)))))\n return RMSE",
"def l2err(prediction,ytest):\n return np.linalg.norm(np.subtract(prediction,ytest))",
"def rmsle(y, y_):\n assert len(y) == len(y_)\n return np.sqrt(np.mean((np.log(1 + y) - np.log(1 + y_))**2))",
"def l2err(prediction, ytest):\n return np.linalg.norm(np.subtract(prediction, ytest))",
"def mle(data):\n\t\"\"\" return (tau, sigma ) \"\"\"\n\tcount_state_state,count_state_word,all_words = counts(data)\n\tsmooth_denom = len(all_words)\n\tsigma = get_sigma(count_state_state)\n\ttau = get_tau(count_state_word, smooth_denom)\n\treturn (tau,sigma)",
"def evaluate(self, X_test, y_test):\n y_pred = self.pipeline.predict(X_test)\n rmse = round(compute_rmse(y_pred, y_test), 2)\n self.mlflow_log_metric(\"rmse\", rmse)\n return rmse",
"def eval_rmse(pred, truth):\n logger.info(\"Evaluating RMSE...\")\n pred = pred.mean(0)\n error = pred - truth\n return (error * error).reshape(truth.shape[:-2] + (-1,)).mean(-1).sqrt()",
"def acc_rmse(y_pred,y_test):\n print('计算 决定系数...')\n\n error = []\n for i in range(len(y_pred)):\n error.append(y_pred[i] - y_test[i]) # 预测值 与 实际值误差\n\n squaredError = []\n for val in error:\n squaredError.append(val * val) # target-prediction之差平方\n\n RMSE = np.sqrt(sum(squaredError) / len(squaredError))\n return RMSE",
"def calculateRmse(self, actualData, predictedData):\n\n mse = mean_squared_error(actualData, predictedData)\n return sqrt(mse)",
"def rmsle(self):\n return self._metric_json['rmsle']",
"def evaluate(self, X_test, y_test):\n \n y_pred = self.pipeline.predict(X_test)\n test_rmse = compute_rmse(y_pred, y_test)\n print(\"test rmse:\", test_rmse)\n return test_rmse",
"def RMSE(target_actual, target_predicted):\n return sqrt(mean_squared_error(target_actual, target_predicted))",
"def wrmse(self):\n sig1 = self.sig1\n sig2 = self.sig2\n w1 = self.w1\n w2 = self.w2\n w12 = self.w12\n if w12 is None:\n if np.any(w1) and np.any(w2): # check for division w/ 0\n top = np.sum(w1 * w2 * (sig1 - sig2)**2)\n down = np.sum(w1 * w2)\n wrmse = np.sqrt(top / down)\n else:\n wrmse = 100 # random big number\n else:\n top = np.sum(w12 * (sig1 - sig2)**2)\n down = np.sum(w12)\n wrmse = np.sqrt(top / down)\n return wrmse",
"def evaluate(self, X_test, y_test):\n y_pred = self.pipeline.predict(X_test)\n rmse = compute_rmse(y_pred, y_test)\n return round(rmse, 2)",
"def evaluate_model_rmse_on_business(model, features, labels, train_size):\n features = features[(train_size-min_months):, :]\n labels = labels[(train_size-min_months):]\n all_X, all_Y = reshape_inputs(features, labels)\n predicted = np.squeeze(model.predict(all_X))\n train_rmse = compute_rmse(predicted[:min_months], labels[:min_months])\n test_rmse = compute_rmse(predicted[min_months:], labels[min_months:])\n test_relative_err = compute_relative_error(predicted[min_months:], labels[min_months:])\n #can compute relative error here too\n return {'train_rmse':train_rmse, 'test_rmse':test_rmse, 'predicted':predicted,'test_relative_err':test_relative_err ,'features':features, 'labels':labels}",
"def mse(result, expected):\n total_square_sum = 0\n for index1 in range(0, len(result)):\n total_square_sum += (result[index1] - expected[index1]) ** 2\n return total_square_sum / float(len(result))",
"def mse_r2(true, predicted):\n # Reshaping set of images\n # n_imgs, nx, ny = true.shape\n # true = np.reshape(true, (n_imgs, nx*ny))\n # predicted = np.reshape(predicted, (n_imgs, nx*ny))\n nx = 33\n ny = 33\n\n # Compute MSE\n se = np.sum((true - predicted)**2, axis=1)\n mse = se*(nx*ny)**-1\n\n # Compute R squared\n mean = np.mean(true, axis=1)\n r2 = 1 - se*np.sum((true - np.expand_dims(mean, axis=1))**2, axis=1)**-1\n\n return mse, r2"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Extract diagonal of ``(Jᵀ S) (Jᵀ S)ᵀ`` where ``J`` is the bias Jacobian.
|
def extract_bias_diagonal(module: Linear, S: Tensor, sum_batch: bool = True) -> Tensor:
additional_axes = list(range(2, module.input0.dim()))
if additional_axes:
JS = S.sum(additional_axes)
else:
JS = S
equation = f"vno->{'' if sum_batch else 'n'}o"
return einsum(equation, JS**2)
|
[
"def extract_diagonal(self, matrix: Array) -> Tuple[Array, List[Error]]:\n diagonal = matrix[:, :self.J].diagonal()\n return diagonal, []",
"def diag(B,s,H,ia,ib,ic,chia,chic):\n # Get a guess for the ground state based on the old MPS\n d = B[0].shape[0]\n theta0 = np.tensordot(np.diag(s[ia]),np.tensordot(B[ib],B[ic],axes=(2,1)),axes=(1,1))\n theta0 = np.reshape(np.transpose(theta0,(1,0,2,3)),((chia*chic)*(d**2)))\n\n # Diagonalize Hamiltonian\n e0,v0 = arp.eigsh(H,k=1,which='SA',return_eigenvectors=True,v0=theta0,ncv=20)\n \n return np.reshape(v0.squeeze(),(d*chia,d*chic)),e0",
"def diagonal_matrix(self):\n A = self.symmetric_matrix()\n B = self.base_ring()\n basis = [vector(B,{2:0,i:1}) for i in range(3)]\n for i in range(3):\n zerovalue = (basis[i]*A*basis[i].column()== 0)\n if zerovalue:\n for j in range(i+1,3):\n if basis[j]*A*basis[j].column() != 0:\n b = basis[i]\n basis[i] = basis[j]\n basis[j] = b\n zerovalue = False\n if zerovalue:\n for j in range(i+1,3):\n if basis[i]*A*basis[j].column() != 0:\n basis[i] = basis[i]+basis[j]\n zerovalue = False\n if not zerovalue:\n l = (basis[i]*A*basis[i].column())\n for j in range(i+1,3):\n basis[j] = basis[j] - \\\n (basis[i]*A*basis[j].column())/l * basis[i]\n T = Matrix(basis).transpose()\n return T.transpose()*A*T, T",
"def task6_diagonal(matrix):\n return np.diagonal(matrix)",
"def get_diagonal_down(matrix, start_row):\n\tsequence = []\n\tcol = 0\n\tfor row in matrix[start_row:]:\n\t if col < len(matrix[0]):\n\t\t\tsequence.append(row[col])\n\t\t\tcol += 1\n\treturn sequence\n\t\t\n\t\"\"\"Don't know if this can be achieved using list comprehension \n\t- the following statement just returns all numbers of the rows, starting with start_row.\"\"\"\n\t#return [row[col] for row in matrix[start_row:] for col in range(len(matrix[0]))]",
"def get_fwd_diag(b):\r\n return [b[0][2], b[1][1], b[2][0]]",
"def diagonal(matrix):\n if sp.sparse.issparse(matrix):\n diag = np.array(matrix.diagonal())\n else:\n diag = np.diagonal(matrix).copy()\n return diag",
"def _diagf(X):\n return np.triu(np.tril(X))",
"def diag(x):\r\n xx = as_tensor_variable(x)\r\n if xx.type.ndim == 1:\r\n return alloc_diag(xx)\r\n elif xx.type.ndim == 2:\r\n return extract_diag(xx)\r\n else:\r\n raise TypeError('diag requires vector or matrix argument', x)",
"def get_diagonal(matrix):\n\tdegree_vector = tf.reduce_sum(matrix, 1)\n\tdiagonal = tf.diag(degree_vector, name = 'diagonal')\n\treturn diagonal",
"def hessian_diag(\n loss: LossFun,\n params: Any,\n inputs: jnp.DeviceArray,\n targets: jnp.DeviceArray,\n) -> jnp.DeviceArray:\n vs = jnp.eye(ravel(params).size)\n comp = lambda v: jnp.vdot(v, ravel(hvp(loss, v, params, inputs, targets)))\n return jax.vmap(comp)(vs)",
"def _diagk(X, k):\n X = np.asanyarray(X)\n s = X.shape\n if len(s) > 1:\n D = np.diag(X, k)\n else:\n D = np.array([])\n\n return D",
"def get_off_diagonal(matrix):\n\toff_diag = scipy.array(matrix, dtype=matrix.dtype)\n\toff_diag[scipy.diag_indices_from(matrix)] = 0\n\treturn off_diag",
"def diag(expr):\n expr = AffAtom.cast_to_const(expr)\n if expr.is_vector():\n return diag_vec(vec(expr))\n elif expr.ndim == 2 and expr.shape[0] == expr.shape[1]:\n return diag_mat(expr)\n else:\n raise ValueError(\"Argument to diag must be a vector or square matrix.\")",
"def build_linear_diags(self):\n N = self.N\n dx = self.dx\n j = self._j # Index of the mid-point\n\n diags = np.zeros((2*self._j+1, self.N))\n\n # Advection term\n cff1 = -1/(2*dx)\n\n # Need to stagger these diagonals so lower and upper bands are symmetric\n diags[j-1, :-2] += -1*cff1*self.c[2:]\n diags[j+1, :] += 1*cff1*self.c[:]\n\n # Sponge term\n x = np.arange(0,N*dx,dx)\n rdist = x[-1] - x # Distance from right boundary\n spongefac = -np.exp(-6*rdist/self.spongedist)/self.spongetime\n diags[j,:] += spongefac \n\n return diags",
"def _calculate_diag(self) -> np.ndarray:\n diags = np.zeros(self.n)\n for i in range(self.n):\n diags[i] = 1 / np.linalg.norm(self.X[:, i] - self.W @ self.H[:, i])\n return np.diag(diags)",
"def diag(diag_elements):\n return tf.diag(tf.reshape(diag_elements, [-1]))",
"def _diag_neighbors(self, point):\n return [point - self.NS - 1, \n point - self.NS + 1, \n point + self.NS - 1, \n point + self.NS + 1]",
"def _diag_neighbors(self, point):\n return [point-self.NS-1, point-self.NS+1,\n point+self.NS-1, point+self.NS+1]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the path of the Bohrium systemwide configuration file
|
def config_file(self):
return join_path(self.prefix.etc.bohrium, "config.ini")
|
[
"def getConfigPath():\n if sys.platform == 'linux':\n configpath = os.path.normpath(os.path.expanduser('~/.config/phobos'))\n elif sys.platform == 'darwin':\n configpath = os.path.normpath(os.path.expanduser('~/Library/Application Support/phobos'))\n elif sys.platform == 'win32':\n configpath = os.path.normpath(os.path.expanduser('~/AppData/Roaming/phobos'))\n else:\n configpath = 'ERROR: {0} not supported,'.format(sys.platform)\n return configpath",
"def get_config_file_location():\n\n return './' + CONFIG_FILE_NAME",
"def config_file_and_path():\n return str(rmfriend_dir() / 'config.cfg')",
"def path():\n click.echo(_user_config_file_path())",
"def config_path(self):\n if os.path.exists(self._config_path):\n if pyhocon.ConfigFactory.parse_file(self._config_path):\n return os.path.realpath(self._config_path)\n # TODO if string is url/git repo, download file locally first\n return None",
"def _get_path_to_monitor_user_config_file():\n # Get platform and hwsku path\n (platform_path, hwsku_path) = _get_path_to_platform_hwsku()\n\n # First check for the presence of the new 'port_config.ini' file\n config_file_path = \"/\".join([hwsku_path, MONITOR_USER_CFG_FILE_NAME])\n\n return config_file_path",
"def _find_config_path(self):\n for _dir in (os.environ['WinDir'], self.install_dir):\n path = os.path.join(_dir, 'Sandboxie.ini')\n if os.path.exists(path):\n return path\n return None",
"def get_config_path():\n\n root = os.path.dirname(os.path.abspath(__file__))[:-5]\n config_path = os.path.join(root, 'config.ini')\n\n return config_path",
"def get_config_path() -> Path:\n config = os.getenv('TOM_CONFIG', '')\n return Path(config)",
"def _rc_conf_path(self):\n return f\"{self.path}/root/etc/rc.conf\"",
"def get_config_file():\n home_path = path_join(expanduser('~'), CONFIG_FILENAME)\n cwd_path = path_join(getcwd(), CONFIG_FILENAME)\n if isfile(home_path):\n return home_path\n elif isfile(cwd_path):\n return cwd_path\n return None",
"def _app_config_file() -> str:\n if 'AISCALATOR_HOME' in os.environ:\n home = os.environ['AISCALATOR_HOME']\n file = os.path.join(home, \"config\", \"aiscalator.conf\")\n if os.path.exists(file):\n return file\n return os.path.join(os.path.expanduser(\"~\"), '.aiscalator',\n 'config', 'aiscalator.conf')",
"def system_conf_dir(self):\n return buildconfig.SPD_CONF_PATH",
"def _config_path(self) -> str:\n return self._config_path_for(dir_path=self._dir_path)",
"def config_path(self):\n\n return os.path.join(self.git.toplevel_path, CONFIG_FILENAME)",
"def get_global_config_path():\n\n return \"/etc/dapsenv/dapsenv.conf\"",
"def _get_config_file_path() -> str:\n if os.name == \"nt\":\n logger.debug(\"Windows OS detected\")\n path = os.path.join(os.path.expandvars(\"%LOCALAPPDATA%\"), \"dragonchain\", \"credentials\")\n else:\n logger.debug(\"Posix OS detected\")\n path = os.path.join(os.path.expanduser(\"~\"), \".dragonchain\", \"credentials\")\n logger.debug(\"Credentials file path: {}\".format(path))\n return os.fsdecode(path)",
"def config_path():\n dir_ = os.path.dirname(__file__)\n demo_dir = os.path.join(dir_, '../..')\n return os.path.join(demo_dir, 'mike_dev.ini')",
"def search_config(cls):\n filename = 'steinbit.cfg'\n defaults = [\n os.path.join(os.getcwd(), filename),\n os.path.expanduser('~/' + filename),\n os.getenv('STEINBIT', default=None)]\n for path in defaults:\n if path and os.path.isfile(path):\n return path\n return None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generate a list of ranks that get harder to obtain as they approach the maximum
|
def generate_ranks(maximum: int, steps: int) -> List[int]:
ranks = []
for i in range(steps):
ranks += [maximum]
maximum = int(maximum * 0.75)
RANK_CUTOFFS = list(reversed(ranks))
return RANK_CUTOFFS
|
[
"def get_rankings(elos):\n #get expected scores (or probabilities)\n expected_scores = get_win_prob(elos)\n\n rankings = [] #will have rankings in order (1st, 2nd, ...)\n for i in range(len(elos)): #determine 1st, then 2nd, etc. in order\n total_weight = 1\n for player_index in rankings: \n total_weight -= expected_scores[player_index]\n for j in range(len(elos)):\n #if this is the last possible player, then just add it \n #if j == (len(elos) - 1):\n # rankings.append(j)\n # break\n if not j in rankings: #if this player has not won already\n if random.random() < ( expected_scores[j] / total_weight ): \n rankings.append(j)\n break\n total_weight -= expected_scores[j]\n return rankings",
"def findRanks(toBeRanked, values):\n\treturn list(map(lambda e: findRank(e, values), toBeRanked))",
"def get_rank_probabilities(n: int) -> List[float]:\n alpha = 3.5\n ranks = [1 / i**alpha for i in range(1, n + 1)]\n\n return [r / sum(ranks) for r in ranks]",
"def compute_ranks(x):\n assert x.ndim == 1\n ranks = np.empty(len(x), dtype=int)\n ranks[x.argsort()] = np.arange(len(x))\n # print(\"RANKS\")\n return ranks",
"def ranks(self):\n\n if self.solved:\n if self.boundR != None:\n cnstrs = self.resultA[-1]\n else:\n cnstrs = self.resultA\n return [Linalg.rank(a, self.eps, self.eps) for a in cnstrs]\n else:\n raise ValueError('The problem has not been solved yet so the ranks of the constraints can not be evaluated.')",
"def findRelativeRanks(self, nums: List[int]) -> List[str]:\n scores = sorted(nums, reverse=True)\n rewards = {}\n for i, score in enumerate(scores):\n if i == 0:\n reward = 'Gold Medal'\n elif i == 1:\n reward = 'Silver Medal'\n elif i == 2:\n reward = 'Bronze Medal'\n else:\n reward = str(i + 1)\n rewards[score] = reward\n return [rewards[score] for score in nums]",
"def card_ranks(cards):\r\n assignValue = {\r\n 'T': 10,\r\n 'J': 11,\r\n 'Q': 12,\r\n 'K': 13,\r\n 'A': 14\r\n }\r\n ranks = []\r\n for r, s in cards:\r\n if assignValue.has_key(r):\r\n ranks.append(assignValue[r])\r\n else:\r\n ranks.append(int(r))\r\n #ranks = [assignValue[] if r in assignValue else int(r) for r,s in cards]\r\n ranks.sort(reverse=True)\r\n return [5, 4, 3, 2, 1] if (ranks == [14, 5, 4, 3, 2]) else ranks",
"def get_ranks(cards):\n #cards = numeric_ranks(cards) # Convert rank letters to numbers (e.g. J to 11)\n return [int(card[0:-1]) for card in cards]",
"def get_rank(input_list):\n tmp = (-np.array(input_list)).argsort()\n ranks = np.empty_like(tmp)\n ranks[tmp] = np.arange(len(input_list))\n return ranks + 1",
"def ranks(self):\n return self._rk",
"def makeRankings(roulette):\n\tranks = copy.deepcopy(roulette)\n\tranks.sort(key = lambda x: -x['val'])\n\tratio = 0.5\n\tfor i,x in enumerate(ranks):\n\t\tx['weight'] = ratio**(i+1)\n\tranks.sort(key = lambda x: -x['weight'])\n\treturn ranks",
"def poker(hands):\r\n return allmax(hands, key=hand_rank)",
"def MapToRanks(t):\n # pair up each value with its index\n pairs = enumerate(t)\n \n # sort by value\n sorted_pairs = sorted(pairs, key=lambda pair: pair[1])\n\n # pair up each pair with its rank\n ranked = enumerate(sorted_pairs)\n\n # sort by index\n resorted = sorted(ranked, key=lambda trip: trip[1][0])\n\n # extract the ranks\n ranks = [trip[0]+1 for trip in resorted]\n return ranks",
"def recommendation_ranking(self):\n iu = self.final_recommendation_score_matrix()\n new_iu = []\n for row in iu:\n li = []\n temp = row\n if self.product != \"dist\":\n temp = -np.sort(-temp)\n for element in row:\n li.append(binary_search_opp(temp,element)+1) \n else:\n temp = np.sort(temp)\n for element in row:\n li.append(np.searchsorted(temp,element)+1)\n new_iu.append(li)\n return np.array(new_iu)",
"def compute_ranks(x):\n assert x.ndim == 1\n ranks = np.empty(len(x), dtype=int)\n ranks[x.argsort()] = np.arange(len(x))\n return ranks",
"def mrr(ranks):\n return sum([1/v for v in ranks])/len(ranks)",
"def findRelativeRanks(self, nums):\n\t\tsort_nums = sorted(nums,reverse=True)\n\t\tfor idx,num in enumerate(sort_nums):\n\t\t\tif idx+1 == 1:\n\t\t\t\tnums[nums.index(num)] = \"Gold Medal\"\n\t\t\telif idx + 1 == 2:\n\t\t\t\tnums[nums.index(num)] = \"Silver Medal\"\n\t\t\telif idx + 1 == 3:\n\t\t\t\tnums[nums.index(num)] = \"Bronze Medal\"\n\t\t\telse:\n\t\t\t\tnums[nums.index(num)] = idx+1\n\t\treturn nums",
"def climbingLeaderboard(scores, alice):\n unique_scores = list({score: None for score in scores}.keys())[::-1]\n ranks = []\n # last_score_index = 0\n for game_score in alice:\n for i, score in enumerate(unique_scores):\n if score > game_score:\n ranks.append(len(unique_scores) - i + 1)\n break\n elif score == game_score:\n ranks.append(len(unique_scores) - i)\n break\n elif i == len(unique_scores) - 1:\n ranks.append(1)\n else:\n continue\n\n return ranks",
"def get_rankings(game, games, scale=False):\n\n points = dict()\n for g in games:\n if g[\"team_H\"] not in points:\n points[g[\"team_H\"]] = 0\n if g[\"team_A\"] not in points:\n points[g[\"team_A\"]] = 0\n if int(g[\"score\"][\"final\"][\"home\"]) > int(g[\"score\"][\"final\"][\"away\"]):\n points[g[\"team_H\"]] += 3\n elif int(g[\"score\"][\"final\"][\"home\"]) < int(g[\"score\"][\"final\"][\"away\"]):\n points[g[\"team_A\"]] += 3\n else:\n points[g[\"team_H\"]] += 1\n points[g[\"team_A\"]] += 1\n if game[\"team_H\"] not in points:\n points[game[\"team_H\"]] = 0\n if game[\"team_A\"] not in points:\n points[game[\"team_A\"]] = 0\n rank_team_H = 1\n rank_team_A = 1\n for t in points:\n if points[t] > points[game[\"team_H\"]] and t != game[\"team_H\"]:\n rank_team_H += 1\n if points[t] > points[game[\"team_A\"]] and t != game[\"team_A\"]:\n rank_team_A += 1\n if scale:\n nb_teams = len(points)\n rank_team_H /= float(nb_teams)\n rank_team_A /= float(nb_teams)\n return [rank_team_H] + [rank_team_A]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the rank for a given number of points
|
def get_rank(points: int, cutoffs: List[int]) -> int:
rank = 0
for i, cutoff in enumerate(cutoffs):
if points < cutoff:
if i == 0:
break
else:
rank = i - 1
break
else:
rank = RANK_COUNT - 1
return rank
|
[
"def calculate_league_points(rank: int):\n points_dict = {\n 1: 10,\n 2: 7,\n 3: 5,\n 4: 3,\n 5: 1\n }\n if rank > 5:\n return 0\n else:\n return points_dict[rank]",
"def _get_rank(self,fitness):\n # infact you can get the order or rank by only once sort.\n rank=fitness[:,0].argsort().argsort() # [n]\n return rank",
"def get_ranked_points(zpoints, dsq):\n pos_map = calc_positions(zpoints, dsq)\n rpoints = calc_ranked_points(pos_map, dsq)\n return rpoints",
"def rank(self):\n return self.n.cardinality()",
"def rank(self):\n return 0",
"def get_num_hit_rank(boxes_truth, boxes_pred, rank):\n\n def is_hit(box_truth, box_pred):\n return is_label_match_rank(box_truth, box_pred, rank)\n\n return get_num_hit(boxes_truth, boxes_pred, is_hit)",
"def get_rank() -> int:\n return _env.rank",
"def prufer_rank(self):\n r = 0\n p = 1\n for i in range(self.nodes - 3, -1, -1):\n r += p*self.prufer_repr[i]\n p *= self.nodes\n return r",
"def get_rank(self, score, answer, entities_space, num_ent):\n if answer not in entities_space:\n rank = num_ent\n else:\n answer_prob = score[entities_space.index(answer)]\n score.sort(reverse=True)\n rank = score.index(answer_prob) + 1\n return rank",
"def get_rank(self):\r\n return self.rank",
"def recall_at_n(ranks, n=3):\n num = len([rank for rank in ranks if rank <= n])\n return num / len(ranks)",
"def rank(x, i):\n a = x[i]\n return sum(int(b < a or (b == a and j < i)) for j, b in enumerate(x))",
"def get_rank(input_list):\n tmp = (-np.array(input_list)).argsort()\n ranks = np.empty_like(tmp)\n ranks[tmp] = np.arange(len(input_list))\n return ranks + 1",
"def calculate_ranks(true_entity_score, all_scores):\n assert len(true_entity_score.shape) == 2\n assert len(all_scores.shape) == 2\n\n all_scores = all_scores > true_entity_score\n true_rank = all_scores.sum(dim=1) + 1\n\n return true_rank",
"def points(self):\r\n\t\tif self.rank() >= 9:\r\n\t\t\treturn self.point_sysm[self.rank()]\r\n\t\telse:\r\n\t\t\treturn 0",
"def get_total_ranking(self):\n output = 0\n for i in self.__ranking:\n output += i\n return output",
"def tied_rank(x):\n sorted_x = sorted(zip(x, range(len(x))))\n r = [0 for k in x]\n cur_val = sorted_x[0][0]\n last_rank = 0\n for i in range(len(sorted_x)):\n if cur_val != sorted_x[i][0]:\n cur_val = sorted_x[i][0]\n for j in range(last_rank, i):\n r[sorted_x[j][1]] = float(last_rank + 1 + i) / 2.0\n last_rank = i\n if i == len(sorted_x) - 1:\n for j in range(last_rank, i + 1):\n r[sorted_x[j][1]] = float(last_rank + i + 2) / 2.0\n return r",
"def get_ranking_team(points_dict, team):\n if team not in points_dict:\n points_dict[team] = 0\n rank_team = 1\n for t in points_dict:\n if points_dict[t] > points_dict[team] and t != team:\n rank_team += 1\n return rank_team",
"def ranks(self):\n return self._rk"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
moves the target further out as a % of the screen
|
def move_target(self, distance_adjustment):
self.x = float(self.screen_rect.right - self.width)
self.x = self.x * distance_adjustment
self.rect.x = self.x
|
[
"def assign_upLimit():\r\n player.rect.y = 25",
"def move_target(self):\n\n global N\n\n if self.target[0] < 0.1 or self.target[0] > 1.1:\n N *= -1\n\n self.target[0] += N * 0.002\n self.target[1] += N * 0.002",
"def assign_downLimit():\r\n player.rect.y = 100",
"def target(self):\r\n # Converti la position réelle en unité moteur\r\n self.move_to(float(self.IHM.lineEdit_Target.text()))",
"def move_finger5(percent):\n percent = _clamp_percent(percent)\n _send_request(f5=percent)",
"def _walk(self):\n new_pos = self.rect.move((self.move, 0)) # move 9 pixel to the right per frame\n if self.rect.left < self.area.left or self.rect.right > self.area.right:\n self.move = -self.move # move to the opposite direction when the chimp position exceeds the screen\n new_pos = self.rect.move((self.move, 0))\n self.image = pygame.transform.flip(\n self.image, 1, 0\n ) # mirror the chimp to make it looks like turning around\n self.rect = new_pos",
"def move_finger4(percent):\n percent = _clamp_percent(percent)\n _send_request(f4=percent)",
"def half_down(self):\r\n self.set_position(3, self.currentPosition[3] - self.downOffset // 2, 500, wait=True)\r\n sleep(0.4)",
"def miningCameraUp(self):\n pyautogui.moveRel(0, -235, 0.5)",
"def set_fan_target(self, target_percent):\n self.__fan_target = target_percent\n self.fan_speed_dac.set_output_scaled(1.0 - (target_percent / 100.0))",
"def assign_rightLimit():\r\n player.rect.x = WIDTH - 75",
"def move_finger3(percent):\n percent = _clamp_percent(percent)\n _send_request(f3=percent)",
"def nextTarget(self):\n if self.stepsToTarget:\n t = self.stepsToTarget.pop(0)\n self.currentTarget = pygame.Vector2(t[0]+0.5, t[1]+1)*TILE_WIDTH\n movementX = self.currentTarget.x - posToVect(self.pos).x\n self.direction = 0 if movementX > 0 else 1 if movementX < 0 else self.direction\n self.pos = t\n else:\n self.stepsToTarget = None\n self.currentTarget = None\n self.finalTarget = None",
"def slew_to_target(self):\n separation_limit = 0.5 * u.degree\n\n if self.has_autoguider and self.autoguider.is_guiding:\n try:\n self.autoguider.stop_guiding()\n except Exception as e:\n self.logger.warning(\"Problem stopping autoguide\")\n\n # Slew to target\n self.mount.slew_to_target()\n\n self.status() # Send status update and update `is_tracking`\n\n # WARNING: Some kind of timeout needed\n while not self.mount.is_tracking and self.mount.distance_from_target() >= separation_limit:\n self.logger.debug(\"Slewing to target\")\n time.sleep(1)\n\n # Turn on autoguiding\n if self.has_autoguider:\n try:\n self.autoguider.autoguide()\n except error.PanError:\n self.logger.warning(\"Continuing without guiding\")",
"def set_movement_ratio(val):\n monospace.Ship.drag_ratio = 1 + val / 10",
"def moves_up(self):\n st.pygame.draw.rect(\n st.SURFACE,\n st.BLACK,\n (self.x,\n self.y,\n self.width,\n self.height))\n self.y -= self.speed",
"def move_up(self):\r\n if self.rect.top > 0:\r\n self.rect.top -= self.speed",
"def move_up(self):\n self.move_measurement(-1)",
"def droite(self):\n self.__y += 1\n if self.__y > 10:\n self.__y = 10"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Checks top to target to see if it hit top of screen
|
def check_top(self):
if self.rect.top <=0:
self.target_direction = 1
|
[
"def _check_autos_top(self):\n\t\tscreen_rect = self.screen.get_rect()\n\t\tfor auto in self.autos.sprites():\n\t\t\tif auto.rect.top <= screen_rect.top:\n\t\t\t\t# Treat this the same as if the pigeon got hit.\n\t\t\t\tself._pigeon_hit()\n\t\t\t\tbreak",
"def is_on_top(self):\n return self.own_order == self._top_manager_order",
"def did_collide_top_bottom(self):\n\n y_coord = self.getY()\n return y_coord < 0 or (y_coord + self.ball_size[1]) > Configuration.windowHeight",
"def check_edges(self):\n\t\tscreen_rect = self.screen.get_rect()\n\t\tif self.rect.top <= 0:\n\t\t\treturn True",
"def check_above_surface(self, obj):\n inv_world_matrix = self.surface.matrix_world.inverted()\n\n for point in get_bounds(obj):\n ray_start = inv_world_matrix @ (point + self.up_direction)\n ray_direction = inv_world_matrix @ (self.surface.location + (-1 * self.up_direction))\n\n is_hit, hit_location, _, _ = self.surface.ray_cast(ray_start, ray_direction)\n\n if not is_hit:\n return False\n\n return True",
"def top_visible(self) -> bool:\n return self.vertical_scroll == 0",
"def check_in_screen(self):\n if self.rect.colliderect(screen_rect) and not self.moving:\n return True\n return False",
"def is_target(self):\n\t\treturn self.window and self.window.target is self",
"def at_target(self):\n return self.location == self.target_location",
"def is_target(top_container):\n\tif '.' not in top_container.get('barcode', ''):\n\t\treturn True\n\telse:\n\t\treturn False",
"def check_ball_on_target():\n\n pass",
"def test_top(self):\n self.stack.top()",
"def always_top(self) -> bool:\n return bool(self.tk_ref.wm_attributes('-topmost'))",
"def is_top(module_index):\n return ( (module_index>=736)&(module_index<832) )",
"def IsTopSnappable(self):\r\n \r\n return self.HasFlag(self.optionTopSnapped)",
"def testPsychOnTop(self):\n attr = self.session.create_visit_attr()\n\n self.util.intTypeTest(self, attr, \"on_top\")\n\n self.util.intPropertyTest(self, attr, \"on_top\")",
"def check_edges(self):\r\n screen_rect = self.screen.get_rect()\r\n if self.rect.bottom >= screen_rect.bottom:\r\n return True",
"def is_top_corner(self) -> bool:\n return self.id == 0",
"def HasGripperTop(self):\r\n\r\n return self.HasFlag(self.optionGripperTop)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Setup strategies to use by the validator. These strategies can be provided
|
def _using(*args, validator: "DictValidator") -> "DictValidator":
def setup_strategy(validator, strategy) -> "DictValidator":
if isinstance(strategy, SortingStrategy):
validator.sorting = strategy
elif isinstance(strategy, FilteringStrategy):
validator.filtering = strategy
elif isinstance(strategy, PrintingStrategy):
validator.printing = strategy
else:
raise CertumException("The strategy provided for the validator is unknown.")
return validator
for arg in args:
if isinstance(arg, list):
for strategy in arg:
validator = setup_strategy(validator, strategy)
elif isinstance(arg, Strategy):
validator = setup_strategy(validator, arg)
else:
raise CertumException("The strategy provided for the validator is unknown.")
return validator
|
[
"def initialize_location_strategies(self):\n locator_manager.register_locators(\"sf\", lex_locators)\n locator_manager.register_locators(\"text\", \"Salesforce.Locate Element by Text\")\n locator_manager.register_locators(\"title\", \"Salesforce.Locate Element by Title\")\n\n # This does the work of actually adding all of the above-registered\n # location strategies, plus any that were registered by keyword\n # libraries.\n locator_manager.add_location_strategies()",
"def strategies():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 35, 20, 10]\n sources = ['his', 'our', 'dna']\n for limit in limits:\n for source in sources:\n strategies.extend(\n generate_meta_strategy_pair(RFindStrategy,\n *(limit, source)))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # # Add HPS Dojo strategies\n # strategies.extend(\n # generate_meta_strategy_pair(HPSDojoStrategy))\n\n # # Add testing please ignore strategies\n # strategies.extend(\n # generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n return strategies",
"def strategy(self):\n raise NotImplementedError",
"def set_strategy(self, strategy):\n self.strategy = strategy",
"def set_validators(self):\n self.tp_t_input.setValidator(self.float_validator)\n self.tp_p_input.setValidator(self.float_validator)\n\n self.ps_p_input.setValidator(self.float_validator)\n self.ps_s_input.setValidator(self.float_validator)\n self.ps_t_guess.setValidator(self.float_validator)\n\n self.ph_p_input.setValidator(self.float_validator)\n self.ph_h_input.setValidator(self.float_validator)\n self.ph_tol.setValidator(self.float_validator)\n self.ph_t_guess.setValidator(self.float_validator)\n\n self.uv_u_input.setValidator(self.float_validator)\n self.uv_v_input.setValidator(self.float_validator)\n self.uv_t_guess.setValidator(self.float_validator)\n self.uv_p_guess.setValidator(self.float_validator)",
"def setup_scheme(self):\n # TODO: apply_bcs should be False for advection but this means\n # tests with KGOs fail\n apply_bcs = True\n self.setup_equation(self.equation)\n for _, scheme in self.active_transport:\n scheme.setup(self.equation, apply_bcs, transport)\n self.setup_transporting_velocity(scheme)\n\n apply_bcs = True\n for _, scheme in self.diffusion_schemes:\n scheme.setup(self.equation, apply_bcs, diffusion)\n for _, scheme in self.physics_schemes:\n apply_bcs = True\n scheme.setup(self.equation, apply_bcs, physics)",
"def init_strategies(self, agents = None, strategy = None):\n assert self.round == 0\n \n if isinstance(agents, Agent):\n agents = [agents]\n \n agents = agents or self.agents()\n \n for agent in agents:\n agent.change_strategy(\n 0, strategy or np.random.choice(self.strategy_list)\n )",
"def build_strategy(self):\n self.strategy_cls = self._build_strategy(parameters=None)",
"def _setup_deployment_strategy(self):\n self.strat_name = self.dc['physical_provisioner.deployment_strategy']\n if self.strat_name:\n # if there is a deployment strategy specified, get it and use it\n self.strategy = self.get_unique_doc(\n name=self.strat_name,\n schema=\"shipyard/DeploymentStrategy/v1\"\n )\n else:\n # The default behavior is to deploy all nodes, and fail if\n # any nodes fail to deploy.\n self.strat_name = 'all-at-once (defaulted)'\n self.strategy = _default_deployment_strategy()\n LOG.info(\"Strategy Name: %s has %s groups\",\n self.strat_name,\n len(self.strategy.get('groups', [])))",
"def set_strategies(players, strategies):\n if players.num_players != len(strategies):\n raise ValueError(\"len(strategies) must equal num_players\")\n for player, strategy in zip(players.tuple_, strategies):\n player.play = MethodType(strategy, player, Player)",
"def test_unexpected_strategy():\n assert strategies == {\n 'css': FindByCss,\n 'xpath': FindByXPath,\n 'tag': FindByTag,\n 'name': FindByName,\n 'text': FindByText,\n 'id': FindById,\n 'value': FindByValue,\n }",
"def strategy(func):\n strategies.append(func)\n return func",
"def setup_strategy(devices):\n import tensorflow as tf\n\n setup_devices(devices)\n\n return tf.distribute.MirroredStrategy()",
"def test__init__(self):\n assert Strategy(\n location=self.location,\n mint_quantities=[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n service_data=self.service_data,\n personality_data=self.personality_data,\n classification=self.classification,\n from_supply=self.from_supply,\n to_supply=self.to_supply,\n value=self.value,\n token_type=1,\n name=\"strategy\",\n skill_context=self.skill.skill_context,\n )",
"def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n for limit in limits:\n for source in sources:\n strategies.extend(\n generate_meta_strategy_pair(\n RFindStrategy,\n limit=limit,\n src=source,\n shenanigans=False,\n ))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n do_rotations = [True for _ in strategies]\n return strategies, do_rotations",
"def _get_strategies(self) -> Dict[str, str]:\n strategies = [method for method in dir(self) if STRATEGY_IDENTIFIER in method]\n\n if not strategies:\n logger.warning(\n \"There are no strategy provided. \"\n \"Make sure the implemented strategy methods \"\n \"start contain the '%s' term.\" % STRATEGY_IDENTIFIER\n )\n return {str(n_method): method for n_method, method in enumerate(strategies)}",
"def test_strategies(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.options.auto_fence = True\n self.supervisor.supvisors.options.conciliation_strategy = 1\n self.supervisor.supvisors.options.starting_strategy = 2\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n self.assertDictEqual({'auto-fencing': True, 'starting': 'MOST_LOADED',\n 'conciliation': 'INFANTICIDE'}, rpc.get_strategies())",
"def training_pattern_setup(self, **overrides):\n raise NotImplementedError",
"def validate_backend_setup(cls):"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This is an ADMM solver for the (Latent variable) Single Graphical Lasso problem (SGL). If ``latent=False``, this function solves
|
def ADMM_SGL(S, lambda1, Omega_0, Theta_0=np.array([]), X_0=np.array([]),
rho=1., max_iter=1000, tol=1e-7, rtol=1e-4, stopping_criterion='boyd',\
update_rho=True, verbose=False, measure=False, latent=False, mu1=None):
assert Omega_0.shape == S.shape
assert S.shape[0] == S.shape[1]
assert lambda1 > 0
assert stopping_criterion in ["boyd", "kkt"]
if latent:
assert mu1 is not None
assert mu1 > 0
(p, p) = S.shape
assert rho > 0, "ADMM penalization parameter must be positive."
# initialize
Omega_t = Omega_0.copy()
if len(Theta_0) == 0:
Theta_0 = Omega_0.copy()
if len(X_0) == 0:
X_0 = np.zeros((p, p))
Theta_t = Theta_0.copy()
L_t = np.zeros((p, p))
X_t = X_0.copy()
runtime = np.zeros(max_iter)
residual = np.zeros(max_iter)
status = ''
if verbose:
print("------------ADMM Algorithm for Single Graphical Lasso----------------")
if stopping_criterion == 'boyd':
hdr_fmt = "%4s\t%10s\t%10s\t%10s\t%10s"
out_fmt = "%4d\t%10.4g\t%10.4g\t%10.4g\t%10.4g"
print(hdr_fmt % ("iter", "r_t", "s_t", "eps_pri", "eps_dual"))
elif stopping_criterion == 'kkt':
hdr_fmt = "%4s\t%10s"
out_fmt = "%4d\t%10.4g"
print(hdr_fmt % ("iter", "kkt residual"))
##################################################################
### MAIN LOOP STARTS
##################################################################
for iter_t in np.arange(max_iter):
if measure:
start = time.time()
# Omega Update
W_t = Theta_t - L_t - X_t - (1 / rho) * S
eigD, eigQ = np.linalg.eigh(W_t)
Omega_t_1 = Omega_t.copy()
Omega_t = phiplus(beta=1 / rho, D=eigD, Q=eigQ)
# Theta Update
Theta_t = prox_od_1norm(Omega_t + L_t + X_t, (1 / rho) * lambda1)
# L Update
if latent:
C_t = Theta_t - X_t - Omega_t
# C_t = (C_t.T + C_t)/2
eigD1, eigQ1 = np.linalg.eigh(C_t)
L_t = prox_rank_norm(C_t, mu1/rho, D=eigD1, Q=eigQ1)
# X Update
X_t = X_t + Omega_t - Theta_t + L_t
if measure:
end = time.time()
runtime[iter_t] = end - start
# Stopping criterion
if stopping_criterion == 'boyd':
r_t,s_t,e_pri,e_dual = ADMM_stopping_criterion(Omega_t, Omega_t_1, Theta_t, L_t, X_t,\
S, rho, tol, rtol, latent)
# update rho
if update_rho:
if r_t >= 10*s_t:
rho_new = 2*rho
elif s_t >= 10*r_t:
rho_new = 0.5*rho
else:
rho_new = 1.*rho
# rescale dual variables
X_t = (rho/rho_new)*X_t
rho = rho_new
residual[iter_t] = max(r_t,s_t)
if verbose:
print(out_fmt % (iter_t,r_t,s_t,e_pri,e_dual))
if (r_t <= e_pri) and (s_t <= e_dual):
status = 'optimal'
break
elif stopping_criterion == 'kkt':
eta_A = kkt_stopping_criterion(Omega_t, Theta_t, L_t, rho * X_t, S, lambda1, latent, mu1)
residual[iter_t] = eta_A
if verbose:
print(out_fmt % (iter_t,eta_A))
if eta_A <= tol:
status = 'optimal'
break
##################################################################
### MAIN LOOP FINISHED
##################################################################
# retrieve status (partially optimal or max iter)
if status != 'optimal':
if stopping_criterion == 'boyd':
if (r_t <= e_pri):
status = 'primal optimal'
elif (s_t <= e_dual):
status = 'dual optimal'
else:
status = 'max iterations reached'
else:
status = 'max iterations reached'
print(f"ADMM terminated after {iter_t+1} iterations with status: {status}.")
### CHECK FOR SYMMETRY
if abs((Omega_t).T - Omega_t).max() > 1e-5:
warnings.warn(f"Omega variable is not symmetric, largest deviation is {abs((Omega_t).T - Omega_t).max()}.")
if abs((Theta_t).T - Theta_t).max() > 1e-5:
warnings.warn(f"Theta variable is not symmetric, largest deviation is {abs((Theta_t).T - Theta_t).max()}.")
if abs((L_t).T - L_t).max() > 1e-5:
warnings.warn(f"L variable is not symmetric, largest deviation is {abs((L_t).T - L_t).max()}.")
### CHECK FOR POSDEF
D = np.linalg.eigvalsh(Theta_t - L_t)
if D.min() <= 0:
print(
f"WARNING: Theta (Theta - L resp.) is not positive definite. Solve to higher accuracy! (min EV is {D.min()})")
if latent:
D = np.linalg.eigvalsh(L_t)
if D.min() < -1e-8:
print(f"WARNING: L is not positive semidefinite. Solve to higher accuracy! (min EV is {D.min()})")
if latent:
sol = {'Omega': Omega_t, 'Theta': Theta_t, 'L': L_t, 'X': X_t}
else:
sol = {'Omega': Omega_t, 'Theta': Theta_t, 'X': X_t}
if measure:
info = {'status': status, 'runtime': runtime[:iter_t+1], 'residual': residual[:iter_t+1]}
else:
info = {'status': status}
return sol, info
|
[
"def solve(self, Omega_0 = None, solver_params = dict(), tol = 1e-8, rtol = 1e-7, solver = 'admm', verbose = False):\n \n assert solver in [\"admm\"], \"Currently only the ADMM solver is supported as it is implemented for all cases.\"\n assert self.reg_params.get('lambda1') is not None, \"Regularization parameters need to be set first (at least lambda1), see function glasso_problem.set_reg_params()\"\n \n # if solver == \"ppdna\":\n # assert self.multiple,\"PPDNA solver is only supported for MULTIPLE Graphical Lassp problems.\"\n # assert not self.latent, \"PPDNA solver is only supported for problems without latent variables.\"\n # assert self.conforming, \"PPDNA solver is only supported for problems with conforming dimensions.\"\n \n \n self.set_start_point(Omega_0)\n self.tol = tol\n self.rtol = rtol\n \n self.solver_params = self._default_solver_params()\n self.solver_params.update(solver_params)\n self.solver_params[\"verbose\"] = verbose\n \n #print(f\"\\n Solve problem with {solver.upper()} solver... \\n \")\n \n if not self.multiple:\n if self.latent:\n sol, info = ADMM_SGL(S = self.S, lambda1 = self.reg_params['lambda1'], Omega_0 = self.Omega_0, \\\n tol = self.tol , rtol = self.rtol, latent = self.latent, mu1 = self.reg_params['mu1'], **self.solver_params)\n \n else:\n sol = block_SGL(S = self.S, lambda1 = self.reg_params['lambda1'], Omega_0 = self.Omega_0, \\\n tol = self.tol, rtol = self.tol, **self.solver_params)\n info = {}\n \n \n elif self.conforming: \n sol, info = ADMM_MGL(S = self.S, lambda1 = self.reg_params['lambda1'], lambda2 = self.reg_params['lambda2'], reg = self.reg,\\\n Omega_0 = self.Omega_0, latent = self.latent, mu1 = self.reg_params['mu1'],\\\n tol = self.tol, rtol = self.rtol, **self.solver_params)\n \n \n else:\n sol, info = ext_ADMM_MGL(S = self.S, lambda1 = self.reg_params['lambda1'], lambda2 = self.reg_params['lambda2'], reg = self.reg,\\\n Omega_0 = self.Omega_0, G = self.G, tol = self.tol, rtol = self.rtol,\\\n latent = self.latent, mu1 = self.reg_params['mu1'], **self.solver_params)\n \n \n # rescale\n if self.do_scaling:\n sol['Theta'] = self._rescale_to_covariances(sol['Theta'], self._scale)\n if self.latent:\n sol['L'] = self._rescale_to_covariances(sol['L'], self._scale)\n \n \n # set the computed solution\n if self.latent:\n self.solution._set_solution(Theta = sol['Theta'], L = sol['L']) \n else:\n self.solution._set_solution(Theta = sol['Theta']) \n \n self.solver_info = info.copy()\n return",
"def __solve_full_linear_problem(self):\n samples = []\n\n for news in self.news_pool:\n samples += [news.sampled_quality] * self.layout_slots\n\n self.full_C = np.array(samples) * self.full_lambdas\n\n linear_problem = opt.linprog(A_ub=self.full_A, b_ub=self.full_B, c=self.full_C)\n slots_assegnation_probabilities = []\n slot_counter = 0\n tmp_slot_probabilities = []\n while slot_counter < self.layout_slots:\n i = slot_counter\n while i < len(linear_problem.x):\n tmp_slot_probabilities.append(np.abs(linear_problem.x[i]))\n i += self.layout_slots\n slots_assegnation_probabilities.append(tmp_slot_probabilities.copy())\n tmp_slot_probabilities.clear()\n slot_counter += 1\n\n result = self.__de_randomize_LP(self.news_pool, slots_assegnation_probabilities, self.lp_rand_tech)\n\n return result",
"def optimise_latents(self):\n xtemp=np.zeros(self.GP.X.shape)\n for i,yy in enumerate(self.GP.Y):\n original_x = self.GP.X[i].copy()\n #gtol=1e-10,epsilon=1e-10,\n xopt = optimize.fmin_cg(self.ll,self.GP.X[i],fprime=self.ll_grad,disp=True,args=(i,))\n #xopt=SCG(self.ll,self.ll_grad,self.GP.X[i],optargs=(i,),display=False)\n self.GP.X[i] = original_x\n xtemp[i] = xopt\n \n self.GP.X=xtemp.copy()",
"def test_lin_admm(self):\n X = Variable((10,5))\n B = np.reshape(np.arange(50), (10,5))\n prox_fns = [sum_squares(X, b=B)]\n sltn = ladmm.solve(prox_fns, [], 0.1, max_iters=500, eps_rel=1e-5, eps_abs=1e-5)\n self.assertItemsAlmostEqual(X.value, B, places=2)\n self.assertAlmostEqual(sltn, 0)\n\n prox_fns = [norm1(X, b=B, beta=2)]\n sltn = ladmm.solve(prox_fns, [], 0.1, max_iters=500, eps_rel=1e-5, eps_abs=1e-5)\n self.assertItemsAlmostEqual(X.value, B/2., places=2)\n self.assertAlmostEqual(sltn, 0, places=2)\n\n prox_fns = [norm1(X), sum_squares(X, b=B)]\n sltn = ladmm.solve(prox_fns, [], 0.1, max_iters=500, eps_rel=1e-5, eps_abs=1e-5)\n\n cvx_X = cvx.Variable(10, 5)\n cost = cvx.sum_squares(cvx_X - B) + cvx.norm(cvx_X, 1)\n prob = cvx.Problem(cvx.Minimize(cost))\n prob.solve()\n self.assertItemsAlmostEqual(X.value, cvx_X.value, places=2)\n self.assertAlmostEqual(sltn, prob.value)\n\n psi_fns, omega_fns = ladmm.partition(prox_fns)\n sltn = ladmm.solve(psi_fns, omega_fns, 0.1, max_iters=500, eps_rel=1e-5, eps_abs=1e-5)\n self.assertItemsAlmostEqual(X.value, cvx_X.value, places=2)\n self.assertAlmostEqual(sltn, prob.value)\n\n # With linear operators.\n kernel = np.array([1,2,3])\n kernel_mat = np.matrix(\"2 1 3; 3 2 1; 1 3 2\")\n L = np.linalg.norm(kernel_mat)\n x = Variable(3)\n b = np.array([-41,413,2])\n prox_fns = [nonneg(x), sum_squares(conv(kernel, x), b=b)]\n sltn = ladmm.solve(prox_fns, [], 0.1, max_iters=3000, eps_abs=1e-5,\n eps_rel=1e-5)\n\n cvx_X = cvx.Variable(3)\n cost = cvx.norm(kernel_mat*cvx_X - b)\n prob = cvx.Problem(cvx.Minimize(cost), [cvx_X >= 0])\n prob.solve()\n self.assertItemsAlmostEqual(x.value, cvx_X.value, places=2)\n\n psi_fns, omega_fns = ladmm.partition(prox_fns)\n sltn = ladmm.solve(psi_fns, omega_fns, 0.1, max_iters=3000, eps_abs=1e-5,\n eps_rel=1e-5)\n self.assertItemsAlmostEqual(x.value, cvx_X.value, places=2)",
"def admm_linearized_krylov(alpha, g, L, A, b, niter_arn, sigma, niter,\n **kwargs):\n h, q = arnoldi(A, b, x0, niter_arn)\n beta = (A(x0) - b).norm()\n\n Qn = KrylovSpaceEmbedding(q[:-1])\n Qnp1 = KrylovSpaceEmbedding(q)\n H = odl.MatrixOperator(h)\n assert alpha in H.domain\n assert Qnp1.domain == H.range\n assert L.domain == Qnp1.range\n\n g_transl = g.translated(-L(x0))\n\n U = L * Qn\n S = odl.BroadcastOperator(H, U)\n\n f = odl.solvers.ZeroFunctional(alpha.space)\n e1 = H.range.zero()\n e1[0] = 1\n data_fit = odl.solvers.L2NormSquared(H.range).translated(beta * e1)\n G = odl.solvers.SeparableSum(data_fit, g_transl)\n\n opnorm_H = odl.power_method_opnorm(H, maxiter=50)\n tau = 0.5 * sigma / opnorm_H ** 2\n odl.solvers.admm_linearized(alpha, f, G, S, tau, sigma, niter,\n **kwargs)",
"def block_SGL(S, lambda1, Omega_0, Theta_0=None, X_0=None, rho=1., max_iter=1000, \n tol=1e-7, rtol=1e-3, stopping_criterion=\"boyd\",\n update_rho=True, verbose=False, measure=False):\n assert Omega_0.shape == S.shape\n assert S.shape[0] == S.shape[1]\n assert lambda1 > 0\n\n (p, p) = S.shape\n\n if Theta_0 is None:\n Theta_0 = Omega_0.copy()\n if X_0 is None:\n X_0 = np.zeros((p, p))\n\n # compute connected components of S with lambda_1 threshold\n numC, allC = get_connected_components(S, lambda1)\n\n allOmega = list()\n allTheta = list()\n allX = list()\n\n for i in range(numC):\n C = allC[i]\n\n # single node connected components have a closed form solution, see Witten, Friedman, Simon \"NEW INSIGHTS FOR THE GRAPHICAL LASSO \"\n if len(C) == 1:\n # we use the OFF-DIAGONAL l1-penalty, otherwise it would be 1/(S[C,C]+lambda1)\n closed_sol = 1 / (S[C, C])\n\n allOmega.append(closed_sol)\n allTheta.append(closed_sol)\n allX.append(np.array([0]))\n\n\n # else solve Graphical Lasso for the corresponding block\n else:\n block_S = S[np.ix_(C, C)]\n block_sol, block_info = ADMM_SGL(S=block_S, lambda1=lambda1, Omega_0=Omega_0[np.ix_(C, C)],\n Theta_0=Theta_0[np.ix_(C, C)], X_0=X_0[np.ix_(C, C)], tol=tol, rtol=rtol,\n stopping_criterion=stopping_criterion, update_rho=update_rho,\n rho=rho, max_iter=max_iter, verbose=verbose, measure=measure)\n\n allOmega.append(block_sol['Omega'])\n allTheta.append(block_sol['Theta'])\n allX.append(block_sol['X'])\n\n # compute inverse permutation\n per = np.hstack(allC)\n per1 = invert_permutation(per)\n\n # construct solution by applying inverse permutation indexing\n sol = dict()\n sol['Omega'] = block_diag(*allOmega)[np.ix_(per1, per1)]\n sol['Theta'] = block_diag(*allTheta)[np.ix_(per1, per1)]\n sol['X'] = block_diag(*allX)[np.ix_(per1, per1)]\n\n return sol",
"def set_DirichletSS_sparse(self):\n \n \n self.set_Dirichlet_vessel(self.inlet)\n\n\n self.tissue_consumption(self.Mt)\n \n #REINITIALISATION OF THE VECTOR OF TISSUE PHI!!!\n self.phi_t=np.zeros(len(self.phit))\n \n self.set_Dirichlet_north(0)\n self.set_Dirichlet_east(0)\n self.set_Dirichlet_west(0)\n \n self.A.eliminate_zeros()",
"def factor_model_to_ls(D, L, sparsity_lvl=0.2):\n\n m = D.shape[0]\n X = scipy.linalg.sqrtm(D + L @ L.T)\n\n # generate ground truth model\n beta = 10 * np.random.randn(m)\n indices = np.random.choice(np.arange(m), replace=False, size=int(m * sparsity_lvl))\n beta[indices] = 0\n\n y = X @ beta + np.random.randn(m)\n\n return X, y",
"def solver(eqns, params):\n\n # First generates the linear system.\n mat, vec = get_linear_sys(eqns, params)\n\n print(\n 'Invoking the numpy.linalg.lstsq function...'\n )\n start_time = time.process_time()\n\n res = lstsq(mat, vec, **kwargs)\n\n print(\n 'Finished: {!s}sec.'.format(time.process_time() - start_time)\n )\n\n return res[0]",
"def anl_solution(self):\r\n\r\n m = float(self.mass) / self.nu_m\r\n qe = 1 / self.nu_m * (self.nu_t * self.nu_t / self.nu_x) * 1.0 \\\r\n / float(self.size_tick * self.size_tick)\r\n print 'qE=', qe\r\n c = self.light_vel\r\n for i in range(0, len(self.obs.obt_g)):\r\n ddt = float(self.obs.obt[i] - self.obs.obt[i - 1])\r\n x = m * c ** 2 / qe * (math.sqrt(1.0 + (qe * self.t[i] / (m\r\n * c)) ** 2) - 1.0)\r\n self.xa_track.append(x)\r\n p = qe * self.t[i]\r\n self.pa.append(p)\r\n v = p / math.sqrt(m ** 2 + (p / c) ** 2)\r\n jv = self.t[i] * qe / (m * c)\r\n v = math.sqrt(jv * jv / (1 + jv * jv)) * c\r\n self.va.append(v)\r\n print 'Analytical solution of the differential equation of motion'",
"def write_ldl_lsolve(f, variables):\n\n data = variables['data']\n priv = variables['priv']\n Lp = priv['L']['p']\n\n f.write(\"void LDL_lsolve(LDL_int n, c_float X [ ], LDL_int Lp [ ]\")\n f.write(\", LDL_int Li [ ], c_float Lx [ ]){\\n\")\n f.write(\"LDL_int p;\\n\")\n\n # Unroll for loop\n for j in range(data['m'] + data['n']):\n if Lp[j+1] > Lp[j]: # Write loop ONLY if necessary\n f.write(\"for (p = %i ; p < %i ; p++){\\n\" % (Lp[j], Lp[j+1]))\n f.write(\"X [Li [p]] -= Lx [p] * X [%i];\\n\" % (j))\n f.write(\"}\\n\")\n\n # Close function\n f.write(\"}\\n\\n\")",
"def solve_linear_static(system, formulation, component):\n solfac = SolverFactory()\n solfac.set_system(system)\n solfac.set_analysis_type('static')\n solfac.set_linear_solver('scipy-sparse')\n\n solver, solver_options = solfac.create_solver()\n\n solution_writer = AmfeSolution()\n\n no_of_dofs = system.dimension\n q0 = np.zeros(no_of_dofs)\n dq0 = q0\n ddq0 = dq0\n\n q = solver.solve(system.K(q0, dq0, 0), system.f_ext(q0, dq0, 0))\n u, du, ddu = formulation.recover(q, dq0, ddq0, 0)\n solution_writer.write_timestep(0, u, None, None)\n logger = logging.getLogger(__name__)\n logger.info('Strains and stresses are currently not supported for linear models. Only nonlinear kinematics are '\n 'currently used during their calculation.')\n\n print('Solution finished')\n return solution_writer",
"def dLdp(C1s,C0s,ks,bs,sigma=1):\n # return np.array(jit(jacfwd(L,argnums=1))(q,ps,C1s,C0s,ks,bs,sigma))\n \n # A = FIM(q,ps,C1s,C0s,ks,bs,sigma)\n \n # Construct A(q,ps)\n A = FIM(C1s,C0s,ks,bs,sigma)\n\n # Construct dAdp(q,ps)\n dAdp = jit(jacfwd(A,argnums=1))\n \n # Construct inv_A(q,ps)\n inv_A=lambda q,ps: jnp.linalg.inv(A(q,ps))\n \n # print(np.trace(-dAinv(inv_A,dAdp),axis1=0,axis2=1)-np.array(jit(jacfwd(L,argnums=1))(q,ps,C1s,C0s,ks,bs,sigma)))\n \n # Construct dLdP(q,ps)\n\n\n\n return lambda q,ps: -np.array(jnp.trace(dAinv(inv_A(q,ps),dAdp(q,ps)),axis1=0,axis2=1))",
"def direct_logl_matrix(gsplaq, gss, dataset, directModel,\n minProbClip=1e-6):\n if len(gsplaq.get_all_strs()) > 0: # skip cases with no strings\n plaq_ds = gsplaq.expand_aliases(dataset, circuit_simplifier=directModel)\n plaq_pr = gss.create_plaquette(_objs.Circuit((\"GsigmaLbl\",)))\n plaq_pr.simplify_circuits(directModel)\n\n cnts = total_count_matrix(plaq_ds, dataset)\n probs = probability_matrices(plaq_pr, directModel) # no probs_precomp_dict\n freqs = frequency_matrices(plaq_ds, dataset)\n\n ret = _np.empty((plaq_ds.rows, plaq_ds.cols), 'd')\n for (i, j, opstr, elIndices, _), (_, _, _, elIndices_ds, _) in zip(\n plaq_pr.iter_simplified(), plaq_ds.iter_simplified()):\n logLs = _tools.two_delta_loglfn(cnts[elIndices_ds], probs[elIndices],\n freqs[elIndices_ds], minProbClip)\n ret[i, j] = sum(logLs) # sum all elements for each (i,j) pair\n return ret\n else:\n return _np.nan * _np.ones((gsplaq.rows, gsplaq.cols), 'd')",
"def LDL_sparse(matrix):\n Lrowstruc = matrix.row_structure_symbolic_cholesky()\n L = matrix.eye(matrix.rows)\n D = matrix.zeros(matrix.rows, matrix.cols)\n\n for i in range(len(Lrowstruc)):\n for j in Lrowstruc[i]:\n if i != j:\n L[i, j] = matrix[i, j]\n summ = 0\n for p1 in Lrowstruc[i]:\n if p1 < j:\n for p2 in Lrowstruc[j]:\n if p2 < j:\n if p1 == p2: #cancel possible ici\n summ += L[i, p1]*L[j, p1]*D[p1, p1]\n else:\n break\n else:\n break\n L[i, j] -= summ #ici\n L[i, j] = (L[i,j] / D[j, j]).cancel() #ici\n else: # i == j\n D[i, i] = matrix[i, i].cancel() ### cancel rajouté\n summ = 0\n for k in Lrowstruc[i]:\n if k < i:\n summ += (L[i, k]**2*D[k, k]).cancel() ### cancelrajouté\n else:\n break\n D[i, i] -= summ\n D[i,i] = D[i,i].cancel() #rajouté\n\n return L, D",
"def experiment_linear_l1(_):\n # Attack epsilon is manually set according to the norm of the min-norm\n # solution found using cvxpy for d/n=10. That is max-margin=1/min-norm.\n # Min linf-norm solution found (norm=0.0422)\n # Min l2-norm solution found (norm=0.3411)\n # Min l1-norm solution found (norm=1.8497)\n # Min l4-norm solution found (norm=0.0002)\n # Min l1.5-norm solution found (norm=0.5274)\n return experiment_linear_lp(\n adv_norm_type='l1',\n dual_norm_type='linf',\n baseline_norm_types=['l2'],\n attack_step_dir='grad_max')",
"def train(X,y,Xtest, name, inpycharm, numintervals=100,ldeltamin=-5,ldeltamax=5,rho=1,alpha=1,debug=False):\n print 'train LMM-Lasso'\n\n [n_s,n_f] = X.shape\n assert X.shape[0]==y.shape[0], 'dimensions do not match'\n if y.ndim==1:\n y = SP.reshape(y,(n_s,1))\n\n # train/read in null model and transform data so that they correspond to uncorrelated pseudo-individuals\n X, y, delta0 = svd_rotate(y, X, numintervals, ldeltamin, ldeltamax)\n\n # train lasso on residuals incoorporating screening rules\n beta, nz_inds, scr_inds, path, screening_rule, times_solver, lmax, solver = train_lasso_multiple(X, y)\n if beta.shape == weights.T.shape:\n beta = beta.T\n\n \"\"\" train LMM-Lasso without screening rules, useful for comparison\n paok = map(lambda x: train_lasso_sklearn(X, y, x), path)\n weights = np.asarray([p[0] for p in paok]).T\n time_lasso = np.asarray([p[1] for p in paok])\n nzlasso = np.asarray([p[2] for p in paok])\n plot_speedup(X, time_lasso, name, lmax, solver, inpycharm, screening_rules=screening_rule, path=path, times_solver=times_solver)\n res['weights'] = weights\n res['time'] = time_diff\n res['time_solutions_admm'] = time_lasso\n res['non-zero indeces lasso'] = nzlasso\n res['mean_lasso'] = np.reshape(mean_lasso, (mean_lasso.shape[0],mean_lasso.shape[1]))\n \"\"\"\n # calculate the mean of the Gaussian predictive distribution\n mean_ada = predict_phenotype(X, n_f, delta0, y, beta, Xtest, weights)\n\n amountof_screened_feat = list(map(lambda x: len(x), scr_inds))\n nz_inds = list(map(lambda x: len(x), nz_inds))\n\n res = {}\n res['ldelta0'] = ldelta0\n res['weights_of_adascreen'] = beta\n res['lambda_path'] = path\n res['number_screened_f'] = amountof_screened_feat\n res['screening_rule'] = screening_rule\n res['lmax'] =lmax\n res['non-zero indeces ada'] =nz_inds\n res['mean_ada'] = np.reshape(mean_ada, (mean_ada.shape[0],mean_ada.shape[1]))\n return res",
"def calc_lampam_sym(ss, constraints):\n if isinstance(ss, list):\n lampam = np.zeros((len(ss), 12), float)\n for index in range(len(ss)):\n lampam[index] = calc_lampam_sym(ss[index], constraints)\n return lampam\n if ss.ndim == 2 and ss.shape[0] > 1:\n lampam = np.zeros((ss.shape[0], 12), float)\n for index in range(ss.shape[0]):\n lampam[index] = calc_lampam_sym(ss[index], constraints)\n return lampam\n\n n_plies_in_panels = 2 * np.size(ss) # laminate ply count\n\n cos_sin = np.empty((4, n_plies_in_panels // 2), float)\n for ind in range(n_plies_in_panels // 2):\n cos_sin[:, ind] = constraints.cos_sin[\n constraints.ind_angles_dict[ss[ind]]].reshape((4, ))\n\n for_the_top = np.arange(n_plies_in_panels // 2)\n z_0 = np.ones(n_plies_in_panels // 2)\n z_2 = ((1 - n_plies_in_panels / 2) * z_0 + for_the_top) ** 3 \\\n - ((1 - n_plies_in_panels / 2) * z_0 + for_the_top - 1) ** 3\n lampam = np.array([\n (2 / n_plies_in_panels)*np.matmul(cos_sin, z_0),\n np.array([0, 0, 0, 0]),\n (8 / n_plies_in_panels**3)*np.matmul(cos_sin, z_2)]).reshape(12)\n return lampam",
"def generate_direct_solver(self, grid=None):\n if grid is None:\n # LOG.debug(\"Generate Solver for internal Spare Matrix: %s\" % self.sp_matrix)\n solver = spla.factorized(self.sp_matrix)\n else:\n # LOG.debug(\"Generate Solver for given Grid %s\" % (grid,))\n sp_matrix = self.to_sparse_matrix(grid, \"csc\")\n # LOG.debug(\" with Sparse Matrix: %s\" % sp_matrix.todense())\n # print(\"Jahier\\n\", sp_matrix.todense())\n # print(\"Jahier.shape\\n\", sp_matrix.todense().shape)\n solver = spla.factorized(sp_matrix)\n return solver"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This is a wrapper for solving SGL problems by decomposing them into the connected components of the solution and solving each block separately. See Witten, Friedman, Simon, "New Insights for the Graphical Lasso" for details. It solves the single Graphical Lasso problem with an off-diagonal l1-penalty lambda1, handling single-node components in closed form and delegating larger blocks to ADMM_SGL.
|
import numpy as np
from scipy.linalg import block_diag

# get_connected_components, ADMM_SGL and invert_permutation are helpers assumed to
# be available from the same package.
def block_SGL(S, lambda1, Omega_0, Theta_0=None, X_0=None, rho=1., max_iter=1000,
tol=1e-7, rtol=1e-3, stopping_criterion="boyd",
update_rho=True, verbose=False, measure=False):
assert Omega_0.shape == S.shape
assert S.shape[0] == S.shape[1]
assert lambda1 > 0
(p, p) = S.shape
if Theta_0 is None:
Theta_0 = Omega_0.copy()
if X_0 is None:
X_0 = np.zeros((p, p))
# compute connected components of S with lambda_1 threshold
numC, allC = get_connected_components(S, lambda1)
allOmega = list()
allTheta = list()
allX = list()
for i in range(numC):
C = allC[i]
        # single-node connected components have a closed-form solution, see Witten, Friedman, Simon, "New Insights for the Graphical Lasso"
if len(C) == 1:
# we use the OFF-DIAGONAL l1-penalty, otherwise it would be 1/(S[C,C]+lambda1)
closed_sol = 1 / (S[C, C])
allOmega.append(closed_sol)
allTheta.append(closed_sol)
allX.append(np.array([0]))
# else solve Graphical Lasso for the corresponding block
else:
block_S = S[np.ix_(C, C)]
block_sol, block_info = ADMM_SGL(S=block_S, lambda1=lambda1, Omega_0=Omega_0[np.ix_(C, C)],
Theta_0=Theta_0[np.ix_(C, C)], X_0=X_0[np.ix_(C, C)], tol=tol, rtol=rtol,
stopping_criterion=stopping_criterion, update_rho=update_rho,
rho=rho, max_iter=max_iter, verbose=verbose, measure=measure)
allOmega.append(block_sol['Omega'])
allTheta.append(block_sol['Theta'])
allX.append(block_sol['X'])
# compute inverse permutation
per = np.hstack(allC)
per1 = invert_permutation(per)
# construct solution by applying inverse permutation indexing
sol = dict()
sol['Omega'] = block_diag(*allOmega)[np.ix_(per1, per1)]
sol['Theta'] = block_diag(*allTheta)[np.ix_(per1, per1)]
sol['X'] = block_diag(*allX)[np.ix_(per1, per1)]
return sol
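
A minimal usage sketch, not part of the original record: it assumes the imports above, uses an illustrative penalty lambda1 = 0.1, and builds a toy empirical covariance matrix just to exercise the interface.

import numpy as np

rng = np.random.default_rng(0)
p = 20
A = rng.standard_normal((5 * p, p))
S = np.cov(A, rowvar=False)          # toy empirical covariance matrix (p x p)
Omega_0 = np.eye(p)                  # identity starting point for ADMM

sol = block_SGL(S, lambda1=0.1, Omega_0=Omega_0)
Theta_hat = sol['Theta']             # estimated sparse precision matrix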
|
[
"def solver_one(state):\n\n # Obtain the different state elements\n glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(state)\n\n # Obtain parameters and their variation ranges (initially decoupled)\n params_list = get_parameters(state)\n\n # Map params to the objects where they appear\n map_parameters(params_list, state) # Modifies \"params_list\"\n\n # Obtain \"partition\" categories\n # Partition categories are categories provoking totally decoupled systems. The most important one is GEO\n partition_categories = get_graph_partitioning_categories(state)\n\n # Obtain \"observation variation\" categories\n # Observation variation categories are those which for a given scenario and partition (i.e., fixed subsystem)\n # produce variation in quantitative observations, not in the system structure. The most important are TIME and OBSERVER\n obs_variation_categories = get_observation_variation_categories(state)\n\n # Empty indicators collector\n indicators = get_empty_indicators_collector()\n\n for scenario in get_scenario_generator(params_list, state):\n # \"scenario\" contains a list of parameters and their values\n for partition in get_partition_generator(scenario, partition_categories, state):\n # \"partition\" contains a list of categories and their specific values\n # Build MSM for the partition categories\n msm = build_msm_from_parsed(scenario, partition, state)\n # TODO Remove processors not in the calculations (unit processors)\n cleanup_unused_processors(msm) # Modify \"msm\"\n for obs_variation in get_obs_variation_generator(obs_variation_categories, state):\n # TODO \"obs_variation\" contains a list of categories and their specific values\n # TODO Build flow graph with observations filtered according to \"obs_variation\".\n # Nodes keep link to interface AND a value if there is one.\n # Edges keep link to: hierarchy OR flow OR scale change (and context)\n reset_msm_solution_observations(msm)\n fg = get_flow_graph(msm, obs_variation)\n for sub_fg in nx.weakly_connected_component_subgraphs(fg):\n # Solve the sub_fg. Attach solutions to Nodes of \"sub_fg\"\n solve(sub_fg) # Modify \"fg\"\n put_solution_into_msm(msm, sub_fg) # Modify \"msm\"\n compute_local_indicators(msm)\n compute_global_indicators(msm)\n collect_indicators(indicators, msm) # Elaborate output matrices\n return indicators",
"def solve(self):\n\n # Assign variables to each quantity being solved.\n r_lookup, lookup, num = {}, {}, 0\n for element in self.elements:\n if is_wire(element) and element is not self.ground:\n lookup[num] = element\n r_lookup[element] = num\n num += 1\n elif not is_cs(element) and element is not self.ground:\n lookup[num] = element\n r_lookup[element] = num\n num += 1\n\n # Set up the linear algebraic equation Ax=b\n A = np.zeros((num, num))\n b = np.zeros(num)\n for row, element in lookup.items():\n if is_wire(element) and element is not self.ground:\n for two_sided in element.attached:\n if is_cs(two_sided):\n if two_sided.pos is element:\n b[row] += -1 * two_sided.current\n else:\n b[row] += two_sided.current\n else:\n if two_sided.pos is element:\n flow = 1\n else:\n flow = -1\n A[row, r_lookup[two_sided]] = flow\n elif is_vs(element):\n check_connected(element)\n if element.pos is not self.ground:\n A[row, r_lookup[element.pos]] = 1\n if element.neg is not self.ground:\n A[row, r_lookup[element.neg]] = -1\n b[row] = element.voltage\n elif is_resistor(element):\n check_connected(element)\n if element.pos is not self.ground:\n A[row, r_lookup[element.pos]] = 1\n if element.neg is not self.ground:\n A[row, r_lookup[element.neg]] = -1\n A[row, r_lookup[element]] = -1 * element.resistance\n\n b = b.reshape((num, 1))\n try:\n x = np.linalg.solve(A, b)\n except np.linalg.LinAlgError:\n raise CircuitError('Insufficient information to solve circuit')\n\n # Assign values to all circuit components\n for i in range(num):\n item = lookup[i]\n if is_wire(item):\n item.potential = x[i, 0]\n elif isinstance(item, DualSided):\n item.current = x[i, 0]\n\n # Mark circuit as solved\n self.been_solved = True",
"def __solve_full_linear_problem(self):\n samples = []\n\n for news in self.news_pool:\n samples += [news.sampled_quality] * self.layout_slots\n\n self.full_C = np.array(samples) * self.full_lambdas\n\n linear_problem = opt.linprog(A_ub=self.full_A, b_ub=self.full_B, c=self.full_C)\n slots_assegnation_probabilities = []\n slot_counter = 0\n tmp_slot_probabilities = []\n while slot_counter < self.layout_slots:\n i = slot_counter\n while i < len(linear_problem.x):\n tmp_slot_probabilities.append(np.abs(linear_problem.x[i]))\n i += self.layout_slots\n slots_assegnation_probabilities.append(tmp_slot_probabilities.copy())\n tmp_slot_probabilities.clear()\n slot_counter += 1\n\n result = self.__de_randomize_LP(self.news_pool, slots_assegnation_probabilities, self.lp_rand_tech)\n\n return result",
"def __Solve__( X0, eps, eqmwrap, maxit, Dampening, DampeningThresh):\n\n\n\n #initializations\n X = X0\n\n nx, T = X.shape\n C = zeros((nx,nx,T))\n d = zeros((nx,T))\n dX = zeros((nx,T))\n Fx = zeros((nx,T))\n\n\n for it in range(maxit+1):\n\n C[:,:,0] = zeros(nx)\n d[:,0] = zeros(nx)\n for t in range(1,T-1):\n Fx[:,[t]],S_L, S, S_P = eqmwrap(X[:,t-1:t+2],eps[:,t])\n C[:,:,t] = np.linalg.solve( S - np.dot(S_L,C[:,:,t-1]),S_P)\n # print Fx[:,[t]].shape\n # print np.dot(S_L,d[:,[t-1]]).shape\n # print S.shape \n # print np.dot(S_L,C[:,:,t-1]).shape\n # print (S - np.dot(S_L,C[:,:,t-1]) ).shape\n d[:,t] = -np.linalg.solve(S - np.dot(S_L,C[:,:,t-1]) ,Fx[:,t] + np.dot(S_L,d[:,t-1]))\n \n \n \n residual = max(abs(Fx.flatten()))\n if residual < 1e-8:\n print 'done'\n break\n else:\n print 'iteration ' + str(it) + '...residual ' + str(residual)\n if residual < DampeningThresh:\n Dampening = 1\n \n \n\n \n \n dX[:,T-1] = zeros(nx)\n for t in range(T-2,0,-1):\n dX[:,t] = d[:,t] - np.dot(C[:,:,t],dX[:,t+1])\n \n \n \n X = X + Dampening * dX\n \n\n if it >= maxit:\n warnings.warn('simulation did not converge after {} iterations.'.format(maxit))\n \n\n\n return X",
"def solve(self):\n #problem = C.Problem(C.MinConflictsSolver(100000))\n #problem = C.Problem(C.BacktrackingSolver())\n #problem = C.Problem(C.RecursiveBacktrackingSolver())\n problem = C.Problem()\n # Add variables\n for y in range(self.rows):\n for x in range(self.cols):\n if (x, y) in self.given_values:\n problem.addVariable((x, y), [self.given_values[x, y]])\n else:\n problem.addVariable((x, y), range(1, len(self.segment_of((x, y))) + 1))\n # Add constraints: each segment must have all values different\n for segment in self.segments:\n for pos1 in segment:\n for pos2 in segment:\n if pos1 < pos2:\n problem.addConstraint(lambda p1, p2: p1 != p2,\n (pos1, pos2))\n # Add look-around constraints\n for y in range(self.rows):\n for x in range(self.cols):\n self.add_look_constraints((x, y), problem)\n # Solve the CSP\n solution = problem.getSolution()\n assert solution is not None\n #print(problem.getSolutions())\n self.solved_values = solution",
"def solve(self, Omega_0 = None, solver_params = dict(), tol = 1e-8, rtol = 1e-7, solver = 'admm', verbose = False):\n \n assert solver in [\"admm\"], \"Currently only the ADMM solver is supported as it is implemented for all cases.\"\n assert self.reg_params.get('lambda1') is not None, \"Regularization parameters need to be set first (at least lambda1), see function glasso_problem.set_reg_params()\"\n \n # if solver == \"ppdna\":\n # assert self.multiple,\"PPDNA solver is only supported for MULTIPLE Graphical Lassp problems.\"\n # assert not self.latent, \"PPDNA solver is only supported for problems without latent variables.\"\n # assert self.conforming, \"PPDNA solver is only supported for problems with conforming dimensions.\"\n \n \n self.set_start_point(Omega_0)\n self.tol = tol\n self.rtol = rtol\n \n self.solver_params = self._default_solver_params()\n self.solver_params.update(solver_params)\n self.solver_params[\"verbose\"] = verbose\n \n #print(f\"\\n Solve problem with {solver.upper()} solver... \\n \")\n \n if not self.multiple:\n if self.latent:\n sol, info = ADMM_SGL(S = self.S, lambda1 = self.reg_params['lambda1'], Omega_0 = self.Omega_0, \\\n tol = self.tol , rtol = self.rtol, latent = self.latent, mu1 = self.reg_params['mu1'], **self.solver_params)\n \n else:\n sol = block_SGL(S = self.S, lambda1 = self.reg_params['lambda1'], Omega_0 = self.Omega_0, \\\n tol = self.tol, rtol = self.tol, **self.solver_params)\n info = {}\n \n \n elif self.conforming: \n sol, info = ADMM_MGL(S = self.S, lambda1 = self.reg_params['lambda1'], lambda2 = self.reg_params['lambda2'], reg = self.reg,\\\n Omega_0 = self.Omega_0, latent = self.latent, mu1 = self.reg_params['mu1'],\\\n tol = self.tol, rtol = self.rtol, **self.solver_params)\n \n \n else:\n sol, info = ext_ADMM_MGL(S = self.S, lambda1 = self.reg_params['lambda1'], lambda2 = self.reg_params['lambda2'], reg = self.reg,\\\n Omega_0 = self.Omega_0, G = self.G, tol = self.tol, rtol = self.rtol,\\\n latent = self.latent, mu1 = self.reg_params['mu1'], **self.solver_params)\n \n \n # rescale\n if self.do_scaling:\n sol['Theta'] = self._rescale_to_covariances(sol['Theta'], self._scale)\n if self.latent:\n sol['L'] = self._rescale_to_covariances(sol['L'], self._scale)\n \n \n # set the computed solution\n if self.latent:\n self.solution._set_solution(Theta = sol['Theta'], L = sol['L']) \n else:\n self.solution._set_solution(Theta = sol['Theta']) \n \n self.solver_info = info.copy()\n return",
"def __solve_linear_problem(self, continuity_relaxation=True):\n result = [0] * self.layout_slots\n self.news_pool.sort(key=lambda x: (x.news_category, x.sampled_quality), reverse=True)\n LP_news_pool = []\n done_for_category = False\n category_count = 0\n prev_category = self.news_pool[0].news_category\n # First build a subset of news to easily handle the LP resolution\n for news in self.news_pool:\n if prev_category != news.news_category:\n if category_count < self.layout_slots:\n raise RuntimeWarning(\"Not enough news per category found. There should be at least \" +\n str(self.layout_slots) + \" news with category = \" + prev_category + \", but \"\n \"only \" + str(category_count) + \"are present. The allocation maybe \"\n \"sub-optimal.\")\n category_count = 0\n done_for_category = False\n prev_category = news.news_category\n if not done_for_category:\n LP_news_pool.append(news)\n category_count += 1\n if category_count == self.layout_slots:\n done_for_category = True\n\n # If not all the required news are present, add some other news at random.\n while len(LP_news_pool) < len(self.categories) * self.layout_slots:\n random_news = np.random.choice(self.news_pool)\n if random_news not in LP_news_pool:\n LP_news_pool.append(random_news)\n\n LP_news_pool.sort(key=lambda x: x.news_category, reverse=False)\n thetas = []\n # Compute the vector of coefficients for the LP objective function\n for news in LP_news_pool:\n thetas += [news.sampled_quality] * self.layout_slots\n self.C = list(np.array(thetas) * np.array(self.lambdas))\n\n # Then solve an LP or an ILP\n if continuity_relaxation:\n linear_problem = opt.linprog(A_ub=self.A, b_ub=self.B, c=self.C)\n slots_assegnation_probabilities = []\n slot_counter = 0\n tmp_slot_probabilities = []\n while slot_counter < self.layout_slots:\n i = slot_counter\n while i < len(linear_problem.x):\n tmp_slot_probabilities.append(np.abs(linear_problem.x[i]))\n i += self.layout_slots\n slots_assegnation_probabilities.append(tmp_slot_probabilities.copy())\n tmp_slot_probabilities.clear()\n slot_counter += 1\n\n self.measure_allocation_diversity_bounds_errors(slots_assegnation_probabilities, LP_news_pool, iter=10)\n\n result = self.__de_randomize_LP(LP_news_pool, slots_assegnation_probabilities, self.lp_rand_tech)\n\n else:\n # INITIALIZES AN INTEGER LINEAR PROBLEM\n ILP = LpProblem(\"News_ILP\", LpMaximize)\n ILP_variables = []\n\n for cat in range(len(self.categories)):\n for j in range(self.layout_slots):\n for s in range(self.layout_slots):\n ILP_variables.append(LpVariable(name=str(cat) + \"_\" + str(j) + \"_\" + str(s), lowBound=0, upBound=1, cat=\"Binary\"))\n\n # Objective function addition to the problem\n C = list(np.array(self.C) * -1)\n ILP += lpSum([C[i] * ILP_variables[i] for i in range(len(self.C))])\n\n # Category constraints addition to the problem\n for i in range(len(self.categories)):\n ILP += lpSum([self.A[i][j] * ILP_variables[j] for j in range(len(self.C))]) <= self.B[i]\n\n # Slots capacity constraints addition to the problem\n for i in range(len(self.categories), len(self.categories) + self.layout_slots):\n ILP += lpSum([self.A[i][j] * ILP_variables[j] for j in range(len(self.C))]) <= self.B[i]\n\n # News capacity constraints addition to the problem\n for i in range(len(self.categories) + self.layout_slots, len(self.categories) + self.layout_slots + len(self.categories) * self.layout_slots):\n ILP += lpSum([self.A[i][j] * ILP_variables[j] for j in range(len(self.C))]) <= self.B[i]\n\n ILP.solve()\n\n # FOR EACH SLOT, ISOLATES THE 
CORRESPONDING VARIABLES\n slots_assegnation_probabilities = []\n slot_counter = 0\n tmp_slot_probabilities = []\n while slot_counter < self.layout_slots:\n i = slot_counter\n while i < len(ILP.variables()):\n tmp_slot_probabilities.append(ILP.variables().__getitem__(i))\n i += self.layout_slots\n slots_assegnation_probabilities.append(tmp_slot_probabilities.copy())\n tmp_slot_probabilities.clear()\n slot_counter += 1\n\n # TAKES THE VARIABLES WHICH VALUE IS 1, THEN ALLOCATES THE CORRESPONDING NEWS IN THE RESULT PAGE\n for i in range(len(result)):\n for probabilities in slots_assegnation_probabilities[i]:\n if probabilities.varValue > 0:\n var_name = probabilities.name\n break\n indexes = var_name.split(\"_\")\n category_index = int(indexes[0])\n news_number = int(indexes[1])\n news_index = category_index * self.layout_slots + news_number\n result[i] = LP_news_pool[news_index]\n\n return result",
"def solve(self):",
"def problem_formulation_seond_stage(self):\n\n ng = self.ng\n nb = self.nb\n nl = self.nl\n T = self.T\n bus = self.bus\n branch = self.branch\n gen = self.gen\n # Number of variables\n nx = ng * T + nb * T + nb * T + nl * T\n lb = zeros((nx, 1))\n ub = zeros((nx, 1))\n c = zeros((nx, 1))\n pg = 0\n pd = 1\n bigM = 10 ** 4\n\n for i in range(T):\n for j in range(ng):\n # real-time power dispatch\n lb[pg * ng * T + i * ng + j] = 0\n ub[pg * ng * T + i * ng + j] = gen[j, PMAX]\n for j in range(nb):\n # load shedding at different buses\n lb[pd * ng * T + i * nb + j] = 0\n ub[pd * ng * T + i * nb + j] = bigM\n c[pd * ng * T + i * nb + j] = bigM\n\n for i in range(T):\n for j in range(nb):\n # The bus angle\n lb[ng * T + nb * T + i * nb + j] = -360\n ub[ng * T + nb * T + i * nb + j] = 360\n if bus[j, BUS_TYPE] == REF:\n lb[ng * T + nb * T + i * nb + j] = 0\n ub[ng * T + nb * T + i * nb + j] = 0\n\n for i in range(T):\n for j in range(nl):\n # The power flow\n lb[ng * T + nb * T + nb * T + i * nl + j] = -branch[j, RATE_A]\n ub[ng * T + nb * T + nb * T + i * nl + j] = branch[j, RATE_A]\n # Construct the constraint set\n # 3.1) Power balance constraints\n NX = self.nx\n nu = self.nu\n Cg = self.Cg\n Cft = self.Cft\n\n E_temp = zeros((T * nb, NX))\n M_temp = zeros((T * nb, nu))\n G_temp = zeros((T * nb, nx))\n h_temp = zeros((T * nb, 1))\n for i in range(T):\n # For the unit\n G_temp[i * nb:(i + 1) * nb, pg * ng * T + i * ng:pg * ng * T + (i + 1) * ng] = Cg.todense()\n # For the load shedding\n G_temp[i * nb:(i + 1) * nb, pd * ng * T + i * nb:pd * ng * T + (i + 1) * nb] = eye(nb)\n # For the transmission lines\n G_temp[i * nb:(i + 1) * nb, ng * T + nb * T + nb * T + i * nl: ng * T + nb * T + nb * T + (i + 1) * nl] = -(\n Cft.transpose()).todense()\n M_temp[i * nb:(i + 1) * nb, i * nb:(i + 1) * nb] = -eye(nb)\n # Update G,M,E,h\n G = concatenate([G_temp, -G_temp])\n M = concatenate([M_temp, -M_temp])\n E = concatenate([E_temp, -E_temp])\n h = concatenate([h_temp, -h_temp])\n # 3.2 Line flow equation\n E_temp = zeros((T * nl, NX))\n M_temp = zeros((T * nl, nu))\n G_temp = zeros((T * nl, nx))\n h_temp = zeros((T * nl, 1))\n\n X = zeros((nl, nl))\n for i in range(nl):\n X[i, i] = 1 / branch[i, BR_X]\n\n for i in range(T):\n # For the unit\n G_temp[i * nl:(i + 1) * nl,\n ng * T + nb * T + nb * T + i * nl:ng * T + nb * T + nb * T + (i + 1) * nl] = -eye(nl)\n G_temp[i * nl:(i + 1) * nl, ng * T + nb * T + i * nb: ng * T + nb * T + (i + 1) * nb] = X.dot(Cft.todense())\n G = concatenate([G, G_temp, -G_temp])\n M = concatenate([M, M_temp, -M_temp])\n E = concatenate([E, E_temp, -E_temp])\n h = concatenate([h, h_temp, -h_temp])\n\n # 3.3) Power range limitation\n E_temp = zeros((T * ng, NX))\n M_temp = zeros((T * ng, nu))\n G_temp = zeros((T * ng, nx))\n h_temp = zeros((T * ng, 1))\n for i in range(T):\n for j in range(ng):\n G_temp[i * ng + j, pg * ng * T + i * ng + j] = 1\n E_temp[i * ng + j, PG * ng * T + i * ng + j] = -1\n E_temp[i * ng + j, RD * ng * T + i * ng + j] = 1\n G = concatenate([G, G_temp])\n M = concatenate([M, M_temp])\n E = concatenate([E, E_temp])\n h = concatenate([h, h_temp])\n\n E_temp = zeros((T * ng, NX))\n M_temp = zeros((T * ng, nu))\n G_temp = zeros((T * ng, nx))\n h_temp = zeros((T * ng, 1))\n for i in range(T):\n for j in range(ng):\n G_temp[i * ng + j, pg * ng * T + i * ng + j] = -1\n E_temp[i * ng + j, PG * ng * T + i * ng + j] = 1\n E_temp[i * ng + j, RU * ng * T + i * ng + j] = 1\n G = concatenate([G, G_temp])\n M = concatenate([M, M_temp])\n E = concatenate([E, E_temp])\n h = 
concatenate([h, h_temp])\n # 3.4) Load shedding constraint\n E_temp = zeros((T * nb, NX))\n M_temp = zeros((T * nb, nu))\n G_temp = zeros((T * nb, nx))\n h_temp = zeros((T * nb, 1))\n for i in range(T):\n for j in range(nb):\n G_temp[i * nb + j, ng * T + i * nb + j] = -1\n M_temp[i * nb + j, i * nb + j] = 1\n G = concatenate([G, G_temp])\n M = concatenate([M, M_temp])\n E = concatenate([E, E_temp])\n h = concatenate([h, h_temp])\n # 3.5) Upper and lower boundary inforamtion\n E_temp = zeros((nx, NX))\n M_temp = zeros((nx, nu))\n G_temp = eye(nx)\n h_temp = lb\n G = concatenate([G, G_temp])\n M = concatenate([M, M_temp])\n E = concatenate([E, E_temp])\n h = concatenate([h, h_temp])\n\n E_temp = zeros((nx, NX))\n M_temp = zeros((nx, nu))\n G_temp = -eye(nx)\n h_temp = -ub\n\n G = concatenate([G, G_temp])\n M = concatenate([M, M_temp])\n E = concatenate([E, E_temp])\n h = concatenate([h, h_temp])\n d = c\n\n model = {\"G\": G,\n \"M\": M,\n \"E\": E,\n \"h\": h,\n \"d\": d}\n # Modify the lower boundary\n\n return model",
"def KS_matsolve_parallel(\n self, T_sparse, B_sparse, v, xgrid, bc, solve_type, eigs_min_guess\n ):\n if solve_type == \"guess\":\n dtype = np.float64\n else:\n dtype = self.fp\n # compute the number of grid points\n N = np.size(xgrid)\n\n # Compute the number pmax of distinct diagonizations to be solved\n pmax = config.spindims * config.lmax\n\n # now flatten the potential matrix over spins\n v_flat = np.zeros((pmax, N), dtype=dtype)\n eigs_guess_flat = np.zeros((pmax), dtype=dtype)\n for i in range(np.shape(v)[0]):\n for l in range(config.lmax):\n if self.grid_type == \"log\":\n v_corr = 0.5 * (l + 0.5) ** 2 * np.exp(-2 * xgrid)\n else:\n v_corr = 3 / (32 * xgrid**4) + l * (l + 1) / (2 * xgrid**4)\n v_flat[l + (i * config.lmax)] = v[i] + v_corr\n eigs_guess_flat[l + (i * config.lmax)] = eigs_min_guess[i, l]\n\n # make temporary folder with random name to store arrays\n while True:\n try:\n joblib_folder = \"atoMEC_tmpdata_\" + \"\".join(\n random.choices(string.ascii_uppercase + string.digits, k=20)\n )\n os.mkdir(joblib_folder)\n break\n except FileExistsError as e:\n print(e)\n\n # dump and load the large numpy arrays from file\n data_filename_memmap = os.path.join(joblib_folder, \"data_memmap\")\n dump((T_sparse, B_sparse, v_flat), data_filename_memmap)\n T_sparse, B_sparse, v_flat = load(data_filename_memmap, mmap_mode=\"r\")\n\n # set up the parallel job\n with Parallel(n_jobs=config.numcores) as parallel:\n X = parallel(\n delayed(self.diag_H)(\n q,\n T_sparse,\n B_sparse,\n v_flat,\n xgrid,\n config.nmax,\n bc,\n eigs_guess_flat,\n solve_type,\n )\n for q in range(pmax)\n )\n\n # remove the joblib arrays\n try:\n shutil.rmtree(joblib_folder)\n except: # noqa\n print(\"Could not clean-up automatically.\")\n\n if solve_type == \"full\":\n # retrieve the eigfuncs and eigvals from the joblib output\n eigfuncs_flat = np.zeros((pmax, config.nmax, N), dtype=dtype)\n eigvals_flat = np.zeros((pmax, config.nmax), dtype=dtype)\n for q in range(pmax):\n eigfuncs_flat[q] = X[q][0]\n eigvals_flat[q] = X[q][1]\n\n # unflatten eigfuncs / eigvals so they return to original shape\n eigfuncs = eigfuncs_flat.reshape(\n config.spindims, config.lmax, config.nmax, N\n )\n eigvals = eigvals_flat.reshape(config.spindims, config.lmax, config.nmax)\n\n return eigfuncs, eigvals\n\n elif solve_type == \"guess\":\n for q in range(pmax):\n eigs_guess_flat[q] = X[q][1]\n eigfuncs_null = X[:][0]\n\n eigs_guess = eigs_guess_flat.reshape(config.spindims, config.lmax)\n\n return eigfuncs_null, eigs_guess",
"def KS_matsolve_serial(\n self, T_sparse, B_sparse, v, xgrid, bc, solve_type, eigs_min_guess\n ):\n if solve_type == \"guess\":\n dtype = np.float64\n else:\n dtype = self.fp\n # compute the number of grid points\n N = np.size(xgrid)\n\n # initialize the eigenfunctions and their eigenvalues\n eigfuncs = np.zeros((config.spindims, config.lmax, config.nmax, N), dtype=dtype)\n eigvals = np.zeros((config.spindims, config.lmax, config.nmax), dtype=dtype)\n eigs_guess = np.zeros((config.spindims, config.lmax), dtype=dtype)\n\n # A new Hamiltonian has to be re-constructed for every value of l and each spin\n # channel if spin-polarized\n for l in range(config.lmax):\n # diagonalize Hamiltonian using scipy\n for i in range(np.shape(v)[0]):\n # fill potential matrices\n if self.grid_type == \"log\":\n v_corr = 0.5 * (l + 0.5) ** 2 * np.exp(-2 * xgrid)\n else:\n v_corr = 3 / (32 * xgrid**4) + l * (l + 1) / (2 * xgrid**4)\n V_mat_sparse = diags([v[i] + v_corr], offsets=[0], dtype=dtype)\n\n # construct Hamiltonians\n H_sparse = T_sparse + B_sparse @ V_mat_sparse\n\n # if dirichlet solve on (N-1) x (N-1) grid\n if bc == \"dirichlet\":\n H_sparse_s = self.mat_convert_dirichlet(H_sparse)\n B_sparse_s = self.mat_convert_dirichlet(B_sparse)\n # if neumann don't change anything\n elif bc == \"neumann\":\n H_sparse_s = H_sparse\n B_sparse_s = B_sparse\n\n # we seek the lowest nmax eigenvalues from sparse matrix diagonalization\n # use 'shift-invert mode' to find the eigenvalues nearest in magnitude\n # to the est. lowest eigenvalue from full diagonalization on coarse grid\n if solve_type == \"full\":\n eigs_up, vecs_up = eigs(\n H_sparse_s,\n k=config.nmax,\n M=B_sparse_s,\n which=\"LM\",\n sigma=eigs_min_guess[i, l],\n tol=config.conv_params[\"eigtol\"],\n )\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=np.ComplexWarning)\n vecs_up = vecs_up.astype(self.fp)\n eigs_up = eigs_up.astype(self.fp)\n\n K = np.zeros((N, config.nmax), dtype=dtype)\n if self.grid_type == \"log\":\n prefac = -2 * np.exp(2 * xgrid)\n else:\n prefac = 8 * xgrid**2\n for n in range(config.nmax):\n K[:, n] = prefac * (v[i] + v_corr - eigs_up.real[n])\n\n eigfuncs[i, l], eigvals[i, l] = self.update_orbs(\n vecs_up, eigs_up, xgrid, bc, K, self.grid_type\n )\n\n elif solve_type == \"guess\":\n # estimate the lowest eigenvalues for a given value of l\n B_dense = B_sparse.todense()\n invB = linalg.inv(B_dense)\n eigs_up = linalg.eigvals(\n invB * H_sparse.todense(), check_finite=False\n )\n if not np.all(np.isclose(invB * B_dense, np.eye(len(xgrid)))):\n print(\"Warning: B matrix in eigs_guess is ill-conditioned\")\n\n # sort the eigenvalues to find the lowest\n idr = np.argsort(eigs_up)\n eigs_guess[i, l] = np.array(eigs_up[idr].real, dtype=dtype)[0]\n\n # dummy variable for the null eigenfucntions\n eigfuncs_null = eigfuncs\n\n if solve_type == \"full\":\n return eigfuncs, eigvals\n else:\n return eigfuncs_null, eigs_guess",
"def get_solutions_helper(self):\n # Base case\n if len(self.unseen) == 1:\n solution = self.solution.copy()\n self.solutions.append(solution)\n # Step case\n else:\n for vertex in self.GRAPH:\n if vertex in self.unseen:\n for edge in self.GRAPH[vertex]:\n if edge[\"jump\"] in self.unseen and edge[\"end\"] not in self.unseen:\n # Peg solitaire jump\n self.unseen.remove(vertex)\n self.unseen.remove(edge[\"jump\"])\n self.unseen.add(edge[\"end\"])\n # Backtracking ready\n step = [vertex in self.unseen for vertex in self.GRAPH]\n self.solution.append(step)\n\n # Backtracking step\n self.get_solutions_helper()\n\n # Peg solitaire unjump\n self.unseen.add(vertex)\n self.unseen.add(edge[\"jump\"])\n self.unseen.remove(edge[\"end\"])\n # Backtracking unready\n self.solution.pop()",
"def solve(self, wls):\n\n self.wls = S.asarray(wls)\n\n multilayer = self.multilayer\n theta_inc = self.theta_inc\n\n nlayers = len(multilayer)\n d = S.array([l.thickness for l in multilayer]).ravel()\n\n Rs = S.zeros_like(self.wls)\n Ts = S.zeros_like(self.wls)\n Rp = S.zeros_like(self.wls)\n Tp = S.zeros_like(self.wls)\n\n Dp = S.zeros((2, 2), dtype=complex)\n Ds = S.zeros((2, 2), dtype=complex)\n P = S.zeros((2, 2), dtype=complex)\n Ms = S.zeros((2, 2), dtype=complex)\n Mp = S.zeros((2, 2), dtype=complex)\n k = S.zeros((nlayers, 2), dtype=complex)\n\n ntot = S.zeros((self.wls.size, nlayers), dtype=complex)\n for i, l in enumerate(multilayer):\n # ntot[:,i] = l.mat.n(self.wls,l.mat.T0)\n ntot[:, i] = l.mat.n(self.wls, l.mat.toc.T0)\n\n for iwl, wl in enumerate(self.wls):\n\n n = ntot[iwl, :]\n theta = snell(theta_inc, n)\n\n k[:, 0] = 2 * S.pi * n / wl * S.cos(theta)\n k[:, 1] = 2 * S.pi * n / wl * S.sin(theta)\n\n Ds = [[1.0, 1.0], [n[0] * S.cos(theta[0]), -n[0] * S.cos(theta[0])]]\n Dp = [[S.cos(theta[0]), S.cos(theta[0])], [n[0], -n[0]]]\n Ms = inv(Ds)\n Mp = inv(Dp)\n\n for nn, dd, tt, kk in zip(n[1:-1], d[1:-1], theta[1:-1], k[1:-1, 0]):\n\n Ds = [[1.0, 1.0], [nn * S.cos(tt), -nn * S.cos(tt)]]\n Dp = [[S.cos(tt), S.cos(tt)], [nn, -nn]]\n phi = kk * dd\n P = [[S.exp(1j * phi), 0], [0, S.exp(-1j * phi)]]\n Ms = S.dot(Ms, S.dot(Ds, S.dot(P, inv(Ds))))\n Mp = S.dot(Mp, S.dot(Dp, S.dot(P, inv(Dp))))\n\n Ds = [[1.0, 1.0], [n[-1] * S.cos(theta[-1]), -n[-1] * S.cos(theta[-1])]]\n Dp = [[S.cos(theta[-1]), S.cos(theta[-1])], [n[-1], -n[-1]]]\n Ms = S.dot(Ms, Ds)\n Mp = S.dot(Mp, Dp)\n\n rs = Ms[1, 0] / Ms[0, 0]\n ts = 1.0 / Ms[0, 0]\n\n rp = Mp[1, 0] / Mp[0, 0]\n tp = 1.0 / Mp[0, 0]\n\n Rs[iwl] = S.absolute(rs) ** 2\n Ts[iwl] = (\n S.absolute((n[-1] * S.cos(theta[-1])) / (n[0] * S.cos(theta[0])))\n * S.absolute(ts) ** 2\n )\n Rp[iwl] = S.absolute(rp) ** 2\n Tp[iwl] = (\n S.absolute((n[-1] * S.cos(theta[-1])) / (n[0] * S.cos(theta[0])))\n * S.absolute(tp) ** 2\n )\n\n self.Rs = Rs\n self.Ts = Ts\n self.Rp = Rp\n self.Tp = Tp\n return self",
"def nodal2D_steady_fixed_source(Dims,Lengths,BCs,D,Sigma,Q, tolerance=1.0e-12, phi_solution=0., LOUD=False, maxits=100):\n I = Dims[0]\n J = Dims[1]\n K = Dims[2]\n L = I*J*K\n Nx = Lengths[0]\n Ny = Lengths[1]\n Nz = Lengths[2]\n \n hx,hy,hz = np.array(Lengths)/np.array(Dims)\n ihx2,ihy2,ihz2 = (1.0/hx**2,1.0/hy**2,1.0/hz**2)\n\n if (type(phi_solution) != np.ndarray):\n phi_solution = np.zeros((2,I,J,5))\n phi_new = phi_solution.copy()\n iteration = 1\n converged = 0\n localBCs = np.ones((2,3))\n\n #reshape Q if necessary\n if Q.shape != (I,J,K,5):\n Q_new = np.zeros((I,J,K,5))\n Q_new[:,:,:,0] = Q[:,:,:]\n Q = Q_new\n\n #iterate over the x directions\n k=0\n while not(converged):\n \n #Solve for x direction\n d = 0 #solv direction\n tr_id = 1 #trans direction idx in array\n for j in range(J): #spatial loop over J coordinates\n for i in range(I): #spatial loop over X coordinates\n\n if not(i==0):\n phi_left = phi_solution[d,i-1,j,:]\n C = positive_current(phi_left,hx/2,hx,D[i-1,j,k])\n #print(\"i =\",i,\"Cr =\",C)\n localBCs[0,0:3] = [0.25,-D[i,j,k]/2,C]\n else:\n localBCs[0,:] = BCs[0,:].copy()\n localBCs[0,1] *= D[i,j,k]\n if not(i==(I-1)):\n phi_rt = phi_solution[d,i+1,j,:]\n C = negative_current(phi_rt,-hx/2,hx,D[i+1,j,k])\n #print(\"i =\",i,\"Cr =\",C)\n localBCs[1,0:3] = [.25,D[i,j,k]/2,C]\n else:\n localBCs[1,:] = BCs[1,:].copy()\n localBCs[1,1] *= D[i,j,k]\n \n #Compute transverse fluxes\n if i==0:\n nbr_ids = [i,i,i+1] #Assume constant along left edge\n elif i==(I-1):\n nbr_ids = [i-1,i,i] #assume constant along right edge\n else:\n nbr_ids = [i-1,i,i+1] #interior cell\n\n if not j==(J-1):\n top_phis = phi_solution[tr_id,nbr_ids,j,:]\n top_Ds = D[nbr_ids,j,k]\n Ltop_quad = transverse_leakage_dof(top_phis,hy/2.,hy,hx,top_Ds)\n else:\n top_phis = phi_solution[tr_id,nbr_ids,j,:]\n top_Ds = D[nbr_ids,j,k]\n Ltop_quad = transverse_leakage_dof(top_phis,hy/2.,hy,hx,top_Ds)\n #Ltop_quad = (0., 0, 0)\n\n if not j==0:\n bot_phis = phi_solution[tr_id,nbr_ids,j,:]\n bot_Ds = D[nbr_ids,j,k]\n Lbot_quad = transverse_leakage_dof(bot_phis,-hy/2.,hy,hx,bot_Ds)\n else:\n bot_phis = phi_solution[tr_id,nbr_ids,j,:]\n bot_Ds = D[nbr_ids,j,k]\n Lbot_quad = transverse_leakage_dof(bot_phis,-hy/2.,hy,hx,bot_Ds)\n #Lbot_quad = (0.,0,0)\n\n #Add leakages to the Q_local terms\n# print(\"\\n X Information for element: \",i,j)\n# print(\"\\nThe source is: \",Q[i,j,k,0])\n\n Q_local = np.array(Q[i,j,k,:])\n for dof in range(len(Ltop_quad)):\n Q_local[dof] -= 1/hy*(Ltop_quad[dof] - Lbot_quad[dof])\n\n# print(\"The transverse leakage magnitude is: \",-1./hy*(Ltop_quad[0] - Lbot_quad[0]))\n# print(\"Total RHS: \", Q_local[0], Q_local[1])\n\n #Compute the new x fluxes\n phi_new[0,i,j,:] = single_node1GVacuum(D[i,j,k],Sigma[i,j,k],Q_local,hx,localBCs)\n phi,a1,a2,a3,a4 = phi_new[0,i,j,:]\n# print(\"The reaction magnitude: \", phi_new[0,i,j,0]*Sigma[i,j,k])\n# print(\"The current magnitude: \",1./hx*(current(phi_new[0,i,j,:],hx/2,hx,D[i,j,k]) - current(phi_new[0,i,j,:],-hx/2,hx,D[i,j,k])))\n# print(\"\")\n\n #print(i,\"incoming current on left =\", localBCs[0,2],positive_current(phi_new[i,:],-h/2,h,D[i]) )\n if 0*(i>0):\n print(i,\"outgoing current on left =\", negative_current(phi_new[0,i-1,j,:],hx/2,hx,D[i-1,j,k]),\n negative_current(phi_new[0,i,j,:],-hx/2,hx,D[i,j,k]) )\n if 0*(i<I-1):\n print(i,\"outgoing current on right =\", positive_current(phi_new[0,i+1,j,:],-hx/2,hx,D[i+1,j,k]),\n positive_current(phi_new[0,i,j,:],hx/2,hx,D[i,j,k]) )\n #print(i,\"incoming current on right =\", 
localBCs[1,2],negative_current(phi_new[i,:],h/2,h,D[i]) )\n #print(\"zone \",i,\" current in at right:\",localBCs[1,2],\" current out at right:\",current_left)\n\n \n #Solve for y direction\n d = 1 #solv direction\n tr_id = 0 #trans direction idx in array\n for j in range(J): #spatial loop over J coordinates\n for i in range(I): #spatial loop over X coordinates\n\n if not(j==0):\n phi_left = phi_solution[d,i,j-1,:]\n C = positive_current(phi_left,hy/2,hy,D[i,j-1,k])\n #print(\"i =\",i,\"Cr =\",C)\n localBCs[0,0:3] = [0.25,-D[i,j,k]/2,C]\n else:\n localBCs[0,:] = BCs[2,:].copy()\n localBCs[0,1] *= D[i,j,k]\n if not(j==(J-1)):\n phi_rt = phi_solution[d,i,j+1,:]\n C = negative_current(phi_rt,-hy/2,hy,D[i,j+1,k])\n #print(\"i =\",i,\"Cr =\",C)\n localBCs[1,0:3] = [.25,D[i,j,k]/2,C]\n else:\n localBCs[1,:] = BCs[3,:].copy()\n localBCs[1,1] *= D[i,j,k]\n \n #Compute transverse fluxes\n if j==0:\n nbr_ids = [j,j,j+1] #Assume constant along left edge\n elif j==(J-1):\n nbr_ids = [j-1,j,j] #assume constant along right edge\n else:\n nbr_ids = [j-1,j,j+1] #interior cell\n\n if not i==(I-1):\n rgt_phis = phi_solution[tr_id,i,nbr_ids,:]\n rgt_Ds = D[i,nbr_ids,k]\n Lrgt_quad = transverse_leakage_dof(rgt_phis,hx/2.,hx,hy,rgt_Ds)\n# print(\"Leakage right\",Lrgt_quad)\n# print(\"Just the right leakage\",current(phi_solution[0,i,j,:],hx/2.,hx,D[i,j,k]))\n# print(\"Right outflow, inflow\",positive_current(phi_solution[0,i,j,:],hx/2,hx,D[i,j,k]),\n# negative_current(phi_solution[0,i,j,:],hx/2,hx,D[i,j,k]))\n else:\n rgt_phis = phi_solution[tr_id,i,nbr_ids,:]\n rgt_Ds = D[i,nbr_ids,k]\n Lrgt_quad = transverse_leakage_dof(rgt_phis,hx/2.,hx,hy,rgt_Ds)\n# print(\"Leakage right\",Lrgt_quad)\n# print(\"Just the right leakage\",current(phi_solution[0,i,j,:],hx/2.,hx,D[i,j,k]))\n# print(\"Right outflow, inflow\",positive_current(phi_solution[0,i,j,:],hx/2,hx,D[i,j,k]),\n# negative_current(phi_solution[0,i,j,:],hx/2,hx,D[i,j,k]))\n\n if not i==0:\n lft_phis = phi_solution[tr_id,i,nbr_ids,:]\n lft_Ds = D[i,nbr_ids,k]\n Llft_quad = transverse_leakage_dof(lft_phis,-hx/2.,hx,hy,lft_Ds)\n else:\n lft_phis = phi_solution[tr_id,i,nbr_ids,:]\n lft_Ds = D[i,nbr_ids,k]\n Llft_quad = transverse_leakage_dof(lft_phis,-hx/2.,hx,hy,lft_Ds)\n #Llft_quad = (0.,0,0)\n\n #Add leakages to the Q_local terms\n Q_local = np.array(Q[i,j,k,:])\n# print(\"\\n Y Information for element: \",i,j)\n# print(\"\\nThe source is: \",Q[i,j,k,0])\n for dof in range(len(Lrgt_quad)):\n Q_local[dof] -= 1/hx*(Lrgt_quad[dof] - Llft_quad[dof])\n# print(\"The transverse leakage magnitude is: \",-1./hx*(Lrgt_quad[0] - Llft_quad[0]))\n# print(\"Total RHS: \", Q_local[0], Q_local[1])\n\n phi_new[1,i,j,:] = single_node1GVacuum(D[i,j,k],Sigma[i,j,k],Q_local,hy,localBCs)\n# print(\"The reaction magnitude: \", phi_new[1,i,j,0]*Sigma[i,j,k])\n# print(\"The current magnitude: \",1./hy*(current(phi_new[1,i,j,:],hy/2,hy,D[i,j,k]) - current(phi_new[1,i,j,:],-hy/2,hy,D[i,j,k])))\n# print(\"\")\n phi,a1,a2,a3,a4 = phi_new[1,i,j,:]\n #print(i,\"incoming current on left =\", localBCs[0,2],positive_current(phi_new[i,:],-h/2,h,D[i]) )\n if 0*(i>0):\n print(i,\"outgoing current on left =\", negative_current(phi_new[i-1,:],h/2,h,D[i]),negative_current(phi_new[i,:],-h/2,h,D[i]) )\n if 0*(i<I-1):\n print(i,\"outgoing current on right =\", positive_current(phi_new[i+1,:],-h/2,h,D[i]),positive_current(phi_new[i,:],h/2,h,D[i]) )\n #print(i,\"incoming current on right =\", localBCs[1,2],negative_current(phi_new[i,:],h/2,h,D[i]) )\n #print(\"zone \",i,\" current in at 
right:\",localBCs[1,2],\" current out at right:\",current_left)\n\n# print(\"X solution\", phi_new[0,:,:,0])\n# print(\"Y solution\", phi_new[1,:,:,0])\n\n #Compute total change in x and y\n relchange = np.linalg.norm( np.reshape(phi_new-phi_solution, 5*I*J*K*2))/np.linalg.norm( np.reshape(phi_new, 5*I*J*K*2))\n reldiff = np.linalg.norm( np.reshape(phi_new[0,:,:,0] - phi_new[1,:,:,0], I*J*K)/np.linalg.norm( np.reshape(phi_new[0,:,:,0],I*J*K)) )\n converged = (relchange < tolerance) or (iteration >= maxits)\n if (LOUD):\n print(\"Iteration\",iteration,\": relative change total =\",relchange,\"relative difference X Y\",reldiff)\n iteration += 1 \n phi_solution = phi_new.copy()\n\n\n x = np.linspace(hx*.5,Nx-hx*.5,I)\n y = np.linspace(hy*.5,Ny-hy*.5,J)\n z = np.linspace(hz*.5,Nz-hz*.5,K)\n return x,y,z,phi_solution[0,:,:,0].reshape(I,J,1)#+phi_solution[1,:,:,0].reshape(I,J,1)))",
"def solve(self, solver):\n solver.solve()",
"def solve(self):\n ...",
"def solver(eqns, params):\n\n # First generates the linear system.\n mat, vec = get_linear_sys(eqns, params)\n\n print(\n 'Invoking the numpy.linalg.lstsq function...'\n )\n start_time = time.process_time()\n\n res = lstsq(mat, vec, **kwargs)\n\n print(\n 'Finished: {!s}sec.'.format(time.process_time() - start_time)\n )\n\n return res[0]",
"def g_solving_subproblem_of_ALR(self,vehicle_id):\r\n global_LB = -10000\r\n global_UB = 10000\r\n iteration_for_RSP = 20\r\n optimal_solution_for_RSP = None\r\n self.multiplier_v = 0.5\r\n\r\n # solve the expected shortest path problem\r\n self.g_dynamic_programming_algorithm(vehicle_id, 3)\r\n\r\n # obtain the variance\r\n y_ =self.g_ending_state_vector[vehicle_id].VSStateVector[0].Primal_Label_cost_variance\r\n\r\n for k in range(iteration_for_RSP):\r\n # print(k)\r\n LB = 0\r\n # step 2: solve decomposed dual problems\r\n # Part I: subproblem of x\r\n self.g_dynamic_programming_algorithm(vehicle_id, 1)\r\n LB += self.g_ending_state_vector[vehicle_id].VSStateVector[0].Label_cost_for_searching\r\n\r\n # Part II: subproblem of y\r\n obj_of_y_ = self.reliability * (y_) ** 0.5 - self.multiplier_v * y_\r\n if obj_of_y_ > 0:\r\n y = 0\r\n LB += 0\r\n else:\r\n y = y_\r\n LB += obj_of_y_\r\n\r\n # generate an upper bound\r\n variance = self.g_ending_state_vector[vehicle_id].VSStateVector[0].Primal_Label_cost_variance\r\n Label_cost_for_lagrangian_mean = self.g_ending_state_vector[vehicle_id].VSStateVector[0].Label_cost_for_searching_mean\r\n UB = Label_cost_for_lagrangian_mean + self.reliability * (variance) ** 0.5\r\n\r\n # print(\"UB:{}\".format(UB))\r\n # print(\"LB:{}\".format(LB))\r\n\r\n # UB and LB update\r\n if LB > global_LB:\r\n global_LB = LB\r\n\r\n if UB < global_UB:\r\n global_UB = UB\r\n optimal_solution_for_RSP = self.g_ending_state_vector[vehicle_id].VSStateVector[0]\r\n\r\n # step 3: update multipliers\r\n if variance- y != 0:\r\n self.multiplier_v+= (global_UB - LB) / (variance-y)\r\n # if self.multiplier_v<0:\r\n # self.multiplier_v=1\r\n # print(self.multiplier_v)\r\n\r\n # step 4: termination condition test\r\n if global_UB != 0:\r\n gap = abs((global_UB - global_LB) / global_UB)\r\n # print(gap)\r\n if gap < 0.02:\r\n print(\"iteration{}\".format(k + 1))\r\n print(self.multiplier_v)\r\n print(global_LB, global_UB)\r\n return optimal_solution_for_RSP, global_LB\r\n else:\r\n if global_UB - global_LB == 0:\r\n print(\"iteration{}\".format(k + 1))\r\n print(self.multiplier_v)\r\n print(global_LB, global_UB)\r\n return optimal_solution_for_RSP, global_LB\r\n\r\n if k == iteration_for_RSP - 1:\r\n print(\"iteration{}\".format(k + 1))\r\n print(self.multiplier_v)\r\n print(global_LB, global_UB)\r\n return optimal_solution_for_RSP, global_LB",
"def partial_solver2(g, lam):\n #Following the same method as the winning core, so same notations are used \n #Following the pseudo code given on the report\n #First we compute the solution set for player 0 for FixWP_0(lam) which is an under-approximation of player 0 winning region\n wp_w0 = solve_fixwp(g, 0, lam)\n #can happen that wp_w0 is empty, so checking \n if len(wp_w0) != 0:\n #compute attractor in g for player 0 to the target set wp_w0\n att = attr(g, wp_w0, 0)[0]\n #computing a subgame g_prime that only contains states of (S \\ a_prime)\n states_prime = [s for s in g.get_nodes() if not s in att]\n g_prime = g.subgame(states_prime)\n #recursively compute the partial solutions of g_prime\n (W0_prime, W1_prime) = partial_solver2(g_prime, lam)\n #we know that a_prime is part of player 0 winning region so we add it to W0_prime\n W0 = att + W0_prime\n W1 = W1_prime\n return (W0, W1)\n #we're in the case where wp_w0 was empty. this part of the algorithm is really similar to the part upside\n #computing wp_w1 that is the solution set for player 1 for FixWP_1(lam), an under-approximation of player 1 winning region\n wp_w1 = solve_fixwp(g, 1, lam)\n #can happen that wp_w1 is empty, so checking \n if len(wp_w1) != 0:\n #compute attractor in g for player 1 to the target set wp_w1\n att = attr(g, wp_w1, 1)[0]\n #computing a subgame g_prime that only contains states of (S \\ a_prime)\n states_prime = [s for s in g.get_nodes() if not s in att]\n g_prime = g.subgame(states_prime)\n #recursively compute the partial solutions of g_prime\n (W0_prime, W1_prime) = partial_solver2(g_prime, lam)\n #we know that a_prime is part of player 1 winning region so we add it to W1_prime\n W1 = att + W1_prime\n W0 = W0_prime\n return (W0, W1)\n #we're in the cas where wp_w0 and wp_w1 was empty. In this case we can't say anything about the winning regions of the players.\n return ([], [])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The argument p is assumed to be some permutation of 0, 1, ..., len(p)-1. Returns an array s, where s[i] gives the index of i in p.
|
import numpy as np

def invert_permutation(p):
s = np.empty_like(p)
s[p] = np.arange(p.size)
return s
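
A quick worked example, added for illustration and not part of the original record: with p = [2, 0, 1], value 0 sits at index 1 of p, value 1 at index 2 and value 2 at index 0, so the inverse is s = [1, 2, 0].

import numpy as np

p = np.array([2, 0, 1])
s = invert_permutation(p)
# s == array([1, 2, 0]); composing gives the identity: p[s] == s[p] == [0, 1, 2]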
|
[
"def invert_permutation(p):\n s = np.empty(p.size, p.dtype)\n s[p] = np.arange(p.size)\n return s",
"def _generate_pair_positions(self, s, p):\n r = np.random.choice(s + p, s, replace=False)\n paired = []\n unpaired = []\n i = 0\n for j in range(s + p):\n if j in r:\n paired.append(i)\n i += 2\n else:\n unpaired.append(i)\n i += 1\n return np.array(paired), np.array(unpaired)",
"def random_choice(arr, p):\n return arr[np.searchsorted(np.cumsum(p), np.random.random(), side=\"right\")]",
"def permute_2d(m, p):\r\n return m[p][:, p]\r\n # unused below\r\n m_t = transpose(m)\r\n r_t = take(m_t, p, axis=0)\r\n return take(transpose(r_t), p, axis=0)",
"def perm_conjugate(p, s):\n q = [None] * len(p)\n for i in range(len(p)):\n q[s[i]] = s[p[i]]\n return q",
"def rankperm(p):\n p = np.array(p)\n q = np.array(p).argsort()\n r = 0\n for k in range(len(p) - 1, 0, -1):\n s = p[k]\n p[k], p[q[k]] = p[q[k]], p[k]\n q[k], q[s] = q[s], q[k]\n r += s * np.math.factorial(k)\n return r",
"def pseudorandom(n, p, key):\n import numpy as np\n p = list(p)\n cp = np.cumsum([0] + p)\n assert np.allclose(1, cp[-1])\n assert len(p) < 256\n\n x = np.random.RandomState(key).random_sample(n)\n out = np.empty(n, dtype='i1')\n\n for i, (low, high) in enumerate(zip(cp[:-1], cp[1:])):\n out[(x >= low) & (x < high)] = i\n return out",
"def lift_perm(p: Dict[int, int]) -> np.ndarray:\n n = len(p)\n pm = np.zeros((1 << n, 1 << n), dtype=complex)\n for i in range(1 << n):\n j = 0\n mask = 1 << n\n for q in range(n):\n mask >>= 1\n if (i & mask) != 0:\n j |= 1 << (n - 1 - p[q])\n pm[j][i] = 1\n return pm",
"def modes_to_index(modes, p, m):\n mx = choose(m + p - 1, p)\n out = sum([choose(m - modes[p - i] + i - 2, i) for i in range(1, p + 1)])\n return mx - out - 1",
"def index_to_feature(p, dims):\n feature = []\n for dim in dims:\n feature.append(p % dim)\n p //= dim\n return feature",
"def permute(seq, permutation):\n return [seq[i] for i in permutation]",
"def perm_to_index(perm: Sequence[int], perm_index_to_index: np.ndarray) -> int:\n\n return perm_index_to_index[np.ravel_multi_index(\n tuple(perm), tuple(len(perm) for _ in range(len(perm))))]",
"def big_l_prime_array(p, n):\n lp = [0] * len(p)\n for j in range(len(p)-1):\n i = len(p) - n[j]\n if i < len(p):\n lp[i] = j + 1\n\n return lp",
"def _get_perm(s, t):\n\n n = len(s)\n assert n == len(set(s))\n assert sorted(s) == sorted(t)\n if s == t:\n return range(n)\n i = dict((k, j) for j, k in enumerate(s))\n return [i[k] for k in t]",
"def pivot_index(a, j, p=None):\n n = len(a)\n k = p or j\n while k < n and a[k][j] == 0:\n k += 1\n return k if k < n else None",
"def decoder(permutation):\n depermutation = []\n for x in range (0, len (permutation)):\n depermutation.append (permutation.index(x))\n return depermutation",
"def cartesian_product_indexing(permutation):\n permutation_length = [np.arange(len(p)) for p in permutation if len(p) != 0]\n if permutation_length == []:\n return np.asarray([])\n\n indexes = cartesian_product(*permutation_length) # cartesian indexes\n return np.asarray(\n [np.hstack([permutation[i][j] for i, j in enumerate(ix)]) for ix in indexes]\n )",
"def query(self, p):\n kmer = p[:self.k]\n i = bisect.bisect_left(self.index, (kmer, -1)) # -1 is larger than the number of index. Therefore, the offset retrieved will be at the leftmost position.\n hits = []\n while i < len(self.index):\n if self.index[i][0] != kmer: # If mismatch, the p is definitely not existed in t. Return empty list.\n break\n hits.append(self.index[i][1])\n i += 1\n return hits",
"def point_location(tri, p): \n simplex_index = tri.find_simplex(p)\n bc = []\n for id_, point in zip(simplex_index, p):\n # Calculate the two first barycentric coordinates for the relevant\n # simplex\n b = tri.transform[id_, :2].dot(point-tri.transform[id_, 2])\n bc.append(np.c_[np.atleast_2d(b), 1-b.sum()])\n # Create the full array and squeeze the shit out of it\n bc = np.array(bc).squeeze()\n return simplex_index, bc"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Initialize a HOOMD device given the parsed arguments.
|
import hoomd

def make_hoomd_device(args):
if args.device == 'CPU':
device = hoomd.device.CPU()
elif args.device == 'GPU':
device = hoomd.device.GPU()
else:
raise ValueError(f'Invalid device {args.device}.')
if not args.verbose:
device.notice_level = 0
return device
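
A hedged usage sketch; the argparse wiring below is an assumption about how args is produced upstream and is not taken from the original record.

import argparse
import hoomd

parser = argparse.ArgumentParser()
parser.add_argument('--device', choices=['CPU', 'GPU'], default='CPU')
parser.add_argument('--verbose', action='store_true')
args = parser.parse_args(['--device', 'CPU'])

device = make_hoomd_device(args)  # hoomd.device.CPU with notice_level set to 0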
|
[
"def initialize(self):\n self.ha_url = self.args.get(\"ha_url\", None)\n self.use_current_brightness = self.args.get(\"use_current_brightness\", False)\n self.condition = self.args.get(\"condition\")\n self.lights = self.args[\"lights\"]\n self.listen_state(self.change_lights_color, self.args[\"media_player\"], attribute = self.args.get(\"photo_attribute\", \"entity_picture\"))",
"def init_argument_parser(modules, device=None):\n parser = ArgumentParser()\n if device:\n parser.add_argument('--help-device', action='store_true',\n help='Print help for arguments specific to device')\n modules.append('libregice')\n init_modules_args(device, parser, modules)\n return parser",
"def init( d = None ):\n global device\n if d is not None:\n device = d\n State.init()",
"def initialise_device(self):\n pass",
"def opmd_init(h5):\n d = {\n 'basePath':'/data/%T/',\n 'dataType':'openPMD',\n 'openPMD':'2.0.0',\n 'openPMDextension':'BeamPhysics;SpeciesType',\n 'particlesPath':'particles/' \n }\n for k,v in d.items():\n h5.attrs[k] = np.string_(v)\n h5.create_group('/data/')",
"def __init__(self, device_dict):\n diff = set(device_dict.keys()) - set(YAMLKeyword.__dict__.keys())\n if len(diff) > 0:\n six.print_('Wrong key detected:')\n six.print_(diff)\n raise KeyError(str(diff))\n self.__dict__.update(device_dict)\n if self.system == SystemType.android:\n pass\n elif self.system == SystemType.arm_linux:\n try:\n sh.ssh('-q', '%s@%s' % (self.username, self.address),\n 'exit')\n except sh.ErrorReturnCode as e:\n six.print_('device connect failed, '\n 'please check your authentication',\n file=sys.stderr)\n raise e",
"def initialize(self, args):\n\t\tpass",
"def __init__(self, args):\n app_config = args['app_config']\n host_config = args['host_config']\n self.dry_run = args['dry_run']\n self.debug_command = args['debug_command']\n self.list_hosts = args['list_hosts']\n\n self.app_config = self._load_application_config(app_config)\n self.host_config = self._load_host_config(host_config)",
"def initialize():\n\n global cmdarg\n # Open syslog for error message tracking\n syslog.openlog(\"munin-chrony\", 0, syslog.LOG_DAEMON)\n\n # Try to get the command-line argument, if there is one (usually either\n # 'config' or nothing)\n try:\n cmdarg = sys.argv[1]\n except IndexError:\n # It's not actually an error if this is out of range -- it just means\n # there wasn't an argument, so don't run in config mode\n cmdarg = \"\"",
"def __init__(self, arlo, device, sensor_type):\n\n sensor_details = SENSOR_TYPES[sensor_type]\n\n if device is None:\n self._name = sensor_details[0]\n self._unique_id = sensor_type\n self._device = arlo\n else:\n self._name = \"{0} {1}\".format(sensor_details[0], device.name)\n self._unique_id = (\n \"{0}_{1}\".format(sensor_details[0], device.entity_id)\n .lower()\n .replace(\" \", \"_\")\n )\n self._device = device\n\n self._sensor_type = sensor_type\n self._icon = \"mdi:{}\".format(sensor_details[2])\n self._state = None\n self._attr = sensor_details[3]\n _LOGGER.info(\"ArloSensor: %s created\", self._name)",
"def __init__(self):\n cdb.initialize()\n cred.Store.initialize()\n self._vlan_mgr = importutils.import_object(conf.MANAGER_CLASS)\n for key in conf.PLUGINS[const.PLUGINS].keys():\n plugin_obj = conf.PLUGINS[const.PLUGINS][key]\n self._plugins[key] = importutils.import_object(plugin_obj)\n LOG.debug(\"Loaded device plugin %s\\n\" %\n conf.PLUGINS[const.PLUGINS][key])\n if key in conf.PLUGINS[const.INVENTORY].keys():\n inventory_obj = conf.PLUGINS[const.INVENTORY][key]\n self._inventory[key] = importutils.import_object(inventory_obj)\n LOG.debug(\"Loaded device inventory %s\\n\" %\n conf.PLUGINS[const.INVENTORY][key])\n\n LOG.debug(\"%s.%s init done\" % (__name__, self.__class__.__name__))",
"def __init__(self, client, device, name, model_type):\n LGEDevice.__init__(self, client, device)\n\n import wideq\n self._wp = wideq.WPDevice(client, device)\n\n self._wp.monitor_start()\n self._wp.monitor_start()\n self._wp.delete_permission()\n self._wp.delete_permission()\n\n # The response from the monitoring query.\n self._state = None\n self._name = name\n self._type = model_type\n\n self.update()",
"def __init__(self):\n nvmlInit()\n n_devices = nvmlDeviceGetCount()\n devices_handlers_list = [nvmlDeviceGetHandleByIndex(i) for i in range(n_devices)]\n\n self.devices = {\n '{}-{}'.format(NvmlHandler.exec_nvml_function(nvmlDeviceGetName, device).decode('ascii'), i): device\n for i, device in enumerate(devices_handlers_list)\n }",
"def __init__(self, ds=None, **kwargs) :\n self._name = self.__class__.__name__\n print('In %s.__init__' % self._name)\n\n HexDataIO.__init__(self, **kwargs)\n\n DIO = self\n if ds is None :\n DIO.open_input_data(self.DSNAME, **kwargs)\n else :\n DIO.use_psana_dataset(ds, pbits=0o377 if self.VERBOSE else 0)\n \n self._init_calib_and_sorter()\n\n self.t0_sec = self.t1_sec = time()",
"def __init__(self, config_parser, **kwargs):\n BaseAgent.__init__(self, config_parser)",
"def open(self):\n # Move all of the connection arguments into connect_args\n connect_args = {}\n\n # check for mode\n if self.get_option('port') is None:\n if self.get_option('mode') == 'telnet':\n connect_args['port'] = 23\n elif self.get_option('mode') == 'serial':\n connect_args['port'] = '/dev/ttyUSB0'\n else:\n connect_args['port'] = 830\n else:\n connect_args['port'] = self.get_option('port')\n\n if (self.get_option('mode') == 'telnet' or\n self.get_option('mode') == 'serial'):\n if self.get_option('baud') is None:\n # Default baud if serial or telnet mode\n connect_args['baud'] = 9600\n if self.get_option('attempts') is None:\n # Default attempts if serial or telnet mode\n connect_args['attempts'] = 10\n\n connect_args['host'] = self.get_option('host')\n # connect_args['port'] = self.get_option('port')\n connect_args['user'] = self.get_option('remote_user')\n connect_args['passwd'] = self.get_option('password')\n connect_args['ssh_private_key_file'] = self.get_option('private_key_file')\n connect_args['ssh_config'] = self.get_option('pyez_ssh_config')\n connect_args['timeout'] = self.get_option('persistent_connect_timeout')\n try:\n log_connect_args = dict(connect_args)\n log_connect_args[\"passwd\"] = \"NOT_LOGGING_PARAMETER\"\n\n self.queue_message(\"vvvv\", \"Creating device parameters: %s\" % log_connect_args)\n timeout = connect_args.pop(\"timeout\")\n self.dev = jnpr.junos.device.Device(**connect_args)\n self.queue_message(\"vvvv\", \"Opening device.\")\n self.dev.open()\n self.queue_message(\"vvvv\", \"Device opened.\")\n\n self.dev.timeout = self.get_option('persistent_command_timeout')\n self.queue_message(\"vvvv\", \"Setting default device timeout to %d.\" % timeout)\n # Exceptions raised by close() or open() are all sub-classes of\n # ConnectError, so this should catch all connection-related exceptions\n # raised from PyEZ.\n except pyez_exception.ConnectError as ex:\n raise AnsibleError(\"Unable to make a PyEZ connection: %s\" % (str(ex)))",
"def __init__(self, device_configuration, command_parser):\n\n # Call the VirtualDriver constructor\n super(MXL_Balloon_Tracker,self).__init__(device_configuration, command_parser)\n\n # Initialize the driver's command handler\n self._command_handler = BalloonHandler(self)\n\n # Create the Direct Downlink APRS tracking service\n self._aprs_service = Direct_Downlink_APRS_Service('direct_downlink_aprs_service', 'tracker', device_configuration)\n\n # Setup tracker attributes\n self.last_known_location = None",
"def cmd_init(args):\n cmd_genesis(args)\n cmd_nodeinit(args)",
"def __init__(self, device_mode, loop):\n self.loop = loop\n self.device_mode = device_mode\n if self.device_mode == \"stationary\":\n self.openface = OpenFaceInstance()\n self.openface.startProcess()\n self.stationary_eye_tracker = StationaryEyeTracker()\n elif self.device_mode == \"mobile\":\n self.openpose = OpenPoseInstance()\n self.openpose.startProcess()\n self.mobile_eye_tracker = MobileEyeTracker()\n self.mobile_eye_tracker.calibrate()\n\n self.wristband = Wristband(self.loop)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Execute the benchmark and report the performance.
|
def execute(self):
print_verbose_messages = (self.verbose
and self.device.communicator.rank == 0)
# Ensure that all ops are attached (needed for is_tuning_complete).
self.run(0)
if print_verbose_messages:
print(f'Running {type(self).__name__} benchmark')
if print_verbose_messages:
print(f'.. warming up for {self.warmup_steps} steps')
self.run(self.warmup_steps)
if (isinstance(self.device, hoomd.device.GPU)
and hasattr(self.sim.operations, 'is_tuning_complete')):
while not self.sim.operations.is_tuning_complete:
if print_verbose_messages:
print('.. autotuning GPU kernel parameters for '
f'{self.warmup_steps} steps')
self.run(self.warmup_steps)
if print_verbose_messages:
print(f'.. running for {self.benchmark_steps} steps '
f'{self.repeat} time(s)')
# benchmark
performance = []
if isinstance(self.device, hoomd.device.GPU):
with self.device.enable_profiling():
for i in range(self.repeat):
self.run(self.benchmark_steps)
performance.append(self.get_performance())
if print_verbose_messages:
print(f'.. {performance[-1]} {self.units}')
else:
for i in range(self.repeat):
self.run(self.benchmark_steps)
performance.append(self.get_performance())
if print_verbose_messages:
print(f'.. {performance[-1]} {self.units}')
return performance
|
[
"def run_benchmark(self):\n return 0",
"def execute_benchmark(self, benchmark_file):\n benchmark = Benchmark(benchmark_file, self.config,\n self.config.start_time or time.localtime())\n self.check_existing_results(benchmark)\n\n self.executor.init(self.config, benchmark)\n output_handler = OutputHandler(benchmark, self.executor.get_system_info())\n\n logging.debug(\"I'm benchmarking {0} consisting of {1} run sets.\".format(\n repr(benchmark_file), len(benchmark.run_sets)))\n\n try:\n result = self.executor.execute_benchmark(benchmark, output_handler)\n finally:\n # remove useless log folder if it is empty\n try:\n os.rmdir(benchmark.log_folder)\n except:\n pass\n\n if self.config.commit and not self.stopped_by_interrupt:\n try:\n util.add_files_to_git_repository(self.config.output_path,\n output_handler.all_created_files,\n self.config.commit_message+'\\n\\n'+output_handler.description)\n except OSError as e:\n logging.warning('Could not add files to git repository: ' + str(e))\n return result",
"def report_benchmarks(self):\n if not self.options.benchmark:\n return\n\n time_elapsed = self.end_time - self.start_time\n statistics = [('seconds elapsed', time_elapsed)]\n add_statistic = statistics.append\n for statistic in (defaults.STATISTIC_NAMES + ('files',)):\n value = self.file_checker_manager.statistics[statistic]\n total_description = 'total ' + statistic + ' processed'\n add_statistic((total_description, value))\n per_second_description = statistic + ' processed per second'\n add_statistic((per_second_description, int(value / time_elapsed)))\n\n self.formatter.show_benchmarks(statistics)",
"def _run_benchmark(self, params):\n logging.info('Running benchmark [%s]', self._get_name())\n params = benchmark_cnn.setup(params)\n bench = benchmark_cnn.BenchmarkCNN(params)\n bench.print_info()\n stats = bench.run()\n extras = {}\n extras['examples_per_sec'] = stats.get('images_per_sec')\n if 'last_average_loss' in stats:\n extras['last_average_loss'] = stats['last_average_loss']\n if 'top_1_accuracy' in stats:\n extras['top_1_accuracy'] = stats['top_1_accuracy']\n if 'top_5_accuracy' in stats:\n extras['top_5_accuracy'] = stats['top_5_accuracy']\n self.report_benchmark(\n iters=stats.get('num_steps'),\n wall_time=stats.get('average_wall_time'),\n extras=extras)",
"def benchmark(self):\n self.benchmark_setup()\n self.benchmark_forward()\n self.benchmark_backward()",
"def test_run_benchmark(self):\n if self.ssh is None:\n raise self.skipException(\"Booting failed\")\n ssh = self.ssh\n\n self._correct_ns_if_needed(ssh)\n\n ssh.exec_command(\"sudo apt-get update && sudo apt-get upgrade -fy\")\n ssh.exec_command(\"sudo apt-get update\")\n ssh.exec_command(\"sudo apt-get install -y make gcc\")\n ssh.exec_command(\"sudo apt-get install -y libx11-dev libgl1-mesa-dev \"\n \"libxext-dev perl perl-modules\")\n ssh.exec_command(\"wget http://byte-unixbench.googlecode.com/files\"\n \"/UnixBench5.1.3.tgz\")\n ssh.exec_command(\"tar xvf UnixBench5.1.3.tgz\")\n resp = ssh.exec_command(\"cd UnixBench && ./Run\")\n\n i = resp.find(\"---------------\")\n if i != -1:\n resp = resp[i:]\n resp = \"zone: \" + self.instance.placement + \"\\n\" + resp\n\n fail = None\n reference = self._get_benchmark_data()\n for k, v in reference.iteritems():\n i1 = resp.lower().find(k)\n if i1 == -1:\n continue\n\n k = resp[i1:i1 + len(k)]\n i2 = resp.find(\"\\n\", i1)\n outp = resp[i1 + len(k):i2].split()[:2]\n if len(outp) < 2:\n continue\n\n self.addDetail(k, test_content.text_content(\n outp[1] + \"|\" + outp[0] + \"|Min: \" + v[0] + \"|Max: \" + v[1]))\n\n if fail is None and float(outp[0]) < float(v[0]):\n fail = (outp[0], outp[1], k, v[0])\n\n if fail is not None:\n self.assertGreaterEqual(fail[0], fail[1],\n fail[2] + \": \" +\n fail[0] + \" \" + fail[1] + \" (current) < \" +\n fail[3] + \" \" + fail[1] + \" (AWS)\")",
"def main(cfg: DictConfig):\n benchmark_time(cfg)",
"def run_benchmark(curl, benchmark, test_config = TestConfig()):\n\n warmup_runs = benchmark.warmup_runs\n benchmark_runs = benchmark.benchmark_runs\n message = '' #Message is name of benchmark... print it?\n\n if (warmup_runs <= 0):\n raise Exception(\"Invalid number of warmup runs, must be > 0 :\" + warmup_runs)\n if (benchmark_runs <= 0):\n raise Exception(\"Invalid number of benchmark runs, must be > 0 :\" + benchmark_runs)\n\n #Initialize variables to store output\n output = BenchmarkResult()\n output.name = benchmark.name\n output.group = benchmark.group\n metricnames = list(benchmark.metrics)\n metricvalues = [METRICS[name] for name in metricnames] # Metric variable for curl, to avoid hash lookup for every metric name\n results = [list() for x in xrange(0, len(metricnames))] # Initialize arrays to store results for each metric\n\n curl.setopt(pycurl.WRITEFUNCTION, lambda x: None) #Do not store actual response body at all.\n\n #Benchmark warm-up to allow for caching, JIT compiling, on client\n logging.info('Warmup: ' + message + ' started')\n for x in xrange(0, warmup_runs):\n if benchmark.method == u'POST' or benchmark.method == u'PUT':\n curl.setopt(curl.READFUNCTION, StringIO.StringIO(benchmark.body).read)\n curl.perform()\n logging.info('Warmup: ' + message + ' finished')\n\n logging.info('Benchmark: ' + message + ' starting')\n\n for x in xrange(0, benchmark_runs): # Run the actual benchmarks\n if benchmark.method == u'POST' or benchmark.method == u'PUT':\n curl.setopt(curl.READFUNCTION, StringIO.StringIO(benchmark.body).read)\n\n try: # Run the curl call, if it errors, then add to failure counts for benchmark\n curl.perform()\n except Exception:\n output.failures = output.failures + 1\n continue # Skip metrics collection\n\n # Get all metrics values for this run, and store to metric lists\n for i in xrange(0, len(metricnames)):\n results[i].append( curl.getinfo(metricvalues[i]) )\n\n logging.info('Benchmark: ' + message + ' ending')\n\n temp_results = dict()\n for i in xrange(0, len(metricnames)):\n temp_results[metricnames[i]] = results[i]\n output.results = temp_results\n\n curl.close()\n return analyze_benchmark_results(output, benchmark)",
"def main(ctx: click.Context):\n click.secho(\"MySQL Benchmark\", bold=True)\n results = []\n with click.progressbar(range(ctx.obj[\"count\"])) as bar:\n for number in bar:\n response = requests.get(url=f'{ctx.obj[\"hostname\"]}/api/mysql.php')\n if response.status_code != 200:\n raise click.ClickException(\n f'{ctx.obj[\"hostname\"]}/api/mysql.php Not Found!'\n )\n\n response = requests.get(url=f'{ctx.obj[\"hostname\"]}/api/mysql.php')\n response.raise_for_status()\n results.append(\n BenchmarkResult(\n timestamp=time.time(), number=number, data=response.json()\n )\n )\n time.sleep(ctx.obj[\"sleep\"])\n\n insert_timings = get_timings(results, \"insert\")\n insert_single_transaction_timings = get_timings(\n results, \"insertSingleTransaction\"\n )\n result = {\n \"results\": results,\n \"timings\": {\n \"insert\": calculate_timing_stats(insert_timings),\n \"insert_single_transaction\": calculate_timing_stats(\n insert_single_transaction_timings\n ),\n },\n }\n table = render_table(result)\n click.echo(table)",
"def main():\n performance = []\n performance.append(tester.train_and_test(World_grid_1D))\n performance.append(tester.train_and_test(World_grid_1D_delay))\n performance.append(tester.train_and_test(World_grid_1D_ms))\n performance.append(tester.train_and_test(World_grid_1D_noise))\n performance.append(tester.train_and_test(World_grid_2D))\n performance.append(tester.train_and_test(World_grid_2D_dc,\n training_period=2e4))\n performance.append(tester.train_and_test(World_image_1D))\n performance.append(tester.train_and_test(World_image_2D,\n training_period=2e4))\n performance.append(tester.train_and_test(World_fruit))\n print('Individual benchmark scores: {0:.2}'.format(performance))\n print('Overall benchmark score: {0:.2}'.format(np.mean(performance))) \n \n # Block the program, displaying all plots.\n # When the plot windows are closed, the program closes.\n plt.show()",
"def main(*args):\n if len(args) >= 2:\n trial_id = bson.objectid.ObjectId(args[0])\n benchmark_id = bson.objectid.ObjectId(args[1])\n experiment_id = bson.objectid.ObjectId(args[2]) if len(args) >= 3 else None\n\n config = global_conf.load_global_config('config.yml')\n logging.config.dictConfig(config['logging'])\n log = logging.getLogger(__name__)\n db_client = database.client.DatabaseClient(config=config)\n\n trial_result = dh.load_object(db_client, db_client.trials_collection, trial_id)\n benchmark = dh.load_object(db_client, db_client.benchmarks_collection, benchmark_id)\n experiment = dh.load_object(db_client, db_client.experiments_collection, experiment_id)\n\n log.info(\"Benchmarking result {0} with benchmark {1}\".format(trial_id, benchmark_id))\n success = False\n retry = True\n if benchmark is not None and trial_result is not None:\n if not benchmark.is_trial_appropriate(trial_result):\n retry = False\n else:\n try:\n benchmark_result = benchmark.benchmark_results(trial_result)\n except Exception:\n benchmark_result = None\n log.error(\"Exception while benchmarking {0} with benchmark {1}:\\n{2}\".format(\n trial_id, benchmark_id, traceback.format_exc()))\n if benchmark_result is not None:\n benchmark_result_id = db_client.results_collection.insert(benchmark_result.serialize())\n log.info(\"Successfully benchmarked trial {0} with benchmark {1}, producing result {2}\".format(\n trial_id, benchmark_id, benchmark_result_id))\n if experiment is not None:\n experiment.add_benchmark_result(trial_result_id=trial_id, benchmark_id=benchmark_id,\n benchmark_result_id=benchmark_result_id, db_client=db_client)\n success = True\n if not success and experiment is not None:\n if retry:\n experiment.retry_benchmark(trial_result_id=trial_id, benchmark_id=benchmark_id, db_client=db_client)\n else:\n experiment.mark_benchmark_unsupported(trial_result_id=trial_id, benchmark_id=benchmark_id,\n db_client=db_client)",
"def run_benchmark():\n import argparse\n parser = argparse.ArgumentParser(description='Benchmark alchemically modified system against unmodified system.')\n parser.add_argument('--platform', dest='platform_name', action='store', default=None, help='platform name to benchmark (default: None)')\n options = parser.parse_args()\n\n from sams.tests import testsystems\n for testsystem_name in ['AblImatinibExplicitAlchemical']:\n cls = getattr(testsystems, testsystem_name)\n testsystem = cls()\n factory_args = { 'ligand_atoms' : testsystem.alchemical_atoms, 'receptor_atoms' : range(0,4266) }\n benchmark(testsystem.system, testsystem.positions, platform_name=options.platform_name, nsteps=5000, timestep=1.0*unit.femtoseconds, factory_args=factory_args)",
"def main():\n logging.basicConfig(level=\"INFO\")\n assert len(sys.argv) == 2, \"Exactly one positional argument (path to the raw dataset) is \"\\\n \"needed. \\n\\nE.g. `python sparsity_benchmark ~/bff_data/final_table`\"\n\n # Prepares data for the benchmark, may take a while\n data_parameters = DATA_PARAMETERS.copy()\n data_parameters[\"input_file\"] = sys.argv[1]\n data_parameters[\"preprocessed_file\"] = os.path.join(\n os.path.dirname(data_parameters[\"input_file\"]),\n \"preprocessed_dataset.pkl\"\n )\n data_preprocessor = preprocess_dataset(data_parameters=data_parameters)\n\n # Note: the features here should be in range [0, ~1.2], according to the original experiments.\n # 0 corresponds to no data, everything else is linearly scaled from dB units.\n features, _ = data_preprocessor.load_dataset()\n\n logging.info(\"Starting benchmarks\")\n noisy_features = benchmark_noise(\n features=features,\n data_parameters=data_parameters,\n experiment_parameters=EXPERIMENT_PARAMETERS\n )\n benchmark_binarization(\n noisy_features=noisy_features,\n data_parameters=data_parameters,\n experiment_parameters=EXPERIMENT_PARAMETERS\n )\n logging.info(\"Done\")",
"def test_bench():\n os.chdir(get_repo_root_path())\n\n # Newer versions of git check the ownership of directories.\n # We need to add an exception for /workdir which is shared, so that\n # the git commands don't fail.\n config_cmd = \"git config --global --add safe.directory /workdir\"\n subprocess.run(config_cmd, shell=True, check=True)\n\n # Get numbers for current HEAD.\n return_code, stdout, stderr = _run_cargo_bench(PR_BENCH_RESULTS_FILE)\n # Even if it is the first time this test is run, the benchmark tests should\n # pass. For this purpose, we need to explicitly check the return code.\n assert return_code == 0, \"stdout: {}\\n stderr: {}\".format(stdout, stderr)\n\n # Get numbers from upstream tip, without the changes from the current PR.\n _git_checkout_upstream_branch()\n return_code, stdout, stderr = _run_cargo_bench(UPSTREAM_BENCH_RESULTS_FILE)\n\n # Before checking any results, let's just go back to the PR branch.\n # This way we make sure that the cleanup always happens even if the test\n # fails.\n _git_checkout_pr_branch()\n\n if return_code == 0:\n # In case this benchmark also ran successfully, we can call critcmp and\n # compare the results.\n _run_critcmp()\n else:\n # The benchmark did not run successfully, but it might be that it is\n # because a benchmark does not exist. In this case, we do not want to\n # fail the test.\n if \"error: no bench target named `main`\" in stderr:\n # This is a bit of a &*%^ way of checking if the benchmark does not\n # exist. Hopefully it will be possible to check it in another way\n # ...soon\n print(\"There are no benchmarks in main. No comparison can happen.\")\n else:\n assert return_code == 0, \"stdout: {}\\n stderr: {}\".format(stdout, stderr)",
"def Run(benchmark_spec):\n spark_cluster = benchmark_spec.spark_service\n jar_start = datetime.datetime.now()\n\n stdout_path = None\n results = []\n jarfile = (FLAGS.spark_jarfile or\n spark_cluster.GetExampleJar(spark_service.SPARK_JOB_TYPE))\n try:\n if FLAGS.spark_print_stdout:\n # We need to get a name for a temporary file, so we create\n # a file, then close it, and use that path name.\n stdout_file = tempfile.NamedTemporaryFile(suffix='.stdout',\n prefix='spark_benchmark',\n delete=False)\n stdout_path = stdout_file.name\n stdout_file.close()\n\n stats = spark_cluster.SubmitJob(jarfile,\n FLAGS.spark_classname,\n job_arguments=FLAGS.spark_job_arguments,\n job_stdout_file=stdout_path,\n job_type=FLAGS.spark_job_type)\n if not stats[spark_service.SUCCESS]:\n raise Exception('Class {0} from jar {1} did not run'.format(\n FLAGS.spark_classname, jarfile))\n jar_end = datetime.datetime.now()\n if stdout_path:\n with open(stdout_path, 'r') as f:\n logging.info('The output of the job is ' + f.read())\n metadata = spark_cluster.GetMetadata()\n metadata.update({'jarfile': jarfile,\n 'class': FLAGS.spark_classname,\n 'job_arguments': str(FLAGS.spark_job_arguments),\n 'print_stdout': str(FLAGS.spark_print_stdout)})\n\n results.append(sample.Sample('wall_time',\n (jar_end - jar_start).total_seconds(),\n 'seconds', metadata))\n if spark_service.RUNTIME in stats:\n results.append(sample.Sample('runtime',\n stats[spark_service.RUNTIME],\n 'seconds', metadata))\n if spark_service.WAITING in stats:\n results.append(sample.Sample('pending_time',\n stats[spark_service.WAITING],\n 'seconds', metadata))\n\n if not spark_cluster.user_managed:\n create_time = (spark_cluster.resource_ready_time -\n spark_cluster.create_start_time)\n results.append(sample.Sample('cluster_create_time', create_time,\n 'seconds', metadata))\n finally:\n if stdout_path and os.path.isfile(stdout_path):\n os.remove(stdout_path)\n\n return results",
"def _run():\n subprocess.check_call(\n [\n \"tools/bazel\",\n \"build\",\n \"-c\",\n \"opt\",\n \"test/core/memory_usage/memory_usage_test\",\n ]\n )\n ret = {}\n for name, benchmark_args in _BENCHMARKS.items():\n for scenario, extra_args in _SCENARIOS.items():\n # TODO(chenancy) Remove when minstack is implemented for channel\n if name == \"channel\" and scenario == \"minstack\":\n continue\n try:\n output = subprocess.check_output(\n [\n \"bazel-bin/test/core/memory_usage/memory_usage_test\",\n ]\n + benchmark_args\n + extra_args\n )\n except subprocess.CalledProcessError as e:\n print(\"Error running benchmark:\", e)\n continue\n for line in output.splitlines():\n for key, (pattern, conversion) in _INTERESTING.items():\n m = re.match(pattern, line)\n if m:\n ret[scenario + \": \" + key] = conversion(m.group(1))\n return ret",
"def run(self):\n start = time.time()\n self.write_to_status(self.status_fields)\n count = range(self.count)\n \n # parallelize using multithreading\n with concurrent.futures.ThreadPoolExecutor() as executor:\n future_runs = {executor.submit(self.run_single_test): num for num in count}\n for future in concurrent.futures.as_completed(future_runs):\n pass\n \n end = time.time()\n print(\"completed in {}.\".format(end - start))",
"def RunBenchmark(path_to_apk, run_label):\n # `path_to_apk` is similar to `./out/59.0.3071.132_arm_MonochromeStable.apk`\n chrome_version = ChromeVersion(path_to_apk.split('/')[-1].split('_')[0])\n subprocess.call(['adb', 'install', '-r', '-d', path_to_apk])\n subprocess.call([os.path.join(utils.CHROMIUM_SRC, 'tools',\n 'perf', 'run_benchmark'),\n '--browser=android-system-chrome',\n '--pageset-repeat=1', # could remove this later\n '--results-label=%s' % str(chrome_version),\n # TODO(wangge):not sure if we should run in compatibility\n # mode even for the later version, probably add a check in\n # caller to determine if we should run it in compatibility\n # mode and add an argument `run_in_compatibility_mode` to\n # the `RunBenchmark` function\n '--compatibility-mode=no-field-trials',\n '--compatibility-mode=ignore-certificate-errors',\n '--compatibility-mode=legacy-command-line-path',\n '--compatibility-mode=gpu-benchmarking-fallbacks',\n '--story-filter=wikipedia', # could remove this\n # thinking of adding an argument to the tool to set this\n '--output-dir=%s' % os.path.join(\n utils.APP_ROOT, 'results', run_label,\n str(chrome_version.milestone)),\n # thinking of adding an argument to the tool to set this too\n 'system_health.memory_mobile'])",
"def run(self):\n self.speed_test.start()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Make an ArgumentParser instance for benchmark options.
|
def make_argument_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--device',
type=str,
choices=['CPU', 'GPU'],
help='Execution device.',
required=True)
parser.add_argument('-N',
type=int,
default=DEFAULT_N,
help='Number of particles.')
parser.add_argument('--rho',
type=float,
default=DEFAULT_RHO,
help='Number density.')
parser.add_argument('--dimensions',
type=int,
choices=[2, 3],
help='Number of dimensions.',
default=DEFAULT_DIMENSIONS)
parser.add_argument('--warmup_steps',
type=int,
default=DEFAULT_WARMUP_STEPS,
help='Number of timesteps to run before timing.')
parser.add_argument('--benchmark_steps',
type=int,
default=DEFAULT_BENCHMARK_STEPS,
help='Number of timesteps to run in the benchmark.')
parser.add_argument('--repeat',
type=int,
default=DEFAULT_REPEAT,
help='Number of times to repeat the run.')
parser.add_argument('-v',
'--verbose',
action='store_true',
help='Verbose output.')
return parser
|
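A short sketch of consuming the parser above, assuming make_argument_parser and the DEFAULT_* constants are importable from the surrounding module; mapping the --device choice onto a hoomd device object is an assumption based on the declared choices ['CPU', 'GPU'].

import hoomd

parser = make_argument_parser()
args = parser.parse_args(['--device', 'CPU', '-N', '10000', '--verbose'])

# Map the --device choice onto a hoomd device object (assumed convention).
device = hoomd.device.GPU() if args.device == 'GPU' else hoomd.device.CPU()
print(args.N, args.rho, args.dimensions, args.warmup_steps,
      args.benchmark_steps, args.repeat, args.verbose)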
[
"def make_argument_parser():\n parser = Benchmark.make_argument_parser()\n parser.add_argument('--skip-reference',\n action='store_true',\n help='Skip the reference simulation run.')\n return parser",
"def create_parser():\n parser = argparse.ArgumentParser()\n # parser.add_argument(\n # '-s', '--scrpfrom', help='url you will be scraping')\n parser.add_argument('url', help='url')\n\n return parser",
"def build_arg_parser():\n\n main = ArgumentParser(description='AMFinder command-line arguments.',\n allow_abbrev=False,\n formatter_class=RawTextHelpFormatter)\n\n subparsers = main.add_subparsers(dest='run_mode', required=True,\n help='action to be performed.')\n\n _ = training_subparser(subparsers)\n _ = prediction_subparser(subparsers)\n _ = diagnostic_subparser(subparsers)\n\n return main",
"def set_options():\n parser = argparse.ArgumentParser(description='test hexrd.quadrature')\n\n return parser",
"def create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('url', help='url to scrape')\n return parser",
"def _create_parser(self):\n default_options = self._create_defaults()\n\n all_categories = ['build', 'whitespace']\n\n mock_stderr = self._MockStdErr()\n\n return ArgumentParser(\n all_categories=all_categories,\n base_filter_rules=[],\n default_options=default_options,\n mock_stderr=mock_stderr,\n usage='test usage')",
"def setup_options_parser(self, argparser):\n pass",
"def parser(cls, *, with_showtb=False):\n parser = argparse.ArgumentParser(add_help=False)\n parser.add_argument('-v', '--verbose', action='count', default=0,\n help='produce more output')\n parser.add_argument('-q', '--quiet', action='count', default=0,\n help='produce less output')\n parser.add_argument('--dry-run', dest='dryrun', action='store_true',\n default=False, help='do not actually make changes')\n\n if with_showtb:\n parser.add_argument('--traceback', action='store_true',\n default=False, help='do not hide tracebacks')\n\n return parser",
"def initCmdLineParser():\n\n # Init parser and all general flags\n logging.debug(\"initiating command line option parser\")\n usage = \"usage: %prog [options]\"\n parser = OptionParser(usage)\n parser.add_option(\"--gen-answer-file\", help=\"Generate a template of an answer file, using this option excludes all other option\")\n parser.add_option(\"--answer-file\", help=\"Runs the configuration in none-interactive mode, extracting all information from the \\\n configuration file. using this option excludes all other option\")\n parser.add_option(\"--no-mem-check\", help=\"Disable minimum memory check\", action=\"store_true\", default=False)\n\n # For each group, create a group option\n for group in controller.getAllGroups():\n groupParser = OptionGroup(parser, group.getKey(\"DESCRIPTION\"))\n\n for param in group.getAllParams():\n cmdOption = param.getKey(\"CMD_OPTION\")\n paramUsage = param.getKey(\"USAGE\")\n optionsList = param.getKey(\"OPTION_LIST\")\n useDefault = param.getKey(\"USE_DEFAULT\")\n if not useDefault:\n if optionsList:\n groupParser.add_option(\"--%s\" % cmdOption, metavar=optionsList, help=paramUsage, choices=optionsList)\n else:\n groupParser.add_option(\"--%s\" % cmdOption, help=paramUsage)\n\n # Add group parser to main parser\n parser.add_option_group(groupParser)\n\n return parser",
"def parser(cls, *args, **kwargs):\n\n parser = ArgumentParser(*args, **kwargs)\n parser.add_argument('-a', \"--address\",\n help=\"Force entry point address\", default=None)\n parser.add_argument('-b', \"--dumpblocs\", action=\"store_true\",\n help=\"Log disasm blocks\")\n parser.add_argument('-z', \"--singlestep\", action=\"store_true\",\n help=\"Log single step\")\n parser.add_argument('-d', \"--debugging\", action=\"store_true\",\n help=\"Debug shell\")\n parser.add_argument('-g', \"--gdbserver\", type=int,\n help=\"Listen on port @port\")\n parser.add_argument(\"-j\", \"--jitter\",\n help=\"Jitter engine. Possible values are: gcc (default), tcc, llvm, python\",\n default=\"gcc\")\n parser.add_argument(\n '-q', \"--quiet-function-calls\", action=\"store_true\",\n help=\"Don't log function calls\")\n parser.add_argument('-i', \"--dependencies\", action=\"store_true\",\n help=\"Load PE and its dependencies\")\n\n for base_cls in cls._classes_():\n base_cls.update_parser(parser)\n return parser",
"def parseOptions():\n \n parser=op.OptionParser(usage=\"Usage: %prog [options]\"\n ,version=\"%prog 1.0\",description=r\"Randomly generates the content of a text file.\")\n \n #add options\n addParserOptions(parser)\n \n #parse command line options\n return parser.parse_args()",
"def initCmdLineParser():\n\n # Init parser and all general flags\n usage = \"usage: %prog [options] [--help]\"\n parser = OptionParser(usage=usage, version=\"0.1\")\n\n parser.add_option(\"-d\", \"--daemon\", action=\"store_true\", default=False, help=\"daemon mode\")\n parser.add_option(\"-c\", \"--config\", help=\"install config file\", default = 'test.conf')\n parser.add_option(\"-D\", \"--debug\", action=\"store_true\", help=\"debug mode\", default = False)\n\n parser.add_option(\"-a\", \"--add\", action=\"store_true\", help=\"add node to cluster\", default = False)\n parser.add_option(\"-p\", \"--port\", help= \"http server port\", default = '8999')\n\n\n return parser",
"def build_parser():\n parser = argparse.ArgumentParser(description='The classic FizzBuzz game in programmatic form.', add_help=False)\n parser.add_argument('-h', '--help', default=argparse.SUPPRESS, action='help',\n help='Show this help message and exit.')\n parser.add_argument('-s', '--start', default=1, type=int, action='store', metavar='START',\n help='The number to start FizzBuzzing at (inclusive).')\n parser.add_argument('stop', type=int, action='store', metavar='STOP',\n help='The number to end FizzBuzzing at (exclusive).')\n return parser",
"def mujoco_arg_parser():\n parser = arg_parser()\n parser.add_argument('--env', help='environment ID', type=str, default='Reacher-v2')\n parser.add_argument('--seed', help='RNG seed', type=int, default=0)\n parser.add_argument('--num-timesteps', type=int, default=int(1e6))\n parser.add_argument('--play', default=False, action='store_true')\n return parser",
"def create_arguments_parser():\n description = \"Statically analyse SBML files for modelling errors\"\n parent_arg_parser = rate_checker_sbml.create_arguments_parser()\n parser = argparse.ArgumentParser(description=description,\n parents=[parent_arg_parser])\n return parser",
"def prepare_optparser ():\n usage = \"usage: %prog <-f bamfilename -d fileOut>[options]\"\n description = \"-f is bamfilename, -d is for result output.\"\n optparser = OptionParser(version=\"%prog\",description=description,usage=usage,add_help_option=False)\n optparser.add_option(\"-h\",\"--help\",action=\"help\",help=\"Show this help message and exit.\")\n optparser.add_option(\"-f\",\"--bamfilename\",dest=\"bamfilename\",type=\"string\",\n help=\"wt file.\")\n optparser.add_option(\"-d\",\"--outdir\",dest=\"outdir\",type=\"string\",\n help=\"mut file.\") \n return optparser",
"def parse_options() -> Namespace:\n\n opt_parser = OptionParser(\n \"liftoff\",\n [\n \"script\",\n \"config_path\",\n \"procs_no\",\n \"gpus\",\n \"per_gpu\",\n \"no_detach\",\n \"verbose\",\n \"copy_to_clipboard\",\n \"time_limit\", # This should be removed in favour of start_by\n \"start_by\",\n \"end_by\",\n \"optimize\",\n \"args\",\n \"filters\",\n \"results_path\",\n \"name\",\n \"max_runs\",\n \"shuffle\",\n ],\n )\n return opt_parser.parse_args()",
"def get_base_argument_parser(\n **kwargs\n) -> ArgumentParser:\n\n parser = ArgumentParser(\n allow_abbrev=False,\n add_help=False,\n **kwargs\n )\n\n parser.add_argument(\n '--help',\n action='store_true',\n help='Pass this flag to print usage and argument descriptions.'\n )\n\n parser.add_argument(\n '--log',\n choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],\n help='Logging level.'\n )\n\n return parser",
"def atari_arg_parser():\n parser = arg_parser()\n parser.add_argument('--env', help='environment ID', default='BreakoutNoFrameskip-v4')\n parser.add_argument('--seed', help='RNG seed', type=int, default=0)\n parser.add_argument('--num-timesteps', type=int, default=int(10e6))\n return parser"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Make an ArgumentParser instance for comparative benchmark options.
|
def make_argument_parser():
parser = Benchmark.make_argument_parser()
parser.add_argument('--skip-reference',
action='store_true',
help='Skip the reference simulation run.')
return parser
|
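Because this comparative parser extends Benchmark.make_argument_parser(), every base option remains available alongside the new flag. A quick illustrative check, assuming the base parser is the one shown in the previous entry:

parser = make_argument_parser()
args = parser.parse_args(['--device', 'GPU', '--skip-reference'])
assert args.skip_reference is True
assert args.device == 'GPU'
# Unspecified options keep the defaults declared by the base parser.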
[
"def make_argument_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('--device',\n type=str,\n choices=['CPU', 'GPU'],\n help='Execution device.',\n required=True)\n parser.add_argument('-N',\n type=int,\n default=DEFAULT_N,\n help='Number of particles.')\n parser.add_argument('--rho',\n type=float,\n default=DEFAULT_RHO,\n help='Number density.')\n parser.add_argument('--dimensions',\n type=int,\n choices=[2, 3],\n help='Number of dimensions.',\n default=DEFAULT_DIMENSIONS)\n parser.add_argument('--warmup_steps',\n type=int,\n default=DEFAULT_WARMUP_STEPS,\n help='Number of timesteps to run before timing.')\n parser.add_argument('--benchmark_steps',\n type=int,\n default=DEFAULT_BENCHMARK_STEPS,\n help='Number of timesteps to run in the benchmark.')\n parser.add_argument('--repeat',\n type=int,\n default=DEFAULT_REPEAT,\n help='Number of times to repeat the run.')\n parser.add_argument('-v',\n '--verbose',\n action='store_true',\n help='Verbose output.')\n return parser",
"def build_arg_parser():\n\n main = ArgumentParser(description='AMFinder command-line arguments.',\n allow_abbrev=False,\n formatter_class=RawTextHelpFormatter)\n\n subparsers = main.add_subparsers(dest='run_mode', required=True,\n help='action to be performed.')\n\n _ = training_subparser(subparsers)\n _ = prediction_subparser(subparsers)\n _ = diagnostic_subparser(subparsers)\n\n return main",
"def create_parser():\n parser = argparse.ArgumentParser()\n # parser.add_argument(\n # '-s', '--scrpfrom', help='url you will be scraping')\n parser.add_argument('url', help='url')\n\n return parser",
"def set_options():\n parser = argparse.ArgumentParser(description='test hexrd.quadrature')\n\n return parser",
"def _create_parser(self):\n default_options = self._create_defaults()\n\n all_categories = ['build', 'whitespace']\n\n mock_stderr = self._MockStdErr()\n\n return ArgumentParser(\n all_categories=all_categories,\n base_filter_rules=[],\n default_options=default_options,\n mock_stderr=mock_stderr,\n usage='test usage')",
"def create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('url', help='url to scrape')\n return parser",
"def parser(cls, *, with_showtb=False):\n parser = argparse.ArgumentParser(add_help=False)\n parser.add_argument('-v', '--verbose', action='count', default=0,\n help='produce more output')\n parser.add_argument('-q', '--quiet', action='count', default=0,\n help='produce less output')\n parser.add_argument('--dry-run', dest='dryrun', action='store_true',\n default=False, help='do not actually make changes')\n\n if with_showtb:\n parser.add_argument('--traceback', action='store_true',\n default=False, help='do not hide tracebacks')\n\n return parser",
"def initCmdLineParser():\n\n # Init parser and all general flags\n logging.debug(\"initiating command line option parser\")\n usage = \"usage: %prog [options]\"\n parser = OptionParser(usage)\n parser.add_option(\"--gen-answer-file\", help=\"Generate a template of an answer file, using this option excludes all other option\")\n parser.add_option(\"--answer-file\", help=\"Runs the configuration in none-interactive mode, extracting all information from the \\\n configuration file. using this option excludes all other option\")\n parser.add_option(\"--no-mem-check\", help=\"Disable minimum memory check\", action=\"store_true\", default=False)\n\n # For each group, create a group option\n for group in controller.getAllGroups():\n groupParser = OptionGroup(parser, group.getKey(\"DESCRIPTION\"))\n\n for param in group.getAllParams():\n cmdOption = param.getKey(\"CMD_OPTION\")\n paramUsage = param.getKey(\"USAGE\")\n optionsList = param.getKey(\"OPTION_LIST\")\n useDefault = param.getKey(\"USE_DEFAULT\")\n if not useDefault:\n if optionsList:\n groupParser.add_option(\"--%s\" % cmdOption, metavar=optionsList, help=paramUsage, choices=optionsList)\n else:\n groupParser.add_option(\"--%s\" % cmdOption, help=paramUsage)\n\n # Add group parser to main parser\n parser.add_option_group(groupParser)\n\n return parser",
"def build_parser():\r\n parser = ArgumentParser()\r\n \r\n parser.add_argument('algorithm_name', type=str,\r\n help='Name of algorithm to look up input file.', metavar='D')\r\n \r\n return parser",
"def setup_options_parser(self, argparser):\n pass",
"def setParser():\n parser = argparse.ArgumentParser(\n prog=\"Nussinov Algorithm Solver\",\n description=\"A program that runs Nussinov's Algorithm on a given RNA strand and returns the most viable pairings.\"\n )\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument(\"-f\", \"--filepath\", help=\"the path to a text file with a sequence\")\n group.add_argument(\"-s\", \"--sequence\", help=\"the RNA sequence to evaluate\")\n parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", help=\"More verbose output\")\n parser.add_argument(\"-u\", \"--uncommon\", action=\"store_true\", help=\"Use Uncommon RNA matches (G,U)\")\n return parser",
"def create_arguments_parser():\n description = \"Statically analyse SBML files for modelling errors\"\n parent_arg_parser = rate_checker_sbml.create_arguments_parser()\n parser = argparse.ArgumentParser(description=description,\n parents=[parent_arg_parser])\n return parser",
"def parseOptions():\n \n parser=op.OptionParser(usage=\"Usage: %prog [options]\"\n ,version=\"%prog 1.0\",description=r\"Randomly generates the content of a text file.\")\n \n #add options\n addParserOptions(parser)\n \n #parse command line options\n return parser.parse_args()",
"def parser(cls, *args, **kwargs):\n\n parser = ArgumentParser(*args, **kwargs)\n parser.add_argument('-a', \"--address\",\n help=\"Force entry point address\", default=None)\n parser.add_argument('-b', \"--dumpblocs\", action=\"store_true\",\n help=\"Log disasm blocks\")\n parser.add_argument('-z', \"--singlestep\", action=\"store_true\",\n help=\"Log single step\")\n parser.add_argument('-d', \"--debugging\", action=\"store_true\",\n help=\"Debug shell\")\n parser.add_argument('-g', \"--gdbserver\", type=int,\n help=\"Listen on port @port\")\n parser.add_argument(\"-j\", \"--jitter\",\n help=\"Jitter engine. Possible values are: gcc (default), tcc, llvm, python\",\n default=\"gcc\")\n parser.add_argument(\n '-q', \"--quiet-function-calls\", action=\"store_true\",\n help=\"Don't log function calls\")\n parser.add_argument('-i', \"--dependencies\", action=\"store_true\",\n help=\"Load PE and its dependencies\")\n\n for base_cls in cls._classes_():\n base_cls.update_parser(parser)\n return parser",
"def mujoco_arg_parser():\n parser = arg_parser()\n parser.add_argument('--env', help='environment ID', type=str, default='Reacher-v2')\n parser.add_argument('--seed', help='RNG seed', type=int, default=0)\n parser.add_argument('--num-timesteps', type=int, default=int(1e6))\n parser.add_argument('--play', default=False, action='store_true')\n return parser",
"def atari_arg_parser():\n parser = arg_parser()\n parser.add_argument('--env', help='environment ID', default='BreakoutNoFrameskip-v4')\n parser.add_argument('--seed', help='RNG seed', type=int, default=0)\n parser.add_argument('--num-timesteps', type=int, default=int(10e6))\n return parser",
"def prepare_optparser ():\n usage = \"usage: %prog <-f bamfilename -d fileOut>[options]\"\n description = \"-f is bamfilename, -d is for result output.\"\n optparser = OptionParser(version=\"%prog\",description=description,usage=usage,add_help_option=False)\n optparser.add_option(\"-h\",\"--help\",action=\"help\",help=\"Show this help message and exit.\")\n optparser.add_option(\"-f\",\"--bamfilename\",dest=\"bamfilename\",type=\"string\",\n help=\"wt file.\")\n optparser.add_option(\"-d\",\"--outdir\",dest=\"outdir\",type=\"string\",\n help=\"mut file.\") \n return optparser",
"def get_base_argument_parser(\n **kwargs\n) -> ArgumentParser:\n\n parser = ArgumentParser(\n allow_abbrev=False,\n add_help=False,\n **kwargs\n )\n\n parser.add_argument(\n '--help',\n action='store_true',\n help='Pass this flag to print usage and argument descriptions.'\n )\n\n parser.add_argument(\n '--log',\n choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],\n help='Logging level.'\n )\n\n return parser",
"def setup_argument_parser():\n arg_parser = argparse.ArgumentParser(prog='src')\n # User needs to specify the URL from which to begin the search\n arg_parser.add_argument('url',\n help='URL to start from')\n # User may provide a limit on how many URLs to see\n arg_parser.add_argument('-l', '--limit',\n dest='limit',\n type=int,\n help='number of URLs to display, 100 by default',\n default=100)\n # User may provide a list of user agents to cycle through\n arg_parser.add_argument('-ua' '--user-agents',\n dest='agents',\n nargs='*',\n help='user agent(s) to use for requests',\n default=[])\n return arg_parser"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Applies default secthresh & exclusion radius constraints
|
def apply_default_constraints(self):
try:
self.apply_secthresh(pipeline_weaksec(self.koi))
except NoWeakSecondaryError:
logging.warning('No secondary eclipse threshold set for {}'.format(self.koi))
self.set_maxrad(default_r_exclusion(self.koi))
|
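A hedged usage sketch for apply_default_constraints(); fpp is assumed to be an instance of the class that defines the method and carries a .koi attribute, and the maxrad attribute read back at the end is an assumption about what set_maxrad() stores.

# fpp is a hypothetical instance of the class defining apply_default_constraints().
# The call applies the pipeline weak-secondary threshold when one exists
# (otherwise a warning is logged) and always sets the default exclusion radius.
fpp.apply_default_constraints()
print(fpp.maxrad)  # hypothetical attribute populated by set_maxrad()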
[
"def apply_constraints(self):\n pass",
"def constraints(self):",
"def applyConstraints(self):\n pass",
"def _setSimplexWithinRangeBoundary(self, radius=None):\n x0 = self.population[0]\n #code modified from park-1.2/park/simplex.py (version 1257)\n if self._useStrictRange:\n x0 = self._clipGuessWithinRangeBoundary(x0)\n\n if radius is None: radius = 0.05 # nonzdelt=0.05 from scipy-0.9\n val = x0*(1+radius)\n val[val==0] = (radius**2) * 0.1 # zdelt=0.00025 update from scipy-0.9\n if not self._useStrictRange:\n self.population[0] = x0\n return val\n\n lo = self._strictMin\n hi = self._strictMax\n radius = clip(radius,0,0.5)\n # rescale val by bounded range...\n # (increases fit for tight bounds; makes worse[?] for large bounds)\n bounded = ~numpy.isinf(lo) & ~numpy.isinf(hi)\n val[bounded] = x0[bounded] + (hi[bounded]-lo[bounded])*radius\n # crop val at bounds\n settings = numpy.seterr(all='ignore')\n val[val<lo] = lo[val<lo]\n val[val>hi] = hi[val>hi]\n numpy.seterr(**settings)\n # handle collisions (when val[i] == x0[i])\n collision = val==x0\n if numpy.any(collision):\n rval = x0*(1-radius)\n rval[rval==0] = -radius\n rval[bounded] = x0[bounded] - (hi[bounded]-lo[bounded])*radius\n val[collision] = rval[collision]\n # make tolerance relative for bounded parameters\n # tol = numpy.ones(x0.shape)*xtol\n # tol[bounded] = (hi[bounded]-lo[bounded])*xtol\n # xtol = tol\n self.population[0] = x0\n return val",
"def checkStaticConstraints(self):\r\n constraintList = []\r\n stepsize = 0.01\r\n tolerance = 0\r\n \"\"\"discretise area constraints into points relative to starting area origin\"\"\"\r\n if self.worldModel.currentArea.posRelToTopNode[0] < self.worldModel.targetArea.posRelToTopNode[0]:\r\n x = -self.worldModel.currentArea.xDim / 2 +tolerance\r\n y = -self.worldModel.currentArea.yDim / 2 +tolerance\r\n while y <= self.worldModel.currentArea.yDim/2-tolerance:\r\n point = [-self.worldModel.currentArea.xDim/2+tolerance, y]\r\n constraintList.append(point)\r\n y += stepsize\r\n while x <= self.worldModel.currentArea.xDim/2+self.worldModel.targetArea.xDim-tolerance:\r\n point = [x, self.worldModel.currentArea.yDim/2-tolerance]\r\n constraintList.append(point)\r\n x += stepsize\r\n while y >= -self.worldModel.currentArea.yDim/2+tolerance:\r\n point = [self.worldModel.currentArea.xDim/2+self.worldModel.targetArea.xDim-tolerance, y]\r\n constraintList.append(point)\r\n y -= stepsize\r\n while x >= -self.worldModel.currentArea.xDim/2+tolerance:\r\n point = [x, -self.worldModel.currentArea.yDim/2+tolerance]\r\n constraintList.append(point)\r\n x -= stepsize\r\n if self.worldModel.currentArea.posRelToTopNode[0] > self.worldModel.targetArea.posRelToTopNode[0]:\r\n x = self.worldModel.currentArea.xDim / 2 - tolerance\r\n y = -self.worldModel.currentArea.yDim / 2 +tolerance\r\n while y <= self.worldModel.currentArea.yDim/2-tolerance:\r\n point = [self.worldModel.currentArea.xDim/2-tolerance, y]\r\n constraintList.append(point)\r\n y += stepsize\r\n while x >= -self.worldModel.currentArea.xDim/2-self.worldModel.targetArea.xDim+tolerance:\r\n point = [x, self.worldModel.currentArea.yDim/2-tolerance]\r\n constraintList.append(point)\r\n x -= stepsize\r\n while y >= -self.worldModel.currentArea.yDim/2+tolerance:\r\n point = [-self.worldModel.currentArea.xDim/2-self.worldModel.targetArea.xDim+tolerance, y]\r\n constraintList.append(point)\r\n y -= stepsize\r\n while x <= self.worldModel.currentArea.xDim/2-tolerance:\r\n point = [x, -self.worldModel.currentArea.yDim/2+tolerance]\r\n constraintList.append(point)\r\n x += stepsize\r\n if self.worldModel.currentArea.posRelToTopNode[1] < self.worldModel.targetArea.posRelToTopNode[1]:\r\n x = -self.worldModel.currentArea.xDim / 2 +tolerance\r\n y = -self.worldModel.currentArea.yDim / 2 +tolerance\r\n while y <= self.worldModel.currentArea.yDim/2+self.worldModel.targetArea.yDim-tolerance:\r\n point = [-self.worldModel.currentArea.xDim/2+tolerance, y]\r\n constraintList.append(point)\r\n y += stepsize\r\n while x <= self.worldModel.currentArea.xDim/2-tolerance:\r\n point = [x, self.worldModel.currentArea.yDim/2+self.worldModel.targetArea.yDim-tolerance]\r\n constraintList.append(point)\r\n x += stepsize\r\n while y >= -self.worldModel.currentArea.yDim/2+tolerance:\r\n point = [self.worldModel.currentArea.xDim/2-tolerance, y]\r\n constraintList.append(point)\r\n y -= stepsize\r\n while x >= -self.worldModel.currentArea.xDim/2+tolerance:\r\n point = [x, -self.worldModel.currentArea.yDim/2+tolerance]\r\n constraintList.append(point)\r\n x -= stepsize\r\n if self.worldModel.currentArea.posRelToTopNode[1] > self.worldModel.targetArea.posRelToTopNode[1]:\r\n x = -self.worldModel.currentArea.xDim / 2 +tolerance\r\n y = self.worldModel.currentArea.yDim / 2 -tolerance\r\n while y >= -self.worldModel.currentArea.yDim/2-self.worldModel.targetArea.yDim+tolerance:\r\n point = [-self.worldModel.currentArea.xDim/2+tolerance, y]\r\n constraintList.append(point)\r\n y -= 
stepsize\r\n while x <= self.worldModel.currentArea.xDim/2-tolerance:\r\n point = [x, -self.worldModel.currentArea.yDim/2-self.worldModel.targetArea.yDim+tolerance]\r\n constraintList.append(point)\r\n x += stepsize\r\n while y <= self.worldModel.currentArea.yDim/2-tolerance:\r\n point = [self.worldModel.currentArea.xDim/2-tolerance, y]\r\n constraintList.append(point)\r\n y += stepsize\r\n while x <= -self.worldModel.currentArea.xDim/2+tolerance:\r\n point = [x, self.worldModel.currentArea.yDim/2-tolerance]\r\n constraintList.append(point)\r\n x += stepsize\r\n\r\n for area in self.worldModel.taskAreas:\r\n for object in self.world.listOfObjects:\r\n leftAreaBoundary = area.posRelToTopNode[0] - area.xDim / 2\r\n rightAreaBoundary = area.posRelToTopNode[0] + area.xDim / 2\r\n bottomAreaBoundary = area.posRelToTopNode[1] - area.yDim / 2\r\n topAreaBoundary = area.posRelToTopNode[1] + area.yDim / 2\r\n if (object.actualPosition[0] >= leftAreaBoundary\r\n and object.actualPosition[0] <= rightAreaBoundary\r\n and object.actualPosition[1] >= bottomAreaBoundary\r\n and object.actualPosition[1] <= topAreaBoundary):\r\n angleIncrement = stepsize/object.radius\r\n objectOrigin = [0,0]\r\n objectOrigin[0] = object.actualPosition[0] - self.worldModel.taskAreas[0].posRelToTopNode[0]\r\n objectOrigin[1] = object.actualPosition[1] - self.worldModel.taskAreas[0].posRelToTopNode[1]\r\n angle = 0\r\n while angle < 2*math.pi:\r\n point = [objectOrigin[0]+(object.radius+tolerance)*math.cos(angle), objectOrigin[1]+(object.radius+tolerance)\r\n *math.sin(angle)]\r\n constraintList.append(point)\r\n angle += angleIncrement\r\n\r\n self.worldModel.staticConstraints = constraintList",
"def _on_configure(self, event):\n self.radius = (min(event.width, event.height) - 2 * self.circ_pad) / 2",
"def _update_inhibition_radius(self):\r\n\t\t\r\n\t\tself.inhibition_radius = max(bn.nansum(self.syn_dist * self.syn_c) /\r\n\t\t\tmax(bn.nansum(self.syn_c), 1), 1)",
"def set_circular(self, radius):\n self.apoapsis = radius\n self.periapsis = radius\n self.eccentricity = 0.0\n return self",
"def get_receptive_field_radius(self):\n raise NotImplementedError()",
"def WildcardIndustryConstraint(exclude, lower_limit, upper_limit, relative, classification, hard):\n\tpass",
"def WildcardStyleConstraint(exclude=None, lower_limit=None, upper_limit=None, relative=False, hard=True):",
"def get_bounds(self, nonconvex_bound):\n pass",
"def check_overlapping(self, fit_radius=True, merge=True, mindist='auto', update_geometry=False):\n\n from scipy.spatial.distance import cdist\n from scipy.spatial import cKDTree\n # index = list(self.graph)[:]\n # centers = np.array(list(zip(*nx.get_node_attributes(self.graph,'center').values()))).T\n # pores_radii = np.fromiter(nx.get_node_attributes(self.graph,'radius').values(),dtype=np.float)\n\n pores_radii = list(nx.get_node_attributes(\n self.graph, 'radius').items())\n # we begin by the bigger pores\n pores_radii.sort(key=lambda tup: tup[1], reverse=True)\n index, pores_radii = zip(*pores_radii)\n pores_radii = np.array(pores_radii)\n\n centers = nx.get_node_attributes(self.graph, 'center')\n centers = [np.array(centers[i]) for i in index]\n centers = np.array(centers)\n # distances = cdist(centers,centers)\n kdtree = cKDTree(centers)\n\n stop = False\n\n while not stop:\n\n stop = True\n\n for i, n1 in enumerate(index):\n\n #distances = cdist(centers,[self.graph.nodes[n1]['center']])[:,0]\n\n if self.graph.has_node(n1):\n\n if mindist == 'auto':\n gap = self.graph.nodes[n1]['radius']*0.02\n else:\n gap = mindist\n\n labels = kdtree.query_ball_point(\n self.graph.nodes[n1]['center'], 2.5*self.graph.nodes[n1]['radius'])\n labels.remove(i)\n # distances,labels = kdtree.query(x=net.graph.nodes[n1]['center'],2*self.graph.nodes[n1]['radius'],n_jobs=1)\n # labels.remove(i)\n #distance *= 0.998\n distances = cdist(centers[labels], [self.graph.nodes[n1]['center']])[\n :, 0]*0.998\n d = distances - pores_radii[labels]\n d -= self.graph.nodes[n1]['radius']\n # On commence par la distance la plus faible\n d_and_labels = [(d[j], k) for j, k in enumerate(labels)]\n d_and_labels.sort(key=lambda t: t[0])\n\n for (dist, ind) in d_and_labels:\n\n n2 = index[ind]\n if self.graph.has_node(n2) and self.graph.has_node(n1):\n\n # Le centre du pore né est dans la sphère du pore n1 OU il y a overlapping et fit_radius == False\n # -> Merging ou suppression du pore de plus petit rayon\n if (dist + self.graph.nodes[n2]['radius'] <= gap) or (dist < gap and dist + self.graph.nodes[n2]['radius'] > gap and not fit_radius):\n\n if (self.graph.nodes[n1]['radius'] >= self.graph.nodes[n2]['radius']):\n if merge:\n self.merge_pores(n1, n2)\n print(\"pore\", n1, \"and\", n2,\n \"overlap: merging (deleting\", n2, \")\")\n else:\n self.remove_pore(n2)\n print(\"pore\", n1, \"and\", n2,\n \"overlap: deleting\", n2)\n\n else:\n if merge:\n self.merge_pores(n2, n1)\n print(\"pore\", n1, \"and\", n2,\n \"overlap: merging (deleting\", n1, \")\")\n else:\n self.remove_pore(n1)\n print(\"pore\", n1, \"and\", n2,\n \"overlap: deleting\", n2)\n # On termine l'itération car le pore n1 n'existe plus...\n break\n\n # Overlapping et fit_radius == True\n # 3 options:\n # -Le rayon du pore le plus petit est modifié\n # -Merging\n # -Suppression\n elif dist < gap and dist + self.graph.nodes[n2]['radius'] > gap and fit_radius:\n if (self.graph.nodes[n1]['radius'] >= self.graph.nodes[n2]['radius']):\n r = dist + \\\n self.graph.nodes[n2]['radius'] - \\\n self.graph.nodes[n1]['radius'] - gap\n if self.graph.nodes[n2]['radius'] >= r and r > 0:\n self.graph.nodes[n2]['radius'] = r\n pores_radii[ind] = r\n print(\n \"pore\", n1, \"and\", n2, \"overlap: changin radius of\", n2, \"to\", r)\n else:\n if merge:\n self.merge_pores(n1, n2)\n print(\n \"pore\", n1, \"and\", n2, \"overlap: merging (deleting\", n2, \")\")\n else:\n self.remove_pore(n2)\n print(\"pore\", n1, \"and\", n2,\n \"overlap: deleting\", n2)\n else:\n if 
self.graph.nodes[n1]['radius'] >= dist:\n self.graph.nodes[n1]['radius'] = dist\n pores_radii[i] = dist\n print(\n \"pore\", n1, \"and\", n2, \"overlap: changin radius of\", n1, \"to\", dist)\n else:\n if merge:\n self.merge_pores(n2, n1)\n print(\n \"pore\", n1, \"and\", n2, \"overlap: merging (deleting\", n1, \")\")\n else:\n self.remove_pore(n1)\n print(\"pore\", n1, \"and\", n2,\n \"overlap: deleting\", n1)\n # On termine l'itération car le pore n1 n'existe plus...\n break\n\n if update_geometry:\n self.set_auto_throats_length()\n self.set_auto_throats_radius()",
"def add_corridor_constraint(self,seg,r,weight=1.0):\n\n constraint_type = \"cylinder\"\n params = dict()\n params['x1'] = np.array([ self.qr_polytraj.waypoints['x'][0,seg],\n self.qr_polytraj.waypoints['y'][0,seg],\n self.qr_polytraj.waypoints['z'][0,seg]])\n params['x2'] = np.array([ self.qr_polytraj.waypoints['x'][0,seg+1],\n self.qr_polytraj.waypoints['y'][0,seg+1],\n self.qr_polytraj.waypoints['z'][0,seg+1]])\n params['der'] = 0\n params['l'] = r # Give the same radius buffer on the end caps\n params['r'] = r\n params['weight'] = weight\n params['keep_out'] = False\n params['active_seg'] = seg\n\n\n self.qr_polytraj.add_constraint(constraint_type,params,dynamic_weighting=False,sum_func=False)",
"def update_circumference_coordinates(self):\n self.top = abs(self.y) - self.radius\n self.right = abs(self.x) + self.radius\n self.bottom = abs(self.y) + self.radius\n self.left = abs(self.x) - self.radius",
"def exclude(self, radius):\n mat = distance.cdist(self.selected.tail(1).values,\n self.X.values)\n idxs = np.flatnonzero(mat <= radius)\n for idx in sorted(idxs, reverse=True):\n self.drop_index(idx)",
"def __init__(self,r):\n self.radius = r\n self.uc_centered_a = r\n self.uc_centered_b = r*np.sqrt(3.0)",
"def define_potential(self) -> hoomd.md.pair.pair:\n self.potential_args.setdefault('r_cut', 2.5)\n potential = self.potential(\n **self.potential_args,\n nlist=hoomd.md.nlist.cell()\n )\n for i, j in combinations_with_replacement(self._radii.keys(), 2):\n potential.pair_coeff.set(i, j, epsilon=1, sigma=self._radii[i] + self._radii[j])\n return potential",
"def build_constraints_boundaries(self):\n\n # Trapezoidal and Hermite-Simpson methods can't compute\n # defects at the last node contrary to pseudospectral methods\n coll_method = self.options['tr_method'] in [\n 'trapezoidal', 'hermite-simpson']\n n_nodes = self.problem.prm['n_nodes'] - \\\n 1 if coll_method else self.problem.prm['n_nodes']\n\n # Defects lower and upper boundaries\n defects_low = np.zeros(\n self.problem.prm['n_states'] * n_nodes)\n defects_upp = np.zeros(\n self.problem.prm['n_states'] * n_nodes)\n\n # Path lower and upper boundaries\n path_low = np.hstack([self.problem.low_bnd.path]\n * (self.problem.prm['n_nodes']))\n path_upp = np.hstack([self.problem.upp_bnd.path]\n * (self.problem.prm['n_nodes']))\n\n # Events lower and upper boundaries\n event_low = self.problem.low_bnd.event\n event_upp = self.problem.upp_bnd.event\n\n # Assembly of the lower and upper boundaries vectors\n low = np.concatenate((defects_low, path_low, event_low))\n upp = np.concatenate((defects_upp, path_upp, event_upp))\n\n return low, upp"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns true if provenance of property is SPE or AST
|
def use_property(kepid, prop):
try:
prov = kicu.DATA.ix[kepid, '{}_prov'.format(prop)]
return any([prov.startswith(s) for s in ['SPE', 'AST']])
except KeyError:
raise MissingStellarError('{} not in stellar table?'.format(kepid))
|
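The provenance test above reduces to a prefix check on the stellar table's provenance column; a self-contained illustration of the accepted prefixes (the provenance strings here are made up):

for prov in ['SPE25', 'AST1', 'PHO7', 'KIC']:
    print(prov, any(prov.startswith(s) for s in ['SPE', 'AST']))
# SPE25 True
# AST1 True
# PHO7 False
# KIC False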
[
"def isprop(v):\n return isinstance(v, property)",
"def isProp(node):\r\n #TODO: efficiency\r\n return isinstance(node,PropNode)",
"def _is_property(cls, member):\n return isinstance(member, property)",
"def hasProperty(self, p_str): # real signature unknown; restored from __doc__\n return False",
"def isproperty(object):\n return isinstance(object, property)",
"def hasProperty(self, property):\n return property in self.metadataByProperty.keys()",
"def is_properties(self): # tag matches properties tag\n return self.tag == tagset.properties.tag \\\n or self.tag == 'campaignProperties'",
"def is_property_in_source_frame(source_frame, property):\n prop = re.split('\\.', property)[0]\n\n if prop not in source_frame:\n return False\n inner_element = source_frame[prop]\n # either we've reached the end or we need to keep searching\n if len(re.split('\\.', property)) == 1:\n return True\n rem_prop = '.'.join(re.split('\\.', property)[1:])\n if type(inner_element) == dict:\n return ResultExtractors.is_property_in_source_frame(inner_element, rem_prop)\n elif type(inner_element) == list:\n for element in inner_element:\n if type(element) == dict and ResultExtractors.is_property_in_source_frame(element, rem_prop):\n return True\n return False",
"def is_simple(self):\n return self.propertyValueType.lower() in ('float', 'double',\n 'int', 'integer',\n 'string')",
"def isSemantics(self):\n return _libsbml.ASTNode_isSemantics(self)",
"def is_psionic(self) -> bool:\n return ATTRIBUTE.Psionic.value in self.type_data.attributes",
"def should_run(self):\n return any(self.property_manager.properties)",
"def is_prop_symbol(s):\n return is_symbol(s) and s[0].isupper() and s != 'TRUE' and s != 'FALSE'",
"def can_prove(self, target):\n return self.prop == target.prop and set(self.hyps).issubset(set(target.hyps))",
"def isProcedural(self):\n \n pass",
"def check_if_dp_field_is_property_field(self, obj, field):\n if obj._fields.has_key(field):\n if obj._fields[field]:\n if obj._fields[field].column is not None:\n if isinstance(obj._fields[field].column._multi, str):\n if obj._fields[field].column._multi == 'properties':\n return True\n return False",
"def has_property(cls, key):\n return key in cls.get_property_list()",
"def _is_splice(self):\n if self.snpeff_terms:\n for entry in self.snpeff_terms:\n if 'splice_region_variant' in entry:\n return True\n return False",
"def check_thm_type(self):\n for t in list(self.hyps) + [self.prop]:\n if t.checked_get_type() != BoolType:\n raise term.TypeCheckException('expect boolean type for propositions')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns star config object for given KOI
|
def star_config(koi, bands=['g','r','i','z','J','H','K'],
unc=dict(g=0.05, r=0.05, i=0.05, z=0.05,
J=0.02, H=0.02, K=0.02), **kwargs):
folder = os.path.join(KOI_FPPDIR, ku.koiname(koi))
if not os.path.exists(folder):
os.makedirs(folder)
config = ConfigObj(os.path.join(folder,'star.ini'))
koi = ku.koiname(koi)
maxAV = koi_maxAV(koi)
config['maxAV'] = maxAV
mags = ku.KICmags(koi)
for band in bands:
if not np.isnan(mags[band]):
config[band] = (mags[band], unc[band])
config['Kepler'] = mags['Kepler']
kepid = KOIDATA.ix[koi,'kepid']
if use_property(kepid, 'teff'):
teff, e_teff = (kicu.DATA.ix[kepid, 'teff'],
kicu.DATA.ix[kepid, 'teff_err1'])
if not any(np.isnan([teff, e_teff])):
config['Teff'] = (teff, e_teff)
if use_property(kepid, 'logg'):
logg, e_logg = (kicu.DATA.ix[kepid, 'logg'],
kicu.DATA.ix[kepid, 'logg_err1'])
if not any(np.isnan([logg, e_logg])):
config['logg'] = (logg, e_logg)
if use_property(kepid, 'feh'):
feh, e_feh = (kicu.DATA.ix[kepid, 'feh'],
kicu.DATA.ix[kepid, 'feh_err1'])
if not any(np.isnan([feh, e_feh])):
config['feh'] = (feh, e_feh)
for kw,val in kwargs.items():
config[kw] = val
return config
|
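A hedged sketch of using star_config(); the KOI name and the extra keyword are purely illustrative, the function and its module-level dependencies (KOI_FPPDIR, ku, kicu, ConfigObj) are assumed importable, and writing the returned ConfigObj back to its star.ini path uses the standard configobj API.

config = star_config('K00087.01', parallax=(4.2, 0.1))  # illustrative KOI / kwarg
config.write()  # persists to the star.ini path set by the ConfigObj constructor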
[
"def get_config():\n return ImSimConfiguration()",
"def fpp_config(koi, **kwargs):\n folder = os.path.join(KOI_FPPDIR, ku.koiname(koi))\n if not os.path.exists(folder):\n os.makedirs(folder)\n config = ConfigObj(os.path.join(folder,'fpp.ini'))\n\n koi = ku.koiname(koi)\n\n rowefit = jrowe_fit(koi)\n\n config['name'] = koi\n ra,dec = ku.radec(koi)\n config['ra'] = ra\n config['dec'] = dec\n config['rprs'] = rowefit.ix['RD1','val']\n config['period'] = rowefit.ix['PE1', 'val']\n\n config['starfield'] = kepler_starfield_file(koi)\n\n for kw,val in kwargs.items():\n config[kw] = val\n\n config['constraints'] = {}\n config['constraints']['maxrad'] = default_r_exclusion(koi)\n try:\n config['constraints']['secthresh'] = pipeline_weaksec(koi)\n except NoWeakSecondaryError:\n pass\n\n return config",
"def get_pixis_config_object(env, src):\n cfg = env.configStore()\n\n o = cfg.get(_psana.Pixis.ConfigV1, src)\n if o is not None: return o\n\n return None",
"def get_epix10ka_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.Epix.Config10kaV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10kaV1, src)\n if o is not None: return o\n\n return None",
"def config(self):\n annotations = IAnnotations(self.context)\n return annotations.get(CONFIGURATION_KEY, {})",
"def get_config(app):\n items = app.config.items()\n prefix = 'WIKI_'\n\n def strip_prefix(tup):\n return (tup[0].replace('WIKI_', ''), tup[1])\n\n return dict([strip_prefix(i) for i in items if i[0].startswith(prefix)])",
"def get_iAF1260b_config():\n package_path = get_package_path()\n metabolism_file = os.path.join(package_path, 'bigg_models', 'iAF1260b.json')\n return {'model_path': metabolism_file}",
"def get_epix_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.Epix.Config100aV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config100aV1, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10ka2MV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10ka2MV1, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10kaQuadV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10kaQuadV1, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10kaV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10kaV1, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10KV1, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.ConfigV1, src)\n if o is not None: return o\n\n return None",
"def get_epix10ka2m_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.Epix.Config10ka2MV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10ka2MV1, src)\n if o is not None: return o\n\n return None",
"def oci_config(self):\n return {\n \"user\": self.api_user_id,\n \"key_file\": self.api_key_file_path,\n \"pass_phrase\": self.api_key_passphrase,\n \"fingerprint\": self.api_key_file_fingerprint,\n \"tenancy\": self.tenant_id,\n \"region\": self.region,\n }",
"def get_config_object() -> \"BaseConfig\":\n assert (\n len(G_CONFIG_OBJECT) == 1\n ), \"Have you created quantize config object before calling `quantize_model`?\"\n if G_CONFIG_OBJECT:\n return G_CONFIG_OBJECT[0]",
"def get_acqiris_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.Acqiris.ConfigV1, src)\n if o is not None: return o\n\n return None",
"def istio_config(self) -> Optional[pulumi.Input['IstioConfigArgs']]:\n return pulumi.get(self, \"istio_config\")",
"def io_config(self):\n elem = self._find(_BP_IO_CFG)\n return PartitionIOConfiguration.wrap(elem)",
"def get_config_template(self) -> cconfig.Config:",
"def get_uxi_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.Uxi.ConfigV1, src)\n if o is not None: return o\n\n return None",
"def get_princeton_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.Princeton.ConfigV5, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Princeton.ConfigV4, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Princeton.ConfigV3, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Princeton.ConfigV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Princeton.ConfigV1, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Pimax.ConfigV1, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Pixis.ConfigV1, src)\n if o is not None: return o\n\n return None",
"def kisip_configure_run(self):\n\t\tconfig=configparser.ConfigParser()\n\t\tconfig.read(self.configFile)\n\t\n\t\tself.kisipArcsecPerPixX=config[self.instrument]['kisipArcsecPerPixX']\n\t\tself.kisipArcsecPerPixY=config[self.instrument]['kisipArcsecPerPixY']\n\t\tself.kisipMethodSubfieldArcsec=config[self.instrument]['kisipMethodSubfieldArcsec']\n\t\tself.speckledFileForm=config[self.instrument]['speckledFileForm']\n\t\tself.wavelengthnm=config[self.instrument]['wavelengthnm']\n\t\n\t\tself.kisipMethodMethod=config['KISIP_METHOD']['kisipMethodMethod']\n\t\tself.kisipMethodPhaseRecLimit=config['KISIP_METHOD']['kisipMethodPhaseRecLimit']\n\t\tself.kisipMethodUX=config['KISIP_METHOD']['kisipMethodUX']\n\t\tself.kisipMethodUV=config['KISIP_METHOD']['kisipMethodUV']\n\t\tself.kisipMethodMaxIter=config['KISIP_METHOD']['kisipMethodMaxIter']\n\t\tself.kisipMethodSNThresh=config['KISIP_METHOD']['kisipMethodSNThresh']\n\t\tself.kisipMethodWeightExp=config['KISIP_METHOD']['kisipMethodWeightExp']\n\t\tself.kisipMethodPhaseRecApod=config['KISIP_METHOD']['kisipMethodPhaseRecApod']\n\t\tself.kisipMethodNoiseFilter=config['KISIP_METHOD']['kisipMethodNoiseFilter']\n\t\tself.kisipPropsHeaderOff=config['KISIP_PROPS']['kisipPropsHeaderOff']\n\t\tself.kisipPropsTelescopeDiamm=config['KISIP_PROPS']['kisipPropsTelescopeDiamm']\n\t\tself.kisipPropsAoLockX=config['KISIP_PROPS']['kisipPropsAoLockX']\n\t\tself.kisipPropsAoLockY=config['KISIP_PROPS']['kisipPropsAoLockY']\n\t\tself.kisipPropsAoUsed=config['KISIP_PROPS']['kisipPropsAoUsed']\n\t\tself.kisipEnvLib=config['KISIP_ENV']['kisipEnvLib']\n\t\tself.kisipEnvBin=config['KISIP_ENV']['kisipEnvBin']\n\t\tself.kisipEnvMpiNproc=config['KISIP_ENV']['kisipEnvMpiNproc']\n\t\tself.kisipEnvMpirun=config['KISIP_ENV']['kisipEnvMpirun']\n\t\tself.kisipEnvKisipExe=config['KISIP_ENV']['kisipEnvKisipExe']\n\t\n\t\tself.logger.info(\"This is kisipWrapper, part of SSOsoft \"\n\t\t\t\t\"version {0}\".format(self.ssosoftConfig.__version__)\n\t\t\t\t)\n\t\tself.logger.info(\"Contact {0} to report bugs, make suggestions, \"\n\t\t\t\t\"or contribute.\".format(self.ssosoftConfig.__email__)\n\t\t\t\t)\n\t\tself.logger.info(\n\t\t\t\t\"kisipWrapper is designed to be used in conjunction with \"\n\t\t\t\t\"the SSOsoft RosaZylaCal class. Any other way of using \"\n\t\t\t\t\"this package is at the user's risk.\"\n\t\t\t\t)\n\t\tself.logger.info(\"Now configuring this KISIP run.\")\n\t\n\t\t## Directories preSpeckleBase and speckleBase must exist or be\n\t\t## created.\n\t\tif not os.path.isdir(self.preSpeckleBase):\n\t\t\tself.logger.info(\"os.mkdir: attempting to create directory: \"\n\t\t\t\t\t\"{0}\".format(self.preSpeckleBase)\n\t\t\t\t\t)\n\t\t\ttry:\n\t\t\t\tos.mkdir(self.preSpeckleBase)\n\t\t\texcept Exception as err:\n\t\t\t\tself.logger.critical(\"CRITICAL: {0}\".format(err))\n\t\t\t\traise\n\t\tif not os.path.isdir(self.speckleBase):\n\t\t\tself.logger.info(\"os.mkdir: attempting to create directory: \"\n\t\t\t\t\t\"{0}\".format(self.speckleBase)\n\t\t\t\t\t)\n\t\t\ttry:\n\t\t\t\tos.mkdir(self.speckleBase)\n\t\t\texcept Exception as err:\n\t\t\t\tself.logger.critical(\"CRITICAL: {0}\".format(err))\n\t\t\t\traise",
"def Get():\n return Configuration()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
returns config object for given KOI
|
def fpp_config(koi, **kwargs):
folder = os.path.join(KOI_FPPDIR, ku.koiname(koi))
if not os.path.exists(folder):
os.makedirs(folder)
config = ConfigObj(os.path.join(folder,'fpp.ini'))
koi = ku.koiname(koi)
rowefit = jrowe_fit(koi)
config['name'] = koi
ra,dec = ku.radec(koi)
config['ra'] = ra
config['dec'] = dec
config['rprs'] = rowefit.ix['RD1','val']
config['period'] = rowefit.ix['PE1', 'val']
config['starfield'] = kepler_starfield_file(koi)
for kw,val in kwargs.items():
config[kw] = val
config['constraints'] = {}
config['constraints']['maxrad'] = default_r_exclusion(koi)
try:
config['constraints']['secthresh'] = pipeline_weaksec(koi)
except NoWeakSecondaryError:
pass
return config
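
A hedged usage sketch for fpp_config; the KOI name and the period override are illustrative only:

# Build fpp.ini for a KOI, overriding the fitted period via **kwargs
# (kwargs are applied last, so they win), then write it to disk with
# ConfigObj's write() -- the target filename was set at construction.
cfg = fpp_config('K00087.01', period=3.5)
cfg.write()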
|
[
"def get_config():\n return ImSimConfiguration()",
"def config(self):\n annotations = IAnnotations(self.context)\n return annotations.get(CONFIGURATION_KEY, {})",
"def get_epix10ka_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.Epix.Config10kaV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10kaV1, src)\n if o is not None: return o\n\n return None",
"def io_config(self):\n elem = self._find(_BP_IO_CFG)\n return PartitionIOConfiguration.wrap(elem)",
"def star_config(koi, bands=['g','r','i','z','J','H','K'],\n unc=dict(g=0.05, r=0.05, i=0.05, z=0.05,\n J=0.02, H=0.02, K=0.02), **kwargs):\n folder = os.path.join(KOI_FPPDIR, ku.koiname(koi))\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n config = ConfigObj(os.path.join(folder,'star.ini'))\n\n koi = ku.koiname(koi)\n\n maxAV = koi_maxAV(koi)\n config['maxAV'] = maxAV\n\n mags = ku.KICmags(koi)\n for band in bands:\n if not np.isnan(mags[band]):\n config[band] = (mags[band], unc[band])\n config['Kepler'] = mags['Kepler']\n\n kepid = KOIDATA.ix[koi,'kepid']\n\n if use_property(kepid, 'teff'):\n teff, e_teff = (kicu.DATA.ix[kepid, 'teff'],\n kicu.DATA.ix[kepid, 'teff_err1'])\n if not any(np.isnan([teff, e_teff])):\n config['Teff'] = (teff, e_teff)\n\n if use_property(kepid, 'logg'):\n logg, e_logg = (kicu.DATA.ix[kepid, 'logg'],\n kicu.DATA.ix[kepid, 'logg_err1'])\n if not any(np.isnan([logg, e_logg])):\n config['logg'] = (logg, e_logg)\n\n if use_property(kepid, 'feh'):\n feh, e_feh = (kicu.DATA.ix[kepid, 'feh'],\n kicu.DATA.ix[kepid, 'feh_err1'])\n if not any(np.isnan([feh, e_feh])):\n config['feh'] = (feh, e_feh)\n\n for kw,val in kwargs.items():\n config[kw] = val\n\n return config",
"def get_config_template(self) -> cconfig.Config:",
"def get_uxi_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.Uxi.ConfigV1, src)\n if o is not None: return o\n\n return None",
"def get_config(self) -> ConverterConfiguration:",
"def get_acqiris_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.Acqiris.ConfigV1, src)\n if o is not None: return o\n\n return None",
"def istio_config(self) -> Optional[pulumi.Input['IstioConfigArgs']]:\n return pulumi.get(self, \"istio_config\")",
"def config():\n return Config()",
"def get_config(self) -> Configuration:\n return self.config",
"def get_epix_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.Epix.Config100aV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config100aV1, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10ka2MV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10ka2MV1, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10kaQuadV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10kaQuadV1, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10kaV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10kaV1, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10KV1, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.ConfigV1, src)\n if o is not None: return o\n\n return None",
"def get_config_object() -> \"BaseConfig\":\n assert (\n len(G_CONFIG_OBJECT) == 1\n ), \"Have you created quantize config object before calling `quantize_model`?\"\n if G_CONFIG_OBJECT:\n return G_CONFIG_OBJECT[0]",
"def get_config():\n return CONFIG",
"def oci_config(self):\n return {\n \"user\": self.api_user_id,\n \"key_file\": self.api_key_file_path,\n \"pass_phrase\": self.api_key_passphrase,\n \"fingerprint\": self.api_key_file_fingerprint,\n \"tenancy\": self.tenant_id,\n \"region\": self.region,\n }",
"def get_epix10ka2m_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.Epix.Config10ka2MV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10ka2MV1, src)\n if o is not None: return o\n\n return None",
"def get_pixis_config_object(env, src):\n cfg = env.configStore()\n\n o = cfg.get(_psana.Pixis.ConfigV1, src)\n if o is not None: return o\n\n return None",
"def Get():\n return Configuration()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Predict a single batch of images, optionally with augmentation. Augmentations vectorized across the entire batch and predictions averaged.
|
def predict_batch(self, imgs_batch, augment=False):
if augment:
aug_funcs = [
lambda x: x, # identity
                lambda x: x[:, ::-1, ...],  # vflip
lambda x: x[:, :, ::-1], # hflip
lambda x: np.rot90(x, 1, axes=(1, 2)), # +90
lambda x: np.rot90(x, 2, axes=(1, 2)), # +180
lambda x: np.rot90(x, 3, axes=(1, 2)), # +270
lambda x: np.rot90(x, 1, axes=(1, 2))[:, ::-1, ...], # vflip(+90)
                lambda x: np.rot90(x, 1, axes=(1, 2))[:, :, ::-1]  # hflip(+90)
]
yp = np.zeros((imgs_batch.shape[0], len(TAGS)))
for aug_func in aug_funcs:
                # Apply each augmentation to the original batch so the eight
                # variants listed above do not compound on one another.
                imgs_aug = aug_func(imgs_batch)
                tags_batch = self.net.predict(imgs_aug)
                yp += tags_batch / len(aug_funcs)
return yp
else:
return self.net.predict_on_batch(imgs_batch)
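
An illustrative call of the method above; model stands for whatever object owns predict_batch, imgs for a (batch, height, width, channels) float array, and the 0.5 threshold is an assumption, not part of the original code:

probs = model.predict_batch(imgs, augment=True)  # averaged over the 8 flip/rotation variants
tags = (probs > 0.5).astype(int)                 # simple per-tag threshold, illustrative only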
|
[
"def warmup_predict(model, imgs, Npred):\n H = augmented_state_matrix(model[:-1], imgs, 0)\n h0 = H[-2]\n y0 = imgs[-1]\n return predict(model, y0, h0, Npred)",
"def batched_predict(model, batcher, batch_size, int_mapped_X, doc_labels):\n # Intialize batcher but dont shuffle.\n train_batcher = batcher(full_X=int_mapped_X, full_y=doc_labels,\n batch_size=batch_size, shuffle=False)\n preds = []\n for batch_X, _ in train_batcher.next_batch():\n batch_preds = model.predict(batch_X=batch_X)\n preds.append(batch_preds)\n preds = np.hstack(preds)\n return preds",
"def predict(self, input_batch: InputBatch) -> OutputBatch:",
"def predict_from_batch(self, data):\n if len(data.shape) < 3:\n data = np.expand_dims(data, axis=0)\n if len(data.shape) < 3:\n data = np.expand_dims(data, axis=0)\n\n if data.shape[1] > self.last_avg:\n sample = np.mean(data[0, -self.last_avg:-1, :], axis=0)\n else:\n sample = data[0, -1]\n\n sample = np.expand_dims(sample, axis=0)\n p = self.model.predict(sample).flatten()[0]\n prediction = self.classes_list[p]\n return prediction",
"def predict_on_batch(self, sess, inputs_batch, seqlen_batch):\n feed = self.create_feed_dict(inputs_batch, seqlen_batch)\n predictions = sess.run(self.pred, feed_dict=feed)\n return predictions",
"def predict(predict_var, x_unlabeled, inputs, batch_sizes, view_size):\n x = x_unlabeled\n\n # calculate batches for predict loop\n unlabeled_batch_size = batch_sizes.get(\"Embedding\", 0)\n batch_size = min(len(x[0]), unlabeled_batch_size)\n batches = make_batches(len(x[0]), batch_size)\n\n y_preds = []\n # predict over all points\n for j, (batch_start, batch_end) in enumerate(batches):\n feed_dict = {K.learning_phase(): 0}\n # feed corresponding input for each input_type\n for input_type, input_placeholder in inputs.items():\n if input_type == \"Embedding\":\n for i in range(view_size):\n feed_dict[input_placeholder[i]] = x[i][batch_start:batch_end]\n elif input_type == \"Orthogonal\":\n batch_ids = np.random.choice(\n len(x), size=min(len(x), batch_sizes[input_type]), replace=False\n )\n for i in range(view_size):\n feed_dict[input_placeholder[i]] = x[i][batch_ids]\n else:\n raise Exception(\"Unrecognized feed name ['{}']\".format(input_type))\n # evaluate the batch\n y_pred_batch = np.asarray(K.get_session().run(predict_var, feed_dict=feed_dict))\n y_preds.append(y_pred_batch)\n y_list = np.concatenate(y_preds, axis=1)\n\n return y_list",
"def predict_batch(self, model, context, data=None):\n pass",
"def batch_predictions(self, images):\n \n px, _ = self._process_input(images)\n\n _boxes, _box_scores, _box_confidence_logits, \\\n _box_class_probs_logits, _box_coord_logits = \\\n self._batch_gather_feats_fn([px])\n\n boxes, scores, classes = self._batch_pred_fn(\n [_boxes, _box_scores])\n\n predictions = []\n for i in range(len(boxes)):\n num = (scores[i] > 0.).sum()\n pred = {}\n pred['boxes'] = boxes[i][:num].tolist()\n pred['scores'] = scores[i][:num].tolist()\n pred['classes'] = classes[i][:num].tolist()\n predictions.append(pred)\n\n assert len(predictions) == images.shape[0], \"batch size doesn't match.\"\n\n return predictions",
"def predict(self, x, batch_size=1):\n\t\treturn self.model.predict(x, batch_size=batch_size, verbose=0)",
"def predict(model, img, target_size):\n if img.size != target_size:\n img = img.resize(target_size)\n\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n preds = model.predict(x)\n return preds[0]",
"def predict(model, img, classes,target_size, top_n=3):\n if img.size != target_size:\n img = img.resize(target_size)\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n x = x/255.0\n preds = model.predict(x)\n return decode_predictions(preds, classes,top=top_n)[0]",
"def predict(self, images):\n if not isinstance(images, list):\n images = [images]\n\n # setup model\n self.eval()\n if self.config.DEVICE==\"cuda\":\n self.cuda()\n torch.set_default_tensor_type(torch.cuda.FloatTensor)\n else:\n self.cpu()\n torch.set_default_tensor_type(torch.FloatTensor)\n\n # dataset\n molded_images = []\n image_shapes = []\n image_metas = []\n for image in images:\n # If grayscale or rgba then convert to RGB for consistency (load_image)\n if image.ndim != 3:\n image = skimage.color.gray2rgb(image) * 255\n elif image.shape[-1] == 4:\n image = skimage.color.rgba2rgb(image) * 255\n image = image.astype(np.uint8)\n\n # image and shape (__get_item__\n image_shapes.append(torch.tensor(image.shape))\n image, window, scale, padding = image_utils.resize_image(image, self.config)\n image = image_utils.mold_image(image, self.config)\n molded_images.append(image)\n\n # image meta\n image_meta = image_utils.mold_meta(dict(window=window))\n image_metas.append(image_meta)\n\n # dataloader\n molded_images = torch.stack(molded_images)\n image_metas = torch.stack(image_metas)\n image_shapes = torch.stack(image_shapes)\n\n # predict\n with torch.no_grad():\n boxes, class_ids, scores, masks = self(molded_images, image_metas)\n\n # prepare outputs\n results = []\n for i in range(len(images)):\n detections1 = [var[i] for var in [boxes, class_ids, scores, masks, image_shapes, image_metas]]\n results.append(image_utils.unmold_detections(*detections1))\n return results",
"def predict(self, input_seq, pred_y, adapt):\n inputs = input_seq[:][:]\n cur_predict = []\n \n seq = torch.FloatTensor(inputs[-self.sequence_length:]).view(1, self.sequence_length, self.input_size).to(self.device)\n if(adapt and not isinstance(self.pre_x, int)):\n err = (np.asarray(input_seq[-1][:]).reshape((3, 1)) - np.asarray(pred_y).reshape((3, 1)))\n self.err_list.append(err.reshape((1, 3)))\n\n jt = np.linalg.norm(np.asarray(input_seq[-1][:]).reshape(3, 1) - np.asarray(pred_y).reshape(3, 1))\n if(self.enable_adapt):\n kt = 1\n if(self.enable_multiepoch):\n if(jt > self.ep1 and jt <= self.ep2):\n kt = 2\n elif(jt > self.ep2):\n kt = 0\n while(kt > 0):\n err = (np.asarray(input_seq[-1][:]).reshape((3, 1)) - np.asarray(pred_y).reshape((3, 1)))\n self.K = self.MKF.adapt(self.pre_x, err)\n pred_y = np.matmul(np.transpose(self.K), self.pre_x)\n kt -= 1\n self.model.state_dict()['linear.weight'][:, :] = torch.FloatTensor(np.transpose(self.K[:, :])).to(self.device)\n self.K_list.append(np.reshape(self.K, (1, self.K.shape[0]*self.K.shape[1])))\n \n with torch.no_grad():\n predict = self.model(seq)\n step_predict = []\n for j in range(self.output_size):\n step_predict.append(predict[0, j].item())\n cur_predict.append(step_predict)\n if(adapt):\n self.pre_x = np.asarray(self.model.x_pre)\n return cur_predict",
"def predict_on_images(self, images, verbose: int = 0):\n from apCode.behavior.FreeSwimBehavior import prepareForUnet_1ch\n from apCode.volTools import img as img_\n print('Predicting ...')\n images_prob = np.squeeze(self.unet.predict(prepareForUnet_1ch(\n images, sz=self.unet.input_shape[1:3]), verbose=verbose))\n images_prob = img_.resize(images_prob, images.shape[1:],\n preserve_dtype=True, preserve_range=True)\n return images_prob",
"def predict(model, img):\n\tx = image.img_to_array(img)\n\tx = np.expand_dims(x, axis=0)\n\tx = preprocess_input(x)\n\tpreds = model.predict(x)\n\treturn preds[0]",
"def predict(self, state_batch):\n pass",
"def _fm_predict_batch(X, scaling, w, V):\n return _fm_predict_batch_fast(X.data, X.indices, X.indptr, scaling, w, V,\n X.shape[0])",
"def predict(\n outputs,\n labels,\n all_preds,\n all_labels,\n prediction_type,\n criterion,\n **kwargs\n ):\n if prediction_type == \"binary\":\n if isinstance(criterion, nn.BCEWithLogitsLoss):\n all_preds, all_labels = bce_with_logits_inference(\n outputs,\n labels,\n all_preds,\n all_labels,\n **kwargs\n )\n elif isinstance(criterion, nn.BCELoss):\n all_preds, all_labels = bce_inference(\n outputs,\n labels,\n all_preds,\n all_labels,\n **kwargs\n )\n elif prediction_type == \"classification\":\n all_preds, all_labels = crossentropy_inference(\n outputs,\n labels,\n all_preds,\n all_labels\n )\n elif prediction_type == \"regression\":\n # TODO: test different loss functions\n all_preds, all_labels = regression_inference(\n outputs,\n labels,\n all_preds,\n all_labels\n )\n elif prediction_type == \"reconstruction\":\n # TODO: test different loss functions\n all_preds, all_labels = regression_inference(\n outputs,\n labels,\n all_preds,\n all_labels\n )\n elif prediction_type == \"variational\":\n # TODO: test different loss functions\n all_preds, all_labels = variational_inference(\n outputs,\n labels,\n all_preds,\n all_labels\n )\n else:\n raise NotImplementedError\n\n return all_preds, all_labels",
"def make_predictions(opts, model, dataset):\n if opts.num_examples_for_predict:\n dataset = tuple(x[:opts.num_examples_for_predict] for x in dataset)\n\n batched_dataset = (tf.data.Dataset.from_tensor_slices(dataset)\n .batch(_BATCH_SIZE_FOR_PREDICT))\n out = collections.defaultdict(list)\n for images, labels in tfds.as_numpy(batched_dataset):\n logits_samples = np.stack(\n [model.predict(images) for _ in range(opts.predictions_per_example)],\n axis=1) # shape: [batch_size, num_samples, num_classes]\n probs = scipy.special.softmax(logits_samples, axis=-1).mean(-2)\n out['labels'].extend(labels)\n out['logits_samples'].extend(logits_samples)\n out['probs'].extend(probs)\n if len(out['image_examples']) < _NUM_IMAGE_EXAMPLES_TO_RECORD:\n out['image_examples'].extend(images)\n\n return {k: np.stack(a) for k, a in six.iteritems(out)}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Unstack batch dimension and split into channels and alpha mask.
|
def unstack_and_split(self, x, batch_size, num_channels=3):
unstacked = torch.reshape(x, [batch_size, -1] + list(x.shape)[1:])
channels, masks = torch.split(unstacked, [num_channels, 1], dim=2)
return channels, masks
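
A self-contained shape check (the batch and slot sizes are made up; self stands for whichever module defines unstack_and_split):

import torch

# A (2*4, 4, 8, 8) input, i.e. 2 examples x 4 slots stacked along dim 0 with
# 3 RGB channels + 1 alpha channel, unstacks to (2, 4, 4, 8, 8) and splits into
# channels of shape (2, 4, 3, 8, 8) and masks of shape (2, 4, 1, 8, 8).
x = torch.randn(2 * 4, 4, 8, 8)
channels, masks = self.unstack_and_split(x, batch_size=2, num_channels=3)
print(channels.shape, masks.shape)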
|
[
"def make_grid(batch_img: torch.Tensor,\n batch_mask: torch.Tensor,\n img_denormalize_fn: Callable,\n mask_palette: Optional[Sequence] = default_palette,\n batch_gt_mask: Optional[torch.Tensor] = None):\n assert isinstance(batch_img, torch.Tensor) and isinstance(batch_mask, torch.Tensor)\n assert len(batch_img) == len(batch_mask)\n\n if batch_gt_mask is not None:\n assert isinstance(batch_gt_mask, torch.Tensor)\n assert len(batch_mask) == len(batch_gt_mask)\n\n b = batch_img.shape[0]\n h, w = batch_img.shape[2:]\n\n le = 3 if batch_gt_mask is None else 3 + 2\n out_image = np.zeros((h * le, w * b, 3), dtype='uint8')\n\n for i in range(b):\n img = batch_img[i]\n mask = batch_mask[i]\n\n img = img_denormalize_fn(img)\n img = tensor_to_numpy(img)\n img = render_image(img)\n mask = mask.cpu().numpy()\n mask = render_mask(mask, mask_palette)\n\n out_image[0:h, i * w:(i + 1) * w, :] = img\n out_image[1 * h:2 * h, i * w:(i + 1) * w, :] = render_datapoint(img,\n mask,\n blend_alpha=0.4)\n out_image[2 * h:3 * h, i * w:(i + 1) * w, :] = mask\n\n if batch_gt_mask is not None:\n gt_mask = batch_gt_mask[i]\n gt_mask = gt_mask.cpu().numpy()\n gt_mask = render_mask(gt_mask, mask_palette)\n out_image[3 * h:4 * h, i * w:(i + 1) * w, :] = render_datapoint(img,\n gt_mask,\n blend_alpha=0.4)\n out_image[4 * h:5 * h, i * w:(i + 1) * w, :] = gt_mask\n\n return out_image",
"def process_state_batch(self, batch):\n # The image section of the batch\n batch_images = []\n # The augmentation section of the batch\n batch_augmentation = []\n for b in batch:\n b_images = b[:, 0]\n b_images = np.array(b_images.tolist())\n # Apply the transformation needed by DQN\n b_images = b_images.astype('float32') / 255.\n # Append it back to batch_images\n batch_images += [b_images]\n\n # Augmentation section\n b_augmentations = b[:, 1]\n b_augmentations = np.array(b_augmentations.tolist())\n temp = []\n for bb in b_augmentations:\n temp += [bb.flatten()]\n batch_augmentation += [temp]\n batch_images = np.array(batch_images)\n batch_augmentation = np.array(batch_augmentation)\n return [batch_images, batch_augmentation]",
"def _reshape_arrays_to_images(X, image_size):\n index_dimensions = X.shape[:-1]\n return X.reshape(index_dimensions + image_size)",
"def collate_fn(self, batch):\n images = list()\n targets = list()\n\n for b in batch:\n images.append(b[0])\n targets.append(b[1])\n\n # images = torch.stack(images, dim=0)\n\n return images, targets # tensor (N, 3, 300, 300), 3 lists of N tensors each",
"def combined_masks(action_mask,betsize_mask):\n if action_mask.dim() > 2:\n return torch.cat([action_mask[:,:,:-2],betsize_mask],dim=-1)\n elif action_mask.dim() > 1:\n return torch.cat([action_mask[:,:-2],betsize_mask],dim=-1)\n else:\n return torch.cat([action_mask[:-2],betsize_mask])",
"def split_and_concat_model():\n x = tf.keras.Input(shape=[224, 224, 3, ])\n # TODO: implement split for the following commented out method of splitting\n # y1 = x[:, :100, :, :]\n # y2 = x[:, 101:, :, :]\n y1, y2 = tf.split(x, [100, 124], 1)\n y1 = tf.nn.relu(y1)\n y2 = tf.keras.layers.BatchNormalization()(y2)\n z = tf.keras.layers.concatenate([y1, y2], axis=1)\n z = tf.keras.layers.Flatten()(z)\n output = tf.keras.layers.Dense(10, activation=tf.nn.softmax, name=\"split_and_concat_model\")(z)\n return output",
"def unbatch_stack(S, grid_shape):\n\tI, J = grid_shape\n\tC, M = S.shape[1], S.shape[2]\n\treturn S.reshape(-1, I, J, C, M, M)",
"def reformat_dataset(dataset):\n # change the dataset shape from\n # (50000, 32, 32, 3)\n # (50000, 3, 32, 32)\n dataset = np.rollaxis(dataset, 3, 1)\n return dataset.reshape((-1, image_size * image_size * color_channel))",
"def split_3d_array_into_channels(arr):\n return [arr[:, :, i] for i in range(arr.shape[-1])]",
"def collate_fn(self, batch):\n images = list()\n boxes = list()\n labels = list()\n difficulties = list()\n\n for b in batch:\n images.append(b[0])\n boxes.append(b[1])\n labels.append(b[2])\n difficulties.append(b[3])\n\n images = torch.stack(images, dim=0)\n\n return images, boxes, labels, difficulties # tensor (N, 3, 300, 300), 3 lists of N tensors each",
"def extract_meta_features(batch_tensor):\n # per image mean per channel resulting in (batch_size, dimension)\n mean, variance = tf.nn.moments(batch_tensor, axes=(1, 2))\n std = tf.sqrt(variance)\n channel_max = tf.reduce_max(batch_tensor, axis=(1, 2))\n channel_min = tf.reduce_min(batch_tensor, axis=(1, 2))\n mean_sub = batch_tensor - tf.reshape(mean,\n shape=[-1, 1, 1, IMAGE_SHAPE[-1]])\n skewness = tf.reduce_mean(mean_sub ** 3, axis=(1, 2)) / std ** 3\n kurtosis = tf.reduce_mean(mean_sub ** 4, axis=(1, 2)) / std ** 4\n feature_stack = tf.stack(values=[mean, std, channel_max, channel_min,\n skewness, kurtosis],\n axis=1)\n # output stacked features in shape of [batch_size, 6, dimension]\n return feature_stack",
"def masking_reshape_start(data, mask):\n assert(len(data.shape) == 3 or len(data.shape) == 4)\n \n mask_1d=np.ravel(mask)\n b_mask_1d = (mask_1d==1)\n \n if len(data.shape) == 3:\n data_1d = np.ravel(data)\n reshaped = data_1d[b_mask_1d]\n\n if len(data.shape) == 4:\n data_2d = data.reshape((-1, data.shape[-1]))\n reshaped = data_2d[b_mask_1d, :]\n \n return reshaped",
"def split_heads(self, x, batch):\n x = tf.reshape(x, (batch, -1, self.h, self.depth))\n return tf.transpose(x, perm=[0, 2, 1, 3])",
"def split_images(x, y=None, size=(128, 128), num_part=4):\n x_patches = image.PatchExtractor(patch_size=size, max_patches=num_part, random_state=0)\n x_imgs = x_patches.transform(x)\n # Check if number of channels is the same for grayscale\n if x.shape[-1] != x_imgs.shape[-1]:\n x_imgs = x_imgs[:, :, :, np.newaxis]\n\n if not y is None:\n y_patches = image.PatchExtractor(patch_size=size, max_patches=num_part, random_state=0)\n y_imgs = y_patches.transform(y)\n\n # Check if number of channels is the same for grayscale\n if y.shape[-1] != y_imgs.shape[-1]:\n y_imgs = y_imgs[:, :, :, np.newaxis]\n\n return x_imgs, y_imgs\n\n return x_imgs",
"def _reshape_images_to_arrays(X, image_size):\n index_dimensions = X.shape[:-len(image_size)]\n return X.reshape(index_dimensions + (int(np.prod(image_size)), ))",
"def _reshape(self, hog_feat: torch.Tensor) -> torch.Tensor:\n hog_feat = hog_feat.flatten(1, 2)\n self.unfold_size = hog_feat.shape[-1] // 14\n hog_feat = hog_feat.permute(0, 2, 3, 1)\n hog_feat = hog_feat.unfold(1, self.unfold_size,\n self.unfold_size).unfold(\n 2, self.unfold_size, self.unfold_size)\n hog_feat = hog_feat.flatten(3).view(self.B, self.T, 14, 14, -1)\n hog_feat = hog_feat.flatten(1, 3) # B N C\n return hog_feat",
"def flatten_layers(data):\n return data.reshape((data.shape[0], data.shape[1], -1))",
"def image2batchfunc(fun, ignorechannels=True):\n\n def func(batch):\n # Type assertion for Pycharm\n assert isinstance(batch, np.ndarray), \"Batch must be a numpy array.\"\n assert batch.ndim == 5 or batch.ndim == 4, \"Batch must be a 4D or 5D numpy.ndarray.\"\n\n # Infer dimensionality from batch dimension if dim is not valid\n dim = {5: 3, 4: 2}[len(batch.shape)]\n\n if dim == 3:\n # batch.shape = (numbatches, T, numchannels, row, col).\n # Reshape to (numbatches, numchannels, T, row, col)\n batch = batch.swapaxes(1, 2)\n # Apply function\n pbatch = np.array([fun(sample) if not ignorechannels else np.array([fun(image) for image in sample])\n for sample in batch])\n # Reswap axes and return\n pbatch = pbatch.swapaxes(1, 2)\n return pbatch\n\n elif dim == 2:\n # batch.shape = (numbatches, numchannels, row, col).\n pbatch = np.array([fun(sample) if not ignorechannels else np.array([fun(image) for image in sample])\n for sample in batch])\n # Return\n return pbatch\n\n return func",
"def stack_channels(inputs: List[TensVar], shapes: List[ImgShape], width: int,\n height: int, CHANNEL_DIM: int) -> TensVar:\n assert len(inputs) > 0\n if len(inputs) == 1:\n print(\"Only one input skipping stack\")\n return inputs[0]\n else:\n\n with tf.name_scope(\"channel\"):\n input_channels = []\n for i in range(len(inputs)):\n inp_ndim = len(shapes[i])\n nchannels = 1 # FIXME\n ch = tf.reshape(inputs[i], [tf.shape(inputs[i])[0],\n height, width, nchannels])\n input_channels.append(ch)\n concat_img = tf.concat(input_channels, CHANNEL_DIM)\n return concat_img"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Animals that can speak are correctly identified
|
def test_animals_can_speak(self):
self.assertEqual(self.lion, 'roar')
self.assertEqual(self.cat, 'meow')
|
[
"def print_humans_and_animals():",
"def print_animal_info(self):",
"def test_animals_can_speak(self):\r\n lion = Animal.objects.get(name=\"lion\")\r\n cat = Animal.objects.get(name=\"cat\")\r\n self.assertEqual(lion.speak(), 'The lion says \"roar\"')\r\n self.assertEqual(cat.speak(), 'The cat says \"meow\"')",
"def animal_eats(self):\n self.update_fodder()\n self.herbivore_eats()\n self.carnivore_eats()",
"def test_speak(self):\n self.assertTrue(self.animal.say())\n self.assertEqual(self.animal.last_speech, \"Hello, I'm an animal and my name is Animal.\")",
"def on_object(self, image, objects):\n for obj in objects:\n if self.is_object_recognition_appropriate(obj.name):\n self.say(\"I see a {}\".format(obj.name))",
"def add_animal(self, animal):\n try:\n if animal.saltwater:\n super().add_animal(animal)\n except AttributeError:\n raise AttributeError(\"Animal Is Incompatible With Biome\")",
"def encontrarPato(animal):\n if isinstance(animal, Pato):\n animal.nadar()\n animal.som()",
"def test_unknown_action(self):\n self.assertFalse(self.animal.do_something(action=\"play\"))\n self.assertFalse(self.animal.do_something(action=\"jump\"))\n self.assertFalse(self.animal.do_something(action=\"think\"))",
"def get_humans_by_animal_species(animal_species):",
"def test_animal_creation(self):\n buddy = Dog(\"Buddy\")\n self.assertIsInstance(buddy, Dog)\n self.assertIsInstance(buddy, Animal)",
"def all_animals_eat(self):\n for cell in itertools.chain.from_iterable(self.map):\n if type(cell).__name__ in self.allowed_cells:\n cell.gen_fodder()\n cell.eat_herbivore()\n cell.eat_carnivore()",
"def animals_gives_birth(self):\n for species, animals in self.new_fauna_list.items():\n for i in range(math.floor(len(self.new_fauna_list[species])/2)):\n animal = animals[i]\n if animal.probability_of_birth(len(animals)):\n offspring_species = animal.__class__\n offspring = offspring_species()\n animal.update_weight_after_birth(offspring)\n if animal.gives_birth:\n self.fauna_list[species].append(offspring)\n animal.gives_birth = False",
"def known_organisms():\n return [\"rat\"]",
"def speak(self, name):\n # Write your code here\n for p in self.pets:\n if p.name == name:\n return p.cry()\n return \"\"",
"def isIsomorphism(hcube1, hcube2):",
"def check_wild_animal_population(self):\n\t\tself.log.debug(\"Checking wild animal population: %s\", len(self.wild_animals))\n\t\tif len(self.wild_animals) == 0:\n\t\t\t# find a tree where we can place it\n\t\t\tfor building in self.buildings:\n\t\t\t\tif building.id == BUILDINGS.TREE_CLASS:\n\t\t\t\t\tpoint = building.position.origin\n\t\t\t\t\tEntities.units[UNITS.WILD_ANIMAL_CLASS](self, x=point.x, y=point.y, session=self.session)\n\t\t\t\t\treturn\n\t\t# we might not find a tree, but if that's the case, wild animals would die out anyway again,\n\t\t# so do nothing in this case.",
"def _get_animal_from_message(self, message):\n animal = None\n\n # Try to find an animal from our inventory in the message\n find_animal_regex = r'({animals})'.format(animals='|'.join(self.animals))\n ret = re.findall(find_animal_regex, message)\n\n # re.findall return is a list of matching strings in the message\n # Is an empty list if no match found\n if ret:\n animal = random.choice(ret)\n\n return animal",
"def can_encounter(self, player):"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given a urlsafe version of an Album key, get the actual key
|
def get_album_key_by_keystr(keystr):
    attr_err = 'Keystrings must be an instance of base string, received: %s' % keystr
kind_err = 'Expected urlsafe keystr for kind %s but received keystr for kind %s instead.'
if not keystr or not isinstance(keystr, basestring):
raise RuntimeError(attr_err)
key = ndb.Key(urlsafe=keystr)
if not key.kind() == PHOTOALBUM_KIND:
raise RuntimeError(kind_err % (PHOTOALBUM_KIND, key.kind()))
return key
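
A hedged round-trip sketch (App Engine ndb assumed, slug illustrative): a key built from a slug should survive the urlsafe encode/decode cycle.

key = get_album_key('summer-2015')                    # helper defined further below
assert get_album_key_by_keystr(key.urlsafe()) == key  # ndb Key.urlsafe() round-trips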
|
[
"def get_album_key(slug):\n err = 'Series slug must be defined and of of type basestring'\n\n if not slug or not isinstance(slug, basestring):\n raise RuntimeError(err)\n\n return ndb.Key(PHOTOALBUM_KIND, slug)",
"def get_key(url):\n digest = hashlib.md5(url.encode()).hexdigest()\n return f\"u/{digest[:7]}\"",
"def get_key_from_url(file_url):\t\n\tparts = urlparse(file_url)\n\tbucket_name = get_bucket_name_from_url(file_url)\n\tkey = parts.path.replace(\"/\" + bucket_name + \"/\", \"\")\n\treturn key",
"def as_key(key):\n return key.lstrip('/').rstrip('/')",
"def sharekey_from_url(url):\n index = url.find('share_key=')\n return url[index + len('share_key='):]",
"def get_key(object):\n return object.id.text.split('/')[-1]",
"def create_key_from_url(raw_url):\n org_url = urllib2.urlparse.urlparse(raw_url)\n new_key = ''\n net_location = org_url.netloc\n netloc_list = net_location.split(\".\")\n netloc_list.reverse()\n for part in netloc_list:\n new_key += '%s.' % part\n new_key = new_key[:-1] # Removes trailing period\n new_key = new_key + org_url.path \n return new_key",
"def get_album_by_slug(slug):\n\n album_key = get_album_key(slug)\n album = album_key.get()\n return album",
"def _extract_spreadsheet_key_from_url(url):\r\n result = url\r\n\r\n if 'key=' in url:\r\n result = url.split('key=')[-1].split('#')[0].split('&')[0]\r\n\r\n return result",
"def reverse_key(key):\n import urllib\n return urllib.quote_plus(key.replace(' ','_'))",
"def _get_key_url(self, key):\n urls = self.get_URLS(key)\n\n if len(urls) == 1:\n return urls[0]\n else: # multiple\n # TODO: utilize cache to check which archives might already be\n # present in the cache.\n # Then if not present in the cache -- check which are present\n # locally and choose that one to use\n if self._last_url and self._last_url in urls:\n return self._last_url\n else:\n return urls[0] # just the first one",
"def get_hub_key(entity_id):\n if isinstance(entity_id, URIRef):\n return entity_id\n if entity_id.startswith('hk:'):\n return hk[entity_id[len('hk:'):]]\n if URIRef(entity_id).startswith(hk) or is_hub_key(entity_id):\n return URIRef(entity_id)\n return hk[entity_id]",
"def _get_raw_key(self, key_id):",
"def get_dst_key(blob_key: str):\n return f\"{blob_key}\"",
"def parse_key(key_id):\n\tcomment = get_key_comment(key_id)[0]\n\tregex = re.compile(\".*?\\\\((.*?)\\\\)\")\n\tcomment_bits = re.findall(regex, comment)[0].split(' ')\n\tif comment_bits[0] == sha256(comment_bits[1]).hexdigest():\n\t\treturn comment_bits[1]",
"def get_ndb_key(urlsafe_key):\n try:\n return ndb.Key(urlsafe=urlsafe_key)\n except Exception: # pylint: disable=broad-except\n raise endpoints.BadRequestException(_CORRUPT_KEY_MSG)",
"def get_album_id(album_id):\n return BeetIdType.album.value + ':' + str(album_id)",
"def parse_key(key_id):\n comment = get_key_comment(key_id)[0]\n regex = re.compile(\".*?\\\\((.*?)\\\\)\")\n comment_bits = re.findall(regex, comment)[0].split(' ')\n if comment_bits[0] == sha256(comment_bits[1]).hexdigest():\n return comment_bits[1]",
"def get_resource_id_from_key(key):\n\n pair_strings = []\n\n pairs = key.pairs()\n\n for pair in pairs:\n kind = unicode(pair[0])\n key_or_id = pair[1]\n \n if isinstance(key_or_id, (int, long)):\n key_or_id = unicode(INTPREFIX + unicode(key_or_id))\n\n pair_strings.append(kind + SEPARATOR + key_or_id)\n\n buff = SEPARATOR.join(pair_strings)\n encoded = base64.urlsafe_b64encode(buff)\n encoded = encoded.replace('=', '')\n return encoded"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create a ndb.Key given an Album slug
|
def get_album_key(slug):
    err = 'Series slug must be defined and of type basestring'
if not slug or not isinstance(slug, basestring):
raise RuntimeError(err)
return ndb.Key(PHOTOALBUM_KIND, slug)
|
[
"def build_key(cls, song_id):\n return ndb.Key(cls, song_id)",
"def get_album_key_by_keystr(keystr):\n attr_err = 'Keystrings must be an instance of base string, recieved: %s' % keystr\n kind_err = 'Expected urlsafe keystr for kind %s but received keystr for kind %s instead.'\n if not keystr or not isinstance(keystr, basestring):\n raise RuntimeError(attr_err)\n\n key = ndb.Key(urlsafe=keystr)\n if not key.kind() == PHOTOALBUM_KIND:\n raise RuntimeError(kind_err % (PHOTOALBUM_KIND, key.kind()))\n\n return key",
"def get_album_by_slug(slug):\n\n album_key = get_album_key(slug)\n album = album_key.get()\n return album",
"def folder_key(title,folder_name=DEFAULT_FOLDER_NAME):\n #parameter order is reversed because of kwargs necessities :(\n #i dont use this atm\n return ndb.Key('Folder', folder_name,'File',title)",
"def add_album(conn, album_title, artist_name, release_year):\n curs = dbi.dict_cursor(conn)\n curs.execute('''insert into coda_album(album_title,artist_id, release_year)\n values (%s, (select artist_id from coda_artist where artist_name = %s), \n %s)''', \n [album_title, artist_name, release_year])\n conn.commit()",
"def _create_album(album):\n art_path = None\n if album['artpath']:\n art_path = BeetIdType.get_album_id(album['id'])\n return utils.create_album(\n BeetIdType.get_album_id(album['id']), album['album'],\n artist=album['albumartist'], year=album['year'],\n genre=album['genre'], coverArt=art_path,\n parent=BeetIdType.get_artist_id(album['albumartist'])\n )",
"def genre_key(genre_name=DEFAULT_GENRE):\n return ndb.Key('Genre', genre_name.lower())",
"def adverts_key(advert_category):\n return ndb.Key('AdvertCategory', advert_category)",
"def blog_key(blog_name=DEFAULT_BLOG_NAME):\n return ndb.Key('Blog', blog_name)",
"def _create_entity_key(self, entity_event):\n pass",
"def _playlists_key(playlist_name):\n key = ndb.Key(playlist_name, playlist_name)\n return key",
"def _create_jsonable_key(music_service_id, household_id):\n return str(music_service_id) + \"#\" + str(household_id)",
"def create_book_slug(sender, instance, created, **kwargs):\n if created:\n slug = generate_unique_slug(Book, instance.name)\n instance.slug = slug\n instance.save()",
"def _create_unique_key(self, dictionary, name, numb=0):\n if dictionary.get(name) is not None:\n numb += 1\n name = name.split(\"_\")[0]\n name += \"_{0}\".format(numb)\n name = self._create_unique_key(dictionary, name, numb)\n return name",
"def create_album(title, days, rating):\n release_date = timezone.now() + datetime.timedelta(days=days)\n return models.Album.objects.create(\n title=title,\n release_date=release_date,\n rating=rating\n )",
"def gallery_key():\n return ndb.Key('Gallery', 'All')",
"def create_key(cls, topic, entry_id):\n\t\treturn db.Key.from_path(\n\t\t\t\tFeedRecord.kind(),\n\t\t\t\tFeedRecord.create_key_name(topic),\n\t\t\t\tcls.kind(),\n\t\t\t\tget_hash_key_name(entry_id))",
"def post_key(post_name=DEFAULT_POST_NAME):\n return ndb.Key('Post', post_name)",
"def build_key(model, id):\n return \"{}.{}\".format(model.__name__, id)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given an album slug, fetch the album entity
|
def get_album_by_slug(slug):
album_key = get_album_key(slug)
album = album_key.get()
return album
|
[
"def get_album(album_id):\n return query_single(album_id, Album, album_schema)",
"def get_album(self):\n return self._album",
"def from_id(album_id):\n endpoint = album_endpoint + '/' + album_id\n response = api_call(endpoint)\n\n if response is None:\n return None\n \n parsed = json.loads(response.read())\n\n if not parsed['success']:\n return None\n \n return Album.from_json(parsed['data'])",
"def get_album(artist, album):\n return api_call('album.getInfo', artist=artist, album=album, autocorrect=1)",
"def _get_album_for_song(self, song):\n if song and song['album_id']:\n album_list, result = self.get_albums(None, song['album_id'])\n if result == 200:\n song['album'] = album_list[0]\n else:\n logger.error(f\"Could not find album for song with album id {song['album_id']} and song id {song['id']}\")\n return None\n return song\n else:\n logger.error(f\"Could not get a song or album id for {song}\")\n return None",
"def _get_album_or_image(json, imgur):\n if json['is_album']:\n return Gallery_album(json, imgur, has_fetched=False)\n return Gallery_image(json, imgur)",
"def get_album(self):\n payload = {'access_token': self._lr_object.access.token}\n url = SECURE_API_URL + \"api/v2/album/\"\n return self._lr_object._get_json(url, payload)",
"def get_album_key(slug):\n err = 'Series slug must be defined and of of type basestring'\n\n if not slug or not isinstance(slug, basestring):\n raise RuntimeError(err)\n\n return ndb.Key(PHOTOALBUM_KIND, slug)",
"def album(self, album_id, **kwargs):\n _id = self._get_album_id(album_id)\n # pylint: disable=no-member\n return self._get(API.ALBUM.value.format(id=_id), **kwargs)",
"def getAlbum():\r\n return args.album if args.album else os.path.basename(getFolder())",
"def get_album_by_id(self, album_id):\n self.app.curs.execute('select * from album where alid=%s', (album_id,))\n if self.app.curs.rowcount == 1:\n return self.app.curs.fetchone()\n else: # pragma: no cover\n return None",
"def get_album(self, album_id):\n return Album(\n self,\n self.call(\n 'muse/legacy/lookup',\n 'com.amazon.musicensembleservice.MusicEnsembleService.lookup',\n {\n 'asins': [album_id],\n 'features': [\n 'popularity',\n 'expandTracklist',\n 'trackLibraryAvailability',\n 'collectionLibraryAvailability'\n ],\n 'requestedContent': 'MUSIC_SUBSCRIPTION',\n 'deviceId': self.deviceId,\n 'deviceType': self.deviceType,\n 'musicTerritory': self.territory,\n 'customerId': self.customerId\n }\n )['albumList'][0]\n )",
"def get_albums(entity_url: str) -> list:\n entity_url = entity_url.rstrip(\"/\")\n response = requests.get(entity_url)\n soup = BeautifulSoup(response.text, \"html.parser\")\n albums = []\n for link in soup.find_all('a'):\n url = link.get('href')\n if url is not None and \"/album/\" in url:\n if url.startswith(\"http\"):\n albums.append(url)\n else:\n albums.append(f\"{entity_url}{url}\")\n return albums",
"async def get_album(self, album_id: int) -> APIReturn:\n return await self._request(\"GET\", \"/getAlbum\", extra_query={\"id\": album_id})",
"def get_album(cls, album_id) -> SpotifyAlbum:\n a = cls._spotify_api.album(album_id)\n return SpotifyAlbum(\n name=a[\"name\"],\n spotify_id=a[\"id\"],\n release_date_str=a[\"release_date\"],\n release_date_precision=a[\"release_date_precision\"],\n genres=a[\"genres\"],\n popularity=a[\"popularity\"],\n )",
"def get_random_album(self):\n lib = self.ctrl.library\n artist, album = lib.get_random_album()\n return self.resp_from_data({\n \"artist\": artist,\n \"album\": album,\n \"path\": lib.get_path(artist, album)\n })",
"def get_album(self, album_id):\n data, m = self._api_fallback(\n partial(self.gw.get_album, album_id), partial(self.api.get_album, album_id), gw_priority=False)\n\n if m == \"gw\":\n data = util.map_gw_album(data)\n\n # TODO: maybe better logic?\n data[\"cover_id\"] = str(data[\"cover_small\"]).split(\n \"cover/\")[1].split(\"/\")[0]\n\n return data, m",
"def display_album(album_id):\n fb_id = settings.FB_PAGE_ID\n fql = \"select aid, name from album where owner=%s and aid='%s'\" % (fb_id, album_id)\n valid_album = get_fql_result(fql)\n if valid_album:\n fql = \"select pid, src, src_small, src_big, caption from photo where aid = '%s' order by created desc\" % album_id\n album = get_fql_result(fql)\n #album_detail = [item for item in valid_album]\n return album",
"def search_for_album(album_name):\n\n print(f'Searching for album: {album_name}')\n\n search_result = spotifyObject.search(q=f'\"{album_name}\"', limit=20, type='album')\n\n items = search_result['albums']['items']\n\n results = []\n\n for item in items:\n if len(item['artists']) > 1:\n artists = tuple(art['name'] for art in item['artists'])\n else:\n artists = item['artists'][0]['name']\n\n results.append((artists, item['name'], item['id']))\n\n return results"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Fetch a list of Albums
|
def get_album_list():
# TODO: Paginate this, etc
entities = PhotoAlbum.query().order(-PhotoAlbum.title).fetch(1000)
return entities
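
The TODO above could be served by ndb's cursor support; a minimal sketch, assuming the same PhotoAlbum model (the page size and function name are illustrative):

def get_album_page(page_size=20, curs=None):
    # fetch_page returns (results, next_cursor, more); pass the returned
    # cursor back in to retrieve the following page.
    query = PhotoAlbum.query().order(-PhotoAlbum.title)
    entities, next_curs, more = query.fetch_page(page_size, start_cursor=curs)
    return entities, next_curs, more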
|
[
"def get_albums():\n return query_multiple(request.args, album_search, \\\n album_filter, Album, albums_schema)",
"def albums(self, albums, **kwargs):\n album_list = map(self._get_album_id, albums)\n return self._get(API.ALBUMS.value, ids=\",\".join(album_list), **kwargs)",
"def albums(self):\n query = {\n 'Operation': 'searchLibrary',\n 'ContentType': 'JSON',\n 'searchReturnType': 'ALBUMS',\n 'searchCriteria.member.1.attributeName': 'status',\n 'searchCriteria.member.1.comparisonType': 'EQUALS',\n 'searchCriteria.member.1.attributeValue': 'AVAILABLE',\n 'searchCriteria.member.2.attributeName': 'trackStatus',\n 'searchCriteria.member.2.comparisonType': 'IS_NULL',\n 'searchCriteria.member.2.attributeValue': None,\n 'albumArtUrlsSizeList.member.1': 'FULL',\n 'selectedColumns.member.1': 'albumArtistName',\n 'selectedColumns.member.2': 'albumName',\n 'selectedColumns.member.3': 'artistName',\n 'selectedColumns.member.4': 'objectId',\n 'selectedColumns.member.5': 'primaryGenre',\n 'selectedColumns.member.6': 'sortAlbumArtistName',\n 'selectedColumns.member.7': 'sortAlbumName',\n 'selectedColumns.member.8': 'sortArtistName',\n 'selectedColumns.member.9': 'albumCoverImageFull',\n 'selectedColumns.member.10': 'albumAsin',\n 'selectedColumns.member.11': 'artistAsin',\n 'selectedColumns.member.12': 'gracenoteId',\n 'sortCriteriaList': None,\n 'maxResults': 100,\n 'nextResultsToken': None,\n 'caller': 'getAllDataByMetaType',\n 'sortCriteriaList.member.1.sortColumn': 'sortAlbumName',\n 'sortCriteriaList.member.1.sortType': 'ASC',\n 'customerInfo.customerId': self.customerId,\n 'customerInfo.deviceId': self.deviceId,\n 'customerInfo.deviceType': self.deviceType,\n }\n\n data = self.call('cirrus/', None, query)['searchLibraryResponse']['searchLibraryResult']\n results = []\n results.extend(data['searchReturnItemList'])\n while results:\n r = results.pop(0)\n if r['numTracks'] >= 4 and r['metadata'].get('primeStatus') == 'PRIME':\n yield Album(self, r)\n\n if not results and data['nextResultsToken']:\n query['nextResultsToken'] = data['nextResultsToken']\n data = self.call('cirrus/', None, query)['searchLibraryResponse']['searchLibraryResult']\n results.extend(data['searchReturnItemList'])",
"def get_albums(page):\n return Album.query.paginate(page, 12, False)",
"def get_albums(entity_url: str) -> list:\n entity_url = entity_url.rstrip(\"/\")\n response = requests.get(entity_url)\n soup = BeautifulSoup(response.text, \"html.parser\")\n albums = []\n for link in soup.find_all('a'):\n url = link.get('href')\n if url is not None and \"/album/\" in url:\n if url.startswith(\"http\"):\n albums.append(url)\n else:\n albums.append(f\"{entity_url}{url}\")\n return albums",
"def get_albums(self):\n self.artist = self.artists_list.currentText()\n self.c_albums = [x['album'] for x in dmlc.list_albums(self.artist)\n if [x['album'] in self.albums_map[self.artist]]]\n self.albums_list.clear()\n self.albums_list.addItems(self.c_albums)\n self.update_navigation_buttons()",
"def get_albums(self, quantity, album_id):\n # TODO: get album list if album_id is a list\n if album_id:\n result = Album.select().where(Album.id == album_id)\n elif quantity:\n result = Album.select().order_by(fn.Rand()).limit(int(quantity))\n else:\n result = Album.select()\n if result:\n list_result = [row for row in result.dicts()]\n logger.debug('Getting result for get_album: %s', list_result)\n return list_result, 200\n else:\n logger.error(f\"Could not find album with id {album_id}\")\n return album_id, 400",
"def fetchAlbumIds(artist_id):\n pass",
"def get_albums(username):\n cur = mysql.connection.cursor()\n cur.execute(\"SELECT * FROM album WHERE username = '{0}'\".format(username))\n return cur.fetchall()",
"def albums(self, album_id):\n album_photos = PhotosAlbumsMapping.query.filter_by(AlbumID=album_id).order_by(\n asc(PhotosAlbumsMapping.PhotoID)\n ).all()\n album_photo_ids = [p.PhotoID for p in album_photos]\n photos = Photos.query.filter(Photos.PhotoID.in_(album_photo_ids)).all()\n\n content = jsonify({\n \"photos\": [{\n \"id\": photo.PhotoID,\n \"image\": photo.Image,\n \"caption\": photo.Caption,\n \"takenBy\": photo.TakenBy,\n \"country\": photo.Country,\n \"countryCode\": photo.CountryCode,\n \"city\": photo.City,\n \"albumID\": self.get_album_id(photo.PhotoID),\n \"categories\": self.get_categories(photo.PhotoID),\n \"createdAt\": photo.Created,\n \"updatedAt\": photo.Updated,\n } for photo in photos]\n })\n\n return make_response(content, 200)",
"def fetchAlbumIds(artist_id):\n url = 'https://api.spotify.com/v1/artists/' + artist_id + '/albums?market=US&album_type=album'\n req = requests.get(url)\n\n data = req.json()\n\n #checking for bad return value\n if not req.ok:\n print \"error : \" + data['error']['message']\n return \"error : \" + data['error']['message']\n\n albums = []\n for item in data['items']:\n \talbums.append(item['id'])\n\n return albums",
"async def _build_albums_listing(self, mass: MusicAssistant):\n media_class = LIBRARY_MEDIA_CLASS_MAP[LIBRARY_ALBUMS]\n\n return BrowseMediaSource(\n domain=DOMAIN,\n identifier=LIBRARY_ALBUMS,\n title=LIBRARY_TITLE_MAP[LIBRARY_ALBUMS],\n media_class=MEDIA_CLASS_DIRECTORY,\n media_content_type=MEDIA_TYPE_ALBUM,\n can_play=False,\n can_expand=True,\n children_media_class=media_class,\n children=sorted(\n await asyncio.gather(\n *[\n self._build_item(mass, album, can_expand=True)\n # we only grab the first page here becaus ethe HA media browser does not support paging\n for album in (await mass.music.albums.db_items(True)).items\n ],\n ),\n key=lambda x: x.title,\n ),\n )",
"def _get_albums(self, shared=False):\n\n album_uuids = self._get_album_uuids(shared=shared)\n return list({self._dbalbum_details[album][\"title\"] for album in album_uuids})",
"def spotify_api_get_albums(token, ids, sample=False):\n sp = _get_spotify_client(token)\n\n album_chunks = []\n chunk_size = 20\n for i, chunk in enumerate(_chunks(ids, chunk_size)):\n print \"Fetching album chunk {}/{:.0f}\".format(i + 1, math.ceil(len(ids)/float(chunk_size)))\n album_chunks.append(sp.albums(albums=chunk))\n if sample:\n print \"Sample run, stopping\"\n break\n\n return [a for a_chunk in album_chunks for a in a_chunk['albums']]",
"def getAlbums():\n\n r = requests.get(ALBUMS_URL, headers=HEADER, timeout=5)\n\n if r.status_code == 200:\n \n try:\n albums = [] \n soup = BeautifulSoup(r.text, \"html.parser\")\n album = soup.find_all(\"div\", class_=\"duv\")\n for i,al in enumerate(album): \n temp = {}\n temp['link'] = al.find_all(\"a\")[0]['href']\n temp['album'] = al.find_all(\"span\", class_=\"title\")[0].text\n albums.append(temp)\n\n if len(albums) > 0:\n return albums\n else:\n print(\"No albums found on site2!\")\n sys.exit(0)\n \n except Exception as e:\n print(\"Failed to get albums from site2\\n\", e)\n sys.exit(0)\n\n else:\n print(\"Albums Url fetch failed! Status code: {}\".format(r.status_code))\n sys.exit(0)",
"def get_album(self):\n payload = {'access_token': self._lr_object.access.token}\n url = SECURE_API_URL + \"api/v2/album/\"\n return self._lr_object._get_json(url, payload)",
"def get_album_list(self, **kwargs):\n TAG = u'renren get album list'\n\n SUCCESS_MSG = u'人人获取相册列表成功'\n ERROR_MSG = u'人人获取相册列表失败'\n\n logger.info(u\"%s:%s\" % (TAG, u'%r 获取相册列表中...' % (self)))\n\n path = '/v2/album/list'\n method = 'GET' \n\n try:\n r = self._renrenAPI(path, method, ownerId=self.uid)\n except RenRenAPIError, err:\n logger.error(u\"%s:%s\" % (TAG, u\"%r 获取相册列表失败,原因:%s\" % (self, err)))\n return (False, None, ERROR_MSG)\n else:\n r = list(r)\n return (True, r, SUCCESS_MSG)",
"def get_albums(self, query, va_likely):\n # Strip non-word characters from query. Things like \"!\" and \"-\" can\n # cause a query to return no results, even if they match the artist or\n # album title. Use `re.UNICODE` flag to avoid stripping non-english\n # word characters.\n query = re.sub(r'(?u)\\W+', ' ', query)\n # Strip medium information from query, Things like \"CD1\" and \"disk 1\"\n # can also negate an otherwise positive result.\n query = re.sub(r'(?i)\\b(CD|disc)\\s*\\d+', '', query)\n\n # Query VGMdb\n r = requests.get('http://vgmdb.info/search/albums/%s?format=json' % urllib.parse.quote(query.encode('utf-8')))\n\n # Decode Response's content\n try:\n items = r.json()\n except:\n self._log.debug('VGMdb JSON Decode Error: (query: %s)' % query)\n return []\n\n # Break up and get search results\n\n return [self.album_for_id(item[\"link\"].split('/')[-1])\n for item in items[\"results\"][\"albums\"]]",
"def search_for_album(album_name):\n\n print(f'Searching for album: {album_name}')\n\n search_result = spotifyObject.search(q=f'\"{album_name}\"', limit=20, type='album')\n\n items = search_result['albums']['items']\n\n results = []\n\n for item in items:\n if len(item['artists']) > 1:\n artists = tuple(art['name'] for art in item['artists'])\n else:\n artists = item['artists'][0]['name']\n\n results.append((artists, item['name'], item['id']))\n\n return results"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Attempts a Redis DB connection and returns the DB Object
|
def dbConnect(self):
r = redis.StrictRedis()
try:
r = redis.from_url(os.environ.get("REDIS_URL"))
print("DB Connection seems okay!")
except Exception as error:
        print("Oops! An exception has occurred:", error)
        print("Exception TYPE:", type(error))
r = None
finally:
return r
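
Note that redis.from_url only builds a client; no socket is opened until the first command, so the try/except above will rarely catch an unreachable server. A minimal sketch that verifies the connection with ping() (the fallback URL is an assumption):

import os
import redis

def db_connect_checked():
    try:
        r = redis.from_url(os.environ.get("REDIS_URL", "redis://localhost:6379/0"))
        r.ping()  # forces a real round trip; raises if the server is unreachable
        return r
    except redis.RedisError as error:
        print("Redis connection failed:", error)
        return None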
|
[
"def establish_connection(self) -> Redis:\n try:\n conn = self.connection()\n conn.ping()\n except ConnectionError:\n log.error(\"Connection to DB could not be established\")\n return False\n return conn",
"def get_database_connection() -> redis.Redis:\n global _database\n if _database is None:\n database_password = os.getenv('DB_PASSWORD')\n database_host = os.getenv('DB_HOST')\n database_port = os.getenv('DB_PORT')\n _database = redis.Redis(host=database_host, port=database_port, # type: ignore\n password=database_password)\n db_logger.debug('Got new db connection')\n return _database",
"def connect_to_redis():\n return Redis(host=redis_host, port=redis_port, db=0)",
"def _get_conn(self):\n return redis.Redis(connection_pool=self.pool)",
"def redis_connect():\n try:\n return new_redis_connection()\n except redis.exceptions.ConnectionError as redis_error:\n log.error(redis_error)\n return None",
"def conn_redis(host, port, db=0):\r\n r = redis.Redis(host=host, port=port, db=db)\r\n return r",
"def connect_redis():\n rv = redis.Redis(host=app.config['REDIS_HOST'], port=app.config['REDIS_PORT'])\n return rv",
"def get_redis_connection(session):\n return redis.Redis(\n host=session.get('redis_host', settings.REDIS_HOST),\n port=session.get('redis_port', settings.REDIS_PORT),\n db=session.get('redis_db', settings.REDIS_DB))",
"def get_database(self, override=None):\n\n # Grab the database connection arguments\n redis_args = self['redis']\n\n # If we have an override, read some overrides from that\n # section\n if override:\n redis_args = redis_args.copy()\n for key, value in self[override].items():\n if not key.startswith('redis.'):\n continue\n key = key[len('redis.'):]\n if value:\n redis_args[key] = value\n else:\n redis_args.pop(key, None)\n\n # Return the redis database connection\n return database.initialize(redis_args)",
"def get_client(conn):\n # No database indicates a cluster connection\n if not conn.get('db', None):\n conn.pop('db', None)\n return connect_redis_cluster(conn)\n\n # Otherwise it's a regular redis connection\n return connect_redis(conn)",
"def _open_db_connection():\n client = MongoClient()\n return client[DB]",
"def redis_client(host=None, port=None, db=None):\n global REDIS_CLIENT\n\n if not REDIS_CLIENT:\n REDIS_CLIENT = Redis(**_get_options(host, port, db=db, sync=True))\n return REDIS_CLIENT",
"def _get_connection():\n conn = DB_CONFIG.get('DB_CONNECTION')\n if conn is not None:\n # print \"connection exists, so reusing it...\"\n return conn\n else:\n # print \"no connection found...\"\n return _connect_db()",
"def connect_redis(uri):\n puri = urlparse.urlparse(uri)\n host = puri.hostname\n port = puri.port\n password = puri.password if puri.password else ''\n db_name = puri.path.split('/')[1]\n r = redis.Redis(host=host, port=port, password=password, db=db_name)\n assert r.ping()\n return r",
"def getRedisClient(self) -> Redis:\n if self.redisClient is not None:\n self.logger.info(\"getting existing redis client\")\n return self.redisClient\n try:\n self.logger.info(\"creating new redis client\")\n self.redisClient: Redis = Redis(host=self.config[\"kvs\"][\"host\"], port=self.config[\"kvs\"][\"port\"])\n except Exception as exc:\n self.logger.critical(exc)\n exit(1)\n return self.redisClient",
"def get_redis_client():\n try:\n redis_client = redis.StrictRedis(\n host=app.config[\"REDIS_HOST\"],\n port=app.config[\"REDIS_PORT\"],\n decode_responses=True,\n )\n return redis_client\n except Exception as e:\n print(e)",
"def get_client() :\n global _redis_client\n if _redis_client: return _redis_client\n else :\n _redis_client = redis.StrictRedis(\n host=os.environ.get('REDIS_HOST', 'localhost'),\n port=int(os.environ.get('REDIS_PORT', '6000')),\n db=0\n )\n return _redis_client",
"def _get_db(self, n_tries=3):\n try:\n conn = pymongo.MongoClient()\n db = conn[self.DATABASE]\n except pymongo.errors.ConnectionFailure:\n if n_tries:\n self._get_db(n_tries-1)\n else:\n self.DB = None\n self.CONNECTION = None \n else:\n self.DB = db\n self.CONNECTION = conn",
"async def connection():\n return await r.connect(db='main_db')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
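Illustrative usage only, separate from the record above: dbConnect() returns None when the connection attempt fails, so a caller should check the handle before issuing commands. The helper name and key below are placeholders, not part of the original code.

import redis

def fetch_counter(db, key="page:hits"):
    # Fall back to 0 when no connection was obtained or the read fails.
    if db is None:
        return 0
    try:
        value = db.get(key)
        return int(value) if value is not None else 0
    except redis.RedisError:
        return 0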
Converts short URL to an ID
|
def shortURLToId(self, shortURL):
id = 0
for i in shortURL:
val_i = ord(i)
if(val_i >= ord('a') and val_i <= ord('z')):
id = id*62 + val_i - ord('a')
elif(val_i >= ord('A') and val_i <= ord('Z')):
            id = id*62 + val_i - ord('A') + 26
else:
id = id*62 + val_i - ord('0') + 52
return id
|
[
"def short_url(lastid):\n number = lastid +100000000000\n bs62encoded = base62.encode(number)\n return 'https://abc.com/{id}'.format(id=str(bs62encoded))",
"def get_id_shortlink(link = None):\n choppedLink = legacy_check(link)\n id = None\n try:\n id = choppedLink[3] # or -1 instead of 3\n except:\n pass #dont care bout issues here\n return id",
"def encode(shorturl_id: int) -> str:\n short_resource = []\n while shorturl_id > 0:\n character_index = shorturl_id % BASE\n short_resource.append(CHARACTER_SPACE[character_index])\n shorturl_id //= BASE\n return \"\".join(short_resource[::-1])",
"def extract_id(cls, link):\n\t\treturn link.split('/')[-1]",
"def get_id(url):\n\n return re.search(GET_ID_REGEX_URL, url)[0]",
"def long_to_short(self, url, url_mobile=None, url_tablet=None):\n\n temp_short = uuid4() #temporary short code so we can get lastworid after insert\n query = 'INSERT into urls(short,default_url,mobile_url,tablet_url) VALUES (\"{short}\",\"{url}\",\"{mobile}\",\"{tablet}\");'.\\\n format(short=temp_short, url=url,\n mobile=url_mobile, tablet=url_tablet)\n with sq.connect(self.DB) as conn:\n cursor = conn.cursor()\n try:\n cursor.execute(query)\n url_id = cursor.lastrowid + 1\n based_id = base36.encode(url_id)\n #Update to the definitive short url\n update_query = 'UPDATE urls SET short = \"{new_short}\" WHERE short = \"{temp_uuid}\";'.\\\n format(new_short=based_id, temp_uuid=temp_short)\n cursor.execute(update_query)\n return based_id\n except sq.OperationalError:\n print(\"ERROR\")\n return False\n except ValueError:\n return False",
"def decode(self, shortUrl):\n val = shortUrl.replace(\"http://tinyurl.com/\",\"\")\n return self.url_dict[int(val)]",
"def _id_from_url(url):\n url = re.sub(r'\\?.*', '', url)\n video_id = url.split('/')[-2]\n return video_id",
"def __create_short_url(self):\n last_short_url = Url.__load_last_short_url()\n short_url = self.__increment_string(last_short_url)\n Url.__save_last_short_url(short_url)\n return short_url",
"def shorten_url():\n check_authentication_with_token()\n\n try:\n valid_url(request.json)\n except MultipleInvalid as e:\n abort(400, e.msg)\n except BadRequest:\n abort(400, \"The request does not contain a body.\")\n\n long_url = request.json.get('url')\n site_url = current_app.config['SITE_URL']\n\n if request.json.get('vanity_string') and not g.current_user.is_anonymous:\n vanity_string = request.json.get('vanity_string')\n short_url = UrlSaver.generate_and_save_urls(long_url, g.current_user,\n vanity_string)\n return jsonify({'id': short_url.id,\n 'short_url': site_url + short_url.url}), 201\n\n user = g.current_user\n\n short_url = UrlSaver.generate_and_save_urls(long_url, user)\n return jsonify({'id': short_url.id,\n 'short_url': site_url + short_url.url}), 201",
"def get_shorten_link(token, url):\n bitlink_site_url = \"https://api-ssl.bitly.com/v4/bitlinks\"\n headers = {f\"Authorization\": \"Bearer {token}\"}\n payload = {\"long_url\": url}\n response = requests.post(bitlink_site_url, json=payload, headers=headers)\n response.raise_for_status()\n return response.json()['id']",
"def short_id(self):\n if self.short_id_missing:\n return \"0\" * settings.ID_LENGTH\n return str(self.id)[0:settings.ID_LENGTH]",
"def get_row_id_for_short_url(url):\n try:\n return short_url.decode_url(url)\n except:\n return -1",
"def shortened_id(self):\n return str(self.id)[-8:]",
"def get_item_id(url):\n ret = \"\"\n try:\n link = create_original_link(url)\n ret = link.split(\"/\")[-1]\n if ret.strip() == \"\":\n ret = link.split(\"/\")[-2]\n except Exception as e:\n print(\"Failed to get id: \" + format(e))\n return ret",
"def id_from_url(url: str) -> str:\n parts = RedditBase._url_parts(url)\n try:\n comment_index = parts.index(\"comments\")\n except ValueError:\n raise InvalidURL(url) from None\n\n if len(parts) - 4 != comment_index:\n raise InvalidURL(url)\n return parts[-1]",
"def encode(self, longUrl):\n shortUrl = \"http://tinyurl.com/\" + str(hash(longUrl))\n self.decode_map[shortUrl] = longUrl\n return shortUrl",
"def decode(self, shortUrl):\n shortUrl = shortUrl[-6:]\n if shortUrl in self.short_to_long:\n return self.short_to_long[shortUrl]",
"def bill_id_from_url(url):\n match = re.search('(h[a-z]+)(\\d+)([a-z]+)',url)\n bill_id = match.group()\n return bill_id"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
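Illustrative counterpart only, not part of the record above: an encoder that inverts the decoding scheme used by shortURLToId, i.e. 'a'-'z' map to 0-25, 'A'-'Z' to 26-51 and '0'-'9' to 52-61. The alphabet ordering is inferred from the decoder; a real shortener may use a different ordering.

import string

# 62-character alphabet matching the decoder's value assignment.
ALPHABET = string.ascii_lowercase + string.ascii_uppercase + string.digits

def id_to_short_url(n: int) -> str:
    if n == 0:
        return ALPHABET[0]
    digits = []
    while n > 0:
        n, rem = divmod(n, 62)
        digits.append(ALPHABET[rem])
    # Most-significant digit first.
    return "".join(reversed(digits))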
probably the worst way to parse this captcha
|
def get_captcha_reply(captcha):
def get_char_at(pos, captcha):
char_chars = [line[pos-1:pos] for line in captcha.split(b'\n')]
key = ''.join([ str(s, 'ascii') for s in char_chars])
if key == ' | ':
return get_char_at(pos+2, captcha)
if key == ' | .\\ ':
return get_char_at(pos+2, captcha)
return chars[key]
pos = 1
a, size = get_char_at(pos, captcha)
pos += size
pwn.log.info("a=%d" % a)
op, size = get_char_at(pos, captcha)
pos += size
pwn.log.info('op=%s' % op)
b, size = get_char_at(pos, captcha)
pos += size
pwn.log.info('b=%d' % b)
if op == '-':
return a - b
if op == '*':
return a * b
if op == '/':
return a / b
if op == '+':
return a + b
pwn.log.error("Ops not found (%s)" % op)
|
[
"def baixa_captcha(self):\n url = \"https://www.receita.fazenda.gov.br/PessoaJuridica/CNPJ/cnpjreva/captcha/gerarCaptcha.asp\"\n pagina = self.sessao.get(url)\n open('teste.png','wb').write(pagina)\n imagem_data = (ndimage.imread('teste.png'))\n# plt.imshow(imagem_data)\n# plt.show()\n\n # Site da receita exige tempo de espera\n time.sleep(1)\n\n imagem_data = imagem_data.reshape(1,50,180,4)\n predicao = quebra_captcha(imagem_data).flatten()\n predicao = ''.join([ classes[x] for x in predicao ]).lower()\n return(predicao)",
"def get_captcha(self):\n res = self._limited_call(self._requests.get,\n constants.FA_ROOT + \"/captcha.jpg\")\n data = res.content\n return data",
"def handle_captcha(self):\n self.webdriver.save_screenshot('./out/captcha.png')\n sleep(20)\n\n with open('./out/captcha', 'r') as f:\n try:\n self.webdriver.find_element_by_xpath(\"//input[@aria-label='Type the text you hear or see']\").send_keys(f.read())\n except:\n log.error('Captcha input failed. Possibly incorrect captcha?')\n raise\n\n self.webdriver.find_element_by_xpath('//*[@id=\"identifierNext\"]').click()\n sleep(4)\n\n self.webdriver.find_element_by_css_selector(\"input[type=password]\").send_keys(self.bot.getPassword())",
"def parse_captcha_string(captcha_string: str):\n try:\n if captcha_string.find('?') != -1:\n captcha_string = captcha_string[:captcha_string.find('?')]\n list_digits = captcha_string.split('+')\n if list_digits[1] == '':\n return None\n if int(list_digits[1]) > 25:\n list_digits[1] = list_digits[1][0]\n\n except (ValueError, IndexError) as error:\n print('Cant recognize captcha')\n print(error)\n else:\n return int(list_digits[0]) + int(list_digits[1])",
"def process_captcha(self, req, resp):\n self.logger.debug(\n \"RPM over Anti-Automation threshold %s\",\n self.cfg.MAX_RPM\n )\n # test the aa cookie if they provided it\n if 'aa' in req.cookies and self.web_util.test_captcha(req.cookies['aa']):\n self.logger.debug('Captcha completed successfully')\n #reset their counter\n now = int(time.time())\n self.session_tracker[req.remote_addr] = {\n 'c':1,\n 't':now\n }\n resp.unset_cookie('aa')\n #if they provided and failed set new one and throw error\n elif 'aa' in req.cookies:\n self.set_captcha_required(resp)\n raise falcon.HTTPError(\n falcon.HTTP_401, #Forbidden\n 'Error',\n \"Captcha Rejected\"\n )\n else:\n self.set_captcha_required(resp)\n raise falcon.HTTPError(\n falcon.HTTP_401, #Forbidden\n 'Error',\n \"Captcha Required\"\n )",
"def extractCaptcha(x, y, nameInfix=None, debug=False):\n\n\tif nameInfix == None:\n\t\tcaptchaName = \"./captcha/captcha_\" + str(datetime.datetime.now().isoformat()) + \".png\"\n\telse:\n\t\tcaptchaName = \"./captcha/captcha_\" + str(nameInfix) + \".png\"\n\n\treturn extractScreenPart(x-50, y+5, 170, 60, name=captchaName, debug=debug)",
"def generate_captcha(captcha_text):\n\timage = ImageCaptcha()\n\tcaptcha = image.generate(captcha_text)\n\tcaptcha_image = Image.open(captcha)\n\tcaptcha_array = np.array(captcha_image)\n\treturn captcha_array",
"def captcha(self):\n notification.send_sms(message=message)\n notification.send_emails(emails=email, message=message)\n sleep(25)\n\n ### this code snippet is for reference only, not to be used ###\n # sleep(3)\n # captcha = self.driver.find_element_by_xpath('/html/body/div/iframe[0]')\n # self.driver.switch_to.frame(captcha)\n # captcha_loc = captcha.location\n # print(captcha_loc)\n # captcha_x = captcha_loc[\"x\"]\n # captcha_y = captcha_loc[\"y\"]\n # self.actions.tap_and_hold(captcha_x, captcha_y)\n # sleep(5)\n # self.actions.release(captcha_x, captcha_y)\n # self.search_input()",
"def bypass_captcha(self, rps):\n viewstate_pattern = r\"id=\\\"__VIEWSTATE\\\".*\\\"(.*)\\\"\"\n viewstategenerator_pattern = r\"id=\\\"__VIEWSTATEGENERATOR\\\".*\\\"(.*)\\\"\"\n CAPTCHA_PATTERN = r\"id=\\\"ctl00_ContentPlaceHolder1_ctl00_lblCapcha\\\".*?>(.*?)<\\/span>\"\n viewstate = re.search(viewstate_pattern, rps)\n if viewstate:\n viewstate = viewstate.group(1)\n else:\n print(\"VIEWSTATE value not found!\")\n viewstategenerator = re.search(viewstategenerator_pattern, rps)\n if viewstategenerator:\n viewstategenerator = viewstategenerator.group(1)\n captcha = re.search(CAPTCHA_PATTERN, rps)\n if captcha:\n captcha_text = captcha.group(1)\n print(\"[*] CAPTCHA -> [{}]\".format(captcha_text))\n payload = {\n 'ctl00$ContentPlaceHolder1$ctl00$txtCaptcha':captcha_text,\n '__VIEWSTATE':viewstate,\n '__VIEWSTATEGENERATOR':viewstategenerator,\n '__EVENTARGUMENT':'',\n '__EVENTTARGET':'',\n 'ctl00$ContentPlaceHolder1$ctl00$btnXacNhan': 'Vào website'\n }\n rps = self.session.post(url = home_url, headers = BROWSER_HEADERS, data=payload)\n if CAPTCHA_ELEMENT_ID not in rps.text:\n print(\"[*] CAPTCHA BYPASSED\")\n return True\n else:\n print(\"CAPTCHA NOT BYPASSED! PLEASE REPORT TO DEVELOPER BACHVKHOA!\")\n else:\n print(\"[*] CAPTCHA NOT FOUND\")\n return False",
"def get(self):\n imgio, capstr = getcaptcha(4)\n self.session.set(\"captcha\", capstr.upper())\n self.set_header(\"Content-Type\", \"image/png\")\n self.write(imgio.read())",
"def test_display_captcha(self):\r\n res = self.get()\r\n for i in range(6):\r\n res = res.forms[\"loginform\"].submit()\r\n\r\n form = res.forms[\"loginform\"]\r\n\r\n self.assertIn(\"captcha\", form.fields)",
"def test_good_captcha(self):\r\n self.F.UserFactory.create(username=\"test\", password=\"sekrit\")\r\n\r\n session_data = {}\r\n\r\n with patch_session(session_data):\r\n res = self.get()\r\n for i in range(6):\r\n res = res.forms[\"loginform\"].submit()\r\n\r\n form = res.forms[\"loginform\"]\r\n answer = session_data[\"auth_captcha_answer\"]\r\n form[\"captcha\"] = answer\r\n form[\"username\"] = \"test\"\r\n form[\"password\"] = \"sekrit\"\r\n res = form.submit(status=302)\r\n\r\n self.assertRedirects(res, reverse(\"home\"))",
"def clean_captcha(self):\r\n answer = self.cleaned_data.get(\"captcha\")\r\n if answer != self.captcha_answer:\r\n raise forms.ValidationError(\r\n \"Sorry, that's not the answer we were looking for.\")",
"def solve_captcha(self):\n # Switch to the Captcha's iframe\n captcha = CapatchaSolver(self.driver)\n while True:\n self.driver.switch_to.frame(self.driver.find_element_by_tag_name(\"iframe\"))\n captcha.solve_captcha()\n # Check if we passed the captcha part by checking the page title\n wait = WebDriverWait(self.driver, 10)\n try:\n wait.until_not(EC.title_is(consts.BLOCKED))\n break\n except TimeoutException:\n self.driver.refresh()",
"def funcbot():\n msg.askyesno(\"Captcha\", \"Are you human\")",
"def test_bad_captcha(self):\r\n self.F.UserFactory.create(username=\"test\", password=\"sekrit\")\r\n\r\n session_data = {}\r\n\r\n with patch_session(session_data):\r\n res = self.get()\r\n for i in range(6):\r\n res = res.forms[\"loginform\"].submit()\r\n\r\n form = res.forms[\"loginform\"]\r\n answer = session_data[\"auth_captcha_answer\"]\r\n form[\"captcha\"] = answer + 1 # oops, wrong answer!\r\n form[\"username\"] = \"test\"\r\n form[\"password\"] = \"sekrit\"\r\n res = form.submit(status=200)\r\n\r\n res.mustcontain(\"not the answer we were looking for\")",
"def handle_verify_code(self, code):\n r = self.session.get(self.image_url_format.format(code=code))\n\n # FIXME use terminal better\n img_path = os.path.expanduser('~/') + 'pansh.{}.vcode.png'.format(hash(self.username))\n with open(img_path, mode='wb') as fp:\n fp.write(r.content)\n print(\"Saved verification code to {}\".format(os.path.dirname(img_path)))\n vcode = raw_input(\"Please input the captcha:\\n\")\n return vcode",
"def get_captcha(self, file_name, file_bytes, file_type=\"image/jpeg\", codetype=\"1000\", repeat=10):\n cid = self.upload(file_name, file_bytes, file_type, codetype)\n if not cid:\n return None, None\n while repeat > 0:\n code = self.result(cid)\n if code:\n return cid, code\n repeat -= 1\n time.sleep(2)\n return cid, None",
"def getTasseledCap(img):"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The Amazon Resource Name (ARN) of the custom platform to use with the environment.
|
def platform_arn(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "platform_arn")
|
[
"def platform_arn(self) -> Optional[str]:\n return pulumi.get(self, \"platform_arn\")",
"def platform_arn(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"platform_arn\")",
"def PLATFORM_NAME(self) -> str:",
"def get_platform_name(cls):\n platform_info = Utility.get_platform_info\n try:\n platform_name = platform_info[\"product\"]\n except:\n print(\"Utility: Failed to obtain platform name!\")\n platform_name = \"Unspecified\"\n return platform_name",
"def platform_key(self):\n return self._lookup_base64_attribute('platform_key')",
"def name(cls):\n\n system = platform.system()\n\n # Apply system map\n if system in NAME_MAP:\n system = NAME_MAP[system]\n\n return system",
"def environment_label(self) -> str:\n return self._environment_label",
"def get_platform_name(plat: Optional[Platform] = None) -> str:\n if plat is None:\n plat = get_platform()\n\n if plat == Platform.LINUX:\n return \"Linux\"\n if plat == Platform.WINDOWS:\n return \"Windows\"\n if plat == Platform.MAC:\n return \"MacOS\"\n return \"Unknown\"",
"def GetPlatform(self):\n arch = \"None\"\n # check architecture name\n if \"CMTCONFIG\" in os.environ:\n arch = os.environ[\"CMTCONFIG\"]\n elif \"SCRAM_ARCH\" in os.environ:\n arch = os.environ[\"SCRAM_ARCH\"]\n return arch",
"def platform(self):\n return self.config['production'].get('platform', 'python')",
"def get_platform(self, train):\n try:\n return train.platform\n except AttributeError:\n return ''",
"def platform(self):\n return self.random.choice([\n 'Laptop', \n 'Desktop', \n 'Workstation', \n 'Server', \n 'Virtual Machine', \n 'Container', \n 'Micro-Service', \n 'Droplet', \n 'SaaS'\n ])",
"def app_image_config_arn(self) -> Optional[str]:\n return pulumi.get(self, \"app_image_config_arn\")",
"def application_arn(self) -> Optional[str]:\n return pulumi.get(self, \"application_arn\")",
"def managed_by(self):\n if \"arn:aws:iam::aws:\" in self.arn:\n return \"AWS\" # pragma: no cover\n else:\n return \"Customer\"",
"def get_application_name(): # real signature unknown; restored from __doc__\n return \"\"",
"def fabric_arm_resource_name(self) -> Optional[str]:\n return pulumi.get(self, \"fabric_arm_resource_name\")",
"def architecture_name(self):\n return get_architecture_name(self.architecture)",
"def os(self):\n cluster_template = self.config.get(\"global\", \"cluster_template\", fallback=\"default\")\n return self.config.get(\"cluster {0}\".format(cluster_template), \"base_os\", fallback=\"alinux\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The name of an Elastic Beanstalk solution stack (platform version) to use with the environment.
|
def solution_stack_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "solution_stack_name")
|
[
"def solution_stack_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"solution_stack_name\")",
"def stack_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"stack_name\")",
"def stack_name(self) -> str:\n return jsii.get(self, \"stackName\")",
"def env_name(self):\n pass",
"def stack_name(self):\n stack_name = getattr(self, '__stack_name', None)\n if (\n self.args.stack_name and\n not stack_name\n ):\n stack_name = self.args.stack_name\n elif not stack_name:\n stack_name = \"nephoria-stack-\" + str(int(time.time()))\n\n setattr(self, '__stack_name', stack_name)\n return stack_name",
"def get_stack_name(new=False):\n if new:\n # For back-compatibility\n set_stack_name()\n\n try:\n stack_tag = get_env_tag()\n except AttributeError:\n stack_tag = 'active'\n env.tag = stack_tag\n if not hasattr(env, 'stack_name'):\n # get_config needs a stack_name so this is a hack because we don't\n # know it yet...\n env.stack_name = 'temp'\n zone_name = get_zone_name()\n zone_id = get_zone_id()\n logger.info(\"fab_tasks::get_stack_name: Found master zone '%s' in config...\", zone_name)\n # get record name in the format of: stack.[stack_tag].[app]-[env]\n record_name = get_tag_record_name(stack_tag)\n dns_name = \"{}.{}\".format(record_name, zone_name)\n r53_conn = get_connection(R53)\n try:\n # get stack id\n stack_suffix = r53_conn.get_record(zone_name, zone_id, record_name, 'TXT').replace('\"', \"\")\n logger.info(\"fab_tasks::get_stack_name: Found stack suffix '%s' \"\n \"for dns record '%s'... \", stack_suffix, dns_name)\n legacy_name = get_legacy_name()\n env.stack_name = \"{0}-{1}\".format(legacy_name, stack_suffix)\n logger.info(\"fab_tasks::get_stack_name: Found stack name '%s'...\", env.stack_name)\n except Exception:\n raise DNSRecordNotFoundError(dns_name)\n\n return env.stack_name",
"def set_stack_name():\n # create a stack id\n r53_conn = get_connection(R53)\n zone_name = get_zone_name()\n zone_id = get_zone_id()\n stack_suffix = uuid.uuid4().__str__()[-8:]\n try:\n stack_tag = get_env_tag()\n if stack_tag == 'active':\n # print red(\"'Active' tag is reserved, please change a tag. \")\n raise ActiveTagExistConflictError()\n elif r53_conn.hastag(zone_name, zone_id, get_tag_record_name(stack_tag)):\n # print red(\"{} exists, please change a tag. \".format(env.tag))\n raise TagRecordExistConflictError(stack_tag)\n except AttributeError:\n stack_tag = stack_suffix\n env.tag = stack_tag\n record = \"{}.{}\".format(get_tag_record_name(stack_tag), zone_name)\n logger.info(\"fab_tasks::set_stack_name: \"\n \"Creating stack suffix '%s' \"\n \"for record '%s' \"\n \"in zone id '%s'...\", stack_suffix, record, zone_id)\n # Let DNS update DNSServerError propogate\n try:\n r53_conn.update_dns_record(zone_id, record, 'TXT', '\"{0}\"'.format(stack_suffix))\n env.stack_name = \"{0}-{1}\".format(get_legacy_name(), stack_suffix)\n except Exception:\n raise UpdateDNSRecordError\n print green(\"Stack tag is set to {0}\".format(stack_tag))\n return env.stack_name",
"def name(self):\n return self._env_name",
"def kernel_name():\n return \"python3\"",
"def name_tag(resource_name):\n return Join(\"\", [Ref('AWS::StackName'), '-', resource_name])",
"def name(self):\n return get_env_name(self.tool_name,\n self._python,\n self._requirements,\n self._tagged_env_vars)",
"def service_name():\n return \"salt_minion\" if \"bsd\" in sys.platform else \"salt-minion\"",
"def get_name():\n return config.APP_NAME",
"def pipeline_stack_name(self) -> str:\n return self._values.get(\"pipeline_stack_name\")",
"def runtime_stack(self) -> str:\n return pulumi.get(self, \"runtime_stack\")",
"def environment_name(self):\n return self.run_data.get('environment', {}).get('name')",
"def environment_label(self) -> str:\n return self._environment_label",
"def PLATFORM_NAME(self) -> str:",
"def get_egg_name():\n global eggname\n if not eggname:\n version = local('git describe --abbrev=4', capture=True)\n if version:\n version = '%s-%s' % (version, datetime.datetime.today().strftime('%Y%m%d'))\n eggname = APP_NAME + '-%s-py%s.egg' % (version.replace('-', '_'), python_version)\n return eggname"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
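Illustrative sketch only: a rough example of passing a solution stack name when declaring an environment in a Pulumi program. The module path (pulumi_aws_native), resource name, application name and the stack string are all assumptions; valid stack names can be listed with `aws elasticbeanstalk list-available-solution-stacks`.

import pulumi_aws_native as aws_native

env = aws_native.elasticbeanstalk.Environment(
    "web-env",
    application_name="my-app",  # hypothetical application
    # Placeholder platform version string; check the current list before use.
    solution_stack_name="64bit Amazon Linux 2 v3.4.0 running Python 3.8",
)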
Specifies the tier to use in creating this environment. The environment tier that you choose determines whether Elastic Beanstalk provisions resources to support a web application that handles HTTP(S) requests or a web application that handles background-processing tasks.
|
def tier(self) -> Optional[pulumi.Input['EnvironmentTierArgs']]:
return pulumi.get(self, "tier")
|
[
"def tier(self) -> Optional['outputs.EnvironmentTier']:\n return pulumi.get(self, \"tier\")",
"def set_tier(self, tier):\n self.single_selection_from_static_kendo_dropdown(self.tier_kendo_dropdown_locator, tier)",
"def tier(self) -> Optional[pulumi.Input['InstanceTier']]:\n return pulumi.get(self, \"tier\")",
"def tier(self) -> str:\n return pulumi.get(self, \"tier\")",
"def tier(self, tier):\n\n self._tier = tier",
"def tier_type(self, tier_type):\n\n self._tier_type = tier_type",
"def performance_tier(self) -> str:\n return pulumi.get(self, \"performance_tier\")",
"def performance_tier(self) -> Optional[pulumi.Input['VolumeConfigPerformanceTier']]:\n return pulumi.get(self, \"performance_tier\")",
"def access_tier(self) -> Optional[str]:\n return pulumi.get(self, \"access_tier\")",
"def AddTier(parser, is_patch=False):\n help_text = (\n \"Machine type for a shared-core instance e.g. ``db-g1-small''. \"\n 'For all other instances, instead of using tiers, customize '\n 'your instance by specifying its CPU and memory. You can do so '\n 'with the `--cpu` and `--memory` flags. Learn more about how '\n 'CPU and memory affects pricing: '\n 'https://cloud.google.com/sql/pricing.'\n )\n if is_patch:\n help_text += ' WARNING: Instance will be restarted.'\n\n parser.add_argument('--tier', '-t', required=False, help=help_text)",
"def access_tier(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_tier\")",
"def configure_tiers(self, datacenter, tier):\n print \"Enabling tier %s...\" % tier\n tiers = datacenter.listTiers()\n\n tiers[0].setName(tier)\n tiers[0].update()\n\n for i in range(1, 4):\n tiers[i].setEnabled(False)\n tiers[i].update()\n\n return tiers[0]",
"async def set_tier( # pylint: disable=inconsistent-return-statements\n self,\n tier: Union[str, _models.AccessTierRequired],\n snapshot: Optional[str] = None,\n version_id: Optional[str] = None,\n timeout: Optional[int] = None,\n rehydrate_priority: Optional[Union[str, _models.RehydratePriority]] = None,\n request_id_parameter: Optional[str] = None,\n lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,\n modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,\n **kwargs: Any\n ) -> None:\n error_map = {\n 401: ClientAuthenticationError,\n 404: ResourceNotFoundError,\n 409: ResourceExistsError,\n 304: ResourceNotModifiedError,\n }\n error_map.update(kwargs.pop(\"error_map\", {}) or {})\n\n _headers = kwargs.pop(\"headers\", {}) or {}\n _params = case_insensitive_dict(kwargs.pop(\"params\", {}) or {})\n\n comp: Literal[\"tier\"] = kwargs.pop(\"comp\", _params.pop(\"comp\", \"tier\"))\n cls: ClsType[None] = kwargs.pop(\"cls\", None)\n\n _lease_id = None\n _if_tags = None\n if lease_access_conditions is not None:\n _lease_id = lease_access_conditions.lease_id\n if modified_access_conditions is not None:\n _if_tags = modified_access_conditions.if_tags\n\n request = build_set_tier_request(\n url=self._config.url,\n tier=tier,\n snapshot=snapshot,\n version_id=version_id,\n timeout=timeout,\n rehydrate_priority=rehydrate_priority,\n request_id_parameter=request_id_parameter,\n lease_id=_lease_id,\n if_tags=_if_tags,\n comp=comp,\n version=self._config.version,\n template_url=self.set_tier.metadata[\"url\"],\n headers=_headers,\n params=_params,\n )\n request = _convert_request(request)\n request.url = self._client.format_url(request.url)\n\n _stream = False\n pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access\n request, stream=_stream, **kwargs\n )\n\n response = pipeline_response.http_response\n\n if response.status_code not in [200, 202]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)\n raise HttpResponseError(response=response, model=error)\n\n response_headers = {}\n if response.status_code == 200:\n response_headers[\"x-ms-client-request-id\"] = self._deserialize(\n \"str\", response.headers.get(\"x-ms-client-request-id\")\n )\n response_headers[\"x-ms-request-id\"] = self._deserialize(\"str\", response.headers.get(\"x-ms-request-id\"))\n response_headers[\"x-ms-version\"] = self._deserialize(\"str\", response.headers.get(\"x-ms-version\"))\n\n if response.status_code == 202:\n response_headers[\"x-ms-client-request-id\"] = self._deserialize(\n \"str\", response.headers.get(\"x-ms-client-request-id\")\n )\n response_headers[\"x-ms-request-id\"] = self._deserialize(\"str\", response.headers.get(\"x-ms-request-id\"))\n response_headers[\"x-ms-version\"] = self._deserialize(\"str\", response.headers.get(\"x-ms-version\"))\n\n if cls:\n return cls(pipeline_response, None, response_headers)",
"def performance_tier(self) -> Optional[pulumi.Input['VolumePerformanceTier']]:\n return pulumi.get(self, \"performance_tier\")",
"def tier_type_name(self, tier_type_name):\n\n self._tier_type_name = tier_type_name",
"def tier_number(self, tier_number):\n\n self._tier_number = tier_number",
"def target_tier(self) -> Optional[str]:\n return pulumi.get(self, \"target_tier\")",
"def post(self, tier):\n\n if self._from_cluster:\n raise exception.OperationNotPermitted\n\n try:\n tier = tier.as_dict()\n LOG.debug(\"storage tier post dict= %s\" % tier)\n\n new_tier = _create(self, tier)\n except exception.SysinvException as e:\n LOG.exception(e)\n raise wsme.exc.ClientSideError(_(\"Invalid data: failed to create \"\n \"a storage tier object\"))\n\n return StorageTier.convert_with_links(new_tier)",
"def sku_tier(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"sku_tier\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
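Illustrative sketch only: the tier is typically either the default web tier or a worker tier. The module path and the EnvironmentTierArgs field names are assumptions inferred from the type annotation above; the name/type values reflect Elastic Beanstalk's documented tiers.

import pulumi_aws_native as aws_native

worker_tier = aws_native.elasticbeanstalk.EnvironmentTierArgs(
    name="Worker",    # "WebServer" for the HTTP(S)-serving tier
    type="SQS/HTTP",  # "Standard" for the web tier
)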
Get an existing Environment resource's state with the given name, id, and optional extra properties used to qualify the lookup.
|
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Environment':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = EnvironmentArgs.__new__(EnvironmentArgs)
__props__.__dict__["application_name"] = None
__props__.__dict__["cname_prefix"] = None
__props__.__dict__["description"] = None
__props__.__dict__["endpoint_url"] = None
__props__.__dict__["environment_name"] = None
__props__.__dict__["operations_role"] = None
__props__.__dict__["option_settings"] = None
__props__.__dict__["platform_arn"] = None
__props__.__dict__["solution_stack_name"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["template_name"] = None
__props__.__dict__["tier"] = None
__props__.__dict__["version_label"] = None
return Environment(resource_name, opts=opts, __props__=__props__)
|
[
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Environment':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = EnvironmentArgs.__new__(EnvironmentArgs)\n\n __props__.__dict__[\"arm_template_display_name\"] = None\n __props__.__dict__[\"created_by_user\"] = None\n __props__.__dict__[\"deployment_properties\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"provisioning_state\"] = None\n __props__.__dict__[\"resource_group_id\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"type\"] = None\n __props__.__dict__[\"unique_identifier\"] = None\n return Environment(resource_name, opts=opts, __props__=__props__)",
"def get_state_by_id(state_id):\n for key, value in storage.all(\"State\").items():\n if state_id == value.id:\n return jsonify(value.to_dict())\n abort(404)",
"def get_state(state_id):\n try:\n ''' Check that state_id exists '''\n query = State.select().where(State.id == state_id)\n if not query.exists():\n raise LookupError('state_id')\n\n state = State.get(State.id == state_id)\n return state.to_dict(), 200\n except LookupError as e:\n abort(404)\n except Exception as e:\n abort(500)",
"def _get_env(cls, name: str) -> ApiEnvironment:\n envs = {e.name: e for e in cls._envs} # type: ignore\n if name not in envs:\n raise KeyError(f\"Invalid environment '{name}'. Choose from {list(envs.keys())}.\")\n return envs[name]",
"def get_state_by_name(exploration_id, state_name, strict=True):\n exploration = get_exploration_by_id(exploration_id)\n assert state_name\n\n # TODO(sll): This is too slow; improve it.\n state = None\n for candidate_state in exploration.states:\n if candidate_state.name == state_name:\n state = candidate_state\n break\n\n if strict and not state:\n raise Exception('State %s not found' % state_name)\n return state",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n force: Optional[pulumi.Input[bool]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n state: Optional[pulumi.Input[str]] = None) -> 'InstanceState':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _InstanceStateState.__new__(_InstanceStateState)\n\n __props__.__dict__[\"force\"] = force\n __props__.__dict__[\"instance_id\"] = instance_id\n __props__.__dict__[\"state\"] = state\n return InstanceState(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n arn: Optional[pulumi.Input[str]] = None,\n auth_mode: Optional[pulumi.Input[str]] = None,\n default_s3_location: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n engine_security_group_id: Optional[pulumi.Input[str]] = None,\n idp_auth_url: Optional[pulumi.Input[str]] = None,\n idp_relay_state_parameter_name: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n service_role: Optional[pulumi.Input[str]] = None,\n subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n url: Optional[pulumi.Input[str]] = None,\n user_role: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n workspace_security_group_id: Optional[pulumi.Input[str]] = None) -> 'Studio':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _StudioState.__new__(_StudioState)\n\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"auth_mode\"] = auth_mode\n __props__.__dict__[\"default_s3_location\"] = default_s3_location\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"engine_security_group_id\"] = engine_security_group_id\n __props__.__dict__[\"idp_auth_url\"] = idp_auth_url\n __props__.__dict__[\"idp_relay_state_parameter_name\"] = idp_relay_state_parameter_name\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"service_role\"] = service_role\n __props__.__dict__[\"subnet_ids\"] = subnet_ids\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"url\"] = url\n __props__.__dict__[\"user_role\"] = user_role\n __props__.__dict__[\"vpc_id\"] = vpc_id\n __props__.__dict__[\"workspace_security_group_id\"] = workspace_security_group_id\n return Studio(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Stack':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = StackArgs.__new__(StackArgs)\n\n __props__.__dict__[\"agent_version\"] = None\n __props__.__dict__[\"attributes\"] = None\n __props__.__dict__[\"chef_configuration\"] = None\n __props__.__dict__[\"clone_app_ids\"] = None\n __props__.__dict__[\"clone_permissions\"] = None\n __props__.__dict__[\"configuration_manager\"] = None\n __props__.__dict__[\"custom_cookbooks_source\"] = None\n __props__.__dict__[\"custom_json\"] = None\n __props__.__dict__[\"default_availability_zone\"] = None\n __props__.__dict__[\"default_instance_profile_arn\"] = None\n __props__.__dict__[\"default_os\"] = None\n __props__.__dict__[\"default_root_device_type\"] = None\n __props__.__dict__[\"default_ssh_key_name\"] = None\n __props__.__dict__[\"default_subnet_id\"] = None\n __props__.__dict__[\"ecs_cluster_arn\"] = None\n __props__.__dict__[\"elastic_ips\"] = None\n __props__.__dict__[\"hostname_theme\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"rds_db_instances\"] = None\n __props__.__dict__[\"service_role_arn\"] = None\n __props__.__dict__[\"source_stack_id\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"use_custom_cookbooks\"] = None\n __props__.__dict__[\"use_opsworks_security_groups\"] = None\n __props__.__dict__[\"vpc_id\"] = None\n return Stack(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n encryption: Optional[pulumi.Input[pulumi.InputType['ConfigurationStoreEncryptionArgs']]] = None,\n endpoint: Optional[pulumi.Input[str]] = None,\n identity: Optional[pulumi.Input[pulumi.InputType['ConfigurationStoreIdentityArgs']]] = None,\n local_auth_enabled: Optional[pulumi.Input[bool]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n primary_read_keys: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConfigurationStorePrimaryReadKeyArgs']]]]] = None,\n primary_write_keys: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConfigurationStorePrimaryWriteKeyArgs']]]]] = None,\n public_network_access: Optional[pulumi.Input[str]] = None,\n purge_protection_enabled: Optional[pulumi.Input[bool]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n secondary_read_keys: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConfigurationStoreSecondaryReadKeyArgs']]]]] = None,\n secondary_write_keys: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConfigurationStoreSecondaryWriteKeyArgs']]]]] = None,\n sku: Optional[pulumi.Input[str]] = None,\n soft_delete_retention_days: Optional[pulumi.Input[int]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'ConfigurationStore':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ConfigurationStoreState.__new__(_ConfigurationStoreState)\n\n __props__.__dict__[\"encryption\"] = encryption\n __props__.__dict__[\"endpoint\"] = endpoint\n __props__.__dict__[\"identity\"] = identity\n __props__.__dict__[\"local_auth_enabled\"] = local_auth_enabled\n __props__.__dict__[\"location\"] = location\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"primary_read_keys\"] = primary_read_keys\n __props__.__dict__[\"primary_write_keys\"] = primary_write_keys\n __props__.__dict__[\"public_network_access\"] = public_network_access\n __props__.__dict__[\"purge_protection_enabled\"] = purge_protection_enabled\n __props__.__dict__[\"resource_group_name\"] = resource_group_name\n __props__.__dict__[\"secondary_read_keys\"] = secondary_read_keys\n __props__.__dict__[\"secondary_write_keys\"] = secondary_write_keys\n __props__.__dict__[\"sku\"] = sku\n __props__.__dict__[\"soft_delete_retention_days\"] = soft_delete_retention_days\n __props__.__dict__[\"tags\"] = tags\n return ConfigurationStore(resource_name, opts=opts, __props__=__props__)",
"def get_state(api, entity_id):\n try:\n req = api(METH_GET, URL_API_STATES_ENTITY.format(entity_id))\n\n # req.status_code == 422 if entity does not exist\n\n return ha.State.from_dict(req.json()) \\\n if req.status_code == 200 else None\n\n except (HomeAssistantError, ValueError):\n # ValueError if req.json() can't parse the json\n _LOGGER.exception(\"Error fetching state\")\n\n return None",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Layer':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = LayerArgs.__new__(LayerArgs)\n\n __props__.__dict__[\"attributes\"] = None\n __props__.__dict__[\"auto_assign_elastic_ips\"] = None\n __props__.__dict__[\"auto_assign_public_ips\"] = None\n __props__.__dict__[\"custom_instance_profile_arn\"] = None\n __props__.__dict__[\"custom_json\"] = None\n __props__.__dict__[\"custom_recipes\"] = None\n __props__.__dict__[\"custom_security_group_ids\"] = None\n __props__.__dict__[\"enable_auto_healing\"] = None\n __props__.__dict__[\"install_updates_on_boot\"] = None\n __props__.__dict__[\"lifecycle_event_configuration\"] = None\n __props__.__dict__[\"load_based_auto_scaling\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"packages\"] = None\n __props__.__dict__[\"shortname\"] = None\n __props__.__dict__[\"stack_id\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"type\"] = None\n __props__.__dict__[\"use_ebs_optimized_instances\"] = None\n __props__.__dict__[\"volume_configurations\"] = None\n return Layer(resource_name, opts=opts, __props__=__props__)",
"def lookup(job_id: str) -> JobState:\n job = JobState(job_id)\n job.update()\n return job",
"def get_environment(env_name: str) -> Environment:\n _check_active_client()\n envs = _merlin_client.list_environment() # type: ignore\n for env in envs:\n if env.name == env_name:\n return env\n return None # type: ignore",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n asset_statuses: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ZoneAssetStatusArgs']]]]] = None,\n create_time: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n discovery_spec: Optional[pulumi.Input[pulumi.InputType['ZoneDiscoverySpecArgs']]] = None,\n display_name: Optional[pulumi.Input[str]] = None,\n labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n lake: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n resource_spec: Optional[pulumi.Input[pulumi.InputType['ZoneResourceSpecArgs']]] = None,\n state: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[str]] = None,\n uid: Optional[pulumi.Input[str]] = None,\n update_time: Optional[pulumi.Input[str]] = None) -> 'Zone':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ZoneState.__new__(_ZoneState)\n\n __props__.__dict__[\"asset_statuses\"] = asset_statuses\n __props__.__dict__[\"create_time\"] = create_time\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"discovery_spec\"] = discovery_spec\n __props__.__dict__[\"display_name\"] = display_name\n __props__.__dict__[\"labels\"] = labels\n __props__.__dict__[\"lake\"] = lake\n __props__.__dict__[\"location\"] = location\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"project\"] = project\n __props__.__dict__[\"resource_spec\"] = resource_spec\n __props__.__dict__[\"state\"] = state\n __props__.__dict__[\"type\"] = type\n __props__.__dict__[\"uid\"] = uid\n __props__.__dict__[\"update_time\"] = update_time\n return Zone(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n minimal_action: Optional[pulumi.Input[str]] = None,\n most_disruptive_allowed_action: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n preserved_state: Optional[pulumi.Input[pulumi.InputType['RegionPerInstanceConfigPreservedStateArgs']]] = None,\n project: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n region_instance_group_manager: Optional[pulumi.Input[str]] = None,\n remove_instance_state_on_destroy: Optional[pulumi.Input[bool]] = None) -> 'RegionPerInstanceConfig':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _RegionPerInstanceConfigState.__new__(_RegionPerInstanceConfigState)\n\n __props__.__dict__[\"minimal_action\"] = minimal_action\n __props__.__dict__[\"most_disruptive_allowed_action\"] = most_disruptive_allowed_action\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"preserved_state\"] = preserved_state\n __props__.__dict__[\"project\"] = project\n __props__.__dict__[\"region\"] = region\n __props__.__dict__[\"region_instance_group_manager\"] = region_instance_group_manager\n __props__.__dict__[\"remove_instance_state_on_destroy\"] = remove_instance_state_on_destroy\n return RegionPerInstanceConfig(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n deploy_access_levels: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ProjectProtectedEnvironmentDeployAccessLevelArgs']]]]] = None,\n environment: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n required_approval_count: Optional[pulumi.Input[int]] = None) -> 'ProjectProtectedEnvironment':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ProjectProtectedEnvironmentState.__new__(_ProjectProtectedEnvironmentState)\n\n __props__.__dict__[\"deploy_access_levels\"] = deploy_access_levels\n __props__.__dict__[\"environment\"] = environment\n __props__.__dict__[\"project\"] = project\n __props__.__dict__[\"required_approval_count\"] = required_approval_count\n return ProjectProtectedEnvironment(resource_name, opts=opts, __props__=__props__)",
"def _get_service_env(self, attrs):\n se_params = {\n 'environment__name': attrs.get('environment'),\n }\n if attrs.get('service_id'):\n se_params['service_id'] = attrs['service_id']\n elif attrs.get('service_uid'):\n se_params['service__ci_uid'] = attrs['service_uid']\n else:\n se_params['service__name'] = attrs['service']\n try:\n se = ServiceEnvironment.objects.get(**se_params)\n except ServiceEnvironment.DoesNotExist:\n params = \", \".join(\n [\"{}={}\".format(k, v) for k, v in se_params.items()]\n )\n raise ServiceEnvironmentDoesNotExistError(\n 'query params: {}'.format(params)\n )\n except ServiceEnvironment.MultipleObjectsReturned:\n params = \", \".join(\n [\"{}={}\".format(k, v) for k, v in se_params.items()]\n )\n raise MultipleServiceEnvironmentsReturned(\n 'query params: {}'.format(params)\n )\n return se",
"def find_state(self, name):\n return self.state_index.get(name, None)",
"def _query_app_state_by_id(self, app_id: str) -> str:\n state = self.last_known_state\n try:\n response = self.resource_mgr.cluster_application_state(application_id=app_id)\n except Exception as e:\n self.log.warning(f\"Query for application '{app_id}' state failed with exception: '{e}'. \"\n f\"Continuing with last known state = '{state}'...\")\n else:\n state = response.data['state']\n self.last_known_state = state\n\n return state"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
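Illustrative usage only: `get` is the usual Pulumi pattern for adopting an already-provisioned resource by ID so its outputs can be referenced without recreating it. The resource name and environment ID below are placeholders, and `Environment` refers to the class whose `get` method is shown above.

import pulumi

existing = Environment.get("imported-env", id="e-abcd1234")
pulumi.export("endpoint", existing.endpoint_url)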
The Amazon Resource Name (ARN) of the custom platform to use with the environment.
|
def platform_arn(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "platform_arn")
|
[
"def platform_arn(self) -> Optional[str]:\n return pulumi.get(self, \"platform_arn\")",
"def platform_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"platform_arn\")",
"def PLATFORM_NAME(self) -> str:",
"def get_platform_name(cls):\n platform_info = Utility.get_platform_info\n try:\n platform_name = platform_info[\"product\"]\n except:\n print(\"Utility: Failed to obtain platform name!\")\n platform_name = \"Unspecified\"\n return platform_name",
"def platform_key(self):\n return self._lookup_base64_attribute('platform_key')",
"def name(cls):\n\n system = platform.system()\n\n # Apply system map\n if system in NAME_MAP:\n system = NAME_MAP[system]\n\n return system",
"def environment_label(self) -> str:\n return self._environment_label",
"def get_platform_name(plat: Optional[Platform] = None) -> str:\n if plat is None:\n plat = get_platform()\n\n if plat == Platform.LINUX:\n return \"Linux\"\n if plat == Platform.WINDOWS:\n return \"Windows\"\n if plat == Platform.MAC:\n return \"MacOS\"\n return \"Unknown\"",
"def GetPlatform(self):\n arch = \"None\"\n # check architecture name\n if \"CMTCONFIG\" in os.environ:\n arch = os.environ[\"CMTCONFIG\"]\n elif \"SCRAM_ARCH\" in os.environ:\n arch = os.environ[\"SCRAM_ARCH\"]\n return arch",
"def platform(self):\n return self.config['production'].get('platform', 'python')",
"def get_platform(self, train):\n try:\n return train.platform\n except AttributeError:\n return ''",
"def platform(self):\n return self.random.choice([\n 'Laptop', \n 'Desktop', \n 'Workstation', \n 'Server', \n 'Virtual Machine', \n 'Container', \n 'Micro-Service', \n 'Droplet', \n 'SaaS'\n ])",
"def app_image_config_arn(self) -> Optional[str]:\n return pulumi.get(self, \"app_image_config_arn\")",
"def application_arn(self) -> Optional[str]:\n return pulumi.get(self, \"application_arn\")",
"def managed_by(self):\n if \"arn:aws:iam::aws:\" in self.arn:\n return \"AWS\" # pragma: no cover\n else:\n return \"Customer\"",
"def get_application_name(): # real signature unknown; restored from __doc__\n return \"\"",
"def fabric_arm_resource_name(self) -> Optional[str]:\n return pulumi.get(self, \"fabric_arm_resource_name\")",
"def architecture_name(self):\n return get_architecture_name(self.architecture)",
"def os(self):\n cluster_template = self.config.get(\"global\", \"cluster_template\", fallback=\"default\")\n return self.config.get(\"cluster {0}\".format(cluster_template), \"base_os\", fallback=\"alinux\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The name of an Elastic Beanstalk solution stack (platform version) to use with the environment.
|
def solution_stack_name(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "solution_stack_name")
|
[
"def solution_stack_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"solution_stack_name\")",
"def stack_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"stack_name\")",
"def stack_name(self) -> str:\n return jsii.get(self, \"stackName\")",
"def env_name(self):\n pass",
"def stack_name(self):\n stack_name = getattr(self, '__stack_name', None)\n if (\n self.args.stack_name and\n not stack_name\n ):\n stack_name = self.args.stack_name\n elif not stack_name:\n stack_name = \"nephoria-stack-\" + str(int(time.time()))\n\n setattr(self, '__stack_name', stack_name)\n return stack_name",
"def get_stack_name(new=False):\n if new:\n # For back-compatibility\n set_stack_name()\n\n try:\n stack_tag = get_env_tag()\n except AttributeError:\n stack_tag = 'active'\n env.tag = stack_tag\n if not hasattr(env, 'stack_name'):\n # get_config needs a stack_name so this is a hack because we don't\n # know it yet...\n env.stack_name = 'temp'\n zone_name = get_zone_name()\n zone_id = get_zone_id()\n logger.info(\"fab_tasks::get_stack_name: Found master zone '%s' in config...\", zone_name)\n # get record name in the format of: stack.[stack_tag].[app]-[env]\n record_name = get_tag_record_name(stack_tag)\n dns_name = \"{}.{}\".format(record_name, zone_name)\n r53_conn = get_connection(R53)\n try:\n # get stack id\n stack_suffix = r53_conn.get_record(zone_name, zone_id, record_name, 'TXT').replace('\"', \"\")\n logger.info(\"fab_tasks::get_stack_name: Found stack suffix '%s' \"\n \"for dns record '%s'... \", stack_suffix, dns_name)\n legacy_name = get_legacy_name()\n env.stack_name = \"{0}-{1}\".format(legacy_name, stack_suffix)\n logger.info(\"fab_tasks::get_stack_name: Found stack name '%s'...\", env.stack_name)\n except Exception:\n raise DNSRecordNotFoundError(dns_name)\n\n return env.stack_name",
"def set_stack_name():\n # create a stack id\n r53_conn = get_connection(R53)\n zone_name = get_zone_name()\n zone_id = get_zone_id()\n stack_suffix = uuid.uuid4().__str__()[-8:]\n try:\n stack_tag = get_env_tag()\n if stack_tag == 'active':\n # print red(\"'Active' tag is reserved, please change a tag. \")\n raise ActiveTagExistConflictError()\n elif r53_conn.hastag(zone_name, zone_id, get_tag_record_name(stack_tag)):\n # print red(\"{} exists, please change a tag. \".format(env.tag))\n raise TagRecordExistConflictError(stack_tag)\n except AttributeError:\n stack_tag = stack_suffix\n env.tag = stack_tag\n record = \"{}.{}\".format(get_tag_record_name(stack_tag), zone_name)\n logger.info(\"fab_tasks::set_stack_name: \"\n \"Creating stack suffix '%s' \"\n \"for record '%s' \"\n \"in zone id '%s'...\", stack_suffix, record, zone_id)\n # Let DNS update DNSServerError propogate\n try:\n r53_conn.update_dns_record(zone_id, record, 'TXT', '\"{0}\"'.format(stack_suffix))\n env.stack_name = \"{0}-{1}\".format(get_legacy_name(), stack_suffix)\n except Exception:\n raise UpdateDNSRecordError\n print green(\"Stack tag is set to {0}\".format(stack_tag))\n return env.stack_name",
"def name(self):\n return self._env_name",
"def kernel_name():\n return \"python3\"",
"def name_tag(resource_name):\n return Join(\"\", [Ref('AWS::StackName'), '-', resource_name])",
"def name(self):\n return get_env_name(self.tool_name,\n self._python,\n self._requirements,\n self._tagged_env_vars)",
"def service_name():\n return \"salt_minion\" if \"bsd\" in sys.platform else \"salt-minion\"",
"def get_name():\n return config.APP_NAME",
"def pipeline_stack_name(self) -> str:\n return self._values.get(\"pipeline_stack_name\")",
"def runtime_stack(self) -> str:\n return pulumi.get(self, \"runtime_stack\")",
"def environment_name(self):\n return self.run_data.get('environment', {}).get('name')",
"def environment_label(self) -> str:\n return self._environment_label",
"def PLATFORM_NAME(self) -> str:",
"def get_egg_name():\n global eggname\n if not eggname:\n version = local('git describe --abbrev=4', capture=True)\n if version:\n version = '%s-%s' % (version, datetime.datetime.today().strftime('%Y%m%d'))\n eggname = APP_NAME + '-%s-py%s.egg' % (version.replace('-', '_'), python_version)\n return eggname"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
get global ip address
|
def get_global_ip():
network_info_providers = [
'http://api.ipify.org/',
'http://myip.dnsomatic.com',
'http://inet-ip.info/ip',
'http://v4.ident.me/',
]
random.shuffle(network_info_providers)
for url in network_info_providers:
try:
return requests.get(url).text.lstrip().rstrip()
except Exception:
continue
else:
log.info('cannot find global ip')
return ""
|
[
"def get_global_ip() -> str:\n return urllib.request.urlopen(\"https://icanhazip.com\").read().decode().strip()",
"def get_ip():\n return '219.45.143.143'",
"def get_local_host_ip(self) -> str:",
"def _get_local_ip():\n return netutils.get_my_ipv4()",
"def get_IP(): \n \n return socket.gethostbyname(socket.gethostname())",
"def get_IP():\n\n return socket.gethostbyname(socket.gethostname())",
"def getLocalIP():\r\n\t\r\n\treturn socket.gethostbyname(socket.gethostname()) #Just a slight hack\r",
"def get_ip():\n rel_ip = None\n main_ip = unit_private_ip() if (\n not config.get('host') or (config.get('host') == \"none\")\n ) else config.get('host')\n if not main_ip or (main_ip == '0.0.0.0'):\n rel_ip = unit_private_ip()\n if not rel_ip:\n rel_ip = main_ip\n return main_ip, rel_ip",
"def get_own_ip():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n s.connect(('8.8.8.8', 80))\n ip = s.getsockname()[0]\n except:\n # fallback\n ip = '127.0.0.1'\n finally:\n s.close()\n return ip",
"def get_local_ip():\n return socket.gethostbyname(get_computer_name())",
"def getLocalhostIP():\n return socket.getaddrinfo('localhost', 0)[0][4][0]",
"def getPublicIP():\r\n\t\r\n\treturn request.urlopen('http://ip.42.pl/raw').read().decode()",
"def get_ip_address(self):\n return self.crb['tester IP']",
"def get_ip_address():\n for iface in netifaces.interfaces():\n if iface != 'lo':\n address_list = netifaces.ifaddresses(iface)\n if netifaces.AF_INET in address_list:\n try:\n return address_list[netifaces.AF_INET][0]['addr']\n except Exception:\n log.exception(\"Unable to determine ipaddress\")",
"def get_global_ip_ipv6():\n network_info_providers = [\n 'http://v6.ipv6-test.com/api/myip.php',\n 'http://v6.ident.me/',\n ]\n random.shuffle(network_info_providers)\n for url in network_info_providers:\n try:\n return requests.get(url).text.lstrip().rstrip()\n except Exception:\n continue\n else:\n log.info('cannot find global ipv6 ip')\n return \"\"",
"def get_ip():\n g = geocoder.ip('me')\n return g.latlng",
"def get_own_ip():\n # LINUX AKA RASPBERRY PI\n import os\n ipv4 = os.popen('ip addr show wlan0').read().split(\"inet \")[1].split(\"/\")[0]\n return ipv4\n\n # MAC OS\n # return ((([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith(\"127.\")] or [[(s.connect((\"8.8.8.8\", 53)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) + [\"no IP found\"])[0])",
"def getPublicIp():\n global PUBLIC_IP\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n # doesn't even have to be reachable\n s.connect(('10.255.255.255', 1))\n PUBLIC_IP = s.getsockname()[0]\n except Exception:\n PUBLIC_IP = '127.0.0.1'\n finally:\n s.close()\n return PUBLIC_IP",
"def get_ip():\n\ts = session() \n\tr = s.get('http://icanhazip.com/')\n\treturn r.content.strip()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
get global ipv6 address
|
def get_global_ip_ipv6():
network_info_providers = [
'http://v6.ipv6-test.com/api/myip.php',
'http://v6.ident.me/',
]
random.shuffle(network_info_providers)
for url in network_info_providers:
try:
return requests.get(url).text.lstrip().rstrip()
except Exception:
continue
else:
log.info('cannot find global ipv6 ip')
return ""
|
[
"def get_main_ipv6():\n try:\n # No data is actually transmitted (UDP)\n s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)\n s.connect( ('2001:4860:4860::8888', 53) )\n real_ip = s.getsockname()[0]\n s.close()\n return real_ip\n except socket.error as e:\n logging.error(\"Cannot retrieve current IPv6 address: %s\" % e)\n return None",
"def GlobalIpv6Address(self):\n if self.force_auto_sync:\n self.get('GlobalIpv6Address')\n return self._GlobalIpv6Address",
"def ipv6_address(self) -> str:\n return pulumi.get(self, \"ipv6_address\")",
"def _get_ipv6_gateway_address(self):\n return self.__ipv6_gateway_address",
"def get_local_ipv6_address():\n getIPV6_process = subprocess.Popen(\"ipconfig\", stdout = subprocess.PIPE)\n output = (getIPV6_process.stdout.read())\n ipv6_pattern='(([a-f0-9]{1,4}:){7}[a-f0-9]{1,4})'\n # m = re.search(ipv6_pattern, str(output))\n # 找到所有匹配的ipv6地址\n m = re.findall(ipv6_pattern, str(output))\n if m != []:\n # return m.group()[1]\n # 返回临时IPv6\n print type(m)\n return m[1][0]\n else:\n return None",
"def Ipv6Address(self):\n\t\treturn self._get_attribute('ipv6Address')",
"def ipv6_address(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ipv6_address\")",
"def ipv6(self):\n return self._ipv6",
"def ip6_addr(self) -> str:\n return self.get_interface_ip6addr(self.iface_dut)",
"def ipv6_vip_address(self):\n return self._ipv6_vip_address",
"def _get_ipv6route(self):\n return self.__ipv6route",
"def DrIpv6Addr(self):\n if self.force_auto_sync:\n self.get('DrIpv6Addr')\n return self._DrIpv6Addr",
"def ipv6_address(self) -> pulumi.Output[str]:\n warnings.warn(\"\"\"use `ipv6_addresses` attribute instead\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"ipv6_address is deprecated: use `ipv6_addresses` attribute instead\"\"\")\n\n return pulumi.get(self, \"ipv6_address\")",
"def ip_v6(self) -> str:\n ipv6 = IPv6Address(\n self.random.randint(\n 0, 2 ** 128 - 1,\n ),\n )\n return str(ipv6)",
"def ipv6_address(self) -> Optional[pulumi.Input[str]]:\n warnings.warn(\"\"\"use `ipv6_addresses` attribute instead\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"ipv6_address is deprecated: use `ipv6_addresses` attribute instead\"\"\")\n\n return pulumi.get(self, \"ipv6_address\")",
"def ipv6_addresses(self):\n return self._resolve(\"ahostsv6\")",
"def LinkLocalIpv6Address(self):\n if self.force_auto_sync:\n self.get('LinkLocalIpv6Address')\n return self._LinkLocalIpv6Address",
"def get_ipv6(host=\"2001:4860:4860::8888\", port=80):\n return _get_ip(host, port, family=socket.AF_INET6)",
"def ipv6_address(self) -> str:\n warnings.warn(\"\"\"The IPv6 address assigned to the instance. (Deprecated) This property was applicable only to First Generation instances.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"ipv6_address is deprecated: The IPv6 address assigned to the instance. (Deprecated) This property was applicable only to First Generation instances.\"\"\")\n\n return pulumi.get(self, \"ipv6_address\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Implementation of a binary operator between DataFrames on different indices. A new DataFrame representing an in-memory DolphinDB table is returned. It is guaranteed that both DataFrames have no where_expr.
|
def _binary_op_on_different_indices(self, other, func, axis): # TODO: add axis check
def merge_columns(self_columns, other_columns):
"""
Align the input columns, filling the missing columns with None

            Examples
            --------
            >>> merge_columns(
            ...     ["a", "b", "ba", "d", "f"],
            ...     ["e", "c", "d", "g", "ga", "a"]
            ... )
            (('a','a'),('b',None),('ba',None),(None,'c'),('d','d'),(None,'e'),('f',None),(None,'g'),(None,'ga'))
"""
sorted_self_columns, sorted_other_columns = sorted(self_columns), sorted(other_columns)
self_idx = other_idx = 0
self_len, other_len = len(self_columns), len(other_columns)
while self_idx < self_len and other_idx < other_len:
curr_self_column, curr_other_column = sorted_self_columns[self_idx], sorted_other_columns[other_idx]
if curr_self_column == curr_other_column:
yield curr_self_column, curr_other_column
self_idx += 1
other_idx += 1
elif curr_self_column < curr_other_column:
yield curr_self_column, None
self_idx += 1
else:
yield None, curr_other_column
other_idx += 1
while self_idx < self_len:
yield sorted_self_columns[self_idx], None
self_idx += 1
while other_idx < other_len:
yield None, sorted_other_columns[other_idx]
other_idx += 1
assert isinstance(self, _Frame)
assert isinstance(other, _Frame)
if ((not self._in_memory and len(self._index_columns) == 0)
or (not other._in_memory and len(other._index_columns) == 0)):
raise ValueError("Frame has no default index if it is not in memory")
session = self._session
self_var_name, other_var_name = self._var_name, other._var_name
if other._is_dataframe_like:
self_data_columns = self._data_columns
other_data_columns = other._data_columns
index_list, from_clause = _generate_joiner(
self_var_name, other_var_name, self._index_columns, other._index_columns)
if self_data_columns == other_data_columns:
select_list = (f"{func}({self_var_name}.{c}, {other_var_name}.{c}) as {c}"
for c in self_data_columns)
data_columns = self_data_columns
else:
merged_columns = list(merge_columns(self_data_columns, other_data_columns))
select_list = (f"00f as {s if o is None else o}" if s is None or o is None
else f"{func}({self_var_name}.{s}, {other_var_name}.{s}) as {s}"
for s, o in merged_columns)
data_columns = [s if o is None else o for s, o in merged_columns]
select_list = itertools.chain(index_list, select_list)
script = sql_select(select_list, from_clause)
elif other._is_series_like:
self_data_columns = self._data_columns
other_data_column = other._data_columns[0]
index_list, from_clause = _generate_joiner(
self._var_name, other._var_name, self._index_columns, other._index_columns)
select_list = (f"{func}({self_var_name}.{c}, {other_var_name}.{other_data_column}) as {c}"
for c in self_data_columns)
data_columns = self_data_columns
select_list = itertools.chain(index_list, select_list)
script = sql_select(select_list, from_clause)
return self._get_from_script(
session, script, data_columns=data_columns, index_map=self._index_map, index=self._index)
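
For illustration only, a minimal standalone sketch (plain Python, hypothetical column and table names) of how the aligned pairs produced by merge_columns translate into the per-column select expressions built above; the 00f placeholder is simply what the code emits for columns missing on one side:

# Hypothetical aligned pairs, as merge_columns(["a", "b"], ["b", "c"]) would yield:
merged_columns = [("a", None), ("b", "b"), (None, "c")]

func, self_var, other_var = "add", "tbl_self", "tbl_other"  # hypothetical names
select_list = [
    f"00f as {s if o is None else o}" if s is None or o is None
    else f"{func}({self_var}.{s}, {other_var}.{s}) as {s}"
    for s, o in merged_columns
]
print(select_list)
# ['00f as a', 'add(tbl_self.b, tbl_other.b) as b', '00f as c']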
|
[
"def _compare(query, targets, on_cols, func, suffix):\n on_index = targets.index\n table = pd.DataFrame(index=on_index)\n\n if on_cols is None:\n return table\n\n compared_cols = on_cols.copy()\n if type(compared_cols) == str:\n compared_cols = [compared_cols]\n assert isinstance(compared_cols, list)\n\n for c in compared_cols:\n assert isinstance(c, str)\n colname = c + suffix\n if pd.isnull(query[c]):\n table[colname] = None\n else:\n table[colname] = targets[c].apply(lambda r: func(r, query[c]))\n return table",
"def union_all(x: DataFrame, y: DataFrame) -> DataFrame:\n _check_xy(x, y)\n return bind_rows(x, y, __calling_env=CallingEnvs.REGULAR)",
"def _copy_experimental_conditions_to_second_df(self, df1, df1_cols, df2, df2_cols):\n _cols_ = np.array([df1_cols, df2_cols])\n has_cols = _cols_ != set([])\n exp_cols = _cols_[has_cols]\n if len(exp_cols) == 1: # only one DataFrame has additional columns\n _dfs_ = [df1, df2]\n exp_cols = list(exp_cols[0])\n df_with_cols, df_without_cols = _dfs_[list(has_cols).index(True)], _dfs_[list(has_cols).index(False)]\n exp_cols_only_df = df_with_cols[exp_cols].drop_duplicates()\n num_unique_exp_rows = len(exp_cols_only_df)\n len_df_without_cols = len(df_without_cols)\n\n try:\n expanded_df_without_cols = pd.concat([df_without_cols] * num_unique_exp_rows, ignore_index=True)\n expanded_df_without_cols[exp_cols] = pd.DataFrame(np.repeat(\n exp_cols_only_df.values, len_df_without_cols, axis=0),\n columns=exp_cols)\n return tuple([(expanded_df_without_cols, df_with_cols)[i] for i in _cols_ != set([])]\n + [set(exp_cols), exp_cols_only_df])\n\n except ValueError: # breaks when df_with_out_columns is of len 0.\n return tuple([(pd.DataFrame(columns=list(set(exp_cols)|set(df_without_cols.columns))), df_with_cols)[i]\n for i in _cols_ != set([])] + [set(exp_cols), exp_cols_only_df])\n else:\n return self._combine_experimental_conditions(df1, df1_cols, df2, df2_cols)",
"def __eq__(self, other: Any) -> ColumnOperators: # type: ignore[override]\n return self.operate(eq, other)",
"def intersect(\n self,\n df1: DataFrame,\n df2: DataFrame,\n distinct: bool = True,\n metadata: Any = None,\n ) -> DataFrame: # pragma: no cover\n raise NotImplementedError",
"def dataframe_diff(xxa,xxb):\n\n xa=pd.DataFrame(xxa)\n xb=pd.DataFrame(xxb)\n merged = xa.merge(xb, indicator=True, how='outer')\n\n diff=merged[merged['_merge'] != 'both']\n\n return diff",
"def broadcast(self, df: DataFrame) -> DataFrame: # pragma: no cover\n raise NotImplementedError",
"def get_difference(data1, data2):\n\tdifference = (data1 != data2) & ((data1 == data1) & (data2 == data2))\n\t# the second (after or) part makes true cells for cells that are NaN in both dataframes\n\n\td1 = data1[difference.any(axis='columns')]\n\td2 = data2[difference.any(axis='columns')]\n\n\tresult = DataFrame(index=d1.index)\n\tfor col in difference.columns:\n\t\tif difference[col].any():\n\t\t\tresult[f'{col}_1'] = d1[col]\n\t\t\tresult[f'{col}_2'] = d2[col]\n\n\treturn result",
"def join(dataframe_1 ,dataframe_2 ,how ,left_on ,right_on ,select):\n if isinstance(dataframe_1, gftIO.GftTable):\n otv_0 = dataframe_1.as_mutable_column_tab()\n otv_0 = otv_0.dropna()\n else:\n otv_0 = dataframe_1\n left_on_list = get_col_names_by_acronym(otv_0, left_on)\n\n if isinstance(dataframe_2, gftIO.GftTable):\n otv_1 = dataframe_2.as_mutable_column_tab()\n otv_1 = otv_1.dropna()\n else:\n otv_1 = dataframe_2\n\n right_on_list = get_col_names_by_acronym(otv_1, right_on)\n df_merge = pd.merge(otv_0, otv_1, how=how, left_on=left_on_list, right_on=right_on_list)\n select_list = get_col_names_by_acronym(df_merge, select)\n return df_merge.loc[:, select_list]",
"def query(self, *args, **kwargs):\n df = self.to_pandas()\n df2 = df.query(*args, **kwargs)\n return self.from_pandas(df2)",
"def _inherit_binary_operation(self, other, op):\n sdata = self.data\n if isinstance(op, basestring) and hasattr(sdata, op):\n bound_op = getattr(sdata, op)\n else:\n def bound_op(odata):\n return op(sdata, odata)\n\n bset = self.bset\n if isinstance(other, type(self)) or isinstance(self, type(other)):\n obset = other.bset\n if not ((bset == obset) or\n bset.shape == () or\n obset.shape == ()):\n raise ValueError(\"instances of {} must be defined over \"\n \"instances of {} that compare equal for \"\n \"binary operations to be defined\"\n .format(self.__class__.__name__,\n bset.__class__.__name__))\n new_data = bound_op(other.data)\n if bset.shape == ():\n bset = obset\n else:\n new_data = bound_op(other)\n\n return type(self)(new_data, bset)",
"def outer_join(self, table: Union[str, sa.Table], left_where: Union[str, sa.Column, BinaryExpression], right_where: Union[str, sa.Column] = None, alias: str = None) -> B[B, E]:",
"def _(x: DataFrame, y: DataFrame) -> DataFrame:\n _check_xy(x, y)\n indicator = \"__datar_setdiff__\"\n out = pandas.merge(x, y, how=\"left\", indicator=indicator)\n\n from .distinct import distinct\n\n return distinct(\n out[out[indicator] == \"left_only\"]\n .drop(columns=[indicator])\n .reset_index(drop=True),\n __calling_env=CallingEnvs.REGULAR,\n )",
"def dataframe_crossjoin(df1, df2, **kwargs):\n df1['_tmpkey'] = 1\n df2['_tmpkey'] = 1\n\n res = pd.merge(df1, df2, on='_tmpkey', **kwargs).drop('_tmpkey', axis=1)\n res.index = pd.MultiIndex.from_product((df1.index, df2.index))\n\n df1.drop('_tmpkey', axis=1, inplace=True)\n df2.drop('_tmpkey', axis=1, inplace=True)\n\n return res",
"def test_arithmetic_operations() -> None:\n\n # one two\n # 0 1\n # 2 3\n # 4 5\n df = pd.DataFrame(np.arange(6).reshape((3, 2)), columns=[\"one\", \"two\"])\n\n series = df.iloc[0] # first row == (0, 1)\n\n assert series.index.values.tolist() == [\"one\", \"two\"]\n assert series.values.tolist() == [0, 1]\n\n # Arithmetic operations between frames and series match the index of the\n # series (column names) on the columns of the frame, broadcasting over the\n # rows by default.\n\n df2 = df.sub(series) # axis=1\n\n # one two\n # 0 0\n # 2 2\n # 4 4\n assert df2.values.flatten().tolist() == [0, 0, 2, 2, 4, 4]\n\n # If you want to match on rows, use axis=0. This will match the index of the\n # series (row indices) on the rows of the frame, broadcasting over the\n # columns by default.\n series = df.loc[:, \"one\"]\n\n df2 = df.sub(series, axis=0)\n # one two\n # 0 1\n # 0 1\n # 0 1\n assert df2.values.flatten().tolist() == [0, 1, 0, 1, 0, 1]",
"def test_df_comparison(self):\n self.read_container = self.roundtripContainer()\n df_obt = self.read_container.to_dataframe()\n\n tsa = self.read_nwbfile.get_acquisition('a')\n df_exp = pd.DataFrame({\n 'foo': [1, 2, 3, 4],\n 'bar': ['fish', 'fowl', 'dog', 'cat'],\n 'start_time': [0.2, 0.25, 0.30, 0.35],\n 'stop_time': [0.25, 0.30, 0.40, 0.45],\n 'timeseries': [[(2, 1, tsa)],\n [(3, 1, tsa)],\n [(3, 1, tsa)],\n [(4, 1, tsa)]],\n 'tags': [[''], [''], ['fizz', 'buzz'], ['qaz']]\n },\n index=pd.Index(np.arange(4, dtype=np.int64), name='id')\n )\n # pop the timeseries column out because ts_obt has rows of lists of tuples and ts_exp has rows of lists of lists\n ts_obt = df_obt.pop('timeseries')\n ts_exp = df_exp.pop('timeseries')\n pd.testing.assert_frame_equal(df_exp, df_obt, check_like=True, check_dtype=False)\n\n # check the timeseries columns match\n for ex, obt in zip(ts_exp, ts_obt):\n self.assertEqual(ex[0][0], obt[0][0])\n self.assertEqual(ex[0][1], obt[0][1])\n self.assertContainerEqual(ex[0][2], obt[0][2])",
"def join_where(self, table, one, operator, two, type='inner'):\n return self.join(table, one, operator, two, type, True)",
"def view_or_copy(df, df2):\r\n if df.values.base is df2.values.base:\r\n print('A view')\r\n else:\r\n print('A copy')",
"def test_update_df(self):\n df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])\n pipeline = PandasQueryPipeline(df)\n pipeline.add_query(lambda df: df + 3, is_output=True)\n new_df = df * -1\n pipeline.update_df(new_df)\n output_df = pipeline.compute_batch()[0]\n df_equals((df * -1) + 3, output_df)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Open a window to compose an email, with the EDI invoice DIAN template message loaded by default
|
def action_invoice_dian_resend(self):
self.ensure_one()
template = self.env.ref('l10n_co_e-invoice.email_template_edi_invoice_dian', False)
compose_form = self.env.ref('mail.email_compose_message_wizard_form', False)
ctx = dict(
default_model='account.invoice',
default_res_id=self.id,
default_use_template=bool(template),
default_template_id=template and template.id or False,
default_composition_mode='comment',
mark_invoice_as_sent=True,
)
return {
'name': _('Compose Email'),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form.id, 'form')],
'view_id': compose_form.id,
'target': 'new',
'context': ctx,
}
|
[
"def action_invoice_sent(self):\n self.ensure_one()\n template = self.env.ref('account.email_template_edi_invoice', False)\n compose_form = self.env.ref('mail.email_compose_message_wizard_form', False)\n att = self._create_attachment()\n atts = []\n if template.attachment_ids:\n for a in template.attachment_ids:\n atts.append(a.id)\n atts.append((6, 0, [att.id]))\n template.attachment_ids = atts\n ctx = dict(\n default_model='account.invoice',\n default_res_id=self.id,\n default_use_template=bool(template),\n default_template_id=template.id,\n default_composition_mode='comment',\n mark_invoice_as_sent=True,\n )\n return {\n 'name': _('Compose Email'),\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'mail.compose.message',\n 'views': [(compose_form.id, 'form')],\n 'view_id': compose_form.id,\n 'target': 'new',\n 'context': ctx,\n }",
"def send_payslip(self):\n self.ensure_one()\n ir_model_data = self.env['ir.model.data']\n try:\n template_id = ir_model_data.get_object_reference('send_email_payslips', 'email_template_hr_payslip')[1]\n except ValueError:\n template_id = False\n try:\n compose_form_id = ir_model_data.get_object_reference('mail', 'email_compose_message_wizard_form')[1]\n except ValueError:\n compose_form_id = False\n\n print 'user', self.employee_id.user_id\n\n user = self.env['res.users'].browse(self.employee_id.user_id.id)\n print 'partner_id', user.partner_id.id\n ctx = dict()\n ctx.update({\n 'default_model': 'hr.payslip',\n 'default_res_id': self.ids[0],\n 'default_use_template': bool(template_id),\n 'default_template_id': template_id,\n 'default_composition_mode': 'comment',\n 'default_partner_id': user.partner_id.id,\n })\n return {\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'mail.compose.message',\n 'views': [(compose_form_id, 'form')],\n 'view_id': compose_form_id,\n 'target': 'new',\n 'context': ctx,\n }",
"def Open(self):\r\n self._Owner._DoCommand('OPEN VOICEMAIL %s' % self.Id)",
"def open(self, args):\n self.mailClientApp.open(args)",
"def _show_popup(self) -> None:\n\n top = tk.Toplevel()\n email_list_len = len(self.get_recipients())\n msg = tk.messagebox.askquestion('Confirm send emails', 'Are you sure you want to email {} client{}?'\n .format(email_list_len, \"s\" if email_list_len > 1 else \"\"),\n icon='warning')\n if msg == \"yes\":\n self._disable_buttons()\n email_process(self.get_recipients())\n top.destroy()\n else:\n top.destroy()",
"def open_client(introducing, msg):\n subject = urllib.quote(\"Introduction from %s\" % settings.name)\n body = urllib.quote(msg)\n s = \"mailto:?subject=%s&body=%s\" % (subject, body)\n if \"linux\" in sys.platform:\n proc_args = [\"xdg-open\", s]\n elif \"darwin\" in sys.platform:\n proc_args = [\"open\", s]\n # TODO: os.startfile works in Windows?\n p = subprocess.Popen(proc_args)",
"def open_email(self):\n self.driver.execute_script(\"window.scrollTo(0, 700)\")\n self.click_on_element_by_css(tep.OPEN_EMAIL_BUTTON)",
"def open_invoice(self):\n\t\tcontext = dict(self._context or {})\n\t\twiz_form_id = self.env['ir.model.data'].get_object_reference('account', 'invoice_form')[1]\n\t\treturn {\n\t\t\t'view_type': 'form',\n\t\t\t'view_id': wiz_form_id,\n\t\t\t'view_mode': 'form',\n\t\t\t'res_model': 'account.invoice',\n\t\t\t'res_id':self.invc_id.id,\n\t\t\t'type': 'ir.actions.act_window',\n\t\t\t'target': 'current',\n\t\t\t'context': context,\n\t\t\t}",
"def openemail(event):\n import webbrowser\n webbrowser.open(emailurl)\n close(event)",
"def createSendMailFrame(self, empireDict):\n self.destroyTempFrames()\n self.sendMailInfo = anwp.gui.sendmailinfo.SendMailInfoFrame(self, self.game.app, empireDict)\n self.tempFrames.append(self.sendMailInfo)",
"def email(self):\r\n webbrowser.open(\"mailto: gorm90@gmail.com\")",
"def mailManagement():\n #mail_app = \n return render_template(\n 'email.html',\n title='mailManagement',\n year=datetime.now().year,\n message='Your contact page.',\n current_time = datetime.utcnow()\n )",
"def display_mail(self, event):\r\n indexes = self.msgsubs.curselection()\r\n if len(indexes) != 1:\r\n return\r\n self.message.delete(1.0, \r\n END)\r\n pk, msg = self.msglist[int(indexes[0])]\r\n for header_name in \"Subject\", \"Date\", \"From\":\r\n hdr = msg[header_name]\r\n if hdr:\r\n self.message.insert(INSERT, \r\n \"{0}: {1}\\n\".format(header_name,\r\n hdr))\r\n self.message.insert(END, \r\n \"\\n\")\r\n if msg.is_multipart():\r\n self.message.insert(END, \r\n \"MULTIPART MESSAGE - SORRY!\")\r\n self.message.insert(END, \r\n msg.get_payload())",
"def demo_launch():\n import acm\n shell = acm.UX().SessionManager().Shell()\n dialog = TextObjectEditDialog(\"pb_funds\", editable=True)\n builder = dialog.create_layout()\n acm.UX().Dialogs().ShowCustomDialogModal(shell, builder, dialog)",
"def send_mail(self):\n context2 = self.env.context.copy()\n if self.model and self.id_active and self.env.context.get('send_mail_wkf_signal'):\n obj = self.env[self.model].browse(self.id_active)\n obj.signal_workflow(self.env.context['send_mail_wkf_signal'])\n context2['thread_model'] = self.model\n if self.model and self.id_active and self.env.context.get('send_mail_method_next'):\n obj = self.env[self.model].browse(self.id_active)\n getattr(obj, self.env.context['send_mail_method_next'])()\n \n return super(mail_compose_message, self.with_context(context2)).send_mail()",
"def send_mail_action(self):\n auto_commit=False \n for wizard in self:\n # Duplicate attachments linked to the email.template.\n # Indeed, basic mail.compose.message wizard duplicates attachments in mass\n # mailing mode. But in 'single post' mode, attachments of an email template\n # also have to be duplicated to avoid changing their ownership.\n if wizard.attachment_ids and wizard.composition_mode != 'mass_mail' and wizard.template_id:\n new_attachment_ids = []\n for attachment in wizard.attachment_ids:\n if attachment in wizard.template_id.attachment_ids:\n new_attachment_ids.append(attachment.copy({'res_model': 'mail.compose.message', 'res_id': wizard.id}).id)\n else:\n new_attachment_ids.append(attachment.id)\n wizard.write({'attachment_ids': [(6, 0, new_attachment_ids)]})\n\n # Mass Mailing\n mass_mode = wizard.composition_mode in ('mass_mail', 'mass_post')\n\n Mail = self.env['mail.mail']\n ActiveModel = self.env[wizard.model if wizard.model else 'mail.thread']\n if wizard.template_id:\n # template user_signature is added when generating body_html\n # mass mailing: use template auto_delete value -> note, for emails mass mailing only\n Mail = Mail.with_context(mail_notify_user_signature=False)\n ActiveModel = ActiveModel.with_context(mail_notify_user_signature=False, mail_auto_delete=wizard.template_id.auto_delete)\n if not hasattr(ActiveModel, 'message_post'):\n ActiveModel = self.env['mail.thread'].with_context(thread_model=wizard.model)\n if wizard.composition_mode == 'mass_post':\n # do not send emails directly but use the queue instead\n # add context key to avoid subscribing the author\n ActiveModel = ActiveModel.with_context(mail_notify_force_send=False, mail_create_nosubscribe=True)\n # wizard works in batch mode: [res_id] or active_ids or active_domain\n if mass_mode and wizard.use_active_domain and wizard.model:\n res_ids = self.env[wizard.model].search(safe_eval(wizard.active_domain)).ids\n elif mass_mode and wizard.model and self._context.get('active_ids'):\n res_ids = self._context['active_ids']\n else:\n res_ids = [wizard.res_id]\n\n batch_size = int(self.env['ir.config_parameter'].sudo().get_param('mail.batch_size')) or self._batch_size\n sliced_res_ids = [res_ids[i:i + batch_size] for i in range(0, len(res_ids), batch_size)]\n\n if wizard.composition_mode == 'mass_mail' or wizard.is_log or (wizard.composition_mode == 'mass_post' and not wizard.notify): # log a note: subtype is False\n subtype_id = False\n elif wizard.subtype_id:\n subtype_id = wizard.subtype_id.id\n else:\n subtype_id = self.sudo().env.ref('mail.mt_comment', raise_if_not_found=False).id\n\n for res_ids in sliced_res_ids:\n batch_mails = Mail\n all_mail_values = wizard.get_mail_values(res_ids)\n for res_id, mail_values in all_mail_values.items():\n if wizard.composition_mode == 'mass_mail':\n batch_mails |= Mail.create(mail_values)\n else:\n ActiveModel.browse(res_id).message_post(\n message_type=wizard.message_type,\n subtype_id=subtype_id,\n **mail_values)\n sale_order_lines = self.env['sale.order.line'].search([('id', '=', res_id)])\n for sale_order_line in sale_order_lines:\n sale_order_line.write({'is_email_sent': True})\n\n if wizard.composition_mode == 'mass_mail':\n batch_mails.send(auto_commit=auto_commit)\n\n return {'type': 'ir.actions.act_window_close'}",
"def send_created_email(self):\n if settings.NOTIFY_NEW_REG:\n to = settings.NOTIFY_NEW_REG\n message = \"\"\"\\\nGreetings,<br><br>\n\nA new vehicle registration has been submitted by %s.<br><br>\n\nGo here to view or edit the request: <br>\n<a href=\"%s\">%s</a>\n<br><br>\nSincerely,<br><br>\nThe Janelia Parking Permit Program\n \"\"\" % (self.user_display_name(), self.get_edit_url(True), self.get_edit_url(True))\n subject = 'A new parking permit request has been entered'\n from_email = 'parkingpermit-donotreply@janelia.hhmi.org'\n text_content = re.sub(r'<[^>]+>','',message)\n html_content = message\n msg = EmailMultiAlternatives(subject, text_content, from_email, to)\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()",
"def po_modal(request):\n\treturn render(request, 'po_modal.html', {})",
"def send_receipt(self):\n email_server = self.env['ir.mail_server']\n email_sender = email_server.sudo().search([])\n ir_model_data = self.env['ir.model.data']\n\n if self.partner_id.is_student == True:\n template_id = ir_model_data.get_object_reference('edsys_edu_fee', 'email_template_send_receipt')[1]\n template_rec = self.env['mail.template'].sudo().browse(template_id)\n email = self.partner_id.parents1_id.parents_email\n elif self.partner_id.is_parent == True:\n template_id = ir_model_data.get_object_reference('edsys_edu_fee', 'email_template_send_receipt_parent')[1]\n template_rec = self.env['mail.template'].sudo().browse(template_id)\n email = self.partner_id.parents_email\n\n else:\n template_id = ir_model_data.get_object_reference('edsys_edu_fee', 'email_template_send_receipt_parent')[1]\n template_rec = self.env['mail.template'].sudo().browse(template_id)\n email = self.partner_id.email\n\n\n template_rec.write({'email_to': email , 'email_from': email_sender.smtp_user, 'email_cc': ''})\n template_rec.send_mail(self.id, force_send=False)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get predictors based on their distance. The predictors are selected as follows: [1,2], [1,3], [1,4], [2,3], [2,4], [2,5], [2,6]
|
def getpredictors_distance( staname, distance):
distfromsta = distance[staname]
try:
        del distfromsta[staname] # remove the station to be filled from the dataframe
except:
pass
distfromsta = distfromsta.sort_values()
stations = distfromsta.index
    sel1 = [(i, e) for i, e in zip(stations[0:-1], stations[1:])] # selection of predictors with spacing 1
    sel2 = [(i, e) for i, e in zip(stations[0:-2], stations[2:])] # selection of predictors with spacing 2
    selection = [None] * (len(sel1) + len(sel2))
selection[::2] = sel1
selection[1::2] = sel2
return selection[:4]
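
A minimal usage sketch (the station names, distances and the distance mapping below are hypothetical; it assumes distance maps each station name to a pandas Series of distances indexed by station name, which is what the function expects):

import pandas as pd

# Hypothetical distances (km) from station "S1" to its neighbours.
distance = {"S1": pd.Series({"S1": 0.0, "S2": 1.0, "S3": 2.0, "S4": 3.0, "S5": 4.0})}

# Sorted neighbours are S2, S3, S4, S5, so
#   spacing-1 pairs: (S2, S3), (S3, S4), (S4, S5)
#   spacing-2 pairs: (S2, S4), (S3, S5)
# and the interleaved list truncated to four pairs is returned:
print(getpredictors_distance("S1", distance))
# [('S2', 'S3'), ('S2', 'S4'), ('S3', 'S4'), ('S3', 'S5')]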
|
[
"def __getpredictors_distance(self, staname, distance):\n\n distfromsta = distance[staname]\n del distfromsta[staname] # remove the station to be fill from the dataframe\n distfromsta = distfromsta.sort_values()\n\n stations = self.network.getsta(distfromsta.index.values)\n # station = self.network.getsta(staname)\n\n # Only 3 closest stations\n # sel1 = [ (i,e) for i,e in zip(stations[0:2], stations[1:3])] # selction predictors with spacing 1\n # sel2 = [ (i,e) for i,e in zip(stations[0:2], stations[2:4])] # selction predictors with spacing 2\n\n # Use all stations\n sel1 = [(i, e) for i, e in zip(stations[0:-1], stations[1:])] # selction predictors with spacing 1\n sel2 = [(i, e) for i, e in zip(stations[0:-2], stations[2:])] # selction predictors with spacing 2\n\n # sel3 = [ (i,e) for i,e in zip(stations[0:-3], stations[3:])] # selction predictors with spacing 3\n # sel4 = [ (i,e) for i,e in zip(stations[0:-4], stations[4:])] # selction predictors with spacing 4\n\n # Only 3 closest stations\n # sel1names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:2], stations[1:3])] # selction predictors with spacing 1\n # sel2names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:2], stations[2:4])] # selction predictors with spacing 1\n\n # using all stations\n sel1names = [(i.getpara('stanames'), e.getpara('stanames')) for i, e in\n zip(stations[0:-1], stations[1:])] # selction predictors with spacing 1\n sel2names = [(i.getpara('stanames'), e.getpara('stanames')) for i, e in\n zip(stations[0:-2], stations[2:])] # selction predictors with spacing 1\n\n # sel3names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:-3], stations[3:])] # selction predictors with spacing 1\n # sel4names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:-4], stations[4:])] # selction predictors with spacing 1\n\n selection = [x for x in itertools.chain.from_iterable(itertools.izip_longest(sel1, sel2)) if x]\n selectionnames = [x for x in itertools.chain.from_iterable(itertools.izip_longest(sel1names, sel2names)) if x]\n\n return selection, selectionnames",
"def multi_pred(model,query,neg_list,anchor_pos_list,anchor_neg_list):\n predictions = model.predict([len(neg_list)*[query],neg_list,anchor_pos_list,anchor_neg_list])\n #mean_dist = predictions[:,0].mean()\n mean_rel = (predictions[:,0]>predictions[:,3]).mean()\n #print('Dist',mean_dist,'Rel',mean_rel)\n return mean_rel#np.min([mean_dist,mean_rel])",
"def pred_for_user(self,u):\r\n ids=np.where(self.Y_data_n[:,0]==u)[0]\r\n items_rated_by_u=Y_data_n[ids,1].tolist()\r\n pred_ratings=[]\r\n for i in range(self.n_items):\r\n if i not in items_rated_by_u:\r\n pred_ratings.append(self.pred(u,i))\r\n return pred_ratings",
"def get_predictors(path, name=None):\n\n preds = []\n for pred in predictors:\n P = get_results(path, pred, name)\n if P.data is not None and len(P.data)>0:\n preds.append(P)\n return preds",
"def find_most_diverse_preds(true_masks, pred_masks) -> pd.DataFrame:\n\n\n\n pass",
"def predict(self, features):\n distances = np.ones((features.shape[0], self.n_clusters))\n for j in range(self.n_clusters):\n for l in range(features.shape[0]):\n distances[l, j] = distance.euclidean(features[l, :], self.means[j, :])\n predictions = np.argmin(distances, axis=1)\n return predictions",
"def predict(self, query: np.ndarray):\n assert query.shape == self._training_set[1, :-1].shape, \"Size of the query does not match the size of the\" \\\n \" training set, Which is: \"\\\n + str(self._training_set[1, :-1].shape)\n tmp = (self._training_set[:, :-1] - query).astype(float)\n distances = np.linalg.norm(tmp, axis=1)\n\n index = np.argsort(distances)\n sorted_set = self._training_set[index, :]\n\n (unique, counts) = np.unique(sorted_set[:self._k, -1], return_counts=True)\n\n return unique[counts == np.max(counts)][0]",
"def distance_rank(active, pred, decoys, dist_func=jaccard):\n\n pred_dist = dist_func(active, pred)\n rank = 0\n for lig in decoys:\n d = dist_func(active, lig)\n if d < pred_dist:\n rank += 1\n return 1- (rank / (len(decoys) + 1))",
"def predict_only(self):",
"def get_predictors(self):\n\t\treturn self.predictors",
"def closest_preds_by_ind(self, pred, number=50):\n # Get the parameters for the predicate\n vec = self.pred_wei[pred]\n if not vec.any(): # if all entries are zero\n return None\n \n # Find the distance to other predicates\n dot_prod = dot(self.pred_wei, vec)\n dist = dot_prod / norm(self.pred_wei, axis=1)\n dist = nan_to_num(dist)\n # The closest pred will have the second largest dot product\n # (Largest is the pred itself)\n return dist.argpartition(tuple(range(-1-number,0)))[-1-number:-1]",
"def get_predicted_places(results):\n\n ranks = rankdata([-x.prior_mu for x in results], method='min')\n for result, rank in zip(results, ranks):\n result.predicted_place = int(rank) # convert from numpy dtype",
"def predict(self, X):\n dists = euclidean_distances (self.X_mean, X)\n preds = np.array (dists).argmin (axis = 0) + 1\n return preds",
"def test_review_data():\n print(\"learning done\")\n data, labels = [], []\n for rating, category in enumerate(partition):\n length = len(category) // 2\n data += category[length:]\n # reviews in each category have their star rating as the label\n labels += [rating + 1] * length\n print(\"beginning to test model\")\n abs_dist = []\n for datum, label in zip(data, labels):\n abs_dist.append(abs(label - classifier(datum)))\n return abs_dist",
"def _calc_distances(preds, targets, mask, normalize):\n N, K, _ = preds.shape\n _mask = mask.copy()\n _mask[np.where((normalize == 0).sum(1))[0], :] = False\n distances = np.full((N, K), -1, dtype=np.float32)\n normalize[np.where(normalize <= 0)] = 1000000.0\n distances[_mask] = np.linalg.norm(((preds - targets) / normalize[:, None, :])[_mask], axis=-1)\n return distances.T",
"def nn_classifier(test_X, train_X, train_y):\n#We create an array for you to populate with your class predictions\n#Go through each sample in test_X and predict its class\n#based on the label of its nearest neighbor in train_X.\n#Insert this prediction in 'predictions'\n#(Use Euclidean Distance as your distance metric here)\n predictions = np.zeros(test_X.shape[0])\n\n for i, row in enumerate(test_X):\n hold = distances(row, train_X)\n predict = train_y[np.argmin(hold)]\n predictions[i] = predict\n return predictions",
"def cal_distance(self, poses):\n\n if len(poses) > 0:\n pose_dis = []\n for pose in poses:\n feature_dis = []\n keypoints = pose.keypoints\n keypoints = self.keypoint_filter.update(keypoints)\n for f_name_1, f_name_2, dis in FEATURE_DISTANCES:\n keypoint_1 = keypoints[f_name_1]\n keypoint_2 = keypoints[f_name_2]\n if keypoint_1.score > THRESHOLD and keypoint_2.score > THRESHOLD:\n # quality sufficient -> use keypoints\n pix_distance = np.linalg.norm(\n keypoint_1.yx - keypoint_2.yx, ord=1)\n distance = dis * FOCAL_LENGTH / pix_distance\n # filtering odd values\n # distance = self.outlier_rejection.update(distance, f_name_1)\n # only append distance, if it doesn't differenciate too much from the same keypoint, last frame\n # if distance != -1:\n feature_dis.append(distance)\n if len(feature_dis) > 0:\n # avg over feature distances\n distance = sum(feature_dis) / len(feature_dis)\n pose_dis.append(distance)\n if len(pose_dis) > 0:\n # max of pose distances -> wost case\n distance = max(pose_dis)\n return distance\n return None",
"def get_prediction():\n \n ################################################\n # put your original prediction code here\n ################################################\n \n pred_proba = np.round(np.random.rand(), 4)\n pred_class = random.choice([\"cat\", \"dog\", \"monkey\"])\n return pred_class, pred_proba",
"def predict(self, xs):"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
DESCRIPTION Check every variable of every station and try to fill it with the variables of the two nearest stations at every time step. INPUT
|
def fillstation(self, stanames, all=None, plot=None, summary=None, From=None, To=None, by=None,
how='mean', variables=None, distance=None, sort_cor=True, constant=True, cor_lim=None):
if all == True:
stations = self.network.getsta([], all=True).values()
else:
stations = self.network.getsta(stanames)
for station in stations:
staname = station.getpara('stanames')
if variables == None:
newdataframe = station.getData(reindex=True, From=From, To=To, by=by,
                                           how=how) # Dataframe which stores the new data of the stations
newdataframe['U m/s'] = station.getData('U m/s', reindex=True, From=From, To=To, by=by, how=how)
newdataframe['V m/s'] = station.getData('V m/s', reindex=True, From=From, To=To, by=by, how=how)
newdataframe['Ua g/kg'] = station.getData('Ua g/kg', reindex=True, From=From, To=To, by=by, how=how)
newdataframe['Theta C'] = station.getData('Theta C', reindex=True, From=From, To=To, by=by, how=how)
variables_name = newdataframe.columns
else:
newdataframe = station.getData(var=variables, reindex=True, From=From, To=To, by=by,
                                           how=how) # Dataframe which stores the new data of the stations
variables_name = variables
# select and sort nearest stations
selections, selectionsnames = self.__getpredictors_distance(staname, distance)
for var in variables_name:
print("I" * 30)
print("variable -> " + var)
try:
selections, params = self.__sort_predictors_by_corr(station, selections, var, From, To, by, how,
constant=constant,
selectionsnames=selectionsnames,
sort_cor=sort_cor, cor_lim=cor_lim)
selections_iter = iter(selections)
params_iter = iter(params)
# print newdataframe
                idxmissing = newdataframe[var][
                    newdataframe[var].isnull() == True].index # select where there is missing data
                while len(idxmissing) > 0:
                    print("There are [" + str(len(idxmissing)) + "] events missing")
                    try: # Try if there are still other stations to fill with
                        selection = next(selections_iter)
                        param = next(params_iter)
except StopIteration:
print("NO MORE SELECTED STATIONS")
break
try:
Y = station.getData(var, From=From, To=To, by=by, how=how) # variable to be filled
X1 = selection[0].getData(var, From=From, To=To, by=by,
how=how) # stations variable used to fill
X2 = selection[1].getData(var, From=From, To=To, by=by,
how=how) # stations variable used to fill
select = pd.concat([X1, X2], keys=['X1', 'X2'], axis=1, join='inner').dropna()
if constant:
newdata = param[0] + param[1] * select['X1'] + param[2] * select[
'X2'] # reconstruct the data
else:
newdata = param[0] * select['X1'] + param[1] * select['X2'] # reconstruct the data
newdataframe.loc[idxmissing, var] = newdata.loc[idxmissing, var]
idxmissing = newdataframe[var][
                            newdataframe[var].isnull() == True].index # select where there is missing data
except KeyError:
print("&" * 60)
print('Selected stations did not fill any events')
except ValueError:
                print('The variable ' + var + " does not exist or there is no data to do the multilinear regression")
if plot == True:
df = pd.concat([Y, X1, X2, newdata, newdataframe[var]],
keys=['Y', 'X1', 'X2', 'estimated data', 'Estimated replaced'], axis=1,
join='outer')
self.plotcomparison(df)
            print("There are finally [" + str(len(idxmissing)) + "] events missing")
        # Recalculate the wind direction and speed from the U and V components
try:
speed, dir = cart2pol(newdataframe['U m/s'], newdataframe['V m/s'])
newdataframe['Dm G'] = dir
newdataframe['Sm m/s'] = speed
except ValueError:
                print('No wind found in the dataframe')
except KeyError:
print('No wind found in the dataframe')
self.newdataframes[staname] = newdataframe
|
[
"def multilaterate(stations:list):\n assert len(stations) >= 3, 'I need >= 3 stations!'\n stations = np.array(stations)\n stations = stations[stations[:,1].argsort()]\n # We use the fixing with the shortest distance as initial guess\n x0 = stations[1,0].coordinates()\n # Simple OLS error function\n def error(x):\n current_pos = Pos(x[0], x[1])\n error = 0\n for pos, dist in stations:\n error += (haversine(current_pos, pos) - dist)**2\n return error**0.5\n # Minimize squared errors\n position = minimize(error, x0, method='L-BFGS-B', \n options={'ftol':1e-5, 'maxiter': 1e+6})\n # Return the position object\n return Pos(position.x[0], position.x[1], desc=f'Error = {position.fun}')",
"def fillstation(self, stanames, all=None, plot = None, summary = None, From=None, To=None):\n if all == True:\n stations = self.network.getsta([], all=True).values()\n else:\n stations = self.network.getsta(stanames)\n\n for station in stations:\n newdataframe = station.getData(reindex = True, From=From, To=To) # Dataframe which stock the new data of the stations\n print \"x\"*80\n print newdataframe.index\n print \"x\"*80\n newdataframe['U m/s'] = station.getvar('U m/s')\n newdataframe['V m/s'] = station.getvar('V m/s')\n newdataframe['Ua g/kg'] = station.getvar('Ua g/kg')\n newdataframe['Theta C'] = station.getvar('Theta C')\n\n staname = station.getpara('stanames')\n selections = self.__getpredictors(staname)\n variables = newdataframe.columns\n for i,selection in enumerate(selections):\n print \"=\"*20\n print str(i), ' on ', str(len(selections)), ' completed'\n print \"=\"*20\n for var in variables:\n Y = station.getvar(var) # variable to be filled\n X1 = selection[0].getvar(var) # stations variable used to fill\n X2 = selection[1].getvar(var)# stations variable used to fill\n\n try:\n # get parameters\n data=pd.concat([Y, X1, X2],keys=['Y','X1','X2'],axis=1, join='outer').dropna()\n params = self.MLR(data[['X1','X2']], data['Y'], summary = summary)\n \n # get new fitted data\n select = pd.concat([X1, X2],keys=['X1','X2'],axis=1, join='inner').dropna()\n newdata = params[0] + params[1]*select['X1'] + params[2]*select['X2']\n \n # Place fitted data in original dataframe\n idxmissing = newdataframe[var][newdataframe[var].isnull() == True].index # slect where their is missing data\n newdataframe[var][idxmissing] = newdata[idxmissing] # Fill the missing data with the estimated serie\n except KeyError:\n print('Data not present in all station')\n except ValueError:\n print('The variable '+var+ \"Does not exist to do the multilinear regression \")\n\n speed,dir = cart2pol(newdataframe['U m/s'],newdataframe['V m/s'])\n newdataframe['Dm G'] = dir\n newdataframe['Sm m/s'] = speed\n\n if plot == True:\n df = pd.concat([Y,X1,X2,newdata,newdataframe[var] ], keys=['Y','X1','X2','estimated data','Estimated replaced'],axis=1, join='outer')\n self.plotcomparison(df)\n\n\n self.newdataframes[staname] = newdataframe",
"def test_nearest(self):\n dist = station.nearest(28.43, -81.31)\n stn = dist.pop(\"station\")\n self.assertIsInstance(stn, station.Station)\n self.assertEqual(stn.icao, \"KMCO\")\n for val in dist.values():\n self.assertIsInstance(val, float)\n for *params, count in (\n (30, -82, 10, True, True, 0.2, 1),\n (30, -82, 10, True, False, 0.2, 5),\n (30, -82, 10, False, False, 0.2, 6),\n (30, -82, 1000, True, True, 0.5, 6),\n (30, -82, 1000, False, False, 0.5, 37),\n ):\n stations = station.nearest(*params)\n self.assertEqual(len(stations), count)\n for dist in stations:\n stn = dist.pop(\"station\")\n self.assertIsInstance(stn, station.Station)\n for val in dist.values():\n self.assertIsInstance(val, float)",
"def calculate_distance_to_stations(self, stations: list):\n for station in stations:\n import pdb; pdb.set_trace()",
"def test_nearest_filter(self):\n for airport, reports, count in (\n (True, True, 6),\n (True, False, 16),\n (False, True, 6),\n (False, False, 30),\n ):\n stations = station.nearest(30, -80, 30, airport, reports, 1.5)\n self.assertEqual(len(stations), count)",
"def test_nearest(self):\n for lat, lon, icao in ((28.43, -81.31, \"KMCO\"), (28.43, -81, \"KTIX\")):\n stn, dist = station.Station.nearest(lat, lon, is_airport=True)\n self.assertIsInstance(stn, station.Station)\n self.assertEqual(stn.icao, icao)\n for val in dist.values():\n self.assertIsInstance(val, float)\n # Test with IATA req disabled\n stn, dist = station.Station.nearest(28.43, -81, False, False)\n self.assertIsInstance(stn, station.Station)\n self.assertEqual(stn.icao, \"FA18\")\n for val in dist.values():\n self.assertIsInstance(val, float)",
"def interpolate(self, var, time, lat, lon):\n\n # Get the nearest four points in space\n # Check to see if lat/lons are 2d or 1d\n if len(self['lat'].shape) == 2:\n closey, closex, distances = self.nearest_points(lat, lon, npt=4)\n # Distances in km\n# distances = np.array([self.haversine(\n# (self['lat'][y,x].values, self['lon'][y,x].values),\n# (lat, lon)) for y,x in \n# zip(list(closey), list(closex))])\n else:\n closen = self.nearest_points(lat, lon, npt=4)\n closey = closen\n closex = closen\n # Distances in km\n distances = np.array([self.haversine(\n (self['lat'][n].values, self['lon'][n].values),\n (lat, lon)) for n in list(closen)])\n # Check for exact match (within some tolerance)\n spaceweights = np.zeros(distances.shape)\n if (distances < 1.0).sum() > 0:\n spaceweights[distances.argmin()] = 1\n else:\n # Here, inverse distance weighting (for simplicity)\n spaceweights = 1.0 / distances\n spaceweights /= spaceweights.sum()\n # Get weights in time\n #time64 = np.datetime64(time)\n #all the valid times in the ensemble\n valids = self['validtime'].values\n timeweights = np.zeros(valids.shape)\n # Check if we are outside the valid time range\n if (time < valids[0]) or (time > valids[-1]):\n print(\"Interpolation is outside of time range in state!\")\n return None\n # Find where we are in this list\n #index after the time of the observation\n lastdex = (valids >= time).argmax()\n # If we match a particular time value, then\n # this is just an identity\n if valids[lastdex] == time:\n # Just make a one at this time\n timeweights[lastdex] = 1\n else:\n # Linear interpolation\n #often going to be 6 hours, subtracts datetime objects I think\n diff = (valids[lastdex] - valids[lastdex-1])\n #print(valids[lastdex], valids[lastdex-1], diff)\n #often going to be 21600 seconds\n totsec = diff.seconds\n #totsec = np.abs(diff / np.timedelta64(1, 's'))\n #ST\n #calculate time difference between time after and time of observation\n #the abs will make this positive definite, which is okay since\n #the difference will always be negative\n thisdiff = abs(time - valids[lastdex])\n #thissec = np.abs(thisdiff / np.timedelta64(1,'s'))\n thissec = thisdiff.seconds\n # Put in appropriate weights\n #ST switched the -1 between the two lines to match up with the positive-\n #definite thisdiff\n timeweights[lastdex-1] = float(thissec) / totsec\n timeweights[lastdex] = 1.0 - (float(thissec)/totsec)\n # Now that we have the weights, do the interpolation\n #ST an ntimes x 4 x nens array\n interp = self.variables[var].values[:,closey,closex,:]\n # Do a dot product with the time weights\n # And with the space weights\n if len(interp.shape) == 3:\n interp = (timeweights[:,None,None] * interp).sum(axis=0)\n else:\n interp = (timeweights[:,None,None,None] * interp).sum(axis=0)\n \n if len(interp.shape) == 3:\n #ST Changed 2nd : to None\n interp = (spaceweights[:,None,None] * interp).sum(axis=1)\n else:\n interp = (spaceweights[:,None] * interp).sum(axis=0)\n # Return estimate from all ensemble members\n return interp",
"def guess_stations(flats_list, constraint, config):\n distance_threshold = config[\"max_distance_housing_station\"]\n opendata = {\n \"postal_codes\": data.load_data(PostalCode, constraint, config),\n \"stations\": data.load_data(PublicTransport, constraint, config),\n }\n\n for flat in flats_list:\n flat_station = flat.get(\"station\", None)\n\n if not flat_station:\n # Skip everything if empty station\n LOGGER.info(\"No stations field for flat %s, skipping stations lookup.\", flat[\"id\"])\n continue\n\n # Woob modules can return several stations in a comma-separated list.\n flat_stations = flat_station.split(\",\")\n # But some stations containing a comma exist, so let's add the initial\n # value to the list of stations to check if there was one.\n if len(flat_stations) > 1:\n flat_stations.append(flat_station)\n\n matched_stations = []\n for tentative_station in flat_stations:\n matched_stations += fuzzy_match(\n tentative_station,\n [x.name for x in opendata[\"stations\"]],\n limit=10,\n threshold=50,\n )\n\n # Keep only one occurrence of each station\n matched_stations = list(set(matched_stations))\n\n # Filter out the stations that are obviously too far and not well\n # guessed\n good_matched_stations = []\n postal_code = flat[\"flatisfy\"].get(\"postal_code\", None)\n if postal_code:\n # If there is a postal code, check that the matched station is\n # closed to it\n postal_code_gps = next((x.lat, x.lng) for x in opendata[\"postal_codes\"] if x.postal_code == postal_code)\n for station in matched_stations:\n # Note that multiple stations with the same name exist in a\n # city, hence the list of stations objects for a given matching\n # station name.\n stations_objects = [x for x in opendata[\"stations\"] if x.name == station[0]]\n for station_data in stations_objects:\n distance = tools.distance((station_data.lat, station_data.lng), postal_code_gps)\n if distance < distance_threshold:\n # If at least one of the coordinates for a given\n # station is close enough, that's ok and we can add\n # the station\n good_matched_stations.append(\n {\n \"key\": station[0],\n \"name\": station_data.name,\n \"confidence\": station[1],\n \"gps\": (station_data.lat, station_data.lng),\n }\n )\n break\n LOGGER.info(\n (\"Station %s is too far from flat %s (%dm > %dm), discarding this station.\"),\n station[0],\n flat[\"id\"],\n int(distance),\n int(distance_threshold),\n )\n else:\n LOGGER.info(\"No postal code for flat %s, skipping stations detection.\", flat[\"id\"])\n\n if not good_matched_stations:\n # No stations found, log it and cotninue with next housing\n LOGGER.info(\n \"No stations found for flat %s, matching %s.\",\n flat[\"id\"],\n flat[\"station\"],\n )\n continue\n\n LOGGER.info(\n \"Found stations for flat %s: %s (matching %s).\",\n flat[\"id\"],\n \", \".join(x[\"name\"] for x in good_matched_stations),\n flat[\"station\"],\n )\n\n # If some stations were already filled in and the result is different,\n # display some warning to the user\n if \"matched_stations\" in flat[\"flatisfy\"] and (\n # Do a set comparison, as ordering is not important\n set([station[\"name\"] for station in flat[\"flatisfy\"][\"matched_stations\"]])\n != set([station[\"name\"] for station in good_matched_stations])\n ):\n LOGGER.warning(\n \"Replacing previously fetched stations for flat %s. Found \"\n \"stations differ from the previously found ones.\",\n flat[\"id\"],\n )\n\n flat[\"flatisfy\"][\"matched_stations\"] = good_matched_stations\n\n return flats_list",
"def test_find_nearest_storms(self):\n\n this_wind_to_storm_table = linkage._find_nearest_storms(\n storm_object_table=STORM_OBJECT_TABLE,\n event_table=WIND_TABLE,\n max_time_before_storm_start_sec=MAX_EXTRAPOLATION_TIME_SEC,\n max_time_after_storm_end_sec=MAX_EXTRAPOLATION_TIME_SEC,\n max_link_distance_metres=MAX_LINK_DISTANCE_METRES,\n interp_time_interval_sec=INTERP_TIME_INTERVAL_SEC,\n event_type_string=linkage.WIND_EVENT_STRING)\n\n self.assertTrue(\n this_wind_to_storm_table[\n linkage.NEAREST_SECONDARY_ID_COLUMN].values.tolist() ==\n WIND_TO_STORM_TABLE[\n linkage.NEAREST_SECONDARY_ID_COLUMN].values.tolist()\n )\n\n self.assertTrue(numpy.array_equal(\n this_wind_to_storm_table[linkage.NEAREST_TIME_COLUMN].values,\n WIND_TO_STORM_TABLE[linkage.NEAREST_TIME_COLUMN].values\n ))\n\n self.assertTrue(numpy.allclose(\n this_wind_to_storm_table[linkage.LINKAGE_DISTANCE_COLUMN].values,\n WIND_TO_STORM_TABLE[linkage.LINKAGE_DISTANCE_COLUMN].values,\n equal_nan=True, atol=TOLERANCE\n ))",
"def _update_along_flow_latlon(self, ds_u, ds_v, dr_n, dr_s, dr_e, dr_w):\n for var in [\"longitude\", \"latitude\"]:\n tmp_var = xr.full_like(ds_u[var], np.nan)\n tmp_var[dr_n] = ds_u[var].data[dr_n]\n tmp_var[dr_s] = ds_u[var].data[dr_s + 1]\n tmp_var[dr_e] = ds_v[var].data[dr_e]\n tmp_var[dr_w] = ds_v[var].data[dr_w + 1]\n tmp_var.attrs = {\"standard_name\": var.capitalize() + \" at the along-contour velocity grid points\"}\n self.data_along_flow.assign_coords({var: tmp_var[:-1]})",
"def station_test(df_single_test: pd.DataFrame, lat_deg: float, kc: list, year: int, start_date: str,\r\n end_date: str, interval_length: int, station_str: str):\r\n # temperature data\r\n df_single_test_temp = df_single_test.loc[df_single_test[\"Type\"] != \"PRCP\"] # not a scalable solution\r\n print(df_single_test_temp)\r\n\r\n if df_single_test.size > 24: # screen for if sufficient temperature data to calculate et_t\r\n print(f'temp data good for station: {station_str}')\r\n single_station_et_t = calculate_et_t(df_single_test_temp, lat_deg, year, kc, station_str)\r\n if len(single_station_et_t) > 0: # if calculate_et_t successful\r\n et_t_dict = {\"Station_ID\": station_str}\r\n et_t_dict.update( # update to et_t_dict\r\n {a: b for a, b in zip([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], single_station_et_t)})\r\n df_single_station_prcp = df_single_test.loc[df_single_test[\"Type\"] == \"PRCP\"]\r\n water(df_single_station_prcp, start_date, end_date,\r\n interval_length, default_water_demand, default_water_demand,\r\n station_str)\r\n else: # temperature data insufficient\r\n print(f'insufficient temp data for station: {station_str}, going solely off prcp values')\r\n df_single_station_prcp = df_single_test.loc[df_single_test[\"Type\"] == \"PRCP\"]\r\n water(df_single_station_prcp, start_date, end_date,\r\n interval_length, default_water_demand, default_water_demand,\r\n station_str)\r\n\r\n else: # temperature data insufficient\r\n print(f'insufficient temp data for station: {station_str}, going solely off prcp values')\r\n df_single_station_prcp = df_single_test.loc[df_single_test[\"Type\"] == \"PRCP\"]\r\n water(df_single_station_prcp, start_date, end_date,\r\n interval_length, default_water_demand, default_water_demand,\r\n station_str)\r\n print(df_water_use)",
"def compute_station_river_distances():\n repo = Repository()\n\n runs = repo.get_all_runs()\n stations = repo.get_all_stations()\n\n # foreach run, find the close USGS, NOAA, and SNOW station\n for run in runs.iterrows():\n distances = stations.apply(lambda row: get_distance_between_geo_points(\n run[1].put_in_latitude,\n run[1].put_in_longitude,\n row.latitude,\n row.longitude,\n run[1].run_id,\n row.station_id,\n row.source\n ), axis=1).apply(pd.Series)\n\n distances.sort_values('distance', inplace=True)\n\n usgs_ = distances[distances.source == 'USGS'].iloc[0, :]\n noaa_ = distances[distances.source == 'NOAA'].iloc[0, :]\n snow_ = distances[distances.source == 'SNOW'].iloc[0, :]\n\n usgs = StationRiverDistance(\n station_id=usgs_.station,\n run_id=run[1].run_id,\n distance=round(float(usgs_.distance), 2)\n )\n\n noaa = StationRiverDistance(\n station_id=noaa_.station,\n run_id=run[1].run_id,\n distance=round(float(noaa_.distance), 2)\n )\n\n snow = StationRiverDistance(\n station_id=snow_.station,\n run_id=run[1].run_id,\n distance=round(float(snow_.distance), 2)\n )\n\n repo.put_station_river_distances([usgs, noaa, snow])",
"def from_stations(station_file, bry_file, grid, threads=4):\n grid = seapy.model.asgrid(grid)\n with netCDF4.Dataset(station_file) as ncstation:\n src_ref, time = seapy.roms.get_reftime(ncstation)\n\n # Create the boundary file and fill up the descriptive data\n ncbry = seapy.roms.ncgen.create_bry(bry_file,\n eta_rho=grid.eta_rho, xi_rho=grid.xi_rho,\n s_rho=grid.n, reftime=src_ref, clobber=False,\n title=\"generated from \" + station_file)\n grid.to_netcdf(ncbry)\n\n # Load the times: we need to see if the times are duplicated\n # because if using assimilation, they may be duplicated for every\n # outer-loop. Currently, it overwrites the first one each time (this\n # will need to be fixed if ROMS is fixed).\n statime = ncstation.variables[time][:]\n dup = np.where(statime[1:] == statime[0])[0]\n rng = np.s_[:]\n if dup.size > 0:\n rng = np.s_[0:np.min(dup)]\n statime = statime[rng]\n # Map the times between station and boundary files\n brytime = seapy.roms.get_timevar(ncbry)\n ncbry.variables[brytime][:] = seapy.roms.date2num(\n seapy.roms.num2date(ncstation, time, rng), ncbry, brytime)\n\n # Set up the indices\n bry = {\n \"south\": range(0, grid.lm),\n \"north\": range(grid.lm, 2 * grid.lm),\n \"west\": range(2 * grid.lm, 2 * grid.lm + grid.ln),\n \"east\": range(2 * grid.lm + grid.ln, 2 * (grid.lm + grid.ln))\n }\n\n # Gather the information from the station file\n sta_vt = ncstation.variables[\"Vtransform\"][:]\n sta_hc = ncstation.variables[\"hc\"][:]\n sta_s_rho = ncstation.variables[\"s_rho\"][:]\n sta_cs_r = ncstation.variables[\"Cs_r\"][:]\n sta_h = ncstation.variables[\"h\"][:]\n sta_angle = ncstation.variables[\"angle\"][:]\n sta_lon = ncstation.variables[\"lon_rho\"][:]\n sta_lat = ncstation.variables[\"lat_rho\"][:]\n sta_mask = np.ones(sta_lat.shape)\n sta_mask[sta_lon * sta_lat > 1e10] = 0\n\n # Load the station data as we need to manipulate it\n sta_zeta = np.ma.masked_greater(ncstation.variables[\"zeta\"][rng], 100)\n sta_ubar = np.ma.masked_greater(ncstation.variables[\"ubar\"][rng], 100)\n sta_vbar = np.ma.masked_greater(ncstation.variables[\"vbar\"][rng], 100)\n sta_temp = np.ma.masked_greater(ncstation.variables[\"temp\"][rng], 100)\n sta_salt = np.ma.masked_greater(ncstation.variables[\"salt\"][rng], 100)\n sta_u = np.ma.masked_greater(ncstation.variables[\"u\"][rng], 100)\n sta_v = np.ma.masked_greater(ncstation.variables[\"v\"][rng], 100)\n\n # Create the true positions and mask\n grid_h = np.concatenate([grid.h[0, :], grid.h[-1, :],\n grid.h[:, 0], grid.h[:, -1]])\n grid_lon = np.concatenate([grid.lon_rho[0, :], grid.lon_rho[-1, :],\n grid.lon_rho[:, 0], grid.lon_rho[:, -1]])\n grid_lat = np.concatenate([grid.lat_rho[0, :], grid.lat_rho[-1, :],\n grid.lat_rho[:, 0], grid.lat_rho[:, -1]])\n grid_mask = np.concatenate([grid.mask_rho[0, :], grid.mask_rho[-1, :],\n grid.mask_rho[:, 0], grid.mask_rho[:, -1]])\n grid_angle = np.concatenate([grid.angle[0, :], grid.angle[-1, :],\n grid.angle[:, 0], grid.angle[:, -1]])\n\n # Search for bad stations due to child grid overlaying parent mask.\n # Unfortunately, ROMS will give points that are not at the locations\n # you specify if those points conflict with the mask. 
So, these points\n # are simply replaced with the nearest.\n dist = np.sqrt((sta_lon - grid_lon)**2 + (sta_lat - grid_lat)**2)\n bad_pts = np.where(np.logical_and(dist > 0.001, grid_mask == 1))[0]\n good_pts = np.where(np.logical_and(dist < 0.001, grid_mask == 1))[0]\n for i in bad_pts:\n didx = np.sqrt((sta_lon[i] - sta_lon[good_pts])**2 +\n (sta_lat[i] - sta_lat[good_pts])**2).argmin()\n index = good_pts[didx]\n sta_h[i] = sta_h[index]\n sta_angle[i] = sta_angle[index]\n sta_lon[i] = sta_lon[index]\n sta_lat[i] = sta_lat[index]\n sta_zeta[:, i] = sta_zeta[:, index]\n sta_ubar[:, i] = sta_ubar[:, index]\n sta_vbar[:, i] = sta_vbar[:, index]\n sta_temp[:, i, :] = sta_temp[:, index, :]\n sta_salt[:, i, :] = sta_salt[:, index, :]\n sta_u[:, i, :] = sta_u[:, index, :]\n sta_v[:, i, :] = sta_v[:, index, :]\n\n # Construct the boundaries: a dictionary of boundary side and two element\n # array whether the u[0] or v[1] dimensions need to be averaged\n sides = {\"north\": [True, False], \"south\": [True, False],\n \"east\": [False, True], \"west\": [False, True]}\n delta_angle = sta_angle - grid_angle\n sta_ubar, sta_vbar = seapy.rotate(sta_ubar, sta_vbar, delta_angle)\n sta_u, sta_v = seapy.rotate(sta_u, sta_v, np.tile(delta_angle,\n (sta_u.shape[-1], 1)).T)\n\n # Set up the parameters for depth-interpolated\n wght = 5\n nx = 5\n ny = 11\n\n # Build a non-extrapolating field to interpolate.\n def __expand_field(x):\n shp = x.shape\n y = np.zeros((shp[0] + 2, shp[1] + 2))\n y[1:-1, 1:-1] = x\n y[1:-1, 0] = x[:, 0]\n y[1:-1, -1] = x[:, -1]\n y[0, :] = y[1, :]\n y[-1, :] = y[-2, :]\n return y\n\n # Interpolate a given field\n def __side_interp(data):\n in_data = __expand_field(data)\n res, _ = seapy.oasurf(in_x, in_depth, in_data, out_x, out_depth, pmap, wght, nx, ny)\n return np.ma.masked_equal(res, 0, copy=False)\n\n # Go through each side and variable and interpolate (vertically) from the station\n # output to the new grid boundaries.\n try:\n for side in track(sides, total=4, description=f\"Converting stations...\"):\n # Masks\n sta_ocean = np.where(sta_mask[bry[side]] == 1)[0]\n ocean = np.where(grid_mask[bry[side]] == 1)[0]\n\n # If we have a masked boundary, skip it\n if not np.any(ocean):\n continue\n\n # 1) Zeta\n ncbry.variables[\"zeta_\" + side][:,\n ocean] = sta_zeta[:, bry[side]][:, ocean]\n\n # 2) Ubar\n if sides[side][0]:\n ncbry.variables[\"ubar_\" + side][:] = 0.5 * (\n sta_ubar[:, bry[side][0:-1]] + sta_ubar[:, bry[side][1:]])\n else:\n ncbry.variables[\"ubar_\" + side][:] = sta_ubar[:, bry[side]]\n\n # 3) Vbar\n if sides[side][1]:\n ncbry.variables[\"vbar_\" + side][:] = 0.5 * (\n sta_vbar[:, bry[side][0:-1]] + sta_vbar[:, bry[side][1:]])\n else:\n ncbry.variables[\"vbar_\" + side][:] = sta_vbar[:, bry[side]]\n ncbry.sync()\n\n # For 3D variables, we need to loop through time and interpolate\n # onto the child grid. Construct the positions. 
We will scale\n # the x-index to be similar to the vertical (we don't want to\n # interpolate in the x)\n x = np.arange(len(bry[side]))\n x[1:] = np.cumsum(seapy.earth_distance(grid_lon[bry[side][0:-1]],\n grid_lat[bry[side][0:-1]],\n grid_lon[bry[side][1:]],\n grid_lat[bry[side][1:]]))\n sta_x = seapy.adddim(x, len(sta_s_rho))\n x = seapy.adddim(x, len(grid.s_rho))\n\n # Create depths\n sta_depth = seapy.roms.depth(sta_vt, sta_h[bry[side]], sta_hc,\n sta_s_rho, sta_cs_r,\n np.mean(sta_zeta[: , bry[side]], axis=0))\n depth = seapy.roms.depth(grid.vtransform, grid_h[bry[side]],\n grid.hc, grid.s_rho, grid.cs_r,\n np.mean(sta_zeta[:, bry[side]], axis=0))\n\n # Create the pmap for the x, depth mapping\n in_x = __expand_field(sta_x[:, sta_ocean])\n in_x[:, 0] = in_x[:, 0] - 3600\n in_x[:, -1] = in_x[:, -1] + 3600\n in_depth = __expand_field(sta_depth[:, sta_ocean])\n in_depth[0, :] = in_depth[0, :] - 1000\n in_depth[-1, :] = in_depth[-1, :] + 10\n out_x = x[:, ocean]\n out_depth = depth[:, ocean]\n _, pmap = seapy.oasurf(in_x, in_depth, in_depth,\n out_x, out_depth, weight=wght,\n nx=nx, ny=ny)\n\n # 4) Temp\n ncbry.variables[\"temp_\" + side][:] = 0.0\n ncbry.variables[\"temp_\" + side][:, :, ocean] = \\\n np.ma.array(Parallel(n_jobs=threads)\n (delayed(__side_interp)(\n np.transpose(sta_temp[n, bry[side], :][sta_ocean, :]))\n for n in range(len(statime))), copy=False)\n\n # 5) Salt\n ncbry.variables[\"salt_\" + side][:] = 0.0\n ncbry.variables[\"salt_\" + side][:, :, ocean] = \\\n np.ma.array(Parallel(n_jobs=threads)\n (delayed(__side_interp)(\n np.transpose(sta_salt[n, bry[side], :][sta_ocean, :]))\n for n in range(len(statime))), copy=False)\n\n # 6) U\n data = np.zeros([len(statime),x.shape[0],x.shape[1]])\n data[:, :, ocean] = np.ma.array(Parallel(n_jobs=threads)\n (delayed(__side_interp)(\n np.transpose(sta_u[n, bry[side], :][sta_ocean, :]))\n for n in range(len(statime))), copy=False)\n if sides[side][0]:\n ncbry.variables[\"u_\" + side][:] = 0.5 * (\n data[:, :, 0:-1] + data[:, :, 1:])\n else:\n ncbry.variables[\"u_\" + side][:] = data\n\n # 7) V\n data = data * 0\n data[:, :, ocean] = np.ma.array(Parallel(n_jobs=threads)\n (delayed(__side_interp)(\n np.transpose(sta_v[n, bry[side], :][sta_ocean, :]))\n for n in range(len(statime))), copy=False)\n\n\n if sides[side][1]:\n ncbry.variables[\"v_\" + side][:] = 0.5 * (\n data[:,:, 0:-1] + data[:, :, 1:])\n else:\n ncbry.variables[\"v_\" + side][:] = data\n except Exception as e:\n raise(e)\n finally:\n ncbry.close()\n pass",
"def find_xings_sat(satName,date_list,file_dict,common_data_list):\n \n # Init CS2 data dictionnary\n data_dict = dict()\n data_param = dict()\n param_list = ['lat','lon','time','delay','dist','weight']\n delta_reftime = (ref_date[satName] - ref_date['CS2']).total_seconds()\n\n print(\"\\nFind Crossings points with %s \\n#---------------\\n\" %(satName))\n\n for ngdr,gdr in enumerate(file_dict.keys()):\n\n print(\"%s:\\n---------\" %(gdr))\n data_dict[gdr] = dict()\n \n data_list = dict()\n\n # For each collocated tracks - dates (every 1.5 days)\n #-------------------------------------------------------\n for n,date in enumerate(date_list):\n \n date_str = date.strftime('%Y%m%d')\n \n # if no data for this date continue\n #if date_str not in file_dict[gdr].keys(): continue\n \n print(\"\\n%s\" %(date.strftime('%d/%m/%Y')))\n\n # ref coordinates for this date\n lat_ref,lon_ref = common_data_list[n]['ref_lat'],common_data_list[n]['ref_lon']\n if any(np.abs(np.diff(lon_ref)) > 20): lon_ref[lon_ref > 180] = lon_ref[lon_ref > 180] - 360 \n time_ref = common_data_list[n]['ref_time']\n ref_size = lon_ref.size\n coord_ref = np.vstack((lat_ref,lon_ref)).T\n x_ref,y_ref,z_ref = cf.lon_lat_to_cartesian(lon_ref, lat_ref)\n coordinates_ref = np.vstack((x_ref,y_ref,z_ref)).T\n tree = scipy.spatial.KDTree(coordinates_ref)\n\n # maximun length list\n max_len = 0\n \n # initiation array of lists\n for p in param_list:\n data_list[p] = np.frompyfunc(list, 0, 1)(np.empty((ref_size,), dtype=object))\n\n nfiles = 0\n for ite,val in file_dict[gdr].items():\n if isinstance(val, str): val=[val]\n nfiles = nfiles + len(val)\n\n # For each file around date\n #-------------------------------------------------------\n nfile = 0\n for datefile,filelist in file_dict[gdr].items():\n\n # for case one file per day\n if isinstance(filelist, str): filelist=[filelist]\n for filename in filelist:\n\n print(\"%i/%i\" %(nfile,nfiles))\n\n # Get dictionnary\n if satName=='SARAL':\n data_desc = saral_dict.init_dict(gdr,flag_1hz)\n elif satName=='S3':\n data_desc = s3_dict.init_dict(gdr,flag_1hz)\n elif satName=='CS2':\n data_desc = cs2_dict.init_dict(gdr,flag_1hz)\n else:\n print(\"No dictionnary for %s\" %(satName))\n\n # initiation array of lists for specific params\n \"\"\"\n if nfile==0:\n for p in data_desc.keys():\n if p not in param_list:\n data_list[p] = np.frompyfunc(list, 0, 1)(np.empty((ref_size,), dtype=object))\n\n \"\"\"\n lat,lon,time,x_dist,valid_idx = cf.get_coord_from_netcdf(filename,data_desc,'01',LAT_MIN)\n\n if any(np.abs(np.diff(lon)) > 20): lon[lon > 180] = lon[lon > 180] - 360 \n lat_sub = lat[::TRACK_REDUCTION_SAR]\n lon_sub = lon[::TRACK_REDUCTION_SAR]\n time_sub = time[::TRACK_REDUCTION_SAR]\n\n coord_is2 = np.vstack((lat_sub,lon_sub)).T\n\n # get intersections\n from intersection import find_intersections\n lat_inter, lon_inter, idx_ref, idx_is2 = find_intersections(coord_ref,coord_is2,1)\n\n # test intersection\n if lat_inter==None:\n print(\"No intersection found for this track\")\n nfile = nfile+1\n continue\n\n\n \n \n # test\n \"\"\"\n plt.plot(lat,lon,'.')\n plt.plot(lat_ref,lon_ref,'*')\n plt.plot(lat_inter, lon_inter,'*')\n plt.plot(lat_sub[idx_ref].flatten(),lon_sub[idx_ref].flatten(),'.')\n plt.plot(new_lat, new_lon,'*')\n plt.show()\n \"\"\"\n \n \n # check if intersection is correct\n dist1 = cf.dist_btw_two_coords(lat_inter,lat_sub[idx_is2],lon_inter,lon_sub[idx_is2])\n dist2 = cf.dist_btw_two_coords(lat_inter,lat_ref[idx_ref],lon_inter,lon_ref[idx_ref])\n if dist2 > MAX_DIST_INTER or 
dist1 > MAX_DIST_INTER:\n print(\"No or wrong intersection found for this track\")\n nfile = nfile+1\n continue\n\n # get subsection of track\n index = TRACK_REDUCTION_SAR*idx_is2\n first_index = index-int(index/TRACK_REDUCTION_SAR) if index-int(index/TRACK_REDUCTION_SAR)>0 else 0\n last_index = index+int(index/TRACK_REDUCTION_SAR) if index+int(index/TRACK_REDUCTION_SAR)< lat.size else lat.size-1\n idx_sub = np.arange(first_index,last_index)\n lat_sect = lat[idx_sub]\n lon_sect = lon[idx_sub]\n time_sect = time[idx_sub]\n\n # convert to cartesien\n x,y,z = cf.lon_lat_to_cartesian(lon_sect, lat_sect)\n coordinates = np.vstack((x,y,z)).T \n distance,idx_in_ref = tree.query(coordinates,1)\n\n # Selection close points\n selected_idx = np.argwhere(distance < MAX_DIST_OF_COLLOC_DATA) # XXX same as IS2?\n selected_idx = selected_idx.reshape((selected_idx.size,))\n ref_idx = idx_in_ref[selected_idx]\n\n new_lat = lat_sect[selected_idx]\n new_lon = lon_sect[selected_idx]\n new_time = time_sect[selected_idx]\n new_dist = distance[selected_idx]\n weight = np.exp(-(new_dist**2)/(MAX_DIST_OF_COLLOC_DATA**2))\n\n # Add data to array of lists\n for idx in np.unique(ref_idx):\n size = np.argwhere(ref_idx==idx).size\n data_list['lat'][idx].extend(new_lat[np.argwhere(ref_idx==idx).flatten()].tolist())\n data_list['lon'][idx].extend(new_lon[np.argwhere(ref_idx==idx).flatten()].tolist())\n data_list['time'][idx].extend(new_time[np.argwhere(ref_idx==idx).flatten()].tolist())\n delay = time_ref[idx] - new_time[np.argwhere(ref_idx==idx).flatten()].tolist() -delta_reftime\n data_list['delay'][idx].extend(delay)\n data_list['dist'][idx].extend(new_dist[np.argwhere(ref_idx==idx).flatten()].tolist())\n data_list['weight'][idx].extend(weight[np.argwhere(ref_idx==idx).flatten()].tolist())\n\n # Adding new track parameters data to list\n #-------------------------------------------------\n for pname,prodname in data_desc.items():\n\n if pname in ['lat','lon','time','lat01','lon01','time01','hour','minute','second'] + param_list: #add 2D params later\n continue\n\n param,units,param_is_flag = cf.get_param_from_netcdf(filename,data_desc,pname,'01',LAT_MIN)\n param_sub = param[idx_sub]\n for idx in np.unique(ref_idx):\n data_list[pname][idx].extend(param[np.argwhere(ref_idx==idx).flatten()].tolist())\n max_len = max(len(data_list[pname][idx]),max_len)\n \n # next file\n nfile = nfile+1\n\n # converting arrays of lists to matrix\n #---------------------------------------------\n \n # initiating masked arrays\n data_ar = {}\n print(\"\\nMax data at one crossing point: %i\" %(max_len))\n for p in data_list.keys():\n data_ar[p] = ma.masked_array(np.zeros((N_MAX_CROSSPTS_IN_CS2BEAMS,ref_size)),mask=np.ones((N_MAX_CROSSPTS_IN_CS2BEAMS,ref_size)))\n\n # save into masked array\n #--------------------------------\n #data_param_ar = {}\n for p in data_list.keys():\n for ncol in range(ref_size):\n # limit size of array\n if len(data_list[p][ncol])>= N_MAX_CROSSPTS_IN_CS2BEAMS:\n max_idx = N_MAX_CROSSPTS_IN_CS2BEAMS\n else:\n max_idx=len(data_list[p][ncol])\n\n # save in column of masked array\n data_ar[p][:max_idx,ncol] =np.array(data_list[p][ncol][:max_idx]).T\n if max_idx>0: print(data_ar[p][:max_idx,ncol])\n\n # initialize track lists to save per colloc ref track the data\n if n==0:\n for pname in data_list.keys():\n data_dict[gdr][pname] = list()\n\n # saving the data\n for pname in data_list.keys():\n data_ar[pname] = np.ma.masked_invalid(data_ar[pname],copy=True)\n data_dict[gdr][pname].append(data_ar[pname]) \n \n return 
data_dict",
"def fillMissingSubstations(self, lines):\n # first, do matching for those with only 1 substation missing\n for index, row in lines[lines.SUBSTATION_A_GLOBALID.isnull() ^ lines.SUBSTATION_B_GLOBALID.isnull()].iterrows():\n matched_subs = self.all_substations_and_taps[self.all_substations_and_taps.geometry.apply(lambda x: x.distance(lines.geometry.loc[index])) == 0]\n matched_subs = matched_subs[\"SUBSTATION_GLOBALID\"].to_list()\n\n for sub in matched_subs:\n if sub == row[\"SUBSTATION_A_GLOBALID\"] or sub == row[\"SUBSTATION_B_GLOBALID\"]:\n continue\n elif row[\"SUBSTATION_A_GLOBALID\"] is np.nan:\n lines.loc[index, \"SUBSTATION_A_GLOBALID\"] = sub\n elif row[\"SUBSTATION_B_GLOBALID\"] is np.nan:\n lines.loc[index, \"SUBSTATION_B_GLOBALID\"] = sub\n else: # TODO: might not be necessary\n print(sub)\n print(matched_subs)\n raise(ValueError(\"There are too many matches. Line index: {}\".format(index)))\n\n # second, do matching for those with both substations missing\n for index, row in lines[lines.SUBSTATION_A_GLOBALID.isnull() & lines.SUBSTATION_B_GLOBALID.isnull()].iterrows():\n matched_subs = self.all_substations_and_taps[self.all_substations_and_taps.geometry.apply(lambda x: x.distance(lines.geometry.loc[index])) == 0]\n matched_subs = matched_subs[\"SUBSTATION_GLOBALID\"].to_list()\n\n # insert match information into dataframe\n if len(matched_subs) > 0:\n lines.loc[index, \"SUBSTATION_A_GLOBALID\"] = matched_subs[0]\n\n if len(matched_subs) > 1:\n lines.loc[index, \"SUBSTATION_B_GLOBALID\"] = matched_subs[1]\n\n if len(matched_subs) > 2: # TODO: might not be necessary\n print(sub)\n print(matched_subs)\n raise(ValueError(\"There are too many matches. Line index: {}\".format(index)))\n\n return lines",
"def calc_beta_sta_quadrant(station_objects, wells_objects):\n for sta_obj in station_objects:\n dist_pre_q1 = []\n dist_pre_q2 = []\n dist_pre_q3 = []\n dist_pre_q4 = []\n #\n name_aux_q1 = [] \n name_aux_q2 = []\n name_aux_q3 = []\n name_aux_q4 = []\n wl_q1 = []\n wl_q2 = []\n wl_q3 = []\n wl_q4 = []\n\n for wl in wells_objects:\n # search for nearest well to MT station in quadrant 1 (Q1)\n if (wl.lat_dec > sta_obj.lat_dec and wl.lon_dec > sta_obj.lon_dec): \n # distance between station and well\n dist = dist_two_points([wl.lon_dec, wl.lat_dec], [sta_obj.lon_dec, sta_obj.lat_dec], type_coord = 'decimal')\n if not dist_pre_q1:\n dist_pre_q1 = dist\n # check if distance is longer than the previous wel \n if dist <= dist_pre_q1: \n name_aux_q1 = wl.name\n wl_q1 = wl\n dist_pre_q1 = dist\n # search for nearest well to MT station in quadrant 2 (Q2)\n if (wl.lat_dec < sta_obj.lat_dec and wl.lon_dec > sta_obj.lon_dec): \n # distance between station and well\n dist = dist_two_points([wl.lon_dec, wl.lat_dec], [sta_obj.lon_dec, sta_obj.lat_dec], type_coord = 'decimal')\n if not dist_pre_q2:\n dist_pre_q2 = dist\n # check if distance is longer than the previous wel \n if dist <= dist_pre_q2: \n name_aux_q2 = wl.name\n wl_q2 = wl\n dist_pre_q2 = dist\n # search for nearest well to MT station in quadrant 3 (Q3)\n if (wl.lat_dec < sta_obj.lat_dec and wl.lon_dec < sta_obj.lon_dec): \n # distance between station and well\n dist = dist_two_points([wl.lon_dec, wl.lat_dec], [sta_obj.lon_dec, sta_obj.lat_dec], type_coord = 'decimal')\n if not dist_pre_q3:\n dist_pre_q3 = dist\n # check if distance is longer than the previous wel \n if dist <= dist_pre_q3: \n name_aux_q3 = wl.name\n wl_q3 = wl\n dist_pre_q3 = dist\n # search for nearest well to MT station in quadrant 4 (Q4)\n if (wl.lat_dec > sta_obj.lat_dec and wl.lon_dec < sta_obj.lon_dec): \n # distance between station and well\n dist = dist_two_points([wl.lon_dec, wl.lat_dec], [sta_obj.lon_dec, sta_obj.lat_dec], type_coord = 'decimal')\n if not dist_pre_q4:\n dist_pre_q4 = dist\n # check if distance is longer than the previous wel \n if dist <= dist_pre_q4: \n name_aux_q4 = wl.name\n wl_q4 = wl\n dist_pre_q4 = dist\n\n # save names of nearest wells to be used for prior\n sta_obj.beta_wl_names = [name_aux_q1, name_aux_q2, name_aux_q3, name_aux_q4]\n sta_obj.beta_wl_names = list(filter(None, sta_obj.prior_meb_wl_names))\n near_wls = [wl_q1,wl_q2,wl_q3,wl_q4] #list of objects (wells)\n near_wls = list(filter(None, near_wls))\n dist_wels = [dist_pre_q1,dist_pre_q2,dist_pre_q3,dist_pre_q4]\n dist_wels = list(filter(None, dist_wels))\n sta_obj.beta_wl_dist = dist_wels\n\n # maximum depth for temp. 
profile based on weigthed average \n depth_near_wls = [wl.red_depth[-1] for wl in near_wls]\n sta_obj.max_depth_temp = np.dot(depth_near_wls,dist_wels)/np.sum(dist_wels)\n\n\n #### betas\n # betas consist of mean and std for parameter, calculate as weighted(distance) average from nearest wells\n b1_mean = np.zeros(len(near_wls))\n b1_std = np.zeros(len(near_wls))\n b2_mean = np.zeros(len(near_wls))\n b2_std = np.zeros(len(near_wls))\n b3_mean = np.zeros(len(near_wls))\n b3_std = np.zeros(len(near_wls))\n count = 0\n # extract betas from nearest wells \n #print(sta_obj.beta_wl_names)\n #print(sta_obj.beta_wl_dist)\n \n for wl in near_wls:\n # extract beta values from wl\n #wl.betas_3l = [[mean_beta1, std_beta1],[mean_beta2, std_beta2],[mean_beta3, std_beta3]]\n b1_mean[count], b1_std[count] = wl.betas_3l[0]\n b2_mean[count], b2_std[count] = wl.betas_3l[1]\n b3_mean[count], b3_std[count] = wl.betas_3l[2]\n count+=1\n # calculete betas normal dist. for MT stations\n b1_mean = np.dot(b1_mean,dist_wels)/np.sum(dist_wels)\n b1_std = np.dot(b1_std,dist_wels)/np.sum(dist_wels)\n b2_mean = np.dot(b2_mean,dist_wels)/np.sum(dist_wels)\n b2_std = np.dot(b2_std,dist_wels)/np.sum(dist_wels)\n b3_mean = np.dot(b3_mean,dist_wels)/np.sum(dist_wels)\n b3_std = np.dot(b3_std,dist_wels)/np.sum(dist_wels)\n\n # assign result to attribute\n sta_obj.betas_3l = [[b1_mean, b1_std],[b2_mean, b2_std],[b3_mean, b3_std]]\n #### Tmin\n # consist of mean and std for parameter, calculate as weighted(distance) average from nearest wells\n t1_mean = np.zeros(len(near_wls))\n t1_std = np.zeros(len(near_wls))\n t2_mean = np.zeros(len(near_wls))\n t2_std = np.zeros(len(near_wls))\n t3_mean = np.zeros(len(near_wls))\n t3_std = np.zeros(len(near_wls))\n count = 0\n # extract meb mcmc results from nearest wells \n for wl in near_wls:\n # extract beta values from wl\n #wl.betas_3l = [[mean_beta1, std_beta1],[mean_beta2, std_beta2],[mean_beta3, std_beta3]]\n t1_mean[count], t1_std[count] = wl.Tmin_3l[0]\n t2_mean[count], t2_std[count] = wl.Tmin_3l[1]\n t3_mean[count], t3_std[count] = wl.Tmin_3l[2]\n count+=1\n # calculete betas normal dist. for MT stations\n t1_mean = np.dot(t1_mean,dist_wels)/np.sum(dist_wels)\n t1_std = np.dot(t1_std,dist_wels)/np.sum(dist_wels)\n t2_mean = np.dot(t2_mean,dist_wels)/np.sum(dist_wels)\n t2_std = np.dot(t2_std,dist_wels)/np.sum(dist_wels)\n t3_mean = np.dot(t3_mean,dist_wels)/np.sum(dist_wels)\n t3_std = np.dot(t3_std,dist_wels)/np.sum(dist_wels)\n\n # assign result to attribute\n sta_obj.Tmin_3l = [[t1_mean, t1_std],[t2_mean, t2_std],[t3_mean, t3_std]]\n\n #### Tmax\n t1_mean = np.zeros(len(near_wls))\n t1_std = np.zeros(len(near_wls))\n t2_mean = np.zeros(len(near_wls))\n t2_std = np.zeros(len(near_wls))\n t3_mean = np.zeros(len(near_wls))\n t3_std = np.zeros(len(near_wls))\n count = 0\n # extract T bound. cond. from nearest wells \n for wl in near_wls:\n # extract beta values from wl\n #wl.betas_3l = [[mean_beta1, std_beta1],[mean_beta2, std_beta2],[mean_beta3, std_beta3]]\n t1_mean[count], t1_std[count] = wl.Tmax_3l[0]\n t2_mean[count], t2_std[count] = wl.Tmax_3l[1]\n # assign std for bottom Temp bound condition as the std of previus boundary (To be checked)\n t3_mean[count], t3_std[count] = wl.Tmax_3l[2][0], wl.Tmax_3l[1][1]\n count+=1\n # calculete betas normal dist. 
for MT stations\n t1_mean = np.dot(t1_mean,dist_wels)/np.sum(dist_wels)\n t1_std = np.dot(t1_std,dist_wels)/np.sum(dist_wels)\n t2_mean = np.dot(t2_mean,dist_wels)/np.sum(dist_wels)\n t2_std = np.dot(t2_std,dist_wels)/np.sum(dist_wels)\n t3_mean = np.dot(t3_mean,dist_wels)/np.sum(dist_wels)\n t3_std = np.dot(t3_std,dist_wels)/np.sum(dist_wels)\n\n # assign result to attribute\n sta_obj.Tmax_3l = [[t1_mean, t1_std],[t2_mean, t2_std],[t3_mean, t3_std]]",
"def get_neigh_demand(city):\n\n # get station set S with more than 10 charge equipment\n static_file_path = exp_data_path + os.sep + 'static' + os.sep + 'static_feature_{}.csv'.format(city)\n static_feature = pd.read_csv(static_file_path, header=0)\n station_set = set(static_feature[static_feature.num >= 10].index)\n\n # calculate 10 nearest neighborhoods for each station, sort by distance and store their index, get a map\n neighbor_distance_map = {}\n matrix_distance = np.load(exp_data_path + os.sep + 'similarity' + os.sep + 'similarity_distance_{}_numpy.npy'.format(city), allow_pickle=True)\n all_distance_map = {i: [] for i in range(station_count[city])}\n for i in range(station_count[city]):\n if i not in station_set:\n continue\n for j in range(station_count[city]):\n if j not in station_set:\n continue\n all_distance_map[i].append((j, matrix_distance[i][j]))\n all_distance_map[i].sort(key=lambda x : x[1], reverse=True)\n neighbor_distance_map[i] = [idx for idx, distance in all_distance_map[i][:10]]\n\n # 11 times header, get static neighborhood feature for each station(in S), get csv: neighbor_feature_{city}.csv\n ALL_HEADER = ['index']\n ALL_HEADER.extend(GENERAL_HEADER)\n for i in range(10):\n for j in GENERAL_HEADER:\n ALL_HEADER.append('{}_{}'.format(j, i))\n\n raw_data = np.empty((len(neighbor_distance_map), len(ALL_HEADER)))\n for i, idx in enumerate(neighbor_distance_map.keys()):\n raw_data[i][0] = idx\n raw_data[i][1:1+len(GENERAL_HEADER)] = static_feature.iloc[idx]['num':'mall']\n for j in range(10):\n neighbor_idx = neighbor_distance_map[idx][j]\n raw_data[i][1+len(GENERAL_HEADER)*(j+1):1+len(GENERAL_HEADER)*(j+2)] = static_feature.iloc[neighbor_idx]['num':'mall']\n neighbor_feature_data = pd.DataFrame(raw_data, columns=ALL_HEADER)\n print('neighbor feature')\n print(neighbor_feature_data)\n\n neighbor_feature_path = exp_data_path + os.sep + 'static' + os.sep + 'static_neighor_feature_{}.csv'.format(city)\n if os.path.exists(neighbor_feature_path):\n os.remove(neighbor_feature_path)\n neighbor_feature_data.to_csv(neighbor_feature_path)\n\n # create final csv(11 times header with basic info(time_index + time_embed_index))\n # if index in S, fill basic info, neighbor_feature and demand\n\n demand = np.load(exp_data_path + os.sep + 'station' + os.sep + 'demand_{}.npy'.format(city), allow_pickle=True)\n time_count = demand.shape[1]\n\n DEMAND_HEADER = []\n DEMAND_HEADER.extend(ALL_HEADER)\n DEMAND_HEADER.extend(['time_index', 'time_embed', 'demand'])\n neighbor_demand_raw_data = np.empty(((len(neighbor_distance_map)*time_count, len(DEMAND_HEADER))))\n\n # get time map like {\"0800\": 1, \"0830\": 2, ....}\n time_index_map = np.load(exp_data_path + os.sep + 'station_list' + os.sep + 'time_index.npy', allow_pickle=True)\n time_index_map = dict(time_index_map.tolist())\n time_map = {t: i for i, t in enumerate(sorted(set([k[-4:] for k in time_index_map['rev_index'].keys()])))}\n\n cur_idx = 0\n for time_idx in range(time_count):\n time_embed_idx = time_map[time_index_map['index'][time_idx][-4:]]\n for station_idx in station_set:\n neighbor_demand_raw_data[cur_idx][0:len(ALL_HEADER)] = neighbor_feature_data.loc[neighbor_feature_data['index']==station_idx, 'index':'mall_9']\n neighbor_demand_raw_data[cur_idx][len(ALL_HEADER)] = time_idx\n neighbor_demand_raw_data[cur_idx][len(ALL_HEADER)+1] = time_embed_idx\n neighbor_demand_raw_data[cur_idx][len(ALL_HEADER)+2] = demand[station_idx][time_idx][-1]\n # todo add slow demand and quick demand here\n cur_idx = cur_idx + 1\n print(cur_idx, 
neighbor_demand_raw_data.shape)\n\n neighbor_demand_data = pd.DataFrame(neighbor_demand_raw_data, columns=DEMAND_HEADER)\n print('neighbor demand')\n print(neighbor_demand_data)\n\n neighbor_demand_path = exp_data_path + os.sep + 'static' + os.sep + 'neighbor_demand_{}.csv'.format(city)\n if os.path.exists(neighbor_demand_path):\n os.remove(neighbor_demand_path)\n neighbor_demand_data.to_csv(neighbor_demand_path)",
"def Fetch_station(long, lat, y):\r\n global ddf\r\n dmin = 1000000\r\n rs = 0\r\n i=0\r\n for i in range(len(ddf[y])):\r\n #Calculate the distance between zip code location and weather station location\r\n dnew = Distance_orthonormique(ddf[y]['LON'][i], ddf[y]['LAT'][i], long, lat)\r\n\r\n if(dmin > dnew):\r\n #If the last smaller distance is superior than the current distance :\r\n #the new smaller distance is the current distance\r\n dmin = dnew\r\n rs = i\r\n\r\n #rs = index dataframe weather station\r\n #ddf[y]['STATION NAME'][rs] = Weather station name\r\n #round(dmin, 2) = Distance between weather station and zip code\r\n \r\n return rs, ddf[y]['STATION NAME'][rs], round(dmin,2)",
"def parse_addresses_and_stations_from_snowfall():\n df = pd.read_csv('data/snowfall.csv')\n\n addresses, stations = [], []\n for name, group in df.groupby(['lat', 'lon']):\n if name[0] == 0 or name[1] == 0:\n continue\n\n # parse address information\n r = requests.get('https://maps.googleapis.com/maps/api/geocode/json?latlng=%s,%s&key=%s' %\n (name[0], name[1], settings.GEOLOCATION_API_KEY))\n\n components = json.loads(r.content)['results'][0]['address_components']\n addresses.append(parse_location_components(components, name[0], name[1]))\n\n # parse station information\n station = dict()\n\n name = pd.unique(group.location_name)[0]\n station['station_id'] = name[name.find('(') + 1:-1].strip().lower()\n\n parts = name[:name.find(',')].split(' ')\n for i, s in enumerate(parts):\n if s.isdigit() or s not in \\\n ['N', 'NE', 'NNE', 'ENE', 'E', 'ESE', 'SSE',\n 'SE', 'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW']:\n parts[i] = s.title()\n\n station['name'] = ' '.join(parts)\n station['source'] = 'NOAA'\n station['latitude'] = pd.unique(group.lat)[0]\n station['longitude'] = pd.unique(group.lon)[0]\n\n stations.append(station)\n\n pd.DataFrame(addresses).to_csv('data/addresses_snowfall.csv', index=False)\n pd.DataFrame(stations).to_csv('data/stations_snowfall.csv', index=None)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get predictors based on their distance. The predictors are selected as follows: [1,2], [1,3], [1,4], [2,3], [2,4], [2,5], [2,6]
|
def __getpredictors_distance(self, staname, distance):
distfromsta = distance[staname]
    del distfromsta[staname]  # remove the station to be filled from the dataframe
distfromsta = distfromsta.sort_values()
stations = self.network.getsta(distfromsta.index.values)
# station = self.network.getsta(staname)
# Only 3 closest stations
    # sel1 = [ (i,e) for i,e in zip(stations[0:2], stations[1:3])] # selection of predictors with spacing 1
    # sel2 = [ (i,e) for i,e in zip(stations[0:2], stations[2:4])] # selection of predictors with spacing 2
    # Use all stations
    sel1 = [(i, e) for i, e in zip(stations[0:-1], stations[1:])]  # selection of predictors with spacing 1
    sel2 = [(i, e) for i, e in zip(stations[0:-2], stations[2:])]  # selection of predictors with spacing 2
    # sel3 = [ (i,e) for i,e in zip(stations[0:-3], stations[3:])] # selection of predictors with spacing 3
    # sel4 = [ (i,e) for i,e in zip(stations[0:-4], stations[4:])] # selection of predictors with spacing 4
# Only 3 closest stations
    # sel1names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:2], stations[1:3])] # selection of predictors with spacing 1
    # sel2names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:2], stations[2:4])] # selection of predictors with spacing 2
    # using all stations
    sel1names = [(i.getpara('stanames'), e.getpara('stanames')) for i, e in
                 zip(stations[0:-1], stations[1:])]  # selection of predictors with spacing 1
    sel2names = [(i.getpara('stanames'), e.getpara('stanames')) for i, e in
                 zip(stations[0:-2], stations[2:])]  # selection of predictors with spacing 2
    # sel3names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:-3], stations[3:])] # selection of predictors with spacing 3
    # sel4names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:-4], stations[4:])] # selection of predictors with spacing 4
    selection = [x for x in itertools.chain.from_iterable(itertools.zip_longest(sel1, sel2)) if x]
    selectionnames = [x for x in itertools.chain.from_iterable(itertools.zip_longest(sel1names, sel2names)) if x]
return selection, selectionnames
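
# --- Illustrative sketch (not part of the original method) ---
# A minimal, standalone example of the interleaving used above, assuming `stations`
# is already sorted by distance; the station names below are made up.
import itertools

example_stations = ['sta1', 'sta2', 'sta3', 'sta4', 'sta5']
pairs_spacing1 = list(zip(example_stations[0:-1], example_stations[1:]))  # (sta1,sta2), (sta2,sta3), ...
pairs_spacing2 = list(zip(example_stations[0:-2], example_stations[2:]))  # (sta1,sta3), (sta2,sta4), ...
interleaved = [x for x in itertools.chain.from_iterable(
    itertools.zip_longest(pairs_spacing1, pairs_spacing2)) if x]
# interleaved == [('sta1', 'sta2'), ('sta1', 'sta3'), ('sta2', 'sta3'),
#                 ('sta2', 'sta4'), ('sta3', 'sta4'), ('sta3', 'sta5'), ('sta4', 'sta5')]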
|
[
"def getpredictors_distance( staname, distance):\n\n distfromsta = distance[staname]\n try:\n del distfromsta[staname] # remove the station to be fill from the dataframe\n except:\n pass\n distfromsta = distfromsta.sort_values()\n\n stations = distfromsta.index\n\n sel1 = [(i, e) for i, e in zip(stations[0:-1], stations[1:])] # selction predictors with spacing 1\n sel2 = [(i, e) for i, e in zip(stations[0:-2], stations[2:])] # selction predictors with spacing 2\n\n selection= [None] * (len(sel1) + len(sel2))\n selection[::2] = sel1\n selection[1::2] = sel2\n\n return selection[:4]",
"def multi_pred(model,query,neg_list,anchor_pos_list,anchor_neg_list):\n predictions = model.predict([len(neg_list)*[query],neg_list,anchor_pos_list,anchor_neg_list])\n #mean_dist = predictions[:,0].mean()\n mean_rel = (predictions[:,0]>predictions[:,3]).mean()\n #print('Dist',mean_dist,'Rel',mean_rel)\n return mean_rel#np.min([mean_dist,mean_rel])",
"def pred_for_user(self,u):\r\n ids=np.where(self.Y_data_n[:,0]==u)[0]\r\n items_rated_by_u=Y_data_n[ids,1].tolist()\r\n pred_ratings=[]\r\n for i in range(self.n_items):\r\n if i not in items_rated_by_u:\r\n pred_ratings.append(self.pred(u,i))\r\n return pred_ratings",
"def get_predictors(path, name=None):\n\n preds = []\n for pred in predictors:\n P = get_results(path, pred, name)\n if P.data is not None and len(P.data)>0:\n preds.append(P)\n return preds",
"def find_most_diverse_preds(true_masks, pred_masks) -> pd.DataFrame:\n\n\n\n pass",
"def predict(self, features):\n distances = np.ones((features.shape[0], self.n_clusters))\n for j in range(self.n_clusters):\n for l in range(features.shape[0]):\n distances[l, j] = distance.euclidean(features[l, :], self.means[j, :])\n predictions = np.argmin(distances, axis=1)\n return predictions",
"def predict(self, query: np.ndarray):\n assert query.shape == self._training_set[1, :-1].shape, \"Size of the query does not match the size of the\" \\\n \" training set, Which is: \"\\\n + str(self._training_set[1, :-1].shape)\n tmp = (self._training_set[:, :-1] - query).astype(float)\n distances = np.linalg.norm(tmp, axis=1)\n\n index = np.argsort(distances)\n sorted_set = self._training_set[index, :]\n\n (unique, counts) = np.unique(sorted_set[:self._k, -1], return_counts=True)\n\n return unique[counts == np.max(counts)][0]",
"def distance_rank(active, pred, decoys, dist_func=jaccard):\n\n pred_dist = dist_func(active, pred)\n rank = 0\n for lig in decoys:\n d = dist_func(active, lig)\n if d < pred_dist:\n rank += 1\n return 1- (rank / (len(decoys) + 1))",
"def predict_only(self):",
"def get_predictors(self):\n\t\treturn self.predictors",
"def closest_preds_by_ind(self, pred, number=50):\n # Get the parameters for the predicate\n vec = self.pred_wei[pred]\n if not vec.any(): # if all entries are zero\n return None\n \n # Find the distance to other predicates\n dot_prod = dot(self.pred_wei, vec)\n dist = dot_prod / norm(self.pred_wei, axis=1)\n dist = nan_to_num(dist)\n # The closest pred will have the second largest dot product\n # (Largest is the pred itself)\n return dist.argpartition(tuple(range(-1-number,0)))[-1-number:-1]",
"def get_predicted_places(results):\n\n ranks = rankdata([-x.prior_mu for x in results], method='min')\n for result, rank in zip(results, ranks):\n result.predicted_place = int(rank) # convert from numpy dtype",
"def predict(self, X):\n dists = euclidean_distances (self.X_mean, X)\n preds = np.array (dists).argmin (axis = 0) + 1\n return preds",
"def test_review_data():\n print(\"learning done\")\n data, labels = [], []\n for rating, category in enumerate(partition):\n length = len(category) // 2\n data += category[length:]\n # reviews in each category have their star rating as the label\n labels += [rating + 1] * length\n print(\"beginning to test model\")\n abs_dist = []\n for datum, label in zip(data, labels):\n abs_dist.append(abs(label - classifier(datum)))\n return abs_dist",
"def _calc_distances(preds, targets, mask, normalize):\n N, K, _ = preds.shape\n _mask = mask.copy()\n _mask[np.where((normalize == 0).sum(1))[0], :] = False\n distances = np.full((N, K), -1, dtype=np.float32)\n normalize[np.where(normalize <= 0)] = 1000000.0\n distances[_mask] = np.linalg.norm(((preds - targets) / normalize[:, None, :])[_mask], axis=-1)\n return distances.T",
"def nn_classifier(test_X, train_X, train_y):\n#We create an array for you to populate with your class predictions\n#Go through each sample in test_X and predict its class\n#based on the label of its nearest neighbor in train_X.\n#Insert this prediction in 'predictions'\n#(Use Euclidean Distance as your distance metric here)\n predictions = np.zeros(test_X.shape[0])\n\n for i, row in enumerate(test_X):\n hold = distances(row, train_X)\n predict = train_y[np.argmin(hold)]\n predictions[i] = predict\n return predictions",
"def cal_distance(self, poses):\n\n if len(poses) > 0:\n pose_dis = []\n for pose in poses:\n feature_dis = []\n keypoints = pose.keypoints\n keypoints = self.keypoint_filter.update(keypoints)\n for f_name_1, f_name_2, dis in FEATURE_DISTANCES:\n keypoint_1 = keypoints[f_name_1]\n keypoint_2 = keypoints[f_name_2]\n if keypoint_1.score > THRESHOLD and keypoint_2.score > THRESHOLD:\n # quality sufficient -> use keypoints\n pix_distance = np.linalg.norm(\n keypoint_1.yx - keypoint_2.yx, ord=1)\n distance = dis * FOCAL_LENGTH / pix_distance\n # filtering odd values\n # distance = self.outlier_rejection.update(distance, f_name_1)\n # only append distance, if it doesn't differenciate too much from the same keypoint, last frame\n # if distance != -1:\n feature_dis.append(distance)\n if len(feature_dis) > 0:\n # avg over feature distances\n distance = sum(feature_dis) / len(feature_dis)\n pose_dis.append(distance)\n if len(pose_dis) > 0:\n # max of pose distances -> wost case\n distance = max(pose_dis)\n return distance\n return None",
"def get_prediction():\n \n ################################################\n # put your original prediction code here\n ################################################\n \n pred_proba = np.round(np.random.rand(), 4)\n pred_class = random.choice([\"cat\", \"dog\", \"monkey\"])\n return pred_class, pred_proba",
"def predict(self, xs):"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the selections sorted by their correlation (r-squared) scores
|
def __sort_predictors_by_corr(self, station, selections, var, From, To, by, how, constant=True,
selectionsnames=None, sort_cor=True, cor_lim=None):
scores_corel = pd.DataFrame(index=np.arange(0, len(selections)), columns=['corel', 'selections', 'params',
'selectionname']) # correlation of each selections and variables
for i, (selection, selectionname) in enumerate(zip(selections, selectionsnames)):
try:
Y = station.getData(var, From=From, To=To, by=by, how=how) # variable to be filled
X1 = selection[0].getData(var, From=From, To=To, by=by, how=how) # stations variable used to fill
X2 = selection[1].getData(var, From=From, To=To, by=by, how=how) # stations variable used to fill
data = pd.concat([Y, X1, X2], keys=['Y', 'X1', 'X2'], axis=1, join='outer').dropna()
est = self.__MLR(data[['X1', 'X2']], data['Y'], constant=constant)
rsquared = est.rsquared
scores_corel.loc[i, 'corel'] = rsquared
scores_corel.loc[i, 'selections'] = selection
scores_corel.loc[i, 'selectionname'] = selectionname
if constant:
scores_corel.loc[i, 'params'] = [est.params[0], est.params[1], est.params[2]]
else:
scores_corel.loc[i, 'params'] = [est.params[0], est.params[1]]
except ValueError:
print('No data to do the multilinear regression. Put correlation = 0')
scores_corel.loc[i, 'selections'] = selection
scores_corel.loc[i, 'selectionname'] = selectionname
scores_corel.loc[i, 'corel'] = 0
scores_corel.loc[i, 'params'] = np.nan
if sort_cor:
scores_corel = scores_corel.sort_values('corel', ascending=False)
if cor_lim:
scores_corel = scores_corel[scores_corel['corel'] > cor_lim]
else:
scores_corel = scores_corel[scores_corel['corel'] > 0]
scores_corel.index = np.arange(0, len(scores_corel.index))
selections = scores_corel['selections'].values
params = scores_corel['params'].values
print("u" * 30)
print("Correlation coefficient of the multilinear regression")
print("u" * 30)
print(scores_corel[['corel', 'selectionname']])
print("u" * 30)
return selections, params
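
# Hedged note (an assumption, not stated in the original source): with constant=True the
# stored params appear to be [intercept, coef_X1, coef_X2], so a gap at the target
# station could later be reconstructed from the two predictor stations as
#     y_hat = params[0] + params[1] * x1 + params[2] * x2
# whereas with constant=False only the two coefficients are kept.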
|
[
"def corr_list(self):\n c = self.df.corr().abs()\n s = c.unstack()\n so = s.sort_values(ascending=False)\n i = int(len(so) ** (1/2))\n charts = so[i:]\n charts = charts[::2]\n if len(charts) > 3:\n charts = charts[:3]\n return charts.index, charts.values",
"def _sort_results_scored(self, result): # Sort scores from highest to lowest\n v1, v2 = result\n return v2, v1",
"def extract_sub_scores(self, idxs):\n\n\t\tnew_cv_res = CV_Results()\n\n\t\tfor j, (model_s, cur_res) in enumerate(self.res.items()):\n\t\t\tfor i,(k, lmbdas) in enumerate(cur_res.items()):\n\t\t\t\tfor lmbda, res_list in lmbdas.items():\n\t\t\t\t\tfor res in res_list:\n\t\t\t\t\t\tif res.ranks != None:\n\t\t\t\t\t\t\tres = Result(res.preds[idxs], res.true_vals[idxs], res.ranks[idxs], res.raw_ranks[idxs])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tres = Result(res.preds[idxs], res.true_vals[idxs], None, None)\n\t\t\t\t\t\t\n\t\t\t\t\t\tnew_cv_res.add_res(res, model_s, k, lmbda, self.nb_params_used[model_s][k])\n\n\t\treturn new_cv_res",
"def compute_correlations(struc_df, option, gamma, alpha):\n n_states = len(np.unique(struc_df.objnum))\n nodes = network.temp_node_info()\n adjacency = network.adjacency_mat(nodes)\n L = compute_limit_matrix(0.5, adjacency, n_states)\n L_vector = L.flatten()\n M = learn_sr(struc_df, gamma, alpha)\n M = M[2, 6]\n M_vector = M.flatten()\n\n if option == \"norm\":\n print(\"Norm of L - M: \")\n print(la.norm(L_vector - M_vector, np.inf))\n\n if option == \"correlation\":\n print(\"Correlation of L, M: \")\n print(np.dot(L_vector, M_vector) /\n (la.norm(L_vector) * la.norm(M_vector)))",
"def get_top_correlations(dataframe,columns,frame_type='spark'):\n if frame_type == 'spark':\n import math\n correlation_list = []\n correlations_finished = [] #hold correlatons done to prevent repitition\n for i, col_i in enumerate(columns):\n for j, col_j in enumerate(columns):\n if col_i+col_j not in correlations_finished: # don't repeat\n columns = [col_i,col_j]\n correlation = dataframe.stat.corr(col_i,col_j)\n if math.isnan(correlation):\n correlation=0.0\n correlation_list.append({\n 'columns': columns,\n 'correlation': correlation,\n 'correlation_abs':math.fabs(correlation),\n })\n # print({\n # 'columns': columns,\n # 'correlation': correlation,\n # 'correlation_abs':math.fabs(correlation),\n # })\n correlations_finished.append(col_i+col_j)\n #sort the list so highest correlations are first\n correlation_list = sorted(correlation_list, key=lambda x: x['correlation_abs'], reverse=True)\n return correlation_list\n else:\n pass",
"def test_correlations_2(self):\n smart_explainer = self.smart_explainer\n\n df = pd.DataFrame({\n \"A\": [8, 90, 10, 110],\n \"B\": [4.3, 7.4, 10.2, 15.7],\n \"C\": [\"C8\", \"C8\", \"C9\", \"C9\"],\n \"D\": [1, -3, -5, -10]\n }, index=[8, 9, 10, 11])\n\n output = smart_explainer.plot.correlations(df, max_features=3, facet_col='C')\n\n assert len(output.data) == 2\n assert len(output.data[0].x) == 3\n assert len(output.data[0].y) == 3\n assert output.data[0].z.shape == (3, 3)",
"def comparator(self):\n return self.get_scores()",
"def roc(matches, non_matches):\n sortedm = matches[:]\n sortedm.sort()\n dist_at_95 = sortedm[int(0.95 * len(matches))]\n dist_at_75 = sortedm[int(0.75 * len(matches))]\n dist_max = sortedm[-1]\n thresholds = list(np.linspace(dist_at_75, dist_max, 200))\n matches = np.array(matches)\n non_matches = np.array(non_matches)\n # number of true positives/false positives\n total_tp = float(len(matches))\n total_fp = float(len(non_matches))\n ## Threshold finding: I want to find tp and fp. Therefore\n ## look a distances between median, 75%Quartil (q3) and\n ## maximum distance in the matching histogramm.\n # med = np.median(matches)\n # q3 = np.median(matches[matches > med])\n # mx = np.max(matches)\n ## compute threshold by linear interplating\n ## between median, q3 and max distance\n # thresholds = list(np.linspace(q3, mx, 200))\n ## summary: list of tuples, threshold and (tp,fp) pair.\n curve = [{\"true_positive\": 0.95, \"false_positive\": np.sum(non_matches <= dist_at_95) / total_fp,\n \"threshold\": dist_at_95}]\n for thresh in thresholds:\n tp = np.sum(matches <= thresh) / total_tp\n fp = np.sum(non_matches <= thresh) / total_fp\n curve.append({\"true_positive\": tp, \"false_positive\": fp, \"threshold\": thresh})\n return curve",
"def correlation(self) -> List[float]:\n self.pearson_corr = self.sim_data[\"Human (mean)\"].corr(self.sim_data[\"assigned_sim\"], method=\"pearson\")\n self.spearman_corr = self.sim_data[\"Human (mean)\"].corr(self.sim_data[\"assigned_sim\"], method=\"spearman\")\n return [self.pearson_corr, self.spearman_corr]",
"def GetCorrelationLoadings(self):\n\n # Creates empty matrix for correlation loadings\n self.correlationLoadings = zeros((shape(self.scores)[1], shape(self.originalMatrix)[1]), Float)\n\n # Calculates correlation loadings with formula:\n # correlation = cov(x,y)/(std(x)*std(y))\n\n # For each PC in score matrix\n for PC in range(shape(self.scores)[1]):\n PCscores = self.scores[:, PC]\n PCscoresSTD = std(PCscores)\n # For each variable/attribute in original matrix (not meancentered)\n for var in range(shape(self.originalMatrix)[1]):\n origVar = self.originalMatrix[:, var]\n origVarSTD = std(origVar)\n # If std = 0 for any variable an OverflowError occurs.\n # In such a case the value 0 is written in the matrix\n try:\n self.correlationLoadings[PC, var] = cov(PCscores, origVar) / (PCscoresSTD * origVarSTD)\n except OverflowError:\n self.correlationLoadings[PC, var] = 0\n\n return self.correlationLoadings",
"def find_perfect_corr(df): \n corrMatrix = df.corr()\n corrMatrix.loc[:,:] = numpy.tril(corrMatrix.values, k = -1)\n already_in = set()\n result = []\n for col in corrMatrix:\n perfect_corr = corrMatrix[col][abs(numpy.round(corrMatrix[col],10)) == 1.00].index.tolist()\n if perfect_corr and col not in already_in:\n already_in.update(set(perfect_corr))\n perfect_corr.append(col)\n result.append(perfect_corr)\n toRemove = []\n for item in result:\n toRemove.append(item[1:(len(item)+1)])\n toRemove = sum(toRemove, [])\n return {'corrGroupings':result, 'toRemove':toRemove}",
"def best_subset_ls(X: np.ndarray, y: np.ndarray):\n scores = []\n subsets = []\n for i in subset_iterator(X.shape[1]):\n X_train, X_test, y_train, y_test = train_test_split(X[:, i], y, proportion=0.9, shuffle=False)\n ws = least_squares(X_train, y_train)\n scores.append(R_squared(y_test, predict(X_test, ws)))\n subsets.append(i)\n\n return scores, subsets",
"def find_correlations(self):\n self.raw_data = self.ds.get_raw_data().copy()\n # convert dataset to numberic categories\n d = pd.DataFrame()\n for attr in self.raw_data.columns.values:\n d[attr] = self.raw_data[attr].astype('category').cat.codes\n # drop columns with only one value and tid column\n d = d.loc[:, (d != 0).any(axis=0)]\n d = d.drop(['_tid_'], axis=1)\n # Computer correlation across attributes\n m_corr = d.corr()\n self.correlations = m_corr",
"def find_perfect_corr(df): \n corrMatrix = df.corr()\n corrMatrix.loc[:,:] = numpy.tril(corrMatrix.values, k = -1)\n already_in = set()\n result = []\n for col in corrMatrix:\n perfect_corr = corrMatrix[col][abs(numpy.round(corrMatrix[col],10)) == 1.00].index.tolist()\n if perfect_corr and col not in already_in:\n already_in.update(set(perfect_corr))\n perfect_corr.append(col)\n result.append(perfect_corr)\n toRemove = []\n for item in result:\n toRemove.append(item[1:(len(item)+1)])\n toRemove = sum(toRemove, [])\n return {'corrGroupings':result, 'toRemove':toRemove}",
"def compute_correlation_separability_score(self) -> float:\n sep_scores = pd.DataFrame.from_dict(self.separability_scores).to_numpy()\n sep_scores = minmax_scale(sep_scores)\n corrs = {}\n for tumor_pair in range(sep_scores.shape[1]):\n corr_sep_score = np.corrcoef(PATHO_PRIOR[:, tumor_pair], sep_scores[:, tumor_pair])\n corrs[tumor_pair] = corr_sep_score[1, 0]\n corrs['agg_with_risk'] = sum(\n np.array([val for _, val in corrs.items()]) *\n RISK\n ) \n corrs['agg'] = sum([val for key, val in corrs.items() if type(key)==int]) \n return corrs",
"def __get_best_items(matches):\n best_matches = []\n matches.sort(key=lambda match: match.score, reverse=True)\n best_score = matches[0].score\n i = 0\n nbMatches = len(matches)\n while i < nbMatches and matches[i].score == best_score:\n best_matches.append(matches[i])\n i += 1\n return best_matches",
"def calc_rocchio(original, relevant_vectors, nonrelevant_vectors):\n print('orig' + str(len(original)))\n if len(relevant_vectors) > 0: print('rv 1st len' + str(len(relevant_vectors[0])))\n if len(nonrelevant_vectors) > 0: print('nr 1st len' + str(len(nonrelevant_vectors[0])))\n rv_count = len(relevant_vectors)\n nr_count = len(nonrelevant_vectors)\n rv_sum = np.add.reduce(relevant_vectors)\n print('rv_sum' + str(rv_sum) + 'rv_count' + str(rv_count))\n nr_sum = np.add.reduce(nonrelevant_vectors)\n print('nr_sum' + str(nr_sum) + 'nr_count' + str(nr_count))\n updated_relevance = cg.ROCCHIO_ALPHA * original \\\n + cg.ROCCHIO_BETA * (1/rv_count if rv_count else 1) * rv_sum \\\n - cg.ROCCHIO_GAMMA * (1/nr_count if nr_count else 1) * nr_sum\n #only keep terms above minimum threshold (also serves to exclude negative values)\n print('before')\n print(updated_relevance[:40])\n updated_relevance = [0 if wgt < cg.ROCCHIO_MIN else wgt for wgt in updated_relevance]\n print('after')\n print(updated_relevance[:40])\n return updated_relevance",
"def topMatches(prefs, person, n=5, similarity=sim_pearson):\n scores=[(similarity(prefs,person,other), other) for other in prefs if other!=person]\n\n scores.sort()\n scores.reverse()\n return scores[0:n]",
"def correlation(data, method, caption):\n columns = list(data)\n coefficients = data.astype(float).corr(method=method)\n results = []\n for i in range(len(columns)):\n for j in range(i + 1, len(columns)):\n coefficient = coefficients[columns[i]][columns[j]]\n results.append((\n abs(coefficient), coefficient,\n columns[i] + ' x ' + columns[j]))\n print('# ' + caption + ', ' + method)\n for result in reversed(sorted(results)):\n abs_coefficient, coefficient, columns_pair = result\n print (coefficient, columns_pair)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Gets a remote file from a bucket using a connection
|
def _get(conn, remote_file, bucket_name=BUCKET_NAME):
contents = None
try:
reply = conn.get(bucket_name, remote_file)
contents = reply.body
if reply.http_response.status != 200:
            print('Failed to fetch current_remote metadata')
            contents = None
    except Exception:
contents = None
return contents
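
# Hedged usage sketch -- the connection class below comes from the legacy Amazon S3
# sample library this module appears to rely on (reply.body / reply.http_response);
# treat the class name and key as assumptions, not documented dependencies:
#     conn = S3.AWSAuthConnection(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
#     metadata = _get(conn, 'current_remote.json')
#     if metadata is None:
#         pass  # object missing or request failed; the caller decides how to recover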
|
[
"def get(\n self, writer: protocols.ByteWriter, bucket_name: str = None, key_name: str = None\n ) -> None:\n s3_bucket, s3_key = self._fetch_bucket_and_key(bucket_name, key_name)\n\n if not self._isfile(s3_bucket, s3_key):\n raise exceptions.S3Error(\"Unable to fetch the remote file\")\n\n self.conn.download_fileobj(s3_bucket, s3_key, writer)",
"def get_blob(bucket_name, blob_name, file):\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.blob(blob_name)\n blob.download_to_file(file)\n print(f\"File {blob_name} downloaded from {bucket_name} to {file.name}.\")",
"def __retrieve_from_bucket(fname):\n blob = BUCKET.blob(fname)\n json_data = json.loads(blob.download_as_string())\n return json_data",
"def _retrieve(cls, connection, bucket_uuid):\n return connection.retrieve_bucket(uuid=bucket_uuid)",
"def _download(path: str):\n\n try:\n resp = requests.get(path)\n resp.raise_for_status()\n except requests.RequestException:\n logging.exception(\"Could not connect to bucket, URL may be out of service!\")\n raise ConnectionError\n\n return resp",
"def remote_resource(cloud_config):\n remote_uri = 'http://storage.googleapis.com/{}/'.format(\n cloud_config.storage_bucket)\n\n return lambda path, tmpdir: fetch_gcs_resource(\n remote_uri + path.strip('/'), tmpdir)",
"def download_file(file):\n ring = RINGS[file.owner.username]\n clouds = ring.lookup(long(file.identifier))\n container = file.owner.username\n for cloud in clouds:\n # Init cloud's connector\n connector = Client(version='1.0.0', resource='object_storage',\n provider=cloud.provider)\n try:\n object_stat = connector.stat_object(container,\n file.path.strip('/'))\n except Exception:\n continue\n\n # Temporary handle.\n stream_key = 1\n\n # drop cause not cloud s3 amazon <if have s3, need uncomment>\n # if cloud.type == 'amazon':\n # object_stat = object_stat['Metadata']\n # stream_key = 'Body'\n\n\n object_status = [object_stat[key]\n for key in object_stat.keys() if 'status' in key]\n\n if object_status[0] == 'UPDATED':\n file_content = connector.download_object(container, file.path.strip('/'))[stream_key]\n del connector\n gc.collect()\n # if cloud.type == 'amazon':\n # return file_content.read()\n return file_content\n return None",
"def download(bucket_name, file_key, local_file_path):\n message = None\n try:\n message = 'BLANK'\n client = storage.Client()\n\n # Handling exception when Bucket does not exist\n\n source_bucket = client.get_bucket(bucket_name)\n # Download file from GCP\n with open(local_file_path, 'wb') as file_obj:\n blob = source_bucket.blob(file_key)\n blob.download_to_file(file_obj)\n message = local_file_path\n except Exception as e:\n print(e)\n return message",
"def download_blob(bucket_name, source_blob_name):\n # bucket_name = \"your-bucket-name\"\n # source_blob_name = \"storage-object-name\"\n # destination_file_name = \"local/path/to/file\"\n\n storage_client = storage.Client()\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(source_blob_name)\n\n return blob",
"def gget(link, gauth: GoogleAuth) -> requests.models.Response:\n\n gauth._check_credentials()\n credentials = gauth.credentials\n\n file_id = api_file_url(link)\n r = requests.get(\n file_id,\n headers={\"Authorization\": f\"Bearer {credentials.token}\"},\n stream=True\n )\n\n try:\n r.raise_for_status()\n _l.debug(f'streaming from {link}')\n except requests.HTTPError as e:\n _l.error(e)\n\n return r",
"def download_blob(bucket_name, source_blob_name):\n\n storage_client = storage.Client()\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(source_blob_name)\n\n return blob",
"def open_file(client, bucket, key):\n logger = logging.getLogger(__name__)\n logger.info(\"Reading S3 file: %s\", key)\n\n response = client.get_object(Bucket=bucket, Key=key)\n _, type_params = cgi.parse_header(response[\"ContentType\"])\n\n stream = response[\"Body\"]\n charset = type_params.get(\"charset\", None)\n\n return stream, charset",
"def get(self, bucket: str, object_name: str) -> bytes:\n raise NotImplementedError()",
"def get_zipfile(bucket_name, file_name, dl_name):\n s3 = boto3.client('s3')\n s3.download_file(bucket_name, file_name, dl_name)",
"def scp_get_file(self, source_file, dest_file):\n self.scp_client.get(source_file, dest_file)",
"def get_image(filename):\n\n client.download_file(S3_BUCKET, filename, 'uploads/{}'.format(filename))",
"def get_remote_bytes(file_url) -> io.BytesIO:\n result = urlfetch.fetch(file_url)\n return io.BytesIO(result.content)",
"def get_blob(self, blob_name):\n return self.bucket.get_blob(blob_name)",
"def get_s3_object(s3_client, *, bucket, key):\n return s3_client.get_object(Bucket=bucket, Key=key)[\"Body\"].read()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Put some contents into a remote_file of a bucket using connection conn. Optionally, the headers can be specified.
|
def _put(conn, remote_file, contents, bucket_name=BUCKET_NAME, headers=None):
error_msg = 'Failed to upload to %s' % remote_file
try:
reply = conn.put(bucket_name, remote_file,
S3.S3Object(contents), headers)
if reply.http_response.status != 200:
            print(error_msg)
    except Exception:
        print(error_msg)
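
# Hedged usage sketch (bucket key and file name are illustrative only, and the call
# reuses the _get_content_type helper shown further below):
#     with open('style.css', 'rb') as fd:
#         _put(conn, 'static/style.css', fd.read(),
#              headers={'Content-Type': _get_content_type(fd)})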
|
[
"def put_file(self, host, local_file, remote_file):\n LOG.info(\"Transferring local file %s to %s on host %s\", local_file, remote_file, host)\n return execute(self._transfer_file, put, local_file, remote_file, host=host)",
"def upload_file(conn, filename_local, filename_s3, gzip=False):\n\n filename_s3 = filename_s3.lstrip('./')\n\n file_descriptor = open(filename_local, 'rb')\n content = file_descriptor.read()\n\n content_type = _get_content_type(file_descriptor)\n headers = _get_headers(content_type)\n\n #should compress if the file is compressable and gzip is enabled\n can_be_gzipped = _file_can_be_compressed(filename_local)\n if gzip and can_be_gzipped:\n content = _compress_string(content)\n headers['Content-Length'] = str(len(content))\n headers['Content-Encoding'] = 'gzip'\n extension = mimetypes.guess_extension(content_type)\n #we should not overwrite the original file in the server.\n #We change extensions: style.css --> style.gz.css, for instance\n filename_s3 = filename_s3.rstrip(extension) + '.gz' + extension\n\n #if gzip is enabled and it is not compressable, don't upload nothing at all\n elif gzip and not can_be_gzipped:\n return\n\n #upload\n print 'Uploading %s to %s' % (filename_local, filename_s3)\n _put(conn, filename_s3, content, headers=headers)\n file_descriptor.close()",
"def put(\n self, reader: protocols.ByteReader, bucket_name: str = None, key_name: str = None\n ) -> None:\n s3_bucket, s3_key = self._fetch_bucket_and_key(bucket_name, key_name)\n\n extra_args = {}\n if self.conn_encrypt:\n extra_args[\"ServerSideEncryption\"] = \"AES256\"\n\n self.conn.upload_fileobj(reader, s3_bucket, s3_key, ExtraArgs=extra_args)",
"def ingest_httpfile(self, url, dest, name=None, metadata={}, mimetype='application/octet-stream'):\n parsed = urlparse(url)\n if name is None:\n name = basename(parsed.path)\n try:\n tempfilename = download_tempfile(url)\n logger.debug(\"Downloaded file to: \"+tempfilename)\n with closing(open(tempfilename, 'rb')) as f:\n res = get_client().put(dest + name,\n f,\n metadata=metadata,\n mimetype=mimetype)\n if not res.ok():\n raise IOError(str(res))\n cdmi_info = res.json()\n logger.debug(\"put success for {0}\".format(json.dumps(cdmi_info)))\n except IOError as e:\n raise self.retry(exc=e)\n finally:\n os.remove(tempfilename)",
"def upload_file_handle(\n self,\n bucket: str,\n object_name: str,\n src_file_handle: typing.BinaryIO):\n raise NotImplementedError()",
"def test_put_object_from_file(self):\n self.get_file(20)\n response = self.bos.put_object_from_file(self.BUCKET, self.KEY, self.FILENAME)\n self.check_headers(response, [\"etag\"])",
"def upload_file(\n self, bucket_id: uplink.Path, filename: uplink.Path, file: uplink.Body\n ):\n pass",
"def upload_file(self, bucket_name, bucket_file_path, local_file_path):\n\n bucket = self.bucket_client.get_bucket(bucket_name)\n blob = bucket.blob(bucket_file_path)\n blob.upload_from_filename(local_file_path)\n print(\"uploaded file\")",
"def putFile(self, uri, f):\n files = {'file': open(f, 'rb')}\n self.put(uri, files=files)\n print(f + \" --> \" + self.url)",
"def store_s3_contents ( s3_conn, bucket_name, key_name, key_contents = None, key_contents_filename = None ) :\n bucket = s3_conn.get_bucket( bucket_name )\n key = boto.s3.key.Key( bucket )\n key.key = key_name\n if ( key_contents_filename ) :\n key.set_contents_from_filename( key_contents_filename )\n else :\n key.set_contents_from_string( key_contents )",
"def put_buffer(self, buffer: bytes, remote_path: str, **kwargs) -> None:\n if self.sftp_handler:\n file = self.sftp_handler.open(remote_path, 'wb')\n file.write(buffer)\n file.close()",
"def test_put_object_from_file_user_headers(self):\n\n user_headers = {\"Cache-Control\":\"private\", \n \"Content-Disposition\":\"attachment; filename=\\\"abc.txt\\\"\", \n \"Expires\":\"123456\"}\n\n self.get_file(5)\n response = self.bos.put_object_from_file(bucket=self.BUCKET,\n key=\"test_put_file_user_headers\",\n file_name=self.FILENAME,\n user_headers=user_headers)\n self.check_headers(response)\n\n response = self.bos.get_object_meta_data(bucket_name=self.BUCKET, \n key='test_put_file_user_headers')\n self.assertEqual(response.metadata.expires, \"123456\")\n self.assertEqual(response.metadata.content_disposition, 'attachment; filename=\"abc.txt\"')\n self.assertEqual(response.metadata.cache_control, 'private')",
"def get(\n self, writer: protocols.ByteWriter, bucket_name: str = None, key_name: str = None\n ) -> None:\n s3_bucket, s3_key = self._fetch_bucket_and_key(bucket_name, key_name)\n\n if not self._isfile(s3_bucket, s3_key):\n raise exceptions.S3Error(\"Unable to fetch the remote file\")\n\n self.conn.download_fileobj(s3_bucket, s3_key, writer)",
"def put_object(self, bucket, key, local_file_path=None, file_bytes=None) -> None:\n def upload_to_s3(byte_array):\n self.resource.Object(bucket, key).put(Body=byte_array)\n\n if file_bytes:\n upload_to_s3(file_bytes)\n else:\n with open(local_file_path, 'rb') as local_file:\n self.resource.Object(bucket, key).put(Body=local_file)",
"def upload(cls, local_file, remote_file='', bucket_name=QINIU_BUCKET_NAME):\n if remote_file == '':\n remote_file = cls.__gen_uuid()\n local_file = cls.__get_abs_path(local_file)\n url = \"/v1/qiniu/upload?key=%s&localFile=%s&token=root-weimiyun-9@usstpwd!\" % (remote_file, local_file)\n try:\n conn = httplib.HTTPConnection(UPLOAD_API_HOST)\n conn.request(method=\"POST\", url=url)\n response = conn.getresponse()\n res = response.read()\n if AUTO_DELETE:\n os.remove(local_file)\n return res, True\n except Exception, e:\n return 'Connection refused', False",
"def pushToS3()-> None:\n logging.info(f\"Connecting to s3 {getTime()}\")\n s3 = boto3.client(\"s3\",endpoint_url=\"http://localhost:4566\")\n if(not s3.head_bucket(Bucket=\"demo\")):\n s3.create_bucket(Bucket='demo')\n try:\n logging.info(f\"Uploading to s3 {getTime()}\")\n s3.upload_file(\"result.csv\",\"demo\",\"result.csv\")\n logging.info(f\"Finished uploding to s3 {getTime()}\")\n except ClientError as e:\n logging.error(f\"Error uploading file to S3 {getTime()}\")",
"def _upload_file(sftp, local_file, remote_file) -> None:\n # Check if local_file is a file-like object and use the proper\n # paramiko function to upload it to the remote machine.\n if hasattr(local_file, \"read\"):\n sftp.putfo(local_file, remote_file)\n else:\n sftp.put(local_file, remote_file)",
"def put_bytes(buf: bytes, bucket: str, key: str, tags: dict = {}, acl: str = 'private') -> Tuple[str, str, int]:\n logger.debug(f'Writing {len(buf)} bytes to s3://{bucket}/{key}')\n tagging = urllib.parse.urlencode(tags)\n client().put_object(Bucket=bucket, Key=key, Body=buf, Tagging=tagging, ACL=acl)\n return (bucket, key, len(buf))",
"def scp_put_file(self, source_file, dest_file):\n self.scp_client.put(source_file, dest_file)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Guess the content_type of a file by using its file descriptor
|
def _get_content_type(file_descriptor):
content_type = mimetypes.guess_type(file_descriptor.name)[0]
if not content_type:
content_type = 'text/plain'
return content_type
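
A small illustration of the behaviour this helper leans on: mimetypes.guess_type() looks only at the name, so the descriptor never has to be read. The filenames below are made up for the example.

import mimetypes

# guess_type() works purely on the (file) name; unknown extensions yield None,
# which the helper above maps to the 'text/plain' default.
print(mimetypes.guess_type('style.css'))      # ('text/css', None)
print(mimetypes.guess_type('notes.unknown'))  # (None, None)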
|
[
"def get_content_type(filename):\n return mimetypes.guess_type(filename)[0] or 'application/octet-stream'",
"def guess_content_type ( self, path_info ) :\n _type, _enc = guess_type ( path_info )\n return _type",
"def _get_content_type(url):\r\n scheme, netloc, path, query, fragment = urlparse.urlsplit(url)\r\n if not scheme in ('http', 'https', 'ftp', 'ftps'):\r\n ## FIXME: some warning or something?\r\n ## assertion error?\r\n return ''\r\n req = Urllib2HeadRequest(url, headers={'Host': netloc})\r\n resp = urlopen(req)\r\n try:\r\n if hasattr(resp, 'code') and resp.code != 200 and scheme not in ('ftp', 'ftps'):\r\n ## FIXME: doesn't handle redirects\r\n return ''\r\n return resp.info().get('content-type', '')\r\n finally:\r\n resp.close()",
"def guess_content_type(filename):\n return mimetypes.guess_type(filename)[0]",
"def content_type(url):\n h = requests.head(url, allow_redirects=True)\n header = h.headers\n return header.get(\"content-type\").lower()",
"def content_type(fn):\n return {'Content-Type': utils.guess_mimetype(fn)}",
"def get_content_type(ct):\n content_type = ct\n\n if ct == \"csv\":\n content_type = \"text/csv\"\n elif ct == \"json\":\n content_type = \"application/json\"\n\n return content_type",
"def _content_type__get(self):\n header = self.headers.get('Content-Type')\n if not header:\n return None\n return header.split(';', 1)[0]",
"def guess_type_from_content(file_obj):\n first_bytes = file_obj.read(2)\n if first_bytes == b\"PK\":\n filetype = \"xlsx\"\n else:\n content = file_obj.read()\n if b\"\\t\" in content:\n filetype = \"tsv\"\n else:\n filetype = \"csv\"\n return filetype",
"def _guess_mime_type(self, name):\n ct, _ = mimetypes.guess_type(name)\n if ct and not self.content_type:\n self.content_type = ct\n if ct not in self.interoperability_types:\n warnings.warn(\n \"Image type %s is not guaranteed to be interoperable\" % ct\n )",
"def detect_content_type(self, path=None, payload=None):\n\n f = file_path(path, payload)\n switches = [\"-d\", f]\n result = self._command_template(switches).lower()\n return result, path, f",
"def get_content_type(file_path):\n\n try:\n magic_obj = magic.Magic(mime=True)\n magic_obj.file = magic_obj.from_file\n except AttributeError as e:\n magic_obj = magic.open(magic.MAGIC_MIME_TYPE)\n magic_obj.load()\n\n content_type = magic_obj.file(file_path)\n return content_type",
"def _guess_mimetype(self, file):\n if not is_exe_in_path('file'):\n return self.DEFAULT_MIMETYPE\n\n # The browser didn't know what this was, so we'll need to do\n # some guess work. If we have 'file' available, use that to\n # figure it out.\n p = subprocess.Popen(['file', '--mime-type', '-b', '-'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n stdin=subprocess.PIPE)\n\n # Write the content from the file until file has enough data to\n # make a determination.\n for chunk in file.chunks():\n try:\n p.stdin.write(chunk)\n except IOError:\n # file closed, so we hopefully have an answer.\n break\n\n p.stdin.close()\n ret = p.wait()\n\n if ret == 0:\n mimetype = p.stdout.read().strip()\n else:\n mimetype = None\n\n # Reset the read position so we can properly save this.\n file.seek(0)\n\n return mimetype or self.DEFAULT_MIMETYPE",
"def test_guess_content_type(self):\n self.assertEqual(\n 'text/cloud-config',\n userdata._guess_content_type('#cloud-config\\nbla')\n )\n self.assertEqual(\n 'text/upstart-job',\n userdata._guess_content_type('#upstart-job\\nbla')\n )\n self.assertEqual(\n 'text/cloud-boothook',\n userdata._guess_content_type('#cloud-boothook\\nbla')\n )\n self.assertEqual(\n 'text/part-handler',\n userdata._guess_content_type('#part-handler\\nbla')\n )\n self.assertEqual(\n 'text/x-shellscript',\n userdata._guess_content_type('#!/bin/sh echo')\n )\n self.assertEqual(\n 'text/x-include-url',\n userdata._guess_content_type('#include \\n\\n http://')\n )\n self.assertIsNone(\n userdata._guess_content_type('whatever')\n )",
"def content_type(self):\n return self.get_header('Content-Type') or ''",
"def best_match_content_type(self): \n # First lookup http request path\n parts = self.path.rsplit('.', 1)\n if len(parts) > 1:\n _format = parts[1]\n if _format in ['json', 'xml']:\n return 'application/{0}'.format(_format)\n \n #Then look up content header\n type_from_header = self.get_content_type()\n if type_from_header:\n return type_from_header\n ctypes = ['application/json', 'application/xml']\n \n #Finally search in Accept-* headers\n bm = self.accept.best_match(ctypes)\n return bm or 'application/json'",
"def _check_url_file_type(headers: Dict[str, str]) -> Optional[str]:\n content_type = headers.get(\"content-type\", \"\").lower()\n file_type = None\n\n for extension in SUPPORTED_MIME_TYPES.keys():\n for mime_type in SUPPORTED_MIME_TYPES.get(extension, []):\n if mime_type in content_type:\n file_type = extension\n break\n\n return file_type",
"def best_match_content_type(self):\n # First lookup http request path\n parts = self.path.rsplit('.', 1)\n if len(parts) > 1:\n _format = parts[1]\n if _format in ['json', 'xml']:\n return 'application/{0}'.format(_format)\n\n #Then look up content header\n type_from_header = self.get_content_type()\n if type_from_header:\n return type_from_header\n ctypes = ['application/json', 'application/xml']\n\n #Finally search in Accept-* headers\n bm = self.accept.best_match(ctypes)\n return bm or 'application/json'",
"def test_content_type(self):\n self.assertEqual(self.res.content_type, \"application/octet-stream\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Checks whether a given file (with name filename) can be compressed. content_type is optional and can speed up the check. Should return True if the file is a text type (CSS/JS)
|
def _file_can_be_compressed(filename):
content_type = ''
with open(filename, 'rb') as f:
content_type = _get_content_type(f)
return content_type in TEXT_TYPES
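
TEXT_TYPES is a module-level constant that is not shown in this snippet. A plausible definition, matching the CSS/JS intent of the description, might look like the following; the exact tuple is an assumption, not the original module's value.

# Assumed definition -- the real module may list more or fewer MIME types.
TEXT_TYPES = (
    'text/plain',
    'text/css',
    'text/html',
    'application/javascript',
    'application/x-javascript',
)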
|
[
"def check_gzip_path(file_path):\n _, ftype = mimetypes.guess_type(file_path)\n return ftype == 'gzip'",
"def check_compressed_file(filename):\n\n recognized_exts = COMPRESSED_TYPES\n \n # Check the two last extensions\n # (to recognize also composed extensions such as tar.gz)\n filename_noext1, ext1 = os.path.splitext(filename)\n filename_noexts, ext2 = os.path.splitext(filename_noext1)\n \n if ext2+\".\"+ext1 in recognized_exts:\n return ext2+\".\"+ext1\n\n if ext1 in recognized_exts:\n return ext1\n\n return None",
"def detect_compression_type(cls, file_path):\n compression_types_by_suffix = {'.bz2': cls.BZIP2, '.gz': cls.GZIP}\n lowercased_path = file_path.lower()\n for suffix, compression_type in compression_types_by_suffix.iteritems():\n if lowercased_path.endswith(suffix):\n return compression_type\n return cls.UNCOMPRESSED",
"def isgzip(filename):\n magic_number = b'\\x1f\\x8b\\x08'\n with open(filename, 'rb') as f:\n file_start = f.read(len(magic_number))\n\n if magic_number == file_start:\n return True\n return False",
"def is_archive(afile):\n return file_ext(os.path.basename(afile)) in ARCHIVE_COMPRESS_FORMATS",
"def is_gzip_file(f):\r\n with open(f, 'rb') as i:\r\n return binascii.hexlify(i.read(2)) == b'1f8b'",
"def test_gzip(handler,config):\r\n if not config.gzip:\r\n return False\r\n if not gzip_support:\r\n return False\r\n accept_encoding = handler.headers.get('accept-encoding','').split(',')\r\n accept_encoding = [ x.strip() for x in accept_encoding ]\r\n ctype = handler.resp_headers[\"Content-type\"]\r\n # if gzip is supported by the user agent,\r\n # and if the option gzip in the configuration file is set, \r\n # and content type is text/ or javascript, \r\n # set Content-Encoding to 'gzip' and return True\r\n if 'gzip' in accept_encoding and \\\r\n ctype and (ctype.startswith('text/') or \r\n ctype=='application/x-javascript'):\r\n return True\r\n return False",
"def isPackedFile(file: java.io.File) -> bool:\n ...",
"def is_zip(filepath):\n\treturn os.path.splitext(filepath)[1] == '.gz'",
"def is_gzip_compressed(chunk: bytes):\n if not chunk[:2] == b\"\\x1f\\x8b\":\n raise OSError(\"Not a gzipped file\")",
"def detect_compression_type(file_patterns: tf.Tensor) -> tf.Tensor:\n # Implementation notes:\n # Because the result of this function usually feeds to another\n # function that creates a TF dataset, and the whole dataset creating logic\n # is usually warpped in an input_fn in the trainer, this function must\n # be a pure composition of TF ops. To be compatible with\n # tf.compat.v1.make_oneshot_dataset, this function cannot be a @tf.function,\n # and it cannot contain any conditional / stateful op either.\n # Once we decide to stop supporting TF 1.x and tf.compat.v1, we can rewrite\n # this as a @tf.function, and use tf.cond / tf.case to make the logic more\n # readable.\n\n files = tf.io.matching_files(file_patterns)\n is_gz = tf.strings.regex_full_match(files, r\".*\\.gz$\")\n all_files_are_not_gz = tf.math.reduce_all(~is_gz)\n all_files_are_gz = tf.math.reduce_all(is_gz)\n # Encode the 4 cases as integers 0b00 - 0b11 where\n # `all_files_are_not_gz` is bit 0\n # `all_files_are_gz` is bit 1\n # 00: invalid, some files are gz some files are not\n # 01: all are not gz\n # 10: all are gz\n # 11: the only possibility is `files` is empty, can be arbitrary.\n formats = tf.constant([\"INVALID_MIXED_COMPRESSION_TYPES\", \"\", \"GZIP\", \"\"])\n index = (\n tf.bitwise.left_shift(tf.cast(all_files_are_gz, tf.int32), 1) +\n tf.cast(all_files_are_not_gz, tf.int32))\n\n return formats[index]",
"def should_compress_file(mastercompress, filecompress, exitcode):\n\n if miscutils.fwdebug_check(6, \"PFWUTILS_DEBUG\"):\n miscutils.fwdebug_print(\"BEG: master=%s, file=%s, exitcode=%s\" % (mastercompress, filecompress, exitcode))\n\n mcompress = mastercompress\n if isinstance(mastercompress, str):\n mcompress = mastercompress.lower()\n\n fcompress = miscutils.convertBool(filecompress)\n\n if mcompress == 'success':\n if exitcode != 0:\n mcompress = 'never'\n else:\n mcompress = 'file'\n\n retval = (mcompress == 'file' and fcompress)\n\n if miscutils.fwdebug_check(6, \"PFWUTILS_DEBUG\"):\n miscutils.fwdebug_print(\"END - retval = %s\" % retval)\n return retval",
"def is_gzipped(filename):\n f = open(filename, \"rb\")\n # read first two bytes\n byte1 = f.read(1)\n byte2 = f.read(1)\n f.close()\n # check against gzip magic number 1f8b\n # return (byte1 == chr(0x1f)) and (byte2 == chr(0x8b))\n return (byte1 == b'\\x1f') and (byte2== b'\\x8b')",
"def _is_archive(local_path: str) -> bool:\n archive_mimetypes = [\n \"application/zip\",\n \"application/x-tar\",\n \"application/x-gzip\",\n \"application/x-bzip2\",\n \"application/x-7z-compressed\",\n \"application/x-rar-compressed\",\n \"application/x-xz\",\n \"application/x-lzip\",\n \"application/x-lzma\",\n \"application/x-lzop\",\n \"application/x-bzip\",\n \"application/x-bzip2\",\n \"application/x-compress\",\n \"application/x-compressed\",\n ]\n\n return mimetypes.guess_type(local_path)[0] in archive_mimetypes",
"def are_files_gzipped(raw_files):\n files_are_gzipped = None\n for file_name in raw_files:\n if re.search(r\"\\.gz$\", file_name) is not None:\n if files_are_gzipped is False:\n raise Exception(\n \"It seems one file is compressed and the \"\n \"other is \"\n \"not:\\n{}\".format(\"\\n\".join(raw_files))\n )\n files_are_gzipped = True\n else:\n if files_are_gzipped:\n raise Exception(\n \"It seems one file is compressed and the \"\n \"other is \"\n \"not:\\n{}\".format(\"\\n\".join(raw_files))\n )\n files_are_gzipped = False\n return files_are_gzipped",
"def _ensure_uncompressed(response):\n compressed = response.headers.get(\"Content-Encoding\") == \"gzip\"\n if not compressed:\n content_disp = response.headers.get(\"Content-Disposition\", \"\")\n compressed = bool(re.match(r'attachment; *filename=.*\\.gz\\\"?$',\n content_disp))\n if compressed:\n return GzipFile(fileobj=StringIO(response.read()), mode=\"rb\")\n print response.headers\n return response",
"def test_gzipped(f):\n if isinstance(f, basestring):\n f = io.open(f, 'rb')\n current = f.tell()\n f.seek(0)\n magic = f.read(2)\n f.seek(current)\n return magic == b'\\037\\213'",
"def test_compress_file_response(self):\n with open(__file__, \"rb\") as file1:\n\n def get_response(req):\n file_resp = FileResponse(file1)\n file_resp[\"Content-Type\"] = \"text/html; charset=UTF-8\"\n return file_resp\n\n r = GZipMiddleware(get_response)(self.req)\n with open(__file__, \"rb\") as file2:\n self.assertEqual(self.decompress(b\"\".join(r)), file2.read())\n self.assertEqual(r.get(\"Content-Encoding\"), \"gzip\")\n self.assertIsNot(r.file_to_stream, file1)",
"def is_valid_compression_type(cls, compression_type):\n return isinstance(compression_type, _CompressionType)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Compress the passed content string. Should be called when gzip is enabled to compress text types. There is no real advantage to using this with images, since most are already compressed by some image-processing algorithm.
|
def _compress_string(content):
zbuf = StringIO()
zfile = GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)
zfile.write(content)
zfile.close()
return zbuf.getvalue()
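
The snippet above relies on Python 2 era imports (StringIO.StringIO and gzip.GzipFile) declared elsewhere in the module. A rough Python 3 equivalent, offered only as a sketch, buffers bytes instead of str:

import gzip
import io

def compress_bytes(content):
    # Same idea as _compress_string, but with an in-memory bytes buffer.
    buf = io.BytesIO()
    with gzip.GzipFile(mode='wb', compresslevel=6, fileobj=buf) as zfile:
        zfile.write(content)
    return buf.getvalue()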
|
[
"def compress(content):\n if isinstance(content, str):\n content = content.encode('utf-8')\n else:\n content = json.dumps(content , separators=(',', ':')).encode('utf-8')\n return gzip.compress(content, compresslevel=9)",
"def compress_string(string):\n return encoding.encode(compression.encode(string))",
"def compress(string):",
"def compress(content, threshold=512):\n compression_enabled = CONF.logging.http_request_compression\n\n if is_dict(content):\n for key in content:\n content[key] = compress(content[key])\n if is_string(content) and compression_enabled:\n if len(content) > threshold:\n less_data = content[:50]\n compressed_data = base64.b64encode(\n zlib.compress(bytes(content.encode(\"utf-8\"))))\n if not six.PY2:\n compressed_data = str(compressed_data.decode(\"utf-8\"))\n return pprint.pformat(\n \"\\n***Content compressed by Syntribos.***\"\n \"\\nFirst fifty characters of content:\\n\"\n \"***{data}***\"\n \"\\nBase64 encoded compressed content:\\n\"\n \"{compressed}\"\n \"\\n***End of compressed content.***\\n\".format(\n data=less_data, compressed=compressed_data))\n return content",
"def _gzipencode(content):\n import gzip\n out = BytesIO()\n f = gzip.GzipFile(fileobj=out, mode='w', compresslevel=5)\n f.write(content)\n f.close()\n return out.getvalue()",
"def compress_string(self, s):\n zbuf = StringIO()\n zfile = gzip.GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)\n zfile.write(s)\n zfile.close()\n return zbuf.getvalue()",
"def compress_zlib(self, string):\n #encode the input sting\n self.string = string.encode()\n return zlib.compress(self.string)",
"def compress(string):\n\n pass",
"def compress_string(s):\n zbuf = cStringIO.StringIO()\n zfile = gzip.GzipFile(mode=\"wb\", compresslevel=6, fileobj=zbuf)\n zfile.write(s)\n zfile.close()\n return zbuf.getvalue()",
"def compress(self):\n\t\tcodec = self.content_codec\n\t\tif codec:\n\t\t\tself.set(codec.encode(self.__content_bytes()))\n\t\t\tself.content_encoding = None",
"def compressString(s):\n import cStringIO, gzip\n\n # Nasty monkeypatch to avoid gzip changing every time\n class FakeTime:\n def time(self):\n return 1111111111.111\n\n gzip.time = FakeTime()\n\n zbuf = cStringIO.StringIO()\n zfile = gzip.GzipFile(mode='wb', compresslevel=9, fileobj=zbuf)\n zfile.write(s)\n zfile.close()\n return zbuf.getvalue()",
"def encode(self, compress=0):\n raw = bytes(self._encode())\n return gzip.compress(raw, compress) if compress else raw",
"def _compress(self):\n self.response.headers[\"Content-Encoding\"] = \"gzip\"\n if isinstance(self.response.body, str):\n logger.debug(\"Converting string response to bytes before compressing it\")\n self.response.body = bytes(self.response.body, \"utf-8\")\n gzip = zlib.compressobj(9, zlib.DEFLATED, zlib.MAX_WBITS | 16)\n self.response.body = gzip.compress(self.response.body) + gzip.flush()",
"def __handle_compression(self, x):\n if self.__compress:\n return zlib.compress(x)\n return x",
"def handle_string():\n input_string = receive_String()\n print(\"Before compression: \" + input_string + \"\\n\")\n compressed_string = compress_string(input_string)\n print(\"After compression: \" + compressed_string + \"\\n\")",
"def encode_gzip(data, compresslevel=6):\n return gzip.compress(data, compresslevel=compresslevel)",
"def compress(self, data):\n if not isinstance(data, bytes):\n raise Exception(f\"Can only compress bytes, got {type(data)}\")\n\n return brotli.compress(data)",
"def compression(self) -> str:\n ...",
"def compress_encode(value):\n return base64.b64encode(zlib.compress(value.encode(\"ascii\"))).decode(\"ascii\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Build the local metadata file with SHA information for all files. The file location is computed from the home keyword argument.
|
def _build_local_metadata_file(files, home=''):
filepaths = [os.path.join(home, f) for f in files]
shas = [_get_sha_metadata(f) for f in filepaths]
metadata = dict(zip(files, shas))
with open(LOCAL_METADATA_FILE, 'w') as f:
f.write(json.dumps(metadata))
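
_get_sha_metadata and LOCAL_METADATA_FILE are defined elsewhere in the module and are not shown here. A purely illustrative shape for the hashing helper, under the assumption that it returns a content digest per file, could be:

import hashlib

def _get_sha_metadata(filepath):
    # Illustrative only: hash the file contents and return the hex digest.
    sha = hashlib.sha1()
    with open(filepath, 'rb') as f:
        for chunk in iter(lambda: f.read(8192), b''):
            sha.update(chunk)
    return sha.hexdigest()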
|
[
"def generate_metadata(self):\n self.metadata = {\n 'title': os.path.basename(self.source_file).rsplit('.', 1)[0],\n 'url': self.relative_destination_file,\n 'full_path': os.path.dirname(self.relative_destination_file),\n 'short_path': self.shorten_path(\n os.path.dirname(self.relative_destination_file))\n }",
"def _update_filesystem_metadata(self, metadata):\n directory, fname = os.path.split(self.fname)\n fbase = os.path.splitext(fname)[0]\n \n # Test for presence and size of zip file\n zip_file = fbase + '.zip'\n zip_path = os.path.join(directory, zip_file)\n \n if os.path.isfile(zip_path):\n location = 'on_disk'\n data_file_size = os.path.getsize(zip_path)\n else:\n location = 'on_tape'\n data_file_size = 0\n \n # Test for presence of quick look PNG file\n quicklook_file = fbase + '.png'\n quicklook_path = os.path.join(directory, quicklook_file)\n \n if not os.path.isfile(quicklook_path):\n quicklook_file = ''\n\n # Add to metadata dictionary\n item_map = {'directory': directory, 'metadata_file': fname,\n 'data_file': zip_file, 'location': location, \n 'data_file_size': data_file_size, 'quicklook_file': quicklook_file}\n \n for key, value in item_map.items():\n metadata[key] = value",
"def create_build_hash(self):\n\n # The hash order is:\n # - The build script\n # - The build specificity\n # - The build group and umask\n # - The src archive.\n # - For directories, the mtime (updated to the time of the most\n # recently updated file) is hashed instead.\n # - All of the build's 'extra_files'\n # - All files needed to be created at build time 'create_files'\n\n hash_obj = hashlib.sha256()\n\n # Update the hash with the contents of the build script.\n hash_obj.update(self._hash_file(self._script_path, save=False))\n group = self._group.encode() if self._group is not None else b'<def>'\n hash_obj.update(group)\n umask = oct(self._umask).encode() if self._umask is not None \\\n else b'<def>'\n hash_obj.update(umask)\n\n specificity = self._config.get('specificity', '')\n hash_obj.update(specificity.encode('utf8'))\n\n # Update the source and get the final source path.\n src_path = self._update_src()\n\n if src_path is not None:\n if src_path.is_file():\n hash_obj.update(self._hash_file(src_path))\n elif src_path.is_dir():\n hash_obj.update(self._hash_dir(src_path))\n else:\n raise TestBuilderError(\n \"Invalid src location {}.\"\n .format(src_path))\n\n # Hash extra files.\n for extra_file in self._config.get('extra_files', []):\n extra_file = Path(extra_file)\n full_path = self._find_file(extra_file, Path('test_src'))\n\n if full_path is None:\n raise TestBuilderError(\n \"Could not find extra file '{}'\"\n .format(extra_file))\n elif full_path.is_file():\n hash_obj.update(self._hash_file(full_path))\n elif full_path.is_dir():\n self._date_dir(full_path)\n hash_obj.update(self._hash_dir(full_path))\n else:\n raise TestBuilderError(\n \"Extra file '{}' must be a regular file or directory.\"\n .format(extra_file))\n\n # Hash created build files. These files are generated at build time in\n # the test's build directory but we need the contents of these files\n # hashed before build time. Thus, we include a hash of each file\n # consisting of the filename (including path) and it's contents via\n # IOString object.\n files_to_create = self._config.get('create_files')\n if files_to_create:\n for file, contents in files_to_create.items():\n io_contents = io.StringIO()\n io_contents.write(\"{}\\n\".format(file))\n for line in contents:\n io_contents.write(\"{}\\n\".format(line))\n hash_obj.update(self._hash_io(io_contents))\n io_contents.close()\n\n hash_obj.update(self._config.get('specificity', '').encode())\n\n return hash_obj.hexdigest()[:self.BUILD_HASH_BYTES * 2]",
"def metadata_path(self):\n return os.path.join(self.path, 'metadata.txt')",
"def gen_file_info(self, key) -> Dict[str, Any]:\n abs_path = self.get_abs_path(key)\n stat = os.stat(abs_path)\n is_dir = True if S_ISDIR(stat.st_mode) else False\n\n return {'key': key,\n 'size': str(stat.st_size) if not is_dir else '0',\n 'is_local': True,\n 'is_dir': is_dir,\n 'modified_at': stat.st_mtime}",
"def mitogen_buildah_path(self):",
"def _create_file_infos():\n d = {\n #'file_md5sum': None, # provided by AthFile.impl\n #'file_name': None, # ditto\n #'file_type': None, # ditto\n #'file_guid': None, # ditto\n \n 'nentries' : 0, # to handle empty files\n 'run_number': [],\n 'run_type': ['N/A'],\n 'evt_type': [],\n 'evt_number': [],\n 'lumi_block': [],\n 'mc_channel_number': [],\n 'beam_type': ['N/A'], # XXX fixme\n 'beam_energy': ['N/A'], # XXX fixme\n 'stream_tags': [],\n 'metadata_items': [],\n 'eventdata_items': [],\n 'stream_names': None,\n 'geometry': None,\n 'conditions_tag': None,\n 'det_descr_tags': None,\n ##\n 'metadata': {},\n 'tag_info': {},\n }\n return d",
"def metadata_file(self):\r\n if not self._subs:\r\n self._mksubs()\r\n return self._bupm",
"def collect_metadata(file_name, chunk_hash, index, seek, compressed_flag):\n meta_data = {\n \"filename\": file_name,\n \"hash\": chunk_hash,\n \"index\": index,\n \"seek\": seek,\n \"compressed\": compressed_flag,\n \"version\": VersionOnePackageProcessor.version,\n }\n # print(meta_data)\n return meta_data",
"def ComputeMetadata(self, path):\n # For deduplication:\n # md5\n # sha\n # length\n \n # For convenience:\n # enc-content-type\n pass",
"def _create_info_files(self):\n self._create_info_general_file()\n self._create_info_annotation_file()\n shutil.copyfile(self.rconfig.pipeline_config_file, self.info_file_config)\n shutil.copyfile(self.file_list, self.info_file_filelist)",
"def add_file_metadata(self):\n metadata = self.__file.require_group(METADATA)\n self.__write_value(metadata, DATE_CREATED, date.today().strftime(\"%Y-%m-%d\"))\n self.__write_value(metadata, SDK_VERSION, __version__)",
"def _update_filesystem_metadata(self, metadata):\n directory, fname = os.path.split(self.fname)\n fbase = fname.split('_')[0]\n\n # Test for presence and size of tif file\n os.listdir(directory)\n tiff_files = glob(os.path.join(directory, '{}*.TIF'.format(fbase)))\n\n if len(tiff_files) > 0:\n location = 'on_disk'\n tiff_files.sort()\n _, data_file = os.path.split(tiff_files[0])\n data_file_size = os.path.getsize(\n os.path.join(directory, data_file))\n tiff_names = []\n tiff_sizes = []\n for tiff in tiff_files:\n _, tiff_name = os.path.split(tiff)\n tiff_names.append(tiff_name)\n tiff_sizes.append(str(os.path.getsize(tiff)))\n tiff_names = \",\".join(tiff_names)\n tiff_sizes = \",\".join(tiff_sizes)\n else:\n location = 'on_tape'\n data_file_size = 0\n data_file = \"\"\n\n # Test for presence of quick look PNG file\n quicklook_file = fbase + '_VER.jpg'\n quicklook_path = os.path.join(directory, quicklook_file)\n\n if not os.path.isfile(quicklook_path):\n quicklook_file = ''\n\n # Add to metadata dictionary\n item_map = {'directory': directory, 'metadata_file': fname,\n 'data_file': data_file, 'location': location,\n 'data_file_size': data_file_size,\n 'quicklook_file': quicklook_file}\n\n if len(tiff_files) > 0:\n item_map.update({'data_files': tiff_names,\n 'data_file_sizes': tiff_sizes})\n\n for key, value in item_map.items():\n metadata[key] = value",
"def generate_metadata_files(self):\n\n data_folder = self.get_data_folder(mode='absolute')\n\n parents = (data_folder / '_').parents\n\n for mfile in self.mdata:\n for regex, level in METADATA_LEVEL_BY_NAME.items():\n if re.compile(regex).match(mfile.name):\n create_file(mfile, parents[(3-level)] / mfile.name,\n mode='copy')",
"def static_metadata_path(self):\r\n return os.path.join(self.doc_path, 'metadata.json')",
"def get_metadata(self):\n previous = DirectoryMetadata.load_pickle(self)\n metadata = {}\n\n for dirpath, dirnames, filenames in os.walk(self.prefix_dir):\n for fname in filenames:\n path = os.path.join(dirpath, fname)\n relative_path = path.split(self.base_dir, 1)[1]\n try:\n stats = os.stat(path)\n except OSError:\n log.exception('Error stating a file on disk while building up metadata, skipping file %s' % path)\n continue\n swift_bytes = stats.st_size\n mtime = datetime.utcfromtimestamp(stats.st_mtime)\n if (previous is not None) and (relative_path in previous.metadata) and\\\n (previous.metadata[relative_path].bytes == swift_bytes):\n swift_hash = previous.metadata[relative_path].hash\n else:\n try:\n with open(path, 'rb') as afile:\n md5_hash = hashlib.md5()\n md5_hash.update(afile.read())\n swift_hash = md5_hash.hexdigest()\n except OSError:\n log.exception('Error reading a file to create the md5 while building up metadata, skipping file %s' % path)\n continue\n\n metadata[relative_path] = FileMetadata(relative_path, swift_bytes, mtime, swift_hash)\n\n return metadata",
"def download_metadata():\n\n os.makedirs(\"meta_data\", exist_ok = True)\n def reporthook(blocknum, blocksize, totalsize):\n readsofar = blocknum * blocksize\n if totalsize > 0:\n percent = readsofar * 1e2 / totalsize\n s = \"\\r%5.1f%% %*d / %d\" % (\n percent, len(str(totalsize)), readsofar, totalsize)\n sys.stderr.write(s)\n if readsofar >= totalsize: # near the end\n sys.stderr.write(\"\\n\")\n else: # total size is unknown\n sys.stderr.write(\"read %d\\n\" % (readsofar,))\n\n print(\"Download Meta data\")\n for url in _META_URL_LIST:\n file_name = url.split(\"/\")[-1]\n\n # exist check\n if os.path.isfile(f\"meta_data/{file_name}\"):\n print(f\"...{file_name} already exist\")\n continue\n else:\n print(f\"... {file_name}\")\n urlretrieve(url, f\"meta_data/{file_name}\", reporthook)",
"def _metadata_filepath(\n name: str, run_id: Optional[str] = None, operator_id: Optional[str] = None\n) -> str:\n path = _data_filepath(name, run_id, operator_id).lstrip(f\"{BUCKET_NAME}/\")\n path = f\"{path}.metadata\"\n\n return path",
"def _add_filename_metadata(self, extra_metadata): \n \n # Make sure product_info section exists\n extra_metadata.setdefault('product_info', {})\n \n file_name = os.path.basename(self.fname)\n fn_comps = file_name.split(\"_\")\n \n if self.__class__ == SAFESentinel1:\n component = fn_comps[2]\n if len(component) < 4: \n resolution = 'N/A'\n else:\n resolution = component[-1]\n \n extra_metadata['product_info']['Resolution'] = resolution\n \n # Add file/scan name \n extra_metadata['product_info']['Name'] = os.path.splitext(file_name)[0]\n \n # Add Satellite and Mission from the file path\n comp_1 = fn_comps[0].upper()\n extra_metadata['platform']['Mission'] = \"Sentinel-%s\" % comp_1[1]\n extra_metadata['platform']['Satellite'] = \"Sentinel-%s\" % comp_1[1:]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Uploads a file to an S3 bucket. If gzip=True, compress and upload the gzipped version of the file instead of the original one. If gzip=True and the file cannot be compressed, quit the upload process (don't upload at all). So you should always pass the correct gzip flag into this function in order to get an upload.
|
def upload_file(conn, filename_local, filename_s3, gzip=False):

    filename_s3 = filename_s3.lstrip('./')

    file_descriptor = open(filename_local, 'rb')
    content = file_descriptor.read()

    content_type = _get_content_type(file_descriptor)
    headers = _get_headers(content_type)

    # should compress only if the file is compressible and gzip is enabled
    can_be_gzipped = _file_can_be_compressed(filename_local)
    if gzip and can_be_gzipped:
        content = _compress_string(content)
        headers['Content-Length'] = str(len(content))
        headers['Content-Encoding'] = 'gzip'
        extension = mimetypes.guess_extension(content_type)
        # we should not overwrite the original file on the server.
        # We change extensions: style.css --> style.gz.css, for instance
        filename_s3 = filename_s3.rstrip(extension) + '.gz' + extension

    # if gzip is enabled and the file is not compressible, don't upload anything at all
    elif gzip and not can_be_gzipped:
        return

    # upload
    print('Uploading %s to %s' % (filename_local, filename_s3))
    _put(conn, filename_s3, content, headers=headers)
    file_descriptor.close()
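
A hedged illustration of the key-renaming step above, since it is the subtle part; the filenames are made up, and conn, _put and _get_headers are assumed to come from the surrounding module.

import mimetypes

# Illustrative only: how the gzipped S3 key name is derived.
filename_s3 = './static/css/style.css'.lstrip('./')   # 'static/css/style.css'
extension = mimetypes.guess_extension('text/css')      # '.css'
gz_key = filename_s3.rstrip(extension) + '.gz' + extension
print(gz_key)                                           # 'static/css/style.gz.css'
# Note: str.lstrip/rstrip strip a character set, not a literal prefix/suffix,
# so this behaves as intended for names like this one but can over-trim others.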
|
[
"def s3_upload(upload_file=\"test\", args):\n REGION = args['REGION']\n BUCKET = args['BUCKET']\n ACCESS_KEY = base64.b64decode(args['ACCESS_KEY'])\n SECRET_KEY = base64.b64decode(args['SECRET_KEY'])\n TAR_FILE = args['TAR_FILE']\n\n session = Session(aws_access_key_id=ACCESS_KEY,\n aws_secret_access_key=SECRET_KEY,\n region_name=REGION)\n s3 = session.resource('s3')\n client = session.client('s3')\n\n upload_data = open(TAR_FILE, 'rb')\n file_obj = s3.Bucket(BUCKET).put_object(Key=upload_file, Body=upload_data)",
"def _upload_file_to_s3(self, filepath, bucket, prefix=None):\n\t s3_client = self.session.client('s3')\n\n\t try:\n\t if prefix is not None:\n\t s3_object = '/'.join([prefix, Path(filepath).name])\n\t else:\n\t s3_object = Path(filepath).name\n\n\t logging.info(\"Uploading %s to bucket %s\", s3_object, bucket)\n\t s3_client.upload_file(str(filepath), bucket, s3_object)\n\n\t except ClientError as e:\n\t logging.error(e)",
"def s3upload(self, fpath, bucket):\n from s3upload import S3upload\n S3upload(fpath, bucket)",
"def upload_file(file_name, bucket):\r\n object_name = file_name\r\n s3_client = boto3.client('s3')\r\n response = s3_client.upload_file(file_name, bucket, object_name)\r\n\r\n return response",
"def upload_s3(self, s3_file):\n file_key = s3_file.file_key\n filename = s3_file.filename\n headers = {}\n \n content_type = mimetypes.guess_type(filename)[0]\n if content_type:\n headers['Content-Type'] = content_type\n \n # Check if file on S3 is older than local file, if so, upload\n if not self.do_force:\n s3_key = self.bucket.get_key(file_key)\n if s3_key:\n s3_datetime = datetime.datetime(*time.strptime(\n s3_key.last_modified, \"%a, %d %b %Y %H:%M:%S %Z\")[0:6])\n local_datetime = datetime.datetime.utcfromtimestamp(\n os.stat(filename).st_mtime)\n if local_datetime < s3_datetime:\n self.skip_count += 1\n if self.verbosity > 1:\n print \"File %s hasn't changed since last uploade\" % file_key\n return\n\n if self.verbosity > 0:\n print \"Uploading %s (worker: %d)\" % (file_key, self.num)\n \n file_obj = open(filename, 'rb')\n filedata = file_obj.read()\n file_size = os.fstat(file_obj.fileno()).st_size\n \n if self.do_expires:\n # HTTP/1.0\n headers['Expires'] = \"%s GMT\" % (email.Utils.formatdate(\n time.mktime((datetime.datetime.now() +\n datetime.timedelta(days=365*2)).timetuple())))\n \n # HTTP/1.1\n headers['Cache-Control'] = \"max-age %d\" % (3600 * 24 * 365 * 2)\n if self.verbosity > 1:\n print \"\\texpires: %s\" % (headers['Expires'])\n print \"\\tcache-control: %s\" % (headers['Cache-Control'])\n\n try:\n if not self.dry_run:\n self.key.name = file_key\n self.key.set_contents_from_string(filedata, headers, replace=True)\n self.key.make_public()\n \n if self.do_gzip and not self.dry_run:\n \n # Gzipping only if file is large enough (>1K recommended) \n if file_size > 1024 and content_type in self.GZIP_CONTENT_TYPES:\n headers['Content-Encoding'] = 'gzip'\n gzip_filedata = self.compress_string(filedata)\n gz_filename, gz_ext = os.path.splitext(file_key)\n \n self.key.name = ''.join([gz_filename, '.gz', gz_ext])\n self.key.set_contents_from_string(gzip_filedata, headers, replace=True)\n self.key.make_public()\n if self.verbosity > 1:\n print \"\\tgzipped: %dk to %dk\" % (file_size / 1024, len(gzip_filedata) / 1024)\n \n elif self.verbosity > 0 and file_size < 1024 and content_type in self.GZIP_CONTENT_TYPES:\n print \"Skipping gzip on %s, less than 1k\" % file_key\n \n except boto.s3.connection.S3CreateError, e:\n print \"Failed: %s\" % e\n except Exception, e:\n print e\n raise\n else:\n self.upload_count += 1\n \n if file_obj:\n file_obj.close()",
"def _upload_s3(self, filename, bucket, objectKey):\n return s3_client.upload_file(filename, bucket, objectKey)",
"def upload(self, bucket, obj, s3_client=None):\n\n s3_client = s3_client or self.s3_client\n transfer_config = boto3.s3.transfer.TransferConfig(multipart_threshold=1024, use_threads=True, max_concurrency=10)\n s3_transfer = boto3.s3.transfer.S3Transfer(client=s3_client, config=transfer_config)\n\n try:\n logging.debug(\"Uploading {} to {}\".format(obj, bucket))\n s3_transfer.upload_file(obj, bucket, helpers.strip_path(obj)[1])\n\n return True\n except botocore.exceptions.EndpointConnectionError:\n logging.error(\"Couldn't connect to an S3 endpoint. If you're using an S3 compatible provider other than AWS, remember to set --s3-endpoint-url\")\n return False\n except Exception as e:\n logging.error(\"Error uploading: {}\".format(e))\n return False",
"def upload_to_s3(bucket, file_path, prefix, timestamp):\n upload_name = f'{prefix}_{timestamp or \"\"}{basename(file_path)}'\n\n try:\n bucket.upload_file(file_path, upload_name)\n syslog.syslog(syslog.LOG_INFO,\n f'Uploaded {file_path} to S3 Bucket - {bucket.name}')\n return True\n except S3UploadFailedError as s3ex:\n syslog.syslog(\n syslog.LOG_ERR, f'Failed to upload {file_path} to S3 Bucket - {bucket_name} - {s3ex}')\n return False\n finally:\n rm(file_path)",
"def upload_to_s3(site, bucket, directory=None, files=None, prefix=None):\n if bucket is None:\n print red('Error: Bucket must be specified.')\n return\n if directory is None and files is None:\n print red('Error: Directory and/or files must be specified.')\n return\n # Setup boto\n import boto\n from boto.s3.bucket import Bucket\n from boto.s3.key import Key\n import mimetypes\n import fnmatch\n\n setup_aws_access_key(site)\n\n # Connect to S3\n c = boto.connect_s3()\n b = Bucket(c, bucket)\n\n # Fix the prefix\n # prefix itself shouldn't have a / prefix itself but should end with /\n if prefix:\n prefix = prefix.lstrip('/')\n if prefix and not prefix.endswith('/'):\n prefix = prefix + '/'\n\n def __upload(key, filename):\n k = Key(b)\n k.key = key\n headers = {}\n content_type = mimetypes.guess_type(filename)[0]\n if site.has_key('webapp') and site['webapp'].get('cache_control'):\n for pattern in site['webapp']['cache_control']:\n if fnmatch.fnmatch(filename, pattern):\n headers['Cache-Control'] = site['webapp']['cache_control'][pattern]\n break\n if site.has_key('webapp') and site['webapp'].get('gzip_types') and content_type in site['webapp']['gzip_types']:\n from gzip import GzipFile\n from StringIO import StringIO\n # Need to specify content_type when uploading from a string!\n headers['Content-Type'] = content_type\n headers['Content-Encoding'] = 'gzip'\n s = StringIO()\n g = GzipFile(fileobj=s, mode='wb')\n with open(filename, 'rb') as f:\n g.write(f.read())\n g.close()\n k.set_contents_from_string(s.getvalue(), headers)\n else:\n k.set_contents_from_filename(filename, headers)\n\n if files:\n # Upload individual files\n if directory:\n keys = [filename.lstrip('/') for filename in files]\n files = [os.path.join(directory, filename) for filename in files]\n else:\n keys = [os.path.split(filename)[1] for filename in files]\n for i, filename in enumerate(files):\n print 'Uploading %s' % keys[i]\n if prefix:\n key = prefix + keys[i]\n else:\n key = keys[i]\n __upload(key, filename)\n elif directory:\n # Upload an entire directory\n def __upload_dir(arg, dirname, names):\n # arg is the starting directory\n for name in names:\n filename = os.path.join(dirname, name)\n if not os.path.isdir(filename) and not os.path.islink(filename) and not name.startswith('.'):\n key = filename[len(arg):]\n if key.startswith('/'):\n key = key[1:]\n if prefix:\n key = prefix + key\n print 'Uploading %s' % key\n __upload(key, filename)\n os.path.walk(directory, __upload_dir, directory)",
"def EncryptAndUploadToS3(bucket, filename, metadata):\n\n tmp_fp = os.tmpfile()\n subprocess.call(\"cat\", filename, stdout=tmp_fp)\n metadata['enc-method'] = 0\n # Check for an existing S3 path xattr on the file.\n # Check if the uploaded hash matches this new one.\n if NOOP:\n print bucket, key, metadata, filename\n else:\n key = Key(bucket)\n key.update_metadata(metadata)\n key.key(filename)\n key.set_contents_from_file(tmp_fp, cb=, num_cb=100)",
"def upload_to_s3(file_path, config):\n logging.info(\"Uploading file to S3 bucket: %s\", config['s3_bucket_name'])\n s3 = boto3.resource('s3')\n s3_filename = config['s3_bucket_path'] + config['rendered_filename']\n s3.Bucket(config['s3_bucket_name']).upload_file(\n file_path, s3_filename, ExtraArgs={\n 'ContentType': 'text/html', 'ACL': 'public-read'})",
"def archive(self, file):\n if not self.__bucket_exists():\n self.__create_bucket()\n self.__upload(file)",
"def upload_to_s3(aws_access_key_id, aws_secret_access_key, file, bucket, key):\n try:\n size = os.fstat(file.fileno()).st_size\n except:\n # Not all file objects implement fileno(),\n # so we fall back on this\n file.seek(0, os.SEEK_END)\n size = file.tell()\n\n conn = boto.connect_s3(aws_access_key_id, aws_secret_access_key)\n bucket = conn.get_bucket(bucket, validate=True)\n k = Key(bucket)\n k.key = key\n sent = k.set_contents_from_file(file, rewind=True)\n\n # Rewind for later use\n file.seek(0)\n\n if sent == size:\n return True\n return False",
"def upload_one(s3_host: str, bucket_name: str, filepath: Path, dst_name: str):\n s3 = boto3.resource(\"s3\", endpoint_url=s3_host)\n s3.meta.client.meta.events.register(\"choose-signer.s3.*\", disable_signing)\n bucket = s3.Bucket(f\"{bucket_name}\")\n print(f\"Uploading {filepath}\")\n bucket.upload_file(str(filepath), dst_name)",
"def upload_fileobj(self, bucket, s3_key, fileobj, s3_kms_extra_args):\n\n try:\n self.exp_backoff(\n action_method=self.get_client().upload_fileobj,\n Fileobj=fileobj,\n Bucket=bucket,\n Key=s3_key,\n ExtraArgs=s3_kms_extra_args)\n except botocore.exceptions.ClientError:\n log_and_exit(\"Unable to upload fileobj\",\n SIMAPP_S3_DATA_STORE_EXCEPTION,\n SIMAPP_EVENT_ERROR_CODE_500)\n except Exception as ex:\n log_and_exit(\"Exception in uploading fileobj: {}\".format(ex),\n SIMAPP_S3_DATA_STORE_EXCEPTION,\n SIMAPP_EVENT_ERROR_CODE_500)",
"def upload_object(self, file_path, s3_path):\n logging.info(\"Uploading file to \\\"{}\\\" to S3\".format(s3_path))\n bucket_name, key = S3Util.get_bucket_and_key(s3_path)\n self.s3_resource.Bucket(bucket_name).upload_file(file_path, key)",
"def upload(iid, file_obj, content_type):\n if AWS_CLIENT_CONFIG and BUCKET_NAME:\n try:\n s3 = boto3.resource('s3', **AWS_CLIENT_CONFIG)\n s3.Bucket(BUCKET_NAME).put_object(Key=iid,\n Body=file_obj,\n ContentType=content_type)\n return StorageType.S3\n except botocore.exceptions.ClientError as e:\n logger.error(e)\n else:\n # store locally in temp dir (tests, local development)\n store_temp_file(iid, file_obj)\n return StorageType.TMP\n return None",
"def upload_img_to_s3(file_name: str, bucket_name: str = None) -> None:\n if not(bucket_name is None):\n send_command = SEND_COMMAND_TEMPLATE.format(file_name, bucket_name, PROFILE_NAME)\n else:\n send_command = SEND_COMMAND_TEMPLATE.format(file_name, DEFAULT_BUCKET, PROFILE_NAME)\n log.Info(send_command)\n subprocess.call(send_command, shell=True)\n return",
"def write_to_s3(self, filename):\n print('writing to s3 bucket')\n try:\n self.s3_client.upload_file(f'/tmp/{filename}', self.transformed_bucket, filename,\n ExtraArgs={\n 'Metadata': {'-clientFilename': filename},\n 'ContentType' : 'application/zip',\n }\n )\n except Exception as e:\n print('client - error uploading zip file to s3 with ', e)\n raise e\n\n print('finished writing')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a recursive list of all files inside folder. Each list element is a string with the file path relative to folder. If any file is found with the same name as LOCAL_METADATA_FILE, it is not appended to the list.
|
def _get_file_list(folder):
tree = [x for x in os.walk(folder)]
files = [os.path.join(t[0], y) for t in tree for y in t[2]]
return [os.path.relpath(x, start=folder)
for x in files if x != LOCAL_METADATA_FILE]
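
LOCAL_METADATA_FILE is the same module constant used when building the local metadata file. A hedged, self-contained illustration of the walk-and-relativise pattern above, with a made-up folder layout:

import os

# Illustrative only: list every file under 'static/', relative to that folder.
folder = 'static'
files = [os.path.relpath(os.path.join(root, name), start=folder)
         for root, _dirs, names in os.walk(folder)
         for name in names]
print(files)  # e.g. ['css/style.css', 'img/logo.png']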
|
[
"def _get_files(self, folder):\n folders = os.listdir(folder)\n return [\n f for f in folders if os.path.isfile(os.path.join(folder, f))\n ]",
"def files_in_folder(folder):\n files = []\n for f in glob.glob(folder):\n if os.path.isdir(f):\n files.extend(files_in_folder(f + os.sep + \"**\"))\n else:\n files.append(f)\n return files",
"def ReadFolder(folder: str) -> List[str]:\n\n onlyfiles = [f for f in listdir(folder) if isfile(join(folder, f))]\n \n return onlyfiles",
"def list_file_in_folder(folder,exclude_files=[\".anadama\"]):\n\n # get all of the files in the user process folder, with directories\n list_files = []\n for path, directories, files in os.walk(folder, followlinks=True):\n # remove the base process folder form the path\n reduced_path = path.replace(folder,\"\")\n # remove path sep if first character\n if reduced_path.startswith(os.sep):\n reduced_path = reduced_path[1:]\n current_set = []\n for file in files:\n file_path = os.path.join(path,file)\n # check that this is a file with an extension\n if os.path.isfile(file_path) and \".\" in file:\n current_set.append((os.path.join(reduced_path,file), os.path.join(path,file), get_file_size(file_path), get_mtime(file_path)))\n # add the files sorted\n if current_set:\n list_files+=sorted(current_set, key=lambda x: x[0])\n\n # remove any of the files/folder to exclude\n list_files_reduced = []\n for file_info in list_files:\n include = True\n for exclude in exclude_files:\n if exclude in file_info[0]:\n include = False\n break\n if include:\n list_files_reduced.append(file_info)\n\n return list_files_reduced",
"def list_files(folder, pattern, full_path=False):\n if not folder:\n folder = \".\"\n folder = os.path.abspath(folder)\n fpaths = os.listdir(folder)\n fpattern = re.compile(pattern)\n file_list = list(filter(fpattern.search, fpaths))\n if full_path:\n file_list = [folder + \"/\" + f for f in file_list]\n return file_list",
"def listfiles(self, *path):\n dir = self.localpath(*path)\n files = []\n for root, dirs, fnms in os.walk(dir):\n for f in fnms:\n if f[-5:] == '.info' and os.path.exists(os.path.join(root, f[:-5])):\n try:\n _open_file_info(os.path.join(root, f))\n files.append(\n path + tuple(_split_path(\n os.path.relpath(os.path.join(root, f[:-5]), start=dir)\n )))\n except ValueError:\n pass\n return files",
"def list_all_files(root):\n local_files = []\n for path, dirs, files in os.walk(os_path(root), followlinks=False):\n if len(files) > 0:\n path_wo_root = path[(len(root) + len(slash)):] # remove root part\n local_files.extend([os.path.join(path_wo_root, f) for f in files])\n return local_files",
"def collect_all_files(folder, label):\n ret = []\n for root, subfolders, files in os.walk(folder):\n for file in files:\n ret.append((os.path.join(root, file), label))\n\n return ret",
"def get_full_file_list(self, head_dir, sub_folder=False):\r\n\r\n\t\tfileList = []\r\n\t\tfor fn in os.listdir(head_dir):\r\n\t\t\tdirfile = self.path_join(head_dir, fn)\r\n\t\t\tif os.path.isdir(dirfile):\r\n\t\t\t\tif sub_folder:\r\n\t\t\t\t\tfileList += self.get_full_file_list(dirfile)\r\n\t\t\telse:\r\n\t\t\t\tfileList.append(dirfile)\r\n\r\n\t\treturn fileList",
"def list_files_local(path):\n from glob import glob\n return glob(path)",
"def _list_files(folder, pattern):\n for root, folders, files in os.walk(folder):\n for filename in files:\n if fnmatch.fnmatch(filename, pattern):\n yield os.path.join(root, filename)",
"def get_files_in_dir(dir_path: str) -> List[FileInfo]:\n dir_walk_items = os.walk(dir_path)\n\n all_files = []\n for dir_walk_item in dir_walk_items:\n path_to_dir = dir_walk_item[0]\n file_names = dir_walk_item[2]\n for file_name in file_names:\n if file_name not in IGNORED_FILES:\n all_files.append(\n FileInfo.create(path_to_dir, file_name)\n )\n\n return all_files",
"def file_list(self):\n return [\n x\n for x in os.listdir(self.absolute_process_folder)\n if os.path.isfile(os.path.join(self.absolute_process_folder, x))\n ]",
"def get_file_list(directory, recurse):\n files = []\n\n # if directory recursion is not requested\n if recurse == False:\n filenames = os.listdir(directory)\n # if the last character in the directory name is a '/', don't readd it to the file path\n if directory[-1:] == \"/\":\n for f in filenames:\n if os.path.isdir(directory + f) is not True:\n files.append(directory + f)\n else:\n for f in filenames:\n if os.path.isdir(directory + \"/\" + f) is not True:\n files.append(directory + \"/\" + f)\n\n # if directory recursion is requested\n elif recurse == True:\n for root, dirs, filenames in os.walk(directory):\n if root[-1:] == \"/\":\n for f in filenames:\n if os.path.isdir(directory + f) is not True:\n files.append(root + f)\n else:\n for f in filenames:\n if os.path.isdir(directory + \"/\" + f) is not True:\n files.append(root + \"/\" + f)\n\n else:\n print \"Something weird happened.\"\n sys.exit(1)\n\n return files",
"def get_all_files_walk(folder):\n files = []\n for root, dirs, filenames in os.walk(folder):\n files.extend(os.path.join(root, f) for f in filenames)\n return files",
"def _listdir(folder):\n\tfilePattern = r\"^\\d{4}\\-(0?[1-9]|1[012])\\-(0?[1-9]|[12][0-9]|3[01])\\-clipping\\-[\\d]*\\.json$\"\n\tfilenames = [f for f in os.listdir(folder) if re.match(filePattern, f)]\n\treturn filenames",
"def list_files(self):\n path_with_slash = self.path if self.path.endswith(\"/\") else \"{}/\".format(self.path)\n all_files = []\n for root, _dirs, files in sorted(os.walk(self.path)):\n for f in files:\n f = os.path.join(root, f)\n all_files.append((f, f.replace(path_with_slash, \"[{}]/\".format(self.name), 1)))\n return all_files",
"def get_list_of_files_in_folder(\n self, folder_name: str, limit: int = 1\n ) -> List[str]:\n\n files = []\n if os.path.isdir(folder_name):\n # Get list of only html files from folder:\n files = [file for file in os.listdir(folder_name) if file.endswith(\".html\")]\n\n if len(files) < limit: # short dialogs\n return []\n\n # Descending sort to consider message order:\n files = sorted(\n files,\n key=lambda x: int(re.search(r\"messages(\\d+)\\.html\", x).group(1)),\n reverse=True,\n )\n else:\n print(f\"No such directory: {folder_name}\")\n return files",
"def getAllFiles(self, location):\n \n return [os.path.join(location, filename) for filename in os.listdir(location)]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Fetches the remote metadata file REMOTE_METADATA_FILE and returns the equivalent metadata dict.
|
def _fetch_current_remote_metadata(conn):
content = _get(conn, REMOTE_METADATA_FILE)
metadata = json.loads(content) if content else {}
return metadata
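
_get and REMOTE_METADATA_FILE are provided by the same module (the first snippet in the list below shows a matching _get); the empty-dict fallback means a missing remote file is treated as "nothing uploaded yet". A hedged sketch of how the result might be consumed, with made-up paths and digests:

# Hypothetical sync check: re-upload only files whose SHA differs remotely.
local = {'css/style.css': 'abc123', 'img/logo.png': 'def456'}   # local metadata
remote = {'css/style.css': 'abc123'}                            # remote metadata
changed = [path for path, sha in local.items() if remote.get(path) != sha]
print(changed)  # ['img/logo.png']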
|
[
"def _get(conn, remote_file, bucket_name=BUCKET_NAME):\n contents = None\n try:\n reply = conn.get(bucket_name, remote_file)\n contents = reply.body\n if reply.http_response.status != 200:\n print 'Failed to fetch current_remote metadata'\n contents = None\n except:\n contents = None\n return contents",
"def _fetch_current_local_metadata():\n if not os.path.exists(LOCAL_METADATA_FILE):\n return {}\n\n with open(LOCAL_METADATA_FILE) as f:\n return json.loads(f.read())",
"def get_remote_metadata(contentmetadata):\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=FutureWarning)\n contentmetadata.parse_remote_metadata(pydov.util.net.request_timeout)\n\n for remote_md in contentmetadata.metadataUrls:\n if 'metadata' in remote_md and remote_md['metadata'] is not None:\n return remote_md['metadata']",
"def _get_metadata_file(self, metadata_role, remote_filename,\n upperbound_filelength, expected_version,\n compression_algorithm):\n\n I_TO_PRINT = TO_PRINT + uptane.YELLOW + '[SingleRepoUpdater._get_metadata_file(self, metadata_role, remote_filename, upperbound_filelength, expected_version, compression_algorithm)]: ' + uptane.ENDCOLORS\n #TODO: Print to be deleted\n print(str('%s %s %s' % (I_TO_PRINT, 'Trying to download information for metadata_role:', metadata_role)))\n #TODO: Until here\n\n file_mirrors = tuf.mirrors.get_list_of_mirrors('meta', remote_filename,\n self.mirrors)\n # file_mirror (URL): error (Exception)\n file_mirror_errors = {}\n file_object = None\n\n for file_mirror in file_mirrors:\n try:\n file_object = tuf.download.unsafe_download(file_mirror,\n upperbound_filelength)\n\n if compression_algorithm is not None:\n logger.info('Decompressing ' + str(file_mirror))\n file_object.decompress_temp_file_object(compression_algorithm)\n\n else:\n logger.info('Not decompressing ' + str(file_mirror))\n\n # Verify 'file_object' according to the callable function.\n # 'file_object' is also verified if decompressed above (i.e., the\n # uncompressed version).\n metadata_signable = tuf.util.load_string(file_object.read())\n\n # If the version number is unspecified, ensure that the version number\n # downloaded is greater than the currently trusted version number for\n # 'metadata_role'.\n version_downloaded = metadata_signable['signed']['version']\n\n if expected_version is not None:\n # Verify that the downloaded version matches the version expected by\n # the caller.\n if version_downloaded != expected_version:\n message = \\\n 'Downloaded version number: ' + repr(version_downloaded) + '.' \\\n ' Version number MUST be: ' + repr(expected_version)\n raise tuf.BadVersionNumberError(message)\n\n # The caller does not know which version to download. Verify that the\n # downloaded version is at least greater than the one locally available.\n else:\n # Verify that the version number of the locally stored\n # 'timestamp.json', if available, is less than what was downloaded.\n # Otherwise, accept the new timestamp with version number\n # 'version_downloaded'.\n logger.info('metadata_role: ' + repr(metadata_role))\n try:\n current_version = \\\n self.metadata['current'][metadata_role]['version']\n\n if version_downloaded < current_version:\n raise tuf.ReplayedMetadataError(metadata_role, version_downloaded,\n current_version)\n\n except KeyError:\n logger.info(metadata_role + ' not available locally.')\n\n self._verify_uncompressed_metadata_file(file_object, metadata_role)\n\n except Exception as exception:\n # Remember the error from this mirror, and \"reset\" the target file.\n logger.exception('Update failed from ' + file_mirror + '.')\n file_mirror_errors[file_mirror] = exception\n file_object = None\n\n else:\n break\n\n if file_object:\n return file_object\n\n else:\n logger.error('Failed to update {0} from all mirrors: {1}'.format(\n remote_filename, file_mirror_errors))\n raise tuf.NoWorkingMirrorError(file_mirror_errors)",
"def parse_remote_metadata(self, timeout=30):\n for metadataUrl in self.metadataUrls:\n if (\n metadataUrl[\"url\"] is not None and metadataUrl[\"format\"].lower() == \"xml\"\n ):\n try:\n content = openURL(\n metadataUrl[\"url\"], timeout=timeout, headers=self.headers, auth=self.auth\n )\n doc = etree.fromstring(content.read())\n if metadataUrl[\"type\"] == \"FGDC\":\n mdelem = doc.find(\".//metadata\")\n if mdelem is not None:\n metadataUrl[\"metadata\"] = Metadata(mdelem)\n else:\n metadataUrl[\"metadata\"] = None\n elif metadataUrl[\"type\"] == \"TC211\":\n mdelem = doc.find(\n \".//\" + util.nspath_eval(\"gmd:MD_Metadata\", n.get_namespaces([\"gmd\"]))\n ) or doc.find(\n \".//\" + util.nspath_eval(\"gmi:MI_Metadata\", n.get_namespaces([\"gmi\"]))\n )\n if mdelem is not None:\n metadataUrl[\"metadata\"] = MD_Metadata(mdelem)\n else:\n metadataUrl[\"metadata\"] = None\n except Exception:\n metadataUrl[\"metadata\"] = None",
"def parse_remote_metadata(self, timeout=30):\n for metadataUrl in self.metadataUrls:\n if (\n metadataUrl[\"url\"] is not None and metadataUrl[\"format\"].lower() == \"text/xml\"\n ):\n try:\n content = openURL(metadataUrl[\"url\"], timeout=timeout, headers=self.headers, auth=self.auth)\n doc = etree.fromstring(content.read())\n\n if metadataUrl[\"type\"] == \"FGDC\":\n mdelem = doc.find(\".//metadata\")\n if mdelem is not None:\n metadataUrl[\"metadata\"] = Metadata(mdelem)\n else:\n metadataUrl[\"metadata\"] = None\n elif metadataUrl[\"type\"] in [\"TC211\", \"19115\", \"19139\"]:\n mdelem = doc.find(\n \".//\" + nspath_eval(\"gmd:MD_Metadata\", namespaces)\n ) or doc.find(\n \".//\" + nspath_eval(\"gmi:MI_Metadata\", namespaces)\n )\n if mdelem is not None:\n metadataUrl[\"metadata\"] = MD_Metadata(mdelem)\n else:\n metadataUrl[\"metadata\"] = None\n except Exception:\n metadataUrl[\"metadata\"] = None",
"def get_downloads_metadata():\n global _METADATA\n if _METADATA is None:\n _METADATA = yaml.safe_load(resource_string(__name__, \"downloads.yml\"))\n return _METADATA",
"def get_video_file_metadata(self, fname: str) -> dict:\r\n data = {\"arg\": fname}\r\n cmd_url = (\r\n \"http://\"\r\n + self.info[\"url\"]\r\n + \":8181/api/luminaire/1023\"\r\n + \"/command/GET_VIDEO_FILE_METADATA\"\r\n )\r\n response = requests.post(\r\n cmd_url, cookies=self.info[\"cookiejar\"], json=data, verify=False\r\n )\r\n return self._check_response_for_data(response)",
"def get_metadata(base_dir, config):\n metadata_location = os.path.join(base_dir, config['PATH']['metadata_name'])\n with open(metadata_location, 'rt') as csvfile:\n reader = csv.reader(csvfile, delimiter=';', quoting=csv.QUOTE_NONE)\n meta_dict = dict(reader)\n return meta_dict",
"def fetch_metadata(requests_impl=requests):\n\n print(f'fetching metadata at {Network.METADATA_URL}')\n return requests_impl.get(Network.METADATA_URL).json()",
"async def get_file_metadata(\n location_id: LocationID, file_id: StorageFileID, user_id: UserID\n):",
"def remote_file(self):\n return self._remote_file",
"def get_metadata(self):\n try:\n r = requests.get('https://login.mailchimp.com/oauth2/metadata', auth=self)\n except requests.exceptions.RequestException as e:\n raise e\n else:\n r.raise_for_status()\n output = r.json()\n if 'error' in output:\n raise requests.exceptions.RequestException(output['error'])\n return output",
"def _safely_get_metadata_file(self, metadata_role, metadata_filepath,\n uncompressed_fileinfo,\n compression=None, compressed_fileinfo=None):\n\n I_TO_PRINT = TO_PRINT + uptane.YELLOW + '[SingleRepoUpdater._safely_get_metadata_file(metadata_role, metadata_filepath, uncompressed_fileinfo, compression, compressed_fileinfo)]: ' + uptane.ENDCOLORS\n #TODO: Print to be deleted\n print(str('%s %s' % (I_TO_PRINT, 'Non-public method that safely downloads a metadata file up to a certain length, and checks its hashes thereafter.')))\n #TODO: Until here\n\n # Store file length and hashes of the uncompressed version metadata.\n # The uncompressed version is always verified.\n uncompressed_file_length = uncompressed_fileinfo['length']\n uncompressed_file_hashes = uncompressed_fileinfo['hashes']\n download_file_length = uncompressed_file_length\n\n # Store the file length and hashes of the compressed version of the\n # metadata, if compressions is set.\n if compression and compressed_fileinfo:\n compressed_file_length = compressed_fileinfo['length']\n compressed_file_hashes = compressed_fileinfo['hashes']\n download_file_length = compressed_file_length\n\n def safely_verify_uncompressed_metadata_file(metadata_file_object):\n hard_check_file_length(metadata_file_object,\n uncompressed_file_length)\n check_hashes(metadata_file_object, uncompressed_file_hashes)\n self._verify_uncompressed_metadata_file(metadata_file_object,\n metadata_role)\n\n def safely_verify_compressed_metadata_file(metadata_file_object):\n hard_check_file_length(metadata_file_object, compressed_file_length)\n check_hashes(metadata_file_object, compressed_file_hashes)\n\n if compression is None:\n safely_verify_compressed_metadata_file = None\n\n return self._get_file(metadata_filepath,\n safely_verify_uncompressed_metadata_file, 'meta',\n download_file_length, compression,\n safely_verify_compressed_metadata_file,\n download_safely=True)",
"def read_metadata_file():\n metadata = None\n if not os.path.isfile(META_DATA_FILE):\n ppg.log_info(\"No metadata found. The earthquake splitting might have not been ran yet.\")\n else:\n ppg.log_info(\"Found metadata file\")\n metadata = pd.read_csv(META_DATA_FILE)\n return metadata",
"def osf_download_metadata(file):\n extra_metadata = {}\n for key, value in file.__dict__.items():\n if key not in ['materialized_path', 'title', 'md5', 'sha256', 'hashes',\n 'session', 'kind', 'kind_name']:\n extra_metadata[key] = value\n\n return extra_metadata",
"def _FetchCommonMetadata(self, callback):\r\n paths = [ \"meta-data/hostname\", \"meta-data/instance-id\", \"user-data/passphrase\" ]\r\n self.FetchMetadata(paths, callback)",
"def _load_metadata_from_asset():\n\n with rasterio.Env(AWS_NO_SIGN_REQUEST='YES',\n GDAL_DISABLE_READDIR_ON_OPEN='EMPTY_DIR'):\n with rasterio.open(href) as src:\n # Retrieve metadata stored in COG file\n metadata = src.profile\n metadata.update(src.tags())\n metadata['shape'] = src.shape\n\n # Retrieve COG CRS. Note: these COGs do not appear to have CRS info that can be\n # accessed via the .crs method. If this occurs assume it is in WGS84.\n # All COGs in AWS appear to be projected in WGS84.\n if src.crs is None:\n metadata['crs'] = rasterio.crs.CRS.from_epsg(4326)\n else:\n metadata['crs'] = src.crs\n\n # Compute bounding box, image footprint, and gsd\n bbox, footprint, metadata = _get_geometries(src, metadata)\n\n # Derive some additional metadata from the filename\n fname = os.path.basename(href)\n metadata = _parse_filename(fname, metadata)\n\n return metadata, bbox, footprint",
"async def fetch_metadata(self, route: str):\n data = await self.http.get_metadata(route)\n return data"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Fetches the local metadata file LOCAL_METADATA_FILE and returns the equivalent metadata dict.
|
def _fetch_current_local_metadata():
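    # A missing local metadata file is treated as an empty metadata dict.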
if not os.path.exists(LOCAL_METADATA_FILE):
return {}
with open(LOCAL_METADATA_FILE) as f:
return json.loads(f.read())
|
[
"def _fetch_current_remote_metadata(conn):\n content = _get(conn, REMOTE_METADATA_FILE)\n metadata = json.loads(content) if content else {}\n return metadata",
"def get_metadata(base_dir, config):\n metadata_location = os.path.join(base_dir, config['PATH']['metadata_name'])\n with open(metadata_location, 'rt') as csvfile:\n reader = csv.reader(csvfile, delimiter=';', quoting=csv.QUOTE_NONE)\n meta_dict = dict(reader)\n return meta_dict",
"def get_downloads_metadata():\n global _METADATA\n if _METADATA is None:\n _METADATA = yaml.safe_load(resource_string(__name__, \"downloads.yml\"))\n return _METADATA",
"def read_metadata_file():\n metadata = None\n if not os.path.isfile(META_DATA_FILE):\n ppg.log_info(\"No metadata found. The earthquake splitting might have not been ran yet.\")\n else:\n ppg.log_info(\"Found metadata file\")\n metadata = pd.read_csv(META_DATA_FILE)\n return metadata",
"def load_metadata(metadata_path: str) -> dict:\n with open(metadata_path, \"r\") as f:\n metadata: dict = json.load(f)\n return metadata",
"async def get_file_metadata(\n location_id: LocationID, file_id: StorageFileID, user_id: UserID\n):",
"def get_metadata(self, metadata_file, save_metadata):\n # Check if not None\n if metadata_file and isfile(metadata_file):\n with open(metadata_file) as json_file:\n mdata = json.load(json_file)\n # Check the metadata file.\n if self.check_metadata(mdata):\n print(\"Metadata file found. Loading its content.\")\n for key, value in mdata.items():\n setattr(self, key, value)\n return\n # If couldn't get metadata from file, build it from scratch\n mdata = self.build_metadata()\n if save_metadata is not None:\n with open(save_metadata, 'w') as outfile:\n json.dump(mdata, outfile)",
"def read_data_from_file(self, local_lookml_project_path: str) -> dict:\n logger.info(\n \"Parsing data from local LookML file {}\".format(\n self.lookml_file_name_and_path\n )\n )\n with open(\n utils.assemble_path(\n local_lookml_project_path, self.lookml_file_name_and_path\n ),\n \"r\",\n ) as lookml_file:\n return lkml.load(lookml_file)",
"def _build_local_metadata_file(files, home=''):\n filepaths = [os.path.join(home, f) for f in files]\n shas = [_get_sha_metadata(f) for f in filepaths]\n metadata = dict(zip(files, shas))\n\n with open(LOCAL_METADATA_FILE, 'w') as f:\n f.write(json.dumps(metadata))",
"def _local_path_for_metadata( self, file_metadata ):\n return os.path.join(\n self._media_path_local,\n file_metadata.name\n )",
"def file(resource_name, local_filepath):\n\n return {\n \"name\": resource_name,\n \"type\": PLI_FILE_TYPE,\n \"local_filepath\": local_filepath\n }",
"def fetch_metadata(requests_impl=requests):\n\n print(f'fetching metadata at {Network.METADATA_URL}')\n return requests_impl.get(Network.METADATA_URL).json()",
"def load_metainfo(filename, dependencyLoader=None, extraArgsHandling=InfoKindEl.ADD_EXTRA_ARGS, uri=None):\n path = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), \"../../../../nomad-meta-info/meta_info/nomad_meta_info/{}\".format(filename)))\n return loadJsonFile(path, dependencyLoader, extraArgsHandling, uri)",
"def metadata_file(self):\n return self._metadata_file",
"def _get(conn, remote_file, bucket_name=BUCKET_NAME):\n contents = None\n try:\n reply = conn.get(bucket_name, remote_file)\n contents = reply.body\n if reply.http_response.status != 200:\n print 'Failed to fetch current_remote metadata'\n contents = None\n except:\n contents = None\n return contents",
"def get_video_file_metadata(self, fname: str) -> dict:\r\n data = {\"arg\": fname}\r\n cmd_url = (\r\n \"http://\"\r\n + self.info[\"url\"]\r\n + \":8181/api/luminaire/1023\"\r\n + \"/command/GET_VIDEO_FILE_METADATA\"\r\n )\r\n response = requests.post(\r\n cmd_url, cookies=self.info[\"cookiejar\"], json=data, verify=False\r\n )\r\n return self._check_response_for_data(response)",
"def metadata_extractor(self):\n if not hasattr(self, '_local_file'):\n raise AttributeError(\"local_file attribute must be set before \"\n \"calling metadata_extractor\")\n if not hasattr(self, '_metadata_extractor'):\n if self.local_file.endswith('.whl'):\n logger.info(\"Getting metadata from wheel using \"\n \"WheelMetadataExtractor.\")\n extractor_cls = metadata_extractors.WheelMetadataExtractor\n else:\n logger.info(\"Getting metadata from setup.py using \"\n \"SetupPyMetadataExtractor.\")\n extractor_cls = metadata_extractors.SetupPyMetadataExtractor\n\n base_python_version = (\n self.base_python_version or self.template_base_py_ver)\n\n self._metadata_extractor = extractor_cls(\n self.local_file,\n self.name,\n self.name_convertor,\n self.version,\n self.rpm_name,\n self.venv,\n self.distro,\n base_python_version)\n\n return self._metadata_extractor",
"def _read_metadata_file(driver, filepath):\n parser = driver(filepath)\n metad = parser.get_metadata()\n return ensure_constistent_metadata(\n metad\n )",
"def _load_metadata_from_asset():\n\n with rasterio.Env(AWS_NO_SIGN_REQUEST='YES',\n GDAL_DISABLE_READDIR_ON_OPEN='EMPTY_DIR'):\n with rasterio.open(href) as src:\n # Retrieve metadata stored in COG file\n metadata = src.profile\n metadata.update(src.tags())\n metadata['shape'] = src.shape\n\n # Retrieve COG CRS. Note: these COGs do not appear to have CRS info that can be\n # accessed via the .crs method. If this occurs assume it is in WGS84.\n # All COGs in AWS appear to be projected in WGS84.\n if src.crs is None:\n metadata['crs'] = rasterio.crs.CRS.from_epsg(4326)\n else:\n metadata['crs'] = src.crs\n\n # Compute bounding box, image footprint, and gsd\n bbox, footprint, metadata = _get_geometries(src, metadata)\n\n # Derive some additional metadata from the filename\n fname = os.path.basename(href)\n metadata = _parse_filename(fname, metadata)\n\n return metadata, bbox, footprint"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Based on a comparison of the local and remote metadata dictionaries, filter the files to retain only those that do not exist in the remote metadata dict or that have the same filename but different content. Also filter the resulting file list based on IGNORE_DIRS and IGNORE_EXTENSIONS.
|
def _filter_file_list(files, local_metadata, remote_metadata):
def _is_tracked(filename, metadata):
"""
Is the filename tracked in the remote metadata dict.
        The file may not even be tracked locally yet
"""
current_local_sha = local_metadata.get(filename, None)
current_remote_sha = metadata.get(filename, None)
return current_local_sha is not None \
and current_remote_sha is not None \
and current_local_sha == current_remote_sha
def _is_inside_ignored_dir(filename):
""" Is the filename inside any of the IGNORE_DIRS list """
ignore_dirs = ['./' + x for x in IGNORE_DIRS]
return any([filename.startswith(x) for x in ignore_dirs])
def _has_ignored_extension(filename):
return any([ext in IGNORE_EXTENSIONS
for ext in filename.split('.')[1:]])
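    # Keep only files that are not inside an ignored directory, do not have an
    # ignored extension, and are not already tracked remotely with identical content.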
files = [f for f in files
if not _is_inside_ignored_dir(f)
and not _has_ignored_extension(f)
and not _is_tracked(f, remote_metadata)]
return files
|
[
"def _filter_filesystem_files(files):\n filtered_files = []\n for path in files:\n relative_name = _remove_bundle_root(path)\n not_in_excludes = not any(\n [relative_name.startswith(e) for e in _LOCAL_WHITELIST_EXCLUDES])\n head_directory = relative_name.split(os.path.sep)[0]\n if not_in_excludes and head_directory in _LOCAL_WHITELIST:\n filtered_files.append(path)\n return filtered_files",
"def find_remote_files(product, date, channel, fs, mesoregion=None):\n if 'L1' in product:\n files = [fs.glob('gcp-public-data-goes-16/' + product + '/' + str(date.year) + '/' +\n '{0:03g}'.format(int(date.strftime('%j'))) +\n '/*/*{mesoregion}*M[36]C'.replace(\"{mesoregion}\", mesoregion) + str(channel) + '*.nc')]\n elif 'L2' in product:\n files = [fs.glob('gcp-public-data-goes-16/' + product + '/' + str(date.year) + '/' +\n '{0:03g}'.format(int(date.strftime('%j'))) +\n '/*/*{mesoregion}*'.replace(\"{mesoregion}\", mesoregion) + str(product) + '*M[36]' + '*.nc')]\n\n files = [y for x in files for y in x]\n\n return files",
"def file_list(self,*,prev_list=None,remote=None):\n config = self.config\n \n AB = remote\n remote = {'A':config.remoteA,'B':config.remoteB}[remote]\n \n compute_hashes = 'hash' in [config.compare,getattr(config,f'renames{AB}')]\n reuse = compute_hashes and getattr(config,f'reuse_hashes{AB}')\n \n # build the command including initial filters *before* any filters set\n # by the user\n prev_list_name = pathjoin('.syncrclone',f'{AB}-{self.config.name}_fl.json.xz')\n prev_list_nameLEGACY = pathjoin('.syncrclone',f'{AB}-{self.config.name}_fl.zipjson')\n cmd = ['lsjson',\n '--filter',f'+ /{prev_list_name}', # All of syncrclone's filters come first and include before exclude\n '--filter',f'+ /{prev_list_nameLEGACY}', \n '--filter','+ /.syncrclone/LOCK/*',\n '--filter','- /.syncrclone/**']\n \n if compute_hashes and not reuse:\n cmd.append('--hash')\n\n if not config.always_get_mtime and \\\n not (config.compare == 'mtime' or\n getattr(config,f'renames{AB}') == 'mtime' or\n config.conflict_mode in ('newer','older')):\n cmd.append('--no-modtime')\n \n # Now that my above filters, add user flags\n cmd += config.rclone_flags \\\n + self.add_args \\\n + getattr(config,f'rclone_flags{AB}') \\\n + config.filter_flags\n\n cmd.extend([\n '-R',\n '--no-mimetype', # Not needed so will be faster\n ])\n \n cmd.append(remote)\n \n items = json.loads(self.call(cmd))\n \n folders = [] # Just the folder paths\n files = []\n for item in items:\n item.pop('Name',None)\n if item.pop('IsDir',False):\n item.pop('Size',None)\n item.pop('ModTime',None)\n folders.append(item)\n continue\n \n mtime = item.pop('ModTime')\n item['mtime'] = utils.RFC3339_to_unix(mtime) if mtime else None\n files.append(item)\n \n empties = get_empty_folders(folders,files)\n \n # Make them DictTables\n files = DictTable(files,fixed_attributes=['Path','Size','mtime'])\n debug(f'{AB}: Read {len(files)}')\n \n if not prev_list and {'Path':prev_list_name} in files:\n debug(f'Pulling prev list on {AB}')\n prev_list = self.pull_prev_list(remote=AB)\n files.remove({'Path':prev_list_name})\n if {'Path':prev_list_nameLEGACY} in files:\n log(f'NOTE: legacy previous list \"{prev_list_nameLEGACY}\" was found but NOT use on {AB}. You should remove it')\n elif not prev_list and {'Path':prev_list_nameLEGACY} in files:\n debug(f'Pulling prev list LEGACY on {AB}')\n prev_list = self.pull_prev_listLEGACY(remote=AB)\n files.remove({'Path':prev_list_nameLEGACY})\n log(f'NOTE: legacy previous list \"{prev_list_nameLEGACY}\" was used on {AB}. You can now remove it')\n elif not prev_list:\n debug(f'NEW prev list on {AB}')\n prev_list = []\n \n if not isinstance(prev_list,DictTable):\n prev_list = DictTable(prev_list,fixed_attributes=['Path','Size','mtime']) \n \n # inodes if local\n if getattr(config,f'renames{AB}') == 'inode':\n debug(f'{AB}: Getting local inodes')\n if ':' in remote:\n raise ConfigError('Cannot get inodes for non-local or named remote')\n for file in files:\n localfile = os.path.join(remote,file['Path'])\n try:\n stat = os.stat(localfile)\n except Exception as E:\n ## TODO: Handle links\n raise type(E)(f\"Local file '{localfile}' not found. Check paths. 
May be a link\")\n file['inode'] = stat.st_ino\n \n files.add_fixed_attribute('inode')\n \n if not compute_hashes or '--hash' in cmd:\n return files,prev_list,empties\n \n # update with prev if possible and then get the rest\n not_hashed = []\n updated = 0\n for file in files: #size,mtime,filename\n prev = prev_list[{k:file[k] for k in ['Size','mtime','Path']}] # Will not find if no mtime not in remote\n if not prev or 'Hashes' not in prev:\n not_hashed.append(file['Path'])\n continue\n updated += 1\n file['Hashes'] = prev['Hashes']\n \n if len(not_hashed) == 0:\n debug(f'{AB}: Updated {updated}. No need to fetch more')\n return files,prev_list,empties\n debug(f'{AB}: Updated {updated}. Fetching hashes for {len(not_hashed)}')\n \n tmpfile = self.tmpdir + f'/{AB}_update_hash'\n with open(tmpfile,'wt') as file:\n file.write('\\n'.join(f for f in not_hashed))\n \n cmd = ['lsjson','--hash','--files-from',tmpfile]\n cmd += config.rclone_flags \\\n + self.add_args \\\n + getattr(config,f'rclone_flags{AB}')\n \n cmd.extend([\n '-R',\n '--no-mimetype', # Not needed so will be faster\n '--files-only'])\n \n cmd.append(remote)\n \n updated = json.loads(self.call(cmd))\n for file in updated:\n if 'Hashes' in file:\n files[{'Path':file['Path']}]['Hashes'] = file['Hashes']\n \n debug(f'{AB}: Updated hash on {len(updated)} files')\n \n\n return files,prev_list,empties",
"def filter_files(self,files,history,regex='^.*$'):\n matchingrx=0\n nonmatchingrx=0\n changedcount=0\n newcount=0\n oldcount=0\n filecount=0\n result = dict()\n for file in files:\n #look only for files (not directories, not symlinks etc)\n if stat.S_IFMT(file['type'])==stat.S_IFREG:\n filecount=filecount+1 \n if re.match(regex, file['filename'])!=None:\n self.lg.debug(str(file))\n matchingrx=matchingrx+1 \n filename = \"%s/%s\" % (self.remote_dir,file['filename'])\n if history.has_key(filename):\n self.lg.debug(\"File exists in history: old file: %s, history ts: %s, file ts: %s\" % (filename,history[filename],file['mtime']))\n#TODO: make history time checking optional\n if history[filename]<file['mtime']:\n self.lg.debug(\"File has later ts then in history: old file: %s, history ts: %s, file ts: %s\" % (filename,history[filename],file['mtime']))\n #history[filename] = file.st_mtime\n result[filename] = file['mtime']\n changedcount=changedcount+1\n else:\n self.lg.debug(\"File same or older ts then in history: old file: %s, history ts: %s, file ts: %s\" % (filename,history[filename],file['mtime']))\n oldcount=oldcount+1\n else:\n self.lg.debug(\"new file: %s, file ts: %s\" % (filename,file['mtime']))\n #history[filename] = file.st_mtime\n result[filename] = file['mtime']\n newcount=newcount+1\n else:\n nonmatchingrx=nonmatchingrx+1\n self.lg.info(\"filtering %s files found in %s remote directory against FileHistory with %s entries \" % \n (filecount,self.remote_dir,len(history)))\n self.lg.info(\"\\tmatching file_pattern '%s': %s (non matching: %s) \" % (regex,matchingrx,nonmatchingrx))\n self.lg.info(\"\\told files %s\" % oldcount)\n self.lg.info(\"\\tnew files %s\" % newcount)\n self.lg.info(\"\\tchanged files %s\" % newcount)\n\n \n \n return result",
"def filter_missing_files(file_names, split_by_client=False, allow_missing_files=True):\n\n if not allow_missing_files:\n return file_names\n\n if split_by_client:\n # filter out missing files and empty clients\n existing_files = [\n [f for f in client_files if os.path.exists(f)] for client_files in file_names]\n existing_files = [\n client_files for client_files in existing_files if client_files]\n else:\n # filter out missing files\n existing_files = [f for f in file_names if os.path.exists(f)]\n return existing_files",
"def _selectively_copy_files(source, dest, host_props):\n for root, dirs, files in os.walk(source):\n # Exclude hidden files and directories, in particular \".git\".\n files = [f for f in files if not f[0] == '.']\n dirs[:] = [d for d in dirs if not d[0] == '.']\n for fn in files:\n fn_base, fn_ext = os.path.splitext(fn)\n if fn_ext not in EXTENSIONS:\n print(\" Ignoring {} because extension {} doesn't match\".format(fn, fn_ext))\n continue\n source_path = os.path.join(root, fn)\n reasons = _get_reasons_to_apply_file(source_path, host_props)\n if len(reasons) > 0:\n dest_fn = fn_base if _should_remove_extension(source_path) else fn\n dest_path = os.path.join(dest, dest_fn)\n print(\" Keeping {} as {} because host matches {}\".format(fn, dest_fn, reasons))\n try:\n shutil.copy(source_path, dest_path)\n except Exception as ex:\n raise Exception(\"Error copying {}: {}\".format(source_path, str(ex)))\n\n if host_props['platform'] == 'linux':\n # Git follow umask for public permissions. Explicitly add or remove based\n # on whether the file says it wants them.\n if _should_make_public(source_path):\n subprocess.check_call(['chmod', 'o+rx', dest_path])\n else:\n subprocess.check_call(['chmod', 'o-rwx', dest_path])\n else:\n #print(\" Discarding {} because not all properties matched\".format(fn))\n pass",
"def checkArchFiles(self, key = None, archName = None, verbose = False):\n\n # Set archive from passed args.\n if key is not None and archName is None:\n archName = self.nbDetails[key]['archName']\n elif key is None and archName is None:\n print('Skipping archive checks, no archive supplied.')\n return None\n\n # Check if file exists on remote\n # Note this returns a list\n archExists = self.checkFiles(archName)\n\n if archExists[0]:\n # Get arch contents from remote via Fabric.\n with self.c.prefix(f\"source {self.hostDefn[self.host]['condaPath']} {self.hostDefn[self.host]['condaEnv']}\"):\n result = self.c.run(f\"python -m zipfile -l {archName}\", hide = True)\n\n # Compare with local lsit\n # archFiles = result.stdout.splitlines()\n # localList = self.nbDetails[key]['pkgFileList'][5:]\n # fileComp = list(set(localList) - set(archFiles)) # Compare lists as sets\n archFiles = [(line.split()[0]) for line in result.stdout.splitlines()[1:]] # Keep file names only (drop header, and file properties)\n localList = self.nbDetails[key]['pkgFileList']\n\n # Test & set relative paths for local files in archive\n localListRel = []\n for fileIn in localList:\n try:\n localListRel.append(Path(fileIn).relative_to(self.hostDefn[self.host]['nbProcDir']).as_posix())\n except ValueError:\n localListRel.append(Path(fileIn).name) # In this case just take file name, will go in archive root\n\n fileComp = list(set(localListRel) - set(archFiles)) # Compare lists as sets\n\n # Results\n print(f\"\\n***Checking archive: {archName}\")\n print(f\"Found {len(archFiles)} on remote. Local list length {len(localList)}.\")\n\n # This will run if fileComp is not an empty list\n if fileComp:\n print(f\"Difference: {len(archFiles) - len(localList)}\")\n print(\"File differences:\")\n print(*fileComp, sep = '\\n')\n\n else:\n print(\"Local and remote file lists match.\")\n\n\n else:\n print(f\"***Missing archive: {archName}\")\n fileComp = None\n\n # Set fileComp\n # Either empty, None or list of differences.\n self.nbDetails[key]['archFileCheck'] = fileComp\n if fileComp:\n self.nbDetails[key]['archFilesOK'] = False\n elif fileComp is None:\n self.nbDetails[key]['archFilesOK'] = False\n else:\n self.nbDetails[key]['archFilesOK'] = True\n\n if verbose:\n print(\"\\n***Local file list:\")\n print(*localListRel, sep='\\n')\n print(\"\\n***Archive file list:\")\n print(*archFiles, sep='\\n')\n\n return localListRel, archFiles, fileComp, result",
"def generate_filtered_files(raw_files):\n with open(\"./cloud/lambda_functions/Crawler-Master/IBX50.txt\") as f:\n stock_list = f.readlines()\n stock_list = list(map(str.strip, stock_list))\n\n filtred_files = []\n for file in raw_files:\n for stock in stock_list:\n ftype, date = get_file_info(file)\n filtred_files.append(\n f\"{date[0]}/{date[1]}/{date[2]}/{ftype}_{stock}.parquet\"\n )\n return set(filtred_files)",
"def files_unchanged(self):\n\n passed = []\n failed = []\n ignored = []\n fixed = []\n could_fix = False\n\n # Check that we have the minimum required config\n required_pipeline_config = {\"manifest.name\", \"manifest.description\", \"manifest.author\"}\n missing_pipeline_config = required_pipeline_config.difference(self.nf_config)\n if missing_pipeline_config:\n return {\"ignored\": [f\"Required pipeline config not found - {missing_pipeline_config}\"]}\n try:\n prefix, short_name = self.nf_config[\"manifest.name\"].strip(\"\\\"'\").split(\"/\")\n except ValueError:\n log.warning(\n \"Expected manifest.name to be in the format '<repo>/<pipeline>'. Will assume it is <pipeline> and default to repo 'nf-core'\"\n )\n short_name = self.nf_config[\"manifest.name\"].strip(\"\\\"'\")\n prefix = \"nf-core\"\n\n # NB: Should all be files, not directories\n # List of lists. Passes if any of the files in the sublist are found.\n files_exact = [\n [\".gitattributes\"],\n [\".prettierrc.yml\"],\n [\"CODE_OF_CONDUCT.md\"],\n [\"LICENSE\", \"LICENSE.md\", \"LICENCE\", \"LICENCE.md\"], # NB: British / American spelling\n [os.path.join(\".github\", \".dockstore.yml\")],\n [os.path.join(\".github\", \"CONTRIBUTING.md\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"bug_report.yml\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"config.yml\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"feature_request.yml\")],\n [os.path.join(\".github\", \"PULL_REQUEST_TEMPLATE.md\")],\n [os.path.join(\".github\", \"workflows\", \"branch.yml\")],\n [os.path.join(\".github\", \"workflows\", \"linting_comment.yml\")],\n [os.path.join(\".github\", \"workflows\", \"linting.yml\")],\n [os.path.join(\"assets\", \"email_template.html\")],\n [os.path.join(\"assets\", \"email_template.txt\")],\n [os.path.join(\"assets\", \"sendmail_template.txt\")],\n [os.path.join(\"assets\", f\"nf-core-{short_name}_logo_light.png\")],\n [os.path.join(\"docs\", \"images\", f\"nf-core-{short_name}_logo_light.png\")],\n [os.path.join(\"docs\", \"images\", f\"nf-core-{short_name}_logo_dark.png\")],\n [os.path.join(\"docs\", \"README.md\")],\n [os.path.join(\"lib\", \"nfcore_external_java_deps.jar\")],\n [os.path.join(\"lib\", \"NfcoreTemplate.groovy\")],\n ]\n files_partial = [\n [\".gitignore\", \".prettierignore\", \"pyproject.toml\"],\n ]\n\n # Only show error messages from pipeline creation\n logging.getLogger(\"nf_core.create\").setLevel(logging.ERROR)\n\n # Generate a new pipeline with nf-core create that we can compare to\n tmp_dir = tempfile.mkdtemp()\n\n # Create a template.yaml file for the pipeline creation\n template_yaml = {\n \"name\": short_name,\n \"description\": self.nf_config[\"manifest.description\"].strip(\"\\\"'\"),\n \"author\": self.nf_config[\"manifest.author\"].strip(\"\\\"'\"),\n \"prefix\": prefix,\n }\n\n template_yaml_path = os.path.join(tmp_dir, \"template.yaml\")\n with open(template_yaml_path, \"w\") as fh:\n yaml.dump(template_yaml, fh, default_flow_style=False)\n\n test_pipeline_dir = os.path.join(tmp_dir, f\"{prefix}-{short_name}\")\n create_obj = nf_core.create.PipelineCreate(\n None, None, None, no_git=True, outdir=test_pipeline_dir, template_yaml_path=template_yaml_path\n )\n create_obj.init_pipeline()\n\n # Helper functions for file paths\n def _pf(file_path):\n \"\"\"Helper function - get file path for pipeline file\"\"\"\n return os.path.join(self.wf_path, file_path)\n\n def _tf(file_path):\n \"\"\"Helper function - get file path for template file\"\"\"\n return 
os.path.join(test_pipeline_dir, file_path)\n\n # Files that must be completely unchanged from template\n for files in files_exact:\n # Ignore if file specified in linting config\n ignore_files = self.lint_config.get(\"files_unchanged\", [])\n if any([f in ignore_files for f in files]):\n ignored.append(f\"File ignored due to lint config: {self._wrap_quotes(files)}\")\n\n # Ignore if we can't find the file\n elif not any([os.path.isfile(_pf(f)) for f in files]):\n ignored.append(f\"File does not exist: {self._wrap_quotes(files)}\")\n\n # Check that the file has an identical match\n else:\n for f in files:\n try:\n if filecmp.cmp(_pf(f), _tf(f), shallow=True):\n passed.append(f\"`{f}` matches the template\")\n else:\n if \"files_unchanged\" in self.fix:\n # Try to fix the problem by overwriting the pipeline file\n shutil.copy(_tf(f), _pf(f))\n passed.append(f\"`{f}` matches the template\")\n fixed.append(f\"`{f}` overwritten with template file\")\n else:\n failed.append(f\"`{f}` does not match the template\")\n could_fix = True\n except FileNotFoundError:\n pass\n\n # Files that can be added to, but that must contain the template contents\n for files in files_partial:\n # Ignore if file specified in linting config\n ignore_files = self.lint_config.get(\"files_unchanged\", [])\n if any([f in ignore_files for f in files]):\n ignored.append(f\"File ignored due to lint config: {self._wrap_quotes(files)}\")\n\n # Ignore if we can't find the file\n elif not any([os.path.isfile(_pf(f)) for f in files]):\n ignored.append(f\"File does not exist: {self._wrap_quotes(files)}\")\n\n # Check that the file contains the template file contents\n else:\n for f in files:\n try:\n with open(_pf(f), \"r\") as fh:\n pipeline_file = fh.read()\n with open(_tf(f), \"r\") as fh:\n template_file = fh.read()\n if template_file in pipeline_file:\n passed.append(f\"`{f}` matches the template\")\n else:\n if \"files_unchanged\" in self.fix:\n # Try to fix the problem by overwriting the pipeline file\n with open(_tf(f), \"r\") as fh:\n template_file = fh.read()\n with open(_pf(f), \"w\") as fh:\n fh.write(template_file)\n passed.append(f\"`{f}` matches the template\")\n fixed.append(f\"`{f}` overwritten with template file\")\n else:\n failed.append(f\"`{f}` does not match the template\")\n could_fix = True\n except FileNotFoundError:\n pass\n\n # cleaning up temporary dir\n shutil.rmtree(tmp_dir)\n\n return {\"passed\": passed, \"failed\": failed, \"ignored\": ignored, \"fixed\": fixed, \"could_fix\": could_fix}",
"def _sync_with_list_folder_entries( self, entries ):\n \n # step through remote file metadata entries\n for file_metadata in entries:\n \n # this entry is file metadata\n if isinstance( file_metadata, dropbox.files.FileMetadata ):\n\n # by default, download files\n should_download_file = True\n \n # get possible local path for remote file\n local_path = self._local_path_for_metadata( file_metadata )\n \n # if this path already exists locally...\n if os.path.exists( local_path ):\n\n # get last modification date for local file\n s = os.stat( local_path )\n dt_local = datetime.datetime.fromtimestamp( s.st_mtime )\n\n # if remote file is older, or the same age as local file...\n if file_metadata.server_modified <= dt_local:\n # ..don't download it\n should_download_file = False\n\n if should_download_file:\n self._download_file( file_metadata )\n \n\n # this is a deleted entry\n elif isinstance( file_metadata, dropbox.files.DeletedMetadata ):\n \n # get local path for remote file\n local_path = self._local_path_for_metadata( file_metadata )\n\n # delete it, if it exists\n if os.path.exists( local_path ):\n logging.debug( \"Will delete file at path: %s\", local_path )\n os.remove( local_path )",
"def filter_cityengine(models):\n \n filtered = []\n \n for m in models:\n if 'tahirazim/apiupload' not in m['base_path']:\n filtered.append(m)\n \n return filtered",
"def filter_filelist(files: list, hour_mod: int = 12, min_mod: int = 60) -> list:\n files_restricted = []\n if hour_mod == 0 and min_mod == 0:\n files_restricted.append(sorted(files)[-1])\n else:\n for file in files:\n hour = int(file.split(\"_\")[3][8:10])\n minute = int(file.split(\"_\")[3][10:12])\n if hour % hour_mod == 0 and minute % min_mod == 0:\n files_restricted.append(file)\n logging.debug(f'Remote file added: {file}')\n else:\n logging.debug(f'Remote file ignored: {file}')\n logging.info('Files to be downloaded has been reduced from {} to {}'.format(len(files), len(files_restricted)))\n return files_restricted",
"def compare(file_compared,file_master):\n\n file_compared_extensions = []\n file_master_extensions = []\n\n with open(file_compared,'r') as fc:\n for line in fc:\n file_compared_extensions.append(line.strip())\n\n with open(file_master,'r') as fm:\n for line in fm:\n file_master_extensions.append(line.strip())\n\n return list(set(file_compared_extensions) - set(file_master_extensions))",
"def search_local_files(filename, data_type, train_or_val, path, data_json):\n files = []\n\n if os.path.exists(path):\n all_files = os.listdir(path)\n for f in all_files:\n if f not in data_json.keys():\n continue\n if filename in f and data_type in f and train_or_val in f:\n dataset_filepath = os.path.join(path, f)\n local_file_md5 = get_file_md5(dataset_filepath)\n dataset_md5 = data_json[f][\"md5\"]\n if local_file_md5 == dataset_md5:\n files.append(f)\n else:\n print(f\"{f} is broken so that cannot partition from it.\")\n return files",
"def filter_local_songs(filepaths, include_filters=None, exclude_filters=None, all_includes=False, all_excludes=False):\n\n\tmatched_songs = []\n\tfiltered_songs = []\n\n\tfor filepath in filepaths:\n\t\ttry:\n\t\t\tsong = _get_mutagen_metadata(filepath)\n\t\texcept mutagen.MutagenError:\n\t\t\tfiltered_songs.append(filepath)\n\t\telse:\n\t\t\tif include_filters or exclude_filters:\n\t\t\t\tif _check_filters(\n\t\t\t\t\t\tsong, include_filters=include_filters, exclude_filters=exclude_filters,\n\t\t\t\t\t\tall_includes=all_includes, all_excludes=all_excludes):\n\t\t\t\t\tmatched_songs.append(filepath)\n\t\t\t\telse:\n\t\t\t\t\tfiltered_songs.append(filepath)\n\t\t\telse:\n\t\t\t\tmatched_songs.append(filepath)\n\n\treturn matched_songs, filtered_songs",
"def test_remote_file_list(self, inst_dict):\n\n test_inst, date = initialize_test_inst_and_date(inst_dict)\n name = '_'.join((test_inst.platform, test_inst.name))\n\n if hasattr(getattr(self.inst_loc, name), 'list_remote_files'):\n assert callable(test_inst.remote_file_list)\n\n # Check for username\n if 'user_info' in inst_dict.keys():\n dl_dict = inst_dict['user_info']\n else:\n dl_dict = {}\n\n files = test_inst.remote_file_list(start=date, stop=date, **dl_dict)\n\n # If test date is correctly chosen, files should exist\n assert len(files) > 0\n else:\n pytest.skip(\"remote_file_list not available\")\n\n return",
"def raw_unfiltered_files(unfiltered_files):\n raw_unfiltred = []\n for file in unfiltered_files:\n file_date = \"\".join(file.split(\"/\")[:3])\n file_type = \"_\".join(file.split(\"/\")[3].split(\"_\")[:2])\n raw_unfiltred.append(f\"{file_type}_{file_date}.gz\")\n return set(raw_unfiltred)",
"def check_local_changes(dir_list):\n logging.info('Checking for local files...')\n for f in dir_list:\n if not os.path.islink(f) and f not in WHITELIST:\n logging.warn('Spec file %s is a local copy. Can this be replaced '\n 'with a link to a helper copy?' % f)",
"def filterFiles(groupDict, fileList):\n for fl in fileList:\n cleanFile = cleanUpPath(fl)\n dirsList = PurePath(fl).parts\n try:\n # Find the first libs directory.\n index = dirsList.index(\"libs\")\n # Any child of libs directory is a group.\n grp = dirsList[index + 1]\n groupDict[grp].append(cleanFile)\n except ValueError:\n groupDict[GRP_UNFILTERED].append(cleanFile)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Is the filename tracked in the remote metadata dict. The file may not even be tracked locally yet
|
def _is_tracked(filename, metadata):
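    # Note: local_metadata comes from the enclosing _filter_file_list scope;
    # this helper is defined there as a nested function.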
current_local_sha = local_metadata.get(filename, None)
current_remote_sha = metadata.get(filename, None)
return current_local_sha is not None \
and current_remote_sha is not None \
and current_local_sha == current_remote_sha
|
[
"def is_remote_cached(cls, target_filename):\n is_cached = None\n cache = cls.CACHE_BACKEND()\n for file_name, file_id in cache.search():\n if file_name == os.path.basename(target_filename):\n is_cached = file_id\n logger.debug('File %r already cached at %r', target_filename, cls.CACHE_BACKEND)\n break\n return is_cached",
"def _fileinfo_has_changed(self, metadata_filename, new_fileinfo):\n \n # If there is no fileinfo currently stored for 'metadata_filename',\n # try to load the file, calculate the fileinfo, and store it.\n if metadata_filename not in self.fileinfo:\n self._update_fileinfo(metadata_filename)\n\n # Return true if there is no fileinfo for 'metadata_filename'.\n # 'metadata_filename' is not in the 'self.fileinfo' store\n # and it doesn't exist in the 'current' metadata location.\n if self.fileinfo.get(metadata_filename) is None:\n return True\n\n current_fileinfo = self.fileinfo[metadata_filename]\n\n if current_fileinfo['length'] != new_fileinfo['length']:\n return True\n\n # Now compare hashes. Note that the reason we can't just do a simple\n # equality check on the fileinfo dicts is that we want to support the\n # case where the hash algorithms listed in the metadata have changed\n # without having that result in considering all files as needing to be\n # updated, or not all hash algorithms listed can be calculated on the\n # specific client.\n for algorithm, hash_value in new_fileinfo['hashes'].items():\n # We're only looking for a single match. This isn't a security\n # check, we just want to prevent unnecessary downloads.\n if hash_value == current_fileinfo['hashes'][algorithm]:\n return False\n\n return True",
"def IsRepoMetaFile(f: str):\n return (fs.isfile(f) and pbutil.ProtoIsReadable(f,\n scrape_repos_pb2.GitHubRepoMetadata()))",
"def has_metadata(name):",
"def has_metadata(self):\n if self.mimetype in Config.mimes_metadata:\n return True\n return False",
"def _fileinfo_has_changed(self, metadata_filename, new_fileinfo):\n\n I_TO_PRINT = TO_PRINT + uptane.YELLOW + '[SingleRepoUpdater._fileinfo_has_changed()]: ' + uptane.ENDCOLORS\n #TODO: Print to be deleted\n print(str('%s %s' % (I_TO_PRINT, '...')))\n #TODO: Until here\n\n if new_fileinfo is not None:\n tuf.formats.FILEINFO_SCHEMA.check_match(new_fileinfo)\n\n # If there is no fileinfo currently stored for 'metadata_filename',\n # try to load the file, calculate the fileinfo, and store it.\n if metadata_filename not in self.fileinfo:\n self._update_fileinfo(metadata_filename)\n\n # Return true if there is no fileinfo for 'metadata_filename'.\n # 'metadata_filename' is not in the 'self.fileinfo' store\n # and it doesn't exist in the 'current' metadata location.\n if self.fileinfo[metadata_filename] is None:\n return True\n\n current_fileinfo = self.fileinfo[metadata_filename]\n\n if current_fileinfo['length'] != new_fileinfo['length']:\n return True\n\n # Now compare hashes. Note that the reason we can't just do a simple\n # equality check on the fileinfo dicts is that we want to support the\n # case where the hash algorithms listed in the metadata have changed\n # without having that result in considering all files as needing to be\n # updated, or not all hash algorithms listed can be calculated on the\n # specific client.\n for algorithm, hash_value in six.iteritems(new_fileinfo['hashes']):\n # We're only looking for a single match. This isn't a security\n # check, we just want to prevent unnecessary downloads.\n if algorithm in current_fileinfo['hashes']:\n if hash_value == current_fileinfo['hashes'][algorithm]:\n return False\n\n return True",
"def has_metadata(file_contents):\n lines = file_contents.split(\"\\n\")\n return \"---\" in lines[0]",
"def is_file(self, remote_path):\n remote_path = self.get_abs_path(remote_path)\n return os.path.isfile(remote_path)",
"def has_blobstore_file(self, filename):\n return filename in self.blobstore_files",
"def has_filename(self):\n if self.filename == \"untitled\":\n return False\n else:\n return True",
"def file_is_shared(self) -> bool:\n # as of now, the only known repositories are commons and wikitravel\n # TODO: put the URLs to family file\n if not self.site.has_image_repository:\n return False\n\n try:\n info = self.latest_file_info\n except NoPageError:\n return False\n\n if 'wikitravel_shared' in self.site.shared_image_repository():\n return info.url.startswith('https://wikitravel.org/upload/shared/')\n\n # default to commons\n return info.url.startswith(\n 'https://upload.wikimedia.org/wikipedia/commons/')",
"def _has_file_for(self, ticker):\n return os.path.isfile(self._filename_for(ticker))",
"def has_local_tails_file(self) -> bool:\n tails_file_path = Path(self.get_receiving_tails_local_path())\n return tails_file_path.is_file()",
"def is_track(self):\n if not os.path.isfile(self.file_path):\n return False\n\n if '.' not in self.file_path:\n return False\n\n ext = self.get_extension()\n if ext not in self.VALID_FILE_EXTENSIONS:\n log.debug('[ SKIPPED ] %s (Unrecognized extension)' %\n self.file_path)\n\n return False\n elif ext not in self.allowed_extensions:\n log.debug('[ SKIPPED ] %s (Ignored extension)' % self.file_path)\n\n return False\n\n return True",
"def __etag_ok(self, filepath, local_etag, remote_etag):\n\n if self.__get_s3obj_etag_nparts(local_etag) != self.__get_s3obj_etag_nparts(remote_etag):\n parts = int(self.__get_s3obj_etag_nparts(remote_etag))\n size = stat(filepath).st_size\n chunk_size = ceil((size / parts) / 1024 / 1024)\n local_etag = self.__gen_etag(filepath, chunk_size)\n\n return local_etag == remote_etag",
"def isUsedAsFilename(self):\n \n pass",
"def remote_file_exists(url):\n status = requests.head(url).status_code\n\n if status == 200:\n return True\n else:\n raise RemoteFileDoesntExist",
"def local(self):\r\n return self._url.scheme in ('', 'file')",
"def has_key(self, filename):\n return filename in self.keys"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Is the filename inside any of the directories in the IGNORE_DIRS list
|
def _is_inside_ignored_dir(filename):
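    # IGNORE_DIRS entries are bare directory names; prefix each with './' so it
    # can be matched against the relative file paths via startswith().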
ignore_dirs = ['./' + x for x in IGNORE_DIRS]
return any([filename.startswith(x) for x in ignore_dirs])
|
[
"def is_in_ignored_directory(self, path):\r\n dirs = os.path.split(path)\r\n for dir in dirs:\r\n if dir in self.ignored_dirs:\r\n return True\r\n return False",
"def is_dir_ignored_file(file_name, cfg):\n if file_name:\n for pattern in cfg.options.dir_ignored_files:\n if fnmatch.fnmatch(file_name, pattern):\n return True\n return False",
"def check_ignored(self, path):\n while path != '':\n head, tail = os.path.split(path)\n if tail in self.ignore_directories:\n return True\n old_path = path\n path = head\n if old_path == path:\n return False\n return False",
"def check_if_file_is_ignored(file_path):\n path_parts = file_path.split('/')\n\n for part in path_parts:\n if part in INGORED_PATHS:\n return True",
"def _should_skip_file(path):\n for pattern in IGNORE_PATTERN_LIST:\n if pattern in path:\n return True\n\n return False",
"def _should_ignore(self, path):\n for ignore in self.options.ignores:\n if fnmatch.fnmatch(path, ignore):\n return True\n return False",
"def is_ignored(file, ignored):\n return any(i in PurePath(path.abspath(file)).parts for i in ignored)",
"def dirname_filter ( self, dirname, _fnmatch=fnmatch.fnmatch ):\n return all (\n not _fnmatch ( dirname, pat ) for pat in self.DIRNAMES_IGNORE\n )",
"def exist_subfolder(self, path, ignore=[]):\r\n\r\n\t\tif not os.path.exists(path):\r\n\t\t\tself.sys_error_message(\"%s does not exist.\" % path)\r\n\t\t\treturn False\r\n\r\n\t\tif os.path.isfile(path):\r\n\t\t\tself.sys_error_message(\"%s is a file.\" % path)\r\n\t\t\treturn False\r\n\t\t\r\n\t\tfor fn in os.listdir(path):\r\n\t\t\tdirfile = self.path_join(path, fn)\r\n\t\t\tif os.path.isdir(dirfile):\r\n\t\t\t\tif not fn in ignore:\r\n\t\t\t\t\treturn True\r\n\r\n\t\treturn False",
"def dir_excluded(path):\n\tname = os.path.basename(path)\n\t# skip any dirs which start with . (dot) and in EXCLUDED_DIRS\n\tif name.startswith('.') and u'.*' in EXCLUDED_DIRS:\n\t\treturn True\n\t# skip any dirs in EXCLUDED_DIRS\n\tif name in EXCLUDED_DIRS or path in EXCLUDED_DIRS:\n\t\treturn True\n\t# skip any dirs that are found in reg exp checks including wildcard searches\n\tfound_dir = False\n\tfound_path = False\n\tfor d in EXCLUDED_DIRS:\n\t\tif d == '.*':\n\t\t\tcontinue\n\t\tif d.startswith('*') and d.endswith('*'):\n\t\t\td = d.replace('*', '')\n\t\t\tif re.search(d, name):\n\t\t\t\tfound_dir = True\n\t\t\t\tbreak\n\t\t\telif re.search(d, path):\n\t\t\t\tfound_path = True\n\t\t\t\tbreak\n\t\telif d.startswith('*'):\n\t\t\td = d + '$'\n\t\t\tif re.search(d, name):\n\t\t\t\tfound_dir = True\n\t\t\t\tbreak\n\t\t\telif re.search(d, path):\n\t\t\t\tfound_path = True\n\t\t\t\tbreak\n\t\telif d.endswith('*'):\n\t\t\td = '^' + d\n\t\t\tif re.search(d, name):\n\t\t\t\tfound_dir = True\n\t\t\t\tbreak\n\t\t\telif re.search(d, path):\n\t\t\t\tfound_path = True\n\t\t\t\tbreak\n\t\telse:\n\t\t\tif d == name:\n\t\t\t\tfound_dir = True\n\t\t\t\tbreak\n\t\t\telif d == path:\n\t\t\t\tfound_path = True\n\t\t\t\tbreak\n\n\tif found_dir or found_path:\n\t\treturn True\n\n\treturn False",
"def exclude(filename):\n return any(re.match(r'^' + directory, filename)\n for directory in args.excludes)",
"def is_ignored(self, path):\n for ignore in self.project.ignore:\n if path.startswith(ignore):\n return True\n return False",
"def is_excluded(path, dirs):\n for directory in dirs:\n if path.startswith(directory):\n return True\n return False",
"def should_ignore_path(path):\n for p in config.compiled_ignore_patterns:\n if p.match(path):\n return True\n return False",
"def test_find_not_should_ignore_path_regexp(self, tmp_path):\n plugin_folder_path = populate_dir(tmp_path)\n\n detected_files = set()\n should_ignore_files = {\n \"test_notload.py\",\n \"test_notload_sub.py\",\n \"test_noneload_sub1.py\",\n \"test_shouldignore.py\",\n \".airflowignore_glob\",\n }\n should_not_ignore_files = {\n \"test_load.py\",\n \"test_load_sub1.py\",\n }\n ignore_list_file = \".airflowignore\"\n for file_path in find_path_from_directory(plugin_folder_path, ignore_list_file):\n file_path = Path(file_path)\n if file_path.is_file() and file_path.suffix == \".py\":\n detected_files.add(file_path.name)\n assert detected_files == should_not_ignore_files\n assert detected_files.isdisjoint(should_ignore_files)",
"def is_in_directory(f):\n f = os.path.dirname(f) + os.path.sep\n return any(f.startswith(d) for d in dirs_to_group)",
"def is_excluded(root, excludes):\n\n fileName = os.path.basename(root)\n dirName = os.path.dirname(root)\n\n excludes = ['Test.py', 'setup.py']\n\n for exclude in excludes:\n if fileName.endswith(exclude):\n return True\n\n return False",
"def path_excluded(self,path):\n\t\tfor pattern in self.excludes['file_exclude']:\n\t\t\tif pattern in path:\n\t\t\t\t#print \" \u001b[41mExcluding:\u001b[m\",path\n\t\t\t\treturn True\n\t\treturn False",
"def ignore_path(path, ignore_list=None, whitelist=None):\n if ignore_list is None:\n return True\n\n should_ignore = matches_glob_list(path, ignore_list)\n if whitelist is None:\n return should_ignore\n\n return should_ignore and not matches_glob_list(path, whitelist)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Walks through all the subfolders in static_root and uploads every valid file found to S3. If gzip is enabled, also tries to compress and upload a gzipped version of each static asset.
|
def upload_all_to_s3(static_root):
    conn = _get_connection()
    files = _get_file_list(static_root)
    _build_local_metadata_file(files, home=static_root)
    local_metadata = _fetch_current_local_metadata()
    remote_metadata = _fetch_current_remote_metadata(conn)
    files_to_upload = _filter_file_list(files, local_metadata, remote_metadata)
    start_time = time.time()
    print('Upload start: Landing in BUCKET_NAME: %s' % BUCKET_NAME)
    for f in files_to_upload:
        # Upload to bucket
        upload_file(conn, os.path.join(static_root, f), f)
        # Upload gzipped css/js version if gzip is enabled
        can_be_gzipped = _file_can_be_compressed(os.path.join(static_root, f))
        if GZIP_ENABLED and can_be_gzipped:
            upload_file(conn, os.path.join(static_root, f), f, gzip=True)
    # Extra files outside public/static
    if EXTRA_FILES:
        print('Now, uploading extra files outside public/static')
        for filename_local, filename_s3 in EXTRA_FILES.items():
            upload_file(conn, filename_local, filename_s3)
    end_time = time.time()
    print('Upload finished: Time elapsed: %s s' % round(end_time - start_time, 3))
    # Refresh metadata file on the server
    print('Uploading local metadata file')
    upload_file(conn, LOCAL_METADATA_FILE, REMOTE_METADATA_FILE)
    print('Uploading process DONE')
|
[
"def test_upload(self):\n upload_static_assets.upload_static_files()\n\n s3 = boto3.resource('s3', region_name='us-east-1')\n for item in s3.Bucket(BUCKET).objects.all():\n self.assertTrue(item.key.startswith('static/base'),\n 'The only static files involved are from base.')",
"def sync_s3(self):\n bucket, key = self.open_s3()\n for directory in self.DIRECTORIES:\n for root, dirs, files in os.walk(directory):\n self.upload_s3((bucket, key, self.AWS_BUCKET_NAME, directory), root, files, dirs)",
"def sync_static_bucket(not_processed_static_files):\n\n print(f\"sync '{STATIC_BUCKET}' s3 bucket with local static files\")\n\n not_processed_static_bucket_files = get_static_bucket_files()\n\n upload_count = 0\n re_upload_count = 0\n delete_count = 0\n\n for static_path, file in not_processed_static_files.items():\n\n if static_path in not_processed_static_bucket_files:\n\n if file[\"Hash\"] == not_processed_static_bucket_files[static_path][\"Hash\"]:\n del not_processed_static_bucket_files[static_path]\n continue\n\n else:\n s3_cli.delete_object(Bucket=STATIC_BUCKET, Key=not_processed_static_bucket_files[static_path][\"Key\"])\n delete_count += 1\n re_upload_count += 1\n del not_processed_static_bucket_files[static_path]\n\n s3_cli.upload_file(file[\"Path\"],\n STATIC_BUCKET,\n file[\"Name\"],\n ExtraArgs={\"ContentType\": get_file_mime_type(static_path),\n \"ContentEncoding\": \"gzip\",\n \"Tagging\": f\"name={static_path}&hash={file['Hash']}\",\n 'CacheControl': 'max-age: 31536000', # = 1 year\n 'ACL': 'public-read'})\n upload_count += 1\n\n for name, file in not_processed_static_bucket_files.items():\n s3_cli.delete_object(Bucket=STATIC_BUCKET, Key=file[\"Key\"])\n delete_count += 1\n\n print(f\"synced '{STATIC_BUCKET}' s3 bucket with local static files:\\n\"\n f\"\\t- uploaded [{upload_count}] files ([{re_upload_count}] for re-upload)\\n\"\n f\"\\t- deleted [{delete_count}] files ([{re_upload_count}] for re-upload)\\n\"\n f\"\\t- re-uploaded [{re_upload_count}] files\")",
"def s3_sync(s3_bucket, s3_prefix, sync_path=\".\"):\n # Get bucket\n s3_resource = boto3.resource(\"s3\")\n bucket = s3_resource.Bucket(s3_bucket)\n\n # Walk paths and subdirectories, uploading files\n for path, subdirs, files in os.walk(sync_path):\n # Get relative path prefix\n relpath = os.path.relpath(path, sync_path)\n if not relpath.startswith('.'):\n prefix = os.path.join(s3_prefix, relpath)\n else:\n prefix = s3_prefix\n\n for file in files:\n file_key = os.path.join(prefix, file)\n bucket.upload_file(os.path.join(path, file), file_key)",
"def upload_child_objects(self, local_dir_path, s3_dir_path, recursive=False, fn_pattern=None):\n child_objects = [os.path.join(local_dir_path, f) for f in os.listdir(local_dir_path)]\n child_files = [f for f in child_objects if os.path.isfile(f)]\n child_dirs = [f for f in child_objects if os.path.isdir(f)]\n\n for child_file in child_files:\n if not fn_pattern or fnmatch.fnmatch(child_file, fn_pattern):\n s3_object_path = os.path.join(s3_dir_path, os.path.basename(child_file))\n logging.debug(\"Uploading \\\"{}\\\" to \\\"{}\\\"\".format(child_file, s3_object_path))\n self.upload_object(child_file, s3_object_path)\n\n if recursive:\n for child_dir_local in child_dirs:\n child_dir_s3 = os.path.join(s3_dir_path, os.path.basename(child_dir_local))\n self.upload_child_objects(child_dir_local, child_dir_s3, recursive, fn_pattern)",
"def upload_files(bucket, src):\n\n for dir_path, dir_name, file_name in os.walk(src):\n dir_name = dir_name\n for name in file_name:\n upload_to_s3(bucket, (os.path.join(dir_path, name)))\n #click.echo(os.path.join(dir_path, name))",
"def deploy_to_s3(self):\n self.tempdir = tempfile.mkdtemp('s3deploy')\n\n for keyname, absolute_path in self.find_file_paths():\n self.s3_upload(keyname, absolute_path)\n\n shutil.rmtree(self.tempdir, True)\n return True",
"def build_static():\n\n print green(\"Gathering and preprocessing static files...\")\n fabutils.manage_py('collectstatic --noinput -i media')\n fabutils.manage_py('compress')",
"def compress_static():\n import plumbum\n try:\n compress_cmd = plumbum.local['zopfli']['--gzip']\n except plumbum.CommandNotFound:\n print('zopfli not available, falling back to gzip')\n compress_cmd = plumbum.local['gzip']['-9', '-k']\n \n extensions = {'.js', '.css', '.svg', '.ttf'}\n files = set(sc.static_dir.glob('fonts/**/*'))\n files.update(sc.static_dir.glob('js/data/*'))\n files.update(sc.static_dir.glob('js/compiled/*'))\n files.update(sc.static_dir.glob('css/compiled/*'))\n to_process = []\n for file in sorted(files):\n if file.suffix == '.gz':\n if file.with_name(file.stem) not in files:\n # Remove if original does not exist anymore\n file.unlink()\n continue\n if file.suffix not in extensions:\n continue\n if file.with_suffix(file.suffix + '.gz') in files:\n continue\n to_process.append(file)\n if to_process:\n print('Compressing {} files'.format(len(to_process)))\n compress_cmd[to_process]()",
"def copy_static_resources(self):\n if not hasattr(settings, 'STATIC_ROOT'):\n raise MissingStaticRoot()\n destination = os.path.join(STORAGE_PATH, 'static')\n if os.path.exists(destination):\n shutil.rmtree(destination)\n shutil.copytree(settings.STATIC_ROOT, destination)",
"def cp_static_files(self,inpath,outpath): \n if inpath==self.static_dir:\n dest=os.path.join(outpath,os.path.basename(inpath))\n if os.path.exists(dest):\n logger.warning('Remove old static folder')\n shutil.rmtree(dest) #not efficient. Should do it incrementaly...\n logger.info('cp_static_files %s -> %s' %(inpath,dest))\n copyfiles(inpath,dest) \n else:\n for folder in os.listdir(inpath):\n if folder == 'static':\n logger.info('found static folder, copy all...')\n dest=os.path.join(outpath,folder)\n src=os.path.join(inpath,folder)\n if os.path.exists(dest):\n logger.warning('Remove old static folder')\n shutil.rmtree(dest) #not efficient. Should do it incrementaly...\n logger.info('cp_static_files %s -> %s' %(src,dest))\n copyfiles(src,dest)\n return 0",
"def copy_static(root_directory, dist_directory, sdk_directory):\n\n for static in configuration.STATICS:\n context = {\n \"root\": root_directory,\n \"sdk\": sdk_directory,\n \"dist\": dist_directory\n }\n\n source = templates.from_string(static[\"source\"], context)\n target = templates.from_string(static[\"target\"], context)\n target = os.path.join(dist_directory, target)\n\n # Perform the action.\n sys.stdout.write(\"Copying '%s'\\n\" % source)\n\n if static[\"type\"] == \"directory\":\n recursive_overwrite(source, target)\n else:\n shutil.copy(source, target)",
"def upload_files_to_s3(self):\n\n bucket_name = self.publish_bucket\n\n # Connect to S3 and bucket\n s3_conn = S3Connection(self.settings.aws_access_key_id, self.settings.aws_secret_access_key)\n bucket = s3_conn.lookup(bucket_name)\n\n for file in glob.glob(self.OUTPUT_DIR + '/*.zip'):\n s3_key_name = file.split(os.sep)[-1]\n s3key = boto.s3.key.Key(bucket)\n s3key.key = s3_key_name\n s3key.set_contents_from_filename(file, replace=True)\n if self.logger:\n self.logger.info(\"uploaded \" + s3_key_name + \" to s3 bucket \" + bucket_name)\n return True",
"def sync(self, pathname, bucket_name):\n\n root=Path(pathname).expanduser().resolve()\n s3_bucket=self.s3.Bucket(bucket_name)\n\n def handle_directory(pathname):\n for p in pathname.iterdir():\n if p.is_dir():\n handle_directory(p)\n if p.is_file():\n self.upload_file(s3_bucket, str(p), str(p.relative_to(root)))\n\n handle_directory(root)",
"def test_gzip_and_send_s3(self):\n # First create some dummy content to work with\n output_path = '{0}/test_out/'.format(os.getcwd())\n helper_extract_all(cluster=self.cluster, output_path=output_path)\n\n # Run the gzip and send\n try:\n stub_response = dict(\n status_code=200,\n response={'msg': 'success'}\n )\n with MockElasticsearch(stub_response):\n response = dashboard.push_to_s3(\n input_directory=output_path,\n s3_details=self.s3_details\n )\n\n # Check response from S3\n self.assertEqual(response.status_code, 200)\n\n # Check there is a gzip on disk\n gb = glob.glob('{0}/*.gz'.format(output_path))\n self.assertTrue(\n len(gb) == 1\n )\n\n except Exception as error:\n self.fail(error)\n finally:\n # Clean up files\n shutil.rmtree(output_path)",
"def _deploy_to_s3(path='.gzip'):\n # Clear files that should never be deployed\n local('rm -rf %s/live-data' % path)\n local('rm -rf %s/sitemap.xml' % path)\n\n s3cmd = 's3cmd -P --add-header=Cache-Control:max-age=5 --guess-mime-type --recursive --exclude-from gzip_types.txt sync %s/ %s'\n s3cmd_gzip = 's3cmd -P --add-header=Cache-Control:max-age=5 --add-header=Content-encoding:gzip --guess-mime-type --recursive --exclude \"*\" --include-from gzip_types.txt sync %s/ %s'\n\n for bucket in app_config.S3_BUCKETS:\n local(s3cmd % (path, 's3://%s/%s/' % (bucket, app_config.PROJECT_SLUG)))\n local(s3cmd_gzip % (path, 's3://%s/%s/' % (bucket, app_config.PROJECT_SLUG)))",
"def gzip_and_get_other_static_files():\n\n print(\"gzip non css/ js static files\")\n\n def path_relative_to_static_dir(_path):\n path_str = str(_path)\n return pathlib.Path(path_str[path_str.find(str(STATIC_DIR)) + len(str(STATIC_DIR)) + 1:])\n\n files = {}\n\n paths = collect_paths_from_dir_and_opt_recreate_dir_structure(STATIC_DIR, OUTPUT_PATHS[\"/temp\"],\n lambda static_subdir:\n static_subdir == \"css\" or\n static_subdir == \"js\")\n\n for i, path in \\\n enumerate(paths):\n static_path = str(pathlib.PurePosixPath(path_relative_to_static_dir(path)))\n\n files[static_path] = {\"Path\": str(OUTPUT_PATHS[\"/temp\"] / static_path),\n \"Hash\": get_file_md5_hash(path),\n \"Name\": static_path}\n\n with open(path, 'rb') as f_in:\n f_in_content = f_in.read()\n with gzip.open(OUTPUT_PATHS[\"/temp\"] / static_path, 'wb') as f_out:\n f_out.write(f_in_content)\n\n return files",
"def get_static_bucket_files():\n\n print(f\"collect info about '{STATIC_BUCKET}' s3 bucket\")\n\n # STATIC_BUCKET files\n # - Key: <path of corresponding local file (relative to static dir)> | <md5 hash before compression>\n # - TagSet: name=<path of corresponding local file (relative to static dir)> &\n # hash=<md5 hash before compression>\n\n files = {}\n paginator = s3_cli.get_paginator(\"list_objects_v2\")\n for page in paginator.paginate(Bucket=STATIC_BUCKET):\n try:\n contents = page[\"Contents\"]\n except KeyError:\n break\n for obj in contents:\n static_path = None\n file_hash = None\n for tag in s3_cli.get_object_tagging(Bucket=STATIC_BUCKET, Key=obj[\"Key\"])[\"TagSet\"]:\n if tag[\"Key\"] == \"name\":\n static_path = tag[\"Value\"]\n if tag[\"Key\"] == \"hash\":\n file_hash = tag[\"Value\"]\n if static_path is not None and file_hash is not None:\n files[static_path] = {\"Key\": obj[\"Key\"], \"Hash\": file_hash}\n return files",
"def deploy_to_s3():\r\n env.gzip_path = '%(path)s/repository/%(project_name)s/gzip/assets/' % env\r\n run(('s3cmd -P --add-header=Content-encoding:gzip --guess-mime-type --rexclude-from=%(path)s/repository/s3exclude sync %(gzip_path)s s3://%(s3_bucket)s/%(project_name)s/%(site_media_prefix)s/') % env)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a |random value| <= SHIFT_MAX_VAL
|
def get_shift() -> int:
    # random.randint takes positional bounds and is inclusive on both ends,
    # so the result satisfies |value| <= SHIFT_MAX_VAL.
    return random.randint(-SHIFT_MAX_VAL, SHIFT_MAX_VAL)
|
[
"def _rand_value(max_value):\n return randint(1, max_value)",
"def get_next_random(value, max_value, min_value, max_delta):\n # Determine if sensor delta should be added or substracted.\n if value == max_value:\n add = False\n elif value == min_value:\n add = True\n else:\n add = random.random() > 0.5\n\n # Calculate a new delta.\n delta = random.randint(0, max_delta)\n\n # Apply the delta.\n if add:\n value += delta\n else:\n value -= delta\n if value > max_value:\n value = max_value\n elif value < min_value:\n value = min_value\n\n return value",
"def getrandmax(space):\n return space.wrap(RANDMAX)",
"def randomize(self):\n self.value = randint(self.min(), self.max())\n if self.lsb0:\n self.value = self.value - (self.value % 2)",
"def selectRandomInt(maxvalue):\n return int(random.random()*maxvalue)",
"def randint(maxvalue):\n\n bit_size = common.bit_size(maxvalue)\n\n tries = 0\n while True:\n value = read_random_int(bit_size)\n if value <= maxvalue:\n break\n\n if tries % 10 == 0 and tries:\n # After a lot of tries to get the right number of bits but still\n # smaller than maxvalue, decrease the number of bits by 1. That'll\n # dramatically increase the chances to get a large enough number.\n bit_size -= 1\n tries += 1\n\n return value",
"def random_shift_logs(self, max_shift, mnemonics=None):\n df = self.logs\n mnemonics = df.columns if mnemonics is None else to_list(mnemonics)\n max_shift = parse_depth(max_shift, check_positive=True, var_name=\"max_shift\") // self.logs_step\n if max_shift == 0:\n warnings.warn('Passed `max_shift` is smaller then dataframe index step and therefore no shift was applied.')\n for column, series in df[mnemonics].iteritems():\n periods = np.random.randint(-max_shift, max_shift + 1)\n fill_value = series.iloc[0] if periods > 0 else series.iloc[-1]\n df[column] = series.shift(periods=periods, fill_value=fill_value)\n return self",
"def getRandomNumber(max: int) -> int:\n ...",
"def test_in_range_0_1():\n g = RG.larger_random()\n assert 0 <= next(g) <= 1",
"def _random_int(rng, min_val, max_val, available):\n\n if available - min_val >= max_val:\n return rng.integers(min_val, max_val + 1)\n\n if available - min_val >= min_val:\n return rng.integers(min_val, available - min_val + 1)\n\n return available",
"def randomShiftVector(values, smin, smax):\n\tshift = np.random.uniform(smin, smax)\n\treturn list(map(lambda va: va + shift, values))",
"def generate_biased_random(minimum, maximum, exp):\n return math.floor(minimum + (maximum - minimum) * pow(random.random(), exp))",
"def randInt(max):\n return int(max * random.random())",
"def longGenerator(min=-2 ** 63, max=2 ** 63):\n while True:\n yield random.randint(min, max)",
"def roll_d8():\n d8 = random.randint(1,8)\n return d8",
"def _bitsfor(maxval):\n maxvalbits = int(round(math.log(maxval) / math.log(2)))\n if maxval != (1 << maxvalbits):\n raise ValueError(\"maxval must be a power of 2, not %d\" % maxval)\n return maxvalbits",
"def roll(self):\n self.value = randint(1, 6)",
"def sample_from_power_law(alpha, min_val, max_val, num_samples=1):\n exp = -alpha\n r = np.random.random(size=num_samples)\n ag, bg = min_val**exp, max_val**exp\n return (ag + (bg - ag)*r)**(1./exp)",
"def roll(self):\n return randint(1,6)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
load all of imagenet data as flat vector
|
def load_imagenet(directory):
    path_train, path_val = directory + '/ILSVRC2012_img_train', directory + '/ILSVRC2012_img_val'
    train_classes = os.listdir(path_train)
    train_data = []
    train_labels = []
    for label in train_classes:
        imgs_path = os.path.join(path_train, label)
        for img_name in os.listdir(imgs_path):
            img_path = os.path.join(imgs_path, img_name)
            img = cv2.imread(img_path)
            # OpenCV loads BGR; reorder channels to RGB before stacking.
            b, g, r = cv2.split(img)
            img = cv2.merge([r, g, b]).reshape(-1, 64, 64, 3)
            train_data.append(img)
            train_labels.append(label)
    train_data = np.concatenate(train_data)
    train_labels = np.array(train_labels, dtype='str')
    test_classes = os.listdir(path_val)
    test_data = []
    test_labels = []
    for label in test_classes:
        imgs_path = os.path.join(path_val, label)
        for img_name in os.listdir(imgs_path):
            img_path = os.path.join(imgs_path, img_name)
            img = cv2.imread(img_path)
            b, g, r = cv2.split(img)
            img = cv2.merge([r, g, b]).reshape(-1, 64, 64, 3)
            test_data.append(img)
            test_labels.append(label)
    test_data = np.concatenate(test_data)
    test_labels = np.array(test_labels, dtype='str')
    # Map class-name strings to integer label ids.
    _, train_labels = np.unique(train_labels, return_inverse=True)
    _, test_labels = np.unique(test_labels, return_inverse=True)
    return train_data, train_labels, test_data, test_labels
|
[
"def loadData(path = \"../data/\"):\n A=[]\n for i in range(1,8):\n im = cv2.imread(path+\"input_\"+str(i)+\".tif\", -1) \n I_xyz =rgb2xyz(im)\n I_y=I_xyz[:,:,1].reshape(I_xyz.shape[0]*I_xyz.shape[1],1)\n A.append(I_y)\n A=np.asarray(A)\n I=A.reshape(A.shape[0],A.shape[1])\n L = np.load(path+'sources.npy').T\n s = im.shape[:2]\n return I, L, s",
"def get_raw_data():\n\twith open('train_label.pkl', 'rb') as f:\n\t\ttrain_label = pickle.load(f)\n\n\twith open('train_image.pkl', 'rb') as f:\n\t\ttrain_data = pickle.load(f)\n\n\tprint(np.unique(np.asarray(train_label)))\n\n\treturn (train_label, np.asarray(train_data))",
"def load_one_img(ds):\n for img in ds.take(1):\n img = img[1, ...]\n yuv_image_tensor = tf.expand_dims(img, axis=0)\n\n return yuv_image_tensor",
"def load_image_data():\n print(\"Loading image data...\")\n label_dict = get_label_vectors()\n categories = [c for c in os.listdir('images/') if c[0] != '.'] # ignore\n labels = [] # instantiate list for image labels\n data = [] # instantiate list for image data\n for i in categories:\n path = 'images/{}/'.format(i) # define path to category folder\n for j in os.listdir(path): # get images from category folder\n labels.append(label_dict[i]) # append label vector\n data.append(cv2.imread(path + j).flatten()) # append flattened image data\n\n labels = np.array(labels) # convert lists to array\n data = np.array(data)\n print(\"Done.\")\n\n return labels, data",
"def load_images():\n hw = 28 ** 2 # Number of pixels per image\n n = 60000 # Number of images\n\n with gzip.open('train-images-idx3-ubyte.gz', 'r') as f:\n f.read(16)\n\n buffer = f.read(hw * n)\n images = np.frombuffer(buffer, dtype=np.uint8)\n images = images.reshape(n, hw)\n\n return images",
"def load_images(self):\n with open('image_data' , 'r+b') as f:\n return pickle.load(f)",
"def load_samples(self):\n filename = os.path.join(self.root, self.filename)\n f = gzip.open(filename, \"rb\")\n data_set = pickle.load(f, encoding=\"bytes\")\n f.close()\n images_train = data_set[0][0]\n images_test = data_set[1][0]\n images = np.concatenate((images_train, images_test), axis=0)\n labels_train = data_set[0][1]\n labels_test = data_set[1][1]\n labels = np.concatenate((labels_train, labels_test), axis=0)\n self.dataset_size = labels.shape[0]\n return images, labels",
"def load_data(features_file):\n\n imgs = np.zeros((3862, 176, 144))\n lbls = np.zeros(3862)\n forward_label = np.array([0., 0., 1., 0.])\n left_label = np.array([0., 1., 1., 0.])\n right_label = np.array([1., 0., 1., 0.])\n\n with open(features_file, 'r') as features:\n for idx, line in enumerate(features):\n\n line_arr = np.fromstring(line, sep=' ')\n\n if (idx % 2) == 0: # label\n if np.array_equal(line_arr[1:], forward_label):\n lbls[idx//2] = 1\n elif np.array_equal(line_arr[1:], left_label):\n lbls[idx//2] = 2\n elif np.array_equal(line_arr[1:], right_label):\n lbls[idx//2] = 3\n else:\n print(\"Label '{}' not recognized\".format(line_arr[1:]))\n\n else: # image pixel data\n image = line_arr[0:25344] # 176 x 144 = 25344\n # correct image for easier viewing\n image = (np.reshape(image, (144, 176)).transpose() * (-1))\n imgs[idx//2] = image\n\n return imgs, lbls",
"def loadData(path = \"../data/\"):\n\n I = None\n L = None\n s = None\n\n # Load images to I\n for i in range(1, 8):\n img = imread(path + 'input_' + str(i) + '.tif', dtype=np.uint16)\n img_xyz = rgb2xyz(img)\n img_luminance = img_xyz[:, :, 1]\n if I is None:\n s = img_luminance.shape\n I = img_luminance.flatten().reshape(1, -1)\n else:\n I = np.vstack((I, img_luminance.flatten().reshape(1, -1)))\n\n # Load light directions to L\n L = np.load('../data/sources.npy').T\n return I, L, s",
"def train2image(vector_image: pd.DataFrame):",
"def mnist_raw():\n # CVDF mirror of http://yann.lecun.com/exdb/mnist/\n base_url = \"https://storage.googleapis.com/cvdf-datasets/mnist/\"\n\n def parse_labels(filename):\n with gzip.open(filename, \"rb\") as fh:\n _ = struct.unpack(\">II\", fh.read(8))\n return numpy.array(array.array(\"B\", fh.read()), dtype=numpy.uint8)\n\n def parse_images(filename):\n with gzip.open(filename, \"rb\") as fh:\n _, num_data, rows, cols = struct.unpack(\">IIII\", fh.read(16))\n return numpy.array(array.array(\"B\", fh.read()),\n dtype=numpy.uint8).reshape(num_data, rows, cols)\n\n for filename in [\"train-images-idx3-ubyte.gz\", \"train-labels-idx1-ubyte.gz\",\n \"t10k-images-idx3-ubyte.gz\", \"t10k-labels-idx1-ubyte.gz\"]:\n _download(base_url + filename, filename)\n\n train_images = parse_images(path.join(_DATA, \"train-images-idx3-ubyte.gz\"))\n train_labels = parse_labels(path.join(_DATA, \"train-labels-idx1-ubyte.gz\"))\n test_images = parse_images(path.join(_DATA, \"t10k-images-idx3-ubyte.gz\"))\n test_labels = parse_labels(path.join(_DATA, \"t10k-labels-idx1-ubyte.gz\"))\n\n return train_images, train_labels, test_images, test_labels",
"def view_image(train_dataloader):\n for (x, target) in train_dataloader:\n np.save(\"img.npy\", x)\n print(x.shape)\n exit(0)",
"def load_data_pkl(self):\n pkl_name = '{}/data/mini-imagenet-cache-{}.pkl'.format(self.root_dir, self.split)\n print('Loading pkl dataset: {} '.format(pkl_name))\n\n try:\n with open(pkl_name, \"rb\") as f:\n data = pkl.load(f, encoding='bytes')\n image_data = data[b'image_data']\n class_dict = data[b'class_dict']\n except:\n with open(pkl_name, \"rb\") as f:\n data = pkl.load(f)\n image_data = data['image_data']\n class_dict = data['class_dict']\n\n print(data.keys(), image_data.shape, class_dict.keys())\n data_classes = sorted(class_dict.keys()) # sorted to keep the order\n\n n_classes = len(data_classes)\n print('n_classes:{}, n_label:{}, n_unlabel:{}'.format(n_classes,self.n_label,self.n_unlabel))\n dataset_l = np.zeros([n_classes, self.n_label, self.im_height, self.im_width, self.channels], dtype=np.float32)\n if self.n_unlabel>0:\n dataset_u = np.zeros([n_classes, self.n_unlabel, self.im_height, self.im_width, self.channels], dtype=np.float32)\n else:\n dataset_u = []\n\n for i, cls in enumerate(data_classes):\n idxs = class_dict[cls] \n np.random.RandomState(self.seed).shuffle(idxs) # fix the seed to keep label,unlabel fixed\n dataset_l[i] = image_data[idxs[0:self.n_label]]\n if self.n_unlabel>0:\n dataset_u[i] = image_data[idxs[self.n_label:]]\n print('labeled data:', np.shape(dataset_l))\n print('unlabeled data:', np.shape(dataset_u))\n \n self.dataset_l = dataset_l\n self.dataset_u = dataset_u\n self.n_classes = n_classes\n\n del image_data",
"def load_images(file_dir):\n x = np.load(file_dir + \"/x.npy\")\n y = np.load(file_dir + \"/y.npy\")\n return x, y",
"def load_vecs(fin):\n h5f = tables.open_file(fin)\n h5vecs= h5f.root.vecs\n \n vecs=np.zeros(shape=h5vecs.shape,dtype=h5vecs.dtype)\n vecs[:]=h5vecs[:]\n h5f.close()\n return vecs",
"def load_data(self, fname):\r\n points, labels = self.load_h5(fname)\r\n self.point_data = points.reshape(-1, 2048, 3)\r\n self.point_label = labels.reshape(-1, 1)\r\n print(\"data loaded\")",
"def InitDataset(self):\n train_txt = 'ImageSets/Main/train.txt'\n val_txt = 'ImageSets/Main/val.txt'\n annotations = \"Annotations\"\n jpegimages = \"JPEGImages\"\n images_path = train_txt if (self.is_train) else val_txt \n images_path = readTxt(os.path.join(self.path, images_path))\n images_path.pop(-1)\n # rawdata format: [path_2_image, path_2_xml]\n rawData = list()\n for each in images_path:\n xml = os.path.join(self.path, annotations, each + '.xml')\n jpeg = os.path.join(self.path, jpegimages, each + '.jpg')\n rawData.append([jpeg, xml])\n return rawData",
"def load_data(datadir, maxnum=2000):\n n = 0\n images = []\n for root, _, fnames in os.walk(sys.argv[1]):\n for fname in fnames:\n fpath = os.path.join(root, fname)\n if os.path.splitext(fpath)[-1].lower() == \".png\":\n im = imageio.imread(fpath)\n images.append(im)\n n += 1\n if n >= maxnum:\n break\n x_train = np.array(images)\n return x_train",
"def read_all_poses(dataset_root, world):\n path = os.path.join(dataset_root, world, 'image_structs.mat')\n data = sio.loadmat(path)\n xyz = data['image_structs']['world_pos']\n image_names = data['image_structs']['image_name'][0]\n dire=data['image_structs']['direction']\n n = xyz.shape[1]\n x = [xyz[0][i][0][0] for i in range(n)]\n y = [0 for i in range(n)]\n z = [xyz[0][i][2][0] for i in range(n)]\n px= [dire[0][i][0][0] for i in range(n)]\n py= [0 for i in range(n)]\n pz= [dire[0][i][2][0] for i in range(n)]\n names = [name[0][:-4] for name in image_names]\n if len(names) != len(x):\n raise ValueError('number of image names are not equal to the number of '\n 'poses {} != {}'.format(len(names), len(x)))\n output = {}\n for i in range(n):\n output[names[i]] = [x[i], z[i], px[i], pz[i]]\n return output"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Take as input a Keras ImageGen (Iterator) and generate random crops from the image batches generated by the original iterator.
|
def random_crop_generator(batches, crop_length):
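    # Pull batches from the wrapped Keras iterator, crop each image, and re-yield them with the original labels.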
while True:
batch_x, batch_y = next(batches)
batch_crops = np.zeros((batch_x.shape[0], crop_length, crop_length, 3))
for i in range(batch_x.shape[0]):
batch_crops[i] = random_crop(batch_x[i], (crop_length, crop_length))
yield (batch_crops, batch_y)
|
[
"def crop_generator(batch_generator, crop_length):\n while True:\n batch_x, batch_y = next(batch_generator)\n batch_crops = np.zeros((batch_x.shape[0], crop_length, crop_length, batch_x.shape[-1]))\n for i in range(batch_x.shape[0]):\n batch_crops[i] = random_crop(batch_x[i], (crop_length, crop_length))\n yield (batch_crops, batch_y)",
"def crop_generator(batches, crop_length):\n while True:\n batch_x, batch_y = next(batches)\n batch_crops = np.zeros((batch_x.shape[0], crop_length, crop_length, 3))\n for i in range(batch_x.shape[0]):\n batch_crops[i] = random_crop(batch_x[i], (crop_length, crop_length))\n yield (batch_crops, batch_y)",
"def flow(self, batch_size=32, output='both', crops=0):\n while True:\n for dataset in self.input_sets:\n X = self.training_set['input/'+dataset]\n y = self.training_set['target/'+dataset]\n y_seg = self.training_set['seg_map/'+dataset]\n\n for i in range(int(math.ceil(X.shape[0]/2000))):\n index = list(range(0,X.shape[0]))\n sample = random.sample(index, batch_size)\n sample.sort()\n X_batch = X[sample, ...]\n y_batch = y[sample, ...]\n y_seg_batch = y_seg[sample, ...]\n X_batch = self.augment(X_batch)\n\n if crops > 0:\n (X_batch, y_batch,\n y_seg_batch) = _augmentors.random_crops(\n X_batch, y_batch, y_seg_batch, n_crops=crops, crop_dim=20)\n\n if output=='both':\n yield (X_batch, [y_batch, y_seg_batch])\n elif output=='seg':\n yield (X_batch, y_seg)\n elif output=='density':\n yield (X_batch, y_batch)\n else:\n raise Exception('output must be \"density\", \"seg\" or \"both\"')",
"def batch_generator(samples, batch_size=32, is_training=True):\n num_samples = len(samples)\n while True: # Loop forever so the generator never terminates\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples.iloc[offset:offset+batch_size]\n\n images = []\n angles = []\n for batch_sample in batch_samples.iterrows():\n batch_sample = batch_sample[1]\n name = DATA_PATH + '/IMG/'+batch_sample['center'].split('/')[-1]\n center_image = cv2.imread(name)\n center_image = cv2.cvtColor(center_image, cv2.COLOR_BGR2RGB)\n center_angle = float(batch_sample['steering'])\n images.append(center_image)\n angles.append(np.clip(center_angle,-1,1))\n if is_training:\n # Center Flip\n images.append(cv2.flip(center_image,1))\n angles.append(np.clip(center_angle*-1.0,-1,1))\n # Left\n name = DATA_PATH + '/IMG/'+batch_sample['left'].split('/')[-1]\n correction = 0.2\n center_image = cv2.imread(name)\n center_image = cv2.cvtColor(center_image, cv2.COLOR_BGR2RGB)\n images.append(center_image)\n angles.append(np.clip(center_angle+correction,-1,1))\n # Left Flip\n images.append(cv2.flip(center_image,1))\n angles.append(np.clip((center_angle+correction)*-1.0,-1,1))\n # Right\n name = DATA_PATH + '/IMG/'+batch_sample['right'].split('/')[-1]\n correction = -0.2\n center_image = cv2.imread(name)\n center_image = cv2.cvtColor(center_image, cv2.COLOR_BGR2RGB)\n images.append(center_image)\n angles.append(np.clip(center_angle+correction,-1,1))\n # Right Flip\n images.append(cv2.flip(center_image,1))\n angles.append(np.clip((center_angle+correction)*-1.0,-1,1))\n \n X_train = np.array(images)\n y_train = np.array(angles)\n yield shuffle(X_train, y_train)",
"def my_generator(batch_size, img_dir):\n cat_dirs = glob.glob(img_dir + \"/*\")\n counter = 0\n while True:\n input_images = np.zeros(\n (batch_size, config.height, config.width, 3 * 5))\n output_images = np.zeros((batch_size, config.height, config.width, 3))\n random.shuffle(cat_dirs)\n if (counter+batch_size >= len(cat_dirs)):\n counter = 0\n for i in range(batch_size):\n input_imgs = glob.glob(cat_dirs[counter + i] + \"/cat_[0-4]*\") \n imgs = [Image.open(img) for img in sorted(input_imgs)]\n input_images[i] = np.concatenate(imgs, axis=2)\n output_imgs = glob.glob(cat_dirs[counter + i] + \"/cat_[5-7]*\")\n imgs = [Image.open(img) for img in sorted(output_imgs)]\n output_images[i] = np.concatenate(imgs, axis=1)\n input_images[i] /= 255.\n output_images[i] /= 255.\n yield (input_images, output_images)\n counter += batch_size",
"def augmented_generator(batches,augmented_factor,out_size=(250,250)):\n while True:\n batch_x, batch_y = batches.my_next()\n batch_y = 'lateralizado'\n batch_size = len(batch_x)\n tot_data = batch_size*augmented_factor\n output = (np.ones((tot_data,out_size[0],out_size[1],3))*0).astype(np.uint8)\n batch_y = (np.ones((tot_data,out_size[0],out_size[1],3))*0).astype(np.uint8)\n i_output = 0\n for i_batch in range(0,batch_size):\n #En funcion de h o w si estos son mas chicos que n_critic resampleo la imagen\n #el n critico ocurre para un tamaño de 250x250\n \n #esferizo la imagen y aumnento la cantidad de muestras de mi dataset a partir de una muestra\n tensor_sq,angles,origins= augmented_data(fim.chf_to_chl(batch_x[i_batch]),\n out_size=out_size,\n k = augmented_factor)\n \n for i_tensor_sq in range(0,tensor_sq.shape[0]):\n if i_output<output.shape[0]:\n output[i_output] = tensor_sq[i_tensor_sq]\n i_output = i_output + 1\n \n yield (output,batch_y) #devuelve el resultado de la iteracion",
"def random_crop(images, crop_size, r_vec=None, c_vec=None):\n # batch size\n batch_size, h, w, c = images.shape\n assert h == w\n crop_max = h - crop_size\n r_vec = np.random.randint(0, crop_max, batch_size) if r_vec is None else r_vec\n c_vec = np.random.randint(0, crop_max, batch_size) if c_vec is None else c_vec\n # creates all sliding windows combinations of size (output_size)\n windows = view_as_windows(images, (batch_size, crop_size, crop_size, c))[0, :, :, 0]\n # selects a random window for each batch element\n cropped_images = windows[r_vec, c_vec, np.arange(batch_size)]\n\n return cropped_images",
"def generator(array, batch_size):\n start = 0 # pointer to where we are in iteration\n while True:\n stop = start + batch_size\n diff = stop - array.shape[0]\n if diff <= 0:\n batch = array[start:stop]\n start += batch_size\n else:\n batch = np.concatenate((array[start:], array[:diff]))\n start = diff\n batch = batch.astype(np.float32) / 255.0 # normalize pixel intensities\n batch = np.random.binomial(1, batch) # binarize images\n yield batch",
"def next_batch(batch_size, images, labels):\r\n # Get next batch of image (path) and labels\r\n # paths = images[pointer:pointer + batch_size]\r\n # labels = labels[pointer:pointer + batch_size]\r\n paths = images[0:batch_size]\r\n labels = labels[0:batch_size]\r\n\r\n #update pointer\r\n #pointer += batch_size\r\n n_classes = 2\r\n patchSize = (227,227)\r\n mean = np.array([104., 117., 124.])\r\n scale_size=(350, 230)\r\n horizontal_flip = False\r\n shuffle = False\r\n\r\n # Read images\r\n randomized_img_list = []\r\n randomized_label_list = []\r\n for i in range(len(paths)):\r\n images = []\r\n label_list = []\r\n temp = []\r\n #rescale image\r\n img0 = PIL.Image.open(paths[i])\r\n # img0 = img0.resize((scale_size[1], scale_size[0]),PIL.Image.LANCZOS) # PIL.Image.BICUBIC)\r\n\r\n img = np.array(img0,dtype=np.float32)\r\n\r\n if(img.ndim>3):\r\n img=img[:,:,0:3]\r\n img=img[:,:,[2,1,0]]\r\n\r\n for (x,y,crop) in resize_and_extract_overlapping(img, patchSize):\r\n if crop.shape[0] != patchSize[0] or crop.shape[1] != patchSize[1]:\r\n # check that the image is of the right size before we do further pre-processing\r\n continue\r\n if horizontal_flip and np.random.random() < 0.5: # randomly flips image\r\n crop = img.transpose(PIL.Image.FLIP_LEFT_RIGHT)\r\n # crop -= mean # normalize every patch\r\n\r\n images.append(crop)\r\n label_list.append(labels[i])\r\n\r\n# ------------------ READ HERE ZAIZAI-------------------------------------\r\n temp = random.sample(list(enumerate(images)), 8)\r\n # This enumerates through the images from a single crop cycle\r\n # Then, it samples 2 of (index, image) from that list\r\n for idx, val in temp: # seperate the tuples into index and image\r\n randomized_label_list.append(label_list[idx]) # append label to a new list\r\n randomized_img_list.append(val) # append image to new list\r\n\r\n image_matrix = np.ndarray([len(randomized_img_list), patchSize[0], patchSize[1], 3])\r\n for i in range(len(randomized_img_list)):\r\n image_matrix[i] = randomized_img_list[i]\r\n\r\n # Expand labels to one hot encoding\r\n one_hot_labels = np.zeros((len(randomized_img_list), n_classes))\r\n\r\n for i in range(len(randomized_label_list)):\r\n one_hot_labels[i][randomized_label_list[i]] = 1\r\n\r\n print (\"image matrix size: \", image_matrix.shape)\r\n print (\"hi\", len(images))\r\n print (\"One hot labels: \", one_hot_labels.shape)\r\n #return array of images and labels\r\n return image_matrix, one_hot_labels",
"def generate(images, text, target, batch_size, vocab_size):\n while 1:\n #zipping inputs and outputs together\n data = list(zip(images, text, target))\n #shuffling inputs and outputs together every epoch\n random.shuffle(data)\n #initilizing count for number of observations in a batch\n batch_count = 0\n #initializing lists for image, text, and target batches\n image_batch = []\n text_batch = []\n target_batch = []\n #looping over the zipped lists and sampling observations for each batch\n for (image_sample, text_sample, target_sample) in data:\n #one hot encoding target\n target_sample = to_categorical(target_sample, num_classes=vocab_size)\n #sampling observations for each batch\n image_batch.append(image_sample)\n text_batch.append(text_sample)\n target_batch.append(target_sample)\n #increasing batch count\n batch_count += 1\n #if batch count reaches the batch_size, the generator will yield the batch\n if batch_count==batch_size:\n image_batch = np.array(image_batch).reshape(batch_size, image_sample.shape[1])\n text_batch = np.array(text_batch)\n target_batch = np.array(target_batch).reshape(batch_size, vocab_size)\n yield [image_batch, text_batch], target_batch\n image_batch = []\n text_batch = []\n target_batch = []\n batch_count = 0",
"def test_random_crop(dummy_input):\n # Test the 2D image: H, W, C\n image, label = dummy_input(image_size=(512, 512, 3),\n label_size=(512, 512, 1))\n transform = RandomCrop(size=(64, 64))\n _image, _label = transform(image, label)\n assert _image.shape == (64, 64, image.shape[2])\n assert _label.shape == (64, 64, label.shape[2])\n\n # Test the 3D image: H, W, D, C\n image, label = dummy_input(image_size=(512, 512, 20, 3),\n label_size=(512, 512, 20, 1))\n transform = RandomCrop(size=(64, 64, 8))\n _image, _label = transform(image, label)\n assert _image.shape == (64, 64, 8, image.shape[3])\n assert _label.shape == (64, 64, 8, label.shape[3])",
"def image_generator(df,batch_size,plab,augment=True):\n rng = np.random.RandomState(290615)\n if_train = 1 if plab < 1. else 0\n bi,b_list = 0,df.groupby('business_id').apply(get_biz_id,if_train,batch_size)\n b_list = b_list[b_list!=0]\n b_order = rng.permutation(b_list.index)\n pi,p_list = 0, df[df.iloc[:,-1]==0]['photo_id']\n p_order = rng.permutation(p_list.index)\n while True:\n if rng.rand(1)[0] < plab:\n # aggregate biz_id with outdoor-seating\n biz_id_i = b_list.ix[b_order[bi]]\n photo_train = df[df['business_id']==biz_id_i]['photo_id']\n y_batch = np.asarray(df[df['business_id']==biz_id_i].iloc[:,-1])\n # increase/loop indices for next iteration\n if bi < len(b_list)-1:\n bi += 1\n else:\n bi,b_order = 0,rng.permutation(b_list.index)\n else:\n # pic 32 random non-outdoor-seating pictures\n photo_train = p_list[p_order[pi:(pi+batch_size)]]\n y_batch = np.repeat(0, repeats=len(photo_train), axis=0)\n # increase/loop indices for next iteration\n if pi < len(p_list)-1-batch_size:\n pi += batch_size\n else:\n pi,p_order = 0,rng.permutation(p_list.index)\n batch_size_i = len(photo_train)\n # read and augment photos\n X_batch = np.empty((batch_size_i,h,w,ch))\n for i_ in range(batch_size_i):\n f_ = 'data/train_photos/' + str(photo_train.iloc[i_]) + '.jpg'\n im = Image.open(os.path.realpath(f_))\n im_sml = im.resize((w,h))\n # scale inputs [-1,+1]\n xi = np.asarray(im_sml)/128.-1\n if augment:\n # flip coords horizontally (but not vertically)\n if rng.rand(1)[0] > 0.5:\n xi = np.fliplr(xi)\n # rescale slightly within a random range\n jit = w*0.2\n if rng.rand(1)[0] > 0.1:\n xl,xr = rng.uniform(0,jit,1),rng.uniform(w-jit,w,1)\n yu,yd = rng.uniform(0,jit,1),rng.uniform(h-jit,h,1)\n pts1 = np.float32([[xl,yu],[xr,yu],[xl,yd],[xr,yd]])\n pts2 = np.float32([[0,0],[w,0],[0,h],[w,h]])\n M = cv2.getPerspectiveTransform(pts1,pts2)\n xi = cv2.warpPerspective(xi,M,(w,h))\n # save individual image to X_batch\n X_batch[i_,:,:,:] = xi\n# plt.imsave('data/aug_%i' % i_,(xi+1)/2);plt.close()\n yield([X_batch],y_batch)",
"def get_random_test_batch(self):\n images, _ = self._test_set.get_random_batch()\n\n return images",
"def create_keras_generator(self, part='train', batch_size=1, shuffle=True,\n reshape=None):\n from tensorflow.keras.utils import Sequence\n\n if self.supports_random_access():\n class KerasGenerator(Sequence):\n def __init__(self, dataset, part, batch_size, shuffle,\n reshape=None):\n self.dataset = dataset\n self.part = part\n self.batch_size = batch_size\n self.shuffle = shuffle\n self.reshape = reshape or (\n (None,) * self.dataset.get_num_elements_per_sample())\n self.data_shape = self.dataset.get_shape()\n self.on_epoch_end()\n\n def __len__(self):\n return ceil(self.dataset.get_len(self.part) /\n self.batch_size)\n\n def __getitem__(self, idx):\n indexes = self.indexes[idx*self.batch_size:\n (idx+1)*self.batch_size]\n # for last batch, indexes has len <= batch_size\n n_elem = self.dataset.get_num_elements_per_sample()\n arrays = []\n for i in range(n_elem):\n array = np.empty(\n (len(indexes),) + self.data_shape[i],\n dtype=self.dataset.space[i].dtype)\n arrays.append(array)\n for j, ind in enumerate(indexes):\n out = tuple([array[j] for array in arrays])\n self.dataset.get_sample(ind, part=self.part, out=out)\n for i in range(n_elem):\n if self.reshape[i] is not None:\n arrays[i] = arrays[i].reshape(\n (len(indexes),) + self.reshape[i])\n return tuple(arrays) if n_elem > 1 else arrays[0]\n\n def on_epoch_end(self):\n self.indexes = np.arange(self.dataset.get_len(self.part))\n if self.shuffle:\n np.random.shuffle(self.indexes)\n\n generator = KerasGenerator(self, part, batch_size=batch_size,\n shuffle=shuffle, reshape=reshape)\n\n else:\n def keras_generator(dataset, part, batch_size, reshape=None):\n generator = dataset.generator(part)\n n_elem = dataset.get_num_elements_per_sample()\n num_steps_per_epoch = ceil(dataset.get_len(part) / batch_size)\n if reshape is None:\n reshape = (None,) * n_elem\n data_shape = dataset.get_shape()\n while True:\n for k in range(num_steps_per_epoch):\n batch_size_ = (batch_size if k < num_steps_per_epoch-1\n else dataset.get_len(part) % batch_size)\n arrays = []\n for i in range(n_elem):\n array = np.empty(\n (batch_size_,) + data_shape[i],\n dtype=dataset.space[i].dtype)\n arrays.append(array)\n for j in range(batch_size_):\n sample = next(generator)\n if n_elem == 1:\n sample = (sample,)\n for i, array in enumerate(arrays):\n array[j, :] = sample[i]\n for i in range(n_elem):\n if reshape[i] is not None:\n arrays[i] = arrays[i].reshape(\n (batch_size_,) + reshape[i])\n yield tuple(arrays) if n_elem > 1 else arrays[0]\n\n generator = keras_generator(self, part, batch_size=batch_size,\n reshape=reshape)\n\n return generator",
"def _test_procedure(batch, sess, gen_real, gen_cyc, real_placeholder, save_dir, image_shape):\n print('Test Images sent to generator..')\n gen_real_out, gen_cyc_out = sess.run([gen_real, gen_cyc],\n feed_dict={real_placeholder: batch})\n print('Images obtatined back generator..')\n for i in range(batch.shape[0]):\n # A single real image in batch.\n real_img = batch[i]\n # Generate fake and cyclic images.\n\n # Concatenate 3 images into one.\n # out_img = np.concatenate((real_img, gen_real_out, gen_cyc_out), axis=0)\n # # Save result.\n # # --------------- Need the utils file!!! ---------------\n # # Temporarily use i as image name. Should change this.\n # im.imwrite(im.immerge(out_img, 1, 3), save_dir + '/' + str(i))\n\n # gen_real_out_image = Image.fromarray(gen_real_out, \"RGB\")\n # gen_cyc_out_image = Image.fromarray(gen_cyc_out, \"RGB\")\n\n # new_im = Image.new('RGB', (image_shape * 3, image_shape))\n # new_im.paste(real_img, (0, 0))\n # new_im.paste(gen_real_out_image, (image_shape, 0))\n # new_im.paste(gen_cyc_out_image, (image_shape * 2, 0))\n\n # new_im.save(save_dir + '(%d).jpg' % (i))\n\n new_im = np.zeros((image_shape, image_shape * 3, 3))\n new_im[:, :image_shape, :] = np.asarray(real_img)\n new_im[:, image_shape:image_shape * 2, :] = np.asarray(gen_real_out[i])\n new_im[:, image_shape * 2:image_shape * 3, :] = np.asarray(gen_cyc_out[i])\n\n scipy.misc.imsave(save_dir + 'Image(%d).png' % (i), _to_range(new_im, 0, 255, np.uint8))\n print(\"Save image.\")",
"def my_generator(batch_size, img_dir):\n\timage_filenames = glob.glob(img_dir + \"/*\")\n\tcounter = 0\n\twhile True:\n\t\tbw_images = np.zeros((batch_size, config.width, config.height))\n\t\tcolor_images = np.zeros((batch_size, config.width, config.height, 3))\n\t\trandom.shuffle(image_filenames) \n\t\tif ((counter+1)*batch_size>=len(image_filenames)):\n\t\t\t counter = 0\n\t\tfor i in range(batch_size):\n\t\t\t img = Image.open(image_filenames[counter + i]).resize((config.width, config.height))\n\t\t\t color_images[i] = np.array(img)\n\t\t\t bw_images[i] = np.array(img.convert('L'))\n\t\tyield (bw_images, color_images)\n\t\tcounter += batch_size",
"def imagenet_generator(meta_data, size, is_train, batch_size, preprocess_func):\n X_cache, y_cache = [], []\n while True:\n if is_train:\n indices = np.random.permutation(len(meta_data))\n else:\n indices = np.arange(len(meta_data))\n for i in indices:\n with Image.open(meta_data[i][0]) as img:\n if is_train:\n X_item = imagenet_data_augmentation(img, size)\n else:\n X_item = validation_image_load(img, size)\n y_item = np.zeros((1000), np.float32)\n y_item[meta_data[i][1]] = 1.0\n X_cache.append(X_item)\n y_cache.append(y_item)\n if len(X_cache) == batch_size:\n X_batch = np.asarray(X_cache, np.uint8)\n y_batch = np.asarray(y_cache, np.float32)\n X_cache, y_cache = [], []\n yield (preprocess_func(X_batch), y_batch)",
"def batch_generator(data, batch_size):\r\n data = np.array(data)\r\n n_batches = int(np.ceil(len(data) / float(batch_size)))\r\n \r\n idx = np.random.permutation(len(data))\r\n data_shuffled = data[idx]\r\n \r\n for i in range(n_batches):\r\n start = i * batch_size\r\n end = start + batch_size\r\n\r\n batch = data_shuffled[start:end]\r\n if len(batch) < batch_size:\r\n # Pad with zeros \r\n pad = np.zeros((batch_size - batch.shape[0], batch.shape[1]),\r\n dtype=batch.dtype)\r\n batch = np.vstack((batch, pad))\r\n\r\n yield batch",
"def batch_generator(data_dir, image_paths, steering_angles, batch_size, is_training):\n images = np.empty([batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS])\n steers = np.empty([batch_size,2])\n while True:\n # print(\"E\")\n i = 0\n for index in np.random.permutation(image_paths.shape[0]):\n #print(index)\n object_image = image_paths[index]\n steering_angle = steering_angles[index]\n # argumentation\n if is_training and np.random.rand()<0.5:\n \n image, steering_angle = augument(data_dir, object_image,steering_angle)\n #print(\"P_T\")\n else:\n image = load_image(data_dir, object_image)\n #print(\"P_V\") \n # add the image and steering angle to the batch\n images[i] = preprocess(image)\n steers[i] = steering_angle\n i += 1\n if i == batch_size:\n break\n yield images, steers"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
To be used in conjunction with loss.binary_xentropy_with_sigmoid
|
def sigmoid_with_binary_xentropy(z):
return sigmoid(z)
|
[
"def test_sigmoid_cross_entropy(self):\n loss_op = pointwise_losses.SigmoidCrossEntropy()\n\n y_pred = loss_op.final_activation_op({\n \"logits\": self.logits,\n \"metadata\": {\n \"mask\": self.mask\n }\n })\n assert np.isclose(y_pred[0][0].numpy(), 0.54905695, atol=1e-5)\n assert np.isclose(y_pred[2][4].numpy(), 0., atol=1e-5)\n\n loss = loss_op({\"mask\": self.mask}, self.y_true, y_pred)\n assert np.isclose(loss, 0.6905699, atol=1e-5)",
"def sigmoid():",
"def sigmoid(dblX):\n return 1 / (1 + math.exp(-dblX))",
"def sigmoid(x):\r\n return 1 / (1 + exp(-x))",
"def sigmoid(x): # sigmoid activation function\n return 1 / (1 + np.exp(-x))",
"def sigmoid_activation(X):\n return expit(X)",
"def hard_sigmoid(x):\n return K.hard_sigmoid(x)",
"def sigmoid(x):\n\treturn 1 / (1 + m.exp(-x))",
"def act_sigmoid_scaled(x):\n return tf.nn.sigmoid(x) * tf.math.log(max_sales) * 1.2",
"def activation_sigmoid_custom(self):\n self.value = 1 / (1 + np.e ** (-4.9 * self.value))",
"def Sigmoid(x):\r\n return 1.0 / (1.0 + np.exp(-x))",
"def binary_crossentropy(predictions, targets):\n return T.nnet.binary_crossentropy(predictions, targets)",
"def sigmoid(self, x):\n\n #logging.debug(\"sigmoid received %s as input\" % (x))\n return 1.0 / ( 1.0 + np.exp(-x) )",
"def binary_crossentropy(y_pred, y_true):\n with tf.name_scope(\"BinaryCrossentropy\"):\n return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(y_pred,\n y_true))",
"def binary_crossentropy(predictions, targets):\n return theano.tensor.nnet.binary_crossentropy(predictions, targets)",
"def binary_crossentropy(output, target):\r\n return -(target * tensor.log(output) + (1.0 - target) * tensor.log(1.0 - output))",
"def scaled_sigmoid(self, x):\r\n return (tf.keras.backend.sigmoid(x) * 30 - 5)",
"def sigmoid(self, x):\n # Função de ativação\n return 1/(1 + np.exp(-x) )",
"def sigmoid_cross_entropy(y, label):\r\n losses = - np.log(y + g_epsilon) * label - np.log(1.0 - y + g_epsilon) * (1.0 - label)\r\n return losses"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
To be used in conjunction with loss.xentropy_with_softmax
|
def softmax_with_xentropy(z):
return softmax(z)
|
[
"def cross_entropy_with_logits_loss(input, soft_target):\n return torch.sum(- soft_target * torch.nn.functional.log_softmax(input, 1), 1)",
"def test_softmax_cross_entropy(self):\n loss_op = listwise_losses.SoftmaxCrossEntropy()\n\n y_pred = loss_op.final_activation_op({\n \"logits\": self.logits,\n \"metadata\": {\n \"mask\": self.mask\n }\n })\n assert np.isclose(y_pred[0][0].numpy(), 0.19868991, atol=1e-5)\n assert np.isclose(y_pred[2][4].numpy(), 0.0, atol=1e-5)\n\n loss = loss_op({\"mask\": self.mask}, self.y_true, y_pred)\n assert np.isclose(loss, 1.306335, atol=1e-5)",
"def softmax_cross_entropy(y, label):\r\n losses = np.sum((- np.log(y + g_epsilon) * label), axis=1)\r\n return losses\r\n pass",
"def cross_entropy_loss():\n return nn.CrossEntropyLoss()",
"def test_aux_softmax_cross_entropy(self):\n loss_op = listwise_losses.AuxiliarySoftmaxCrossEntropy()\n\n y_pred = loss_op.final_activation_op({\n \"logits\": self.logits,\n \"metadata\": {\n \"mask\": self.mask\n }\n })\n\n assert np.isclose(y_pred[0][0].numpy(), 0.19868991, atol=1e-5)\n assert np.isclose(y_pred[2][4].numpy(), 0.0, atol=1e-5)\n\n loss = loss_op({\"mask\": self.mask}, self.y_true_aux, y_pred)\n assert np.isclose(loss, 0.88127804, atol=1e-5)",
"def ap_entropy(X, M, R):",
"def convert_softmax_with_cross_entropy(g, op, block):\n\n logits = g.get_node(op.input(\"Logits\")[0])\n labels = g.get_node(op.input(\"Label\")[0])\n ignore_index = op.attr(\"ignore_index\")\n axis = op.attr(\"axis\")\n if axis < 0:\n axis = len(infer_shape(logits)) + axis\n\n softmax = _op.nn.softmax(logits, axis=axis)\n\n g.add_node(op.output(\"Softmax\")[0], softmax)\n\n softmax = _op.log(softmax)\n soft_label = op.attr(\"soft_label\")\n if soft_label:\n loss = _op.sum(-labels * softmax, axis=axis)\n else:\n labels_one = _op.one_hot(\n labels,\n on_value=_expr.const(1.0, dtype=\"float32\"),\n off_value=_expr.const(0.0, dtype=\"float32\"),\n depth=infer_shape(logits)[axis],\n axis=axis + 1,\n dtype=\"float32\",\n )\n labels_one = _op.squeeze(labels_one, axis=axis)\n loss = _op.sum(-labels_one * softmax, axis=axis)\n loss = _op.expand_dims(loss, axis=axis)\n if ignore_index != -100: # noly when soft_label is False\n assert not soft_label, \"soft_label and ignore_index cannot be set at the same time.\"\n ignore_mask = _op.not_equal(labels, _expr.const(ignore_index, dtype=\"int64\"))\n ignore_mask = _op.cast(ignore_mask, \"float32\")\n loss = _op.multiply(loss, ignore_mask)\n\n g.add_node(op.output(\"Loss\")[0], loss)",
"def softmax_entropy_mat(T,Y):\n m = T.shape[0]\n return - sum([softmax_entropy(t,y) for t,y in zip(T,Y)]) / m",
"def softmax_loss(x, y):\n probs = np.exp(x - np.max(x, axis=1, keepdims=True)) # 减去最大的score,防止指数爆炸\n probs /= np.sum(probs, axis=1, keepdims=True)\n N = x.shape[0]\n data_loss = -np.sum(np.log(probs[np.arange(N), y])) / N\n dx = probs.copy()\n dx[np.arange(N), y] -= 1\n dx /= N\n\n return data_loss, dx",
"def _log_softmax(x):\n return x - logsumexp(x)",
"def ggml_cross_entropy_loss(ctx: ffi.CData, a: ffi.CData, b: ffi.CData) -> ffi.CData:\n ...",
"def softmax_categorical_crossentropy(y_pred, y_true):\n with tf.name_scope(\"SoftmaxCrossentropy\"):\n return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_pred,\n y_true))",
"def softmax(x):\n\tx = np.array(x)\n\tif len(x.shape) > 1:\n\t\tmax_x = np.max(x, axis=1, keepdims=True)\n\t\tclass_prob = np.exp(x - max_x) \n\t\treturn class_prob / np.sum(class_prob, axis=1, keepdims=True)\n\telse:\n\t\tmax_x = np.max(x)\n\t\tclass_prob = np.exp(x - max_x)\n\t\treturn class_prob / np.sum(class_prob)",
"def compute_entropy(x):\n H = 0.0\n for i in range(len(x)):\n if 0 < x[i] < 1:\n H -= x[i] * np.log(x[i])\n return H",
"def cross_entropy(X, y, using_onehot=True):\n\tM = y.shape[0]\n\tif using_onehot :\n\t\tlog_likelihood = -np.log(np.max(X * y, -1))\n\telse:\n\t\tlog_likelihood = -np.log(X[range(M), y]) # 找到y对应的那个类别所对应的logit\n\tloss = np.sum(log_likelihood) / M\n\treturn loss",
"def Weighted_Cross_Entropy(y_true, y_pred, eps = 1e-10):\n y_pred = tf.cast(y_pred, 'float64')\n y_true = tf.cast(y_true, 'float64')\n # deduce weights based on true pixel value\n class_weights = weights * y_true\n # compute your (unweighted) softmax cross entropy loss\n unweighted_losses = y_true*tf.math.log(y_pred + eps)\n ##print(unweighted_losses.dtype, weights.dtype)\n weighted_losses = unweighted_losses * class_weights\n # reduce the result to get your final loss\n loss = -tf.reduce_sum(weighted_losses)\n return loss",
"def log_softmax(x: jnp.DeviceArray, *, axis: int = 0) -> jnp.DeviceArray:\n return x - jnp.expand_dims(jnp.log(jnp.sum(jnp.exp(x), axis=axis)), axis)",
"def binary_crossentropy(output, target):\r\n return -(target * tensor.log(output) + (1.0 - target) * tensor.log(1.0 - output))",
"def CrossEntropyWithSoftmax(self, cntk_op, inputs):\n cast_0, cast_1 = squeeze_axes(inputs)\n\n if cast_0.axes.lengths != cast_1.axes.lengths:\n cast_0 = ng.Transpose(cast_0)\n assert cast_0.axes.lengths == cast_1.axes.lengths\n\n cast_0 = ng.cast_axes(cast_0, axes=cast_1.axes)\n loss = ng.cross_entropy_multi(ng.softmax(cast_0), cast_1)\n\n return ng.mean(loss, out_axes=()).named(cntk_op.uid)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Compute the maximal score for a Yahtzee hand according to the upper section of the Yahtzee score card.
|
def score(hand):
    # Upper-section scoring: for each die value, the candidate score is (count of that value) * value.
    scores = []
    for dice in hand:
        scores.append(hand.count(dice) * dice)
    return max(scores)
|
[
"def score(hand):\n summy = {1:0,2:0,3:0,4:0,5:0,6:0,7:0,8:0,9:0,10:0}\n maxy = 0\n for ind in hand:\n summy[ind] = ind + summy[ind]\n for key in summy:\n if summy[key] > maxy:\n maxy = summy[key]\n return maxy",
"def score(hand):\n if (hand==()):\n return 0\n score_board=[0,0,0,0,0,0,0,0,0,0,0,0]\n for dice in hand:\n score_board[dice-1]+=dice\n max_score=max(score_board)\n return max_score",
"def max_score(self) -> int:\n return max(self.score(center) for center in self)",
"def max_score(self):\n return self.raw_possible",
"def high_card(hand):\r\n card_rank = ['--23456789TJQKA'.index(n) for n,h in hand]\r\n card_rank.sort()\r\n card_rank.reverse()\r\n if card_rank == [14,5,4,3,2]:\r\n card_rank = [5,4,3,2,1]\r\n return max(card_rank)",
"def highCard(self):\n return max(self)",
"def poker(hands):\r\n return allmax(hands, key=hand_rank)",
"def getHighScore(self):\n return max(self.scores)",
"def max_score(self):\r\n return self.lcp.get_max_score()",
"def max_ultility(board):\n if terminal(board):\n return utility(board)\n\n # As opponent is min player, they attempt to gain lower score \n player_max = -math.inf\n possible_moves = actions(board)\n\n for move in possible_moves:\n result_board = result(board, move)\n player_max = max(player_max, min_ultility(result_board))\n\n return player_max",
"def max_score(self):\n return max(self._extract_set('score') or [0])",
"def _maxscore(self):\n total = 0\n lowtot= 0\n for lli in self.ll:\n total = total + max(lli.values())\n lowtot= lowtot+ min(lli.values())\n self.maxscore = total\n self.minscore = lowtot",
"def score(cards):\n \n values = sorted(map(lambda x: x[0], cards))\n\n if same_suit(cards) and values[0] == 10 and values[4] == 14: # royal flush\n return (10, 14, 0) \n\n if same_suit(cards) and values[4] - values[0] == 4 and len(set(values)) == 5: # straigh flush\n return (9, values[4], 0)\n\n if len(set(values)) == 2 and values[1] == values[3]: # four of a kind\n if values[0] != values[1]:\n high_card = values[0]\n else: high_card = values[4]\n return (8, values[2], high_card)\n\n if len(set(values)) == 2 and values[1] != values[3]: # full house\n return (7, values[2], 0)\n\n if same_suit(cards): # flush\n return (6, values[4], 0)\n\n if values[4] - values[0] == 4 and len(set(values)) == 5: # straight\n return (5, values[4], 0)\n\n if len(set(values)) == 3: # three of a kind or two pair\n # three of a kind\n if values[0] == values[2]:\n return (4, values[0], max(values[3:5]))\n if values[1] == values[3]:\n return (4, values[1], max(values[0], values[4]))\n if values[2] == values[4]: \n return (4, values[2], max(values[0:2]))\n else: # two pair\n return (3, max(values[1], values[3]), dict((values.count(i), i) for i in values)[1])\n\n if len(set(values)) == 4: # one pair\n high_value_card = dict((values.count(i), i) for i in values)[2]\n s = set(values)\n s.remove(high_value_card)\n return (2, high_value_card, max(s))\n\n return (1, values[4], 0)",
"def max_score(self):\n return self.weight if self.has_score else None",
"def get_big_joker_value(deck):\n \n return max(deck)",
"def get_big_joker_value(deck: List[int]) -> int:\n return max(deck)",
"def worst_score(self):\r\n pass",
"def getMaxAlignmentScore(self):\n # get max of each row\n # max_scores = [max(i) for i in self.matrix]\n\n # return the max of the max vaules\n return numpy.max(self.matrix)",
"def get_best(hand):\n plays = find_plays(hand)\n if len(plays) == 0:\n return 'High Card'\n else:\n return plays[len(plays) - 1]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generate all possible choices of dice from hand to hold.
|
def gen_all_holds(hand):
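    # Start from the empty hold and, for each die, extend every hold collected so far with that die.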
held_dice = [()]
for dice in hand:
for dummy_dice in held_dice:
held_dice = held_dice + [tuple(dummy_dice) + (dice, )]
return set(held_dice)
|
[
"def gen_all_holds(hand):\r\n possible_holds = set([()])\r\n \r\n for dice in hand:\r\n temp_holds = possible_holds.copy()\r\n for hold in temp_holds:\r\n temp_seq = list(hold)\r\n temp_seq.append(dice)\r\n possible_holds.add(tuple(temp_seq))\r\n \r\n return possible_holds",
"def gen_all_holds(hand):\n answer = set([()])\n for dice in hand:\n temp = set(answer)\n for hold in answer:\n hold_list = list(hold)\n hold_list.append(dice)\n temp.add(tuple(hold_list))\n answer = temp\n return answer",
"def gen_all_holds(hand):\r\n all_hold_seq = set([])\r\n # can only have 3 1's, 1 FIVE and 1 SIX\r\n #for xxx in range(0, len(hand)+1):\r\n for xxx in range(0, len(hand)+1):\r\n all_hold_seq.update(gen_all_limit_seq(hand, xxx))\r\n return all_hold_seq\r\n\r\n # use total count board formula to check len(all_hold_seq) is right\r",
"def gen_all_holds(hand):\n \n # Create a set of unique hold hands by dice index so that duplicate values don't intefere\n hold_idx_set = set([()])\n for dummy_idx in range(len(hand)):\n temp_set = set()\n for partial_sequence in hold_idx_set:\n for idx in range(len(hand)):\n new_sequence = list(partial_sequence)\n if idx not in new_sequence:\n new_sequence.append(idx)\n temp_set.add(tuple(new_sequence))\n hold_idx_set.update(temp_set)\n \n # Convert the unique set of hold hand indexes to an actual set of hold hand values\n all_holds_set = set([()])\n for idx_hand in hold_idx_set:\n hold_hand = tuple(sorted([hand[idx_hand[idx]] for idx in range(len(idx_hand))]))\n all_holds_set.add(hold_hand)\n \n return all_holds_set",
"def chooseDice(self):\n # choices is a list of the indexes of the selected dice\n choices = [] # No dice chosen yet\n while True: \n # wait for user to click a valid button\n # added choice for Help\n b = self.choose([\"Die 1\", \"Die 2\", \"Die 3\", \"Die 4\", \"Die 5\",\n \"Roll Dice\", \"Score\", \"Help\"])\n\n if b[0] == \"D\": # User clicked a die button\n i = int(b[4]) - 1 # Translate label to die index\n if i in choices: # Currently selected, unselect it\n choices.remove(i)\n self.dice[i].setColor(\"black\")\n else: # Currently unselected, select it\n choices.append(i)\n self.dice[i].setColor(\"gray\")\n elif b == \"Help\": # choice added for Help\n self.show_help()\n else: # User clicked Roll or Score\n for d in self.dice: # Revert appearance of all dice\n d.setColor(\"black\")\n if b == \"Score\": # Score clicked, ignore choices\n return []\n elif choices != []: # Don't accept Roll unless some\n return choices # dice are actually selected",
"def reroll_selected_dice(selected_dice, yatzy_dice):\n for die in selected_dice:\n yatzy_dice[die] = random_die()",
"def dealHand(n):\n hand={}\n numVowels = n / 3\n \n for i in range(numVowels):\n x = VOWELS[random.randrange(0,len(VOWELS))]\n hand[x] = hand.get(x, 0) + 1\n \n for i in range(numVowels, n): \n x = CONSONANTS[random.randrange(0,len(CONSONANTS))]\n hand[x] = hand.get(x, 0) + 1\n \n return hand",
"def safe_dice():\n return [1] * 14",
"def dealHand(n):\n hand={}\n numVowels = n // 3\n \n for i in range(numVowels):\n x = VOWELS[random.randrange(0,len(VOWELS))]\n hand[x] = hand.get(x, 0) + 1\n \n for i in range(numVowels, n): \n x = CONSONANTS[random.randrange(0,len(CONSONANTS))]\n hand[x] = hand.get(x, 0) + 1\n \n return hand",
"def deal_hand(n):\n hand = {}\n num_vowels = n // 3\n\n for i in range(num_vowels):\n x = VOWELS[random.randrange(0, len(VOWELS))]\n hand[x] = hand.get(x, 0) + 1\n\n for i in range(num_vowels, n):\n x = CONSONANTS[random.randrange(0, len(CONSONANTS))]\n hand[x] = hand.get(x, 0) + 1\n\n return hand",
"def rollDices():\n for i in range(5):\n dices[i] = randint(1, 6)",
"def deal_hand(n):\n\n hand={}\n num_vowels = int(math.ceil(n / 3))\n\n for i in range(num_vowels):\n vow = random.choice(VOWELS)\n hand[vow] = hand.get(vow, 0) + 1\n\n for i in range(num_vowels, n):\n cons = random.choice(CONSONANTS)\n hand[cons] = hand.get(cons, 0) + 1\n\n return hand",
"def generate_questions(num):\n\tdb = []\n\tfor hand, strategies in basic_strategy.items():\n\t\tfor i, answer in enumerate(strategies):\n\t\t\tupcard = \"A\" if i+2 == 11 else i+2\n\t\t\tprompt = \"{0} vs {1}\".format(hand, upcard)\n\t\t\tdb.append((prompt, answer))\n\treturn random.sample(db, num)",
"def roll_dices():\n dices = []\n\n for i in range(DICE_COUNT):\n dice = random.randrange(MIN_DICE, MAX_DICE + 1)\n dices.append(dice)\n\n return dices",
"def get_outcomes(num_die_sides):\n outcomes = []\n\n for value in range(1, num_die_sides + 1):\n outcomes.append(value)\n\n return outcomes\n\n\n \"\"\"\n Iterative function that enumerates the set of all sequences of\n outcomes of given length.\n DO NOT MODIFY.\n\n outcomes: possible values of a roll (ex. -- [1,2,3,4,5,6] for a 6-sided die)\n \"\"\"\n\n answer_set = set([()])\n for dummy_idx in range(length):\n temp_set = set()\n for partial_sequence in answer_set:\n for item in outcomes:\n new_sequence = list(partial_sequence)\n new_sequence.append(item)\n temp_set.add(tuple(new_sequence))\n answer_set = temp_set\n return answer_set",
"def roll():\n population = ('pink', 'dot', 'razorback', 'trotter', 'snouter', 'jowler')\n weights = (34.9, 30.2, 22.4, 8.8, 3.0, 0.61)\n pigs = random.choices(population, weights, k=2)\n return pigs",
"def roll_dice():\r\n parts = {1: \"body\", 2: \"head\", 3: \"two legs\", 4: \"eye\", 5: \"feeler\", 6: \"tail\"}\r\n x = random.randint(1,6) #A die has 6 sides so the number ranges from 1 to 6\r\n return (parts .get(x))",
"def computerChoices():\n return choices[random.randint(0,len(choices)-1)]",
"def select_hands_for_players(self):\n return [random.choice(h.possible_hands) for h in self.holdem_ranges]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Compute the hold that maximizes the expected value when the discarded dice are rolled.
|
def strategy(hand, num_die_sides):
best_hold = (0.0, ())
current_score = 0
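    # Score every possible hold by its expected value and keep the best one.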
for held_dice in gen_all_holds(hand):
score = expected_value(held_dice, num_die_sides, len(hand) - len(held_dice))
if score > current_score:
current_score = score
best_hold = (current_score, held_dice)
return best_hold
|
[
"def strategy(hand, num_die_sides):\n all_hold = list(gen_all_holds(hand))\n maxi_exp_val = 0\n print all_hold\n for hold in all_hold:\n if expected_value(hold, num_die_sides, len(hand) - len(hold)) >= maxi_exp_val:\n maxi_exp_val = expected_value(hold, num_die_sides, len(hand) - len(hold))\n maxi_hold = hold\n print maxi_exp_val, maxi_hold\n return (maxi_exp_val, maxi_hold)",
"def expected_value(held_dice, num_die_sides, num_free_dice):\n outcomes = get_outcomes(num_die_sides)\n print \"outcomes:\", outcomes\n\n # generate all possible sequences of rolls\n all_rolls = list(gen_all_sequences(outcomes, num_free_dice))\n results = [max_repeats(roll) for roll in all_rolls]\n value = 0.0 \n\n\n for result in all_rolls:\n curr_hand = tuple(list(held_dice) + list(result))\n value += score(curr_hand)\n\n return value / len(all_rolls)",
"def strategy(hand, num_die_sides):\n max_value = 0\n for held_dice in gen_all_holds(hand):\n value = expected_value(held_dice, num_die_sides, len(hand) - len(held_dice))\n if value > max_value:\n decision = held_dice\n max_value = value\n \n \n return (max_value, decision)",
"def expected_value(held_dice, num_die_sides, num_free_dice):\r\n temp = gen_all_sequences(tuple(range(1, num_die_sides + 1)), num_free_dice)\r\n total = 0\r\n \r\n for item in temp:\r\n total += score(held_dice + item)\r\n return float(total)/len(temp)",
"def expected_value(held_dice, num_die_sides, num_free_dice):\n outcomes = [dummy_i + 1 for dummy_i in range(num_die_sides)]\n free_dice = gen_all_sequences(outcomes, num_free_dice)\n all_dice = [list(held_dice) + list(dummy_dice) for dummy_dice in free_dice]\n return sum([score(dummy_dice) for dummy_dice in all_dice])*1.0/len(free_dice)",
"def expected_value(held_dice, num_die_sides, num_free_dice):\n result = 0\n outcomes = range(1, num_die_sides + 1)\n possible = sorted(gen_all_sequences(outcomes, num_free_dice))\n for hand in possible:\n result += score(held_dice + hand)\n return float(result)/len(possible)",
"def expected_value(held_dice, num_die_sides, num_free_dice):\n # initialize local variables\n exp_score = 0\n # calculate all dice outcomes that can be rolled on a single die\n dice_outcomes = range(1, num_die_sides + 1)\n # calculate all possible combination dice that can be rolled\n # for num_free_dice\n poss_rolls = gen_all_sequences(dice_outcomes, num_free_dice)\n # iterate over all of these combinations and calculate the\n # expected score on concatenated held dice and\n # current possible dice roll combination\n for roll in poss_rolls:\n temp_roll = sorted(list(roll)+list(held_dice))\n exp_score += (1.0/((num_die_sides)**num_free_dice) * score(tuple(temp_roll)))\n \n return exp_score",
"def maximum_roll(self):\n if self.dice_array is None:\n return self.number * self.sides\n else:\n return np.sum(self.dice_array)",
"def max_scoring_num_rolls(dice=six_sided):\n \"*** YOUR CODE HERE ***\"\n k, avg, high, num_die = 1, 0, 0, 0\n while (k <= 10):\n avg = make_averaged(roll_dice, 100000)(k, dice) \n print(\"{} dice scores {} on average\".format(k, avg))\n if (high < avg):\n high = avg\n num_die = k\n k = k + 1\n return num_die",
"def roll_die(sides = 6, maxi = 6):\n d = 1000\n # discard highest roll(s)\n while d > maxi:\n d = random.randint(1,sides)\n return d",
"def max_dice(self):\n return self.check_cached_value(\"max_dice\", default_values.MAX_DICE)",
"def getFairDieRoll():\n return 4",
"def roll(self):\n self.currentValue = choice(self.possibleValues)\n self.value = AngryDie.ANGRY_VALUES[self.currentValue]\n return self.currentValue",
"def scorecard_calculate_upper(player_dice, player_choice_upper) -> int:\n upper_choice = int(player_choice_upper)\n choice_in_dice = player_dice.count(upper_choice)\n return choice_in_dice * upper_choice",
"def big_straight(dice):\n if sorted(dice) == [2, 3, 4, 5, 6]:\n return sum(dice)\n return 0",
"def roll_die(self):\n\t\tresult = random.randint(1, self.sides)\n\t\treturn result",
"def evaluate():\n\n global all_possible_holds\n global index\n global match_flag\n global match_cnt\n global no_of_deals\n\n print \"evaluating....\"\n print datetime.datetime.now().time()\n \n del all_possible_holds[:]\n index = 0 \n \n # all 32 possible strategies (hold possibilities) for a dealt hand ( need to calculate expected value for each of these ) \n for i in range(0,len(hand)+1):\n for subset in itertools.combinations(hand,i):\n all_possible_holds.append(subset) \n\n # build all possible trial hands by brute force and calculate expected value for all possible hold strategies\n expected_value = [] \n for item in all_possible_holds: \n number_of_draws = 5-len(item)\n no_all_possible_draws = combination(len(deck),number_of_draws) \n payout_running_sum = 0\n sets = itertools.combinations(deck,number_of_draws)\n for subset in sets: \n trial_hand = item + subset\n payout_running_sum = payout_running_sum + payout(trial_hand) \n expected_value.append(payout_running_sum/no_all_possible_draws) \n \n\n #find hold strategy with maximum expected value\n max_val = 0.0\n for i in range(0,len(expected_value)):\n if expected_value[i] > max_val:\n max_val = expected_value[i]\n index = i \n\n print datetime.datetime.now().time()\n print \"hold: \"\n for item in all_possible_holds[index]: \n print print_num[item[0]],\n print print_color[item[1]],\n print ' ',\n print '' \n \n print \"with expected value: \",\n print max_val \n \n user_match = False\n calc_match = False\n no_of_deals += 1\n for user_item in user_holds:\n if user_item in all_possible_holds[index]:\n user_match = True\n else:\n user_match = False\n break\n for hold_item in all_possible_holds[index]:\n if hold_item in user_holds:\n calc_match = True\n else:\n calc_match = False\n break\n if (user_match and calc_match) or (len(all_possible_holds[index]) == 0 and len(user_holds) == 0):\n print \"Correct\"\n match_flag = True\n match_cnt += 1\n \n else:\n print \"Wrong\" \n match_flag = False \n \n print match_cnt\n print no_of_deals \n print float(match_cnt)/float(no_of_deals) * 100",
"def _score(self, dice):\n return self._multiplier * sum(dice) + self._base",
"def reroll(self, values: Iterable[TChancesValue]) -> Dice:\n rerolled, kept = self.partition(values)\n return self.sum([kept, self.as_total_chance(rerolled.total_chance)])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Find names in a sentence based on a FIRST_NAMES file
|
def find_names(sentence=None, last_names_enabled=True, no_names_enabled=False):
if not sentence:
raise Exception(ParameterMissing, "This method requires sentence as input")
if not isinstance(sentence, str):
raise Exception(TypeError, "This method requires string as input")
first_names = get_first_names_pack()
if not first_names:
raise Exception(VariableNotSet, "Variable FIRST_NAMES is not set in settings.py")
if last_names_enabled:
last_names = get_last_names_pack()
if not last_names:
raise Exception(VariableNotSet, "Variable LAST_NAMES is not set in settings.py")
first_names = list(set(first_names).union(set(last_names)))
if no_names_enabled:
no_names = get_no_names_pack()
if not no_names:
raise Exception(VariableNotSet, "Variable NO_NAMES is not set in settings.py")
first_names = list(set(first_names).difference(set(no_names)))
punctuation = '!@#$%^&*()_+<>?:.,;'
for c in sentence:
if c in punctuation:
sentence = sentence.replace(c, " ")
words = sentence.lower().split()
res = set(words).intersection(first_names)
to_return = [w.title() for w in res]
return to_return
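# Hedged usage sketch, assuming the FIRST_NAMES/LAST_NAMES packs configured in settings.py
# are lowercase name lists that contain e.g. "john" and "mary":
# find_names("Yesterday John met Mary.")   # -> ['John', 'Mary'] (order not guaranteed; it comes from a set)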
|
[
"def find_names(text):\n\n names = []\n\n # spacy doc\n doc = nlp(text)\n\n # pattern\n pattern = [{'LOWER': 'prime'},\n {'LOWER': 'minister'},\n {'POS': 'ADP', 'OP': '?'},\n {'POS': 'PROPN'}]\n\n # Matcher class object\n matcher = Matcher(nlp.vocab)\n matcher.add(\"names\", None, pattern)\n\n matches = matcher(doc)\n\n # finding patterns in the text\n\n for i in range(0, len(matches)):\n\n # match: id, start, end\n token = doc[matches[i][1]:matches[i][2]]\n # append token to list\n names.append(str(token))\n\n # Only keep sentences containing Indian PMs\n\n for name in names:\n if (name.split()[2] == 'of') and (name.split()[3] != \"India\"):\n names.remove(name)\n\n return names",
"def find_names_position(sentence=None, last_names_enabled=True, no_names_enabled=False):\n if not sentence:\n raise Exception(ParameterMissing, \"This method requires sentence as input\")\n\n if not isinstance(sentence, str):\n raise Exception(TypeError, \"This method requires string as input\")\n\n names_found = find_names(sentence, last_names_enabled=last_names_enabled, no_names_enabled=no_names_enabled)\n\n to_return = []\n for name in names_found:\n begin_positions = [m.start() for m in re.finditer(name, sentence)]\n for begin in begin_positions:\n to_return.append((begin, begin + len(name)))\n # begin = sentence.lower().index(name.lower())\n # end = begin + len(name)\n # to_return.append((begin, end))\n\n return to_return",
"def add_name_matching_to_nlp_pipeline(nlp):\n file_lasts = \"spanish_last_names.txt\"\n file_firsts = \"spanish_first_names.txt\"\n\n\n\n first_names_set_all = read_file_into_set(file_firsts)\n # Removing the names which consist of more than one name (eg. Maria Carmen)\n first_names_set = {name for name in first_names_set_all if len(name.split(\" \")) == 1}\n first_name_matcher = FirstNameListMatcher(first_names_set)\n\n last_names_set_all = read_file_into_set(file_lasts)\n # Removing the names which consist of more than one name\n last_names_set = {name for name in last_names_set_all if len(name.split(\" \")) == 1}\n last_name_matcher = LastNameListMatcher(last_names_set)\n\n full_name_matcher = FullNameMatcher()\n\n # Potential optimisation: remove accents once, then use this info in the NameListMatcher\n # To do so: change code in NameListMatcher to call the sin_accent custom extension and uncomment next 2 lines\n # accent_remover = AccentRemover()\n # nlp.add_pipe(accent_remover, last=True)\n\n nlp.add_pipe(first_name_matcher, last=True)\n nlp.add_pipe(last_name_matcher, last=True)\n nlp.add_pipe(full_name_matcher, last=True)",
"def get_named_people_from_sen(sen):\n wordlist = sen['words']\n entities = []\n\n named = []\n for index, word in enumerate(wordlist):\n if word[1]['NamedEntityTag'] == 'PERSON':\n named.append(word)\n\n try:\n next = wordlist[index+1]\n except:\n named = []\n break\n\n if next[1]['NamedEntityTag'] != 'PERSON':\n if named:\n entities.append(named)\n named = []\n\n return entities",
"def get_named_people_by_sentence(sen_dict):\n named = [get_named_people_from_sen(sen) for sen in sen_dict['sentences']]\n return named",
"def fetch_candidate_name(self):\r\n # variable to save possible matches\r\n possible_names = []\r\n\r\n # source text is input document in text format\r\n nlp_text = self.doc # := nlp(self.stringtext)\r\n\r\n # Add patterns to match proper names\r\n patterns = [[{'POS': 'PROPN'}]]\r\n self.matcher.add('NAME', patterns) \r\n matches = self.matcher(nlp_text) \r\n\r\n # fetch the matches\r\n for match_id, start, end in matches:\r\n span = nlp_text[start:end] \r\n possible_names += [span.text] \r\n if len(possible_names) >= 2: \r\n break\r\n\r\n # Extract candidates\r\n doc_entities = self.doc.ents\r\n\r\n # Subset to person type entities\r\n doc_persons = filter(lambda x: x.label_ == 'PERSON', doc_entities)\r\n doc_persons = filter(lambda x: len(\r\n x.text.strip().split()) >= 2, doc_persons)\r\n doc_persons = map(lambda x: x.text.strip(), doc_persons)\r\n doc_persons = list(doc_persons)\r\n\r\n # Assume the first Person entity with more than two tokens is the candidate's name\r\n if len(doc_persons) > 0:\r\n return possible_names + [doc_persons[0]]\r\n\r\n return \"NOT FOUND\"",
"def get_speaker_names(filename):\n petitioner_strings = ['petitioner',\\\n 'appellant',\\\n 'plaintiff']\n respondent_strings = ['respondent',\\\n 'appellee',\\\n 'defendant']\n # The second 'start' condition is for case 10-7387,\n # which does not include the word 'APPEARANCES'\n reg = re.compile(r\"\"\"(?P<start>APPEARANCES:\\n|\n JASON\\sD\\.\\sHAWKINS,\\sESQ\\.,\\sAssistant\\sFederal\\sPublic)\n (?P<speakers>.+?)\n (?P<end>\\nC\\sO\\sN\\sT\\sE\\sN\\sT\\sS|\n \\nC\\sO\\sIn\\sT\\sE\\sIn\\sT\\sS|\n \\nORAL\\sARGUMENT\\sOF\\sPAGE)\n \"\"\", flags=re.MULTILINE|re.DOTALL|re.VERBOSE)\n\n with open(filename) as f:\n match = reg.search(f.read())\n # This is to account for the 'start' condition for case 10-7387\n if 'JASON D. HAWKINS' in match.group('start'):\n speakers_string = match.group('start') + match.group('speakers')\n else:\n speakers_string = match.group('speakers')\n\n speakers = speakers_string.split('.\\n')\n petitioner_speakers = []\n respondent_speakers = []\n for speaker in speakers:\n name = speaker.split(',')[0]\n if any(s in speaker.lower() for s in petitioner_strings):\n petitioner_speakers.append(name)\n else:\n respondent_speakers.append(name)\n return petitioner_speakers, respondent_speakers",
"def _findfirststart(starts, names):\n hout = []\n for hh in starts:\n for cc in names:\n if cc.startswith(hh):\n hout.append(cc)\n break\n return hout",
"def get_named_people(sen_dict):\n named = []\n for sen in sen_dict['sentences']:\n named.extend(get_named_people_from_sen(sen))\n return named",
"def search_person(name):\r\n results = ''\r\n counter = 0\r\n with open('individus.tsv', 'r') as tsv_file:\r\n lines = csv.DictReader(tsv_file, delimiter='\\t')\r\n for line in lines:\r\n if name == line['name']:\r\n results += '\\nName : %s %s\\nPhone: %s\\nAdresse \\\r\n and city: %s %s\\n-------------' % (\r\n line['name'],\r\n line['last_name'],\r\n line['phone'],\r\n line['adresse'],\r\n line['city'])\r\n counter += 1\r\n\r\n return (results, counter)",
"def test_lookup_first_name(self):\n result = self.env.run('phonebook ' + \\\n ('lookup Mary ') + \\\n ('-b %s/phonebook_fixture.pb' % self.prefix))\n expected_output = (\"Mary Anderson 572 932 1921\")\n nose.tools.assert_in(expected_output, result.stdout)",
"def sample_first_name(first_name_file, num_samples):\n\n df = pd.read_csv(first_name_file, header=None)\n df.columns = [\"name\", \"gender\", \"count\"]\n df = df[(df[\"count\"] > 10)]\n names = df[\"name\"].sample(n=num_samples, random_state=2021, replace=True).apply(str.title)\n\n return list(names.values)",
"def get_names_following_titles(review):\n names = []\n spans = []\n\n txt = review.no_pubs_text\n\n iterx = re.finditer(title_list, txt)\n indices = [(m.start(), m.group()) for m in iterx]\n\n for e, index in enumerate(indices):\n\n if (e==len(indices)-1):\n end_index = -1\n else:\n end_index = indices[e+1][0]\n\n end_span = len(txt[indices[e][0]:end_index])\n get_match = re.finditer('[A-Z]\\w+[^A-Z]|[A-Z].[^A-Z]', txt[indices[e][0]:end_index])\n matches = [(m.span(), m.group()) for m in get_match]\n matches.reverse()\n\n for n, m in enumerate(matches):\n if n<len(matches)-1:\n if (m[0][1] != matches[n-1][0][0]):\n end_span = m[0][1]\n\n result = txt[indices[e][0]:(indices[e][0] + end_span - 1)]\n\n if len(result) > len(indices[e][1]):\n names.append(txt[indices[e][0]:(indices[e][0] + end_span - 1)])\n spans.append(indices[e][0])\n\n names = [word.replace(\"'s\", \"\") for word in names]\n names = [PersonName(clean_name(word)) for word in names]\n\n for e, name in enumerate(names):\n name.review_id = review.review_id\n name.review_loc_chars = (spans[e], spans[e]+len(name))\n\n return names",
"def find_head(surnames):\n exp = \"(\\s?[A-ZÑÁÉÍÓÚ]{2,}(\\s[A-ZÑÁÉÍÓÚ]{1,})?(\\s[A-ZÑÁÉÍÓÚ]{1,})?(\\s[A-ZÑÁÉÍÓÚ]{1,})?(\\s[(]\\w+\\s?\\w+?\\s?\\w+?\\s?\\w+?[)])?(\\s{1,}[(]\\d.*?[)]))\"\n\n families = re.split(exp, surnames)\n\n heads_ind = np.array(range(1, len(families), 7))\n\n families_dict = {}\n\n for h in heads_ind:\n families_dict[families[h]] = families[h + 6]\n\n return families_dict",
"def match_person_by_name(looseMatch,peopleList,firstName,lastName,orgName,orgCode):\r\n\r\n outputPeople = []\r\n for person in peopleList:\r\n personOrg = person[\"Organisation\"]\r\n personID = person[\"ID\"]\r\n personFirst = person[\"FirstName\"]\r\n personLast = person[\"LastName\"]\r\n \r\n # perform checks\r\n firstNameMatch = (personFirst.lower() == firstName.lower() or\r\n (looseMatch and personFirst[:1].lower() == firstName[:1].lower()) or\r\n (looseMatch and firstName == \"\"))\r\n \r\n # Split up first name by . and (space) and match each \r\n # part (eg match things like John A. Smith to John Smith).\r\n # This can also resolve situations where a researcher \r\n # publishes under their middle name, as it will match\r\n # Fred J. Smith to John Smith.\r\n \r\n if(looseMatch and not firstNameMatch and (firstName.find(\" \") != -1 or firstName.find(\".\") != -1)):\r\n firstNameParts = firstName.lower().replace(\".\",\" \").split(\" \")\r\n for part in firstNameParts:\r\n if(len(part)>0):\r\n firstNameMatch = firstNameMatch or (part == personFirst[:len(part)].lower())\r\n\r\n orgCodeMatch = personOrg == orgCode\r\n lastNameMatch = personLast.lower() == lastName.lower()\r\n \r\n # check if person matches\r\n if(firstNameMatch and lastNameMatch and orgCodeMatch):\r\n outputPerson = Researcher(person[\"FirstName\"],person[\"LastName\"],person[\"ID\"],looseMatch,orgName,orgCode)\r\n outputPeople.append(outputPerson)\r\n \r\n return outputPeople",
"def process_name(name):\n def getnames_form3(a):\n \"\"\"\n Case with two commas: the name is of the format\n von Last, Jr, First\n like in: von Hicks, III, Michael\n \"\"\"\n full_last = a[0].strip()\n full_first = a[2].strip()\n junior = a[1].strip()\n von, last = get_vonlast(full_last)\n return [von.strip(), last.strip(), full_first.strip(), junior.strip()]\n\n def getnames_form2(a):\n \"\"\"\n Case with one comma: the name is of the format\n von Last, First\n like in: von Hicks, Michael\n \"\"\"\n full_last = a[0].strip()\n full_first = a[1].strip()\n junior = ''\n von, last = get_vonlast(full_last)\n return [von.strip(), last.strip(), full_first.strip(), junior]\n\n def getnames_form1(a):\n \"\"\"\n Case with NO commas: the name is of the format\n First von Last\n like in: Michael von Hicks\n \"\"\"\n last = a[0].split(' ')\n nfn = 0\n for l in last:\n if l != \"\" and not l[0].islower():\n nfn += 1\n else:\n break\n if nfn == len(last):\n nfn = -1\n\n full_first = ' '.join(last[:nfn])\n full_first = full_first.replace('.', ' ')\n full_last = ' '.join(last[nfn:])\n junior = \" \"\n von, last = get_vonlast(full_last)\n return [von.strip(), last.strip(), full_first.strip(), junior.strip()]\n\n def get_vonlast(full_last):\n von = \"\"\n last = \"\"\n\n for l in full_last.split(' '):\n if len(l) > 0 and l[0].islower():\n von += l.lower() + \" \"\n else:\n last += l + \" \"\n return von, last\n\n # Start the processing\n a = name.split(',')\n if len(a) == 3:\n fullname = getnames_form3(a)\n elif len(a) == 2:\n fullname = getnames_form2(a)\n elif len(a) == 1:\n fullname = getnames_form1(a)\n else:\n fullname = []\n\n return fullname",
"def extract_names(filename):\n\n # create the list and dictionary\n babynames = []\n ranking = {}\n\n # open the file\n f = open(filename,'r')\n\n # read all the text from the file\n fulltext = f.read()\n\n # extract the year\n matchdate = re.search(r'Popularity in \\d\\d\\d\\d',fulltext) \n date = re.search(r'\\d\\d\\d\\d',matchdate.group())\n\n # append year to list\n babynames.append(date.group())\n\n # extract each line of rank data\n matchranks = re.findall(r'<tr align=\"right\"><td>\\d+</td><td>\\w+</td><td>\\w+</td>',fulltext)\n for row in matchranks:\n\n # find the ranks \n rank = re.search(r'\\d+',row)\n \n # treat with the male/female names\n names = re.findall(r'<td>\\w+</td>',row)\n \n # get rid of the <td> tags\n male = re.sub(r'<.*?>',r'',names[1])\n female = re.sub(r'<.*?>',r'',names[2])\n\n # add the names and ranks to a dictionary\n ranking[male] = rank.group()\n ranking[female] = rank.group()\n\n # append them to a list\n for name in sorted(ranking.keys()):\n babynames.append(name + ' ' + ranking[name])\n\n return babynames",
"def get_pycon_speaker_first_names(soup=None):\n if not soup:\n soup = _get_soup(html=PYCON_HTML)\n\n names = _get_speaker_names(soup)\n first_names = [name.split()[0] for name in names]\n return first_names",
"def append_initials_to_firstnames(self):\n self.firstnames = txt_mixin.txt_list(self.firstnames)\n self.raw_firstnames = copy.copy(self.firstnames)\n N = len(self.firstnames)\n self.last_initials = ['']*N\n for i in range(N):\n first = self.firstnames[i]\n inds = self.firstnames.findall(first)\n if len(inds) > 1:\n matching_last = [self.lastnames[ind] for ind in inds]\n for j in inds:\n first = self.firstnames[j]\n last = self.lastnames[j]\n other_lasts = [olast for olast in matching_last \\\n if (olast != last)]\n last_initials = find_min_last_initials(last, other_lasts)\n self.last_initials[j] = last_initials\n first += ' ' + last_initials + '.'\n self.firstnames[j] = first"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Find the positions of names in a sentence based on a FIRST_NAMES file
|
def find_names_position(sentence=None, last_names_enabled=True, no_names_enabled=False):
if not sentence:
raise Exception(ParameterMissing, "This method requires sentence as input")
if not isinstance(sentence, str):
raise Exception(TypeError, "This method requires string as input")
names_found = find_names(sentence, last_names_enabled=last_names_enabled, no_names_enabled=no_names_enabled)
to_return = []
for name in names_found:
begin_positions = [m.start() for m in re.finditer(name, sentence)]
for begin in begin_positions:
to_return.append((begin, begin + len(name)))
# begin = sentence.lower().index(name.lower())
# end = begin + len(name)
# to_return.append((begin, end))
return to_return
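# Hedged usage sketch, continuing the find_names example above (same assumed name packs):
# find_names_position("Yesterday John met Mary.")   # -> [(10, 14), (19, 23)] for 'John' and 'Mary'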
|
[
"def _findfirststart(starts, names):\n hout = []\n for hh in starts:\n for cc in names:\n if cc.startswith(hh):\n hout.append(cc)\n break\n return hout",
"def find_head(surnames):\n exp = \"(\\s?[A-ZÑÁÉÍÓÚ]{2,}(\\s[A-ZÑÁÉÍÓÚ]{1,})?(\\s[A-ZÑÁÉÍÓÚ]{1,})?(\\s[A-ZÑÁÉÍÓÚ]{1,})?(\\s[(]\\w+\\s?\\w+?\\s?\\w+?\\s?\\w+?[)])?(\\s{1,}[(]\\d.*?[)]))\"\n\n families = re.split(exp, surnames)\n\n heads_ind = np.array(range(1, len(families), 7))\n\n families_dict = {}\n\n for h in heads_ind:\n families_dict[families[h]] = families[h + 6]\n\n return families_dict",
"def fetch_candidate_name(self):\r\n # variable to save possible matches\r\n possible_names = []\r\n\r\n # source text is input document in text format\r\n nlp_text = self.doc # := nlp(self.stringtext)\r\n\r\n # Add patterns to match proper names\r\n patterns = [[{'POS': 'PROPN'}]]\r\n self.matcher.add('NAME', patterns) \r\n matches = self.matcher(nlp_text) \r\n\r\n # fetch the matches\r\n for match_id, start, end in matches:\r\n span = nlp_text[start:end] \r\n possible_names += [span.text] \r\n if len(possible_names) >= 2: \r\n break\r\n\r\n # Extract candidates\r\n doc_entities = self.doc.ents\r\n\r\n # Subset to person type entities\r\n doc_persons = filter(lambda x: x.label_ == 'PERSON', doc_entities)\r\n doc_persons = filter(lambda x: len(\r\n x.text.strip().split()) >= 2, doc_persons)\r\n doc_persons = map(lambda x: x.text.strip(), doc_persons)\r\n doc_persons = list(doc_persons)\r\n\r\n # Assume the first Person entity with more than two tokens is the candidate's name\r\n if len(doc_persons) > 0:\r\n return possible_names + [doc_persons[0]]\r\n\r\n return \"NOT FOUND\"",
"def find_names(text):\n\n names = []\n\n # spacy doc\n doc = nlp(text)\n\n # pattern\n pattern = [{'LOWER': 'prime'},\n {'LOWER': 'minister'},\n {'POS': 'ADP', 'OP': '?'},\n {'POS': 'PROPN'}]\n\n # Matcher class object\n matcher = Matcher(nlp.vocab)\n matcher.add(\"names\", None, pattern)\n\n matches = matcher(doc)\n\n # finding patterns in the text\n\n for i in range(0, len(matches)):\n\n # match: id, start, end\n token = doc[matches[i][1]:matches[i][2]]\n # append token to list\n names.append(str(token))\n\n # Only keep sentences containing Indian PMs\n\n for name in names:\n if (name.split()[2] == 'of') and (name.split()[3] != \"India\"):\n names.remove(name)\n\n return names",
"def find_names(sentence=None, last_names_enabled=True, no_names_enabled=False):\n if not sentence:\n raise Exception(ParameterMissing, \"This method requires sentence as input\")\n\n if not isinstance(sentence, str):\n raise Exception(TypeError, \"This method requires string as input\")\n\n first_names = get_first_names_pack()\n if not first_names:\n raise Exception(VariableNotSet, \"Variable FIRST_NAMES is not set in settings.py\")\n\n if last_names_enabled:\n last_names = get_last_names_pack()\n if not last_names:\n raise Exception(VariableNotSet, \"Variable LAST_NAMES is not set in settings.py\")\n first_names = list(set(first_names).union(set(last_names)))\n\n if no_names_enabled:\n no_names = get_no_names_pack()\n if not no_names:\n raise Exception(VariableNotSet, \"Variable NO_NAMES is not set in settings.py\")\n first_names = list(set(first_names).difference(set(no_names)))\n\n punctuation = '!@#$%^&*()_+<>?:.,;'\n\n for c in sentence:\n if c in punctuation:\n sentence = sentence.replace(c, \" \")\n\n words = sentence.lower().split()\n res = set(words).intersection(first_names)\n\n to_return = [w.title() for w in res]\n\n return to_return",
"def test_word_positions_in_file(self):\n pass",
"def get_named_people_by_sentence(sen_dict):\n named = [get_named_people_from_sen(sen) for sen in sen_dict['sentences']]\n return named",
"def get_named_people_from_sen(sen):\n wordlist = sen['words']\n entities = []\n\n named = []\n for index, word in enumerate(wordlist):\n if word[1]['NamedEntityTag'] == 'PERSON':\n named.append(word)\n\n try:\n next = wordlist[index+1]\n except:\n named = []\n break\n\n if next[1]['NamedEntityTag'] != 'PERSON':\n if named:\n entities.append(named)\n named = []\n\n return entities",
"def search_person(name):\r\n results = ''\r\n counter = 0\r\n with open('individus.tsv', 'r') as tsv_file:\r\n lines = csv.DictReader(tsv_file, delimiter='\\t')\r\n for line in lines:\r\n if name == line['name']:\r\n results += '\\nName : %s %s\\nPhone: %s\\nAdresse \\\r\n and city: %s %s\\n-------------' % (\r\n line['name'],\r\n line['last_name'],\r\n line['phone'],\r\n line['adresse'],\r\n line['city'])\r\n counter += 1\r\n\r\n return (results, counter)",
"def index_of(self, last_name, first_name):\n self.is_at_with_exception()\n self.refresh_table()\n i = 0\n for item in self._table['first_name_column']:\n if item.text == first_name:\n if self._table['last_name_column'][i].text == last_name:\n return i\n else:\n i = i + 1\n return -1",
"def find_file(name):\r\n \r\n file = open(\"index.txt\",'r')\r\n \r\n n = 0\r\n for line in file:\r\n n += 1\r\n if ord(line[-1]) == 10 and line[17:-1] == name.upper(): \r\n return line[:16]\r\n elif line[17:] == name.upper():\r\n return line[:16]\r\n \r\n file.close()\r\n \r\n return n",
"def find(name, list_names):\n lower_names = lmap(to_lower, list_names)\n return lower_names.index(name.lower())",
"def windices_of_name(string, sen_dict):\n sentence_index = 0\n start_word_index = 0\n end_word_index = 0\n\n for sindex, sentence in enumerate(sen_dict['sentences']):\n for windex, word in enumerate(sentence['words']):\n if word[0] == string.split()[0]:\n matched = True\n for i, s in enumerate(string.split()[1:]):\n if sentence['words'][windex+i+1][0] != s:\n matched = False\n if matched:\n start_word_index = windex\n sentence_index = sindex\n end_word_index = windex + len(string.split())\n return [(sentence_index, start_word_index, end_word_index)]",
"def search_in_single_sentence(self, sent, find_chem_name=True):\n try:\n thisScore, this_names = self.thisModel.score(sent, hide_chem_name = find_chem_name)\n except UnicodeDecodeError:\n return 0, []\n return thisScore, this_names",
"def get_names_following_titles(review):\n names = []\n spans = []\n\n txt = review.no_pubs_text\n\n iterx = re.finditer(title_list, txt)\n indices = [(m.start(), m.group()) for m in iterx]\n\n for e, index in enumerate(indices):\n\n if (e==len(indices)-1):\n end_index = -1\n else:\n end_index = indices[e+1][0]\n\n end_span = len(txt[indices[e][0]:end_index])\n get_match = re.finditer('[A-Z]\\w+[^A-Z]|[A-Z].[^A-Z]', txt[indices[e][0]:end_index])\n matches = [(m.span(), m.group()) for m in get_match]\n matches.reverse()\n\n for n, m in enumerate(matches):\n if n<len(matches)-1:\n if (m[0][1] != matches[n-1][0][0]):\n end_span = m[0][1]\n\n result = txt[indices[e][0]:(indices[e][0] + end_span - 1)]\n\n if len(result) > len(indices[e][1]):\n names.append(txt[indices[e][0]:(indices[e][0] + end_span - 1)])\n spans.append(indices[e][0])\n\n names = [word.replace(\"'s\", \"\") for word in names]\n names = [PersonName(clean_name(word)) for word in names]\n\n for e, name in enumerate(names):\n name.review_id = review.review_id\n name.review_loc_chars = (spans[e], spans[e]+len(name))\n\n return names",
"def MatchProtNames(ProteomeDict, MS_names, MS_seqs):\n matchedNames, seqs, Xidx = [], [], []\n counter = 0\n for i, MS_seq in enumerate(MS_seqs):\n MS_seqU = MS_seq.upper()\n MS_name = MS_names[i].strip()\n if MS_name in ProteomeDict and MS_seqU in ProteomeDict[MS_name]:\n Xidx.append(i)\n seqs.append(MS_seq)\n matchedNames.append(MS_name)\n else:\n try:\n newname = getKeysByValue(ProteomeDict, MS_seqU)[0]\n assert MS_seqU in ProteomeDict[newname]\n Xidx.append(i)\n seqs.append(MS_seq)\n matchedNames.append(newname)\n except BaseException:\n print(MS_name, MS_seqU)\n counter += 1\n continue\n\n assert counter == 0, \"Proteome is missing %s peptides\" % (counter)\n assert len(matchedNames) == len(seqs)\n return matchedNames, seqs, Xidx",
"def test_lookup_first_name(self):\n result = self.env.run('phonebook ' + \\\n ('lookup Mary ') + \\\n ('-b %s/phonebook_fixture.pb' % self.prefix))\n expected_output = (\"Mary Anderson 572 932 1921\")\n nose.tools.assert_in(expected_output, result.stdout)",
"def get_speaker_names(filename):\n petitioner_strings = ['petitioner',\\\n 'appellant',\\\n 'plaintiff']\n respondent_strings = ['respondent',\\\n 'appellee',\\\n 'defendant']\n # The second 'start' condition is for case 10-7387,\n # which does not include the word 'APPEARANCES'\n reg = re.compile(r\"\"\"(?P<start>APPEARANCES:\\n|\n JASON\\sD\\.\\sHAWKINS,\\sESQ\\.,\\sAssistant\\sFederal\\sPublic)\n (?P<speakers>.+?)\n (?P<end>\\nC\\sO\\sN\\sT\\sE\\sN\\sT\\sS|\n \\nC\\sO\\sIn\\sT\\sE\\sIn\\sT\\sS|\n \\nORAL\\sARGUMENT\\sOF\\sPAGE)\n \"\"\", flags=re.MULTILINE|re.DOTALL|re.VERBOSE)\n\n with open(filename) as f:\n match = reg.search(f.read())\n # This is to account for the 'start' condition for case 10-7387\n if 'JASON D. HAWKINS' in match.group('start'):\n speakers_string = match.group('start') + match.group('speakers')\n else:\n speakers_string = match.group('speakers')\n\n speakers = speakers_string.split('.\\n')\n petitioner_speakers = []\n respondent_speakers = []\n for speaker in speakers:\n name = speaker.split(',')[0]\n if any(s in speaker.lower() for s in petitioner_strings):\n petitioner_speakers.append(name)\n else:\n respondent_speakers.append(name)\n return petitioner_speakers, respondent_speakers",
"def find_member(self, first, last=None):\n mykeys = txt_mixin.txt_list(self.members.keys())\n inds = mykeys.findall(first)\n if len(inds) == 0 and last is not None:\n #try searching by last initial\n if last[-1] == '.':\n last = last[0:-1]\n temp_list = txt_mixin.txt_list(self.last_initials)\n inds = temp_list.findall(last)\n if len(inds) == 1:\n #we found exactly one student with a matching firstname\n ind = inds[0]\n key = mykeys[ind]\n return self.members[key]\n elif len(inds) == 0:\n raise ValueError, '%s not found in self.members.keys(): %s' % \\\n (first, self.members.keys())\n elif len(inds) > 1:\n assert last, \"Found more than one first name match, but last name not specified.\"\n #we have more than one match for first\n #keep adding last initials until there is only one match\n matching_keys = [mykeys[ind] for ind in inds]\n match_with_intials = []\n for key in matching_keys:\n first, last_init = key.split(' ',1)\n if last_init[-1] == '.':\n last_init = last_init[0:-1]#drop trailing period\n if last.find(last_init) == 0:\n match_with_intials.append(key)\n assert len(match_with_intials) > 0, \\\n \"Did not find a last initials match\"\n assert len(match_with_intials) == 1, \\\n \"Found more than one last initials match\"\n return self.members[match_with_intials[0]]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Display messages in the given window
|
def displayMessages(window, messages=('',)):
    # update messages text
    message_in_line = ''
    for msg in messages:
        message_in_line += '\n' + msg
    window['messages'].update(message_in_line)
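# Hedged usage sketch: the window['messages'].update(...) call suggests a PySimpleGUI window,
# which is an assumption; the layout below is hypothetical.
# import PySimpleGUI as sg
# layout = [[sg.Multiline('', key='messages', size=(40, 10))]]
# window = sg.Window('Log', layout, finalize=True)
# displayMessages(window, ['first message', 'second message'])
# window.read()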
|
[
"def display_messages(self, layout):",
"def show_message(self, txt):\n self.statusbar.showMessage(txt)",
"def show_msg(self, msg):\r\n self.statusbar.showMessage(msg)",
"def show_message(self, msg):\n try:\n if not self.msgs_shown[hash(msg)]: #If it haven't been shown yet.\n self.console_active = True\n self.msgs_shown[hash(msg)] = True\n self.LOG_ON_SCREEN(msg, msg_size=(0.60, 0.90), text_lines=UtilityBox.line_number(msg))\n except KeyError: #the fuck msg are you trying to show\n pass",
"def show(self, window):\r\n\r\n return",
"def show_messages(self):\n self.masterlog.revealme()",
"def show_msg(self, msg):\n msg = gtk.MessageDialog(message_format=msg,flags=gtk.DIALOG_MODAL, type=gtk.MESSAGE_ERROR, buttons=gtk.BUTTONS_OK)\n msg.set_title(\" Błąd \")\n msg.run()\n msg.destroy()\n #self.program_mode = 0",
"def show_messages(self):\n for msg in self.messages:\n print msg['text']",
"def displayMessage(self, text):\n\t\tself.statusBar().showMessage(text)",
"def doMessageWindow(msg):\n _loadMsgSettings()\n if g_settings.has_key(msg):\n return\n global dialog\n dialog = QtGui.QDialog()\n msgDialog = ui.message.Ui_Dialog()\n msgDialog.setupUi(dialog)\n msgDialog.messageLabel.setText(msg)\n dialog.exec_()\n if msgDialog.showAgainCheckBox.isChecked():\n g_settings[msg] = True\n _saveMsgSettings()",
"def display_current(self,screen):\r\n \r\n self.levels[self.current].display_message(screen)",
"def display_message():\n # This docstring will be displayed in the status bar when hovering the mouse over\n # the menu item.\n\n # We update the text in the status bar.\n gui.status_message = \"Hello world\"",
"def display_msg(cls, msg):\n display_msg_surface = cls.ONSCREEN_FONT.render(msg, True, BLACK)\n display_msg_rect = display_msg_surface.get_rect()\n display_msg_rect.center = (cls.WINDOW_WIDTH / 2,\n (cls.WINDOW_HEIGHT - cls.BOTTOMBAR - cls.TOPBAR) /2 + cls.TOPBAR)\n cls.surface.blit(display_msg_surface, display_msg_rect)",
"def display_message(self, title: str, message: str):\n self.gui.infoBox(title, message)",
"def showMessage( self, msg ):\n self._console.console().information(msg)",
"def handle_messages(self, message, ms):\n\n self.statusBar().showMessage(message, ms)",
"def info(title, text):\n messagebox.showinfo(title, text)",
"def msgbox(text, width=50):\n menu(text, [], width)",
"def show_message(message, col=c.r, update=False):\n g.content = generate_songlist_display()\n g.message = col + message + c.w\n\n if update:\n screen_update()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Normalise an array to a given range.
|
def normalize_range(array, floor=0, ceil=1):
scaler = MinMaxScaler(feature_range=(floor, ceil), copy=True)
return scaler.fit_transform(array)
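# Illustrative check of the scaling (requires NumPy and scikit-learn; the import below also
# provides the MinMaxScaler name used inside normalize_range above):
import numpy as np
from sklearn.preprocessing import MinMaxScaler

example = np.array([[1.0], [5.0], [9.0]])
print(normalize_range(example))         # column scaled to [0, 1]: 0.0, 0.5, 1.0
print(normalize_range(example, 0, 10))  # column scaled to [0, 10]: 0.0, 5.0, 10.0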
|
[
"def array_normalisation(self, array,new_min=0.0,new_max=1.0):\n\n array = array.astype(float)\n\n old_min = np.amin(array)\n old_max = np.amax(array)\n\n array = new_min + (array - old_min) * (new_max - new_min) / (old_max - old_min)\n\n return array",
"def normalize(arr):\n m = np.min(arr)\n arr = arr - m\n M = np.max(arr)\n arr = arr / M\n return arr",
"def normalize_array(array):\n max_value = max(array)\n min_value = min(array)\n return (array-min_value)/(max_value-min_value)",
"def normalize(X, low, high, dtype=None):\n X = np.asarray(X)\n minX, maxX = np.min(X), np.max(X)\n # normalize to [0...1].\n X = X - float(minX)\n X = X / float((maxX - minX))\n # scale to [low...high].\n X = X * (high-low)\n X = X + low\n if dtype is None:\n return np.asarray(X)\n return np.asarray(X, dtype=dtype)",
"def normalize(X, low, high, dtype=None):\n X = np.asarray(X)\n minX, maxX = np.min(X), np.max(X)\n # normalize to [0...1].\n X = X - float(minX)\n X = X / float((maxX - minX))\n # scale to [low...high].\n X = X * (high - low)\n X = X + low\n if dtype is None:\n return np.asarray(X)\n return np.asarray(X, dtype=dtype)",
"def normalize(X, low, high, dtype=None):\r\n X = np.asarray(X)\r\n minX, maxX = np.min(X), np.max(X)\r\n # normalize to [0...1].\r\n X = X - float(minX)\r\n X = X / float((maxX - minX))\r\n # scale to [low...high].\r\n X = X * (high-low)\r\n X = X + low\r\n if dtype is None:\r\n return np.asarray(X)\r\n return np.asarray(X, dtype=dtype)",
"def normalize_spec(ndarray, low=0, high=1, min_db=-80):\n\n factor = min_db / (high-low)\n\n # might just be able to do ndarray /= -min_db\n # would invert the image though\n ndarray -= factor\n ndarray /= abs(factor)\n \n return ndarray",
"def normalize(arr):\n norm_arr = (arr - np.mean(arr)) / np.std(arr)\n return norm_arr",
"def min_max_normalization(array):\n minx = min(array)\n maxx = max(array)\n return [(x - minx) / (maxx - minx) for x in array]",
"def normalize(array):\n\n mean = np.mean(array, axis=0)\n std = np.std(array, axis=0)\n\n norm_array = (array - mean) / std\n\n return norm_array",
"def _range_normalize(vector, lower=None, upper=None):\n\n if lower is None:\n lower = vector.min()\n if upper is None:\n upper = vector.max()\n\n return (vector - lower) / (upper - lower)",
"def rescale_array(array, old_range, new_range, dtype):\n if not HAS_NUMPY:\n LOGGER.error(\"The Python library numpy is required for this operation\")\n return\n\n old_min, old_max = old_range\n if array.min() < old_min or array.max() > old_max:\n ## truncate:\n array = numpy.clip(array, old_min, old_max)\n new_min, new_max = new_range\n old_delta = float(old_max - old_min)\n new_delta = float(new_max - new_min)\n if old_delta == 0:\n return ((array - old_min) + (new_min + new_max) / 2).astype(dtype)\n else:\n return (new_min + (array - old_min) * new_delta / old_delta).astype(dtype)",
"def normalize(self, data):\n x_min, x_max = min(data), max(data)\n normalize = lambda x : (x - x_min) / (x_max - x_min)\n normalized_data = []\n [(normalized_data.append(normalize(float(x)))) for x in data]\n return normalized_data",
"def normalize_box(a:np.ndarray):\n min_x = np.min(a[:, 0])\n max_x = np.max(a[:, 0])\n min_y = np.min(a[:, 1])\n max_y = np.max(a[:, 1])\n range_x = max_x - min_x\n range_y = max_y - min_y\n a[:, 0] = (a[:, 0] - min_x) / range_x\n a[:, 1] = (a[:, 1] - min_y) / range_y\n\n # print(range_x, range_y, min_x)\n # print(a)\n return a",
"def normalize_values(values, new_min=0.0, new_max=1.0):\n old_max = max(values)\n old_min = min(values)\n old_range = (old_max - old_min)\n new_range = (new_max - new_min)\n return [(((value - old_min) * new_range) / old_range) + new_min for value in values]",
"def normalize(data):\n data_range = data.max() - data.min()\n #if data_range == 0.:\n # sys.exit(\"data.max() - data.min() == 0. !\")\n if stddev != 0.:\n data = (data - data.min()) / data_range\n\n return data",
"def _normalize_array(array, mean, std):\n if isinstance(array, torch.Tensor):\n dev = array.device\n std = torch.tensor(std, device=dev)\n mean = torch.tensor(mean, device=dev)\n return (array - mean) / std",
"def normalise_between_2_values(arraylike, min_value, max_value, invert=False):\n # normalise array between min and max values\n normalised = (arraylike - min_value) / (max_value - min_value)\n # replace anything above 1 with 1\n normalised[normalised > 1] = 1\n # replace anything below 0 with 0\n normalised[normalised < 0] = 0\n # if desired, invert the normalised values\n if invert:\n normalised = abs(normalised - 1)\n return normalised",
"def scale_using_min_max(self,data_array):\n x_min = np.min(data_array)\n x_max = np.max(data_array)\n diff_x_max_x_min = x_max - x_min\n for index in range(data_array.shape[0]):\n data_array[index] = (data_array[index] - x_min)/diff_x_max_x_min\n return data_array"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Normalise an array by its maximum absolute value. Scales each feature individually such that the maximal absolute value of each feature in the array will be 1.0. It does not shift/center the data, and thus does not destroy any sparsity.
|
def normalize_max_absolute(array):
scaler = MaxAbsScaler(copy=True)
return scaler.fit_transform(array)
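# Illustrative check (requires NumPy and scikit-learn; the import below also provides the
# MaxAbsScaler name used by normalize_max_absolute above). Each column is divided by its
# own maximum absolute value, so signs and zeros are preserved:
import numpy as np
from sklearn.preprocessing import MaxAbsScaler

example = np.array([[-2.0, 10.0], [1.0, 5.0]])
print(normalize_max_absolute(example))  # column 0 -> [-1.0, 0.5], column 1 -> [1.0, 0.5]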
|
[
"def max_normalization(array):\n return 1/np.max(array) * array.squeeze(axis=1)",
"def normalize(my_array: np.ndarray) -> np.ndarray:\n\n return np.abs(my_array)/np.max(np.abs(my_array))",
"def min_max_normalization_univariate(data) :\r\n\r\n scaled_data = [] #The data obtained afte scaling the given data\r\n\r\n min_val = min(data) #The min value of the feature \r\n max_val = max(data) #The max value of the feature\r\n\r\n #Calculating the scaled features\r\n for a in range(0, len(data)) :\r\n try :\r\n scaled_val = (data[a] - min_val) / (max_val - min_val)\r\n except ZeroDivisionError :\r\n scaled_val = 0\r\n print(\"Divide by zero encountered. Scaled feature set to 0\")\r\n scaled_data.append(scaled_val)\r\n\r\n return scaled_data",
"def normalize_array(array):\n max_value = max(array)\n min_value = min(array)\n return (array-min_value)/(max_value-min_value)",
"def normalize_max(signal):\n if len(signal) == 0:\n return NormalizationResult([], 1., 0.)\n mx = np.max(signal)\n # Avoid divide by zero\n if mx <= 0.:\n return NormalizationResult(signal, 1., 0.)\n return NormalizationResult(signal / mx, mx, 0.)",
"def normalize(a, newmax):\n return (float(newmax) * a) / np.amax(a)",
"def min_max_normalization(array):\n minx = min(array)\n maxx = max(array)\n return [(x - minx) / (maxx - minx) for x in array]",
"def array_normalisation(self, array,new_min=0.0,new_max=1.0):\n\n array = array.astype(float)\n\n old_min = np.amin(array)\n old_max = np.amax(array)\n\n array = new_min + (array - old_min) * (new_max - new_min) / (old_max - old_min)\n\n return array",
"def normalize_array(a, norm_max=255):\n c = a - np.min(a.flatten())\n c = c / np.max(c)\n centered = c * norm_max\n return centered",
"def autoscale(self, A):\n self.vmax = np.ma.max(ma.abs(A))",
"def normalise_max_abs(vector):\n\n # Check vector shape\n assert len(vector.shape) == 2\n assert vector.shape[0] < vector.shape[1]\n\n # Normalise\n for i in range(vector.shape[0]):\n maxabs = np.nanmax(np.abs(vector[i]))\n vector[i] = safe_divide(vector[i], maxabs)\n\n return vector",
"def min_max_normalization_multivariate(dataset) :\r\n\r\n scaled_data = [] #The data obtained afte scaling the given dataset\r\n \r\n #Getting the max and min values of the features\r\n max_vals = []\r\n min_vals = []\r\n for feature_index in range(0, len(dataset[0])) :\r\n min_vals.append(get_min_val_of_feature(dataset, feature_index))\r\n max_vals.append(get_max_val_of_feature(dataset, feature_index))\r\n\r\n #Calculating the scaled features\r\n for a in range(0, len(dataset)) :\r\n scaled_data.append([])\r\n for feature_index in range(0, len(dataset[a])) :\r\n scaled_val = (dataset[a][feature_index] - min_vals[feature_index]) / (max_vals[feature_index] - min_vals[feature_index])\r\n scaled_data[-1].append(scaled_val)\r\n \r\n #Returning the scaled \r\n return scaled_data",
"def reverse_normalize(data, features, min, max):\n data.loc[:, features] = data.loc[:, features].apply(lambda x: (x * (max - min)) + min)\n return data",
"def rescale(array):\n mn = array.min()\n mx = array.max()\n array_ = array.copy()\n array_ -= mn\n array_ /= (mx-mn)\n return array_",
"def scale_using_min_max(self,data_array):\n x_min = np.min(data_array)\n x_max = np.max(data_array)\n diff_x_max_x_min = x_max - x_min\n for index in range(data_array.shape[0]):\n data_array[index] = (data_array[index] - x_min)/diff_x_max_x_min\n return data_array",
"def max_normalize(a):\n if len(np.unique(a)) == 1:\n return pd.Series(np.zeros_like(a))\n else:\n return (a - np.min(a)) / (np.max(a) - np.min(a))",
"def normalize(arr):\n m = np.min(arr)\n arr = arr - m\n M = np.max(arr)\n arr = arr / M\n return arr",
"def normalize_values(folder, maximize):\n\n savedir = os.path.join(folder, 'norm')\n if not os.path.exists(savedir):\n os.makedirs(savedir)\n\n filenames = []\n raw_arrays = []\n split_indices = []\n rau_files = [f for f in os.listdir(folder) if os.path.isfile(\n os.path.join(folder, f))]\n for file in rau_files:\n # right now, assume we just want to take all files in a folder\n filenames.append(file)\n ar = np.load(os.path.join(folder, file))\n raw_arrays.append(ar)\n if len(split_indices) == 0:\n split_indices.append(ar.shape[0])\n else:\n split_indices.append(split_indices[-1] + ar.shape[0])\n joined_ar = np.concatenate(raw_arrays)\n maxval = np.max(joined_ar)\n minval = np.min(joined_ar)\n if maximize:\n result_ar = (joined_ar - minval) / (maxval - minval)\n else:\n result_ar = (maxval - joined_ar) / (maxval - minval)\n # assert np.min(result_ar) == 0, \"Normalization did not work correctly\"\n # assert np.max(result_ar) == 1, \"Normalization did not work correctly\"\n norm_arrays = np.split(result_ar, split_indices)\n del norm_arrays[-1]\n for ar_indx in xrange(len(norm_arrays)):\n assert norm_arrays[ar_indx].shape == raw_arrays[ar_indx].shape\n save_as = os.path.join(savedir, filenames[ar_indx])\n np.save(save_as, norm_arrays[ar_indx])",
"def _rescaleData(self):\n # calculate the maxmum and minimum for each feature\n featuresMax = np.max(self.data, axis=0).flatten()[2:]\n featuresMin = np.min(self.data, axis=0).flatten()[2:]\n # map the range of each feature to [-1,1]\n self.data[:, 2:] = 2*(self.data[:, 2:]-featuresMin) / \\\n (featuresMax-featuresMin)-1"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return a diagonal mask computed from an array. Useful when the data is unchanged if you transpose the array (i.e. the matrix is symmetric), e.g. in a heatmap.
|
def get_diagonal_mask(data):
    # Boolean mask that is True on and above the main diagonal; use the builtin bool,
    # since the np.bool alias has been removed from recent NumPy releases.
    mask = np.zeros_like(data, dtype=bool)
    mask[np.triu_indices_from(mask)] = True
    return mask
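# Typical use (hedged sketch): hide the redundant upper triangle of a symmetric matrix,
# e.g. a correlation matrix plotted with seaborn, whose heatmap accepts a boolean mask.
# import numpy as np
# import seaborn as sns
# corr = np.corrcoef(np.random.rand(5, 20))        # 5x5 symmetric matrix
# sns.heatmap(corr, mask=get_diagonal_mask(corr))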
|
[
"def makeMaskFromArray(array):\n if array is None: return None\n cls = globals()[\"Mask%s\" % suffixes[str(array.dtype.type)]]\n return cls(array)",
"def remove_diagonal(a):\n # TODO: parametrise dimensions\n return a[~np.eye(a.shape[0], dtype=bool)].reshape(a.shape[0], -1)",
"def row_as_diagonal(a):\n\n a = np.expand_dims(a, -2)\n\n return np.eye(a.shape[-1]) * a",
"def diag_indices_from(arr):\n return _npi.diag_indices_from(arr)",
"def diagonal(a, offset=0, axis1=0, axis2=1):\n return _npi.diagonal(a, offset=offset, axis1=axis1, axis2=axis2)",
"def dilate(array):\n # kernel = [[1] * 7] * 7 # blocky 3-pixel dilation\n y, x = np.ogrid[-3:4, -3:4]\n kernel = ((x * x) + (y * y) <= 3.5**2) # disk-like 3-pixel radial dilation\n return scipy.ndimage.binary_dilation(array, structure=kernel)",
"def diagonal(nd):\n assert nd.ndim == 2, \"diagonal requires 2 dimensional ndarray\"\n shape_min = hl.min(nd.shape[0], nd.shape[1])\n return hl.nd.array(hl.range(hl.int32(shape_min)).map(lambda i: nd[i, i]))",
"def dilate(array, dilation=10, invert=True):\n \n y, x = np.ogrid[\n -dilation : (dilation + 1),\n -dilation : (dilation + 1),\n ]\n \n # disk-like radial dilation\n kernel = (x * x) + (y * y) <= (dilation + 0.5) ** 2\n \n # If invert=True, invert True values to False etc\n if invert: \n array = ~array\n \n return ~binary_dilation(array.astype(np.bool), \n structure=kernel.reshape((1,) + kernel.shape))",
"def task6_diagonal(matrix):\n return np.diagonal(matrix)",
"def dilate(array, dilation=10, invert=True):\n\n y, x = np.ogrid[\n -dilation : (dilation + 1),\n -dilation : (dilation + 1),\n ]\n\n # disk-like radial dilation\n kernel = (x * x) + (y * y) <= (dilation + 0.5) ** 2\n\n # If invert=True, invert True values to False etc\n if invert:\n array = ~array\n\n return ~binary_dilation(\n array.astype(bool), structure=kernel.reshape((1,) + kernel.shape)\n )",
"def flatten_array(array, mask=None):\n if isinstance(array, (list, tuple)):\n if mask is None:\n return array\n array = np.asarray(array)\n if isinstance(array, np.ndarray):\n if mask is not None:\n if not isinstance(array, np.ndarray):\n raise Exception(f\"Mask type {repr(type(mask))} should be the same as array type {repr(type(array))}\")\n return array[mask]\n else:\n return array.reshape(-1)\n elif torch.is_tensor(array):\n if mask is not None:\n if not torch.is_tensor(mask):\n raise Exception(f\"Mask type {repr(type(mask))} should be the same as array type {repr(type(array))}\")\n return array[mask]\n else:\n return array.reshape(-1)\n else:\n raise Exception(f\"Unrecognized array type {repr(type(array))} during array flattening (mask type is {repr(type(mask))}')\")",
"def apply_mask(array: np.array, mask: np.integer) -> np.array:\n # Get number of bits\n bits = array.itemsize * 8\n\n # Get the corresponding integer type\n int_type = get_uint_type_by_bit_length(bits)\n\n # Get a view of the array as the corresponding uint type.\n i_array = array.view(dtype=int_type)\n\n # Apply mask\n masked_array = np.bitwise_and(i_array, mask)\n\n # Return the masked array with the proper type.\n return masked_array.view(array.dtype)",
"def diag_indices_from(arr):\r\n if not arr.ndim >= 2:\r\n raise ValueError(\"input array must be at least 2-d\")\r\n # For more than d=2, the strided formula is only valid for arrays with\r\n # all dimensions equal, so we check first.\r\n if not np.alltrue(np.diff(arr.shape) == 0):\r\n raise ValueError(\"All dimensions of input must be of equal length\")\r\n\r\n return diag_indices(arr.shape[0], arr.ndim)",
"def writeLaserMask(self, array):\n offset = self.activeOffset\n shape = self.activeShape\n stride = self.activeStride\n \n target = pg.subArray(array, offset, shape, stride)\n target[:] = 1",
"def _maskedCollapse(array_in, method): \n import numpy.ma as ma\n \n # Perform an numpy.ma array collapse along the z-axis\n if method == 'sum':\n print('(3d_collapse): Masked sum collapse of extracted slices ...')\n collapsed_array = ma.sum(array_in, axis=0)\n \n elif method == 'mean':\n print('(3d_collapse): Masked mean of extracted slices:')\n collapsed_array = ma.mean(array_in, axis=0)\n \n elif method == 'median':\n print('(3d_collapse): Masked median of extracted slices:')\n collapsed_array = ma.extras.median(array_in, axis=0)\n \n # Returns an array of type numpy.array \n return collapsed_array.data",
"def zero_diag(mat):\n\n return replace_diag(mat, np.zeros(mat.shape[0]))",
"def diagonal_tensor (diagonal_t:np.ndarray) -> np.ndarray:\n shape = np.shape(diagonal_t)\n return np.diagflat(diagonal_t).reshape(shape+shape)",
"def removeDiagonals(inArray, m):\n for i in range(-m, m + 1):\n fillDiagonal(inArray, 0, i)",
"def myMakeMask(array, range):\n m1=MV.less (array, range[0]) # mask where it is less than the 1st value\n m2=MV.greater(array, range[1]) # mask where it is more than the 2nd value\n return MV.logical_or(m1,m2)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
|coro| Refetches the inventory.
|
async def update(self) -> None:
data = await self._state.http.get_user_inventory(self.owner.id64, self.game.app_id, self.game.context_id)
self._update(data)
|
[
"def fetchInventory(self, game):\n packetPipeline = game.getModInstance('ClientMod').packetPipeline\n packetPipeline.sendToServer(FetchInventoryPacket(game.player.name))",
"async def refresh_inventory(self) -> None:\n log.debug(\"Refreshing documentation inventory...\")\n\n # Clear the old base URLS and inventories to ensure\n # that we start from a fresh local dataset.\n # Also, reset the cache used for fetching documentation.\n self.base_urls.clear()\n self.inventories.clear()\n self.renamed_symbols.clear()\n async_cache.cache = OrderedDict()\n\n # Run all coroutines concurrently - since each of them performs a HTTP\n # request, this speeds up fetching the inventory data heavily.\n coros = [\n self.update_single(\n package[\"package\"], package[\"base_url\"], package[\"inventory_url\"]\n ) for package in await self.bot.api_client.get('bot/documentation-links')\n ]\n await asyncio.gather(*coros)",
"async def _fetch_inventory(self, inventory_url: str) -> Optional[dict]:\n fetch_func = functools.partial(intersphinx.fetch_inventory, SPHINX_MOCK_APP, '', inventory_url)\n for retry in range(1, FAILED_REQUEST_RETRY_AMOUNT+1):\n try:\n package = await self.bot.loop.run_in_executor(None, fetch_func)\n except ConnectTimeout:\n log.error(\n f\"Fetching of inventory {inventory_url} timed out,\"\n f\" trying again. ({retry}/{FAILED_REQUEST_RETRY_AMOUNT})\"\n )\n except ProtocolError:\n log.error(\n f\"Connection lost while fetching inventory {inventory_url},\"\n f\" trying again. ({retry}/{FAILED_REQUEST_RETRY_AMOUNT})\"\n )\n except HTTPError as e:\n log.error(f\"Fetching of inventory {inventory_url} failed with status code {e.response.status_code}.\")\n return None\n except ConnectionError:\n log.error(f\"Couldn't establish connection to inventory {inventory_url}.\")\n return None\n else:\n return package\n log.error(f\"Fetching of inventory {inventory_url} failed.\")\n return None",
"def openInventory(self):\n self.inventory.openInventory()",
"def load():\n inventory_path = os.path.join(get_kolla_cli_etc(), INVENTORY_PATH)\n data = ''\n try:\n if os.path.exists(inventory_path):\n data = sync_read_file(inventory_path)\n\n if data.strip():\n inventory = jsonpickle.decode(data)\n\n # upgrade version handling\n if inventory.version != inventory.class_version:\n inventory.upgrade()\n else:\n inventory = Inventory()\n Inventory.save(inventory)\n except Exception:\n raise FailedOperation(\n u._('Loading inventory failed. : {error}')\n .format(error=traceback.format_exc()))\n return inventory",
"async def inventory(self, ctx):\n items = session.query(Inventory).filter_by(player_id=ctx.author.id).all()\n x = PrettyTable()\n x.field_names = [\"Name\", \"Type\", \"Description\", \"Quantity\"]\n if items is None:\n await ctx.send(\"No Items in your inventory, time to get to work!\")\n else:\n for item in items:\n item_obj = session.query(Item).filter_by(id=item.item_id).first()\n x.add_row([item_obj.name, item_obj.types, item_obj.description, item.quantity])\n\n await ctx.send(emb(x))",
"def inventory_api():\n if not config.DEBUG:\n limit_to_localhost()\n\n return jsonify(status='ok', inventory=list_inventory())",
"def inventory(self, time: int) -> Inventory:\n self.refreshDroneStatus(time)\n return self.__inventory",
"def openinv(cls): #THIS DOESN'T NEED TO BE MODIFIED!\n\n while True:\n inventory_items = {thing.id: thing.name for thing in cls.inventory}\n inventory_items[\"exit\"] = \"Exit Inventory\"\n inventory_items[\"newln\"] = \"\"\n inventory_items[\"playername\"] = str(gray('\"{}\"'.format(cls.name)))\n inventory_items[\"lv\"] = str(gray(\"LV: {}\".format(cls.lv)))\n inventory_items[\"hp\"] = str(gray(\"HP: {}/{}\".format(cls.hp, cls.max_hp)))\n inventory_items[\"exp\"] = str(gray(\"EXP: {}/40\".format(cls.exp)))\n\n choice = Menu.menu(\n title = \"Inventory\",\n contents = inventory_items \n )\n if choice == \"exit\":\n Terminal.clear_all()\n return\n while True:\n displayed_item = next((thing for thing in cls.inventory if thing.id == choice), None)\n final_choice = Menu.menu(\n title = displayed_item.name,\n contents = {\n \"interact\":displayed_item.interact_label,\n \"inspect\":\"Inspect\",\n \"drop\":\"Drop\",\n \"back\":\"Back\"\n }\n )\n if final_choice == \"back\":\n break\n if final_choice == \"interact\":\n use = displayed_item.interact()\n Terminal.clear_all()\n print(use[\"message\"])\n if \"heal_\" in use[\"action\"]:\n cls.hp += int(use[\"action\"].replace(\"heal_\", ''))\n if cls.hp > cls.max_hp:\n cls.hp = cls.max_hp\n cls.inventory.remove(displayed_item)\n Game.standard_wait()\n break\n if final_choice == \"inspect\":\n Terminal.clear_all()\n print(displayed_item)\n Game.standard_wait()\n continue\n if final_choice == \"drop\":\n Terminal.clear_all()\n print(\"You dropped the {}\".format(displayed_item.name))\n cls.inventory.remove(displayed_item)\n Game.standard_wait()\n break",
"def get_inventory():\n return INVENTORY",
"def loaded_inventory():\n inventory = Inventory()\n nokia_phone = Item(name=\"Nokia Phone\", weight=1, value=100, itype=Item_Type.Weapon)\n stick = Item(name=\"A nice stick\", weight=12, value=13, itype=Item_Type.Weapon)\n coffee = Item(name=\"3 Liters of Coffee\", weight=3, value=20, itype=Item_Type.Consumable)\n Ear = Item(name=\"Enemy's Ear\", weight=1, value=1, itype=Item_Type.Consumable)\n silly_hat = Item(name=\"Silly hat\", weight=5, value=34, itype=Item_Type.Gear)\n inventory.pickup(nokia_phone)\n inventory.pickup(coffee)\n inventory.pickup(silly_hat)\n inventory.pickup(stick)\n inventory.pickup(Ear)\n return inventory",
"def get_inventory(self):\n return self._inventory",
"def check_inventory(self) -> None:\n self.store.check_inventory()",
"def load_inventory(self, path):\n pass",
"def get_inventory(\n self,\n ) -> Callable[[inventory.GetInventoryRequest], Awaitable[inventory.Inventory]]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"get_inventory\" not in self._stubs:\n self._stubs[\"get_inventory\"] = self.grpc_channel.unary_unary(\n \"/google.cloud.osconfig.v1alpha.OsConfigZonalService/GetInventory\",\n request_serializer=inventory.GetInventoryRequest.serialize,\n response_deserializer=inventory.Inventory.deserialize,\n )\n return self._stubs[\"get_inventory\"]",
"def inventory(self):\n return self._inventory",
"async def inventory(self, ctx, target: str = None):\n target_found, target = await target_parser(ctx, target)\n if target_found:\n if is_registered(target.id):\n inventories = get_file(\"inventories\")\n items = get_file(\"items\")\n embed = discord.Embed(color=default_color)\n embed.set_author(name=f\"📦 Inventaire de {target.name}\")\n\n if inventories[str(target.id)][\"items\"]:\n name_column = \"\"\n tier_column = \"\"\n float_column = \"\"\n\n for item in inventories[str(target.id)][\"items\"]:\n name_column += f\"• **{items[item['id']]['name']}** `{item['id']}` \\n\"\n tier_column += f\" *{items[item['id']]['tier']}* \\n\"\n showed_float = str(item[\"float\"])\n for i in range(5 - len(str(item[\"float\"]))):\n showed_float += \"0\"\n float_column += f\" __{showed_float}__ • **{item['points']}**\\n\"\n embed.add_field(name = \"Item (ID)\", value=name_column)\n embed.add_field(name = \"Tier\", value=tier_column)\n embed.add_field(name = \"Float • Points\", value=float_column)\n else:\n embed.add_field(name = \"Inventory\", value=\"`Vous n'avez pas d'items`\", inline=False)\n\n if inventories[str(target.id)][\"powers\"]:\n powers = get_file(\"powers\")\n powers_column = \"\"\n for power in inventories[str(target.id)][\"powers\"]:\n powers_column += f\"• **{powers[power]['name']}** `{power}`\\n\"\n embed.add_field(name=\"Power-Ups\", value=powers_column, inline=False)\n else:\n embed.add_field(name=\"Power-Ups\", value=\"`Vous n'avez pas de power-ups`\", inline=False)\n\n if inventories[str(ctx.author.id)][\"shares\"]:\n share_column = \"\"\n for key in inventories[str(ctx.author.id)][\"shares\"].keys():\n share_column += f\"• $**{key.upper()}** : `{inventories[str(ctx.author.id)]['shares'][key]}`\\n\"\n embed.add_field(name=\"Actions\", value=share_column, inline=False)\n else:\n embed.add_field(name=\"Actions • Quantité\", value=\"`Vous n'avez pas d'actions`\")\n\n embed = set_footer(embed, ctx)\n await ctx.send(embed=embed)\n else:\n await gen_error(\"missing_player\", ctx)\n else:\n await gen_error(\"invalid_synthax\", ctx)",
"async def list_inventory_endpoint(request):\n hotel_id = request.args[\"hotel_id\"][0]\n start_date = request.args[\"start_date\"][0]\n end_date = request.args[\"end_date\"][0]\n inventory = model.list_inventory(hotel_id, start_date, end_date)\n if inventory == model.OPERATION_ERROR_RETURN_CODE:\n return json({\"success\": False})\n return json({\"success\": True, \"inventory\": inventory})",
"def consume(self) -> None:\n\t\tentity = self.parent\n\t\tinventory = entity.parent\n\t\tif isinstance(inventory, components.inventory.Inventory):\n\t\t\tinventory.items.remove(entity)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Retrieve all instances of OSLicence
|
def find_all():
return ItopapiPrototype.find_all(ItopapiOSLicence)
|
[
"def get_socios(self):\n return self.__socios",
"def list_silos(self, kwargs):\n verbose = kwargs.get(\"verbose\", False)\n attributes = ALL if verbose else [\"cn\", \"objectClass\"]\n\n self.display(\n self.engine.query(\n self.engine.SILOS_FILTER(),\n attributes, base=','.join([\"CN=AuthN Policy Configuration,CN=Services,CN=Configuration\", self.engine.base_dn])\n ),\n verbose\n )",
"def getobjsense(self): # 3\n res,resargs = self.__obj.getobjsense()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _sense_return_value = resargs\n _sense_return_value = objsense(_sense_return_value)\n return _sense_return_value",
"def listObjects(instance):\n # Get a cursor from the DB connection.\n cursor = Conection.connect(DB_USER, DB_PASSWD, instance, DB_HOST)\n \n # Compose the SQL query to find all the orbits/SSM objects. We do this with \n # a simle query to the derivedobjects table since we realy only need the\n # ssm_id values.\n maxMJD = completedPrecoveryMaxDate(instance)\n if(maxMJD == None):\n return([], None)\n \n sql = 'select distinct(ssm_id) from derivedobjects where ssm_id is not null'\n sql += ' and status = \"I\"'\n # sql += ' and updated >= \"%s\"' %(minModifiedDate)\n # <-- end if\n \n nRes = cursor.execute(sql)\n return([x[0] for x in cursor.fetchall()], float(maxMJD))",
"def show(term: str) -> List[NamespacedOligotype]:\n\n if term:\n return [\n from_dict(NamespacedOligotype, record.__dict__)\n for record in (\n NamespacedOligotypesModel.query.filter(\n (NamespacedOligotypesModel.namespace == term)\n | (NamespacedOligotypesModel.oligotype == term)\n ).order_by(\n NamespacedOligotypesModel.namespace.desc(),\n NamespacedOligotypesModel.oligotype.desc(),\n )\n )\n ]\n\n return [\n from_dict(NamespacedOligotype, record.__dict__)\n for record in (\n NamespacedOligotypesModel.query.order_by(\n NamespacedOligotypesModel.namespace.desc(),\n NamespacedOligotypesModel.oligotype.desc(),\n )\n )\n ]",
"def GetObjects(self): \r\n return self.model.GetObjects()",
"def iter_all(self):\n return self.opportunities.find()",
"def get_entries(self):\n results = []\n results.extend(\n self.find_by_st('urn:schemas-konnected-io:device:Security:1'))\n return results",
"def get_all_elections(self) -> list:",
"def list_os(context):\n occupational_standards = OccupationalStandard.objects.filter(\n is_draft=False\n ).annotate(Count('code'))\n return {\n 'occupational_standards': occupational_standards,\n 'request': context['request'],\n }",
"def test_e_instancia_get_all_competence_rs(self):\n\n paginate = GetAllRQ(page=1, page_size=12)\n\n res = self.api.get_all(paginate)\n\n if isinstance(res, ConnectionExceptionRS):\n raise unittest.SkipTest(res.msg)\n\n self.assertIsInstance(res, GetAllCompetenceRS)",
"def listCriteria():",
"def return_offices(self, *params):\n cprint(\"Returning the offices...\", \"cyan\")\n query = self.session.query(AmityOffices).all()\n for office in query:\n Amity.offices.append(office.Room_name)\n\n return Amity.offices",
"def get_all_locations(self):",
"def get_all(cls, session):\n logging.info(\"Retrieving all %s data from switch\", cls.__name__)\n\n try:\n response = session.request(\"GET\", cls.base_uri)\n except Exception as e:\n raise ResponseError(\"GET\", e)\n\n if not utils._response_ok(response, \"GET\"):\n raise GenericOperationError(response.text, response.status_code)\n\n qos_dict = {}\n\n data = json.loads(response.text)\n uri_list = session.api.get_uri_from_data(data)\n for uri in uri_list:\n name, qos = cls.from_uri(session, uri)\n qos_dict[name] = qos\n\n return qos_dict",
"def get_objects(self):\n\t\treturn self.__objects",
"def list_cursos():\n schema = CursoSchema()\n data = Curso.query.all()\n cursos = [schema.dump(c) for c in data]\n\n return cursos",
"def list(self) -> List[DomainModel]:",
"def odors(self, session):\n odors = session.query(Timepoint.odor).filter(\n Timepoint.id.between(self.start_timepoint_id, self.end_timepoint_id))\n return np.array(odors.all()).flatten()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Retrieve the ItopapiOrganization corresponding to this server
|
def find_organization(self):
if self.org_id is not None:
        return ItopapiPrototype.get_itop_class('Organization').find(self.org_id)
return None
|
[
"def get_organization(self) -> dict:\n try:\n organizations = self.dashboard.organizations.getOrganizations()\n except meraki.exceptions.APIError as exception:\n print(f\"Error: Unable to get organizations: {exception}\")\n return None\n\n if not organizations:\n print(\"Error: No organizations found\")\n return None\n\n # print(\"len(organizations) == {}\".format(len(organizations)))\n if len(organizations) > 1:\n org_id = None\n if 'meraki' in self.configuration and 'org_id' in self.configuration['meraki']:\n org_id = self.configuration['meraki']['org_id']\n\n for organization in organizations:\n if self.verbose:\n #print(\"org={}\".format(organizations))\n print(\"Organization: {} {}\".format(organization['id'], organization['name']))\n if org_id == organization['id']:\n return organization\n if self.orgid == organization['id']:\n return organization\n if not org_id:\n print(\"Error: more than one organization available, but none specified\")\n else:\n print(f\"Error: org_id not found: {org_id}\")\n sys.exit(9)\n return organizations[0]",
"def organization(self):\n return self._organization",
"def getOrganization(self):\n return _libsbml.ModelCreator_getOrganization(self)",
"def getOrganization(self, id):\n text = self.generateRequest('/v2.1/Organisations/' + str(id), 'GET', '')\n return json.loads(text)",
"def get_organization(self):\n pos_or_org = self.position.to_object\n if pos_or_org is None:\n return None\n elif pos_or_org.portal_type == 'position':\n return pos_or_org.get_organization()\n elif pos_or_org.portal_type == 'organization':\n return pos_or_org",
"def organization_get():\n try:\n o = db.session.query(Organization).one()\n except NoResultFound:\n return abort(400, 'No organization details found')\n \n org_schema = OrganizationFlatSchema()\n result = org_schema.dumps(o)\n\n return jsonify(result.data)",
"def get(self, org_id):\n\n params = {\n 'filter': 'id',\n 'eq': org_id\n }\n\n org = self.base_request.request(\n 'organization', 'GET', params=params,\n endpoint=self.settings.get('pine_endpoint'), login=True\n )['d']\n\n if not org:\n raise exceptions.OrganizationNotFound(org_id)\n\n return org[0]",
"def fetch_organization(organization):\n return fetch_json(organization_url, organization)",
"def organizations(self):\n return self.get('{}/orgs'.format(ApiVersion.A1.value))",
"def obj2org(self, obj):\n return obj.organization",
"def organization(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"organization\")",
"async def get_organization(request: Request, org: str):\n\n redis = request.app.state.redis\n organizations_obj = orjson.loads(await redis.get_key(\"influxdb_organizations\"))\n if org not in organizations_obj:\n logger.warning(\"Organization %s not found.\", org)\n raise HTTPException(\n status_code=404, detail=\"Organization {} not found.\".format(org))\n return {org: organizations_obj[org]}",
"def _get_orgs(self):\n return self.api.get('/v2/organizations')",
"def get_organization(self, id: str) -> dict[str, Any]:\n params = {}\n\n return self.client.get(self._url(id), params=params)",
"def get_organization(\n self, organization_id: Union[str, int], *, params: Optional[dict] = None\n ) -> \"resource_types.Organization\":\n\n return communicator.Organization(self.__requester).from_id(\n organization_id=organization_id, parameters=params\n )",
"def _get_organizations(self):\n return self.__organizations",
"def organization_id(self) -> str:\n return pulumi.get(self, \"organization_id\")",
"def sub_organization(self) -> object:\n return self._sub_organization",
"def organization(self, *, name: str):\n return self.github.organization(name)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Determine whether a Roman token is the next logical Roman token. This test applies to Roman levels 3 or 6 and checks whether the next token is both a Roman numeral and the next bigger Roman numeral. For instance, 'v' is a valid Roman numeral, but unless the current Roman evaluates to 4, the 'v' must be a level 1 alpha marker.
|
def roman_surf_test(self, token, next_token):
if not token:
return False
for each in [token, next_token]:
if not roman_to_int(each):
return False
return roman_to_int(next_token) == roman_to_int(token) + 1
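
A minimal, self-contained sketch of the check above; the roman_to_int here is a simplified stand-in for the helper the method assumes (lowercase numerals only), and the assertions show why 'v' only counts as the next Roman marker when the current token evaluates to 4:

def roman_to_int(token):
    # Simplified stand-in for the helper assumed above; returns None for non-Roman input.
    values = {"i": 1, "v": 5, "x": 10, "l": 50, "c": 100, "d": 500, "m": 1000}
    if not token or any(ch not in values for ch in str(token).lower()):
        return None
    total, prev = 0, 0
    for ch in reversed(str(token).lower()):
        val = values[ch]
        total += val if val >= prev else -val
        prev = max(prev, val)
    return total

def roman_surf_test(token, next_token):
    # Same logic as the method above, without the class context.
    if not token:
        return False
    for each in (token, next_token):
        if not roman_to_int(each):
            return False
    return roman_to_int(next_token) == roman_to_int(token) + 1

assert roman_surf_test("iv", "v") is True   # 4 -> 5, so 'v' is the next Roman marker
assert roman_surf_test("i", "v") is False   # 1 -> 5 skips ahead, so 'v' is not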
|
[
"def roman_test(id_token):\n roman_int = roman_to_int(id_token)\n if not roman_int:\n return False\n if LEVEL_STATE.level() not in [3, 6]:\n return False\n if roman_int - 1 == roman_to_int(LEVEL_STATE.current_token()):\n return True",
"def is_roman(x):\n x = str(x).upper()\n if x == 'IIII': return True\n try:\n i = roman2int(x)\n except ValueError:\n return False\n return int2roman(i) == x",
"def _isRomanNumber(strWord):\n return NumberFormula.ROMANNUMBERREGEX.match(strWord) != None",
"def leading_numeral_is(numeral):\n return a_roman[:len(numeral)] == numeral",
"def is_roman_numeral(s: str) -> bool:\n if not isinstance(s, str):\n raise TypeError(\"Only strings may be tested \")\n return bool(_romanNumeralPattern.match(s))",
"def solution(roman):\n r = roman.upper()\n nums = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}\n total = 0\n for i, c in enumerate(r):\n if i < len(r) - 1:\n if nums[r[i]] < nums[r[i+1]]:\n total -= nums[c]\n else:\n total += nums[c]\n else:\n total += nums[c]\n return total",
"def leading_numerals_remain():\n return len(a_roman) > 0",
"def isOriented(binary):\n rot = 0\n while rot < 4:\n if binary[1][1] == 0 and binary[1][2] == 0 and binary[2][1] == 0 and binary[5][6] == 1:\n return True, rot, binary\n else:\n binary = np.rot90(binary)\n rot += 1\n return False, False, binary",
"def rom_or_int(self):\n # Checks if user input is a integer\n if self.input.isdigit():\n number = int(self.input)\n print(\"The equivalent Roman Numeral is: \" + self.int_to_rom(number) + \".\\n\")\n # Checks if user input is a roman numeral, if so run method \n elif self.isroman():\n self.rom_to_int()\n # If not, print this statement\n else:\n print(\"Entry is neither in roman numerals or an integer.\\n\")",
"def test_021_5(self):\n self.__assertTranslatesFromRoman(\"V\", 5)",
"def toRoman(n):\n pass",
"def test_024_40(self):\n self.__assertTranslatesFromRoman(\"XL\", 40)",
"def create_roman_numerals_validator() -> RomanNumeralsValidator:\n return RomanNumeralsValidator()",
"def test_031_1000(self):\n self.__assertTranslatesFromRoman(\"M\", 1000)",
"def isoperator(token):\n\n # Token is an operator\n return token and token.lower() in Token.OPERATORS",
"def fromRoman(s):\n if not s:\n raise InvalidRomanNumeralError, 'Input can not be blank'\n if not romanNumeralPattern.search(s):\n raise InvalidRomanNumeralError, 'Invalid Roman numeral: %s' % s\n\n result = 0\n index = 0\n for numeral, integer in romanNumeralMap:\n while s[index:index+len(numeral)] == numeral:\n result += integer\n index += len(numeral)\n return result",
"def test_018_2(self):\n self.__assertTranslatesFromRoman(\"II\", 2)",
"def test_022_9(self):\n self.__assertTranslatesFromRoman(\"IX\", 9)",
"def islogicseparator(token):\n\n # Token is a logic separator\n return token and token.lower() in Token.LOGIC_SEPARATORS"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Fetch a token from the server using the provided user and password, so that subsequent web service requests for waveforms are authenticated and can potentially access restricted data.
|
def _retrieve_jwt_token(self, user, password):
        # force https so that we don't send around tokens insecurely
url = 'https://{}/api/token'.format(urlparse(self.base_url).netloc)
# paranoid: check again that we only send the token to https
if urlparse(url).scheme != "https":
msg = 'This should not happen, please file a bug report.'
raise Exception(msg)
# convert to json
data = json.dumps({"username": user, "password": password})
# encode
data = bytes(data, "utf-8")
headers = {"Content-Type": "application/json"}
html = urllib_request.Request(url, data=data, headers=headers)
# decode('utf-8')
result = urllib_request.urlopen(html).read().decode("utf-8")
dic = json.loads(result)
# get token
self.jwt_access_token = dic['access']
self.jwt_refresh_token = dic['refresh']
if self.debug:
print('Got temporary access/refresh: {}/{}'.format(self.jwt_access_token, self.jwt_refresh_token))
return
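
The snippet stores the access/refresh pair but does not show how the access token is attached to later waveform requests. As a purely hypothetical illustration (not part of the client above), a common pattern is a Bearer authorization header:

import urllib.request

def authed_request(url, jwt_access_token):
    # Hypothetical helper: attach the stored access token as a standard Bearer header.
    headers = {"Authorization": "Bearer {}".format(jwt_access_token)}
    return urllib.request.Request(url, headers=headers)

req = authed_request("https://example.org/fdsnws/dataselect/1/query", "some-access-token")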
|
[
"def get_token(token_url, user_name, pass_word):\n # payload params for the post request\n print(\"Generating ArcGIS Online authentication token.\")\n params = {'f': 'pjson', 'username': user_name, 'password': pass_word,\n 'referer': referer, 'expiration': 1440}\n data = urllib.urlencode(params)\n req = urllib2.Request(token_url, data)\n response = urllib2.urlopen(req)\n json_result = jsonloads(response.read())\n if 'token' in json_result:\n print('ArcGIS Online authentication token is generated.')\n token = json_result['token']\n return token\n else:\n print '{}: {}'.format(response.code, response.msg)\n return None",
"def _request_token(self):\n response = requests.post(\n \"%s/generateToken\" % self.root_uri.rstrip(\"/\"), {\n \"username\": self.username,\n \"password\": self.password,\n \"expiration\": '60',\n \"referer\": 'https://wsdot.maps.arcgis.com',\n \"f\": 'json'\n })\n\n token_info = response.json()\n if \"error\" in token_info:\n raise TokenError(token_info[\"error\"])\n self._token = token_info[\"token\"]\n self._expires = datetime.fromtimestamp(token_info[\"expires\"] / 1000)",
"def get_token():\n\n global Token\n\n headers = {\n 'Content-Type': 'application/json'\n }\n data = {\n \"user\": {\n \"email\": SECRETS['api']['email'],\n \"password\": SECRETS['api']['pass']\n }\n }\n\n resp = urequests.post(SECRETS['api']['base_url']+'/api/users/login', json=data, headers=headers)\n\n if resp.status_code == 200:\n data = ujson.loads(resp.text)\n Token = data['jwt']\n print('Got token.')\n # blink_led2(duration=30, iterations=3)\n else:\n print(\"Error while retrieving token: {}: {}\".format(str(resp.status_code), resp.text))",
"def getToken(url, user, tenant, password):\n url = url + '/tokens'\n data = { \n \"auth\":{\n \"tenantName\": tenant,\n \"passwordCredentials\":{\n \"username\": user,\n \"password\": password\n }\n }\n }\n jsonPayload = json.dumps(data)\n return sendRequest(url, payload=jsonPayload)",
"def get_token(self, user):\n data = {\n 'username': user['username'],\n 'password': user['password']\n }\n resp = self.client.post('/api/token/', data, format='json')\n resp_content = json.loads(resp.content.decode('utf-8'))\n return resp_content[\"access\"]",
"def get_token(self):\n\n #set url\n url = self.server_url + \"V1/Session\"\n #set data\n #this is where the login goes\n #Authentication_id is required for external users only\n data = {\n \"username\": self.user,\n \"password\": self.pwd,\n \"authentication_id\": self.authentication_id\n }\n #run api call\n resp = requests.post(url,data=json.dumps(data),headers=self.headers, verify=True)\n\n token = resp.json()[\"token\"]\n self.headers[\"Token\"] = token\n print (\"I GOT A NETBRAIN TOKEN!!!: \" + token)\n return (token)",
"def get_auth_token_student():\n\n token = g.user.generate_auth_token(600)\n return jsonify({'token': token.decode('ascii'), 'duration': 600})",
"def get_token():\n if g.current_user.is_anonymous or g.token_used:\n return unauthorized('Invalid credentials')\n return jsonify({'token': g.current_user.generate_auth_token(\n expiration=3600), 'expiration': 3600})",
"def get_auth_token():\n token = g.user.generate_auth_token(600)\n return jsonify({'token': token.decode('ascii'), 'duration': 600})",
"def UserToken(self) -> object:",
"def get(self):\n duration = int(request.args.get('duration', app.config['AUTH_DURATION']))\n token = g.user.generate_auth_token(expiration=duration)\n return jsonify({'token': token.decode('ascii'), 'duration': duration})",
"def get_auth_token_teacher():\n\n token = g.user.generate_auth_token(600)\n return jsonify({'token': token.decode('ascii'), 'duration': 600})",
"def _get_token(self):\n return user.get_token()",
"def _fetchJWT(self) -> str:\n if self._cached_jwt_expiry <= time.time():\n credentials = {\"username\": self._username, \"password\": self._password}\n headers = {\"applicationId\": self._appid,\n \"Content-Type\": \"application/json\"}\n response = requests.post(urljoin(self._api_root, \"api/v0-1/auth\"),\n data=json.dumps(credentials), headers=headers)\n if not response.ok:\n raise Exception(\n f\"Didn't get HTTP 200 (OK) response - status_code from server: {response.status_code}\\n{response.text}\")\n self._cached_jwt_token = response.json()[\"token\"]\n self._cached_jwt_expiry = response.json()[\"exp\"]\n return self._cached_jwt_token",
"def basic_auth(user, password):\n return AuthToken(\"basic\", user, password)",
"def get_token(self, user: str) -> dict:\n if self.token:\n return self.token\n\n auth = self.info.get('auth', None)\n if auth['type'] != 'token':\n log.info(f'Auth type for API {self.name} is not \"token\". No token needed.')\n return\n\n if type(user) == str:\n user = User(user)\n if not user.validated and not user.is_netrc_valid:\n raise BrainError(f'User {user.user} is not netrc validated! Cannot access credentials.')\n\n # construct the token url\n valid_host = user._validated_netrc_host or 'api.sdss.org'\n username, password = user.netrc.read_netrc(valid_host)\n token_url = self.construct_token_url()\n\n # submit the token login request\n data = send_post_request(token_url, data={'username': username, 'password': password})\n\n # extract the token\n token = self._extract_access_token(data)\n\n if refresh := data.get('refresh_token'):\n tok_name = f'{self.name.upper()}_API_REFRESH_TOKEN'\n log.info(f'Save this refresh token as either a \"{tok_name}\" environment variable in your '\n f'.bashrc or as \"{tok_name.lower()}\" in your custom sdss_brain.yml config file.')\n\n out = {'access': token}\n if refresh:\n out['refresh'] = refresh\n return out",
"def auth_user_part2(self):\n # Get auth token\n auth_url = \"https://www.box.com/api/1.0/rest?action=get_auth_token&api_key={0}&ticket={1}\".format(self._API_KEY, self._TICKET)\n print \"Auth URL = \"+auth_url\n result = self.request.request_from_url(auth_url)\n auth_token = self.xmlreader.parseString(result, \"auth_token\")\n\n print auth_token\n\n with open(self._SAVE_PATH, \"w\") as f:\n print \"Save auth token to file\"\n f.write(auth_token)\n print \"Done\"",
"def login():\n if not request.json or not '_id' in request.json or not 'password' in request.json:\n abort(400)\n result = request.json\n user_details = mongo.db.users.find_one_or_404({\"_id\":result[\"_id\"]})\n authorised = check_password_hash(user_details[\"password\"], result[\"password\"])\n \n if not authorised:\n return {'error': 'Email or password invalid'}, 401\n\n expires = timedelta(days=7)\n access_token = create_access_token(identity=user_details[\"_id\"], expires_delta=expires)\n return {'token': access_token}, 200",
"def downloadtoken():\n payload = _generate_auth_token()\n return send_file(BytesIO(payload), as_attachment=True, attachment_filename=\"%s-token\" % \\\n session['iyo_user_info']['username'])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check whether the JWT token is valid.
|
def _validate_jwt_token(self):
        # force https so that we don't send around tokens insecurely
url = 'https://{}/api/token/verify'.format(urlparse(self.base_url).netloc)
# paranoid: check again that we only send the token to https
if urlparse(url).scheme != "https":
msg = 'This should not happen, please file a bug report.'
raise Exception(msg)
if not self.jwt_access_token:
raise FDSNUnauthorizedException("Unauthorized, authentication "
"required.", )
# convert to json
data = json.dumps({"token": self.jwt_access_token})
# encode
data = bytes(data, "utf-8")
headers = {"Content-Type": "application/json"}
html = urllib_request.Request(url, data=data, headers=headers)
# decode('utf-8')
try:
result = urllib_request.urlopen(html).read().decode("utf-8")
dic = json.loads(result)
valid = not bool(dic)
if self.debug:
print('Valid token : {}'.format(valid))
return valid
except urllib_error.HTTPError as e:
return False
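
The valid = not bool(dic) check assumes the verify endpoint returns an empty JSON object for a valid token and a non-empty error body otherwise; a minimal illustration of that assumption:

import json

assert not bool(json.loads("{}"))                            # empty body -> token treated as valid
assert bool(json.loads('{"detail": "Token is invalid"}'))    # error body -> treated as invalid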
|
[
"def __token_is_valid(self):\n\n if not self.__login_token or len(self.__login_token) < 10:\n # Token is not set or totally invalid\n return False\n\n try:\n jwt.decode(self.__login_token, verify = False)\n return True\n except:\n # Most likely the token is expired as `exp` is in the past\n return False",
"def _validate_token(self):\n try:\n return self.client.check_user(\"test\").result == \"test\"\n except AuthError:\n return False",
"def validate_token():\n try:\n token = validate_auth()\n except Unauthorized:\n return jsonify(valid=False, expires_in=0)\n expires = oidc.user_getfield('exp')\n delta = expires - datetime.now().timestamp()\n return jsonify(valid=True, expires_in=delta)",
"def validate_token(user, tkn):\n try:\n decoded = jwt.decode(tkn, KEY)\n if decoded['user'] == user:\n stored_token = User.get(User.username == user).token\n if stored_token == tkn:\n return True\n return False\n except jwt.ExpiredSignatureError:\n return HTTPResponse(status=400, body={\"msg\":\"Validation error.\"})",
"def _validateToken(self, token):\n self._ensure_keys()\n return util.verify(token, self.servicePublic,\n self.appConfig.getSecurityID(), None)",
"def validate(cls, token):\n if not cls.JWT_REGEX.match(token):\n raise ValueError('Invalid JWT token')\n\n return token",
"def verify_token(self, token):\n return False",
"def validate_token(self, token) -> bool:\n try:\n Token.objects.get(key=token)\n return True\n except models.ObjectDoesNotExist:\n return False",
"def verify_jwt(token):\n return jwt.decode(token.encode(), SECRET_KEY)",
"def _verify_auth_token(self, pub_key, auth_token) -> bool:\n try:\n jwt.decode(auth_token, pub_key, algorithms=['RS256'])\n return True\n except jwt.exceptions.DecodeError:\n return False",
"def is_valid_token(auth):\n myLogger.debug(ENTER)\n ret = None\n if (auth is None) or (len(auth) == 0):\n myLogger.warning('Authorization not found in the header parameters')\n else:\n auth_rest = auth[6:] # Get rid of 'Basic '\n decoded = (b64decode(auth_rest)).decode()\n index = decoded.find(':NOT')\n if index == -1:\n token = decoded\n else:\n token = decoded[0:index]\n myLogger.debug('token: ' + token)\n ret = mySession.validate_json_web_token(token)\n myLogger.debug(EXIT)\n return ret",
"def _validate_token(self):\n expire = datetime.strptime(self.access.expire, \"%Y-%m-%dT%H:%M:%S.%fZ\")\n if expire > datetime.utcnow():\n return True\n else:\n return False",
"def _assert_jwt_is_valid(self, jwt_token, should_be_asymmetric_key):\n self.assert_valid_jwt_access_token(\n jwt_token, self.user, self.default_scopes, should_be_asymmetric_key=should_be_asymmetric_key,\n )",
"def validateReceivedToken(self, token):\n if self.mock:\n return True\n self._ensure_keys()\n return util.verify(token, self.servicePublic, None,\n self.appConfig.getSecurityID())",
"def is_valid_token(token):\n if token not in auth.TOKEN_DB.keys() or not auth.TOKEN_DB[token]['log']:\n return False\n return True",
"def test_token_valid(self):\n current_token = Token.objects.get(user_id=self.user_id,\n value=self.token)\n\n a = token_valid(current_token)\n self.assertEqual(a, True)",
"def validate_token(token):\n s = Serializer(current_app.config['SECRET_KEY'])\n try:\n data = s.loads(token)\n except (BadSignature, SignatureExpired):\n return False\n\n user = User.query.get(data['id'])\n if user is None:\n return False\n\n # 设置全局的current_user为该user!\n g.current_user = user\n return True",
"def check_token_validate(self, token):\n payload = {'key': self._lr_object._get_api_key(), 'secret': self._lr_object._get_api_secret(), 'access_token': token}\n url = SECURE_API_URL + \"api/v2/access_token/Validate/\"\n return self._lr_object._get_json(url, payload)",
"def token_valid(self):\n try:\n return self.disk.check_token()\n except:\n return False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Helper method to fetch responses via get_stations() and attach them to each trace in the stream.
|
def _attach_responses(self, st):
netids = {}
for tr in st:
if tr.id not in netids:
netids[tr.id] = (tr.stats.starttime, tr.stats.endtime)
continue
netids[tr.id] = (
min(tr.stats.starttime, netids[tr.id][0]),
max(tr.stats.endtime, netids[tr.id][1]))
inventories = []
for key, value in netids.items():
net, sta, loc, chan = key.split(".")
starttime, endtime = value
try:
inventories.append(self.get_stations(
network=net, station=sta, location=loc, channel=chan,
starttime=starttime, endtime=endtime, level="response"))
except Exception as e:
warnings.warn(str(e))
st.attach_response(inventories)
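
The per-id bookkeeping above simply widens one (starttime, endtime) window per trace id before querying station metadata; a tiny self-contained illustration of that merge step (with an example SEED id and plain numbers standing in for UTCDateTime objects):

netids = {}
for trace_id, start, end in [("IU.ANMO.00.BHZ", 0, 60), ("IU.ANMO.00.BHZ", 30, 120)]:
    if trace_id not in netids:
        netids[trace_id] = (start, end)
        continue
    netids[trace_id] = (min(start, netids[trace_id][0]), max(end, netids[trace_id][1]))

assert netids["IU.ANMO.00.BHZ"] == (0, 120)  # one widened window per trace id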
|
[
"def stations():\n print(\"server received request for stations data...\")\n return jsonify(stations_data)",
"async def _fetch_raw_stations(session: ClientSession, headers: dict, query_builder: BuildQuery) -> dict:\n # We don't know how many pages until our first call - so we assume one page to start with.\n total_pages = 1\n page_count = 0\n while page_count < total_pages:\n # Build up the request URL.\n url, params = query_builder.query(page_count)\n LOGGER.debug('loading station page %d...', page_count)\n async with session.get(url, headers=headers, params=params) as response:\n station_json = await response.json()\n LOGGER.debug('done loading station page %d.', page_count)\n # Update the total page count.\n total_pages = station_json['page']['totalPages']\n for station in station_json['_embedded']['stations']:\n yield station\n # Keep track of our page count.\n page_count = page_count + 1",
"def _get_stations_data(self):\n return [station for network in self.__networks_objects_list for station in network[\"Stations\"]]",
"def stations():\n\n return station_list",
"def getSatellitesAboveParsed(self):\n sats = self.getSatellitesAboveComplete()\n print sats\n\n # Create our json result\n result = {}\n for key in sats.keys():\n result[key] = {\n \"elevation\": sats[key][\"prediction\"][\"elevation\"],\n \"visibility\": sats[key][\"prediction\"][\"visibility\"],\n \"geostationary\": sats[key][\"prediction\"][\"geostationary\"],\n \"satname\": sats[key][\"satcat\"][\"SATNAME\"],\n \"object_type\": sats[key][\"satcat\"][\"OBJECT_TYPE\"],\n \"country\": sats[key][\"satcat\"][\"COUNTRY\"],\n \"launch_year\": sats[key][\"satcat\"][\"LAUNCH_YEAR\"],\n \"launch\": sats[key][\"satcat\"][\"LAUNCH\"]\n \n }\n \n return result",
"def _get_network(self, log_entries):\n\n network_traffic = {}\n for log_entry in log_entries:\n message = jloads(log_entry[\"message\"])\n method = message[\"message\"][\"method\"]\n params = message[\"message\"][\"params\"]\n if method not in [\n \"Network.requestWillBeSent\",\n \"Network.responseReceived\",\n \"Network.loadingFinished\",\n ]:\n continue\n if method != \"Network.loadingFinished\":\n request_id = params[\"requestId\"]\n loader_id = params[\"loaderId\"]\n if loader_id not in network_traffic:\n network_traffic[loader_id] = {\n \"requests\": {},\n \"encoded_data_length\": 0,\n }\n if request_id == loader_id:\n if \"redirectResponse\" in params:\n network_traffic[loader_id][\"encoded_data_length\"] += params[\n \"redirectResponse\"\n ][\"encodedDataLength\"]\n if method == \"Network.responseReceived\":\n network_traffic[loader_id][\"type\"] = params[\"type\"]\n network_traffic[loader_id][\"url\"] = params[\"response\"][\"url\"]\n network_traffic[loader_id][\"remote_IP_address\"] = None\n if \"remoteIPAddress\" in params[\"response\"].keys():\n network_traffic[loader_id][\"remote_IP_address\"] = params[\n \"response\"\n ][\"remoteIPAddress\"]\n network_traffic[loader_id][\"encoded_data_length\"] += params[\n \"response\"\n ][\"encodedDataLength\"]\n network_traffic[loader_id][\"headers\"] = params[\"response\"][\n \"headers\"\n ]\n network_traffic[loader_id][\"status\"] = params[\"response\"][\n \"status\"\n ]\n network_traffic[loader_id][\"security_state\"] = params[\n \"response\"\n ][\"securityState\"]\n network_traffic[loader_id][\"mime_type\"] = params[\"response\"][\n \"mimeType\"\n ]\n if \"via\" in params[\"response\"][\"headers\"]:\n network_traffic[loader_id][\"cached\"] = True\n else:\n if request_id not in network_traffic[loader_id][\"requests\"]:\n network_traffic[loader_id][\"requests\"][request_id] = {\n \"encoded_data_length\": 0\n }\n if \"redirectResponse\" in params:\n network_traffic[loader_id][\"requests\"][request_id][\n \"encoded_data_length\"\n ] += params[\"redirectResponse\"][\"encodedDataLength\"]\n if method == \"Network.responseReceived\":\n network_traffic[loader_id][\"requests\"][request_id][\n \"type\"\n ] = params[\"type\"]\n network_traffic[loader_id][\"requests\"][request_id][\n \"url\"\n ] = params[\"response\"][\"url\"]\n network_traffic[loader_id][\"requests\"][request_id][\n \"remote_IP_address\"\n ] = None\n if \"remoteIPAddress\" in params[\"response\"].keys():\n network_traffic[loader_id][\"requests\"][request_id][\n \"remote_IP_address\"\n ] = params[\"response\"][\"remoteIPAddress\"]\n network_traffic[loader_id][\"requests\"][request_id][\n \"encoded_data_length\"\n ] += params[\"response\"][\"encodedDataLength\"]\n network_traffic[loader_id][\"requests\"][request_id][\n \"headers\"\n ] = params[\"response\"][\"headers\"]\n network_traffic[loader_id][\"requests\"][request_id][\n \"status\"\n ] = params[\"response\"][\"status\"]\n network_traffic[loader_id][\"requests\"][request_id][\n \"security_state\"\n ] = params[\"response\"][\"securityState\"]\n network_traffic[loader_id][\"requests\"][request_id][\n \"mime_type\"\n ] = params[\"response\"][\"mimeType\"]\n if \"via\" in params[\"response\"][\"headers\"]:\n network_traffic[loader_id][\"requests\"][request_id][\n \"cached\"\n ] = 1\n else:\n request_id = params[\"requestId\"]\n encoded_data_length = params[\"encodedDataLength\"]\n for loader_id in network_traffic:\n if request_id == loader_id:\n network_traffic[loader_id][\n \"encoded_data_length\"\n ] += encoded_data_length\n 
elif request_id in network_traffic[loader_id][\"requests\"]:\n network_traffic[loader_id][\"requests\"][request_id][\n \"encoded_data_length\"\n ] += encoded_data_length\n return network_traffic",
"def load_stations(self):\n extent = int((self.dates[1] - self.dates[0]).total_seconds())\n (status, stations) = SuperMAGGetInventory(self.logon, self.dates[0], extent)\n logger.info(f\"SM inventory fetch stats: {status}\")\n return stations",
"def get_stations(source_station):\n URL = \"http://api.bart.gov/api/stn.aspx\" \n PARAMS = {\"cmd\":\"stns\",\"key\":\"MW9S-E7SL-26DU-VV8V\",\"json\":\"y\"} \n\n res = requests.get(url = URL, params = PARAMS) \n station_list = []\n for stn in res.json()[\"root\"][\"stations\"][\"station\"]:\n station_list.append([stn[\"abbr\"],stn[\"name\"]])\n \n return station_list",
"async def _current_station_data(self) -> None:\n endpoint = f\"observations/station/{self._station_id}?token={self._token}\"\n json_data = await self.async_request(\"get\", endpoint)\n\n station_name = json_data.get(\"station_name\")\n\n cnv = ConversionFunctions()\n items = []\n observations = json_data.get(\"obs\")\n if observations is None:\n observations = {\"nodata\": \"NoData\"}\n\n for row in observations:\n item = {\n \"air_density\": 0 if \"air_density\" not in row else row[\"air_density\"],\n \"air_temperature\": 0\n if \"air_temperature\" not in row\n else await cnv.temperature(\n row[\"air_temperature\"], UNIT_TEMP_CELCIUS, self._to_units_temp\n ),\n \"brightness\": 0 if \"brightness\" not in row else row[\"brightness\"],\n \"dew_point\": 0\n if \"dew_point\" not in row\n else await cnv.temperature(\n row[\"dew_point\"], UNIT_TEMP_CELCIUS, self._to_units_temp\n ),\n \"feels_like\": 0\n if \"feels_like\" not in row\n else await cnv.temperature(\n row[\"feels_like\"], UNIT_TEMP_CELCIUS, self._to_units_temp\n ),\n \"heat_index\": 0\n if \"heat_index\" not in row\n else await cnv.temperature(\n row[\"heat_index\"], UNIT_TEMP_CELCIUS, self._to_units_temp\n ),\n \"lightning_strike_last_time\": None\n if \"lightning_strike_last_epoch\" not in row\n else await cnv.epoch_to_isodatetime(row[\"lightning_strike_last_epoch\"]),\n \"lightning_strike_last_distance\": 0\n if \"lightning_strike_last_distance\" not in row\n else await cnv.distance(\n row[\"lightning_strike_last_distance\"],\n UNIT_DISTANCE_KM,\n self._to_units_distance,\n ),\n \"lightning_strike_count\": 0\n if \"lightning_strike_count\" not in row\n else row[\"lightning_strike_count\"],\n \"lightning_strike_count_last_1hr\": 0\n if \"lightning_strike_count_last_1hr\" not in row\n else row[\"lightning_strike_count_last_1hr\"],\n \"lightning_strike_count_last_3hr\": 0\n if \"lightning_strike_count_last_3hr\" not in row\n else row[\"lightning_strike_count_last_3hr\"],\n \"precip_accum_last_1hr\": 0\n if \"precip_accum_last_1hr\" not in row\n else await cnv.precip(\n row[\"precip_accum_last_1hr\"],\n UNIT_PRECIP_MM,\n self._to_units_precip,\n True,\n ),\n \"precip_accum_local_day\": 0\n if \"precip_accum_local_day\" not in row\n else await cnv.precip(\n row[\"precip_accum_local_day\"],\n UNIT_PRECIP_MM,\n self._to_units_precip,\n True,\n ),\n \"precip_accum_local_yesterday\": 0\n if \"precip_accum_local_yesterday\" not in row\n else await cnv.precip(\n row[\"precip_accum_local_yesterday\"],\n UNIT_PRECIP_MM,\n self._to_units_precip,\n True,\n ),\n \"precip_rate\": 0\n if \"precip\" not in row\n else await cnv.precip(\n row[\"precip\"], UNIT_PRECIP_MM, self._to_units_precip, True\n )\n * 60,\n \"precip_minutes_local_day\": 0\n if \"precip_minutes_local_day\" not in row\n else row[\"precip_minutes_local_day\"],\n \"precip_minutes_local_yesterday\": 0\n if \"precip_minutes_local_yesterday\" not in row\n else row[\"precip_minutes_local_yesterday\"],\n \"relative_humidity\": 0\n if \"relative_humidity\" not in row\n else row[\"relative_humidity\"],\n \"station_pressure\": 0\n if \"station_pressure\" not in row\n else await cnv.pressure(\n row[\"station_pressure\"], UNIT_PRESSURE_HPA, self._to_units_pressure\n ),\n \"sea_level_pressure\": 0\n if \"sea_level_pressure\" not in row\n else await cnv.pressure(\n row[\"sea_level_pressure\"],\n UNIT_PRESSURE_HPA,\n self._to_units_pressure,\n ),\n \"station_name\": station_name,\n \"solar_radiation\": 0\n if \"solar_radiation\" not in row\n else row[\"solar_radiation\"],\n \"pressure_trend\": 
\"\"\n if \"pressure_trend\" not in row\n else row[\"pressure_trend\"],\n \"timestamp\": None\n if \"timestamp\" not in row\n else await cnv.epoch_to_datetime(row[\"timestamp\"]),\n \"uv\": 0 if \"uv\" not in row else row[\"uv\"],\n \"wind_avg\": 0\n if \"wind_avg\" not in row\n else await cnv.wind(row[\"wind_avg\"], UNIT_WIND_MS, self._to_units_wind),\n \"wind_bearing\": 0\n if \"wind_direction\" not in row\n else row[\"wind_direction\"],\n \"wind_chill\": 0\n if \"wind_chill\" not in row\n else await cnv.temperature(\n row[\"wind_chill\"], UNIT_TEMP_CELCIUS, self._to_units_temp\n ),\n \"wind_gust\": 0\n if \"wind_gust\" not in row\n else await cnv.wind(\n row[\"wind_gust\"], UNIT_WIND_MS, self._to_units_wind\n ),\n }\n items.append(StationData(item))\n\n return items",
"def view_station(request,station_id):\n station_url = settings.SODOR_ENDPOINT + 'station/' + str(int(station_id)) + '.json'\n context = {}\n try:\n station_data = client.load(station_url)\n except KeyError:\n return HttpResponseNotFound('Station not found')\n\n context['station'] = station_data.content\n\n # check children callsigns\n # do NOT assume flagship is (all) that we want - that is a bad assumption\n # e.g. WFSU has two children callsigns\n flagship_obj = station_data.related('flagship')\n flagship_callsign = flagship_obj.content.callsign\n children_callsigns = station_data.related('children')\n\n feeds = []\n callsigns = []\n context['callsign'] = flagship_callsign\n context['callsigns'] = []\n updated_callsigns = []\n\n for callsign_obj in children_callsigns.items():\n \"\"\"iterate thru callsigns\"\"\"\n if callsign_obj.content.callsign == flagship_callsign:\n callsign_obj.is_flagship = 'True'\n else:\n callsign_obj.is_flagship = None\n\n updated_callsigns.append(callsign_obj)\n callsigns.append(callsign_obj.content.callsign)\n\n children_feeds = callsign_obj.related('children')\n\n if children_feeds.self:\n for feed in children_feeds.items():\n feed_obj = {}\n # over the air channel\n # aka subchannel\n ota_channel = feed.related('summary').content\n feed_obj['ota_channel'] = ota_channel\n if callsign_obj.content.callsign == flagship_callsign:\n feed_obj['is_callsign'] = 'True'\n else:\n feed_obj['is_callsign'] = None\n feeds.append(feed_obj)\n\n feeds_by_flagship = sorted(feeds, key=itemgetter('is_callsign'),\n reverse=True)\n callsigns_by_flagship = sorted(updated_callsigns,\n key=attrgetter('is_flagship'), reverse=True)\n context['feeds'] = feeds_by_flagship\n context['callsigns'] = callsigns_by_flagship\n context = render_todays_listings(request, context, callsigns)\n\n return render_to_response(\n 'view_station.html',\n context,\n context_instance = RequestContext(request)\n )",
"def get_streams(a):\n if (a.device_watts): # check if the activity has the power data\n logger.debug(f'load_activities: Fetching stream for {maya.parse(a.start_date).iso8601()}:, {a.name}, {a.start_latlng}, {a.trainer}, {a.type}')\n s = client.get_activity_streams(a.id, response['athlete']['id'])\n if isinstance(s, pd.DataFrame): # check whether the stream was loaded from the local copy\n logger.debug(f'load_activities ...found locally')\n _s = s\n else: # Streams were loaded from the API, will be stored locally first\n logger.debug(f'load_activities ...fetched remotely, storing locally')\n s.store_locally()\n _s = pd.DataFrame(s.to_dict())\n yield {maya.parse(a.start_date).iso8601(): list(_s['watts'])}",
"def collect_stations(self):\n # First, iterate provinces and build url's\n site = urllib.request.urlopen(self.base_url)\n\n # Check that the site is still valid or operating by collecting a list of provinces\n print(\"Collecting provinces\")\n provinces = [s[9:11] for s in re.findall('<a href=\"../\">../</a>', site.read())]\n\n # Iterate provinces and collect list of available times\n print(\"Collecting time periods and station ID's\")\n self.stations = defaultdict(dict)\n for prov in provinces:\n site = urllib.request.urlopen(self.build_url(prov))\n expression = '<a href=\"[hd][a-zA-Z]*/\">[hd][a-zA-Z]*/</a>'\n times = [s.split('>')[1].split('<')[0].replace('/', '') for s in re.findall(expression, site.read())]\n\n # Iterate times and collect the station ID's\n for time in times:\n site = urllib.request.urlopen(self.build_url(prov, time))\n expression = '<a href=\"{0}_[a-zA-Z0-9]*_{1}_hydrometric.csv\">{0}_[a-zA-Z0-9]*_{1}_hydrometric.csv</a>'\n expression = expression.format(prov.upper(), time.lower())\n stations = [s.split('_')[1] for s in re.findall(expression, site.read())]\n self.stations[prov][time] = stations",
"def get_stations(self):\n return self.__request('stations')['stations']",
"def __init__ (self, msname, inverse = False, useElementResponse = True,\n useArrayFactor = True, useChanFreq = False):\n self._response = _stationresponse.StationResponse(msname, inverse,\n useElementResponse, useArrayFactor, useChanFreq)",
"def parse(self, response):\n theater_list = response.xpath('//div[@class=\"theater_info\"]//li/a')\n for theater_element in theater_list:\n curr_cinema_url = theater_element.xpath(\n './@href').extract_first()\n cinema_name = theater_element.xpath('./text()').extract_first()\n if not cinema_name:\n # partner theater element is different\n cinema_name = ''.join(theater_element.xpath(\n './/text()').extract())\n else:\n curr_cinema_url = response.urljoin(curr_cinema_url)\n data_proto = ShowingLoader(response=response)\n data_proto.add_cinema_name(cinema_name)\n cinema_name = data_proto.get_output_value('cinema_name')\n data_proto.add_cinema_site(curr_cinema_url, cinema_name)\n data_proto.add_value('source', self.name)\n if not self.is_cinema_crawl([cinema_name]):\n continue\n request = scrapy.Request(\n curr_cinema_url, callback=self.parse_cinema)\n request.meta[\"data_proto\"] = data_proto.load_item()\n yield request",
"def stations(self):\n for stat in sorted(self.station_records):\n yield self.station_records[stat]",
"def _get_stations_local() -> List[dict]:\n LOGGER.info('Using pre-generated json to retrieve station list')\n with open(weather_stations_file_path) as weather_stations_file:\n json_data = json.load(weather_stations_file)\n return json_data['weather_stations']",
"def list_stations(self):\n for i in range(len(self.data)):\n self.stnlist.append(Station(self.data[i][0]))\n return self",
"def get_stations(self, limit=250):\n\n endpoint = \"/station/getStations\"\n response = self._send(endpoint, \"POST\", {\"pageSize\": limit})\n stations = response.json()[\"stations\"]\n return stations"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the full version information of the webservice as a string.
|
def _get_webservice_versionstring(self, service):
version = self.get_webservice_version(service)
return ".".join(map(str, version))
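
The join itself is simple; assuming get_webservice_version() returns a tuple such as (1, 1, 0), the resulting string is:

version = (1, 1, 0)  # example of what get_webservice_version(service) might return
assert ".".join(map(str, version)) == "1.1.0"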
|
[
"def GetVersionString(self):\n return ConvertVersionToString(self.GetVersion())",
"def version_info(): \n return VERSION_s",
"def version_string(self):\n return self.server_version + ' ' + self.sys_version + ' ' + \"ToyWebResource/\"+str(__version__)",
"def version():\n version_info = pbr.version.VersionInfo('ardana-service')\n return version_info.version_string_with_vcs()",
"def get_version(self):\n url = '{}/version'.format(self.url)\n try:\n r = requests.get(url)\n if r.status_code == 200:\n return r.json()['orionld version']\n except Exception as e:\n pass\n return ''",
"def getVersionInfo(cls):\n\n return __version__ + \"\\n\"",
"def get_version():\n return \".\".join(map(str, VERSION))",
"def get_api_version(self):\n from webapi import VERSION\n return '.'.join(map(str, VERSION))",
"def full_version(self):\n return str(self.version)",
"def version(self):\n version = self.get_rpc().getnetworkinfo()[\"subversion\"]\n version = version.replace(\"/\", \"\").replace(\"Satoshi:\", \"v\")\n return version",
"def getVersionString():\n return str(version_gen.major) + \".\" + str(version_gen.minor) + \".\" + str(version_gen.compilation)",
"def GetVersion(self):\n return self._devtools_http.RequestJson('version')",
"def get_version(self):\r\n if not self.endpoint_checker(self.endpointurl):\r\n raise Exception(\"Please use a valid ESRI REST url\")\r\n\r\n parsedurl = urlparse(self.endpointurl)\r\n print(f\"{parsedurl.scheme}://{parsedurl.netloc}/arcgis/rest/services/?f=pjson\")\r\n req = requests.get(\r\n f\"{parsedurl.scheme}://{parsedurl.netloc}/arcgis/rest/services/?f=pjson\"\r\n )\r\n\r\n if req.status_code == 200:\r\n try:\r\n return req.json()[\"currentVersion\"]\r\n except KeyError:\r\n try:\r\n req = requests.get(\r\n self.endpointurl.split(\"services/\")[0] + \"services/?f=pjson\"\r\n )\r\n return req.json()[\"currentVersion\"]\r\n except Exception as e:\r\n raise e\r\n raise Exception(\r\n f\"An Error occurred retrieving vital information, the response status {str(req.status_code)} associate with {req.json()['error']['message']}\"\r\n )",
"def get_version_string():\n return (f\"{config.VERSION['repo']}:{config.VERSION['name']}@\"\n f\"{config.VERSION['sha']}, modified:{config.VERSION['modified']}\")",
"def version_string():\n return '%s %s' % (__release__, __svn_revision__)",
"def get_string(self):\n if not self:\n raise ValueError(\"Null APIVersion cannot be converted to string.\")\n elif self.is_latest():\n return \"%s.%s\" % (self.ver_major, \"latest\")\n return \"%s.%s\" % (self.ver_major, self.ver_minor)",
"def version():\n print(get_version())",
"def get_version(self):\n return self.api_version",
"def get_version():\r\n return __version__"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Attaches the actually used dataselect URL to each Trace.
|
def _attach_dataselect_url_to_stream(self, st):
url = self._build_url("dataselect", "query")
for tr in st:
tr.stats._fdsnws_dataselect_url = url
|
[
"def add_data_url(self, url: str):\r\n if 'urls' in self.metadata:\r\n self.metadata['urls'].append(url)\r\n else:\r\n self.metadata['urls'] = [url]",
"def __traces_url(self):\n path = AGENT_TRACES_PATH % self.from_.pid\n return \"http://%s:%s/%s\" % (self.host, self.port, path)",
"def __init__(self):\n\t\tsuper(SpyEyeTrackerConfigUrls, self).__init__(name=\"SpyEye Tracker Config Urls Feed\", rss_url=\"https://spyeyetracker.abuse.ch/monitor.php?rssfeed=configurls\")\n\t\tfor entry in self.entries:\n\t\t entry[\"Host\"] = entry[\"SpyEye ConfigURL\"]\n\t\t del entry[\"SpyEye ConfigURL\"]",
"def __init__(self):\n\t\tsuper(SpyEyeTrackerDropUrls, self).__init__(name=\"SpyEye Tracker Drop Urls Feed\", rss_url=\"https://spyeyetracker.abuse.ch/monitor.php?rssfeed=dropurls\")\n\t\tfor entry in self.entries:\n\t\t entry[\"Host\"] = entry[\"SpyEye DropURL\"]\n\t\t del entry[\"SpyEye DropURL\"]",
"def add_tiddlers(self, tiddlers):\n for tiddler in tiddlers:\n self.add_tiddler(tiddler)",
"def trace_data(self, trace_data):\n\n self._trace_data = trace_data",
"def trailers(self, trailers):\n\n self._trailers = trailers",
"def log_url_spide(self):\n pass",
"def urls(self, urls):\n\n self._urls = urls",
"def add_trace_df(self, tt):\n tdf = tt.get_df()\n tdf = tdf.set_index(self._index_tracker.get_multiindex(tt))\n self._traces_df_list.append(tdf)",
"def _log_web_access_uris(self):\n pass",
"def setTweetUrls(self):\n self.urls = [u[\"url\"] for u in self.tweet[\"entities\"][\"urls\"]]",
"def __init__(self):\n\t\tsuper(SpyEyeTrackerBinaryUrls, self).__init__(name=\"SpyEye Tracker Binary Urls Feed\", rss_url=\"https://spyeyetracker.abuse.ch/monitor.php?rssfeed=binaryurls\")\n\t\tfor entry in self.entries:\n\t\t entry[\"Host\"] = entry[\"SpyEye BinaryURL\"]\n\t\t del entry[\"SpyEye BinaryURL\"]",
"def set_dbgap_link(self):\n return self.DATASET_URL.format(self.source_study_version.full_accession, self.i_accession)",
"def log_url(self, log_url):\n\n self._log_url = log_url",
"def add_trace(self, trace: PlotlyTrace):\n self.traces.append(trace)",
"def adddatauri(self):\n return self._adddatauri",
"def addHttp(self, d):\n self.__populateDict(self._http, d)",
"def _setup_links(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Takes any value and converts it to a string compliant with the FDSN webservices. Will raise a TypeError if the value could not be converted. >>> print(convert_to_string("abcd")) abcd >>> print(convert_to_string(1)) 1 >>> print(convert_to_string(1.2)) 1.2 >>> print(convert_to_string( \ UTCDateTime(2012, 1, 2, 3, 4, 5, 666666))) 2012-01-02T03:04:05.666666
|
def convert_to_string(value):
if isinstance(value, str):
return value
# Boolean test must come before integer check!
elif isinstance(value, bool):
return str(value).lower()
elif isinstance(value, int):
return str(value)
elif isinstance(value, float):
return str(value)
elif isinstance(value, UTCDateTime):
return str(value).replace("Z", "")
else:
raise TypeError("Unexpected type %s" % repr(value))
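
A few concrete results for the plain-type branches above (the UTCDateTime branch needs obspy and is only noted in a comment, assuming obspy's default ISO string formatting):

assert convert_to_string("abcd") == "abcd"
assert convert_to_string(True) == "true"   # the bool branch runs before the int branch
assert convert_to_string(1) == "1"
assert convert_to_string(1.2) == "1.2"
# UTCDateTime(2012, 1, 2, 3, 4, 5, 666666) would become "2012-01-02T03:04:05.666666",
# since str() yields the ISO form and the trailing "Z" is stripped.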
|
[
"def convert_to_str(value):\n if isinstance(value, basestring):\n return safe_unicode(value)\n else:\n return str(value)",
"def convert_to_str(value: Any) -> str:\n return get_convertor(value.__class__).to_str(value)",
"def make_str(value):\n if (sys.version_info > (3, 0)):\n # python 3.x has no unicode type, so if error, use str type\n return str(value)\n else:\n # for python 2.x compatibility, use unicode\n return unicode(value)",
"def datetime_object_to_string_converter(datetime_object):\n\n if isinstance(datetime_object, datetime.datetime):\n return datetime_object.__str__()",
"def to_str(cls, value: dt.datetime) -> str:\n\n # Ensure the argument is in UTC and does not have fractional\n # milliseconds\n cls.validate(value)\n\n # Convert to string in ISO format with UTC (Z) timezone suffix\n # and 3 digits after decimal points for seconds, irrespective of\n # how many digits are actually required.\n result_to_microseconds: str = value.strftime('%Y-%m-%dT%H:%M:%S.%f')\n result: str = result_to_microseconds[:-3] + 'Z'\n return result",
"def toString(cls, dtValue):\n if not isinstance(dtValue, datetime):\n raise TypeError(\"Expecting datetime type for string conversion, \"\n \"got %r\" % dtValue)\n \n # isoformat provides the correct formatting\n# return dtIssueInstant.strftime(cls.DATETIME_FORMAT)\n return datetime.isoformat(dtValue)+'Z'",
"def serialise_to_string(value):\n return str(safe_for_serialisation(value))",
"def _as_str(value):\n\tif isinstance(value, bytes):\n\t\treturn value.decode(\"utf-8\", \"replace\")\n\telif isinstance(value, str):\n\t\treturn value\n\telse:\n\t\traise TypeError(\"expected bytes\")",
"def to_str(s):\n if type(s) is unicode:\n s = s.encode(\"utf8\")\n return s",
"def date2str(datetime_object):\n if datetime_object is None:\n return 'None'\n return datetime_object.strftime('%Y-%m-%dT%H:%M:%S.%f')[0:-3]",
"def _str(self, value):\r\n if not isinstance(value, basestring):\r\n return str(value)\r\n else:\r\n return value",
"def test_to_String(self) -> None:\n assert to_String(1) == \"1\", to_String(1)\n assert to_String([1, 2, 3]) == str([1, 2, 3]), to_String([1, 2, 3])\n assert to_String(\"foo\") == \"foo\", to_String(\"foo\")\n assert to_String(None) == 'None'\n # test low level string converters too\n assert to_str(None) == 'None'\n assert to_bytes(None) == b'None'\n\n s1 = UserString('blah')\n assert to_String(s1) == s1, s1\n assert to_String(s1) == 'blah', s1\n\n class Derived(UserString):\n pass\n\n s2 = Derived('foo')\n assert to_String(s2) == s2, s2\n assert to_String(s2) == 'foo', s2",
"def convert(o):\n if isinstance(o, datetime):\n return o.__str__()",
"def to_str(variable):\n try:\n int(variable)\n return str(variable)\n except ValueError:\n return variable",
"def value_as_str(value):\n\n if isinstance(value, str) or isinstance(value, unicode):\n return value\n else:\n return json.dumps(value, ensure_ascii=False)",
"def to_str(x) -> str:\n return str(x) if x else ''",
"def _force_string(x):\n if isinstance(x, basestring):\n return x\n else:\n return str(x)",
"def ensure_strtime(t, isoformat=True):\n t_orig = t\n if isinstance(t, str):\n return t\n if isinstance(t, (int, float)):\n t = ensure_datetime(t)\n if isinstance(t, datetime.datetime):\n return t.isoformat() if isoformat else str(t)\n raise TypeError(f\"Do not know how to convert {t_orig!r} to string datetime\")",
"def _utf8str(x):\r\n if six.PY3:\r\n return str(x)\r\n if isinstance(x, six.binary_type):\r\n return x\r\n elif isinstance(x, six.text_type):\r\n return x.encode('utf-8')\r\n else:\r\n return six.binary_type(x)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that `construct_compose_dict` returns the expected compose dict.
|
def test_construct_compose_dict(self):
expected_examplescraper_compose_dict = {
"version": "3",
"services": {
"scp1": {
"container_name": "scp1",
"environment": [
"TOR_PORT=9051",
"TOR_PASSWORD=I-solemnly-swear-I-am-up-to-no-good",
"PRIVOXY_PORT=8118",
"PRIVOXY_HOST=127.0.0.1",
"IPSTORE_PORT=5000",
"IPSTORE_HOST=scp1",
"URLBROKER_PORT=6000",
"URLBROKER_HOST=scp1",
"DATASTORE_PORT=7000",
"DATASTORE_HOST=scp1",
"HEALTHCHECK_PORT=8000",
"HEALTHCHECK_HOST=scp1",
"SCRAPER_PACKAGE=examplescraper",
"DOCKER_HOST_IP=fake_docker_host_ip",
"SCRAPER_CONFIG=tests.integration.fake_config",
],
"hostname": "scp1",
"image": "scp:latest",
"volumes": ["/fake_curent_dir:/scp"],
"build": {
"context": "/fake_curent_dir",
"dockerfile": "/fake_curent_dir/Dockerfile",
},
"entrypoint": "/scp/scrapemeagain/dockerized/entrypoints/entrypoint.scp1.sh",
},
"scp2": {
"container_name": "scp2",
"environment": [
"TOR_PORT=9051",
"TOR_PASSWORD=I-solemnly-swear-I-am-up-to-no-good",
"PRIVOXY_PORT=8118",
"PRIVOXY_HOST=127.0.0.1",
"IPSTORE_PORT=5000",
"IPSTORE_HOST=scp1",
"URLBROKER_PORT=6000",
"URLBROKER_HOST=scp1",
"DATASTORE_PORT=7000",
"DATASTORE_HOST=scp1",
"HEALTHCHECK_PORT=8000",
"HEALTHCHECK_HOST=scp1",
"SCRAPER_PACKAGE=examplescraper",
"DOCKER_HOST_IP=fake_docker_host_ip",
"SCRAPER_CONFIG=tests.integration.fake_config",
],
"hostname": "scp2",
"image": "scp:latest",
"volumes": ["/fake_curent_dir:/scp"],
"depends_on": ["scp1"],
"entrypoint": "/scp/scrapemeagain/dockerized/entrypoints/entrypoint.scpx.sh",
},
},
}
self.assertEqual(
expected_examplescraper_compose_dict,
docker_compose.construct_compose_dict(
"examplescraper", "tests.integration.fake_config"
),
)
|
[
"def test_construct_compose_dict_nonexisting_scraper(self):\n with self.assertRaises(ModuleNotFoundError):\n docker_compose.construct_compose_dict(\"nonexisting\")",
"def test_chemical_composition_trivial(self):\n expected = {\"U\": 1 / 3, \"Ag\": 2 / 3}\n self.assertDictEqual(self.structure.chemical_composition, expected)",
"def test_dict_to_rcd(self):\n _dict = {\n \"general\": {},\n \"commands\": {}\n }\n\n self.assertEqual(\n utilities.resolve_cli_data(_dict),\n _dict,\n \"Dictionary Passed Should Be Returned\"\n )",
"def test_create_mimic_dict_1(self):\n result = self.module.create_mimic_dict(\"imdev.txt\")\n self.assertIsInstance(\n result, dict,\n \"The return value of create_mimic_dict() should be a dict.\"\n )",
"def test_create_dicts():\n\n expected = {\n \"First Name\": \"John\",\n \"Last Name\": \"Smith\",\n \"Age\": 12,\n \"Gender\": \"Male\",\n \"Diagnosis\": \"Something wrong\",\n \"TSH results\": [2, 2, 2, 2, 2, 2],\n }\n\n result = create_dicts([\"John Smith\"], [12], [\"Male\"], [\"Something wrong\"],\n [[2, 2, 2, 2, 2, 2]])\n\n assert expected == result[0]",
"def test_load_from_dict(self):\n\n dict_pipe = {\n \"start\": {\"abc\": 123, \"xyz\": 789},\n \"components\": [\n TestPipeline.MockComponentStart.qualname,\n {TestPipeline.MockComponentMid.qualname: { \"baz\": \"qux\" }},\n MockNonNestedComponent.qualname\n ]\n }\n pipe = Pipeline.load(dict_pipe)\n self.assertDictEqual(\n pipe.start,\n dict_pipe[\"start\"],\n \"'Start' argument correctly passed to pipeline.\")\n\n self.assertIsInstance(\n pipe.components[0],\n TestPipeline.MockComponentStart)\n self.assertIsInstance(\n pipe.components[1],\n TestPipeline.MockComponentMid)\n self.assertIsInstance(\n pipe.components[2],\n MockNonNestedComponent)\n self.assertTupleEqual(pipe.components[1].args, ())\n self.assertDictEqual(pipe.components[1].kwargs, {\"baz\": \"qux\"})",
"def testInitFromDict():\n conf = naiveConf.NaiveConf({})\n conf = naiveConf.NaiveConf({'a':5})\n assert conf.a == 5",
"def test_get_composition(self):\n pass",
"def testDCFormattedDict(self):\n # Make the dictionary.\n dcd = untlpydict2dcformatteddict(UNTL_DICT)\n # Workaround to make usable in Python 2.5.\n self.assertFalse('content' in dcd['publisher'], '%s not in %s'\n % ('content', dcd['publisher']))\n self.assertTrue(len(dcd) < len(UNTL_DICT))",
"def test_compose(self):\n\n # Test single qubit Pauli dot products\n with self.assertWarns(DeprecationWarning):\n stab = StabilizerTable.from_labels([\"I\", \"X\", \"Y\", \"Z\"])\n\n # Test single qubit Pauli dot products\n with self.assertWarns(DeprecationWarning):\n stab = StabilizerTable.from_labels([\"I\", \"X\", \"Y\", \"Z\", \"-I\", \"-X\", \"-Y\", \"-Z\"])\n\n with self.subTest(msg=\"dot single I\"):\n with self.assertWarns(DeprecationWarning):\n value = stab.compose(\"I\")\n target = StabilizerTable.from_labels([\"I\", \"X\", \"Y\", \"Z\", \"-I\", \"-X\", \"-Y\", \"-Z\"])\n self.assertEqual(target, value)\n\n with self.subTest(msg=\"dot single -I\"):\n with self.assertWarns(DeprecationWarning):\n value = stab.compose(\"-I\")\n target = StabilizerTable.from_labels([\"-I\", \"-X\", \"-Y\", \"-Z\", \"I\", \"X\", \"Y\", \"Z\"])\n self.assertEqual(target, value)\n\n with self.subTest(msg=\"dot single I\"):\n with self.assertWarns(DeprecationWarning):\n value = stab.dot(\"I\")\n target = StabilizerTable.from_labels([\"I\", \"X\", \"Y\", \"Z\", \"-I\", \"-X\", \"-Y\", \"-Z\"])\n self.assertEqual(target, value)\n\n with self.subTest(msg=\"dot single -I\"):\n with self.assertWarns(DeprecationWarning):\n value = stab.dot(\"-I\")\n target = StabilizerTable.from_labels([\"-I\", \"-X\", \"-Y\", \"-Z\", \"I\", \"X\", \"Y\", \"Z\"])\n self.assertEqual(target, value)\n\n with self.subTest(msg=\"compose single X\"):\n with self.assertWarns(DeprecationWarning):\n value = stab.compose(\"X\")\n target = StabilizerTable.from_labels([\"X\", \"I\", \"-Z\", \"Y\", \"-X\", \"-I\", \"Z\", \"-Y\"])\n self.assertEqual(target, value)\n\n with self.subTest(msg=\"compose single -X\"):\n with self.assertWarns(DeprecationWarning):\n value = stab.compose(\"-X\")\n target = StabilizerTable.from_labels([\"-X\", \"-I\", \"Z\", \"-Y\", \"X\", \"I\", \"-Z\", \"Y\"])\n self.assertEqual(target, value)\n\n with self.subTest(msg=\"dot single X\"):\n with self.assertWarns(DeprecationWarning):\n value = stab.dot(\"X\")\n target = StabilizerTable.from_labels([\"X\", \"I\", \"Z\", \"-Y\", \"-X\", \"-I\", \"-Z\", \"Y\"])\n self.assertEqual(target, value)\n\n with self.subTest(msg=\"dot single -X\"):\n with self.assertWarns(DeprecationWarning):\n value = stab.dot(\"-X\")\n target = StabilizerTable.from_labels([\"-X\", \"-I\", \"-Z\", \"Y\", \"X\", \"I\", \"Z\", \"-Y\"])\n self.assertEqual(target, value)\n\n with self.subTest(msg=\"compose single Y\"):\n with self.assertWarns(DeprecationWarning):\n value = stab.compose(\"Y\")\n target = StabilizerTable.from_labels([\"Y\", \"Z\", \"-I\", \"-X\", \"-Y\", \"-Z\", \"I\", \"X\"])\n self.assertEqual(target, value)\n\n with self.subTest(msg=\"compose single -Y\"):\n with self.assertWarns(DeprecationWarning):\n value = stab.compose(\"-Y\")\n target = StabilizerTable.from_labels([\"-Y\", \"-Z\", \"I\", \"X\", \"Y\", \"Z\", \"-I\", \"-X\"])\n self.assertEqual(target, value)\n\n with self.subTest(msg=\"dot single Y\"):\n with self.assertWarns(DeprecationWarning):\n value = stab.dot(\"Y\")\n target = StabilizerTable.from_labels([\"Y\", \"-Z\", \"-I\", \"X\", \"-Y\", \"Z\", \"I\", \"-X\"])\n self.assertEqual(target, value)\n\n with self.subTest(msg=\"dot single -Y\"):\n with self.assertWarns(DeprecationWarning):\n value = stab.dot(\"-Y\")\n target = StabilizerTable.from_labels([\"-Y\", \"Z\", \"I\", \"-X\", \"Y\", \"-Z\", \"-I\", \"X\"])\n self.assertEqual(target, value)\n\n with self.subTest(msg=\"compose single Z\"):\n with self.assertWarns(DeprecationWarning):\n value = 
stab.compose(\"Z\")\n target = StabilizerTable.from_labels([\"Z\", \"-Y\", \"X\", \"I\", \"-Z\", \"Y\", \"-X\", \"-I\"])\n self.assertEqual(target, value)\n\n with self.subTest(msg=\"compose single -Z\"):\n with self.assertWarns(DeprecationWarning):\n value = stab.compose(\"-Z\")\n target = StabilizerTable.from_labels([\"-Z\", \"Y\", \"-X\", \"-I\", \"Z\", \"-Y\", \"X\", \"I\"])\n self.assertEqual(target, value)\n\n with self.subTest(msg=\"dot single Z\"):\n with self.assertWarns(DeprecationWarning):\n value = stab.dot(\"Z\")\n target = StabilizerTable.from_labels([\"Z\", \"Y\", \"-X\", \"I\", \"-Z\", \"-Y\", \"X\", \"-I\"])\n self.assertEqual(target, value)\n\n with self.subTest(msg=\"dot single -Z\"):\n with self.assertWarns(DeprecationWarning):\n value = stab.dot(\"-Z\")\n target = StabilizerTable.from_labels([\"-Z\", \"-Y\", \"X\", \"-I\", \"Z\", \"Y\", \"-X\", \"I\"])\n self.assertEqual(target, value)",
"def test_populate_dictionary_type(a, b):\n from mail_room_madness import populate_dictionary\n for i in range(4):\n output = populate_dictionary(a, b)\n assert type(output) == dict",
"def test_encode_dict_is_deterministic():\n data = dict(c=3, b=2, a=1)\n assert DictProtobufStructSerializer.encode(\n data\n ) == DictProtobufStructSerializer.encode(data)",
"def test_make_map(self):\n\n # TODO: This should be a reusable chunk of data we can put somewhere.\n mapping = {\n\n # Foo uses the default settings (map)\n \"foo\": \"bar\",\n\n # Baz is the same, but explicit\n \"baz\": {\n \"type\": \"map\",\n \"field\": \"bar\"\n },\n\n # Bazza is the same, type: map is assumed.\n \"bazza\": {\n \"field\": \"bar\"\n },\n\n \"lit\": {\n \"type\": \"literal\",\n \"value\": \"MyLiteralString\"\n },\n \"con\": {\n \"type\": \"concat\",\n \"field\": \"somefield\",\n \"prefix\": \"lol\"\n }\n }\n\n made_map = make_map(mapping)\n\n assert(isinstance(made_map[\"foo\"], Map))\n assert(isinstance(made_map[\"baz\"], Map))\n assert(isinstance(made_map[\"bazza\"], Map))\n assert(isinstance(made_map[\"lit\"], Literal))\n assert(isinstance(made_map[\"con\"], Concat))",
"def test_empty_dict_coerce():\n\n @type_checked\n def _run_test(thing:{}):\n assert isinstance(thing, dict)\n\n _run_test([(\"something\", \"is_true\")])",
"def test_evc_from_dict(self, _validate_mock, uni_from_dict_mock):\n _validate_mock.return_value = True\n uni_from_dict_mock.side_effect = [\"uni_a\", \"uni_z\"]\n payload = {\n \"name\": \"my evc1\",\n \"uni_a\": {\n \"interface_id\": \"00:00:00:00:00:00:00:01:1\",\n \"tag\": {\"tag_type\": 1, \"value\": 80},\n },\n \"uni_z\": {\n \"interface_id\": \"00:00:00:00:00:00:00:02:2\",\n \"tag\": {\"tag_type\": 1, \"value\": 1},\n },\n \"circuit_scheduler\": [\n {\"frequency\": \"* * * * *\", \"action\": \"create\"}\n ],\n \"queue_id\": 5,\n }\n # pylint: disable=protected-access\n evc_response = self.napp._evc_from_dict(payload)\n self.assertIsNotNone(evc_response)\n self.assertIsNotNone(evc_response.uni_a)\n self.assertIsNotNone(evc_response.uni_z)\n self.assertIsNotNone(evc_response.circuit_scheduler)\n self.assertIsNotNone(evc_response.name)\n self.assertIsNotNone(evc_response.queue_id)",
"def test_dict_to_dict(self):\n @converters.wrap\n def inner_test(param: dict):\n \"\"\"Make sure the parameter was converted correctly.\"\"\"\n self.assertEqual(param, {'foo': 1, 'bar': ['bat', 2]})\n inner_test(param={'foo': 1, 'bar': ['bat', 2]})",
"def test_mimic_dict():\n assert type(mimic_dict(PATH)) == dict\n print('testing function mimic_dict() passed!')",
"def test_apply_dict(test):\n\n def function(a, b, c, d=123):\n return (a, b, c, d)\n\n goal = (1, 2, 3, 123)\n extra = {\"a\":1, \"b\":2, \"c\":3, \"e\":4}\n test.assertEquals(goal, apply_dict(function, extra))",
"def testCasDict(self):\n casDict = {\"Singular\":\"Singular\", \"Magma\":\"magma\", \"Maple\":\"maple\"}\n self.assertEqual(casDict, self.msTest.getCASDict(),\n \"The dictionary inside the MachineSettings was not validly initialized\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test `construct_compose_dict` raises `ModuleNotFoundError` for a nonexisting scraper.
|
def test_construct_compose_dict_nonexisting_scraper(self):
with self.assertRaises(ModuleNotFoundError):
docker_compose.construct_compose_dict("nonexisting")
|
[
"def test_construct_compose_dict(self):\n expected_examplescraper_compose_dict = {\n \"version\": \"3\",\n \"services\": {\n \"scp1\": {\n \"container_name\": \"scp1\",\n \"environment\": [\n \"TOR_PORT=9051\",\n \"TOR_PASSWORD=I-solemnly-swear-I-am-up-to-no-good\",\n \"PRIVOXY_PORT=8118\",\n \"PRIVOXY_HOST=127.0.0.1\",\n \"IPSTORE_PORT=5000\",\n \"IPSTORE_HOST=scp1\",\n \"URLBROKER_PORT=6000\",\n \"URLBROKER_HOST=scp1\",\n \"DATASTORE_PORT=7000\",\n \"DATASTORE_HOST=scp1\",\n \"HEALTHCHECK_PORT=8000\",\n \"HEALTHCHECK_HOST=scp1\",\n \"SCRAPER_PACKAGE=examplescraper\",\n \"DOCKER_HOST_IP=fake_docker_host_ip\",\n \"SCRAPER_CONFIG=tests.integration.fake_config\",\n ],\n \"hostname\": \"scp1\",\n \"image\": \"scp:latest\",\n \"volumes\": [\"/fake_curent_dir:/scp\"],\n \"build\": {\n \"context\": \"/fake_curent_dir\",\n \"dockerfile\": \"/fake_curent_dir/Dockerfile\",\n },\n \"entrypoint\": \"/scp/scrapemeagain/dockerized/entrypoints/entrypoint.scp1.sh\",\n },\n \"scp2\": {\n \"container_name\": \"scp2\",\n \"environment\": [\n \"TOR_PORT=9051\",\n \"TOR_PASSWORD=I-solemnly-swear-I-am-up-to-no-good\",\n \"PRIVOXY_PORT=8118\",\n \"PRIVOXY_HOST=127.0.0.1\",\n \"IPSTORE_PORT=5000\",\n \"IPSTORE_HOST=scp1\",\n \"URLBROKER_PORT=6000\",\n \"URLBROKER_HOST=scp1\",\n \"DATASTORE_PORT=7000\",\n \"DATASTORE_HOST=scp1\",\n \"HEALTHCHECK_PORT=8000\",\n \"HEALTHCHECK_HOST=scp1\",\n \"SCRAPER_PACKAGE=examplescraper\",\n \"DOCKER_HOST_IP=fake_docker_host_ip\",\n \"SCRAPER_CONFIG=tests.integration.fake_config\",\n ],\n \"hostname\": \"scp2\",\n \"image\": \"scp:latest\",\n \"volumes\": [\"/fake_curent_dir:/scp\"],\n \"depends_on\": [\"scp1\"],\n \"entrypoint\": \"/scp/scrapemeagain/dockerized/entrypoints/entrypoint.scpx.sh\",\n },\n },\n }\n\n self.assertEqual(\n expected_examplescraper_compose_dict,\n docker_compose.construct_compose_dict(\n \"examplescraper\", \"tests.integration.fake_config\"\n ),\n )",
"def test_func_dict_not_imported_module():\n\n plot_toggles = {\"SMF\": True}\n module_name = \"not_a_module.funcs\"\n function_prefix = \"calc_\"\n\n with pytest.raises(KeyError):\n func_dict = generate_func_dict(plot_toggles, module_name, function_prefix)",
"def test_compose_raises_on_bad_partname_ext(self):\n # setup -----------------------\n MockPart = namedtuple('MockPart', 'partname')\n parts = [MockPart('/ppt/!blat/rhumba.1x&')]\n # verify ----------------------\n with self.assertRaises(LookupError):\n self.cti.compose(parts)",
"def test_get_composition(self):\n pass",
"def test_instantiate_non_existent_module(self):\n # create test configs\n test_configs = [\n {\"_target_\": \"non_existent_module.some_class\"},\n {\"_target_\": \"another_non_existent_module.some_class\", \"a\": 1, \"b\": 2}\n ]\n\n # check that instantiate raises ModuleNotFoundError for each test config\n for test_conf in test_configs:\n self.assertRaises(ModuleNotFoundError, instantiate, test_conf)",
"def test_poscar_exist(poscar_parser):\n\n assert poscar_parser.get_dict()",
"def test_get_components_returns_components(\n component_factory: ComponentFactory,\n) -> None:\n components = component_factory.get_components()\n assert isinstance(components, dict)",
"def composed_url2modules(baseurl):\n import compose\n\n c = compose.Compose(baseurl)\n cid = c.data_id()\n cstat = c.data_status()\n print('Mod Compose:', cid)\n print(' Status:', cstat)\n mdata = c.json_modules()\n m = compose.modules_from_compose(mdata)\n return compose.dedup_modules(m)",
"def test_fails_import_non_existing_module(self):\n consumer = Consumer()\n self.assertRaises(\n TaskLoadingError, consumer.import_taskified_function, \"tests.foobar.nope\"\n )",
"def test_build_compose_section_supports_layers():\n\n custom_output_dir = './build_not_dist'\n manifest = {\n 'output_dir': custom_output_dir,\n 'layers': {\n 'first': {'requirements': 'requirements/first.txt'},\n 'second': {'requirements': 'requirements/second.txt'},\n }\n }\n\n result = actions._get_compose_template(manifest)\n yaml_result = yaml.safe_load(result)\n\n first_layer = yaml_result['services']['first-layer']\n assert any('requirements/first.txt' in volume for volume in first_layer['volumes'])\n assert 'build_layer.sh first' in first_layer['command']\n\n second_layer = yaml_result['services']['second-layer']\n assert any('requirements/second.txt' in volume for volume in second_layer['volumes'])\n assert 'build_layer.sh second' in second_layer['command']",
"def test_compose_method_present(self):\n self.assertClassHasMethod(pptx.packaging._ContentTypesItem, 'compose')",
"def __load_docker_compose(path):\n file_path = __get_docker_file_path(path)\n if file_path is None:\n msg = \"Could not find docker-compose file at {}\".format(path)\n return None, __standardize_result(False, msg, None, None)\n if not os.path.isfile(file_path):\n return (\n None,\n __standardize_result(\n False, \"Path {} is not present\".format(file_path), None, None\n ),\n )\n try:\n with salt.utils.files.fopen(file_path, \"r\") as fl:\n loaded = yaml.load(fl)\n except OSError:\n return (\n None,\n __standardize_result(\n False, \"Could not read {}\".format(file_path), None, None\n ),\n )\n except yaml.YAMLError as yerr:\n msg = \"Could not parse {} {}\".format(file_path, yerr)\n return None, __standardize_result(False, msg, None, None)\n if not loaded:\n msg = \"Got empty compose file at {}\".format(file_path)\n return None, __standardize_result(False, msg, None, None)\n if \"services\" not in loaded:\n loaded[\"services\"] = {}\n result = {\"compose_content\": loaded, \"file_name\": os.path.basename(file_path)}\n return result, None",
"def _get_community_platform_details(community_platform_name: str) -> Dict[str, Any]:\n try:\n importlib.import_module(name=\"scrapli_community\")\n except ModuleNotFoundError as exc:\n title = \"Module not found!\"\n message = (\n \"Scrapli Community package is not installed!\\n\"\n \"To resolve this issue, install the transport plugin. You can do this in one of \"\n \"the following ways:\\n\"\n \"1: 'pip install -r requirements-community.txt'\\n\"\n \"2: 'pip install scrapli[community]'\"\n )\n warning = format_user_warning(title=title, message=message)\n raise ScrapliModuleNotFound(warning) from exc\n\n try:\n # replace any underscores in platform name with \".\"; should support any future platforms\n # that dont have \"child\" os types -- i.e. just \"cisco\" instead of \"cisco_iosxe\"\n scrapli_community_platform = importlib.import_module(\n name=f\"scrapli_community.{community_platform_name.replace('_', '.')}\"\n )\n except ModuleNotFoundError as exc:\n title = \"Module not found!\"\n message = (\n f\"Scrapli Community platform '{community_platform_name}` not found!\\n\"\n \"To resolve this issue, ensure you have the correct platform name, and that a scrapli \"\n \" community platform of that name exists!\"\n )\n warning = format_user_warning(title=title, message=message)\n raise ScrapliModuleNotFound(warning) from exc\n\n platform_details_original = getattr(scrapli_community_platform, \"SCRAPLI_PLATFORM\", {})\n if not platform_details_original:\n msg = \"Community platform missing required attribute `SCRAPLI_PLATFORM`\"\n raise ScrapliException(msg)\n platform_details: Dict[str, Any] = deepcopy(platform_details_original)\n return platform_details",
"def test_bad_func_loads():\n from libensemble.ensemble import Ensemble\n\n yaml_errors = {\n \"./simdir/test_example_badfuncs_attribute.yaml\": AttributeError,\n \"./simdir/test_example_badfuncs_notfound.yaml\": ModuleNotFoundError,\n }\n\n for f in yaml_errors:\n e = Ensemble(libE_specs={\"comms\": \"local\", \"nworkers\": 4})\n flag = 1\n try:\n e.from_yaml(f)\n except yaml_errors[f]:\n flag = 0\n assert flag == 0",
"def test_get_factory_invalid(self):\n order_processor = OrderProcessor()\n self.assertRaises(KeyError,\n order_processor.get_factory('AppleRepublic'))",
"def test_list_compositions(self):\n pass",
"def find_compose_result(compose, results):\n if results is None:\n return None\n\n for comp in results[\"composes\"]:\n if (\n comp[\"url\"] == compose[\"url\"]\n and comp[\"name\"] == compose[\"name\"]\n and comp[\"version\"] == compose[\"version\"]\n ):\n return comp\n\n return None",
"def test_plugin_fcm_cryptography_import_error():\n\n # Prepare a base keyfile reference to use\n path = os.path.join(PRIVATE_KEYFILE_DIR, 'service_account.json')\n\n # Attempt to instantiate our object\n obj = Apprise.instantiate(\n 'fcm://mock-project-id/device/#topic/?keyfile={}'.format(str(path)))\n\n # It's not possible because our cryptography depedancy is missing\n assert obj is None",
"def test_load_dict(testdata_dir, tmp_trestle_dir):\n # prepare trestle project dir with the file\n test_utils.ensure_trestle_config_dir(tmp_trestle_dir)\n\n test_data_source = testdata_dir / 'split_merge/step4_split_groups_array/catalogs'\n\n catalogs_dir = Path('catalogs/')\n mycatalog_dir = catalogs_dir / 'mycatalog'\n catalog_dir = mycatalog_dir / 'catalog'\n\n # Copy files from test/data/split_merge/step4\n shutil.rmtree(catalogs_dir)\n shutil.copytree(test_data_source, catalogs_dir)\n\n actual_model_type, actual_model_alias, actual_model_instance = _load_dict(\n catalog_dir / 'metadata/responsible-parties')\n\n expexted_model_instance = {\n 'contact': ResponsibleParty.oscal_read(\n catalog_dir / 'metadata/responsible-parties/contact__responsible-party.json'\n ),\n 'creator': ResponsibleParty.oscal_read(\n catalog_dir / 'metadata/responsible-parties/creator__responsible-party.json'\n )\n }\n assert len(list(dictdiffer.diff(actual_model_instance, expexted_model_instance))) == 0\n assert actual_model_alias == 'catalog.metadata.responsible-parties'\n\n expected_model_type, _ = fs.get_contextual_model_type((catalog_dir / 'metadata/responsible-parties/').absolute())\n assert actual_model_type.__fields__['__root__'].outer_type_ == expected_model_type"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test the rating limit
|
def _testRatingLimit(self):
comment = models.Comment.objects.all()[0]
type = models.RatingType.objects.all()[0]
try:
val = type.limit + 10
rating = models.Rating(comment=comment, type=type, value=val)
rating.save()
assert rating.value == type.limit
finally:
rating.delete()
|
[
"def test_post_rating_greater_than_range(self):\n with self.login(self.test_user):\n response = self.post(\n \"pinax_ratings:rate\",\n content_type_id=ContentType.objects.get_for_model(self.forester).pk,\n object_id=self.forester.pk,\n data={\n \"rating\": settings.PINAX_RATINGS_NUM_OF_RATINGS + 1,\n \"category\": \"handling\"\n },\n )\n self.response_403(response)",
"def test_post_rating_less_than_range(self):\n with self.login(self.test_user):\n response = self.post(\n \"pinax_ratings:rate\",\n content_type_id=ContentType.objects.get_for_model(self.forester).pk,\n object_id=self.forester.pk,\n data={\n \"rating\": -1,\n \"category\": \"handling\"\n },\n )\n self.response_403(response)",
"def test_unsuccessful_rating_with_rate_value_more_than_five(self):\n response = self.client.post(\n reverse('articles:rate', kwargs={'slug': self.slug}),\n {'rating': 6},\n format=\"json\",\n **self.headers)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(str(response.data['errors']['rating'][0]),\n self.violate_max_value_error_message)",
"def validate_rating(self, key, rating):\n if not rating:\n raise AssertionError('No rating provided')\n if rating < 0 or rating > 10:\n raise AssertionError('Rating not in between 0 and 10')\n return rating",
"def test_rate_limit(self):\n status = self.client.rate_limit_status()\n resource_status = status[\"resources\"][\"search\"][\"/search/tweets\"]\n expected_status = {\"remaining\": 450, \"limit\": 450, \"reset\": 1380131036}\n self.assertEqual(resource_status, expected_status)",
"def bound_rating(self, rating):\n return 1.0 * max(0, min(int(rating + 0.5), 5))\n # return 1.0 * max(0, min(rating, 5))",
"def validate_rating(self, key, r):\n half_or_full = r % 1 == 0 or r % 1 == 0.5\n if r < 1 or r > 5 or not half_or_full:\n raise DBException({'message': 'Rating must be between 1 and 5, with 0.5 increments only.',\\\n 'code': 'rating'})\n return r",
"def validate_rating(self, key, value):\n assert value is None or value <= 10 and value >= 0\n return value",
"def clean_rating(self):\n rating = self.cleaned_data[\"rating\"]\n if rating < 1 or rating > len(REVIEW_RATING_CHOICES):\n raise forms.ValidationError(_(\"Rating should be between %(min)d and %(max)d\") % {'min': 1, 'max': len(REVIEW_RATING_CHOICES)})\n return rating",
"def test_update_page_ratings(self):\n self.whr.update_page_ratings(True)\n self.assert_close([1.0, 1.0], self.whr.page_ratings, \"page_ratings\")\n self.assert_close([0.5, 0.625], self.whr.page_var, \"page_var\")",
"def test_is_valid_fujita_rating_f_too_low(self):\n\n self.assertFalse(\n tornado_io._is_valid_fujita_rating(F_SCALE_RATING_TOO_LOW)\n )",
"def test_get_dealer_ratings(self):\n pass",
"def test_show_rating(self):\n self.assertEqual(self.show.rating, None)",
"def test_check_limit(self):\n self.assertEqual(functions.check_limit('2'), 2)",
"def test_rate_limit_with_resource(self):\n response = self.client.rate_limit_status(\"/search/tweets\")\n expected = {\"remaining\": 450, \"limit\": 450, \"reset\": 1380131036}\n self.assertEqual(response, expected)",
"def test_non_logged_in_users_cannot_rate(self):\n\n self.signup('user@example.com', 'user')\n self.login('user@example.com')\n csrf_token = self.get_csrf_token_from_response(\n self.testapp.get('/explore/%s' % self.EXP_ID))\n self.logout()\n\n ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)\n self.assertEqual(ratings['user_rating'], None)\n self.assertEqual(\n ratings['overall_ratings'],\n {'1': 0, '2': 0, '3': 0, '4': 0, '5': 0})\n self.put_json(\n '/explorehandler/rating/%s' % self.EXP_ID, {\n 'user_rating': 1\n }, csrf_token, expected_status_int=401, expect_errors=True\n )",
"def test_is_valid_fujita_rating_ef_too_low(self):\n\n self.assertFalse(\n tornado_io._is_valid_fujita_rating(EF_SCALE_RATING_TOO_LOW)\n )",
"def test_show_rating(self):\n self.assertTrue(isinstance(self.show.rating, float))",
"def test_is_valid_fujita_rating_f_too_high(self):\n\n self.assertFalse(\n tornado_io._is_valid_fujita_rating(F_SCALE_RATING_TOO_HIGH)\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test individual comment rating
|
def _testCommentRating(self):
try:
host = models.Host.objects.all()[0]
comment = models.Comment(text='test', host=host)
comment.save()
types = models.RatingType.objects.all()
items = []
for value, type in zip([3, 4, 5], types):
tmp_obj = models.Rating(comment=comment, type=type, value=value)
tmp_obj.save()
items.append(tmp_obj)
        assert abs(comment.rating() - 4.0) < .0001, comment.rating()
for tmp_obj in items:
tmp_obj.delete()
items = []
for value, type in zip([3, 3], types):
tmp_obj = models.Rating(comment=comment, type=type, value=value)
tmp_obj.save()
items.append(tmp_obj)
assert comment.rating() == 3.0, comment.rating()
finally:
for tmp_obj in items:
tmp_obj.delete()
comment.delete()
|
[
"def test_upvote_modifies_comment_score(self):\n comment = Comment.objects.get(body=\"987XYZ\")\n self.assertEqual(comment.score, DEFAULT_SCORE)\n vote = Vote.create(comment=comment, value=1, voter=self.user)\n comment = Comment.objects.get(body=\"987XYZ\")\n self.assertEqual(comment.score, DEFAULT_SCORE + 1)",
"def test_downvote_modifies_comment_score(self):\n comment = Comment.objects.get(body=\"987XYZ\")\n self.assertEqual(comment.score, DEFAULT_SCORE)\n vote = Vote.create(comment=comment, value=-1, voter=self.user)\n comment = Comment.objects.get(body=\"987XYZ\")\n self.assertEqual(comment.score, DEFAULT_SCORE - 1)",
"def test_default_score_comment(self):\n comment = Comment.objects.get(body=\"987XYZ\")\n self.assertEqual(comment.score, DEFAULT_SCORE)",
"def test_assign_and_read_ratings(self):\n\n self.signup('user@example.com', 'user')\n self.login('user@example.com')\n csrf_token = self.get_csrf_token_from_response(\n self.testapp.get('/explore/%s' % self.EXP_ID))\n\n # User checks rating\n ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)\n self.assertEqual(ratings['user_rating'], None)\n self.assertEqual(\n ratings['overall_ratings'],\n {'1': 0, '2': 0, '3': 0, '4': 0, '5': 0})\n\n # User rates and checks rating\n self.put_json(\n '/explorehandler/rating/%s' % self.EXP_ID, {\n 'user_rating': 2\n }, csrf_token\n )\n ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)\n self.assertEqual(ratings['user_rating'], 2)\n self.assertEqual(\n ratings['overall_ratings'],\n {'1': 0, '2': 1, '3': 0, '4': 0, '5': 0})\n\n # User re-rates and checks rating\n self.login('user@example.com')\n self.put_json(\n '/explorehandler/rating/%s' % self.EXP_ID, {\n 'user_rating': 5\n }, csrf_token\n )\n ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)\n self.assertEqual(ratings['user_rating'], 5)\n self.assertEqual(\n ratings['overall_ratings'],\n {'1': 0, '2': 0, '3': 0, '4': 0, '5': 1})\n\n self.logout()",
"def review_rating(review):\n return review[1]",
"def test_ratings_by_different_users(self):\n\n self.signup('a@example.com', 'a')\n self.signup('b@example.com', 'b')\n\n self.login('a@example.com')\n csrf_token = self.get_csrf_token_from_response(\n self.testapp.get('/explore/%s' % self.EXP_ID))\n self.put_json(\n '/explorehandler/rating/%s' % self.EXP_ID, {\n 'user_rating': 4\n }, csrf_token\n )\n self.logout()\n\n self.login('b@example.com')\n csrf_token = self.get_csrf_token_from_response(\n self.testapp.get('/explore/%s' % self.EXP_ID))\n ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)\n self.assertEqual(ratings['user_rating'], None)\n self.put_json(\n '/explorehandler/rating/%s' % self.EXP_ID, {\n 'user_rating': 4\n }, csrf_token\n )\n ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)\n self.assertEqual(ratings['user_rating'], 4)\n self.assertEqual(\n ratings['overall_ratings'],\n {'1': 0, '2': 0, '3': 0, '4': 2, '5': 0})\n self.logout()",
"def test_get_dealer_ratings(self):\n pass",
"def _testRatingLimit(self):\n\n comment = models.Comment.objects.all()[0]\n type = models.RatingType.objects.all()[0]\n try:\n val = type.limit + 10\n rating = models.Rating(comment=comment, type=type, value=val)\n rating.save()\n assert rating.value == type.limit\n finally:\n rating.delete()",
"def test_upvote_then_downvote_same_user_leaves_comment_score_one_less(self):\n comment = Comment.objects.get(body=\"987XYZ\")\n # self.assertEqual(len(post_qs), 1)\n self.assertEqual(comment.score, DEFAULT_SCORE)\n comment = Comment.objects.get(body=\"987XYZ\")\n\n vote1 = Vote.create(comment=comment, value=1, voter=self.user)\n comment = Comment.objects.get(body=\"987XYZ\")\n self.assertEqual(comment.score, DEFAULT_SCORE + 1)\n\n vote2 = Vote.create(comment=comment, value=-1, voter=self.user)\n comment = Comment.objects.get(body=\"987XYZ\")\n self.assertEqual(comment.score, DEFAULT_SCORE - 1)",
"def update_comment_usefulness(self):\n self.cursor.execute(\"\"\"UPDATE comment SET veryUseful=0, useful=0, useless=0, avg_usefulness=NULL\"\"\")\n self.db.commit()\n self.cursor.execute(\"\"\"SELECT * FROM rates\"\"\")\n for rating in self.cursor.fetchall():\n self.update_comment_score(rating[0], rating[1], rating[2])",
"def test_show_rating(self):\n self.assertEqual(self.show.rating, None)",
"def test_comment_flagging(self):\n comment = self.place_comment(self.article, comment='hello world',\n next=self.article_url)\n flag = self.flag_comment(comment)\n self.assertFalse(comment.is_moderated())\n self.assertFalse(comment.is_community_moderated())\n self.assertEqual(flag.flag, UserCommentFlag.SUGGEST_REMOVAL)",
"def test_average_rating(self):\n self.new_project.save()\n\n review1 = Review.objects.create(project = self.new_project, user = self.new_user, design = 8, usability = 5, content = 9, comment = 'This is a nice website.')\n\n review2 = Review.objects.create(project = self.new_project, user = self.new_user, design = 6, usability = 5, content = 3, comment = 'This is a nice website.')\n\n self.assertEqual(self.new_project.average_rating, 6.0)",
"def test_post_rating_with_valid_data(self):\n with self.login(self.test_user):\n content_type_id = ContentType.objects.get_for_model(self.forester).pk\n object_id = self.forester.pk\n response = self.post(\n \"pinax_ratings:rate\",\n content_type_id=content_type_id,\n object_id=object_id,\n data={\n \"rating\": 3,\n \"category\": \"handling\"\n },\n )\n self.response_200(response)\n self.assertContext(\"user_rating\", 3)\n self.assertContext(\"category\", \"handling\")\n self.assertContext(\"overall_rating\", 3.0)",
"def test_liking_disliked_comment(self):\n response = self.like_disliked_comment()\n data = response.data.get(\"comment\").get(\"likes_info\")\n self.assertEqual(\n response.status_code,\n status.HTTP_200_OK\n )\n self.assertEqual(\n data.get(\"likes_count\"),\n 1\n )\n self.assertEqual(\n data.get(\"like\"),\n True\n )\n self.assertEqual(\n data.get(\"dislikes_count\"),\n 0\n )\n self.assertEqual(\n data.get(\"dislike\"),\n False\n )\n self.assertEqual(\n response.data.get(\"message\"),\n \"Comment liked\"\n )",
"def create_rating(rating, comments, photo, user):\r\n\r\n rate_and_comment = Rating(rating=rating, comments=comments, photo=photo, user=user)\r\n\r\n db.session.add(rate_and_comment)\r\n db.session.commit()\r\n\r\n return rate_and_comment",
"def update_rating(self):\n self.rating = self._update_author_comments_rating() + self._update_posts_comments_rating() \\\n + self._update_posts_rating()\n self.save()",
"def test_downvote_then_upvote_same_user_leaves_comment_score_one_greater(self):\n comment = Comment.objects.get(body=\"987XYZ\")\n # self.assertEqual(len(post_qs), 1)\n self.assertEqual(comment.score, DEFAULT_SCORE)\n comment = Comment.objects.get(body=\"987XYZ\")\n\n vote1 = Vote.create(comment=comment, value=-1, voter=self.user)\n comment = Comment.objects.get(body=\"987XYZ\")\n self.assertEqual(comment.score, DEFAULT_SCORE - 1)\n\n vote2 = Vote.create(comment=comment, value=1, voter=self.user)\n comment = Comment.objects.get(body=\"987XYZ\")\n self.assertEqual(comment.score, DEFAULT_SCORE + 1)",
"def test_disliking_liked_comment(self):\n response = self.dislike_liked_comment()\n data = response.data.get(\"comment\").get(\"likes_info\")\n self.assertEqual(\n response.status_code,\n status.HTTP_200_OK\n )\n self.assertEqual(\n data.get(\"likes_count\"),\n 0\n )\n self.assertEqual(\n data.get(\"like\"),\n False\n )\n self.assertEqual(\n data.get(\"dislikes_count\"),\n 1\n )\n self.assertEqual(\n data.get(\"dislike\"),\n True\n )\n self.assertEqual(\n response.data.get(\"message\"),\n \"Comment disliked\"\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test individual host rating
|
def _testHostRating(self):
try:
user = auth.User.objects.all()[0]
category = models.Category.objects.all()[0]
host = models.Host(user=user, category=category,
url='http://blah.com')
host.save()
comment = models.Comment(text='test', host=host)
comment.save()
types = models.RatingType.objects.all()
items = []
for value, type in zip([3, 4, 5], types):
tmp_obj = models.Rating(comment=comment, type=type, value=value)
tmp_obj.save()
items.append(tmp_obj)
        assert abs(comment.rating() - 4) < .0001, comment.rating()
comment2 = models.Comment(text='test', host=host)
comment2.save()
for value, type in zip([3, 3, 3], types):
tmp_obj = models.Rating(comment=comment2, type=type, value=value)
tmp_obj.save()
items.append(tmp_obj)
        assert abs(comment2.rating() - 3.0) < .0001, comment2.rating()
assert host.rating() == 3.5, host.rating()
assert host.rating(100) == 70, host.rating(100)
finally:
try:
for tmp_obj in items:
tmp_obj.delete()
comment.delete()
comment2.delete()
host.delete()
except:
pass
|
[
"def get_host_risk(self):",
"def test_get_dealer_ratings(self):\n pass",
"def test_ratings_by_different_users(self):\n\n self.signup('a@example.com', 'a')\n self.signup('b@example.com', 'b')\n\n self.login('a@example.com')\n csrf_token = self.get_csrf_token_from_response(\n self.testapp.get('/explore/%s' % self.EXP_ID))\n self.put_json(\n '/explorehandler/rating/%s' % self.EXP_ID, {\n 'user_rating': 4\n }, csrf_token\n )\n self.logout()\n\n self.login('b@example.com')\n csrf_token = self.get_csrf_token_from_response(\n self.testapp.get('/explore/%s' % self.EXP_ID))\n ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)\n self.assertEqual(ratings['user_rating'], None)\n self.put_json(\n '/explorehandler/rating/%s' % self.EXP_ID, {\n 'user_rating': 4\n }, csrf_token\n )\n ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)\n self.assertEqual(ratings['user_rating'], 4)\n self.assertEqual(\n ratings['overall_ratings'],\n {'1': 0, '2': 0, '3': 0, '4': 2, '5': 0})\n self.logout()",
"def do_score(self, hosts, vm, args):\n try:\n hostScores = []\n # use hosts IDs and VM ID to call the Rest API and make a decision\n for hostID in hosts:\n # Do work\n hostScores.append((hostID, 50))\n print(hostScores)\n except Exception as ex:\n print(ex, file=sys.stderr)",
"def host_reputation(host):\n # Create the required data dictionary for Host/Reputation\n api_data = {\n 'host': host\n }\n response = http_request(endpoint=HOST_REPUTE_API, data=api_data)\n\n if response.get('errorNo') != 0:\n return_error('API Returned, {}:{}'.format(response.get('errorNo'), response.get('errorMsg')))\n\n return response",
"def test_multiplier_none(self):\n self.flags(pci_weight_multiplier=0.0, group='filter_scheduler')\n\n hosts = [\n ('host1', 'node1', [4, 1]), # 5 devs\n ('host2', 'node2', [10]), # 10 devs\n ('host3', 'node3', [1, 1, 1, 1]), # 4 devs\n ]\n hostinfo_list = self._get_all_hosts(hosts)\n\n request = objects.InstancePCIRequest(count=1,\n spec=[{'vendor_id': '8086'}])\n requests = objects.InstancePCIRequests(requests=[request])\n spec_obj = objects.RequestSpec(pci_requests=requests)\n\n # we do not know the host as all have same weight\n weighed_hosts = self._get_weighed_hosts(hostinfo_list, spec_obj)\n for weighed_host in weighed_hosts:\n # the weigher normalizes all weights to 0 if they're all equal\n self.assertEqual(0.0, weighed_host.weight)",
"def test_multiplier_with_pci(self):\n # none of the hosts will have less than the number of devices required\n # by the instance: the NUMATopologyFilter takes care of this for us\n hosts = [\n ('host1', 'node1', [4, 1]), # 5 devs\n ('host2', 'node2', [10]), # 10 devs\n ('host3', 'node3', [1, 1, 1, 1]), # 4 devs\n ]\n hostinfo_list = self._get_all_hosts(hosts)\n\n # we request PCI devices\n request = objects.InstancePCIRequest(count=4,\n spec=[{'vendor_id': '8086'}])\n requests = objects.InstancePCIRequests(requests=[request])\n spec_obj = objects.RequestSpec(pci_requests=requests)\n\n # host3, which has the least free PCI devices, should win\n weighed_host = self._get_weighed_hosts(hostinfo_list, spec_obj)[0]\n self.assertEqual(1.0, weighed_host.weight)\n self.assertEqual('host3', weighed_host.obj.host)",
"def goodmorning(host):",
"def test_assign_and_read_ratings(self):\n\n self.signup('user@example.com', 'user')\n self.login('user@example.com')\n csrf_token = self.get_csrf_token_from_response(\n self.testapp.get('/explore/%s' % self.EXP_ID))\n\n # User checks rating\n ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)\n self.assertEqual(ratings['user_rating'], None)\n self.assertEqual(\n ratings['overall_ratings'],\n {'1': 0, '2': 0, '3': 0, '4': 0, '5': 0})\n\n # User rates and checks rating\n self.put_json(\n '/explorehandler/rating/%s' % self.EXP_ID, {\n 'user_rating': 2\n }, csrf_token\n )\n ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)\n self.assertEqual(ratings['user_rating'], 2)\n self.assertEqual(\n ratings['overall_ratings'],\n {'1': 0, '2': 1, '3': 0, '4': 0, '5': 0})\n\n # User re-rates and checks rating\n self.login('user@example.com')\n self.put_json(\n '/explorehandler/rating/%s' % self.EXP_ID, {\n 'user_rating': 5\n }, csrf_token\n )\n ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)\n self.assertEqual(ratings['user_rating'], 5)\n self.assertEqual(\n ratings['overall_ratings'],\n {'1': 0, '2': 0, '3': 0, '4': 0, '5': 1})\n\n self.logout()",
"def test_post_rating_less_than_range(self):\n with self.login(self.test_user):\n response = self.post(\n \"pinax_ratings:rate\",\n content_type_id=ContentType.objects.get_for_model(self.forester).pk,\n object_id=self.forester.pk,\n data={\n \"rating\": -1,\n \"category\": \"handling\"\n },\n )\n self.response_403(response)",
"def host_estimatescore(self, host_settings=None):\n return self.http.get(host_constants.ESTIMATE_SCORE_URL, host_settings)",
"def test_alt_host_perfdata(self, aggregator):\n self.log_file = tempfile.NamedTemporaryFile()\n perfdata_file = tempfile.NamedTemporaryFile()\n\n # Get the config\n config, _ = get_config(\n \"host_perfdata_file={}\\n\"\n \"host_perfdata_file_template={}\".format(perfdata_file.name, NAGIOS_TEST_ALT_HOST_TEMPLATE),\n host_perf=True,\n )\n\n # Set up the check\n nagios = NagiosCheck(CHECK_NAME, {}, config['instances'])\n\n # Run the check once\n nagios.check(config['instances'][0])\n\n with open(NAGIOS_TEST_HOST, \"r\") as f:\n nagios_perf = ensure_bytes(f.read())\n\n perfdata_file.write(nagios_perf)\n perfdata_file.flush()\n\n nagios.check(config['instances'][0])\n\n # Test metrics\n expected_metrics = [\n {\n 'name': 'nagios.host.pl',\n 'timestamp': 1339511440,\n 'value': 0.0,\n 'hostname': 'localhost',\n 'tags': ['unit:%', 'warn:80', 'crit:100', 'min:0'],\n },\n {\n 'name': 'nagios.host.rta',\n 'timestamp': 1339511440,\n 'value': 0.048,\n 'hostname': 'localhost',\n 'tags': ['unit:ms', 'warn:3000.000000', 'crit:5000.000000', 'min:0.000000'],\n },\n ]\n\n for metric in expected_metrics:\n aggregator.assert_metric(metric['name'], metric['value'], tags=metric['tags'], hostname=metric['hostname'])\n\n aggregator.assert_all_metrics_covered()",
"def test_post_rating_greater_than_range(self):\n with self.login(self.test_user):\n response = self.post(\n \"pinax_ratings:rate\",\n content_type_id=ContentType.objects.get_for_model(self.forester).pk,\n object_id=self.forester.pk,\n data={\n \"rating\": settings.PINAX_RATINGS_NUM_OF_RATINGS + 1,\n \"category\": \"handling\"\n },\n )\n self.response_403(response)",
"def test_rate_game(self):\n url = reverse('rate-game')\n data = {\n 'igdb': self.game.igdb,\n 'name': self.game.name,\n 'slug': self.game.slug,\n 'cover_id': self.game.cover_id,\n 'backdrop_id': self.game.backdrop_id,\n 'rating': 4.5\n }\n response = self.client.post(url, data, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_perform_host_action(self):\n pass",
"def test_update_route_ratings(self):\n self.whr.update_route_ratings(True)\n # Ratings should not change: both ascents had a 50% probability assuming\n # the initial ratings.\n self.assert_close([1.0, 1.0, 1.0], self.whr.route_ratings, \"route_ratings\")\n self.assert_close(\n [2.0 / 3.0, 2.0 / 3.0, 2.0 / 3.0], self.whr.route_var, \"route_var\"\n )",
"def test_search_room_rate(self):\n pass",
"def main():\n module = AnsibleModule(\n argument_spec=dict(\n host=dict(type='str', required=True),\n destination=dict(type='str', required=True),\n repeat_count=dict(type='int', default=5),\n vrf_name=dict(type='str'),\n min_success_rate=dict(type='int', default=100)\n ),\n supports_check_mode=True\n )\n\n if module.check_mode:\n module.exit_json(changed=False)\n\n try:\n retvals = ping(module.params['host'],\n module.params['destination'],\n module.params['repeat_count'],\n module.params['vrf_name'])\n except Exception as exc:\n module.fail_json(msg='Reachability validation failed ({})'.format(exc))\n\n retvals['changed'] = False\n\n if retvals['success_rate'] >= module.params['min_success_rate']:\n module.exit_json(**retvals)\n else:\n module.fail_json(msg=('Success rate lower than expected ({}<{})').\n format(retvals['success_rate'],\n module.params['min_success_rate']))",
"def test007_distrbute_router_os_over_cpu_nodes(self):\n self.lg('Check the number of the router os on all the available cpu node')\n\n cb = j.clients.osis.getNamespace('cloudbroker')\n vcl = j.clients.osis.getNamespace('vfw')\n gid = j.application.whoAmI.gid\n stacks = cb.stack.list()\n \n stacks_list = []\n for stackId in stacks:\n stack = cb.stack.get(stackId)\n if stack.status != 'ENABLED':\n continue\n \n referenceId = int(stack.referenceId)\n number_of_ros = vcl.virtualfirewall.count({'gid': gid, 'nid': referenceId})\n stacks_list.append((referenceId, number_of_ros))\n\n min_ros_count = min(stacks_list, key=lambda tup: tup[1])[1]\n available_stacks = [stack[0] for stack in stacks_list if stack[1] == min_ros_count]\n\n self.lg('Create new cloudspace')\n cloudspace_id = self.cloudapi_cloudspace_create(self.account_id, self.location, self.account_owner)\n\n self.lg('Check that the new cloudspace is created on the cpu node with smallest number of router os') \n vfw = self.api.cloudbroker.cloudspace.getVFW(cloudspace_id)\n self.assertIn(vfw['nid'], available_stacks)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test the different rating categories
|
def _testRatingCategories(self):
try:
user = auth.User.objects.all()[0]
category = models.Category.objects.all()[0]
host = models.Host(user=user, category=category,
url='http://blah.com')
host.save()
comment = models.Comment(text='test', host=host)
comment.save()
types = models.RatingType.objects.all()
items = []
for value, type in zip([3, 4, 5], types):
tmp_obj = models.Rating(comment=comment, type=type, value=value)
tmp_obj.save()
items.append(tmp_obj)
        assert abs(comment.rating() - 4.0) < .0001, comment.rating()
comment2 = models.Comment(text='test', host=host)
comment2.save()
for value, type in zip([3, 3, 3], types):
tmp_obj = models.Rating(comment=comment2, type=type, value=value)
tmp_obj.save()
items.append(tmp_obj)
        assert abs(comment2.rating() - 3.0) < .0001, comment2.rating()
assert host.rating() == 3.5, host.rating()
ratings = host.ratings()
assert ratings['Support'] == 3.5, ratings
assert ratings['Features'] == 3.0
assert ratings['Uptime'] == 4.0
finally:
try:
for tmp_obj in items:
tmp_obj.delete()
comment.delete()
comment2.delete()
host.delete()
except:
pass
|
[
"def test_get_cat_score(self):\n classes = ['blue skin', 'pointy ears']\n negated_classes = []\n categories = ['ear feature', 'skin feature']\n\n categorical_score = self.annot_scorer._get_categorical_score(\n classes, negated_classes, categories,\n self.negation_weight, self.mock_ic_values\n )\n\n assert categorical_score == 0.7002519289078384",
"def test_post_rating_with_invalid_category(self):\n with self.login(self.test_user):\n response = self.post(\n \"pinax_ratings:rate\",\n content_type_id=ContentType.objects.get_for_model(self.forester).pk,\n object_id=self.forester.pk,\n data={\n \"rating\": settings.PINAX_RATINGS_NUM_OF_RATINGS,\n \"category\": \"non-existing-category\"\n },\n )\n self.response_403(response)",
"def test_category(self):\n # XXX identifiers would be groovy\n self.check_search(\n dict(category=u'36:self'), # trap\n [u'Ingrain'],\n 'simple category search, vs self',\n exact=True,\n )\n self.check_search(\n dict(category=u'14:target'), # protect\n [u'Conversion 2', u'False Swipe'],\n 'simple category search, vs target',\n exact=True,\n )\n\n # Multiple categories\n # sleep OR attack up\n self.check_search(\n dict(category=[u'29:self', u'15:target'], category_operator=u'any'),\n [u'Rest', u'Swagger'],\n 'multiple category search (OR)',\n exact=True,\n )\n\n # sleep AND heal self\n self.check_search(\n dict(category=[u'29:self', u'13:self'], category_operator=u'all'),\n [u'Rest'],\n 'multiple category search (AND)',\n exact=True,\n )",
"def test_get_cat_score_w_negation(self):\n classes = ['blue skin', 'pointy ears']\n negated_classes = ['large ears', 'increased pigmentation']\n\n categories = ['ear feature', 'skin feature']\n\n categorical_score = self.annot_scorer._get_categorical_score(\n classes, negated_classes, categories,\n self.negation_weight, self.mock_ic_values\n )\n\n assert categorical_score == 0.7201759238096741",
"def test_get_dealer_ratings(self):\n pass",
"def test_post_rating_with_valid_data(self):\n with self.login(self.test_user):\n content_type_id = ContentType.objects.get_for_model(self.forester).pk\n object_id = self.forester.pk\n response = self.post(\n \"pinax_ratings:rate\",\n content_type_id=content_type_id,\n object_id=object_id,\n data={\n \"rating\": 3,\n \"category\": \"handling\"\n },\n )\n self.response_200(response)\n self.assertContext(\"user_rating\", 3)\n self.assertContext(\"category\", \"handling\")\n self.assertContext(\"overall_rating\", 3.0)",
"def test_rating_from_product(self):\n test_rating_object_1 = customer_review.objects.get(product=1)\n test_rating_object_2 = customer_review.objects.get(product=2)\n self.assertEqual(str(test_rating_object_1.rating), '5')\n self.assertEqual(str(test_rating_object_2.rating), '1')",
"def test_ratings_by_different_users(self):\n\n self.signup('a@example.com', 'a')\n self.signup('b@example.com', 'b')\n\n self.login('a@example.com')\n csrf_token = self.get_csrf_token_from_response(\n self.testapp.get('/explore/%s' % self.EXP_ID))\n self.put_json(\n '/explorehandler/rating/%s' % self.EXP_ID, {\n 'user_rating': 4\n }, csrf_token\n )\n self.logout()\n\n self.login('b@example.com')\n csrf_token = self.get_csrf_token_from_response(\n self.testapp.get('/explore/%s' % self.EXP_ID))\n ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)\n self.assertEqual(ratings['user_rating'], None)\n self.put_json(\n '/explorehandler/rating/%s' % self.EXP_ID, {\n 'user_rating': 4\n }, csrf_token\n )\n ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)\n self.assertEqual(ratings['user_rating'], 4)\n self.assertEqual(\n ratings['overall_ratings'],\n {'1': 0, '2': 0, '3': 0, '4': 2, '5': 0})\n self.logout()",
"def test_search_category_level(self):\n pass",
"def test_categories_in_porducts(self):\n response = self.client.post('api/v1/category/products',\n data=json.dumps(category_product[0]),\n content_type='application/json',\n headers=self.attendant_headers)\n response = self.client.get('api/v1/category/products',\n headers=self.attendant_headers)\n self.assertEqual(response.status_code, 200)\n self.assertIn('shirt', str(response.data))",
"def test_show_rating(self):\n self.assertEqual(self.show.rating, None)",
"def testGetAllRatings(self):\n url = TestEndpoints.composeUrl(\"ratings\")\n res = self.sendRequest(\"GET\", url)\n self.assertEqual(res.status_code, 200)\n ratings = res.json().get(\"ratings\")\n self.assertTrue(type(ratings) == list)\n ids = [r[\"id\"] for r in ratings]\n self.assertEqual(set(ids), set(TestEndpoints.products))",
"def strategic_drivers(self):",
"def test_critics_darling(self):\n #checks to see if the correct response is returned for critics_darling\n self.assertEqual(critics_darling(self.movieDb,self.ratingDb),['Meg'])\n #Tests if there is a tie\n self.assertEqual(critics_darling(self.movieDb,self.ratingsisDa),['Meg','Brad'])",
"def processed_stars(test=False,\n categories=('books', 'dvd', 'electronics', 'kitchen')):\n\n if isinstance(categories, str):\n categories = [categories]\n\n # loop over each category and extract features and labels per line\n # append these to the final\n labeled_features = []\n for category in categories:\n # open the relevant file, either train or test\n file = f'./processed_stars/{category}/'\n if not test:\n file += 'train'\n elif test:\n file += 'test'\n with open(file, encoding='utf-8') as f:\n raw = f.read()\n # one document per line, so split into lines\n reviews = raw.split('\\n')\n # extract features and their counts for each line\n features = [{ftr[0].strip(): int(ftr[1])\n for ftr in re.findall(r'(.*?(?<!#label#)):(\\d)', line)}\n for line in reviews]\n # extract all labels\n labels = re.findall(r'#label#:(\\d+.\\d+)', raw)\n # zip the features list and labels into tuples and add to final list\n labeled_features += [(f_set, float(label))\n for f_set, label in zip(features, labels)]\n\n return labeled_features",
"def test2(self):\n pozytywne = 0\n wszystkie = 0\n for doc in self.test_documents:\n number_of_categories = len(doc.categories)\n infered_categories = self.model.classify(\n args={\"number-of-categories\": number_of_categories, \"get-similarity\": False},text=doc.text)\n add_positive=True\n for cat in doc.categories:\n if cat not in infered_categories:\n #Jeżeli jest choć jedna kategoria wśród kategorii dokumentu, której nie ma\n # w zbiorze infered_categories, to nie można tago zaliczyć jako test pozytywny.\n add_positive = False\n if add_positive:\n pozytywne += 1\n wszystkie += 1\n return (float(pozytywne)/float(wszystkie))",
"def test_extract_categories():\n pass",
"def test_get_categories(self):\n pass",
"def get_category_ratings(self):\n category_ratings = dict()\n for cat_rating in self.category_ratings.all():\n category_ratings[cat_rating.category.name] = cat_rating.rating\n return category_ratings"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Hit a BJcard and append it. Then, find all possible sums and the current hand. The current hand is defined as the max of the possible sums; the current hand should be -1 if burst.
|
def hit(self, card):
    self.append(card)
    values = [card.value()]
    # An ace (value 1) can also count as 11.
    if values[0] < 2:
        values.append(values[0] + 10)
    new_sums = sorted(set(v + s for v in values for s in self.possible_sums if v + s <= 21))
    if len(new_sums) == 0:
        # No sum stays at or under 21: the hand is burst.
        self.hand = -1
    else:
        # The current hand is the largest sum not exceeding 21.
        self.hand = new_sums[-1]
    self.possible_sums = new_sums
|
[
"def calculate_hand(self):\n self.value = 0\n aces = 0\n for elem in self.cards:\n if elem[0] == \"J\" or elem[0] == \"Q\" or elem[0] == \"K\":\n self.value += 10\n elif elem[0] == \"A\":\n aces += 1\n else:\n self.value += elem[0]\n\n if aces > 0:\n if self.value <= 10 and aces == 1:\n self.value += 11\n elif self.value <= 9 and aces == 2:\n self.value += 12\n elif self.value <= 8 and aces == 3:\n self.value += 13\n elif self.value <= 7 and aces == 4:\n self.value += 14\n else:\n self.value += aces\n\n if self.value > 21:\n self.bust = True\n\n if self.value == 21:\n self.blackjack = True",
"def bj_hand_value(hand):\n soft = False\n aces = hand.count(11)\n if aces > 0:\n soft = True\n soft_total = total = sum(hand)\n while aces > 0 and soft_total > 21:\n soft_total -= 10\n aces -= 1\n return (soft_total, soft, total)",
"def calcCBets(self, hand):\n # XXX: enumerate(list, start=x) is python 2.6 syntax; 'start'\n # came there\n #for i, street in enumerate(hand.actionStreets[2:], start=1):\n for i, street in enumerate(hand.actionStreets[2:]):\n name = self.lastBetOrRaiser(hand.actionStreets[i+1])\n if name:\n chance = self.noBetsBefore(hand.actionStreets[i+2], name)\n self.handsplayers[name]['street%dCBChance' % (i+1)] = True\n if chance == True:\n self.handsplayers[name]['street%dCBDone' % (i+1)] = self.betStreet(hand.actionStreets[i+2], name)",
"def blackjack_result(cards):\n sum = 0\n a_cards = 0\n dictionary = {\n '2': 2,\n '3': 3,\n '4': 4,\n '5': 5,\n '6': 6,\n '7': 7,\n '8': 8,\n '9': 9,\n 'T': 10,\n 'J': 10,\n 'Q': 10,\n 'K': 10,\n }\n for card in cards.split():\n if card in dictionary:\n sum = sum + dictionary[card]\n elif card == 'A':\n a_cards = a_cards + 1\n\n if a_cards > 0:\n for i in range(a_cards):\n if a_cards > 1:\n sum = sum + 1\n a_cards = a_cards - 1\n else:\n if sum + 11 < 22:\n sum = sum + 11\n else:\n sum = sum + 1\n\n return sum",
"def computeHand(i1, i2, i3, i4, i5):\n arr = [Card(i1), Card(i2), Card(i3), Card(i4), Card(i5)];\n\n flushCount = [0, 0, 0, 0];\n rankCount = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];\n\n hand = (1 << i1) + (1 << i2) + (1 << i3) + (1 << i4) + (1 << i5);\n\n cards = arr;\n\n for i in range(len(arr)):\n rankCount[arr[i].rank] += 1\n flushCount[arr[i].suit] += 1\n\n # find straight\n scount = 1 if rankCount[12] > 0 else 0; # for the wheel straight\n highestStraight = -1;\n for i in range(len(rankCount)) :\n if (rankCount[i] > 0) :\n scount += 1\n if (scount >= 5) :\n highestStraight = i\n else :\n scount = 0\n\n # find flush\n for i in range(len(flushCount)) :\n if (flushCount[i] >= 5) :\n if (highestStraight != -1) :\n # if its a flush and straight, must be a straight flush\n return Hand(STRAIGHT_FLUSH, [highestStraight], hand, cards)\n else :\n highest = 0\n kickers = []\n for j in range(len(rankCount)):\n if rankCount[j] > 0: \n highest = j\n kickers.append(j)\n return Hand(FLUSH, [highest], hand, cards, kickers[::-1]);\n\n # if its not a flush, then must be ordinary straight\n if highestStraight != -1 :\n return Hand(STRAIGHT, [highestStraight], hand, cards);\n\n # check quads, full house, 3 of a kind, two pair, pair\n kickers = [];\n strength = HIGH_CARD;\n rank = [-1];\n for i in range(len(rankCount)) :\n if rankCount[i] == 4 :\n strength = FOUR_OF_A_KIND\n rank = [i]\n elif rankCount[i] == 3 :\n if strength == PAIR :\n strength = FULL_HOUSE\n rank = [i, rank[0]]\n else :\n strength = THREE_OF_A_KIND\n rank = [i]\n elif rankCount[i] == 2 :\n if strength == THREE_OF_A_KIND :\n strength = FULL_HOUSE;\n rank = [rank[0], i];\n elif strength == PAIR :\n strength = TWO_PAIR\n rank = [i, rank[0]]\n else :\n strength = PAIR\n rank = [i]\n elif rankCount[i] == 1 :\n kickers.append(i)\n\n return Hand(strength, rank, hand, cards, kickers[::-1])",
"def sum_card_values(hand):\n handsum = 0\n numaces = 0\n for rank, suit in hand:\n if rank == 'Jack' or rank == 'Queen' or rank == 'King':\n rank = 10\n elif rank == 'Ace':\n rank = 1\n numaces +=1\n handsum += rank\n \n num = 0\n while num <= numaces:\n if handsum + 10*num <= 21:\n handsum = handsum + 10*num\n num +=1\n \n return handsum",
"def bustHand(hand):\n if sum(hand) > 21:\n return True\n else:\n return False",
"def create_best_hand_smart(cards):\n cards = sorted([Card(c) for c in cards], reverse=True)\n \n # Get all flushes\n flushes = []\n for suit in Card.SUITS.values():\n suited = [str(c) for c in cards if c.suit == suit]\n if len(suited) >= 5:\n combos = unique_combinations(suited, 5)\n for combo in combos: flushes.append(Hand(combo))\n flushes = sorted(flushes, reverse=True)\n if (flushes and flushes[0].rank() >= Hand.STRAIGHT_FLUSH):\n # Straight flush! No need to check anything else\n yield flushes[0]\n \n #Get all sets\n merged = {}\n for c in cards:\n if c.value in merged:\n merged[c.value] = merged[c.value] + 1\n else:\n merged[c.value] = 1\n multiples = [m for m in sorted(merged.items(), key = operator.itemgetter(1), reverse = True) if m[1] > 1]\n quads = [c[0] for c in multiples if c[1] == 4]\n quads = [c for c in cards if c.value in quads]\n trips = [c[0] for c in multiples if c[1] == 3]\n trips = [c for c in cards if c.value in trips]\n pairs = [c[0] for c in multiples if c[1] == 2]\n pairs = [c for c in cards if c.value in pairs]\n remaining = [c for c in cards if c.value not in [m[0] for m in multiples]]\n \n if quads:\n h = quads[:4]\n remaining = [c for c in cards if c.value not in [cc.value for cc in h]][:1]\n for r in remaining: h.append(r)\n yield Hand([str(c) for c in h])\n if trips and pairs:\n # Get a full house together\n h = trips[:3]\n remaining = pairs[:2]\n for r in remaining: h.append(r)\n yield Hand([str(c) for c in h])\n if flushes:\n # We've already got a flush, return it!\n yield flushes[0]\n # Look for a straight!\n mvals = sorted(merged.keys(), reverse=True)\n for i in range(0, len(mvals) -4, 1):\n if (mvals[i] - mvals[i + 4]) == 4:\n # Regular straight\n h = [[c for c in cards if c.value == v][0] for v in mvals[i:i + 5]]\n yield Hand([str(c) for c in h])\n elif 14 in [c.value for c in cards] and mvals[i + 1] == 5 and mvals[i + 4] == 2:\n # Ace low straight\n h = [[c for c in cards if c.value == v][0] for v in mvals[i + 1:i + 5]]\n h.append([c for c in cards if c.value == 14][0])\n yield Hand([str(c) for c in h])\n \n if trips:\n h = trips[:3]\n remaining = [c for c in cards if c.value not in [cc.value for cc in h]][:2]\n for r in remaining: h.append(r)\n yield Hand([str(c) for c in h])\n if pairs:\n if len(pairs) > 2:\n h = pairs[:4]\n remaining = [c for c in cards if c.value not in [cc.value for cc in h]][:1]\n for r in remaining: h.append(r)\n yield Hand([str(c) for c in h])\n else:\n h = pairs\n remaining = [c for c in cards if c.value not in [cc.value for cc in h]][:3]\n for r in remaining: h.append(r)\n yield Hand([str(c) for c in h])\n \n # High card, send the top 5 reverse-sorted cards\n yield Hand([str(c) for c in cards[:5]])",
"def hit(self, card):\n self.cards.hit(card)\n if self.cards.hand ==-1:\n self.state ='burst'",
"def best_hand(hands):\r\n best_val = 0\r\n sum = 0\r\n hand = None\r\n for h in hands:\r\n for t in h:\r\n sum = sum + t[1]\r\n if sum > best_val:\r\n best_val = sum\r\n hand = h\r\n\r\n return hand",
"def sum_cards(self, cards_list):\n sum = 0\n for num in cards_list:\n if num > 51: # means it's Joker\n sum += 0\n else:\n sum += self.num_to_card(num)\n\n return sum",
"def deck_finals(sender, instance, **kwargs):\n # TODO if total moves == 32:\n # TODO count, hands, count room, finished or not, create next deck, if not finished.\n last = instance.room.decks.filter(label=instance.room.current_label).last()\n # NOT the beginning.\n if instance.room.has_jack_of_clubs != 0:\n # define new trump.\n trump = -1\n else:\n trump = 1\n\n instance.room.trump_is_hidden = instance.room.decks.filter(label=instance.room.current_label).count() <= 1\n instance.room.save()\n\n if instance.hands.count() == 0:\n bag = list()\n for suit in SUITS:\n for card_number in CARD_NUMBERS:\n # bag.append(card_to_number(instance.trump, suit, card_number))\n bag.append(card_to_number(trump, suit, card_number)) # at the beginning the trump is unknown\n randomized_bag = list()\n while len(bag):\n # a <= n <= b\n # random.randint(a,b)\n random_number = random.randint(0, len(bag) - 1)\n randomized_bag.append(bag[random_number])\n bag.remove(bag[random_number])\n # search the jack_of_clubs for defining trump\n if trump == -1:\n for i in range(4):\n hand = randomized_bag[(i * 8):((i * 8) + 8)]\n for card in hand:\n if card[\"trump_priority\"] == TRUMP_PRIORITY_JACK * 40:\n if (i + 1) == instance.room.has_jack_of_clubs:\n trump = 1\n elif (i + 1) % 2 == instance.room.has_jack_of_clubs % 2:\n trump = 2\n elif (instance.room.has_jack_of_clubs % 4 + 1) == (i + 1):\n trump = 3\n else:\n trump = 4\n break\n # setting current trump\n instance.room.decks.filter(pk=last.pk).update(trump=trump, label=instance.room.current_label)\n for card in randomized_bag:\n if card[\"value\"] // 100 == trump:\n for card_number in CARD_NUMBERS:\n if card[\"value\"] % 100 == card_number[0]:\n if card[\"name\"] not in [\"jack_of_clubs\", \"jack_of_spades\", \"jack_of_hearts\", \"jack_of_diamonds\"]:\n card[\"trump_priority\"] = card_number[2]\n break\n\n for i in range(4):\n if i == 0:\n user = instance.room.user01\n elif i == 1:\n user = instance.room.user02\n elif i == 2:\n user = instance.room.user03\n else:\n user = instance.room.user04\n hand = Hand.objects.create(deck=instance, user=user)\n sorted_bag = sort_by_trump(trump, randomized_bag[(i * 8):((i * 8) + 8)])\n for j in range(len(sorted_bag)):\n # current_card = randomized_bag[i * 8 + j]\n current_card = sorted_bag[j]\n name = current_card[\"name\"]\n value = current_card[\"value\"]\n worth = current_card[\"worth\"]\n image_url = current_card[\"image_url\"]\n trump_priority = current_card[\"trump_priority\"]\n Card.objects.create(deck=instance, hand=hand, name=name, value=value, worth=worth, image_url=image_url,\n trump_priority=trump_priority)\n # has the FIRST\n if trump_priority == TRUMP_PRIORITY_JACK * 40 and instance.room.trump_is_hidden:\n instance.room.has_jack_of_clubs = i + 1\n # if not the first game, trump is not hidden.\n #TODO correction, which needs to be checked carefully\n\n # setting ace allowed\n if instance.total_moves == 0:\n if instance.trump == CLUBS_VALUE:\n instance.clubs = True\n elif instance.trump == SPADES_VALUE:\n instance.spades = True\n elif instance.trump == HEARTS_VALUE:\n instance.hearts = True\n elif instance.trump == DIAMONDS_VALUE:\n instance.diamonds = True\n if instance.room.owner.game_setting.ace_allowed:\n instance.clubs = True\n instance.spades = True\n instance.hearts = True\n instance.diamonds = True\n # in order to avoid infinite recursion in post saving method.\n instance.room.decks.filter(pk=last.pk).update(clubs=instance.clubs,\n spades=instance.spades,\n hearts=instance.hearts,\n diamonds=instance.diamonds)\n 
elif instance.total_moves % 4 == 1:\n card = instance.cards.get(pk=instance.moves.last().card_id)\n value = card.value // 100\n if value == CLUBS_VALUE:\n instance.clubs = True\n elif value == SPADES_VALUE:\n instance.spades = True\n elif value == HEARTS_VALUE:\n instance.hearts = True\n elif value == DIAMONDS_VALUE:\n instance.diamonds = True\n instance.room.decks.filter(pk=last.pk).update(clubs=instance.clubs,\n spades=instance.spades,\n hearts=instance.hearts,\n diamonds=instance.diamonds)\n\n instance.room.decks.filter(active=True, label=instance.room.current_label).exclude(pk=last.pk).update(active=False)\n\n if instance.total_moves >= 32 and instance.total_team01 + instance.total_team02 == TEAM_TOTAL_MAX_LOCAL:\n # Если козырь в начале был\n team01 = [1, 3]\n team02 = [2, 4]\n if instance.trump > 2: # Trump is Hearts or Diamonds\n if instance.room.has_jack_of_clubs % 2 == 0: # Trump user of room is 2 or 4\n current_trump_team = 1\n else:\n current_trump_team = 2\n else: # Trump is Clubs or Spades\n if instance.room.has_jack_of_clubs % 2 == 0: # Trump user of room is 2 or 4\n current_trump_team = 2\n else:\n current_trump_team = 1\n if instance.total_team01 == TEAM_TOTAL_MAX_LOCAL and instance.team02_received is False:\n # голая реализована командой 01\n if instance.room.owner.game_setting.on_full == ON_EGGS_OPEN_FOUR:\n # откывается 4 глаза команда 01\n instance.room.total_team01 += 4\n instance.room.save()\n else:\n # голая = партия Выигрывает команда 01\n instance.room.total_team01 = TEAM_TOTAL_MAX\n instance.room.save()\n pass\n elif instance.total_team02 == TEAM_TOTAL_MAX_LOCAL and instance.team01_received is False:\n # голая реализована командой 02\n if instance.room.owner.game_setting.on_full == ON_EGGS_OPEN_FOUR:\n # откывается 4 глаза команда 02\n instance.room.total_team02 += 4\n instance.room.save()\n else:\n # голая = партия Выигрывает команда 02\n instance.room.total_team02 = TEAM_TOTAL_MAX\n instance.room.save()\n elif instance.room.previous_eggs > 0:\n # предыдущая игра ЯЙЦА\n if instance.room.owner.game_setting.on_eggs == ON_EGGS_OPEN_FOUR:\n # открывается 4 глаза\n if instance.total_team01 > instance.total_team02:\n # открывает команда 01\n instance.room.total_team01 += 4 * instance.room.previous_eggs\n instance.room.previous_eggs = 0\n instance.room.save()\n elif instance.total_team01 < instance.total_team02:\n # открывает команда 02\n instance.room.total_team02 += 4 * instance.room.previous_eggs\n instance.room.previous_eggs = 0\n instance.room.save()\n else:\n # опять ЯЙЦА\n instance.room.previous_eggs = instance.room.previous_eggs + 1\n instance.room.save()\n elif instance.room.owner.game_setting.on_eggs == ON_EGGS_OPEN_DOUBLE:\n # открывается удвоенно\n if instance.total_team01 > instance.total_team02:\n # открывает команда 01\n if current_trump_team in team01:\n # козырь с команды 01\n if instance.total_team02 < instance.room.owner.game_setting.on_save:\n # команда 02 не набрала СПАС\n instance.room.total_team01 += 4 * instance.room.previous_eggs\n else:\n # команда 02 набрала СПАС\n instance.room.total_team01 += 2 * instance.room.previous_eggs\n instance.room.previous_eggs = 0\n instance.room.save()\n elif current_trump_team in team02:\n # козырь с команды 02\n if instance.total_team02 < instance.room.owner.game_setting.on_save:\n # команда 02 не набрала СПАС\n instance.room.total_team01 += 6 * instance.room.previous_eggs\n else:\n # команда 02 набрала СПАС\n instance.room.total_team01 += 4 * instance.room.previous_eggs\n instance.room.previous_eggs = 0\n 
instance.room.save()\n elif instance.total_team01 < instance.total_team02:\n # открывает команда 02\n if current_trump_team in team01:\n # козырь с команды 01\n if instance.total_team01 < instance.room.owner.game_setting.on_save:\n # команда 01 не набрала СПАС\n instance.room.total_team02 += 6 * instance.room.previous_eggs\n else:\n # команда 01 набрала СПАС\n instance.room.total_team02 += 4 * instance.room.previous_eggs\n instance.room.previous_eggs = 0\n instance.room.save()\n elif current_trump_team in team02:\n # козырь с команды 02\n if instance.total_team01 < instance.room.owner.game_setting.on_save:\n # команда 01 не набрала СПАС\n instance.room.total_team02 += 4 * instance.room.previous_eggs\n else:\n # команда 01 набрала СПАС\n instance.room.total_team02 += 2 * instance.room.previous_eggs\n instance.room.previous_eggs = 0\n instance.room.save()\n else:\n # опять ЯЙЦА\n instance.room.previous_eggs = instance.room.previous_eggs + 1\n instance.room.save()\n elif instance.total_team01 < instance.room.owner.game_setting.on_save:\n if current_trump_team in team01:\n # если команда 01 не набрала спас и козырь с команды 01\n instance.room.total_team02 += 3\n instance.room.save()\n else:\n # если команда 01 не набрала спас и козырь с команды 02\n instance.room.total_team02 += 2\n instance.room.save()\n elif instance.total_team02 < instance.room.owner.game_setting.on_save:\n if current_trump_team in team02:\n # если команда 02 не набрала спас и козырь с команды 02\n instance.room.total_team01 += 3\n instance.room.save()\n else:\n # если команда 02 не набрала спас и козырь с команды 01\n instance.room.total_team01 += 2\n instance.room.save()\n else:\n if instance.total_team01 > instance.total_team02:\n if current_trump_team in team01:\n instance.room.total_team01 += 1\n instance.room.save()\n else:\n instance.room.total_team01 += 2\n instance.room.save()\n elif instance.total_team01 < instance.total_team02:\n if current_trump_team in team02:\n instance.room.total_team02 += 1\n instance.room.save()\n else:\n instance.room.total_team02 += 2\n instance.room.save()\n elif instance.total_team01 == instance.total_team02:\n # ЯЙЦА\n instance.room.previous_eggs = instance.room.previous_eggs + 1\n instance.room.save()\n if instance.room.total_team01 >= TEAM_TOTAL_MAX or instance.room.total_team02 >= TEAM_TOTAL_MAX:\n # Обнуление и Ожидание игроков\n instance.room.user01_ready = False\n instance.room.user02_ready = False\n instance.room.user03_ready = False\n instance.room.user04_ready = False\n instance.room.started = False\n instance.room.total_team01 = 0\n instance.room.total_team02 = 0\n instance.room.trump_is_hidden = True\n instance.room.has_jack_of_clubs = 0\n instance.room.current_label = instance.room.current_label + 1\n instance.room.save()\n # деактивирование Всех Активных колод Комнаты\n instance.room.decks.last().hands.filter(active=True).update(active=False)\n instance.room.decks.all().update(active=False)\n pass\n else:\n instance.room.decks.last().hands.filter(active=True).update(active=False)\n instance.room.decks.filter(active=True).update(active=False)\n # create new deck\n if instance.total_moves == 32:\n next_move = instance.room.decks.filter(label=instance.room.current_label).count() % 4 + 1\n # actually, it will CREATE\n deck, created = Deck.objects.get_or_create(room=instance.room, active=True, next_move=next_move, label=instance.room.current_label)\n else:\n next_move = instance.next_move\n # actually, it will GET\n deck, created = 
Deck.objects.get_or_create(room=instance.room, active=True, next_move=next_move, label=instance.room.current_label)",
"def evalHand(hand):\n # os.system(\"clear\")\n #print(\"dealer hand before evalHand is: \", hand.showHand())\n if (1 in hand.cards) and (21 - hand.handSum() >= 10):\n print(\"found a 1 value Ace in the hand\")\n hand.cards[hand.cards.index(1)] = 11 # Change the first ace from value 1\n # to value 11\n if (11 in hand.cards) and (hand.handSum() >= 22):\n print(\"found an 11 value Ace in the hand and sum > 21\")\n hand.cards[hand.cards.index(11)] = 1 # Change the first ace from value 1\n # to value 11",
"def deal(self, hands, per_hand = 1):\r\n for rounds in range(per_hand):\r\n for hand in hands:\r\n if self.cards:\r\n top_card = self.cards[0]\r\n self.give(top_card, hand)\r\n else:\r\n print(\"Can't continue to deal. Out of cards!\")",
"def flop(self):\n for i in range(3):\n self.board.append(self.deck.deal_card())\n self.last_action = 0\n self.cur_bet = 0\n self.clean_state()",
"def evaluate():\n\n global all_possible_holds\n global index\n global match_flag\n global match_cnt\n global no_of_deals\n\n print \"evaluating....\"\n print datetime.datetime.now().time()\n \n del all_possible_holds[:]\n index = 0 \n \n # all 32 possible strategies (hold possibilities) for a dealt hand ( need to calculate expected value for each of these ) \n for i in range(0,len(hand)+1):\n for subset in itertools.combinations(hand,i):\n all_possible_holds.append(subset) \n\n # build all possible trial hands by brute force and calculate expected value for all possible hold strategies\n expected_value = [] \n for item in all_possible_holds: \n number_of_draws = 5-len(item)\n no_all_possible_draws = combination(len(deck),number_of_draws) \n payout_running_sum = 0\n sets = itertools.combinations(deck,number_of_draws)\n for subset in sets: \n trial_hand = item + subset\n payout_running_sum = payout_running_sum + payout(trial_hand) \n expected_value.append(payout_running_sum/no_all_possible_draws) \n \n\n #find hold strategy with maximum expected value\n max_val = 0.0\n for i in range(0,len(expected_value)):\n if expected_value[i] > max_val:\n max_val = expected_value[i]\n index = i \n\n print datetime.datetime.now().time()\n print \"hold: \"\n for item in all_possible_holds[index]: \n print print_num[item[0]],\n print print_color[item[1]],\n print ' ',\n print '' \n \n print \"with expected value: \",\n print max_val \n \n user_match = False\n calc_match = False\n no_of_deals += 1\n for user_item in user_holds:\n if user_item in all_possible_holds[index]:\n user_match = True\n else:\n user_match = False\n break\n for hold_item in all_possible_holds[index]:\n if hold_item in user_holds:\n calc_match = True\n else:\n calc_match = False\n break\n if (user_match and calc_match) or (len(all_possible_holds[index]) == 0 and len(user_holds) == 0):\n print \"Correct\"\n match_flag = True\n match_cnt += 1\n \n else:\n print \"Wrong\" \n match_flag = False \n \n print match_cnt\n print no_of_deals \n print float(match_cnt)/float(no_of_deals) * 100",
"def finishHand(self):\r\n while self.getHand().getHandValue()<17 and not self.getHand().isFull():\r\n self.deal(1, self.getHand())",
"def hit(self, deck):\n self.showOneCard = False\n while self.getPoints() < 17:\n self.cards.append(deck.deal())",
"def hand_value_check(self, hand):\r\n hand_value = 0\r\n ace = 0\r\n result = []\r\n a = 0\r\n for card in hand: # calculate value of a hand\r\n if card.value < 10:\r\n a = card.value\r\n elif card.value in range(10, 14):\r\n a = 10\r\n elif card.value == 14: # keep track of Aces that may be counted both as 11 and as 1\r\n a = 11\r\n ace += 1\r\n hand_value += a\r\n\r\n if ace > 0: # if hand had aces, return all possible hand values\r\n for i in range(0, ace + 1):\r\n result.append(hand_value)\r\n hand_value -= 10\r\n self.display_hand_val = result\r\n return result\r\n else:\r\n result.append(hand_value)\r\n self.display_hand_val = result\r\n return result"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Are the current cards a Blackjack?
|
def is_blackjack(self):
    if self.hand == 21 and len(list(self)) == 2:
        print '%s = Blackjack' % self
        return True
|
[
"def check_for_blackjack(self):\n if (self.dealer.hand.value + self.dealer.face_down.value) == 21:\n if self.player.hand.blackjack:\n return self.blackjack_push()\n else:\n return self.blackjack_dealer_win()\n\n if self.player.hand.blackjack():\n return self.blackjack_player_win()\n lost_insurance_bet(self.side_bet)\n return False",
"def check_blackjack(card):\r\n\thand_rank1 = player_hand[0].rank\r\n\thand_rank2 = player_hand[1].rank\r\n\thand_value1 = values[hand_rank1]\r\n\thand_value2 = values[hand_rank2]\r\n\tif hand_rank1 in faces and hand_rank2 == \"Ace\":\r\n\t\tprint(\"||BLACKJACK|| YOU WIN! x2 bet\")\r\n\t\tplayer1.balance += Player.player_bet * 2\r\n\t\treturn True\r\n\t\tplaying = False\r\n\tif hand_rank2 in faces and hand_rank1 == \"Ace\":\r\n\t\tprint(\"||BLACKJACK|| YOU WIN! x2 bet\")\r\n\t\tplayer1.balance += Player.player_bet * 2\r\n\t\treturn True\r\n\t\tplaying = False",
"def check_blackjack(hand):\r\n return bool(hand.get_value() == 21)",
"def test_for_blackjack(self):\n hand = self._hand\n cards = [BjCard('clubs', '10'), BjCard('diamonds', 'A')]\n for card in cards:\n hand.add_card(card)\n self.assertEqual(hand.is_blackjack, True)",
"def has_all_cards(self, cards):\n return self.available_cards_num == sum([c.number for c in cards])",
"def test_for_non_blackjack(self):\n hand = self._hand\n cards = [BjCard('clubs', '8'), BjCard('diamonds', '8')]\n for card in cards:\n hand.add_card(card)\n self.assertEqual(hand.is_blackjack, False)",
"def still_in_hand(self):\n return len(self.hand.cards)!=0",
"def is_face_card(self):\n return JACK <= self.__face <= KING",
"def has_card(ck_card):\r\n for card in my_hand:\r\n if ck_card == card:\r\n return True # possible is in list\r\n return False # Not in list\r",
"def hasCards(self):\n event = CardsEvent([1,2,3], None, BuildPlayerContext())\n result = HasCards(EVENT).evaluate(event.context)\n self.assertTrue(result, \"The Condition should be true, if the source has cards\")",
"def is_match(self, card):\n\t\treturn self.suit == card.suit or self.value == card.value",
"def cardDiscardable(self, card):\n if self.cardDead(card):\n return True\n\n cardAttr = \"\"\n if Suit.toString(card.getSuit()) == \"white\":\n cardAttr = \"w\"\n elif Suit.toString(card.getSuit()) == \"blue\":\n cardAttr = \"b\"\n elif Suit.toString(card.getSuit()) == \"red\":\n cardAttr = \"r\"\n elif Suit.toString(card.getSuit()) == \"green\":\n cardAttr = \"g\"\n elif Suit.toString(card.getSuit()) == \"yellow\":\n cardAttr = \"y\"\n\n if card.getValue() == 1:\n cardAttr += \"1\"\n elif card.getValue() == 2:\n cardAttr += \"2\"\n elif card.getValue() == 3:\n cardAttr += \"3\"\n elif card.getValue() == 4:\n cardAttr += \"4\"\n elif card.getValue() == 5:\n cardAttr += \"5\"\n\n if card.getValue() == 1:\n if self.discardedDict[cardAttr] < 2:\n self.discardedDict[cardAttr] += 1\n # print(3 - self.discardedDict[cardAttr], \"card remaining for \", cardAttr)\n return True\n elif card.getValue() == 2 or card.getValue() == 3 or card.getValue() == 4:\n if self.discardedDict[cardAttr] < 1:\n self.discardedDict[cardAttr] += 1\n # print(2 - self.discardedDict[cardAttr], \"card remaining for \", cardAttr)\n return True\n elif card.getValue() == 5:\n if self.discardedDict[cardAttr] < 0:\n self.discardedDict[cardAttr] += 1\n # print(1 - self.discardedDict[cardAttr], \"card remaining for \", cardAttr)\n return True\n # print(\"Useful card\")\n return False",
"def check_inital_blackjack(player_hand, dealer_hand):\r\n player_has_blackjack, dealer_has_blackjack = False, False\r\n if Game.check_blackjack(player_hand):\r\n player_has_blackjack = True\r\n if Game.check_blackjack(dealer_hand):\r\n dealer_has_blackjack = True\r\n return player_has_blackjack, dealer_has_blackjack",
"def does_player_have_card(self, player, card):\n return card in self.hands[player]",
"def checkBlackjack(hand):\n if len(hand) == 2 and sum(hand) == 21:\n return True\n else:\n return False",
"def can_play(self, current_card):\n return any(current_card.playable(card) for card in self.hand)",
"def deck_has_cards(deck, cards):\n deck_dict = collections.defaultdict(int)\n for card in itertools.chain(deck.draw_pile, deck.discard_pile, deck.hand):\n deck_dict[card] += 1\n return deck_dict == cards",
"def can_play(self, card):\n played_cards = map(lambda x: str(x).lower(), self.played_cards)\n if str(card).lower() in played_cards:\n return False\n if card.prebuild in played_cards:\n return True\n\n for res in card.cost",
"def is_soft_hand(self):\n is_soft = False\n for i in self.cards:\n if i.value == 'ACE':\n is_soft = True\n\n return is_soft"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
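For orientation only: the is_blackjack document above checks for a two-card 21. Below is a minimal, self-contained Python 3 sketch of the same check; the Hand class here is hypothetical, invented for illustration, and is not part of the dataset's source codebase.

class Hand:
    """Hypothetical hand: a list of card values and their running total."""
    def __init__(self, cards):
        self.cards = list(cards)            # e.g. [10, 11] for a ten and an ace
        self.total = sum(self.cards)

    def is_blackjack(self):
        # A blackjack is exactly two cards totalling 21.
        return self.total == 21 and len(self.cards) == 2


print(Hand([10, 11]).is_blackjack())    # True: ten + ace
print(Hand([10, 5, 6]).is_blackjack())  # False: 21, but with three cards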
Restart another round. Check the remaining budget and leave the game if budget <= 0. Create new BJCards
|
def restart(self):
    self.state = 'active'
    if self.budget <= 0:
        return self.leave()
    self.cards = BJCards()
    self.bet_amount = 0
|
[
"def flop(self):\n for i in range(3):\n self.board.append(self.deck.deal_card())\n self.last_action = 0\n self.cur_bet = 0\n self.clean_state()",
"def play_a_game():\n\n # Create a new shuffled full deck\n deck = card.full_deck()\n random.shuffle(deck)\n\n # Start a new game.\n game = Blackjack(deck)\n\n # Tell player the scoring rules\n print('Welcome to CS 1110 Blackjack.')\n print('Rules: Face cards are 10 points. Aces are 11 points.')\n print(' All other cards have face value.')\n print()\n\n # Show initial deal\n print('Your hand: ' + card.cardlist_str(game.playerHand))\n print('Dealer\\'s hand: ' + card.cardlist_str(game.dealerHand))\n print()\n\n # While player has not busted out, ask if player wants to draw\n player_halted = False # True if player asked to halt, False otherwise\n while not game.playerBust() and not player_halted:\n # ri: input received from player\n ri = _prompt_player('Type h for new card, s to stop: ', ['h', 's'])\n\n player_halted = (ri == 's')\n if (not player_halted):\n game.playerHand.append(game.deck.pop(0))\n print(\"You drew the \" + str(game.playerHand[-1]))\n print()\n\n if game.playerBust():\n print(\"You went bust, dealer wins.\")\n else:\n print()\n _dealer_turn(game)\n print()\n print_winner_after_dealer(game)\n\n print(\"The final scores were \" + str(game))",
"def restart(self):\r\n\r\n self.pot = 0\r\n self.actions = 0\r\n self.previous_bet = self.small_blind\r\n self.initiate_blind(self.small_blind + self.big_blind)\r\n\r\n for player in self.players:\r\n player.credits = self.starting_credits\r\n\r\n # Let the first player begin\r\n self.active_player = (self.active_player + 1) % len(self.players)\r\n self.players[self.active_player].active = True\r\n\r\n self.players[self.active_player - 1].flip_cards()\r\n self.community_cards.flip_cards()\r\n\r\n self.deck_model = DeckModel()\r\n\r\n for player in self.players:\r\n player.new_cards(self.deck_model)\r\n\r\n output_text = \"Starting game...\\n{} post the big blind [${}]\\n{} post the small blind [${}]\".format(\r\n self.players[(self.active_player + 1) % len(self.players)].name, self.big_blind,\r\n self.players[self.active_player].name, self.small_blind)\r\n\r\n message = \"Player {} won!\".format(self.players[1].name)\r\n self.game_message.emit(message)\r\n\r\n self.new_pot.emit()\r\n self.new_credits.emit()\r\n self.new_output.emit(output_text)",
"def test_new_game_deal(self):\n cg = CribGame()\n status = cg.update()\n self.assertEqual(status['phase'], 'Deal')\n self.assertEqual(status['scores'], [0, 0])\n self.assertEqual(len(status['deck']), 40)",
"def cal_kill_turn(deck): \n #Init board/game state\n goldfish_life = 20\n turn = 0 \n \n #lands in hand\n lands_in_hand = []\n #spell count in hand\n spells_in_hand = []\n #lands in play\n lands_in_play = []\n #creatures in play\n spells_in_play = []\n #creatures' in play power\n #creature_pwr = 1\n \n #shuffle and draw 7 cards, mulls if hand bad\n hand = None\n keep_hand = False\n hand_count = 8\n while keep_hand == False:\n hand_count = hand_count - 1\n deck.shuffle()\n hand = deck.peep(hand_count)\n keep_hand = Mull.keep_or_mull(hand)\n hand = deck.draw_hand(num = hand_count) \n \n #Init Hand state\n for card in hand:\n if card.is_land == True:\n lands_in_hand.append(card)\n else:\n spells_in_hand.append(card) \n \n #SIMULATE GOLDFISH KILL\n while(goldfish_life >= 0 and deck.size() > 0): \n if VERBOSE:\n print(\"+++++++++++++ Turn \" + str(turn) + \"++++++++++++++\") \n print(\" Goldfish life = \" + str(goldfish_life))\n \n print(\" Lands in play\")\n for card in lands_in_play:\n print(card)\n print(\" Spells in play\")\n for card in spells_in_play:\n print(card)\n print(\" Lands in hand\")\n for card in lands_in_hand:\n print(card)\n print(\" Creatures in hand\")\n for card in spells_in_hand:\n print(card) \n \n # Draw a card if not first turn\n if turn > 0:\n card_to_draw = deck.draw() \n if card_to_draw.is_land == True: \n lands_in_hand.append(copy.deepcopy(card_to_draw))\n else: \n spells_in_hand.append(copy.deepcopy(card_to_draw)) \n\n #MAIN PHASE 1 play land card if we have any\n if len(lands_in_hand) > 0: \n lowest_cost = None\n land_to_play = None\n #Play the land card that has the lowest cost creature in hand\n for land in lands_in_hand[:]:\n for creature in spells_in_hand[:]:\n if land.manaEachTurn == creature.manaCost:\n # this land card has a playable creature\n if land_to_play != None:\n temp_cost = creature.manaCost\n if temp_cost < lowest_cost:\n if len(np.where(temp_cost > 0)[0]) <= \\\n len(np.where(lowest_cost > 0)[0]): \n # play the land that corresponds to\n # the creatures that require the \n # least different types\n land_to_play = land\n lowest_cost = temp_cost\n else:\n #first land card, we store it to play\n land_to_play = land\n lowest_cost = creature.manaCost\n if land_to_play == None: #No spell cards in hand\n land_to_play = lands_in_hand[0] #play first land card\n \n lands_in_play.append(copy.deepcopy(land_to_play)) \n lands_in_hand.pop(lands_in_hand.index(land_to_play)) \n \n #ATTACK GOLDFISH \n for creature in spells_in_play:\n goldfish_life = goldfish_life - creature.damageEachTurn\n if goldfish_life <= 0:\n if VERBOSE:\n print('Goldfish killed on turn ' + str(turn))\n return turn\n \n #MAIN PHASE 2 play spells\n if len(spells_in_hand) > 0 and len(lands_in_play) > 0: \n #Spells in hand and mana available --> play a creature\n #GOLDFISH LOGIC\n if p_goldfish:\n if goldfish_interactions > 0:\n pass\n if q_goldfish:\n if r.random(1) < q_goldfish_prob:\n if goldfish_interactions > 0:\n pass\n #Check mana pool\n mana_pool = np.array([0] * Mana.MANA_TYPES)\n for card in lands_in_play:\n mana_pool += card.manaEachTurn\n \n for creature in spells_in_hand:\n temp_pool = np.array(mana_pool - \\\n np.array(creature.manaCost))\n if len(np.where(temp_pool < 0)[0]) == 0: \n #can afford to play card\n mana_pool = temp_pool[:]\n spells_in_play.append(copy.deepcopy(creature))\n spells_in_hand.remove(creature)\n if VERBOSE:\n print(\"++++++++++++ End Turn \" + str(turn) + \"++++++++++\") \n turn += 1 \n #End Gold Fish kill \n \n if VERBOSE:\n print('Goldfish killed on 
turn ' + str(turn))\n return turn",
"def takeJailCard(self):\r\n self.jailCards -= 1",
"def deck_finals(sender, instance, **kwargs):\n # TODO if total moves == 32:\n # TODO count, hands, count room, finished or not, create next deck, if not finished.\n last = instance.room.decks.filter(label=instance.room.current_label).last()\n # NOT the beginning.\n if instance.room.has_jack_of_clubs != 0:\n # define new trump.\n trump = -1\n else:\n trump = 1\n\n instance.room.trump_is_hidden = instance.room.decks.filter(label=instance.room.current_label).count() <= 1\n instance.room.save()\n\n if instance.hands.count() == 0:\n bag = list()\n for suit in SUITS:\n for card_number in CARD_NUMBERS:\n # bag.append(card_to_number(instance.trump, suit, card_number))\n bag.append(card_to_number(trump, suit, card_number)) # at the beginning the trump is unknown\n randomized_bag = list()\n while len(bag):\n # a <= n <= b\n # random.randint(a,b)\n random_number = random.randint(0, len(bag) - 1)\n randomized_bag.append(bag[random_number])\n bag.remove(bag[random_number])\n # search the jack_of_clubs for defining trump\n if trump == -1:\n for i in range(4):\n hand = randomized_bag[(i * 8):((i * 8) + 8)]\n for card in hand:\n if card[\"trump_priority\"] == TRUMP_PRIORITY_JACK * 40:\n if (i + 1) == instance.room.has_jack_of_clubs:\n trump = 1\n elif (i + 1) % 2 == instance.room.has_jack_of_clubs % 2:\n trump = 2\n elif (instance.room.has_jack_of_clubs % 4 + 1) == (i + 1):\n trump = 3\n else:\n trump = 4\n break\n # setting current trump\n instance.room.decks.filter(pk=last.pk).update(trump=trump, label=instance.room.current_label)\n for card in randomized_bag:\n if card[\"value\"] // 100 == trump:\n for card_number in CARD_NUMBERS:\n if card[\"value\"] % 100 == card_number[0]:\n if card[\"name\"] not in [\"jack_of_clubs\", \"jack_of_spades\", \"jack_of_hearts\", \"jack_of_diamonds\"]:\n card[\"trump_priority\"] = card_number[2]\n break\n\n for i in range(4):\n if i == 0:\n user = instance.room.user01\n elif i == 1:\n user = instance.room.user02\n elif i == 2:\n user = instance.room.user03\n else:\n user = instance.room.user04\n hand = Hand.objects.create(deck=instance, user=user)\n sorted_bag = sort_by_trump(trump, randomized_bag[(i * 8):((i * 8) + 8)])\n for j in range(len(sorted_bag)):\n # current_card = randomized_bag[i * 8 + j]\n current_card = sorted_bag[j]\n name = current_card[\"name\"]\n value = current_card[\"value\"]\n worth = current_card[\"worth\"]\n image_url = current_card[\"image_url\"]\n trump_priority = current_card[\"trump_priority\"]\n Card.objects.create(deck=instance, hand=hand, name=name, value=value, worth=worth, image_url=image_url,\n trump_priority=trump_priority)\n # has the FIRST\n if trump_priority == TRUMP_PRIORITY_JACK * 40 and instance.room.trump_is_hidden:\n instance.room.has_jack_of_clubs = i + 1\n # if not the first game, trump is not hidden.\n #TODO correction, which needs to be checked carefully\n\n # setting ace allowed\n if instance.total_moves == 0:\n if instance.trump == CLUBS_VALUE:\n instance.clubs = True\n elif instance.trump == SPADES_VALUE:\n instance.spades = True\n elif instance.trump == HEARTS_VALUE:\n instance.hearts = True\n elif instance.trump == DIAMONDS_VALUE:\n instance.diamonds = True\n if instance.room.owner.game_setting.ace_allowed:\n instance.clubs = True\n instance.spades = True\n instance.hearts = True\n instance.diamonds = True\n # in order to avoid infinite recursion in post saving method.\n instance.room.decks.filter(pk=last.pk).update(clubs=instance.clubs,\n spades=instance.spades,\n hearts=instance.hearts,\n diamonds=instance.diamonds)\n 
elif instance.total_moves % 4 == 1:\n card = instance.cards.get(pk=instance.moves.last().card_id)\n value = card.value // 100\n if value == CLUBS_VALUE:\n instance.clubs = True\n elif value == SPADES_VALUE:\n instance.spades = True\n elif value == HEARTS_VALUE:\n instance.hearts = True\n elif value == DIAMONDS_VALUE:\n instance.diamonds = True\n instance.room.decks.filter(pk=last.pk).update(clubs=instance.clubs,\n spades=instance.spades,\n hearts=instance.hearts,\n diamonds=instance.diamonds)\n\n instance.room.decks.filter(active=True, label=instance.room.current_label).exclude(pk=last.pk).update(active=False)\n\n if instance.total_moves >= 32 and instance.total_team01 + instance.total_team02 == TEAM_TOTAL_MAX_LOCAL:\n # Если козырь в начале был\n team01 = [1, 3]\n team02 = [2, 4]\n if instance.trump > 2: # Trump is Hearts or Diamonds\n if instance.room.has_jack_of_clubs % 2 == 0: # Trump user of room is 2 or 4\n current_trump_team = 1\n else:\n current_trump_team = 2\n else: # Trump is Clubs or Spades\n if instance.room.has_jack_of_clubs % 2 == 0: # Trump user of room is 2 or 4\n current_trump_team = 2\n else:\n current_trump_team = 1\n if instance.total_team01 == TEAM_TOTAL_MAX_LOCAL and instance.team02_received is False:\n # голая реализована командой 01\n if instance.room.owner.game_setting.on_full == ON_EGGS_OPEN_FOUR:\n # откывается 4 глаза команда 01\n instance.room.total_team01 += 4\n instance.room.save()\n else:\n # голая = партия Выигрывает команда 01\n instance.room.total_team01 = TEAM_TOTAL_MAX\n instance.room.save()\n pass\n elif instance.total_team02 == TEAM_TOTAL_MAX_LOCAL and instance.team01_received is False:\n # голая реализована командой 02\n if instance.room.owner.game_setting.on_full == ON_EGGS_OPEN_FOUR:\n # откывается 4 глаза команда 02\n instance.room.total_team02 += 4\n instance.room.save()\n else:\n # голая = партия Выигрывает команда 02\n instance.room.total_team02 = TEAM_TOTAL_MAX\n instance.room.save()\n elif instance.room.previous_eggs > 0:\n # предыдущая игра ЯЙЦА\n if instance.room.owner.game_setting.on_eggs == ON_EGGS_OPEN_FOUR:\n # открывается 4 глаза\n if instance.total_team01 > instance.total_team02:\n # открывает команда 01\n instance.room.total_team01 += 4 * instance.room.previous_eggs\n instance.room.previous_eggs = 0\n instance.room.save()\n elif instance.total_team01 < instance.total_team02:\n # открывает команда 02\n instance.room.total_team02 += 4 * instance.room.previous_eggs\n instance.room.previous_eggs = 0\n instance.room.save()\n else:\n # опять ЯЙЦА\n instance.room.previous_eggs = instance.room.previous_eggs + 1\n instance.room.save()\n elif instance.room.owner.game_setting.on_eggs == ON_EGGS_OPEN_DOUBLE:\n # открывается удвоенно\n if instance.total_team01 > instance.total_team02:\n # открывает команда 01\n if current_trump_team in team01:\n # козырь с команды 01\n if instance.total_team02 < instance.room.owner.game_setting.on_save:\n # команда 02 не набрала СПАС\n instance.room.total_team01 += 4 * instance.room.previous_eggs\n else:\n # команда 02 набрала СПАС\n instance.room.total_team01 += 2 * instance.room.previous_eggs\n instance.room.previous_eggs = 0\n instance.room.save()\n elif current_trump_team in team02:\n # козырь с команды 02\n if instance.total_team02 < instance.room.owner.game_setting.on_save:\n # команда 02 не набрала СПАС\n instance.room.total_team01 += 6 * instance.room.previous_eggs\n else:\n # команда 02 набрала СПАС\n instance.room.total_team01 += 4 * instance.room.previous_eggs\n instance.room.previous_eggs = 0\n 
instance.room.save()\n elif instance.total_team01 < instance.total_team02:\n # открывает команда 02\n if current_trump_team in team01:\n # козырь с команды 01\n if instance.total_team01 < instance.room.owner.game_setting.on_save:\n # команда 01 не набрала СПАС\n instance.room.total_team02 += 6 * instance.room.previous_eggs\n else:\n # команда 01 набрала СПАС\n instance.room.total_team02 += 4 * instance.room.previous_eggs\n instance.room.previous_eggs = 0\n instance.room.save()\n elif current_trump_team in team02:\n # козырь с команды 02\n if instance.total_team01 < instance.room.owner.game_setting.on_save:\n # команда 01 не набрала СПАС\n instance.room.total_team02 += 4 * instance.room.previous_eggs\n else:\n # команда 01 набрала СПАС\n instance.room.total_team02 += 2 * instance.room.previous_eggs\n instance.room.previous_eggs = 0\n instance.room.save()\n else:\n # опять ЯЙЦА\n instance.room.previous_eggs = instance.room.previous_eggs + 1\n instance.room.save()\n elif instance.total_team01 < instance.room.owner.game_setting.on_save:\n if current_trump_team in team01:\n # если команда 01 не набрала спас и козырь с команды 01\n instance.room.total_team02 += 3\n instance.room.save()\n else:\n # если команда 01 не набрала спас и козырь с команды 02\n instance.room.total_team02 += 2\n instance.room.save()\n elif instance.total_team02 < instance.room.owner.game_setting.on_save:\n if current_trump_team in team02:\n # если команда 02 не набрала спас и козырь с команды 02\n instance.room.total_team01 += 3\n instance.room.save()\n else:\n # если команда 02 не набрала спас и козырь с команды 01\n instance.room.total_team01 += 2\n instance.room.save()\n else:\n if instance.total_team01 > instance.total_team02:\n if current_trump_team in team01:\n instance.room.total_team01 += 1\n instance.room.save()\n else:\n instance.room.total_team01 += 2\n instance.room.save()\n elif instance.total_team01 < instance.total_team02:\n if current_trump_team in team02:\n instance.room.total_team02 += 1\n instance.room.save()\n else:\n instance.room.total_team02 += 2\n instance.room.save()\n elif instance.total_team01 == instance.total_team02:\n # ЯЙЦА\n instance.room.previous_eggs = instance.room.previous_eggs + 1\n instance.room.save()\n if instance.room.total_team01 >= TEAM_TOTAL_MAX or instance.room.total_team02 >= TEAM_TOTAL_MAX:\n # Обнуление и Ожидание игроков\n instance.room.user01_ready = False\n instance.room.user02_ready = False\n instance.room.user03_ready = False\n instance.room.user04_ready = False\n instance.room.started = False\n instance.room.total_team01 = 0\n instance.room.total_team02 = 0\n instance.room.trump_is_hidden = True\n instance.room.has_jack_of_clubs = 0\n instance.room.current_label = instance.room.current_label + 1\n instance.room.save()\n # деактивирование Всех Активных колод Комнаты\n instance.room.decks.last().hands.filter(active=True).update(active=False)\n instance.room.decks.all().update(active=False)\n pass\n else:\n instance.room.decks.last().hands.filter(active=True).update(active=False)\n instance.room.decks.filter(active=True).update(active=False)\n # create new deck\n if instance.total_moves == 32:\n next_move = instance.room.decks.filter(label=instance.room.current_label).count() % 4 + 1\n # actually, it will CREATE\n deck, created = Deck.objects.get_or_create(room=instance.room, active=True, next_move=next_move, label=instance.room.current_label)\n else:\n next_move = instance.next_move\n # actually, it will GET\n deck, created = 
Deck.objects.get_or_create(room=instance.room, active=True, next_move=next_move, label=instance.room.current_label)",
"def new_game(self):\n self.game_over = False\n self.round = 0\n\n self.new_round()",
"def main_loop(self):\r\n BlackjackView.show_game_start()\r\n another_round = True\r\n while another_round:\r\n for player in self.players:\r\n bet = player.place_bet()\r\n player.balance -= bet\r\n self.bank += bet\r\n self.bank = int(self.bank * 1.5)\r\n winner = self.round()\r\n winner.balance += self.bank\r\n self.bank = 0\r\n BlackjackView.show_round_finish(self, winner)\r\n for player in self.players:\r\n while len(player.hand) > 0:\r\n self.game_deck.cards.append(player.hand.pop())\r\n another_round = BlackjackView.get_next_round()\r\n BlackjackView.show_goodbye()",
"def next_round(self, succeeds_fold=False):\r\n\r\n self.pot = 0\r\n self.actions = 0\r\n self.previous_bet = self.small_blind\r\n self.initiate_blind(self.small_blind + self.big_blind)\r\n\r\n # Let the first player begin\r\n self.active_player = (self.active_player + 1) % len(self.players)\r\n self.players[self.active_player].active = True\r\n\r\n self.players[self.active_player-1].flip_cards()\r\n\r\n if not succeeds_fold:\r\n self.community_cards.flip_cards()\r\n if succeeds_fold:\r\n self.community_cards.flip_all_cards()\r\n\r\n # Create a new deck\r\n self.deck_model = DeckModel()\r\n\r\n # Creates new cards\r\n self.community_cards.new_cards(self.deck_model)\r\n for player in self.players:\r\n player.new_cards(self.deck_model)\r\n\r\n output_text = \"Initiating round.\\n{} post the big blind [${}]\\n{} post the small blind [${}]\".format(\r\n self.players[(self.active_player + 1) % len(self.players)].name, self.big_blind,\r\n self.players[self.active_player].name, self.small_blind)\r\n\r\n self.new_pot.emit()\r\n self.new_credits.emit()\r\n self.new_output.emit(output_text)",
"def final_checking_cards(self):\n cards_list=[card.figure for card in self.player_cards if card.figure==\"Ace\"]\n ace_count=0\n while ace_count<=cards_list.count(\"Ace\") and self.value_of_player_cards>21:\n self.value_of_player_cards-=10\n ace_count+=1",
"def new_game_sequence():\n print(\"\\n-------------------- Welcome to Blackjack --------------------\")\n\n n_players = input(\"How many people are playing? (Enter an integer value): \")\n \n # Prompt the user for valid input (Must be an integer > 0)\n while not n_players.isdigit() or int(n_players) < 1:\n print(\"ERROR: Invalid Input\") \n print(\"The number of players must be expressed as a positive integer value \")\n n_players = input(\"How many people will be playing?: \")\n n_players = int(n_players)\n\n # Get the player's names\n print('Please enter each players name')\n player_names = []\n for i in range(n_players):\n new_player = input(\"Player \" + str(i+1) + \"\\'s name: \")\n while len(new_player) == 0:\n print('ERROR: Player name must have at least one character')\n new_player = input(\"Player \" + str(i+1) + \"\\'s name: \")\n player_names.append(new_player)\n\n # Get user input dictating the number of decks to use\n n_decks = input(\"How many decks would you like to use? Standard blackjack uses between 1 and 8 decks: \")\n \n # Prompt the user for valid input (Must be an integer > 0)\n while not n_decks.isdigit() or int(n_decks) < 1:\n print(\"ERROR: Invalid Input\") \n print(\"The number of decks must be expressed as a positive integer value\")\n n_decks = input(\"How many decks should be included?: \")\n n_decks = int(n_decks)\n\n print(\"Starting a new game with\", n_players, \"players and\", n_decks, \"decks (\" + str(n_decks*52) + \" cards)\")\n print(\".\\n.\\n.\\n.\\n\")\n \n # Create a blackjack game with this information.\n blackjack_game = Game(player_names, n_decks)",
"def buy_card(self):\n\n print(f\"Hand has buying power {self.hand_buying_power}...\")\n bought_card = None\n\n # by Platinium, if possible\n # otherwise (game stage agnostic) can buy a province or colony, always buy it\n if ((self.highest_buyable_money == cards.PLATINUM) and\n (self.game_stage == GameStage.early_game)):\n bought_card = cards.PLATINUM\n elif ((self.highest_buyable_victory_points == cards.PROVINCE) or\n (self.highest_buyable_victory_points == cards.COLONY)):\n bought_card = self.highest_buyable_victory_points\n else:\n # buy the highest buyable money by default\n if (self.highest_buyable_money != cards.COPPER):\n bought_card = self.highest_buyable_money\n\n # except if in the late game stage, in which case buy the highest\n # buyable victory points instead\n if ((self.game_stage == GameStage.late_game) and\n (self.highest_buyable_victory_points) and\n (self.highest_buyable_victory_points.victory_points > 0)):\n bought_card = self.highest_buyable_victory_points\n print(f\"Late Stage Game, so buying victory points over money\")\n\n # explain the play\n self.speak_hand()\n s = f\"for total buying power of {self.hand_buying_power}\"\n self.game.speak_str(s)\n\n # gain the card bought, if any, to the discard pile:\n if bought_card:\n s = f\"I buy {bought_card.name}\"\n self.game.speak_str(s)\n\n # gain the card to the discard pile\n self.deck.discard.append(bought_card)\n self.game.buy_card(bought_card)\n else:\n s = f\"I do not buy anything\"\n self.game.speak_str(s)\n\n # the whole hand is used up buying the card, discard the hand\n self.deck.discard_hand()",
"def useJailCard(self):\r\n self.inJail = False\r\n self.numTurnsInJail = 0\r\n self.jailCards -= 1",
"def executeChanceCard(self, num):\n \n if num == \"1\":\n # Pay each player $100\n self.givePlayMon(100, self._players[self._current])\n self.changeTurn()\n self._deck.deactivateChance()\n \n if num == \"2\":\n # Advance to go\n self._players[self._current].setPosition(0)\n self._updatePawnLocations()\n self.passedGo()\n self.changeTurn()\n self._deck.deactivateChance()\n \n if num == \"3\":\n # Go to jail\n self.goToJail()\n self._deck.deactivateChance()\n \n if num == \"4\":\n # Pay bank $150\n self._players[self._current].loseMoney(150)\n self.changeTurn()\n self._deck.deactivateChance()\n\n if num == \"5\":\n # Advance to free parking\n self._players[self._current].setPosition(16)\n self._updatePawnLocations()\n self.changeTurn()\n self._deck.deactivateChance()\n \n if num == \"6\":\n # Move back 3 spaces\n currentPos = self._players[self._current].getPosition()\n self._players[self._current].setPosition(currentPos - 3)\n self._updatePawnLocations()\n self.changeTurn()\n self._deck.deactivateChance()\n \n if num == \"7\":\n # Move forward 4 spaces\n currentPos = self._players[self._current].getPosition()\n self._players[self._current].setPosition(currentPos + 4)\n self._updatePawnLocations()\n self.changeTurn()\n self._deck.deactivateChance()\n \n if num == \"8\":\n # Collect $150\n self._players[self._current].addMoney(150)\n self.changeTurn()\n self._deck.deactivateChance()\n \n if num == \"9\":\n # Move to opus\n self._players[self._current].setPosition(26)\n self._updatePawnLocations()\n self.changeTurn()\n self._deck.deactivateChance()",
"def remodel_test(currentPlayer, g, handPos, choice1, choice2):\n j = g.hand[currentPlayer][choice1] # store card we will trash\n\n if ((getCost(g.hand[currentPlayer][choice1]) + 2) > getCost(choice2)):\n return 1\n\n gainCard(choice2, g, 0, currentPlayer)\n\n # Discard card from hand\n discardCard(handPos, g, currentPlayer, 0)\n\n # Discard trashed card\n for i in range(len(g.hand[currentPlayer])):\n if (g.hand[currentPlayer][i] == j):\n discardCard(i, g, currentPlayer, 0)\n break\n\n return 0",
"def burn_card():\n\tglobal my_deck\n\tburn=my_deck.draw()\n\tmy_deck.discard(burn)\n\treturn my_deck",
"def place_bet(self) -> None:\n amount = self.get_bet()\n while not self.valid_bet(amount):\n print(f\"That is an invalid bet. Please input an amount within ${MINIMUM_BET()} and ${self.balance}\\n\")\n amount = self.get_bet()\n self.balance -= amount\n self.bet = amount\n print(f\"A total of ${self.bet} has been deducted from your balance. Good luck, player!\\n\")\n time.sleep(1)",
"def nextPlayerInBid(self):\n print self.playerWhoseTurnItIs\n self.deleteCards(self.playerWhoseTurnItIs)\n if self.rules.bid[3] == \"round\" or self.rules.bid[3] == \"oneByOne\":\n self.playerWhoseTurnItIs += 1\n elif self.rules.bid[3] == \"faceOff\":\n if self.players[self.playerWhoseTurnItIs].bid == None:\n if self.playerWhoseTurnItIs == self.playerWhoStartsBid: \n if (self.players[self.playerWhoIsFacingOff].bid==False):\n self.playerWhoStartsBid = self.playerWhoIsFacingOff\n self.playerWhoseTurnItIs = self.playerWhoStartsBid\n self.playerWhoIsFacingOff = self.playerWhoseTurnItIs+1\n\n else:\n self.playerWhoStartsBid = self.playerWhoIsFacingOff\n self.playerWhoseTurnItIs = self.playerWhoStartsBid + 1\n self.playerWhoIsFacingOff = self.playerWhoseTurnItIs\n elif self.playerWhoseTurnItIs == self.playerWhoIsFacingOff:\n self.playerWhoseTurnItIs += 1\n self.playerWhoIsFacingOff = self.playerWhoseTurnItIs\n else:\n if self.playerWhoseTurnItIs == self.playerWhoStartsBid:\n self.playerWhoseTurnItIs = self.playerWhoIsFacingOff\n else:\n self.playerWhoseTurnItIs = self.playerWhoStartsBid\n self.playerWhoseTurnItIs %= self.rules.numberOfPlayers\n self.playerWhoIsFacingOff %= self.rules.numberOfPlayers\n self.playerWhoStartsBid %= self.rules.numberOfPlayers\n name = self.players[self.playerWhoseTurnItIs].name\n self.isDisplayingMessage = True\n tkMessageBox.showwarning(\"Next Player\",\"Pass the computer to %s.\" %name)\n self.isDisplayingMessage = False\n self.rules.updateGamePhase()\n self.redrawAll()\n self.preGame()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Bet the given amount of money; the bet cannot exceed the player's budget.
|
def bet(self, amount):
    if amount > self.budget:
        print 'you cannot bet: not enough money'
    else:
        self.bet_amount = amount
        print 'you bet %s' % amount
|
[
"def bet(self, amount):\r\n\r\n if self.players[self.active_player].credits < self.big_blind:\r\n message = \"Player {} won! Not enough money remaining.\".format(self.players[(self.active_player + 1) %\r\n len(self.players)].name)\r\n self.game_message.emit(message)\r\n self.restart()\r\n if self.players[(self.active_player + 1) % len(self.players)].credits < self.big_blind:\r\n message = \"Player {} won! Not enough money remaining.\".format(self.players[self.active_player].name)\r\n self.game_message_warning.emit(message)\r\n self.restart()\r\n\r\n if amount == 0:\r\n message = \"Raises must be larger than zero!\"\r\n self.game_message_warning.emit(message)\r\n\r\n elif self.previous_bet + amount > self.players[self.active_player].credits:\r\n message = \"Not enough money!\"\r\n self.game_message_warning.emit(message)\r\n else:\r\n self.pot += amount\r\n self.new_pot.emit()\r\n\r\n self.players[self.active_player].credits -= (self.previous_bet + amount)\r\n self.new_credits.emit()\r\n\r\n output_text = \"{} bet ${} and raised ${}\".format(self.players[self.active_player].name, self.previous_bet,\r\n amount)\r\n\r\n self.previous_bet = (self.previous_bet + amount)\r\n self.actions += 1\r\n\r\n self.new_output.emit(output_text)\r\n\r\n self.active_player = (self.active_player + 1) % len(self.players)\r\n\r\n # Update the players to hide their cards when it is not their turn\r\n for player in self.players:\r\n player.flip_cards()\r\n\r\n self.progress_game()",
"def get_bet(self) -> int:\n return int(input(f\"How much money would you like to place? \"\n f\"(input an integer between {MINIMUM_BET()}-{self.balance}): \"))",
"def make_bet(self, amount):\n self.update_fear(amount)\n self.bot.bet(amount)",
"def win_bet(self):\n self.total += self.bet*2",
"def player_call(self, p, opponent_bet):\n to_bet = min(opponent_bet - p.bet, p.bank)\n p.bank -= to_bet\n p.bet += to_bet\n self.pot += to_bet",
"def all_in():\r\n\r\n raise_bet(player.get_cash())",
"def get_player_bet(self) -> None:\n print(\"Please enter the amount you want to bet.\")\n while self.user.bet == 0:\n input_ = input(\">>> \")\n try:\n input_ = float(input_)\n self.user.bet = input_\n except ValueError as e:\n print(str(e))\n continue",
"async def get_bets(self, ctx):\n game_msg = await self.bot.say(self.game_display())\n for player in self.ingame:\n await self.bot.edit_message(game_msg,self.game_display())\n prompt_msg = await self.bot.say(\n ('__*{0}*__ , please enter valid bet '\n 'amount within 10 seconds.\\n'\n 'Bets must be positive integer amounts.'\n ).format(self.server.get_member(player.id).display_name)\n )\n response_msg = await self.bot.wait_for_message(\n timeout = 11,\n #lambda to check for author since I want to avoid using\n #bot.get_user_info() calls\n check = (lambda message: message.author.id == player.id)\n )\n #Might be able to fit this into the lambda. Could also\n #clean this up\n if response_msg:\n try:\n int(response_msg.content)\n if (5 <= int(response_msg.content)\n <= min(player.score,500)):\n player.no_response = 0\n player.bet = int(response_msg.content)\n player.score -= player.bet\n else: player.no_response += 1\n except: player.no_response += 1\n else: player.no_response += 1\n await self.bot.delete_message(prompt_msg)\n await self.bot.delete_message(game_msg)",
"def place_bet(self) -> None:\n amount = self.get_bet()\n while not self.valid_bet(amount):\n print(f\"That is an invalid bet. Please input an amount within ${MINIMUM_BET()} and ${self.balance}\\n\")\n amount = self.get_bet()\n self.balance -= amount\n self.bet = amount\n print(f\"A total of ${self.bet} has been deducted from your balance. Good luck, player!\\n\")\n time.sleep(1)",
"def pay_player(self, amount):",
"def place_bet(self, bet_amount):\n if bet_amount > self.bankroll:\n raise NotEnoughChips(\"No more chips please reload before playing\")\n\n self.bankroll -= bet_amount\n return bet_amount",
"def money():\n print('Money of the team : %d' % get_team_money())",
"def place_bet(self, bet_):\n\n bet = int(bet_)\n if (bet > self.minimal_bet) and not bool(bet % self.minimal_bet_step):\n self.player.bet = bet\n self.player.balance -= bet\n else:\n raise ValueError",
"def test_bet(self):\n hand = self._hand\n self.assertEqual(hand.bet.amount, 150)",
"def passedGo(self):\n \n self.addMoney(200)",
"def take_money_from_player(self, player_name, amount):\n # set variables for better readability\n player = self.players[player_name][0]\n\n # if player has less money than they need to pay, set their debt accordingly.\n if player.get_money() < amount:\n debt = amount - player.get_money()\n player.take_money(amount)\n player.add_debt(debt)\n self.set_special_event(6)\n\n # player has enough money to pay cost\n else:\n player.take_money(amount)",
"def bet(self):\n\n est_p = (self.heads + 1) / float(self.n + 2)\n return self.wealth * (est_p * (2 + 1) - 1) / 2.",
"def get_amount(self):\n return self.amount_bet",
"def handle_bet(self, player, bet):\n if (bet - self.state.cur_bet) < self.state.minimum_bet_size:\n raise ValueError(\"Bet is smaller than minimum bet size.\")\n # Example: min bet starts at 2 preflop. I open to 6. min bet size should be 4\n # I get reraised to 10. Min bet size should still be 4. 10 - 6 = 4\n print(\"Bet: {}\".format(bet))\n print(\"Prev bet: {}\".format(player.prev_bet))\n self.state.pot += bet - player.prev_bet\n self.state.cur_bet = bet \n self.state.minimum_bet_size = (bet - self.state.minimum_bet_size)\n self.state.last_aggressor = player \n self.state.counter = 0"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Hit a card and check if bust
|
def hit(self, card):
    self.cards.hit(card)
    if self.cards.hand == -1:
        self.state = 'burst'
|
[
"def hit(player):\n deal_random_card(player)",
"def hit(self, deck):\n self.showOneCard = False\n while self.getPoints() < 17:\n self.cards.append(deck.deal())",
"def deal_self(self):\n self.cards.hit(self.get_card())\n if self.cards.hand < 17 and self.cards.hand>=0:\n self.state = 'active'\n elif self.cards.hand >= 17 and self.cards.hand <= 21:\n self.state = 'stand'\n elif self.cards.hand==-1:\n self.state = 'burst'",
"def hit():\n \n global score, in_play, outcome\n \n # if the hand is in play, hit the player\n if in_play:\n player_hand.add_card(deck.deal_card())\n else: \n outcome = \"Hand not in play. Deal again?\"\n return\n # if busted, assign a message to outcome, update in_play and score\n if player_hand.get_value() > 21:\n in_play = False\n score -= 1\n outcome = \"You have busted! Deal again?\"",
"def hit(self):\n global in_play, deck, player_hand, dealer_hand, outcome, lost\n \n if in_play:\n player_hand.add_card(deck.deal_card())\n \n if player_hand.get_value() > 21:\n self.outcome.set(\"You have busted! Dealer wins. New deal?\")\n self.lost += 1\n self.score.set(str(self.won) + \"/\" + str(self.lost))\n in_play = False\n draw(canvas)\n\n print \"\\nPlayer hand: \", player_hand\n print \"Dealer hand: \", dealer_hand",
"def test_card_hit_non_cup_bread_with_cup_bread_bonus(self):\n self.player.has_bread_cup_bonus = True\n self.player.money = 0\n card = self.new_factory_card_card(name='Card',\n payout=1, target_card_type=cards.CardWheat,\n game=self.game, family=cards.Card.FAMILY_GEAR)\n self.player.add_card(card)\n card.hit(None)\n self.assertEqual(self.player.money, 1)",
"def hit(self, card):\n self.append(card)\n values=[]\n values.append(card.value())\n if values[0] < 2:\n values.append(values[0]+ 10)\n new_sums =set([v+s for v in values for s in self.possible_sums if v+s <=21])\n new_sums =sorted(new_sums)\n if len(new_sums) ==0:\n self.hand=-1\n else:\n self.hand = new_sums[-1]\n self.possible_sums = new_sums",
"def test_card_hit_non_cup_bread_with_cup_bread_bonus(self):\n self.player.has_bread_cup_bonus = True\n self.player.money = 0\n card = self.new_payout_card(name='Card', payout=1,\n game=self.game, family=cards.Card.FAMILY_GEAR)\n self.player.add_card(card)\n card.hit(None)\n self.assertEqual(self.player.money, 1)",
"def should_hit(player_total, dealer_card_val, player_aces):\n return False",
"def hook_buy_card(self, game, player, card):\n if card.isVictory():\n player.output(\"Gaining Gold from Hoard\")\n player.add_card(game[\"Gold\"].remove())",
"def npc_use_buff(self):\r\n if set(npc.firing_chamber) <= set(npc.bullet_chamber) and self.num_fold_cards > 0:\r\n print(self.name+\" uses fold card. \"+self.name+\" is allowed to fold on the next shot.\")\r\n npc.fold_status = True\r\n self.num_fold_cards -= 1\r\n print(self.name+\" has \"+str(self.num_fold_cards)+\" fold card left.\")\r\n else:\r\n print(self.name + \" does not use any card.\")",
"def checking_cards(self,card,deck):\n while self.value_of_dealer_cards <= 16:\n print(\"Dealer hit\")\n print(\"\")\n self.add_card(card,deck)",
"def player_hit(self):\r\n if self.in_progress:\r\n self.player_hand.add(self.deck.deal())\r\n if self.player_hand.total > 21:\r\n self.status_color = 'red'\r\n self.game_status = \"Dealer WINS... Press 'r' to start game\"\r\n self.dealer_wins += 1\r\n self.in_progress = False\r\n self.refresh_canvas()",
"def hit(self):\n assert not self.damaged\n self.damaged = True\n self.game_piece.hit()",
"def hook_gain_this_card(self, game, player):\n empties = sum(1 for st in game.cardpiles if game[st].is_empty())\n for _ in range(empties):\n player.gain_card(\"Gold\")",
"def get_cards_that_beat(self, card):",
"def shields_hit(self):\n if __debug__:\n self.data_invariant()\n\n successful_absorb = True\n\n if(self.get_shields_health() > 0):\n self.set_shields_health(self.get_shields_health() - 1)\n\n if(self.get_shields_health() == 0):\n self.hull_hit()\n successful_absorb = False\n\n if __debug__:\n self.data_invariant()\n\n return successful_absorb",
"def buy(self, card):\n if self.buying_power < card.cost or self.num_buys < 1:\n raise InsufficientFundsError(card)\n # try:\n self.game.sell(card)\n # except Exception as e:\n # return False\n self.buying_power -= card.cost\n self.num_buys -= 1\n self.discard.append(card)\n return True",
"def event_player_bust(self) -> None:\n print(f\"Your hand contains {min(self.user.hand.value)}, you're bust\")\n self.event_house_wins()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
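The restart, bet, and hit documents above all look like methods of one Python 2 blackjack player class. The following is a minimal Python 3 sketch, under the assumption (not confirmed by the dataset) that the player tracks budget, bet_amount, and a state string; it only illustrates how such methods could fit together.

class Player:
    def __init__(self, budget):
        self.budget = budget
        self.state = 'active'
        self.bet_amount = 0
        self.hand_total = 0

    def bet(self, amount):
        # Reject bets that exceed the remaining budget.
        if amount > self.budget:
            print('cannot bet: not enough money')
        else:
            self.bet_amount = amount

    def hit(self, card_value):
        # Take a card; mark the hand as bust once it passes 21.
        self.hand_total += card_value
        if self.hand_total > 21:
            self.state = 'bust'

    def restart(self):
        # Leave the table once the budget is exhausted, otherwise reset the round.
        if self.budget <= 0:
            self.state = 'left'
            return
        self.state = 'active'
        self.hand_total = 0
        self.bet_amount = 0


p = Player(budget=100)
p.bet(30)
p.hit(10)
p.hit(12)        # 22 -> bust
print(p.state)   # 'bust'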
Reveal the dealer's face-down card and settle balances with the players in the game.
|
def showdown(self):
    print "%s: %s" % (self.name, repr(self.cards))  # open the dealer's cards
    for player in self.game.players:
        win = self.balance(player)
        if win > 0:
            print player.name, 'wins', win
        elif win == 0:
            print player.name, 'draws'
        elif win < 0:
            print player.name, 'loses', -win
        self.budget -= win
        player.budget += win
        print 'budget of %s : %s' % (player.name, player.budget)
    print 'budget of %s : %s' % (self.name, self.budget)
|
[
"def deal_cards(self):\n self.deck.shuffle(5)\n self.dealer.hand = self.deck.deal(1)\n self.dealer.value = self.dealer.calculate_value()\n for player in self.ingame:\n if player.bet != 0:\n player.hand = self.deck.deal(2)\n player.value = player.calculate_value()",
"def deal(self):\n\n if self.dealer: # Has cards in hand\n self.dealer.reset()\n\n if self.player: # Has cards in hand\n self.player.reset()\n\n dealer_first = self.deck.draw()\n dealer_second = self.deck.draw()\n dealer_second.flip()\n self.dealer.take_card(dealer_first)\n self.dealer.take_card(dealer_second)\n\n player_first = self.deck.draw()\n player_second = self.deck.draw()\n player_first.flip()\n player_second.flip()\n self.player.take_card(player_first)\n self.player.take_card(player_second)\n\n if self.verbose:\n print('Player bets:', self.player_bet)\n for player in (self.player, self.dealer):\n print(player, 'dealt:')\n for card in player:\n if card.face():\n print(' '*3, str(card)+':', 'face up')\n else:\n print(' '*3, str(card)+':', 'face down')",
"def stand():\r\n global dealer_hand, in_play, p_score, d_score, outcome, outcome2, hidden\r\n hidden = False\r\n \r\n outcome = \"Player has \" + str(player_hand.get_value())\r\n outcome2 = \"Dealer has \" + str(dealer_hand.get_value())\r\n if in_play:\r\n dealer_hand.get_value()\r\n player_hand.get_value()\r\n if dealer_hand.get_value() == 21:\r\n outcome2 = \"Dealer has 21\"\r\n outcome = \"Dealer wins!\"\r\n d_score += 1\r\n in_play = False\r\n if dealer_hand.get_value() >= player_hand.get_value():\r\n outcome2 = \"Dealer has \" + str(dealer_hand.get_value())\r\n outcome = \"Dealer wins!\"\r\n d_score += 1\r\n in_play = False\r\n if dealer_hand.get_value() >= 18 and dealer_hand.get_value() < player_hand.get_value():\r\n outcome2 = \"Dealer has \" + str(dealer_hand.get_value())\r\n outcome = \"Player wins!\"\r\n p_score += 1\r\n in_play = False\r\n \r\n while dealer_hand.get_value() < player_hand.get_value() and dealer_hand.get_value() < 21:\r\n dealer_hand.add_card(deck.deal_card())\r\n if dealer_hand.get_value() == 21:\r\n outcome2 = \"Dealer has 21\"\r\n outcome = \"Dealer wins!\"\r\n d_score += 1\r\n in_play = False\r\n if dealer_hand.get_value() < 21 and dealer_hand.get_value() >= player_hand.get_value():\r\n outcome2 = \"Dealer has \" + str(dealer_hand.get_value())\r\n outcome = \"Dealer wins!\"\r\n d_score += 1\r\n in_play = False\r\n elif dealer_hand.get_value() > 21:\r\n outcome2 = \"Dealer busted!\"\r\n outcome = \"Player wins!\"\r\n in_play = False\r\n p_score += 1",
"def _play_dealer(self, dealer_sum):\n while dealer_sum < 17:\n card = self._draw_card()\n dealer_sum += self.get_card_value(card)\n return dealer_sum",
"def calculate_value(self, hand):\n global FACE_CARDS\n #could refactor the 2 hand possiblities into methods of a Dealer and Player Class\n if hand == \"player\":\n if self.player_hand[-1].value in FACE_CARDS:\n self.player_value += 10\n elif self.player_hand[-1].value == \"A\":\n self.player_value += 11\n self.player_ace_count += 1\n else:\n self.player_value += int(self.player_hand[-1].value)\n\n if self.player_value > 21:\n if self.player_ace_count > self.player_almost_bust:\n #To prevent a Bust, your Ace became a one\n self.player_value -= 10\n self.player_almost_bust += 1\n else:\n self.player_lose()\n elif self.player_value == 21:\n self.blackjack = True\n self.endgame()\n\n elif hand == \"dealer\":\n if len(self.dealer_hand) > 1:\n if self.dealer_hand[-1].value in FACE_CARDS:\n self.dealer_value += 10\n elif self.dealer_hand[-1].value == \"A\":\n self.dealer_value += 11\n self.dealer_ace_count += 1\n else:\n self.dealer_value += int(self.dealer_hand[-1].value)\n\n if self.dealer_value > 21:\n if self.dealer_ace_count > self.dealer_almost_bust:\n #To prevent a Bust, the Dealer's Ace became a one\n self.dealer_value -= 10\n self.dealer_almost_bust += 1\n else:\n self.player_win()\n elif self.dealer_value == 21:\n self.player_lose()",
"def hide_card(self):\n try:\n self.hidden_card_value = self.hand[1]\n self.hand[1] = Card()\n except IndexError:\n print('The dealer does not have enough cards!')",
"def pay_player(self, amount):",
"def dealer_turn(dealer, deck):\n print(\"\\n======== DEALER'S TURN ========\")\n while deck.cards and not bust(dealer) and dealer.total < STAND_LIMIT():\n draw_card(dealer, deck)\n print(f\"\\nThe dealer Stands, their total is \\033[33m{dealer.total}\\033[0m.\\n\")\n time.sleep(1)",
"def nextPlayerInBid(self):\n print self.playerWhoseTurnItIs\n self.deleteCards(self.playerWhoseTurnItIs)\n if self.rules.bid[3] == \"round\" or self.rules.bid[3] == \"oneByOne\":\n self.playerWhoseTurnItIs += 1\n elif self.rules.bid[3] == \"faceOff\":\n if self.players[self.playerWhoseTurnItIs].bid == None:\n if self.playerWhoseTurnItIs == self.playerWhoStartsBid: \n if (self.players[self.playerWhoIsFacingOff].bid==False):\n self.playerWhoStartsBid = self.playerWhoIsFacingOff\n self.playerWhoseTurnItIs = self.playerWhoStartsBid\n self.playerWhoIsFacingOff = self.playerWhoseTurnItIs+1\n\n else:\n self.playerWhoStartsBid = self.playerWhoIsFacingOff\n self.playerWhoseTurnItIs = self.playerWhoStartsBid + 1\n self.playerWhoIsFacingOff = self.playerWhoseTurnItIs\n elif self.playerWhoseTurnItIs == self.playerWhoIsFacingOff:\n self.playerWhoseTurnItIs += 1\n self.playerWhoIsFacingOff = self.playerWhoseTurnItIs\n else:\n if self.playerWhoseTurnItIs == self.playerWhoStartsBid:\n self.playerWhoseTurnItIs = self.playerWhoIsFacingOff\n else:\n self.playerWhoseTurnItIs = self.playerWhoStartsBid\n self.playerWhoseTurnItIs %= self.rules.numberOfPlayers\n self.playerWhoIsFacingOff %= self.rules.numberOfPlayers\n self.playerWhoStartsBid %= self.rules.numberOfPlayers\n name = self.players[self.playerWhoseTurnItIs].name\n self.isDisplayingMessage = True\n tkMessageBox.showwarning(\"Next Player\",\"Pass the computer to %s.\" %name)\n self.isDisplayingMessage = False\n self.rules.updateGamePhase()\n self.redrawAll()\n self.preGame()",
"def draw_card(dealer,player): \n depth = 100\n x0,y0 = 100,100\n x1,y1 = 100,300\n\n bj_board.clear()\n for i in range(len(dealer)):\n if dealer[i].state==True:\n bj_board.add(dealer[i].image)\n dealer[i].image.moveTo(x0+i*20,y0)\n dealer[i].image.setDepth(depth-10*i)\n elif dealer[i].state==False:\n img=Image(img_path+\"Back.png\")\n bj_board.add(img)\n img.moveTo(x0+i*20,y0)\n img.setDepth(depth-10*i)\n for i in range(len(player)):\n bj_board.add(player[i].image)\n player[i].image.moveTo(x1+i*20,y1)\n player[i].image.setDepth(depth-10*i) \n \n text=Text(\"Your Total: \" + str(hand_value(player)))\n text.moveTo(300,300)\n bj_board.add(text)\n \n if dealer[0].state==True:\n text=Text(\"Dealer Total: \" + str(hand_value(dealer)))\n text.moveTo(300,100)\n bj_board.add(text)",
"def hook_gain_this_card(self, game, player):\n for plr in game.player_list():\n if plr != player:\n plr.output(\"Gained a silver from %s's purchase of Embassy\" % player.name)\n plr.gain_card(\"Silver\")\n return {}",
"def _calculate(self, cards_user, cards_dealer, sort=False):\r\n self.cards_user = cards_user \r\n self.cards_dealer = cards_dealer\r\n \r\n self.deck.remove(self.cards_user[0])\r\n self.deck.remove(self.cards_user[1])\r\n self.deck.remove(self.cards_dealer[0])\r\n \r\n for i in self.options:\r\n if i == \"stand\":\r\n for k in self.cards_user:\r\n if \"ace\" in self.cards_user:\r\n j = self.cards_user.index(\"ace\")\r\n self.cards_user[j] = 11\r\n points_user = sum(self.cards_user)\r\n points_dealer = self.cards_dealer[0]\r\n if points_user > 21:\r\n points_user -= 10\r\n if points_dealer == \"ace\":\r\n points_dealer = 11\r\n deck = self.deck.copy()\r\n winrate, possibilities = self.chance_win(points_user, points_dealer, deck)\r\n self.chances[0] = winrate/possibilities\r\n elif i == \"hit\":\r\n contains_ace = 0\r\n starting_ace = 0\r\n for k in cards_user:\r\n if 11 == k:\r\n j = self.cards_user.index(11)\r\n self.cards_user[j] = \"ace\"\r\n starting_ace += 1\r\n temp_chances = []\r\n for j in range(len(self.deck)):\r\n contains_ace = starting_ace\r\n for l in range(len(self.cards_user)):\r\n if self.cards_user[l] == \"ace\":\r\n self.cards_user[l] = 11\r\n points_user = sum(self.cards_user)\r\n if points_user > 21 and contains_ace > 0:\r\n m = self.cards_user.index(11)\r\n self.cards_user[m] = 1\r\n contains_ace -= 1\r\n points_user = sum(self.cards_user)\r\n if self.deck[j] == \"ace\" and (points_user + 11) < 22:\r\n next_card = 11\r\n contains_ace += 1\r\n elif self.deck[j] == \"ace\" and (points_user + 11) > 21:\r\n next_card = 1\r\n else:\r\n next_card = self.deck[j]\r\n points_user += next_card\r\n if points_user > 21 and contains_ace > 0:\r\n points_user -= 10\r\n contains_ace -= 1\r\n deck = self.deck.copy()\r\n del deck[j]\r\n winrate, possibilities = self.chance_win(points_user, points_dealer, deck)\r\n temp_chances.append(winrate/possibilities)\r\n self.chances[1] = sum(temp_chances)/len(temp_chances)\r\n elif i == \"double down\":\r\n self.chances[2] = self.chances[1]\r\n elif i == \"surrender\":\r\n self.chances[3] = 0",
"def test_other_player_has_sufficient_funds(self):\n card = self.new_red_card(name='Red', fee=2, game=self.game)\n self.player_card.add_card(card)\n self.player_card.money = 0\n self.player_rolled.money = 3\n card.hit(self.player_rolled)\n self.assertEqual(self.player_card.money, 2)\n self.assertEqual(self.player_rolled.money, 1)",
"def buy_card(self) -> int:\n pass",
"def dealer_rules(dealer_cards):\n\twhile (calc_value_cards(dealer_cards) < 17):\n\t\tdealer_cards = extra_card(dealer_cards)\n\t#function that calculates the changes to win if get a new card\n\tdealer_cards = calc_play_card(dealer_cards)\n\treturn(dealer_cards)",
"def reveal_card(self):\n self.hand[1] = self.hidden_card_value\n self.hidden_card_value = Card()",
"def draw_card(dealer,player):\n # hidden_img = Image(img_path+\"back.png\")\n depth = 100\n x0,y0 = 100,100\n x1,y1 = 100,300\n ix = 30\n\n bj_board.clear()\n for card in dealer:\n if card.state:\n card.image.moveTo(x0, y0)\n card.image.setDepth(depth)\n bj_board.add(card.image)\n else:\n img = Image(img_path+\"Back.png\")\n img.moveTo(x0, y0)\n img.setDepth(depth)\n bj_board.add(img)\n x0 += ix\n \n for card in player:\n if card.state:\n card.image.moveTo(x1, y1)\n card.image.setDepth(depth)\n bj_board.add(card.image)\n else:\n img = Image(img_path+\"back.png\")\n img.moveTo(x1, y1)\n img.setDepth(depth)\n bj_board.add(img)\n x1 += ix",
"def buy_card(self):\n\n print(f\"Hand has buying power {self.hand_buying_power}...\")\n bought_card = None\n\n # by Platinium, if possible\n # otherwise (game stage agnostic) can buy a province or colony, always buy it\n if ((self.highest_buyable_money == cards.PLATINUM) and\n (self.game_stage == GameStage.early_game)):\n bought_card = cards.PLATINUM\n elif ((self.highest_buyable_victory_points == cards.PROVINCE) or\n (self.highest_buyable_victory_points == cards.COLONY)):\n bought_card = self.highest_buyable_victory_points\n else:\n # buy the highest buyable money by default\n if (self.highest_buyable_money != cards.COPPER):\n bought_card = self.highest_buyable_money\n\n # except if in the late game stage, in which case buy the highest\n # buyable victory points instead\n if ((self.game_stage == GameStage.late_game) and\n (self.highest_buyable_victory_points) and\n (self.highest_buyable_victory_points.victory_points > 0)):\n bought_card = self.highest_buyable_victory_points\n print(f\"Late Stage Game, so buying victory points over money\")\n\n # explain the play\n self.speak_hand()\n s = f\"for total buying power of {self.hand_buying_power}\"\n self.game.speak_str(s)\n\n # gain the card bought, if any, to the discard pile:\n if bought_card:\n s = f\"I buy {bought_card.name}\"\n self.game.speak_str(s)\n\n # gain the card to the discard pile\n self.deck.discard.append(bought_card)\n self.game.buy_card(bought_card)\n else:\n s = f\"I do not buy anything\"\n self.game.speak_str(s)\n\n # the whole hand is used up buying the card, discard the hand\n self.deck.discard_hand()",
"def hook_gain_this_card(self, game, player):\n newcost = self.cost - 1\n player.plr_gain_card(\n cost=newcost,\n prompt=\"Gain a card costing %d due to Border Village\" % newcost,\n )\n return {}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Dealer has no choice. Stand if hand >= 17, otherwise hit
|
def deal_self(self):
    # The dealer has no choice: draw a card, then set the state from the hand value.
    self.cards.hit(self.get_card())
    if self.cards.hand < 17 and self.cards.hand >= 0:
        self.state = 'active'   # under 17: dealer must keep hitting
    elif self.cards.hand >= 17 and self.cards.hand <= 21:
        self.state = 'stand'    # 17 to 21: dealer stands
    elif self.cards.hand == -1:
        self.state = 'burst'    # a hand value of -1 signals a bust
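
A minimal usage sketch (editorial illustration, not part of the dataset row): assuming the dealer object exposes deal_self() and state as above and starts out 'active', the dealer's turn is just a loop that keeps dealing until the state changes.

def play_dealer_turn(dealer):
    # Keep hitting while under 17 ('active'); stop once the state is 'stand' or 'burst'.
    while dealer.state == 'active':
        dealer.deal_self()
    return dealer.state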
|
[
"def stand(hand=bj.player1.hand):\r\n phv = bj.player1.hand_value_check(hand) # check player hand value\r\n phv = [x for x in phv if x <= 21]\r\n if hand == bj.player1.hand:\r\n if len(phv) > 0:\r\n bj.player1.final_hand_val = max(phv)\r\n else:\r\n bj.player1.final_hand_val = \"bust\"\r\n else:\r\n if len(phv) > 0:\r\n bj.player1.final_hand2_val = max(phv)\r\n else:\r\n bj.player1.final_hand2_val = \"bust\"",
"def stand():\r\n global dealer_hand, in_play, p_score, d_score, outcome, outcome2, hidden\r\n hidden = False\r\n \r\n outcome = \"Player has \" + str(player_hand.get_value())\r\n outcome2 = \"Dealer has \" + str(dealer_hand.get_value())\r\n if in_play:\r\n dealer_hand.get_value()\r\n player_hand.get_value()\r\n if dealer_hand.get_value() == 21:\r\n outcome2 = \"Dealer has 21\"\r\n outcome = \"Dealer wins!\"\r\n d_score += 1\r\n in_play = False\r\n if dealer_hand.get_value() >= player_hand.get_value():\r\n outcome2 = \"Dealer has \" + str(dealer_hand.get_value())\r\n outcome = \"Dealer wins!\"\r\n d_score += 1\r\n in_play = False\r\n if dealer_hand.get_value() >= 18 and dealer_hand.get_value() < player_hand.get_value():\r\n outcome2 = \"Dealer has \" + str(dealer_hand.get_value())\r\n outcome = \"Player wins!\"\r\n p_score += 1\r\n in_play = False\r\n \r\n while dealer_hand.get_value() < player_hand.get_value() and dealer_hand.get_value() < 21:\r\n dealer_hand.add_card(deck.deal_card())\r\n if dealer_hand.get_value() == 21:\r\n outcome2 = \"Dealer has 21\"\r\n outcome = \"Dealer wins!\"\r\n d_score += 1\r\n in_play = False\r\n if dealer_hand.get_value() < 21 and dealer_hand.get_value() >= player_hand.get_value():\r\n outcome2 = \"Dealer has \" + str(dealer_hand.get_value())\r\n outcome = \"Dealer wins!\"\r\n d_score += 1\r\n in_play = False\r\n elif dealer_hand.get_value() > 21:\r\n outcome2 = \"Dealer busted!\"\r\n outcome = \"Player wins!\"\r\n in_play = False\r\n p_score += 1",
"def stand():\n \n # Update message, score and the player's \"Hand\" status\n # as global variables.\n global outcome, outcome_plus, outcome_plus_plus, in_play, score, action \n \n # If the \"Player\" has busted, remind the \"Player\" that \n # they have busted.\n if player.get_value() > 21:\n outcome = PLAYER_BUSTED\n outcome_plus = outcome_plus_plus = \"\"\n action = NEW_DEAL\n elif in_play:\n # If the \"Hand\" is in play, repeatedly hit \"Dealer\" \n # until his \"Hand\" has value 17 or more. \n while dealer.get_value() < 17:\n dealer.add_card(deck_of_cards.deal_card())\n\n # If busted, update messages, score and the \n # player's \"Hand\" status. \n if dealer.get_value() > 21:\n outcome = PLAYER_WINS\n outcome_plus = DEALER_BUSTED\n outcome_plus_plus = \"\"\n action = NEW_DEAL \n score += SCORE_POINTS \n in_play = False\n # Else compare the value of the \n # player's and dealer's \"Hands\". If the value of \n # the player's \"Hand\" is less than or equal to \n # the dealer's \"Hand\", the \"dealer\" wins. \n # Otherwise the \"player\" has won. Again,\n # update messages, score and the player's \"Hand\" \n # status. \n else: \n in_play = False\n action = NEW_DEAL\n outcome_plus = outcome_plus_plus = \"\"\n if player.get_value() > dealer.get_value():\n outcome = PLAYER_WINS \n score += SCORE_POINTS \n else:\n outcome = PLAYER_LOSES \n score -= SCORE_POINTS\n \n return None",
"def bustHand(hand):\n if sum(hand) > 21:\n return True\n else:\n return False",
"def apply_basic_strategy(player_hand, dealer_hand, doubling_down=True):\n\n player_sum = player_hand.sum()\n if 1 in player_hand:\n # remove the ace.\n player_hand = np.delete(player_hand, np.argmax(player_hand == 1))\n player_sum = player_hand.sum()\n if player_sum > 10:\n flag = True\n player_hand = np.append(player_hand, [1])\n player_sum = player_hand.sum()\n else:\n flag = False\n player_hand = np.append(player_hand, [1])\n else:\n flag = True\n if player_hand[0] != player_hand[1]:\n if flag:\n if player_sum <= 8:\n return \"hit\"\n elif player_sum == 9:\n if dealer_hand >= 3 and dealer_hand <= 6:\n return \"double\"\n return \"hit\"\n elif player_sum == 10:\n if dealer_hand >= 2 and dealer_hand <= 9:\n return \"double\"\n return \"hit\"\n elif player_sum == 11:\n if dealer_hand >= 2 and dealer_hand <= 10:\n return \"double\"\n return \"hit\"\n elif player_sum == 12:\n if dealer_hand >= 4 and dealer_hand <= 6:\n return \"stand\"\n return \"hit\"\n elif player_sum >= 13 and player_sum <= 16:\n if dealer_hand >= 2 and dealer_hand <= 6:\n return \"stand\"\n else:\n return \"hit\"\n else:\n return \"stand\"\n else:\n if player_sum == 3 or player_sum == 4:\n if dealer_hand == 5 or dealer_hand == 6:\n return \"double\"\n return \"hit\"\n elif player_sum == 5 or player_sum == 6:\n if dealer_hand >= 4 and dealer_hand <= 6:\n return \"double\"\n return \"hit\"\n elif player_sum == 7:\n if dealer_hand >= 3 and dealer_hand <= 6:\n return \"double\"\n return \"hit\"\n elif player_sum == 8:\n if dealer_hand >= 3 and dealer_hand <= 6:\n return \"double\"\n elif dealer_hand == 2 or dealer_hand == 7 or dealer_hand == 8:\n return \"stand\"\n return \"hit\"\n else:\n return \"stand\"\n else:\n if doubling_down:\n if player_hand[0] == 1:\n return \"split\"\n elif player_hand[0] == 2 or player_hand[0] == 3:\n if dealer_hand >= 2 and dealer_hand <= 7:\n return \"split\"\n return \"hit\"\n elif player_hand[0] == 4:\n if dealer_hand == 5 or dealer_hand == 6:\n return \"split\"\n return \"hit\"\n elif player_hand[0] == 5:\n if dealer_hand >= 2 and dealer_hand <= 9:\n return \"double\"\n return \"hit\"\n elif player_hand[0] == 6 or player_hand[0] == 7:\n if dealer_hand >= 2 and dealer_hand <= player_hand[0]:\n return \"split\"\n return \"hit\"\n elif player_hand[0] == 8:\n return \"split\"\n elif player_hand[0] == 9:\n if dealer_hand == 1 or dealer_hand == 7 or dealer_hand == 10:\n return \"stand\"\n return \"split\"\n return \"stand\"\n else:\n if player_hand[0] == 2 or player_hand[0] == 3:\n if dealer_hand >= 4 and dealer_hand <= 7:\n return \"split\"\n return \"hit\"\n elif player_hand[0] == 4:\n return \"hit\"\n elif player_hand[0] == 6:\n if dealer_hand >= 3 and dealer_hand <= 6:\n return \"split\"\n return \"hit\"",
"def deal_dealer(self):\n while self.dealer_hand.value < 17:\n self.deal('dealer')",
"def hit(self):\n global in_play, deck, player_hand, dealer_hand, outcome, lost\n \n if in_play:\n player_hand.add_card(deck.deal_card())\n \n if player_hand.get_value() > 21:\n self.outcome.set(\"You have busted! Dealer wins. New deal?\")\n self.lost += 1\n self.score.set(str(self.won) + \"/\" + str(self.lost))\n in_play = False\n draw(canvas)\n\n print \"\\nPlayer hand: \", player_hand\n print \"Dealer hand: \", dealer_hand",
"def canHit(hand, type):\n\n global round\n \n # if player, check under 21\n if type == 'player' and sum(hand) < 21:\n return True\n\n # if player and exactly 21, then dealer's turn\n if type == 'player' and sum(hand) == 21:\n round = 'dealer'\n return False\n\n # if dealer, check under 17\n elif type == 'dealer' and sum(hand) < 17:\n return True\n\n # if player/dealer and previous checks failed, then cannot hit\n elif type == 'player' or type == 'dealer':\n return False\n\n # if none of the above triggered, then error state, e.g. wrong type passed\n else:\n return 'Error'",
"def best_wild_hand(hand):\n return",
"def policy(self, s):\r\n if s.dealer_sum >= 16:\r\n return Action.STICK\r\n else:\r\n return Action.HIT",
"def player_hit(self):\r\n if self.in_progress:\r\n self.player_hand.add(self.deck.deal())\r\n if self.player_hand.total > 21:\r\n self.status_color = 'red'\r\n self.game_status = \"Dealer WINS... Press 'r' to start game\"\r\n self.dealer_wins += 1\r\n self.in_progress = False\r\n self.refresh_canvas()",
"def event_player_bust(self) -> None:\n print(f\"Your hand contains {min(self.user.hand.value)}, you're bust\")\n self.event_house_wins()",
"def assess_hand(self, r):\n bidIndex = -1\n while bool(random.getrandbits(1)): # Coin flip\n bidIndex += 1\n if bidIndex == -1:\n self.maxBid = LEGAL_BIDS[0] - 1 # Pass immediately.\n else:\n self.maxBid = LEGAL_BIDS[bidIndex]",
"def finishHand(self):\r\n while self.getHand().getHandValue()<17 and not self.getHand().isFull():\r\n self.deal(1, self.getHand())",
"def is_bust(self, hand_idx=0):\n if self.player_hand_value(hand_idx) > 21:\n return True\n else:\n return False",
"def insane_hunter_roll(self):\n if random.choice((0, 1)):\n self.game.add_cultist(self.location)",
"def free_hand(hand):\n return len(hand) < 3",
"def hit(self, card):\n self.append(card)\n values=[]\n values.append(card.value())\n if values[0] < 2:\n values.append(values[0]+ 10)\n new_sums =set([v+s for v in values for s in self.possible_sums if v+s <=21])\n new_sums =sorted(new_sums)\n if len(new_sums) ==0:\n self.hand=-1\n else:\n self.hand = new_sums[-1]\n self.possible_sums = new_sums",
"def evalHand(hand):\n # os.system(\"clear\")\n #print(\"dealer hand before evalHand is: \", hand.showHand())\n if (1 in hand.cards) and (21 - hand.handSum() >= 10):\n print(\"found a 1 value Ace in the hand\")\n hand.cards[hand.cards.index(1)] = 11 # Change the first ace from value 1\n # to value 11\n if (11 in hand.cards) and (hand.handSum() >= 22):\n print(\"found an 11 value Ace in the hand and sum > 21\")\n hand.cards[hand.cards.index(11)] = 1 # Change the first ace from value 1\n # to value 11"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Finds valid positions for the Tile mover in Skilaverkefni 8. Takes in the current position of the game.
|
def validpositions(tile):
    # Map each tile number to the compass directions (n/e/s/w) that can be
    # moved to from it; only the tiles listed here are expected as input.
    if tile == 11 or tile == 21:
        valid_pos = "n"
    elif tile == 12:
        valid_pos = "nes"
    elif tile == 13:
        valid_pos = "es"
    elif tile == 22 or tile == 33:
        valid_pos = "sw"
    elif tile == 23:
        valid_pos = "ew"
    elif tile == 32:
        valid_pos = "ns"
    possible_directions(valid_pos)  # announce the available directions to the player
    return valid_pos
|
[
"def get_winning_move(state, player):\n position = []\n for i in range(16):\n if state.line_scoring[i] == player * 3:\n row, col = i // 4, i % 4\n for j in range(4):\n if not state[j, row, col]:\n position.append((j, row, col))\n break\n for i in range(16):\n if state.line_scoring[i + 16] == player * 3:\n hei, col = i // 4, i % 4\n for j in range(4):\n if not state[hei, j, col]:\n position.append((hei, j, col))\n break\n for i in range(16):\n if state.line_scoring[i + 32] == player * 3:\n hei, row = i // 4, i % 4\n for j in range(4):\n if not state[hei, row, j]:\n position.append((hei, row, j))\n break\n for i in range(4):\n if state.line_scoring[i + 48] == player * 3:\n for j in range(4):\n if not state[i, j, j]:\n position.append((i, j, j))\n break\n for i in range(4):\n if state.line_scoring[i + 52] == player * 3:\n for j in range(4):\n if not state[i, j, 3 - j]:\n position.append((i, j, 3 - j))\n break\n for i in range(4):\n if state.line_scoring[i + 56] == player * 3:\n for j in range(4):\n if not state[j, i, j]:\n position.append((j, i, j))\n break\n for i in range(4):\n if state.line_scoring[i + 60] == player * 3:\n for j in range(4):\n if not state[j, i, 3 - j]:\n position.append((j, i, 3 - j))\n break\n for i in range(4):\n if state.line_scoring[i + 64] == player * 3:\n for j in range(4):\n if not state[j, j, i]:\n position.append((j, j, i))\n break\n for i in range(4):\n if state.line_scoring[i + 68] == player * 3:\n for j in range(4):\n if not state[j, 3 - j, i]:\n position.append((j, 3 - j, i))\n break\n if state.line_scoring[72] == player * 3:\n for j in range(4):\n if not state[j, j, j]:\n position.append((j, j, j))\n break\n if state.line_scoring[73] == player * 3:\n for j in range(4):\n if not state[j, j, 3 - j]:\n position.append((j, j, 3 - j))\n break\n if state.line_scoring[74] == player*3:\n for j in range(4):\n if not state[j, 3 - j, j]:\n position.append((j, 3 - j, j))\n break\n if state.line_scoring[75] == player * 3:\n for j in range(4):\n if not state[j, 3 - j, 3 - j]:\n position.append((j, 3 - j, 3 - j))\n break\n # print(state.line_scoring)\n # print(player, position)\n return position",
"def locations_of_pieces_with_valid_moves(active_player, board):\n results = []\n for y in range(len(board)):\n for x in range(len(board[y])):\n if reason_piece_at_location_cant_move((x, y), active_player, board) is None:\n results.append((x, y))\n return results",
"def checkMoves(self,board):\n possibleMoves = []\n\n for c in xrange(0,8):\n for r in xrange(0,8):\n if board.isValidMove(self.tile,c,r):\n possibleMoves.append(c+r*8)\n\n return possibleMoves",
"def ai_moves(self):\n self.update_ai_pawns()\n possible_moves = {}\n\n for pawn in self.ai_pawns:\n templist = {}\n # List of next position after each possible move: new_pos ['m','m2','l','r']\n new_pos = [[pawn.get_pos_x(), pawn.get_pos_y()-1], [pawn.get_pos_x(), pawn.get_pos_y()-2],\n [pawn.get_pos_x()-1, pawn.get_pos_y()-1], [pawn.get_pos_x()+1, pawn.get_pos_y()-1]]\n\n # Assigning confidence values | 0 == impossible ; 100 = winning move\n if pawn.get_pos_y() == 7 and self.is_occupied(new_pos[1][0], new_pos[1][1]) is None and self.is_occupied(new_pos[1][0], new_pos[1][1] + 1) is None:\n templist.update({consts.MV_FWD2: (100 / new_pos[1][1])})\n else:\n templist.update({consts.MV_FWD2: 0})\n\n if self.is_occupied(new_pos[0][0], new_pos[0][1]) is None:\n templist.update({consts.MV_FWD1: (100/new_pos[0][1])})\n else:\n templist.update({consts.MV_FWD1:0})\n\n if self.is_occupied(new_pos[2][0], new_pos[2][1]) == consts.COLOR_WHITE:\n templist.update({consts.MV_LEFT: (100 / new_pos[2][1]) + 20})\n else:\n templist.update({consts.MV_LEFT: 0})\n\n if self.is_occupied(new_pos[3][0], new_pos[3][1]) == consts.COLOR_WHITE:\n templist.update({consts.MV_RIGHT: (100 / new_pos[3][1]) + 20})\n else:\n templist.update({consts.MV_RIGHT: 0})\n\n possible_moves.update({f'{pawn.get_pos_x()}::{pawn.get_pos_y()}':templist})\n\n return possible_moves",
"def winningMove():\r\n\tglobal turn, tile1, tile2, tile3, tile4, tile5, tile6, tile7, tile8, tile9\r\n\r\n\tnoWin=True\r\n\tmove=False\r\n\tif turn==\"Player1\":\r\n\t\tif validMove(1):\r\n\t\t\ttile1+=1\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove=1\t\r\n\t\t\ttile1+=-1\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\r\n\t\tif validMove(2):\r\n\t\t\ttile2+=1\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 2\r\n\t\t\ttile2+=-1\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\r\n\t\tif validMove(3):\r\n\t\t\ttile3+=1\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 3\r\n\t\t\ttile3+=-1\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\t\t\r\n\t\tif validMove(4):\r\n\t\t\ttile4+=1\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 4\t\r\n\t\t\ttile4+=-1\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\t\r\n\t\tif validMove(5):\r\n\t\t\ttile5+=1\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 5\t\t\r\n\t\t\ttile5+=-1\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\r\n\t\tif validMove(6):\r\n\t\t\ttile6+=1\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 6\t\r\n\t\t\ttile6+=-1\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\r\n\t\tif validMove(7):\r\n\t\t\ttile7+=1\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 7\t\r\n\t\t\ttile7+=-1\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\r\n\t\tif validMove(8):\r\n\t\t\ttile8+=1\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 8\t\r\n\t\t\ttile8+=-1\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\r\n\t\tif validMove(9):\r\n\t\t\ttile9+=1\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 9\t\t\r\n\t\t\ttile9+=-1\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\r\n\r\n\telif turn==\"Player2\":\r\n\t\tif validMove(1):\r\n\t\t\ttile1+=2\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 1\t\t\t\t\r\n\t\t\ttile1+=-2\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\r\n\t\tif validMove(2):\r\n\t\t\ttile2+=2\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 2\r\n\t\t\ttile2+=-2\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\r\n\t\tif validMove(3):\r\n\t\t\ttile3+=2\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 3\r\n\t\t\ttile3+=-2\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\r\n\t\tif validMove(4):\r\n\t\t\ttile4+=2\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 4\t\r\n\t\t\ttile4+=-2\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\r\n\t\tif validMove(5):\r\n\t\t\ttile5+=2\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 5\t\r\n\t\t\ttile5+=-2\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\r\n\t\tif validMove(6):\r\n\t\t\ttile6+=2\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 6\t\r\n\t\t\ttile6+=-2\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\r\n\t\tif validMove(7):\r\n\t\t\ttile7+=2\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 7\t\r\n\t\t\ttile7+=-2\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\r\n\t\tif validMove(8):\r\n\t\t\ttile8+=2\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 8\t\r\n\t\t\ttile8+=-2\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\r\n\t\tif validMove(9):\r\n\t\t\ttile9+=2\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 9\r\n\t\t\ttile9+=-2\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\r\n\tif noWin:\r\n\t\treturn False",
"def validMoves(self, list_of_pieces): # TODO\n movelist = []\n for i in list_of_pieces: #move one up\n if self.team == \"WHITE\":\n if i.x==self.x and i.y==self.y+1:\n break\n movelist.append((self.x, self.y + 1))\n else:\n if i.x==self.x and i.y==self.y-1:\n break\n movelist.append((self.x, self.y - 1))\n if self.num_moves == 0: #move two up\n for i in list_of_pieces:\n if self.team == \"WHITE\":\n if i.x == self.x and i.y == self.y + 2:\n break\n elif i.x == self.x and i.y == self.y + 1:\n break\n movelist.append((self.x, self.y + 2))\n else:\n if i.x == self.x and i.y == self.y - 2:\n break\n elif i.x == self.x and i.y == self.y - 1:\n break\n movelist.append((self.x, self.y - 2))\n for i in list_of_pieces: #capture\n if self.team == \"WHITE\":\n if i.x==self.x+1 and abs(self.y-i.y)==1 and i.team==\"BLACK\":\n movelist.append((i.x, i.y))\n else:\n if i.x==self.x-1 and abs(self.y-i.y)==1 and i.team==\"WHITE\":\n movelist.append((i.x, i.y))\n\n\n\n #en passant\n for piece in list_of_pieces:\n if self.y==piece.y and abs(self.x-piece.x)==1 and piece.type==\"PAWN\" and piece.num_moves==0:\n if piece.team == \"WHITE\":\n movelist.append((piece.x, piece.y-1))\n else:\n movelist.append((piece.x, piece.y+1))\n return movelist",
"def get_all_positions(board, white_turn):\n list = []\n for row in range(8):\n for col in range(8):\n # White\n if white_turn and white_piece_on_pos((row, col), board):\n obj = board[row][col]\n if type(obj) is Pawn:\n for valid_pos in valid_positions_pawn_white((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Tower:\n for valid_pos in valid_positions_tower_white((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Bishop:\n for valid_pos in valid_positions_bishop_white((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Horse:\n for valid_pos in valid_positions_horse_white((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Queen:\n for valid_pos in valid_positions_queen_white((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is King:\n for valid_pos in valid_positions_king_white((row, col), board):\n list.append(((row, col), valid_pos))\n # Black\n elif (not white_turn) and black_piece_on_pos((row, col), board):\n obj = board[row][col]\n if type(obj) is Pawn:\n for valid_pos in valid_positions_pawn_black((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Tower:\n for valid_pos in valid_positions_tower_black((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Bishop:\n for valid_pos in valid_positions_bishop_black((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Horse:\n for valid_pos in valid_positions_horse_black((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Queen:\n for valid_pos in valid_positions_queen_black((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is King:\n for valid_pos in valid_positions_king_black((row, col), board):\n list.append(((row, col), valid_pos))\n return list",
"def getKnightMoves(pos, chessBoard):\n column, row = list(pos.strip().lower())\n row = int(row) - 1\n column = chess_map_from_alpha_to_index[column]\n i,j = row, column\n solutionMoves = []\n try:\n temp = chessBoard[i + 1][j - 2]\n solutionMoves.append([i + 1, j - 2])\n except:\n pass\n try:\n temp = chessBoard[i + 2][j - 1]\n solutionMoves.append([i + 2, j - 1])\n except:\n pass\n try:\n temp = chessBoard[i + 2][j + 1]\n solutionMoves.append([i + 2, j + 1])\n except:\n pass\n try:\n temp = chessBoard[i + 1][j + 2]\n solutionMoves.append([i + 1, j + 2])\n except:\n pass\n try:\n temp = chessBoard[i - 1][j + 2]\n solutionMoves.append([i - 1, j + 2])\n except:\n pass\n try:\n temp = chessBoard[i - 2][j + 1]\n solutionMoves.append([i - 2, j + 1])\n except:\n pass\n try:\n temp = chessBoard[i - 2][j - 1]\n solutionMoves.append([i - 2, j - 1])\n except:\n pass\n try:\n temp = chessBoard[i - 1][j - 2]\n solutionMoves.append([i - 1, j - 2])\n except:\n pass\n\n # Filter all negative values\n temp = [i for i in solutionMoves if i[0] >=0 and i[1] >=0]\n allPossibleMoves = [\"\".join([chess_map_from_index_to_alpha[i[1]], str(i[0] + 1)]) for i in temp]\n allPossibleMoves.sort()\n return allPossibleMoves",
"def identify_game_offset():\n \n game_image = ImageGrab.grab()\n fast_image = game_image.load()\n width, height = game_image.size\n for x in range(width-GAME_AREA[0]):\n for y in range(height-GAME_AREA[1]):\n for loc, colour in GAME_EDGE_COLOURS.items():\n image_colour = fast_image[loc[0]+x, loc[1]+y]\n if colour is None:\n if not is_grey_boundary_colour(image_colour):\n break\n elif image_colour != colour:\n break\n else: #Did not break\n return x, y\n return None",
"def invalid_spots(move, move_row, size):\n\n moves = []\n col = get_column(move, size) # Get the column the move falls into\n for cur_row in range(size):\n diff = abs(cur_row - move_row)\n if diff != 0:\n left = right = 0\n if col - diff >= 0:\n left = 2 ** (size - 1) >> (col - diff)\n if col + diff < size:\n right = 1 << (size - (col + diff) - 1)\n moves.append(left ^ right ^ move)\n else:\n moves.append(move)\n\n return moves",
"def update_map(self, screenshot=None):\n # Get the visible tiles\n nearby = self.game_map[\n (self.player_position[0] - 10): (self.player_position[0] + 11),\n (self.player_position[1] - 10): (self.player_position[1] + 11)\n ]\n\n # Clear NPCs in the nearby as they may have moved\n nearby[nearby == self.TILES.WEAPON_SHOPKEEPER.value] = self.TILES.UNKNOWN.value\n nearby[nearby == self.TILES.BLACKSMITH.value] = self.TILES.UNKNOWN.value\n\n # Take screenshot and isolate the gamplay region\n if screenshot is None:\n screenshot = utils.take_screenshot()\n play = screenshot[8:344, 8:344]\n\n # Loop through all unknown tiles in the nearby\n for i, j in zip(*np.where(nearby == self.TILES.UNKNOWN.value)):\n # Scale up the dimensions\n tile_x = i * self.TILE_DIM\n tile_y = j * self.TILE_DIM\n\n # The center cell is always the player\n if i == 10 and j == 10:\n tile_x = self.player_position[0] + int(tile_x / 16) - 10\n tile_y = self.player_position[1] + int(tile_y / 16) - 10\n self.game_map[(tile_x, tile_y)] = self.TILES.PLAYER.value\n continue\n\n # Slice the tile from the play region\n tile = play[tile_y:tile_y + self.TILE_DIM,\n tile_x:tile_x + self.TILE_DIM]\n\n tile_x = self.player_position[0] + int(tile_x / 16) - 10\n tile_y = self.player_position[1] + int(tile_y / 16) - 10\n\n # Go through all tile types looking for a high confidence match\n template = None\n for potential_template in self.templates:\n if np.allclose(potential_template[0], tile, 1, 1):\n template = potential_template\n break\n\n # No match, assume it is inaccessible\n if template is None:\n self.game_map[(tile_x, tile_y)] = self.TILES.INACCESSIBLE.value\n continue\n\n # By default, mark tile as inaccessible\n label = None\n\n # Mark as mineable\n if re.search(r'rock', template[1], re.M | re.I):\n label = self.TILES.MOUNTAIN.value\n elif re.search(r'door', template[1], re.M | re.I):\n label = self.TILES.DOOR.value\n elif re.search(r'gravel', template[1], re.M | re.I):\n label = self.TILES.GRAVEL.value\n elif re.search(r'shopkeeper', template[1], re.M | re.I):\n label = self.TILES.WEAPON_SHOPKEEPER.value\n elif re.search(r'blacksmith', template[1], re.M | re.I):\n label = self.TILES.BLACKSMITH.value\n elif re.search(r'guard', template[1], re.M | re.I):\n label = self.TILES.INACCESSIBLE.value\n elif re.search(r'inaccessible', template[1], re.M | re.I):\n label = self.TILES.INACCESSIBLE.value\n elif re.search(r'accessible', template[1], re.M | re.I):\n label = self.TILES.ACCESSIBLE.value\n\n # Calculate coordinates of tile in the map relative to the player\n self.game_map[(tile_x, tile_y)] = label\n\n # Go through all tiles in the gameplay region to find the mountains\n for i, j in zip(*np.where(nearby == self.TILES.MOUNTAIN.value)):\n # Get the tile to the left of the mountain\n tile_left = nearby[(i-1, j)]\n\n # Only allow mountains to be minable if they are beside gravel\n if not tile_left == self.TILES.GRAVEL.value:\n nearby[(i, j)] = self.TILES.INACCESSIBLE.value\n\n # Save the game map to disk\n np.savetxt('map.txt', self.game_map, fmt='%d')",
"def getPossibleMoves(self): # called to get possible positions this piece can go\r\n \r\n moves = {}\r\n\r\n ids = []\r\n\r\n for piece in self.board.pieces.values():\r\n if piece.name == \"empty\":\r\n piece.glow = False\r\n piece.ready = False\r\n\r\n self.piece = self\r\n\r\n def check(direction=\"left\", heading=\"north\", x=None, y=None):\r\n piece = self.piece\r\n if direction == \"left\": x -= 50\r\n else: x += 50\r\n\r\n if heading == \"north\": y -= 50\r\n else: y += 50\r\n\r\n if (x, y) in self.board.pieces: # position is empty\r\n empty = self.board.getPiece((x, y))\r\n empty.glow = True\r\n old, new, obj = (direction, heading), (x, y), piece\r\n identity = self.getRandomID(ids) # get an ID for the move\r\n moves[identity] = old, new, obj\r\n\r\n if piece.isKing: # piece is a king, so go on\r\n check(direction, heading, x, y)\r\n else: # its not empty, so check if its comrade\r\n x1, y1 = x+25, y+25\r\n piece2 = self.board.getPiece((x1, y1))\r\n try:\r\n if piece.isComrade(piece2):# piece is comrade so return\r\n return\r\n else: # piece is not comrade, so check empty\r\n if direction == \"left\": x2 = x1-25-50\r\n else: x2 = x1-25+50\r\n\r\n if heading == \"north\": y2 = y1-25-50\r\n else: y2 = y1-25+50\r\n\r\n if (x2, y2) in self.board.pieces: # its empty, so notify player\r\n empty = self.board.getPiece((x2, y2))\r\n empty.glow = True\r\n empty.ready = True\r\n\r\n old, new, obj = (direction, heading), (x2, y2), piece2\r\n identity = self.getRandomID(ids)\r\n moves[identity] = old, new, obj\r\n\r\n check(direction, heading, piece2.x-25, piece2.y-25)\r\n check(direction, heading, x2, y2)\r\n \r\n # check empty or comrade again\r\n if direction == \"left\": x3 = x2-50\r\n else: x3 = x2+50\r\n\r\n if heading == \"north\": y3 = y2-50\r\n else: y3 = y2+50\r\n\r\n if (x3, y3) in self.board.pieces: # positon(address) is empty\r\n return\r\n else: # there is a piece, so check if comrade, stop, if not comrade continue\r\n x3+=25\r\n y3+= 25\r\n\r\n piece3 = self.board.getPiece((x3, y3))\r\n if piece3.isComrade(piece2): # comrades, so stop\r\n return\r\n else: # not comrades, so continue\r\n self.piece = piece3\r\n check(direction, heading, x, y)\r\n\r\n #self.piece = piece2\r\n \r\n #check(direction, heading, x2, y2) # keep searching\r\n else: # its not empty, so return\r\n return\r\n except:\r\n pass\r\n\r\n if self.piece.name == \"white\": direction = \"north\"\r\n else: direction = \"south\"\r\n \r\n check(\"left\", direction, self.piece.x-25, self.piece.y-25)\r\n check(\"right\", direction, self.piece.x-25, self.piece.y-25)\r\n \r\n if self.piece.isKing:\r\n if self.piece.name == \"white\": heading = \"south\"\r\n else: heading = \"north\"\r\n \r\n check(\"left\", heading, self.piece.x-25, self.piece.y-25)\r\n check(\"right\", heading, self.piece.x-25, self.piece.y-25)\r\n\r\n if self.piece.name == \"white\":\r\n eatMoves = self.board.game.thinkEatMoves(moves, \"person\")\r\n if eatMoves is not None:\r\n return eatMoves\r\n\r\n return moves",
"def list_valid_move (self, player, opponent):\n ret = [] \n for i in range(8):\n for j in range(8):\n if player[i,j] == 0 and opponent[i,j] == 0:\n p, o = player.copy(), opponent.copy()\n p[i,j] = 1\n if self._check_for_updates (p, o, player, opponent):\n ret.append ( (i,j) )\n\n return ret",
"def get_legal_moves(self):\n location = self.player_locations[self.player]\n if not location:\n \treturn self.get_blank_spaces()\n\n \tmoves = []\n \trays = [(1,0), (1,-1), (0,-1), (-1,-1), (-1,0), (-1,1), (0,1), (1,1)]\n\n \tfor dx, dy in rays:\n \t\t_x, _y = location\n \t\twhile 0 <= _x + dx < xlimit and 0 <= _y + dy < ylimit:\n \t\t\t_x, _y = _x +dx, _y + dy\n \t\t\tif self.board[_x][_y]:\n \t\t\t\tbreak\n \t\t\tmoves.append((_x,_y))\n \treturn moves",
"def find(Map, PosI, PosF):\n \n # Pour les tests, cf. Pathfinding et Pathfinding2 \n \n InitialPosI = PosI\n InitialPosF = PosF\n Chemin = []\n \n Hvalue = np.zeros((np.shape(Map))) #Distance\n Gvalue = np.zeros((np.shape(Map))) #Movement Cost\n Fvalue = np.zeros((np.shape(Map))) #G+H \n Gvalue[:] = np.nan #initialiser Gvalue à une matrice NaN\n \n OpenList = [(InitialPosI,'N')]\n CloseList = []\n \n # Initialisation de Hvalue\n for i in range(np.shape(Hvalue)[0]):\n for j in range(np.shape(Hvalue)[1]):\n if Map[i,j] !=1:\n Hvalue[i,j] = abs(i-PosF[0]) + abs(j-PosF[1])\n else:\n Hvalue[i,j] = np.nan\n\n### Round 1 (+initialisations)\n \n CloseList.append(tuple(PosI))\n \n if PosI[0]-1>=0 and Map[PosI[0]-1,PosI[1]] != 1 and ((PosI[0]-1,PosI[1]) not in OpenList) and ((PosI[0]-1,PosI[1]) not in CloseList): #Check vertical haut\n OpenList.append(((PosI[0]-1,PosI[1]),'D')) #D : fleche vers le bas..\n if PosI[0]+1<=np.shape(Map)[0]-1 and Map[PosI[0]+1,PosI[1]] != 1 and ((PosI[0]+1,PosI[1]) not in OpenList) and ((PosI[0]+1,PosI[1]) not in CloseList): #Check vertical bas\n OpenList.append(((PosI[0]+1,PosI[1]),'U')) \n if PosI[1]-1>=0 and Map[PosI[0],PosI[1]-1] != 1 and ((PosI[0],PosI[1]-1) not in OpenList) and ((PosI[0],PosI[1]-1) not in CloseList): #Check horiz gauche\n OpenList.append(((PosI[0],PosI[1]-1),'R'))\n if PosI[1]+1<=np.shape(Map)[1]-1 and Map[PosI[0],PosI[1]+1] != 1 and ((PosI[0],PosI[1]+1) not in OpenList) and ((PosI[0],PosI[1]+1) not in CloseList): #Check horiz droit\n OpenList.append(((PosI[0],PosI[1]+1),'L'))\n \n \n for OV in OpenList: #OV pour OpenValue \n Gvalue[OV[0][0],OV[0][1]] = 10\n \n Fvalue = np.copy(Gvalue + Hvalue)\n for CV in CloseList: #CV pour ClosedValue\n Fvalue[CV[0],CV[1]] = np.nan\n \n\n#### Round NEXT \n ###Vers le min de Fvalue:\n while PosF not in CloseList and PosI != PosF:\n \n if np.all(np.isnan(Fvalue)): #Check si F est égale à une matrice Full NaN\n# print('Pas de chemin')\n return(False) # soit return False, soit return la position init, donc bon..\n \n Index = np.argwhere(Fvalue == np.nanmin(Fvalue))\n PosI = Index.tolist()[0]\n \n CloseList.append(tuple(PosI))\n if PosI[0]-1>=0 and Map[PosI[0]-1,PosI[1]] != 1 and ((PosI[0]-1,PosI[1]) not in OpenList) and ((PosI[0]-1,PosI[1]) not in CloseList): #Check vertical haut\n OpenList.append(((PosI[0]-1,PosI[1]),'D')) #DOWN (fleche vers le bas..)\n if PosI[0]+1<=np.shape(Map)[0]-1 and Map[PosI[0]+1,PosI[1]] != 1 and ((PosI[0]+1,PosI[1]) not in OpenList) and ((PosI[0]+1,PosI[1]) not in CloseList): #Check vertical bas\n OpenList.append(((PosI[0]+1,PosI[1]),'U')) #Up\n if PosI[1]-1>=0 and Map[PosI[0],PosI[1]-1] != 1 and ((PosI[0],PosI[1]-1) not in OpenList) and ((PosI[0],PosI[1]-1) not in CloseList): #Check horiz gauche\n OpenList.append(((PosI[0],PosI[1]-1),'R')) #Right\n if PosI[1]+1<=np.shape(Map)[1]-1 and Map[PosI[0],PosI[1]+1] != 1 and ((PosI[0],PosI[1]+1) not in OpenList) and ((PosI[0],PosI[1]+1) not in CloseList): #Check horiz droit\n OpenList.append(((PosI[0],PosI[1]+1),'L')) #Left\n \n for OV in OpenList:\n Gvalue[OV[0][0],OV[0][1]] = 10\n \n Fvalue = np.copy(Gvalue + Hvalue)\n for CV in CloseList:\n Fvalue[CV[0],CV[1]] = np.nan\n \n\n \n############## TRACING BACK \n PosF = InitialPosF\n\n while InitialPosI not in Chemin:\n \n for Trace in OpenList:\n if Trace[0] == PosF:\n Chemin.append(PosF)\n if Trace[1] == 'U':\n PosF = (PosF[0]-1,PosF[1]) #Go up\n elif Trace[1] == 'D':\n PosF = (PosF[0]+1,PosF[1]) #Go down\n elif Trace[1] == 'L':\n PosF = (PosF[0],PosF[1]-1) #Go left\n elif Trace[1] == 'R':\n PosF 
= (PosF[0],PosF[1]+1) #Go right\n# else:\n# print(Chemin)\n Chemin.reverse()\n return(Chemin)",
"def _get_neighbours(self, position):\n grid = self._grid\n x, y = position\n neighbours = []\n offsets = [(0,1),(1,0),(0,-1),(-1,0)]\n shuffle(offsets)\n for offset in offsets:\n i, j = offset\n position = (x + i, y + j)\n if grid.valid_position(position) and position not in self.shots:\n neighbours.append(position)\n return neighbours",
"def get_winning_moves(board, p):\n empty_spots = get_empty_spots(board)\n return [(i, j) for (i,j) in empty_spots if is_winning_move(board, i, j, p)]",
"def guess(their_board):\n\tboard = board_possibility_counter(their_board)\n\tships_remaining = unsunk_ships(board)\n\tmin_size = smallest_ship_size(ships_remaining)\n\tmax_coords = (0, 0)\n\tfor row in range(10):\n\t\tfor col in range(10):\n\t\t\tif(((row + col) % min_size) == 0):\n\t\t\t\tif(board[row][col] > board[max_coords[0]][max_coords[1]]):\n\t\t\t\t\tmax_coords = (row, col)\n\treturn max_coords",
"def get_starting_positions_for_players(self, qty):\n positions = []\n for i in range(qty):\n x = random.choice(range(self.w))\n y = random.choice(range(self.h))\n while self.out_of_bounds(x, y) or (x,y) in positions:\n x = random.choice(range(self.w))\n y = random.choice(range(self.h))\n positions.append((x,y))\n\n return positions"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Changes the tile according to what letter was put in a string. Takes 2 arguments: one for which direction was chosen and one for which tile it is currently located at. Returns the new tile.
|
def tile_change(direction, tile):
    # Moving n/s changes the last digit of the tile number, e/w the first digit.
    lower_direction = direction.lower()
    if lower_direction == "n":
        tile += 1
    elif lower_direction == "s":
        tile -= 1
    elif lower_direction == "e":
        tile += 10
    else:  # "w" (any other letter also falls through to west)
        tile -= 10
    return tile
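
For illustration only (an editorial sketch, not part of the dataset): validpositions and tile_change above compose into a single move step, assuming possible_directions() merely announces the options to the player.

def move_step(tile, direction):
    # Apply the move only if the chosen direction is valid for this tile.
    valid_pos = validpositions(tile)
    if direction.lower() in valid_pos:
        return tile_change(direction, tile)
    return tile  # invalid choice: stay on the current tile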
|
[
"def put_char(self, x, y, char, bg=None, fg=None):\r\n if 0 <= x < self.size[0] and 0 <= y < self.size[1]:\r\n tile = self.tiles[x][y]\r\n if (x,y) in self.dirty_tiles:\r\n tile = self.dirty_tiles[(x,y)]\r\n char = self.tile_set.get(char,self.tile_set[' '])\r\n tile = (tile[0], char, tile[2])\r\n if bg:\r\n tile = (self.tile_set.get(bg,bg), tile[1], tile[2])\r\n if fg:\r\n tile = (tile[0], tile[1], fg)\r\n self.dirty_tiles[(x,y)] = tile",
"def changeTile (self, posY, posX, tile=\"t\"):\r\n self.grid[posY][posX] = tile",
"def get_tile(self, char):\n if char == \"#\":\n return self.tiles[0:32, 0:32]\n elif char == \"G\": # gates\n return self.tiles[8 * 32 : 9 * 32, 3 * 32 : 4 * 32] \n elif char == \"W\": # window\n return self.tiles[8 * 32 : 9 * 32, 4 * 32 : 5 * 32]\n elif char == \"C\": # checkout\n return self.tiles[2 * 32 : 3 * 32, 8 * 32 : 9 * 32]\n elif char == \"F\": # fruits\n return self.tiles[1 * 32 : 2 * 32, 4 * 32 : 5 * 32] \n elif char == \"S\": # spices\n return self.tiles[1 * 32 : 2 * 32, 3 * 32 : 4 * 32] \n elif char == \"R\": # dairy\n return self.tiles[8 * 32 : 9 * 32, 7 * 32 : 8 * 32] \n elif char == \"D\": # drinks\n return self.tiles[6 * 32 : 7 * 32, 13 * 32 : 14 * 32] \n elif char == \"c\": # customer/shopping cart\n return self.tiles[8 * 32 : 9 * 32, 6 * 32 : 7 * 32] \n else:\n return self.tiles[32:64, 64:96]",
"def make_move( char, row_index, col_index, game_board):\n \n select = get_position(row_index, col_index, get_board_size(game_board))\n #this gives the position when all of the parameters are written. It is a bit\n #lengthy to write several times so it is compiled into \"select\" \n #as a variable.\n new_board = game_board[:select] + char + game_board[select + 1:]\n #this puts in the char in between the position (select) and one space after\n #the position (select + 1)\n return new_board",
"def draw_tile(tile_id):\n if tile_id == 0:\n return \" \"\n if tile_id == 1:\n return \"#\"\n if tile_id == 2:\n return \"+\"\n if tile_id == 3:\n return \"-\"\n return \"o\"",
"def get_tile(self, char):\n if char == \"#\":\n return self.tiles[0:32, 0:32]\n elif char == \"G\":\n return self.tiles[7 * 32 : 8 * 32, 3 * 32 : 4 * 32]\n elif char == \"C\":\n return self.tiles[2 * 32 : 3 * 32, 8 * 32 : 9 * 32]\n elif char == \"b\":\n return self.tiles[ 0:32, 4*32 : 5*32] \n elif char == \"d\":\n return self.tiles[6*32 : 7*32,13*32: 14*32]\n elif char == \"m\":\n return self.tiles[7*32 : 8*32,11*32: 12*32]\n elif char == \"s\":\n return self.tiles[2*32 : 3*32,3*32: 4*32]\n else:\n return self.tiles[32:64, 64:96]",
"def create_word(self, tile):\r\n pending_word = \"\"\r\n used_tiles = []\r\n # some call here to choose a tile to play, say with index j\r\n j = 0 # just temporarily\r\n pending_word += self.hand[j].letter\r\n used_tiles.append(self.hand.pop(j))",
"def get_tile(self, char):\n if char == \"#\":\n return self.tiles[0:32, 0:32]\n elif char == \"G\":\n return self.tiles[7 * 32: 8 * 32, 3 * 32: 4 * 32]\n elif char == \"C\":\n return self.tiles[2 * 32: 3 * 32, 8 * 32: 9 * 32]\n elif char == 'B':\n return self.tiles[0 * 32: 1 * 32, 4 * 32: 5 * 32]\n elif char == 'D':\n return self.tiles[3 * 32: 4 * 32, 13 * 32: 14 * 32]\n elif char == 'S':\n return self.tiles[5 * 32: 6 * 32, 9 * 32: 10 * 32]\n elif char == 'T':\n return self.tiles[5 * 32: 6 * 32, 6 * 32: 7 * 32]\n elif char == 'Y':\n return self.tiles[6 * 32: 7 * 32, 9 * 32: 10 * 32]\n else:\n return self.tiles[32:64, 64:96]",
"def set_letters(self, tiles):\n for item in tiles:\n self.letters[item[1][0]][item[1][1]] = item[0]",
"def swap_letter(password, *args):\n x_arg = args[0]\n y_arg = args[3]\n x = password.index(x_arg)\n y = password.index(y_arg)\n\n t = password[x]\n password[x] = password[y]\n password[y] = t\n\n return password",
"def move(argument, player):\n current_tile = world.tile_exists(player.location_x, player.location_y)\n if argument == \"north\":\n if world.tile_exists(player.location_x, player.location_y-1):\n new_tile = world.tile_exists(player.location_x, player.location_y-1)\n if new_tile.__class__.__name__ in current_tile.connected: # Making sure prospective tile is connected\n if new_tile.can_enter: # Making sure prospective tile is enter-able\n player.move(player.location_x, player.location_y-1)\n else:\n print(new_tile.name + \" is locked.\")\n else:\n print(\"You can't do that.\")\n else:\n print(\"You can't do that.\")\n elif argument == \"south\":\n if world.tile_exists(player.location_x, player.location_y+1):\n new_tile = world.tile_exists(player.location_x, player.location_y+1)\n if new_tile.__class__.__name__ in current_tile.connected: # Making sure prospective tile is connected\n if new_tile.can_enter: # Making sure prospective tile is enter-able\n player.move(player.location_x, player.location_y+1)\n else:\n print(new_tile.name + \" is locked.\")\n else:\n print(\"You can't do that.\")\n else:\n print(\"You can't do that.\")\n elif argument == \"east\":\n if world.tile_exists(player.location_x+1, player.location_y):\n new_tile = world.tile_exists(player.location_x + 1, player.location_y)\n if new_tile.__class__.__name__ in current_tile.connected: # Making sure prospective tile is connected\n if new_tile.can_enter: # Making sure prospective tile is enter-able\n player.move(player.location_x+1, player.location_y)\n else:\n print(new_tile.name + \" is locked.\")\n else:\n print(\"You can't do that.\")\n else:\n print(\"You can't do that.\")\n elif argument == \"west\":\n if world.tile_exists(player.location_x-1, player.location_y):\n new_tile = world.tile_exists(player.location_x-1, player.location_y)\n if new_tile.__class__.__name__ in current_tile.connected: # Making sure prospective tile is connected\n if new_tile.can_enter: # Making sure prospective tile is enter-able\n player.move(player.location_x-1, player.location_y)\n else:\n print(new_tile.name + \" is locked.\")\n else:\n print(\"You can't do that.\")\n else:\n print(\"You can't do that.\")\n else:\n print(\"Movement not recognized. Specify a cardinal direction.\")\n return",
"def set(self,argument):\n if argument == \"X\" or \"O\":\n self.tile=argument",
"def update_view(puzzle, view, letter):\n #Modify the string by splicing at the location of the letter.\n for i in range(len(puzzle)):\n if puzzle[i] == letter:\n view = view[:i] + letter + view[i + 1:]\n return view",
"def place_tile(self, rack_ind, row, col):\n self.board.board[col][row].letter = self.rack[rack_ind]\n self.placed_tiles[self.selected_tile] = (self.rack[self.selected_tile], (col, row))\n # set the rack tile to an empty string\n self.rack[self.selected_tile] = ''",
"def position_tile(self, zero_row, zero_col, correct_tile):\n \n ans = \"\" \n vert_dist = abs(zero_row - correct_tile[0])\n horiz_dist = abs(zero_col - correct_tile[1])\n \n # Updates ans, the move string, based the correct_tile's\n # position relative to the target position.\n \n # SAME ROW\n if vert_dist == 0:\n # Left of target\n if zero_col > correct_tile[1]:\n # Moves zero tile left to correct_tile's position.\n ans += str(\"l\" * horiz_dist)\n # Moves correct_tile right to target position,\n # and moves zero tile to left of target position.\n if horiz_dist > 1:\n ans += str(\"urrdl\" * (horiz_dist - 1))\n # Right of target\n else:\n # Moves zero tile right to correct_tile's position.\n ans += str(\"r\" * horiz_dist)\n # Moves correct_tile left to target position,\n # and moves zero tile to left of target position.\n ans += str(\"ulldr\" * (horiz_dist - 1))\n ans += str(\"ulld\")\n \n # SAME COL\n elif horiz_dist == 0:\n # Moves zero tile up to correct_tile's position.\n ans += str(\"u\" * vert_dist)\n # Moves correct_tile down to target position, \n # and moves zero tile to left of target position.\n if vert_dist > 1:\n ans += str(\"lddru\" * (vert_dist - 1))\n ans += str(\"ld\")\n \n # UPPER LEFT\n elif correct_tile[1] < zero_col:\n # Moves zero tile up and left to correct_tile's position.\n ans += str(\"u\" * vert_dist)\n ans += str(\"l\" * horiz_dist)\n # Moves correct_tile right and down to target position,\n # and moves zero tile to left of target position.\n ans += str(\"drrul\" * (horiz_dist - 1))\n ans += str(\"druld\" * vert_dist)\n\n # UPPER RIGHT\n else:\n # Moves zero tile up and right to correct_tile's position.\n ans += str(\"u\" * vert_dist)\n ans += str(\"r\" * horiz_dist)\n # This if-elif-else statement moves correct_tile left and down to target position.\n # If statement is only used when target position is in row 2.\n if vert_dist == 1 and correct_tile[0] == 0:\n ans += str(\"dllur\" * (horiz_dist - 1))\n ans += str(\"dluld\")\n # Elif statement used when correct_tile is in the row above target position.\n elif vert_dist == 1: \n ans += str(\"ulldr\" * (horiz_dist - 1))\n ans += str(\"ullddruld\")\n # Else statement used when correct_tile is 1+ rows above target position.\n else:\n ans += str(\"dllur\" * (horiz_dist - 1))\n ans += str(\"dlu\")\n ans += str(\"lddru\" * (vert_dist - 1))\n ans += str(\"ld\")\n \n return ans",
"def move_character(character, direction):\n if direction == '1':\n character[1] += 1\n elif direction == '2':\n character[1] -= 1\n elif direction == '3':\n character[0] -= 1\n elif direction == '4':\n character[0] += 1",
"def getNextMove(self, board, letter):",
"def move_character(direction: str, character: list):\n if direction == \"d\":\n character[1] += 1\n if direction == \"a\":\n character[1] -= 1\n if direction == \"w\":\n character[0] -= 1\n if direction == \"s\":\n character[0] += 1",
"def change_cell_to(self, x, y, char):\r\n self.cell_list[(x - 1) * self.size + y - 1] = char"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Initialize the appointment creation workflow; pass to date definition
|
def create_appointment():
    # Render the 'date' prompt template and ask it as a question to start the flow.
    msg = render_template('date')
    return question(msg)
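
A hedged sketch of the surrounding setup this handler appears to assume (a Flask-Ask style skill; the intent name below is hypothetical, and 'date' would be a prompt key in templates.yaml):

from flask import Flask, render_template
from flask_ask import Ask, question

app = Flask(__name__)
ask = Ask(app, '/')

@ask.intent('CreateAppointmentIntent')  # hypothetical intent name
def create_appointment():
    # Prompt the user for the appointment date; question() keeps the session open.
    msg = render_template('date')
    return question(msg)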
|
[
"def test_cron_workflow_service_create_cron_workflow(self):\n pass",
"def create_appt(data):\r\n appt = objectify.Element(\"appointment\")\r\n appt.begin = data[\"begin\"]\r\n appt.uid = data[\"uid\"]\r\n appt.alarmTime = data[\"alarmTime\"]\r\n appt.state = data[\"state\"]\r\n appt.location = data[\"location\"]\r\n appt.duration = data[\"duration\"]\r\n appt.subject = data[\"subject\"]\r\n return appt",
"def _create_schedules(self):\n\n ''''''",
"def __init__(self):\n self.ticket_id = sc.get_ticket_id()\n self.priority = sc.get_priority()\n self.work_flow = sc.get_work_flow()\n self.requester = sc.get_requester_id()\n self.activities = []\n self.notes = []\n self.performed_at = datetime.now()\n\n #local variables used to passed as parameter to constructor of Activity.\n group = sc.get_random_group()\n category = sc.get_random_category()\n issue_type = sc.get_issue_type()\n\n\n logger.debug(\"Creating ticket {} with activities {}.\".format(self.ticket_id,\", \".join(self.work_flow)))\n\n if len(self.work_flow) == 0 :\n logger.error(\"There is no workflow configured. exiting the program\")\n assert()\n\n for action in self.work_flow:\n self.activities.append(Activity(action, self.performed_at,issue_type,category, group))\n self.performed_at += sc.get_random_time()\n if sc.create_note():\n self.notes.append(Note())\n self.performed_at += sc.get_random_time()",
"def main_calendar_appointer(start_datetime,end_datetime, doctor_num, patient_num):\r\n service = token_decider(doctor_num)\r\n calendar_summary = \"Patient Appointment\"\r\n id = id_checker(service,calendar_summary)\r\n\r\n time_start = \"{}-{}-{}T{}:{}:00\".format(start_datetime.year, start_datetime.month, start_datetime.day, start_datetime.hour, start_datetime.minute)\r\n time_end = \"{}-{}-{}T{}:{}:00\".format(end_datetime.year, end_datetime.month, end_datetime.day, end_datetime.hour, end_datetime.minute)\r\n print(time_start)\r\n print(time_end)\r\n\r\n event = {\r\n 'summary': 'Patient appointment',\r\n 'location': 'SmartOffice',\r\n 'description': 'Medical appointment with patient no.{}'.format(patient_num),\r\n 'start': {\r\n 'dateTime': time_start,\r\n 'timeZone': 'Australia/Melbourne',\r\n },\r\n 'end': {\r\n 'dateTime': time_end,\r\n 'timeZone': 'Australia/Melbourne',\r\n }\r\n }\r\n event_= event\r\n\r\n event = service.events().insert(calendarId=id, body=event).execute()\r\n print('Event created: {}'.format(event.get('htmlLink')))\r\n\r\n # Print out latest 10 events\r\n event_checker(id,service)\r\n return event_",
"def create_appointment():\n\n form = AppointmentForm()\n\n if form.validate_on_submit():\n\n appointment = Appointment(\n title = form.title.data,\n description = form.description.data,\n location = form.location.data,\n start = form.start.data,\n client = form.client.data,\n user = current_user\n )\n\n try:\n db.session.add(appointment)\n db.session.commit()\n\n flash('Successfully created the appointment.')\n\n return redirect(url_for('appointment.read_appointments'))\n except:\n flash('Error creating the appointment')\n\n return render_template('appointments/form.html.j2', form=form, title='Create appointment')",
"def create_instance(self, date):\n raise NotImplementedError",
"def test_create_schedule(self):\r\n pass",
"def test_creation_dates(self):\n no_creation_date = todotxt.Task(\"Task 1\")\n newer_task = todotxt.Task(\"2018-02-02 Task 2\")\n older_task = todotxt.Task(\"2017-01-01 Task 3\")\n self.assertEqual([older_task, newer_task, no_creation_date],\n pick_action.next_actions([no_creation_date, newer_task, older_task], self.namespace))",
"def create_auto_task(self):\n\t\tif self.category is not None:\n\t\t\tTask.objects.create(description=\"auto generated\", start_date=\"2019-11-13 23:23\", category=self.category)",
"def appointment_create():\n form = AppointmentForm(request.form)\n if request.method == 'POST' and form.validate():\n appt = Appointment()\n form.populate_obj(appt)\n db.session.add(appt)\n db.session.commit()\n #Success. Send user back to full appointment list\n return redirect(url_for('appointment_list'))\n #Either first load or validation error\n return render_template('appointment/edit.html', form=form)",
"async def appointment_start(request: Request, appointment: AppointmentBase):\n database_instance = request.app.db['appointment']\n appointment_data = {\n **appointment.dict(),\n \"start_date\": datetime.now(),\n \"id\": str(uuid4()),\n }\n create_result = await database_instance.insert_one(\n jsonable_encoder(appointment_data)\n )\n return await database_instance.find_one(\n {\"_id\": create_result.inserted_id}\n )",
"def __init__(self,id,appointment_time,description):\n self.id = id\n self.appointment_time = appointment_time\n self.description = description",
"def action_makeMeeting(self, cr, uid, ids, context=None):\n opportunity = self.browse(cr, uid, ids[0], context)\n res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'base_calendar', 'action_crm_meeting', context)\n res['context'] = {\n 'default_opportunity_id': opportunity.id,\n 'default_partner_id': opportunity.partner_id and opportunity.partner_id.id or False,\n 'default_partner_ids' : opportunity.partner_id and [opportunity.partner_id.id] or False,\n 'default_user_id': uid,\n 'default_section_id': opportunity.section_id and opportunity.section_id.id or False,\n 'default_email_from': opportunity.email_from,\n 'default_state': 'open',\n 'default_name': opportunity.name,\n }\n return res",
"def book_new_appointment(self, play_init=True, client=None):\n\n print 'booking new appt'\n # Ask the user when they would like the appointment\n if play_init:\n\n self.speech.play_audio(join(self.audio_vocab_dir, 'appointment_management/when_would_appt.mp3'))\n\n # Start to listen for user speech\n user_speech = self.speech.listen(method_before_speech_analysis=self.speech.play_audio,\n args=join(self.audio_vocab_dir, 'appointment_management/okay_1_second_check_time_available.mp3'))\n\n print \"Obtained requested booking date\"\n\n month, day, year, time_of_day = self.translator.get_date_time(user_speech)\n\n def is_valid_id(m, d, y, t):\n\n if m != None and d != None and y != None and t == None:\n return False, 'time'\n\n elif m == None or d == None or y == None or t == None:\n return False, None\n time_minutes = 30 if '0.5' in str(time_of_day) else 0\n dt = datetime.datetime(int(year), m + 1, day, int(time_of_day), time_minutes)\n timestamp = time.mktime(dt.timetuple())\n if timestamp < time.time():\n print \"This time is in the past\"\n return False, None\n return True, None\n\n print month, day, year, time_of_day\n\n if is_valid_id(month, day, year, time_of_day)[0] == False:\n if is_valid_id(month, day, year, time_of_day)[1] == 'time':\n self.speech.play_audio([join(self.audio_vocab_dir, 'new_profile/enhanced/time_of_day.mp3'),\n join(self.audio_vocab_dir, 'new_profile/enhanced/sorry_i_couldnt_get_your.mp3')])\n else:\n self.speech.play_audio(join(self.audio_vocab_dir, 'personal/didnt_understand.mp3'))\n self.speech.play_audio(join(self.audio_vocab_dir, 'appointment_management/when_would_appt.mp3'))\n self.book_new_appointment(False, client=client)\n\n elif self._check_slot_availability(month, day, year, time_of_day)[0] is True:\n # Time slot is free\n self._book_appointment_in_db(month, day, year, time_of_day, client=client)\n # Will return its own check. Just need to do confirmation\n self._booking_confirmation(month, day, year, time_of_day, client=client)\n return 'successfully booked'\n else:\n # Cannot book appointment\n cannot_book = join(self.speech.audio_vocab_dir, 'appointment_management/time_slot_already_booked.mp3')\n print \"Time slot already booked\"\n self.speech.microphone_mgr.commands.put({'Command':'Start', 'Wav_path':cannot_book})\n self.speech.play_audio(cannot_book)\n\n self.book_new_appointment(False, client=client)",
"def create():\n config = request.data\n return add_scheduling_block(config)",
"def test_meeting_create(self):\n pass",
"def i_see_the_set_dates(_step):\r\n verify_date_or_time(COURSE_START_DATE_CSS, '12/20/2013')\r\n verify_date_or_time(COURSE_END_DATE_CSS, '12/26/2013')\r\n verify_date_or_time(ENROLLMENT_START_DATE_CSS, '12/01/2013')\r\n verify_date_or_time(ENROLLMENT_END_DATE_CSS, '12/10/2013')\r\n\r\n verify_date_or_time(COURSE_START_TIME_CSS, DUMMY_TIME)\r\n # Unset times get set to 12 AM once the corresponding date has been set.\r\n verify_date_or_time(COURSE_END_TIME_CSS, DEFAULT_TIME)\r\n verify_date_or_time(ENROLLMENT_START_TIME_CSS, DEFAULT_TIME)\r\n verify_date_or_time(ENROLLMENT_END_TIME_CSS, DUMMY_TIME)",
"def test_change_workflow_definition(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set appointment's begin date; Pass to appointment's begin time
|
def appointment_date(begin_date):
    session.attributes['begin_date'] = str(begin_date)
    qs = render_template('time')
    return question(qs)
|
[
"def set_begin_date(self, begin_date):\n self.set_value_into_input_field(self.begin_date_inputbox_locator, begin_date)",
"def begin_time(self, begin_time):\n self._begin_time = begin_time",
"def appointment_time(begin_time):\n\n session.attributes['begin_time'] = str(begin_time)\n msg = render_template('end_date')\n return question(msg)",
"def set_start_date(self, date):\n pass",
"def start_date_time(self, start_date_time):\n\n self._start_date_time = start_date_time",
"def set_start_time(td, start_time):\n\n td.setStartTime(start_time)",
"def begin_time(self, begin_time):\n if begin_time is None:\n raise ValueError(\"Invalid value for `begin_time`, must not be `None`\") # noqa: E501\n\n self._begin_time = begin_time",
"def set_task_start_datetime(apps, schema_editor):\n Task = apps.get_model('orchestra', 'Task') # noqa\n for task in Task.objects.all():\n task.start_datetime = task.project.start_datetime\n task.save()",
"def set_adjustment_charge_begin_date(self, begin_date):\n self.set_value_into_input_field(self.adjustment_begin_date_locator, begin_date)",
"def set_billing_cycle_begin_date(self, begin_date):\n if begin_date == \"\":\n current_date = datetime.date.today()\n begin_date = current_date.replace(day=1)\n begin_date = begin_date.strftime(\"%m/%d/%Y\")\n self.set_value_into_input_field(self.billing_cycle_begin_date_inputbox_locator, begin_date)",
"def set_bulk_add_begin_date(self, begin_date):\n if begin_date == \"\":\n begin_date = self.get_date(current_date=True)\n self.set_value_into_input_field(self.bulk_add_begin_date_inputbox_locator, begin_date)\n global bulk_add_begin_date\n bulk_add_begin_date = datetime.datetime.strptime(begin_date, \"%m/%d/%Y\")\n return begin_date",
"def set_statement_begin_date(self, begin_date):\n begin_date_to_set = None\n if begin_date != \"\":\n begin_date_to_set = begin_date\n else:\n self.first_day_of_previous_month = self.get_date(first_day_of_last_month=True)\n begin_date_to_set = self.first_day_of_previous_month\n self.set_value_into_input_field(self.statement_begin_date_locator, begin_date_to_set)\n return begin_date_to_set",
"def set_begin(self, time_code: SmpteTimeCode):\n self._begin = copy.copy(time_code)",
"def set_start_date(self, start_date):\n self.set_value_into_input_field(self.start_date_inputbox_locator, start_date)",
"def start_date(self, start_date):\n \n self._start_date = start_date",
"def start_date(self, start_date):\n\n self._start_date = start_date",
"def set_begin_date(self, begin_date, current_date, future_date, previous_date, lower_case_d, num_of_days_to_add):\n self.switch_to_detail_frame()\n if begin_date == \"\":\n if current_date:\n begin_date = self.get_date(current_date=True)\n elif future_date:\n begin_date = self.get_date(future_date=True, number_of_days_to_add=int(num_of_days_to_add))\n elif previous_date:\n begin_date = self.get_date(last_day_of_last_month=True)\n if lower_case_d is True:\n self.set_value_into_input_field(self.begin_date_with_lower_case_d_inputbox_locator, begin_date)\n else:\n self.set_value_into_input_field(self.begin_date_inputbox_locator, begin_date)\n self.click_element(self.report_parameter_title_locator)\n self.switch_to_default_content()",
"def set_jobstartdate_today(self):\n today_str = str(date.today())\n return self.set_jobstartdate(today_str)",
"def start_time(self, start_time):\n self._start_time = start_time"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set appointment's begin_time; Pass to appointment's end date
|
def appointment_time(begin_time):
    session.attributes['begin_time'] = str(begin_time)
    msg = render_template('end_date')
    return question(msg)
|
[
"def appointment_date(begin_date):\n\n session.attributes['begin_date'] = str(begin_date)\n qs = render_template('time')\n return question(qs)",
"def begin_time(self, begin_time):\n self._begin_time = begin_time",
"def set_start_time(td, start_time):\n\n td.setStartTime(start_time)",
"def begin_time(self, begin_time):\n if begin_time is None:\n raise ValueError(\"Invalid value for `begin_time`, must not be `None`\") # noqa: E501\n\n self._begin_time = begin_time",
"def main_calendar_appointer(start_datetime,end_datetime, doctor_num, patient_num):\r\n service = token_decider(doctor_num)\r\n calendar_summary = \"Patient Appointment\"\r\n id = id_checker(service,calendar_summary)\r\n\r\n time_start = \"{}-{}-{}T{}:{}:00\".format(start_datetime.year, start_datetime.month, start_datetime.day, start_datetime.hour, start_datetime.minute)\r\n time_end = \"{}-{}-{}T{}:{}:00\".format(end_datetime.year, end_datetime.month, end_datetime.day, end_datetime.hour, end_datetime.minute)\r\n print(time_start)\r\n print(time_end)\r\n\r\n event = {\r\n 'summary': 'Patient appointment',\r\n 'location': 'SmartOffice',\r\n 'description': 'Medical appointment with patient no.{}'.format(patient_num),\r\n 'start': {\r\n 'dateTime': time_start,\r\n 'timeZone': 'Australia/Melbourne',\r\n },\r\n 'end': {\r\n 'dateTime': time_end,\r\n 'timeZone': 'Australia/Melbourne',\r\n }\r\n }\r\n event_= event\r\n\r\n event = service.events().insert(calendarId=id, body=event).execute()\r\n print('Event created: {}'.format(event.get('htmlLink')))\r\n\r\n # Print out latest 10 events\r\n event_checker(id,service)\r\n return event_",
"def start_date_time(self, start_date_time):\n\n self._start_date_time = start_date_time",
"def set_task_start_datetime(apps, schema_editor):\n Task = apps.get_model('orchestra', 'Task') # noqa\n for task in Task.objects.all():\n task.start_datetime = task.project.start_datetime\n task.save()",
"def set_begin(self, time_code: SmpteTimeCode):\n self._begin = copy.copy(time_code)",
"def start_time(self, start_time):\n self._start_time = start_time",
"def start_time(self, start_time):\n\n self._start_time = start_time",
"def appointment_end_date(end_date):\n\n session.attributes['end_date'] = str(end_date)\n msg = render_template('end_time')\n return question(msg)",
"def set_begin_date(self, begin_date):\n self.set_value_into_input_field(self.begin_date_inputbox_locator, begin_date)",
"def F_set_time_bound(self,start_year=1995,start_month=1,start_day=1,\\\n start_hour=0,start_minute=0,start_second=0,\\\n end_year=2025,end_month=12,end_day=31,\\\n end_hour=0,end_minute=0,end_second=0):\n self.start_python_datetime = datetime.datetime(start_year,start_month,start_day,\\\n start_hour,start_minute,start_second)\n self.end_python_datetime = datetime.datetime(end_year,end_month,end_day,\\\n end_hour,end_minute,end_second)\n step_hour = self.step_hour\n daily_start_time = self.daily_start_time\n # extend the start/end datetime to the closest step_hour intervals\n t_array0 = datetime.datetime.combine(datetime.date(start_year,start_month,start_day),\\\n daily_start_time)-datetime.timedelta(hours=step_hour)\n t_array = np.array([t_array0+datetime.timedelta(hours=int(step_hour)*i) for i in range(int(24/step_hour+2))])\n tn_array = np.array([(self.start_python_datetime-dt).total_seconds() for dt in t_array])\n narr_start_datetime = t_array[tn_array >= 0.][-1]\n \n t_array0 = datetime.datetime.combine(datetime.date(end_year,end_month,end_day),\\\n daily_start_time)-datetime.timedelta(hours=step_hour)\n t_array = np.array([t_array0+datetime.timedelta(hours=int(step_hour)*i) for i in range(int(24/step_hour+2))])\n tn_array = np.array([(self.end_python_datetime-dt).total_seconds() for dt in t_array])\n narr_end_datetime = t_array[tn_array <= 0.][0]\n \n nstep = (narr_end_datetime-narr_start_datetime).total_seconds()/3600/step_hour+1\n \n self.narr_start_datetime = narr_start_datetime\n self.narr_end_datetime = narr_end_datetime\n self.nstep = int(nstep)\n self.logger.info('specified time from '+\\\n self.start_python_datetime.strftime('%Y-%m-%dT%H:%M:%SZ')+\n ' to '+self.end_python_datetime.strftime('%Y-%m-%dT%H:%M:%SZ'))\n self.logger.info('extended time from '+\\\n self.narr_start_datetime.strftime('%Y-%m-%dT%H:%M:%SZ')+\n ' to '+self.narr_end_datetime.strftime('%Y-%m-%dT%H:%M:%SZ'))\n self.logger.info('there will be %d'%nstep+' narr time steps')",
"def start_time(self, value):\n self._start_time = value",
"async def appointment_start(request: Request, appointment: AppointmentBase):\n database_instance = request.app.db['appointment']\n appointment_data = {\n **appointment.dict(),\n \"start_date\": datetime.now(),\n \"id\": str(uuid4()),\n }\n create_result = await database_instance.insert_one(\n jsonable_encoder(appointment_data)\n )\n return await database_instance.find_one(\n {\"_id\": create_result.inserted_id}\n )",
"def appointment_end_time(end_time):\n\n session.attributes['end_time'] = str(end_time)\n form = AppointmentForm(session.attributes)\n form.submit()\n return render_result(form)",
"def set_begin(self, tp):\n if isinstance(tp, sppasPoint) is False:\n raise AnnDataTypeError(tp, \"sppasPoint\")\n\n if sppasInterval.check_types(tp, self.__end) is False:\n raise AnnDataEqTypeError(tp, self.__end)\n\n if sppasInterval.check_interval_bounds(tp, self.__end) is False:\n raise IntervalBoundsError(tp, self.__end)\n\n # assign the reference\n self.__begin = tp",
"def set_start_date(self, date):\n pass",
"def set_time(self, start: str, end: str, duration: int = None):\n self.set_start(start)\n self.set_end(end)\n self.set_count(duration)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set appointment's end date; Pass to appointment's end time
|
def appointment_end_date(end_date):
    session.attributes['end_date'] = str(end_date)
    msg = render_template('end_time')
    return question(msg)
|
[
"def end_date_time(self, end_date_time):\n\n self._end_date_time = end_date_time",
"def end_date(self, end_date):\n self._end_date = end_date",
"def appointment_end_time(end_time):\n\n session.attributes['end_time'] = str(end_time)\n form = AppointmentForm(session.attributes)\n form.submit()\n return render_result(form)",
"def set_end_time(td, end_time):\n\n td.setEndTime(end_time)",
"def end_date(self, end_date):\n\n self._end_date = end_date",
"def end_date(self, end_date):\n if end_date is None:\n end_date = datetime.utcnow()\n\n self._end_date = dt_utils.parse_date(end_date)",
"def schedule_end_date(self, schedule_end_date):\n\n self._schedule_end_date = schedule_end_date",
"def end_time(self, end_time):\n\n self._end_time = end_time",
"def end(self, end):\n # type: (datetime) -> None\n\n if end is not None:\n if not isinstance(end, datetime):\n raise TypeError(\"Invalid type for `end`, type has to be `datetime`\")\n\n self._end = end",
"def bookable_end_date_time(self, bookable_end_date_time):\n\n self._bookable_end_date_time = bookable_end_date_time",
"def end(self):\n self.end_date = arrow.now()",
"def _set_end_time(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=unicode, default=unicode(\"\"), is_leaf=True, yang_name=\"end-time\", rest_name=\"to\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'To time period granting user access', u'alt-name': u'to', u'cli-suppress-no': None, u'display-when': u'(../access-time)'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='string', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"end_time must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=unicode, default=unicode(\"\"), is_leaf=True, yang_name=\"end-time\", rest_name=\"to\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'To time period granting user access', u'alt-name': u'to', u'cli-suppress-no': None, u'display-when': u'(../access-time)'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='string', is_config=True)\"\"\",\n })\n\n self.__end_time = t\n if hasattr(self, '_set'):\n self._set()",
"def setEnd(self, end, google = True):\n if google:\n temp = simpleParse(end) # turn unicode to string\n temp = temp[:19] # trim the time zone out\n self.end = convertToDatetimeFull(temp) # convert to a datetime object for further manipulation later\n else:\n self.end = end\n self._updateLength()\n return True",
"def set_statement_end_date(self, end_date):\n end_date_to_set = None\n if end_date != \"\":\n end_date_to_set = end_date\n else:\n end_date_to_set = self.get_date(last_day_of_last_month=True)\n self.set_value_into_input_field(self.statement_end_date_locator, end_date_to_set)\n return end_date_to_set",
"def set_end_date_long(self, end_date_long):\n self.end_date_long = end_date_long",
"def item_end_date(self, item_end_date: str):\n\n self._item_end_date = item_end_date",
"def model_end_date(self, model_end_date):\n\n self._model_end_date = model_end_date",
"def set_end_date(self, date, value):\n locator = pmm_lex_locators[\"service_schedule\"][\"first_session_end\"].format(date)\n self.selenium.set_focus_to_element(locator)\n self.selenium.get_webelement(locator).click()\n self.selenium.clear_element_text(locator)\n self.selenium.get_webelement(locator).send_keys(value)",
"def end_tournament(self):\n self.end_date = strftime(\"%d-%m-%Y\", localtime())"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|