query       stringlengths   9 – 9.05k
document    stringlengths   10 – 222k
negatives   listlengths     19 – 20
metadata    dict
Create an instance of PyRPS. redis_url: Redis instance address, either a (hostname, port) tuple or a hostname string. namespace: namespace used to separate this Pub/Sub instance from others running on the same Redis host.
def __init__(self, namespace, redis_url=("localhost", 6379)):
    self.namespace = namespace
    if isinstance(redis_url, tuple):
        self.redis = StrictRedis(host=redis_url[0], port=redis_url[1])
    elif isinstance(redis_url, str):
        self.redis = StrictRedis(host=redis_url)
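A minimal usage sketch for the constructor above; the import path, application namespace, and hostnames are illustrative, and the snippet assumes redis-py's StrictRedis is available as in the code.

from pyrps import PyRPS  # hypothetical module path

pubsub = PyRPS("my-app")                                   # defaults to ("localhost", 6379)
pubsub_tuple = PyRPS("my-app", ("redis.internal", 6380))   # explicit (hostname, port) tuple
pubsub_str = PyRPS("my-app", "redis.internal")             # plain hostname, default port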
[ "def connect_to_redis():\n return Redis(host=redis_host, port=redis_port, db=0)", "def __init__(self):\n try:\n config = redis_settings[\"REDIS_BACKEND\"]\n self.servers = config[\"servers\"]\n self.port = config[\"port\"]\n self.db = config[\"db\"]\n self.password = config[\"password\"]\n # r = redis.Redis('10.66.136.84', '6379', 0,password=\"xsw2CDE#vfr4\")\n #r = redis.Redis('10.66.136.84', '6379', 0)\n self.redis = Redis(self.servers, self.port, self.db,\n password=self.password, socket_timeout=1)\n except Exception, e:\n print \"Redis YAMLConfig Error :\", e\n logging.error(e)", "def __init__(self, *args, **kwargs):\n self.redis = Redis.from_url(*args, decode_responses=True, **kwargs)", "def init(host=\"localhost\", port=6379, db_number=0, namespace=\"\", ns_versions_loc=None):\n global _r, _glob_namespace, _ns_versions_loc\n _r = redis.StrictRedis(\n host=host,\n port=port,\n db=db_number,\n )\n\n _glob_namespace = namespace + \":\"\n _glob_namespace = _glob_namespace.encode(ENCODING_ASCII)\n if len(_glob_namespace) + SHA1_LENGTH > MAX_KEY_LENGTH:\n raise ValueError(\"Namespace is too long.\")\n\n if ns_versions_loc:\n if not os.path.isdir(ns_versions_loc):\n raise ValueError(\"Can't find directory for storing namespace versions! \"\n \"Please check `version_location` argument.\")\n else:\n ns_versions_loc = os.path.join(tempfile.gettempdir(), NS_VERSIONS_LOC_DIR)\n _ns_versions_loc = os.path.join(ns_versions_loc, namespace)\n if not os.path.exists(_ns_versions_loc):\n os.makedirs(_ns_versions_loc)", "def __init__(self, connection_pool):\n self._conn = redis.Redis(connection_pool=connection_pool)", "def create_redis():\n return _create_redis()", "def conn_redis(host, port, db=0):\r\n r = redis.Redis(host=host, port=port, db=db)\r\n return r", "def connect_redis():\n rv = redis.Redis(host=app.config['REDIS_HOST'], port=app.config['REDIS_PORT'])\n return rv", "def connect_redis(uri):\n puri = urlparse.urlparse(uri)\n host = puri.hostname\n port = puri.port\n password = puri.password if puri.password else ''\n db_name = puri.path.split('/')[1]\n r = redis.Redis(host=host, port=port, password=password, db=db_name)\n assert r.ping()\n return r", "def _redis_from_dsn(self, dsn):\r\n import redis\r\n parts = urlparse(dsn)\r\n _, _, netloc = parts.netloc.partition('@')\r\n netloc = netloc.rsplit(':')\r\n host = netloc[0]\r\n try:\r\n port = int(netloc[1])\r\n except IndexError:\r\n port = 6379\r\n try:\r\n db = int(parts.path.strip('/'))\r\n except ValueError:\r\n db = 0\r\n return redis.Redis(host=host, port=port, db=db)", "def connect(*args, **kwargs):\n global client\n client = redis.Redis(*args, **kwargs)", "def _get_conn(self):\n return redis.Redis(connection_pool=self.pool)", "def get_redis_client():\n return redis.from_url(settings.REDIS_URI)", "def redis_conn_pool(self) -> ConnectionPool:\n if self._redis_conn_pool is None:\n if self._config[\"graph_redis_pool_block\"]:\n pool_class: Callable = BlockingConnectionPool\n else:\n pool_class = ConnectionPool\n\n if self._config[\"graph_redis_pool_gevent_queue\"]:\n redis_conn_pool = pool_class().from_url(\n self._config[\"graph_redis_url\"],\n decode_components=True,\n max_connections=self._config[\"graph_redis_pool_max_connections\"],\n timeout=self._config[\"graph_redis_pool_timeout\"],\n queue_class=gevent.queue.LifoQueue,\n )\n\n else:\n redis_conn_pool = pool_class().from_url(\n self._config[\"graph_redis_url\"],\n decode_components=True,\n max_connections=self._config[\"graph_redis_pool_max_connections\"],\n 
timeout=self._config[\"graph_redis_pool_timeout\"],\n )\n\n self._redis_conn_pool = redis_conn_pool\n\n self._logger.debug(\n \"[%s]: Initialized Redis connection pool: %s\",\n self.__name__,\n self._redis_conn_pool,\n )\n\n return self._redis_conn_pool", "def __init__(self, redis_object):\r\n self.redis_object = redis_object", "def get_redis_server():\n return redis_server", "def redis_url():\n\n # Attempt to retrieve the space name the application is running in; this\n # will return the space if the app is running in a cloud.gov environment or\n # None if it is running locally.\n if env.space is not None:\n logger.info(\n 'Running in the {0} space in cloud.gov.'.format(env.space)\n )\n\n # While we are not able to connect to Redis, retry as many times as\n # necessary. This is usually due to a brief 1 - 3 second downtime as\n # a service instance is rebooted in the cloud.gov environment.\n # TODO: Make this more robust in the case of extended outages.\n while True:\n logger.info('Attempting to connect to Redis...')\n redis = env.get_service(label='redis32')\n\n if redis is not None:\n logger.info('Successfully connected to Redis.')\n break\n else:\n logger.error('Could not connect to Redis, retrying...')\n\n # Construct the Redis instance URL based on the service information\n # returned.\n url = redis.get_url(host='hostname', password='password', port='port')\n return 'redis:{}'.format(url)\n else:\n logger.debug(\n 'Not running in a cloud.gov space, attempting to connect locally.'\n )\n\n # Fall back to attempting to read whatever is set in the FEC_REDIS_URL\n # environment variable, otherwise a localhost connection.\n return env.get_credential('FEC_REDIS_URL', 'redis://localhost:6379/0')", "def init_redis(self, redis_url: str, test_mode: bool = False) -> None:\n if test_mode:\n self._client = FakeStrictRedis()\n else:\n self._client = Redis.from_url(redis_url)", "def get_broker() -> RedisBroker:\n return RedisBroker(\n db=EnvVars.DB,\n host=EnvVars.HOST,\n port=EnvVars.PORT\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Publish a new message into the queue. queue: queue name. message: message data. ttl: how long the message should stay alive, in seconds.
def publish(self, queue, message, ttl=3600):
    # Get next message ID
    message_id = self.redis.incr(self._ns_nextid())
    # Push message to queue
    self.redis.setex(self._ns_message(queue, message_id), ttl, message)
    # List all consumers of given queue
    consumers = self.redis.smembers(self._ns_subscriptions(queue))
    # Publish the message to all the consumers.
    for consumer in consumers:
        self.redis.rpush(self._ns_queue(queue, consumer), message_id)
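An illustrative call of publish; the queue name and payload are made up. The ttl is handed straight to Redis SETEX, so it is expressed in seconds.

pubsub = PyRPS("my-app")
pubsub.publish("tasks", "resize-image-42", ttl=600)   # message expires after 10 minutes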
[ "def publish(self, message, exchange, routing_key, **kwargs):\r\n mqueue.put(message)", "def publish(self, message, routing_key):\n\t\t#msg = amqp.Message(message)\n\t\t#msg.properties[\"content_type\"] = \"text/plain\"\n\t\t#msg.properties[\"delivery_mode\"] = 2\n\t\t#self.channel.basic_publish(exchange=self.exchange_name,\n\t\t# routing_key=routing_key, msg=msg)\n\n\t\t#channel.queue_declare(queue='task_queue', durable=True)\n\t\tself.channel.queue_declare(queue=\"%ss\" % routing_key, durable=True)\n\t\ttry:\n\t\t\tself.channel.basic_publish(exchange=self.exchange_name,\n\t\t\t routing_key=routing_key,\n\t\t\t body=message#,\n\t\t\t #properties=pika.BasicProperties(\n\t\t\t #\tcontent_type = 'application/json'\n\t\t\t # delivery_mode = 2, # make message persistent\n\t\t\t )#)\n\t\t\tprint \" [x] Sent %r\" % (message,)\n\t\texcept pika.exceptions.ChannelClosed, e:\n\t\t\tprint \"ChannelClosed\"\n\t\t#print \"publish\"", "def publish(self, queue, message):\n # 1. Setup the channel to use to publish message\n channel_handler = ChannelHandler(self._connection)\n\n # 2. Open the channel before using it\n channel_handler.open_channel()\n\n # 3. Send the message via the channel\n channel_handler.send_message(self._exchange_name, queue, message)\n\n # 4. Close the channel after publishing the message\n channel_handler.close_channel()\n LOGGER.info('Bellow message `%s` is published in `%s`', message, queue)", "def publish(self, topic, payload):\n self.q.put((topic, payload))", "def test_queue_publish(self):\n self.queue_publisher._connect()\n with self.assertLogs(level='INFO') as cm:\n result = self.queue_publisher.publish_message(test_data['valid'])\n self.assertEqual(True, result)\n\n self.assertIn('Published message to queue', cm.output[8])", "def push(message: str, date: datetime.datetime):\n msg_id = str(uuid.uuid4())\n pipeline = connection.pipeline()\n pipeline.set(msg_id, message)\n pipeline.zadd(QUEUE_KEY, {\n msg_id: date.timestamp()\n })\n pipeline.execute()\n logger.info(f'Save a new future email: [message: {message}, date: {date}]')", "def publish_message(self, message) -> None:\r\n\r\n # Create a timestamp of the current time\r\n message_timestamp = int(datetime.now().timestamp())\r\n\r\n # Append a uuid to the message\r\n message_with_uid = message + '.' 
+ str(uuid.uuid4())\r\n\r\n # Add the message to a sorted set\r\n self.__redis.zadd(name='messages', mapping={message_with_uid: message_timestamp})", "def send_message(self, routing_key, message, durable=True, exchange=''):\n sending_channel = self.get_channel()\n sending_channel.queue_declare(queue=routing_key, durable=durable)\n sending_channel.basic_publish(exchange=exchange,\n routing_key=routing_key,\n body=message,\n properties=pika.BasicProperties(\n delivery_mode=2,\n ))", "def publish_message(topic_name, data):\n pubsub_client = pubsub.Client()\n topic = pubsub_client.topic(topic_name)\n\n # Data must be a bytestring\n data = data.encode('utf-8')\n\n message_id = topic.publish(data)\n\n print('Message {} published.'.format(message_id))", "def write_message(self, pool_key, data, delay_seconds=None):\n message = sqs.message.Message(body=dumps(data))\n m = self.queue(pool_key).write(message, delay_seconds=delay_seconds)\n return m", "def publish_message(self, message):\n self.producer.publish(message,\n retry=True,\n retry_policy={\n 'interval_start': 0,\n 'interval_step': 2,\n 'interval_max': 30,\n 'max_retries': 30,\n },\n exchange=self.exchange,\n routing_key=self.rabbitmq_config.routing_key)", "def publish(self, msg, routing_key, err=False, **kwargs):\n\n # Make sure we have a valid connection to RabbitMQ\n if not self.amqp_publish_conn:\n self.publish_connect()\n\n priority = kwargs.get('priority', 0)\n\n # If this is an error message, let's make sure our queue\n # has \"-errors\" affixed to it\n if err:\n routing_key = routing_key + \"-errors\".strip()\n\n # Define the queue so we can ensure it is declared before\n # publishing to it\n queue_arguments = {'x-max-priority': 10}\n queue = Queue(routing_key, self.amqp_exchange, routing_key=routing_key,\n queue_arguments=queue_arguments)\n\n with producers[self.amqp_publish_conn].acquire(block=True) as producer:\n producer.publish(self.stoq.dumps(msg),\n exchange=self.amqp_exchange,\n declare=[self.amqp_exchange, queue],\n routing_key=routing_key,\n priority=priority)", "def publish(self, data):\n self.logger.debug(\"Putting data into the channel.\")\n self.queue.put(data)\n self.logger.debug(f\"Channel {self.name} has {self.qsize()} element(s).\")", "def write(self, message):\n self.setup_queue()\n\n m = RawMessage()\n m.set_body(message)\n self.queue.write(m)", "def push_sqs_message(self, queue, msg=None):\n\t\tif not msg: msg = self.working_file\n\t\tconn = boto.connect_sqs(self.aws_id, self.aws_key)\n\t\tq = conn.create_queue(queue)\n\t\tm = Message()\n\t\tm.set_body(msg)\n\t\tstatus = q.write(m)\n\t\treturn status", "def publish(self, message):\n if(type(message)!=self.type):\n raise ValueError(\"Please ensure that the message passed to this method is of the same type as defined during the exchange declaration\")\n if(type(message)!=str):\n try:\n message = message.SerializeToString().decode()\n except:\n raise ValueError(\"Are you sure that the message is Protocol Buffer message/string?\")\n success = self._channel.basic_publish(exchange=self.topic,routing_key=\"\", body=message)\n if(not success):\n print(\"Cannot deliver message to Exchange\")", "def put(self, msg, hput=False):\r\n if not hput:\r\n return self.redis_client.sadd(self.queue_name, msg)\r\n else:\r\n return self.redis_client.sadd(self.queue_name, msg)", "def pop_computetime_push(queue, inqueue, outqueue):\n\n # Check if the redis queue is empty\n msg = queue.rpop(inqueue)\n if msg is None:\n return msg\n\n # if msg is not empty, Load the message out of the 
processing queue and add a max processing time key\n msg = json.loads(msg, object_hook=object_hook)\n msg['max_time'] = time.time() + slurm_walltime_to_seconds(msg['walltime'])\n\n # Push the message to the processing queue with the updated max_time\n queue.rpush(outqueue, json.dumps(msg, cls=JsonEncoder))\n\n return msg", "def send(self, alias, message):\n if not alias in self.qs:\n raise Exception(\"undefined queue {}\".format(alias))\n self.qs[alias].put(message)\n log.debug(\"new message on {}\".format(alias))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return key for subscribers list for given queue.
def _ns_subscriptions(self, queue):
    return self._ns(queue, "consumers")
[ "def queue_keys(self) -> List[str]:\n return [queue.key for queue in self.queues]", "def key_for_name(name):\n return 'hotqueue:%s' % name", "def queue_id(self) -> str:\n return pulumi.get(self, \"queue_id\")", "def get_queue_oid(self, port, queue_num):\n redis_cmd = [\n \"redis-cli\", \"-n\", \"2\", \"HGET\", \"COUNTERS_QUEUE_NAME_MAP\",\n \"{}:{}\".format(port, queue_num)\n ]\n queue_oid = next(iter(self.run_redis_cmd(redis_cmd)), None)\n\n pytest_assert(\n queue_oid is not None,\n \"Queue OID not found for port {}, queue {}\".format(\n port, queue_num\n )\n )\n # save the queue OID, will be used to retrieve ASIC instance for\n # this queue's OID\n self.queue_oid.add(queue_oid)\n return queue_oid", "def keys(self):\n if self._keys is None:\n if CONTROL_QUEUE in self._queues:\n raise QueueError('reserved name in queues')\n self._queues.append(CONTROL_QUEUE)\n self._keys = [self.db.key(x) for x in self._queues]\n return self._keys", "def all_keys(cls, connection: Optional['Redis'] = None, queue: Optional['Queue'] = None) -> List[str]:\n return [as_text(key) for key in worker_registration.get_keys(queue=queue, connection=connection)]", "def __getitem__(self, name):\n return self.Queues[name]", "def task_queue_id(self) -> str:\n return pulumi.get(self, \"task_queue_id\")", "def subscription_key(self):\n return self._subscription_key", "def QueueId(self):\n\t\treturn self._get_attribute('queueId')", "def queue_name(self) -> str:\n return pulumi.get(self, \"queue_name\")", "def get_queue(self, task_name):\n for name, queue in self.queues.items():\n if task_name in queue:\n return name\n return self.default_queue", "def names(self):\n return [ q['name'] for q in self.queues ]", "def _get_scanner_queue_name(self):\n raise NotImplementedError", "def key(self):\n return self.redis_worker_namespace_prefix + self.name", "def queue_name(self):\n return self._queue_name", "def _queueKey(cls, data, key=None):\n\n\t\t# Turn the data into a str and md5 it\n\t\tsMD5 = md5(str(data).encode('utf-8')).hexdigest()\n\n\t\t# If a key was received\n\t\tif key:\n\n\t\t\t# Decode it and see if it matches the data\n\t\t\treturn StrHelper.decrypt(cls._queue_key, key) == sMD5\n\n\t\t# Else\n\t\telse:\n\n\t\t\t# Generate and return a key\n\t\t\treturn StrHelper.encrypt(cls._queue_key, sMD5)", "def service_bus_queue_endpoint_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_bus_queue_endpoint_id\")", "def key_of(self, i):\n if i < 0 or i >= self.max_n:\n raise IllegalArgumentException(\"index is out of range\")\n if not self.contains(i):\n raise IllegalArgumentException(\"index is not on the priority queue\")\n return self.keys[i]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Unsubscribe from the message queue and destroy it. Do not call this if you want persistent queues or if you access one queue from multiple processes.
def unsubscribe(self):
    # Unsubscribe
    self.pyrps.redis.srem(self.pyrps._ns_subscriptions(self.queue), self.consumer_id)
    # Remove message queue
    self.pyrps.redis.delete(self.pyrps._ns_queue(self.queue, self.consumer_id))
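Schematically, the two calls above boil down to the following Redis commands; the key names come from the _ns_* helpers and are shown only as placeholders.

# SREM <subscriptions key for queue>  <consumer_id>   -> consumer stops receiving new message IDs
# DEL  <queue key for (queue, consumer_id)>           -> any undelivered message IDs are discarded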
[ "def destroy_queue(self):\n response = self.queue.delete()\n if self._is_error_call(response):\n raise RuntimeError('SQS could not delete queue: %s' % response)\n self.queue, self.queue_name = None, None", "def unsubscribe(self):\n if self.pubsub_thread:\n self.log.info('Unsubscribing from channel %s', self.pubsub_channel_name)\n self.pubsub_thread.stop()\n self.pubsub_thread.join()\n self.pubsub.unsubscribe()\n self.pubsub.close()", "def destroy(self):\n self.console._remove_mailbox(self.cid)", "def delete_queue(self):\n self.global_queue.remove_queue(self)", "def delete(self):\n self._queue.delete_message(self._message)", "def __clear_message_queue(self):\r\n self.__lib.CC_ClearMessageQueue(self.__serno)", "def unsubscribe(self, connection, destination):\n self.log.debug(\"Unsubscribing %s from %s\" % (connection, destination))\n if connection in self._queues[destination]:\n self._queues[destination].remove(connection)\n\n if not self._queues[destination]:\n del self._queues[destination]", "def _delete_queued_message(self, message: BaseMessage) -> None:\n message.kill_message()\n if message in self._message_queue:\n self._message_queue.remove(message)\n self._bot.delete_message(chat_id=self.chat_id, message_id=message.message_id)\n del message", "def clear_message_queue(self):\r\n self.KCube.CC_ClearMessageQueue(self.serial)", "def __del__(self):\n self.unsubscribe()", "def unsubscribe(self,topic):\n self.__mqtt_client__.unsubscribe(topic)", "def delete_sqs_message(self, queue, message):\n\t\tconn = boto.connect_sqs(self.aws_id, self.aws_key)\n\t\tq = conn.create_queue(queue)\n\t\tq.delete_message(message)", "def drop_message(self):\n heapq.heappop(self._message_queue)", "def remove_queue(self, queue):\n with self.mutex:\n self.queues.remove(queue)", "def delete_queue(self):\n pass", "def delete_queue(self):\n self.work_queue_client.delete_queue()", "def unsubscribe(self):\n pass # pragma: no cover", "def unlisten(self, prefix: str) -> None:\n assert len(prefix) == 1\n del self.queues[prefix]\n self.logger.info(\"No longer polling for message type: %s\", prefix)", "async def close_amqp_consumer(self):\n await self.amqp_queue.unbind(AMQP_EXCHANGE, self.amqp_routing_key)\n await self.amqp_queue.delete()\n await self.amqp_connection.close()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Instead of issuing a separate draw call for every wall block, build all the wall blocks into a single shape that can be drawn with one call.
def create_wall_shape(self):
    self.shape_walls = arcade.ShapeElementList()
    self.shape_walls.center_x = 0
    self.shape_walls.center_y = 0
    self.shape_walls.angle = 0
    point_list = []
    color_list = []
    # create the walls into a single shape
    walls = self.game.walls
    for wall in walls:
        points = self.get_entity_dimensions(wall)
        point_list.append(points[0])
        point_list.append(points[1])
        point_list.append(points[2])
        point_list.append(points[3])
        # as we have 4 points
        for i in range(4):
            color_list.append(COLOUR_MAP[wall.base_colour])
    self.shape_walls.append(
        arcade.create_rectangles_filled_with_colors(point_list, color_list)
    )
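A self-contained sketch of the same batching idea outside this class, using the arcade 2.x shape API; the window size, block positions, and colour are arbitrary.

import arcade

class WallDemo(arcade.Window):
    """Draws ten 32x32 blocks with one buffered shape instead of ten draw calls."""

    def __init__(self):
        super().__init__(800, 600, "batched walls")
        self.shape_list = arcade.ShapeElementList()
        point_list = []
        color_list = []
        for i in range(10):
            x = 40 + i * 64
            # four corners of one block, plus one colour entry per corner
            point_list += [(x, 40), (x + 32, 40), (x + 32, 72), (x, 72)]
            color_list += [arcade.color.GRAY] * 4
        self.shape_list.append(
            arcade.create_rectangles_filled_with_colors(point_list, color_list)
        )

    def on_draw(self):
        arcade.start_render()
        self.shape_list.draw()   # one call renders every block

if __name__ == "__main__":
    WallDemo()
    arcade.run()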
[ "def __make_walls(self):\n x,y,z = self.params['inner_dimensions']\n thickness = self.params['wall_thickness']\n wall_x_overhang = self.params['x_r_overhang']\n d_tab = self.params['wall_tab_dist']\n tab_width = self.params['wall_tab_width']\n dia = self.params['wall_hole_dia_thru']\n dia_tap = self.params['wall_hole_dia_tap']\n wall_hole_y_offset = self.params['wall_hole_y_offset']\n tab_depth = thickness\n x_w = x + 2*wall_x_overhang\n y_w = z\n z_w = thickness\n y_h = self.params['holder_height']\n\n # Tab and hole (for l-brackets) data for stabilizing the walls\n xz_neg = []\n hole_list = []\n for i in range(0,2*self.params['num_devices'],2):\n # Create tab data for xz- face of walls\n tab_x = wall_x_overhang + (i+1)*d_tab\n tab_data = (tab_x/x_w, tab_width, tab_depth, '+')\n xz_neg.append(tab_data)\n\n # Create hole data for walls\n if i < 2*self.params['num_devices']-2:\n hole_x = (i+1)*d_tab - .5*x + d_tab\n hole_y = -.5*y_w + wall_hole_y_offset\n hole_data = (hole_x,hole_y,dia_tap)\n hole_list.append(hole_data)\n # Special case for a 1-device rack\n if self.params['num_devices'] == 1:\n hole_x = (i+1)*d_tab - .5*x\n hole_y = -.5*y_w + wall_hole_y_offset\n hole_data = (hole_x,hole_y,dia_tap)\n hole_list.append(hole_data)\n\n # End holes of the walls, for stability rods\n hole_x_offset = self.params['stability_rod_x_offset']\n for j in (-1,1):\n hole_x = j*.5*x_w - j*hole_x_offset\n hole_y = .5*y_h\n hole_data = (hole_x,hole_y,dia)\n hole_list.append(hole_data)\n\n holder_hole_offset = self.params['holder_hole_offset']\n\n # End holes for the holders\n for i in (-1,1):\n for j in (-1,1):\n hole_x = i*j*.5*x - j*d_tab\n for dy in holder_hole_offset:\n hole_y = dy\n hole_data = (hole_x,hole_y,dia)\n hole_list.append(hole_data)\n\n # Special cases for a 5-device (or greater) rack, I\n # need to add an extra support hole in the middle\n if self.params['num_devices'] >= 5:\n for dy in holder_hole_offset:\n hole_data = (0,dy,dia)\n hole_list.append(hole_data)\n\n # Pack data into parameters structure\n params = {\n 'size' : (x_w, y_w, z_w),\n 'radius' : self.params['corner_radius'], \n 'xz+' : [],\n 'xz-' : xz_neg,\n 'yz+' : [],\n 'yz-' : [],\n 'hole_list' : hole_list,\n } \n\n plate_maker = Plate_W_Tabs(params)\n self.left_wall = plate_maker.make()\n self.right_wall = plate_maker.make()", "def add_walls(self):\n for x in range(self.width):\n self.add_thing(Wall(), (x, 0))\n self.add_thing(Wall(), (x, self.height - 1))\n\n for y in range(self.height):\n self.add_thing(Wall(), (0, y))\n self.add_thing(Wall(), (self.width - 1, y))", "def _draw_walls(self, draw_grid):\n for yi, y in enumerate(self._grid):\n for xi, x in enumerate(y):\n for i, w in enumerate(x.walls):\n if i == 0 and w:\n draw_grid[yi * 2 + 1][xi * 2] = self._wall_color\n if i == 1 and w:\n draw_grid[yi * 2 + 1][xi * 2 + 2] = self._wall_color\n if i == 2 and w:\n draw_grid[yi * 2][xi * 2 + 1] = self._wall_color\n if i == 3 and w:\n draw_grid[yi * 2 + 2][xi * 2 + 1] = self._wall_color\n return draw_grid", "def add_walls(self):\n for x in range(self.width + 1):\n if not self.some_things_at((x, 0), Wall):\n self.add_thing(Wall(), (x, 0))\n if not self.some_things_at((x, self.height), Wall):\n self.add_thing(Wall(), (x, self.height))\n\n for y in range(self.height + 1):\n if not self.some_things_at((0, y), Wall):\n self.add_thing(Wall(), (0, y))\n if not self.some_things_at((self.width, y), Wall):\n self.add_thing(Wall(), (self.width, y))\n #self.add_thing(Wumpus(),(1,3))\n #self.add_thing(Pit(),(3,3))\n 
#self.add_thing(Pit(),(3,1))\n #self.add_thing(Gold(),(2,3))\n #self.add_thing(Pit(),(4,4))", "def add_walls(self):\n for x in range(self.size_x):\n pos = Coord(x, 0)\n if self.map.is_building_empty(pos):\n self.map.add_building(pos, Wall(self, pos))\n\n pos = Coord(x, self.size_y-1)\n if self.map.is_building_empty(pos):\n self.map.add_building(pos, Wall(self, pos))\n\n for y in range(self.size_y):\n pos = Coord(0, y)\n if self.map.is_building_empty(pos):\n self.map.add_building(pos, Wall(self, pos))\n\n pos = Coord(self.size_x-1, y)\n if self.map.is_building_empty(pos):\n self.map.add_building(pos, Wall(self, pos))", "def build_blocks():\n block_1 = GRect(375, 80, x=20, y=330)\n block_1.filled = True\n block_1.color = 'firebrick'\n block_1.fill_color = 'firebrick'\n window.add(block_1)\n block_2 = GRect(375, 80, x=405, y=330)\n block_2.filled = True\n block_2.color = 'steelblue'\n block_2.fill_color = 'steelblue'\n window.add(block_2)\n block_3 = GRect(375, 80, x=20, y=420)\n block_3.filled = True\n block_3.color = 'goldenrod'\n block_3.fill_color = 'goldenrod'\n window.add(block_3)\n block_4 = GRect(375, 80, x=405, y=420)\n block_4.filled = True\n block_4.color = 'forestgreen'\n block_4.fill_color = 'forestgreen'\n window.add(block_4)\n block_5 = GRect(60, 40, x=720, y=120)\n block_5.filled = True\n block_5.color = 'dodgerblue'\n block_5.fill_color = 'dodgerblue'\n window.add(block_5)\n circle_1 = GOval(90, 90, x=20, y=170)\n circle_1.filled = True\n circle_1.color = 'blueviolet'\n circle_1.fill_color = 'blueviolet'\n window.add(circle_1)", "def build_wall(self, type, pos1, pos2, thickness=1):\n raise NotImplementedError", "def _create_main_shape(self):\n\n a, b = gc( self.size/2,\n self._ZERO_DEGREES - self.angle,\n self._180_DEGREES + self.angle)\n self.wafer_points = zip(a,b)\n self.wafer_polygon = gdspy.Polygon(self.wafer_points, self.WAFER_LAYER)\n self.cell.add(self.wafer_polygon)", "def make_boundary_wall(self, height, width) -> None:\n for x in range(0, width):\n Wall(self, x, 0)\n Wall(self, x, height - 1)\n for y in range(1, height - 1):\n Wall(self, 0, y)\n Wall(self, width - 1, y)", "def corridor(x,z, emap, width=10, length=10, height=10, details=None, walls=\"ns\", name=\"wall\", mergeshape=None):\r\n global wallnum\r\n n = z + width / 2\r\n s = z - width / 2\r\n e = x + length / 2\r\n w = x - length / 2\r\n\r\n solid_objects = []\r\n\r\n if \"n\" in walls:\r\n # TODO: abstract out the mostly-duplicate code in these cases...\r\n nwall = SolidObject(name+str(wallnum),\r\n Size(length, height, 1),\r\n Position(x, emap.calcHeight(x, z) + height / 2, n-0.5), 0)\r\n solid_objects.append(nwall)\r\n nwallmodel = createMyCuboid(nwall.w() * 2, nwall.h() * 2, nwall.d() * 2,\r\n name=name+str(wallnum),\r\n x=nwall.x(),y=nwall.y(),z=nwall.z(),\r\n rx=0.0, ry=0.0, rz=0.0, cx=0.0, cy=0.0, cz=0.0)\r\n if mergeshape:\r\n mergeshape.add(nwallmodel)\r\n else:\r\n nwall.setmodel(nwallmodel, details)\r\n\r\n\r\n wallnum += 1\r\n\r\n if \"s\" in walls:\r\n swall = SolidObject(name+str(wallnum), Size(length, height, 1), Position(x, emap.calcHeight(x, z)+height / 2, s+0.5), 0)\r\n solid_objects.append(swall)\r\n swallmodel = createMyCuboid(swall.w()*2, swall.h()*2, swall.d()*2,\r\n name=name+str(wallnum),\r\n x=swall.x(), y=swall.y(), z=swall.z(),\r\n rx=0.0, ry=0.0, rz=0.0, cx=0.0,cy=0.0, cz=0.0)\r\n if mergeshape:\r\n mergeshape.add(swallmodel)\r\n else:\r\n swall.setmodel(swallmodel, details)\r\n\r\n wallnum += 1\r\n\r\n if \"e\" in walls:\r\n ewall = SolidObject(name+str(wallnum), Size(1, 
height, width), Position(e-0.5, emap.calcHeight(x, z)+height / 2, z), 0)\r\n solid_objects.append(ewall)\r\n ewallmodel = createMyCuboid(ewall.w()*2, ewall.h()*2, ewall.d()*2,\r\n name=name+str(wallnum),\r\n x=ewall.x(), y=ewall.y(), z=ewall.z(),\r\n rx=0.0, ry=0.0, rz=0.0, cx=0.0, cy=0.0, cz=0.0)\r\n if mergeshape:\r\n mergeshape.add(ewallmodel)\r\n else:\r\n ewall.setmodel(ewallmodel, details)\r\n\r\n wallnum += 1\r\n\r\n if \"w\" in walls:\r\n wwall = SolidObject(name+str(wallnum), Size(1, height, width), Position(w+0.5, emap.calcHeight(x, z)+height / 2, z), 0)\r\n solid_objects.append(wwall)\r\n wwallmodel = createMyCuboid(wwall.w()*2, wwall.h()*2, wwall.d()*2,\r\n name=name+str(wallnum),\r\n x=wwall.x(), y=wwall.y(), z=wwall.z(),\r\n rx=0.0, ry=0.0, rz=0.0, cx=0.0, cy=0.0, cz=0.0)\r\n if mergeshape:\r\n mergeshape.add(wwallmodel)\r\n else:\r\n wwall.setmodel(wwallmodel, details)\r\n wallnum += 1\r\n\r\n if \"o\" not in walls:\r\n ceiling = SolidObject(name+str(wallnum), Size(length, 1, width), Position(x, emap.calcHeight(x, z)+height+0.5, z), 0)\r\n solid_objects.append(ceiling)\r\n ceilingmodel = createMyCuboid(ceiling.w()*2, ceiling.h()*2, ceiling.d()*2,\r\n name=name+str(wallnum),\r\n x=ceiling.x(), y=ceiling.y(), z=ceiling.z(),\r\n rx=0.0, ry=0.0, rz=0.0, cx=0.0, cy=0.0, cz=0.0)\r\n if mergeshape:\r\n mergeshape.add(ceilingmodel)\r\n else:\r\n ceiling.setmodel(ceilingmodel, details)\r\n\r\n wallnum += 1\r\n\r\n return solid_objects", "def draw_long_shape():\n turtle.fillcolor('blue')\n draw_block()\n turtle.forward(50)\n draw_block()\n turtle.forward(50)\n draw_block()\n turtle.forward(50)\n draw_block()\n turtle.back(150)", "def make_rect(self, x, y, w, h, reds=[]):\n for i in range(w):\n for j in range(h):\n px = x+i\n py = y+j\n if i == 0 or i == w-1:\n if [px, py] in reds:\n cube_id = self.p.loadURDF(\"cube_red.urdf\", basePosition=[px, py, 0.5])\n else:\n cube_id = self.p.loadURDF(\"cube_black.urdf\", basePosition=[px, py, 0.5])\n elif j == 0 or j == h-1:\n if [px, py] in reds:\n cube_id = self.p.loadURDF(\"cube_red.urdf\", basePosition=[px, py, 0.5])\n else:\n cube_id = self.p.loadURDF(\"cube_black.urdf\", basePosition=[px, py, 0.5])\n self.wall_block_ids.append(cube_id)", "def __init__(self, mapfile, xpos, zpos, emap, width=10.0, depth=10.0, height=10.0, name=\"building\", draw_details=None, yoff=0.0, scheme=None):\r\n self.xpos = xpos\r\n self.zpos = zpos\r\n self.width = width\r\n self.depth = depth\r\n self.height = height\r\n self.name = name\r\n self.ceilingthickness = 1.0\r\n self.walls = []\r\n\r\n if scheme == None:\r\n self.scheme = Building.baseScheme\r\n else:\r\n self.scheme = scheme\r\n\r\n # We don't have to be rigorous here, this should only be a draw_details or an iterable of draw_details.\r\n if hasattr(draw_details, \"__getitem__\") or hasattr(draw_details, \"__iter__\"):\r\n assert (len(draw_details) == self.scheme[\"#models\"])\r\n self.details = draw_details\r\n else:\r\n self.details = [draw_details for x in range(self.scheme[\"#models\"])]\r\n # having a method like this allows draw details to be set later\r\n\r\n self.yoff = yoff\r\n\r\n self.model = [MergeShape(name=name+\".\"+str(x)) for x in range(self.scheme[\"#models\"])]\r\n\r\n if mapfile[0] != '/':\r\n mapfile = sys.path[0] + '/' + mapfile\r\n print(\"Loading building map ...\", mapfile)\r\n\r\n im = Image.open(mapfile)\r\n im = ImageOps.invert(im)\r\n ix,iy = im.size\r\n\r\n print(\"image size\", ix, \",\", iy)\r\n\r\n startx = xpos - ix / 2 * width\r\n starty = zpos - ix / 2 * depth\r\n\r\n 
yoff += emap.calcHeight(-xpos,-zpos)\r\n\r\n if not im.mode == \"P\":\r\n im = im.convert('P', palette=Image.ADAPTIVE)\r\n im = im.transpose(Image.FLIP_TOP_BOTTOM)\r\n im = im.transpose(Image.FLIP_LEFT_RIGHT)\r\n pixels = im.load()\r\n\r\n for y in range(1,iy-1):\r\n print(\".\", end='')\r\n for x in range(1,ix-1):\r\n colour = pixels[x,y]\r\n\r\n if x == 1:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x-1,y], \"edge\"), wallfunc=self.west_wall, ceilingedgefunc=self.west_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n else:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x-1,y]), wallfunc=self.west_wall, ceilingedgefunc=self.west_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n if x == ix-2:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x+1,y], \"edge\"), wallfunc=self.east_wall, ceilingedgefunc=self.east_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n else:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x+1,y]), wallfunc=self.east_wall, ceilingedgefunc=self.east_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n if y == 1:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x,y-1], \"edge\"), wallfunc=self.south_wall, ceilingedgefunc=self.south_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n else:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x,y-1]), wallfunc=self.south_wall, ceilingedgefunc=self.south_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n if y == iy-2:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x, y+1], \"edge\"), wallfunc=self.north_wall, ceilingedgefunc=self.north_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n else:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x,y+1]), wallfunc=self.north_wall, ceilingedgefunc=self.north_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n self._executeScheme(x, y, startx, starty, (colour, None), wallfunc=None, ceilingedgefunc=None, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n self.set_draw_details(self.details) # after models created otherwise\r\n # details lost by merging\r", "def create_outer_walls(space,width,height):\n static_lines = [pymunk.Segment(space.static_body, (0.0, 0.0), (width, 0.0), 0.0),\n pymunk.Segment(space.static_body, (width, 0.0), (width, height), 0.0),\n pymunk.Segment(space.static_body, (width, height), (0.0, height), 0.0),\n pymunk.Segment(space.static_body, (0.0, 600.0), (0.0, 0.0), 0.0)]\n for line in static_lines:\n line.friction = 0.5\n line.elasticity = 0.9\n\n return static_lines", "def draw_shapes_bb(self):\n for s in self.space.shapes:\n self.draw_bb(s.bb, RED, 1)", "def build_wall(self): #py:UR.build_wall\n RUR._UR.build_wall_(self.body)", "def build_walls(self):\n for room in self.rooms:\n # Builds top and bottom walls\n for x in range(room.x1 - 1, room.x2 + 2):\n # Top wall\n if self.tiles[room.y1 - 1][x].is_blocked:\n self.tiles[room.y1 - 1][x] = cells.Wall()\n # Bottom wall\n if self.tiles[room.y2 + 1][x].is_blocked:\n self.tiles[room.y2 + 1][x] = cells.Wall()\n # Builds left and right walls\n for y in range(room.y1 - 1, room.y2 + 2):\n # Left wall\n if self.tiles[y][room.x1 - 1].is_blocked:\n self.tiles[y][room.x1 - 1] = cells.Wall()\n # Right wall\n if self.tiles[y][room.x2 + 1].is_blocked:\n self.tiles[y][room.x2 + 1] = cells.Wall()", "def create_border_wall(self):\n for north_wall in range(0, const.SCREEN_W, const.TILE_SIZE):\n x = north_wall\n y = 0\n self.walls.append(Wall(x, y, 
const.WALL_IMAGE))\n for east_wall in range(0, const.SCREEN_H-const.TILE_SIZE, const.TILE_SIZE):\n x = const.SCREEN_W - const.TILE_SIZE\n y = east_wall\n self.walls.append(Wall(x, y, const.WALL_IMAGE))\n for south_wall in range(const.SCREEN_W-const.TILE_SIZE, 0, -const.TILE_SIZE):\n x = south_wall\n y = const.SCREEN_H - const.TILE_SIZE\n self.walls.append(Wall(x, y, const.WALL_IMAGE))\n for west_wall in range(const.SCREEN_H-const.TILE_SIZE, 0, -const.TILE_SIZE):\n x = 0\n y = west_wall\n self.walls.append(Wall(x, y, const.WALL_IMAGE))", "def empty_diff_walls():\n\t# 4 side walls are absorptive\n\troom_materials = [pra.Material(energy_absorption=0.1, scattering=None)] * 4\n\t# floor and ceiling are reflective\n\troom_materials.extend([pra.Material(energy_absorption=0.98, scattering=None)] * 2)\n\t\n\troom_faces = make_polygon(\n\t\tcentre=[0,0,2.5],\n\t\tradius=10,\n\t\theight=5,\n\t\tN=4,\n\t\trpy=[0,0,np.pi/4]\n\t)\n\n\t# create room\n\twalls = []\n\twalls.extend(create_walls(room_faces, room_materials))\n\n\troom = pra.Room(walls, fs=fs, max_order=3, ray_tracing=False, air_absorption=False)\n\n\troom.add_source([-5, 2, 2.])\n\troom.add_microphone([1, 0, 2.])\n\n\t# compute rir\n\troom.image_source_model()\n\troom.compute_rir()\n\n\treturn room" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create/Update the sprite shape for an entity and add/update the entry for it in `self.entities_shapelist`
def update_shape_sprite(self, entity: Entity):
    shape_sprite: ShapeSprite = entity.shape_sprite
    if entity.id not in self.entities_shapelist:
        entity_shapelist = arcade.ShapeElementList()
        # we need to convert from general colours to arcade specific colours
        entity_shapelist.append(arcade.create_rectangles_filled_with_colors(
            shape_sprite.point_list,
            [COLOUR_MAP[x] for x in shape_sprite.color_list])
        )
    else:
        entity_shapelist = self.entities_shapelist[entity.id]
    entity_shapelist.center_x = shape_sprite.position_x
    entity_shapelist.center_y = SCREEN_HEIGHT - shape_sprite.position_y
    entity_shapelist.draw()
    self.entities_shapelist[entity.id] = entity_shapelist
[ "def add_shape(self, x, y, shape):\n # Awkward as the shape cell coordinates have absolute coordinates\n norm_shape = shape.normalise()\n adj_x, adj_y = norm_shape.center()\n for cell in norm_shape:\n xpos = x + cell.x - adj_x\n ypos = y + cell.y - adj_y\n self.put(Cell(self, xpos, ypos, cell.c))", "def _place_entity(self, entity, x, y, z):\n self._entities[y][x][z] = entity", "def create_sprite(self, pos):\n group = pyglet.sprite.SpriteGroup(\n self.TEXTURE, gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA\n )\n texcoords = []\n for i in xrange(self.length + 1):\n texcoords.extend([\n self.TEXTURE.tex_coords[0], i,\n self.TEXTURE.tex_coords[3], i,\n ])\n count = 2 * (self.length + 1)\n verts = [0, 0] * count # set vertices later from body\n self.vlist = batch.add(\n count, gl.GL_TRIANGLE_STRIP, group,\n ('v2f', verts),\n ('t2f', texcoords)\n )", "def add_entity(self, entity):\n\n if self._dEntities.get(entity.get_type(), None) == None:\n #If there wasn't already a dictionary, then we'll make one\n self._dEntities[entity.get_type()] = {}\n\n #This will overwrite or create a new entity of the given name.\n self._dEntities[entity.get_type()][entity.get_name()] = entity\n\n #THis filters out the entities with -1 priorities to being added\n # To the list of drawable Entities.\n if (entity.get_draw_priority() != -1):\n self._pqDrawableEntities.add_entity(entity)", "def add(self, entity):\n\t\tif entity.name == None:\n\t\t\tentity.name = \"Untitled Thing\"\n\t\tt12.spam(\"Adding \" + entity.name)\n\t\tself.all.append(entity)\n\t\tif entity.attributes.count(\"background\") >= 1:\n\t\t\tself.background.append(entity)\n\t\tif entity.attributes.count(\"touch_player\") >= 1:\n\t\t\tself.touch_player.append(entity)\n\t\tif entity.attributes.count(\"touch_enemies\") >= 1:\n\t\t\tself.touch_enemies.append(entity)\n\t\tif entity.attributes.count(\"activators\") >= 1:\n\t\t\tself.activators.append(entity)\n\t\tif entity.attributes.count(\"geometry\") >= 1:\n\t\t\tself.geometry.append(entity)\n\t\tif entity.attributes.count(\"actor\") >= 1 or entity.attributes.count(\"actors\") >= 1:\n\t\t\tself.actors.append(entity)\n\t\tif entity.attributes.count(\"touch_geom\") >= 1:\n\t\t\tself.touch_geom.append(entity)\n\n\t\tif entity.attributes.count(\"art_back\") >= 1:\n\t\t\tself.art_back.append(entity)\n\t\telif entity.attributes.count(\"art_front\") >= 1:\n\t\t\tself.art_front.append(entity)\n\t\telif entity.attributes.count(\"art_over\") >= 1:\n\t\t\tself.art_over.append(entity)\n\t\telse:\n\t\t\tself.art_mid.append(entity)", "def add(self, entity):\n coord = chunkCoordAt(entity.body.pos, self.scale)\n self.getAt(coord).add(entity)\n self._coords[entity] = coord", "def shapes(self, shape_list):\n for item in shape_list:\n item.store()\n shape_list_uuids = [item.uuid for item in shape_list]\n self.set_attribute('shapes', shape_list_uuids)", "def _update_tile_shape(self) -> None:\n # This might be overly dynamic, but for now if we see there's a new\n # tile shape we nuke our texture atlas and start over with the new\n # tile shape.\n #\n # We added this because the QtTestImage GUI sets the tile shape\n # after the layer is created. 
But the ability might come in handy\n # and it was not hard to implement.\n tile_shape = self.layer.tile_shape\n if self.node.tile_shape != tile_shape:\n self.node.set_tile_shape(tile_shape)", "def _update_entity(self, entity, x, y, z):\n entity.update(x, y, z, self.origin_x, self.origin_y, self.tile_width,\n self.tile_height, self.batch)", "def add_shape_to_board(self, player):\n\n blocks = player.get_shape_blocks()\n if blocks is None:\n return\n\n for block in blocks:\n # check board is empty for all blocks\n block_x = player.x + block.x\n block_y = player.y + block.y\n block.x = block_x\n block.y = block_y\n self.cells[block_x][block_y] = block", "def add_entity(self, ent):\n self.tiles[ent.position[x]][ent.position[y]].add_entity(ent)", "def add_shape(self, shape: Shape):\r\n self.__shape_list.append(shape)", "def __updateShape(self, ):\n points = [QtCore.QPointF(p[0], p[1]) for p in self._envelope]\n self._shape = QtGui.QPainterPath()\n self._shape.addPolygon(QtGui.QPolygonF(points))", "def update_shapes(self, event=None):", "def setShape(self, shape):\n\n\t\tarrangement = Shape.coordsTable[shape]\n\n\t\tfor i in range(4):\n\t\t\tfor j in range(2):\n\t\t\t\tself.coords[i][j] = arrangement[i][j]\n\n\t\tself.pieceShape = shape", "def add_shape(self, shape):\r\n self._shapes[shape.id] = shape\r\n cp.cpSpaceAddShape(self._space, shape._shape)", "def __init__(self, entities):\n self._shape_to_ent = dict()\n self._ent_to_shapes = dict()\n for entity in entities:\n shapes = entity.shapes\n self._ent_to_shapes[entity] = shapes\n for shape in shapes:\n assert shape not in self._shape_to_ent, \\\n f\"shape {shape} appears in {entity} and \" \\\n f\"{self._shape_to_ent[shape]}\"\n self._shape_to_ent[shape] = entity", "def __change_shape(self):\r\n self.shape = self.next_shape", "def InsertShape(self, object):\n self.children.insert(0, object)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the pixel positions for positioning a menu in the center of the screen
def get_menu_coords(self, menu):
    menu_center_x = (self.width // 2)
    menu_center_y = (self.height // 2)
    # get a mapping of the menu co-ordinates for relative positioning of things inside the menu
    menu_cords = (
        (menu_center_x - (menu.width // 2), menu_center_y + (menu.height // 2)),
        (menu_center_x + (menu.width // 2), menu_center_y + (menu.height // 2)),
        (menu_center_x - (menu.width // 2), menu_center_y - (menu.height // 2)),
        (menu_center_x + (menu.width // 2), menu_center_y - (menu.height // 2)),
    )
    return menu_center_x, menu_center_y, menu_cords
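A worked example with hypothetical sizes: an 800x600 screen and a 200x100 menu.

# menu_center_x, menu_center_y = 400, 300
# menu_cords = (
#     (300, 350),   # top-left
#     (500, 350),   # top-right
#     (300, 250),   # bottom-left
#     (500, 250),   # bottom-right
# )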
[ "def _menuCoords(self, Lx, Ly):\r\n\r\n if Lx < 0:\r\n xCoord = self.width + Lx + (self.pWidth - self.width) / 2\r\n else:\r\n xCoord = Lx + (self.pWidth - self.width) / 2\r\n\r\n if Ly < 0:\r\n yCoord = self.height + Ly + (self.pHeight - self.height) / 2\r\n else:\r\n yCoord = Ly + (self.pHeight - self.height) / 2\r\n\r\n return [xCoord, yCoord]", "def getCurrentMenuPixels(self):\n screen = copy.copy(self.allMenuPixels[self.currentScroll * 8: self.currentScroll * 8 + 8 * 8])\n return screen", "def getPos(self):\r\n return (self.rect.centery,self.rect.centerx)", "def center_pos(self) -> tuple:\n x, y = self.offset\n return x + self.control_box_width + (len(self.boards) * self.board_width) // 2, y + self.board_height // 2", "def get_main_position(self):", "def GetPrimaryDisplayOrigin(self):\n # NOTE: don't default to 0,0 otherwise on osx the frame will be\n # stuck behind the menubar.\n for idx in range(wx.Display.GetCount()):\n disp = wx.Display(idx)\n if disp.IsPrimary():\n drect = disp.GetClientArea()\n return drect.GetPosition() + (5, 5)\n else:\n return (5, 5)", "def getAllMenuPixels(self):\n screen_all = []\n screen_row = copy.copy(Menu.normal_screen_row) # 8 x 1 pixels\n for i in range(len(Menu.MENUITEMS)):\n screen_all.extend(copy.copy(screen_row)) # 1st row\n screen_all.extend(copy.copy(screen_row)) # 2nd row\n itemPixels = self.menuItems[i].getMenuItemPixels()\n # print(self.menuItems[i].getName(), itemPixels)\n for n in range(6):\n screen_all[i * 8 * 3 + 2 + n] = itemPixels[n]\n screen_all[i * 8 * 3 + 8 + 2 + n] = itemPixels[6 + n]\n if i != len(Menu.MENUITEMS) - 1:\n screen_all.extend(copy.copy(screen_row)) # 3rd row for margin \n return screen_all", "def getCharPos(self):\n # return pyautogui.locateCenterOnScreen('pics/CharMinimapIcon.png')", "def pos(self):\n x = (self.ec._win._mouse_x -\n self.ec._win.width / 2.) / (self.ec._win.width / 2.)\n y = (self.ec._win._mouse_y -\n self.ec._win.height / 2.) / (self.ec._win.height / 2.)\n return np.array([x, y])", "def cursorPosGL(self):\n globalPos = QtGui.QCursor.pos()\n pos = self.mapFromGlobal(globalPos)\n y = self.size().height() - pos.y()\n return pos.x(), y", "def GetCenter(self):\n ...", "def calculate_screen_position(self):\r\n\r\n character_select_start_y = 604\r\n character_select_end_y = 646\r\n\r\n if self.slotNumber <= 6:\r\n start_y = 585 # 595\r\n end_y = 627 # 637\r\n x_hero_number = self.slotNumber\r\n else:\r\n start_y = 300 # 290\r\n end_y = 342 # 332\r\n x_hero_number = self.slotNumber - 6\r\n\r\n start_x = 249 + (x_hero_number * 192)\r\n end_x = 326 + (x_hero_number * 192)\r\n\r\n self.screenPositionCharacterSelect = {\r\n \"start_x\": start_x,\r\n \"end_x\": end_x,\r\n \"start_y\": character_select_start_y,\r\n \"end_y\": character_select_end_y\r\n }\r\n self.screenPositionTab = {\r\n \"start_x\": start_x,\r\n \"end_x\": end_x,\r\n \"start_y\": start_y,\r\n \"end_y\": end_y\r\n }", "def _board_center(self, game):\n row = game.height // 2\n col = game.width // 2\n return (row, col)", "def center(self):\r\n self.centerx = self.screen_rect.centerx \r\n self.centery = self.screen_rect.centery", "def toolbarRectInWindowCoords(self):\r\n\r\n (pw, ph) = self.parent.size\r\n pw = float(pw)\r\n ph = float(ph)\r\n x, y = self.toolbarSizeForScreenWidth(pw)\r\n tw = x * 180. / 182.\r\n th = y * 20. / 22.\r\n\r\n tx = (pw - tw) / 2\r\n ty = ph - th * 22. 
/ 20.\r\n\r\n return tx, ty, tw, th", "def midtop(self):\n return (self.centerx, self.top)", "def get_center(self):\r\n return (self.x + self.rect.width / 2, self.y - self.rect.height / 2)", "def abs_screen_center(self):\n return Vec2d(self._abs_screen_center)", "def sprite_position(self):\n return [self.pos_x * TILE_SIZE, self.pos_y * TILE_SIZE]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes logits based on features from the model
def logits_on_features(self, h, batch):
    batch = batch.to(h.device)
    # Extract features with the model
    features = h.view(batch.size, -1)
    # Log loss
    logits = self.head(features)
    return logits
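A standalone shape check of the flatten-then-head step above, assuming self.head is a torch.nn.Linear classification head; the batch size, feature map size, and class count are made up.

import torch
import torch.nn as nn

# Hypothetical sizes: 8 samples, a 64x4x4 feature map, 10 classes.
h = torch.randn(8, 64, 4, 4)
head = nn.Linear(64 * 4 * 4, 10)
features = h.view(8, -1)        # mirrors h.view(batch.size, -1) above
logits = head(features)
print(logits.shape)             # torch.Size([8, 10])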
[ "def logit_fn(data, ve_noise_scale):\n data = preprocess(data)\n logits = classifier.apply({'params': classifier_params}, data, ve_noise_scale, train=False, mutable=False)\n return logits", "def get_logits(image):\n x = image\n for filters in (32, 64):\n x = tf.layers.conv2d(x, filters, 3)\n x = tf.nn.relu(x)\n x = tf.layers.max_pooling2d(x, 3, 2)\n x = tf.reduce_mean(x, axis=(1, 2))\n logits = tf.layers.dense(x, 10)\n return logits", "def compute_logits(self):\n # [num train labels, num classes] where each row is a one-hot-encoded label.\n one_hot_train_labels = tf.one_hot(self.data.train_labels, self.way)\n\n # Undocumented in the paper, but *very important*: *only* the support set\n # embeddings is L2-normalized, which means that the distance is not exactly\n # a cosine distance. For comparison we also allow for the actual cosine\n # distance to be computed, which is controlled with the\n # `exact_cosine_distance` instance attribute.\n train_embeddings = tf.nn.l2_normalize(\n self.train_embeddings, 1, epsilon=1e-3)\n test_embeddings = self.test_embeddings\n if self.exact_cosine_distance:\n test_embeddings = tf.nn.l2_normalize(test_embeddings, 1, epsilon=1e-3)\n # [num_test_images, num_train_images]\n similarities = tf.matmul(\n test_embeddings, train_embeddings, transpose_b=True)\n attention = tf.nn.softmax(similarities)\n\n # [num_test_images, way]\n probs = tf.matmul(attention, one_hot_train_labels)\n self.test_logits = tf.log(probs)\n return self.test_logits", "def output(x, hps):\n x = tf.reduce_mean(x, axis=1, keepdims=True) #avg pooling features for classifier\n logits = conv1d(x, 'classifier', hps.n_y, hps)[:, 0, :] #squeeze spatial dimension\n return logits", "def _logit_transform(x):\n return util.logit(MNIST.alpha + (1 - 2*MNIST.alpha) * x)", "def compute_edge_logits(self):", "def dnn_logit_fn(features, mode):\n with tf.variable_scope(\n 'input_from_feature_columns',\n values=tuple(six.itervalues(features)),\n partitioner=input_layer_partitioner):\n net = tf.feature_column.input_layer(\n features=features, feature_columns=feature_columns)\n for layer_id, num_hidden_units in enumerate(hidden_units):\n with tf.variable_scope(\n 'hiddenlayer_%d' % layer_id, values=(net,)) as hidden_layer_scope:\n net = tf.layers.dense(\n net,\n units=num_hidden_units,\n activation=activation_fn,\n kernel_initializer=tf.glorot_uniform_initializer(),\n name=hidden_layer_scope)\n if dropout is not None and mode == 'train':\n net = tf.layers.dropout(net, rate=dropout, training=True)\n # _add_hidden_layer_summary(net, hidden_layer_scope.name)\n\n with tf.variable_scope('logits', values=(net,)) as logits_scope:\n logits = tf.layers.dense(\n net,\n units=units,\n activation=None,\n kernel_initializer=tf.glorot_uniform_initializer(),\n name=logits_scope)\n # _add_hidden_layer_summary(logits, logits_scope.name)\n\n return logits", "def train_model_log(features, target):\n lr = LinearRegression()\n lr.fit(features, target)\n y_pred = np.exp(lr.predict(features))\n r2 = lr.score(features, target)\n rsme = mean_squared_error(target, y_pred)\n print('R Squared:' + str(r2), 'RSME:' + str((rsme**.5)))\n return lr", "def logits(self):\n return np.array([m['actor'] for m in self.model_outs], dtype=np.float32)", "def infer_ensemble_logits(features, model, checkpoints, session, num_steps,\n data):\n _, inferred = model.multi_gpu([features], 1)\n logits = []\n saver = tf.train.Saver()\n for checkpoint in checkpoints:\n saver.restore(session, checkpoint)\n for i in range(num_steps):\n logits.append(\n 
session.run(\n inferred[0].logits,\n feed_dict={\n features['recons_label']: data[i]['recons_label'],\n features['labels']: data[i]['labels'],\n features['images']: data[i]['images'],\n features['recons_image']: data[i]['recons_image']\n }))\n return logits", "def infer_ensemble_logits(features, model, checkpoints, session, num_steps,\r\n data):\r\n _, inferred = model.multi_gpu([features], 1)\r\n logits = []\r\n saver = tf.train.Saver()\r\n for checkpoint in checkpoints:\r\n print('*********checkpoint***************', checkpoint)\r\n saver.restore(session, checkpoint)\r\n for i in range(num_steps):\r\n logits.append(\r\n session.run(\r\n inferred[0].logits,\r\n feed_dict={\r\n features['recons_labels']: data[i]['recons_labels'],\r\n features['labels']: data[i]['labels'],\r\n features['text']: data[i]['text'],\r\n # features['recons_image']: data[i]['recons_image']\r\n }))\r\n return logits", "def train_logreg(X, y):\n logging.info(\"Start training logistic regression model...\")\n logreg = linear_model.LogisticRegression(\n solver='sag',\n verbose=0,\n max_iter=200,\n n_jobs=-1\n )\n logreg.fit(X, y)\n logging.info(\"Model trained.\")\n return logreg", "def _setup_logits(self, model_fn, hyper):\n with tf.variable_scope(\"model\"):\n data = model_fn(\n self.X_batch,\n self.X_batch_len,\n max_reach=self.max_reach,\n block_size=self.block_size_x_tensor,\n out_classes=9,\n batch_size=self.batch_size_var,\n dtype=self.dtype,\n **hyper\n )\n\n if data['logits'].get_shape()[2] != 9:\n raise ValueError(\n \"Loggits must be tensor with dim 2 = 9\\n%s\" %\n str(data['logits'].get_shape())\n )\n with tf.control_dependencies([\n tf.cond(\n self.training_mode,\n lambda: tf.assert_equal(tf.shape(data['logits']), [self.block_size_x_tensor // self.shrink_factor, self.batch_size_var, 9]),\n lambda: tf.no_op()\n )\n ]):\n data['logits'] = tf.identity(data['logits'])\n return data", "def logreg(X_train, y_train):\n try:\n # Si la matriz es singular va a dar error\n log = sm.Logit(y_train, X_train)\n logreg_model = log.fit()\n except Exception as e:\n # Intentamos con la matriz hessiana\n print(e)\n log = sm.Logit(y_train, X_train)\n logreg_model = log.fit(method='bfgs')\n\n return logreg_model", "def train(self,X,y,reg):\n \n m,dim = X.shape\n theta_opt = np.zeros((dim,len(self.labels)))\n\n ###########################################################################\n # Compute the K logistic regression classifiers #\n # TODO: 7-9 lines of code expected #\n ###########################################################################\n k=len(self.labels);\n ytemp=[0 for i in range(0,len(y))];\n theta_temp = np.zeros((len(self.labels),dim))\n for i in range(0,k):\n for j in range(0, len(y)):\n if y[j] == i:\n ytemp[j] = 1\n else:\n ytemp[j] = 0\n\n clf=linear_model.LogisticRegression(C=1e5,solver='lbfgs',fit_intercept=False)\n print \"here\"\n print X.shape\n \n clf.fit(X, ytemp)\n print \"fit\"\n print \"haha\",clf.coef_.T\n theta_temp[i]=clf.coef_\n print \"yes\",theta_temp[i]\n theta_opt= theta_temp.T\n\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n self.theta = theta_opt", "def logistic(weights, data, targets, hyperparameters):\n\n # Compute the probabilities\n y = logistic_predict(weights, data)\n\n\n #evaluate the predictions\n f, frac_correct = evaluate(targets, y)\n\n #computing the gradient\n df = np.zeros_like(weights)\n \n #error\n error = y - targets \n df[:-1] = 
np.dot(data.T,error)\n df[-1] = np.sum(error)\n\n\n\n return f, df, y", "def _project_to_logits(self, inputs):\n return tf.layers.dense(\n inputs,\n 2**self.quantization_bits + 1, # + 1 for stopping token\n use_bias=True,\n kernel_initializer=tf.zeros_initializer(),\n name='project_to_logits')", "def log(inputs):\n return torch.log(inputs)", "def call(self,\n points: tf.Tensor,\n training: Optional[bool] = None) -> tf.Tensor: # pylint: disable=arguments-differ\n features = self.encoder(points, training) # (B,1024)\n logits = self.classifier(features, training) # (B,num_classes)\n return logits" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the NLL loss given features h and targets y. This assumes that the features have already been computed with the model.
def nll_on_features(self, h, batch, reduction="mean"):
    batch = batch.to(h.device)
    y = batch.outputs
    # Extract features with the model
    features = h.view(batch.size, -1)
    # Log loss
    logits = self.head(features)
    log_probs = F.log_softmax(logits, dim=-1)
    nll_loss = F.nll_loss(log_probs, y, reduction=reduction)
    return nll_loss
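The log_softmax + nll_loss composition used above is numerically equivalent to F.cross_entropy applied to the raw logits; a quick self-contained check with arbitrary tensors:

import torch
import torch.nn.functional as F

logits = torch.randn(4, 10)
y = torch.tensor([1, 0, 3, 9])
a = F.nll_loss(F.log_softmax(logits, dim=-1), y, reduction="mean")
b = F.cross_entropy(logits, y, reduction="mean")
assert torch.allclose(a, b)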
[ "def lossFun(inputs, targets, hprev):\n xs, hs, ys, ps = {}, {}, {}, {}\n hs[-1] = np.copy(hprev)\n loss = 0\n # forward pass(손실값 계산)\n for t in range(len(inputs)):\n xs[t] = np.zeros((vocab_size, 1)) # 1-of-k(one-hot) 형태로 변환. 모든 값이 0인 array 준비\n xs[t][inputs[t]] = 1 # 해당하는 글자에만 값을 1로 설정 - [0, ..., 0, 1, 0, ..., 0]\n hs[t] = np.tanh(np.dot(Wxh, xs[t]) + np.dot(Whh, hs[t - 1]) + bh) # hidden state 업데이트\n ys[t] = np.dot(Why, hs[t]) + by # 다음 글자가 어떤 글자가 나올지에 가능성을 표시한 array(정규화되지 않음)\n ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t])) # softmax로 각 글자의 등장 가능성을 확률로 표시\n loss += -np.log(ps[t][targets[t], 0]) # cross-entropy를 이용하여 정답과 비교하여 손실값 판정\n # backward pass(그래디언트 계산)\n # 변수 초기화\n dWxh, dWhh, dWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)\n dbh, dby = np.zeros_like(bh), np.zeros_like(by)\n dhnext = np.zeros_like(hs[0])\n for t in reversed(range(len(inputs))): # forward pass의 과정을 반대로 진행(t=24부터 시작)\n dy = np.copy(ps[t])\n dy[targets[t]] -= 1 # y의 그래디언트 계산, softmax 함수의 그래디언트 계산\n dWhy += np.dot(dy, hs[t].T)\n dby += dy\n dh = np.dot(Why.T, dy) + dhnext # loss에서 사용된 h와 h를 업데이트한 계산의 그래디언트 값을 더함\n dhraw = (1 - hs[t] * hs[t]) * dh # tanh 역전파\n dbh += dhraw\n dWxh += np.dot(dhraw, xs[t].T)\n dWhh += np.dot(dhraw, hs[t - 1].T)\n dhnext = np.dot(Whh.T, dhraw)\n for dparam in [dWxh, dWhh, dWhy, dbh, dby]:\n np.clip(dparam, -5, 5, out=dparam) # 그래디언트 발산 방지\n return loss, dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs) - 1]", "def getLoss(inputs, targets, hprev):\n xs, hs, fs, ps = {}, {}, {}, {} # all vaues,key: time t, value: vectors of time t\n hprev = np.zeros((hidden_size,1)) # previous hidden layer output\n hs[-1] = np.copy(hprev) # last hidhen layer output\n loss = 0\n\n # 1. forword propagation\n for t in xrange(len(inputs)):\n xs[t] = np.zeros((input_size, 1))\n xs[t][inputs[t]] = 1\n hs[t] = np.tanh(np.dot(Whh, hs[t-1]) + np.dot(Wxh, xs[t]) + bh)\n fs[t] = np.dot(Whf, hs[t]) + bf\n ps[t] = np.exp(fs[t]) / np.sum(np.exp(fs[t]))\n loss += -np.log(ps[t][targets[t], 0])\n\n # 2. backward propagation\n dWxh, dWhh, dWhf = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Whf)\n dbh, dbf = np.zeros_like(bh), np.zeros_like(bf)\n dhnext = np.zeros_like(hs[0])\n\n for t in reversed(xrange(len(inputs))):\n df = np.copy(ps[t])\n df[targets[t]] -= 1\n dWhf += np.dot(df, hs[t].T)\n dbf += df\n dh = np.dot(Whf.T, df) + dhnext # + dhnext, means back-prop twice at one time\n dhraw = (1 - hs[t] * hs[t]) * dh\n dbh += dhraw\n dhnext = np.dot(Whh.T, dhraw)\n dWxh += np.dot(dhraw, xs[t].T)\n dWhh += np.dot(dhraw, hs[t-1].T)\n\n # 3. 
cut gradients to avoid exploding\n for dparam in [dWxh, dWhh, dWhf, dbh, dbf]:\n np.clip(dparam, -5,5, out=dparam)\n\n return loss, dWxh, dWhh, dWhf, dbh, dbf, hs[len(inputs)-1]", "def lossFun(inputs, targets, hprev):\n xs, hs, ys, ps = {}, {}, {}, {}\n hs[-1] = np.copy(hprev)\n loss = 0\n # forward pass\n for t in range(len(inputs)):\n xs[t] = np.zeros((vocab_size,1)) # encode in 1-of-k representation\n xs[t][inputs[t]] = 1\n hs[t] = np.tanh(np.dot(Wxh, xs[t]) + np.dot(Whh, hs[t-1]) + bh) # hidden state\n ys[t] = np.dot(Why, hs[t]) + by # unnormalized log probabilities for next chars\n ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t])) # probabilities for next chars\n loss += -np.log(ps[t][targets[t],0]) # softmax (cross-entropy loss)\n # backward pass: compute gradients going backwards\n dWxh, dWhh, dWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)\n dbh, dby = np.zeros_like(bh), np.zeros_like(by)\n dhnext = np.zeros_like(hs[0])\n for t in reversed(range(len(inputs))):\n dy = np.copy(ps[t])\n dy[targets[t]] -= 1 # backprop into y. see http://cs231n.github.io/neural-networks-case-study/#grad if confused here\n dWhy += np.dot(dy, hs[t].T)\n dby += dy\n dh = np.dot(Why.T, dy) + dhnext # backprop into h\n dhraw = (1 - hs[t] * hs[t]) * dh # backprop through tanh nonlinearity\n dbh += dhraw\n dWxh += np.dot(dhraw, xs[t].T)\n dWhh += np.dot(dhraw, hs[t-1].T)\n dhnext = np.dot(Whh.T, dhraw)\n for dparam in [dWxh, dWhh, dWhy, dbh, dby]:\n np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients\n return loss, dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs)-1]", "def lossFun(self, inputs, targets, hprev):\n xs, hs, ys, ps = {}, {}, {}, {}\n hs[-1] = np.copy(hprev)\n loss = 0\n # forward pass\n for t in xrange(len(inputs)):\n xs[t] = np.zeros((self.vocab_size,1)) # encode in 1-of-k representation\n xs[t][inputs[t]] = 1\n hs[t] = np.tanh(np.dot(self.Wxh, xs[t]) + np.dot(self.Whh, hs[t-1]) + self.bh) # hidden state\n ys[t] = np.dot(self.Why, hs[t]) + self.by # unnormalized log probabilities for next chars\n ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t])) # probabilities for next chars\n loss += -np.log(ps[t][targets[t],0]) # softmax (cross-entropy loss)\n # backward pass: compute gradients going backwards\n dWxh, dWhh, dWhy = np.zeros_like(self.Wxh), np.zeros_like(self.Whh), np.zeros_like(self.Why)\n dbh, dby = np.zeros_like(self.bh), np.zeros_like(self.by)\n dhnext = np.zeros_like(hs[0])\n for t in reversed(xrange(len(inputs))):\n dy = np.copy(ps[t])\n dy[targets[t]] -= 1 # backprop into y. 
see http://cs231n.github.io/neural-networks-case-study/#grad if confused here\n dWhy += np.dot(dy, hs[t].T)\n dby += dy\n dh = np.dot(self.Why.T, dy) + dhnext # backprop into h\n dhraw = (1 - hs[t] * hs[t]) * dh # backprop through tanh nonlinearity\n dbh += dhraw\n dWxh += np.dot(dhraw, xs[t].T)\n dWhh += np.dot(dhraw, hs[t-1].T)\n dhnext = np.dot(self.Whh.T, dhraw)\n for dparam in [dWxh, dWhh, dWhy, dbh, dby]:\n np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients\n return loss, dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs)-1]", "def lossFun(inputs, targets):\n hs, ys, ps, ts = {}, {}, {}, {}\n loss = 0\n # forward pass\n for t in xrange(len(inputs)):\n ts[t] = np.zeros((output_size,1)) # encode in 1-of-k representation\n ts[t][int(targets[t])] = 1\n hs[t] = np.tanh(np.dot(Wxh, inputs[t]) + bh) # hidden state\n ys[t] = np.dot(Why,hs[t]) + by # unnormalized log probabilities\n ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t])) # probabilities\n loss += -np.log(np.dot(ts[t].T,ps[t])) # softmax (cross-entropy loss)\n # backward pass: compute gradients going backwards\n dWxh, dWhy = np.zeros_like(Wxh), np.zeros_like(Why)\n dbh, dby = np.zeros_like(bh), np.zeros_like(by)\n dhnext = np.zeros_like(hs[0])\n for t in reversed(xrange(len(inputs))):\n dy = np.copy(ps[t])\n dy[int(targets[t])] -= 1 # backprop into y\n dWhy += np.dot(dy, hs[t].T)\n dby += dy\n dh = np.dot(Why.T, dy) # backprop into h\n dhraw = (1 - hs[t] * hs[t]) * dh # backprop through tanh nonlinearity\n dbh += dhraw\n dWxh += np.dot(dhraw, inputs[t].T)\n for dparam in [dWxh, dWhy, dbh, dby]:\n np.clip(dparam, -5, 5, out=dparam) # clip gradients\n return loss, dWxh, dWhy, dbh, dby", "def unlabeled_loss(self, x):\n\tqy_l = dgm.forwardPassCat(x, self.qy_x, self.n_hid, self.nonlinearity, self.bn, scope='qy_x')\n\tx_r = tf.tile(x, [self.n_y,1])\n\ty_u = tf.reshape(tf.tile(tf.eye(self.n_y), [1, tf.shape(self.x_u)[0]]), [-1, self.n_y])\n\tn_u = tf.shape(x)[0] \n\tlb_u = tf.transpose(tf.reshape(self.labeled_loss(x_r, y_u), [self.n_y, n_u]))\n\tlb_u = tf.reduce_sum(qy_l * lb_u, axis=-1)\n\tqy_entropy = -tf.reduce_sum(qy_l * tf.log(qy_l + 1e-10), axis=-1)\n\treturn lb_u + qy_entropy", "def loss(self, data, labels):\n data = self.transform_data(data)\n \n y_n = LogisticRegression.sigmoid(np.dot(data, self.w))\n t_n = labels\n \n return ((-t_n * np.log(y_n) - (1 - t_n) * np.log(1 - y_n)).sum() + self.l / 2 * np.dot(self.w.T, self.w)) / len(data)", "def loss_fn(self, lbl, y):\n\n binlbl = self._to_device(lbl[:,0]>.5)\n # center = self._to_device(lbl[:,3]) \n offset = 5. 
* self._to_device(lbl[:,1:]) \n\n loss = self.criterion(y[:,:2], offset) \n loss2 = self.criterion2(y[:,2], binlbl)\n\n # loss3 = self.criterion(y[:,3], center)\n\n loss = loss + loss2\n return loss", "def deep_feature_loss(self, y0, y1):\n assert (self.sess is not None) and (not self.sess._closed)\n if not self.vars_loaded:\n print((\"WARNING: `deep_feature_loss` called before loading vars\"))\n feed_dict={self.tensor_wave0: y0, self.tensor_wave1: y1}\n return self.sess.run(self.loss_deep_features, feed_dict=feed_dict)", "def loss_fn(outputs, labels, wts):\n\n # reshape labels to give a flat vector of length batch_size*seq_len\n loss_noreduce = nn.BCEWithLogitsLoss(reduce=False)\n loss = torch.mean(loss_noreduce(outputs, labels)*wts)\n\t\n # compute cross entropy loss for all tokens\n return loss", "def lossFun(phrase, target, hprev):\n xs, hs, ys, ps = {}, {}, {}, {}\n rs, zs, cs = {}, {}, {}\n rbars, zbars, cbars = {}, {}, {}\n hs[-1] = np.copy(hprev)\n loss = 0\n\n # forward pass\n # xs represents entire phrase/sentence\n for t in range(len(phrase)):\n xs[t] = np.zeros((vector_len,1)) # encode in 1-of-k representation\n #Copying entire vector for each word\n\n for j in range(32):\n xs[t][j] = phrase[t][j]\n\n #GRU Implementation\n rbars[t] = np.dot(Wr, xs[t]) + np.dot(Ur, hs[t-1]) + br\n rs[t] = sigmoid(rbars[t])\n\n # The z gate, which interpolates between candidate and h[t-1] to compute h[t]\n zbars[t] = np.dot(Wz, xs[t]) + np.dot(Uz, hs[t-1]) + bz\n zs[t] = sigmoid(zbars[t])\n\n # The candidate, which is computed and used as described above.\n cbars[t] = np.dot(Wc, xs[t]) + np.dot(Uc, np.multiply(rs[t] , hs[t-1])) + bc\n cs[t] = np.tanh(cbars[t])\n\n ones = np.ones_like(zs[t])\n hs[t] = np.multiply(cs[t],zs[t]) + np.multiply(hs[t-1],ones - zs[t])\n\n #Many 2 one\n last = len(phrase) - 1 # Getting only last hidden state\n ys = np.dot(Why, hs[last]) + by # unnormalized log probabilities for next chars\n #Using softmax\n ps = np.exp(ys) / np.sum(np.exp(ys)) # probabilities for next chars\n\n # compute the vectorized cross-entropy loss\n one = np.ones_like(ps)\n a = np.multiply(target.T , np.log(ps))\n b = np.multiply(one - target.T, np.log(one-ps))\n loss -= (a + b)\n\n # backward pass: compute gradients going backwards\n dWc = np.zeros_like(Wc)\n dWr = np.zeros_like(Wr)\n dWz = np.zeros_like(Wz)\n dUc = np.zeros_like(Uc)\n dUr = np.zeros_like(Ur)\n dUz = np.zeros_like(Uz)\n dWhy = np.zeros_like(Why)\n\n # allocate space for the grads of loss with respect to biases\n dbc = np.zeros_like(bc)\n dbr = np.zeros_like(br)\n dbz = np.zeros_like(bz)\n dby = np.zeros_like(by)\n\n # no error is received from beyond the end of the sequence\n dhnext = np.zeros_like(hs[0])\n drbarnext = np.zeros_like(rbars[0])\n dzbarnext = np.zeros_like(zbars[0])\n dcbarnext = np.zeros_like(cbars[0])\n zs[len(phrase)] = np.zeros_like(zs[0])\n rs[len(phrase)] = np.zeros_like(rs[0])\n\n dy = np.subtract(ps,target) # backprop into y.\n dWhy += np.dot(dy, hs[last].T)\n dby += dy\n\n # Not Sure if this was wrong\n #dh = np.dot(Why.T, dy) + dhnext # backprop into\n\n for t in reversed(xrange(len(phrase))):\n # h[t] influences the cost in 5 ways:\n\n # through the interpolation using z at t+1\n dha = np.multiply(dhnext, ones - zs[t+1])\n\n # through transformation by weights into rbar\n dhb = np.dot(Ur.T,drbarnext)\n\n # through transformation by weights into zbar\n dhc = np.dot(Uz.T,dzbarnext)\n\n # through transformation by weights into cbar\n dhd = np.multiply(rs[t+1],np.dot(Uc.T,dcbarnext))\n\n # through the output 
layer at time t\n dhe = np.dot(Why.T,dy)\n\n dh = dha + dhb + dhc + dhd + dhe\n\n dc = np.multiply(dh,zs[t])\n\n #backprop through tanh\n dcbar = np.multiply(dc , ones - np.square(cs[t]))\n\n dr = np.multiply(hs[t-1],np.dot(Uc.T,dcbar))\n dz = np.multiply( dh, (cs[t] - hs[t-1]) )\n\n # backprop through sigmoids\n drbar = np.multiply( dr , np.multiply( rs[t] , (ones - rs[t])) )\n dzbar = np.multiply( dz , np.multiply( zs[t] , (ones - zs[t])) )\n\n dWr += np.dot(drbar, xs[t].T)\n dWz += np.dot(dzbar, xs[t].T)\n dWc += np.dot(dcbar, xs[t].T)\n\n dUr += np.dot(drbar, hs[t-1].T)\n dUz += np.dot(dzbar, hs[t-1].T)\n dUc += np.dot(dcbar, np.multiply(rs[t],hs[t-1]).T)\n\n dbr += drbar\n dbc += dcbar\n dbz += dzbar\n\n dhnext = dh\n drbarnext = drbar\n dzbarnext = dzbar\n dcbarnext = dcbar\n\n\n '''Clipping Optional\n for dparam in [dWxh, dWhh, dWhy, dbh, dby]:\n np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients\n '''\n return loss, dWc, dWr, dWz, dUc, dUr, dUz, dWhy, dbc, dbr, dbz, dby, hs[last]", "def calculate_loss(y, y_pred):\n\n return tf.losses.softmax_cross_entropy(y, y_pred)", "def compute_loss(self, output: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n\n return F.cross_entropy(output, y)", "def compute_loss(self, output: torch.Tensor, y: torch.Tensor):\n\n pass", "def nn_cost_function(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y, l):\n Theta_1 = np.reshape(nn_params[0:(hidden_layer_size * (input_layer_size + 1)), ],\n (hidden_layer_size, input_layer_size + 1))\n Theta_2 = np.reshape(nn_params[(hidden_layer_size * (input_layer_size + 1)):, ],\n (num_labels, hidden_layer_size + 1))\n\n m, n = X.shape\n X = np.hstack((np.ones((m, 1)), X))\n\n Z_2 = X.dot(Theta_1.T)\n A_2 = sigmoid(Z_2)\n A_2 = np.hstack((np.ones((m, 1)), A_2))\n\n Z_3 = A_2.dot(Theta_2.T)\n A_3 = sigmoid(Z_3)\n\n Y = np.zeros((m, num_labels))\n for i in range(m):\n Y[i, y[i] - 1] = 1\n\n j = 0.0\n for i in range(m):\n j += np.log(A_3[i, ]).dot(-Y[i, ].T) - np.log(1 - A_3[i, ]).dot(1 - Y[i, ].T)\n j /= m\n\n Theta_1_square = np.square(Theta_1[:, 1:])\n Theta_2_square = np.square(Theta_2[:, 1:])\n reg = 1.0 * l / (2 * m) * (np.sum(Theta_1_square) + np.sum(Theta_2_square))\n j += reg\n\n d_3 = A_3 - Y\n D_2 = d_3.T.dot(A_2)\n\n Z_2 = np.hstack((np.ones((m, 1)), Z_2))\n d_2 = d_3.dot(Theta_2) * sigmoid_gradient(Z_2)\n d_2 = d_2[:, 1:]\n D_1 = d_2.T.dot(X)\n\n Theta_1_grad = 1.0 * D_1 / m\n Theta_1_grad[:, 1:] = Theta_1_grad[:, 1:] + 1.0 * l / m * Theta_1[:, 1:]\n\n Theta_2_grad = 1.0 * D_2 / m\n Theta_2_grad[:, 1:] = Theta_2_grad[:, 1:] + 1.0 * l / m * Theta_2[:, 1:]\n\n grad = np.hstack((Theta_1_grad.ravel(), Theta_2_grad.ravel()))\n\n return j, grad", "def loss(self, X, y=None):\n scores = None\n ############################################################################\n #执行神经网络的前向传播,计算样本对于每个类的分数,并将其储存在scores变量中\n ############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n h1_out,h1_cache = affine_relu_forward(X,self.params['W1'],self.params['b1'])\n scores,out_cache = affine_forward(h1_out,self.params['W2'],self.params['b2'])\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ############################################################################\n # END OF YOUR CODE\n ############################################################################\n # 如果没有给定y,那么就进入test模式,返回scores\n if y is None:\n return scores\n\n loss, grads = 0, {}\n 
############################################################################\n # 进行反向传播\n #将损失值和梯度储存在一个字典中。损失的变量为loss,梯度的变量为grads。\n #使用softmax计算损失,确保grad[k]是self.params[k]的梯度。别忘添加一个L2正则化\n \n #注意:L2正则化系数请给定为0.5,为了一些啥乱七八糟的理由\n # NOTE: To ensure that your implementation matches ours and you pass the #\n # automated tests, make sure that your L2 regularization includes a factor #\n # of 0.5 to simplify the expression for the gradient.\n ############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n \n #第二层affine的反馈传播\n loss,dout = softmax_loss(scores,y)\n dout,dw2,db2 = affine_backward(dout,out_cache)\n \n #loss的正则化\n loss += 0.5 * self.reg * (np.sum(self.params['W1'] ** 2) + np.sum(self.params['W2'] ** 2))\n \n #第一层affine+Relu的反馈传播\n _,dw1,db1 = affine_relu_backward(dout,h1_cache)\n \n #dw1和dw2的正则化??\n dw1 += self.reg * self.params['W1']\n dw2 += self.reg * self.params['W2']\n \n \n grads['W1'],grads['b1'] = dw1,db1\n grads['W2'],grads['b2'] = dw2,db2\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ############################################################################\n # END OF YOUR CODE \n ############################################################################\n\n return loss, grads", "def bp_mll_loss(y_true, y_pred):\n # get true and false labels\n shape = tf.shape(y_true)\n y_i = tf.equal(y_true, tf.ones(shape))\n y_i_bar = tf.not_equal(y_true, tf.ones(shape))\n\n # get indices to check\n truth_matrix = tf.to_float(pairwise_and(y_i, y_i_bar))\n\n # calculate all exp'd differences\n sub_matrix = pairwise_sub(y_pred, y_pred)\n exp_matrix = tf.exp(tf.negative(sub_matrix))\n\n # check which differences to consider and sum them\n sparse_matrix = tf.multiply(exp_matrix, truth_matrix)\n sums = tf.reduce_sum(sparse_matrix, axis=[1,2])\n\n # get normalizing terms and apply them\n y_i_sizes = tf.reduce_sum(tf.to_float(y_i), axis=1)\n y_i_bar_sizes = tf.reduce_sum(tf.to_float(y_i_bar), axis=1)\n normalizers = tf.multiply(y_i_sizes, y_i_bar_sizes)\n results = tf.divide(sums, normalizers)\n\n # sum over samples\n return tf.reduce_sum(results)", "def loss(self, X, y=None, reg=0.0):\r\n Ws = self.weights\r\n bs = self.biases\r\n N, D = X.shape # number of samples, number of features per sample\r\n\r\n # Compute the forward pass\r\n self.activations = []\r\n for i in xrange(len(Ws)): # for each set of weights\r\n W,b = Ws[i], bs[i]\r\n if i == 0:\r\n H = np.dot(X,W) + b\r\n else:\r\n H = np.dot(self.activations[-1],W) + b\r\n if i < len(Ws) - 1: # if we're computing hidden activations, apply nonlinear function\r\n H = (H > 0) * (H) + (H < 0) * (H/100.0)\r\n self.activations.append(H)\r\n scores = self.activations[-1]\r\n \r\n # If there's no labels provided, stop here\r\n if y is None:\r\n return scores\r\n\r\n # Compute the loss\r\n exped_scores = np.exp(scores)\r\n sums = np.sum(exped_scores,axis=1)\r\n # softmax classifier loss\r\n data_loss = (-1.0/N) * np.sum(np.log(exped_scores[range(N),y.astype(int)] / sums))\r\n\r\n # loss due to regularization\r\n reg_loss = 0\r\n for i in xrange(len(Ws)):\r\n reg_loss += np.sum(Ws[i]**2)\r\n reg_loss *= reg*(0.5)\r\n\r\n loss = data_loss + reg_loss\r\n \r\n # Compute gradients\r\n weights_grads = []\r\n biases_grads = []\r\n activation_grads = []\r\n for i in xrange(len(Ws)):\r\n weights_grads.append(np.copy(Ws[i]))\r\n biases_grads.append(np.copy(bs[i]))\r\n activation_grads.append(np.copy(self.activations[i]))\r\n\r\n DlossDscores = 
np.array(exped_scores / (N * np.matrix(sums).T))\r\n DlossDscores[range(N),y.astype(int)] -= (1.0/N)\r\n \r\n for i in xrange(len(Ws)-1,-1,-1):\r\n if i == 0:\r\n weights_grads[0] = np.dot(X.T, activation_grads[0]) + reg*Ws[0]\r\n biases_grads[0] = np.dot(np.ones((1,N)), activation_grads[0])[0]\r\n elif i == len(Ws)-1:\r\n H = self.activations[i-1]\r\n weights_grads[i] = np.dot(H.T, DlossDscores) + reg*Ws[i]\r\n biases_grads[i] = np.dot(np.ones((1,N)), DlossDscores)[0]\r\n dH = np.dot(DlossDscores, Ws[i].T)\r\n activation_grads[i-1] = dH\r\n else:\r\n H = self.activations[i-1]\r\n dH_out = activation_grads[i]\r\n weights_grads[i] = np.dot(H.T, dH_out) + reg*Ws[i]\r\n biases_grads[i] = np.dot(np.ones((1,N)), dH_out)[0]\r\n dH = np.dot(dH_out, Ws[i].T)\r\n dH = dH * (H > 0) + dH/100.0 * (H < 0)\r\n activation_grads[i-1] = dH\r\n \r\n grads = {}\r\n grads['weights'] = weights_grads\r\n grads['biases'] = biases_grads\r\n\r\n return loss, grads", "def compute_loss(pred,y,metrics):\n loss = F.cross_entropy(pred,y)\n metrics['loss'] += loss.data.cpu().numpy() \n pred_l = pred.data.cpu().numpy()\n #print(np.sum(np.argmax(pred_l,axis=1)==y.data.cpu().numpy())/pred_l.shape[0])\n metrics['accuracy'] +=np.sum(np.argmax(pred_l,axis=1)==y.data.cpu().numpy())/pred_l.shape[0] \n return loss" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build this task's classification head.
def build_head(self, n_features, device=None):
    # By default this is a linear layer
    self.head = self.create_compatible_head(n_features, device)
[ "def head(self) -> tf.estimator.Head:\n\n task_type = self._problem_statement.tasks[0].type\n if task_type.HasField('one_dimensional_regression'):\n return tf.estimator.RegressionHead()\n num_classes = (\n self._tf_transform_output.num_buckets_for_transformed_feature(\n self.raw_label_key))\n if task_type.HasField('multi_class_classification'):\n return tf.estimator.MultiClassHead(num_classes)\n if task_type.HasField('binary_classification'):\n return tf.estimator.BinaryClassHead()\n raise ValueError('Invalid task type: {}'.format(task_type))", "def add_classification_head(\n self,\n head_name,\n num_labels=2,\n layers=2,\n activation_function=\"tanh\",\n overwrite_ok=False,\n multilabel=False,\n id2label=None,\n ):\n\n if multilabel:\n head = MultiLabelClassificationHead(head_name, num_labels, layers, activation_function, id2label, self)\n else:\n head = ClassificationHead(head_name, num_labels, layers, activation_function, id2label, self)\n self.add_prediction_head(head, overwrite_ok)", "def register_classification_head(\n self, name, num_classes=None, inner_dim=None, **kwargs\n ):\n if name in self.classification_heads:\n prev_num_classes = self.classification_heads[name].out_proj.out_features\n prev_inner_dim = self.classification_heads[name].dense.out_features\n if num_classes != prev_num_classes or inner_dim != prev_inner_dim:\n logger.warning(\n 're-registering head \"{}\" with num_classes {} (prev: {}) '\n \"and inner_dim {} (prev: {})\".format(\n name, num_classes, prev_num_classes, inner_dim, prev_inner_dim\n )\n )\n embed_dim = self.cfg.pretrained_model_args.model.embed_dim\n self.classification_heads[name] = RobertaClassificationHead(\n input_dim=embed_dim,\n inner_dim=inner_dim or embed_dim,\n num_classes=num_classes,\n activation_fn=self.cfg.pooler_activation_fn,\n pooler_dropout=self.cfg.pooler_dropout,\n q_noise=self.cfg.quant_noise_pq,\n qn_block_size=self.cfg.quant_noise_pq_block_size,\n do_spectral_norm=self.cfg.spectral_norm_classification_head,\n )", "def register_classification_head(\n self, name, num_classes=None, inner_dim=None, **kwargs\n ):\n if name in self.classification_heads:\n prev_num_classes = self.classification_heads[name].out_proj.out_features\n prev_inner_dim = self.classification_heads[name].dense.out_features\n if num_classes != prev_num_classes or inner_dim != prev_inner_dim:\n logger.warning(\n 're-registering head \"{}\" with num_classes {} (prev: {}) '\n \"and inner_dim {} (prev: {})\".format(\n name, num_classes, prev_num_classes, inner_dim, prev_inner_dim\n )\n )\n self.classification_heads[name] = RobertaClassificationHead(\n input_dim=self.args.encoder_embed_dim,\n inner_dim=inner_dim or self.args.encoder_embed_dim,\n num_classes=num_classes,\n activation_fn=self.args.pooler_activation_fn,\n pooler_dropout=self.args.pooler_dropout,\n q_noise=self.args.quant_noise_pq,\n qn_block_size=self.args.quant_noise_pq_block_size,\n do_spectral_norm=self.args.spectral_norm_classification_head,\n )", "def register_classification_head(\r\n self, name, num_classes=None, inner_dim=None, **kwargs\r\n ):\r\n if name in self.classification_heads:\r\n prev_num_classes = self.classification_heads[name].out_proj.out_features\r\n prev_inner_dim = self.classification_heads[name].dense.out_features\r\n if num_classes != prev_num_classes or inner_dim != prev_inner_dim:\r\n logger.warning(\r\n 're-registering head \"{}\" with num_classes {} (prev: {}) '\r\n \"and inner_dim {} (prev: {})\".format(\r\n name, num_classes, prev_num_classes, inner_dim, prev_inner_dim\r\n 
)\r\n )\r\n self.classification_heads[name] = RobertaClassificationHead(\r\n input_dim=self.args.encoder_embed_dim,\r\n inner_dim=inner_dim or self.args.encoder_embed_dim,\r\n num_classes=num_classes,\r\n activation_fn=self.args.pooler_activation_fn,\r\n pooler_dropout=self.args.pooler_dropout,\r\n q_noise=self.args.quant_noise_pq,\r\n qn_block_size=self.args.quant_noise_pq_block_size,\r\n do_spectral_norm=self.args.spectral_norm_classification_head,\r\n )", "def head(self) -> TaskHead:\n return self._model.head", "def register_classification_head(self, name, num_classes=None, inner_dim=None, **kwargs):\n if name in self.classification_heads:\n prev_num_classes = self.classification_heads[name].out_proj.out_features\n prev_inner_dim = self.classification_heads[name].dense.out_features\n if num_classes != prev_num_classes or inner_dim != prev_inner_dim:\n logger.warning(\n 're-registering head \"{}\" with num_classes {} (prev: {}) '\n 'and inner_dim {} (prev: {})'.format(\n name, num_classes, prev_num_classes, inner_dim, prev_inner_dim\n )\n )\n self.classification_heads[name] = HuggingFaceBertClassificationHead(\n self.args.embed_dim, # self.args.encoder_embed_dim,\n inner_dim or self.args.embed_dim,\n num_classes,\n self.args.pooler_activation_fn,\n self.args.pooler_dropout,\n self.args.quant_noise_pq,\n self.args.quant_noise_pq_block_size,\n )", "def set_task_layer(self, strategy, experience: Experience):\n\n # task label is set depending on the type of scenario\n # multitask or others\n if hasattr(strategy, 'mb_task_id'):\n task_label = strategy.mb_task_id\n else:\n task_label = experience.task_label\n n_output_units = max(experience.dataset.targets) + 1\n\n if task_label not in self.task_layers:\n # create head for unseen tasks\n task_layer = self.create_task_layer(n_output_units=n_output_units)\n strategy.add_new_params_to_optimizer(task_layer.parameters())\n self.task_layers[task_label] = task_layer.to(strategy.device)\n else:\n # check head expansion\n self.task_layers[task_label] = \\\n self.expand_task_layer(strategy, n_output_units,\n self.task_layers[task_label])\n\n # set correct head\n setattr(self.model, self.classifier_field,\n self.task_layers[task_label])", "def set_head(self, type: Type[TaskHead], **kwargs):\n\n self._config.head = TaskHeadConfiguration(type=type, **kwargs)\n self._model.set_head(self._config.head.compile(backbone=self.backbone))", "def add_classifier(self):\n if self.rnn_gap:\n self.classifier = Classify(3 * self.hidden_dim, self.hidden_dim, self.y_len).to(self.device)\n else:\n # only for the average case\n if self.backbone in (\"resnet18\", \"vgg\", \"squeeze\", \"resnet34\"):\n self.classifier = Classify(3 * 512, self.hidden_dim, self.y_len).to(\n self.device\n )\n elif self.backbone == \"alexnet\":\n self.classifier = Classify(3 * 256, self.hidden_dim, self.y_len).to(\n self.device\n )", "def create(cls, prediction_head_name, layer_dims, class_weights=None):\n return cls.subclasses[prediction_head_name](layer_dims=layer_dims, class_weights=class_weights)", "def load(cls, pretrained_model_name_or_path, revision=None, **kwargs):\n if os.path.exists(pretrained_model_name_or_path) and 'config.json' in pretrained_model_name_or_path and 'prediction_head' in pretrained_model_name_or_path:\n head = super(TextClassificationHead, cls).load(pretrained_model_name_or_path)\n else:\n full_model = AutoModelForSequenceClassification.from_pretrained(pretrained_model_name_or_path, revision=revision, **kwargs)\n head = cls(layer_dims=[full_model.config.hidden_size, 
len(full_model.config.id2label)])\n head.feed_forward.feed_forward[0].load_state_dict(full_model.classifier.state_dict())\n head.label_list = list(full_model.config.id2label.values())\n del full_model\n return head", "def connect_heads_with_processor(self, tasks, require_labels=True):\n for head in self.prediction_heads:\n head.label_tensor_name = tasks[head.task_name]['label_tensor_name']\n label_list = tasks[head.task_name]['label_list']\n if not label_list and require_labels:\n raise Exception(f\"The task '{head.task_name}' is missing a valid set of labels\")\n label_list = tasks[head.task_name]['label_list']\n head.label_list = label_list\n num_labels = len(label_list)\n head.metric = tasks[head.task_name]['metric']", "def classification_thread_fn(self):\n # last image that classification was done on\n last_image = None\n # last classification result\n last_state = None\n # number of times last classification has the same result in a row\n last_state_count = 0\n # overall count of classifications\n overall_count = 0\n\n while not rospy.is_shutdown():\n # 1. Get camera image\n image = self.sh_get_image()\n\n # 2. In case image was changed\n if last_image != image:\n\n # 3. In case warming up is done send message that the classifier is ready\n if overall_count == WARMUP_NUM:\n self.traffic_classifier_ready_pub.publish(Bool(True))\n rospy.loginfo(\"Warming up is finished\")\n elif overall_count < WARMUP_NUM:\n rospy.loginfo(\"Warming up\")\n\n # 4. Do classification\n start_time = time.time()\n state = self.get_light_state(image, overall_count)\n classification_time = time.time() - start_time\n\n if last_state != state: # in case state is changed, remember it and reset the counter\n last_state_count = 0\n last_state = state\n self.sh_set_classification_info(state, classification_time)\n elif last_state_count >= STATE_COUNT_THRESHOLD: # in case state persists long enough change it\n self.sh_set_state_info(last_state, classification_time)\n else: # otherwise just save last classificaton info for debugging\n self.sh_set_classification_info(state, classification_time)\n\n last_state_count += 1 # increment how many times in a row we got the same state\n overall_count += 1 # increment how many times we done classification\n last_image = image # memorize last image", "def __init__(self, task_type=\"classification\", epochs=None, batch_size=-1, early_stop=\"diff\",\n tol=1e-5, encrypt_param=None, predict_param=None, cv_param=None, interactive_layer_lr=0.1,\n validation_freqs=None, early_stopping_rounds=None, use_first_metric_only=None,\n floating_point_precision=23, selector_param=None, seed=100,\n dataset: DatasetParam = DatasetParam(dataset_name='table'), **kwargs\n ):\n\n explicit_parameters = kwargs[\"explict_parameters\"]\n explicit_parameters[\"optimizer\"] = None\n explicit_parameters[\"bottom_nn_define\"] = None\n explicit_parameters[\"top_nn_define\"] = None\n explicit_parameters[\"interactive_layer_define\"] = None\n explicit_parameters[\"loss\"] = None\n FateComponent.__init__(self, **explicit_parameters)\n\n if \"name\" in explicit_parameters:\n del explicit_parameters[\"name\"]\n for param_key, param_value in explicit_parameters.items():\n setattr(self, param_key, param_value)\n\n self.input = Input(self.name, data_type=\"multi\")\n self.output = Output(self.name, data_type='single')\n self._module_name = \"HeteroNN\"\n self.optimizer = None\n self.bottom_nn_define = None\n self.top_nn_define = None\n self.interactive_layer_define = None\n\n # model holder\n self._bottom_nn_model = 
Sequential()\n self._interactive_layer = Sequential()\n self._top_nn_model = Sequential()\n\n # role\n self._role = 'common' # common/guest/host\n\n if hasattr(self, 'dataset'):\n assert isinstance(\n self.dataset, DatasetParam), 'dataset must be a DatasetParam class'\n self.dataset.check()\n self.dataset: DatasetParam = self.dataset.to_dict()", "def connect_heads_with_processor(self, tasks, require_labels=True):\n if 'nextsentence' not in tasks:\n idx = None\n for i, ph in enumerate(self.prediction_heads):\n if ph.task_name == 'nextsentence':\n idx = i\n if idx is not None:\n logger.info('Removing the NextSentenceHead since next_sent_pred is set to False in the BertStyleLMProcessor')\n del self.prediction_heads[i]\n for head in self.prediction_heads:\n head.label_tensor_name = tasks[head.task_name]['label_tensor_name']\n label_list = tasks[head.task_name]['label_list']\n if not label_list and require_labels:\n raise Exception(f\"The task '{head.task_name}' is missing a valid set of labels\")\n label_list = tasks[head.task_name]['label_list']\n head.label_list = label_list\n if 'RegressionHead' in str(type(head)):\n num_labels = 1\n else:\n num_labels = len(label_list)\n head.metric = tasks[head.task_name]['metric']", "def buildHead(self):\n\n # Checks if we need to stuff the fileName.\n filler = \"\".join([\"@\" for i in range(15 - len(self.fileName))])\n filler = bytes(filler, \"UTF-8\")\n self.fileNameBA = filler + bytes(self.fileName, \"UTF-8\")\n\n # Checks if we need to stuff the fileExtension.\n filler = \"\".join([\"@\" for i in range(5 - len(self.fileExtension))])\n filler = bytes(filler, \"UTF-8\")\n self.fileExtensionBA = filler + bytes(self.fileExtension, \"UTF-8\")\n\n # Update the file size with the number of bytes that were stuffed\n # in the payload.\n self.fileSize = self.fileSize + self.bytesStuffed\n self.fileSizeBA = self.fileSize.to_bytes(4, \"little\")\n\n self.bytesStuffedBA = self.bytesStuffed.to_bytes(1, \"little\")\n\n # HEAD size is 25 bytes.\n # HEAD = fileName[15] + fileSize[4] + fileExtension[5] + bytesStufed[1]\n self.head = self.fileNameBA + self.fileSizeBA + self.fileExtensionBA + self.bytesStuffedBA", "def _multi_class_head(n_classes,\n label_name=None,\n weight_column_name=None,\n enable_centered_bias=False,\n head_name=None,\n thresholds=None,\n metric_class_ids=None):\n if (n_classes is None) or (n_classes < 2):\n raise ValueError(\"n_classes must be > 1 for classification: %s.\" %\n n_classes)\n\n if n_classes == 2:\n if metric_class_ids:\n raise ValueError(\"metric_class_ids invalid for n_classes==2.\")\n return _BinaryLogisticHead(\n label_name=label_name,\n weight_column_name=weight_column_name,\n enable_centered_bias=enable_centered_bias,\n head_name=head_name,\n thresholds=thresholds)\n\n return _MultiClassHead(\n n_classes=n_classes,\n label_name=label_name,\n weight_column_name=weight_column_name,\n enable_centered_bias=enable_centered_bias,\n head_name=head_name,\n thresholds=thresholds,\n metric_class_ids=metric_class_ids)", "def generate(cl):\n Classification.Validate(cl)\n\n user = cl['user_id']\n if user is None:\n user = cl['session_id']\n\n subject = cl['subject_id']\n annotation = cl['annotation']\n\n c = Classification(user, subject, annotation)\n\n return c" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test data for this task
def test_data(self):
    return self._test_data
[ "def make_test_data(self):\n import data", "def load_test_data(self):\n last_update = datetime.strptime(\n '2020-08-04T16:32:38.390390Z', DATETIME_FORMAT)\n self.task_data = [\n {\n 'id': '0xfakeTaskId',\n 'instance': 'MyTurbiniaInstance',\n 'last_update': last_update,\n 'name': 'TaskName',\n 'evidence_name': 'EvidenceName',\n 'report_data': '#### Fake Low priority Report\\n* Fake Bullet',\n 'report_priority': 80,\n 'request_id': '0xFakeRequestId',\n 'run_time': timedelta(minutes=1),\n 'saved_paths': ['/no/path/', '/fake/path'],\n 'status': 'This fake task executed',\n 'successful': True,\n 'requester': 'myuser',\n 'worker_name': 'fake_worker'\n }, {\n 'id': '0xfakeTaskId2',\n 'instance': 'MyTurbiniaInstance',\n 'last_update': last_update + timedelta(minutes=20),\n 'name': 'TaskName2',\n 'evidence_name': 'EvidenceName2',\n 'report_data': '#### Fake High priority Report\\n* Fake Bullet',\n 'report_priority': 10,\n 'request_id': '0xFakeRequestId',\n 'run_time': timedelta(minutes=5),\n 'saved_paths': ['/no/path/2', '/fake/path/2'],\n 'status': 'This second fake task executed',\n 'successful': True,\n 'requester': 'myuser',\n 'worker_name': 'fake_worker2'\n }, {\n 'id': '0xfakeTaskId3',\n 'instance': 'MyTurbiniaInstance',\n 'last_update': last_update,\n 'name': 'TaskName3',\n 'evidence_name': 'EvidenceName3',\n 'report_data': '',\n 'report_priority': 80,\n 'request_id': '0xFakeRequestId2',\n 'run_time': timedelta(minutes=3),\n 'saved_paths': ['/no/path/3', '/fake/path/3'],\n 'status': 'Third Task Failed...',\n 'successful': False,\n 'requester': 'myuser2',\n 'worker_name': 'fake_worker'\n }\n ] # yapf: disable", "def test_data(self):\n return self._create_tf_dataset(self._test_mols, self._test_labels)", "def get_test_data(self) -> Any:\n return self.test_data", "def test_load_data(self):\n pass", "def setUpTestData(cls):\n pass", "def test_read_data(self):\n pass", "def test_data(self):\n return self._test_data.series", "def load_test_data(self):\n self.meta_data = pd.read_csv(settings[\"RAW_TEST_METADATA_PATH\"])\n\n self.dataset_name = 'test'", "def setUpTestData(cls):\n cls.board = Board.objects.create(name = DICT.get('board_name') )\n\n cls.task = Task.objects.create(head = DICT.get('task_head'),\n description = DICT.get('task_description'),\n board = cls.board )", "def test_sample_data(self):\n\n # get a temporary object\n tmp = self.objs[\"base\"]\n\n # grab the correct data file \n datafile = \"./data/disqus_sample.json\"\n\n # loop over all test disqus processing objects\n for o in self.objs.values():\n # loop over records in test file \n for i, record in o.file_reader(datafile):\n # if there's a problem parsing, this method will raise an Exception\n record_string = o.procRecord(record)", "def test_create_training_dataset(self):\n pass", "def test_batch(self):\n pass", "def __load_test(self):\n print(\"loading testing data...\")\n with open('test/test.json') as test_file:\n return json.load(test_file)", "def getTestSet(self):\r\n return self.fTestData", "def setUp(self):\n self.test_data = cf.DataNotIdeal(\"test.csv\")\n self.train_data = cf.DataNotIdeal(\"train.csv\")\n self.ideal_data = cf.Data('ideal.csv')", "def test(self, dataset) -> None:\n raise NotImplementedError()", "def test_get_run(self):\n pass", "def photosynthesis_test_data():\n return PhotosynthesisTestData()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Dataloader type for this task
def dataloader(self):
    return DataLoader
[ "def dataloader(self):\n return self.augment_cfg['dataloader'].format(self.plan[\"network_dim\"])", "def dataloader(self):\n\n # load / split data\n train_data = self.data.get_train_data()\n if self.args.use_dev:\n train_data, dev_data = self.data.split_data(train_data)\n test_data = self.data.get_test_data()\n\n #print(train_data[0])\n #print(dev_data[0])\n #print(test_data[0])\n\n # build dataset\n train_dataset = self.loader.build_dataset(\n train_data, \n self.args.train_max_seq_len)\n train_loader = self.loader.build_dataloader(\n train_dataset, 'train')\n\n test_dataset = self.loader.build_dataset(\n test_data,\n self.args.eval_max_seq_len)\n test_loader = self.loader.build_dataloader(\n test_dataset, 'test')\n\n if self.args.use_dev:\n dev_dataset = self.loader.build_dataset(\n dev_data,\n self.args.eval_max_seq_len)\n dev_loader = self.loader.build_dataloader(\n dev_dataset, 'dev')\n return train_loader, dev_loader, test_loader\n else:\n return train_loader, test_loader", "def get_dataloader(self):\n shuffle = True if self.mode == \"train\" else False\n return DataLoader(self.get_dataset(), batch_size=self.batch_size, shuffle = shuffle, \n collate_fn=create_mini_batch)", "def train_dataloader(self) -> torch.utils.data.DataLoader: \n return torch.utils.data.DataLoader(self.dataset_train, **self.dl_kwargs)", "def data_source_types():\n return [\"FlatfileDataset\", \"XpathDataset\", \"MatrixDataset\", \"SpreadsheetDataset\", \"RDBMSDataset\", \"FilesDataset\"]", "def make_data_load(\r\n self, data_source: DataSourceBase, params: Dict, loader_type: str\r\n ) -> object:\r\n raise NotImplementedError", "def get_dataloader(dataset_config) -> Dict[str, torch.utils.data.DataLoader]:\n if platform.system() == 'Windows':\n n_workers = 0\n else:\n n_workers = min(multiprocessing.cpu_count()-2, dataset_config.batch_size)\n\n aux_aug = {}\n if 'img_normalize' in dataset_config.augmentations:\n aux_aug['img_normalize'] = dataset_config.augmentations.img_normalize\n if 'disparity_out' in dataset_config.augmentations:\n aux_aug['disparity_out'] = dataset_config.augmentations.disparity_out\n if 'box_type' in dataset_config.augmentations:\n aux_aug['box_type'] = dataset_config.augmentations.box_type\n\n if dataset_config.type == \"Kitti\":\n # Using the segmentaiton gt dir to count the number of images\n seg_dir = os.path.join(dataset_config.rootdir, \"semantic\")\n train_ids, val_ids = id_vec_generator(dataset_config.train_ratio, seg_dir)\n\n datasets = {\n 'Training' : Kitti2015Dataset(\n dataset_config.rootdir, dataset_config.objectives,\n **dataset_config.augmentations, id_vector=train_ids),\n 'Validation' : Kitti2015Dataset(\n dataset_config.rootdir, dataset_config.objectives,\n output_size=dataset_config.augmentations.output_size,\n id_vector=val_ids, **aux_aug)\n }\n elif dataset_config.type == \"Cityscapes\":\n datasets = {\n 'Training' : CityScapesDataset(\n dataset_config.rootdir, dataset_config.subsets, 'train',\n **dataset_config.augmentations),\n 'Validation' : CityScapesDataset(\n dataset_config.rootdir, dataset_config.subsets, 'val',\n output_size=dataset_config.augmentations.output_size, **aux_aug)\n }\n else:\n raise NotImplementedError(f\"Dataset not implemented: {dataset_config.type}\")\n\n dataloaders = {\n 'Validation' : torch.utils.data.DataLoader(\n datasets[\"Validation\"],\n batch_size=dataset_config.batch_size,\n shuffle=dataset_config.shuffle,\n num_workers=n_workers,\n drop_last=dataset_config.drop_last,\n pin_memory=True,\n collate_fn=collate_w_bboxes\n )\n }\n\n if 
hasattr(dataset_config.augmentations, 'rand_scale'):\n dataloaders['Training'] = torch.utils.data.DataLoader(\n datasets[\"Training\"], num_workers=n_workers, pin_memory=True,\n batch_sampler=BatchSamplerRandScale(\n sampler=RandomSampler(datasets[\"Training\"]),\n batch_size=dataset_config.batch_size,\n drop_last=dataset_config.drop_last,\n scale_range=dataset_config.augmentations.rand_scale),\n collate_fn=collate_w_bboxes\n )\n else:\n torch.backends.cudnn.benchmark = True\n\n dataloaders['Training'] = torch.utils.data.DataLoader(\n datasets[\"Training\"],\n batch_size=dataset_config.batch_size,\n shuffle=dataset_config.shuffle,\n num_workers=n_workers,\n drop_last=dataset_config.drop_last,\n pin_memory=True,\n collate_fn=collate_w_bboxes\n )\n\n return dataloaders", "def get_dataloader(path, kind):\n # Your code here\n \n trans = {'train': torchvision.transforms.Compose([ \n # torchvision.transforms.ColorJitter(brightness=0.15, contrast=0.15, saturation=0.06, hue=0.06),\n torchvision.transforms.ColorJitter(brightness=0.10, contrast=0.10, saturation=0.05, hue=0.05),\n torchvision.transforms.RandomPerspective(distortion_scale=0.5, p=0.5),\n # torchvision.transforms.RandomGrayscale(),\n torchvision.transforms.RandomRotation(15),\n torchvision.transforms.RandomHorizontalFlip(),\n torchvision.transforms.RandomVerticalFlip(),\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n\n ]),\n\n 'test' : torchvision.transforms.Compose([torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])}\n \n batch_size = 128\n train_data = torchvision.datasets.ImageFolder(path+'train',transform=trans['train'])\n val_data = torchvision.datasets.ImageFolder(path+'val', transform = trans['test'])\n\n if kind == 'train':\n return torch.utils.data.DataLoader(train_data,\n batch_size = batch_size,\n num_workers = 2,\n shuffle=True)\n if kind == 'val':\n return torch.utils.data.DataLoader(val_data,\n batch_size = batch_size,\n num_workers = 2,\n shuffle=False)", "def create_dataloader(self):\n if self.num_workers:\n dataloader = torch.utils.data.DataLoader(\n self,\n batch_size=self.batch_size,\n num_workers=self.num_workers,\n drop_last=self.drop_last,\n prefetch_factor=self.prefetch_factor,\n persistent_workers=self.persistent_workers,\n )\n else:\n dataloader = torch.utils.data.DataLoader(\n self, batch_size=self.batch_size, drop_last=self.drop_last,\n )\n\n return dataloader", "def dataloader_label(self):\n return self._dataloader_label", "def task_type(self):\n pass", "def create_dataloader(opt):\n dataset_class = find_dataset_using_name(opt.args.dataset_mode)\n \n dataset = dataset_class(opt)\n print(\"dataset [%s] was created\" % type(dataset).__name__)\n\n # Randomly split dataset into training, validation and test sets\n train_size = int(opt.args.train_val_test[0]*len(dataset))\n val_size = int(opt.args.train_val_test[1]*len(dataset))\n test_size = len(dataset) - train_size - val_size\n\n # custom random_split() from custom_split.py\n train_DataSet, val_DataSet, test_DataSet = random_split(dataset, [train_size, val_size, test_size], type(dataset), opt)\n\n # Create dataloader for each phase\n train_dataloader = CustomDatasetDataLoader(opt,train_DataSet)\n val_dataloader = CustomDatasetDataLoader(opt,val_DataSet)\n test_dataloader = CustomDatasetDataLoader(opt,test_DataSet)\n\n # initiate each dataloader\n train_dataloader = train_dataloader.load_data()\n val_dataloader = 
val_dataloader.load_data()\n test_dataloader = test_dataloader.load_data()\n\n return [train_dataloader, val_dataloader, test_dataloader]", "def _setup_infer_dataloader(self, cfg: DictConfig, queries: List[str]) -> 'torch.utils.data.DataLoader':\n dataset = ThutmoseTaggerTestDataset(sents=queries, example_builder=self.builder)\n return torch.utils.data.DataLoader(\n dataset=dataset,\n batch_size=cfg[\"batch_size\"],\n shuffle=False,\n num_workers=cfg.get(\"num_workers\", 0),\n pin_memory=cfg.get(\"pin_memory\", False),\n drop_last=False,\n collate_fn=dataset.collate_fn,\n )", "def getDataSetType(self):\n return self.__data_set_type__", "def _configure_loader(self, loader: Union[DataLoader, tf.data.Dataset]) -> Union[DataLoader, tf.data.Dataset]:\n\n new_loader = loader\n if isinstance(new_loader, DataLoader) and isinstance(self.network, TFNetwork):\n add_batch = bool(new_loader.batch_size)\n if hasattr(loader, 'fe_postprocess_fn') and loader.fe_postprocess_fn is not None:\n # The user is manually batching data and running ops on data batches. No reliable way to shortcut this\n # since ops might require specific batch composition.\n data_instance = next(iter(loader))\n add_batch = False\n else:\n # No batch-based ops so we can try and just use the OpDataset to more quickly get our data summary\n data_instance = loader.dataset[0]\n if isinstance(data_instance, list):\n # This is a batched dataset\n data_instance = data_instance[0]\n add_batch = True\n if isinstance(data_instance, FilteredData):\n # We got unlucky and drew filtered data as the zeroth element. Fall back to a slower but more robust\n # analysis of the batch\n data_instance = next(iter(loader))\n add_batch = False\n data_instance = to_tensor(data_instance, target_type=\"tf\")\n data_type = to_type(data_instance)\n data_shape = to_shape(data_instance, add_batch=add_batch, exact_shape=False)\n new_loader = tf.data.Dataset.from_generator(lambda: loader, data_type, output_shapes=data_shape)\n new_loader = new_loader.prefetch(1)\n if isinstance(new_loader, tf.data.Dataset):\n if self.system.train_steps_per_epoch and self.system.mode == \"train\":\n new_loader = new_loader.take(self.system.train_steps_per_epoch)\n if self.system.eval_steps_per_epoch and self.system.mode == \"eval\":\n new_loader = new_loader.take(self.system.eval_steps_per_epoch)\n if isinstance(tf.distribute.get_strategy(), tf.distribute.MirroredStrategy) and isinstance(\n self.network, TFNetwork) and not isinstance(new_loader, DistributedDataset):\n # The default autoshard policy is file, changing it to data to avoid warning\n options = tf.data.Options()\n options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.DATA\n new_loader = new_loader.with_options(options)\n new_loader = tf.distribute.get_strategy().experimental_distribute_dataset(new_loader)\n return new_loader", "def _get_dataloader(samples, batch_size):\n print(\"Cogiendo dataloader\")\n return DataLoader(samples, shuffle=True, batch_size=batch_size)", "def get_dataloaders(self):\n raise NotImplementedError('You must provide the dataloaders for the datasets.')", "def data_type(self):\n return DataType.name(self.__data_type_id)", "def get_dataloader(dataset, batch_size, num_GPU):\n return torch.utils.data.DataLoader(dataset, batch_size=batch_size, \n shuffle=True, num_workers=0*num_GPU)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Concatenate two tasks' datasets
def concatenate_tasks(
    tasks,
    concat_train=True,
    concat_valid=True,
    concat_test=True,
):
    new_task = deepcopy(tasks[0])
    new_task._name = "+".join(task.name for task in tasks)
    if concat_train:
        new_task._train_data = ConcatDataset(
            [task.train_data for task in tasks])
    if concat_valid:
        new_task._valid_data = ConcatDataset(
            [task.valid_data for task in tasks])
    if concat_test:
        new_task._test_data = ConcatDataset([task.test_data for task in tasks])
    return new_task
[ "def _combine_datasets(data0, data1):\n data_new = xr.concat([data0, data1], dim=\"timesteps\")\n # Ensure time dimension is ordered\n data_new = data_new.loc[{\"timesteps\": data_new.timesteps.to_index().sort_values()}]\n\n return data_new", "def _combine_datasets(data0, data1):\n data_new = xr.concat([data0, data1], dim='t')\n # Ensure time dimension is ordered\n data_new = data_new.loc[{'t': data_new.t.to_pandas().index.sort_values()}]\n\n return data_new", "def concatenate_data():", "def concat(self: TAvalancheDataset, other: TAvalancheDataset) -> TAvalancheDataset:\n return self.__class__([self, other])", "def concatenateData(self):\n self.data = pd.concat([tr.data for tr in self.getTestRuns()])", "def Concat(datasets):\n\n dataset_num = len(datasets)\n dataset = datasets[0]\n for i in range(1, dataset_num):\n dataset.concatenate(datasets[i])\n return dataset", "def concat(cls, pipe1, pipe2):\n # pylint: disable=protected-access\n if pipe1.dataset != pipe2.dataset and pipe1.dataset is not None and pipe2.dataset is not None:\n raise ValueError(\"Cannot add pipelines with different datasets\")\n\n new_p1 = cls.from_pipeline(pipe1)\n new_p2 = cls.from_pipeline(pipe2)\n new_p1._action_list += new_p2._action_list[:]\n new_p1._variables = {**pipe1._variables, **pipe2._variables}\n new_p1.dataset = pipe1.dataset or pipe2.dataset\n return new_p1", "def concatenate_data(first_df: pd.DataFrame, second_df: pd.DataFrame) -> pd.DataFrame:\n df = pd.concat([first_df, second_df], axis=1).reset_index(drop=True)\n logger.info('Se han unido ambos set de datos.')\n return df", "def test_merge_datasets(self):\n disk.merge_datasets(self.input_datasets[0:2], self.output_dataset)\n self.assertEqual(4, len(self.output_dataset.metadata()))", "def __add__(self, other):\n train = copy.deepcopy(self.train)\n\n for img_path, pid, camid, dsetid in other.train:\n pid += self.num_train_pids\n camid += self.num_train_cams\n dsetid += self.num_datasets\n train.append((img_path, pid, camid, dsetid))\n\n ###################################\n # Note that\n # 1. set verbose=False to avoid unnecessary print\n # 2. 
set combineall=False because combineall would have been applied\n # if it was True for a specific dataset; setting it to True will\n # create new IDs that should have already been included\n ###################################\n if isinstance(train[0][0], str):\n return ImageDataset(\n train,\n self.query,\n self.gallery,\n transform=self.transform,\n mode=self.mode,\n combineall=False,\n verbose=False\n )\n else:\n return VideoDataset(\n train,\n self.query,\n self.gallery,\n transform=self.transform,\n mode=self.mode,\n combineall=False,\n verbose=False,\n seq_len=self.seq_len,\n sample_method=self.sample_method\n )", "def merge(self, datalist):\n pass", "def ConcatDF(train_set, test_set):\n return pd.concat([train_set, test_set], sort=True).reset_index(drop=True)", "def concatenate(self, dataset):\n self._params.data.append(dataset._params.data)\n self._params.options['concatenate'].update({self._params.step: None})\n self._params.step += 1\n return self", "def join_results(tasks, datasets):\n d = defaultdict(dict)\n buffers = (tasks, datasets)\n join_buffer = {}\n req_n = len(buffers)\n for buf in buffers:\n for elem in buf:\n tid = elem['taskid']\n d[tid].update(elem)\n n = join_buffer.get(tid, 0) + 1\n if n == req_n:\n yield d[tid]\n del join_buffer[tid]\n del d[tid]\n else:\n join_buffer[tid] = n\n\n # Flush records stuck in the buffer (not joined)\n for tid in d:\n yield d[tid]", "def merge(self, other):\n from .dataset import Dataset\n\n if other is None:\n return self.to_dataset()\n else:\n other_vars = getattr(other, 'variables', other)\n coords = merge_coords_without_align([self.variables, other_vars])\n return Dataset._from_vars_and_coord_names(coords, set(coords))", "def merge(datasets: Sequence[\"Dataset\"]) -> \"Dataset\":\n ds = datasets[0].copy()\n for dsj in datasets[1:]:\n ds = ds._append_items(dsj, copy=False)\n\n return ds", "def Zip(datasets):\n return tf.data.Dataset.zip(datasets)", "def test_concat(delete=True):\n df1 = make_df()\n df2 = pd.DataFrame([[1, 2, 3, 4, 'hi', 'there']], columns=df1.columns)\n job = fyrd.Job(merge_two, (df1, df2)).submit()\n df = job.get(cleanup=delete, delete_outfiles=delete)\n assert len(df) == 101", "def merge(self, dataset):\n def merge_data(source, dest):\n for key, value in source.items():\n if isinstance(value, dict):\n merge_data(value, dest.setdefault(key, {}))\n else:\n dest[key] = value\n return dest\n\n merge_data(dataset.data, self._data)\n\n for h in dataset.task_history:\n if h not in self._task_history:\n self._task_history.append(h)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The ValueError raised when the format is unsupported.
def _raise_format_error(self, name: str, format_str: str, source_format: str): raise ValueError(f"The '{ name }' should be { format_str }, rather than { source_format }")
[ "def _unknown_format(self, format):\n\n raise errors.NotAcceptable('unknown data format: ' + format)", "def test_format_model_exception_for_unsupported_format():\n with pytest.raises(Exception) as e:\n assert parser.format_model(model=None, type='bsr')\n assert str(e.value) == 'type bsr is not supported'", "def test_decode_raises_when_format_unknown(thing):\n with pytest.raises(ValueError):\n decode(thing)", "def test_invalid_format(self):\n with self.assertRaises(Exception) as e:\n Config.init(invalid_format)\n self.assertEqual(e.exception.message, \"[XOS-Config] The config format is wrong: Schema validation failed:\\n - Value '['I am', 'a yaml', 'but the', 'format is not', 'correct']' is not a dict. Value path: ''.\")", "def test_schema_invalid_format(self):\n bad_schema = [int, int, float, float, str]\n with self.assertRaisesRegexp(Exception, \"more than one char\"):\n self.context.frame.import_csv(self.dataset, bad_schema)", "def test_pack_method_with_invalid_data_type(self):\n\n datum = Datum(id=0, format=6)\n with self.assertRaises(RuntimeError):\n Radio.pack(datum)", "def test_format_not_supported(self):\n span = trace.get_current_span(\n FORMAT.extract(\n {\n \"traceparent\": [\n \"00-12345678901234567890123456789012-\"\n \"1234567890123456-00-residue\"\n ],\n \"tracestate\": [\"foo=1,bar=2,foo=3\"],\n },\n )\n )\n self.assertEqual(span.get_span_context(), trace.INVALID_SPAN_CONTEXT)", "def test_format_model_exception_for_unsupported_model():\n with pytest.raises(Exception) as e:\n assert parser.format_model(model=\"str_not_model\")\n assert str(e.value) == 'model str_not_model is not supported'", "def test_unknown_format(self):\n date_today = datetime.today()\n with self.assertRaises(DateTimeFormatError):\n datetimeformat(self.context, date_today, format=\"unknown\")", "def test_renamer_format_string_valueerror(self):\n with self.assertRaises(ValueError):\n self.renamer.format_string('')", "def test_formatstmt_validation():\n assert_raises(ValueError, FormatStmt, 3)", "def test_parseTimeInvalidFormat(self):\n self.assertRaises(ValueError, imap4.parseTime, u\"invalid\")", "def test_schema_invalid_type(self):\n bad_schema = -77\n with self.assertRaisesRegexp(Exception, \"more than one char\"):\n self.context.frame.import_csv(self.dataset, bad_schema)", "def test_invalid_format(api):\n\twith pytest.raises(top_stories.InvalidFormatType):\n\t\tapi.get_stories(\"home\", \"xml\")", "def test_invalid_reader_input_format(self):\n pandoc_default_files = [\n os.path.join(\n TEST_DEFAULT_FILES_PATH, \"invalid_reader_input_format.yaml\"\n )\n ]\n\n settings = get_settings(PANDOC_DEFAULT_FILES=pandoc_default_files)\n\n pandoc_reader = PandocReader(settings)\n source_path = os.path.join(TEST_CONTENT_PATH, \"valid_content.md\")\n\n with self.assertRaises(ValueError) as context_manager:\n pandoc_reader.read(source_path)\n\n message = str(context_manager.exception)\n self.assertEqual(\"Input type has to be a markdown variant.\", message)", "def test_unpack_method_with_invalid_data_type(self):\n\n packet = Radio.pack(Datum.Integer(id=0, timestamp=0, value=0))\n packet[5:6] = bytes([6])\n with self.assertRaises(RuntimeError):\n Radio.unpack(packet)", "def test_invalid_scale(self):\n self.assertRaises(self.TestException, geometry_parse, \"scale\", \"boom\", self.TestException)", "def _check_format(self, action):\n for letter in action[6:]:\n if not letter.isnumeric():\n raise ValueError()\n n = int(action[6:])\n if n < 1:\n raise ValueError()", "def __parse_error(self):\n def 
get_badvalue(data_string, data):\n elements = re.sub(r'[\\'\\]]', '', data_string).split('[')\n elements.pop(0) # Get rid of data as the first element\n value = None\n for k in elements:\n try:\n key = int(k)\n except ValueError:\n key = k\n if value is None:\n value = data[key]\n # if this fails, it's caught below\n return value\n try:\n self.badvalue = get_badvalue(str(self.error).split()[-1], self.config)\n except Exception:\n self.badvalue = '(could not determine)'" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The train iterator that executes a standard training flow per batch.
def _train_batch(self): # start epoch for i, (source, target) in enumerate(self.train_dataset): result = self._batch_iter(source, target, i) # yield yield result
[ "def train_batch_iter(self, batch_size, num_epochs):\n return self.batch_iter(0, batch_size, num_epochs)", "def _train_loop(self, iterator: DataIterator) -> None:\n epoch = 0\n iteration = 0\n\n # Initialize metrics.\n for metric in self.train_metrics.values():\n metric.reset()\n\n while True:\n epoch += 1\n self.status[\"epoch\"] = epoch\n\n self._fire_event(Event.Epoch, False)\n\n for batch in iterator:\n self._fire_event(Event.Iteration, False)\n iteration += 1\n self.status[\"iteration\"] = iteration\n\n return_dict = self._train_step(batch)\n\n self._train_tracker.add(len(batch))\n utils.update_metrics(return_dict, batch, self.train_metrics)\n\n self._fire_event(Event.Iteration, True)\n self._fire_event(Event.Epoch, True)\n self._train_tracker.reset()", "def iterative_train():\n train_generator, validation_generator = build_generators()\n model = build_model()\n model.fit_generator(\n train_generator,\n steps_per_epoch=nb_train_samples // batch_size,\n epochs=epochs,\n validation_data=validation_generator,\n validation_steps=nb_validation_samples // batch_size, callbacks=make_callbacks())", "def train_data_iterator(self):\n def iterator():\n for period in self._train_periods:\n yield self.get_train_period(period)\n return iterator", "def train(self, num_batches: int):", "def _train(trainer, train_data, batcher_fn, total_batch_steps = 5, seed = 1):\n for i in range(total_batch_steps):\n torch.manual_seed(seed)\n set_seed(seed)\n data, targets = batcher_fn(train_data, i*35)\n trainer.train_step(data, targets)", "def train_iteration(self, train_sample, sample_idx, epoch_idx, total_losses, current_iter, pbar_train, do_evaluation=False):\n images, metadata = train_sample\n data_batch = images\n\n losses, outputs, metrics = self.model.run_train_iter(data_batch=data_batch, epoch=epoch_idx, do_evaluation=do_evaluation)\n\n train_output_update = self.build_loss_summary_string(losses, metrics)\n\n pbar_train.update(1)\n pbar_train.set_description(\"training phase {} -> {}\".format(self.epoch, train_output_update))\n\n current_iter += 1\n\n return losses, outputs, metrics, current_iter", "def train(self, dataset):\n\n self.first = False\n rng = self.rng\n if not is_stochastic(self.train_iteration_mode):\n rng = None\n\n data_specs = self.cost.get_data_specs(self.model)\n\n # The iterator should be built from flat data specs, so it returns\n # flat, non-redundent tuples of data.\n mapping = DataSpecsMapping(data_specs)\n space_tuple = mapping.flatten(data_specs[0], return_tuple=True)\n source_tuple = mapping.flatten(data_specs[1], return_tuple=True)\n if len(space_tuple) == 0:\n # No data will be returned by the iterator, and it is impossible\n # to know the size of the actual batch.\n # It is not decided yet what the right thing to do should be.\n raise NotImplementedError(\"Unable to train with SGD, because \"\n \"the cost does not actually use data from the data set. 
\"\n \"data_specs: %s\" % str(data_specs))\n flat_data_specs = (CompositeSpace(space_tuple), source_tuple)\n\n iterator = dataset.iterator(mode=self.train_iteration_mode,\n batch_size=self.batch_size,\n data_specs=flat_data_specs, return_tuple=True,\n rng = rng, num_batches = self.batches_per_iter)\n\n \"\"\"\n if not hasattr(self, 'batch_count'):\n self.batch_count=0\n self.param_records=[]\n print \"Going into first batch\"\n param_init = self.model.get_param_values()\n \"\"\"\n \n\n on_load_batch = self.on_load_batch\n for batch in iterator:\n for callback in on_load_batch:\n callback(*batch)\n self.sgd_update(*batch)\n # iterator might return a smaller batch if dataset size\n # isn't divisible by batch_size\n # Note: if data_specs[0] is a NullSpace, there is no way to know\n # how many examples would actually have been in the batch,\n # since it was empty, so actual_batch_size would be reported as 0.\n actual_batch_size = flat_data_specs[0].np_batch_size(batch)\n self.monitor.report_batch(actual_batch_size)\n for callback in self.update_callbacks:\n callback(self)\n\n\n \"\"\"\n param_first = self.model.get_param_values()\n with log_timing(log, \"Saving initial param and first param\"):\n serial.save(\"param_init_first.pkl\", (param_init, param_first))\n sys.exit(0)\n # Now, we record the weights every 50 minibatches\n # So 10 records per epoch\n self.batch_count+=1\n if self.batch_count%50==0:\n self.param_records.append(self.model.get_param_values())\n # for every 2 epochs, we save the param_records\n if self.batch_count%(50*20)==0:\n record_path = './mytest/'+str(self.batch_count)+'.pkl'\n print \"We are now about to same lots of param records\"\n with log_timing(log, 'Saving param records to'+record_path):\n serial.save(record_path, self.param_records)\n self.param_records=[]\n \"\"\"", "def _run_one_training_iteration(self) -> Tuple[ResultDict, \"TrainIterCtx\"]:\n # In case we are training (in a thread) parallel to evaluation,\n # we may have to re-enable eager mode here (gets disabled in the\n # thread).\n if self.config.get(\"framework\") == \"tf2\" and not tf.executing_eagerly():\n tf1.enable_eager_execution()\n\n results = None\n # Create a step context ...\n with TrainIterCtx(algo=self) as train_iter_ctx:\n # .. so we can query it whether we should stop the iteration loop (e.g.\n # when we have reached `min_time_s_per_iteration`).\n while not train_iter_ctx.should_stop(results):\n # Try to train one step.\n # TODO (avnishn): Remove the execution plan API by q1 2023\n with self._timers[TRAINING_ITERATION_TIMER]:\n if self.config._disable_execution_plan_api:\n results = self.training_step()\n else:\n results = next(self.train_exec_impl)\n\n # With training step done. 
Try to bring failed workers back.\n self.restore_workers(self.workers)\n\n return results, train_iter_ctx", "def train(self):\n for x, y in self.get_data_and_monitor(self):\n graph = self.run(x, y)\n graph.backprop()\n graph.step(self.learning_rate)", "def train_loop(self):\n pass", "def train(self):\n self.run_epoch()", "def _train_one_epoch(self):\n\n for inp in self.training_set.input:\n inp_with_noise = self._add_noise(inp)\n\n self._feed_forward(inp_with_noise)\n self._compute_error(inp)\n self._update_weights()", "def train(self):\n for doc, label in zip(self.train_docs(), self.train_labels()):\n yield doc, label", "def train_seed_iterator(self) -> ContextManager[Iterable[InitialStateType]] | Iterable[InitialStateType]:\n raise SeedIteratorNotAvailable(\"Seed iterator for training is not available.\")", "def train(self, epochs):\n for _ in range(epochs):\n self.feedforward()\n self.backpropagation()", "def _train_loop_context(self):\n\n self._inside_adanet_training_loop = True\n yield\n self._inside_adanet_training_loop = False", "def trainGenerator(self,):\n return tf.data.Dataset.from_generator(self.trainData, \\\n output_types=(tf.float32, tf.float32, tf.float32), \\\n output_shapes=(tf.TensorShape(self.config_model[\"input_shape\"]), \\\n tf.TensorShape(list(self.headoutput_shape[1:4]) + \\\n [len(self.anchor_boxes), \\\n 7+len(self.config_data[\"all_classes\"])]), \\\n tf.TensorShape([self.config_data[\"max_boxes_per_frame\"], 7]) \\\n ), )", "def self_play_iterator_creator(hparams, num_workers, jobid):\n vocab_table = vocab_utils.create_vocab_tables(hparams.vocab_file)[0]\n data_dataset = tf.data.TextLineDataset(hparams.train_data)\n kb_dataset = tf.data.TextLineDataset(hparams.train_kb)\n skip_count_placeholder = tf.placeholder(shape=(), dtype=tf.int64)\n # this is the actual iterator for supervised training\n train_iterator = iterator_utils.get_iterator(\n data_dataset,\n kb_dataset,\n vocab_table,\n batch_size=hparams.batch_size,\n t1=hparams.t1.encode(),\n t2=hparams.t2.encode(),\n eod=hparams.eod,\n len_action=hparams.len_action,\n random_seed=hparams.random_seed,\n num_buckets=hparams.num_buckets,\n max_dialogue_len=hparams.max_dialogue_len,\n skip_count=skip_count_placeholder,\n num_shards=num_workers,\n shard_index=jobid)\n\n # this is the actual iterator for self_play_fulltext_iterator\n data_placeholder = tf.placeholder(\n shape=[None], dtype=tf.string, name=\"src_ph\")\n kb_placeholder = tf.placeholder(shape=[None], dtype=tf.string, name=\"kb_ph\")\n batch_size_placeholder = tf.placeholder(\n shape=[], dtype=tf.int64, name=\"bs_ph\")\n\n dataset_data = tf.data.Dataset.from_tensor_slices(data_placeholder)\n kb_dataset = tf.data.Dataset.from_tensor_slices(kb_placeholder)\n\n self_play_fulltext_iterator = iterator_utils.get_infer_iterator(\n dataset_data,\n kb_dataset,\n vocab_table,\n batch_size=batch_size_placeholder,\n eod=hparams.eod,\n len_action=hparams.len_action,\n self_play=True)\n\n # this is the actual iterator for self_play_structured_iterator\n self_play_structured_iterator = tf.data.Iterator.from_structure(\n tf.data.get_output_types(self_play_fulltext_iterator),\n tf.data.get_output_shapes(self_play_fulltext_iterator))\n iterators = [\n train_iterator, self_play_fulltext_iterator, self_play_structured_iterator\n ]\n\n # this is the list of placeholders\n placeholders = [\n data_placeholder, kb_placeholder, batch_size_placeholder,\n skip_count_placeholder\n ]\n return iterators, placeholders" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reset the process of training, which includes the loss meter reset, epoch reset and model's weights reset.
def reset_train(self): self.model.apply(self._reset_weights) self.epoch_loss.reset() self.epoch = 0 del self.batch_process self.batch_process = None
[ "def reset(self):\n checkpoint = torch.load(\n 'model_lr_finder.pth.tar',\n map_location=self.device)\n self.model.load_state_dict(checkpoint['state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n self.model.to(self.device)\n self.model.train()", "def reset(self):\n self.epochs = 0\n self.num_classes = 2 # Minimum of 2 classes\n self.experts = [\n self._construct_new_expert()\n ]", "def reset(self):\n self.loss = []\n self.funcargs = []\n self.nSteps = 0 \n self.converged = False", "def reset(self):\n self.reset_upper_confidence_bounds() ## for UCB\n self.reset_sample_rewards() ## for TS\n self.reset_regrets()\n self.reset_actions()\n self.reset_A_inv()\n self.reset_grad_approx()\n self.iteration = 0", "def reset(self):\n self.loss = 0\n self.cnt = 0", "def _reset(self):\n self.best = self.mode_worse\n self.cooldown_counter = 0\n self.num_bad_epochs = 0", "def reset(self):\n\t\ttf.reset_default_graph()\n\t\tdel self.train_x_state, self.train_y_state\n\t\tdel self.test_x_state, self.test_y_state", "def _reset_metrics(self):\n self.train_loss = []\n self.val_loss = []", "def reset_training(self):\n self.policy_optim = Adam(self.policy.parameters(), lr=self.lr)\n self.q_optim = Adam(self.q_net.parameters(), lr=self.lr)\n\n self.alpha_optim = Adam([self.log_alpha], lr=1e-2)", "def reset(self):\r\n self.model.load_state_dict(self.model_state)\r\n self.model.to(self.model_device)\r\n self.optimizer.load_state_dict(self.optimizer_state)", "def reset(self) -> None:\n self.best = self.mode_worse\n self.cooldown_counter = 0\n self.num_bad_epochs = 0", "def retrain(self):\n self.training = True\n self.updating = False\n self.enable_gradients(False)\n self.update_noise_distributions()", "def reset(self):\r\n self.reset_proposal_work()\r\n self.reset_shadow_work()\r\n self.reset_ghmc_statistics()", "def reset(self):\n \n s = self\n s.step_counter = 0\n \n # TODO: initialize first layer activations here, and not everywhere else\n # self.model.initialize_local_vars()\n # self.model.initialize_global_vars()\n\n ops = []\n\n for var in self.model.trainable_vars:\n if self.needs_correction(var):\n A_svd = s[var].A.svd\n B2_svd = s[var].B2.svd \n ops.extend(A_svd.init_ops)\n ops.extend(B2_svd.init_ops)\n ops.append(s[var].A.cov.initializer)\n ops.append(s[var].B2.cov.initializer)\n\n # in new TensorFlow this breaks, probably because of\n # https://github.com/tensorflow/tensorflow/commit/07adc2ea910de715d31e16a019fcbcccb575e931\n # sometimes get \"need to feed\" placeholder error\n # sometimes do not get this error, but spend two minutes inside\n # _build_initializer_expr\n s.run(ops)", "def reset(self):\n\n # Reset everything\n self._layers = OrderedDict()\n self._connections = defaultdict(list)\n self._learning_rules = dict()", "def _reset(self):\n self.classifier.reset()", "def reset(self):\n\t\tself.logs = DotMap()\n\t\tself.logs.forward.training_losses = []\n\t\tself.logs.forward.validation_losses = []\n\t\tself.logs.inverse.training_losses = []\n\t\tself.logs.inverse.validation_losses = []\n\t\tself.logs.consistency.training_losses = []\n\t\tself.logs.consistency.validation_losses = []", "def reset(self):\n\n self.scaler = None\n self.isFitted = False\n self.__create_scaler()", "def reset(self):\n self.batch_index = 0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
UiView of sett module
def ui_view(request): return render(request, 'sett_ui_view.html', {})
[ "def get_ui(self, cfg, id=None):", "def build_ui(self):\n\n pass", "def configure_views(self):", "def getWidget(self):", "def setup_additional_ui(self):\n\n #set title\n self.setWindowTitle(self.title)\n\n #set question\n self.lbl_question.setText(self.question)\n\n #set_remember_choice\n self.set_remember_choice(self.chkbx_remember_choice.isChecked())", "def ui(self):\n return self._ui", "def connect_ui(self):\n \n pass", "def additional_ui(self):\n return _UI_DEF", "def gui(self):\n return gui", "def ui(self, ui):\n\n self._ui = ui", "def fset(self, type):\r\n arg_str = p2e._util._convert_args_to_string(\"set.radiance.viewtype\", \r\n type)\r\n p2e._app.Exec(arg_str)", "def on_action_set_view(self, content):\n self._view = content['view']\n self.refresh_traits_widget()", "def on_show_view(self):\n self.setup()", "def ui_definition(self):\n return self.ui_def", "def show():\n from siding.addons import ui\n ui.show()", "def ui_view(request):\n\treturn render(request, 'my_ui_view.html', {})", "def widget(self, request, group):", "def _CreateAdditionalUIControls( self ):\n return None", "def create_widgets(self):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calls open file dialog, possible to choose only '.xlsx .xls .xlsm .xlsb'
def callDialog(self): self.pathTuple = filedialog.askopenfilenames(filetypes=[("Excel files", ".xlsx .xls .xlsm .xlsb")]) self.fileNames = [basename(path.abspath(name)) for name in self.pathTuple]
[ "def askopenfilenames(**options):\n options[\"multiple\"]=1\n return Open(**options).show()", "def open_file_dialog(self, title, initial_directory=None, file_types=None, multiselect=False):\n return self._impl.open_file_dialog(title, initial_directory, file_types, multiselect)", "def fileDialog(*args, application: bool=True, defaultFileName: AnyStr=\"\", directoryMask:\n AnyStr=\"\", mode: int=0, title: AnyStr=\"\", **kwargs)->AnyStr:\n pass", "def runOpen(self):\n filename, filtr = QFileDialog.getOpenFileName(self)\n if filename == u'':\n return\n\n self.openRun(filename)", "def file_open_dialog(message, wildcard, style=0, defaultDir=os.getcwd(), defaultFile=''):\n style = style | wx.FD_OPEN | wx.FD_CHANGE_DIR\n file_paths = file_dialog(message, wildcard, style, defaultDir, defaultFile)\n if style & wx.FD_MULTIPLE:\n return file_paths\n return next(iter(file_paths), None)", "def open(self):\n dialog = FileDialog(parent=self.window.control, wildcard=\"*.py\")\n if dialog.open() == OK:\n self._open_file(dialog.path)", "def choose_file():\r\n import tkinter\r\n from tkinter import filedialog\r\n\r\n root_window = tkinter.Tk()\r\n root_window.withdraw()\r\n\r\n return filedialog.askopenfilename()", "def openFiles(self):\n \n dialog_caption = self.tr(\"Open Files\")\n file_names = QFileDialog.getOpenFileNames(caption=dialog_caption,\n filter=\"python script (*py)\")\n if len(file_names) == 0:\n return\n else:\n selection = self._askForOpenTarget()\n \n if selection == 2:\n return\n elif selection == 0:\n self.openFileByName(file_names[0], True)\n file_names = file_names[1:]\n \n for file_name in file_names:\n self.openFileByName(str(file_name))", "def request_file():\n \n from tkinter import Tk\n from tkinter.filedialog import askopenfilename\n \n # Make a top-level instance and hide from user.\n root = Tk()\n root.withdraw()\n\n # Make it almost invisible - no decorations, 0 size, top left corner.\n root.overrideredirect(True)\n root.geometry('0x0+0+0')\n\n # Show window again and lift it to top so it can get focus, otherwise dialogs will end up behind the terminal.\n root.deiconify()\n root.lift()\n root.focus_force()\n\n # Show an \"Open\" dialog box and return the path to the selected file\n file_path = askopenfilename(initialdir='./IR_Datasets/',\n title='Excel to Read',\n filetypes=(('New Excel', '*xlsx'), ('Old Excel', '*.xls')),\n parent=root)\n\n # Get rid of the top-level instance once to make it actually invisible.\n root.destroy()\n \n return file_path", "def FileOpenDialog( message, wildcard, style=0, defaultDir=os.getcwd(), defaultFile='' ):\n style = style | wx.OPEN | wx.CHANGE_DIR\n return FileDialog( message, wildcard, style, defaultDir, defaultFile )", "def msg_open(self,msg):\r\n filepaths = msg.get_data()\r\n if filepaths is ():\r\n #Create the file open dialog.\r\n filepaths,index = DoFileDialog(self.frame, wildcard = \"Python source (*.py,*.pyw)|*.py;*.pyw|All files (*,*.*)|*.*;*\")\r\n if filepaths==None:\r\n return\r\n\r\n if (filepaths is not None) and (filepaths!=[]):\r\n #open the file requested\r\n for path in filepaths:\r\n self.frame.notebook.OpenFile(path)\r\n self.frame.Show()\r\n self.frame.Raise()", "def file_popup(file) -> str:\n layout = [\n [sg.Text(f\"Select the action to perform on\\n\\n{file}\")],\n [sg.Button(\"Open File\", key=\"-APP-\"),\n sg.Button(\"Open in File Explorer\", key=\"-EXPLORER-\"),\n sg.Button(\"Delete File\", key=\"-DEl-\",\n button_color=(\"Black\", \"OrangeRed\"))]\n ]\n window = sg.Window(\"Open selected file.\", layout, 
finalize=True)\n button, value = window.read()\n window.close()\n del window\n return button", "def openFile (self, e=None):\n # print \"hello OPENFILE !!!\" \n if len(self.filesel_list) == 1:\n try:\n fname = self.filesel_list.pop(0)\n os.system('open ' + fname) # self.filesel_list.pop(0))\n except:\n self.cprint (\"Could not open file %s for some reason!\" % (fname))\n elif len(self.filesel_list) > 1:\n self.cprint(\"Cannot open multiple files. Please select ONLY one\\n\")", "def get_file_path():\n root = tk.Tk()\n root.withdraw()\n file_path = filedialog.askopenfilename(filetypes=[(\"Excel file\", \"*.xlsx\")])\n return file_path", "def launch_workbook(self):\n another_workbook = filedialog.askopenfile().name\n startfile(another_workbook)", "def standard_file_open(self):\n callback = self.callback_file_open\n if not callback:\n callback = self.standard_load_file\n\n dir = str(CONFIG.working_dir)\n\n filter = CONFIG.file_open_filter\n if not filter:\n filter = \"All Files (*)\"\n\n path, _ = QFileDialog.getOpenFileName(\n self,\n \"Open File\",\n directory=dir,\n filter=filter,\n )\n callback(path)", "def ui_open(*files):\r\n if files:\r\n osname = os.uname()[0].lower()\r\n if not osname in _OPENER_BY_OS:\r\n print('Sorry, open currently not supported for ' + osname)\r\n else:\r\n _OPENER_BY_OS[osname](files)", "def ui_open(*files):\n if files:\n osname = os.uname()[0].lower()\n if not osname in _OPENER_BY_OS:\n print('Sorry, open currently not supported for ' + osname)\n else:\n _OPENER_BY_OS[osname](files)", "def _open_files(view, sel):\n schema, word = get_names(view, sel)\n file_name = word + '.sql'\n path = [schema, None, file_name]\n files = find_file(view.window().folders(), path)\n if len(files) > 5:\n print('something is wrong; too many files; aborting')\n return\n for f in files:\n view.window().open_file(f)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns tuple of paths stored at class instance
def getPaths(self): return self.pathTuple
[ "def path_for_class(cls) -> List[str]:\r\n return f\"{cls.__module__}.{cls.__name__}\".split(\".\")", "def get_instance_paths(self, node):\r\n return self._send({'name': 'getInstancePaths', 'args': [node]})", "def generate(self):\n for stage in self.stages:\n if not hasattr(self, stage):\n raise NotImplementedError(f\"The stage {stage} is not implemented for this class\")\n self.paths[stage] = getattr(self, stage)\n return self.paths", "def get_paths(self):\n return (self.world_fpath, self.subj_fpath, self.peds_fpath)", "def splitpath(self):\n parent, child = os.path.split(self)\n return self.__class__(parent), child", "def path(self):\n if bool(self._path_parameters):\n payload = {inflection.underscore(k): v for k, v, in self._path_parameters.items()}\n else:\n payload = dict()\n PathTuple = namedtuple('PathTuple', sorted(payload))\n the_tuple = PathTuple(**payload)\n return the_tuple", "def paths(self):\r\n if not hasattr(self, '_paths'):\r\n paths = []\r\n for finder in self.finders:\r\n if hasattr(finder, 'paths'):\r\n paths.extend(finder.paths)\r\n self._paths = paths\r\n return self._paths", "def paths(self):\n if not hasattr(self, '_paths'):\n paths = []\n for finder in self.finders:\n if hasattr(finder, 'paths'):\n paths.extend(finder.paths)\n self._paths = paths\n return self._paths", "def getTLDPathsTuple(self, basepath):\n return (basepath, )", "def path(self) -> Tuple[os.PathLike, os.PathLike]:\n return self.__in_path, self.__out_path", "def _get_paths():\n paths = [\n '/'\n ]\n return paths", "def path(self):\n\t\treturn copy.deepcopy(self._path)", "def get_paths():\n pathlist = []\n for name in _hierarchy:\n fullpath = build_path(get_swag(name))\n controllers = get_controllers(name)\n wsme_defs = get_wsme_defs(name)\n paths = get_controller_paths(controllers, wsme_defs)\n for path in paths:\n ptuple = (path_join(fullpath, path[0]), path[1])\n pathlist.append(ptuple)\n return pathlist", "def _get_paths_configured(self):\n return self.__paths_configured", "def class_path(model, variables):\n return None", "def warping_paths(self):\n return self.paths", "def paths(self):\n rc = []\n for pg in self.path_groups:\n rc.extend(pg.paths)\n return rc", "def get_val_paths(self):\n steps = self.test_structure_to_steps()\n paths = []\n\n for s in steps:\n current_paths = []\n last_class = s[-1]\n for i in range(last_class):\n path = self.base_folder_path + \"/data/validation/val_\" + str(i + 1) + \".tfrecord\"\n current_paths.append(path)\n\n paths.append(current_paths)\n\n return paths", "def get_handlers(cls):\n svs = []\n paths = cls.get_paths()\n for p in paths:\n s = re.sub(r\"(?<={)\\w+}\", \".*\", p).replace(\"{\", \"\")\n o = re.sub(r\"(?<=<)\\w+\", \"\", s).replace(\"<\", \"\").replace(\">\", \"\").replace(\"&\", \"\").replace(\"?\", \"\")\n svs.append((o, cls))\n\n return svs" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Triggers a manual build of the project.
async def trigger_build(self, *, branch=None, message=None):
[ "def trigger_build(self, postdata):\n pass", "def on_build(self, project, name):\n pass", "def build(self):\n logging.info('Build %s of %s (%s)', self._build, self.name,\n self.working_dir)\n self._build += 1\n self._event = None\n status = self._builder.execute_script(self.working_dir, self.script)\n self._show_notification(status)", "def force(self, **kwargs):\r\n log.info(\"Forcing a build\")\r\n self._force = True", "def monitor_project_build(self, project_name):\n pass", "def force(self, **kwargs):\n log.info(\"Forcing a build\")\n self._force = True", "def run_build(self):\n url = 'https://ci.appveyor.com/api/builds'\n headers = {'Authorization': 'Bearer {0.api_token}'.format(self),\n 'Content-type': 'application/json'}\n data = {\n 'accountName': self.username,\n 'projectSlug': self.project,\n 'branch': 'default',\n 'environmentVariables': {}\n }\n response = requests.post(url, data=json.dumps(data), headers=headers)\n response.raise_for_status()\n info = self.json_decoder.decode(response.content.decode('utf-8'))\n print('Started build#{build_id}.'.format(build_id=info['buildId']))", "def build_trigger(ctx, build_type_id, branch, comment, parameter, agent_id,\n open_build_log, wait_for_run):\n parameters = dict([p.split('=', 1) for p in parameter])\n data = ctx.obj.trigger_build(\n build_type_id=build_type_id,\n branch=branch,\n comment=comment,\n parameters=parameters,\n agent_id=agent_id)\n build_id = data['id']\n ctx.invoke(build_queue_show, args=[build_id])\n if open_build_log:\n url = data['webUrl'] + '&tab=buildLog'\n webbrowser.open(url)\n if not wait_for_run:\n return\n while data['state'] == 'queued':\n data = ctx.obj.get_queued_build_by_build_id(build_id)\n click.echo('state: %s' % data['state'])\n time.sleep(1)\n ctx.invoke(build_queue_show, args=[build_id])", "def force(self):\n print \"Forcing a build by touching files\"\n os.chdir(self.version.project.conf_dir(self.version.slug))\n os.system('touch * && touch */*')", "def autoBuild (self, event = None):\r\n if self.autobuildmenuitem.IsChecked():\r\n self.autobuildtimer.Start(5000)\r\n self.autoBuildStart();\r\n else:\r\n self.autobuildtimer.Stop()", "def do_build(self):\n # In our business scenarios, job is always built with parameters.\n # According to jenkins API, to build with parameters, the second\n # parameter of build_job() is needed, or error will occur.\n with self.get_handler() as handler:\n handler.build_job(self.name,\n parameters={'delay': '0sec'})", "def build_step(self):\n run_cmd('./compile.sh', log_all=True, simple=True, log_ok=True)", "def build(config, project, version):\n if config.verbose:\n click.echo('Starting build for {}:{}...'.format(project, version))\n if not config.mason.build(project, version):\n exit('Unable to start build')", "def test_build(self):\n self.createFakeSphinxProject()\n self.builder.build(self.sphinxDir)\n self.verifyBuilt()", "def runBuild(self):\n\n os.chdir(self.path_of_module)\n status, output = commands.getstatusoutput( \\\n \"make -j9 \" + \\\n self.architecture_option + \\\n \" \" + \\\n self.cross_compile_option + \\\n \" \" + \\\n self.makefile_compile_rule)\n if status:\n print \"\\nCannot compile, exit script!\\n\" + output + \"\\n\"\n sys.exit()", "def autoBuildTick (self, event = None):\r\n for pathname, oldmtime in self.autobuildfiles.iteritems():\r\n newmtime = os.stat(pathname).st_mtime\r\n if newmtime != oldmtime:\r\n #print \"Auto rebuild triggered by: \", pathname\r\n self.autobuildfiles[pathname] = newmtime\r\n self.rebuild()\r\n break", "def 
onBuildButtonClick(self, event):\n status = self.build(BUILD_BUTTON) # doing a build from a button click event\n if (status == True):\n dlg = wx.MessageDialog(self, \"Build Completed\", \"Build Status\", wx.OK|wx.ICON_INFORMATION)\n dlg.ShowModal()\n dlg.Destroy()\n else:\n dlg = wx.MessageDialog(self, \"Build Failed\", \"Build Status\", wx.OK|wx.ICON_INFORMATION)\n dlg.ShowModal()\n dlg.Destroy()\n currentList = pdef.getCurrentListObject()\n list.restoreListPanel(currentList) # Update revocation number", "def buildCM():\n CodeGenerator.writeCode()\n os.chdir(projectPath) # + '/CrackGenCM.xcodeproj'\n # os.system(\"xcodebuild build -quiet -project \" + projectPath + '/CrackGenCM.xcodeproj')\n os.system(\"xcodebuild -quiet -target \" + \"CrackGenCM\")\n CodeGenerator.clearCode()", "def run(self):\n\t\tself.sphinx_instance.build(force_all=False, filenames=None)\n\t\treturn None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets a specific number of builds from the project.
async def get_builds(self, *, quantity=10):
[ "def get_build_ids():\n global SESSION\n\n if not ARGS.buildtype_id:\n print(\"[error] missing --buildtype-id argument\")\n sys.exit(1)\n\n # if --build-id is not set, get the latest one.\n if ARGS.build_id:\n build_id = ARGS.build_id\n else:\n #http://tc.corp.local/app/rest/builds?locator=buildType:TcTests_mdfs,running:any,count:1\n url = '{}/httpAuth/app/rest/{}/builds?locator=buildType:{},running:any,count:1'.format(ARGS.teamcity_url, ARGS.rest_api_version, ARGS.buildtype_id)\n resp = SESSION.get(url, auth=(ARGS.username, ARGS.password), timeout=ARGS.timeout)\n fail_on_response_error(resp)\n xml = etree.fromstring(resp.content)\n build_id = xml.xpath('/builds/build/@id')[0]\n\n\n ids = []\n #http://tc.corp.local/app/rest/builds?locator=buildType:TcTests_mdfs,running:any,untilBuild:(id:38736)&fields=build(id,status,href)\n url = ('{url}/httpAuth/app/rest/{api_ver}/builds?'\n 'locator=buildType:{buildTypeid},'\n 'running:any,untilBuild:(id:{id})'\n '&count={count}&fields=build(id,status,href)').format(\n url=ARGS.teamcity_url,\n api_ver=ARGS.rest_api_version,\n buildTypeid=ARGS.buildtype_id,\n id=build_id,\n count=ARGS.max_depth)\n\n resp = SESSION.get(url, auth=(ARGS.username, ARGS.password), timeout=ARGS.timeout)\n fail_on_response_error(resp)\n\n xml = etree.fromstring(resp.content)\n\n for build in xml.xpath('/builds/build'):\n ids.append({\n 'id':build.xpath('@id')[0],\n 'status':build.xpath('@status')[0],\n 'href':build.xpath('@href')[0]\n })\n\n # determine element index of the oldest failed build and before the success one\n i = 1\n if not ARGS.no_failed_builds:\n for build in ids[1:]:\n if build['status'] != 'SUCCESS':\n i += 1\n else:\n break\n\n if ARGS.debug:\n print(etree.tostring(xml, pretty_print=True).decode())\n print(\"[debug] will use builds: {}\".format(ids[:i]))\n return ids[:i]", "def get_first_n_built_chunk_ids(self, number):\n try:\n conn = psycopg2.connect(\"dbname='{0}'\".format(DATABASE))\n cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n cur.execute(\"SELECT chunk_id FROM index_builder WHERE ib_task = 'built' ORDER BY index LIMIT %s;\", (number,))\n results = cur.fetchall()\n cur.close()\n return results\n except Exception as e:\n print(e)", "def Builds():\n return builds", "def getPendingBuilds():", "def get_builds_sorted_by_number(self, project):\n jenkins_builds_unsorted = self.get_builds_obj(namespace=project)\n jenkins_builds_sorted = [0] * self.num_of_builds\n for build in jenkins_builds_unsorted:\n build_num = int(re.sub(\"[^0-9]\", \"\", build.name))\n jenkins_builds_sorted[build_num - 1] = build\n return jenkins_builds_sorted", "def num_projects(self):\n return self._num_projects", "def count(search_params=None):\n return count_entities(\"projects\", search_params)", "def build_list():\n build_list = history.fetch_n_last(10)\n return render_template('build_list.html', build_list=build_list)", "def getBuildNumber(self, requestContext):\n return self.service.build() or \"\"", "def concurrent_build_limit(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"concurrent_build_limit\")", "def next(self):\n cap = self._job.lastBuild.number\n num = self.number + 1\n while num <= cap:\n try:\n return self._job.build(num)\n except:\n pass\n num += 1\n return None", "def concurrent(self, project):\n limit_reached = False\n query = Q(\n project=project,\n )\n\n if project.main_language_project:\n # Project is a translation, counts all builds of all the translations\n query |= 
Q(project__main_language_project=project.main_language_project)\n query |= Q(project__slug=project.main_language_project.slug)\n\n elif project.translations.exists():\n # The project has translations, counts their builds as well\n query |= Q(project__in=project.translations.all())\n\n # If the project belongs to an organization, count all the projects\n # from this organization as well\n organization = project.organizations.first()\n if organization:\n query |= Q(project__in=organization.projects.all())\n\n # Limit builds to 5 hours ago to speed up the query\n query &= Q(date__gt=timezone.now() - datetime.timedelta(hours=5))\n\n concurrent = (\n (\n self.filter(query).exclude(\n state__in=[\n BUILD_STATE_TRIGGERED,\n BUILD_STATE_FINISHED,\n BUILD_STATE_CANCELLED,\n ]\n )\n )\n .distinct()\n .count()\n )\n\n max_concurrent = Project.objects.max_concurrent_builds(project)\n log.info(\n \"Concurrent builds.\",\n project_slug=project.slug,\n concurrent=concurrent,\n max_concurrent=max_concurrent,\n )\n if concurrent >= max_concurrent:\n limit_reached = True\n return (limit_reached, concurrent, max_concurrent)", "def _fetch_latest_builds(changelist, buildbucket_host, latest_patchset=None):\n assert buildbucket_host\n assert changelist.GetIssue(), 'CL must be uploaded first'\n assert changelist.GetCodereviewServer(), 'CL must be uploaded first'\n if latest_patchset is None:\n assert changelist.GetMostRecentPatchset()\n ps = changelist.GetMostRecentPatchset()\n else:\n assert latest_patchset > 0, latest_patchset\n ps = latest_patchset\n\n min_ps = max(1, ps - 5)\n while ps >= min_ps:\n builds = _fetch_tryjobs(changelist, buildbucket_host, patchset=ps)\n if len(builds):\n return builds, ps\n ps -= 1\n return [], 0", "def test_get_build_number(self):\n pass", "def get_latest_build(self):\n # Retrieve last sanity-checked build number (could be 0)\n self.get_last_sanity()\n\n # * List all build numbers for this version. Note this may include\n # builds for other versions, since all versions for a given\n # release share a build directory.\n # * Ignore builds above 50000, which are toy builds\n\n builds = [int(x) for x in os.listdir(self.ver_dir)\n if x.isdigit() and int(x) > self.last_bld and int(x) < 50000]\n builds.sort()\n\n # Check each build after last sanity-checked build\n bld_num = self.last_bld\n for build in builds:\n print (\"Checking build \" + str(build))\n if self.check_build(build):\n bld_num = build\n print(\"bld_num is now \" + str(bld_num))\n return bld_num", "def build_number(self):\n try:\n return self.config_dict.get_config('build_number')\n except ZigZagConfigError:\n pass # this is not a required property", "def test_returns_limit_projects(self):\n # Arrange\n # Create and arrange test projects\n self.arrange_projects()\n # Act\n response = self.client.get(\n f\"{self.url}?limit=1\", headers={\"Authorization\": self.user_session_token}\n )\n # Assert\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.json[\"results\"]), 1)", "def nextBuildNumber(self):\n return self._info['lastBuildNumber']", "def get_build_number():\n try:\n return int(os.getenv(*legion.config.BUILD_NUMBER))\n except ValueError:\n raise Exception('Cannot parse build number as integer')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return `ret_value` `times` times. If generator will receive some value from outside, update `ret_value`
def exercise_gen(ret_val, times):
[ "def random_values():\n while True:\n yield random()", "def constant_generator(value):\n\n while True:\n yield value", "def repeat(value: T, times: int) -> List[T]:\n return [value] * times", "def repeat(cls, value=None, repeat_count=None, scheduler=None):\n\n scheduler = scheduler or current_thread_scheduler\n if repeat_count == -1:\n repeat_count = None\n \n xs = cls.return_value(value, scheduler)\n ret = xs.repeat(repeat_count)\n return ret", "def counter(generator):\n def factory(*args, **kwargs):\n d = defaultdict(int)\n sink = generator(d, *args, **kwargs)\n sink.next()\n return d, sink.send\n return factory", "async def async_generator() -> typing.Generator[float, None, None]:\n for i in range(10):\n await asyncio.sleep(1)\n yield 10 * random.random()", "def count_generator(start, step):\r\n current = start\r\n yield current\r\n \r\n while True:\r\n current += step\r\n yield current", "def timeit(self, number=default_number):\n it = itertools.repeat(None, number)\n gcold = gc.isenabled()\n gc.disable()\n try:\n timing = self.inner(it, self.timer)\n finally:\n if gcold:\n gc.enable()\n return timing", "def attempts_generator(amount: int = 7):\n yield from range(1, amount)", "async def async_generator() -> Generator[float, None, None]:\n for _ in range(10):\n await asyncio.sleep(1)\n yield random.random() * 10", "def iter_latest_asynchonously(gen_func, timeout = None, empty_value = None, use_forkserver = False, uninitialized_wait = None):\n if use_forkserver:\n from multiprocessing import set_start_method # Only Python 3.X\n set_start_method('forkserver') # On macos this is necessary to start camera in separate thread\n\n m = Manager()\n namespace = m.Namespace()\n\n lock = Lock()\n\n with lock:\n namespace.time_and_data = (-float('inf'), Uninitialized)\n\n p = Process(target=_async_value_setter, args=(gen_func, namespace, lock))\n p.start()\n while True:\n with lock:\n lasttime, item = namespace.time_and_data\n if item is PoisonPill: # The generator has terminated\n break\n elif item is Uninitialized:\n if uninitialized_wait is not None:\n time.sleep(uninitialized_wait)\n continue\n else:\n yield empty_value\n elif timeout is not None and (time.time() - lasttime) > timeout: # Nothing written or nothing recent enough\n yield empty_value\n else:\n yield item", "def times2(value):\r\n return value * 2", "def repeat(num_times):\n\n def decorator_repeat(func):\n \"\"\"\n defines wrapper_repeat(*args, **kwargs)\n\n :returns: wrapper_repeat\n \"\"\"\n\n @functools.wraps(func)\n def wrapper_repeat(*args, **kwargs):\n \"\"\"\n func(*args, **kwargs) num_times\n\n :return: last return value\n \"\"\"\n for _ in range(num_times):\n value = func(*args, **kwargs)\n return value\n\n return wrapper_repeat\n\n return decorator_repeat", "def test_return_value():\n\n def subgen():\n yield 2\n yield 3\n Return(100)\n\n @yieldfrom\n def gen():\n yield 1\n ret = (yield From(subgen()))\n yield 4\n yield ret\n\n assert list(gen()) == [1, 2, 3, 4, 100]", "def multiple_gen(modulus):\n count = 1\n while True:\n yield modulus * count\n count += 1", "def repeat(fn, v=0, k=inf):\n i = 0\n while True:\n yield v\n if i == k: break\n i += 1\n v = fn(v)", "def gen_seq():\n v = 1\n while 1:\n yield 1.0 / v\n v *= 2", "def c(sequence):\n Debugger.starts += 1\n for item in sequence:\n Debugger.items += 1\n yield item", "def test_simple_repeat(self):\n r = mi.repeatfunc(lambda: 5)\n self.assertEqual([5, 5, 5, 5, 5], [next(r) for _ in range(5)])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update `exercise_gen`, so it will ignore all exceptions
def exercise2(): g1 = exercise_gen("I'll ignore errors", 300) assert next(g1) == "I'll ignore errors" assert g1.send('new val') == 'new val' assert g1.throw(Exception) == 'new val' assert next(g1) == 'new val'
[ "def exercise_gen(ret_val, times):", "def test_post_codegen_error_query(self):\n with tempfile.TemporaryDirectory() as tmpdirname:\n translator = AstUprootTranslator()\n with pytest.raises(GenerateCodeException):\n translator.generate_code(\"\", cache_path=tmpdirname)", "def test_untrained_model(self):\n with self.assertRaises(UntrainedModelError):\n self.markov.make_generator()", "def generate_excuse() -> str:", "def add_exercise(self):\r\n\r\n # Take the exercise entires from TOML file\r\n entries = cfg.get(\"payload\",{}).get(\"exercise\")\r\n # Check for valid entires\r\n if entries:\r\n # Construct payload \r\n for payload in entries:\r\n # Check the entry vs a json schema\r\n check.check_entry(path='schemas/exercise.json', test=payload)\r\n # Post request\r\n requests.post(API.url_exercise, data = payload, headers = self.headers, timeout = 2)", "def add_exercise(self):\r\n\r\n # Take the exercise entires from TOML file\r\n entries = self.cfg.get(\"payload\",{}).get(\"exercise\")\r\n # Check for valid entires\r\n if entries:\r\n # Construct payload \r\n for payload in entries:\r\n # Check the entry vs a json schema\r\n check.check_entry(path='schemas/exercise.json', test=payload)\r\n # Post request\r\n requests.post(API.url_exercise, data = payload, headers = self.headers, timeout = 2)", "def test_generate_all_training(self):\n facade = ChatetteFacade.get_or_create()\n\n input_dir_path = \"tests/system-testing/inputs/generate-all/\"\n input_filenames = [\n \"simplest.chatette\", \"only-words.chatette\",\n \"words-and-groups.chatette\", \"alias.chatette\", \"include.chatette\",\n \"slot.chatette\", \"slotrolegroup.chatette\"\n ]\n for filename in input_filenames:\n file_path = os.path.join(input_dir_path, filename)\n facade.run(file_path)\n if not TestSystem.check_no_duplicates(facade.train_examples):\n pytest.fail(\n \"Some examples were generated several times \" +\n \"when dealing with file '\" + filename + \"'.\\nGenerated: \" + \\\n str(facade.train_examples)\n )\n legal_examples = TestSystem.get_legal_examples(file_path)\n for ex in facade.train_examples:\n formatted_ex = {\"intent\": ex.intent_name, \"text\": ex.text}\n if formatted_ex not in legal_examples:\n pytest.fail(\n str(formatted_ex) + \" is not a legal example for '\" + \\\n file_path + \"'\"\n )\n if len(legal_examples) != len(facade.train_examples):\n training_texts = [ex.text for ex in facade.train_examples]\n for legal_ex in legal_examples:\n if legal_ex[\"text\"] not in training_texts:\n pytest.fail(\n \"Example '\" + legal_ex[\"text\"] + \\\n \"' was not generated.\"\n )\n pytest.fail(\n \"An unknown example was not generated (\" + \\\n str(len(facade.train_examples)) + \\\n \" generated instead of \" + str(len(legal_examples)) + \\\n \").\\nGenerated: \" + str(facade.train_examples)\n )\n legal_syn = TestSystem.get_legal_synonyms(file_path)\n if legal_syn is not None:\n synonyms = AST.get_or_create().get_entities_synonyms()\n for key in synonyms:\n if key not in legal_syn:\n pytest.fail(\n \"'\" + key + \"' shouldn't have any synonyms.\"\n )\n for syn in synonyms[key]:\n if syn not in legal_syn[key]:\n pytest.fail(\n \"'\" + syn + \"' shouldn't be a synonym of '\" + \\\n key + \"'\"\n )", "def test_ignore_errors_document():\n trigger_ignore_exceptions_config = TRIGGER_EXCEPTIONS_CONFIG.copy()\n\n for k, v in trigger_ignore_exceptions_config['.'].iteritems():\n v['ignore-errors'] = True\n\n for doc in run_dexy(trigger_ignore_exceptions_config):\n if not doc.key() in tests_to_skip():\n doc.run()", "def 
test_create_unexpected_problem(self):\n pass", "def generate_example(self, index=0):\n raise NotImplementedError()", "def add_exercise( self, exercise ):\n self.exercises.append( exercise )", "def test_exception_handling(self):\n\n model_dictionary = behroozi10_model_dictionary()\n\n model = SubhaloModelFactory(**model_dictionary)\n\n tmp = copy(model_dictionary['stellar_mass']._galprop_dtypes_to_allocate)\n del model_dictionary['stellar_mass']._galprop_dtypes_to_allocate\n with pytest.raises(HalotoolsError):\n model = SubhaloModelFactory(**model_dictionary)\n model_dictionary['stellar_mass']._galprop_dtypes_to_allocate = tmp\n\n tmp = copy(model_dictionary['stellar_mass']._methods_to_inherit)\n del model_dictionary['stellar_mass']._methods_to_inherit\n with pytest.raises(HalotoolsError):\n model = SubhaloModelFactory(**model_dictionary)\n\n behroozi = Behroozi10SmHm(redshift = 0)\n model2 = SubhaloModelFactory(stellar_mass = behroozi, \n model_feature_calling_sequence = ['stellar_mass'])\n with pytest.raises(HalotoolsError):\n model3 = SubhaloModelFactory(stellar_mass = behroozi, \n model_feature_calling_sequence = ['stellar_mass', 'quiescent'])", "def source_exercise_target(self, node):\n std_domain = self.builder.env.domains['std']\n figtype = std_domain.get_enumerable_node_type(node.parent)\n assert figtype == 'solution'\n\n fig_id = node.parent['ids'][0]\n\n # sort out the label\n exercise_label = node.parent.attributes['exercise']\n\n names = node.parent['names']\n assert len(names) == 1\n assert names[0].startswith('sol:')\n\n # get exercise id\n assert fig_id.startswith('sol-')\n exercise_id = 'ex-{}'.format(fig_id[4:])\n assert exercise_id == nodes.make_id(exercise_label)\n\n # because the exercise may be in a different document, we go global\n all_labels = std_domain.data['labels']\n assert exercise_label in all_labels\n\n # track down the document and identifier\n exercise_source_docname = all_labels[exercise_label][0]\n fig_identifiers = self.builder.env.toc_fignumbers\n assert exercise_source_docname in fig_identifiers\n assert 'exercise' in fig_identifiers[exercise_source_docname]\n ex_docname_map = fig_identifiers[exercise_source_docname]['exercise']\n assert exercise_id in ex_docname_map\n\n fignumber = ex_docname_map[exercise_id]\n\n return exercise_source_docname, exercise_id, fignumber", "def test_refuse_to_save(self):\n\n # setting the temperature alone is invalid, as we also need to set do_sample to True -> throws a warning that\n # is caught, doesn't save, and raises a warning\n config = GenerationConfig()\n config.temperature = 0.5\n with tempfile.TemporaryDirectory() as tmp_dir:\n with warnings.catch_warnings(record=True) as captured_warnings:\n config.save_pretrained(tmp_dir)\n self.assertEqual(len(captured_warnings), 1)\n self.assertTrue(\"Fix these issues to save the configuration.\" in str(captured_warnings[0].message))\n self.assertTrue(len(os.listdir(tmp_dir)) == 0)\n\n # greedy decoding throws an exception if we try to return multiple sequences -> throws an exception that is\n # caught, doesn't save, and raises a warning\n config = GenerationConfig()\n config.num_return_sequences = 2\n with tempfile.TemporaryDirectory() as tmp_dir:\n with warnings.catch_warnings(record=True) as captured_warnings:\n config.save_pretrained(tmp_dir)\n self.assertEqual(len(captured_warnings), 1)\n self.assertTrue(\"Fix these issues to save the configuration.\" in str(captured_warnings[0].message))\n self.assertTrue(len(os.listdir(tmp_dir)) == 0)\n\n # final check: no warnings 
thrown if it is correct, and file is saved\n config = GenerationConfig()\n with tempfile.TemporaryDirectory() as tmp_dir:\n with warnings.catch_warnings(record=True) as captured_warnings:\n config.save_pretrained(tmp_dir)\n self.assertEqual(len(captured_warnings), 0)\n self.assertTrue(len(os.listdir(tmp_dir)) == 1)", "def test_ignore_errors_controller():\n args = { \"ignore\" : True }\n for doc in run_dexy(TRIGGER_EXCEPTIONS_CONFIG, args):\n if not doc.key() in tests_to_skip():\n doc.run()", "def validate_exercise(exer_name, exercise):\n global error_count\n\n # Ensure exercise name is <= the max length of the Exercise name field in the database\n max_length = 50\n if len(exer_name) > max_length:\n print 'ERROR: ' + exer_name + ' is greater than ' + max_length + ' characters'\n error_count += 1\n\n required_fields = []\n optional_fields = ['jsav_exer_options', 'long_name', 'points', 'remove', 'required', 'showhide', 'threshold']\n\n # Ensure required fields are present\n for field in required_fields:\n if field not in exercise:\n print 'ERROR: Exercise, ' + exer_name + ', is missing required field, ' + field\n error_count += 1\n\n # Ensure there are no invalid fields in the module\n for field in exercise:\n if field not in (required_fields + optional_fields):\n print 'ERROR: Unknown field, ' + field + ', found in exercise ' + exer_name\n error_count += 1", "def _sample_seed(self):\n raise Exception(\" not implemented in base model\")", "def add_all_exercises(exam_date, path_all, path_collection):\n type_list = [x for x in os.listdir(path_collection) if '.DS_Store' not in x]\n print(type_list)\n for i in range(len(type_list)):\n print('Type: ' + type_list[i])\n os.mkdir(path_all + '/' + type_list[i])\n path_type = path_collection + '/' + type_list[i]\n nb_ex_type = len(os.listdir(path_type)) # indexing file da 0\n for j in range(nb_ex_type):\n chosen_type_yaml = path_type + '/' + type_list[i] + str(j) + '.yaml'\n if j+1>=9:\n path_ex = path_all + '/' + type_list[i] + '/istanza_' + str(j+1)\n else:\n path_ex = path_all + '/' + type_list[i] + '/istanza_0' + str(j+1)\n print(path_ex)\n os.mkdir(path_ex)\n mode1.create_exercise(exam_date, str(j+1), path_ex, chosen_type_yaml)\n #mode2.create_exercise(str(i+1), path_ex, chosen_type_yaml)\n #mode3.create_exercise(str(i+1), path_ex, chosen_type_yaml)\n print('Exercise ' + str(j+1) + ' added')\n return", "def test_generate_nb_training(self):\n facade = ChatetteFacade.get_or_create()\n\n input_dir_path = \\\n \"tests/system-testing/inputs/generate-nb/training-only/\"\n input_filenames = [\n \"only-words.chatette\", \"words-and-groups.chatette\",\n \"alias.chatette\", \"include.chatette\", \"slot.chatette\",\n \"bugfixes/bug-22-slot-position.chatette\"\n ]\n for filename in input_filenames:\n file_path = os.path.join(input_dir_path, filename)\n facade.run(file_path)\n # if not TestSystem.check_no_duplicates(facade.train_examples): # TODO: make sure there are no duplicates in this case\n # pytest.fail(\"Some examples were generated several times \"+\n # \"when dealing with file '\"+filename+\"'.\\n\"+\n # \"Generated: \"+str(facade.train_examples))\n legal_examples = TestSystem.get_legal_examples(file_path)\n for ex in facade.train_examples:\n formatted_ex = {\"intent\": ex.intent_name, \"text\": ex.text}\n if formatted_ex not in legal_examples:\n pytest.fail(\n str(formatted_ex) + \" is not a legal example for '\" + \\\n file_path + \"'\"\n )\n \n legal_syn = TestSystem.get_legal_synonyms(file_path)\n if legal_syn is not None:\n synonyms = 
AST.get_or_create().get_entities_synonyms()\n for key in synonyms:\n if key not in legal_syn:\n pytest.fail(\n \"'\" + key + \"' shouldn't have any synonyms.\"\n )\n for syn in synonyms[key]:\n if syn not in legal_syn[key]:\n pytest.fail(\n \"'\" + syn + \"' shouldn't be a synonym of '\" + \\\n key + \"'\"\n )\n\n filename_zero = \"zero-ex.chatette\"\n file_path = os.path.join(input_dir_path, filename_zero)\n facade.run(file_path)\n if len(facade.train_examples) != 0:\n pytest.fail(\n \"When dealing with file 'zero-ex.chatette', no examples \" + \\\n \"should be generated.\\nGenerated: \" + \\\n str(facade.train_examples)\n )\n\n filename_one = \"one-ex.chatette\"\n file_path = os.path.join(input_dir_path, filename_one)\n facade.run(file_path)\n print(\"TRAIN EX: \" + str(facade.train_examples))\n if len(facade.train_examples) != 1:\n pytest.fail(\n \"When dealing with file 'one-ex.chatette', one examples \" + \\\n \"should be generated.\\nGenerated: \" + \\\n str(facade.train_examples)\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create the reference file of a test using the response received. The file will be created in the git references folder provided in the settings file
def create_reference( self, response_checker=default_checker.default_journey_checker ): # Check that the file doesn't already exist filename = self.get_file_name() filepath = os.path.join(config["REFERENCE_FILE_PATH"], filename) if os.path.isfile(filepath): logger.warning( "NO REF FILE CREATED - {} is already present".format(filepath) ) else: # Concatenate reference file info reference_text = OrderedDict() reference_text["query"] = self.query.replace( config["URL_JORMUN"][7:], "localhost" ) logger.warning("Query: {}".format(self.query)) reference_text["response"] = response_checker.filter( json.loads(self.full_resp) ) reference_text["full_response"] = json.loads( self.full_resp.replace(config["URL_JORMUN"][7:], "localhost") ) # Write reference file directly in the references folder with open(filepath, "w") as ref: ref.write(json.dumps(reference_text, indent=4)) logger.info("Created reference file : {}".format(filepath))
[ "def ref(request):\n r = referencepytest.ref(request)\n this_dir = os.path.abspath(os.path.dirname(__file__))\n r.set_data_location(os.path.join(this_dir, '..', 'reference'))\n return r", "def test_with_new_file(self):\n repository = self.create_repository(tool_name='Test')\n review_request = self.create_review_request(\n repository=repository,\n submitter=self.user,\n publish=True)\n diffset = self.create_diffset(review_request)\n filediff = self.create_filediff(diffset,\n source_revision=PRE_CREATION)\n\n rsp = self.api_get(\n get_original_file_url(review_request, diffset, filediff),\n expected_status=404)\n self.assertEqual(rsp['stat'], 'fail')\n self.assertEqual(rsp['err']['code'], DOES_NOT_EXIST.code)", "def ref_resp2files(output_file, output_json):\n with open(output_file, \"w\") as reference_text:\n reference_text.write(output_json)", "def create_ref_file(self):\n id = self.task_record.create_published_output_name()\n ctx = self.block_store.make_local_output(id)\n self.open_ref_contexts[ctx.get_filename()] = ctx\n return ctx.get_filename()", "def test_fetcher_git(with_git_ref, spec, tmp_path):\n\n def create_git_repository(repo_path, files, idx):\n \"\"\"Create a git repository with one commit for each file.\"\"\"\n repository = Repo.init(repo_path, initial_branch=\"main\")\n\n commits = []\n for file, content in files:\n file_path = os.path.join(repo_path, file)\n with open(file_path, \"w\") as f:\n f.write(content)\n repository.index.add(file_path)\n commit = repository.index.commit(f\"Add {file}\")\n commits.append(commit.hexsha)\n\n # Checkout given commit\n repository.git.checkout(commits[idx])\n # Create branch\n repository.create_head(\"new-branch\")\n # Create tag\n repository.create_tag(\"new-tag\")\n # Go back to main branch\n repository.git.checkout(\"main\")\n\n return commits\n\n repo_dir = os.path.join(tmp_path, \"repo\")\n output_dir = os.path.join(tmp_path, \"output\")\n\n files = [\n (\"reana.yaml\", \"Content of reana.yaml\"),\n (\"reana-cwl.yaml\", \"Content of reana-cwl.yaml\"),\n (\"README.md\", \"# Test Git Repository\"),\n (\"reana-not-present.yaml\", \"Content of reana-not-present.yaml\"),\n ]\n\n commits = create_git_repository(repo_dir, files, idx=1)\n\n if with_git_ref == \"branch\":\n git_ref = \"new-branch\"\n elif with_git_ref == \"commit\":\n git_ref = commits[1]\n elif with_git_ref == \"tag\":\n git_ref = \"new-tag\"\n else:\n assert with_git_ref is None\n git_ref = None\n\n fetcher = WorkflowFetcherGit(\n ParsedUrl(f\"file://{repo_dir}\"), output_dir, git_ref, spec\n )\n fetcher.fetch()\n expected_path = os.path.join(output_dir, spec or \"reana.yaml\")\n assert expected_path == fetcher.workflow_spec_path()\n assert os.path.isfile(expected_path)", "def compare_with_ref(\n self, response, response_checker=default_checker.default_journey_checker\n ):\n\n def ref_resp2files(output_file, output_json):\n \"\"\"\n Create a file for the filtered response and for the filtered reference\n \"\"\"\n with open(output_file, \"w\") as reference_text:\n reference_text.write(output_json)\n\n def print_diff(ref_file, resp_file):\n \"\"\"\n Print differences between reference and response in console\n \"\"\"\n # open reference\n with open(ref_file) as reference_text:\n reference = reference_text.readlines()\n # open response\n with open(resp_file) as response_text:\n response = response_text.readlines()\n\n # Print failed test name\n print_color(\"\\n\\n\" + str(file_name) + \" failed :\" + \"\\n\\n\", Colors.PINK)\n\n symbol2color = {\"+\": Colors.GREEN, \"-\": 
Colors.RED}\n for line in difflib.unified_diff(reference, response):\n print_color(line, symbol2color.get(line[0], Colors.DEFAULT))\n\n # Filtering the answer. (We compare to a reference also filtered with the same filter)\n filtered_response = response_checker.filter(response)\n\n # Get the reference\n\n # Create the file name\n filename = self.get_file_name()\n filepath = os.path.join(config[\"REFERENCE_FILE_PATH\"], filename)\n\n assert os.path.isfile(filepath), \"{} is not a file\".format(filepath)\n\n with open(filepath, \"r\") as f:\n raw_reference = f.read()\n\n # Transform the string into a dictionary\n dict_ref = json.loads(raw_reference)\n\n # Get only the full_response part from the ref\n ref_full_response = dict_ref[\"full_response\"]\n\n # Filtering the reference\n filtered_reference = response_checker.filter(ref_full_response)\n\n # Compare response and reference\n try:\n response_checker.compare(filtered_response, filtered_reference)\n except AssertionError as e:\n # print the assertion error message\n logging.error(\"Assertion Error: %s\" % str(e))\n # find name of test\n file_name = filename.split(\"/\")[-1]\n file_name = file_name[:-5]\n\n # create a folder\n dir_path = config[\"RESPONSE_FILE_PATH\"]\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n\n # create path to ref and resp\n full_file_name_ref = dir_path + \"/reference_\" + file_name + \".txt\"\n full_file_name_resp = dir_path + \"/response_\" + file_name + \".txt\"\n\n json_filtered_reference = json.dumps(filtered_reference, indent=4)\n json_filtered_response = json.dumps(filtered_response, indent=4)\n\n # Save resp and ref as txt files in folder named outputs\n ref_resp2files(full_file_name_ref, json_filtered_reference)\n ref_resp2files(full_file_name_resp, json_filtered_response)\n\n # Print difference in console\n print_diff(full_file_name_ref, full_file_name_resp)\n\n raise", "def create_reference(name: str, revision: str) -> str:\n path_to_yang = '{}/{}@{}.yang'.format(ac.d_save_file_dir, name, revision)\n context = {'title': 'Reference {}@{}'.format(name, revision)}\n try:\n with open(path_to_yang, 'r', encoding='utf-8', errors='strict') as f:\n yang_file_content = escape(f.read())\n except FileNotFoundError:\n context['message'] = 'File {}@{}.yang was not found.'.format(name, revision)\n return create_bootstrap(context, 'danger.html')\n\n return '<html><body><pre>{}</pre></body></html>'.format(yang_file_content)", "def test_create_files(self):\n\n testdir = \"test_output\"\n test_submission = Submission()\n self.addCleanup(os.remove, \"submission.tar.gz\")\n self.addCleanup(shutil.rmtree, testdir)\n\n test_submission.create_files(testdir)\n\n self.doCleanups()", "def construct(cls):\n\n restart = (\n Helpers.Regex(\n Helpers.Command(\"git log -1\", False).execute(),\n Settings.launch_test_marker,\n return_data=False,\n escape=False,\n ).match()\n or not Settings.currently_under_test\n )\n\n if restart:\n if Settings.raw_link.endswith(\".tar.gz\"):\n cls.generate_from_tar_gz()\n elif Helpers.Download(Settings.raw_link, Settings.file_to_test).link():\n Helpers.Command(\"dos2unix \" + Settings.file_to_test, False).execute()\n\n formated_content = Helpers.List(\n cls.extract_lines(Settings.file_to_test)\n ).format()\n\n Helpers.File(Settings.file_to_test).write(\n \"\\n\".join(formated_content), overwrite=True\n )\n\n del formated_content\n elif not Settings.raw_link and path.isfile(Settings.file_to_test):\n print(\"\\n\")\n new_file_content = Helpers.List(\n 
cls.extract_lines(Settings.file_to_test)\n ).format()\n\n Helpers.File(Settings.file_to_test).write(\n \"\\n\".join(new_file_content), overwrite=True\n )\n\n del new_file_content\n\n if restart:\n Settings.currently_under_test = False\n\n PyFunceble.clean()", "def testExampleFileGeneration(ref):\n outdir = ref.tmp_dir\n outpath = os.path.join(outdir, 'file_result.html')\n generate_file(outpath)\n ref.assertTextFileCorrect(outpath, 'file_result.html',\n ignore_substrings=['Copyright', 'Version'])", "def test_generate_diff_download(self, mock_response, mock_request, mock_test_result_file):\n from mod_test.controllers import generate_diff\n\n mock_request.accept_mimetypes.best = 'application/json'\n\n response = generate_diff(1, 1, 1, to_view=0)\n\n self.assertTrue(response, mock_response())", "def _create(cls, repo, path, resolve, reference, force, logmsg=None):\r\n full_ref_path = cls.to_full_path(path)\r\n abs_ref_path = join(repo.git_dir, full_ref_path)\r\n \r\n # figure out target data\r\n target = reference\r\n if resolve:\r\n target = repo.rev_parse(str(reference))\r\n \r\n if not force and isfile(abs_ref_path):\r\n target_data = str(target)\r\n if isinstance(target, SymbolicReference):\r\n target_data = target.path\r\n if not resolve:\r\n target_data = \"ref: \" + target_data\r\n existing_data = open(abs_ref_path, 'rb').read().strip() \r\n if existing_data != target_data:\r\n raise OSError(\"Reference at %r does already exist, pointing to %r, requested was %r\" % (full_ref_path, existing_data, target_data))\r\n # END no force handling\r\n \r\n ref = cls(repo, full_ref_path)\r\n ref.set_reference(target, logmsg)\r\n return ref", "def test_set_api_url(self):\n UI_path = './resources/'\n test_js_filename = 'test_main.js'\n new_js_filename = UI_path + 'main_blabla.js'\n reference_js = UI_path + 'test_main_reference.js'\n\n os.system('cp {} {}'.format(\n UI_path + test_js_filename,\n new_js_filename))\n\n api_url = 'https://app.etabot.ai:8000/api/'\n set_api_url.set_api_url(\n UI_path, api_url, api_url_var_name='apiUrl')\n\n ute.assertFileEqual(new_js_filename, reference_js, self)\n os.remove(new_js_filename)", "def test_call_refget_api():\n\n # assert that json output matches expected response\n client = ENAClient(args=args_success_0)\n response_string = client.call_refget_api(sequence_id_0, 0)\n correct_output = open(\"testdata/response_1.json\", \"r\").read().rstrip()\n assert response_string == correct_output", "def test_create_symlink_file(self):\n pass", "def _make_file(self, rela_path, data, repo=None):\r\n repo = repo or self.rorepo\r\n abs_path = os.path.join(repo.working_tree_dir, rela_path)\r\n fp = open(abs_path, \"w\")\r\n fp.write(data)\r\n fp.close()\r\n return abs_path", "def file_factory(test_workspace):\n\n return FileCreator(test_workspace)", "def file(c, path=local.http_path):\r\n c = conn(c)\r\n print(\"make file repo on {}, path [{}]\".format(c.host, path))\r\n\r\n system.install(c, 'createrepo')\r\n c.run('createrepo {}'.format(path))", "def create_reference_files(cxn, log):\n log.info('Preparing reference gene files for exonerate')\n for ref in db.select_reference_genes(cxn):\n with open(ref['ref_file'], 'w') as ref_file:\n util.write_fasta_record(ref_file, ref['ref_name'], ref['ref_seq'])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compare the response (which is a dictionary) to the reference First, the function retrieves the reference then filters both ref and resp Finally, it compares them
def compare_with_ref( self, response, response_checker=default_checker.default_journey_checker ): def ref_resp2files(output_file, output_json): """ Create a file for the filtered response and for the filtered reference """ with open(output_file, "w") as reference_text: reference_text.write(output_json) def print_diff(ref_file, resp_file): """ Print differences between reference and response in console """ # open reference with open(ref_file) as reference_text: reference = reference_text.readlines() # open response with open(resp_file) as response_text: response = response_text.readlines() # Print failed test name print_color("\n\n" + str(file_name) + " failed :" + "\n\n", Colors.PINK) symbol2color = {"+": Colors.GREEN, "-": Colors.RED} for line in difflib.unified_diff(reference, response): print_color(line, symbol2color.get(line[0], Colors.DEFAULT)) # Filtering the answer. (We compare to a reference also filtered with the same filter) filtered_response = response_checker.filter(response) # Get the reference # Create the file name filename = self.get_file_name() filepath = os.path.join(config["REFERENCE_FILE_PATH"], filename) assert os.path.isfile(filepath), "{} is not a file".format(filepath) with open(filepath, "r") as f: raw_reference = f.read() # Transform the string into a dictionary dict_ref = json.loads(raw_reference) # Get only the full_response part from the ref ref_full_response = dict_ref["full_response"] # Filtering the reference filtered_reference = response_checker.filter(ref_full_response) # Compare response and reference try: response_checker.compare(filtered_response, filtered_reference) except AssertionError as e: # print the assertion error message logging.error("Assertion Error: %s" % str(e)) # find name of test file_name = filename.split("/")[-1] file_name = file_name[:-5] # create a folder dir_path = config["RESPONSE_FILE_PATH"] if not os.path.exists(dir_path): os.makedirs(dir_path) # create path to ref and resp full_file_name_ref = dir_path + "/reference_" + file_name + ".txt" full_file_name_resp = dir_path + "/response_" + file_name + ".txt" json_filtered_reference = json.dumps(filtered_reference, indent=4) json_filtered_response = json.dumps(filtered_response, indent=4) # Save resp and ref as txt files in folder named outputs ref_resp2files(full_file_name_ref, json_filtered_reference) ref_resp2files(full_file_name_resp, json_filtered_response) # Print difference in console print_diff(full_file_name_ref, full_file_name_resp) raise
[ "def compare(results, reference):\n results_only = set(results) - set(reference)\n reference_only = set(reference) - set(results)\n both = set(results) & set(reference)\n\n def make_delta(key):\n return (reference.get(key, 0), results.get(key, 0))\n\n violations = {key: make_delta(key) for key in results_only}\n improvements = {}\n recommended = {key: reference[key] for key in reference}\n for key in both:\n if results[key] > reference[key]:\n violations[key] = make_delta(key)\n elif results[key] < reference[key]:\n recommended[key] = results[key]\n improvements[key] = make_delta(key)\n\n for key in reference_only:\n improvements[key] = make_delta(key)\n del recommended[key]\n\n return violations, improvements, recommended", "def get_by_reference(self, ref):\n url = \"{}/{}\".format(self._base_url, ref)\n headers = self._generate_headers()\n response = self._session.get(url=url, headers=headers)\n if response.status_code != 200:\n print(response.text)\n raise PartnerClientAPIException(\n \"Couldn't get resource '{}', status code: {}\".format(\n url, response.status_code))\n return response.json()", "def test_resource_get_dict_and_named_tuple_deep_equivalence(server_response):\n assert True == equivalent(server_response.json, server_response)", "def compare_json(expected, actual):\n if type(expected) and type(actual) is not dict:\n return None\n\n result = True\n for key in expected.keys():\n if key in actual.keys():\n if type(expected[key]) is ObjectId:\n if str(expected[key]) == str(actual[key]):\n continue\n else:\n result = False\n print(f'Exp:{key}-> ObjId({expected[key]}) not equal to Act:{key}-> ObjId({actual[key]})')\n else:\n if expected[key] == actual[key]:\n # print(f'{key}:{expected_json[key]} is equal to {key}:{actual_json[key]}')\n continue\n else:\n print(f'Exp:{key}-> {expected[key]} not equal to Act:{key}-> {actual[key]}')\n result = False\n return result", "def compare_normalize_resp(resp, expected_query, expected_match_type,\n expected_gene_descriptor, expected_warnings=None,\n expected_source_meta=None):\n assert resp[\"query\"] == expected_query\n if expected_warnings:\n assert len(resp[\"warnings\"]) == len(expected_warnings), \"warnings len\"\n for e_warnings in expected_warnings:\n for r_warnings in resp[\"warnings\"]:\n for e_key, e_val in e_warnings.items():\n for r_key, r_val in r_warnings.items():\n if e_key == r_val:\n if isinstance(e_val, list):\n assert set(r_val) == set(e_val), \"warnings val\"\n else:\n assert r_val == e_val, \"warnings val\"\n else:\n assert resp[\"warnings\"] == [], \"warnings != []\"\n assert resp[\"match_type\"] == expected_match_type\n compare_gene_descriptor(expected_gene_descriptor, resp[\"gene_descriptor\"])\n if not expected_source_meta:\n assert resp[\"source_meta_\"] == {}\n else:\n resp_source_meta_keys = resp[\"source_meta_\"].keys()\n assert len(resp_source_meta_keys) == len(expected_source_meta),\\\n \"source_meta_keys\"\n for src in expected_source_meta:\n assert src in resp_source_meta_keys\n compare_service_meta(resp[\"service_meta_\"])", "def fetch(self, only_ref=False):\n if self.ref:\n reply = self.connector.get_object(\n self.ref, return_fields=self.return_fields)\n if reply:\n self.update_from_dict(reply)\n return True\n\n search_dict = self.to_dict(search_fields='update')\n return_fields = [] if only_ref else self.return_fields\n reply = self.connector.get_object(self.infoblox_type,\n search_dict,\n return_fields=return_fields)\n if reply:\n self.update_from_dict(reply[0], only_ref=only_ref)\n return True\n return 
False", "def match_data(rv, expected_data):\n actual_data = dict(get_json(rv))\n match_condition = all(item in actual_data.items() for item in expected_data.items())\n return match_condition", "def _slack_get_value(slack_response, search_value, search_field, return_field, classifier):\n if not slack_response['ok']:\n return False\n for item in slack_response[classifier]:\n if search_field in item and search_value == item[search_field] and return_field in item:\n return item[return_field]", "def test_call_refget_api():\n\n # assert that json output matches expected response\n client = ENAClient(args=args_success_0)\n response_string = client.call_refget_api(sequence_id_0, 0)\n correct_output = open(\"testdata/response_1.json\", \"r\").read().rstrip()\n assert response_string == correct_output", "def test_resource_get_dict_and_named_tuple_stat_equivalence(server_response):\n assert server_response.stat == server_response.json['stat']", "def fusion_api_validate_response(self, respDict, valDict):\n success = True\n returnDict = {}\n keys = []\n for key in valDict:\n if not valDict[key]:\n continue\n # logger._log_to_console_and_log_file('key: %s' % (key))\n keyDict = {'key': key, 'expected': valDict[\n key], 'actual': respDict[key], 'success': True}\n if key in respDict:\n pattern = re.compile(str(valDict[key]))\n # if not re.search(str(valDict[key]), str(respDict[key])):\n # t = re.compile('(?i)Warning|Unknown|Terminated|Killed|Error|Completed')\n\n if not re.search(pattern, str(respDict[key])):\n\n success = False\n keyDict['success'] = False\n else:\n success = False\n keyDict['success'] = False\n keys.append(keyDict)\n\n returnDict['success'] = success\n returnDict['keys'] = keys\n return returnDict", "def compare_resources(self, resob):\n if not resob:\n return None\n\n res = resob.get_resources()\n\n # Save each ASN that we find\n if 'asn' in res.keys():\n for (h, loc) in res['asn']:\n (fresh, data) = resob.get_data('asn', loc)\n if data and 'asn' in data.keys(): \n asn = data['asn']['startAsNumber']\n if not self.add_asn(asn):\n print \"Could not process ASN \" + str(asn)\n\n # Save each Net object that we find\n if 'net' in res.keys():\n for (h, loc) in res['net']:\n (fresh, data) = resob.get_data('net', loc)\n if data and 'net' in data.keys():\n startAddr = data['net']['startAddress']\n endAddr = data['net']['endAddress']\n oaslist = []\n if 'originASes' in data['net'].keys():\n originAS = data['net']['originASes']\n # Check if we have a dict\n if isinstance(originAS, dict): \n # The dict element could contain a list\n if isinstance(originAS['originAS'], list):\n for oas in originAS['originAS']:\n asstr = oas.replace(\"AS\",\"\")\n oaslist.append(asstr)\n else:\n asstr = originAS['originAS'].replace(\"AS\",\"\")\n oaslist.append(asstr)\n else:\n # we have a list\n for oas in originAS: \n asstr = oas['originAS'].replace(\"AS\",\"\")\n oaslist.append(asstr)\n if not self.add_net(startAddr, endAddr, oaslist):\n print \"Could not process Net handle: \" + h\n\n # Compare against route views\n return self.compare()", "def compare():\n body: t.Any = request.json\n check_error({'input': {'old': {}, 'new': {}}}, body)\n response_new = rpc_search({'input': body['input']['new']})\n response_old = rpc_search({'input': body['input']['old']})\n\n modules_new = response_new['yang-catalog:modules']['module']\n modules_old = response_old['yang-catalog:modules']['module']\n\n if len(modules_new) == 0 or len(modules_old) == 0:\n abort(404, description='No hits found either in old or new input')\n\n 
new_mods = []\n for mod_new in modules_new:\n new_rev = mod_new['revision']\n new_name = mod_new['name']\n found = False\n new_rev_found = False\n for mod_old in modules_old:\n old_rev = mod_old['revision']\n old_name = mod_old['name']\n if new_name == old_name and new_rev == old_rev:\n found = True\n break\n if new_name == old_name and new_rev != old_rev:\n new_rev_found = True\n if not found:\n mod_new['reason-to-show'] = 'New module'\n new_mods.append(mod_new)\n if new_rev_found:\n mod_new['reason-to-show'] = 'Different revision'\n new_mods.append(mod_new)\n if len(new_mods) == 0:\n abort(404, description='No new modules or modules with different revisions found')\n output = {'output': new_mods}\n return output", "def test_rest_vs_sdk(self): \n sdk_obj = self.call_sdk()\n rest_obj = self.call_rest_api() \n # TODO passing Response here, SDK doesn't currently capture other items at this level (e.g. Notifications)\n if self.list_request:\n self.compare_list_to_obj(rest_obj['Response']['Items'], sdk_obj, \"BASE\")\n else:\n self.compare_dict_to_obj(rest_obj['Response'], sdk_obj)", "def _filter_by_attribute(self, context, refs, attr):\n if attr in context['query_string']:\n value = context['query_string'][attr]\n return [r for r in refs if r[attr] == value]\n return refs", "def is_cont_ref_booked(cfg, plannerId, cont_ref, timeout=5):\n\n result, response = find_bookings_by_contentref(\n cfg, plannerId, cont_ref, timeout)\n print \"[INFO: ] booking by content ref result status \", result\n print \"[INFO: ] booking by content ref response \", response.content\n if result:\n res_dict = json.loads(response.content)\n res_bkg_lst = res_dict.get(\"bookings\")\n if res_bkg_lst and len(res_bkg_lst):\n # Per call single Content Ref.\n # Take First Value for Validation\n bk_item = res_bkg_lst[0]\n print \"[INFO: ] Booked Item: \", bk_item\n if bk_item.get(\"state\") == \"BOOKED\":\n return True, response\n else:\n return False, response\n else:\n return False, response\n else:\n return False, response", "def _referencedChecker(self, entity, params):\n\n if 'ref_logic' not in params:\n return False\n\n logic = self.helper.getLogicForItem(params, 'ref_logic')\n filter = {\n params['ref_field']: entity.key()\n }\n ref_entity = logic.getForFields(filter=filter, unique=True)\n\n result = ref_entity is not None\n\n no_ref = params.get('no_ref')\n if no_ref:\n result = not result\n\n return result", "def check_match_type(self,\n query: str,\n resp: Dict,\n sources: Set[str],\n match: str) -> (Dict, Set):\n filter_exp = Key('label_and_type').eq(f'{query}##{match}')\n try:\n db_response = self.db.genes.query(\n KeyConditionExpression=filter_exp\n )\n if 'Items' in db_response.keys():\n concept_ids = [i['concept_id'] for i in db_response['Items']]\n (resp, matched_srcs) = self.fetch_records(\n resp, concept_ids, MatchType[match.upper()]\n )\n sources = sources - matched_srcs\n except ClientError as e:\n logger.error(e.response['Error']['Message'])\n return (resp, sources)", "def match_responses(\n responses: ResponseDict, counties: FIPSDict\n) -> Tuple[Dict[int, int], ResponseDict]:\n matches = {k: get_matching_result(v, counties[k], k) for k, v in responses.items()}\n matching_ids = {k: v['id'] for k, v in matches.items() if v}\n unmatched = {k: responses[k] for k, v in matches.items() if not v}\n return matching_ids, unmatched" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a file for the filtered response and for the filtered reference
def ref_resp2files(output_file, output_json): with open(output_file, "w") as reference_text: reference_text.write(output_json)
[ "def export_file(self):\n if self.args.keyfilter:\n self.filter_keys()\n if self.args.datafilter:\n self.filter_values()\n json.dump(self.outputdata, self.outfile, indent=self.args.indent)\n self.outfile.write('\\n')", "def create_reference(\n self, response_checker=default_checker.default_journey_checker\n ):\n # Check that the file doesn't already exist\n filename = self.get_file_name()\n filepath = os.path.join(config[\"REFERENCE_FILE_PATH\"], filename)\n\n if os.path.isfile(filepath):\n logger.warning(\n \"NO REF FILE CREATED - {} is already present\".format(filepath)\n )\n else:\n # Concatenate reference file info\n reference_text = OrderedDict()\n reference_text[\"query\"] = self.query.replace(\n config[\"URL_JORMUN\"][7:], \"localhost\"\n )\n logger.warning(\"Query: {}\".format(self.query))\n reference_text[\"response\"] = response_checker.filter(\n json.loads(self.full_resp)\n )\n reference_text[\"full_response\"] = json.loads(\n self.full_resp.replace(config[\"URL_JORMUN\"][7:], \"localhost\")\n )\n\n # Write reference file directly in the references folder\n with open(filepath, \"w\") as ref:\n ref.write(json.dumps(reference_text, indent=4))\n logger.info(\"Created reference file : {}\".format(filepath))", "def create_response_info(self, response):\n output_path = os.path.join(self.output_folder, self.file_name)\n output_path += \".response.txt\"\n with open(output_path, 'w') as file:\n file.write(json.dumps(response))", "def create_filter_tmp_files(filtered_files_list, filter_output_dir, logger=None):\n\n # Useful for logging\n # cur_filename = sys._getframe().f_code.co_filename\n # cur_function = sys._getframe().f_code.co_name\n\n # Create the filenames for the tmp_fcst and tmp_anly files.\n tmp_fcst_filename = os.path.join(filter_output_dir,\n \"tmp_fcst_regridded.txt\")\n tmp_anly_filename = os.path.join(filter_output_dir,\n \"tmp_anly_regridded.txt\")\n\n fcst_list = []\n anly_list = []\n\n for filter_file in filtered_files_list:\n fcst_match = re.match(r'(.*/FCST_TILE_F.*.[grb2|nc])', filter_file)\n if fcst_match:\n fcst_list.append(fcst_match.group(1))\n\n anly_match = re.match(r'(.*/ANLY_TILE_F.*.[grb2|nc])', filter_file)\n if anly_match:\n anly_list.append(anly_match.group(1))\n\n # Write to the appropriate tmp file\n # with open(tmp_fcst_filename, \"a+\") as fcst_tmpfile:\n with open(tmp_fcst_filename, \"w+\") as fcst_tmpfile:\n for fcst in fcst_list:\n fcst_tmpfile.write(fcst + \"\\n\")\n\n with open(tmp_anly_filename, \"w+\") as anly_tmpfile:\n for anly in anly_list:\n anly_tmpfile.write(anly + \"\\n\")", "def create_filtered_network_file(network_file_prefix, filtered_network_file, ueids):\n network_file_method_attribute = network_file_prefix + \"_method_id.eda\"\n network_file_source_attribute = network_file_prefix + \"_source.eda\"\n #biana_output_converter.filter_network_by_interaction_type(network_attribute_file_name = network_file_method_attribute, network_out_file_name = network_file_prefix + \"_y2h.sif\", interaction_type=\"y2h\")\n #biana_output_converter.filter_network_by_interaction_type(network_attribute_file_name = network_file_method_attribute, network_out_file_name = network_file_prefix + \"_tap.sif\", interaction_type=\"tap\")\n #biana_output_converter.filter_network_by_interaction_type(network_attribute_file_name = network_file_method_attribute, network_out_file_name = network_file_prefix + \"_no_tap.sif\", interaction_type=\"tap\", reverse_selection=True)\n #biana_output_converter.filter_network_by_interaction_type(network_attribute_file_name = 
network_file_method_attribute, network_out_file_name = filtered_network_file + \".no_tap\", interaction_type=\"tap\", reverse_selection=True)\n valid_ids = set([0,4,96,676,729,19,6,7,858,59,109]) # TAP\n biana_output_converter.filter_network_by_interaction_attribute_value(network_attribute_file_name = network_file_method_attribute, network_out_file_name = filtered_network_file + \".no_tap\", accept_attribute_value = lambda x: int(x) not in valid_ids)\n\n #interaction_to_sources = get_interaction_sources(network_file_source_attribute)\n with open(filtered_network_file, 'w') as f:\n for line in open(filtered_network_file + \".no_tap\"):\n id1, dummy, id2 = line.split()\n # Filter self interactions\n if id1 == id2:\n continue\n # Remove singleton interacions (that has evidence only from one database)\n #id_pair = sorted([id1, id2])\n #if is_singleton(interaction_to_sources[(id_pair[0], id_pair[1])]):\n # continue\n # Do not include ambigous user entities\n if id1 in ueids and id2 in ueids:\n f.write(line)\n return", "def save_response(response, file_name, path='~/tmp/fcb-analyzer'):\n \n path = ensure_path(path)\n f = open(path + '/' + file_name, 'w')\n f.write(response.text)", "def create_file(self):\n for data_element in self.data:\n title = data_element['title']\n anchor = data_element['href']\n example = data_element['example']\n content = data_element['content']\n if example:\n abstract = '<section class=\"prog__container\">{}<br>{}</section>'.format(content, example)\n\n list_of_data = [\n title, # api title\n 'A', # type is article\n '', # no redirect data\n '', # ignore\n '', # no categories\n '', # ignore\n '', # no related topics\n '', # ignore\n '', # no external link\n '', # no disambiguation\n '', # images\n abstract, # abstract\n anchor # url to doc\n ]\n self.output_file.write('{}\\n'.format('\\t'.join(list_of_data)))", "def create_exclusions_file(output_file: str, verbosity: int) -> None:\n set_log_level(verbosity)\n\n with open(output_file, \"a\") as file_obj:\n for line in EXCLUSIONS_TEMPLATE:\n file_obj.write(line)\n utils.print_green(f\"Success! 
Exclusions template file written to: {output_file}\")\n print(\n \"Make sure you download your account authorization details before running the scan.\"\n \"Set your AWS access keys as environment variables then run: \"\n )\n print(\"\\tcloudsplaining download\")\n print(\"You can use this with the scan command as shown below: \")\n print(\n \"\\tcloudsplaining scan --exclusions-file exclusions.yml --input-file default.json\"\n )", "def build_file_from_response(self, response, **kwargs):\n kwargs.setdefault('file_name', response.request.meta['file_name'])\n kwargs.setdefault('url', response.request.url)\n kwargs.setdefault('data', response.body)\n return self.build_file(**kwargs)", "def process_output_file_write(output_file, response):\n\n with open(output_file, \"w\") as output_file:\n output_file.write(response)", "def _create_file(self):\n data = playercareerstats.PlayerCareerStats(player_id=self.player_id)\n data_json = data.get_dict()\n\n with open(self.filename, 'w') as f:\n json.dump(data_json, f, indent=4)", "def concat(res_file_path: str, input_file_path: str, output_file_path: str,\n LOG: Logger = None,\n is_filter_by_word: bool = False,\n is_filter_by_input: bool = False,\n is_filter_by_country: bool = False,\n writer = None\n ) -> None:\n # res_file\n if LOG is None:\n LOG, _ = Logger(\"test\").getlog()\n mark_file = word_filter(res_file_path)\n assert len(mark_file) != 0, \"文件: {} 过滤后为空!\".format(mark_file)\n LOG.info(\"爬取结果文件共{}条\".format(len(mark_file)))\n\n # TODO 对form_data进行去重。去重处理操作\n input_file = pd.read_csv(input_file_path, sep='\\t')\n assert len(input_file) != 0, \"输入文件为空,请检查输入文件\" + input_file_path\n LOG.info(\"输入文件共{}条\".format(len(input_file)))\n\n df = pd.merge(input_file, mark_file,\n how='left', left_on='REFERER', right_on='referer')\n df.sort_values(['HOST', 'CAPTURE_TIME'], inplace=True, ascending=False)\n assert len(df) != 0, \"爬取结果与输入文件合并后为空,请检查 {} 与 {}\".format(res_file_path, input_file_path)\n LOG.info(\"合并后文件共{}条\".format(len(df)))\n\n # 过滤掉没有爬取成功的\n total = len(df)\n df = df[-df[\"neg_words_label\"].isnull()]\n LOG.info(\"过滤掉未爬取成功的数据{}条\".format(total - len(df)))\n # 必须字符串化, 要不然后面无法去重\n df['neg_words_label'] = df['neg_words_label'].astype('str')\n df['pos_words_label'] = df['pos_words_label'].astype('str')\n\n _, file_name = get_file_name(output_file_path)\n print(file_name, output_file_path)\n df.drop_duplicates().to_csv(\"./datas/_{}.csv\".format(file_name), sep='\\t', index=False)\n now_num = len(df)\n if is_filter_by_word:\n df = df[df[\"neg_words_label\"] == \"{}\"]\n LOG.info(\"反向关键词过滤掉{}条数据\".format(now_num - len(df)))\n now_num = len(df)\n if is_filter_by_input:\n df = df[(df[\"pos_words_label\"] != \"{}\") | (df[\"input_num\"] > 0)]\n LOG.info(\"通过正向关键词与网站输入框数量过滤掉{}条数据\".format(now_num - len(df)))\n now_num = len(df)\n if is_filter_by_country:\n # 中文正则,使用title+keywords+description,剔除全外文网站\n # 该方法存在一种特殊情况,即当title,keywords、description都为空时,此时会判定为外文网站\n df['chinese_web'] = df.apply(lambda row: is_chinese_web(row), axis=1)\n df[df[\"chinese_web\"] == 0].to_csv(\"./tmp/{}_foreign\".format(file_name), sep='\\t', index=False)\n df = df[df[\"chinese_web\"] == 1]\n LOG.info(\"外文网站过滤掉{}条数据\".format(now_num - len(df)))\n # 0918 刘要求新增四列数据\n other_cols = ['label', 'remarks', 'capturetime', 'location']\n for col in other_cols:\n df[col] = ''\n now_num = len(df)\n df = df.drop_duplicates()\n LOG.info(\"去重过滤掉{}条数据\".format(now_num - len(df)))\n df.to_csv(output_file_path, sep='\\t', index=False)\n LOG.info(\"最终{}条数据\".format(len(df)))\n if writer:\n 
writer.write(\"最终数据\\t{}\".format(len(df)))\n writer.flush()\n df[\"HOST\"].drop_duplicates().to_csv(\"./datas/{}_hosts.csv\".format(file_name), sep='\\t', index=False)", "def create_data_file(self):\n return Trial.create_file(self.__output_file_name, DATA_FILE_TITLES)", "def build_file_from_response(self, response, **kwargs):\n kwargs.setdefault('file_name', response.request.meta['file_name'])\n kwargs.setdefault('url', response.request.url)\n if 'data' not in kwargs:\n body = response.body\n # https://tools.ietf.org/html/rfc7159#section-8.1\n # bytes instances don't have a removeprefix method.\n if body.startswith(codecs.BOM_UTF8):\n body = body[len(codecs.BOM_UTF8):]\n kwargs['data'] = body\n return self.build_file(**kwargs)", "def write_to_fasta(self, output_file):\n fw = FastaWriter(output_file)\n for file_path, template, complement in self.results:\n if template:\n header = \"{0} {1}\".format(file_path, \"template\")\n fw.write_entry(header, template)\n if complement:\n header = \"{0} {1}\".format(file_path, \"complement\")\n fw.write_entry(header, complement)", "def GenerateMain(root, treasuryFilters, cibFilters, validStatuses, asOfDate, headerMap): \n \n fileFilterMap = {\n 'constreasury':('.csv', treasuryFilters), \n 'cib_download':('.csv', cibFilters)\n }\n\n for key in fileFilterMap.keys(): \n ext = fileFilterMap[key][0] \n name = '{0}{1}'.format(key, ext)\n dateInclusiveName = '{0}_{1}{2}'.format(key, asOfDate.to_string('%Y%m%d'), ext)\n fullName = '{0}{1}'.format(root, name)\n\n outfile = open(fullName, 'w')\n try: \n isHeaderWritten = False\n totalRecordCount = 0\n for tradeFilter in fileFilterMap[key][1]:\n trades = ael.TradeFilter[tradeFilter].trades()\n totalRecordCount += len(trades)\n \n if(not isHeaderWritten):\n outfile.write(Lines(trades[0], \"h\", asOfDate))\n isHeaderWritten = True\n\n map(lambda trade: outfile.write(Lines(trade, \"l\", asOfDate)), \n filter(lambda t:t.status in validStatuses, trades)) \n headerMap.update({dateInclusiveName:'|{0}'.format(totalRecordCount)})\n \n print 'Wrote secondary output to: %s' %(fullName)\n except Exception, e:\n print 'Error writing output to: %s' %(fullName), e \n finally:\n outfile.close()", "def filter_file(temp_list, output_nm_filter):\n\tprint 'filter positions'\n\t#output = open(output_nm_filter, 'w')\n\t#output.write(\"Chr\\tbegin\\tend\\tnm_reads_begin\\tnm_reads_end\\n\")\n\twith open(output_nm_filter, 'a') as output:\n\t\ttemp_end = 0\n\t\tfor x in temp_list:\n\t\t\t#print 'x', x\n\t\t\tend = x[2]\n\t\t\tif temp_end != end:\n\t\t\t\t#print x\n\t\t\t\ttemp_end = end\n\n\t\t\t\toutput.write(\"%s\\tmiRNA_extractor.py\\tsRNA\\t%s\\t%s\\t.\\t%s\\t.\\t%s,%s,%s\\n\"%\n\t\t\t\t\t\t\t(x[0],x[1], x[2], \n\t\t\t\t\t\t\t\tx[3], x[4], x[5], x[6]))\n\n\t\t\t\t\t\n\n\t#output.close()\n\t#print temp", "def write_file(self, records):\n ...", "def _create_file(content=''):\r\n sjson_file = tempfile.NamedTemporaryFile(prefix=\"subs_\", suffix=\".srt.sjson\")\r\n sjson_file.content_type = 'application/json'\r\n sjson_file.write(textwrap.dedent(content))\r\n sjson_file.seek(0)\r\n return sjson_file" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print differences between reference and response in console
def print_diff(ref_file, resp_file): # open reference with open(ref_file) as reference_text: reference = reference_text.readlines() # open response with open(resp_file) as response_text: response = response_text.readlines() # Print failed test name print_color("\n\n" + str(file_name) + " failed :" + "\n\n", Colors.PINK) symbol2color = {"+": Colors.GREEN, "-": Colors.RED} for line in difflib.unified_diff(reference, response): print_color(line, symbol2color.get(line[0], Colors.DEFAULT))
[ "def compare_with_ref(\n self, response, response_checker=default_checker.default_journey_checker\n ):\n\n def ref_resp2files(output_file, output_json):\n \"\"\"\n Create a file for the filtered response and for the filtered reference\n \"\"\"\n with open(output_file, \"w\") as reference_text:\n reference_text.write(output_json)\n\n def print_diff(ref_file, resp_file):\n \"\"\"\n Print differences between reference and response in console\n \"\"\"\n # open reference\n with open(ref_file) as reference_text:\n reference = reference_text.readlines()\n # open response\n with open(resp_file) as response_text:\n response = response_text.readlines()\n\n # Print failed test name\n print_color(\"\\n\\n\" + str(file_name) + \" failed :\" + \"\\n\\n\", Colors.PINK)\n\n symbol2color = {\"+\": Colors.GREEN, \"-\": Colors.RED}\n for line in difflib.unified_diff(reference, response):\n print_color(line, symbol2color.get(line[0], Colors.DEFAULT))\n\n # Filtering the answer. (We compare to a reference also filtered with the same filter)\n filtered_response = response_checker.filter(response)\n\n # Get the reference\n\n # Create the file name\n filename = self.get_file_name()\n filepath = os.path.join(config[\"REFERENCE_FILE_PATH\"], filename)\n\n assert os.path.isfile(filepath), \"{} is not a file\".format(filepath)\n\n with open(filepath, \"r\") as f:\n raw_reference = f.read()\n\n # Transform the string into a dictionary\n dict_ref = json.loads(raw_reference)\n\n # Get only the full_response part from the ref\n ref_full_response = dict_ref[\"full_response\"]\n\n # Filtering the reference\n filtered_reference = response_checker.filter(ref_full_response)\n\n # Compare response and reference\n try:\n response_checker.compare(filtered_response, filtered_reference)\n except AssertionError as e:\n # print the assertion error message\n logging.error(\"Assertion Error: %s\" % str(e))\n # find name of test\n file_name = filename.split(\"/\")[-1]\n file_name = file_name[:-5]\n\n # create a folder\n dir_path = config[\"RESPONSE_FILE_PATH\"]\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n\n # create path to ref and resp\n full_file_name_ref = dir_path + \"/reference_\" + file_name + \".txt\"\n full_file_name_resp = dir_path + \"/response_\" + file_name + \".txt\"\n\n json_filtered_reference = json.dumps(filtered_reference, indent=4)\n json_filtered_response = json.dumps(filtered_response, indent=4)\n\n # Save resp and ref as txt files in folder named outputs\n ref_resp2files(full_file_name_ref, json_filtered_reference)\n ref_resp2files(full_file_name_resp, json_filtered_response)\n\n # Print difference in console\n print_diff(full_file_name_ref, full_file_name_resp)\n\n raise", "def debug_print(self, response):\n \n print('REQUEST:')\n method = response.request.method\n url = response.request.url\n body = json.dumps(json.loads(response.request.body), indent=4)\n print('{method} {url}'.format(method, url))\n print(body)\n\n print('RESPONSE:')\n content = json.dumps(json.loads(response.content), indent=4)\n print(content)", "def _print_debug(response):\n\n # Parse the URL to get the path and location header.\n # parsed_url is a tuple with the form: (scheme, netloc, path, query, fragment)\n parsed_url = urlsplit(response.request.url)\n host = parsed_url[1]\n path = urlunsplit(('', '', parsed_url[2], parsed_url[3], parsed_url[4]))\n\n # Print the request. 
Format the first line, headers and body according to HTTP standard\n print('')\n print('[Request]:')\n print(response.request.method + ' ' + path + ' HTTP/1.1')\n Client._print_debug_headers(response.request.headers)\n print('Host: ' + host)\n if response.request.body:\n print('')\n print(response.request.body)\n\n # Print the response\n print('')\n print('[Response]:')\n print('HTTP/1.1 ' + str(response.status_code))\n Client._print_debug_headers(response.headers)\n if response.text:\n print('')\n print(response.text)\n print('')", "def inspect_response(response):\n Shell(nofetch=True).inspect_response(response)", "def print_diff(ip, common, diff1, diff2):\n logging.info('IP: %s', ip)\n if common:\n common = [' {0}'.format(elem) for elem in common]\n logging.info('\\n'.join(common))\n if diff1:\n diff = ['+ {0}'.format(elem) for elem in diff1]\n logging.info('\\n'.join(diff))\n if diff2:\n diff = ['- {0}'.format(elem) for elem in diff2]\n logging.info('\\n'.join(diff))", "def print_diff_messages(self):\n pprint.pprint(self.diff_messages)", "def print_request_and_response(resp):\n\n # Get the request object for this API call.\n req = resp.request\n\n print(\"\\n######## {} ########\".format(time.asctime()))\n print(\"\\n{} {}\".format(req.method, req.url))\n print()\n for k, v in req.headers.items():\n print(\"{}: {}\".format(k, v))\n print()\n\n try:\n d = json.loads(req.body.decode(\"utf-8\"))\n print(json.dumps(d, sort_keys=True, indent=4))\n except AttributeError:\n # \"body\" is not present in request object.\n pass\n except:\n print(traceback.format_exc())\n\n try:\n status_str = http.HTTPStatus(resp.status_code).name\n except:\n status_str = \"UNKNOWN\"\n\n print(\"\\n{} {}\\n\".format(resp.status_code, status_str))\n for k, v in resp.headers.items():\n print(\"{}: {}\".format(k, v))\n print()\n\n if resp.content:\n try:\n d = resp.json()\n print(json.dumps(d, sort_keys=True, indent=4))\n except:\n print(resp.content)\n\n print(\"################\")", "def test_call_refget_api():\n\n # assert that json output matches expected response\n client = ENAClient(args=args_success_0)\n response_string = client.call_refget_api(sequence_id_0, 0)\n correct_output = open(\"testdata/response_1.json\", \"r\").read().rstrip()\n assert response_string == correct_output", "def print_repsonse(self, resp):\n print(\"Response: (result=%s)\" % \"SUCCESS\" if resp.success else \"FAIL\")\n if resp.order:\n o = resp.order\n print(\"\\torder_id: %s\" % o.order_id)\n print(\"\\ttrader_id: %s\" % o.trader_id)\n print(\"\\tquantity: %d\" % o.quantity)\n print(\"\\ttimestamp: %s\" % str(o.timestamp))\n print(\"\\tside: %s\" % str(o.side))\n print(\"\\tordertype: %s\" % str(o.ordertype))\n print(\"\\tticker: %s\" % o.ticker)\n print(\"\\tfilled: %d\" % o.filled)\n print(\"\\tis_executed: %s\" % str(o.is_executed))", "def print_request_response(request_response: json):\n print(\"Printing response:\")\n print(json.dumps(request_response, indent=4))", "def test_get_request_output(self):\n pass", "def do_showDiffWithServer( self, dummy ):\n try:\n diffData = self.modificator.showCurrentDiff()\n print \"Diff with latest from server ( + local - remote )\"\n for line in diffData:\n if line[0] in ( '-' ):\n print colorize( line, \"red\" )\n elif line[0] in ( '+' ):\n print colorize( line, \"green\" )\n elif line[0] in ( '?' 
):\n print colorize( line, \"yellow\" ),\n else:\n print line\n except:\n _showTraceback()", "def run_diagnostics(self):\n request = {\n 'jsonrpc': '2.0',\n 'id': 0,\n 'method': 'ping'\n }\n result = CurlTestBase.send_request('&diag=1', request)\n response = '<html><body><pre>'\n response += cgi.escape(result.content)\n response += '</pre></body></html>'\n self.response.out.write(response)", "def debug_html(label, response):\n\n print(\"\\n\\n\\n\", \"*********\", label, \"\\n\")\n print(response.data.decode('utf8'))\n print(\"\\n\\n\")", "def diff():\n print('SVN diff')", "def dump_request_and_response(response: Response) -> str:\n return _dump_request(response.request) + _dump_response(response)", "def get_raw_diff(self, review):\r\n return self.http_request('/r/%s/diff/raw/' % review, {})", "def print_debug(r): \n print r.headers, \"\\n\\n\", r.content", "def _printPreview(self,response,preview):\n \n if response is not None:\n if preview > 0: # TODO: take a (large enough) heading sub-string of response, and count lines on that)\n print \"RESULT PREVIEW (%d rows)\" % preview\n print response[:response.replace('\\n', '|', preview).find('\\n')] # print the response preview" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Process elements from the input queue until empty.
def run(self) -> None: while True: try: input_element = self.input_queue.get_nowait() self.process(input_element) except Empty: return
[ "def process_entire_queue(self):\r\n\t\twhile self.queue:\r\n\t\t\tself._dequeue()", "def process_entire_queue(self):\r\n while self.queue:\r\n self._dequeue()", "def _wait_empty(self):\n while True:\n if self.queue.empty():\n # We still have to wait for the last queue item being processed\n # (queue.empty() returns True before queue.task_done() is\n # called).\n self.queue.join()\n return\n time.sleep(1)", "def get_all_from_queue(Q):\n try:\n while True:\n yield Q.get_nowait( )\n except queue.Empty:\n raise StopIteration", "def get_all_from_queue(Q):\n try:\n while True:\n yield Q.get_nowait()\n except queue.Empty:\n raise StopIteration", "def get_all_from_queue(Q):\n try:\n while True:\n yield Q.get_nowait( )\n except Queue.Empty:\n raise StopIteration", "def get_all_from_queue(Q):\n try:\n while True:\n yield Q.get_nowait()\n except Queue.Empty:\n raise StopIteration", "def drainQueue(q):\n buf = []\n while True:\n # Get as much as possible without blocking\n try:\n while True:\n item = q.get_nowait()\n if item is None:\n return buf\n else:\n buf.append(item)\n except Queue.Empty:\n pass\n\n if buf:\n return buf\n\n # Nothing in the queue. Block for\n # one item, then go back and get any\n # that we can without blocking.\n item = q.get()\n if item is None:\n return buf\n else:\n buf.append(item)", "def _process_event_queue(self):\n while not self._event_queue.empty():\n event = self._event_queue.get()\n self._process_event(event)\n return None", "def worker(self):\n while True:\n item,index = self.inbound.get()\n if index is None:\n self.buffer.append(item)\n self.index.value = self.index.value + 1 #index of next item for buffer\n if len(self.buffer)>self.size:\n del self.buffer[0]\n self.newitem.put(None)\n else:\n self.buffer[len(self.buffer)+(index - self.index.value)] = item", "def process_queue(self, queue):\n\n while queue:\n deferred, data = queue.popleft()\n deferred.callback(data)", "def queue_input(self):\n while self.process.poll() is None:\n line = self.process.stdout.readline()\n out = line.rstrip()\n if out != '':\n self.queue = out", "def requeue(self):", "def wait_until_empty(self):\n while not self._message_queue.empty():\n time.sleep(0.05) # Wait for 50ms", "def queue_loader(self, queue):\n for item in self.iterator():\n try:\n converted_item = self.converter(item)\n valid_item = self.validator(converted_item)\n except Exception as e:\n print(type(e), e)\n continue\n queue.put(valid_item)\n while queue.qsize() > 100:\n sleep(0.2)", "def in_queue_empty(self):\n pass", "def queue_ex():\n queue = []\n queue.append(1)\n queue.append(2)\n queue.append(3)\n\n print(\"QUEUE:\", queue)\n # output은 앞에서부터 : 0번 인덱스\n print(queue.pop(0))\n\n while(len(queue) > 0):\n print(\"Queue item:\", queue.pop(0))", "def processIncoming(self):\n while self.queue.qsize( ):\n try:\n msg = self.queue.get(0)\n # Check contents of message and do whatever is needed. As a\n # simple test, print it (in real life, you would\n # suitably update the GUI's display in a richer fashion).\n print (msg)\n except queue.Empty:\n # just on general principles, although we don't\n # expect this branch to be taken in this case\n pass", "def checkQueue( self ):\n if self.queue:\n yield self.writeToSerial( self.queue.pop( 0 ) )\n else:\n self.free = True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Process every input using the given worker class.
def multiprocess(inputs: list, worker_class: Any, num_threads: int = 40): input_queue = Queue() # type: ignore output_queue = Queue() # type: ignore for input_elm in inputs: input_queue.put(input_elm) threads = [worker_class(input_queue, output_queue) for _ in range(num_threads)] for thread in threads: thread.start() for thread in threads: thread.join() return get_all_nowait(output_queue)
[ "def processInputs(self):", "def process_inputs(self, inputs):", "def run(self):\n self.class_inst_obj.processor(self.msg)", "def run(self) -> None:\n\n while True:\n try:\n input_element = self.input_queue.get_nowait()\n self.process(input_element)\n except Empty:\n return", "def process(self, input, output):\n pass", "def all_reduce_worker(self, input, output):\n pass", "def process_tasks(self):\n raise NotImplementedError", "def process(self, results):\n raise NotImplementedError", "def worker_handler(self, task_info={}):\n raise NotImplementedError", "def worker(self, request):\n try:\n for processor in self.processors:\n if processor.accepted(request):\n processor.process(request)\n except Exception as e:\n #TODO print e\n print e\n pass\n finally:\n #waiter be awakened\n request.notify()", "def homogeneous_worker_distribution_chunk_by_class(self, chunk_data, dic_current_hyperboxes, nprocs):\n dic_results = dic_current_hyperboxes\n with ProcessPoolExecutor(max_workers=nprocs) as executor:\n for key in chunk_data:\n futures = []\n # get list of current hyperboxes or initialize empty list if not exist list or input key\n if len(dic_current_hyperboxes) > 0 and (key in dic_current_hyperboxes):\n boxes = dic_current_hyperboxes[key]\n else:\n boxes = np.empty(nprocs, dtype=Bunch)\n for j in range(nprocs):\n boxes[j] = Bunch(lower=np.array([]), upper=np.array([]), classId=np.array([]), no_pat=0, centroid=np.array([]))\n \n values = chunk_data[key]\n num_samples = len(values.data)\n if num_samples >= nprocs:\n chunksize = int(math.ceil(num_samples / float(nprocs)))\n \n for i in range(nprocs):\n X_l = values.data[(chunksize * i) : (chunksize * (i + 1))]\n X_u = values.data[(chunksize * i) : (chunksize * (i + 1))]\n patClassId = values.label[(chunksize * i) : (chunksize * (i + 1))]\n X_l = np.where(np.isnan(X_l), 1, X_l)\n X_u = np.where(np.isnan(X_u), 0, X_u)\n \n futures.append(executor.submit(self.homogeneous_hyperbox_expansion, X_l, X_u, patClassId, boxes[i]))\n \n else:\n futures.append(executor.submit(self.homogeneous_hyperbox_expansion, values, boxes[0]))\n \n # Instruct workers to process results as they come, when all are completed\n as_completed(futures) # wait all workers completed\n lst_current_boxes = []\n for future in futures:\n lst_current_boxes.append(future.result())\n \n dic_results[key] = lst_current_boxes\n \n return dic_results", "def train_process(self):\n raise NotImplementedError", "def _spawn_workers(self):\n self._event.set()\n self._workers = [ClassifierWorker(self._event, self._queue, self._results) for x in range(self._NUM_WORKERS)]\n [worker.start() for worker in self._workers]", "def _map_to_workers(self, iterable, result_getter):\n if not self.is_started:\n raise RuntimeError(\"Cannot process inputs: must call start() first.\")\n\n tasks = TaskIterator(iterable)\n task = next(tasks)\n\n while True:\n try:\n self._send_task(task)\n task = next(tasks)\n except Queue.Full:\n for result in result_getter(): # I wish I had `yield from` :(\n yield result\n except StopIteration:\n break\n\n while not self.is_completed:\n for result in result_getter():\n yield result", "def process_classes(self):\n classes = [cls for classes in self.class_map.values() for cls in classes]\n class_num, inner_num = self.count_classes(classes)\n if class_num:\n logger.info(\n \"Analyzer input: %d main and %d inner classes\", class_num, inner_num\n )\n self.assign_packages()\n\n classes = self.analyze_classes(classes)\n class_num, inner_num = self.count_classes(classes)\n logger.info(\n 
\"Analyzer output: %d main and %d inner classes\", class_num, inner_num\n )\n\n writer = CodeWriter.from_config(self.config)\n if self.print:\n writer.print(classes)\n else:\n writer.write(classes)\n else:\n raise CodeGenerationError(\"Nothing to generate.\")", "def process_class_list(self, module, classes):", "def process(self, handler_input):\n # type: (HandlerInput) -> None\n raise NotImplementedError", "def process(self):\n for scan_request in self.scan_requests:\n scan_request.group.process(scan_request)", "def run_all_classifiers(self):\n\t\tself.classifier_processes.run_all_worker_processes(self.queue)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function allows modification of the current user's information. There is a feature for changing the default picture that is assigned during registration of a new user. The picture change is handled by the save_image() function located in `utils.py`, where the name of the original picture file is processed and then saved. new_project_form in the render_template() return is intentionally located here to allow rendering of the tasks.new_project_2 function
def account(): form = UpdateAccountForm() new_project_form = ProjectForm() if form.validate_on_submit(): if form.picture.data: # if statement responsible for change of default picture picture_file = save_image(form.picture.data) current_user.img_file = picture_file current_user.user_name = form.user_name.data current_user.email = form.email.data db.session.commit() flash("Changes saved", "success") return redirect(url_for('users.account')) elif request.method == "GET": form.user_name.data = current_user.user_name form.email.data = current_user.email img_file = url_for('static', filename='images/' + current_user.img_file) return render_template('account.html', title="Account", form=form, img_file=img_file, new_project_form=new_project_form)
[ "def update_img(form):\n picture_folder = 'static/profile_pics'\n picture_path = picture_folder + '/' + current_user.image_file\n if current_user.image_file != \"default.jpg\":\n delete_picture(picture_path)\n current_user.image_file = save_picture(form.picture.data, picture_folder)", "def change_profile_img(self):\n get_photo = reddit_scrapper()\n get_photo.get_image()\n # Send image to instagram profile picture on the hidden input tag\n profile_pic_button = self.driver.find_elements_by_xpath(\n '//*[@id=\"react-root\"]/section/main/section/div[3]/div[1]/div[2]/form/input')[0].send_keys(os.getcwd() + '/daily_image/daily.jpg')\n\n time.sleep(1)\n save_profile_pic = self.driver.find_elements_by_xpath(\n '//button[contains(text(), \"Save\")]')[0].click()\n time.sleep(1)\n self.driver.get(base_url)", "def change_picture(request):\n if request.method == 'POST':\n user_dp = request.FILES.get('dp')\n if check_image_extension(user_dp.name):\n profile_form = Profile.objects.get(user=request.user)\n profile_form.dp = user_dp\n profile_form.save()\n msg_txt = \"\"\"\n <p>Your profile picture is successfully saved. <a href=\"/\" class=\"alert-link\">Go to homepage</a></p>\n \"\"\"\n messages.success(request, msg_txt)\n else:\n msg_txt = \"\"\"\n <p>\n Filetype not supported. Please use .jpg or .png filetypes.\n <a href=\"/\" class=\"alert-link\">Go to homepage</a>\n </p>\n \"\"\"\n messages.warning(request, msg_txt)\n return redirect('picture_change')\n\n return redirect('user_profile', username=request.user.username)\n else:\n profile_form = ProfileEditForm(instance=request.user.profile)\n return render(request, 'users/change_picture.html', {'profile_form': profile_form})", "def edit_player(request):\n # Get user\n gb_user = get_user_info(request)\n\n # Get ready with the data to send to the front end\n data = dict()\n\n edit_form = EditForm(gb_user.user.username)\n # Only do things if the user has submitted data\n if request.method == \"POST\":\n # Create the form using the request\n edit_form = EditForm(gb_user.user.username, request.POST)\n\n profile_image = None\n if \"photo\" in request.FILES:\n profile_image = request.FILES['photo']\n # profile_image = request.FILES['profile_image']\n # Check if the data is valid\n if edit_form.is_valid():\n # Get the relevant cleaned data for creating a user\n username = edit_form.cleaned_data['username']\n first_name = edit_form.cleaned_data['first_name']\n last_name = edit_form.cleaned_data['last_name']\n password = edit_form.cleaned_data['password']\n\n # print(\"profile image is \",username, profile_image)\n\n if username:\n gb_user.user.username = username\n if first_name:\n gb_user.user.first_name = first_name\n if last_name:\n gb_user.user.last_name = last_name\n if password:\n gb_user.user.set_password(password)\n if profile_image:\n gb_user.profile_image = profile_image\n gb_user.save()\n gb_user.user.save()\n # print(\"users are \",account.objects.get(username=\"Keegan\"))\n login(request, gb_user.user)\n return HttpResponseRedirect(reverse(index))\n\n data['edit_player_form'] = edit_form\n return render(request, \"edit_player.html\", data)", "def edit_nutri_profile(request):\n profile = request.user.profile\n nutriologist = request.user.profile.nutriologist\n user = request.user\n\n if request.method == 'POST':\n form = NutriologistForm(request.POST,request.FILES)\n if form.is_valid():\n data = form.cleaned_data\n\n profile.picture = data['picture']\n \n user.username = data['username']\n user.email = data['email']\n nutriologist.attention_days = 
data['attention_days']\n nutriologist.attention_hours = data['attention_hours']\n nutriologist.age = data['age']\n nutriologist.work_approach = data['work_approach']\n nutriologist.cedula_prof_det = data['cedula_prof_det']\n nutriologist.biography = data['biography']\n\n user.save()\n if profile.picture:\n profile.save()\n nutriologist.save()\n\n return redirect('profileNutriEdit')\n else:\n form = NutriologistForm()\n\n return render( request=request, \n template_name='users/profile_nutri_edit_form_view.html', \n context={\n 'form':form\n })", "def test_picture(self):\n self.add_user(self.TESTUSER)\n\n # Test the default picture\n with app.test_request_context():\n app.preprocess_request()\n new_picture = streck.models.user.User(self.TESTUSER['barcode']).picture()\n assert new_picture == '../img/NoneUser.png'\n\n # Update a user\n rv = self.app.post('/admin/user/%s/update' % self.TESTUSER['barcode'], data=dict(\n picture=(StringIO(unhexlify(self.IMAGE)), 'picture.png')\n ), buffered=True, follow_redirects=True)\n with app.test_request_context():\n app.preprocess_request()\n new_picture = streck.models.user.User(self.TESTUSER['barcode']).picture()\n assert new_picture != '../img/NoneUser.png'\n\n # Compare the resulting picture\n rv = self.app.get('/images/%s' % new_picture)\n assert rv.status_code == 200", "def update_profile(name):\r\n user = User.query.filter_by(name=name).first()\r\n if not user:\r\n return abort(404)\r\n if current_user.id != user.id:\r\n return abort(403)\r\n show_passwd_form = True\r\n if user.twitter_user_id or user.google_user_id or user.facebook_user_id:\r\n show_passwd_form = False\r\n usr, apps, apps_created = cached_users.get_user_summary(name)\r\n # Extend the values\r\n current_user.rank = usr.get('rank')\r\n current_user.score = usr.get('score')\r\n # Title page\r\n title_msg = \"Update your profile: %s\" % current_user.fullname\r\n # Creation of forms\r\n update_form = UpdateProfileForm(obj=user)\r\n update_form.set_locales(current_app.config['LOCALES'])\r\n avatar_form = AvatarUploadForm()\r\n password_form = ChangePasswordForm()\r\n external_form = update_form\r\n\r\n\r\n if request.method == 'GET':\r\n return render_template('account/update.html',\r\n title=title_msg,\r\n user=usr,\r\n form=update_form,\r\n upload_form=avatar_form,\r\n password_form=password_form,\r\n external_form=external_form,\r\n show_passwd_form=show_passwd_form)\r\n else:\r\n # Update user avatar\r\n if request.form.get('btn') == 'Upload':\r\n avatar_form = AvatarUploadForm()\r\n if avatar_form.validate_on_submit():\r\n file = request.files['avatar']\r\n coordinates = (avatar_form.x1.data, avatar_form.y1.data,\r\n avatar_form.x2.data, avatar_form.y2.data)\r\n prefix = time.time()\r\n file.filename = \"%s_avatar.png\" % prefix\r\n container = \"user_%s\" % current_user.id\r\n uploader.upload_file(file,\r\n container=container,\r\n coordinates=coordinates)\r\n # Delete previous avatar from storage\r\n if current_user.info.get('avatar'):\r\n uploader.delete_file(current_user.info['avatar'], container)\r\n current_user.info = {'avatar': file.filename,\r\n 'container': container}\r\n db.session.commit()\r\n cached_users.delete_user_summary(current_user.name)\r\n flash(gettext('Your avatar has been updated! 
It may \\\r\n take some minutes to refresh...'), 'success')\r\n return redirect(url_for('.update_profile', name=current_user.name))\r\n else:\r\n flash(\"You have to provide an image file to update your avatar\",\r\n \"error\")\r\n return render_template('/account/update.html',\r\n form=update_form,\r\n upload_form=avatar_form,\r\n password_form=password_form,\r\n external_form=external_form,\r\n title=title_msg,\r\n show_passwd_form=show_passwd_form)\r\n # Update user profile\r\n elif request.form.get('btn') == 'Profile':\r\n update_form = UpdateProfileForm()\r\n update_form.set_locales(current_app.config['LOCALES'])\r\n if update_form.validate():\r\n current_user.id = update_form.id.data\r\n current_user.fullname = update_form.fullname.data\r\n current_user.name = update_form.name.data\r\n current_user.email_addr = update_form.email_addr.data\r\n current_user.privacy_mode = update_form.privacy_mode.data\r\n current_user.locale = update_form.locale.data\r\n db.session.commit()\r\n cached_users.delete_user_summary(current_user.name)\r\n flash(gettext('Your profile has been updated!'), 'success')\r\n return redirect(url_for('.update_profile', name=current_user.name))\r\n else:\r\n flash(gettext('Please correct the errors'), 'error')\r\n title_msg = 'Update your profile: %s' % current_user.fullname\r\n return render_template('/account/update.html',\r\n form=update_form,\r\n upload_form=avatar_form,\r\n password_form=password_form,\r\n external_form=external_form,\r\n title=title_msg,\r\n show_passwd_form=show_passwd_form)\r\n\r\n # Update user password\r\n elif request.form.get('btn') == 'Password':\r\n # Update the data because passing it in the constructor does not work\r\n update_form.name.data = user.name\r\n update_form.fullname.data = user.fullname\r\n update_form.email_addr.data = user.email_addr\r\n update_form.ckan_api.data = user.ckan_api\r\n external_form = update_form\r\n if password_form.validate_on_submit():\r\n user = db.session.query(model.user.User).get(current_user.id)\r\n if user.check_password(password_form.current_password.data):\r\n user.set_password(password_form.new_password.data)\r\n db.session.add(user)\r\n db.session.commit()\r\n flash(gettext('Yay, you changed your password succesfully!'),\r\n 'success')\r\n return redirect(url_for('.update_profile', name=name))\r\n else:\r\n msg = gettext(\"Your current password doesn't match the \"\r\n \"one in our records\")\r\n flash(msg, 'error')\r\n return render_template('/account/update.html',\r\n form=update_form,\r\n upload_form=avatar_form,\r\n password_form=password_form,\r\n external_form=external_form,\r\n title=title_msg,\r\n show_passwd_form=show_passwd_form)\r\n else:\r\n flash(gettext('Please correct the errors'), 'error')\r\n return render_template('/account/update.html',\r\n form=update_form,\r\n upload_form=avatar_form,\r\n password_form=password_form,\r\n external_form=external_form,\r\n title=title_msg,\r\n show_passwd_form=show_passwd_form)\r\n # Update user external services\r\n elif request.form.get('btn') == 'External':\r\n del external_form.locale\r\n del external_form.email_addr\r\n del external_form.fullname\r\n del external_form.name\r\n if external_form.validate():\r\n current_user.ckan_api = external_form.ckan_api.data or None\r\n db.session.commit()\r\n cached_users.delete_user_summary(current_user.name)\r\n flash(gettext('Your profile has been updated!'), 'success')\r\n return redirect(url_for('.update_profile', name=current_user.name))\r\n else:\r\n flash(gettext('Please correct the errors'), 
'error')\r\n title_msg = 'Update your profile: %s' % current_user.fullname\r\n return render_template('/account/update.html',\r\n form=update_form,\r\n upload_form=avatar_form,\r\n password_form=password_form,\r\n external_form=external_form,\r\n title=title_msg,\r\n show_passwd_form=show_passwd_form)\r\n # Otherwise return 415\r\n else:\r\n return abort(415)", "def _save_new_user(self):\n self._new_user = self._app.create_projectmanager()\n self._new_user.name = self._entry_user_name.get()\n\n self._build_listboxes_gui()\n self._disable_bttns()\n self._new_user_window.destroy()", "def _add_profile_image(self):\r\n self.profile_image_is_set = True\r\n file_name = filedialog.askopenfilename(initialdir=\"/\", title=self.language.refactor(\"Select GIF file\"),\r\n filetypes=((\"GIF files\", \"*.gif\"),))\r\n if file_name == '':\r\n self.new_user_window.lift()\r\n return\r\n\r\n self.add_profile_gif_button.destroy()\r\n gif_canvas = Ctk.CCanvas(self.new_user_window, corners='angular', size=(180, 180),\r\n bg=self.new_user_window['background'])\r\n gif_canvas.create_gif(gif_path=file_name, corner='round', size=(175, 175), pos=(90, 90),\r\n transparent=True, speed='normal')\r\n gif_canvas.place(*(15, 50))\r\n\r\n self.gif_file_path = file_name\r\n\r\n self.new_user_window.lift()", "async def replace_user_project(\n self,\n new_project_data: Dict[str, Any],\n user_id: int,\n project_uuid: str,\n include_templates: Optional[bool] = False,\n ) -> Dict[str, Any]:\n log.info(\"Updating project %s for user %s\", project_uuid, user_id)\n\n async with self.engine.acquire() as conn:\n async with conn.begin() as _transaction:\n current_project: Dict = await self._get_project(\n conn,\n user_id,\n project_uuid,\n exclude_foreign=[\"tags\"],\n include_templates=include_templates,\n for_update=True,\n )\n user_groups: List[RowProxy] = await self.__load_user_groups(\n conn, user_id\n )\n _check_project_permissions(\n current_project, user_id, user_groups, \"write\"\n )\n # uuid can ONLY be set upon creation\n if current_project[\"uuid\"] != new_project_data[\"uuid\"]:\n raise ProjectInvalidRightsError(user_id, new_project_data[\"uuid\"])\n # ensure the prj owner is always in the access rights\n owner_primary_gid = await self._get_user_primary_group_gid(\n conn, current_project[projects.c.prj_owner.key]\n )\n new_project_data.setdefault(\"accessRights\", {}).update(\n _create_project_access_rights(\n owner_primary_gid, ProjectAccessRights.OWNER\n )\n )\n\n # update the workbench\n def _update_workbench(\n old_project: Dict[str, Any], new_project: Dict[str, Any]\n ) -> None:\n # any non set entry in the new workbench is taken from the old one if available\n old_workbench = old_project[\"workbench\"]\n new_workbench = new_project[\"workbench\"]\n for node_key, node in new_workbench.items():\n old_node = old_workbench.get(node_key)\n if not old_node:\n continue\n for prop in old_node:\n # check if the key is missing in the new node\n if prop not in node:\n # use the old value\n node[prop] = old_node[prop]\n return new_project\n\n _update_workbench(current_project, new_project_data)\n\n # update timestamps\n new_project_data[\"lastChangeDate\"] = now_str()\n\n # now update it\n\n log.debug(\"DB updating with %s\", pformat(new_project_data))\n result = await conn.execute(\n # pylint: disable=no-value-for-parameter\n projects.update()\n .values(**_convert_to_db_names(new_project_data))\n .where(projects.c.id == current_project[projects.c.id.key])\n .returning(literal_column(\"*\"))\n )\n project: RowProxy = await 
result.fetchone()\n log.debug(\"DB updated returned %s\", pformat(project))\n user_email = await self._get_user_email(conn, project.prj_owner)\n\n tags = await self._get_tags_by_project(\n conn, project_id=project[projects.c.id]\n )\n return _convert_to_schema_names(project, user_email, tags=tags)", "def save_image(request):\n if request.method == 'POST':\n if not request.FILES:\n messages.error(request, 'No image selected.')\n return redirect(request.META.get('HTTP_REFERER'))\n\n data = request.POST\n upload_type = data['image-upload-type']\n upload_file = request.FILES['image']\n upload_id = data['image-upload-identifier']\n\n if upload_type not in VALID_UPLOAD_TYPES:\n messages.error(request, 'Wrong upload type used.')\n return redirect(request.META.get('HTTP_REFERER'))\n\n if upload_type == 'profile_image':\n user = CustomUser.objects.get(id=request.user.id)\n user.profile_image = image_to_base64str(upload_file)\n user.save()\n elif upload_type == 'header_image':\n team = HackTeam.objects.get(id=upload_id)\n team.header_image = image_to_base64str(upload_file)\n team.save()\n elif upload_type == 'project_image':\n project = HackProject.objects.get(id=upload_id)\n project.project_image = image_to_base64str(upload_file)\n project.save()\n elif upload_type == 'screenshot':\n project = HackProject.objects.get(id=upload_id)\n project.screenshot = image_to_base64str(upload_file)\n project.save()\n elif upload_type == 'hackathon_image':\n hackathon = Hackathon.objects.get(id=upload_id)\n hackathon.hackathon_image = image_to_base64str(upload_file)\n hackathon.save()\n\n messages.success(request, 'Image uploaded successfully.')\n return redirect(request.META.get('HTTP_REFERER'))\n\n else:\n return HttpResponseBadRequest()", "def update_picture(self, username, picture):\n self.update(('Picture', picture), username)", "def profile_pic(self, client_file_storage):\n\n # If we already have a profile picture, remove it\n if self.profile_pic_filename:\n filepath = os.path.join(\n current_app.config['UPLOADED_IMAGES_DEST'],\n self.profile_pic_filename)\n os.remove(filepath)\n self.profile_pic_filename = None\n self.profile_pic_url = None\n\n # This uploads & saves the file on the server\n # NOTE: It uses the secure_filename function...\n server_filename = images.save(client_file_storage)\n\n # Generate the URL to this file\n url = images.url(server_filename)\n\n # Store information with the user\n self.profile_pic_filename = server_filename\n self.profile_pic_url = url", "def account():\n \n form = UpdateAccountForm()\n \n # perform actions when the form is submitted\n if form.validate_on_submit():\n # checking if the form contains a picture file\n if form.picture.data:\n picture_file = save_picture(form.picture.data)\n current_user.image_file = picture_file\n # changing the current user details with the form data\n current_user.username = form.username.data\n current_user.email = form.email.data\n db.session.commit()\n flash('Your account has been updated!', 'success')\n return redirect(url_for('account'))\n # performs action if the form method is get\n elif request.method == 'GET':\n # setting the form data with the user data from the database\n form.username.data = current_user.username\n form.email.data = current_user.email\n image_file = url_for('static', filename='profile_pics/' + current_user.image_file)\n return render_template('account.html', title='Account',\n image_file=image_file, form=form)", "async def update_user_image(\n id: str,\n profile_image: UploadFile = File(...),\n current_user: 
UserInDB = Depends(get_current_user),\n):\n\n extension = profile_image.filename.split(\".\")[-1]\n\n app_dirs.USER_DIR.joinpath(id).mkdir(parents=True, exist_ok=True)\n\n try:\n [x.unlink() for x in app_dirs.USER_DIR.join(id).glob(\"profile_image.*\")]\n except:\n pass\n\n dest = app_dirs.USER_DIR.joinpath(id, f\"profile_image.{extension}\")\n\n with dest.open(\"wb\") as buffer:\n shutil.copyfileobj(profile_image.file, buffer)\n\n if dest.is_file:\n return SnackResponse.success(\"File uploaded\")\n else:\n return SnackResponse.error(\"Failure uploading file\")", "def getUserProfilePic(user):", "def adduserimage():\n # Ensure that request contains file\n if 'file' not in request.files:\n return jsonify(success=False)\n # Get file and filename from request\n file = request.files['file']\n filename = secure_filename(file.filename)\n # Get blob for profile image\n bucket = sc.bucket(Config.BUCKET_NAME)\n blob = bucket.blob(f\"{Config.PROFILE_IMAGES_PATH}/{filename}\")\n # Upload new profile image\n blob.upload_from_file(file)\n # Make file public\n blob.make_public()\n # Set content disposition for PNG and disable caching for quick update\n blob.content_disposition = 'image/png'\n blob.cache_control = 'no-cache'\n blob.patch()\n # Return success\n return jsonify(success=True)", "def save_model(self, request, obj, form, change):\n if change:\n user = UserProfile.objects.get(user=obj.codigo_user)\n photo = Foto.objects.get(id=obj.id)\n if photo.categoria != obj.categoria:\n data = {'user_firstname': obj.codigo_user.first_name,\n 'photo_title':obj.titulo,\n 'email': obj.codigo_user.email,\n 'mydomain': request.META['HTTP_HOST'],\n 'encryptedUsername': urlsafe_b64encode(str(obj.codigo_user.id))}\n if obj.codigo_user.get_profile().idioma == u'en':\n subject = u\"Machu Picchu 100 – Your photo has been recategorized\"\n data['category_name'] = obj.categoria.nombre\n template='a-photo-recategorized_en.html'\n else:\n subject = u\"Machu Picchu 100 – Hemos cambiado la categoría de tu foto\"\n data['category_name'] = obj.categoria.nombre_espaniol\n template='a-photo-recategorized_es.html'\n if user.accept_email_updates:\n sendHtmlMail(\"info@machu-picchu100.com\", subject,\n template,\n data, obj.codigo_user.email)\n\n #if obj.panoramica and not photo.panoramica:\n # user = UserProfile.objects.get(user=obj.codigo_user)\n # if user.idioma == u\"es\":\n # subject = __(u\"Su foto ha sido marcada como panorámica\")\n # data = (obj.codigo_user.get_full_name(),\n # obj.titulo,)\n # send_html_mail(subject, \"cambio_panoramica_es.html\", data,\n # \"info@machu-picchu100.com\",\n # [obj.codigo_user.email])\n # else:\n # subject = __(u\"Su foto ha sido marcada como panorámica\")\n # data = (obj.codigo_user.get_full_name(),\n # obj.titulo,)\n # send_html_mail(subject, \"cambio_panoramica_en.html\", data,\n # \"info@machu-picchu100.com\",\n # [obj.codigo_user.email])\n\n obj.save()", "def select_default_picture(sender, instance, **kwargs):\n if not instance.id:\n instance.picture = \"/static/user%s.png\"%(\"F\" if instance.female else \"M\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that renders a form for email input, which is the destination of the utils.send_reset_email function responsible for sending the user an email with a token that is valid for a specific period of time and resets the user's password
def reset_password(): if current_user.is_authenticated: return redirect(url_for('main.home')) form = RequestResetForm() if form.validate_on_submit(): user = User.query.filter_by(email=form.email.data).first() send_reset_email(user) # located in utils.py flash('An email has been sent with instruction to reset your password', 'info') return redirect(url_for('users.login')) return render_template('reset_password_request.html', form=form)
[ "def send_password_reset():\r\n form = EmailConfirmationForm()\r\n if form.validate_on_submit():\r\n email = form.email.data\r\n user = verify_email(email)\r\n if user:\r\n token = secrets.token_urlsafe(32)\r\n url = os.environ.get('URL','http://127.0.0.1:5000/password?reset=') + token\r\n body = f\"\"\"Hello, This Is The Password Reset Code You Asked For. \r\n Type this into the confirmation box on the page to verify and reset your password\\n \r\n {url}\"\"\"\r\n subject = \"Password Reset Confirmation Code\" \r\n msg = Message(recipients=[email],body=body,subject=subject)\r\n mail.send(msg)\r\n user.reset_token = token\r\n db.session.commit()\r\n flash(\"Code Has Been Sent and Should Be In Your Email Shortly\",\"alert-success\")\r\n return redirect(\"/password/forgot\")\r\n else:\r\n message = common_flashes(\"missing_user\")\r\n flash(message[0],message[1])\r\n return redirect(\"/login\")\r\n return render_template(\"email_form.html\",form=form)", "def password_reset_token_created(sender, reset_password_token, *args, **kwargs):\n # send an e-mail to the user\n context = {\n 'current_user': reset_password_token.user,\n 'username': reset_password_token.user.username,\n 'email': reset_password_token.user.email,\n 'reset_password_url': \"{}?token={}\".format(\"https://ucalendar.dcc.uchile.cl/login/reset/\", reset_password_token.key)\n }\n\n print(f\"[RESET PASSWORD REQUEST] { context['username'] } {context['email']} {context['current_user']}\")\n \n # # render email text\n # email_html_message = render_to_string('email/user_reset_password.html', context)\n email_plaintext_message = render_to_string('email/user_reset_password.txt', context)\n\n msg = EmailMultiAlternatives(\n # title:\n \"Password Reset for {}\".format(\"U- Calendar\"),\n # message:\n email_plaintext_message,\n # from:\n \"noreply@{}\".format(\"ucalendar.dcc.uchile.cl\"),\n # to:\n [reset_password_token.user.email]\n )\n # msg.attach_alternative(email_html_message, \"text/html\")\n msg.send()", "def send_reset_email(self):\n token = self.get_reset_token()\n msg = MailMessage(\n \"Password Reset Request\",\n recipients=[self.email],\n )\n msg.body = f\"\"\"To reset your password, visit the following link:\n\n{url_for('users.reset_token', token=token, _external=True)}\n\nIf you did not make this request, please ignore this email.\n\"\"\"\n mail.send(msg)", "def reset_token():\n\n form = forms.TokenResetForm(request.form, url_for_with_prefix('app_router.' 
+ inspect.stack()[0][3]))\n\n context = {\n 'form': form\n }\n\n if form.validate_on_submit():\n context['success'] = True\n user = User.query.filter_by(email=form.email.data).first()\n if user:\n user.create_tmp_token()\n emailer.NoReply.token_reset(user).send()\n\n return render_template_wctx('pages/reset_token.html', context=context)", "def password_reset(request):\n\n\tcontext_dict = {}\n\tif request.method == 'POST':\n\t\temail = request.POST.get('email')\n\t\tif email:\n\t\t\tuser = models.Teacher.objects.get(\n\t\t\t\tsoft_delete=False, user__email=email\n\t\t\t)\n\t\t\tif not user:\n\t\t\t\tcontext_dict[\"message\"] = \"Email ID does'nt exist, Enter Correct details\"\n\t\t\tmail = {\n\t\t\t\t'email': email,\n\t\t\t\t'domain': request.META['HTTP_HOST'],\n\t\t\t\t'site_name': 'Placement Portal',\n\t\t\t\t'uid': urlsafe_base64_encode(force_bytes(user.pk)),\n\t\t\t\t'user': user,\n\t\t\t\t'token': ''.join([random.choice(ascii_letters+digits) for i in range (128)]),\n\t\t\t\t'protocol': 'http',\n\t\t\t}\n\t\t\ttry:\n\t\t\t\treset_token = models.PasswordReset(\n\t\t\t\t\tuser=user,\n\t\t\t\t\ttoken=mail['token'],\n\t\t\t\t\ttoken_consumed=False,\n\t\t\t\t)\n\t\t\t\treset_token.save()\n\t\t\texcept Exception as e:\n\t\t\t\tprint (e)\n\t\t\tsubject_template_name = 'password_reset_email_subject.txt'\n\t\t\temail_template_name = 'password_reset_email.html'\n\t\t\tsubject = loader.render_to_string(subject_template_name, mail)\n\t\t\tsubject = ''.join(subject.splitlines())\n\t\t\temail_data = loader.render_to_string(email_template_name, mail)\n\t\t\tsend_mail(subject, email_data, DEFAULT_FROM_EMAIL, [email], fail_silently=False)\n\t\t\tcontext_dict[\"message\"] = \"Email has been sent to your registered Email ID with instructions.\"\n\treturn render(request, \"password_reset_form.html\", context_dict)", "def send_recovery_password_email(token: str, email: str) -> None:\n\n # TODO ...\n # Load html templates and get the content from it.\n # html_content = ...\n\n # You must have to send this as a anchor\n # to my-domain.com/reset-password?token=ad5a....\n link = f\"{SERVER_HOST}/reset-password?token={token}\"\n content = f\"\"\"\n <h1>Reset your password</h1>\n <p></p>\n <a href=\"{link}\" target=\"_blank\" rel=\"noopener noreferrer\">Press here</a>\n \"\"\"\n email = sender.create_email(\n to_list=[email],\n subject=f\"Recovery Password\",\n html_content=content,\n )\n sender.send_email(email_to_send=email)", "def forget_password_request():\n form = EmailForm(request.form)\n if request.method == 'POST':\n if form.validate():\n account_email = form.email.data\n user = User.query.filter_by(email=account_email).first_or_404()\n send_mail(form.email.data, '重置你的密码',\n 'email/reset_password.html', user=user,\n token=user.generate_token())\n flash('一封邮件已发送到邮箱' + account_email + ',请及时查收')\n # return redirect(url_for('web.login'))\n return render_template('auth/forget_password_request.html', form=form)", "def reset_password():\n form = ResetPassword()\n if form.validate_on_submit():\n user_email = form.email.data\n mail_exist = db.check_email(user_email)\n if mail_exist is not None:\n new_password = generate_password()\n new_password_hash = generate_password_hash(new_password)\n username = mail_exist['username']\n db.update_password_username(username, new_password_hash)\n flash('Your new password has been sent to your mailbox')\n redirect('login')\n # send_password_reset_email(user_email, new_password)\n return redirect(url_for('login'))\n else:\n flash('This email address is not registered')\n return 
redirect('reset_password')\n return render_template('resetpassword.html', form=form)", "def forgot_password():\r\n form = ForgotPasswordForm(request.form)\r\n if form.validate_on_submit():\r\n user = model.user.User.query\\\r\n .filter_by(email_addr=form.email_addr.data)\\\r\n .first()\r\n if user and user.email_addr:\r\n msg = Message(subject='Account Recovery',\r\n recipients=[user.email_addr])\r\n if user.twitter_user_id:\r\n msg.body = render_template(\r\n '/account/email/forgot_password_openid.md',\r\n user=user, account_name='Twitter')\r\n elif user.facebook_user_id:\r\n msg.body = render_template(\r\n '/account/email/forgot_password_openid.md',\r\n user=user, account_name='Facebook')\r\n elif user.google_user_id:\r\n msg.body = render_template(\r\n '/account/email/forgot_password_openid.md',\r\n user=user, account_name='Google')\r\n else:\r\n userdict = {'user': user.name, 'password': user.passwd_hash}\r\n key = signer.signer.dumps(userdict, salt='password-reset')\r\n recovery_url = url_for('.reset_password',\r\n key=key, _external=True)\r\n msg.body = render_template(\r\n '/account/email/forgot_password.md',\r\n user=user, recovery_url=recovery_url)\r\n msg.html = markdown(msg.body)\r\n mail.send(msg)\r\n flash(gettext(\"We've send you email with account \"\r\n \"recovery instructions!\"),\r\n 'success')\r\n else:\r\n flash(gettext(\"We don't have this email in our records. \"\r\n \"You may have signed up with a different \"\r\n \"email or used Twitter, Facebook, or \"\r\n \"Google to sign-in\"), 'error')\r\n if request.method == 'POST' and not form.validate():\r\n flash(gettext('Something went wrong, please correct the errors on the '\r\n 'form'), 'error')\r\n return render_template('/account/password_forgot.html', form=form)", "def password_reset_token_created(sender, reset_password_token, *args, **kwargs):\n # send an e-mail to the user\n context = {\n 'current_user': reset_password_token.user,\n 'username': reset_password_token.user.username,\n 'email': reset_password_token.user.email,\n # ToDo: The URL can (and should) be constructed using pythons built-in `reverse` method.\n 'reset_password_url': \"http://some_url/reset/?token={token}\".format(token=reset_password_token.key)\n }\n\n # render email text\n email_html_message = render_to_string('email/user_reset_password.html', context)\n email_plaintext_message = render_to_string('email/user_reset_password.txt', context)\n\n msg = EmailMultiAlternatives(\n # title:\n \"Password Reset for {title}\".format(title=\"Some website title\"),\n # message:\n email_plaintext_message,\n # from:\n \"noreply@somehost.local\",\n # to:\n [reset_password_token.user.email]\n )\n msg.attach_alternative(email_html_message, \"text/html\")\n msg.send()", "def send_password_reset_mail(email, token):\n print(\"reset password\")\n url = f\"{settings.SITE_URL}/reset-password?email={email}&token={token}\"\n SUBJECT = \"Reset Password Request\"\n # The HTML body of the email.\n body = \"\"\"\n <html>\n <head></head>\n <body>\n <p>Here is your password reset link:</p>\n <p><a href='{0}'>{1}</a></p>\n </body>\n </html>\n \"\"\".format(url, url)\n send_mail(SUBJECT, body, email)", "def request_password_reset_token():\n j = request.get_json(force=True)\n user_requested = j['user'].lower()\n\n # Disabled user accounts can not request for a new password.\n target_user = User.query.filter_by(mail=user_requested).first()\n\n if target_user is None:\n return Errors.UNKNOWN_USER.make_json_response(status.HTTP_400_BAD_REQUEST)\n\n if target_user.state == 
StateType.DEACTIVATED:\n return Errors.DEACTIVATED_USER.make_json_response(status.HTTP_400_BAD_REQUEST)\n\n target_user.generate_password_request_token()\n\n send_mail(target_user.mail, render_template(\"password/reset_password_mail.txt\",\n greeting=get_opening_greeting(target_user),\n wlink=\"{}/password/reset/{}\".format(\n app.config['BUZZN_BASE_URL'],\n target_user.password_reset_token\n )), 'Passwort zurücksetzen für Buzzn-App')\n\n db.session.commit()\n return '', status.HTTP_201_CREATED", "def _request_reset(self, email):\n response = self.client.post(reverse('users.send_password_reset'),\n {'email': email})\n return response.context['token']", "def password_reset_token_created(sender, instance, reset_password_token, *args, **kwargs):\n # send an e-mail to the user\n context = {\n 'current_user': reset_password_token.user,\n 'username': reset_password_token.user.username,\n 'email': reset_password_token.user.email,\n 'reset_password_url':\n instance.request.build_absolute_uri(get_angular_url('validate-token', reset_password_token.key)),\n 'password_reset_timeout_hours': get_password_reset_token_expiry_time()\n }\n # render email text\n email_message = render_to_string('email/user_reset_password.html', context)\n send_user_link_to_reset_password(reset_password_token.user, email_message)", "def email_reset_notice(email):\n html = render_template(\"email/reset_notice.html\")\n subject = \"Password reset\"\n send_email(email, subject, html)", "def send_password_reset_email():\n aaa.send_password_reset_email(\n username=post_get('username'),\n email_addr=post_get('email_address')\n )\n return 'Please check your mailbox.'", "def password_reset_form(context):\n\n context.update({\n 'form': PasswordResetForm()\n })\n\n return context", "def request_password_reset():", "def _send_reset_request_mail(request, user, code):\n reset_link = \"{}?code={}\".format(\n request.build_absolute_uri(reverse(\"auth-reset\")), code\n )\n send_mail(\n sender=MAIL_FROM,\n recievers=[user.email],\n subject=\"Account Password Reset\",\n tmpl_file=Templates.Email.RESET_REQUEST,\n tmpl_data={\"{email}\": user.email, \"{reset_confirm_link}\": reset_link},\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a pandas dataframe that has the latest record for each manual id after merging all "sheet_name" sheets in the previously indexed_files that are present in "indexed_files_dir"
def zeta0_creation(self, indexed_files_dir, merge_columns): indexed_files = [file for file in os.listdir(indexed_files_dir) if not file.startswith("~")] indexed_files_dict = {} indexed_files_dict.clear() dateList = [] del dateList[:] for file in indexed_files: dated = file.split('_')[-1].split('.')[0] dated = dated[4:] + dated[:4] dateList.append(dated) indexed_files_dict[dated] = file dataframes = {} for dated, file in indexed_files_dict.items(): file_name = indexed_files_dir + '\\' + file dataframes[dated] = pd.read_excel(file_name, sheet_name=0) dataframes[dated]['file_date'] = dated dataframes[dated]['mid'] = [int(elem.split('_')[-1]) for elem in dataframes[dated]['manual_id']] merged_df = pd.concat([dataframes[dated] for dated in dateList], ignore_index=True) merged_df = merged_df.sort_values('file_date', ascending=False) zeta0 = merged_df.drop_duplicates(subset='manual_id', keep='first') pd.set_option('mode.chained_assignment', None) for col in zeta0.columns: zeta0[col] = zeta0[col].astype('str') zeta0 = zeta0.apply(lambda x: x.str.strip() if x.dtype == "object" else x) zeta0 = zeta0.sort_values('mid', ascending=True) if "manual_id" not in merge_columns: merge_columns.append("manual_id") zeta0 = zeta0[merge_columns] # print(zeta0) return zeta0
[ "def build_index(self):\n records = {}\n run_count = 0\n run_iteration = 1\n parse_dict = {}\n for k in self.value_path:\n parse_dict[k] = parse(k)\n s = time.time()\n for rid, json_data in self._file_iter:\n extracted_data = utils.extract(json_data, self.value_path, parse_dict)\n # Reset run_count when we hit BATCH_SIZE\n if run_count >= self._batch_size:\n self._index_records(records)\n msg = \"Finished indexing {val} records. Time = {time}\".format(val=run_count * run_iteration,\n time=(time.time() - s))\n self._logger.info('{0} {1}'.format(\"[minhash-lsh-blocking]\", msg))\n\n run_iteration += 1\n records = {}\n run_count = 0\n\n records[rid] = set(extracted_data.values()[0])\n run_count += 1\n\n # Index the final remaining records\n self._index_records(records)", "def merge_walkupseq_files(latest_tsca_id):\n paths = glob.glob('walkupseq_files/*sample_info*')\n\n dfs = []\n for f in paths:\n tmp = pd.read_table(f, encoding='latin1')\n dfs.append(tmp)\n\n df = pd.concat(dfs, axis=0)\n df.to_csv('walkupseq_files/walkupseq_all_combined_%s.txt'%latest_tsca_id, sep=\"\\t\", index=None)\n return df", "def master_idx_by_date(self, exptdate, timelapse=False):\n path = self.paths_dict[exptdate]\n datadir = os.path.join(os.path.dirname(path), 'data')\n os.path.exists(datadir)\n if not timelapse:\n fns = os.listdir(datadir)\n else:\n dirs = os.listdir(datadir)\n # Create a master idx dataframe based on the files found\n # in this experiments datadir\n strains = []\n filepaths = []\n\n for fn in fns:\n print(fn)\n if fn[-4:] == '.fcs':\n match = re.search(constants.patterns.strain_name, fn)\n if match:\n strains.append(match.group())\n filepath = os.path.join(datadir, fn)\n filepaths.append(filepath)\n\n df = pd.DataFrame({'strain': strains,\n 'filepath': filepaths})\n # Add clone indices to the dataframe\n for strain in df.strain.unique():\n\n n_clones = len(df[df.strain == strain])\n df.loc[df.strain == strain, 'clone'] = [int(idx) for idx in range(1, n_clones+1, 1)]\n\n # Lookup each strain in constants.strains_dir/Strains_Database.csv\n # and add information found in the database\n strains_df = pd.read_csv(os.path.join(constants.strains_dir, 'Strains_Database.csv'))\n\n for idx in df.index:\n strain_name = df.loc[idx, 'strain']\n if strain_name in strains_df.name.values:\n for col in strains_df.columns:\n df.loc[idx, col] = strains_df.loc[strains_df.name == strain_name, col].values[0]\n \n return df", "def on_complete(self, data_map):\n results = []\n\n for output_filename, data_frame in data_map.items():\n new_filename = None\n versioned_name = None\n new_file_contents_hex_digest = None\n existing_file_contents_hex_digest = None\n \n # get the hash of the pandas data frame \n # this will be used as the name for the temp file if needed\n df_hex_digest = self.get_data_frame_hash(data_frame)\n \n logger.info(f'Existing df hash {df_hex_digest}')\n \n # determine if the file we want to write already exists and if so, compare hashes with the\n # file we're about to write. 
We can't assume data hashes will match for the same data frame\n # when it gets loaded from a file...\n if self.does_file_exist(output_filename):\n logger.info(f'Checking hash of file that already exists {output_filename}')\n existing_file_contents_hex_digest = self.check_file_contents_hash(output_filename)\n \n # ...therefore we'll temporarily write a file based on the data hash\n # and determine its file content's hash\n new_filename = f'__{df_hex_digest}__.csv'\n self.save_data(new_filename, data_frame)\n new_file_contents_hex_digest = self.check_file_contents_hash(new_filename)\n\n if new_file_contents_hex_digest == existing_file_contents_hex_digest:\n # since they are the same, we don't do anything, just cleanup\n logger.info('The hashes matched, deleting file...')\n self.delete_file(new_filename)\n new_filename = None\n new_file_contents_hex_digest = None\n else:\n logger.info('The hashes did not match. Preserving the old file and using the new one as the latest.')\n # the old existing file gets prepended with a timestamp\n versioned_name = '{0}__{2}{1}'.format(*os.path.splitext(output_filename) + (time(),))\n os.rename(f'{base.LOCAL_PATH}/{output_filename}', f'{base.LOCAL_PATH}/{versioned_name}')\n # the new file gets renamed to whatever the output needs to be\n os.rename(f'{base.LOCAL_PATH}/{new_filename}', f'{base.LOCAL_PATH}/{output_filename}')\n else:\n self.save_data(output_filename, data_frame)\n\n results.append({\n 'output_df_hash': df_hex_digest,\n 'output_filename': output_filename,\n 'versioned_filename': versioned_name,\n 'new_file_hash': new_file_contents_hex_digest,\n 'old_file_hash': existing_file_contents_hex_digest\n })\n\n self.task_results = results", "def get_latest_league_data(self, df):\n max_date = pd.to_datetime(df[\"Date\"]).max()\n df = df[df[\"Date\"] == max_date]\n [latest_league_file_dir] = df[\"File\"].values\n df = self.extract_df(latest_league_file_dir)\n return df", "def get_dataframe(self):\n for i, study_id in enumerate(self.studies_to_combine):\n copy = repr(self.original_study_location).strip(\"'\")\n study_location = copy.replace(\"MTBLS1\", study_id)\n\n for maf in self.sort_mafs(study_location, study_id):\n maf_temp = None\n try:\n maf_temp = pandas.read_csv(os.path.join(study_location, maf), sep=\"\\t\", header=0, encoding='unicode_escape')\n except pandas.errors.EmptyDataError as e:\n logger.error(f'EmptyDataError Issue with opening maf file {maf}: {str(e)}')\n self.unopenable_maf_register.append(maf)\n continue\n except Exception as e:\n logger.error(f'Issue with opening maf file {maf}, cause of error unclear: {str(e)}')\n self.unopenable_maf_register.append(maf)\n continue\n\n cleanup_function = getattr(DataFrameUtils, f'{self.method}_maf_cleanup')\n maf_temp = cleanup_function(maf_temp, study_id, maf)\n maf_as_dict = totuples(df=maf_temp, text='dict')['dict']\n\n yield maf_as_dict", "def load_combine_and_dedupe_from_excel_files(\n self,\n df,\n list_of_excel_files_or_folders_and_sheet_info\n ):\n if not isinstance(list_of_excel_files_or_folders_and_sheet_info, list):\n raise transform_errors.InputDataTypeError(\n f\"list_of_excel_file_names_and_sheet_indexes must be of list type.\")\n\n df = pd.DataFrame()\n print(f\"\\nData in the following Excel files and sheets will be combined:\")\n for f_s in self._get_list_of_excel_files_and_sheets_to_process(\n list_of_excel_files_or_folders_and_sheet_info):\n print(f_s)\n cur_df = pd.read_excel(f_s[0],\n sheet_name=f_s[-1])\n df = pd.concat([df, cur_df])\n\n return 
df.drop_duplicates(ignore_index=True)", "def _get_last_reports(self):\n db = Database(*self._config)\n sql = \"SELECT document FROM DocsOneMonthArchive WHERE timestamp=(SELECT MAX(timestamp) FROM DocsOneMonthArchive)\"\n rawdata = db.query(sql)\n return [json.loads(x['document']) for x in rawdata]", "def load_data_from_other_sheets_in_excel_file_and_append_to_the_main_dataframe(\n self,\n df,\n list_of_sheet_names\n ):\n for sheet in list_of_sheet_names:\n temp_df = pd.read_excel(\n self.config[KEY_CURRENT_INPUT_FILE],\n sheet_name=sheet,\n header=self.config[KEY_HEADER])\n\n # To append all sheet with same columns names,\n # those columns names that not match will be at the end of the dataframe\n df = df.append(temp_df)\n\n return df", "def main_loop(self, many_id_df):\n many_id_df['join_index'] = many_id_df['hmdb_ids'].apply(lambda x: self.hmdb_merge(x))\n many_merged_df = pd.merge(many_id_df, self.joined_df, how='left', on='join_index')\n\n # Clean-up headers before final joins in main:\n many_merged_df.drop('hmdb_ids_x', axis=1, inplace=True)\n many_merged_df.rename(columns={'hmdb_ids_y': 'hmdb_ids'}, inplace=True)\n return many_merged_df", "def get_newest_df(watchfolder, optional_column_names=[], existing_df=None):\n from measurement_directory import run_ids_from_txt, run_ids_from_filenames\n import os\n bc = load_breadboard_client()\n run_ids = []\n files = [filename for filename in os.listdir(watchfolder)]\n files_spe = []\n for file in files:\n if '.spe' in file:\n files_spe.append(file)\n elif 'run_ids.txt' in file:\n run_ids += run_ids_from_txt(\n os.path.abspath(os.path.join(watchfolder, file)))\n if existing_df is None:\n run_ids += run_ids_from_filenames(files_spe)\n df = bc.get_runs_df_from_ids(\n run_ids, optional_column_names=optional_column_names)\n else:\n run_ids = list(set(run_ids_from_filenames(files_spe)).union(set(run_ids)).difference(\n set(list(existing_df['run_id']))))\n if len(run_ids) > 0:\n df = existing_df.append(bc.get_runs_df_from_ids(run_ids,\n optional_column_names=optional_column_names),\n sort=False,\n ignore_index=True)\n else:\n df = existing_df\n\n def custom_sort(df):\n # takes in df and returns same df with user-interaction columns first\n #['run_id','badshot','manual_foo1','manual_foo2', 'listboundvar1', etc.]\n cols = list(df.columns)\n manual_cols = []\n for col in cols:\n if 'manual' in col:\n manual_cols += [col]\n manual_cols = sorted(manual_cols)\n user_interact_cols = ['run_id'] + ['badshot'] + manual_cols\n for col in user_interact_cols:\n cols.remove(col)\n return df[user_interact_cols + cols]\n\n df = custom_sort(df)\n df.sort_values(by='run_id', ascending=False, inplace=True)\n return df", "def merge_in_original_sheets(self, save_sheet_names=False):\n sheet_dict = dict()\n for ds in self:\n if ds.orig_sheet_name not in sheet_dict:\n sheet_dict[ds.orig_sheet_name] = [ds]\n else:\n sheet_dict[ds.orig_sheet_name].append(ds)\n\n new_dh = DataHolder(self.name)\n if \"\" in sheet_dict:\n # in this case, orig_sheet_name is not assigned and we just use th name\n sheet_dict = self.data_dict\n\n for name in sheet_dict:\n df_data_list = [ds.df_data for ds in sheet_dict[name]]\n df_data = right_merge_df_list(df_data_list)\n df_profiles = right_merge_df_list([ds.df_profiles for ds in sheet_dict[name]])\n new_dh.add_sheet(name, df_data, df_profiles, orig_sheet_name=name)\n\n if save_sheet_names:\n data = pd.DataFrame([ds.orig_sheet_name + \" \" + ds.name for ds in self])\n profiles = pd.DataFrame([SheetTypeDefinitions.EMPTY_STRING]*data.size)\n 
new_dh.add_sheet(\"triangle names\", data, profiles)\n return new_dh", "def combined_df(self) -> pd.DataFrame:\n return pd.concat([self.data, self.latest_data.reset_index()], ignore_index=True)", "def import_optimal_groupings(filename):\n groupings = pd.read_excel(filename, sheet_name=None)\n sheet_names = list(groupings.keys())\n\n unique_solutions = []\n for sheet_name in sheet_names:\n unique_solutions.append(groupings[sheet_name])\n\n return(unique_solutions)", "def extract_recording_by_idx(all_idx, dict1, window_size, pump_type = 'minor'):\r\n all_idx = np.array(all_idx)\r\n # window_size = int(window_size * 10000)\r\n n_window = len(all_idx)\r\n # total_recording_length = 3000000\r\n # in case window_size is in second units\r\n if window_size < 100:\r\n window_size = int(window_size * 10000)\r\n # n_frame = total_recording_length // window_size\r\n middle_idx = window_size//2\r\n files = sorted(dict1.values())\r\n n_files = len(files)\r\n data = np.zeros(shape=(n_window, window_size + 1))\r\n current_vector = 0\r\n for file_idx in range(n_files):\r\n current_file_idx = file_idx\r\n if (pump_type == 'minor'):\r\n record = pd.read_pickle(cwd + subPath + files[file_idx].replace('.abf', ' Full Annotation.cPickle')).as_matrix()[:,1:]\r\n elif (pump_type == 'major'):\r\n record = pd.read_pickle(cwd + subPath + files[file_idx].replace('.abf', ' Annotation.cPickle')).as_matrix()[:,1:]\r\n # print(file_idx, record.shape)\r\n total_recording_length = record.shape[0]\r\n if (record[:, 0].min() < 0):\r\n record[:, 0] -= record[:, 0].min()\r\n Mat = np.ones(shape = (total_recording_length+window_size - 1, 2))*(-2)\r\n Mat[middle_idx:middle_idx + total_recording_length,:] = record.copy()\r\n # following extracts the middle indeces for a given file index\r\n local_idx = all_idx[all_idx // total_recording_length == file_idx] % total_recording_length\r\n # verifies that the current file includes some windows\r\n if (local_idx.shape[0]):\r\n data[current_vector:current_vector + local_idx.shape[0], -1] = Mat[local_idx + middle_idx, 1]\r\n # following extracts the middle indeces with a bound\r\n local_idx_range = np.array([range(idx_instant, idx_instant + 2*middle_idx) for idx_instant in local_idx])\r\n data[current_vector:current_vector+local_idx.shape[0], :-1] = Mat[local_idx_range ,0]\r\n else:\r\n print ('No windows from %s.' % (files[file_idx]))\r\n\r\n current_vector += local_idx.shape[0]\r\n print 'data distribution\\n', pd.Series(data[:,-1]).value_counts()\r\n return data", "def produce_flat_file(self):\n # read all excel files\n files = []\n for ext in ('**/*.xls', '**/*.xlsx'): files.extend(self._downloaded_folder.glob(ext)) \n\n # if user does not want to append data, convert excel sheets into csv formats \n if int(self._append_option) == 0:\n write_log(\"User chooses not to append any data. All files will be processed separately.\")\n\n #stop if no excel files are present\n if len(files) == 0: \n write_log(\"No Excel file is detected. 
Moving on to upload all flat files to SQL separately.\")\n return None \n\n # convert excel into csv \n try:\n write_log(\"Attempting to convert all Excel files into csv.\")\n for file in tqdm(files):\n write_log(f\"--Attempting to convert all visible sheets in {file.name} into csv.\")\n\n #check hidden sheets\n sheets = pd.ExcelFile(file).book.sheets()\n visible_sheets = [i for i in sheets if i.visibility==0]\n\n #read visible sheets only\n for sheet in visible_sheets: \n worksheet_df = pd.read_excel(file, sheet_name=sheet.name) \n worksheet_name = str(file.stem) + '_' + str(sheet.name) \n worksheet_name = worksheet_name.replace(' ', '_')\n worksheet_df['FileName'] = worksheet_name \n\n #write to csv\n csv_path = self._downloaded_folder / f'{worksheet_name}.csv'\n if Path.exists(csv_path): Path.unlink(csv_path) #overwrite existing file\n worksheet_df.to_csv(csv_path, sep=\"^\")\n self.move_to_processed(file)\n write_log(f\"--Successfully converted all visible sheets in {file.name} into csv.\")\n write_log(\"Successfully converted all Excel files into csv.\")\n except Exception as e:\n write_log(f\"Failed to convert all Excel files into csv due to error '{e}'\")\n sys.exit()\n\n # if user wants to append data, combine all excel sheets and existing flat files into a single csv\n if int(self._append_option) == 1:\n try:\n write_log(\"User chooses to append data.\")\n write_log(\"Attempting to consolidate all data together into a single table.\")\n\n # read all existing flat files\n flat_files = []\n for ext in ('**/*.csv', '**/*.tsv', '**/*.txt'): flat_files.extend(self._downloaded_folder.glob(ext))\n tables_to_append = []\n\n # read excels (if any) before appending\n for file in tqdm(files):\n write_log(f\"--Attempting to read all visible sheets in {file.name}.\")\n \n #check hidden sheets\n sheets = pd.ExcelFile(file).book.sheets()\n visible_sheets = [i for i in sheets if i.visibility==0]\n\n #read visible sheets only\n for sheet in visible_sheets: \n worksheet_df = pd.read_excel(file, sheet_name=sheet.name) \n worksheet_name = str(file.stem) + '_' + str(sheet) \n worksheet_df['FileName'] = worksheet_name \n tables_to_append.append(worksheet_df)\n self.move_to_processed(file)\n write_log(f\"--Successfully read all visible sheets in {file.name}.\")\n\n # continue appending flat files\n for flat in tqdm(flat_files): \n tables_to_append.append(pd.read_csv(flat, sep = self.detect_file_separator(flat)))\n self.move_to_processed(flat)\n write_log(f\"Successfully appended in {flat.name}.\")\n\n # save appended data \n wb_append = pd.concat(tables_to_append, ignore_index=True, sort=False)\n csv_path = self._downloaded_folder / 'consolidated_data.csv'\n if Path.exists(csv_path): Path.unlink(csv_path) #overwrite existing file\n wb_append.to_csv(csv_path, sep=\"^\")\n write_log(\"Successfully consolidated all data together into a single table.\")\n except Exception as e:\n write_log(f\"Failed to consolidate all data together into a single table due to error '{e}'.\")\n sys.exit()", "def _read_files(self, alarms_file, beds_files):\n alarm_columns = ALARMS_FILES[\"columns\"][:-2]\n alarms_df = pd.read_csv(os.path.join(self.input_dir, alarms_file))\n alarms_df = alarms_df[alarm_columns].set_index(\"UnitBedUID\")\n key = alarms_file.split(\"_\")[0] + \"_\"\n beds_file = [file_name for file_name in beds_files if key in file_name]\n if len(beds_file) > 1:\n print(\n \"Something went wrong. It has been found more than one file \"\n \"mapping the Bedmaster beds IDs with ADT beds. 
It will be \"\n \"used just the first.\",\n )\n beds_file = [beds_file[0]]\n elif len(beds_file) == 0:\n print(\n \"Something went wrong. It has not been found any file \"\n \"mapping the Bedmaster beds IDs with ADT beds. The mapping \"\n \"won't be performed.\",\n )\n return alarms_df\n bed_columns = ALARMS_FILES[\"columns\"][:1] + ALARMS_FILES[\"columns\"][-2:]\n beds_df = pd.read_csv(os.path.join(self.input_dir, beds_file[0]))\n beds_df = beds_df[bed_columns].set_index(\"UnitBedUID\")\n return alarms_df.join(beds_df)", "def extract_recording_by_idx_fixed(all_idx, dict1, window_size, pump_type = 'minor'):\r\n all_idx = np.array(all_idx)\r\n # window_size = int(window_size * 10000)\r\n n_window = len(all_idx)\r\n total_recording_length = 3000000\r\n # in case window_size is in second units\r\n if window_size < 100:\r\n window_size = int(window_size * 10000)\r\n n_frame = total_recording_length // window_size\r\n middle_idx = window_size//2\r\n files = sorted(dict1.values())\r\n n_files = len(files)\r\n data = np.zeros(shape=(n_window, window_size + 1))\r\n current_vector = 0\r\n for file_idx in range(n_files):\r\n current_file_idx = file_idx\r\n if (pump_type == 'minor'):\r\n record = pd.read_pickle(cwd + subPath + files[file_idx].replace('.abf', ' Full Annotation.cPickle')).as_matrix()[:,1:]\r\n elif (pump_type == 'major'):\r\n record = pd.read_pickle(cwd + subPath + files[file_idx].replace('.abf', ' Annotation.cPickle')).as_matrix()[:,1:]\r\n if (record[:, 0].min() < 0):\r\n record[:, 0] -= record[:, 0].min()\r\n Mat = np.ones(shape = (total_recording_length+window_size - 1, 2))*(-2)\r\n Mat[middle_idx:middle_idx + total_recording_length,:] = record.copy()\r\n # following extracts the middle indeces for a given file index\r\n local_idx = all_idx[all_idx / total_recording_length == file_idx] % total_recording_length\r\n # verifies that the current file includes some windows\r\n if (local_idx.shape[0]):\r\n data[current_vector:current_vector + local_idx.shape[0], -1] = Mat[local_idx + middle_idx, 1]\r\n # following extracts the middle indeces with a bound\r\n local_idx_range = np.array([xrange(idx_instant, idx_instant + 2*middle_idx) for idx_instant in local_idx])\r\n data[current_vector:current_vector+local_idx.shape[0], :-1] = Mat[local_idx_range ,0]\r\n else:\r\n print ('No windows from %s.' % (files[file_idx]))\r\n\r\n current_vector += local_idx.shape[0]\r\n print 'data distribution\\n', pd.Series(data[:,-1]).value_counts()\r\n return data", "def combineDF(files_list, files_dir_path, converters = {}):\r\n combined = pd.concat([pd.read_excel(os.path.join(files_dir_path, file_name), sheetname =0, converters = converters)\r\n for file_name in files_list])\r\n \r\n return combined" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper function for break/clear parsing; may be overridden. lookupmodule() translates a (possibly incomplete) file or module name into an absolute file name.
def lookupmodule(self, filename): if os.path.isabs(filename) and os.path.exists(filename): return filename f = os.path.join(sys.path[0], filename) if os.path.exists(f) and self.canonic(f) == self.mainpyfile: return f root, ext = os.path.splitext(filename) if ext == '': filename = filename + '.py' if os.path.isabs(filename): return filename for dirname in sys.path: while os.path.islink(dirname): dirname = os.readlink(dirname) fullname = os.path.join(dirname, filename) if os.path.exists(fullname): return fullname return None
[ "def lookup_module(filename):\r\n\r\n # stolen from pdb\r\n import os\r\n import sys\r\n\r\n if os.path.isabs(filename) and os.path.exists(filename):\r\n return filename\r\n f = os.path.join(sys.path[0], filename)\r\n if os.path.exists(f): # and self.canonic(f) == self.mainpyfile:\r\n return f\r\n root, ext = os.path.splitext(filename)\r\n if ext == '':\r\n filename = filename + '.py'\r\n if os.path.isabs(filename):\r\n return filename\r\n for dirname in sys.path:\r\n while os.path.islink(dirname):\r\n dirname = os.readlink(dirname)\r\n fullname = os.path.join(dirname, filename)\r\n if os.path.exists(fullname):\r\n return fullname\r\n return None", "def _get_module(self, filename, base):\n if not filename or not filename.endswith('.py'):\n utils._log('Cannot get module for non python-source file: ', filename)\n return '' # only pytnon modules are supported\n base = base or os.path.join(\n self.window.extract_variables().get('project_path', ''),\n self.window.extract_variables().get('project_base_name', ''))\n utils._log('Getting module for file %s relative to base %s' % (filename, base))\n if not filename.startswith(base):\n utils._log('Cannot determine module path outside of directory')\n return ''\n return filename.replace(base, '').replace(os.path.sep, '.')[:-3].strip('.')", "def get_module_name(filename, options):\n if options.module_name is not None:\n return options.module_name\n elif filename:\n filename, _ = os.path.splitext(os.path.normpath(filename))\n # We want '' in our lookup path, but we don't want it for prefix tests.\n for path in filter(bool, options.pythonpath):\n path = os.path.normpath(path)\n if not path.endswith(os.sep):\n path += os.sep\n if filename.startswith(path):\n rel_filename = filename[len(path):]\n return _filename_to_module_name(rel_filename)\n # Explicit pythonpath has failed, treat filename as relative to .\n return _filename_to_module_name(filename)", "def get_module_name_for_import(import_name, module_names):\n if not import_name:\n return None\n if import_name in module_names:\n return import_name\n else:\n return get_module_name_for_import(\".\".join(import_name.split(\".\")[0:-1]), module_names)", "def _get_module_name(filename: str) -> str:\n return \".\".join(_get_relative(filename).split(os.path.sep)[2:]).replace(\".pyi\", \"\").replace(\".__init__\", \"\")", "def module_name(self, jamfile_location):\n assert isinstance(jamfile_location, basestring)\n module = self.location2module.get(jamfile_location)\n if not module:\n # Root the path, so that locations are always umbiguious.\n # Without this, we can't decide if '../../exe/program1' and '.'\n # are the same paths, or not.\n jamfile_location = os.path.realpath(\n os.path.join(os.getcwd(), jamfile_location))\n module = \"Jamfile<%s>\" % jamfile_location\n self.location2module[jamfile_location] = module\n return module", "def test_find_nonexistent_module_name():\n # given\n filepath = build_path(\"test_find_nonexistent_module_name\", \"bar.py\")\n\n # when\n module_name = find_module_name(filepath)\n\n # then\n assert module_name is None", "def normalize_module_name(name):\n if name.endswith('.py'):\n name = name[:-3]\n return name.replace('/', '.')", "def get_pathtocodemodule(self):\n\n # default module name\n path = self.infofilepath\n (dir, fullname) = os.path.split(path)\n (name, ext) = os.path.splitext(fullname)\n pathtocodemodule_default = name + '.py'\n # override with explicit\n pathtocodemodule = dir + '/' + self.get_ourinfofile_property(mconst.DEF_PACK_INFOFIELD_codefile, 
pathtocodemodule_default)\n # return it\n return (pathtocodemodule, None)", "def resolve(fname):\n if os.path.dirname(__file__):\n return os.path.dirname(__file__) + \"/../common/\" + fname\n else:\n return \"/common/\" + fname", "def find_module(self, address):\n\n for base, end, mod in self.mod_fast:\n if address >= base and address <= end:\n return mod\n\n return obj.NoneObject(\"\")", "def get_module_name(platform, module_name):\n platform_desc = platforms[platform]\n if MODULE_CASE in platform_desc:\n func = platform_desc[MODULE_CASE]\n return func(module_name)\n return module_name", "def get_module_name(module_path):\n return ntpath.split(module_path)[1].split(\".\")[0]", "def force_get_path(module, name, default):\n value = module.force_get_input(name, None)\n if value:\n return value.name\n else:\n return default", "def eval_location2(pymodule, offset):\r\n pyname_finder = ScopeNameFinder(pymodule)\r\n return pyname_finder.get_primary_and_pyname_at(offset)", "def find_base_addr(self, module_name):\n modules = self.enum_modules()\n\n for module in modules:\n modname = c_buffer(280)\n psapi.GetModuleFileNameExA(self.hproc, module,\n modname, sizeof(modname))\n if module_name.lower() in modname.value.lower():\n return self._get_base(module)", "def modname(fvars):\r\n file, name = fvars.get('__file__'), fvars.get('__name__')\r\n if file is None or name is None:\r\n return None\r\n\r\n if name == '__main__':\r\n # Since the __main__ module can't be reloaded, the module has \r\n # to be imported using its file name. \r\n name = main_module_name()\r\n return name", "def resolve_import(self, item):\n name = item.name\n basename = self.convert_to_path(name)\n shortened = None\n if item.is_from:\n shortened = os.path.dirname(basename)\n\n # Python builtin modules\n if name in sys.builtin_module_names or name.startswith(\"__future__\"):\n return name + \".so\"\n\n if item.is_relative():\n filename = os.path.join(self.current_directory, basename)\n else:\n filename = basename\n\n # try absolute files\n init_file = os.path.join(filename, \"__init__.py\")\n for fs in self.fs_path:\n if fs.isfile(init_file):\n return fs.refer_to(init_file)\n elif fs.isfile(filename + \".py\"):\n return fs.refer_to(filename + \".py\")\n elif shortened is not None:\n if item.is_relative():\n filename = os.path.join(self.current_directory, shortened)\n else:\n filename = shortened\n init_file = os.path.join(filename, \"__init__.py\")\n if fs.isdir(filename) and fs.isfile(init_file):\n return fs.refer_to(init_file)\n elif fs.isfile(filename + \".py\"):\n return fs.refer_to(filename + \".py\")\n\n raise ImportException(name)", "def get_mod_name():\n return sys.argv[0].split(\"/\")[-1].split(\".py\")[0]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Wait n seconds before returning ok
def timeout(n): time.sleep(int(n)) return 'ok', 200
[ "def waitfor(secs):\n time.sleep(secs)", "def wait_for(test, timeout_seconds=DEFAULT_TIMEOUT):\n start = time.time()\n while True:\n if test():\n return True\n if time.time() - start > timeout_seconds:\n return False\n time.sleep(0.5)", "def testWait(self):\n self.runDelayedTest(wait=.1)", "def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass", "def sleep(time_in_sec):\n delay = int(time_in_sec)\n time.sleep(delay)\n return 'success'", "def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass", "def testWaitForSuccess1(self):\n self.assertEquals(4, self._TestWaitForSuccess(4, 10, period=1))\n self.assertEquals(5, self.GetTryCount())\n self.assertEquals(4, self.GetTrySeconds())", "def _wait(self, sec=0.75):\n if self.parasitic:\n time.sleep(sec)\n else:\n while self.bus.read_bit() == 0x0:\n pass", "def wait(time_to_wait: float=0.5):\n sleep(time_to_wait)", "def WaitForAction(self, action):\n start_time = time.time()\n while time.time() - start_time < 20:\n if action():\n return True\n time.sleep(1)\n\n return False", "def wait_for_retry(self):\n sleep(min(self.retry_num, self.max_retry) * self.multiplier)", "def sleep(self, Nsecs):\n return scheduler.sleep(Nsecs)", "async def sleep_and_timeout(seconds):\n await sleep(seconds)\n raise TimeoutError()", "def sleep(timeout, cancel_event):\n if cancel_event:\n cancel_event.wait(timeout=timeout)\n if cancel_event.isset():\n return False\n else:\n time.sleep(timeout)\n return True", "def _wait_for_test(jid, test_func, timeout=None, timeout_msg='Job wait timed out'): \n \n poll_interval = 1.0 \n abort_time = _time.time() + timeout if timeout else None \n\n while True:\n retval = test_func(jid)\n if retval:\n return retval\n \n if abort_time and _time.time() > abort_time:\n raise _CloudTimeoutError(timeout_msg, jid=jid)\n \n _time.sleep(poll_interval)", "def _wait_until_state(self, state, timeout=-1):\r\n if timeout == -1:\r\n while self.get_state() != state:\r\n time.sleep(10)\r\n else:\r\n time_to_timeout_at = datetime.datetime.now() + datetime.timedelta(timeout)\r\n while self.get_state() != state:\r\n if datetime.datetime.now() < time_to_timeout_at:\r\n time.sleep(10)\r\n else:\r\n return False\r\n return True", "def timerWait(timer, sleep=0.25, n=40, go=go, timers=timers, tf=testOutFile):\n for i in range(n):\n if not go(sleep=sleep):\n return False\n if not timers[timer].isActive():\n return True\n timers[timer].stop() # Timer never did it's thing so just shut it down\n with open(tf, \"a\") as f: # file to write test results to\n f.write(\"ERROR: timer {} didn't stop in alloted time\\n\".format(timer))\n return False # return False to stop script. Something is wrong", "def wait_for_success_timeout_seconds(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"wait_for_success_timeout_seconds\")", "def wait_for_success_timeout_seconds(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"wait_for_success_timeout_seconds\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
int ploidy return all possible genotypes, completely determined by ploidy
def all_genotype(ploidy): return ["".join(comb) for comb in cwr("ACGT-", ploidy)]
[ "def genotype(args) :\n from genotyper import genotype_samples\n genotype_samples(args)", "def collapse_genotypes(pL,gL):\n if len(gL) < 2:\n return gL\n else:\n uniqueL = [] # list of unique genotypes relative to ploidy\n for g in gL:\n s = ''\n for i in xrange(len(pL)):\n s += ''.join(sorted(g[0:pL[i]]))\n g = g[pL[i]:]\n if s not in uniqueL:\n uniqueL.append(s)\n return uniqueL", "def get_all_possible_genotypes(self):\n # Get all genotypes.\n return mutations_to_genotypes(self.mutations, wildtype=self.wildtype)", "def write_genotypes(\n self, chromosome: str, variant_table: VariantTable, only_snvs, ploidy: int = 2\n ) -> None:\n\n # map positions to index\n genotyped_variants = dict()\n for i in range(len(variant_table)):\n genotyped_variants[variant_table.variants[i].position] = i\n\n # INT_TO_UNPHASED_GT = {0: (0, 0), 1: (0, 1), 2: (1, 1), -1: None}\n GT_GL_GQ = frozenset([\"GT\", \"GL\", \"GQ\"])\n for record in self._record_modifier(chromosome):\n pos = record.start\n if not record.alts:\n continue\n\n for sample, call in record.samples.items():\n geno = Genotype([])\n n_alleles = 1 + len(record.alts)\n n_genotypes = binomial_coefficient(ploidy + n_alleles - 1, n_alleles - 1)\n geno_l = [1 / n_genotypes] * int(n_genotypes)\n geno_q = None\n\n # for genotyped variants, get computed likelihoods/genotypes (for all others, give uniform likelihoods)\n if pos in genotyped_variants:\n likelihoods = variant_table.genotype_likelihoods_of(sample)[\n genotyped_variants[pos]\n ]\n # likelihoods can be 'None' if position was not accessible\n if likelihoods is not None:\n geno_l = [l for l in likelihoods] # type: ignore\n geno = variant_table.genotypes_of(sample)[genotyped_variants[pos]]\n\n # Compute GQ\n geno_index = geno.get_index()\n geno_q = sum(geno_l[i] for i in range(n_genotypes) if i != geno_index)\n\n # TODO default value ok?\n # store likelihoods log10-scaled\n\n # Temporarily overwrite the GT field with a (fake) genotype that indicates a\n # diploid sample. 
Otherwise, if the GT field happens to be empty, pysam\n # complains that we are setting an incorrect number of GL values.\n call[\"GT\"] = tuple([0] * ploidy)\n\n call[\"GL\"] = [max(math.log10(j), -1000) if j > 0 else -1000 for j in geno_l]\n call[\"GT\"] = tuple(geno.as_vector())\n\n # store quality as phred score\n if not geno.is_none():\n # TODO default value ok?\n assert geno_q is not None\n if geno_q > 0:\n call[\"GQ\"] = min(round(-10.0 * math.log10(geno_q)), 10000)\n else:\n call[\"GQ\"] = 10000\n else:\n call[\"GQ\"] = None\n\n record.qual = None\n\n # delete all other genotype information that might have been present before\n for tag in set(call.keys()) - GT_GL_GQ:\n del call[tag]", "def genotypes(self):\n return self.data.genotypes.values", "def genotypes(self):\n return self._genos", "def generate_genotype(self):\n genes = []\n for i in range(self.n_genes):\n genes.append(self.Gene(n_bases=self.n_bases))\n self.genes = genes", "def _build_genotypes(self):\n x = np.zeros(self.n)\n \n # Frequencies derived from HWE.\n num_hetero = 2 * self.maf * (1 - self.maf) * self.n\n num_homo_minor = self.maf ** 2 * self.n\n \n x[:num_hetero] = 1\n x[num_hetero:num_hetero+num_homo_minor] = 2\n np.random.shuffle(x)\n \n # Add noise for dosage values if needed.\n if self.dosage_var:\n x[x == 0] += np.abs(\n np.random.normal(0, self.dosage_var, len(x[x == 0]))\n )\n x[x == 1] += np.random.normal(0, self.dosage_var, len(x[x == 1]))\n x[x == 2] -= np.abs(\n np.random.normal(0, self.dosage_var, len(x[x == 2]))\n )\n\n # Mask some values if the call rate is not 1.\n if self.call_rate < 1:\n missing_rate = 1 - self.call_rate\n missing_number = missing_rate * self.n\n missing_idx = np.arange(0, self.n)\n np.random.shuffle(missing_idx)\n missing_idx = missing_idx[:missing_number]\n x[missing_idx] = np.nan\n \n return x", "def get_genotypes(record):\n snp_list = []\n\n for sample in range(0, len(record.samples)):\n if record.samples[sample].called:\n snp_list.append(record.samples[sample].gt_bases)\n else:\n snp_list.append(\"-\")\n\n return snp_list", "def genotypeGVCFs():\n return outDiscovery + \"/multisample.genotyped.vcf\"", "def genotypes(context, re_upload, family_id):\n\n click.echo(click.style(\"----------------- GENOTYPES -------------------\"))\n\n if not family_id:\n _suggest_cases_to_upload(context)\n context.abort()\n\n tb_api = context.obj[\"tb_api\"]\n gt_api = context.obj[\"genotype_api\"]\n hk_api = context.obj[\"housekeeper_api\"]\n status_api = context.obj[\"status\"]\n family_obj = status_api.family(family_id)\n\n api = UploadGenotypesAPI(status_api, hk_api, tb_api, gt_api)\n results = api.data(family_obj.analyses[0])\n if results:\n api.upload(results, replace=re_upload)", "def _get_genotype(self, genotype, allele_counts, alleles):\n\n if pd.isna(genotype):\n return np.nan\n elif genotype == 'Het':\n return ''.join(alleles)\n else:\n if allele_counts[0] > allele_counts[1]:\n return alleles[0]\n else:\n return alleles[1]", "def phenotype(self, geneName=None):\n # if no gene name specified, build up an entire\n # phenotype dict\n if geneName == None:\n phenotype = {}\n for name, cls in self.genome.items():\n val = self.phenotype(name)\n if not phenotype.has_key(name):\n phenotype[name] = []\n phenotype[name].append(val)\n \n # got the whole phenotype now\n return phenotype\n \n # just getting the phenotype for one gene pair\n return self.genes[geneName]", "def __generate_genotype(self):\n if len(self.genotype) < self.__individual_genotype_length:\n gene = ''\n \n while 
len(self.genotype) < self.__individual_genotype_length:\n gene = str(random.randint(0,1))\n \n self.genotype = self.genotype + gene", "def get_genotype(self, snp):\n pass", "def phenotypes(self):\n return self.data.phenotypes.values", "def genotypes_of(self, sample):\n\t\treturn self.genotypes[self._sample_to_index[sample]]", "def make_seq_errors_genotype_model(g, error_probs):\n m = g.shape[0]\n frequency = np.sum(g) / m\n closest_row = (error_probs['freq']-frequency).abs().argsort()[:1]\n closest_freq = error_probs.iloc[closest_row]\n\n w = np.copy(g)\n \n # Make diploid (iterate each pair of alleles)\n genos = np.reshape(w,(-1,2))\n\n # Record the true genotypes (0,0=>0; 1,0=>1; 0,1=>2, 1,1=>3)\n count = np.sum(np.array([1,2]) * genos,axis=1)\n \n base_genotypes = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])\n \n genos[count==0,:]=base_genotypes[\n np.random.choice(4,sum(count==0), p=closest_freq[['p00', 'p01','p01', 'p02']].values[0]*[1,0.5,0.5,1]),:]\n genos[count==1,:]=base_genotypes[[0,1,3],:][\n np.random.choice(3,sum(count==1), p=closest_freq[['p10', 'p11', 'p12']].values[0]),:]\n genos[count==2,:]=base_genotypes[[0,2,3],:][\n np.random.choice(3,sum(count==2), p=closest_freq[['p10', 'p11', 'p12']].values[0]),:]\n genos[count==3,:]=base_genotypes[\n np.random.choice(4,sum(count==3), p=closest_freq[['p20', 'p21', 'p21', 'p22']].values[0]*[1,0.5,0.5,1]),:]\n\n return(np.reshape(genos,-1))", "def calculate_genotype_probabilities(self):\n for name, member in self.members.items():\n member.genotype_probabilities = self.genotype_probabilities_of(name)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
str genotype str base return P(base in genotype)
def prob_t_N(genotype, base): cnter = Counter(genotype) return cnter.get(base, 0) * 1/len(genotype)
[ "def get_genotype(gt):\n if ('0/0'):\n return \"0\"\n elif ('0/1'):\n return \"1\"\n else:\n return \"2\"", "def get_genotype(self, snp):\n pass", "def get_label(genotype_type):\n if genotype_type == \"Hom\":\n return 0\n elif genotype_type == \"Het\":\n return 1\n elif genotype_type == \"Hom_alt\":\n return 2", "def _get_genotype(self, genotype, allele_counts, alleles):\n\n if pd.isna(genotype):\n return np.nan\n elif genotype == 'Het':\n return ''.join(alleles)\n else:\n if allele_counts[0] > allele_counts[1]:\n return alleles[0]\n else:\n return alleles[1]", "def _GT2genotype(REF, ALT, GT):\n\n # / : genotype unphased\n # | : genotype phased\n\n g1, g2 = re.split('/|\\|', GT)\n\n # 0 : reference allele (what is in the REF field)\n # 1 : first allele listed in ALT\n # 2 : second allele list in ALT\n # and so on.\n\n bases = [REF] + ALT\n\n genotype = bases[int(g1)] + bases[int(g2)]\n\n return genotype", "def genotype_translocations(transinput):", "def all_genotype(ploidy):\n return [\"\".join(comb) for comb in cwr(\"ACGT-\", ploidy)]", "def genotype(args) :\n from genotyper import genotype_samples\n genotype_samples(args)", "def fromgenotype(self):\n\t\tpass", "def test_convert_genotype(self):\n \n genotypes = [(\"0/0\", 0), (\"0/1\", 1), (\"1/0\", 1), (\"1/1\", 2), \\\n (\"1/2\", 1), (\"2/1\", 1), (\"0/2\", 1), (\"2/0\", 1), (\"2/2\", 2)]\n \n # run thorugh all the legit genotype codes\n for geno in genotypes:\n genotype = geno[0]\n result = geno[1]\n self.assertEqual(self.var.convert_genotype(genotype), result)\n \n # Raise error when converting single character genotype\n with self.assertRaises(ValueError):\n self.var.convert_genotype(\"0\")\n \n # raise error when converting unknown genotype\n with self.assertRaises(AssertionError):\n self.var.convert_genotype(\"a/a\")\n \n # also include other genotype format posibilities. 
None of these are\n # used, but since they aren't explicitly forbidden, make sure they work\n \n # check two character strings\n self.assertEqual(self.var.convert_genotype(\"12|34\"), 1)\n self.assertEqual(self.var.convert_genotype(\"99|99\"), 2)", "def get_genotypes(record):\n snp_list = []\n\n for sample in range(0, len(record.samples)):\n if record.samples[sample].called:\n snp_list.append(record.samples[sample].gt_bases)\n else:\n snp_list.append(\"-\")\n\n return snp_list", "def base_codes(self):\n bases = []\n\n if self.is_gas_giant:\n bases.append(\"G\")\n if self.is_naval_base:\n bases.append(\"N\")\n if self.is_scout_base:\n bases.append(\"S\")\n if self.is_research_base:\n bases.append(\"R\")\n if self.is_tas:\n bases.append(\"T\")\n if self.is_consulate:\n bases.append(\"I\")\n if self.is_pirate_base:\n bases.append(\"P\")\n\n return \" \".join(bases)", "def genotypes_of(self, sample):\n\t\treturn self.genotypes[self._sample_to_index[sample]]", "def base_compos(sequence, base):\n count = Counter(sequence)[base]\n return count", "def __generate_genotype(self):\n if len(self.genotype) < self.__individual_genotype_length:\n gene = ''\n \n while len(self.genotype) < self.__individual_genotype_length:\n gene = str(random.randint(0,1))\n \n self.genotype = self.genotype + gene", "def get_genotype(self):\n return self.genotype", "def obtener_complemento(base):\n # retorna caracter\n if base =='C':\n return 'G'\n elif base =='G':\n return 'C'\n if base == 'T':\n return 'A'\n elif base == 'A':\n return 'T'\n if base!= 'A' or base != 'G' or base!='C'or base!='T':\n return \" NO ES UNA BASE VALIDA\"", "def getBiotype(method, processing=\"raw\"):\n try:\n if (processing in PROCESSINGS_BIOTYPE):\n transform_biotype = PROCESSINGS_BIOTYPE[processing]\n if (re.search('->', transform_biotype)):\n modes = transform_biotype.split(\"->\")\n biotype = re.sub(modes[0], modes[1], TYPES_BIOTYPE[METHODS_TYPE[method.lower()]])\n else:\n biotype = PROCESSINGS_BIOTYPE[processing]\n elif (method == \"uniform\"):\n biotype = \"Discrete Copy number data\" # TODO Continuous is better for grouping but posses problems with glyphs\n else:\n biotype = TYPES_BIOTYPE[METHODS_TYPE[method.lower()]]\n except:\n biotype = TYPES_BIOTYPE[\"unknown\"]\n warn(\"Biotype of \" + method.lower() + \" is unknown\")\n return biotype", "def _genotypes_str(sample_genotypes, genotypes_str_cache):\n n_copies = len(sample_genotypes[0])\n sample_cn = sum(sample_genotypes[0])\n if sample_cn in genotypes_str_cache:\n return genotypes_str_cache[sample_cn]\n\n sample_genotypes_str = [','.join(map(str, gt)) for gt in sample_genotypes]\n marginal_str = []\n if n_copies > 2:\n gt_str = ['?'] * n_copies\n for copy in range(n_copies):\n for curr_copy_cn in range(sample_cn + 1):\n gt_str[copy] = str(curr_copy_cn)\n marginal_str.append(''.join(gt_str))\n gt_str[copy] = '?'\n res = (sample_genotypes_str, marginal_str)\n genotypes_str_cache[sample_cn] = res\n return res" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
genotype: str. bases_all_reads: iterable object, list or np.array. return P(data|genotype) == likelihood
def likelihood_genotype(genotype, bases_all_reads, error_rates):
    likelihood = 1
    for observed_base in bases_all_reads:
        p = 0
        for base in "ACGT-":
            l = prob_t_N(genotype, base) * error_rates[base][observed_base]
            p += l
        likelihood *= p
    return likelihood
[ "def genotype_likelihoods_of(self, sample):\n\t\treturn self.genotype_likelihoods[self._sample_to_index[sample]]", "def get_genotypes(record):\n snp_list = []\n\n for sample in range(0, len(record.samples)):\n if record.samples[sample].called:\n snp_list.append(record.samples[sample].gt_bases)\n else:\n snp_list.append(\"-\")\n\n return snp_list", "def genotype(args) :\n from genotyper import genotype_samples\n genotype_samples(args)", "def genotype_translocations(transinput):", "def make_b2iot(hmm, p, mix_baf=mix_baf): #using pTNs, _b2mix\n mean, sd, uf = hmm['B2_mean'], hmm['B2_sd'], hmm['B2_uf']\n #get mean, sd for each genotype, each state\n means, sds = geno_distrs(mean, sd)\n #cal mean and sd for each mixed genotype, each state\n meanTNs, sdTNs = mix_baf(means, sds, N_COPY, p, NORMAL)\n #cal cdf(0) and cdf(1) for each mixed genotype, each state\n cdf_0s = [norm.cdf(0, meanTN, sdTN)\n for meanTN, sdTN in zip(meanTNs, sdTNs)]\n cdf_1s = [norm.cdf(1, meanTN, sdTN)\n for meanTN, sdTN in zip(meanTNs, sdTNs)]\n\n def b2iot(b, pfb): #using meanTNs, sdTNs, pTNs, uf, cdf_0s, cdf_1s,\n \"\"\"return likelihood of each state.\n \n eq. 16, 17, 18, 19\n \"\"\"\n nN = len(pTNs[NORMAL]) #number of normal genotypes\n pN = binom.pmf(range(nN), nN-1, pfb) #for p(gN), a 1darray\n raw = [] #the main term 'sumsum' for each state\n #for each state, aggregate all the genoT x genoN vals\n for meanTN, sdTN, pTN, cdf_0, cdf_1 in zip(\n meanTNs, sdTNs, pTNs, cdf_0s, cdf_1s):\n pgg = pN * pTN #P(g)P(g'|g) in eq. 16\n if b == 0:\n raw.append( sum(pgg * cdf_0) )\n elif b == 1:\n raw.append( sum(pgg * (1-cdf_1)) )\n else:\n pdf_b = norm.pdf(b, meanTN, sdTN)\n raw.append( sum(pgg * pdf_b) )\n if b in (0, 1):\n return uf * EPS_B * MAGIC + (1-uf) * array(raw)\n else:\n return uf * EPS_B + (1-uf) * array(raw)\n return b2iot", "def genotype_likelihoods_of(self, sample: str) -> List[Optional[GenotypeLikelihoods]]:\n return self.genotype_likelihoods[self._sample_to_index[sample]]", "def _get_genotype(self, genotype, allele_counts, alleles):\n\n if pd.isna(genotype):\n return np.nan\n elif genotype == 'Het':\n return ''.join(alleles)\n else:\n if allele_counts[0] > allele_counts[1]:\n return alleles[0]\n else:\n return alleles[1]", "def calculate_genotype_probabilities(self):\n for name, member in self.members.items():\n member.genotype_probabilities = self.genotype_probabilities_of(name)", "def genotypes_of(self, sample):\n\t\treturn self.genotypes[self._sample_to_index[sample]]", "def genotypes(self):\n return self.data.genotypes.values", "def get_genotype_by_individuals(self, individuals, format='c'):\n pass", "def get_genotype(self, snp):\n pass", "def pval_at_rna_by_nbinom(\n self, pos_dict_of_counts: Mapping[str, List], neg_vals_at_rna: np.array, gene_and_type,\n log_if_values_above=1E9,\n log_values=False, which='per_read',\n verbose=False):\n\n if len(neg_vals_at_rna) == 0:\n return None\n\n log_scale_high_value = (np.mean(neg_vals_at_rna) > log_if_values_above)\n\n if log_values or log_scale_high_value:\n log_this_gene = True\n neg_vals_at_rna = np.log10(neg_vals_at_rna)\n else:\n log_this_gene = False\n \n #if not np.any(neg_vals_at_rna):\n #print(\"No positive values in negatives.\")\n # neg_vals_at_rna = np.array([\n # self.negatives.lowest_positive_vals[which][x]/10 for x in \\\n # self.negatives.metadata.random_proteins])\n #print(f\"negatives now {neg_vals_at_rna}\")\n mean_negative = np.average(neg_vals_at_rna)\n std_negative = np.std(neg_vals_at_rna)\n\n vmr = (std_negative**2)/mean_negative\n\n verbose 
and print(f'vmr for negatives={vmr}')\n # Use a poisson if the var/mean is low enough:\n if vmr < 2:\n verbose and print(\"Using poisson.\")\n self.stats_log['vmr<2'] += 1\n pois = stats.poisson(mean_negative)\n return self.use_dist(pos_dict_of_counts, log_this_gene, pois)\n\n verbose and print(\"Wil try to use NB.\")\n self.stats_log['vmr>=2'] += 1\n\n # Try to fit a NB useing statsmodels.\n q = sm.NegativeBinomial(\n neg_vals_at_rna, np.array([1] * len(neg_vals_at_rna)), loglike_method='nb2')\n try:\n res = q.fit(disp=0)\n except: # If a NB can't be fit, revert to a poisson.\n print(f\"Could not run q.fit(disp=0) on neg_vals_at_rna= {neg_vals_at_rna}. Using poisson.\")\n pois = stats.poisson(mean_negative)\n return self.use_dist(pos_dict_of_counts, log_this_gene, pois)\n\n # Create a scipy.stats.nbinom object to use its cdf, based on the statsmodels fit parameters.\n # There is no cdf function for the statsmodels object.\n mu = res.predict()[0] # alpha = res.params[1]\n size = 1. / res.params[1] # prob = size / (size + mu)\n\n verbose and print(f\"Fit NB mu={mu}\")\n \n pvals = self.use_dist(\n pos_dict_of_counts, log_this_gene, stats.nbinom(size, size/(size + mu)))\n\n return pvals", "def test_iter_genotypes(self):\n with self.reader_f() as f:\n for g in f.iter_genotypes():\n variant_name = VARIANT_NAME_FIX.get(\n (truth.variant_to_key[g.variant], g.coded),\n truth.variant_to_key[g.variant],\n )\n\n expected = truth.genotypes[variant_name]\n self.assertEqual(expected, g)", "def G_stat(data):\r\n # G = 2*sum(f_i*ln(f_i/f_i_hat)) over all i phenotypes/sample classes\r\n # calculate the total number of observations under the consideration that\r\n # multiple observations in a given group are averaged.\r\n n = sum([arr.mean() for arr in data])\r\n a = len(data) # a is number of phenotypes or sample classes\r\n obs_freqs = array([sample_type.mean() for sample_type in data]) # f_i vals\r\n exp_freqs = zeros(a) + (n / float(a)) # f_i_hat vals\r\n G = 2. * (obs_freqs * log(obs_freqs / exp_freqs)).sum()\r\n return G", "def common_bases(bamdata, fastadata):\n\n for x in bamdata:\n\n try:\n # the bamdata should be [ref_pos, count, [bases]]\n # TODO:\n if x[1] == 1 and len(x) < 3:\n # appending the corresponding base for\n # this position from hg38 ref fasta\n #x.append((fastadata[x[0] + 1]))\n logging.error(\" Bam item does not have the correct format\")\n elif x[1] != 0 and x[2] == '':\n logging.warning(\"An entry of the form {0} found. 
Turnd RD to zero\".format(str(x)))\n\n # we miss a base so set the RD to zero\n x[1] = 0\n\n # and consult the refernce\n # (plus 1 since fasta is zero based )\n x[2] = [fastadata[x[0] + 1]]\n else:\n # provides the element which reaches the\n # highest occurrence first.\n common_count = Counter(x[2]).most_common(1)\n\n try:\n # when the most common mapped base to the position is an\n # indel then all elements of the string are appended\n # to the list (see screenshot on windows staff account).\n if len(common_count) != 0:\n\n # we will use the most common\n del (x[2:])\n indel = common_count[0][0]\n x.extend([indel[0]])\n\n \"\"\"\n if len(common_count[0][0]) > 1:\n del (x[2:])\n indel = common_count[0][0]\n x.extend([indel[0]])\n else:\n\n del (x[2:])\n # extend adds the new elements into the list,\n # not into the list as a separate list.\n x.extend([common_count[0][0]])\n \"\"\"\n elif x[1] != 0 and x[2] == []:\n\n logging.warning(\" Found a delete marking it\")\n logging.warning(\" x looked at is {0}\".format(x))\n # this is a delete mark is as such\n x[2] = [\"-\"]\n\n else:\n logging.warning(\" No common bases found don't know what to do\")\n logging.warning(\" x looked at is {0}\".format(x))\n\n except Exception as e:\n print(\"Common count is: {0}\".format(common_count))\n print(\"x is: {0}\" .format(x))\n raise\n except Exception as e:\n raise Error(\"An error occurred whilst extracting\\\n common_bases {0}\".format(str(e)))\n\n return bamdata", "def genotypes(self):\n return self._genos", "def nbasegens(self) :\n return len(self.__base_gens)", "def n(self):\n return len(self.genotypes)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The base exception class of connection exceptions.
def __init__(self, error_msg):
    super(ConnectionException, self).__init__(error_msg)
[ "def _suggest_error_class(self):\n if self.adoConn is not None:\n for e in self.adoConn.Errors:\n state = str(e.SQLState)\n if state.startswith('23') or state=='40002':\n return IntegrityError\n return DatabaseError", "def SocketError(self) -> SocketError:", "def handle_demisto_exception(e):\n if 'Proxy Error' in str(e):\n raise ConnectionError(MESSAGES['PROXY_ERROR'])\n elif 'ReadTimeoutError' in str(e):\n raise ConnectionError(MESSAGES['REQUEST_TIMEOUT'])\n elif 'ConnectionError' in str(e) or 'ConnectTimeoutError' in str(e):\n raise ConnectionError(MESSAGES['CONNECTION_ERROR'])\n elif 'SSLError' in str(e):\n raise SSLError(MESSAGES['SSL_CERT_ERROR'])\n else:\n raise e", "def _raise_unknown_error(ex):\n raise MsticpyKqlConnectionError(\n \"Another exception was returned by the service\",\n *ex.args,\n f\"Full exception:\\n{str(ex)}\",\n title=\"connection failed\",\n )", "def __init__(self, msg=None, exception=None):\n if msg is None:\n msg = 'An error occurred while interacting with the PostgreSQL '\n 'database'\n if exception is not None:\n msg + (': {}').format(exception)\n super(PsqlDbException, self).__init__(msg)\n self.original_exception = exception", "def format_connection_exception(e, driver):\n if adodbapi is not None:\n if isinstance(e, OperationalError) and e.args and isinstance(e.args[0], com_error):\n e_comm = e.args[0]\n hresult = e_comm.hresult\n sub_hresult = None\n internal_message = None\n if e_comm.args and len(e_comm.args) == 4:\n internal_args = e_comm.args[2]\n if len(internal_args) == 6:\n internal_message = internal_args[2]\n sub_hresult = internal_args[5]\n base_message, base_conn_err = _lookup_conn_error_and_msg(hresult, internal_message)\n sub_message, sub_conn_err = _lookup_conn_error_and_msg(sub_hresult, internal_message)\n if internal_message == 'Invalid connection string attribute':\n if base_message and sub_message:\n conn_err = sub_conn_err if sub_conn_err else base_conn_err\n return base_message + \": \" + sub_message, conn_err\n else:\n # else we can return the original exception message + lookup the proper\n # ConnectionErrorCode for this issue\n conn_err = sub_conn_err if sub_conn_err else base_conn_err\n return repr(e), conn_err\n else:\n # if not an Operational error, try looking up ConnectionErr type\n # by doing a regex search on the whole exception message\n e_msg = repr(e)\n _, conn_err = _lookup_conn_error_and_msg(0, e_msg)\n return e_msg, conn_err\n\n elif pyodbc is not None:\n e_msg = repr(e)\n _, conn_err = _lookup_conn_error_and_msg(0, e_msg)\n if conn_err == ConnectionErrorCode.driver_not_found:\n installed, drivers = _get_is_odbc_driver_installed(driver)\n if not installed and drivers:\n e_msg += \" configured odbc driver {} not in list of installed drivers: {}\".format(driver, drivers)\n return e_msg, conn_err\n\n return repr(e), ConnectionErrorCode.unknown", "def db_connection_error(error):\n return internal_server_error(error)", "def WrappedException(self) -> object:", "def cancelled_exception_class(cls) -> type[BaseException]:", "def retry_exceptions(self):\n raise NotImplementedError", "def test_base_exception(self):\n err = exceptions.FlaskKeystoneException(\n title=\"AnError\",\n message=\"We encountered an error.\"\n )\n self.assertEqual(err.status_code, 500,\n \"BaseException should result in 500 Status Code.\"\n \"got %d\" % err.status_code)\n self.assertEqual(err.title, \"AnError\",\n \"BaseException title not correctly set.\")\n self.assertEqual(err.message, \"We encountered an error.\",\n \"BaseException message not 
correctly set.\")\n self.assertEqual(err.to_dict(), {\n \"code\": 500,\n \"title\": \"AnError\",\n \"message\": \"We encountered an error.\"\n }, \"Error message did not match.\")", "def connection_lost(self, exc):", "def unexpectedException(self):", "def __init__(self, message, source=None):\n Exception.__init__(self, message)\n self.source = source", "def test_exception_types(self):\n self.assertTrue(issubclass(Unavailable, DriverException))\n self.assertTrue(issubclass(Unavailable, RequestExecutionException))\n\n self.assertTrue(issubclass(ReadTimeout, DriverException))\n self.assertTrue(issubclass(ReadTimeout, RequestExecutionException))\n self.assertTrue(issubclass(ReadTimeout, Timeout))\n\n self.assertTrue(issubclass(WriteTimeout, DriverException))\n self.assertTrue(issubclass(WriteTimeout, RequestExecutionException))\n self.assertTrue(issubclass(WriteTimeout, Timeout))\n\n self.assertTrue(issubclass(CoordinationFailure, DriverException))\n self.assertTrue(issubclass(CoordinationFailure, RequestExecutionException))\n\n self.assertTrue(issubclass(ReadFailure, DriverException))\n self.assertTrue(issubclass(ReadFailure, RequestExecutionException))\n self.assertTrue(issubclass(ReadFailure, CoordinationFailure))\n\n self.assertTrue(issubclass(WriteFailure, DriverException))\n self.assertTrue(issubclass(WriteFailure, RequestExecutionException))\n self.assertTrue(issubclass(WriteFailure, CoordinationFailure))\n\n self.assertTrue(issubclass(FunctionFailure, DriverException))\n self.assertTrue(issubclass(FunctionFailure, RequestExecutionException))\n\n self.assertTrue(issubclass(RequestValidationException, DriverException))\n\n self.assertTrue(issubclass(ConfigurationException, DriverException))\n self.assertTrue(issubclass(ConfigurationException, RequestValidationException))\n\n self.assertTrue(issubclass(AlreadyExists, DriverException))\n self.assertTrue(issubclass(AlreadyExists, RequestValidationException))\n self.assertTrue(issubclass(AlreadyExists, ConfigurationException))\n\n self.assertTrue(issubclass(InvalidRequest, DriverException))\n self.assertTrue(issubclass(InvalidRequest, RequestValidationException))\n\n self.assertTrue(issubclass(Unauthorized, DriverException))\n self.assertTrue(issubclass(Unauthorized, RequestValidationException))\n\n self.assertTrue(issubclass(AuthenticationFailed, DriverException))\n\n self.assertTrue(issubclass(OperationTimedOut, DriverException))\n\n self.assertTrue(issubclass(UnsupportedOperation, DriverException))", "def exception(self):\n return self._exception", "def exception(self):\n if self._status == Future.STATUS_STARTED:\n raise InvalidStateError()\n if self._status == Future.STATUS_CANCELED:\n raise CancelledError()\n if self._status == Future.STATUS_ERROR:\n return self._exception", "def test_scmb_connection_fail(self):\n log = exceptions.SCMBConnectionFailException('Exception SCMBConnectionFailException raised')\n self.assertEqual(log.message, 'Exception SCMBConnectionFailException raised')", "def solid_exception(self) -> Optional[BaseException]:\n return self.op_exception" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reloads the Polls file.
def reloadpolls(self, irc, msg, args):
    try:
        self.polls = yaml.load(open(self.pollFile, 'r'),
                               Loader=yamlordereddictloader.Loader)
    except FileNotFoundError as e:
        log.warning("Couldn't open file: %s" % e)
        raise
[ "def reloadfile(self, ):\n self.loadfile()", "def reload(self,) -> None:\n\n try:\n self._data = self._request(self._self_link)\n\n except requests.exceptions.HTTPError as error:\n warnings.warn(f'Reloading resource failed: {error}')", "def reload(self):\n self.read(self._cfg_path)", "def reload(self):\n if len(self.files) > 0:\n self.load(self.files, regfiles=self.regions)", "def reload(self):\n if self.filename is not None:\n self.channels.clear()\n try:\n self.open(self.filename)\n except EnvironmentError, e:\n log.warning('ChannelsDictionary.reload failed: %s', e)\n else:\n log.warning('ChannelsDictionary.reload without self.filename.')", "def reload(self):\n self.stop()\n # Scheduler Config neu laden\n if self._loadConfig():\n self.messenger.send('Scheduler reloaded by user', '0500', 'success', 'reload', None, 'appinternal')\n self.start()", "def _reload(self):\n self.logger.info('Reloading file %s' % self.filename)\n statinfo = os.stat(self.filename)\n ctime = statinfo.st_ctime\n self._lastreload = ctime\n with open(self.filename, 'r') as fp:\n lines = fp.readlines()\n newcontent = []\n\n for line in lines:\n for func in self.linefilters:\n line = func(line)\n if line is None:\n break\n\n if line is not None:\n newcontent.append(line)\n\n self.content = newcontent", "def autoreload_watcher():\n if not state.curdoc or not state.curdoc.session_context.server_context:\n return\n cb = partial(_reload_on_update, {})\n _callbacks[state.curdoc] = pcb = PeriodicCallback(callback=cb, background=True)\n pcb.start()", "def reload(self, filename = None):\r\n if self.config.get('world', 'autosave'):\r\n self.save()\r\n self.load(filename or self.filename)", "def refresh(self):\n\n self._store.uncache_resource(self.href)\n self._ensure_data(True)", "def reload_cookies(self):\n\n if os.path.exists(self.location_of_cookies):\n with open(self.location_of_cookies, 'rb') as f:\n cookies = pickle.load(f)\n self.load_cookies(cookies, self.cookie_domain)\n \n f.close()", "async def reload(self):\n old_status_table = {\n datapack.name: datapack.status for datapack in self.loaded_data_packs\n }\n await self.cleanup()\n await self.schedule_datapack_load()\n await shared.event_handler.call_async(\"datapack:reload\")\n\n # restore old state\n for datapack in self.loaded_data_packs:\n if datapack.name in old_status_table:\n if old_status_table[datapack.name] in (\n DataPackStatus.ACTIVATED,\n DataPackStatus.DEACTIVATED,\n ):\n datapack.status = old_status_table[datapack.name]", "def onReload(self):\n return", "def reload_config(self):\n pass", "async def reload(ctx, name):\n await unload_extension(name, channel=ctx.channel)\n await load_extension(name, channel=ctx.channel)", "def reload_config(self):\n self.conf.reload()\n self.load_config()", "def refresh(self) -> None:\n self.data = {}\n self.load_settings_file(self.default_settings_path / \"settings.yaml\", file_key=\"internal\")\n self.load_systems(self.default_settings_path / \"systems\")\n self.load_settings_file(self.personal_dir / \"settings.yaml\", file_key=\"user\")\n self.load_systems(self.personal_dir / \"systems\")", "def reload(self):\n self.all_folders = []\n self.all_files = []\n print(\"Обновление содержимого Я.Диска:\")\n self._parse_catalogues()", "async def reload(self):\n tasks = [repository.update() for repository in\n self.repositories.values()]\n if tasks:\n await asyncio.wait(tasks, loop=self.loop)\n\n # read data from repositories\n self.data.reload()\n\n # update addons\n await self.load_addons()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[channel] Vote on a poll. Channel is only needed if used in a PM.
def vote(self, irc, msg, args, channel, pid, yaynay):
    if yaynay not in ['yay', 'nay']:
        irc.error("Valid Answers are 'yay' or 'nay'.")
        return
    if channel in self.polls.keys():
        if self.polls[channel][pid]['concluded']:
            irc.reply("Poll #%s is finished, it does not accept updates." % pid)
            return
        if self._vote(irc, channel, msg.nick, pid, yaynay):
            irc.reply("Successfully voted on %s" % self.polls[channel][pid]['question'])
        else:
            log.debug('Not dumping due to no change.')
    else:
        irc.error("'%s' has no polls." % channel)
[ "async def _vote_count(\n self, ctx: Context, *, channel: discord.TextChannel = None\n ):\n\n guild: discord.Guild = ctx.guild\n\n if not channel:\n channel = await self.get_vote_channel(guild)\n if isinstance(channel, str):\n return await ctx.send(channel)\n\n history = await channel.history(oldest_first=True).flatten()\n if len(history) > 100:\n return await ctx.send(_(\n \"I couldn't identify a voting channel. Please specify one explicitly.\"\n ))\n else:\n history = await channel.history(oldest_first=True).flatten()\n if len(history) > 100:\n return await ctx.send(_(\n \"That channel has too many messages!\"\n \" Please ask a host for manual vote count.\"\n ))\n\n if len(history) < 1:\n return await ctx.send(_(\"{} is empty.\").format(channel.mention))\n\n user_votes = {}\n player_role = guild.get_role(\n await self.config.guild(guild).player_id()\n )\n\n for message in history:\n author = message.author\n if player_role not in author.roles:\n continue\n vote = self.get_vote_from_message(message)\n if not vote:\n continue\n user_votes[f\"{author.name}#{author.discriminator}\"] = vote\n\n user_votes = await self.get_non_voters(guild, user_votes)\n\n votes = {}\n for user in user_votes:\n val = user_votes[user].capitalize()\n try:\n votes[val].append(user)\n except KeyError:\n votes[val] = [user]\n\n # max votes first\n votes = dict(sorted(\n votes.items(), key=lambda item: len(item[1]), reverse=True\n ))\n\n # Pop and add stuff back to dict for ordering purpose.\n try:\n votes[\"VTNL\"] = votes.pop(\"Vtnl\")\n except KeyError:\n pass\n try:\n votes[\"No vote\"] = votes.pop(\"No vote\")\n except KeyError:\n pass\n\n txt = \"\"\n\n for i, vote in enumerate(votes, start=1):\n voters = votes[vote]\n\n if vote == \"VTNL\":\n txt += _(\"\\n\\n**{}** - {} ({})\").format(vote, len(voters), \", \".join(voters))\n elif vote == \"No vote\":\n txt += _(\"\\n\\n**Not voting** - {} ({})\").format(len(voters), \", \".join(voters))\n else:\n txt += _(\"\\n{}. **{}** - {} ({})\").format(i, vote, len(voters), \", \".join(voters))\n\n title = _(\"Vote Count\")\n\n embed = discord.Embed(\n color=0x00CDFF, title=title,\n description=_(\"__Counting from {} channel.__\\n\\n{}\").format(\n channel.mention, txt.strip()\n )\n )\n\n try:\n await ctx.send(embed=embed)\n except discord.Forbidden:\n await ctx.send(\n f\"**{title}**\\n\\n__Counting from {channel.mention}\"\n f\" channel.__\\n\\n{txt.strip()}\"\n )", "def on_vote_received(node, message):", "async def _msgvote_on(self, ctx):\n\n channel = ctx.message.channel\n if channel.id in self.settings[\"channels_enabled\"]:\n await self.bot.say(\"Msgvote mode is already on in this channel.\")\n else:\n self.settings[\"channels_enabled\"].append(channel.id)\n dataIO.save_json(self.settings_path, self.settings)\n await self.bot.say(\"Msgvote mode is now on in this channel.\")", "def poll_vote(self, mess, args):\n if not Poll.active_poll:\n raise CommandError(\"No active poll. 
Use !poll start to start a poll.\")\n\n index = args\n\n if not index:\n raise CommandError(\"usage: !poll vote <option_number>\")\n\n if not index.isdigit():\n raise CommandError(\"Please vote using the numerical index of the option.\")\n\n poll = self[Poll.active_poll]\n options, usernames = poll\n\n index = int(index)\n if index > len(options) or index < 1:\n raise CommandError(\n \"Please choose a number between 1 and %d (inclusive).\" % len(options)\n )\n\n option = list(options.keys())[index - 1] # FIXME: this looks random\n\n if not option in options:\n raise CommandError(\n \"Option not found. Use !poll show to see all options of the current poll.\"\n )\n\n username = mess.frm.person\n\n if username in usernames:\n raise CommandError(\"You have already voted.\")\n\n usernames.append(username)\n\n options[option] += 1\n self[Poll.active_poll] = poll\n\n return self.format_poll(Poll.active_poll)", "def new_vote(self, issue: Issue, config: ChannelConfig) -> Vote:\n\n # create a vote object\n vote = Vote()\n vote.issue = issue\n vote.poll = None\n vote.period_start = int(time.time())\n vote.period_end = vote.period_start + int(config.discord.voting_period_seconds)\n vote.config = config\n\n return vote", "async def vote(self, ctx):\n embed = discord.Embed(title = \"Here are some bot lists that you can vote for me on, voters may soon™ recieve perks\", color = discord.Color.blurple())\n embed.add_field(name = \"Bots For Discord\", value = \"[Click Here](https://botsfordiscord.com/bot/592811241756688405/vote)\")\n embed.add_field(name = \"Discord Boats\", value = \"[Click Here](https://discord.boats/bot/592811241756688405/vote)\")\n embed.add_field(name = \"Divine Discord Bots\", value = \"[Click Here](https://divinediscordbots.com/bot/592811241756688405/vote)\") \n embed.add_field(name = \"Botlist.space\", value = \"[Click Here](https://botlist.space/bot/592811241756688405/upvote)\") \n embed.set_thumbnail(url = self.bot.user.avatar_url)\n await ctx.send(embed = embed)", "async def get_vote_channel(self, guild: discord.Guild):\n\n vote_channels = [\n ch for ch in guild.channels\n if \"voting\" in ch.name\n or \"vote\" in ch.name\n ]\n\n if len(vote_channels) < 1:\n return _(\n \"I couldn't identify a voting channel.\"\n \" Please specify one explicitly.\"\n )\n\n if len(vote_channels) > 1:\n # get channel with the largest suffixed number\n return max(\n vote_channels, key=lambda obj: int(obj.name.split(\"-\")[1])\n )\n\n else:\n return vote_channels[0]", "def receive_poll(update: Update, context: CallbackContext) -> None:\n actual_poll = update.effective_message.poll\n # Only need to set the question and options, since all other parameters don't matter for\n # a closed poll\n update.effective_message.reply_poll(\n question=actual_poll.question,\n options=[o.text for o in actual_poll.options],\n # with is_closed true, the poll/quiz is immediately closed\n is_closed=True,\n reply_markup=ReplyKeyboardRemove(),\n )", "async def quotevote(self, ctx, quote_votes = None):\r\n\t\tif not await Utils.is_bot_admin_reply(ctx): return\r\n\t\tif quote_votes is None: # We're querying the current value\r\n\t\t\tqv = self.settings.getServerStat(ctx.guild,\"QuoteVotes\",1)\r\n\t\t\tif not isinstance(qv,int):\r\n\t\t\t\tqv = 1\r\n\t\t\t\tself.settings.setServerStat(ctx.guild,\"QuoteVotes\",1)\r\n\t\t\treturn await ctx.send(\"Quote votes currently set to {:,}.\".format(qv))\r\n\t\t# We're setting a value - make sure it's an int, and at least 1\r\n\t\ttry:\r\n\t\t\tqv = int(quote_votes)\r\n\t\t\tassert qv > 
0\r\n\t\texcept:\r\n\t\t\treturn await ctx.send(\"Quote votes must be an integer of at least 1.\")\r\n\t\t# Set the value.\r\n\t\tself.settings.setServerStat(ctx.guild,\"QuoteVotes\",qv)\r\n\t\tawait ctx.send(\"Quote votes set to {:,}.\".format(qv))", "def on_before_poll(bot):", "def vote(self, part_key, choice):\n part_data = self.get_participant(part_key)\n poll_key = part_data['poll']\n poll_data = self.get_poll(poll_key)\n num_choices = len(poll_data['choices'])\n if(choice not in range(num_choices)):\n raise Exception('Invalid choice value ' + choice +\n ' provided to model.vote()')\n part_data['choice'] = choice\n part_data['voted'] = True\n self.set_participant(part_key, part_data)\n # TODO: Remove the following log notification\n print ('Participant ' + part_data['email'] + ' voted for ' +\n poll_data['choices'][part_data['choice']] + '.')\n return part_data", "async def vote_setup(ctx: commands.Context):\n session = session_maker()\n old_channel = session.query(Channel).filter_by(channel_id=ctx.channel.id).one_or_none()\n if old_channel is not None:\n await ctx.send('This channel is already setup.')\n return\n channel = Channel(server_id=ctx.guild.id, channel_id=ctx.channel.id)\n session.add(channel)\n session.commit()\n await ctx.send(f'{ctx.channel} set up for voting!')", "async def simple_poll(self, ctx, seconds: int, *, topic: str):\n\n MemeCommand.check_rate_limit(ctx, 200, True)\n\n if not checks.sudo_check(ctx.message) and (seconds > 3600 or seconds <= 0):\n await ctx.send(\"Invalid poll length. Polls must be less than 3600 seconds.\")\n return\n\n time_msg = \"\\nYou have {} seconds to vote.\".format(seconds) if seconds > 1 else \"\"\n output = \"Vote on {.message.author.display_name}'s poll with reactions.\\n**{}**{}\".format(ctx, topic, time_msg)\n\n message = await ctx.send(output)\n\n await self.add_thumbs(message)\n\n await asyncio.sleep(seconds)\n embed = discord.Embed(title=topic, color=discord.Color.blue())\n embed.set_author(name=ctx.message.author.name, icon_url=ctx.message.author.avatar_url)\n # Pull the number of reactions from the message itself\n thumbs_up = 0\n thumbs_down = 0\n\n message = await message.channel.get_message(message.id) # Update the message object\n\n for reaction in message.reactions:\n if reaction.emoji == \"👍\":\n thumbs_up = reaction.count - 1 # Account for the extra one\n elif reaction.emoji == \"👎\":\n thumbs_down = reaction.count - 1\n embed.add_field(name=\"👍\", value=str(thumbs_up))\n embed.add_field(name=\"👎\", value=str(thumbs_down))\n\n await ctx.send(\"**Results of <@{.message.author.id}>'s poll:**\".format(ctx), embed=embed)", "def __init__(self, poll, question, options, total_votes, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.poll = poll\n self.question = question\n self.options = options\n self.total_votes = total_votes", "async def vps(self, ctx):\n await ctx.send(\"https://discordapp.com/channels/566451504332931073/566451504903618561/662484243808780309\")", "def cmd_comment_vote(client, args):\n comment_vote = client.comment_vote(args.comment_id, args.vote)\n generate_output({'comment_vote': comment_vote})", "def vote(self, modelID, vote=0, gallery=None, wait=False):\n VOTES = {-1: 'down', 0: 'veto', 1: 'up'}\n vote = VOTES.get(vote, vote)\n # Try to match a comment ID (full numerical)\n if gallery is None:\n gallery = isinstance(modelID, str) and re.match(r'^[0-9]{6,12}', modelID) is None\n url = f'{\"gallery\" if gallery else \"comment\"}/{modelID}/vote/{vote}'\n return self.post(url, wait=wait)", "async 
def votechannel_list(self, ctx):\n channels = await self.bot.db.execute(\n \"\"\"\n SELECT channel_id, voting_type FROM voting_channel WHERE guild_id = %s\n \"\"\",\n ctx.guild.id,\n )\n if not channels:\n raise exceptions.Info(\"There are no voting channels on this server yet!\")\n\n rows = []\n for channel_id, voting_type in channels:\n rows.append(f\"<#{channel_id}> - `{voting_type}`\")\n\n content = discord.Embed(\n title=f\":1234: Voting channels in {ctx.guild.name}\", color=int(\"3b88c3\", 16)\n )\n await util.send_as_pages(ctx, content, rows)", "def answerPoll(self, question):\n \n print(question.getPrompt())\n resp = input(\"Answer: \")\n return PollResponse(resp)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
List waiters within the given configuration.
def ListWaiters(self, request, context):
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
[ "def waiters(self):\n waiters = []\n\n for name, item in self._definition.get('waiters', {}).items():\n name = self._get_name('waiter', Waiter.PREFIX + name)\n waiters.append(Waiter(name, item))\n\n return waiters", "def waiters(self):\n return [cb[0].fiber for cb in self._callbacks if hasattr(cb[0], 'fiber')]", "def refreshWaitingList(self):\n RCI.instance().waitingTasks()", "def list():\n # Calling config file\n cf = config.ReadFile(config_file)\n user = cf[\"authentication\"][\"user\"]\n\n l = []\n for job in cron:\n l.append(job)\n return l", "def __put_in_wait_list(self, patron): # waitlist mutator\r\n self.__waitlist.append(patron)", "def test_search_wait_list(self):\n pass", "def create_checkers(config):\n\n checkers = []\n if 'checkers' in config:\n for checker_name, checker_config in config['checkers'].iteritems():\n if checker_name in __checkers:\n configs = None\n if type(checker_config) == list:\n configs = checker_config\n else:\n configs = [checker_config]\n for config in configs:\n ch = __checkers[checker_name]()\n ch.set_config(config)\n if ch:\n checkers.append(ch)\n return checkers", "def accounts_waiting(self) -> list:\n\n return [\n key\n for (key, item) in self.db.items()\n if item[\"status\"] == str(WhitelistStatus.waiting)\n ]", "def get_reviewers(config): # type: (Config) -> List[str]\n phabricator = False\n finders = [\n FindLogReviewers,\n FindHistoricalReviewers,\n FindArcCommitReviewers\n ]\n reviewers = Counter() # type: typing.Counter[str]\n for finder in finders:\n finder_reviewers = finder(config).get_reviewers()\n if config.verbose:\n print(\n \"Reviewers from %s: %s\" %\n (finder.__name__, dict(finder_reviewers))\n )\n reviewers.update(finder_reviewers)\n if finder == FindArcCommitReviewers and finder_reviewers:\n phabricator = True\n\n most_common = [x[0] for x in reviewers.most_common()]\n most_common = [x for x in most_common if x not in config.ignores]\n if phabricator:\n most_common = FindArcCommitReviewers(config) \\\n .filter_phabricator_activated(most_common)\n reviewers_list = most_common[:REVIEWERS_LIMIT]\n return reviewers_list", "def list(self):\n self.background_scheduler.print_jobs()", "def list_watchdogs():\n listWatchdogs()", "def _ls_waiting_jobs(self):\n \n jobs = [j for j in os.listdir(pjoin(self._jobsdir, \"00_waiting\")) if j.endswith(self._job_ext)]\n \n if self._job_filter:\n jobs = [j for j in jobs if self._job_filter(pjoin(self._jobsdir, \"00_waiting\", j), j)]\n \n return jobs", "def wait_on_cluster_conditions(cluster, waiters):\n results = []\n start = datetime.datetime.now()\n while waiters:\n new_waiters = []\n for waiter in waiters:\n type = waiter.get(\"type\")\n name = waiter.get(\"name\")\n timeout = waiter.get(\"timeout\", 1800) # 30 minutes\n expiry = waiter.get(\"expiry\")\n namespace = waiter.get(\"namespace\", \"default\")\n if timeout:\n if not expiry:\n waiter[\"expiry\"] = start + \\\n datetime.timedelta(seconds=timeout)\n if datetime.datetime.now() > waiter[\"expiry\"]:\n waiters = []\n waiter.pop('expiry')\n return None, f\"Waiter: {waiter} expired on cluster: {cluster.id}\" # noqa\n if type == \"ingress\":\n ingress = cluster.ctl.get_ingress(\n name=name, namespace=namespace)\n ips = ingress.get(\"ips\")\n hostnames = ingress.get(\"hostnames\")\n if ips or hostnames:\n waiter.update({\"result\": ingress})\n waiter.pop(\"expiry\", None)\n results.append(waiter)\n else:\n new_waiters.append(waiter)\n waiters = new_waiters\n sleep(5)\n return results, None", "def get_binners(config):\n binners = []\n if 
config[\"binning\"][\"metabat\"]:\n binners.append(\"metabat\")\n if config[\"binning\"][\"concoct\"]:\n binners.append(\"concoct\")\n if config[\"binning\"][\"maxbin\"]:\n binners.append(\"maxbin\")\n return binners", "def __init__(self, waiter_config):\n self._waiter_config = waiter_config['waiters']\n\n # These are part of the public API. Changing these\n # will result in having to update the consuming code,\n # so don't change unless you really need to.\n version = waiter_config.get('version', 'unknown')\n self._verify_supported_version(version)\n self.version = version\n self.waiter_names = list(sorted(waiter_config['waiters'].keys()))", "def wait_for_workers(self):\r\n stop = False\r\n workers = self.aggregator.get_participants()\r\n\r\n while not stop: \r\n try:\r\n with self.aggregator:\r\n resp = self.aggregator.receive(1)\r\n participant = resp.notification['participant']\r\n workers.append(participant)\r\n print('Task %s: participant %s has joined' % (self.task_name, participant))\r\n except Exception as err:\r\n print(\"Task %s: joined %d participants out of %d\" % (self.task_name, len(workers), self.Nworkers))\r\n #print(err)\r\n #print('Check here: error')\r\n #import code\r\n #code.interact(local=locals())\r\n pass\r\n\r\n if len(workers) == self.Nworkers:\r\n stop = True\r\n\r\n workers = self.aggregator.get_participants()\r\n return list(workers.keys())", "def resource_list(conf: OCIConfig):\n def _retrieve_resources_in_compartment(tree, region, traverse_level=1, scan_resources=False): \n logging.info('{} {}'.format('__'*traverse_level, tree['name']))\n items = tree.get(R.COMPARTMENT)\n for nested_item in [] if not items else items:\n traverse_level += 1\n scan = scan_resources or not bool(conf.compartment_filter) or nested_item.name in conf.compartment_filter\n _retrieve_resources_in_compartment(nested_item, region, traverse_level, scan_resources=scan)\n traverse_level -= 1\n if scan_resources:\n _get_network_resources(tree, conf)\n _get_bv_resources(tree, conf)\n _get_instance_resources(tree, conf)\n _get_lb_resources(tree, conf)\n _get_db_resources(tree, conf)\n _get_autonomous_resources(tree, conf)\n\n for r in conf.compartments_tree.keys():\n # logging.info(r)\n conf.workon_region = r\n logging.info(\"Resource discovery - visit compartments in {} region\".format(r))\n _init_api_client(conf)\n\n # bv_client.list_volumes('').data\n for tree in conf.compartments_tree[r]:\n scan = not bool(conf.compartment_filter) or tree.name in conf.compartment_filter\n _retrieve_resources_in_compartment(tree, r, scan_resources=scan)", "def list(self, config_path: str, results_filter: Optional[ObjectType]) -> List[str]:\n ...", "def _GetAllTryservers(trybot_config):\n all_tryservers = set()\n for builders in trybot_config.itervalues():\n for config in builders.itervalues():\n tryserver = config.get('mastername')\n all_tryservers.add(tryserver)\n return list(all_tryservers)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save seed into temp file.
def saveseed(self, seed):
    savefile = gettempdir() + '/last_test_seed_fate.tmp'
    if args.verbose:
        print('Saving run into ' + savefile)
    with open(savefile, 'w') as f:
        f.write(str(seed))
[ "def save_seed(target_dir: Path) -> None:\n with open(target_dir / \"seed\", \"wb\") as f:\n pickle.dump(\n {\n \"datasets\": INITIAL_DATASETS,\n \"orig_datablocks\": INITIAL_ORIG_DATABLOCKS,\n \"attachments\": INITIAL_ATTACHMENTS,\n },\n f,\n )", "def local_seed(self) -> str:\n assert self.definition.settings.sp_root_dir\n seed_file = self.definition.settings.sp_root_dir.joinpath(\"seed.txt\")\n if not seed_file.exists():\n seed = str(encode_hex(bytes(random.randint(0, 255) for _ in range(20))))\n seed_file.write_text(seed)\n else:\n seed = seed_file.read_text().strip()\n return seed", "def save_tmp_file(self, data):\n with open(self.tmp_file, 'wb') as f:\n f.write(data)", "def saveSeeds(seeds):\r\n out = []\r\n for seed in seeds:\r\n out.append( seed[0].strip() + \",\" + seed[1].strip() )\r\n fout = codecs.open(\"seedlist.txt\", \"w\", \"utf-8\")\r\n fout.write( u\"\\n\".join(out) )\r\n fout.close()", "def temporary(cls):\n fh, path = tempfile.mkstemp(suffix='.hdf5')\n os.close(fh)\n self = cls(path, 'w')\n self.path = path\n return self", "def save_tempfile(self, data):\n from tempfile import mkstemp\n fd, name = mkstemp()\n fp = os.fdopen(fd, 'wb')\n fp.write(data)\n fp.close()\n return name", "def fillFile(self, len=100):\n if not os.path.exists(TESTFILES):\n os.mkdir(TESTFILES)\n fd, path = tempfile.mkstemp(dir=TESTFILES)\n self.path = path\n os.write(fd, os.urandom(len))\n os.close(fd)", "def save_setup(self) -> None:\n setup_now = self.get_current_setup()\n gdd.generate_file(\n self.experiment_file_dict, file_path=dp.DAF_CONFIGS, file_name=setup_now\n )", "def make_checkpoint(self):\n try:\n # Locate the checkpoint directory.\n checkpoint_dir = os.path.join(self.config[consts.EXPORT_DIR],\n \"checkpoints\",\n \"experiment_%09d\" % self.id,\n \"epoch_%09d\" % self.current_epoch)\n # If the checkpoint directory already exists, remove it.\n if os.path.isdir(checkpoint_dir):\n shutil.rmtree(checkpoint_dir)\n # Create the checkpoint directory.\n os.makedirs(checkpoint_dir)\n\n # Persist the current state of the experiment.\n with torch.no_grad():\n self.serialize(checkpoint_dir)\n\n # Update the stamp indicating the newest checkpoint.\n epoch_stamp_path = os.path.join(self.config[consts.EXPORT_DIR], \"epoch_stamp.pickle\")\n with open(epoch_stamp_path, 'wb') as file:\n pickle.dump({consts.EXPERIMENT_ID: self.id, consts.EPOCH_NUMBER: self.current_epoch}, file)\n except IOError as exception:\n raise exception", "def _save(self, filename = str(int(time()))):\n if filename:\n with open(filename, 'w') as f:\n f.write('null')\n self.prompt_time = 0\n exit()", "def save(self, path):\n individual = self.population.fittest_individual()\n order = [int(l) for l in individual.label_order]\n fitness = individual.fitness\n data = {'name': self.ds.name,\n 'num_labels': len(order),\n 'order': order,\n 'fitness': fitness\n }\n with open(path, 'w') as f:\n json.dump(data, f)", "def saveCheckpoint(self):\n time_stamp = time.strftime('%Y%m%d%H%M%S', time.gmtime())\n state_filename = os.path.join(self.saving_dir, 'checkpoint.' + time_stamp + '.pth.tar')\n mem_filename = os.path.join(self.saving_dir, 'memory.' 
+ time_stamp + '.pth.tar')\n state = self.getSavingState()\n memory = {\n 'memory': self.memory\n }\n torch.save(state, state_filename)\n torch.save(memory, mem_filename)", "def save_tu(tu):\n _, path = tempfile.mkstemp()\n tu.save(path)\n\n return path", "def save(self, filename=\"fitter.pickle\"):\n\n with open(filename, \"wb\") as outfile:\n pickle.dump(self, outfile)", "def _generate_to_tempfile(self, generator):\r\n (output_fd, output_path) = tempfile.mkstemp()\r\n with os.fdopen(output_fd, 'w') as output:\r\n generator.write(output)\r\n return output_path", "def dump_to_tmpfile(obj):\n\n import tempfile\n\n fname = tempfile.mktemp()\n with open(fname, \"w\") as txtfile:\n txtfile.write(str(obj))\n\n print(\"str(obj) was written to {}\".format(fname))\n\n return fname", "def test(): \n\t\tspawn_dummy_yield_file() \n\t\ttry: \n\t\t\tsave(_DUMMY_FILENAME_) \n\t\texcept: \n\t\t\treturn False \n\t\tfinally: \n\t\t\tremove_dummy_yield_file() \n\t\treturn True", "def save_training(self):\n\n filename = str(hashlib.sha1(str(self.training_data).encode(\"utf-8\"))\n .hexdigest())\n path = \"./training/\" + filename + \".json\"\n\n data = {\n \"states\": self.states,\n \"transitions\": self.transitions,\n \"matrix\": self.matrix.tolist()\n }\n\n with open(path, \"w\") as outfile:\n json.dump(data, outfile)", "def _save_to_file(self, world, is_smart):\n filepath = self._get_filepath(world._generating_city_name, is_smart, world._generating_scale)\n assert not os.path.exists(filepath), \"File '%s' already exists!\" % filepath\n log.info(\"Saving the new results to {} ...\".format(filepath))\n if not os.path.exists(self.output_dir):\n os.makedirs(self.output_dir)\n with open(filepath, 'wb') as f:\n pickle.dump((world, Params.loader()), f)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns an HTML script element for including a script from the admin media url (or other location if an absolute url is given).
def include_admin_script(script_path):
    if not absolute_url_re.match(script_path):
        script_path = '%s%s' % (settings.ADMIN_MEDIA_PREFIX, script_path)
    return '<script type="text/javascript" src="%s"></script>' % script_path
[ "def external_js():\n return u'\\n'.join(format_html(u'<script src=\"{0}\"></script>', url) for url in settings.EXTERNAL_JS)", "def resource_js(self):\n \n portal_url = getSite().absolute_url()\n \n return \"\"\"\n <script type=\"text/javascript\" src=\"%s/++resource++swfobject.js\"></script>\n <script type=\"text/javascript\" src=\"%s/++resource++audio_player.js\"></script> \n <script type=\"text/javascript\"> \n AudioPlayer.setup(\"%s/++resource++audio_player.swf\", { \n width: 300\n }); \n </script>\n \"\"\" % (portal_url, portal_url, portal_url)", "def require_module(module):\n if not require_settings.REQUIRE_DEBUG and module in require_settings.REQUIRE_STANDALONE_MODULES:\n return u\"\"\"<script src=\"{module}\"></script>\"\"\".format(\n module = staticfiles_storage.url(resolve_require_module(require_settings.REQUIRE_STANDALONE_MODULES[module][\"out\"])),\n )\n return u\"\"\"<script src=\"{src}\" data-main=\"{module}\"></script>\"\"\".format(\n src = staticfiles_storage.url(resolve_require_url(require_settings.REQUIRE_JS)),\n module = staticfiles_storage.url(resolve_require_module(module)),\n )", "def javascript(filename, type='text/javascript'):\n\n if '?' in filename and len(filename.split('?')) is 2:\n filename, params = filename.split('?')\n params = '?' + params\n else:\n params = ''\n return format_html(\n '<script type=\"{}\" src=\"{}{}\"></script>',\n type, staticfiles_storage.url(filename), params\n )", "def get_media_url_statement(self, url):\n return '{{ media_url(\\'%s\\') }}' % url", "def propeller_javascript_url():\n return javascript_url()", "def replacement(self):\n assert (self.src or self.inline) and not (self.src and self.inline)\n if self.src:\n return '<script async type=\"text/javascript\" src=\"%s\"></script>' % urllib.quote(self.src)\n else:\n return '<script>\\n%s\\n</script>' % self.inline", "def load_script(browser, url):\r\n if browser.current_url.startswith('file:'):\r\n url = 'https:' + url\r\n browser.execute_script(\"\"\"\r\n var script_tag = document.createElement(\"script\");\r\n script_tag.setAttribute(\"type\", \"text/javascript\");\r\n script_tag.setAttribute(\"src\", arguments[0]);\r\n document.getElementsByTagName(\"head\")[0].appendChild(script_tag);\r\n \"\"\", url)", "def _get_javascript_uri(interactor: _Interactor, headers: Dict[str, Any] = None) -> Any:\n\n news_uri = interactor.replace_base_path_if_appropriate(\"/suite/sites\")\n response = interactor.get_page(\n uri=news_uri, headers=headers, label=\"Login.Feature_Toggles.GetSites\"\n )\n tempo_text = response.text\n script_regex = interactor.replace_base_path_if_appropriate(r'<script src=\"(\\/suite\\/tempo\\/ui\\/sail-client\\/sites-.*?.js)')\n uri_match = re.search(script_regex, tempo_text)\n if uri_match:\n script_uri = uri_match.groups()[0]\n return script_uri\n return None", "def script(content=\"\", src=\"\", scripttype=\"text/javascript\"):\r\n out_script = '<script '\r\n if scripttype:\r\n out_script += 'type=\"%s\" ' % scripttype\r\n if src:\r\n out_script += 'src=\"%s\" ' % src\r\n return out_script[:-1] + '>%s</script>' % content", "def require_module(module):\n if not require_settings.REQUIRE_DEBUG and module in require_settings.REQUIRE_STANDALONE_MODULES:\n return mark_safe(\n \"\"\"<script src=\"{module}\"></script>\"\"\".format(\n module=get_static_url(\n resolve_require_module(require_settings.REQUIRE_STANDALONE_MODULES[module][\"out\"])),\n )\n )\n\n return mark_safe(\n \"\"\"<script src=\"{src}\" data-main=\"{module}\"></script>\"\"\".format(\n 
src=get_static_url(resolve_require_url(require_settings.REQUIRE_JS)),\n module=get_static_url(resolve_require_module(module)),\n )\n )", "def doScriptUrl(bunch, text, env):\n return env.get(\"root\", \"\")+text", "def media_url(context, path, safe=None):\n return context['site'].media_url(path, safe)", "def _load_snippet(filename) -> str:\n fullpath = f'{dirname(__file__)}/js/{filename}'\n file = open(fullpath, 'r')\n script = file.read()\n file.close()\n return script", "def audio_file_player(self):\n if self.audio_file:\n file_url = settings.MEDIA_URL + str(self.content)\n player_string = '<audio src=\"%s\" controls>Your browser does not support the audio element.</audio>' % (file_url)\n return player_string", "def amp_url(self):\n return self.url.child(\"amp\")", "def get_media_js(self):\n media_js = uniquify_sequence(self.media_js + self.plugin_media_js)\n\n return media_js", "def javascript_src_url(self):\n return '/++resource++adddeployment.js?x={}'.format(datetime.now())", "def embed_widget():\n return render_template('embed_widget.js', **make_context())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The index view, for the home page. Shows Campaigns this UserProfile is in.
def index(request):
    context = dict()
    if request.user.is_authenticated():
        context['campaigns'] = [
            CampaignSerializer(c).serialize() for c in
            request.user.userprofile.campaigns.order_by('pk')]
    return render(request, 'voter_validation/index.html', context)
[ "def index():\n # users = User.query.filter_by(_role_code=UserRole.tefl_pending.value)\n # calls = CallLog.query.filter_by(flagged=True)\n # return render_template('admin/index.html',\n # users=users,\n # active='users',\n # calls=calls)\n users = User.query.all()\n return render_template('admin/users.html',\n users=users,\n active='users',\n title='Users',\n countries=COUNTRY_CODES)", "async def list(self, request):\n userid = await authenticated_userid(request)\n project = await request.app.context_project(request, userid)\n\n request['slog'].debug('Campaign list requested')\n\n response_js = {\n 'campaigns': await Campaign.list(request, userid=userid, project_id=project.project_id)\n }\n\n return web.json_response(response_js)", "def index(request):\n return render(request, 'commissioned_sites/index.html',\n {'sites': CommissionedSite.objects.all().order_by('-date')},\n context_instance=RequestContext(request))", "def index():\n\tif g.user.is_authenticated:\n\t\tentries = g.user.entries\n\t\treturn render_template('index.html', entries=entries)\n\telse:\n\t\treturn render_template('landing.html')", "def viewed_campaign(request):\n slug = ''\n if not request.resolver_match:\n return []\n if request.resolver_match.args:\n slug = request.resolver_match.args[0]\n elif request.resolver_match.kwargs:\n key = list(request.resolver_match.kwargs.keys())[0]\n slug = request.resolver_match.kwargs[key]\n campaigns = AlertCampaign.objects.filter(\n article_namespace=request.resolver_match.view_name,\n article_slug=slug\n )\n return campaigns", "def view_campaign(self):\n if self.table.selectedIndexes():\n # selected_data = map((lambda item: item.data()), self.table.selectedIndexes())\n selected_row = self.table.selectedIndexes()[0].row()\n self.mainwindow.navigation.switch_to_campaign_bids(self.content[selected_row][0].id)\n else:\n self.mainwindow.show_dialog(\"Select campaign\", 'No campaigns have been selected.')", "def list_campaigns(self, **kwargs) -> ApiResponse:\n return self._request(kwargs.pop('path'), params=kwargs)", "def my_index(request):\n try:\n denied = models.ParticipantStatus.objects.get(codename=models.ParticipantStatus.DENIED)\n except:\n denied = -1\n\n competitions_im_creator_of = Competition.objects.filter(creator=request.user).order_by('-pk').select_related(\n 'creator').annotate(num_participants=Count('participants'))\n competitions_im_admin_of = Competition.objects.exclude(pk__in=[c.pk for c in competitions_im_creator_of]).filter(\n admins__in=[request.user]).order_by('-pk').select_related('creator').annotate(num_participants=Count('participants'))\n my_competitions = list(competitions_im_creator_of) + list(competitions_im_admin_of)\n\n # Invalid select related previously\n published_competitions = models.Competition.objects.filter(published=True).select_related('creator').annotate(num_participants=Count('participants'))\n published_competitions = reversed(sorted(published_competitions, key=lambda c: c.get_start_date))\n context_dict = {\n 'my_competitions': my_competitions,\n # Invalid select related previously\n 'competitions_im_in': list(request.user.participation.all().exclude(status=denied)),\n 'published_competitions': published_competitions,\n }\n return render(request, \"web/my/index.html\", context_dict)", "def contest_won_viewall(request):\n is_loggedin, username = get_session_variables(request)\n contest_list = Contest_won.objects.all()\n\n if contest_list:\t\n contest_participant_list = []\n for contest_won_obj in contest_list:\t\n c_id = 
contest_won_obj.contest_id\n c_p_objs = Contest_won_participant.objects. \\\n filter(contest_id = c_id)\n contest_participant_list.extend(c_p_objs)\n\n return render_to_response('achievement/contest_viewall.html', \\\n {'is_loggedin':is_loggedin, \\\n 'username':username, \\\n 'contest_list':contest_list, \\\n 'contest_participant_list':contest_participant_list}, \\\n RequestContext(request))\n else:\n return render_to_response('achievement/noview.html', \\\n {'is_loggedin':is_loggedin, \\\n 'username':username, \\\n 'type': 'Contest\\'s won'}, \\\n RequestContext(request))", "def index():\n _locations = data_getter.get_locations_for(session['username'])\n return render_template(\"main/index.html\", \n locations = _locations,\n advanced = session['advanced'])", "def all_accounts(request):\n accounts = Account.objects.all()\n return render(request, 'app/home.html', {'accounts': accounts})", "def home(request):\n profile = UserProfile.objects.get(user=request.user)\n\n ondemandfeeds=\"\"\n androidgrid=\"\"\n iosgrid=\"\"\n referral=\"\"\n if \"OnDemand Feeds\" in profile.privileges:\n ondemandfeeds=\"OnDemand Feeds\"\n if \"Android Grid\" in profile.privileges:\n androidgrid = \"Android Grid\"\n if \"iOS Grid\" in profile.privileges:\n iosgrid = \"iOS Grid\"\n if \"Referral\" in profile.privileges:\n referral = \"Referral\"\n if \"All\" in profile.privileges:\n ondemandfeeds = \"OnDemand Feeds\"\n androidgrid = \"Android Grid\"\n iosgrid = \"iOS Grid\"\n referral = \"Referral\"\n return render_to_response(\n 'home.html',\n { 'user': request.user,\n 'ondemandfeeds': ondemandfeeds,\n 'androidgrid' : androidgrid,\n 'iosgrid' : iosgrid,\n 'referral':referral\n }\n\n )", "def index(request):\n if request.method == 'GET':\n if request.user.is_superuser or request.user.profile and request.user.profile.is_instructor:\n tasks = Task.objects.all().order_by('due_date')\n elif request.user.profile and not request.user.profile.is_instructor\\\n and request.user.profile.section is not None:\n tasks = Task.objects.filter(section=request.user.profile.section)\\\n .order_by('due_date')\n else:\n tasks = []\n sections = Section.objects.all()\n\n return render(request, \"index.html\", {\n 'tasks': tasks,\n 'statuses': task_statuses,\n 'sections': sections\n })", "def index(request):\n\n context = {'employees': User.objects.select_related('profile').filter(is_staff=True).order_by('first_name')}\n return render(request, 'Employees/index.html', context)", "def get_list_of_campaigns(self, limit=0, offset=0):\n logger.info(\"Function call: get_list_of_campaigns\")\n return self.__handle_result(self.__send_request('campaigns', 'GET', {'limit': limit or 0, 'offset': offset or 0}))", "def profiles_view():\n profiles = models.profile_user.query.all()\n return render_template('profiles.html', profiles = profiles)", "def test_admin_sms_campaign_view_list(self):\n response = self.client.get('/admin/sms_module/smscampaign/')\n self.failUnlessEqual(response.status_code, 200)", "def index(request):\n template_var = base_template_vals(request)\n up = UserProfile.objects.filter(django_user=request.user)\n template_var[\"likes\"] = Like.objects.filter(user=up[0])\n \n try:\n event_list = Event.objects.all().order_by('id')\n except Event.DoesNotExist:\n raise Http404\n output = ', '.join([e.title for e in event_list]) \n template_var[\"events\"] = output\t\n return render_to_response(\"event/index.html\", template_var,\n context_instance=RequestContext(request))", "def list_view(request):\n campgrounds = 
Campground.objects.all()\n return render(request, 'campgrounds.html')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Shows validation UI for a given campaign, if this UserProfile is authorized to do data entry for the specified Campaign. This is also the endpoint for searching for Voters as part of validation. If doing a search, assume that a sufficient number of the specified fields is present (taken care of in frontend form validation).
def validate(request, campaign_id):
    if not request.user.userprofile.in_campaign(campaign_id):
        return HttpResponseRedirect(reverse("voter_validation:index"))
    campaign_id = int(campaign_id)
    campaign = get_object_or_404(Campaign, id=campaign_id)
    # Get the number of signatures validated by the current user for this
    # campaign, and also for the past 24 hours.
    val_sigs_set = ValidationRecord.objects.filter(
        validator=request.user.userprofile, campaign=campaign)
    val_sigs_24h = val_sigs_set.filter(
        last_updated__gte=datetime.now(SERVER_TIME_ZONE) - timedelta(hours=24))
    context = {
        "campaign_name": campaign.name,
        "campaign_id": campaign_id,
        "val_sigs": val_sigs_set.count(),
        "val_sigs_24h": val_sigs_24h.count(),
    }
    # Search if specified in POST
    search = request.POST.get("search", "false")
    if search.lower() == "true":
        name = request.POST.get("name", None)
        address = request.POST.get("address", None)
        res_zip = request.POST.get("zip", None)
        # Pass in campaign_id so we can check the Voter was previously validated
        voters = voter_search(name, address, res_zip, campaign_id=campaign_id)
        context.update({
            "name": name,
            "address": address,
            "zip": res_zip,
            "results": voters,
        })
    return render(request, "voter_validation/validation.html", context)
[ "def index(request):\n context = dict()\n if request.user.is_authenticated():\n context['campaigns'] = [\n CampaignSerializer(c).serialize() for c in\n request.user.userprofile.campaigns.order_by('pk')]\n return render(request, 'voter_validation/index.html', context)", "def view_campaign(self):\n if self.table.selectedIndexes():\n # selected_data = map((lambda item: item.data()), self.table.selectedIndexes())\n selected_row = self.table.selectedIndexes()[0].row()\n self.mainwindow.navigation.switch_to_campaign_bids(self.content[selected_row][0].id)\n else:\n self.mainwindow.show_dialog(\"Select campaign\", 'No campaigns have been selected.')", "def validate_schema(self, data, **kwargs):\n if \"role\" not in data and \"visible\" not in data:\n raise ValidationError(_(\"Missing fields 'role' and/or 'visible'.\"))", "def can_access(self, call_campaign, call):\n\n \"\"\"Check if Call Campaign is not None\"\"\"\n if call_campaign is not None:\n\n \"\"\"Check if User can make Call for Campaign\"\"\"\n user = self.request.user\n if can_make_call_for_campaign(user, call_campaign):\n\n \"\"\"If Call is not None then check if User is Caller\"\"\"\n if call is not None:\n return hasattr(user, 'callprofile') and (\n call.caller == user.callprofile\n )\n else:\n \"\"\"If Call is None then Campaign access is sufficient\"\"\"\n return True\n\n \"\"\"Otherwise return False\"\"\"\n return False", "def viewed_campaign(request):\n slug = ''\n if not request.resolver_match:\n return []\n if request.resolver_match.args:\n slug = request.resolver_match.args[0]\n elif request.resolver_match.kwargs:\n key = list(request.resolver_match.kwargs.keys())[0]\n slug = request.resolver_match.kwargs[key]\n campaigns = AlertCampaign.objects.filter(\n article_namespace=request.resolver_match.view_name,\n article_slug=slug\n )\n return campaigns", "def testGetCampaign(self):\n if self.__class__.campaign1 is None:\n self.testAddCampaigns()\n selector = {\n 'fields': ['Id', 'Name', 'Status'],\n 'predicates': [{\n 'field': 'CampaignId',\n 'operator': 'EQUALS',\n 'values': [self.__class__.campaign1['id']]\n }]\n }\n self.assert_(isinstance(self.__class__.service.Get(selector), tuple))", "def __call__(self, data):\n data_combiner = DataCombiner(self.instance, data)\n company = data_combiner.get_value(self.company_field)\n contact = data_combiner.get_value(self.contact_field)\n\n if contact.company != company:\n raise ValidationError({\n self.contact_field: self.message,\n })", "def update_c_mandatory_fields(request, campaign_id):\n # print(request.POST)\n campaign = Campaign.objects.get(id=campaign_id)\n form = CampaignForm(request.POST, instance = campaign)\n # print(form)\n if form.is_valid():\n form.save()\n return redirect('add_campaign_spec', id=campaign_id)\n else:\n # return redirect('clonecampaign', id=campaign_id)\n print(form.errors)\n return redirect(reverse('edit_campaign', kwargs={'campaign_id':campaign_id}))", "def test_get_campaign_by_id_passes(self):\n response = self.client.get(f\"{self.endpoint_url}{self.test_campaign.id}/\")\n response_body = response.get_json()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response_body, {\"id\": CAMPAIGN_ID, \"name\": CAMPAIGN_NAME})", "def validate_fields(contact, contact_layout):\n contact = json.loads(contact['contact_infos'])\n\n validation_keys = set(contact_layout.fields.keys())\n contact_keys = set(contact.keys())\n \n if len(validation_keys ^ contact_keys) > 0:\n return False\n\n for key in validation_keys:\n if 
contact_layout.fields[key]['requirement']:\n if not contact.get(key, None):\n return False\n if not contact.get(key).get('value', None):\n return False\n\n return True and validate_subfields(contact, contact_layout)", "def voterListsActivity(request):\n if not getattr(request.user, 'campaign', None):\n # TODO - test this\n messages.error(request, \"You don't own a campaign.\")\n return HttpResponseRedirect(reverse('home'))\n # The user owns a campaign\n VoterListFormSet = modelformset_factory(VoterList, fields=('is_active',), extra=0)\n formset = VoterListFormSet(request.POST)\n if formset.is_valid():\n # Make sure the user owns the campaign that owns the lists\n # TODO - test this with cURL\n for form in formset:\n if form.instance.campaign != request.user.campaign:\n messages.error(request, \"You must manage the campaign that owns a voter list.\")\n break\n else:\n # This gets executed if the for loop continues through exhaustion (all forms are valid)\n # https://docs.python.org/2/tutorial/controlflow.html#break-and-continue-statements-and-else-clauses-on-loops\n formset.save()\n messages.success(request, \"You saved your voter list activity preferences.\")\n return HttpResponseRedirect(reverse('voter_lists'))", "def take_action_template(request):\n if 'campaign_id' not in request.GET:\n return HttpResponse('campaign id not in request')\n try:\n campaign = Campaign.objects.get(pk=request.GET['campaign_id'])\n except (Campaign.DoesNotExist, ValueError):\n return HttpResponse('Campaign does not exist for %s' % (request.GET['campaign_id']))\n #import ipdb; ipdb.set_trace()\n dms = campaign.decision_maker.all()\n context_dict = {\n 'dms': dms,\n 'campaign': campaign,\n }\n return render_to_response('takeaction.html',context_dict)", "def test_view_form_valid_sales_method(self, google):\n google.return_value = GeocoderMock()\n\n form_data = super(BaseSearchPageViewTestCase, self).get_data_sales()\n\n view = super(BaseSearchPageViewTestCase, self).initialize(BaseSearchPageView(), None)\n\n view.form_class = SearchForm\n\n form = SearchForm(form_data)\n\n form.is_valid()\n\n response = view.form_valid(form)\n\n (url, query) = super(BaseSearchPageViewTestCase, self).parse_url(response)\n\n form_data = super(BaseSearchPageViewTestCase, self).get_data_extra(form_data)\n\n for key, value in form_data.iteritems():\n self.assertTrue(key in query and query[key] == str(value))\n\n # Check we are dealing with a redirect and path as expected as sales/search\n self.assertIsInstance(response, HttpResponseRedirect)\n self.assertEqual(url.path, '/sales/search/')", "def human_required(view_func, field_name=None):\n\n def wrapped(request, *args, **kwargs):\n if is_recaptcha_valid(request, field_name=(field_name or 'recaptcha')):\n return view_func(request, *args, **kwargs)\n else:\n return HttpResponseForbidden()\n\n return wrapped", "def test_create_new_campaign_by_admin_passes(self):\n response = self.client.post(\n self.endpoint_url,\n json={\n \"logo\": None,\n \"name\": NEW_CAMPAIGN_NAME,\n \"organisations\": [self.test_org.id],\n \"url\": None,\n },\n headers={\"Authorization\": self.session_token},\n )\n response_body = response.get_json()\n self.assertEqual(response.status_code, 201)\n self.assertEqual(response_body, {\"campaignId\": 2})", "def validate_fields(self):\n if self.file == '':\n sg.popup_error('The data path is empty. Select a supported dataset file.', title='Input Error')\n return False\n if self.results == '':\n sg.popup_error('The result path is empty. 
Select a location for the results.', title='Input Error')\n return False\n if self.plots == []:\n sg.popup_error('Select the desired plot(s).', title='Input Error')\n return False\n if self.procedure == 'Impedance':\n if self.header_list and (len(self.header_list) not in [3,4]):\n sg.popup_error('Wrong number of header inputs.', title='Input Error')\n return False\n if self.specific and (len(self.specific) not in [3,4]):\n sg.popup_error('Wrong number of specific inputs.', title='Input Error')\n return False\n if self.procedure == 'Arrhenius':\n if self.header_list and (len(self.header_list) != 2):\n sg.popup_error('Wrong number of header inputs.', title='Input Error')\n return False\n if self.specific and (len(self.specific) != 2):\n sg.popup_error('Wrong number of specific inputs.', title='Input Error')\n return False\n return True", "def test_update_existent_campaign_by_admin_passes(self):\n response = self.client.patch(\n f\"{self.endpoint_url}{self.test_campaign.id}/\",\n json={\n \"logo\": None,\n \"name\": NEW_CAMPAIGN_NAME,\n \"organisations\": [],\n \"url\": None,\n },\n headers={\"Authorization\": self.admin_token},\n )\n response_body = response.get_json()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response_body[\"Success\"], \"Campaign 1 updated\")", "def test_update_existent_campaign_by_unauthenticated_user_fails(self):\n response = self.client.patch(\n f\"{self.endpoint_url}{self.test_campaign.id}/\",\n json={\n \"logo\": None,\n \"name\": NEW_CAMPAIGN_NAME,\n \"organisations\": [self.test_org.id],\n \"url\": None,\n },\n )\n self.assertEqual(response.status_code, 401)", "def GetCampaignAudienceView(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This is the base Exception class for all step failures. It can be manually raised from recipe code to cause the build to turn red.
def StepFailure(self):
    return recipe_api.StepFailure
[ "def raise_step_error(self, error: Exception, step: str):\n error_message = \"{}\\nFailed: Error={}\".format(step, str(error))\n logging.error(error_message)\n self.slacker.send_thread_reply(error_message)\n raise Exception(error_message)", "def raise_on_error(self):\n if not self._status.success:\n cls = UrlApi.InfraHTTPError if self._infra_step else UrlApi.HTTPError\n raise cls('HTTP status (%d)' % (self.status_code,), self)", "def fail(self, msg=None):\n raise self.failureException, msg", "def test_fails(self):\n raise FoolishError(\"I am a broken test\")", "def failing_task():\n raise Exception('task failed :(')", "def fail(self, msg=None):\r\n raise self.failureException(msg)", "def step(self):\n raise TaskError(\"Task %s: subclass should override step() method!\" %\n self)", "def bad_step(self):\n assert False, \"This step is meant to fail.\"", "def run_job_error(self, payload):\n self._job_step_failed(\"Running job failed\", payload)", "def raise_for_failure(self) -> None:\n if not self.is_success():\n raise exc.ExecutionError(self)", "def failed(self):\n self._state = \"FAILED\"", "def fail(self, msg=None):\n raise Exception, msg", "def stage_job_error(self, payload):\n self._job_step_failed(\"Staging job failed\", payload)", "def _task_failure(self, task):\n assert (task.task_id in self._runs_after), \\\n ('%s depending on %s' % (self.task_id, task.task_id))\n assert (task.task_id in self._pending_deps), \\\n ('%s depending on %s' % (self.task_id, task.task_id))\n self._state = TaskState.FAILURE", "def InfraFailure(self):\n return recipe_api.InfraFailure", "def test_class_errored(self, cls, exception):", "def set_launch_failed(self):\n self.diagnostics = textwrap.dedent(\n f\"\"\"\n Application {self.app_id} failed 1 times (global limit =2; local limit is =1) due to AM Container for appattempt_1670152552564_21143_000001 exited with exitCode: 7\n Failing this attempt.Diagnostics: [2022-12-14 10:27:49.976]Exception from container-launch.\n Container id: container_e5070_1670152552564_21143_01_000001\n Exit code: 7\n Exception message: Launch container failed\n Shell error output: Unable to find image 'test-repository/test-image:1234' locally\n docker: Error response from daemon: <some error response here, left it out>\n See 'docker run --help'.\n \"\"\"\n )\n return self.set_failed()", "def test_fail(make_runner: Callable[..., TargetFunctionRunner]) -> None:\n runner = make_runner(target_failed, use_instances=True)\n run_info = TrialInfo(config=2, instance=\"test\", seed=0, budget=0.0)\n\n runner.submit_trial(run_info)\n run_info, run_value = next(runner.iter_results())\n\n # Make sure the traceback message is included\n assert \"traceback\" in run_value.additional_info\n assert \"RuntimeError\" in run_value.additional_info[\"traceback\"]", "def test_config_step_raises(self):\n\n run_step = self.ConfigStep.create({\n 'name': 'run_step',\n 'job_type': 'run_odoo',\n })\n\n create_step = self.ConfigStep.create({\n 'name': 'test_step',\n 'job_type': 'create_build',\n })\n\n config = self.Config.create({'name': 'test_config'})\n\n # test that the run_odoo step has to be the last one\n with self.assertRaises(UserError):\n config.write({\n 'step_order_ids': [\n (0, 0, {'sequence': 10, 'step_id': run_step.id}),\n (0, 0, {'sequence': 15, 'step_id': create_step.id}),\n ]\n })\n\n # test that the run_odoo step should be preceded by an install step\n with self.assertRaises(UserError):\n config.write({\n 'step_order_ids': [\n (0, 0, {'sequence': 15, 'step_id': run_step.id}),\n (0, 0, 
{'sequence': 10, 'step_id': create_step.id}),\n ]\n })" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
StepWarning is a subclass of StepFailure, and will translate to a yellow build.
def StepWarning(self):
    return recipe_api.StepWarning
[ "def warn(self, warning=None):\r\n\r\n if self.getName() != 'Main':\r\n warning = self.getName() + ': ' + warning\r\n\r\n debug.err('Warning: %s' % warning)\r\n if type(warning) != types.ListType:\r\n warning = [warning]\r\n\r\n if self.result:\r\n self.result.addStepWarning(warning)", "def _warn(self, warning=None):\r\n debug.err('Warning: %s' % warning)\r\n\r\n if core.FW_conf['settings'].TestRun.ExecutionMode == 'Leader' and warning != None:\r\n executeInFollower(\"self.warn('%s')\" % (warning,))\r\n\r\n if type(warning) != types.ListType:\r\n warning = [warning]\r\n\r\n self.result.addStepWarning(warning)", "def warning(self, warning):\n pass", "def StepFailure(self):\n return recipe_api.StepFailure", "def bad_step(self):\n assert False, \"This step is meant to fail.\"", "def color_warning(self):\n return self.LEVEL_COLOR['WARNING']", "def warning(msg):\n click.secho(msg, fg='yellow')", "def success_failure_color(self, evaluation):\n return \"#60f979\" if evaluation.passes else \"#f96c60\"", "def warning(self, msg, transfers):\n self.validation_exceptions.extend(self._create_exceptions(msg, transfers, ValidationType.WARNING))", "def warning(self, msg, *args, **kwargs):\n self.add_report_entry(WARNING, msg, args, **kwargs)", "def notice(self, warning):\n pass", "def warning(str_):\n return _color_level(str_, 'warning')", "def _warn(self, issue, passed, comments=None):\n self._add_issue(issue, WARN, passed, comments)", "def set_warning_message(msg):\n set_message(msg, TYPE_WARNING)", "def MarkCurrentStep(fatal=True):\n # See\n # https://chromium.googlesource.com/chromium/tools/build/+/c63ec51491a8e47b724b5206a76f8b5e137ff1e7/scripts/master/chromium_step.py#495\n if fatal:\n bot.HAS_FAILURES = True\n print '@@@STEP_FAILURE@@@'\n else:\n print '@@@STEP_WARNINGS@@@'\n sys.stdout.flush()", "def warning(self, warning):\n\n self._warning = warning", "def add_warning(self, text):\n self.props.add_warning(text)", "def test_passes_with_specific_warning(self):\r\n with assertions.assert_warns(DeprecationWarning):\r\n self._create_deprecation_warning()", "def log_check_warnings(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
InfraFailure is a subclass of StepFailure, and will translate to a purple build. This exception is raised from steps which are marked as `infra_step`s when they fail.
def InfraFailure(self):
    return recipe_api.InfraFailure
[ "def _GetInfraFailMessages(self, failing):\n msgs = self._GetFailedMessages(failing)\n # Filter out None messages because we cannot analyze them.\n return [x for x in msgs if x and\n x.HasFailureType(failures_lib.InfrastructureFailure)]", "def raise_on_error(self):\n if not self._status.success:\n cls = UrlApi.InfraHTTPError if self._infra_step else UrlApi.HTTPError\n raise cls('HTTP status (%d)' % (self.status_code,), self)", "def HandleFailure(self, failing, inflight, no_stat):\n # Print out the status about what builds failed or not.\n MasterSlaveSyncCompletionStage.HandleFailure(\n self, failing, inflight, no_stat)\n\n if self._run.config.master:\n self.CanaryMasterHandleFailure(failing, inflight, no_stat)", "def ebLoopFailed(failure):\n logger.error('FATAL: {}'.format(failure.getBriefTraceback()))\n reactor.stop()", "def failure(self, cb: CircuitBreaker, exc: BaseException) -> None:", "def HandleFailure(self, failing, inflight, no_stat):\n # Print out the status about what builds failed or not.\n MasterSlaveSyncCompletionStage.HandleFailure(\n self, failing, inflight, no_stat)\n\n if self._run.config.master:\n self.CQMasterHandleFailure(failing, inflight, no_stat)\n\n self._RecordSubmissionMetrics()", "def process_failure():\n if failure:\n logger.error('Please visit: https://wiki.opnfv.org/x/5oey')\n sys.exit(1)", "def StepFailure(self):\n return recipe_api.StepFailure", "def handle_deploy_failure(self):\n step = \"Recovering From Deployment Error\"\n self.slacker.send_thread_reply(step)\n\n if self.has_down_time is True and self.migration_completed:\n return \"Skipped Automated Recovery: Requires Manual Intervention\"\n\n try:\n self.rollback_images()\n self.scale_up_deployments()\n error_handler_message = \"Successfully Rolled Back Deployment\"\n\n except Exception as e:\n error_handler_message = str(e)\n logging.error(error_handler_message)\n\n return error_handler_message", "def ebLoopFailed(failure):\n print(failure.getBriefTraceback())\n reactor.stop()", "def __failure(self):\n failures = \\\n self.__network['failure'].node_failure(self.__nodes['failure'])\n for failure in failures:\n self.__nodes['application'][failure].failure(\n self.__current_time, self.__network['shared'])", "def _fail(self, exception):\n self.monitor_loop.stop()\n self._maintained.errback(exception)", "def failure(self, input: str) -> enumFail:\n pass", "def logFailure(failure, msg='Unhandled exception in deferred:'):\n logging.error('%s\\n%s', msg, failure.getTraceback())", "def set_launch_failed(self):\n self.diagnostics = textwrap.dedent(\n f\"\"\"\n Application {self.app_id} failed 1 times (global limit =2; local limit is =1) due to AM Container for appattempt_1670152552564_21143_000001 exited with exitCode: 7\n Failing this attempt.Diagnostics: [2022-12-14 10:27:49.976]Exception from container-launch.\n Container id: container_e5070_1670152552564_21143_01_000001\n Exit code: 7\n Exception message: Launch container failed\n Shell error output: Unable to find image 'test-repository/test-image:1234' locally\n docker: Error response from daemon: <some error response here, left it out>\n See 'docker run --help'.\n \"\"\"\n )\n return self.set_failed()", "def indicate_failure(self):\n pass", "def run(self, failure_info):\n signals = {}\n if not failure_info['failed'] or not failure_info['chromium_revision']:\n # Bail out if no failed step or no chromium revision.\n return signals\n\n # Bail out on infra failure\n if failure_info.get('failure_type') == failure_type.INFRA:\n return signals\n\n master_name 
= failure_info['master_name']\n builder_name = failure_info['builder_name']\n build_number = failure_info['build_number']\n\n for step_name in failure_info.get('failed_steps', []):\n if not waterfall_config.StepIsSupportedForMaster(step_name, master_name):\n # Bail out if the step is not supported.\n continue\n\n step = WfStep.Get(master_name, builder_name, build_number, step_name)\n if step and step.log_data:\n failure_log = step.log_data\n else:\n # TODO: do test-level analysis instead of step-level.\n # TODO: Use swarming test result instead of archived gtest results\n gtest_result = buildbot.GetGtestResultLog(\n master_name, builder_name, build_number, step_name)\n if gtest_result:\n failure_log = _GetReliableTestFailureLog(gtest_result)\n\n if gtest_result is None or failure_log == 'invalid':\n if not lock_util.WaitUntilDownloadAllowed(\n master_name): # pragma: no cover\n raise pipeline.Retry('Failed to pull log of step %s of master %s'\n % (step_name, master_name))\n try:\n failure_log = buildbot.GetStepLog(\n master_name, builder_name, build_number, step_name,\n self.HTTP_CLIENT)\n except ResponseTooLargeError: # pragma: no cover.\n logging.exception(\n 'Log of step \"%s\" is too large for urlfetch.', step_name)\n # If the stdio log of a step is too large, we don't want to pull it\n # again in next run, because that might lead to DDoS to the master.\n # TODO: Use archived stdio logs in Google Storage instead.\n failure_log = 'Stdio log is too large for urlfetch.'\n\n if not failure_log: # pragma: no cover\n raise pipeline.Retry('Failed to pull stdio of step %s of master %s'\n % (step_name, master_name))\n\n # Save step log in datastore and avoid downloading again during retry.\n if not step: # pragma: no cover\n step = WfStep.Create(\n master_name, builder_name, build_number, step_name)\n\n step.log_data = _ExtractStorablePortionOfLog(failure_log)\n\n try:\n step.put()\n except Exception as e: # pragma: no cover\n # Sometimes, the step log is too large to save in datastore.\n logging.exception(e)\n\n # TODO: save result in datastore?\n if step.isolated:\n try:\n json_failure_log = (\n json.loads(failure_log) if failure_log != 'flaky' else {})\n except ValueError: # pragma: no cover\n json_failure_log = {}\n logging.warning('failure_log %s is not valid JSON.' 
% failure_log)\n\n signals[step_name] = {\n 'tests': {}\n }\n step_signal = FailureSignal()\n\n for test_name, test_failure_log in json_failure_log.iteritems():\n signals[step_name]['tests'][test_name] = extractors.ExtractSignal(\n master_name, builder_name, step_name, test_name,\n base64.b64decode(test_failure_log)).ToDict()\n\n # Save signals in test failure log to step level.\n step_signal.MergeFrom(signals[step_name]['tests'][test_name])\n\n signals[step_name]['files'] = step_signal.files\n signals[step_name]['keywords'] = step_signal.keywords\n else:\n signals[step_name] = extractors.ExtractSignal(\n master_name, builder_name, step_name, None, failure_log).ToDict()\n\n return signals", "def fail(self, msg=None):\n raise self.failureException, msg", "def _handle_unexpected_failure(self, calculation, exception=None):\n if exception:\n self.report('{}'.format(exception))\n\n # if self.ctx.unexpected_failure:\n # self.report(\n # 'failure of {}<{}> could not be handled for the second consecutive time'\n # .format(self.ctx.calc_name, calculation.pk))\n # return self.exit_codes.UNKOWN_ERROR\n\n # else:\n # self.report(\n # 'failure of {}<{}> could not be handled, restarting once more'.\n # format(self.ctx.calc_name, calculation.pk))\n\n self.report('failure of {}<{}> could not be handled'.format(\n self.ctx.calc_name, calculation.pk))\n return self.exit_codes.UNKOWN_ERROR" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
StepTimeout is a subclass of StepFailure and is raised when a step times out.
def StepTimeout(self):
    return recipe_api.StepTimeout
[ "def raise_timeout(self, *args, **kwargs):\n\n self.log.error(\"Task timeout encountered.\")\n raise TimeoutError", "def failed_timeout(self, failed_timeout):\n self._failed_timeout = failed_timeout", "def StepFailure(self):\n return recipe_api.StepFailure", "def _on_timeout_expired(self):\n print(\"You have failed to perform the required manipulation in time\")\n # Fail the test when the timeout was reached\n self._error = True\n # Stop the loop now\n self._loop.quit()", "def failed_timeout(self):\n return self._failed_timeout", "def test_timeout_elapsed_exception(self):\n deadline = Deadline(-MS)\n with self.assertRaises(TimeoutError):\n deadline.timeout()", "def timeout(order):\n return ResultProxy(TaskResult(TaskTimedout('A task has timedout'), order))", "def _timeout(signum, frame):\n # Raise TimeoutException with system default timeout message\n raise TimeoutException()", "def test_quarantine_preserves_timeout(self):\n\n self.fq.quarantine(self.job, Exception('Some fake error'))\n self.assertEquals(self.job.timeout, 200)", "def testTimeoutMatch(self):\n step_output = {\n 'steps': [\n {\n 'name':\n 'All timeout',\n 'summaryMarkdown': ('Max pending time: 2s (shard #1)'\n '* [shard #0 timed out after 1s]'\n '* [shard #1 timed out after 2s]'),\n },\n ],\n }\n expected_output = {\n 'All timeout': [(1, 0), (2, 0)],\n }\n self.assertEqual(\n get_machine_times._GetShardTimesFromStepOutput(json.dumps(step_output)),\n expected_output)", "def on_timeout_error(self, e):\n raise FatalError(\"%s.on_timeout_error should not be called\" % self)", "def test_timeout(self, fake_time):\n fake_task = MagicMock()\n fake_task.info.completeTime = None\n fake_task.info.error = None\n fake_task.info.result = 'woot'\n\n with self.assertRaises(RuntimeError):\n task_lib.consume_task(fake_task, timeout=2)", "def test_timeout(self, mocker, mock_timedelta):\n\n tid = 289466\n site = \"mysite\"\n\n exception_response = self.generate_task_dictionary(\n tid, state=\"started\", completed=None\n )\n\n responses = [{\"json\": exception_response}]\n url = (\n \"https://cloudapi.acquia.com/v1/\"\n \"sites/prod:{site}/tasks/{tid}.json\".format(tid=tid, site=site)\n )\n\n mocker.register_uri(\"GET\", url, responses)\n\n with self.assertRaises(exceptions.AcquiaCloudTimeoutError):\n self.client.site(site).task(tid).wait(0)", "def timeout_next_step(self) -> Optional[pulumi.Input['BotDialogStateArgs']]:\n return pulumi.get(self, \"timeout_next_step\")", "def assert_timeout(self) -> None:\n if self._cancelled:\n raise asyncio.TimeoutError from None", "def test_failed_processing(self):\n # setup\n ledger_api_dialogue, fipa_dialogue = self._setup_fipa_ledger_api_dialogues(self)\n\n self.transaction_behaviour.timedout.add(ledger_api_dialogue.dialogue_label)\n\n # operation\n with patch.object(self.logger, \"log\") as mock_logger:\n self.transaction_behaviour.failed_processing(ledger_api_dialogue)\n\n # after\n self.assert_quantity_in_outbox(0)\n\n # finish_processing\n assert self.transaction_behaviour.timedout == set()\n\n mock_logger.assert_any_call(\n logging.DEBUG,\n f\"Timeout dialogue in transaction processing: {ledger_api_dialogue}\",\n )\n\n # failed_processing\n assert fipa_dialogue in self.transaction_behaviour.waiting", "def handler(signum, frame):\n raise TimeoutError(msg)", "def test_failed_processing(self):\n # setup\n ledger_api_dialogue, ml_dialogue = self._setup_ml_ledger_api_dialogues(self)\n\n self.transaction_behaviour.timedout.add(ledger_api_dialogue.dialogue_label)\n\n # operation\n with 
patch.object(self.logger, \"log\") as mock_logger:\n self.transaction_behaviour.failed_processing(ledger_api_dialogue)\n\n # after\n self.assert_quantity_in_outbox(0)\n\n # finish_processing\n assert self.transaction_behaviour.timedout == set()\n\n mock_logger.assert_any_call(\n logging.DEBUG,\n f\"Timeout dialogue in transaction processing: {ledger_api_dialogue}\",\n )\n\n # failed_processing\n assert ml_dialogue in self.transaction_behaviour.waiting", "def onTimeStepEnd(self, timeStep):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The currently active (open) result from the last step that was run. This is a `types.StepData` object.
def active_result(self):
    return self.step_client.previous_step_result()
[ "def current_progress_data(self):\n return self._current_progress_data", "def get_current_data(self):\n return self.__current_data", "def get_current_run(self):\n last_run = self.get_last_runs(1)\n if len(last_run) == 0:\n return None\n last_run = last_run[0]\n if 'result' in last_run.run:\n return None # has a result means it's done\n else:\n return last_run", "def get_last_result(self):\n return self.last_result", "def get_execution_result(self):\r\n return self._execution_result", "def last_triggered_step(self):\n return self._last_triggered_step", "def get_last_iteration_results(self):\n return self.iterated_results[-1]", "def last_observation(self):\n last_timestep = self._timesteps[-1]\n return last_timestep.observation", "def get_last_solution(self):\n return self.last_result", "def _result(self):\n if not self._was_executed: _log.throw( \"Deferred action %s has not been executed yet\", self._info )\n return self._live", "def current_step(self):\n return self.dialog.current_step", "def state(self):\n result = self.getResult()\n return result.state", "def extract_goal_state(self):\n time = rospy.get_time()\n ref_time = time - self.last_time\n future_time = ref_time + self.update_rate\n\n # get state of future time in global trajectory\n return df.compute_output3D(self.global_solution, self.order, self.time[self.future_index], future_time)", "def current(self) -> 'outputs.MetricValueStatus':\n return pulumi.get(self, \"current\")", "def current_run(self):\n return self._current_run", "def GetCurrentItem(self):\r\n\r\n return self._current", "def get_goal(self):\n return self.get_observation(self.env._get_goal())", "def current_value(self):\n\t\treturn self.__current_value", "def verification_result(self):\n # Select all finished runs - for the first one that processed (there\n # should be only one), return its output\n processed_run = CloudFactoryDocumentRun.objects.filter(\n status=CloudFactoryDocumentRun.STATUS_PROCESSED,\n document_url__advancedirective=self).last()\n if processed_run:\n return processed_run.output()\n\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Nest allows you to nest steps hierarchically on the build UI. Calling ```python
def nest(self, name):
    step_result = self(name, [])
    with self.m.context(name_prefix=name, increment_nest_level=True):
        yield step_result
[ "def build_step(self):\n pass", "def stepStarted(build, step):", "def _run_python_hierblock(self):\n print \"Traversing python...\"\n fname_py = self._info['blockname'] + '.py'\n self._write_tpl('hier_python', 'python', fname_py)\n ed = CMakeFileEditor('python/CMakeLists.txt')\n ed.append_value('GR_PYTHON_INSTALL', fname_py, 'DESTINATION[^()]+')\n ed.write()", "def build_nested_blocks(self):\n pass", "def step_mains(self, model):\n for h in self.helpers:\n h.step_main(model)\n\n for a in self.agents:\n a.step_main(model)", "def build_root(event):\n mode = event.parameters['mode']\n if mode == 'from config':\n config = event.parameters['config']\n\n elif mode == 'from template':\n manager = event.workbench.get_plugin('exopy.tasks')\n view = TemplateSelector(event.parameters.get('widget'),\n manager=manager)\n result = view.exec_()\n if result:\n path = view.path\n config, _ = load_template(path)\n\n else:\n msg = 'Invalid mode (%s) for build_root. Valid ones are : %s'\n raise ValueError(msg % (mode, ('from config', 'from template')))\n\n if config:\n build_dep = event.parameters.get('build_dep', event.workbench)\n return build_task_from_config(config, build_dep, True)\n\n else:\n raise RuntimeError('No config for building')", "def _step_builder(cls):", "def run_suite_tree(request):\r\n # order by id default\r\n project = request.data['project']\r\n relation = request.data[\"relation\"]\r\n back_async = request.data[\"async\"]\r\n report = request.data[\"name\"]\r\n host = request.data[\"host\"]\r\n\r\n if host != \"请选择\":\r\n host = models.HostIP.objects.get(name=host, project=project).value.splitlines()\r\n\r\n test_sets = []\r\n suite_list = []\r\n config_list = []\r\n for relation_id in relation:\r\n suite = list(models.Case.objects.filter(project__id=project,\r\n relation=relation_id).order_by('id').values('id', 'name'))\r\n for content in suite:\r\n test_list = models.CaseStep.objects. 
\\\r\n filter(case__id=content[\"id\"]).order_by(\"step\").values(\"body\")\r\n\r\n testcase_list = []\r\n config = None\r\n for content in test_list:\r\n body = eval(content[\"body\"])\r\n if \"base_url\" in body[\"request\"].keys():\r\n config = eval(models.Config.objects.get(name=body[\"name\"], project__id=project).body)\r\n continue\r\n testcase_list.append(parse_host(host, body))\r\n # [[{scripts}, {scripts}], [{scripts}, {scripts}]]\r\n config_list.append(parse_host(host, config))\r\n test_sets.append(testcase_list)\r\n suite_list = suite_list + suite\r\n\r\n if back_async:\r\n tasks.async_debug_suite.delay(test_sets, project, suite_list, report, config_list)\r\n summary = loader.TEST_NOT_EXISTS\r\n summary[\"msg\"] = \"用例运行中,请稍后查看报告\"\r\n else:\r\n summary = loader.debug_suite(test_sets, project, suite_list, config_list)\r\n\r\n return Response(summary)", "def tree(ctx):\n if ctx.obj[\"debug\"]:\n click.echo(\"Debug mode initiated\")\n set_trace()\n\n logger.debug(\"tree show subcommand\")\n\n ctx.obj[\"workspace\"].tree()", "def show_build_order(c, ignore=False, update=False):\n\n print(\" # Add this to invoke.yaml\")\n print(\" build_order:\")\n for p in _build_order(c, ignore=ignore, update=update):\n print(f\" - {p}\")\n\n print(\"\")", "def step_expander(steps):\n steps = steps.copy()\n i_store = {}\n\n for i in range(1000):\n if i >= len(steps):\n break\n\n step = steps[i]\n name = step['name']\n actions = step.get('actions')\n if actions:\n if type(actions) is not dict:\n step['actions'] = actions_to_dict(actions)\n\n if 'i_' == name[:2]:\n i_steps = i_store.get(name)\n if i_steps:\n i_steps = deepcopy(i_steps)\n else:\n i_steps = generate_steps(f\"test_{name[2:]}\", True)\n i_store[name] = i_steps\n\n for s in i_steps:\n ancestors = s.get('ancestors', [])\n if name not in ancestors:\n s['ancestors'] = ancestors + [name]\n if actions:\n s_action = s.get('actions', {})\n if type(s_action) is not dict:\n s_action = actions_to_dict(s_action)\n s['actions'] = {**s_action, **step['actions']}\n steps = steps[:i + 1] + i_steps + steps[i + 1:]\n return steps", "def RunSteps(api, properties, env_properties):\n cache_root = api.path['cleanup'].join('builder')\n checkout = GetCheckoutPath(api)\n platform = api.platform.name.capitalize()\n if properties.clobber:\n api.file.rmtree('Clobber cache', cache_root)\n api.file.rmtree('Clobber build output: %s' % platform, checkout.join('out'))\n\n api.file.ensure_directory('Ensure checkout cache', cache_root)\n api.goma.ensure()\n env = {}\n env_prefixes = {}\n\n # Checkout source code and build\n api.repo_util.engine_checkout(cache_root, env, env_prefixes)\n\n # Prepare the dependencies that web tests need.\n # These can be browsers, web drivers or other repositories.\n api.web_util.prepare_dependencies(checkout)\n\n with api.context(cwd=cache_root, env=env,\n env_prefixes=env_prefixes), api.depot_tools.on_path():\n\n target_name = 'host_debug_unopt'\n\n # Load local engine information if available.\n api.flutter_deps.flutter_engine(env, env_prefixes)\n\n android_home = checkout.join('third_party', 'android_tools', 'sdk')\n env['GOMA_DIR'] = api.goma.goma_dir\n env['ANDROID_HOME'] = str(android_home)\n env['CHROME_NO_SANDBOX'] = 'true'\n env['ENGINE_PATH'] = cache_root\n # flutter_engine deps adds dart dependency as out/host_debug_unopt/dart-sdk\n # We are changing it with src/third_party/dart/tools/sdks/dart-sdk\n dart_bin = checkout.join(\n 'third_party', 'dart', 'tools', 'sdks', 'dart-sdk', 'bin'\n )\n paths = env_prefixes.get('PATH', [])\n 
paths.insert(0, dart_bin)\n env_prefixes['PATH'] = paths\n\n command_args = api.properties.get('command_args', ['test'])\n command_name = api.properties.get('command_name', 'test')\n felt_cmd = [\n checkout.join('out', target_name, 'dart-sdk', 'bin', 'dart'),\n 'dev/felt.dart'\n ]\n felt_cmd.extend(command_args)\n\n with api.context(cwd=cache_root, env=env,\n env_prefixes=env_prefixes), api.depot_tools.on_path():\n # Update dart packages and run tests.\n local_engine_path = env.get('LOCAL_ENGINE')\n local_pub = local_engine_path.join('dart-sdk', 'bin', 'pub')\n with api.context(\n cwd=checkout.join('flutter', 'web_sdk', 'web_engine_tester')):\n api.step('pub get in web_engine_tester', [local_pub, 'get'])\n with api.context(cwd=checkout.join('flutter', 'lib', 'web_ui')):\n api.step('pub get in web_ui', [local_pub, 'get'])\n if api.platform.is_mac:\n with api.osx_sdk('ios'):\n with recipe_api.defer_results():\n api.step('felt test: %s' % command_name, felt_cmd)\n if api.properties.get('dependencies'\n ) and 'goldens_repo' in api.properties.get(\n 'dependencies'):\n api.web_util.upload_failing_goldens(checkout, 'ios-safari')\n # This is to clean up leaked processes.\n api.os_utils.kill_processes()\n # Collect memory/cpu/process after task execution.\n api.os_utils.collect_os_info()\n else:\n with recipe_api.defer_results():\n api.step('felt test: %s' % command_name, felt_cmd)\n if api.properties.get('dependencies'\n ) and 'goldens_repo' in api.properties.get(\n 'dependencies'):\n api.web_util.upload_failing_goldens(checkout, 'chrome')\n # This is to clean up leaked processes.\n api.os_utils.kill_processes()\n # Collect memory/cpu/process after task execution.\n api.os_utils.collect_os_info()", "def test_multi_step_run(self, client):\n workflow_name = 'test multistep workflow run'\n specification = Yaml.serialize({\n 'name': workflow_name,\n 'entry': 'step-0',\n 'steps': {\n 'step-0': {\n 'operation': 'flux:test-operation',\n 'postoperation': [{\n 'actions': [{\n 'action': 'execute-step',\n 'step': 'step-1',\n }],\n }],\n },\n 'step-1': {\n 'operation': 'flux:test-operation',\n 'postoperation': [{\n 'actions': [{\n 'action': 'execute-step',\n 'step': 'step-2',\n }],\n }],\n },\n 'step-2': {\n 'operation': 'flux:test-operation',\n },\n },\n })\n resp1 = self._setup_workflow(client, workflow_name,\n specification=specification)\n self.assertEqual('OK', resp1.status)\n workflow_id = resp1.content['id']\n\n resp = self._setup_run(client, workflow_id)\n self.assertEqual('OK', resp.status)\n run_id = resp.content['id']\n\n result = self._poll_run_status(client, run_id, 'completed', include=['executions'])\n\n run_ended = result.pop('ended')\n run_started = result.pop('started')\n self.assertTrue(run_ended >= run_started)\n\n ancestor_ids = []\n for execution in result['executions']:\n ancestor_ids.append(execution.pop('id'))\n execution_ended = execution.pop('ended')\n execution_started = execution.pop('started')\n\n self.assertTrue(execution_ended >= execution_started)\n self.assertTrue(execution_ended >= run_started)\n self.assertTrue(run_ended >= execution_started)\n\n expected = {\n 'id': run_id,\n 'name': workflow_name,\n 'parameters': None,\n 'workflow_id': workflow_id,\n 'products': {},\n 'status': 'completed',\n 'executions': [\n {\n 'execution_id': 1,\n 'ancestor_id': None,\n 'step': 'step-0',\n 'name': 'Test Operation',\n 'status': 'completed',\n },\n {\n 'execution_id': 2,\n 'ancestor_id': ancestor_ids[0],\n 'step': 'step-1',\n 'name': 'Test Operation',\n 'status': 'completed',\n },\n {\n 
'execution_id': 3,\n 'ancestor_id': ancestor_ids[1],\n 'step': 'step-2',\n 'name': 'Test Operation',\n 'status': 'completed',\n },\n ]\n }\n self.assertEquals(expected, result)", "def tree(ctx):\n hokusai.print_command_tree(ctx.find_root().command)", "def run_14():\n result = test_14()\n print_result(result)\n result_cw = get_activity_result(result, 'test_child_workflow', 'v2')\n assert [r for r in result_cw.keys() if 'activity2' in r]", "def test_get_scenarios_expanded(self):\n pass", "def start_build(self):\n for project in self.projects:\n for build_num in range(1, self.num_of_builds + 1):\n log.info(\n f\"Start Jenkins build on {project} project, build number:{build_num} \"\n )\n cmd = f\"start-build {constants.JENKINS_BUILD}\"\n build = OCP(namespace=project)\n build.exec_oc_cmd(command=cmd, out_yaml_format=False)", "def start(context, project_name):\n\n gcc_version = '10-2020-q4-major-'\n os_extension = ''\n\n if platform.system() == 'Linux':\n if platform.machine() == 'x86_64':\n os_extension = 'x86_64-linux'\n else:\n os_extension = 'aarch64-linux'\n elif platform.system() == 'Darwin':\n os_extension = 'mac'\n elif platform.system() == 'Windows':\n os_extension = 'win32'\n\n final_branch_name = f'{gcc_version}{os_extension}'\n\n if not os_extension:\n click.secho(f'This system {platform.system()}:{platform.machine()} ' +\n 'is not supported for SJSU-Dev2 ', fg='red', bold=True)\n return -1\n\n click.secho(f'Creating project: {project_name}', fg='white', bold=True)\n Path(project_name).mkdir()\n\n click.echo(f' Creating \"{project_name}/.sj2\" directory')\n Path(f'{project_name}/.sj2').mkdir(exist_ok=True)\n Path(f'{project_name}/.sj2/reserved').touch(exist_ok=True)\n\n click.echo(f' Creating \"{project_name}/library\" directory')\n Path(f'{project_name}/library').mkdir(exist_ok=True)\n\n click.echo(f' Creating \"{project_name}/packages\" directory')\n Path(f'{project_name}/packages').mkdir(exist_ok=True)\n\n click.echo(f' Creating \"{project_name}/main.cpp\" source file')\n Path(f'{project_name}/main.cpp').write_text(BASIC_MAIN_CPP)\n\n click.echo('')\n\n context.invoke(install, library='libcore', tag='main',\n project_directory=project_name)\n context.invoke(install, library='libarmcortex',\n tag='main', project_directory=project_name)\n context.invoke(install, library='liblpc40xx', tag='main',\n project_directory=project_name)\n context.invoke(install, library='libstm32f10x',\n tag='main', project_directory=project_name)\n context.invoke(install, library='gcc-arm-none-eabi-picolibc',\n tag=final_branch_name, project_directory=project_name)", "def create_nest(event):\n item = {\n 'nestId': ulid.new().str,\n 'nestComponent': 'NEST',\n 'createdAt': str(datetime.now()),\n 'name': event[\"arguments\"].get(\"name\", \"\"),\n 'owner': (event[\"identity\"] or {}).get(\"username\"),\n 'users': []\n }\n table.put_item(Item=item)\n\n return(item)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Insert single row into a table
def _insert_table_row(self, db: str, table: str, row: Dict[str, Any]):
    pass
[ "def insert_db_row(self, param):\n c = self._conn.cursor()\n c.execute(self.sqladdrow, param)\n self._conn.commit()", "def insert_row(self, table: str, row_data: dict):\r\n\r\n columns = \"\".join([f\"'{i}',\" for i in row_data]).rstrip(\",\")\r\n keys = \"\".join([f\"'{row_data[i]}',\" for i in row_data]).rstrip(\",\")\r\n sql_statement = f\"INSERT INTO {table} ({columns}) VALUES({keys});\"\r\n try:\r\n self.__cursor(sql_statement)\r\n self.__db_conn.commit()\r\n except sqlite3.Error as error:\r\n print(\"[!] Couldn't add record\")\r\n print(\"[!]\", str(error).capitalize())\r\n return\r\n print(\"[*] Record added successfully.\")", "def singleInsert(self, table_name, fields, field_values, field_types=[]):\n if not self.checkTable(table_name):\n self.createTable(table_name, fields, field_types)\n self.transactionInsert(table_name, fields, field_values)\n self.transactionEnd()", "def insert(self, row, ensure=None, types=None):\n row = self._sync_columns(row, ensure, types=types)\n res = self.db.executable.execute(self.table.insert(row))\n if len(res.inserted_primary_key) > 0:\n return res.inserted_primary_key[0]\n return True", "def insert_record(self):\n print('Insert Record')\n self.prepare_insert_statement()", "def insert_row(self, table_name, **kwargs):\n\n exec_str = 'INSERT INTO {}('+'{}, '*len(kwargs)+') '\n exec_str += 'VALUES ('+'?, '*len(kwargs)+')'\n columns = [i for i in kwargs]\n exec_str = exec_str.format(table_name, *columns)\n exec_str = exec_str.replace(', )', ')')\n\n input_columns = tuple([kwargs[i] for i in kwargs])\n\n try:\n self.commit(exec_str, input_columns)\n except sqlite3.OperationalError as e:\n log('[-ERR-] ' + str(e).capitalize())\n return\n except sqlite3.IntegrityError as e:\n log('[-ERR-] ' + str(e).capitalize())\n return\n\n log(\"[-DB--] Successfully inserted new row to table '{}'.\".format(table_name), self.is_logged)", "def insert(self, table: str, data: dict):\n insert_data = tuple(data[c] for c in getattr(Tables,table.upper())[\"rt_values\"])\n insert_cmd = getattr(Tables,table.upper())[\"insert_rt\"]\n self.curosor.execute(\n insert_cmd, \n insert_data\n )\n self.conn.commit()", "def insert_row(batch, row):\n batch.put(\"20160621\",{\"total_vehiculos_tunel:0\":row[0],\"total_vehiculos_calle30:0\":row[1],\"velocidad_media_superficie:0\":row[2],\"velocidad_media_tunel:0\":row[3]})", "def insert(self, table, data):\r\n statement = \"INSERT INTO {} SET {};\".format(table, data)\r\n self.curs.execute(statement)", "def insert(self, table_name, rows, bulk=True):\n table = self._create_table(table_name)\n return self._perform_query(table.insert(), rows, bulk)", "def insert_row(self, identifier, position, datastore):\n # Get dataset. 
Raise exception if dataset is unknown.\n dataset = datastore.get_dataset(identifier)\n if dataset is None:\n raise ValueError(\"unknown dataset '{}'\".format(identifier))\n # Insert new row into dataset.\n df = vizual.insert_row(df=dataset.to_dataframe(), pos=position)\n # Store updated dataset to get new identifier.\n ds = datastore.update_dataset(\n origin=dataset,\n df=df,\n annotations=dataset.annotations\n )\n return VizualApiResult(ds)", "def insert(self):\n sql = u'INSERT INTO %s' % self.table()\n keys = []\n values = []\n format_values = []\n for field in self.fields():\n attr = object.__getattribute__(self, field)\n if attr.auto_value:\n continue\n keys.append(field)\n format_values.append(attr.format)\n values.append(attr._value)\n keys_str = u'( %s )' % u', '.join(keys)\n values_str = u'VALUES( %s )' % u', '.join(format_values)\n sql = '%s %s %s;' % (sql, keys_str, values_str)\n connection.execute(sql, values)\n primary_k = self.__class__.get_primary()\n primary = object.__getattribute__(self, primary_k)\n primary.value = connection.connection.insert_id()", "def insert(self, sql):\n try:\n # Execute the SQL command\n self.cursor.execute(sql)\n # Commit your changes in the database\n self.db.commit()\n except:\n # Rollback in case there is any error\n self.db.rollback()", "def insert(element):\n session.add(element)\n session.commit()", "def do_insert_row(args):\n cc = Client(args.file, args.sheetname)\n cc.insert_row(args.row)\n cc.save(args.file)", "def single_insert(conn, insert_req):\n cursor = conn.cursor()\n try:\n cursor.execute(insert_req)\n conn.commit()\n except (Exception, psycopg2.DatabaseError) as error:\n print(\"Error: %s\" % error)\n conn.rollback()\n cursor.close()\n return 1\n cursor.close()", "def insert_to_db(obj, table):\n with db_connect() as db:\n try:\n results = db.execute(insert(table), vars(obj)).inserted_primary_key\n except Exception as e:\n logger.exception(str(e))\n raise\n obj.id = results[0]", "def insertObject(obj,dbpath = sherlockDB,tentative=False):\n\tquery_out = [[0]] if tentative else sqlexecute(sqlEq(obj)) \n\n\tif query_out[0][0]==0: #it's new\n\t\tconnection = sqlite3.connect(dbpath)\n\t\tcursor = connection.cursor()\n\t\t\n\t\ttry:\t\t\t\ttable = obj.sqlTable(tentative)\n\t\texcept TypeError: \ttable = obj.sqlTable()\n\n\t\tcols = ','.join(obj.sqlCols())\n\t\tquestionMarks = ','.join(['?']*len(obj.sqlCols()))\n\t\tvals = obj.sqlInsert()\n\t\t\n\t\tcursor.execute('insert into {0}({1}) values({2})'.format(table,cols,questionMarks), vals)\n\t\tID = cursor.lastrowid\n\t\tconnection.commit()\n\t\tconnection.close()\n\t\tstatus = 'inserted'\n\telse:\n\t\tassert query_out[0][0]==1, \"Weird result for query_out in insertObject: \"+str(query_out)\n\t\tID,status = 1,'already in db'; print status\n\n\treturn ID,status", "def insertRow(self):\n selected = [i.row() for i in self.table.selectedIndexes()]\n self._insertRow(min(selected) if selected else self.table.rowCount())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compares two response objects based on their NVCness. Only returns true if both responses are in agreement with either responding NVC or not NVC.
def compare(obj_a, obj_b):
    return (tuple_to_string(obj_a) == 'NVC') == (tuple_to_string(obj_b) == 'NVC')
[ "def compare(ob1, ob2, verbose=True): \n not_equal = ssdf.ssdf_base._not_equal(ob1, ob2)\n if verbose and not_equal:\n print(not_equal)\n return not not_equal", "def verify_vn_in_api_server(self):\n self.api_verification_flag = True\n self.api_s_vn_obj = self.api_s_inspect.get_cs_vn(\n domain=self.domain_name, project=self.project_name,\n vn=self.vn_name, refresh=True)\n if not self.api_s_vn_obj:\n self.logger.debug(\"VN %s is not found in API-Server\" %\n (self.vn_name))\n self.api_verification_flag = self.api_verification_flag and False\n return False\n if self.api_s_vn_obj['virtual-network']['uuid'] != self.uuid:\n self.logger.warn(\n \"VN Object ID %s in API-Server is not what was created\" % (self.uuid))\n self.api_verification_flag = self.api_verification_flag and False\n return False\n\n subnets = list()\n for ipam in self.api_s_vn_obj['virtual-network']['network_ipam_refs']:\n subnets.extend(ipam['attr']['ipam_subnets'])\n for vn_subnet in self.vn_subnets:\n subnet_found = False\n vn_subnet_cidr = str(IPNetwork(vn_subnet['cidr']).ip)\n for subnet in subnets:\n if subnet['subnet']['ip_prefix'] == vn_subnet_cidr:\n subnet_found = True\n if not subnet_found:\n self.logger.warn(\n \"VN Subnet IP %s not found in API-Server for VN %s\" %\n (vn_subnet_cidr, self.vn_name))\n self.api_verification_flag = self.api_verification_flag and False\n return False\n # end for\n self.api_s_route_targets = self.api_s_inspect.get_cs_route_targets(\n vn_id=self.uuid)\n if not self.api_s_route_targets:\n errmsg = \"Route targets not yet found in API-Server for VN %s\" % self.vn_name\n self.logger.error(errmsg)\n self.api_verification_flag = self.api_verification_flag and False\n return False\n self.rt_names = self.api_s_inspect.get_cs_rt_names(\n self.api_s_route_targets)\n\n if not self.rt_names:\n self.logger.debug(\n 'RT names not yet present for VN %s', self.vn_name)\n return False\n\n if self.rt_number:\n if not any(item.endswith(self.rt_number) for item in self.rt_names):\n self.logger.debug('RT %s is not found in API Server RT list %s ' %(\n self.rt_number, self.rt_names))\n self.api_verification_flag = self.api_verification_flag and False\n return False\n\n self.api_s_routing_instance = self.api_s_inspect.get_cs_routing_instances(\n vn_id=self.uuid)\n if not self.api_s_routing_instance:\n msg = \"Routing Instances not found in API-Server for VN %s\" % self.vn_name\n self.logger.warn(msg)\n self.api_verification_flag = self.api_verification_flag and False\n return False\n self.ri_ref = self.api_s_routing_instance['routing_instances'][0]['routing-instance']\n if not self.verify_network_id():\n return False\n self.api_verification_flag = self.api_verification_flag and True\n self.logger.info(\"Verifications in API Server for VN %s passed\" %\n (self.vn_name))\n return True", "def compare_cpes(lhs: ImageCpe, rhs: ImageCpe):\n vendor_cmp = compare_fields(lhs.vendor, rhs.vendor)\n if vendor_cmp != 0:\n return vendor_cmp\n\n name_cmp = compare_fields(lhs.name, rhs.name)\n if name_cmp != 0:\n return name_cmp\n\n version_cmp = compare_fields(lhs.version, rhs.version)\n if version_cmp != 0:\n return version_cmp\n\n update_cmp = compare_fields(lhs.update, rhs.update)\n if update_cmp != 0:\n return update_cmp\n\n meta_cmp = compare_fields(lhs.meta, rhs.meta)\n if meta_cmp != 0:\n return meta_cmp\n\n # all avenues of comparison have been depleted, the two cpes are same for all practical purposes\n return 0", "def test_equality(self, obs1, obs2, res):\n assert obs1.compare(obs2) == res", "def 
_connected_ncsi():\n config = _get_network_tests_config()\n ncsi_endpoint = config.get('ncsi_endpoint')\n expected_text = config.get('ncsi_expected_text')\n try:\n r = requests.get(ncsi_endpoint)\n if r.text == expected_text:\n return True\n except Exception:\n LOG.error(\"Unable to verify connection via NCSI endpoint.\")\n return False", "def test_equality(self):\n self.assertEqual(self._version1, self._version1)\n self.assertNotEqual(self._version2, self._version1)\n self.assertEqual(self._version1, PrcsVersion(self._version1))", "def is_response_correct(self, response):\n for answer in self.my_osid_object.get_answers():\n if self._is_match(response, answer):\n return True\n return False", "def test_not_equal_on_equal(self):\n a = payloads.GetResponsePayload()\n b = payloads.GetResponsePayload()\n\n self.assertFalse(a != b)\n self.assertFalse(b != a)\n\n # TODO (peter-hamilton): Update this once equality is supported for\n # SymmetricKeys.\n secret = secrets.SymmetricKey(\n key_block=objects.KeyBlock(\n key_format_type=misc.KeyFormatType(enums.KeyFormatType.RAW),\n key_value=objects.KeyValue(\n key_material=objects.KeyMaterial(\n b'\\x73\\x67\\x57\\x80\\x51\\x01\\x2A\\x6D'\n b'\\x13\\x4A\\x85\\x5E\\x25\\xC8\\xCD\\x5E'\n b'\\x4C\\xA1\\x31\\x45\\x57\\x29\\xD3\\xC8'\n )\n ),\n cryptographic_algorithm=attributes.CryptographicAlgorithm(\n enums.CryptographicAlgorithm.TRIPLE_DES\n ),\n cryptographic_length=attributes.CryptographicLength(168)\n )\n )\n\n a = payloads.GetResponsePayload(\n object_type=enums.ObjectType.SYMMETRIC_KEY,\n unique_identifier='49a1ca88-6bea-4fb2-b450-7e58802c3038',\n secret=secret\n )\n b = payloads.GetResponsePayload(\n object_type=enums.ObjectType.SYMMETRIC_KEY,\n unique_identifier='49a1ca88-6bea-4fb2-b450-7e58802c3038',\n secret=secret\n )\n\n self.assertFalse(a != b)\n self.assertFalse(b != a)", "def test_compare(self):\n \n v = Voters(0)\n v.add_random(100,4).build()\n \n c = Candidates(v, seed=0) \n c.add_random(cnum=5).build()\n \n e = ElectionStats()\n e.set_raw(voters=v.data.pref,\n candidates=c.data.pref,\n winners=[0])\n\n p = PrRegret(e)\n r1 = p.avg_regret\n r2 = consensus_regret(v.data.pref, c.data.pref[0:1])\n \n print('PR regret =', r1)\n print('consensus regret =', r2)\n self.assertTrue(np.round((r1-r2) / r1, 4) == 0)", "def is_correctness_available_for_response(self, response):\n return True", "def compare_dicts(self, libvirt_inbound_dict, libvirt_outbound_dict):\n if cmp(self.inbound_dict, libvirt_inbound_dict) or cmp(\n self.outbound_dict, libvirt_outbound_dict):\n return False\n return True", "def test_equal_on_not_equal_object_type(self):\n a = payloads.GetResponsePayload(\n object_type=enums.ObjectType.SYMMETRIC_KEY\n )\n b = payloads.GetResponsePayload(\n object_type=enums.ObjectType.OPAQUE_DATA\n )\n\n self.assertFalse(a == b)\n self.assertFalse(b == a)", "def test_equal_on_equal(self):\n a = payloads.GetResponsePayload()\n b = payloads.GetResponsePayload()\n\n self.assertTrue(a == b)\n self.assertTrue(b == a)\n\n # TODO (peter-hamilton): Update this once equality is supported for\n # SymmetricKeys.\n secret = secrets.SymmetricKey(\n key_block=objects.KeyBlock(\n key_format_type=misc.KeyFormatType(\n enums.KeyFormatType.RAW\n ),\n key_value=objects.KeyValue(\n key_material=objects.KeyMaterial(\n b'\\x73\\x67\\x57\\x80\\x51\\x01\\x2A\\x6D'\n b'\\x13\\x4A\\x85\\x5E\\x25\\xC8\\xCD\\x5E'\n b'\\x4C\\xA1\\x31\\x45\\x57\\x29\\xD3\\xC8'\n )\n ),\n cryptographic_algorithm=attributes.CryptographicAlgorithm(\n enums.CryptographicAlgorithm.TRIPLE_DES\n 
),\n cryptographic_length=attributes.CryptographicLength(168)\n )\n )\n\n a = payloads.GetResponsePayload(\n object_type=enums.ObjectType.SYMMETRIC_KEY,\n unique_identifier='49a1ca88-6bea-4fb2-b450-7e58802c3038',\n secret=secret\n )\n b = payloads.GetResponsePayload(\n object_type=enums.ObjectType.SYMMETRIC_KEY,\n unique_identifier='49a1ca88-6bea-4fb2-b450-7e58802c3038',\n secret=secret\n )\n\n self.assertTrue(a == b)\n self.assertTrue(b == a)", "def _are_match_equivalent(vulnerability_a, vulnerability_b):\n\n if (\n not (vulnerability_a and vulnerability_b)\n or vulnerability_a.id != vulnerability_b.id\n or vulnerability_a.namespace_name != vulnerability_b.namespace_name\n ):\n # They aren't the same item reference\n logger.debug(\n \"Vuln id or namespaces are different: {} {} {} {}\".format(\n vulnerability_a.id,\n vulnerability_b.id,\n vulnerability_a.namespace_name,\n vulnerability_b.namespace_name,\n )\n )\n return False\n\n normalized_fixes_a = {\n (fix.name, fix.epochless_version, fix.version)\n for fix in vulnerability_a.fixed_in\n }\n normalized_fixes_b = {\n (fix.name, fix.epochless_version, fix.version)\n for fix in vulnerability_b.fixed_in\n }\n\n fix_diff = normalized_fixes_a.symmetric_difference(normalized_fixes_b)\n if fix_diff:\n logger.debug(\"Fixed In records diff: {}\".format(fix_diff))\n return False\n\n return True", "def is_equivalence(self) -> bool:", "def nonceVerification(nonce, decryptedNonce):\n if nonce==decryptedNonce:\n return \"200 OK\"\n else:\n return \"400 Error Detected\"\n #Enter code to compare the nonce and the decryptedNonce. This method\n # should return a string of \"200 OK\" if the parameters match otherwise\n # it should return \"400 Error Detected\"", "def test_not_equal_on_equal(self):\n a = payloads.GetRequestPayload()\n b = payloads.GetRequestPayload()\n\n self.assertFalse(a != b)\n self.assertFalse(b != a)\n\n a = payloads.GetRequestPayload(\n unique_identifier='49a1ca88-6bea-4fb2-b450-7e58802c3038',\n key_format_type=enums.KeyFormatType.RAW,\n key_compression_type=enums.KeyCompressionType.\n EC_PUBLIC_KEY_TYPE_UNCOMPRESSED,\n key_wrapping_specification=objects.KeyWrappingSpecification(\n wrapping_method=enums.WrappingMethod.ENCRYPT,\n encryption_key_information=objects.EncryptionKeyInformation(\n unique_identifier='100182d5-72b8-47aa-8383-4d97d512e98a',\n cryptographic_parameters=attributes.\n CryptographicParameters(\n block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP\n )\n ),\n encoding_option=enums.EncodingOption.NO_ENCODING\n )\n )\n b = payloads.GetRequestPayload(\n unique_identifier='49a1ca88-6bea-4fb2-b450-7e58802c3038',\n key_format_type=enums.KeyFormatType.RAW,\n key_compression_type=enums.KeyCompressionType.\n EC_PUBLIC_KEY_TYPE_UNCOMPRESSED,\n key_wrapping_specification=objects.KeyWrappingSpecification(\n wrapping_method=enums.WrappingMethod.ENCRYPT,\n encryption_key_information=objects.EncryptionKeyInformation(\n unique_identifier='100182d5-72b8-47aa-8383-4d97d512e98a',\n cryptographic_parameters=attributes.\n CryptographicParameters(\n block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP\n )\n ),\n encoding_option=enums.EncodingOption.NO_ENCODING\n )\n )\n\n self.assertFalse(a != b)\n self.assertFalse(b != a)", "def test_not_equal_on_not_equal_object_type(self):\n a = payloads.GetResponsePayload(\n object_type=enums.ObjectType.SYMMETRIC_KEY\n )\n b = payloads.GetResponsePayload(\n object_type=enums.ObjectType.OPAQUE_DATA\n )\n\n self.assertTrue(a != b)\n self.assertTrue(b != a)", "def version_compare(v1, v2):\n # getting 
raw version, while striping 'v' char at the start. if exists.\n # removing this char lets us safely compare the two version.\n v1_raw = CveUtils.to_raw_version(v1).strip(\"v\")\n v2_raw = CveUtils.to_raw_version(v2).strip(\"v\")\n new_v1 = version.LegacyVersion(v1_raw)\n new_v2 = version.LegacyVersion(v2_raw)\n\n return CveUtils.basic_compare(new_v1, new_v2)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Runs main gobject loop.
def run_main_loop():
    mainloop = GObject.MainLoop()
[ "def start(self):\n self.mainloop = gobject.MainLoop()\n self.mainloop.run()", "def loop( self ):\n import gtk\n while self.count >= 1:\n log.debug( 'GTK loop restarting' )\n while gtk.events_pending():\n gtk.main_iteration()\n log.debug( 'GTK loop exiting' )\n try:\n del self.t_loop\n except AttributeError, err:\n pass", "def run(self):\n Gtk.main()", "def run():\n gtk.main()", "def start(self):\n self.mainloop = GObject.MainLoop()\n self.mainloop.run()", "def main(self):\n gtk.gdk.threads_init()\n self.client.start()\n gtk.main()", "def start(self):\n if self.__started:\n return\n\n self.__started = True\n GLib.timeout_add(GtkMainLoop.DEADLINE_GLIB, self.__ioloop_run)\n self.__gi_loop.run()", "def main_loop(self):\n while not self.done:\n self.event_loop()\n self.update()\n self.render()\n self.clock.tick(self.fps)\n self.display_fps()", "def run(self) -> None:\n self.mainloop()", "def start_loop(self):\n\n self.MainLoop()", "def main():\n global loop\n DBusGMainLoop(set_as_default=True)\n\n loop = gobject.MainLoop()\n bus = dbus.SessionBus()\n\n bus.add_signal_receiver(catchall_handler, \n dbus_interface=\"org.freedesktop.DBus.Properties\")\n\n threading.Thread(target=run_spotify).start()\n loop.run()", "def run(self):\n self.cmdloop()", "def DBusGMainLoop(set_as_default=False): # real signature unknown; restored from __doc__\n pass", "def main_loop(self):\n\n self.window.mainloop()", "def run(self):\n self.ui['main_window'].widgets['main'].show_all()\n gtk.main()", "def loop(self):\n pass", "def run(self):\n while True:\n self.paint()\n self.config_select()", "def run():\n gui = GUI()\n gui.mainloop()", "def mainloop(self):\n \n while True:\n # It calls repeteadly the reactor\n # update method.\n try:\n self.update()\n except Kill:\n # It breaks the loop\n # silently.\n # people implementing reactors from other mainloop\n # should implement this try: catch\n # suitably to their needs.\n\n break\n except KeyboardInterrupt:\n print self.base\n raise" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize dbus system bus, acquire adapter/interface for org.bluez.GattManager1, and register application for 'org.bluez.GattService1'.
def __init__(self): dbus.mainloop.glib.DBusGMainLoop(set_as_default=True) self.bus = dbus.SystemBus() self.adapter = self._find_adapter() if not self.adapter: IFaceNotFoundException('%s interface not found' % GATT_MANAGER_IFACE) self.service_manager = dbus.Interface( self.bus.get_object(BLUEZ_SERVICE_NAME, self.adapter), GATT_MANAGER_IFACE) self.mainloop = GObject.MainLoop() self.ctx = GattContext(self.bus, self.mainloop) self.app = Application(self.ctx) #print('Registering GATT application...') self.service_manager.RegisterApplication(self.app.get_path(), {}, reply_handler=register_app_cb, error_handler=register_app_error_cb)
[ "def __init__(self):\n dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)\n self.bus = dbus.SystemBus()\n self.adapter = self._find_adapter()\n if not self.adapter:\n IFaceNotFoundException('%s interface not found' % LE_ADVERTISING_MANAGER_IFACE)\n\n adapter_props = dbus.Interface(self.bus.get_object(BLUEZ_SERVICE_NAME, self.adapter),\n \"org.freedesktop.DBus.Properties\");\n\n # Set adater to Powered on\n adapter_props.Set(BLUEZ_ADAPTER_IFACE, \"Powered\", dbus.Boolean(1))\n\n self.ad_manager = dbus.Interface(self.bus.get_object(BLUEZ_SERVICE_NAME, self.adapter),\n LE_ADVERTISING_MANAGER_IFACE)\n\n\n self.mainloop = GObject.MainLoop()\n #print('Registering GATT application...')", "def get_gatt_manager_interface():\n return dbus.Interface(\n dbus.SystemBus().get_object(constants.BLUEZ_SERVICE_NAME,\n '/org/bluez/hci0'),\n constants.GATT_MANAGER_IFACE)", "def getDevice(self, path):\r\n\t\tself.device = self.bus.get_object(\"org.bluez\", path)\r\n\t\tself.deviceAlias = self.device.Get(DEVICE_IFACE, \"Alias\", dbus_interface=\"org.freedesktop.DBus.Properties\")\r\n\t\tprint(\"Connected with: \" + self.deviceAlias)\r\n\t\t#set adapter to invisible??\r", "async def async_init(self):\n async with self._bus_lock:\n if self._bus and self._bus.connected:\n return\n\n self._services_cache = {}\n\n # We need to create a new MessageBus each time as\n # dbus-next will destory the underlying file descriptors\n # when the previous one is closed in its finalizer.\n bus = MessageBus(bus_type=BusType.SYSTEM)\n await bus.connect()\n\n try:\n # Add signal listeners\n\n bus.add_message_handler(self._parse_msg)\n\n rules = MatchRules(\n interface=defs.OBJECT_MANAGER_INTERFACE,\n member=\"InterfacesAdded\",\n arg0path=\"/org/bluez/\",\n )\n reply = await add_match(bus, rules)\n assert_reply(reply)\n\n rules = MatchRules(\n interface=defs.OBJECT_MANAGER_INTERFACE,\n member=\"InterfacesRemoved\",\n arg0path=\"/org/bluez/\",\n )\n reply = await add_match(bus, rules)\n assert_reply(reply)\n\n rules = MatchRules(\n interface=defs.PROPERTIES_INTERFACE,\n member=\"PropertiesChanged\",\n path_namespace=\"/org/bluez\",\n )\n reply = await add_match(bus, rules)\n assert_reply(reply)\n\n # get existing objects after adding signal handlers to avoid\n # race condition\n\n reply = await bus.call(\n Message(\n destination=defs.BLUEZ_SERVICE,\n path=\"/\",\n member=\"GetManagedObjects\",\n interface=defs.OBJECT_MANAGER_INTERFACE,\n )\n )\n assert_reply(reply)\n\n # dictionaries are cleared in case AddInterfaces was received first\n # or there was a bus reset and we are reconnecting\n self._properties.clear()\n self._service_map.clear()\n self._characteristic_map.clear()\n self._descriptor_map.clear()\n\n for path, interfaces in reply.body[0].items():\n props = unpack_variants(interfaces)\n self._properties[path] = props\n\n if defs.ADAPTER_INTERFACE in props:\n self._adapters.add(path)\n\n service_props = cast(\n GattService1, props.get(defs.GATT_SERVICE_INTERFACE)\n )\n\n if service_props:\n self._service_map.setdefault(\n service_props[\"Device\"], set()\n ).add(path)\n\n char_props = cast(\n GattCharacteristic1,\n props.get(defs.GATT_CHARACTERISTIC_INTERFACE),\n )\n\n if char_props:\n self._characteristic_map.setdefault(\n char_props[\"Service\"], set()\n ).add(path)\n\n desc_props = cast(\n GattDescriptor1, props.get(defs.GATT_DESCRIPTOR_INTERFACE)\n )\n\n if desc_props:\n self._descriptor_map.setdefault(\n desc_props[\"Characteristic\"], set()\n ).add(path)\n\n logger.debug(f\"initial properties: 
{self._properties}\")\n\n except BaseException:\n # if setup failed, disconnect\n bus.disconnect()\n raise\n\n # Everything is setup, so save the bus\n self._bus = bus", "def get_gatt_service_interface():\n return dbus.Interface(\n dbus.SystemBus().get_object(constants.BLUEZ_SERVICE_NAME,\n '/org/bluez/hci0'),\n constants.GATT_SERVICE_IFACE)", "def _init_dbus(self):\n self.players = [ 'amarokapp','amarok','rhythmbox','audacious','banshee',\n 'exaile','gmusicbrowser','juk','quodlibet','listen','songbird',\n 'muine','beep-media-play','mpd' ]\n try:\n self.bus=dbus.SessionBus()\n except ImportError:\n self.display_message(\"Some issues python-dbus\")", "def AddBeacon(self,\n adapter_device_name='hci0',\n device_address='11:01:02:03:04:05',\n manf_id=None,\n manf_data=None,\n service_uuid=None,\n service_data=None,\n ):\n device_name = 'dev_' + device_address.replace(':', '_').upper()\n adapter_path = '/org/bluez/' + adapter_device_name\n path = adapter_path + '/' + device_name\n\n if adapter_path not in mockobject.objects:\n raise dbus.exceptions.DBusException(\n 'Adapter %s does not exist.' % adapter_device_name,\n name=BLUEZ_MOCK_IFACE + '.NoSuchAdapter')\n\n properties = {\n 'UUIDs': dbus.Array([], signature='s', variant_level=1),\n 'Blocked': dbus.Boolean(False, variant_level=1),\n 'Connected': dbus.Boolean(False, variant_level=1),\n 'LegacyPairing': dbus.Boolean(False, variant_level=1),\n 'Paired': dbus.Boolean(False, variant_level=1),\n 'Trusted': dbus.Boolean(False, variant_level=1),\n 'RSSI': dbus.Int16(-61, variant_level=1), # arbitrary\n 'Adapter': dbus.ObjectPath(adapter_path, variant_level=1),\n 'Address': dbus.String(device_address, variant_level=1),\n 'AddressType': dbus.String(\"random\"),\n 'Alias': dbus.String(\"40-A1-82-A6-BB-3D\", variant_level=1),\n }\n if service_uuid:\n properties['UUIDs'].append(service_uuid)\n properties['ServiceData'] = dbus.Dictionary({service_uuid: service_data})\n if manf_id:\n properties['ManufacturerData'] = dbus.Dictionary({manf_id: manf_data})\n self.AddObject(path,\n DEVICE_IFACE,\n # Properties\n properties,\n # Methods\n [\n ('CancelPairing', '', '', ''),\n ('Connect', '', '', \"\"),\n ('ConnectProfile', 's', '', ''),\n ('Disconnect', '', '', ''),\n ('DisconnectProfile', 's', '', ''),\n ('Pair', '', '', ''),\n ])\n\n manager = mockobject.objects['/']\n manager.EmitSignal(OBJECT_MANAGER_IFACE, 'InterfacesAdded',\n 'oa{sa{sv}}', (\n dbus.ObjectPath(path),\n {DEVICE_IFACE: properties},\n ))", "def add_bluetooth(self):\n\n # Indicating to Agent Pi that the method has begun\n self.__client.sendall(\"OK\".encode())\n\n # Getting message from Agent Pi\n data = self.__client.recv(4096)\n message = json.loads(data.decode())\n\n # Send mac address info to API to be processed\n try:\n # Sending location info to API\n api_response = requests.post(\n 'http://localhost:5000/api/register-bluetooth',\n json=message).text\n except:\n # If connection to API fails then send a generic error message\n api_response = json.dumps({\n \"message\": \"A server error occurred.\"\n })\n\n # Returning response to Agent Pi\n self.__client.sendall(api_response.encode())", "def get_device(self, path):\n self.device = self.bus.get_object(\"org.bluez\", path)\n self.deviceAlias = self.device.Get(DEVICE_IFACE, \"Alias\", dbus_interface=\"org.freedesktop.DBus.Properties\")", "def init_bluetooth(self):\n while True:\n ble_devices = os.popen('timeout -s INT 2s hcitool lescan').read()\n if \"TT_BURST\" in ble_devices:\n self.close_flow()\n base_addy = self.serverip + \":\" + 
str(self.serverport)\n device_url = (\"/valves/\" + self.devicename + \"/off\")\n update_conn = http.client.HTTPConnection(base_addy)\n update_conn.request('GET',device_url)\n update_conn.close()\n print(\"BURST DETECTED!!!!!!\")\n else:\n ble_devices = None\n #do bluetooth stuff", "def find_gatt_adapter(bus):\n remote_om = dbus.Interface(\n bus.get_object(constants.BLUEZ_SERVICE_NAME, '/'),\n constants.DBUS_OM_IFACE)\n objects = remote_om.GetManagedObjects()\n\n for o, props in objects.items():\n if constants.GATT_MANAGER_IFACE in props:\n return o\n\n return None", "def getDevice(self, path):\n self.device = self.bus.get_object(\"org.bluez\", path)\n self.deviceAlias = self.device.Get(DEVICE_IFACE, \"Alias\",\n dbus_interface=\"org.freedesktop.DBus.Properties\")", "def connect(deviceaddr):\n\n deviceHandle = GATTRequester(deviceaddr, False, args.listen_interface)\n flag = 0\n device = None\n\n while flag<5:\n try:\n #bool wait,std::string channel_type, std::string security_level, int psm, int mtu)\n deviceHandle.connect(True, 'public','low')\n break\n except Exception,e:\n # We have a bunch of RuntimeErrors raised for various reasons by the GATTLib library -- lets handle those, then maybe fork GATTLib and get those to be more specific\n if type(e) == RuntimeError:\n \n if e.message == \"Channel or attrib not ready\":\n if deviceHandle.is_connected():\n if args.debug == True: print \"Device error\"\n break # i don't think we can win\n #print 'w'\n #pdb.set_trace()\n #TODO: maybe see if it's connected or not?\n #flag += 1 # we don't want to get stuck here.\n #continue\n\n elif e.message == \"Already connecting or connected\":\n if deviceHandle.is_connected():\n break\n else:\n time.sleep(3)\n if args.debug == True: print '\\t Waiting for response to connection...'\n continue\n\n else:\n #errnum = int(e.message.split()[-1][1:-1]) #remove the ( and ) from the error number\n time.sleep(1)\n if args.debug == True: print '!!!' 
+ e.message\n continue\n\n print e\n flag += 1\n return deviceHandle", "def get_advert_manager_interface():\n return dbus.Interface(\n dbus.SystemBus().get_object(constants.BLUEZ_SERVICE_NAME,\n '/org/bluez/hci0'),\n constants.LE_ADVERTISING_MANAGER_IFACE)", "def connect_to_dbus(self):\n if not self._connected_to_dbus:\n self._connected_to_dbus = True\n proxy_obj = self._bus.get_object(\"org.wicd.daemon\", \n '/org/wicd/daemon')\n self.proxy_obj = proxy_obj\n daemon = dbus.Interface(proxy_obj, 'org.wicd.daemon')\n interface = dbus.Interface(proxy_obj, 'org.wicd.daemon.interface')\n ui = dbus.Interface(proxy_obj, 'org.wicd.daemon.ui')\n self._dbus_ifaces = {\"daemon\" : daemon,\n \"interface\" : interface, \n \"ui\" : ui}", "def __init__(self, ifname):\n\n self._dbus_loop = gobject.MainLoop()\n self._bus = dbus.SystemBus()\n wait_bus_owner_timeout = 5 # Wait for 5s to have an owner for the bus name we are expecting\n logger.debug('Going to wait for an owner on bus name ' + RemoteDhcpClientControl.DBUS_NAME)\n while not self._bus.name_has_owner(RemoteDhcpClientControl.DBUS_NAME):\n time.sleep(0.2)\n wait_bus_owner_timeout -= 0.2\n if wait_bus_owner_timeout <= 0: # We timeout without having an owner for the expected bus name\n raise Exception('No owner found for bus name ' + RemoteDhcpClientControl.DBUS_NAME)\n \n logger.debug('Got an owner for bus name ' + RemoteDhcpClientControl.DBUS_NAME)\n gobject.threads_init() # Allow the mainloop to run as an independent thread\n dbus.mainloop.glib.threads_init()\n \n dbus_object_name = RemoteDhcpClientControl.DBUS_OBJECT_ROOT + '/' + str(ifname)\n logger.debug('Going to communicate with object ' + dbus_object_name)\n self._dhcp_client_proxy = self._bus.get_object(RemoteDhcpClientControl.DBUS_SERVICE_INTERFACE, dbus_object_name)\n self._dbus_iface = dbus.Interface(self._dhcp_client_proxy, RemoteDhcpClientControl.DBUS_SERVICE_INTERFACE)\n \n logger.debug(\"Connected to D-Bus\")\n self._dhcp_client_proxy.connect_to_signal(\"IpConfigApplied\",\n self._handleIpConfigApplied,\n dbus_interface = RemoteDhcpClientControl.DBUS_SERVICE_INTERFACE,\n message_keyword='dbus_message') # Handle the IpConfigApplied signal\n \n self._dhcp_client_proxy.connect_to_signal(\"LeaseLost\",\n self._handleLeaseLost,\n dbus_interface = RemoteDhcpClientControl.DBUS_SERVICE_INTERFACE,\n message_keyword='dbus_message') # Handle the IpConfigApplied signal\n \n #Lionel: the following line is used for D-Bus debugging only\n #self._bus.add_signal_receiver(catchall_signal_handler, interface_keyword='dbus_interface', member_keyword='member')\n self._dbus_loop_thread = threading.Thread(target = self._loopHandleDbus) # Start handling D-Bus messages in a background thread\n self._dbus_loop_thread.setDaemon(True) # D-Bus loop should be forced to terminate when main program exits\n self._dbus_loop_thread.start()\n \n self._bus.watch_name_owner(RemoteDhcpClientControl.DBUS_NAME, self._handleBusOwnerChanged) # Install a callback to run when the bus owner changes\n \n self._callback_new_lease_mutex = threading.Lock() # This mutex protects writes to the _callback_new_lease attribute\n self._callback_new_lease = None\n \n self._exit_unlock_event = threading.Event() # Create a new threading event that will allow the exit() method to wait for the child to terminate properly\n self._getversion_unlock_event = threading.Event() # Create a new threading event that will allow the GetVersion() D-Bus call below to execute within a timed limit \n\n self.status = DhcpLeaseStatus.DhcpLeaseStatus()\n\n 
self._getversion_unlock_event.clear()\n self._remote_version = ''\n self._dbus_iface.GetVersion(reply_handler = self._getVersionUnlock, error_handler = self._getVersionError)\n if not self._getversion_unlock_event.wait(10): # We give 10s for slave to answer the GetVersion() request\n logfile = tempfile.NamedTemporaryFile(prefix='TimeoutOnGetVersion-', suffix='.log', delete=False)\n if logfile:\n print('Saving TimeoutOnGetVersion environment dump to file \"' + logfile.name + '\"', file=sys.stderr)\n print('TimeoutOnGetVersion', file=logfile)\n subprocess.call('ps -ef', stdout=logfile, shell=True)\n subprocess.call('perl ./dbus-introspect.pl --system com.legrandelectric.RobotFrameworkIPC.DhcpClientLibrary /com/legrandelectric/RobotFrameworkIPC/DhcpClientLibrary/eth1', stdout=logfile, shell=True)\n subprocess.call('dbus-send --system --type=method_call --print-reply --dest=com.legrandelectric.RobotFrameworkIPC.DhcpClientLibrary /com/legrandelectric/RobotFrameworkIPC/DhcpClientLibrary/eth1 com.legrandelectric.RobotFrameworkIPC.DhcpClientLibrary.GetVersion', stdout=logfile, shell=True)\n logfile.close()\n raise Exception('TimeoutOnGetVersion')\n else:\n logger.debug('Slave version: ' + self._remote_version)", "def add_digital_bus(td, name, start_state, format='Hex'):\n\n dbus = td.addDigitalBus(name, start_state, format)\n\n return dbus", "def connectAdapter(self):\n self.canusb = pycanusb.CanUSB(bitrate='500')\n print('CanUSB: ',self.canusb)\n Msg = Switch_to_Operational_State_Msg()\n QTimer.singleShot(50,lambda msg = Msg : self.initialization(Msg))", "def __init__(self, interface_watcher, conn, dbus_object_path = DBUS_OBJECT_ROOT, **kwargs):\n # Note: **kwargs is here to make this contructor more generic (it will however force args to be named, but this is anyway good practice) and is a step towards efficient mutliple-inheritance with Python new-style-classes\n dbus.service.Object.__init__(self, conn=conn, object_path=dbus_object_path)\n self.interface_watcher = interface_watcher\n interface_watcher.interface_destroy_callback = self.InterfaceRemoved\t# Request interface_watcher object to call InterfaceRemoved (in order to send a D-Bus signal when secondary network interface is going down)\n interface_watcher.interface_add_callback = self.InterfaceAdded\t# Request interface_watcher object to call InterfaceAdded (in order to send a D-Bus signal when secondary network interface is going up)\n logger.debug('Registered binding with D-Bus object PATH: ' + str(dbus_object_path))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds service to previously initialized app.
def add_service(self, service): self.app.add_service(service)
[ "def addService(self, service):\n self.services |= service", "def addService(self, service):\n\t\tself.services.append(service)\n\t\treturn self", "def add(self, service: AbstractService):\n self.services.append(service)", "def register_service(self, service):\n return", "def initialize_service(self):\r\n pass", "def on_register_services(self):\n pass", "def initService(self):", "def _init_services(self) -> None:\n pass", "def add_app(self):\n \n pass", "def init_app(self, app):\n super(ActivityService, self).init_app(app)", "def init_services():\n\n event_log_service = EventLogService()", "def on_service_added(self, name, addrinfo):", "def _set_services(self):\n pass", "def register_service(application):\n consul_host = application.config['CONSUL_ADDR']\n consul_port = int(application.config['CONSUL_PORT'])\n client = consul.Consul(host=consul_host, port=consul_port)\n\n service = application.config['MODEL_ID']\n\n addr = application.config['LEGION_ADDR']\n port = int(application.config['LEGION_PORT'])\n\n print('Registering model %s located at %s:%d on http://%s:%s' % (service, addr, port, consul_host, consul_port))\n\n client.agent.service.register(\n service,\n address=addr,\n port=port,\n tags=['legion', 'model'],\n check=consul.Check.http('http://%s:%d/healthcheck' % (addr, port), '2s')\n )", "def add_service(torconfig, service, port=None):\n # picks a random port until it finds one avaible.\n while not service.tcp:\n port = port or new_port()\n try:\n service.tcp = reactor.listenTCP(port, service.factory)\n except error.CannotListenError:\n pass\n\n service.hs = txtorcon.HiddenService(\n torconfig, os.path.join(config.tor_data, service.name),\n ['%d 127.0.0.1:%d' % (service.port, port)])\n apaf.hiddenservices.append(service)", "def add(self):\n if self.state != service_states.RUNNING:\n # log.debug(\"Trying to add service '%s'\" % self.name)\n self.state = service_states.STARTING\n self.last_state_change_time = dt.datetime.utcnow()\n failed_prereqs = self.dependencies[:]\n # List of service prerequisites that have not been satisfied\n for dependency in self.dependencies:\n # log.debug(\"'%s' service checking its prerequisite '%s:%s'\"\n # % (self.get_full_name(), ServiceRole.to_string(dependency.service_role),\n # dependency.owning_service.name))\n no_services_satisfy_dependency = True\n remove_dependency = False\n for svc in self.app.manager.service_registry.itervalues():\n # log.debug(\"Checking service %s state.\" % svc.name)\n if dependency.is_satisfied_by(svc):\n no_services_satisfy_dependency = False\n # log.debug(\"Service %s:%s running: %s\" % (svc.name,\n # svc.name, svc.state))\n if svc.running() or svc.completed():\n remove_dependency = True\n if no_services_satisfy_dependency:\n if self.app.config.ignore_unsatisfiable_dependencies:\n remove_dependency = True\n else:\n # Fall into infinite loop.\n pass\n if remove_dependency and dependency in failed_prereqs:\n failed_prereqs.remove(dependency)\n if len(failed_prereqs) == 0:\n log.info(\"{0} service prerequisites OK; starting the service.\".format(\n self.get_full_name()))\n self.start()\n return True\n else:\n log.debug(\"{0} service prerequisites are not yet satisfied, waiting for: {2}. 
\"\n \"Setting {0} service state to '{1}'\"\n .format(self.get_full_name(), service_states.UNSTARTED, failed_prereqs))\n # Reset state so it get picked back up by monitor\n self.state = service_states.UNSTARTED\n return False", "def _init_services(self):\n super(IndyManager, self)._init_services()\n\n indy = self.init_indy_service()\n self.add_service(\"indy\", indy)", "def set_service(self):\n\n if self.service:\n self.service = self.service(\n json=self.json,\n google_user=self.google_user,\n endpoint=self\n )", "def service(self, service):\n self._service = service" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the date (UTC) from 10 days ago formatted as YYYYMMDD.
def _ten_days_ago() -> str: ten_days_ago = gmtime(mktime(gmtime()) - TEN_DAYS_SECONDS) return strftime(DATE_FORMAT, ten_days_ago)
[ "def days_ago(n):\n old_date = datetime.datetime.now() - datetime.timedelta(days=n)\n return fmt(old_date.year) + fmt(old_date.month) + fmt(old_date.day)", "def n_days_ago(n_days):\n n_days_ago = datetime.datetime.now()-datetime.timedelta(days=n_days)\n return n_days_ago.strftime(\"%Y-%m-%d\")", "def days_back(i):\n yesterday = (datetime.now() - timedelta(i))\n return yesterday.strftime('%Y-%m-%d')", "def proper_start_date(self):\n\t\td = date.today() - timedelta(days=self.days_back)\n\t\treturn str(d)", "def relativeTime(date):\n diff = datetime.utcnow() - date\n\n if diff.days > 7 or diff.days < 0:\n return date.ctime()\n elif diff.days == 1:\n return '1 day ago'\n elif diff.days > 1:\n return '%d days ago' % diff.days\n elif diff.seconds <= 1:\n return 'just now'\n elif diff.seconds < 60:\n return '%d seconds ago' % diff.seconds\n elif diff.seconds < (60 * 2):\n return '1 minute ago'\n elif diff.seconds < (60 * 60):\n return '%d minutes ago' % (diff.seconds / 60)\n elif diff.seconds < (60 * 60 * 2):\n return '1 hour ago'\n else:\n return '%d hours ago' % (diff.seconds / (60 * 60))", "def human_date(self, date):\n return timeago.format(date)", "def long_ago_str(cls, date: datetime, utcnow_fn: Callable[[], datetime] = _utcnow) -> str:\n if date.tzinfo is None:\n date = date.replace(tzinfo=UTC)\n\n now = utcnow_fn()\n if date > now:\n return \"in the future\"\n\n delta = relativedelta(now, date)\n delta_str = cls._highest_period_delta_str(delta)\n if delta_str is None:\n return \"now\"\n else:\n return \"{} ago\".format(delta_str)", "def timesince_human(date): # TODO: let user specify format strings\n delta = timezone.now() - date\n\n num_years = delta.days / 365\n if (num_years > 0):\n return ungettext(u\"%d year ago\", u\"%d years ago\", num_years) % (\n num_years,)\n\n num_months = delta.days / 30\n if (num_months > 0):\n return ungettext(u\"%d month ago\", u\"%d months ago\",\n num_months) % num_months\n\n num_weeks = delta.days / 7\n if (num_weeks > 0): # TODO: \"last week\" if num_weeks == 1\n return ungettext(u\"%d week ago\", u\"%d weeks ago\",\n num_weeks) % num_weeks\n\n if (delta.days > 0): # TODO: \"yesterday\" if days == 1\n return ungettext(u\"%d day ago\", u\"%d days ago\",\n delta.days) % delta.days\n\n num_hours = delta.seconds / 3600\n if (num_hours > 0): # TODO: \"an hour ago\" if num_hours == 1\n return ungettext(u\"%d hour ago\", u\"%d hours ago\",\n num_hours) % num_hours\n\n num_minutes = delta.seconds / 60\n if (num_minutes > 0): # TODO: \"a minute ago\" if num_minutes == 1\n return ungettext(u\"%d minute ago\", u\"%d minutes ago\",\n num_minutes) % num_minutes\n\n return ugettext(u\"just now\")", "def format_relative_date(date, relative_to = None):\n # This is based roughly on George Edison's code from StackApps:\n # http://stackapps.com/questions/1009/how-to-format-time-since-xxx-e-g-4-minutes-ago-similar-to-stack-exchange-site/1018#1018\"\"\"\n\n now = datetime.datetime.now() if relative_to is None else relative_to\n diff = (now - date).seconds\n\n # Anti-repetition! 
These simplify the code somewhat.\n plural = lambda d: 's' if d != 1 else ''\n frmt = lambda d: (diff / float(d), plural(diff / float(d)))\n\n if diff < 60:\n return '%d second%s ago' % frmt(1)\n elif diff < 3600:\n return '%d minute%s ago' % frmt(60)\n elif diff < 86400:\n return '%d hour%s ago' % frmt(3600)\n elif diff < 172800:\n return 'yesterday'\n else:\n return date.strftime('M j / y - H:i')", "def render_delta_from_now(date):\n return render_delta(__timedelta_millis(date - utc()))", "def created_time_ago(self):\n return pretty_date(self.created_at)", "def calculate_date(x, now):\n\t#now = datetime.datetime.now()\n\tn = int(extract_only_number(x))\n\tif n > 0:\n\t\treturn (now - datetime.timedelta(n)).strftime(\"%d-%m-%Y\")\n\treturn now.strftime(\"%d-%m-%Y\")", "def get_n_days_ago(self, startdate, n):\n return startdate - datetime.timedelta(days=n)", "def file_age_to_string(days):\n if days < 1:\n return ''\n else:\n return N_('%(count)d day ago', '%(count)d days ago', days) % {'count':days}", "def __determine_age_date_str(days_ago):\n now = datetime.datetime.now()\n today = datetime.datetime(now.year, now.month, now.day)\n return ((today - datetime.timedelta(days_ago)).isoformat()\n if days_ago is not None\n else None)", "def next10(self):\n try:\n simpr = self.to_dateutil(datetime.now())\n # 10 first items\n return map(lambda x: x.date(), itertools.islice(simpr, 10))\n except ValueError as err:\n return _(\"Unable to evaluate {0:s} ; Error : {1:s} \").format(self.content, err.__str__)", "def _get_date_offset(days):\n return (datetime.date.today() -\n datetime.timedelta(days)).strftime(\"%Y-%m-%d\")", "def thirty_days_ago():\n return date.today() - timedelta(days=30)", "def pretty_date(time=False):\n\t\tfrom datetime import datetime\n\t\tnow = datetime.now()\n\t\tif type(time) is int:\n\t\t diff = now - datetime.fromtimestamp(time)\n\t\telif isinstance(time,datetime):\n\t\t diff = now - time \n\t\telif not time:\n\t\t diff = now - now\n\t\tsecond_diff = diff.seconds\n\t\tday_diff = diff.days\n\n\t\tif day_diff < 0:\n\t\t return ''\n\n\t\tif day_diff == 0:\n\t\t if second_diff < 10:\n\t\t return \"just now\"\n\t\t if second_diff < 60:\n\t\t return str(second_diff) + \" seconds ago\"\n\t\t if second_diff < 120:\n\t\t return \"a minute ago\"\n\t\t if second_diff < 3600:\n\t\t return str( second_diff / 60 ) + \" minutes ago\"\n\t\t if second_diff < 7200:\n\t\t return \"an hour ago\"\n\t\t if second_diff < 86400:\n\t\t return str( second_diff / 3600 ) + \" hours ago\"\n\t\tif day_diff == 1:\n\t\t return \"Yesterday\"\n\t\tif day_diff < 7:\n\t\t return str(day_diff) + \" days ago\"\n\t\tif day_diff < 31:\n\t\t return str(day_diff/7) + \" weeks ago\"\n\t\tif day_diff < 365:\n\t\t return str(day_diff/30) + \" months ago\"\n\t\treturn str(day_diff/365) + \" years ago\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the last month (UTC) formatted as YYYY-MM.
def _last_month() -> str: time_now = gmtime() return ( f"{time_now.tm_year}-{time_now.tm_mon - 1:02d}" if time_now.tm_mon > 1 else f"{time_now.tm_year - 1}-12" )
[ "def last_month():\n return datetime.now() + relativedelta(months=-1)", "def last_month():\n LastMonth = datetime.now().month - 1\n currentYear = datetime.now().year\n\n if LastMonth == 0:\n LastMonth = 12\n currentYear = currentYear - 1\n\n return currentYear, LastMonth", "def CURRENTMONTH(self):\r\n return \"%02d\" % self.utcnow.month", "def LOCALMONTH(self):\r\n return \"%02d\" % self.now.month", "def month_last_day(year, month):\n\n return monthrange(year, month)[1]", "def floor_end_month(date):\n return datetime(date.year, date.month, 1) + timedelta(days=-1)", "def CURRENTMONTHNAME(self):\r\n return self.utcnow.strftime(\"%B\")", "def end_month(d):\n return date(d.year, d.month, monthrange(d.year, d.month)[1])", "def get_end_month(month):\n return datetime(2020, month, 28)", "def _get_month(self):\n return self.datetime.month", "def m(dt):\n return '%02d' % dt.month", "def no_pad_month() -> str:\n return no_pad_code(\"m\")", "def last_day_of_month(year, month):\n return monthrange(year, month)[1]", "def get_months_to_date():\n month_sequence = [5, 4, 3, 2, 1, 12, 11, 10, 9, 8] # season is August to May\n try:\n current_month_index = month_sequence.index(dt.now().month)\n except ValueError:\n current_month_index = 0\n\n return month_sequence[current_month_index:]", "def get_current_month() -> int:\n return datetime.now().month", "def get_last_day_of_month(today: Optional[datetime] = None) -> int:\n if today is None:\n today = datetime.utcnow()\n return monthrange(today.year, today.month)[1]", "def _compute_year_month_str(frame: sourcecatalog.SourceFrame) -> str:\n time = Time(frame.exposure_mjd_mid, format=\"mjd\", scale=\"utc\")\n return time.strftime(\"%Y-%m\")", "def MONTH(date):\n return _make_datetime(date).month", "def default_end_date() -> str:\n last = now - relativedelta(months=1)\n return f\"{last.year}-{last.month:02}\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve the latest exchange rate from the given ECB data.
def _get_latest_ecb_rate(data: bytes) -> float: root = etree.fromstring(data) values = root.xpath('.//generic:ObsValue/@value', namespaces=root.nsmap) last_value = len(values) - 1 return float(values[last_value])
[ "def exchange_rate(self):\n res = r.get(self.url + self.current_rate)\n return self.execute(res)", "def exchange_rate(self, date=None):\n if date is None:\n exchange_rate = self.exchange_rates.latest()\n else:\n exchange_rate = self.exchange_rates.get(date=date)\n return exchange_rate.rate", "def get_exchange_rate(self, currency_id, exchange_rate_id): \n url = base_url + 'currencies/' + currency_id + '/exchangerates/' + \\\n exchange_rate_id\n resp = zoho_http_client.get(url, self.details, self.headers)\n return parser.get_exchange_rate(resp)", "def getData(self):\n\n url = 'https://www.ecb.europa.eu/stats/eurofxref/eurofxref-hist.zip'\n try:\n file, _ = urlretrieve(url)\n zip_file_object = zipfile.ZipFile(file, 'r')\n first_file = zip_file_object.namelist()[0]\n file = zip_file_object.open(first_file)\n\n file_handler = []\n for row in file:\n file_handler.append(row.decode())\n\n # getting the currency headers into header_list\n header_list = []\n notFound = True\n x = 0\n while notFound:\n if file_handler[x].startswith('Date'):\n header = file_handler[x].split(',')\n for col in header:\n header_list.append(col.strip())\n notFound = False\n x += 1\n self.currencies = list(filter(None, header_list))\n self.currencies.append('EUR')\n self.currencies = self.currencies[1:] # Removing the \"Date\" entry\n\n data = []\n for row in file_handler[x:]:\n if row.startswith('`\\n'):\n break\n else:\n data.append(list(filter(None, [x.replace('\\n', '') for x in row.split(',')]))) # Removing any empty extra columns at the end of each rows\n\n # filling my self.rates with the currency in the format {CURR: {date: rate, ...}, ...}\n for row in data:\n for i in range(len(self.currencies)):\n try:\n if self.currencies[i] not in self.rates:\n self.rates[self.currencies[i]] = {row[0]: row[i + 1]}\n else:\n self.rates[self.currencies[i]].update({row[0]: row[i + 1]})\n except IndexError:\n # We reached the EUR section\n if self.currencies[i] not in self.rates:\n self.rates[self.currencies[i]] = {row[0]: '1.0000'}\n else:\n self.rates[self.currencies[i]].update({row[0]: '1.0000'})\n\n self.currencies.sort()\n\n except Exception as e:\n print('Failed to process the data')\n print(e)\n finally:\n file.close()", "def get_currency_exchange_rate(self, from_currency, to_currency):\n _FUNCTION_KEY = 'CURRENCY_EXCHANGE_RATE'\n return _FUNCTION_KEY, 'Realtime Currency Exchange Rate', None", "def get_rate(currency, date):\n status = 400\n while status != 200:\n url = (\"http://api.nbp.pl/api/exchangerates/rates/A/%s/%d-%02d-%02d?format=json\" %\n (currency, date.year, date.month, date.day))\n\n response = requests.get(url)\n status = response.status_code\n if status != 200:\n date = date - datetime.timedelta(1)\n\n tree = json.loads(response.content)\n assert len(tree['rates']) == 1\n print_rate_info(tree['rates'])\n return (tree['rates'][0]['mid'], date)", "def get_latest(self):\n url = f\"{self.get_api_url()}+latest\"\n # set api parameters\n params = {}\n params.update({'base': self.base_currency})\n params.update({'symbols': ','.join(self.target_currency_codes)})\n # call the api for rates\n response = requests.get(url, params=params)\n if response.status_code == 200:\n base, rates = response.json().get('base'), response.json().get('rates')\n # remove base currency from rates if it is returned by the data source\n rates.pop(self.base_currency, None)\n return base, rates\n return None, None", "def get_euro_exchange_rates(currency, frequency=\"D\"):\n ISO_4217_RE = re.compile(r\"[A-Z]{3}\")\n FREQUENCIES = [\"D\", 
\"M\", \"A\"]\n \n URL_TEMPLATE = \"http://sdw-wsrest.ecb.europa.eu/service/data/EXR/{}.{}.EUR.SP00.A?format=csvdata\"\n \n if not ISO_4217_RE.match(currency):\n raise ValueError('\"' + currency + '\" is no valid currency code!')\n if frequency not in FREQUENCIES:\n raise ValueError(\"Frequency must be one of \" + \", \".join(FREQUENCIES))\n \n url = URL_TEMPLATE.format(frequency, currency)\n req = Request(url)\n response = urlopen(req)\n lines = []\n for line in response:\n lines.append(line.decode(\"utf-8\"))\n reader = csv.DictReader(lines)\n result = {}\n for line in reader:\n date = line[\"TIME_PERIOD\"]\n value = line[\"OBS_VALUE\"]\n result[date] = value\n return result", "def parse_rate():\n try:\n response = requests.get(ecb_url)\n except Exception as e:\n return {\"error\": \"error occurred while accessing www.ecb.europa.eu: {}\".format(e)}, True\n else:\n currency_xml = response.content.decode()\n root = ET.fromstring(currency_xml)\n currencies_list = [currency.attrib.get('currency') for currency in root.iter(cube) if currency.attrib.get('currency')]\n rates_list = [float(currency.attrib.get('rate')) for currency in root.iter(cube) if currency.attrib.get('rate')]\n result = dict(zip(currencies_list, rates_list))\n result['EUR'] = float(1)\n return result, False", "def get_exchange_rates(self):\n\n api_url = '{0}exchange_rates'.format(self.api_base_url)\n\n return self.__request(api_url)", "def get_next_rate(self):\n try:\n return ExchangeHistory.objects.filter(currency=self.currency,\n from_date__gt=self.from_date).latest('from_date')\n except ObjectDoesNotExist:\n return None", "async def get_latest_rate(self, currency, target):\n client = AsyncHTTPClient()\n endpoint = API_ENDPOINT.format(currency=currency, target=target)\n response = await client.fetch(endpoint)\n if response.code == 200:\n json_response = json_decode(response.body)\n return json_response['rates'][target]\n if response.body:\n json_response = json_decode(response.body)\n if response.code == 200:\n return json_response['rates'][target]\n elif response.code == 400:\n raise InvalidRateEntryError(json_response['error'])\n else:\n raise CurrencyServiceError(json_response['error'])\n else:\n raise CurrencyServiceError(\"Unkown error when calling the external rates API\")", "def get_exchange_rate(initial_currency):\n\n\t# Account for condition that flight results were returned in CAD\n\tif initial_currency.upper() == 'CAD':\n\t\treturn 1\n\n\tconversion = '{}_CAD'.format(initial_currency.upper())\n\n\turl = 'http://free.currencyconverterapi.com/api/v5/convert?q={0}&compact=y'.format(\n\t\tconversion\n\t)\n\n\ttry:\n\t\tresponse = safe_get(url)\n\t\tresponse_json = loads(response.text)\n\texcept Exception:\n\t\treturn 1\n\n\trate = response_json[conversion]['val']\n\n\treturn rate", "def view_exchange_rate():\n # Get source currency\n source = prompt_for_currency(\"Enter source currency: \")\n # Get destination currency\n destination = prompt_for_currency(\"Enter destination currency: \")\n # Get the exchange rate in both directions\n exchange_rate = api.get_exchange_rate(source,\n destination)\n # Get the conversion rate from source > destination\n conversion_rate = api.get_conversion_rate(source,\n destination,\n exchange_rate)\n # Display conversion rate from source > destination\n api.display_converted_currency(source,\n destination,\n 1,\n conversion_rate)\n # Get the conversion rate from destination > source\n conversion_rate = api.get_conversion_rate(destination,\n source,\n exchange_rate)\n # Display 
conversion rate from destination > source\n api.display_converted_currency(destination,\n source,\n 1,\n conversion_rate)\n press_enter_to_continue()", "def lookup(self, invoice_code):\n return self.exchange_rate_btc_today[0]", "async def get_currency_rates(self, data):\n is_rate_changed = False\n message_type = 1\n for currency in self.foreign_currency_names:\n try:\n if \"rate\" in self.database[currency] and \\\n self.database[currency][\"rate\"] != float(data[currency.upper()][\"Value\"]):\n is_rate_changed = True\n self.database[currency][\"rate\"] = float(data[currency.upper()][\"Value\"])\n except (KeyError, TypeError):\n logging.ERROR(\"ключ не найден\")\n if is_rate_changed:\n await self.message_queue.put(message_type)\n return self.database", "def get_rate(self, t):\n return self.rates[bisect.bisect(self.change_times, t) - 1]", "def _get_eur_gbp_last_daily(self) -> None:\n data = _get_ecb_data(FREQUENCY_DAILY, _ten_days_ago(), _today())\n\n self.eur_gbp_last_day = _get_latest_ecb_rate(data)", "def base_exchange_rate(self):\n return self._base_exchange_rate" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve and store the 15min delayed BTC market price in EUR.
def _get_btc_eur_15min(self) -> None: with requests.get(BITCOIN_TICKER) as response: response.raise_for_status() json_data = response.json() self.btc_eur_15min = json_data["EUR"]["15m"]
[ "def get_price(self):\r\n try:\r\n self.price = self.exchange.symbol_ticker()\r\n except Exception as e:\r\n pass", "def get_buy_price(self,ticker,time):\n return self.broker.get_buy_price(ticker,time)", "def getprice():\n\n print(\"Get price\")\n latest_price = get_latest_price(item_code)\n return latest_price", "def get_sell_price(self,ticker,time):\n return self.broker.get_sell_price(ticker,time)", "def coffee_price(time_delay):\n\n\t# Wait a minute before making the request of the page.\n\ttime.sleep(time_delay)\n\n\t# Getting the url html page\n\tpage = urllib.request.urlopen(\"http://beans-r-us.appspot.com/prices-loyalty.html\") # => http.client.HTTP.Response\n\n\t# converting the data in a str object.\n\ttext = page.read().decode(\"utf8\") # => str\n\n\t# Looking for the index of the $ sign.\n\tindex_of_dollar_sign = text.index('$') # => int\n\n\t# Retrieving the price from the page.\n\tprice = text[index_of_dollar_sign: index_of_dollar_sign + 5] # => str\n\n\treturn price", "def BuyingPrice(self):\n return self.buying_rice", "def _get_btc_gbp_15min(self) -> None:\n self._get_eur_gbp_last_daily()\n\n self.btc_gbp_15min = self.btc_eur_15min * self.eur_gbp_last_day", "def track_price():\n r = requests.get('https://finance.yahoo.com/quote/EURPLN=X?p=EURPLN%3DX&.tsrc=fin-srch&guce_referrer'\n '=aHR0cHM6Ly9maW5hbmNlLnlhaG9vLmNvbS8_Z3VjZV9yZWZlcnJlcj1hSFIwY0hNNkx5OTNkM2N1WjI5d'\n 'loyeGxMbU52YlM4Jmd1Y2VfcmVmZXJyZXJfc2lnPUFRQUFBRG1vS3ROMkF5bzFpTDRpd29Td0Z4Z0NDTVN'\n 'XU3M0UkNoa2pBcGl2NmxobmxJcWRab0JIWUF6NVJuNHlZdkN1WTRBNEdwVTRfWjBZQ3JNM1RwX2ZMd05rej'\n 'g0TkVWdksyUzA3LVNmNXdndUJCUjhieG5sZEN4dGRCRmV6eEZfMnNQdEpQeXJ6UzREeV9WRUF4ZXNUMXNLYz'\n 'lnTm1pSlFCV3R6LVpLX0hvc2p5Jl9ndWNfY29uc2Vud'\n 'F9za2lwPTE1OTcwODc3MTg&guce_referrer_sig=AQAAAKzjjM2--Diw1M3gykrGHjIn9NdqSch_odxmo6xqtgD4pNo'\n 'anrEQBgPoZ9xkh8HPYFN1_9mpio4Fg2tEGa4GrsK69bHe4yN9LactTwdKEuBxazZPO751TNSeFH_lltkNoN1k7D6I978v'\n '1eXB9WaCp0NUgbRZRmbYEdoZmkmQvUq7&_guc_consent_skip=1597087949')\n if r.status_code != 200:\n raise ConnectionError\n else:\n soup = BeautifulSoup(r.text, 'html.parser')\n price_elem = soup.find('span', {\"class\": \"Trsdu(0.3s) Fw(b) Fz(36px) Mb(-4px) D(ib)\"})\n return float(price_elem.text)", "def market_value(self):\n return float(self.current_price_data()['price'])", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n uri = \"https://www.bitfinex.com/v2/ticker/t\"\n requestUrl = uri + pair\n jsonResponse = self.getJson(requestUrl)\n currentPrice = jsonResponse[0]\n return currentPrice", "def get_price(self):\n price = 0.01\n # try:\n # start = datetime.date.today() - datetime.timedelta(1)\n # end = datetime.date.today() - datetime.timedelta(1)\n # f = web.DataReader(self.stock_code, 'yahoo', start, end)\n # print(\"f = \" + str(f))\n # price = f.ix[start.strftime(\"%Y-%m-%d\")]['Close']\n # except:\n # print(\"Error! Cannot get stock price!\")\n\n try:\n f = quandl.get(\"WIKI/\" + self.stock_code, rows=1) # Get latest stock price\n price = f.iloc[0]['Adj. Close']\n except Exception as e:\n print(\"Error! Cannot get stock price! 
Stock: \" + self.stock_code + \", Quandl error: \" + str(e))\n return price", "def updateLastPrice(self):\n dt = datetime.now(tz=pytz.UTC).replace(microsecond=0)\n\n dt_central = dt.astimezone(\n pytz.timezone('US/Central')).strftime(\"%H:%M\")\n\n # UPDATE POSITION LAST PRICE AND UPDATE HIGH PRICE\n open_positions = self.open_positions.find(\n {\"Trader\": self.user[\"Name\"], \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id})\n\n open_positions_list = []\n\n for position in open_positions:\n\n symbol = position[\"Symbol\"]\n\n if symbol not in open_positions_list:\n\n open_positions_list.append(symbol)\n\n if len(open_positions_list) > 0:\n\n resp = self.tdameritrade.getQuotes(open_positions_list)\n\n if resp:\n\n for key, value in resp.items():\n\n symbol = key\n\n last_price = value[\"lastPrice\"]\n\n self.open_positions.update_many({\"Trader\": self.user[\"Name\"], \"Symbol\": symbol, \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id}, {\n \"$set\": {\"Last_Price\": last_price}})\n\n if dt_central == \"15:00\":\n\n self.open_positions.update_many({\"Trader\": self.user[\"Name\"], \"Symbol\": symbol, \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id}, {\n \"$set\": {\"Opening_Price\": last_price}})\n\n # UPDATE QUEUE LAST PRICE\n queues = self.queue.find(\n {\"Trader\": self.user[\"Name\"], \"Asset_Type\": self.asset_type})\n\n queues_list = []\n\n for queue in queues:\n\n if self.asset_type == \"EQUITY\":\n\n symbol = queue[\"Symbol\"]\n\n elif self.asset_type == \"OPTION\":\n\n symbol = queue[\"Pre_Symbol\"]\n\n if symbol not in queues_list:\n\n queues_list.append(symbol)\n\n if len(queues_list) > 0:\n\n resp = self.tdameritrade.getQuotes(queues_list)\n\n for key, value in resp.items():\n\n symbol = key\n\n last_price = value[\"lastPrice\"]\n\n if self.asset_type == \"EQUITY\":\n\n self.queue.update_many({\"Trader\": self.user[\"Name\"], \"Symbol\": symbol, \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id}, {\n \"$set\": {\"Last_Price\": last_price}})\n\n elif self.asset_type == \"OPTION\":\n\n self.queue.update_many({\"Trader\": self.user[\"Name\"], \"Pre_Symbol\": symbol, \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id}, {\n \"$set\": {\"Last_Price\": last_price}})", "def getPrice(coin,cur):\n price = 'https://api.coinmarketcap.com/v1/ticker/' + coin\n json = requests.get(price).json()\n value = json[0]['price_' + str(cur)]\n return value", "def price(self) -> float:\n return self.close", "def GetBuyRate(ticker):\r\n if ticker[\"result\"][\"Ask\"] / ticker[\"result\"][\"Last\"] >= BID_COEFFICIENT:\r\n rate = ticker[\"result\"][\"Last\"] * BID_COEFFICIENT\r\n else:\r\n rate = ticker[\"result\"][\"Ask\"]\r\n return rate", "def get_price():\n return uniform(1.0, 350.0)", "def buy_volatile(self, stable_amount):\n if self.demo:\n buy_intend = stable_amount/self.market_price\n self.get_balance()[self.currency['volatile']] += buy_intend\n self.get_balance()[self.currency['stable']] = 0\n buy_price = stable_amount/buy_intend\n else:\n buy_intend = stable_amount/self.market_price\n\n order_start = datetime.datetime.today().timestamp()\n order_data = self.client.place_order(self.market,'buy',None,buy_intend,'market')\n time.sleep(0.2)\n history_data = self.client.get_order_history(self.market,'buy','market',order_start)\n order_status = next(order for order in history_data if order['id'] == order_data['id'])\n\n buy_price = order_status['avgFillPrice']\n\n\n print(f'Bought {buy_intend} 
{self.currency[\"volatile\"]} for {buy_price*buy_intend} {self.currency[\"stable\"]}')\n print(f'({buy_price} {self.market})')\n print(self.get_balance())\n print('')\n self.last_buy_price = buy_price", "def retrievePrice(self):\n pass", "def buying_price(self):\n buy_price = self.standard_init_price()\n # Special status and resources price adaptation\n if self.planet.status in [self.tradeitem.dps]:\n buy_price = (buy_price * 5) / 3\n\n elif self.planet.special in [self.tradeitem.cr]:\n buy_price = (buy_price * 3) / 4\n\n elif self.planet.special in [self.tradeitem.er]:\n buy_price = (buy_price * 4) / 3\n\n # randomize a bit\n moins = random.randrange(self.tradeitem.var)\n plus = random.randrange(self.tradeitem.var)\n buy_price = buy_price - moins + plus\n\n # price can't be negative\n if buy_price < 0:\n buy_price = 0\n\n return int(buy_price)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve and store last month's EUR to GBP average rate.
def _get_eur_gbp_last_month(self) -> None: last_month = _last_month() data = _get_ecb_data(FREQUENCY_MONTHLY, last_month, last_month) self.eur_gbp_last_month = _get_latest_ecb_rate(data)
[ "def _get_eur_gbp_last_daily(self) -> None:\n data = _get_ecb_data(FREQUENCY_DAILY, _ten_days_ago(), _today())\n\n self.eur_gbp_last_day = _get_latest_ecb_rate(data)", "def get_avg(self):\r\n df = pd.read_csv(\"MonthlyRate.csv\")\r\n df = df[df.CurrencyCode == self.choice]\r\n mean = df.mean(axis=1).values[0]\r\n # Round the value to 4 d.p.\r\n mean = round(float(mean), 4)\r\n return mean", "def average_monthly_price(self):\n currency_data_list = self.currency_data\n unique_list_of_dates = set(map(lambda x: x[\"date\"][0:7], currency_data_list))\n\n currency_date_list = []\n for i in unique_list_of_dates:\n dict_dates = {\"date\": i,\n \"price\": [x[\"price\"] for x in currency_data_list if x[\"date\"][0:7] == i]}\n currency_date_list.append(dict_dates)\n\n date_col = 'Date'\n avg_price_col = 'Average price ($)'\n print('{0:10} {1}'.format(date_col, avg_price_col))\n for i in currency_date_list:\n i[\"price\"] = round(sum(i[\"price\"])/len(i[\"price\"]), 2)\n print('{0:10} {1}'.format(i[\"date\"], i[\"price\"]))", "def averageMonthly(self):\n return self.averageDataMonthly(self.values)", "def GetSellRate(ticker):\r\n if ticker[\"result\"][\"Bid\"] / ticker[\"result\"][\"Last\"] <= ASK_COEFFICIENT:\r\n rate = ticker[\"result\"][\"Last\"] * ASK_COEFFICIENT\r\n else:\r\n rate = ticker[\"result\"][\"Bid\"]\r\n return rate", "def risk_free_rate():\n raw = pd.read_csv('data/US_3M_daily_rate.csv', \n parse_dates=[0], names=['date', 'rate'], \n index_col='date', header=0, na_values='.', \n dtype={'rate': np.float64})\n \n raw = raw.ffill() # Forward fill missing days\n\n return raw / 100 # To make it percent", "def bitcoinaverage(site):\n url = \"https://apiv2.bitcoinaverage.com/frontend/constants/exchangerates/local\"\n try:\n session = requests.Session()\n cfscrape_requests = cfscrape.create_scraper(sess=session)\n ret = cfscrape_requests.get(url, timeout=(15, 15)).json()[\"rates\"]\n data = {\"USD:\" + key: float(val[\"rate\"]) for key, val in ret.items()}\n data = refine_data(data)\n print(site, data)\n race_write(f\"{site}_forex.txt\", json_dumps(data))\n except:\n print(f\"{site} failed to load\")", "def test_get_historical_gold_rate(self):\n rates = [153.50, 162.49, 123.86, 155.10]\n helper.gold_loop_helper(get_historical_gold_rate, TestHistoricalRates.dates_rate, rates)", "def get_average_for_month(self, month, weekend):\n\t\tif weekend:\n\t\t\treturn self.averages_weekend[month]\n\t\telse:\n\t\t\treturn self.averages_weekday[month]", "def _prorata_rate(self, days_used, days_in_month):\n return (100 * days_used // days_in_month) / 100.0", "def get_average_growth_rate(monthList: list) -> list:\r\n \r\n growth_rate_list = []\r\n \r\n i = 0\r\n while i < len(monthList) - 1:\r\n growthRate = (monthList[i+1] - monthList[i]) / monthList[i]\r\n growth_rate_list.append(growthRate)\r\n i = i + 1\r\n \r\n return growth_rate_list", "def get_global_avg(self):\n return self.total / self.count", "def year_average_price_rule(_m, y):\r\n\r\n # Total revenue\r\n return sum(m.SCENARIO_REVENUE[y, s] for s in m.S) / sum(m.SCENARIO_DEMAND[y, s] for s in m.S)", "def _predicted_rate_avg(self):\n if len(self.history) < 2:\n return None\n work_done = self.history[-1][0]\n return float(self.history[-1][0] - self.start[0]) / \\\n (self.history[-1][1] - self.start[1])", "def quotamax_rate(self):\n return self._quotamax_rate", "def getAveragePriceForADay(self):\n averagePrice = self.price_srvr.getTodaysAveragePriceBySymbol(self.symbol)\n return averagePrice", "def market_base_rate(self):\n return 
self._market_base_rate", "def GetMonthlyAverages(MoDataDF):\n \n return( MonthlyAverages )", "def _predicted_rate_period(self):\n if len(self.history) < 2:\n return None\n work_done = self.history[-1][0]\n remaining_work = self.total_work - work_done\n # Drop all old history entries.\n while work_done - self.history[1][0] > remaining_work:\n self.history.pop(0)\n return float(self.history[-1][0] - self.history[0][0]) / \\\n (self.history[-1][1] - self.history[0][1])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve and store the latest daily EUR to GBP average rate.
def _get_eur_gbp_last_daily(self) -> None: data = _get_ecb_data(FREQUENCY_DAILY, _ten_days_ago(), _today()) self.eur_gbp_last_day = _get_latest_ecb_rate(data)
[ "def bitcoinaverage(site):\n url = \"https://apiv2.bitcoinaverage.com/frontend/constants/exchangerates/local\"\n try:\n session = requests.Session()\n cfscrape_requests = cfscrape.create_scraper(sess=session)\n ret = cfscrape_requests.get(url, timeout=(15, 15)).json()[\"rates\"]\n data = {\"USD:\" + key: float(val[\"rate\"]) for key, val in ret.items()}\n data = refine_data(data)\n print(site, data)\n race_write(f\"{site}_forex.txt\", json_dumps(data))\n except:\n print(f\"{site} failed to load\")", "def _update_average(self):\n\n # do request\n r = requests.get(self._url + '?type=5min')\n\n # check code\n if r.status_code != 200:\n logging.error('Could not connect to McDonald weather station.')\n return\n\n # get weather\n weather = r.json()\n\n # get time\n time = Time(weather['time']).to_datetime(pytz.UTC)\n\n # got all values, now add them\n self._add_value('temp', time, weather['temp']['avg'])\n self._add_value('humid', time, weather['humid']['avg'])\n self._add_value('winddir', time, weather['winddir']['avg'])\n self._add_value('windspeed', time, weather['windspeed']['avg'])\n #self._add_value('press', time, weather['press']['avg'])\n self._add_value('rain', time, weather['rain']['max'])", "def get_avg(self):\r\n df = pd.read_csv(\"MonthlyRate.csv\")\r\n df = df[df.CurrencyCode == self.choice]\r\n mean = df.mean(axis=1).values[0]\r\n # Round the value to 4 d.p.\r\n mean = round(float(mean), 4)\r\n return mean", "def GetSellRate(ticker):\r\n if ticker[\"result\"][\"Bid\"] / ticker[\"result\"][\"Last\"] <= ASK_COEFFICIENT:\r\n rate = ticker[\"result\"][\"Last\"] * ASK_COEFFICIENT\r\n else:\r\n rate = ticker[\"result\"][\"Bid\"]\r\n return rate", "def test_get_historical_gold_rate(self):\n rates = [153.50, 162.49, 123.86, 155.10]\n helper.gold_loop_helper(get_historical_gold_rate, TestHistoricalRates.dates_rate, rates)", "def risk_free_rate():\n raw = pd.read_csv('data/US_3M_daily_rate.csv', \n parse_dates=[0], names=['date', 'rate'], \n index_col='date', header=0, na_values='.', \n dtype={'rate': np.float64})\n \n raw = raw.ffill() # Forward fill missing days\n\n return raw / 100 # To make it percent", "def exchange_rate(self):\n res = r.get(self.url + self.current_rate)\n return self.execute(res)", "def interval_avg():\n r = request.get_json()\n try:\n email = r[\"user_email\"]\n inter = r[\"heart_rate_average_since\"]\n except ValueError:\n print(\"Please provide the correct json format!\")\n try:\n isinstance(email, str)\n isinstance(inter, str)\n except TypeError:\n print(\"Please provide a valid email or proper time string!\")\n return interval_hr(email, inter)", "def get_average_purchase_price(self):\n return self.avg_entry_price", "def _get_eur_gbp_last_month(self) -> None:\n last_month = _last_month()\n data = _get_ecb_data(FREQUENCY_MONTHLY, last_month, last_month)\n\n self.eur_gbp_last_month = _get_latest_ecb_rate(data)", "def getAveragePriceForADay(self):\n averagePrice = self.price_srvr.getTodaysAveragePriceBySymbol(self.symbol)\n return averagePrice", "def interval_average():\r\n import statistics as st\r\n from tach_detect import tach_detect\r\n r = request.get_json()\r\n try:\r\n email = r[\"user_email\"]\r\n except KeyError:\r\n return jsonify(\"no email input\"), 400\r\n raise LookupError(\"no email input\")\r\n check_email = Check_For_User(email)\r\n if check_email.user_exists is False:\r\n return jsonify(str(email) + \" was not found. Please re-enter\"), 400\r\n raise LookupError(str(user_email) + \" was not found. 
Please re-enter\")\r\n try:\r\n input_date_time = r[\"date_time\"]\r\n except KeyError:\r\n return jsonify(\"no date entered\"), 400\r\n raise LookupError(\"no date entered\")\r\n try:\r\n validate_date_time(input_date_time)\r\n except (ValueError, TypeError) as error:\r\n return jsonify(\"date entered is invalid. Please re-type.\"), 400\r\n date_time = datetime.datetime(input_date_time[0], input_date_time[1],\r\n input_date_time[2], input_date_time[3],\r\n input_date_time[4], input_date_time[5],\r\n input_date_time[6])\r\n time_list = get_all_times(email)\r\n heart_rate_list = get_all_rates(email)\r\n interval_list = find_first_date(date_time, time_list, heart_rate_list)\r\n try:\r\n interval_average_post = st.mean(interval_list)\r\n user = models.User.objects.raw({\"_id\": email}).first()\r\n curr_age = user.age\r\n tach_test = tach_detect(curr_age, interval_average_post)\r\n return_dict = {\r\n \"user_email\": email,\r\n \"heart_rate_average_since\": str(date_time),\r\n \"heart_rate_average\": interval_average_post,\r\n \"is_heart rate_tachycardic\": str(tach_test)\r\n }\r\n except st.StatisticsError:\r\n interval_average_post = heart_rate_list[len(heart_rate_list)-1]\r\n user = models.User.objects.raw({\"_id\": email}).first()\r\n curr_age = user.age\r\n tach_test = tach_detect(curr_age, interval_average_post)\r\n return_dict = {\r\n \"user_email\": email,\r\n \"heart_rate_average_since\": str(date_time),\r\n \"heart_rate_average\": interval_average_post,\r\n \"is_heart rate_tachycardic\": str(tach_test)\r\n }\r\n return jsonify(return_dict), 200", "def GetBuyRate(ticker):\r\n if ticker[\"result\"][\"Ask\"] / ticker[\"result\"][\"Last\"] >= BID_COEFFICIENT:\r\n rate = ticker[\"result\"][\"Last\"] * BID_COEFFICIENT\r\n else:\r\n rate = ticker[\"result\"][\"Ask\"]\r\n return rate", "def current_price_data(self):\n url = 'http://finance.yahoo.com/d/quotes.csv?s=%s&f=%s' % (self.symbol, 'l1c1va2xj1b4j4dyekjm3m4rr5p5p6s7')\n values = urlopen(url).read().strip().strip('\"').split(',')\n data = {}\n data['price'] = values[0]\n data['change'] = values[1]\n data['volume'] = values[2]\n data['avg_daily_volume'] = values[3]\n data['stock_exchange'] = values[4]\n data['market_cap'] = values[5]\n data['book_value'] = values[6]\n data['ebitda'] = values[7]\n data['dividend_per_share'] = values[8]\n data['dividend_yield'] = values[9]\n data['earnings_per_share'] = values[10]\n data['52_week_high'] = values[11]\n data['52_week_low'] = values[12]\n data['50day_moving_avg'] = values[13]\n data['200day_moving_avg'] = values[14]\n data['price_earnings_ratio'] = values[15]\n data['price_earnings_growth_ratio'] = values[16]\n data['price_sales_ratio'] = values[17]\n data['price_book_ratio'] = values[18]\n data['short_ratio'] = values[19]\n return data", "def _capture_day(self, now):\n if self._verbose:\n print 'Saving daily stats averages'\n\n # get oldest day's worth of hourly data points\n a_day_ago = self._in_the_past(now, days=1)\n query = self._get_aggregate_query(a_day_ago)\n rows = self._db[stats_db.STATS_DATA_HOUR].aggregate(query)\n\n # loop through per torrent, save day data point\n for row in rows['result']:\n self._save_data_point_with_averages(row, now, 24, stats_db.STATS_DATA_DAY)\n\n stats_db.set_control_value('last_capture_day', now)", "def get_avg_price(self, **params):\n return self._get('avgPrice', data=params, version=self.PRIVATE_API_VERSION)", "def get_latest(self):\n url = f\"{self.get_api_url()}+latest\"\n # set api parameters\n params = {}\n params.update({'base': 
self.base_currency})\n params.update({'symbols': ','.join(self.target_currency_codes)})\n # call the api for rates\n response = requests.get(url, params=params)\n if response.status_code == 200:\n base, rates = response.json().get('base'), response.json().get('rates')\n # remove base currency from rates if it is returned by the data source\n rates.pop(self.base_currency, None)\n return base, rates\n return None, None", "def market_base_rate(self):\n return self._market_base_rate", "def averagePrice(self, onlyUnconsumed):\n\n\t\tif onlyUnconsumed:\n\t\t\treturn self.unconsumedValue / (len(self.bottles) - self.numberConsumed)\n\n\t\treturn self.totalValue / len(self.bottles)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the 15min delayed BTC market price in GBP.
def _get_btc_gbp_15min(self) -> None: self._get_eur_gbp_last_daily() self.btc_gbp_15min = self.btc_eur_15min * self.eur_gbp_last_day
[ "def calculate_buy_price(price: float):\n return round(price / (1 + CONF.trade_advantage_in_percent / 100), 1)", "def _get_btc_eur_15min(self) -> None:\n with requests.get(BITCOIN_TICKER) as response:\n response.raise_for_status()\n json_data = response.json()\n\n self.btc_eur_15min = json_data[\"EUR\"][\"15m\"]", "def buying_price(self):\n buy_price = self.standard_init_price()\n # Special status and resources price adaptation\n if self.planet.status in [self.tradeitem.dps]:\n buy_price = (buy_price * 5) / 3\n\n elif self.planet.special in [self.tradeitem.cr]:\n buy_price = (buy_price * 3) / 4\n\n elif self.planet.special in [self.tradeitem.er]:\n buy_price = (buy_price * 4) / 3\n\n # randomize a bit\n moins = random.randrange(self.tradeitem.var)\n plus = random.randrange(self.tradeitem.var)\n buy_price = buy_price - moins + plus\n\n # price can't be negative\n if buy_price < 0:\n buy_price = 0\n\n return int(buy_price)", "def current_price_calculator(self, base, build_ct, buy_compensation=False):\n if not buy_compensation:\n buy_ct = self.var.get()\n else:\n buy_ct = 1\n return (base * (1.15**(build_ct + buy_ct) - 1.15**build_ct))/.15", "def get_buy_price(self,ticker,time):\n return self.broker.get_buy_price(ticker,time)", "def BuyingPrice(self):\n return self.buying_rice", "def target_buy_price(self):\n if self.period_tick == 0:\n return random.randint(1, 10)\n elif self.period_tick % self.perseverance == 0:\n # Player runs out of patience and decides to change target price.\n (avg_price,\n max_price,\n min_price) = self.market.get_stock_price_last_period()\n\n power = self.period_tick / self.perseverance\n target_price = min(min_price + power, self.money_balance * 0.5)\n return target_price\n else:\n return None", "def GetBuyRate(ticker):\r\n if ticker[\"result\"][\"Ask\"] / ticker[\"result\"][\"Last\"] >= BID_COEFFICIENT:\r\n rate = ticker[\"result\"][\"Last\"] * BID_COEFFICIENT\r\n else:\r\n rate = ticker[\"result\"][\"Ask\"]\r\n return rate", "def _run_calculation(_last_price) -> BuyPrice:\n # Calculate the minimum price and quantity\n filters: TradeApiFilters = trade_api.get_symbol_filters(symbol)\n\n # Calculate the final buy price using final_buy_price_change_percent\n symbol_data: SymbolData = trade_api.get_symbol(symbol)\n temp_price = _last_price * symbol_data.price / 100\n final_buy_price = temp_price - (temp_price % float(filters.minPrice))\n\n # Calculate the quantity to buy\n temp_quantity = config.buy_quantity_btc / float(final_buy_price)\n quantity = round((temp_quantity - (temp_quantity % float(filters.minQuantity))), 8)\n return BuyPrice(quantity=quantity, price=final_buy_price, last_price=_last_price)", "def get_price():\n return uniform(1.0, 350.0)", "def buy_one_cent_less_than_bid_or_50(self, bid_price):\n if bid_price:\n buying_price = self.buy_fixed_quantity_less_than_bid_price(\n bid_price=bid_price,\n fixed_quantity=0.01)\n else:\n buying_price = self.buy_fixed_price(50)\n return buying_price", "def brutto(self):\n ret = 0\n if self.fixed_price:\n ret += self.fixed_price\n if self.hours and self.fee:\n ret += round(self.fee * self.hours, 2)\n return ret", "def usdToBtc(dollar, bitcoin):\n global btc\n global usd\n if usd>dollar:\n usd-=dollar\n btc+=bitcoin\n return True\n return False", "def compute_time_price(supplier_with_transaction):\n supplier_item = supplier_with_transaction.get('supplier_detail')\n transaction_item = supplier_with_transaction.get('supplier_transaction')\n # Check if there is time prices or not\n if 
supplier_with_transaction.get('time_price'):\n # Check if we will compute in complex or simple\n if not supplier_item.get('has_complex_minute_price'):\n # start to calculate the simple version for time price\n charging_start = transaction_item.get('charging_start')\n charging_end = transaction_item.get('charging_end')\n if charging_start and charging_end:\n charging_start_obj = datetime.strptime(charging_start, '%Y-%m-%dT%H:%M:%S')\n charging_end_obj = datetime.strptime(charging_end, '%Y-%m-%dT%H:%M:%S')\n duration_in_minutes = (charging_end_obj - charging_start_obj).total_seconds() / 60\n # Check for min duration\n if supplier_item.get('min_duration') and duration_in_minutes < supplier_item.get('min_duration'):\n duration_in_minutes = supplier_item.get('min_duration')\n price = supplier_item.get('simple_minute_price')\n total_price = price * duration_in_minutes\n return total_price\n else:\n # start calculate the complex version for time price\n total_price = 0\n if supplier_item.get('interval') == 'start':\n for start_rec in supplier_item.get('time_price'):\n timeframe = start_rec.get('billing_each_timeframe') * 60\n if start_rec.get('hour_from', 0) > start_rec.get('hour_to', 0):\n duration = (start_rec.get('hour_to') - start_rec.get('hour_from')) * 60\n else:\n duration = (start_rec.get('hour_to') - (24 - start_rec.get('hour_from'))) * 60\n duration_after_timeframe = duration % timeframe\n total_duration = duration + duration_after_timeframe\n total_price += total_duration * start_rec.get('minute_price')\n else:\n for end_rec in supplier_item.get('time_price'):\n timeframe = end_rec.get('billing_each_timeframe') * 60\n if end_rec.get('hour_from', 0) > end_rec.get('hour_to', 0):\n duration = (end_rec.get('hour_to') - end_rec.get('hour_from')) * 60\n else:\n duration = (end_rec.get('hour_to') - (24 - end_rec.get('hour_from'))) * 60\n duration_after_timeframe = duration % timeframe\n total_duration = duration - (timeframe - duration_after_timeframe)\n total_price += total_duration * end_rec.get('minute_price')\n\n return total_price\n else:\n total_price = 0\n return total_price", "def current_price(self) -> float:\n res = self._calc_price()\n # _LOGGER.debug(\"Current hours price for %s is %s\", self.name, res)\n return res", "def price_relative():", "def get_base_price(self):\n base_price = random.randint(5,9)\n print(base_price)\n\n # see if the order was placed during rush hour\n now = datetime.datetime.now()\n\n dow = now.weekday() # Mon is 0, Sun is 6\n hour = now.hour\n\n if hour >= 8 and hour < 11 and dow >= 0 and dow < 5:\n base_price += 4\n\n return base_price", "def get_base_price(self):\n #8-11am, Monday-Friday\n splurge_base = random.randint(5,9)\n rush_hour = datetime.weekday(0, 4) and 8 < datetime.hour < 10\n\n if datetime.now() in rush_hour:\n return splurge_base + 4\n\n return splurge_base", "def purchase_price(self):\n if self.sold_on is None:\n return 0.0 # Not yet sold\n return 10000 - (.10 * self.miles)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Instantiate and run the worker.
def main() -> None: worker = Worker() worker.do_work()
[ "def create_and_run_worker(self):\n\n # Run processing on QThread worker - prevents GUI lock up\n # Create processing object, map control data\n processing_hub = ProcessingHub(control=self.control)\n\n # Create worker thread, connect signals to methods in this class and start, which calls worker.run()\n self.worker = ProcessingWorker(processing_hub, parent=self)\n self.worker.signal_screening_output_to_gui.connect(self.set_screening_output_to_gui)\n self.worker.signal_error.connect(self.error)\n self.worker.start()", "def start(self):\n if not self._worker:\n # the worker might be already created in case of deserialization\n self._worker = APIWorker(self.queue)\n self._worker.start()", "def run_worker(self):\n # TODO(xiejw): To allow execution framework to add train hooks.\n return self._start_distributed_training()", "def run(self, worker, evaluator=None):\n pass", "def create_worker(context=None):\n return BasicWorker(context)", "def run(self):\n self._sub.run_threaded()", "def worker(self, worker):\n \n self._worker = worker", "def start (self):\n\t\tself.log.info(\"starting workers.\")\n\t\tself.spawn(max(0,self.low-len(self.worker)))", "def __init__(self):\n # self.NBR_CLIENTS = 1\n manager = Manager()\n self.num_workers = 1\n self.connected_workers = manager.dict()\n self.created_jobs = []\n logging.info(\"Broker spawned and is ready to accept tasks.\")\n \n self.listen_process = multiprocessing.Process(target=self.main, args=(self.connected_workers,))\n self.listen_process.daemon = True\n self.listen_process.start()", "def __init__(self):\n if TrainingManager._queue is not None:\n return\n\n if DEBUG:\n print(\"Initialize training manager\")\n\n TrainingManager._queue: Queue = Queue()\n TrainingManager._process = Process(\n target=_worker, args=(TrainingManager._queue,)\n )\n TrainingManager._process.start()\n self._read_back_tasks_from_database() # Restore tasks from last run", "def __init__(self) -> None:\n\n self.work_thread = Thread(target=self.do_work)\n self.work_queue: Queue[CompilationTask] = Queue()\n self.is_running = True\n self.work_thread.start()\n self.tasks: dict[uuid.UUID, dict[str, Any]] = {}", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.setUpWorkers()", "def run(self):\n self.class_inst_obj.processor(self.msg)", "def run(self):\n\n self.make_connection()\n self.channel()\n self.declare_queue()\n self.publish_message()\n self.close_connection()", "def worker(self) -> _WorkerStub:\n if self._worker is None:\n port = os.environ.get(WORKER_PORT_ENV)\n if port is None:\n raise ConnectionError(\n f\"Cannot connect to the worker since {WORKER_PORT_ENV} is not set. 
\"\n \"Are you running inside a pipeline?\"\n )\n # Note: This channel does not go through the metadata interceptor.\n channel = _create_channel(\n address=f\"localhost:{port}\", root_certs=None, options=GRPC_CHANNEL_OPTIONS\n )\n self._worker = _WorkerStub(channel)\n return self._worker", "def run(self):\n self.channel.flushall()\n\n port = 1207\n clients_conn = f'tcp://*:{port}'\n self.clients_sock.bind(clients_conn)\n logging.info(f'Channel ready in {clients_conn}')\n logging.info(f'Public Endpoints: {self.endpoints}')\n\n workers_conn = 'inproc://workers'\n self.workers_sock.bind(workers_conn)\n\n # launch pool of working threads\n for i in range(self.num_workers):\n thread = threading.Thread(target=self.worker, name=f'worker{i}')\n # self.workers_pool.append(thread)\n thread.start()\n logging.info(f'Started worker{i}')\n\n zmq.device(zmq.QUEUE, self.clients_sock, self.workers_sock)", "def worker(self, worker):\n\n self._worker = worker", "def run(self):\r\n\r\n if not self.running:\r\n self.running = True\r\n\r\n self.thread = Thread(target = self.runner)\r\n self.thread.start()", "def start(self):\n \n rpc = self.smartstarter.rpcsystem\n \n process = yield self.smartstarter.start()\n \n try:\n \n make_worker_url = yield process.get_function_url(make_worker)\n make_worker_stub = rpc.create_function_stub(make_worker_url)\n \n worker = yield make_worker_stub(\"local\") # TODO remove network\n \n worker.get_function_url = process.get_function_url_stub\n \n worker.reset = rpc.create_local_function_stub(process.reset)\n worker.stop = rpc.create_local_function_stub(process.stop)\n worker.kill = rpc.create_local_function_stub(process.kill)\n worker.stdout = process.stdout.make_stub(rpc)\n worker.stderr = process.stderr.make_stub(rpc)\n worker.exited = process.exited.make_stub(rpc)\n\n except:\n process.kill()\n raise \n \n\n \n # worker.stdout.add_callback(stdout)\n # worker.stderr.add_callback(stderr)\n \n# receiver_stub = rpc.create_local_function_stub(hook.receiver)\n# hookinstall_url = yield process.get_function_url(hook.install_hook)\n# hookinstall_url_stub = rpc.create_function_stub(hookinstall_url)\n# yield hookinstall_url_stub(receiver_stub)\n \n defer.returnValue(worker)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the cosmology that is being used
def get_cosmology(cosmology=conf.cosmology): if cosmology.lower() not in available_cosmologies: raise ValueError( "Unrecognised cosmology {}. Available cosmologies are {}".format( cosmology, ", ".join(available_cosmologies) ) ) elif cosmology.lower() in _astropy_cosmologies: ind = [ num for num, name in enumerate(_astropy_cosmologies) if name == cosmology.lower() ][0] return getattr(cosmo, list(parameters.available)[ind]) elif cosmology.lower() == "planck15_lal": return Planck15_lal_cosmology() elif "_with_riess2019_h0" in cosmology.lower(): base_cosmology = cosmology.lower().split("_with_riess2019_h0")[0] return Riess2019_H0_cosmology(base_cosmology)
[ "def get_cosmology(self):\n opt_cosmology = 'cosmology'\n return self.config_parser.get(self.section_data_processing, opt_cosmology)", "def get_cosmology(self):\n return self.kernel.get_cosmology()", "def COSMO_DEFAULT():\n return ccl.Cosmology(Omega_c=0.26066676,\n Omega_b=0.048974682,\n h=0.6766,\n sigma8=0.8102,\n n_s=0.9665)", "def cosmology(self):\n try:\n self._cosmology.distmod(z=1)\n return self._cosmology\n except TypeError:\n raise TypeError('The cosmology must be given as '\n '`astropy.cosmology.core.COSMOLOGY`. It must be '\n 'possible to compute the distance modulus at '\n 'redshift 1 by calling `cosmology.distmod(z=1)`.')", "def get_cosmology_from_string(arg):\n if arg == \"no_default\":\n cosmo = None\n else:\n try:\n cosmo = getattr(sys.modules[__name__], arg)\n except AttributeError:\n s = \"Unknown cosmology '{}'. Valid cosmologies:\\n{}\".format(\n arg, parameters.available\n )\n raise ValueError(s)\n return cosmo", "def print_cosmology(params):\n print(\"Hubble constant in default cosmology, H0: \",params['cosmo'].H0,\" [km/s/Mpc]\")\n #print(\"Hubble constant in current epoch, H0: \",params['cosmo'].current_H0,\" [km/s/Mpc]\")", "def cosseries():\r\n def _cos():\r\n for term in integ(integ(-COS), Fraction(1, 1)):\r\n yield term\r\n COS = PowerSeries(_cos)\r\n return COS", "def set_cosmology( self, cosmo ):\n\n\t\t# see if the cosmology is changing - if so, we need to dump any stored models\n\t\tif self.cosmo is None:\n\t\t\tself.clear_cache()\n\t\telif self.cosmo.Om != cosmo.Om or self.cosmo.Ol != cosmo.Ol or self.cosmo.h != cosmo.h or self.cosmo.w != cosmo.w:\n\t\t\tself.clear_cache()\n\t\t\n\t\t# store the new cosmology\n\t\tself.cosmo = cosmo", "def get_cosmology_from_name(cosmology):\n\n # This list should be updated when astropy releases the Planck18 cosmology\n available_cosmologies = {\n \"WMAP5\": acosmo.WMAP5,\n \"WMAP7\": acosmo.WMAP7,\n \"WMAP9\": acosmo.WMAP9,\n \"Planck13\": acosmo.Planck13,\n \"Planck15\": acosmo.Planck15,\n }\n\n # If the user uses a string for the cosmology look it up in the dict.\n # If they specify a cosmology class, use that instead.\n if isinstance(cosmology, str):\n if cosmology in available_cosmologies.keys():\n cosmo = available_cosmologies[cosmology]\n else:\n msg = (f\"\"\"The cosmology '{cosmology}' is not in the list of\n available cosmologies with string keywords. 
The list\n if available cosmologies accessable via keyword are:\n {available_cosmologies.keys()}\"\"\")\n raise ValueError(msg)\n\n elif isinstance(cosmology, acosmo.core.FLRW):\n cosmo = cosmology\n\n return cosmo", "def get_coherence_character(self):\n \n return dict(electronic=0.0, vibrational=0.0, mixed=0.0)", "def covariates(self):\n return None", "def general_council(self):\n return self._general_council", "def crds_observatory(self):\n # Eventually ModelContainer will also be used for Roman, but this\n # will work for now:\n return \"jwst\"", "def set_cosmology(self, cosmo_dict):\n self.kernel.set_cosmology(cosmo_dict)\n self.D_z = self.kernel.cosmo.growth_factor(self.kernel.z_bar)\n self.halo.set_cosmology(cosmo_dict, self.kernel.z_bar)", "def _initialize_cosmology(cosmo_file):\n cosmol_params = MyConfigObj(cosmo_file, file_error=True)\n cosmo_func_name, cosmol_kwargs = _cosmology_setup(\n cosmol_params[\"cosmological_parameters\"])\n cosmo = cosmo_mapper[cosmo_func_name](**cosmol_kwargs)\n return cosmo", "def concentration1(self):\n return self._kumaraswamy_cdf.concentration1", "def concentration0(self):\n return self._kumaraswamy_cdf.concentration0", "def Planck15_lal_cosmology():\n return cosmo.LambdaCDM(H0=67.90, Om0=0.3065, Ode0=0.6935)", "def Corr_occ(self):\n return self.Corr_mos_occ" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the Planck15 cosmology coded up in lalsuite
def Planck15_lal_cosmology(): return cosmo.LambdaCDM(H0=67.90, Om0=0.3065, Ode0=0.6935)
[ "def get_cosmology(cosmology=conf.cosmology):\n if cosmology.lower() not in available_cosmologies:\n raise ValueError(\n \"Unrecognised cosmology {}. Available cosmologies are {}\".format(\n cosmology, \", \".join(available_cosmologies)\n )\n )\n elif cosmology.lower() in _astropy_cosmologies:\n ind = [\n num for num, name in enumerate(_astropy_cosmologies) if\n name == cosmology.lower()\n ][0]\n return getattr(cosmo, list(parameters.available)[ind])\n elif cosmology.lower() == \"planck15_lal\":\n return Planck15_lal_cosmology()\n elif \"_with_riess2019_h0\" in cosmology.lower():\n base_cosmology = cosmology.lower().split(\"_with_riess2019_h0\")[0]\n return Riess2019_H0_cosmology(base_cosmology)", "def printPolyCoeffs(lam) :\n ell = len(lam)\n useFormat = \"2.6e\"\n count = 0\n def printLine(s, count) :\n if lam[count] < 0 :\n s = s + 3 * \" \"\n else :\n s = s + 4 * \" \"\n s = s + \"{0:\" + useFormat + \"}\"\n print(s . format(lam[count]))\n count = count + 1\n return count\n if ell >= 1 :\n count = printLine(\"x0y0\", count)\n if ell >= 3 :\n count = printLine(\"x1y0\", count)\n count = printLine(\"x0y1\", count)\n if ell >= 6 :\n count = printLine(\"x2y0\", count)\n count = printLine(\"x1y1\", count)\n count = printLine(\"x0y2\", count)\n if ell >= 10 :\n count = printLine(\"x3y0\", count)\n count = printLine(\"x2y1\", count)\n count = printLine(\"x1y2\", count)\n count = printLine(\"x0y3\", count)\n if ell >= 15 :\n count = printLine(\"x4y0\", count)\n count = printLine(\"x3y1\", count)\n count = printLine(\"x2y2\", count)\n count = printLine(\"x1y3\", count)\n count = printLine(\"x0y4\", count)\n if ell >= 21 :\n count = printLine(\"x5y0\", count)\n count = printLine(\"x4y1\", count)\n count = printLine(\"x3y2\", count)\n count = printLine(\"x2y3\", count)\n count = printLine(\"x1y4\", count)\n count = printLine(\"x0y5\", count)\n if ell >= 28 :\n count = printLine(\"x6y0\", count)\n count = printLine(\"x5y1\", count)\n count = printLine(\"x4y2\", count)\n count = printLine(\"x3y3\", count)\n count = printLine(\"x2y4\", count)\n count = printLine(\"x1y5\", count)\n count = printLine(\"x0y6\", count)\n if ell >= 36 :\n count = printLine(\"x7y0\", count)\n count = printLine(\"x6y1\", count)\n count = printLine(\"x5y2\", count)\n count = printLine(\"x4y3\", count)\n count = printLine(\"x3y4\", count)\n count = printLine(\"x2y5\", count)\n count = printLine(\"x1y6\", count)\n count = printLine(\"x0y7\", count)\n if (ell > 36) or (ell < 1) :\n raise ValueError(\"Polynomial degree less than or equal to 7, please.\")", "def bmad_linac_phasing_lines(epics):\n lines = [\n '! Linac overall phasing',\n 'O_L1[phase_deg] = 0 ! K21_1 sets this directly. 
This is a delta on top of that.', \n 'O_L2[phase_deg] = '+str(epics.caget('SIOC:SYS0:ML00:CALC204')),\n 'O_L3[phase_deg] = '+str(epics.caget('SIOC:SYS0:ML00:AO499'))\n ]\n return lines", "def print_cosmology(params):\n print(\"Hubble constant in default cosmology, H0: \",params['cosmo'].H0,\" [km/s/Mpc]\")\n #print(\"Hubble constant in current epoch, H0: \",params['cosmo'].current_H0,\" [km/s/Mpc]\")", "def main():\n # peptide = 'NQEL'\n # spectrum = [0, 113, 114, 128, 129, 227, 242, 242, 257, 355, 356, 370, 371, 484]\n\n peptide = 'SDEGDLLHYGWR'\n\n cyclospectrum = theoretical_spectrum(peptide)\n # print(check_answer(cyclospectrum, spectrum))\n print(*cyclospectrum)", "def zen():\r\n koans = []\r\n for file in files.find({'code_name': 'koan'}):\r\n koans.append(file['payload'][0])\r\n H(); print('\\n'+random.choice(koans))\r\n controlCentre()", "def getYear1Catalog():\n data_file = os.path.join('..', 'data', 'phat_pcassign_v1.fits')\n data = pyfits.getdata(data_file)\n \n output = open('year1catalog.csv', 'w')\n for row in data:\n fieldname = row[0]\n cluster = row[4]\n x = row[5]\n y = row[6]\n pixradius = row[7]\n ra = row[8]\n dec = row[9]\n output.write(\"%s, %s, %s, %s, %s, %s, %s\\n\" % (fieldname, cluster, x, y, pixradius, ra, dec))\n \n output.close()", "def generate_l1ca_codes(self, prn):\n output_taps = self.l1_code_phase_assignments.loc[prn, 'CA_Phase_Select']\n g1 = self.generate_mls(10, self.g1_feedback_taps, [10])\n g2 = self.generate_mls(10, self.g2_feedback_taps, output_taps)\n ca_code = []\n for index, bit in enumerate(g1):\n ca_code.append(int((bit + g2[index]) % 2))\n return ca_code", "def model(self):\n return (\n \"Sky Control\"\n if self.__panel and self.__panel.data[\"pant\"] == 1\n else \"Smart Hub\"\n )", "def thesis_degree(self):\n if 'v51' in self.data['article']:\n return self.data['article']['v51'][0]['_']", "def crds_observatory(self):\n # Eventually ModelContainer will also be used for Roman, but this\n # will work for now:\n return \"jwst\"", "def get_cosmology(self):\n opt_cosmology = 'cosmology'\n return self.config_parser.get(self.section_data_processing, opt_cosmology)", "def get_lcls_specs():\n # runs in ascending order of incident photon energy:\n runs_to_get = [\"24\", \"23\", \"22\", \"21\", \"25\", \"27\", \"28\", \"32\", \"29\", \"30\"]\n spectra = []\n incident_photon_energies = []\n for run in runs_to_get:\n intensity = threshold_data[run][100:]\n spec = {\n \"x\": (np.arange(len(intensity)) + 100) * PIXELS_TO_EV\n + ANALYZER_KINETIC_ENERGY[run],\n \"y\": intensity,\n }\n spectra.append(spec)\n incident_photon_energies.append(INCIDENT_PHOTON_ENERGY[run])\n energy_spacing = np.mean(np.diff(spectra[0][\"x\"]))\n num_points = len(spectra[0][\"x\"]) * 2 - 1\n psf = get_extended_au4f_spectrum(energy_spacing, num_points)\n # psf = get_recorded_au4f_spectrum()\n to_return = {\n \"psf\": psf,\n \"spectra\": spectra,\n \"incident_photon_energy\": incident_photon_energies,\n }\n return to_return", "def load_cnl():\n cnl = {1: 38, 4: 14, 9: 31, 16: 6, 21: 42, 28: 84, 36: 44, 47: 26, 49: 11, 51: 67,\n 56: 53, 62: 19, 64: 60, 71: 91, 80: 100, 87: 24, 93: 73, 95: 75, 98: 78}\n\n return cnl", "def test_get_tone_from_IBM():\n comments = [\"This was a really sucky movie. I will probably never go see this movie ever again. I am going to \"\n \"tell my whole family never to watch this movie. I very much enjoyed the special cameo in it \"\n \"though. 
I loved the plot line.\"]\n tone_info_dictionary = get_tone_from_IBM(comments[0])\n\n tones = get_columns_from_IBM_tone(tone_info_dictionary)\n print(tones)", "def getcolorcodeALA15(ramapath, N, ssize=5):\n\n from analyse_ala_15 import AngleCategorizer\n\n nResidues = 15\n #angles = np.loadtxt('rama_dataset_ala_15.xvg', skiprows=32, usecols=range(0, 2), delimiter=' ')\n angles = np.loadtxt(os.path.join(ramapath, 'rama_dataset_ala_15_1500.xvg'), skiprows=32, usecols=range(0, 2), delimiter=' ')\n nSamples = angles.shape[0]/15\n angles.resize(nSamples, nResidues, 2)\n angCat = AngleCategorizer(angles)\n angCat.categorize()\n angCat.countConfigurations()\n colInd = angCat.getColorMatrix()\n alphaInd = angCat.getAlphaVals()\n\n marker = list()\n patchlist = list()\n\n marker.append('o')\n marker.append('o')\n marker.append('o')\n\n import matplotlib.patches as mpatches\n patchlist.append(mpatches.Patch(color='black', label=r'$\\alpha$'))\n patchlist.append(mpatches.Patch(color='blue', label=r'$\\beta$-1'))\n patchlist.append(mpatches.Patch(color='red', label=r'$\\beta$-2'))\n\n alpha = plt.scatter(0, 1, c='k', marker=marker[0], s=ssize, label=r'$\\alpha$')\n beta1 = plt.scatter(0, 1, c='b', marker=marker[1], s=ssize, label=r'$\\beta\\textnormal{-}1$')\n beta2 = plt.scatter(0, 1, c='r', marker=marker[2], s=ssize, label=r'$\\beta\\textnormal{-}2$')\n plt.close()\n\n patchlist = [alpha, beta1, beta2]\n\n return colInd, marker, patchlist, alphaInd", "def __cologne_phonetics(self, word: str) -> str:\n\n \"\"\"\n The following table lists the rules of Cologne phonetics.\n | Letter | Context | Code |\n | :---------------------------------------------------------: | :---------------------------------------------------: | :--: |\n | A, E, I, J, O, U, Y | | 0 |\n | H | | - |\n | B | | 1 |\n | P | not before H | |\n | D, T | not before C, S, Z | 2 |\n | F, V, W | | 3 |\n | P | before H | |\n | G, K, Q | | 4 |\n | C | in the initial sound before A, H, K, L, O, Q, R, U, X | |\n | before A, H, K, O, Q, U, X except after S, Z | | |\n | X | not after C, K, Q | 48 |\n | L | | 5 |\n | M, N | | 6 |\n | R | | 7 |\n | S, Z | | 8 |\n | C | after S, Z | |\n | in initial position except before A, H, K, L, O, Q, R, U, X | | |\n | not before A, H, K, O, Q, U, X | | |\n | D, T | before C, S, Z | |\n | X | after C, K, Q | |\n \"\"\"\n\n REGEX_RULES = [\n (r'ä', 'a'),\n (r'ö', 'o'),\n (r'ü', 'u'),\n (r'ß', '8'),\n (r'[^a-z]', ''),\n (r'[dt](?![csz])', '2'),\n (r'[dt](?=[csz])', '8'),\n (r'[ckq]x', '88'),\n (r'[sz]c', '88'),\n (r'^c(?=[ahkloqrux])', '4'),\n (r'^c', '8'),\n (r'(?<![sz])c', '4'),\n (r'x', '48'),\n (r'p(?!h)', '1'),\n (r'p(?=h)', '3'),\n (r'h', ''),\n (r'[aeijouy]', '0'),\n (r'b', '1'),\n (r'[fvw]', '3'),\n (r'[gkq]', '4'),\n (r'l', '5'),\n (r'[mn]', '6'),\n (r'r', '7'),\n (r'[csz]', '8'),\n (r'([^\\w\\s])|(.)(?=\\2)', ''),\n (r'\\B0', '')\n ]\n\n for sub in REGEX_RULES:\n word = re.sub(sub[0], sub[1], word, flags=re.IGNORECASE)\n\n return word", "def get_cosmology_from_name(cosmology):\n\n # This list should be updated when astropy releases the Planck18 cosmology\n available_cosmologies = {\n \"WMAP5\": acosmo.WMAP5,\n \"WMAP7\": acosmo.WMAP7,\n \"WMAP9\": acosmo.WMAP9,\n \"Planck13\": acosmo.Planck13,\n \"Planck15\": acosmo.Planck15,\n }\n\n # If the user uses a string for the cosmology look it up in the dict.\n # If they specify a cosmology class, use that instead.\n if isinstance(cosmology, str):\n if cosmology in available_cosmologies.keys():\n cosmo = available_cosmologies[cosmology]\n else:\n msg = 
(f\"\"\"The cosmology '{cosmology}' is not in the list of\n available cosmologies with string keywords. The list\n if available cosmologies accessable via keyword are:\n {available_cosmologies.keys()}\"\"\")\n raise ValueError(msg)\n\n elif isinstance(cosmology, acosmo.core.FLRW):\n cosmo = cosmology\n\n return cosmo", "def _curtis_label(cls, stage):\r\n\t\tletter = ('C' if stage <= 2 else 'R')\r\n\t\treturn (letter + str(stage) if letter == 'C' else letter + str(stage - 2))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the base cosmology but with the Riess2019 H0 value. For details
def Riess2019_H0_cosmology(base_cosmology): _base_cosmology = get_cosmology(base_cosmology) return cosmo.LambdaCDM( H0=74.03, Om0=_base_cosmology.Om0, Ode0=_base_cosmology.Ode0 )
[ "def H0_def(h):\n return H0_over_h*h", "def getChemicalZero(self):\n return self.solver.getChemicalZero()", "def sound_horizon_EH(self):\n om_m = self.omega_cb\n om_b = self.omega_b\n om_n = np.sum(self.omega_nu)\n h = self.h \n if self.M_nu_tot == 0.: rs = 44.5*np.log(9.83/om_m)/np.sqrt(1+10*om_b**0.75)*h\n else: rs = 55.154*np.exp(-72.3*(om_n+0.0006)**2.)/(om_m**0.25351*om_b**0.12807)*h\n return rs", "def print_cosmology(params):\n print(\"Hubble constant in default cosmology, H0: \",params['cosmo'].H0,\" [km/s/Mpc]\")\n #print(\"Hubble constant in current epoch, H0: \",params['cosmo'].current_H0,\" [km/s/Mpc]\")", "def init_physical(\n ombh2=0.022161, omch2=0.11889, H0=67.77, omkh2=0.0, t0=2.726, nnu=3.046\n ):\n h = H0 / 100.0\n\n c = Cosmology()\n\n c.omega_b = ombh2 / h ** 2\n c.omega_c = omch2 / h ** 2\n c.H0 = H0\n\n rhoc = 3.0 * c.H() ** 2 * c_sl ** 2 / (8.0 * math.pi * G_n)\n rhorad = a_rad * t0 ** 4\n c.omega_g = rhorad / rhoc\n\n rhonu = nnu * rhorad * 7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0)\n c.omega_n = rhonu / rhoc\n\n c.omega_l = 1.0 - (omkh2 + ombh2 + omch2) / h ** 2 - (c.omega_g + c.omega_n)\n\n return c", "def concentration0(self):\n return self._kumaraswamy_cdf.concentration0", "def get_rzero(self):\n return self.get_resistance() * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB))", "def zero_h():\n a = dicotomie(h,-3,2)\n return a", "def Hc(self):\n return (self.mol * [i.Hc or 0 for i in self._species._compounds]).sum()", "def hc2hhsi(hc):\n\n import numpy as np\n\n ####################################################################################################################\n # Calculate the components c\n rows = hc.shape[0]\n cols = hc.shape[1]\n dims = hc.shape[2]\n\n c = np.zeros((rows, cols, dims-1))\n for i in range(dims - 1):\n nonZeroEle = dims - i # nonZeroEle is the number of non-zero elements of the base unit vector u1, u2, ...\n c[:, :, i] = (nonZeroEle - 1) ** 0.5 / nonZeroEle ** 0.5 * hc[:, :, i] \\\n - 1 / ((nonZeroEle - 1) ** 0.5 * nonZeroEle ** 0.5) * np.sum(hc[:, :, i+1:dims], axis=2)\n ####################################################################################################################\n\n # Normalise the norms of c to 1 to obtain hyper-hue hh.\n c_norm = np.sum(c ** 2, axis=2) ** 0.5\n c_norm = c_norm + (c_norm == 0) * 1e-10\n c_norm = np.tile(c_norm, (dims - 1, 1, 1))\n c_norm = np.moveaxis(c_norm, 0, -1)\n hh = c / c_norm # add 1e-10 to avoid zero denominators\n\n # Saturation\n s = hc.max(2) - hc.min(2)\n # s = np.amax(hc, axis=2) - np.amin(hc, axis=2) # The same as above\n\n # Intensity\n i = 1/dims * np.sum(hc, 2)\n\n return hh, s, i", "def COSMO_DEFAULT():\n return ccl.Cosmology(Omega_c=0.26066676,\n Omega_b=0.048974682,\n h=0.6766,\n sigma8=0.8102,\n n_s=0.9665)", "def CHb_from_RHbHa(RHbHa, balmer0=2.874):\n return np.log10(balmer0*RHbHa) / flambda(6563)", "def Hubble_convert(H_0):\r\n result = H_0*1000.0*3.1536*10**13/(3.09*10**16)/10**6 #This formula convert the Hubble parameter from\r\n #km/s/Mpc to Myr^-1 in order to match the unit convention in this program\r\n return result", "def h2o_from_rh_and_temp(RH, TEMP):\n TC = TEMP - 273.15\n frh = RH / 100.\n svp_millibar = 6.11 * 10**((7.5 * TC)/(TC+237.3))\n svp_pa = svp_millibar * 100\n vp_pa = svp_pa * frh\n molecule_per_cubic_m = vp_pa * Avogadro / R / TEMP\n molecule_per_cubic_cm = molecule_per_cubic_m * centi**3\n #print RH, TEMP, molecule_per_cubic_cm\n return molecule_per_cubic_cm", "def get_hcore1(mol, atom, coord):\n\n mf = scf.RHF(mol)\n g = 
grad.rhf.Gradients(mf)\n\n hcore1 = g.hcore_generator(mol)(atom)[coord]\n\n # omega = np.identity(2)\n # hcore1 = np.kron(omega, hcore1)\n\n return hcore1", "def ST_zero_flux(self):\n return 10 ** (-0.4 * self.ST_zero_mag) * Unit('erg*s**-1*cm**-2*AA**-1')", "def beam_irradiance_horizontal_clear(B0c, h0):\n Bhc = B0c * np.sin(h0 * pi / 180)\n Bhc[h0 < 0] = 0\n\n return Bhc", "def calculate_rh(self):\n # Check for existence of relative humidity and mixing ratio\n if self.data.get('Relative_Humidity') is None:\n if self.data.get('Mixing_Ratio') is None:\n raise KeyError('Calculate mixing ratio first!')\n else:\n # Convert mixing ratio to relative humidity\n sat_vapor = 6.11 * (10.0**((7.5 * self.data['Temperature_C']) /\n (237.7 + self.data['Temperature_C'])))\n\n sat_w = 621.97 * (sat_vapor / (self.data['Pressure'] -\n sat_vapor))\n\n self.data['Relative_Humidity'] = ((self.data['Mixing_Ratio'] /\n sat_w) * 100.0)", "def _calculate_strehl(self):\n\n self.strehl = np.exp(-1*((2*np.pi/self.science_wavelength)*self.high_order_wfe)**2)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the supported components e.g. set(['mmic_autodock_vina',...]). Returns Set[str]
def tactic_comps(cls) -> Set[str]: return set(["mmic_autodock_vina"])
[ "def supported_components() -> Set[Type[\"Component\"]]:\n return set()", "def get_supported_components(self):\n props = [cdav.SupportedCalendarComponentSet()]\n response = self.get_properties(props, parse_response_xml=False)\n response_list = response.find_objects_and_props()\n prop = response_list[unquote(self.url.path)][\n cdav.SupportedCalendarComponentSet().tag\n ]\n return [supported.get(\"name\") for supported in prop]", "def supported_components() -> List[\"Type[Component]\"]:\n return [PowerOutput, Piezo, Button, BatterySensor, LED]", "def supported_constructs(self) -> Set[Construct]:\n config: Dict[str, bool] = self.options.get(\"constructs\", {})\n result = set()\n for construct, supported in config.items():\n if supported:\n result.add(Construct[construct.upper()])\n return result", "def tactic_comps(cls) -> Set[str]:\n ...", "def registered_components():\n return list(_components.keys())", "def getComponentNames(self):\n return set(c.getName() for c in self.iterComponents())", "def get_supported_models(self):\n return list(algo_name.value for algo_name in ObjDetModel)", "def get_supported_engines(self):", "def vendor_list():\n return ['nxos', 'eos', 'cumulus']", "def list_supported_models() -> Sequence[str]:\r\n return list(_MODELS)", "def supported_kinds():\n return list(UNITS.keys())", "def components(self):\n all_components = set()\n for v in self._comp_source().values():\n if isinstance(v, ModelComponent):\n all_components.add(v)\n elif isinstance(v, _ComputeModelComponents):\n all_components |= v.components()\n return all_components", "def list_uses(self):\n return list(set(self._prop_typology['USE'].values))", "def _available_algorithms(**_: str) -> Set[str]:\n avail = set()\n pass2 = set()\n for algo in hashlib.algorithms_available:\n lalgo = algo.lower()\n if \"with\" in lalgo:\n continue # skip apparently redundant ones\n if lalgo != algo:\n pass2.add(algo)\n else:\n avail.add(lalgo)\n for algo in pass2:\n if algo.lower() not in avail:\n avail.add(algo)\n return avail", "def get_supported_browsers_suggestions():\n supported_browsers = [\n 'chrome',\n 'chrome-remote',\n 'chrome-headless',\n 'chrome-remote-headless',\n 'firefox',\n 'firefox-remote',\n 'ie',\n 'ie-remote'\n ]\n return supported_browsers", "def _get_components(include_components=None):\n _components = []\n\n if is_installed(DATABASE_SERVICE):\n _components += [components.PostgresqlServer()]\n\n if is_installed(QUEUE_SERVICE):\n _components += [components.RabbitMQ()]\n\n if is_installed(MANAGER_SERVICE):\n _components += [\n components.Manager(),\n components.PostgresqlClient(),\n components.RestService(),\n components.ManagerIpSetter(),\n components.Nginx(),\n components.Cli(),\n components.AmqpPostgres(),\n components.MgmtWorker(),\n components.Stage(),\n ]\n if (\n is_premium_installed()\n and not config[COMPOSER]['skip_installation']\n ):\n _components += [\n components.Composer(),\n ]\n _components += [\n components.UsageCollector(),\n ]\n if not config[SANITY]['skip_sanity']:\n _components += [components.Sanity()]\n\n if is_installed(MONITORING_SERVICE):\n _components += [components.Prometheus()]\n if not is_installed(MANAGER_SERVICE):\n _components += [components.Nginx()]\n\n if include_components:\n _components = _filter_components(_components, include_components)\n return _components", "def required_components(cls) -> List[Type[Component]]:\n\n return []", "def detect_supported_caps():\n result = []\n # generate list of supported capabilities\n\n # Intel RDT L3 CAT\n if 
common.PQOS_API.is_l3_cat_supported():\n result.append(common.CAT_L3_CAP)\n\n # Intel RDT L2 CAT\n if common.PQOS_API.is_l2_cat_supported():\n result.append(common.CAT_L2_CAP)\n\n # Intel RDT MBA\n if common.PQOS_API.is_mba_supported():\n result.append(common.MBA_CAP)\n\n if sstbf.is_sstbf_enabled():\n result.append(common.SSTBF_CAP)\n\n if power.is_sstcp_enabled():\n result.append(common.POWER_CAP)\n\n return result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load the specified mojofile, and return its model id.
def load_model(self, mojofile: str) -> str: return self._request("GET /loadmojo", params={"file": mojofile})
[ "def load_model(self, filename):\r\n pass", "def load_model(self, filename):\n pass", "def load_model(filename):\n return Model.load_savefile(filename)", "def load(path_to_model):\n pass", "def load_model(filename):\n model = joblib.load(filename)\n\n return model", "def load(filepath='last_model.p'):\n try: \n import cPickle as pickle\n except ImportError:\n import pickle\n\n with open(filepath, 'rb') as f:\n var = pickle.load(f)\n return var", "def load_model(filepath):\n\n return joblib.load(filepath)", "def load_model(work_dir, config_id):\n config_id = '_'.join([str(i) for i in config_id])\n model_file_name = work_dir + '/models/'\n model_file_name += 'hp_' + config_id\n g = glob(model_file_name + \"*.pkl\")\n if len(g) == 0:\n return None, 0\n else:\n # Assume ony single model is available.\n # Return the model and the budget\n model = torch.load(g[0])\n\n budget = g[0].split('_')[-1]\n budget = budget.rstrip('.pkl')\n budget = float(budget)\n return model, budget", "def import_model(self):\n gen_path = os.path.join(smtk.testing.DATA_DIR,\n 'model/3d/genesis/filling1.gen')\n import_op = smtk.session.vtk.Import.create()\n import_op.parameters().findFile('filename').setValue(gen_path)\n # Demonstrate/test use of safeOperate() instead of operate():\n resource = None\n\n def handler(op, result):\n nonlocal resource\n resource = smtk.model.Resource.CastTo(\n result.find('resource').value())\n outcome = import_op.safeOperate(handler)\n print('Outcome = ', outcome)\n self.assertEqual(outcome, smtk.operation.Operation.Outcome.SUCCEEDED,\n 'Failed to import model file {}'.format(gen_path))\n print('Resource is ', resource)\n return resource", "def load_model(mfile):\n mname = mfile.split('.py')[0].replace('/', '.')\n try:\n mod = __import__(mname, fromlist=['model'])\n print(\"loaded {} {} {}\".format(mfile, mod.model, mod.model.__doc__))\n return mod.model\n except ImportError:\n traceback.print_exc()\n msg = \"Please provide python module which implements model function.\\n\"\n msg += \"The input file name should be visible through PYTHONPATH\"\n print(msg)\n raise", "def load_model(task_id):\n # get model file name\n task_chain_id = task_id.split('-')[0]\n\n root_dir = os.path.split(os.path.realpath(__file__))[0]\n model_path = os.path.join(root_dir, '..', 'common', 'model', task_chain_id)\n model_file_name = os.path.join(model_path, task_id + '.model')\n if not os.path.exists(model_file_name):\n raise Exception(\"Algorithm load_model not find model {}\".format(model_file_name))\n # load mode from disk\n model = load(model_file_name)\n\n return model", "def load_model(file_path):\n if not os.path.isabs(file_path):\n file_path = os.path.abspath(file_path)\n if os.path.exists(file_path):\n with open(file_path, 'rb') as f:\n try:\n return pickle.load(f)\n except Exception as e:\n print(\"Couldn't load Gnarl model from file %s\" % file_path)", "def load_model(language_id, model_type):\n\n # getting the language code from it's id\n language_code = get_language_code(language_id)\n\n # getting the model name from it's type\n model_name = get_model_name(model_type)\n\n # building the model's full path\n model_full_path = \"%s/%s/%s.txt\" % (models_base_path, language_code, model_name)\n\n # returning the model loaded directly from file\n return load_model_from_file(model_full_path)", "def import_model(path):\n try:\n return torch.jit.load(path)\n except RuntimeError:\n try:\n return torch.load(path) # loads model as a nn.Module\n except Exception as e:\n raise IOError(\"Could not load file. 
Please save as torch.jit.ScriptModule instead.\") from e", "def load_model_by_name(model, global_step, device=None, path=\"/scratch/users/zucks626/ADNI/IPMI/checkpoints/\"):\r\n # path = \"/scratch/users/zucks626/ADNI/ae_cls/checkpoints/\"\r\n file_path = path + model.name + \"/\" + 'model-{:05d}.pt'.format(global_step)\r\n state = torch.load(file_path, map_location=device)\r\n model.load_state_dict(state)\r\n print(\"Loaded from {}\".format(file_path))", "def import_model(file):\n file = os.path.expanduser(file)\n obj = IsolationForest()\n metadata = obj._cpp_obj.deserialize_obj(file)\n metadata = json.loads(metadata)\n obj._take_metadata(metadata)\n return obj", "def load_model(filename):\n checkpoint = torch.load(filename)\n model = QNetwork(checkpoint['input_size'], checkpoint['output_size'], checkpoint['hidden_layers'])\n model.load_state_dict(checkpoint['state_dict'])\n return model", "def _loadModel(self, filename):\n with open(filename, 'r') as f:\n lda = pickle.load(f)\n return lda", "def _load(path):\n status = KerasOpenVINOModel._load_status(path)\n if status.get('xml_path', None):\n xml_path = Path(status['xml_path'])\n invalidInputError(xml_path.suffix == '.xml',\n \"Path of openvino model must be with '.xml' suffix.\")\n else:\n invalidInputError(False, \"nano_model_meta.yml must specify 'xml_path' for loading.\")\n xml_path = Path(path) / status['xml_path']\n return KerasOpenVINOModel(xml_path)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Shutdown / kill the server. Sometimes the ``POST /shutdown`` request may fail. In any case we attempt to terminate the process with the SIGKILL signal if it still seems to be running.
def shutdown(self): try: self._request("POST /shutdown") time.sleep(0.300) except requests.exceptions.ConnectionError: pass if self._process and self._process.poll() is None: self._process.kill() if self._session: self._session.close()
[ "def shutdown():\n os.kill(os.getpid(), signal.SIGTERM)", "def shutdown(self) -> None:\n prefix = f\"In {ThreadedServer.__name__}.{ThreadedServer.shutdown.__name__}\"\n\n print(f\"{prefix}: Instructing the server to shut down...\", file=self.stdout)\n with self._server_exception_lock:\n if self._server_exception is not None:\n raise self._server_exception\n\n print(f\"{prefix}: Waiting for server to shut down...\", file=self.stdout)\n self._httpd.shutdown()", "def shutdown(self):\n self.logger.info(\"Received graceful shutdown request\")\n self.stop()", "def _HandleShutdown(self):\n self.send_response(httplib.OK)\n self.send_header('Content-Type', 'text/plain')\n self.end_headers()\n self.wfile.write('API Server Quitting')\n self.server.shutdown()", "def shutdown(self):\n self.broadcast(self.server_socket, '[server shutdown]', 'server')\n self.selector.unregister(self.server_socket)\n self.server_socket.close()", "def shutdown(self):\n self.req_shutdown = True", "def shutdown():\n LIB.tfg_pitc_Terminate()", "async def kill_server(self):\n if await self._kill():\n await self.send('Server killed')", "def kill_server(self):\n self.run_cmd('kill-server')", "def shutdown(self):\n # shutdown all known sessions\n for session in self.sessions.values():\n session.shutdown()\n\n # if we are a daemon remove pid file\n if self.config[\"daemonize\"]:\n pid_file = self.config[\"pidfile\"]\n try:\n os.unlink(pid_file)\n except OSError:\n logger.exception(\"error daemon pid file: %s\", pid_file)\n\n # remove server from server list\n CoreServer.remove_server(self)", "def shutdown_server(self):\n try:\n ans = self.xmlproxy.shutdown()\n except socket_error as err:\n self.class_logger.info(\"xmlrpc shutdown complete. (DEBUG: {0})\".format(err))\n except XmlrpcProtocolError as err:\n self.class_logger.info(\"xmlrpc shutdown complete. (DEBUG: {0})\".format(err))\n except Exception as err:\n self.class_logger.info(\"xmlrpc shutdown expected error: {0} - {1}\".format(type(err), err))\n else:\n self.class_logger.info(\"xmlrpc shutdown query answer: %s\" % (ans, ))\n # except socket.error, err:\n # if err[0] == 111:\n # print \"!\"*100\n # print \"ERR '{0}' handled\".format(err)\n # else:\n # raise", "def shutdown(self):\n os.remove('/tmp/mimic_daemon')\n for address, p in self._connections.iteritems():\n if not p.returncode:\n p.terminate()\n self.daemon.shutdown()", "def shutdown(event):\n elements.REMOTE_SERVER.shutdown()\n disconnect(None)", "def shutdown(self):\r\n self.initiate_shutdown()\r\n self.wait_for_shutdown()", "def shutdown():\n\n # Earlier versions of traffic_ctl do not support\n # \"server stop\", so we prefer traffic_line here.\n if _TRAFFICLINE:\n cmd = _traffic_line(\"-S\")\n else:\n cmd = _traffic_ctl(\"server\", \"stop\")\n\n _subprocess(cmd)\n return _statuscmd()", "def close(self):\n if self.render_app:\n requests.post('http://127.0.0.1:8050/shutdown')\n return", "def cmd_web_service_shutdown(self, arg):\n server_app.web_shutdown()", "def shutdown(self) -> None:\n if self.is_alive():\n self.terminate()\n else:\n logger.warning(\"DHT shutdown has no effect: dht process is already not alive\")", "def on_exit(self, event):\n # Close server\n if hasattr(self, 'webapp'):\n requests.get(ROOT_URL + '/shutdown')\n self.webapp = None\n\n # Close app\n sys.exit()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update the kernelspecs table.
def refresh_kernelspecs() -> None: ...
[ "def refresh_kernels() -> None:\n ...", "def update_spec(self, req, key, opts, spec):", "def _rename_appkernels_mod_appkernel(self) -> None:\n dry_run = akrr.dry_run\n update_app_kernel_def = True\n update_app_kernel = True\n update_app_kernel_def_list = True\n\n con_appkernel, cur_appkernel = self.get_akrr_db_con(\"mod_appkernel\", dict_cursor=True)\n\n log.info(\"Updating mod_appkernel\")\n if update_app_kernel_def:\n log.info(\"Updating app_kernel_def\")\n for old, new in ak_rename.items():\n log.debug(\"update app_kernel_def: %s -> %s\" % (new, old))\n cursor_execute(\n cur_appkernel,\n \"update app_kernel_def set ak_base_name=%s where ak_base_name=%s\", (new, old), dry_run)\n if not dry_run:\n con_appkernel.commit()\n\n if update_app_kernel:\n log.info(\"Updating app_kernel\")\n for old, new in ak_rename.items():\n log.debug(\"update app_kernel: %s -> %s\" % (new, old))\n cursor_execute(\n cur_appkernel,\n \"update app_kernel set name=%s where name=%s\", (new, old), dry_run)\n if not dry_run:\n con_appkernel.commit()\n\n if update_app_kernel_def_list:\n log.info(\"Updating mod_appkernel.app_kernel_def\")\n from akrr.cli.generate_tables import populate_mod_appkernel_app_kernel_def\n populate_mod_appkernel_app_kernel_def(con_appkernel, cur_appkernel, dry_run)", "def update_devices_info(self):\n for beo_key in self.beo_keys:\n self.update_deviceinfo(beo_key)", "def test_update_software_components_for_system_module(self):\n pass", "def test_list_drives_drive_firmware_update(self):\n pass", "def update_spec(spec, nodes):\n\n lookup = dict([(n.hostname, n) for n in nodes])\n assert len(lookup) == len(nodes)\n\n assert len(spec.machines) == len(nodes)\n spec.machines = nodes\n\n for group in spec.inventory:\n assert len(group) == 1, group\n groupname = group.keys()[0]\n hostnames = group.values()[0]\n\n for i in xrange(len(hostnames)):\n hostname = hostnames[i]\n group[groupname][i] = lookup[hostname]", "def command_update_hw(self, cmd):\n # TODO\n pass", "def system_update_keyspace(self, ks_def):\r\n pass", "def find_kernel_specs(self):\n kspecs = super(TestKernelSpecManager, self).find_kernel_specs()\n\n # add conda envs kernelspecs\n kspecs.update({name: spec.resource_dir\n for name, spec\n in self._kspecs.items()})\n return kspecs", "def test_update_pci_device(self):\n pass", "def initTable(cls):\n\n # Always include the data kernels first\n Kernel.kernelTable[\"IMAGE\"] = Kernel(\"IMAGE\", \"I\", \"I\")\n Kernel.kernelTable[\"BUFFER\"] = Kernel(\"BUFFER\", \"B\", \"B\")\n Kernel.kernelTable[\"SCALAR\"] = Kernel(\"SCALAR\", \"S\", \"S\")\n\n for node in vxNodeTable:\n k = Kernel.fromNodeTableEntry(node)\n Kernel.kernelTable[k.name] = k", "def update_block_devices(self, instance_conf):\n b = 'block_device_map'\n if b in instance_conf:\n disks = BlockDeviceMapping()\n for device in self.make_list(instance_conf[b]):\n device_args = self.get_conf('device:%s' % device)\n disks[device] = BlockDeviceType(**device_args)\n instance_conf.update({ b : disks })", "def disk_recfg(self):\n devices = []\n edit = True\n host = Query.get_obj(self.virtual_machines.view, self.opts.name)\n disk_cfg_opts = {}\n # KB\n tokbytes = 1024*1024\n label = self.opts.disk_prefix + ' ' + str(self.opts.disk_id)\n try:\n key, controller = Query.get_key(host, label)\n except IOError:\n pass\n if self.opts.disk_id:\n for item in host.config.hardware.device:\n if label == item.deviceInfo.label:\n disk_new_size = self.opts.sizeGB * tokbytes\n current_size = item.capacityInKB\n current_size_gb = int(current_size / 
(1024*1024))\n if disk_new_size == current_size:\n raise ValueError(\n 'New size and existing size are equal'.format()\n )\n if disk_new_size < current_size:\n raise ValueError(\n 'Size {0} does not exceed {1}'.format(\n disk_new_size, current_size\n )\n )\n disk_delta = disk_new_size - current_size\n ds_capacity_kb = item.backing.datastore.summary.capacity / 1024\n ds_free_kb = item.backing.datastore.summary.freeSpace / 1024\n threshold_pct = 0.10\n if (ds_free_kb - disk_delta) / ds_capacity_kb < threshold_pct:\n raise ValueError(\n '{0} {1} disk space low, aborting.'.format(\n host.resourcePool.parent.name,\n item.backing.datastore.name\n )\n )\n\n disk_cfg_opts.update(\n {\n 'size' : disk_new_size,\n 'key' : key,\n 'controller' : controller,\n 'unit' : item.unitNumber,\n 'filename' : item.backing.fileName\n }\n )\n if disk_cfg_opts:\n devices.append(self.disk_config(edit=edit, **disk_cfg_opts))\n self.logger.info(\n '%s label: %s %s current_size: %s new_size: %s', host.name,\n self.opts.disk_prefix, self.opts.disk_id, current_size_gb, self.opts.sizeGB\n )\n self.reconfig(host, **{'deviceChange': devices})", "def test_update_software_component_for_system_module(self):\n pass", "def _populate_filesystem_info(self):\n st_dev = os.stat(self.volume).st_dev\n devid = os.makedev(os.major(st_dev), os.minor(st_dev))\n directory = mkdtemp(prefix='devnode-')\n devnode = os.path.join(directory, 'rootdev')\n os.mknod(devnode, 0o400 | stat.S_IFBLK, devid)\n try:\n for tag in BLKID_TAGS:\n try:\n out = subprocess.Popen(['blkid', '-s', tag,\n '-ovalue', devnode],\n stdout=subprocess.PIPE\n ).communicate()[0]\n self.filesystem[BLKID_TAGS[tag]] = out.rstrip()\n except subprocess.CalledProcessError:\n pass\n finally:\n os.remove(devnode)\n os.rmdir(directory)", "def test_update_device(self):\n pass", "def update(self):\n for component in self.components.values():\n try:\n component.update()\n except Exception as e:\n if self.ds.isFMSAttached():\n log.error(\"In subsystem %s: %s\" % (component, e))\n else:\n raise e", "def _update_device_types(self):\n device_types = self.adapter.device_types()\n for device_type in device_types.items:\n key = device_type.id\n self._make_up_to_date('/device_types', key, device_type)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a new terminal and returns the name.
def create_terminal() -> str: ...
[ "def create_terminal(self, terminal_name=None, dump_received=None,\n dump_outgoing=None):\n if terminal_name is None:\n terminal_name = self._default_terminal_name\n\n if terminal_name in self._terminals:\n raise TerminalAlreadyExists(\n \"Terminal name exists in terminals list: \" + terminal_name)\n\n s = self._session_factory.allocate(dump_received, dump_outgoing)\n self._terminals[terminal_name] = s\n return terminal_name", "def addTerminal(self, name, **opts):\n opts.update(renamable=True, removable=True)\n name = self.nextTerminalName(name)\n term = NetTerminal(self, name, **opts)\n self.terminals[name] = term\n if term.isInput():\n self._inputs[name] = term\n elif term.isOutput():\n self._outputs[name] = term\n self.graphicsItem().updateTerminals()\n self.sigTerminalAdded.emit(self, term)\n return term", "def console_create(self):\n return self.call('console.create')", "def ttyname(fd):\n return ''", "def New(self):\n prefix = \" \" * self.chainlen\n name = \"\"\n suffix = \"\"\n while True:\n suffix = self.mcd.get_suffix(prefix)\n if suffix == \"\\n\" or len(name) > 9:\n break\n else:\n name = name + suffix\n prefix = prefix[1:] + suffix\n if name.lower() in NAMES:\n GenName().New()\n return name.capitalize()", "def terminal(workspace, directory=None,\n command=None, options=[],\n new_win_name=\"launched term\",\n profile=\"default\"):\n prog_array = [\"gnome-terminal\",f'--window-with-profile={profile}']\n if directory is not None:\n prog_array.append(f'--working-directory={directory}')\n prog_array.extend(options)\n\n if command is not None:\n prog_array.append('--')\n prog_array.extend(shlex.split(command))\n\n get_wid = lambda old, pid: get_wid_by_title(old, \"Terminal\")\n\n if \"Terminal\" in new_win_name.split()[0]:\n raise ValueError(\"The new name for terminal can't start with anything containing Terminal\")\n\n return launch_and_move(prog_array, workspace, get_wid, new_win_name)", "def create_prompt(name):\r\n return '\\n[' + Fore.LIGHTBLACK_EX + name.upper() + Fore.WHITE + '] '", "def get_custom_terminal_cmd():\n return lnp.userconfig.get_string('terminal')", "def create_namespace(self):\n print(\"\\nCreating namespace...\")\n\n name = input(\" - name (default = commands): \") or \"commands\"\n path = \"./{}\".format(name.replace(\".\", \"/\")).lower()\n\n os.makedirs(path, exist_ok=True)\n\n init_path = os.path.join(path, \"__init__.py\")\n if not os.path.isfile(init_path):\n open(init_path, 'w+').close()\n\n return name, path", "def _create_unique_turtle_name(self):\n\n\t\tself._id_counter += 1\n\t\tnew_name = \"turtle{}\".format(self._id_counter)\n\n\t\tif self._has_turtle(new_name):\n\t\t\treturn self._create_unique_turtle_name()\n\n\t\treturn new_name", "def create_turtle():\n name = turtle.Turtle()\n name.speed(0)\n name.ht()\n turtle.colormode(255)\n color = (randrange(256), randrange(256), randrange(256))\n name.color(color)\n name.fillcolor(color)\n return name", "def create_character(name):\n\n # Create char definiton\n name += '_HIK'\n if mc.objExists(name):\n mm.eval('hikSetCurrentCharacter \"{0}\"; hikDeleteDefinition'.format(name))\n update_hik_UI()\n\n result = mm.eval('hikCreateCharacter( \"{0}\" )'.format(name))\n\n return name", "def create_name (self):\n return self.create_topic().create_name('Name')", "def create_device(name, device_type, runtime):\n command = 'create \"%s\" \"%s\" \"%s\"' % (\n name, device_type.identifier, runtime.identifier)\n device_id = _run_command(command)\n\n # The device ID has a new line at the end. 
Strip it when returning.\n return device_id[:-1]", "def create_ScratchGDB_name(in_name):\n\n return r\"{0}\\{1}\".format(arcpy.env.scratchGDB, in_name)", "def NewName(self) -> str:", "def create(self, playername):\n if playername not in players:\n players[playername] = Turtle()\n\n padde = players[playername]\n padde.color(playername)\n padde.shape(\"turtle\")\n padde.penup()", "def terminal_name(arg_terminal):\n if not arg_terminal:\n return 'до двери'\n curs_dict = APP.conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n terminal_sql = curs_dict.mogrify(TERMINAL_SQL_TEMPL, (arg_terminal,))\n logging.info('terminal_sql=%s', terminal_sql)\n curs_dict.execute(terminal_sql)\n res = curs_dict.fetchone()\n curs_dict.close()\n return '{}, {}'.format(res.get('name', 'Название терминала по id не найдено'),\n res.get('address', 'Адрес терминала по id не найден'))", "def createname(cls):\n name = config.get(\"pyzombie_filesystem\", \"execbase\")\n name = \"{0}_{1}\".format(name, datetime.utcnow().strftime(\"%Y%jT%H%M%SZ\"))\n if os.path.isdir(Executable.execdirpath(name)):\n #Need to handle the rare case of duplicate resource names---this\n #will happen all the time in testing, but rarely in production.\n index = 0\n altname = \"{0}_{1:03}\".format(name, index)\n while os.path.isdir(Executable.execdirpath(altname)):\n index = index + 1\n altname = \"{0}_{1:03}\".format(name, index)\n name = altname\n return name" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update the kernels table.
def refresh_kernels() -> None: ...
[ "def initTable(cls):\n\n # Always include the data kernels first\n Kernel.kernelTable[\"IMAGE\"] = Kernel(\"IMAGE\", \"I\", \"I\")\n Kernel.kernelTable[\"BUFFER\"] = Kernel(\"BUFFER\", \"B\", \"B\")\n Kernel.kernelTable[\"SCALAR\"] = Kernel(\"SCALAR\", \"S\", \"S\")\n\n for node in vxNodeTable:\n k = Kernel.fromNodeTableEntry(node)\n Kernel.kernelTable[k.name] = k", "def refresh_kernelspecs() -> None:\n ...", "def system_update_keyspace(self, ks_def):\r\n pass", "def compute_kernel(self):\n print \"Computing kernel (%d) \" % len(self.training_set),\n for sample1 in self.training_set:\n for sample2 in self.training_set:\n self.network.kernel(sample1, sample2)\n self.elapsed_epochs += 1\n if self.debug_level:\n self.print_epochs(0, 0, 0)\n print\n self._digit_length = 1\n self._next_order = 10\n self.network.kernel_mutable = False\n self.network.training_set = self.training_set", "def callUpdateTable(self):\r\n self.updateTable()", "def gpu_kernels(self, node, name):\r\n raise MethodNotDefined, 'gpu_kernels'", "def update(self):\n self.columns.update()\n self.update_screen()", "def _modify_updates(self, updates):\n\n if self.max_kernel_norm is not None:\n W, = self.transformer.get_params()\n if W in updates:\n updated_W = updates[W]\n row_norms = T.sqrt(T.sum(T.sqr(updated_W), axis=(0, 1, 2)))\n desired_norms = T.clip(row_norms, 0, self.max_kernel_norm)\n scales = desired_norms / (1e-7 + row_norms)\n updates[W] = (updated_W * scales.dimshuffle('x', 'x', 'x', 0))", "def update_pixel_table(self, pixel_table):\n\n self.pixel_table = pixel_table", "def _rename_appkernels_mod_appkernel(self) -> None:\n dry_run = akrr.dry_run\n update_app_kernel_def = True\n update_app_kernel = True\n update_app_kernel_def_list = True\n\n con_appkernel, cur_appkernel = self.get_akrr_db_con(\"mod_appkernel\", dict_cursor=True)\n\n log.info(\"Updating mod_appkernel\")\n if update_app_kernel_def:\n log.info(\"Updating app_kernel_def\")\n for old, new in ak_rename.items():\n log.debug(\"update app_kernel_def: %s -> %s\" % (new, old))\n cursor_execute(\n cur_appkernel,\n \"update app_kernel_def set ak_base_name=%s where ak_base_name=%s\", (new, old), dry_run)\n if not dry_run:\n con_appkernel.commit()\n\n if update_app_kernel:\n log.info(\"Updating app_kernel\")\n for old, new in ak_rename.items():\n log.debug(\"update app_kernel: %s -> %s\" % (new, old))\n cursor_execute(\n cur_appkernel,\n \"update app_kernel set name=%s where name=%s\", (new, old), dry_run)\n if not dry_run:\n con_appkernel.commit()\n\n if update_app_kernel_def_list:\n log.info(\"Updating mod_appkernel.app_kernel_def\")\n from akrr.cli.generate_tables import populate_mod_appkernel_app_kernel_def\n populate_mod_appkernel_app_kernel_def(con_appkernel, cur_appkernel, dry_run)", "def updateKernelWeights(self, selected_kernel_indices=None):\n self.updateCellDistancesToKernels(selected_kernel_indices)\n self.updateKernels(selected_kernel_indices)\n self.updateWeightedSum(selected_kernel_indices)", "def update_binary_potential_table(self, var1, var2, table):\n if var2 not in self.binaryPotentials[var1]:\n self.binaryPotentials[var1][var2] = table\n else:\n currentTable = self.binaryPotentials[var1][var2]\n assert len(table) == len(currentTable)\n assert len(table[0]) == len(currentTable[0])\n for i in range(len(table)):\n for j in range(len(table[i])):\n currentTable[i][j] *= table[i][j]", "def updateNetworkTables(self):\n self.nt.putNumber(\"actual_rpm\", self.getVelocity())\n self.nt.putBoolean(\"is_spinning\", self.is_spinning)\n 
self.nt.putNumber(\"desired_rpm\", self.desired_rpm)\n self.nt.putNumber(\"error_rpm\", self.getVelocity() - self.desired_rpm)\n self.nt.putNumber(\"desired_accel\", self.desired_acceleration)\n self.nt.putNumber(\"feedforward\", self.feedforward)\n self.nt.putBoolean(\"is_ready\", self.isReady())", "def table_update_callback():\n value_table.update()", "def update(self):\n self.cursor.execute(\"\"\"SELECT * FROM sensors_powersensor\"\"\")\n list = self.cursor.fetchall()\n for sensor in list:\n self.add(sensor[2], sensor[1])", "def update_devices_info(self):\n for beo_key in self.beo_keys:\n self.update_deviceinfo(beo_key)", "def add(self, kernels):\n if not isinstance(kernels, list):\n kernels = [kernels]\n self.kernels += kernels\n # update `_active_indices` from scratch: inactive kernels might be added\n self._active_indices = [idx for idx in range(len(self)) if \\\n not self.kernels[idx].stop()]\n self._ratio_nondom_offspring_incumbent = len(self) * [0] # len(self) changed", "def updateGrid(self) -> None:\n emu = self.emulator\n arch = self.root.arch\n registers = arch.registers\n self.__values.setRowCount(len(registers))\n for i, reg in enumerate(registers):\n self.__values.setRowHeight(i, self.__row_size)\n name = QTableWidgetItem(reg)\n name.setFlags(Qt.NoItemFlags)\n val = emu.get_register_value(reg) if emu.vm else 0\n old_val = self.__old_register_values.get(reg, 0)\n if type(val) in (int, int):\n value = format_address(val, arch)\n else:\n value = str(val)\n value = QTableWidgetItem( value )\n if old_val != val:\n self.__old_register_values[reg] = val\n value.setForeground(QColor(Qt.red))\n value.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsEditable)\n self.__values.setItem(i, 0, name)\n self.__values.setItem(i, 1, value)\n return", "def release(self):\n # type: () -> None\n for k in self.kernels:\n k.release()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a new kernel and returns the ID
def create_kernel(name: str) -> str: ...
[ "def create_device(name, device_type, runtime):\n command = 'create \"%s\" \"%s\" \"%s\"' % (\n name, device_type.identifier, runtime.identifier)\n device_id = _run_command(command)\n\n # The device ID has a new line at the end. Strip it when returning.\n return device_id[:-1]", "def create_session(\n path: str,\n type: str,\n name: Optional[str] = None,\n kernel_name: Optional[str] = None,\n kernel_id: Optional[str] = None,\n) -> str:\n ...", "def __create_kernel (self):\n kernel_cols = ee.List.repeat(1, self.kernel_size)\n kernel_matrix = ee.List.repeat(kernel_cols, self.kernel_size)\n kernel = ee.Kernel.fixed(self.kernel_size, self.kernel_size, kernel_matrix)\n return kernel", "def LUCID_create(lucid_kernel=None, blur_kernel=None): # real signature unknown; restored from __doc__\n pass", "def create_kernel(ktype='sph-anarchy'):\n \n kernel = get_kernel(ktype)\n header = np.array([{'kernel': ktype, 'bins': kernsize}])\n np.savez('kernel_{}.npz'.format(ktype), header=header, kernel=kernel)\n \n print (header)\n \n return kernel", "def make_kernel_spec(kernel_name):\n try:\n from ipython_kernel.kernelspec import (get_kernel_dict, RESOURCES)\n except ImportError:\n try:\n from IPython.kernel import kernelspec\n except ImportError:\n # IPython < 3, pre kernelspec\n spec = {\n 'argv': [sys.executable, '-m', 'IPython.kernel', '-f', '{connection_file}'],\n 'language': 'python',\n 'display_name': 'overridden below',\n }\n resource_dir = ''\n else:\n ksm = kernelspec.KernelSpecManager()\n ks = ksm.get_kernel_spec('python')\n spec = ks.to_dict()\n resource_dir = ks.resource_dir\n else:\n # IPython >= 4\n spec = get_kernel_dict()\n resource_dir = RESOURCES\n \n spec['display_name'] = 'hashdist: %s' % kernel_name.replace('_', ' ')\n # set PATH env\n env = spec.setdefault('env', {})\n env['PATH'] = os.pathsep.join([pjoin(sys.prefix, 'bin'), os.defpath])\n \n # stage temporary kernel directory\n td = tempfile.mkdtemp()\n kernel_dir = pjoin(td, 'hashdist-%s' % kernel_name)\n if os.path.exists(resource_dir):\n shutil.copytree(resource_dir, kernel_dir)\n else:\n os.mkdir(kernel_dir)\n os.chmod(kernel_dir, 0o755)\n with open(pjoin(kernel_dir, 'kernel.json'), 'w') as f:\n json.dump(spec, f, indent=1)\n \n # output the temp dir so our parent can finish installation\n print(kernel_dir)", "def _setup_kernel(self, program, kernel_name, *argv):\n kernel = cl.Kernel(program, kernel_name)\n for idx, value in enumerate(argv):\n kernel.set_arg(idx, value)\n\n return kernel", "def define_kernel(self, *args, **kwargs):\n k = getattr(kernels, self.kernel_name)\n k_base = getattr(kernels, self.base_kernel)\n\n kernel = k(base_graph_kernel=k_base, *args, **kwargs)\n\n return kernel", "def write_kernel_spec(path=None):\n if path is None:\n path = os.path.join(tempfile.mkdtemp(suffix='_kernels'), KERNEL_NAME)\n os.mkdir(path)\n\n # write kernel.json\n kernel_dict = get_kernel_dict()\n\n with open(pjoin(path, 'kernel.json'), 'w') as f:\n json.dump(kernel_dict, f, indent=1)\n\n return path", "def cuda_kernel_factory(nvrtc_kernel_str, dtypes, kernel_name=None):\n\n dtype_strs = get_dtype_strs(dtypes)\n\n for idx, dtype in enumerate(dtypes):\n nvrtc_kernel_str = nvrtc_kernel_str.replace(\n \"{%d}\" % idx, dtype_strs[idx]\n )\n\n kernel_name = f\"\"\"{uuid1()\n if kernel_name is None\n else kernel_name}_{\n \"\".join(dtype_strs).replace(\" \", \"_\")\n }\"\"\"\n\n nvrtc_kernel_str = \"%s\\nvoid %s%s\" % (\n extern_prefix,\n kernel_name,\n nvrtc_kernel_str,\n )\n\n if logger.should_log_for(logger.level_debug):\n 
logger.debug(str(nvrtc_kernel_str))\n\n return cp.RawKernel(nvrtc_kernel_str, kernel_name)", "def _newClusterId(self):\n return self.guidGenerator.new_id()", "def AllocateSystemOperationId(cls, client, callback):\r\n device_op_id = yield gen.Task(Device.AllocateSystemObjectId, client)\r\n op_id = Operation.ConstructOperationId(Device.SYSTEM, device_op_id)\r\n callback(op_id)", "def create_program(template, func, loc=None):\n\n k_args = []\n\n func.set_cl_kernel_args()\n k_args.extend(func.cl_args_name)\n\n # Build the kernel args string.\n kernel_args = ',\\n '.join(k_args)\n \n # Get the kernel workgroup code\n workgroup_code = func.get_cl_workgroup_code()\n \n # Construct the neighbor loop code.\n neighbor_loop_code = \"for (int src_id=0; src_id<nbrs; ++src_id)\"\n\n return template%(locals())", "def allocate_osd_id(\n cluster,\n fsid,\n keyring,\n ):\n\n LOG.debug('Allocating OSD id...')\n try:\n osd_id = _check_output(\n args=[\n 'ceph',\n '--cluster', cluster,\n '--name', 'client.bootstrap-osd',\n '--keyring', keyring,\n 'osd', 'create', '--concise',\n fsid,\n ],\n )\n except subprocess.CalledProcessError as e:\n raise Error('ceph osd create failed', e, e.output)\n osd_id = must_be_one_line(osd_id)\n check_osd_id(osd_id)\n return osd_id", "def kernel_name(op, layout=None):\n from cutlass_lib import library\n\n threadblock = op.tile_description.procedural_name()\n extended_name = op.extended_name()\n opcode_class_name = library.OpcodeClassNames[\n op.tile_description.math_instruction.opcode_class\n ]\n if layout is None:\n layout = op.layout_name()\n align_ab = op.A.alignment\n align_c = op.C.alignment\n name = KERNEL_KEY_TEMPLATE.render(\n threadblock=threadblock,\n extended_name=extended_name,\n opcode_class_name=opcode_class_name,\n layout=layout,\n align_ab=align_ab,\n align_c=align_c,\n )\n return name.replace(\"\\n\", \"\")", "def create_device(tf, name, role, dc, provider):\n tf.random_pet[name] = RandomPet()\n user_data = INSTALL_BASE \\\n + INSTALL_DOCKER \\\n + consul_setup(role, dc, CONSUL_JOIN_ARGS[provider].format(dc=dc, token=TOKENS[provider])) \\\n + nomad_setup(role, dc)\n tf.digitalocean_droplet[name] = CREATE_FUNCS[provider](\"${random_pet.%s.id}\" % name, role, dc, user_data)", "def mknod(filename, mode=0o600, device=0):\n pass", "def get_kernel(self, kid):\n return self.kernels.get(kid)", "def ker_class():\n ker = Kernel()\n return ker" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a new session or returns existing one if path exists
def create_session( path: str, type: str, name: Optional[str] = None, kernel_name: Optional[str] = None, kernel_id: Optional[str] = None, ) -> str: ...
[ "def create_session(self):\n if not self.request.session.session_key:\n self.request.session.save()", "def use_session(session):\n if session:\n return session\n else:\n return create_session()", "def create(self):\r\n sessId = Session.generateId()\r\n return Session(sessId)", "def existing_session(sessionmaker):\n sess = sessionmaker()\n sess[\"test\"] = \"testval\"\n cookie = sess.save()\n return sessionmaker(str(cookie))", "def session(self):\n if not self._session: #Create new session if none exists\n return self._new_session()\n return self._session", "def createNewSession(self, transaction):\n msg = transaction.asyncRead()\n if \"pid\" in msg and \"sid\" in msg and \"layers\" in msg:\n layers = msg[\"layers\"]\n try:\n for layer in layers:\n typename = info.CaffeMetaInformation().getLayerType(layer)\n except UnknownLayerTypeException as e:\n msg[\"status\"] = False\n msg[\"error\"] = [e._msg]\n logging.error(\"Could not create session. Unknown layer\")\n transaction.send(msg)\n return\n dirp = self._createDirName(msg[\"sid\"])\n uid = self._createSession(msg[\"pid\"], dirp)\n self.findSessionBySessionUid(uid).setInitialSid(msg[\"sid\"])\n msg[\"status\"] = True\n msg[\"uid\"] = uid\n logging.info(\"Session created with UID '%s'\", uid)\n else:\n msg[\"status\"] = False\n msg[\"error\"] = [\"CreateNewSession: No PID or SID provided.\"]\n logging.error(\"Could not create session. No PID or SID provided.\")\n transaction.send(msg)", "def get_or_create(self, id: int):\n session = self.get(id)\n if not session:\n session = OBDSession(user_id=self.user_id, id=id)\n self.db_session.add(session)\n self.db_session.commit()\n\n return session", "def test_create_session(self):\n study_id = self.storage.create_study(sample_study_spec())\n\n session = sample_session(study_id=study_id)\n self.storage.create_session(session)\n\n self.assertEqual(self.storage.get_session(study_id, session.id), session)", "async def create_session(session: SessionModel, mongo: MongoDB = mongodb) -> SessionOutModel:\n if not await mongo.session_coll.find_one({\"id\": session.id}):\n await mongo.session_coll.insert_one(session.dict())\n else:\n await mongo.session_coll.update_one({\"id\": session.id}, {'$set': {'status': session.status}})\n return SessionOutModel(**session.dict())", "def _get_session(self, context):\n\n # Allocate a session if necessary\n if context.session is None:\n context.session = self.create_session(context)\n\n return context.session", "def _get_session(self, session_id):\n return self.sessions.setdefault(session_id, Session())", "def create_session(self, request):\r\n # TODO: Possible optimization here for settings.get\r\n s = session.Session(self._connection,\r\n self,\r\n request,\r\n self.settings.get('session_expiry')\r\n )\r\n\r\n self._sessions.add(s)\r\n\r\n return s", "def _init_session(self):\n if not self._session_id:\n self._session_id = str(uuid1())\n\n try:\n self._sqlite_db_path\\\n = str(Path(self._gf_tmp)/self._session_id)+'.db'\n self._config_path\\\n = str(Path(self._gf_tmp)/self._session_id)+'.yaml'\n except TypeError as err:\n Log.an().error(\n 'invalid geneflow tmp path: %s [%s]', self._gf_tmp, str(err)\n )\n return False\n\n return True", "def new_session(self):\n # use the uuid library to make the random key\n key = str(uuid.uuid4())\n cur = self.cursor()\n # store this new session key in the database with no likes in the value\n cur.execute(\"INSERT INTO sessions VALUES (?)\", (key,))\n self.commit()\n\n response.set_cookie('SESSION', key)\n \n return key", "def 
_find_or_create_session(self, req, id_token):\n userdb = self.userdb\n authname = userdb.find_session(id_token)\n if not authname:\n # There is no authenticated session for the user,\n # create a new one\n # XXX: should it be configurable whether this happens?\n authname = userdb.create_session(id_token)\n add_notice(req, _(\n \"Hello! You appear to be new here. \"\n \"A new authenticated session with \"\n \"username '%(authname)s' has been created for you.\",\n authname=authname))\n return authname", "def create_session(self, **params):\n raise NotImplementedError('Should be implemented by a sub-class.')", "def _create_session(self, environment_id):\n return self.clients(\"murano\").sessions.configure(environment_id)", "def _createSession(self, pid, dir):\n # TODO\n session = ServerSession(self, dir, pid=pid)\n uid = session.uid\n self.sessions.append({\"pid\": pid, \"uid\": uid, \"session\": session})\n return uid", "def _CreateSession(self, success_path, user):\n return CreateUploadSession(self.__time_function(), success_path, user)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates an existing session.
def update_session( id: str, path: Optional[str] = None, name: Optional[str] = None, type: Optional[str] = None, kernel_name: Optional[str] = None, kernel_id: Optional[str] = None, ) -> None: ...
[ "def upsert_session(session_data):\n g_db['sessions'].update(\n get_session_id(session_data),\n {\n \"$set\": session_data,\n },\n upsert=True\n )", "def test_update_session(self):\r\n now = time.time()\r\n\r\n # Make sure the session has data so that it doesn't get dropped\r\n cursor = self.db.cursor()\r\n cursor.execute(\"INSERT INTO session VALUES ('123456', 0, 1)\")\r\n cursor.execute(\"INSERT INTO session_attribute VALUES \"\r\n \"('123456', 0, 'foo', 'bar')\")\r\n\r\n incookie = Cookie()\r\n incookie['trac_session'] = '123456'\r\n outcookie = Cookie()\r\n req = Mock(authname='anonymous', base_path='/', incookie=incookie,\r\n outcookie=outcookie)\r\n session = Session(self.env, req)\r\n session.save() # updating should not require modifications\r\n\r\n self.assertEqual(PURGE_AGE, outcookie['trac_session']['expires'])\r\n\r\n cursor.execute(\"SELECT last_visit FROM session WHERE sid='123456' AND \"\r\n \"authenticated=0\")\r\n self.assertAlmostEqual(now, int(cursor.fetchone()[0]), -1)", "def update_or_create_session(session_id, data=None):\n r = _get_redis()\n r.set(session_id, json.dumps(data))\n r.incr(RedisConstants.REDIS_USAGE_COUNTER)\n if data is None:\n r.incr(RedisConstants.REDIS_SESSION_COUNTER)", "def send_session_update(session: Session):\n \"\"\"\n import json\n\n session_dict = json.loads(session.to_json())\n for song in session_dict['songs']:\n song['upvote_users'] = len(song['upvote_users'])\n \n\n emit('updated_session', json.dumps(session_dict), room=str(session.id))\n \"\"\"\n current_rooms = rooms()\n for song in session.songs:\n song.upvote_users = len(song.upvote_users)\n session_id = str(session.id)\n emit('updated_session', session.to_json(), room=str(session.id))", "def save_session(self, session):\n self.sessions[session.id] = session", "def set_session(session: Session) -> None:\n sympc.session.CURRENT_SESSION[str(session.uuid)] = session", "def save(self, context=None):\n updates = self.obj_get_changes()\n self.dbapi.update_session(self.id, updates)\n\n self.obj_reset_changes()", "def test_update_session(self):\n study_id = self.storage.create_study(sample_study_spec())\n\n session = sample_session(study_id=study_id)\n self.storage.create_session(session)\n self.assertEqual(session.state, study_pb2.Session.STATE_VALID)\n\n session.state = study_pb2.Session.STATE_INVALID\n self.storage.update_session(session)\n\n self.assertEqual(self.storage.get_session(study_id, session.id), session)\n self.assertEqual(session.state, study_pb2.Session.STATE_INVALID)", "def update_session_id(self):\n self.__session_id = self.api_base.get_session_id(\n self.__user_id, self.__company_id, self.__user_password, self.__entity_id)\n self.api_base.set_session_id(self.__session_id)\n self.contacts.set_session_id(self.__session_id)\n self.locations.set_session_id(self.__session_id)\n self.employees.set_session_id(self.__session_id)\n self.accounts.set_session_id(self.__session_id)\n self.expense_types.set_session_id(self.__session_id)\n self.attachments.set_session_id(self.__session_id)\n self.expense_reports.set_session_id(self.__session_id)\n self.vendors.set_session_id(self.__session_id)\n self.bills.set_session_id(self.__session_id)\n self.projects.set_session_id(self.__session_id)\n self.departments.set_session_id(self.__session_id)\n self.charge_card_accounts.set_session_id(self.__session_id)\n self.charge_card_transactions.set_session_id(self.__session_id)\n self.customers.set_session_id(self.__session_id)\n self.items.set_session_id(self.__session_id)\n 
self.ap_payments.set_session_id(self.__session_id)\n self.ar_invoices.set_session_id(self.__session_id)\n self.ar_payments.set_session_id(self.__session_id)\n self.reimbursements.set_session_id(self.__session_id)\n self.checking_accounts.set_session_id(self.__session_id)\n self.savings_accounts.set_session_id(self.__session_id)\n self.dimensions.set_session_id(self.__session_id)\n self.dimension_values.set_session_id(self.__session_id)\n self.tasks.set_session_id(self.__session_id)\n self.expense_payment_types.set_session_id(self.__session_id)\n self.location_entities.set_session_id(self.__session_id)\n self.tax_details.set_session_id(self.__session_id)\n self.gl_detail.set_session_id(self.__session_id)\n self.classes.set_session_id(self.__session_id)\n self.journal_entries.set_session_id(self.__session_id)\n self.rev_rec_schedules.set_session_id(self.__session_id)\n self.rev_rec_schedule_entries.set_session_id(self.__session_id)\n self.cost_types.set_session_id(self.__session_id)\n self.order_entry_transactions.set_session_id(self.__session_id)", "def update_session(request):\n if request.method == \"POST\":\n req_data = request.POST.get(\"session_data\", None)\n if req_data:\n if req_data == \"sidebar\":\n if \"sidebar\" in request.session.keys():\n request.session[\"sidebar\"][\"sticky\"] ^= True\n else:\n request.session[\"sidebar\"] = {}\n request.session[\"sidebar\"][\"sticky\"] = True\n request.session.save()\n data = {\n \"result\": \"success\",\n \"message\": \"Session updated\",\n }\n return JsonResponse(data)\n\n return HttpResponseNotAllowed([\"POST\"])", "def update_sessions():\n try:\n sessions = plexserver.sessions()\n except plexapi.exceptions.BadRequest:\n _LOGGER.exception('Error listing plex sessions')\n return\n\n plex_sessions.clear()\n for session in sessions:\n plex_sessions[session.player.machineIdentifier] = session", "async def edit_gaming_session(self, game_session_id, game_session_details):\n return await self._patch_request(\n f'{self.base_url}/api/v2/gaming_sessions/{game_session_id}', data=game_session_details)", "def session(self, session):\n\n self._session = session", "def store(self, session):\r\n session[self.session_key] = self", "def test_update_end_of_session(self):\n pass", "def set_login_session(self, session_id=None):\r\n meta = self.get_meta()\r\n old_login = meta.get('session_id', None)\r\n if old_login:\r\n SessionStore(session_key=old_login).delete()\r\n meta['session_id'] = session_id\r\n self.set_meta(meta)\r\n self.save()", "def _update_token(token):\n session.token = token", "def update_from_naucse(self, report_progress=print, session=None):\n if self.naucse_slug == None:\n raise ValueError(f'No naucse slug for course {self.course_name}')\n if session is None:\n session = requests.Session()\n url = NAUCSE_API_URL_TEMPLATE.format(self.naucse_slug)\n response = session.get(url)\n if response.status_code != 200:\n raise ValueError(f'Could not update course: {url} returned {response.status_code}')\n response.raise_for_status()\n course_info = response.json()['course']\n if 'subtitle' in course_info:\n self.course_name = f\"{course_info['title']} – {course_info['subtitle']}\"\n else:\n self.course_name = course_info['title']\n\n report_progress(f'Updating {self!r}')\n\n self.save()\n\n for session_info in course_info['sessions']:\n if 'time' not in session_info:\n report_progress(\n f'Skipping session without time: {session_info[\"title\"]}')\n else:\n session, created = Session.objects.get_or_create(\n course=self,\n 
slug=session_info['slug'],\n )\n if 'serial' in session_info:\n session.title = f'Lekce {session_info[\"serial\"]}'\n else:\n session.title = None\n session.text = session_info['title']\n published_date = parse_datetime(session_info['time']['start'])\n session.published_date = published_date\n\n if created:\n report_progress(f'Added {session!r}')\n else:\n report_progress(f'Updating {session!r}')\n\n session.save()", "def update_session_status(session_uid, status):\n session = Session.query.filter_by(uid=session_uid).first()\n if session:\n session.online = bool(status)\n db.session.commit()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a two-element tuple. The second element must be a Beliefs object which system1 will use to update the belief module. Once updated, the action queue will be emptied and the rules will be checked for satisfied conditions. The action queue will be refilled with new active actions from the rule list.
def process_belief(self, args):
    goal, belief = args

    if isinstance(belief, Beliefs):
        self.belief_module.process_belief(belief)
        self.initialize_action_queue()

    return [{}]
[ "def update(self):\n if self.queue != self.last_queue:\n if self.queue == \"OFF\":\n self.cfa.set_led(self.led, 0, 0)\n elif self.queue == \"RED\":\n self.cfa.set_led(self.led, 0, 100)\n elif self.queue == \"GREEN\":\n self.cfa.set_led(self.led, 100, 0)\n elif self.queue == \"YELLOW\":\n self.cfa.set_led(self.led, 50, 100)\n elif self.queue == \"ORANGE\":\n self.cfa.set_led(self.led, 100, 100)\n else:\n args = re.findall('^(\\d+):(\\d+)$', self.queue)\n for arg in args:\n self.cfa.set_led(self.led, int(arg[0]), int(arg[1]))\n self.last_queue = self.queue", "def update(self):\n startstate = self.state\n goalstates =self.env.getGoalStates()\n inputs = self.env.sense(self)\n self.action_sequence = self.drive(goalstates,inputs)\n action = self.choose_action() # Choose an action\n self.state = self.env.act(self,action) \n return", "def update(self):\n if self.active_flag:\n self.consider_deactivation()\n else:\n self.consider_activation()\n\n if self.active_flag:\n self.sense_and_act()\n self.weight = self.priority * self.match_degree", "def _action(self, wloops: Any, beta: Any) -> Any:\n pass", "def HELPER_update_belief(self, old_belief, observation, gamma):\n observation = int(observation)\n #print \"old_belief:\", old_belief, type(old_belief)\n #print \"observation:\", observation, type(observation)\n #print \"gamma:\", gamma, type(gamma)\n\n diffs = [0.1*i for i in range(self.num_difficulty_bins)]\n new_belief = util.updateBelief(old_belief, None, observation, diffs, gamma)\n #print \"new_belief\", new_belief, type(new_belief)\n return new_belief", "def update_beliefs(self, corpus_id):\n logger.info('Updating beliefs for corpus \"%s\"' % corpus_id)\n # TODO check which options are appropriate for get_corpus\n corpus = self.get_corpus(corpus_id)\n be = BeliefEngine(self.scorer)\n stmts = list(corpus.statements.values())\n be.set_prior_probs(stmts)\n # Here we set beliefs based on actual curation\n for uuid, correct in corpus.curations.items():\n stmt = corpus.statements.get(uuid)\n if stmt is None:\n logger.warning('%s is not in the corpus.' 
% uuid)\n continue\n stmt.belief = correct\n belief_dict = {st.uuid: st.belief for st in stmts}\n return belief_dict", "def update_belief(self, state, action, reward):\n self.add_to_state_history(state)\n state = self.get_modified_state()\n self.belief.update(state, action, reward, self.alpha)\n self.alpha *= self.a_rate", "def _update_beliefs(self, features,\n beliefs):\n if (len(features) != len(beliefs) or features.ndim != 1):\n raise core.BadFeatureFnError()\n\n assert len(features) == len(beliefs)\n decay = self.rng.binomial(beliefs, self.params.decay_prob)\n updated_beliefs = [\n beliefs[i] + features[i] - decay[i] for i in range(len(beliefs))\n ]\n return updated_beliefs", "def update(self):\n\n state = self.build_state() # Get current state\n self.createQ(state) # Create 'state' in Q-table\n action = self.choose_action(state) # Choose an action\n reward = self.env.act(self, action) # Receive a reward \n self.learn(state, action, reward) # Q-learn\n \n return", "def update_mutex_actions(self, previous_layer_mutex_proposition):\n current_layer_actions = self.action_layer.get_actions()\n \"*** YOUR CODE HERE ***\"\n combs = []\n for ac1 in current_layer_actions:\n for ac2 in current_layer_actions:\n combs.append((ac1,ac2))\n\n def addMutexActs(pairOfActions):\n a1,a2 = pairOfActions\n return self.action_layer.add_mutex_actions(a1,a2) \n\n return list(map(addMutexActs,list(filter(lambda x:mutex_actions(x[0],x[1],previous_layer_mutex_proposition),\n list(filter(lambda i: (i[0]!=i[1]),combs))))))", "def _update_certain(self, possible_locations):\n # Agent is sure of its current location\n self.belief_state = State(possible_locations[0], 1)\n\n if self.prev_state and self.prev_state.certainty < 1:\n # now that agent is sure of its location, update past\n # beliefs to correct any false assumptions\n # TODO agent.update_previous_beliefs(belief_history, agent_belief_state)\n pass\n\n # TODO Assuming prev_action is relative, this should add\n # prev_state + prev_action == agent_belief_state.location\n if self.prev_action == self.belief_state.location:\n # agent is certain of current and past position\n # and is at its intended destination, no further work to do\n pass\n else:\n # agent did not end at intended destination\n # TODO agent.recovery_scheme()\n pass\n\n self.belief_history = []", "def update_based_on_topology(self, *args, **kwargs):\n for bfr in Configuration.get(\"switches\"):\n switch = bfr[\"name\"]\n\n self.update_bier_decap_rule(switch=switch)", "def __init__(self, sn, beliefs):\n assert all([type(x) is Belief for x in beliefs])\n self._sn = sn\n self._beliefs = beliefs", "def update(self):\n\n state = self.build_state() # Get current state\n self.createQ(state) # Create 'state' in Q-table\n action = self.choose_action(state) # Choose an action\n reward = self.env.act(self, action) # Receive a reward\n self.learn(state, action, reward) # Q-learn\n\n return", "def update_actions(self):\n pass", "def kb_retract(self, fact_or_rule):\n printv(\"Retracting {!r}\", 0, verbose, [fact_or_rule])\n ####################################################\n # Student code goes here\n\n if isinstance(fact_or_rule, Fact):\n if fact_or_rule not in self.facts:\n #print(\"fact not in bk!\")\n return\n else:\n #find the corresponding fact in kb\n index = self.facts.index(fact_or_rule)\n fact_or_rule = self.facts[index]\n #if the fact is not supported, remove it\n if len(fact_or_rule.supported_by) == 0:\n self.facts.remove(fact_or_rule)\n else:\n #print(\"can't retract!\")\n return\n elif 
isinstance(fact_or_rule, Rule):\n if fact_or_rule not in self.rules:\n #print(\"rule not in bk!\")\n return\n else:\n #find the corresponding rule in kb\n index = self.rules.index(fact_or_rule)\n fact_or_rule = self.rules[index]\n #if rule is not supported and not asserted, then remove it\n if len(fact_or_rule.supported_by) == 0 and fact_or_rule.asserted != True:\n self.rules.remove(fact_or_rule)\n else:\n #print(\"can't retract!\")\n return\n #remove the supported pairs of the facts that it supports\n for facts in fact_or_rule.supports_facts:\n for i in facts.supported_by:\n if fact_or_rule in i:\n facts.supported_by.remove(i)\n if facts.asserted != True:\n self.kb_retract(facts)\n #remove the supported pairs of the rules that it supports\n for rules in fact_or_rule.supports_rules:\n for i in rules.supported_by:\n if fact_or_rule in i:\n rules.supported_by.remove(i)\n if rules.asserted != True:\n self.kb_retract(rules)", "def updateWeapons(self):\n self.readyWeapons = []\n self.setWeaponStatus()\n\n for myWeapon in self.activeWeapons:\n if myWeapon.preFireCheck() == 1:\n self.readyWeapons.append(myWeapon)\n self.alternateTargets = []\n\n if self.amsTargets != []:\n for myWeapon in self.amsWeapons:\n if myWeapon.preFireCheck() == 1:\n self.readyWeapons.append(myWeapon)\n self.amsTargets = []", "def update(self):\n #should change to a Breadth first algorithm\n logger.info('Handling events')\n self.updating = True\n deque_len = 50\n last = deque([None]*deque_len, maxlen=deque_len)\n for i in range(self.threshold):\n try:\n event = self.events.pop(0)\n except IndexError:\n self.updating = False\n break\n logger.debug('Handling event {}'.format(event))\n for circuit in self.relations[id(event.obj)]:\n last.append(circuit)\n logger.debug('updating circuit {}'.format(circuit))\n circuit.update()\n else:\n self.updating = False\n error_str = 'Update threshold blew up; check for cyclic path.'\n error = RuntimeError(error_str, last)\n raise error", "def priority_one(self, actions):\n for need in self.agent.needs:\n if need.value == 0.0 and need.zero_time < 15:\n goals_keys = []\n goals_values = []\n\n for key, value in self.goals.items():\n goals_keys.append(key)\n goals_values.append(value)\n\n self.goals.clear()\n\n if len(goals_keys) == 2:\n print()\n msg = \"Values length (\" + str(len(goals_values)) + \") does not equal keys length (\" + \\\n str(len(goals_keys)) + \") or goals list does not equal 3 (\" + str(len(goals_keys)) + \")\"\n assert len(goals_values) == len(goals_keys) and len(goals_keys) == 3, msg\n\n if need.name not in goals_keys:\n goals_values[2] = goals_values[1]\n goals_keys[2] = goals_keys[1]\n goals_values[1] = goals_values[0]\n goals_keys[1] = goals_keys[0]\n goals_values[0] = need.value\n goals_keys[0] = need.name\n\n index = 0\n\n while index < self._limit:\n self.goals[goals_keys[index]] = goals_values[index]\n index += 1\n\n assert len(self.goals) == 3, \"goals length not 3: \" + str(self.goals) + \" \" + str(goals_keys) + \\\n \" \" + str(goals_values)\n\n for action in self._actions:\n if action.priority == 1 and need.name in action.need_modifiers and \\\n action.need_modifiers[need.name] > 0.0:\n self.remove_last(actions)\n actions.append(action)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calls the belief module's emit_belief method to get and return a Beliefs object with the agent's chosen belief for emission.
def emit_belief(self, args):
    goal, belief = args

    return [{belief: self.belief_module.emit_belief()}]
[ "def process_belief(self, args):\n goal, belief = args\n\n if isinstance(belief, Beliefs):\n self.belief_module.process_belief(belief)\n self.initialize_action_queue()\n\n return [{}]", "def calculateBeliefs(self):\n\n belief = {}\n\n for question in self.getQuestions():\n q = str(question.id)\n belief[q] = self.HELPER_init_belief()\n\n #print belief[q]\n for answer in self.getQuestionCompletedAnswers(question):\n #print q\n #print str(answer.question.id)\n assert str(answer.question.id) == q\n w_skill = answer.worker.inference_results['EM']['skill']\n # answer.value must be \"0\" or \"1\"\n assert answer.value == \"0\" or answer.value == \"1\"\n #print answer.value, w_skill\n belief[q] = self.HELPER_update_belief(belief[q], answer.value, w_skill)\n #print belief[q]\n\n #print \"Question beliefs:\", belief\n #print \"##################\"\n return belief", "def getBeliefDistribution(self):\n pass", "def getBeliefDistribution(self):\n pass", "def getBeliefDistribution(self):\n \"*** YOUR CODE HERE ***\"\n beliefs = DiscreteDistribution() # Empty discrete distribution object\n for position in self.particles:\n beliefs[position] += 1\n \"\"\"Every time I encounter the position in self.particles, I add one to the belief distribution to weigh\n all positions. After normalization, it will be weighted by the probability.\"\"\"\n\n beliefs.normalize()\n return beliefs", "def update_beliefs(self, corpus_id):\n logger.info('Updating beliefs for corpus \"%s\"' % corpus_id)\n # TODO check which options are appropriate for get_corpus\n corpus = self.get_corpus(corpus_id)\n be = BeliefEngine(self.scorer)\n stmts = list(corpus.statements.values())\n be.set_prior_probs(stmts)\n # Here we set beliefs based on actual curation\n for uuid, correct in corpus.curations.items():\n stmt = corpus.statements.get(uuid)\n if stmt is None:\n logger.warning('%s is not in the corpus.' 
% uuid)\n continue\n stmt.belief = correct\n belief_dict = {st.uuid: st.belief for st in stmts}\n return belief_dict", "def get_map_belief(self):\n self._update_belief_field()\n return self._belief_field", "def belief(self, element):\n return self.bel(element)", "def _compute_belief(self):\n # Compute current dt\n current_time = time.time()\n\n\n\n\n\n # Get the current human position\n try:\n (current_human_pos, rotation) = self._tf_listener.lookupTransform(self._darias_frame, self._human_frame,\n rospy.Time(0))\n current_human_pos = np.asarray(current_human_pos)\n\n except (tf.ExtrapolationException, tf.ConnectivityException, tf.LookupException):\n return\n\n self._compute_belief_from_pose_and_time(current_human_pos, current_time)", "def set_beliefs(self, beliefs):\n self.beliefs = beliefs", "def get_belief_scores(self):\n return self._belief_scores.copy()", "def _get_material_bioflows_for_bev(self):\n\n method = ('ILCD 2.0 2018 midpoint',\n 'resources', 'minerals and metals')\n year = self.years[0]\n act_str = \"transport, passenger car, fleet average, battery electric\"\n\n # upstream material demands are the same for all regions\n # so we can use GLO here\n act = Activity(\n Act.get((Act.name == act_str)\n & (Act.database == eidb_label(\n self.model, self.scenario, year))\n & (Act.location == self.regions[0])))\n lca = bw.LCA({act: 1}, method=method)\n lca.lci()\n lca.lcia()\n\n inv_bio = {value: key for key, value in lca.biosphere_dict.items()}\n\n ca = ContributionAnalysis()\n ef_contrib = ca.top_emissions(lca.characterized_inventory)\n return [inv_bio[int(el[1])] for el in ef_contrib]", "def belief_conflict(self, args):\n goal, belief = args\n if isinstance(belief, Beliefs):\n if self.belief_module.is_conflicting_belief(belief):\n return [{}]\n\n return []", "def getBeliefDistribution(self):\n jointDistribution = jointInference.getBeliefDistribution()\n dist = DiscreteDistribution()\n for t, prob in jointDistribution.items():\n dist[t[self.index - 1]] += prob\n return dist", "def get_belief(self):\n\n return np.concatenate(\n (np.array([self.z_g / self.z_up_limit, self.theta / self.max_theta]), self.belief.flatten()), axis=0)", "def __init__(self, sn, beliefs):\n assert all([type(x) is Belief for x in beliefs])\n self._sn = sn\n self._beliefs = beliefs", "def beam(self) -> Beam:\n\n return self._beam", "def test_edge_features(self):\n k = [4, 4, 4, 4, 4]\n mn = self.create_chain_model(k)\n\n d = 3\n\n for i in range(5):\n mn.set_edge_features((i, i+1), np.random.randn(d))\n\n mn.create_matrices()\n mn.set_unary_weight_matrix(np.random.randn(4, 4))\n mn.set_edge_weight_matrix(np.random.randn(d, 16))\n\n bp = MatrixBeliefPropagator(mn)\n\n bp.infer()\n bp.load_beliefs()\n\n unconditional_marginals = bp.var_beliefs[4]\n\n bp.condition(0, 2)\n bp.infer()\n bp.load_beliefs()\n\n conditional_marginals = bp.var_beliefs[4]\n\n assert not np.allclose(unconditional_marginals, conditional_marginals), \\\n \"Conditioning on variable 0 did not change marginal of variable 4\"\n\n mn.set_edge_features((2, 3), np.zeros(d))\n mn.create_matrices()\n mn.set_unary_weight_matrix(np.random.randn(4, 4))\n mn.set_edge_weight_matrix(np.random.randn(d, 16))\n\n bp.infer()\n bp.load_beliefs()\n\n unconditional_marginals = bp.var_beliefs[4]\n\n bp.condition(0, 2)\n bp.infer()\n bp.load_beliefs()\n\n conditional_marginals = bp.var_beliefs[4]\n\n assert np.allclose(unconditional_marginals, conditional_marginals), \\\n \"Conditioning on var 0 changed marginal of var 4, when the features should have made them 
independent\"", "def get_beam(self, e):\n v1, v2 = e[1]\n cur_visited = (v2, v1) in self.visited\n beam = self.beam_network.get_beam(e)\n return beam, cur_visited" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if an incoming belief is in conflict with internal beliefs. A conflict occurs when the belief is of opposite valence to a current belief. This method does not update own or perceived beliefs.
def belief_conflict(self, args):
    goal, belief = args
    if isinstance(belief, Beliefs):
        if self.belief_module.is_conflicting_belief(belief):
            return [{}]

    return []
[ "def has_conflict(self):\n for diffstat in self.diffstat():\n if diffstat.has_conflict:\n return True\n return False", "def check_conflicts(self):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tcfg = self.cfg\n\t\t# Now consider conflicts\n\t\tself.log('PHASE: conflicts', level=logging.DEBUG)\n\t\terrs = []\n\t\tself.pause_point('\\nNow checking for conflicts between modules', print_input=False, level=3)\n\t\tfor module_id in self.module_ids():\n\t\t\tif not cfg[module_id]['shutit.core.module.build']:\n\t\t\t\tcontinue\n\t\t\tconflicter = self.shutit_map[module_id]\n\t\t\tfor conflictee in conflicter.conflicts_with:\n\t\t\t\t# If the module id isn't there, there's no problem.\n\t\t\t\tconflictee_obj = self.shutit_map.get(conflictee)\n\t\t\t\tif conflictee_obj is None:\n\t\t\t\t\tcontinue\n\t\t\t\tif ((cfg[conflicter.module_id]['shutit.core.module.build'] or\n\t\t\t\t self.is_to_be_built_or_is_installed(conflicter)) and\n\t\t\t\t (cfg[conflictee_obj.module_id]['shutit.core.module.build'] or\n\t\t\t\t self.is_to_be_built_or_is_installed(conflictee_obj))):\n\t\t\t\t\terrs.append(('conflicter module id: ' + conflicter.module_id + ' is configured to be built or is already built but conflicts with module_id: ' + conflictee_obj.module_id,))\n\t\treturn errs", "def check_influence_sanity(self):\n for influence in crest.get_all_influences(self.model):\n assert influence._name is not None, f\"There is an Influence in {influence._parent._name} ({influence._parent.__class__.__name__}) whose name is 'None'\"\n assert influence._name != \"\", f\"There is an Update in {influence._parent._name} ({influence._parent.__class__.__name__}) whose name is empty string\"\n\n assert isinstance(influence.source, crest.Port), f\"Influence {influence._name}'s source is not a crest.Port\"\n assert influence.source in api.get_sources(influence._parent), f\"Influence's source {influence.source._name} ({influence.source}) is not in the sources of entity {influence._parent._name} ({influence._parent})\"\n\n assert isinstance(influence.target, crest.Port), f\"Influence {influence._name}'s target is not a crest.Port\"\n assert influence.target in api.get_targets(influence._parent), f\"Influence's target {influence.target._name} ({influence.target}) is not in the targets of entity {influence._parent._name} ({influence._parent})\"\n\n assert isinstance(influence.function, (crestml.LearnedFunction, types.FunctionType)), f\"Influence {influence._name}'s function needs to be of type types.FunctionType or crestdsl.ml.LearnedFunction\"\n assert len(inspect.signature(influence.function).parameters) == 1, f\"An influence should not have arguments (except the input value)\"", "def is_backward_compatible(self):\n # type: () -> bool\n if not self.old_file:\n return True\n\n is_breaking_backwards = [\n self.is_docker_image_changed(),\n self.is_context_path_changed(),\n self.is_added_required_args(),\n self.is_arg_changed(),\n self.is_there_duplicates_args(),\n self.is_changed_subtype()\n ]\n\n # Add sane-doc-report exception\n # Sane-doc-report uses docker and every fix/change requires a docker tag change,\n # thus it won't be backwards compatible.\n # All other tests should be False (i.e. 
no problems)\n if self.file_path == 'Scripts/SaneDocReport/SaneDocReport.yml':\n return not any(is_breaking_backwards[1:])\n return not any(is_breaking_backwards)", "def check_intent_for_conflicts(self, intent):\n for imp in self.confirmed_attribute_implications:\n if (imp.premise & intent) != imp.premise:\n continue\n if (imp.conclusion & intent) == imp.conclusion:\n continue\n return False\n\n return True", "def violated(self) -> bool:\n ...", "def checkSpikeBonding (self):\r\n stable = True # If any bonds break this will be set to false\r\n stabilityChecker = True # Checks the result of each function call, if set to false then stable will be set to false\r\n # Go through each atom\r\n for i in range(len(self.mol)):\r\n # Go through each spike\r\n for j in range(len(self.mol[i].spikeArray)):\r\n if self.mol[i].spikeArray[j].bonded == True:\r\n stabilityChecker = self.stabilitySpike(self.mol[i].spikeArray[j])\r\n if stabilityChecker == False:\r\n stable = False\r\n #print (stable)\r\n if stable == True:\r\n print(\"No Bonds have broken \\n\")\r\n else:\r\n print (\"Bonds have broken \\n\")\r\n return stable", "def infect(self):\n cant_interact = False\n can_interact = True\n # if the person is dead they can not interact with others \n if(self.dead):\n return cant_interact\n # if they are immune they can interact with others but cannot get the virus\n # regardless of interactions they may have.\n # if they are infected they can interact and spread the contagion.\n if(self.infected or self.immune):\n return can_interact\n # set state to infected because they are infected.\n # update remaining life in hours.\n self.life_remaining_in_hours = self.life_in_hours\n self.infected = can_interact\n\n return can_interact", "def hasConflict(self):\n return len(self.coordinates) != len(set(self.coordinates))", "def resolve_conflict(self):\n\n self.pre_resolve_conflict(self)\n if self.implication_graph.conflict_node is None:\n self.post_resolve_conflict(self)\n self.post_resolve_conflict_true(self)\n return True\n\n logging.info(\"res: %s\" % self.implication_graph.conflict_node.variable)\n if self.decision_level <= 0:\n logging.info(\"res: conflict @ dl 0!\")\n self.post_resolve_conflict(self)\n self.post_resolve_conflict_false(self)\n return False\n \n # find the first UIP\n first_uip = self.implication_graph.find_first_uip()\n\n # find the antecedents and decision levels in the conflict graph\n antecedents, sorted_decision_levels = self.implication_graph.get_conflict_information(first_uip)\n logging.info(\"res: decision levels: %s\" % sorted_decision_levels)\n\n # determine the second-highest decision level\n if len(sorted_decision_levels) >= 2:\n decision_level = sorted_decision_levels[1]\n else:\n logging.info(\"res: no second-highest dl!\")\n self.post_resolve_conflict(self)\n self.post_resolve_conflict_false(self)\n return False\n\n # reset the decision level and undo all intermediate decisions\n self.decision_level = decision_level\n self.implication_graph.clear_decisions(decision_level)\n\n # learn a clause by resolution of the antecedents\n learned_clause = Clause([])\n for antecedent in antecedents:\n learned_clause = learned_clause.resolve(antecedent)\n\n # delete the conflict node\n self.conflict_node = None\n self.clauses.append(learned_clause)\n\n logging.info(\"res: back to decision level %s\" % decision_level)\n logging.info(\"res: learned clause %s\" % learned_clause)\n\n self.post_resolve_conflict(self)\n self.post_resolve_conflict_true(self)\n return True", "def 
refine_conflict(self):\n # Start refine conflict\n self._check_status(STATUS_IDLE)\n self._set_status(STATUS_REFINING_CONFLICT)\n self._notify_listeners_start_operation(listener.OPERATION_REFINE_CONFLICT)\n\n # Ensure cpo model is generated with all constraints named\n namecstrs = self.context.model.name_all_constraints\n if not namecstrs:\n self.context.model.name_all_constraints = True\n self.cpostr = None\n self.agent.solver.model_sent = False\n\n # Refine conflict\n msol = self.agent.refine_conflict()\n\n # Restore previous name constraints indicator\n self.context.model.name_all_constraints = namecstrs\n\n # Call listeners with conflict result\n for lstnr in self.listeners:\n lstnr.new_result(self, msol)\n\n # End refine conflict\n self._set_status(STATUS_IDLE)\n self._notify_listeners_end_operation()\n\n return msol", "def _badness(self, *pairings):\n score = 0\n if self.options[\"avoid_history\"]:\n score += sum([x.conflict_hist for x in pairings]) * self.options[\"history_penalty\"]\n if self.options[\"avoid_institution\"]:\n score += sum([x.conflict_inst for x in pairings]) * self.options[\"institution_penalty\"]\n return score", "def partial_change(self):\n return self.attempted_change() and not all(self._get_field_data())", "def is_backward_compatible(self) -> bool:\n\n answers = [\n not super().is_backward_compatible(),\n self.is_field_mapping_removed(),\n ]\n return not any(answers)", "def b3_correctness(el_a, el_b, system_el2kbid, gold_el2kbid):\n correct = False\n\n if(inSameSet(el_a, el_b, system_el2kbid) and \n inSameSet(el_a, el_b, gold_el2kbid) and\n sameLinking(el_a, el_b, system_el2kbid, gold_el2kbid) #THIS CONDITION DEPARTS FROM THE ORIGINAL BCUBED (extesion for the Entity Linking problem)\n ):\n correct = True\n\n return correct", "def _update_uncertain(self, possible_locations):\n # See if we can narrow down the location\n refined_locations = self._refine_loc(possible_locations)\n\n if len(refined_locations) == 1:\n return self.update_agent_location([refined_locations[0].location])\n elif len(refined_locations) == 0:\n # None of the current states are possible with the prev_state\n prev_possible_states = self.belief_history[-1]\n\n for l in sorted(prev_possible_states, key=lambda l: l.certainty):\n self.prev_state = l\n refined_locations = self._refine_loc(possible_locations)\n\n if not refined_locations:\n # Current percepts and past state are incompatible,\n # check the next past state with the most certainty\n continue\n else:\n return self._update_uncertain([s.location for s in refined_locations])\n else:\n # agent is unsure of past or current location, make best guess\n self.belief_history.append(refined_locations)\n self.belief_state = max(refined_locations, key=lambda s: s.certainty)", "def check_inhabitant_compatibility(self):\n return False", "def treat_conflict(curr_list, final_list):\n nb_conflicts = 0\n c = curr_list[0]\n c_fidx = final_list.index(c)\n for e in curr_list[1:]:\n if e in final_list and e:\n e_fidx = final_list.index(e)\n if e_fidx - c_fidx < 0:\n nb_conflicts += 1\n return nb_conflicts", "def checkForSideChangeRequest(self):\n inThirdRound = self.wonRounds[\"Team1\"] == 1 and self.wonRounds[\"Team2\"] == 1\n oneTeamAt11AndOtherTeamUnder11 = (self.counter[\"Team1\"] == 11 and self.counter[\"Team2\"] < 11) or\\\n (self.counter[\"Team2\"] == 11 and self.counter[\"Team1\"] < 11)\n if inThirdRound and oneTeamAt11AndOtherTeamUnder11:\n self.__notifySideChangeRequest()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
quad(f, a, b) ≈ \int_a^b f(x) dx. Uses some quadrature rule to evaluate the integral.
def quad(f, a, b):
    S, D = (b+a)/2.0, (b-a)/2.0
    def rescaled_f(x):
        return f(x*D + S)*D
    return sum(w * rescaled_f(p) for w, p in zip(quad_weights, quad_points))
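The rescaling in the snippet above is the usual affine change of variables onto a reference interval. The module-level quad_points and quad_weights it relies on are not shown; they are assumed here to be the nodes and weights of some fixed rule (e.g. Gauss–Legendre) tabulated on [-1, 1], which is the interval that the map x = D*t + S with S = (a+b)/2, D = (b-a)/2 sends onto [a, b]. In LaTeX, the identity the code implements is:

\int_a^b f(x)\,dx = \int_{-1}^{1} f(D t + S)\, D\, dt \approx \sum_i w_i\, f(D p_i + S)\, D
% where (p_i, w_i) are the tabulated quad_points and quad_weights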
[ "def quad(func, a, b, args=()):\n\tx_units = a.units\n\tf_units = func(.5*(a+b)).units\n\n\tI, abserr = sciquad(\n\t\tlambda x : func(x*x_units).to(f_units).magnitude,\n\t\ta.magnitude, b.to(x_units).magnitude,\n\t\targs)\n\n\treturn I*x_units*f_units, abserr*x_units*f_units", "def adaptive_quad(f, a, b, quad=simpson, eps=1e-7):\n int_with5 = quad(f, a, b, 5)\n int_with10 = quad(f, a, b, 10)\n m = (a + b) / 2\n if abs(int_with5 - int_with10) < eps:\n return int_with10\n else:\n return (adaptive_quad(f, a, m, quad, eps) +\n adaptive_quad(f, m, b, quad, eps))", "def using_quad():\n return scipy.integrate.quad(lambda x: x ** 2, -1, 2)", "def myQuad(x,Q,b):\n #Make sure the input is numpy compatible\n x = np.array(x)\n Q = np.array(Q)\n b = np.array(b)\n # Run f(x)\n r = 0.5 * np.matmul(x.transpose(), np.matmul(Q, x)) - np.matmul(b.transpose(), x)\n # Since we're hoping for a symmetric, positive definite Q, I'm taking\n # the shortcut to the gradient where d/dx[x^TAx] = 2Ax if A is symmetric\n # and positive definite.\n g = np.matmul(Q,x) - b\n return r,g", "def integral(a, b, f, STEPS=20):\n w = b - a\n # assert(w >= 0)\n if not w:\n return 0\n step = w / STEPS\n return step * sum(f(a + step * (0.5 + i)) for i in range(STEPS))", "def dblquad(func, a, b, gfun, hfun, args=(), epsabs=1.49e-8, epsrel=1.49e-8):\n\n def temp_ranges(*args):\n return [gfun(args[0]) if callable(gfun) else gfun,\n hfun(args[0]) if callable(hfun) else hfun]\n\n return nquad(func, [temp_ranges, [a, b]], args=args,\n opts={\"epsabs\": epsabs, \"epsrel\": epsrel})", "def add_quad(a, b):\n s = np.sqrt(np.square(a) + np.square(b))\n return s", "def integrate(f, a, b):\n if(a > b):\n return - integrate(f, b, a)\n total = 0\n iters = 1000\n while(a <= b):\n total += (f(a) + f(a+1.0/iters)) / (2 * iters)\n a += 1.0 / iters\n return round(total, 8)", "def complex_quad(g, a, b):\n # 2501: Amount of used samples for the trapezoidal rule\n t = np.linspace(a, b, 2501) \n x = g(t)\n return integrate.simps(y=x, x=t) # Use Simpson's rule to compute integral from samples.", "def tplquad(func, a, b, gfun, hfun, qfun, rfun, args=(), epsabs=1.49e-8,\n epsrel=1.49e-8):\n # f(z, y, x)\n # qfun/rfun (x, y)\n # gfun/hfun(x)\n # nquad will hand (y, x, t0, ...) to ranges0\n # nquad will hand (x, t0, ...) to ranges1\n # Stupid different API...\n\n def ranges0(*args):\n return [qfun(args[1], args[0]) if callable(qfun) else qfun,\n rfun(args[1], args[0]) if callable(rfun) else rfun]\n\n def ranges1(*args):\n return [gfun(args[0]) if callable(gfun) else gfun,\n hfun(args[0]) if callable(hfun) else hfun]\n\n ranges = [ranges0, ranges1, [a, b]]\n return nquad(func, ranges, args=args,\n opts={\"epsabs\": epsabs, \"epsrel\": epsrel})", "def integrate_f_from0(b):\n integral, err = scipy.integrate.quad(f, 0, b)\n return integral", "def quad(self, b):\n return b.T @ self.solve(b)", "def complex_quad(g, a, b):\n # 2501: Amount of used samples for the trapezoidal rule\n t = np.linspace(a, b, 2501)\n x = g(t)\n return integrate.simps(y=x, x=t)", "def integral(self, a, b):\n ad = self.anti_derivative()\n if type(ad) == str:\n raise ValueError(\"Cannot integrate 1/x.\")\n else:\n return ad(b) - ad(a)", "def integrate(f, inf_lim, sup_lim):\n function = get_function_from_text(f)\n return sp_integrate.quad(function, inf_lim, sup_lim)[0]", "def integrate(f,dx=1.0):\n return (dx*sum(f)-((f[0]+f[-1])/2)) # trapazoidal just adds a half the sum of two numbers, and runs through so it adds two halves of each number except the fist and last. 
", "def sp_integrate_1D ( func , xmin , xmax , *args , **kwargs ) : \n from scipy import integrate\n ##\n result = integrate.quad ( func , xmin , xmax , *args , **kwargs )\n return result[0]", "def integral(f, h=0.001):\r\n return lambda a,b: \\\r\n h * sum(f(a+i*h) \\\r\n\t\t\tfor i in range(0, int((b-a)/h )))", "def scintegral(f,s,a,b):\n\tx = sc.linspace(0,10,1000)\n\ti_trapez = integ.trapz(f(x), x)\n\ti_simpson = integ.simps(f(x), x)\n\ti_quad = integ.quad(f, 0, 10)\n\ti_quad2 = i_quad[0]\n\t\n\tprint \"integ.trapz() bei \" + str(s) + \" Stützpunkten: \" + str(i_trapez)\n\tprint \"integ.simps() bei \" + str(s) + \" Stützpunkten: \" + str(i_simpson)\n\tprint \"integ.quad() bei \" + str(s) + \" Stützpunkten: \" + str(i_quad2)\n\t\n\tintegral_pre = [i_quad2, i_trapez, i_simpson]\n\treturn integral_pre" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
List or create friend requests. Create an unconfirmed friendship between two users, or return all friendships which are not confirmed for the current user.
def create_friend_request():
    if request.method == "GET":
        friend_requests = [f.to_dict() for f in g.user.get_friend_requests()]
        return jsonify({'success': True, 'friend_requests': friend_requests})

    if request.method == "POST":
        # Get recieving user id from request
        json = request.get_json()
        if json is None:
            raise CustomError(400, message="No JSON included or Content-Type"
                                           "is not application/json")
        if 'recieving_user_id' not in json:
            raise CustomError(400, message="Must include recieving_user_id")
        recieving_user_id = json['recieving_user_id']

        # Get the user object
        recieving_user = User.query.get(recieving_user_id)
        if recieving_user is None:
            raise CustomError(
                404, message='User with id: {} was not found.'.format(
                    recieving_user_id)
            )

        # Check friendship does not already exist
        friendship_exists = Friendship.query.filter(
            (Friendship.actioning_user_id == g.user.id) |
            (Friendship.recieving_user_id == g.user.id),
            (Friendship.actioning_user_id == recieving_user_id) |
            (Friendship.recieving_user_id == recieving_user_id)
        ).first()
        if friendship_exists:
            raise CustomError(
                409,
                message="There is either a pending friend request between the"
                        "two users or the two users are already friends."
            )

        # Insert friend request
        friend_request = Friendship(g.user, recieving_user)
        db.session.add(friend_request)
        db.session.commit()

        return jsonify({'success': True}), 201
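For context, a minimal client-side sketch of what the POST branch above expects. The route path and host are assumptions — the snippet does not include the view's route decorator — and authentication (the g.user lookup) is handled elsewhere in the app and not shown here.

# Hypothetical call; '/friendships' and the host are assumed, not taken from the snippet.
import requests

resp = requests.post(
    "http://localhost:5000/friendships",
    json={"recieving_user_id": 42},  # key spelling matches the view's own identifier
)
assert resp.status_code == 201  # the view returns 201 with {'success': True} on success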
[ "def get_friend_requests(self, user):\n return self.filter(addresser_user=user, status=Friendship.STATUS_PENDING, active=True)", "def add_friend(self, from_user, to_user, message=None):\n if from_user == to_user:\n raise ValidationError(\"Users cannot be friends with themselves\")\n\n if self.are_friends(from_user, to_user):\n raise IntegrityError(\"Users are already friends\")\n\n if message is None:\n message = ''\n\n request, created = FriendshipRequest.objects.get_or_create(\n from_user=from_user,\n to_user=to_user,\n )\n\n if created is False:\n raise IntegrityError(\"Friendship already requested\")\n\n if message:\n request.message = message\n request.save()\n return request", "def list(self, request):\n\n user_profile = get_object_or_404(UserProfile, user=request.user)\n #   Get all sent accepted invitations\n sent = user_profile.creator_friendships.filter(status=1)\n # Get all received accepted invitations\n received = user_profile.invited_friendships.filter(status=1)\n #   Combine results to get all friends:\n friends = []\n for friendship in sent:\n friends.append(UserProfileSerializer(friendship.user_2).data)\n for friendship in received:\n friends.append(UserProfileSerializer(friendship.user_1).data)\n return Response(friends, status=rest_status.HTTP_200_OK)", "def friendship_request_list(request, template_name='/friend/requests_list.html'):\n # friendship_requests = Friend.objects.requests(request.user)\n friendship_requests = FriendshipRequest.objects.filter(rejected__isnull=True)\n\n return render(request, template_name, {'requests': friendship_requests})", "def accept_request(request, id):\n f_request = FriendRequest.objects.get(id=id)\n if f_request.to_user == request.user:\n f_request.to_user.profile.friends.add(f_request.from_user)\n f_request.from_user.profile.friends.add(f_request.to_user)\n f_request.delete()\n messages.success(\n request,\n f'Your friend request was successfully accepted'\n )\n return redirect('profiles:my_friends')", "def accept(self):\n relation1 = Friend.objects.create(\n from_user=self.from_user,\n to_user=self.to_user\n )\n\n relation2 = Friend.objects.create(\n from_user=self.to_user,\n to_user=self.from_user\n )\n\n friendship_request_accepted.send(\n sender=self,\n from_user=self.from_user,\n to_user=self.to_user\n )\n\n self.delete()\n\n # Delete any reverse requests\n FriendshipRequest.objects.filter(\n from_user=self.to_user,\n to_user=self.from_user\n ).delete()\n\n\n\n return True", "def create(self, request):\n friend_obj = Friend.objects.add_friend(\n request.user, # The sender\n get_object_or_404(User, pk=request.data['user_id']), # The recipient\n message=request.data.get('message', '')\n )\n\n return Response(\n FriendshipRequestSerializer(friend_obj).data,\n status.HTTP_201_CREATED\n )", "def pending_invitations(self, request):\n\n user_profile = get_object_or_404(UserProfile, user=request.user)\n #   Get all sent pending invitation\n sent = user_profile.creator_friendships.filter(status=0)\n # Get all received pending invitation\n received = user_profile.invited_friendships.filter(status=0)\n #   Serialize all and create a dict from it\n data = {\"sent\": [], \"received\": []}\n for friendship in sent:\n data[\"sent\"].append(FriendShipSerializer(friendship).data)\n for friendship in received:\n data[\"received\"].append(FriendShipSerializer(friendship).data)\n #   Return response with these 2 informations\n return Response(data, status=rest_status.HTTP_200_OK)", "def friendship_request_list(request):\n # friendship_requests = 
Friend.objects.requests(request.user)\n friendship_requests = FriendshipRequest.objects.filter(rejected__isnull=True)\n #print(friendship_requests)\n return render(request, 'friends/requests_list.html', {'requests': friendship_requests})", "async def send_friend_request(self):\n\n logging.debug(\"Sending friend request to \" + self.username)\n\n if self.is_friend:\n raise ObjectErrors.AlreadyFriends(\n \"You are already friends with \" + self.display_name)\n\n await self.client.request.post(\n \"/user/%s/friendRequest\" % self.id)", "def get_friends(self):\n f1 = User.objects.filter(has_flagged__receiver=self.user,\n has_flagged__confirmed__isnull=False,\n has_flagged__flag_type=1)\n f2 = User.objects.filter(was_flagged__sender=self.user,\n was_flagged__confirmed__isnull=False,\n was_flagged__flag_type=1)\n return (list(f1.prefetch_related('profile')) +\n list(f2.prefetch_related('profile')))", "def add_friend(request):\n required_fields = ['source_user_id', 'dest_user_id', 'token']\n\n # Check if the post request contain the required fields\n if set(required_fields) != set(list(request.data.keys())):\n return Response({'error': str('Missing required fields!')}, status=status.HTTP_400_BAD_REQUEST)\n\n # POST Request content\n data = request.data\n\n # Here check if user_id matches the token with the database\n if not db.check_user(data['source_user_id'], data['token']):\n return Response({'error': str('UNAUTHORIZED')}, status=status.HTTP_401_UNAUTHORIZED)\n\n # Send friend request\n if not mock_db.add_friend(data['source_user_id'], data['dest_user_id']):\n return Response({'error': str('Error when adding friend!')},\n status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n return Response({'status': 'success'})", "def accept_fr():\n\n if len(request.args) < 2:\n redirect(URL(\"user\", \"friend_requests\"))\n\n friend_id = int(request.args[0])\n row_id = int(request.args[1])\n user_id = session.user_id\n\n # Add friend ID to user's friends list\n add_friend(user_id, friend_id)\n\n # Add user ID to friend's friends list\n add_friend(friend_id, user_id)\n\n # Delete the friend request row\n db(db.friend_requests.id == row_id).delete()\n\n redirect(URL(\"user\", \"friend_requests\"))\n return dict()", "def test_requested_friends_asymmetrical(self):\n u = AppUser(id = 1)\n u.django_user = User.objects.create(username='Testuser')\n u.save()\n f = AppUser(id = 2)\n f.django_user = User.objects.create(username='Testuser2')\n f.save()\n \n f.requested_friends.add(u)\n self.assertIs(u in f.requested_friends.all(), True)\n self.assertIs(f in u.requested_friends.all(), False)", "def find_friends(request):\n find_list = []\n sent_requests = set()\n rec_requests = set()\n sent_f_requests = FriendRequest.objects.filter(\n from_user=request.user\n )\n rec_f_requests = FriendRequest.objects.filter(\n to_user=request.user\n )\n\n me = request.user\n my_friends = me.profile.friends.all()\n my_family = me.relations.all()\n profiles = Profile.objects.exclude(\n user=request.user\n )\n for user in profiles:\n user_friends = user.friends.all()\n for friend in user_friends:\n if friend not in find_list and friend != me:\n if friend not in my_friends and friend not in my_family:\n find_list.append(friend)\n\n template = 'profiles/find_friends.html'\n context = {\n 'find_list': find_list,\n }\n return render(request, template, context)", "def friending(user, friend):\n user.update(add_to_set__friends=friend)\n friend.update(add_to_set__friends=user)", "def send_friend_request(self, request, pk=None):\n\n try:\n 
other_player = models.Player.objects.get(pk=pk)\n except models.Player.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n player = request.user.player\n\n # Return 403 if users are already friends\n if player.friends.filter(id=other_player.id).count() == 1:\n return Response(status=status.HTTP_403_FORBIDDEN)\n\n # If other_player has already sent a friend request to player, add as friends immediately\n existing_friend_request_queryset = models.Request.objects.filter(request_from__id=other_player.id, request_to__id=player.id)\n existing_friend_request = existing_friend_request_queryset.first()\n if existing_friend_request:\n existing_friend_request.delete()\n player.friends.add(other_player.id)\n return Response(status=status.HTTP_200_OK)\n\n models.Request(request_from=player, request_to=other_player).save()\n\n return Response(status=status.HTTP_200_OK)", "def get_friend_request_with_id(id):\n # Get friend request\n friendship = Friendship.query.get(id)\n if friendship is None:\n raise CustomError(\n 404,\n message=\"Friendship with id: {} not found.\".format(id)\n )\n can_view = friendship.actioning_user_id == g.user.id or \\\n friendship.recieving_user_id == g.user.id\n # Check user is has permission to view that request\n if not can_view:\n raise CustomError(\n 401,\n message=\"You are not authorised to view this resource.\"\n )\n\n if request.method == \"GET\":\n return jsonify({'success': True, 'friendship': friendship.to_dict()})\n\n if request.method == \"PATCH\":\n if friendship.recieving_user_id != g.user.id:\n raise CustomError(\n 401,\n message=\"You are not authorised to update this object.\"\n )\n\n json = request.get_json()\n if json is None:\n raise CustomError(400, message=\"No JSON included or Content-Type\"\n \"is not application/json\")\n if 'confirmed' in json:\n friendship.confirmed = json['confirmed']\n\n db.session.commit()\n return jsonify({'success': True, 'friendship': friendship.to_dict()})\n\n if request.method == \"DELETE\":\n db.session.delete(friendship)\n db.session.commit()\n return jsonify({'success': True})", "def accept(self):\n receiver_friend_list = FriendList.objects.filter(user_id=self.receiver_id)\n sender_friend_list = FriendList.objects.filter(user_id=self.sender_id)\n if(receiver_friend_list.exists()):\n receiver_friend_list = receiver_friend_list[0]\n else:\n receiver_friend_list = FriendList.objects.create(user_id=self.receiver_id)\n\n if(sender_friend_list.exists()):\n sender_friend_list = sender_friend_list[0]\n else:\n sender_friend_list = FriendList.objects.create(user_id=self.sender_id)\n\n if receiver_friend_list:\n receiver_friend_list.add_friend(self.sender_id)\n if sender_friend_list:\n sender_friend_list.add_friend(self.receiver_id)\n self.is_active = False\n self.save()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get, update or delete the friendship with the specified id.
def get_friend_request_with_id(id): # Get friend request friendship = Friendship.query.get(id) if friendship is None: raise CustomError( 404, message="Friendship with id: {} not found.".format(id) ) can_view = friendship.actioning_user_id == g.user.id or \ friendship.recieving_user_id == g.user.id # Check user is has permission to view that request if not can_view: raise CustomError( 401, message="You are not authorised to view this resource." ) if request.method == "GET": return jsonify({'success': True, 'friendship': friendship.to_dict()}) if request.method == "PATCH": if friendship.recieving_user_id != g.user.id: raise CustomError( 401, message="You are not authorised to update this object." ) json = request.get_json() if json is None: raise CustomError(400, message="No JSON included or Content-Type" "is not application/json") if 'confirmed' in json: friendship.confirmed = json['confirmed'] db.session.commit() return jsonify({'success': True, 'friendship': friendship.to_dict()}) if request.method == "DELETE": db.session.delete(friendship) db.session.commit() return jsonify({'success': True})
[ "def fully_update_friend(id: str):\r\n try:\r\n request_payload = api_helpers.json_payload(request)\r\n api_helpers.verify_required_data_present(\r\n request_payload, FRIEND_RESOURCE_ELEMENTS)\r\n except ValueError as error:\r\n error_response = make_response(jsonify({\"error\": str(error)}), 400)\r\n return error_response\r\n\r\n existing_friend = datastore.get_friend(g.datastore, id)\r\n if existing_friend:\r\n datastore.fully_update_friend(g.datastore, request_payload)\r\n response = make_response(\r\n jsonify({\"message\": \"Friend resource updated.\"}), 201)\r\n return response\r\n\r\n error_response = make_response(\r\n jsonify(\r\n {\"error\": \"No friend resource exists that matches \"\r\n \"the given id: {}\".format(id)}),\r\n 404)\r\n return error_response", "def get_friend(id: str):\r\n\r\n try:\r\n return jsonify(\r\n datastore.get_friend(g.datastore, id))\r\n except TypeError:\r\n error_response = make_response(\r\n jsonify({\"error\": \"No such friend exists.\"}), 404)\r\n return error_response", "def specific_friend(id: str):\n for friend in datastore.friends:\n if friend['id'].lower() == id.lower():\n return jsonify(friend)\n\n error_response = make_response(\n jsonify(\n {\"error\": \"No friend found with the specified identifier. \"\n \"BFP is a Big Fat Panda Loser!\"}), 404)\n\n return error_response", "def destroy_friend(id: str):\r\n try:\r\n datastore.delete_friend(g.datastore, id)\r\n except ValueError:\r\n error_response = make_response(\r\n jsonify({\"error\": \"No such friend exists.\"}), 404)\r\n return error_response\r\n\r\n return jsonify({\"message\": \"Friend resource removed.\"})", "def update_ship(id):\n data = request.get_json()\n print(data)\n for ship in db['ships']:\n if ship['id'] == id:\n if data['name']:\n ship['name'] == data['name']\n if data['age']:\n ship['age'] == data['age']\n return ship, status.HTTP_202_ACCEPTED\n return {}, status.HTTP_404_NOT_FOUND", "def delete_request(request, id):\n f_request = FriendRequest.objects.get(id=id)\n f_request.delete()\n messages.success(\n request,\n f'Your friend request has been removed.'\n )\n return redirect('profiles:my_requests')", "def add_relation(request, id):\n user = request.user\n friend = get_object_or_404(User, id=id)\n user.profile.relations.add(friend)\n user.profile.friends.remove(friend)\n messages.success(\n request,\n 'Friend added to your family list'\n )\n return redirect('profiles:my_family')", "def get_ship(self, ship_id):\n return self._ships[ship_id]", "async def delete(self) -> None:\r\n await self.client.delete_friend_request(self)", "def load_friends(user_id):\n if s.is_protected(user_id) or \\\n s.has_friends(user_id): # loaded before\n return\n try:\n friends = sorted(list(t.friends_ids(user_id)))\n s.set_friends(user_id, friends)\n except TweepError, e:\n if 'Not authorized' in str(e):\n s.mark_protected(user_id)", "def add_friend(self, friend_id):\n if Relationship.objects.filter(from_user_id=self.pk, to_user_id=friend_id).exists():\n return False\n else:\n friend = RegisteredUser.objects.filter(id=friend_id)\n if friend.count() == 1:\n friend = friend[0]\n Relationship.objects.create(from_user=self, to_user=friend, balance=0.0)\n return True\n else:\n return False", "def delete_favorite(self, id):\n path = self._get_path('alter_favorite').format(id=id)\n \n return self._clean_return(self._DELETE(path))", "def scrap_ship(self, ship_id):\n r = requests.delete(self.base_url + f'/users/{self.username}/ships/{ship_id}', headers=self.auth_header)\n return r.text", "def 
get_friends(self, user_id):\n return self._scrape_html_friends(user_id)", "def friendship(self) -> dict[str, Any]:\r\n response: dict[Any, Any] = self._request_builder.get(\r\n url=f\"{BASE_PATH['profile_uri']}{API_PATH['friends_summary'].format(account_id=self.account_id)}\"\r\n ).json()\r\n return response", "def delete_by_id(id: int) -> WishlistItem:\n\n wishlist_item = WishlistItemService.get_by_id(id)\n if not wishlist_item:\n return None\n db.session.delete(wishlist_item)\n db.session.commit()\n return wishlist_item", "def get_relationship(self, id: str) -> Optional[Relationship]:\n return self._relationships_by_id.get(id)", "def accept_request(request, id):\n f_request = FriendRequest.objects.get(id=id)\n if f_request.to_user == request.user:\n f_request.to_user.profile.friends.add(f_request.from_user)\n f_request.from_user.profile.friends.add(f_request.to_user)\n f_request.delete()\n messages.success(\n request,\n f'Your friend request was successfully accepted'\n )\n return redirect('profiles:my_friends')", "def send_request(request, id):\n user = get_object_or_404(User, id=id)\n f_request, created = FriendRequest.objects.get_or_create(\n from_user=request.user,\n to_user=user\n )\n if created:\n messages.success(\n request,\n f'Your friend request to {user} has been sent.'\n )\n\n return redirect('/profiles/%s/' % user.profile.slug)\n messages.info(\n request,\n f'You have already sent a friend request to {user}'\n )\n return redirect('/profiles/%s/' % user.profile.slug)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
outLookSender is not used in this module, but the function was written in case we want to send from an Outlook account in the future
def outLookSender(receiverAddress, receiverName, retainedCompany, companyName, senderName, senderTitle, senderCompany, senderEmail, senderCompanyHomePage, senderPhone, returnHTML=False): subj = f'Engineers from {retainedCompany} Search' if returnHTML: [text, html] = emailTextHTML(receiverName, retainedCompany, companyName, senderName, senderTitle, senderCompany, senderEmail, senderCompanyHomePage, senderPhone, returnHTML=returnHTML) else: [text] = emailTextHTML(receiverName, retainedCompany, companyName, senderName, senderTitle, senderCompany, senderEmail, senderCompanyHomePage, senderPhone, returnHTML=returnHTML) outlook = app('Microsoft Outlook') msg = outlook.make( new=k.outgoing_message, with_properties={ k.subject: subj, k.plain_text_content: text } ) msg.make( new=k.recipient, with_properties={ k.email_address: { k.name: receiverName, k.address: receiverAddress } } ) msg.send()
[ "def _send_outlook(self):\n mail = self._get_outlook_mail()\n mail.send", "def send_email(to, subject, body, from_name):\r\n\r\n FOOTER = (\"<font color = '#D9D9D9' face = 'Segoe UI'>---------------------------------</font><br/>\" +\r\n \"<span style='font-size: 12px; font-family: Segoe UI; color: #7F7F7F;'>\" +\r\n \"This e-mail was automatically generated. To unsubscribe, get the \" +\r\n \"source, report bugs or request features, contact \" + from_name + \" </span>\")\r\n \r\n email = outlook_process.CreateItem(0)\r\n email.To = to\r\n email.Subject = subject\r\n email.HTMLBody = body + FOOTER\r\n email.Display()", "def test_request_sms_recipient_export(self):\n pass", "def test_send_smtp_envelope(self):\n pass", "def non_html_email_sender(request, recipients,subject,body,sender=None,extra_headers=None):\n mailer = get_mailer(request)\n if not sender:\n sender = 'no-reply@nairabricks.com'\n message = Message(subject=subject,\n sender=sender,\n recipients=[recipients],\n body=body,extra_headers=extra_headers)\n mailer.send(message)", "def _get_outlook_mail(self):\n import win32com.client as win32\n if sys.platform == 'win32':\n outlook = win32.Dispatch('outlook.application')\n mail = outlook.CreateItem(0)\n mail.To = \";\".join(self.mail_to)\n mail.CC = \";\".join(self.mail_cc)\n mail.BCC = \";\".join(self.mail_bcc)\n mail.Subject = self.subject\n if self.body_type == EmailHelper.BODY_TYPE_HTML:\n mail.BodyFormat = 3\n mail.HTMLBody = self.body\n elif self.body_type == EmailHelper.BODY_TYPE_TXT:\n mail.Body = self.body\n else:\n raise BodyTypeError('Unknown body type: {0}'.format(self.body_type))\n\n for attachment in self.attachments:\n if not os.path.isfile(attachment):\n raise AttachmentError('Attachment not found {0}'.format(attachment))\n else:\n mail.Attachments.Add(attachment)\n\n return mail\n else:\n raise Exception('Cannot create an outlook message if not win32.')", "def sendRedemptionEmail():\n return", "def test_send_sender_info(self):\n result = self.api.send(self.EMAIL_ID, self.recipient, email_data=self.email_data, sender=self.sender)\n self.assertSuccess(result)", "def test_get_imap_smtp_access(self):\n pass", "def __init__(self):\n self.outlook = win32.Dispatch('outlook.application')\n locale.setlocale(locale.LC_ALL, '')", "def replyMessage(_email, _name):\n\n _mailer = app.config['MAIL_USERNAME']\n mesg = Message(\"Message Received\", sender=('iSOLveIT Contact', f'{_mailer}'), recipients=[_email])\n mesg.body = f'''Hello {_name},\nThe message you sent to Randy has been received. 
\nRandy will contact you within 24 hours.\nThank you.\n\nRegards,\nRandy\n\nDate Sent: {dt.now(tz=GMT_tz).strftime('%B %d, %Y, %H:%M ') + 'GMT'}\n'''\n mail.send(mesg)\n return 'OK'", "def sender(self):\n key, alt = ('Sender', 'From') if not self.resent else \\\n ('Resent-Sender', 'Resent-From')\n value = self.get(key) or self.get(alt)\n _, addr = getaddresses([value])[0]\n return addr", "def test_send_to_self(self):\r\n # Now we know we have pulled up the instructor dash's email view\r\n # (in the setUp method), we can test sending an email.\r\n test_email = {\r\n 'action': 'send',\r\n 'send_to': 'myself',\r\n 'subject': 'test subject for myself',\r\n 'message': 'test message for myself'\r\n }\r\n # Post the email to the instructor dashboard API\r\n response = self.client.post(self.send_mail_url, test_email)\r\n self.assertEquals(json.loads(response.content), self.success_content)\r\n\r\n # Check that outbox is as expected\r\n self.assertEqual(len(mail.outbox), 1)\r\n self.assertEqual(len(mail.outbox[0].to), 1)\r\n self.assertEquals(mail.outbox[0].to[0], self.instructor.email)\r\n self.assertEquals(\r\n mail.outbox[0].subject,\r\n '[' + self.course.display_name + ']' + ' test subject for myself'\r\n )", "def outlook(self):\n if \"outlook\" in self._prop_dict:\n if isinstance(self._prop_dict[\"outlook\"], OneDriveObjectBase):\n return self._prop_dict[\"outlook\"]\n else :\n self._prop_dict[\"outlook\"] = OutlookUser(self._prop_dict[\"outlook\"])\n return self._prop_dict[\"outlook\"]\n\n return None", "def test_using_invite_use_host_in_from_email(self, send_mass_html_mail__mock: Mock):\n events = Event.objects.filter(pk=self.event.pk)\n\n admin.EventAdmin.send_mail(Mock(), None, events)\n\n to_send = list(send_mass_html_mail__mock.call_args[0][0])\n from_email = to_send[0][3]\n self.assertEqual(from_email, \"Marie <test_using_invite_use_host_in_from_email@example.com>\")", "def get_email_address_outsending(name_server):# {{{\n name_server = name_server.lower()\n if name_server == \"subcons\":\n return \"no-reply.SubCons@bioinfo.se\"\n elif name_server == \"topcons2\":\n return \"no-reply.TOPCONS@topcons.net\"\n elif name_server == \"scampi2\":\n return \"no-reply.SCAMPI@bioinfo.se\"\n elif name_server == \"boctopus2\":\n return \"no-reply.BOCTOPUS@bioinfo.se\"\n elif name_server == \"proq3\":\n return \"no-reply.PROQ3@bioinfo.se\"\n elif name_server == \"prodres\":\n return \"no-reply.PRODRES@bioinfo.se\"\n elif name_server == \"pconsc3\":\n return \"no-reply.PCONSC3@bioinfo.se\"\n elif name_server == \"predzinc\":\n return \"no-reply.predzinc@bioshu.se\"", "def test_get_inbox_sent_emails(self):\n pass", "def sender(self, account, password):\n self.gmail_sender = account \n self.gmail_password = password", "def sendBonusEmails():\n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
emailJobs is a function used to email the jobs/careers email addresses of companies in a dataframe
def emailJobs( df, retainedCompany, senderName, defaultSenderEmail, emailPassword, senderTitle, senderCompany, senderCompanyHomePage, senderPhone, noContactCompanyListPickleFileName, port=465, returnHTML=True ): try: with open(noContactCompanyListPickleFileName, 'rb') as inputFile: noContactCompanyList = pickle.load(inputFile) except: noContactCompanyList = [] for i in range(len(df)): companyName = df['Organization Name'][i] if companyName.lower() in noContactCompanyList: pass try: domainName = df['Domain'][i] jobsEmails = [prefix + '@' + domainName for prefix in ['jobs', 'careers']] # email all the jobs pages for that copmany sendEmails( 'guys', # addressing general company, so use 'guys' instead of individual name retainedCompany, companyName, jobsEmails, senderName, defaultSenderEmail, emailPassword, senderTitle, senderCompany, senderCompanyHomePage, senderPhone, port=port, returnHTML = returnHTML ) except: pass
[ "def send_email(jobs):\n jobs = jobs\n server = smtplib.SMTP(\"smtp.gmail.com\", 587)\n server.ehlo()\n server.starttls()\n server.ehlo()\n\n server.login(EMAIL, PASS)\n\n subject = f\"Job Scraper Results\"\n\n if jobs != \"Not working\":\n body = []\n job_ids = [\n jobs[x] for x in sorted(jobs.keys(), key=lambda x: jobs[x][0], reverse=True)\n ][:25]\n for jobID in job_ids:\n score, link, title, company, date_posted, location, full_text = jobID\n body.append(\n f\"({score}) {title} at {company} in {location} posted \\\n {date_posted[5:11]}\\n{link}\\n... {full_text[100:500]} ...\"\n )\n if len(body) == 0:\n body = body + (\"\\nNo results.\")\n body = \"\\n\\n\\n\".join(body)\n body = body.encode(\"ascii\", \"ignore\").decode(\"ascii\")\n msg = f\"Subject: {subject}\\n\\n{body}\"\n else:\n msg = f\"Subject: {subject} - {jobs}\\n\\n{jobs}\"\n\n msg = f\"From: {EMAIL}\\r\\nTo: {EMAIL}\\r\\n\" + msg\n\n server.sendmail(EMAIL, EMAIL, msg)\n\n timezone_ny = pytz.timezone(\"America/NEW_York\")\n datetime_ny = datetime.now(timezone_ny)\n print(f\"E-mail was sent at {datetime_ny.strftime('%H:%M')}.\\n\\n\")\n\n server.quit()", "def notify_job_by_email(info):\n\n # build params\n params = {}\n params[\"id\"] = info[\"job_id\"]\n params[\"rule_name\"] = info[\"rule\"][\"rule_name\"]\n params[\"username\"] = info[\"rule\"][\"username\"]\n kwargs = json.loads(info[\"rule\"][\"kwargs\"])\n params[\"emails\"] = kwargs[\"email_addresses\"]\n rule_hit = info[\"rule_hit\"]\n params[\"url\"] = rule_hit[\"_source\"][\"job\"][\"job_info\"][\"job_url\"]\n job = {\n \"type\": \"notify_job_by_email\",\n \"name\": \"action-notify_job_by_email-%s\" % info[\"job_id\"],\n \"tag\": params[\"rule_name\"],\n \"username\": params[\"username\"],\n \"params\": params,\n \"localize_urls\": [],\n }\n\n return job", "def sync_email_exception_job():\n logger.info('Start email exception job')\n eoh = EmailOperationHelper(**settings.MAIL_KWARGS)\n sys_conf = SystemSettingsModel.get_settings()\n if not sys_conf.get(\"use_email_alert\"):\n logger.info('End email exception job, the email alert is closed')\n return\n default_recipients = sys_conf.get(\"default_recipients\")\n for node in NodesModel.get_list():\n recipients = node.recipients or default_recipients\n if not recipients:\n continue\n recipients = recipients.split(\";\")\n subject = \"Node({}-{}) Exception of SpiderPlatform\".format(\n node.node_type, node.host_port)\n models = NodesExceptionsModel.get_limit(\n is_closed=False, is_emailed=False, node_md5=node.vc_md5)\n if not models:\n continue\n content = \"\\r\\n\\r\\n\".join([m.get_email_content() for m in models])\n eoh.send_mail(subject, content, recipients=recipients)\n for model in models:\n NodesExceptionsModel.update_one(model=model, is_emailed=True)\n pass\n for project in ProjectsModel.get_list():\n recipients = project.recipients or default_recipients\n if not recipients:\n continue\n recipients = recipients.split(\";\")\n subject = \"Project({}) Exception of SpiderPlatform\".format(\n project.project_name)\n models = JobsExceptionsModel.get_limit(\n is_closed=False, is_emailed=False,\n project_name=project.project_name)\n if not models:\n continue\n content = \"\\r\\n\\r\\n\".join([m.get_email_content() for m in models])\n eoh.send_mail(subject, content, recipients=recipients)\n for model in models:\n JobsExceptionsModel.update_one(model=model, is_emailed=True)\n pass\n logger.info('End email exception job')\n pass", "def send_email_with_service(cls, email_scheduler_object):\n raise NotImplementedError", 
"def send_validation_emails(self, **_):\n email_config = config.getSettingJson(config.GENOMIC_DAILY_VALIDATION_EMAILS, {})\n\n if not email_config.get('send_emails'):\n return\n\n validation_incidents = self.incident_dao.get_new_ingestion_incidents()\n\n if not validation_incidents:\n logging.info('No records found for validation email notifications')\n return\n\n recipients, cc_recipients = email_config.get('recipients'), email_config.get('cc_recipients')\n\n for gc, recipient_list in recipients.items():\n gc_validation_emails_to_send = list(filter(lambda x: x.submitted_gc_site_id == gc, validation_incidents))\n\n if gc_validation_emails_to_send:\n for gc_validation_email in gc_validation_emails_to_send:\n validation_message = gc_validation_email.message.split(':', 1)[1]\n message = f\"{validation_message.strip()}\\n\\n\"\n message += f\"Full file path: gs://{gc_validation_email.filePath}\\n\\n\"\n message += \"Please correct this file and re-upload to designated bucket.\"\n\n email_message = Email(\n recipients=recipient_list,\n cc_recipients=cc_recipients,\n subject=\"All of Us GC/DRC Manifest Ingestion Failure\",\n plain_text_content=message\n )\n\n EmailService.send_email(email_message)\n\n self.incident_dao.batch_update_incident_fields([obj.id for obj in gc_validation_emails_to_send])", "def send_report_to_email(report, email): \n this = \"not implemented. should throw an error.\"", "def send_emails():\n\tprint('sending email...')\n\t\n\tcontacts = get_contacts()\n\tfsa_internship = get_fsa_internship()\n\n\tif not fsa_internship:\n\t\tsend_ack_email()\n\t\treturn\n\t\n\tfor name, email in contacts.items():\n\t\temail_text = MIMEMultipart()\n\n\t\tmessage = read_template().substitute(who=name, internship=fsa_internship, url=url)\n\n\t\ttoday = datetime.date.today()\n\t\tdate = today.strftime('%d/%m')\n\n\t\temail_text['From'] = ADDRESS\n\t\temail_text['To'] = email\n\t\temail_text['Subject'] = 'Chamada MPBA - {}'.format(date)\n\n\t\temail_text.attach(MIMEText(message, 'plain'))\n\n\t\temail_server.send_message(email_text)\n\n\t\tdel email_text", "def send_email_alerts(cfg, slots_resp_df, subscribers_df):\n slots_resp_df = slots_resp_df.melt(id_vars=slots_resp_df.columns.difference(eval(cfg['dose_cols_slots'])).to_list(),\n value_vars=eval(cfg['dose_cols_slots']), var_name=cfg['dose_col_main'],\n value_name=cfg['capacity_col_main'])\n\n slots_resp_df[cfg['dose_col_main']] = slots_resp_df[cfg['dose_col_main']].apply(lambda x: x[-1])\n slots_resp_df = slots_resp_df[slots_resp_df[cfg['capacity_col_main']] > 0]\n\n slots_resp_df_partial = slots_resp_df[eval(cfg['slots_resp_df_partial_cols']) +\n eval(cfg['subscribers_group_by_cols']) +\n eval(cfg['geo_cols'])]\n\n slots_resp_df_partial.loc[:, 'info'] = \"Pincode: \" + slots_resp_df_partial['pincode'].map(str) \\\n + \" | Center: \" + slots_resp_df_partial['name'].map(str) \\\n + \" | Vaccine: \" + slots_resp_df_partial['vaccine'].map(str)\\\n + \" | Dose: \" + slots_resp_df_partial['dose'].map(str)\\\n + \" | Capacity: \" + slots_resp_df_partial['capacity'].map(str)\\\n + \" | Date: \" + slots_resp_df_partial['date'].map(str)\\\n + \"\\n\\n\"\n\n slots_resp_df_aggr = slots_resp_df_partial[eval(cfg['subscribers_group_by_cols'])\n + eval(cfg['geo_cols'])\n + eval(cfg['email_message_info_col'])]\\\n .groupby(eval(cfg['subscribers_group_by_cols'])+eval(cfg['geo_cols'])).agg(lambda x: ' \\n'.join(set(x)))\n\n slots_resp_df_aggr = slots_resp_df_aggr.reset_index().astype(str)\n subscribers_df = subscribers_df.astype(str)\n slots_resp_final = 
slots_resp_df_aggr.merge(subscribers_df, on=eval(cfg['subscribers_group_by_cols'])+eval(cfg['geo_cols']))\n\n send_notification(cfg, slots_resp_final)\n\n pass", "def emailFromJob(job):\n job = wildcard + strReplace(job,\"*.-_+1234567890\",wildcard) + wildcard\n \n query = \"SELECT \" + job_tb_email + \" FROM \" + job_tb + \" WHERE LOWER(\" + job_tb_name + ') LIKE \"' + job.lower() + '\" LIMIT 1'\n return mqs(query)", "def sendBonusEmails():\n return", "def send_email(geocentric_coordinates_transformated_to_ITRF_final_list, data):\n pandas.read_json(json.dumps(geocentric_coordinates_transformated_to_ITRF_final_list)).to_excel(\n data_output + \"/\" + data['filename'] + \"_results.xlsx\")\n msg = Message('ITRF Transformations', sender=app.config['MAIL_USERNAME'], recipients=[data['email']])\n msg.body = make_email_message(data['itrf_begin'], data['epoch_begin'], data['itrf_final'], data['epoch_final'],\n data['velocity'], data['date'])\n with app.open_resource(data_output + \"/\" + data['filename'] + \"_results.xlsx\") as fp:\n file_name = data['filename'] + \"_results\"\n msg.attach(file_name + \".xlsx\", file_name + \"/xlsx\", fp.read())\n mail.send(msg)", "def send_email(db, server, rows):\r\n\r\n col_names = dict()\r\n for loc, key in enumerate(rows[0]):\r\n col_names[key] = loc\r\n\r\n for row in rows[1:]:\r\n \r\n confirmation = row[col_names[\"confirmation\"]]\r\n if int(confirmation): continue\r\n \r\n email = row[col_names[\"email\"]]\r\n prod_id = row[col_names[\"product_id\"]]\r\n row_id = row[col_names[\"id\"]]\r\n if not re.match(\"\\w+@\\w+\\.\\w+\", email, re.ASCII):\r\n continue\r\n try:\r\n server.sendmail(EMAIL_ADDR, email,\r\n \"thank you for your order of %s\" % (prod_id))\r\n changed = change_value(db, row_id, \"confirmation\", 1)\r\n \r\n except ValueError as e:\r\n print(\"Email error \", e)\r\n continue\r\n\r\n time.sleep(1)\r\n\r\n return", "def bulk_email(self, email_data):\n email_list = []\n for edata in email_data:\n if not isinstance(edata, EmailParameters):\n print(\"Invalid emails parameters\")\n continue\n try:\n email = edata.to\n cc_email = edata.cc\n bcc_email = edata.bcc\n subject = edata.subject\n data = edata.body\n template = edata.body[\"template\"]\n except Exception as e:\n print(\"Cannot send mail to {}\".format(e))\n continue\n if email is None:\n print(\"Email is empty!\")\n continue\n for em in email:\n if self.EMAIL_REGX.match(em) is None:\n print(\"Invalid email address!\")\n continue\n message = self._mail_render(data, template)\n email_list.append((email, subject, message, cc_email, bcc_email))\n if email_list:\n self.send_bulk(email_list)\n else:\n print(\"Cannot send mail to Email is empty!\")", "def add_email():\n print('\\nAdding Emails to Student Data.')\n # Confirm the required files are in place\n required_files = ['Student Data File', 'Student Data Headings File',\n 'Emails File']\n ad.confirm_files('Add Emails Data', required_files)\n # Load student data file\n student_data = ft.get_csv_fname_load('Student Data')\n # Load student headings file\n student_data_headings = ft.get_headings_fname_load(\n 'Student Data Headings File')\n # Load emails file\n email_data = ft.get_csv_fname_load('Email Data')\n # Create dataframe for student data\n student_data_df = pd.DataFrame(data = student_data,\n columns = student_data_headings)\n # Create dataframe for email data\n email_cols = ['StudentID', 'Email']\n email_data_df = pd.DataFrame(data = email_data, columns = email_cols)\n # Merge tables on StudentID\n updated_data_df = 
pd.merge(student_data_df, email_data_df, on='StudentID',\n how='left')\n # Check if wish to filter results based on course\n message = 'Do you wish to filter the results?'\n to_filter = ad.check_action(message)\n # Filter results based on course if necessary\n if to_filter:\n updated_data_df = filter_results(updated_data_df)\n # Save file to disk\n file_name = 'Student_Data_Emails_{}.csv'.format(ft.generate_time_string())\n updated_data_df.to_csv(file_name, index=False)\n print('\\nData has been saved to {}'.format(file_name))", "def find_jobs_from(website, job_title, location, limit, desired_characs, filename=\"results.xls\"):\n \n if website == 'Indeed':\n url, page_final = urls_indeed_pages(job_title, location, limit)\n\n jobs_list_final = {}\n n_page = 0\n num_listings_final = 0\n\n while n_page < page_final:\n start = limit * n_page\n\n url_page = str(url)+'&start='+str(start)\n print(\"Working on page: \",n_page,\" with URL: \", url_page)\n\n job_soup = load_indeed_jobs_div(url_page)\n jobs_list, num_listings = extract_job_information_indeed(job_soup, desired_characs, n_page)\n\n df2 = pd.DataFrame(jobs_list)\n print(df2.head())\n\n if n_page == 0:\n jobs_df = df2\n else:\n jobs_df = pd.concat([jobs_df, df2], ignore_index=True)\n\n print(jobs_df.head())\n num_listings_final += num_listings\n n_page += 1\n\n jobs_df.to_excel(filename)\n time.sleep(1500)\n #save_jobs_to_excel(jobs_df, filename)\n \n print('{} new job postings retrieved from {}. Stored in {}.'.format(num_listings_final, \n website, filename))", "def add_recipients(df, all_emails):\n user = df[\"sender\"].iloc[0] # ID of the user\n emails = all_emails[user]\n df[\"emails\"] = str(list(emails))\n df[\"emails\"] = df[\"emails\"].map(literal_eval)\n return df", "def send_email_to_all():\n # for a single name, create an email file for each of their donations\n x = [send_email(str(s), False) for s in donor_db]", "def execute_intercom_company_updates(df_company_url, df_users):\n \n \n #merge Intercom company url and app creator Mixpanel profiles\n df_users_intercom_merge = pd.merge(df_users, df_company_url, on='email', how='left')\n\n #remove app creators without company url\n df_users_intercom_merge = df_users_intercom_merge[~df_users_intercom_merge.company_url.isnull()]\n\n #keep app creators who currently do not have a company domain in Mixpanel. 
Not override existing company data in Mixpanel.\n df_users_intercom_merge = df_users_intercom_merge[df_users_intercom_merge.company_domain.isnull()]\n\n #extract company data from Clearbit and update Mixpanel app creator profiles\n company_urls_final = df_users_intercom_merge['company_url'].tolist()\n distinct_id_final = df_users_intercom_merge['distinct_id'].tolist()\n df_clearbit_enriched_company_url_push = get_clearbit_data(company_urls_final, distinct_id_final)\n \n return df_clearbit_enriched_company_url_push", "def send_html_email(new_jobs):\r\n msg = EmailMessage()\r\n msg['Subject'] = \"{} nouvelles offre(s) sur JobUp !\".format(len(new_jobs))\r\n msg['From'] = Address(EMAIL_FROM, GMAIL_ADDRESS)\r\n msg['To'] = Address(EMAIL_TO, GMAIL_ADDRESS)\r\n\r\n log.debug(\"Building email body\")\r\n email_plain = []\r\n email_html = [\"<html><head></head><body>\"]\r\n for job in new_jobs:\r\n job_plain = \"{} - {}:\\n\\n{}\\n{}\\n\\n\".format(job['job_title'],\r\n job['job_company'],\r\n job['job_desc'],\r\n job['job_url'])\r\n job_html = \"\"\"\\\r\n <p><b>{} - {}</b></p>\r\n <p>{}</p>\r\n <p><a href=\"{}\">Lien vers l'annonce</a></p>\r\n ********************<br/><br/>\r\n \"\"\".format(job['job_title'],\r\n job['job_company'],\r\n job['job_desc'],\r\n job['job_url'])\r\n email_plain.append(job_plain)\r\n email_html.append(job_html)\r\n email_plain = \"********************\\n\".join(email_plain)\r\n email_html = \"\".join(email_html)\r\n\r\n msg.set_content(email_plain)\r\n msg.add_alternative(email_html, subtype='html')\r\n\r\n try:\r\n log.debug(\"Initializing SMTP connection to GMail servers\")\r\n with smtplib.SMTP('smtp.gmail.com', 587, timeout=10) as server:\r\n log.debug(\"Connecting to GMail servers using TLS\")\r\n server.starttls()\r\n log.debug(\"Sending GMail credentials\")\r\n server.login(GMAIL_ADDRESS, GMAIL_PWD)\r\n log.debug(\"Sending email\")\r\n server.send_message(msg)\r\n log.info(\"HTML e-mail sent!\")\r\n except socket.timeout:\r\n log.error(ANSIColors.wrap(\"Reached timeout of 10s while connecting to GMail servers!\", ANSIColors.FAIL))\r\n except Exception as e:\r\n log.error(ANSIColors.wrap(\"Error: could not send email at this time:\\n\" + str(e), ANSIColors.FAIL))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Display HTML icon of OS distribution.
def show_os_icon(self): if self.os == 0: return "<i class='devicon-debian-plain'></i>" elif self.os == 1: return "<i class='devicon-redhat-plain'></i>" else: return "?"
[ "def downloadicon_name(self):\n return 'platform_%s.gif' % \\\n re.sub(r'\\W', '_', self.context.getPlatform()).lower()", "def icon():\n\n return None", "def MimeTypeIcon():", "def icon(self):\r\n return Meta.server_icon(self.guild)", "def icon(self):\n return ICON", "def get_icon_title(self): # real signature unknown; restored from __doc__\n return \"\"", "def icon(self):\n\n # look for icon one level up from this hook's folder in \"icons\" folder\n return os.path.join(\n self.disk_location,\n \"icons\",\n \"publish.png\"\n )", "def icon_content(self) -> str:\n return pulumi.get(self, \"icon_content\")", "async def icon(self, ctx: lifesaver.Context):\n if not ctx.guild.icon:\n await ctx.send(\"This server doesn't have a custom icon.\")\n return\n\n await ctx.send(ctx.guild.icon.replace(format=\"png\"))", "def icon(self):\r\n try:\r\n return self.data['icon_url_base']+self.data['icon_url_name']\r\n except KeyError:\r\n return ''", "def get_icon_name(self):\n return self.desktop_entry.getIcon() if self.desktop_entry else ''", "def icon(self):\r\n icon_path = \":/plugins/pdok_services/icon.png\"\r\n icon = QtGui.QIcon(icon_path)\r\n return icon", "def ionfn(name):\n filename = os.path.abspath(os.path.join(_ICON_DIR, \"ionicons\", \"png\", \"512\", \"{}.png\".format(name)))\n if not os.path.exists(filename):\n raise FileNotFoundError(\"Icon file '{}' does not exist\".format(filename))\n return filename", "def data_target_icon(cls):\n return static(\"img/nautobot_logo.png\")", "def data_source_icon(cls):\n return static(\"img/nautobot_logo.png\")", "def get_icon() -> str:\n icon = FILES_DIR / \"icon.ico\"\n if icon.is_file():\n return str(icon)\n else:\n raise FileNotFoundError(f\"There is no icon file in: {icon}\")", "def icon(self) -> str | None:\n value = self.entity_description.icon\n if self.entity_description.key == \"weather\":\n value = self.state\n if value is None:\n value = \"sunny\"\n elif value == \"partlycloudy\":\n value = \"partly-cloudy\"\n value = f\"mdi:weather-{value}\"\n\n return value", "def get_action_icon(*args):\n return _ida_kernwin.get_action_icon(*args)", "def render_package_icon(self, column, cell, model, iter, data):\n path = model.get_path(iter)\n if path.get_depth() == 0:\n cell.props.visible = False\n else:\n cell.props.visible = True\n cell.props.icon_name = \"applications-other\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets "_total_posts" as amount of posts in the VK domain.
async def _set_total_posts_in_domain(self) -> None: logger.info('Getting total posts in "vk.com/%s"...', self.vk_domain) params = { "v": settings.VKAPI_VERSION, "access_token": settings.VKAPI_TOKEN, "count": 1, # Enough just to get total post in domain. "domain": self.vk_domain, } # Data fetching. response = await vk_asynchronous_request( self._url_wall_get, params, domain=self.vk_domain, ) self._total_posts_in_domain = response["response"]["count"] logger.info("Total posts in VK domain: %s", self._total_posts_in_domain)
[ "def get_post_count(self):\n self.post_count = 0\n for post in self.postlist.postlist:\n self.post_count += 1", "def get_post_count(self):\n self.post_count = 0\n for post in self.postlist:\n self.post_count += 1", "def posts_count(self):\n return self.obj.direct_posts_count + sum(n.posts_count for n in self.children)", "def update_count_total(self):\n self.count_total = self.entries.count()", "def posts_count(self):\n return Post.objects.filter(user__username = self.user.username).count()", "def count_posts(self, filter: dict) -> int:\n pass", "def num_posts(self):\n\n return FlicketTicket.query.filter_by(started_id=self.id).count() + FlicketPost.query.filter_by(\n user_id=self.id).count()", "def increase_posts_count(sender, instance, **kwargs):\n\n if kwargs.get('raw'):\n # do nothing, when loading data (fixtures)\n return\n\n if instance.poster is None:\n # An anonymous post is considered. No profile can be updated in\n # that case.\n return\n\n profile, dummy = ForumProfile.objects.get_or_create(user=instance.poster)\n increase_posts_count = False\n\n if instance.pk:\n try:\n old_instance = instance.__class__._default_manager.get(pk=instance.pk)\n except ObjectDoesNotExist: # pragma: no cover\n # This should never happen (except with django loaddata command)\n increase_posts_count = True\n old_instance = None\n if old_instance and old_instance.approved is False and instance.approved is True:\n increase_posts_count = True\n elif instance.approved:\n increase_posts_count = True\n\n if increase_posts_count:\n profile.posts_count = F('posts_count') + 1\n profile.save()", "def get_posts_num(group_id):\n return int(vk.api_method(\"wall.get\",\n owner_id=\"-\"+group_id)[\"response\"][\"count\"])", "def get_total_subject_posts(subject):\n\n total_posts = 0\n for ticket in subject.tickets.all():\n total_posts += ticket.posts.count()\n return total_posts", "def __update_total_count(self, emote_dict):\n for emote, count in emote_dict.items():\n if emote not in self.counts:\n self.counts[emote] = 0\n\n self.counts[emote] += count\n self.database.set_count(emote, self.counts[emote])", "def decrease_posts_count_after_post_unaproval(sender, instance, **kwargs):\n\n if kwargs.get('raw'):\n # do nothing, when loading data (fixtures)\n return\n\n if not instance.pk or not instance.poster:\n # Do not consider posts being created or posts of anonymous users\n return\n\n profile, dummy = ForumProfile.objects.get_or_create(user=instance.poster)\n\n try:\n old_instance = instance.__class__._default_manager.get(pk=instance.pk)\n except ObjectDoesNotExist: # pragma: no cover\n # This should never happen (except with django loaddata command)\n return\n\n if old_instance and old_instance.approved is True and instance.approved is False:\n profile.posts_count = F('posts_count') - 1\n profile.save()", "def total_count(self, total_count):\n self._total_count = total_count", "def set_total(self, value):\n self.set_field(\"total\", value)\n self.set_field(\"current_page\", 1)", "def count_posts(self, tags=None):\n return self._get(\"counts/posts.json\", {\"tags\": tags})", "def total_pages(self, total_pages):\n\n self._total_pages = total_pages", "def _update_count(self):\n self._count = len(self._items)", "def _set_all_page_num(self):\n res = get(self.url, headers=self.headers)\n post_num = re.findall(r'微博\\[(\\d+)\\]', res.text)[0]\n page_num = re.findall(r'\\/(\\d+)页', res.text)[0]\n self._current_page -= 1\n self._all_page_num = int(page_num)\n self._all_post_num = int(post_num)", "def _put_posts(self, posts, 
posting):\n parent = self._currentRule.parent()\n putPosts = []\n now = datetime.datetime.utcnow()\n for post in posts :\n urlHash = post.pop('urlHash')\n url = post.pop('url')\n feedEntry=post.pop('feedEntry')\n if feedEntry is not None:\n post['feedId']=feedEntry.id\n if isinstance(feedEntry, deal_util.DealFeedEntry):\n post['extra'] = feedEntry.deal_location_category\n if feedEntry.deal_location_category:\n logging.info(\"TopDeal: marked a top deal '%s' for a post of campaign '%s' of user %s\" % \n (feedEntry.deal_location_category,\n self._currentRule.name, \n self._currentRule.parent().keyNameStrip()))\n if url is not None :\n dataPost = SPost.get_by_key_name(SPost.keyName(urlHash), parent)\n if dataPost is not None:\n logging.error('Post already exist for user %s of rule %s'%(parent.keyNameStrip(),self._currentRule.name))\n logging.error('The post data %s'%str(dataPost))\n post.update(dict(key_name=SPost.keyName(urlHash),url=url,urlHash=urlHash))\n rule = self._currentRule\n if rule.randomize:\n timeWindow = rule.randomizeTimeWindow - 5\n if timeWindow < 0:\n timeWindow = 0\n delta = datetime.timedelta(minutes=random.randint(0, timeWindow))\n scheduleNext = now + delta\n else:\n scheduleNext = now\n post.update(dict(parent=parent,type=self.getPostType(),execution=posting,revision=posting.revision,state=camp_const.EXECUTION_STATE_INIT,campaign=rule,scheduleNext=scheduleNext))\n post_obj=SPost(**post)\n putPosts.append(post_obj)\n try :\n db.put(putPosts)\n self._posts = putPosts\n self._posting = posting\n except Exception, e :\n objList = []\n for obj in putPosts :\n objList.append(str(obj.key().name()))\n msg = \"%s! The put object list is: %s.\" % (e.message, \",\".join(objList)) \n logging.error(msg)\n raise e" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fetches posts from the VK domain asynchronously and puts them into the "posts" attribute.
async def fetch_posts(self) -> None: async def fetch_posts_for_offset(offset) -> list: logger.info( "(offset %i) Start fetching posts from vk.com/%s...", offset, self.vk_domain, ) # VK Script code for /execute method. vks_code = get_wall_post_template.substitute( { "domain": self.vk_domain, "offset": offset, "posts_per_portion": self._posts_per_portion, "execution_times": self._execution_times, } ) params = { "v": settings.VKAPI_VERSION, "access_token": settings.VKAPI_TOKEN, "code": vks_code, } url = self._url_execute # Posts fetching. resp_json = await vk_asynchronous_request( url, params, domain=self.vk_domain, offset=offset, ) logger.info( "(offset %i) End fetching posts from vk.com/%s...", offset, self.vk_domain, ) # Gathered posts handling. posts_from_vk = resp_json["response"]["items"] posts = posts_as_schemas(posts_from_vk) del posts_from_vk return posts # Checks and preparations. await self._set_total_posts_in_domain() if not self._total_posts_in_domain: return # Creating tasks for fetching. tasks = [] posts_per_task = self._posts_per_portion * self._execution_times offsets = list(range(0, self._total_posts_in_domain, posts_per_task)) for offset in offsets: tasks.append(asyncio.create_task(fetch_posts_for_offset(offset))) # Running tasks. logger.info("Start fetching posts from vk.com/%s...", self.vk_domain) results = await asyncio.gather(*tasks) logger.info("End fetching posts from vk.com/%s...", self.vk_domain) # Flatting results from many tasks into one list. self._posts = [post for result in results for post in result] # Final actions. if self.sort_by_likes: self._posts = list(sorted(self.posts, key=lambda p: p.likes, reverse=True)) if self.amount_to_fetch: self._posts = self._posts[: self.amount_to_fetch]
[ "async def request_posts(self):\n posts = await database_sync_to_async(self.get_posts)()\n for post in posts:\n await self.send_json(models.post_to_dict(post))", "def get_posts(self):\n print(\"\\nReading posts currently in database\")\n print(\"---------------------------------------\")\n \n res = requests.get(API_POST_URL)\n self.posts.extend(res.json())\n \n print(\"{} posts read from database successfully\".format(\n len(res.json()))\n )", "def get_posts():\n url = app.config['POSTS_ENDPOINT']\n response = requests.get(url, params={})\n if response.status_code == 200:\n return parse_posts(response.json())\n raise RuntimeError('Error in retrieving posts.')", "def fetch_post_from_api(post_id: int, rank: int) -> AsyncResult:\n r = requests.get(f\"{settings.HACKERNEWS_API_URL}item/{post_id}.json\")\n r.raise_for_status()\n\n post_res = r.json()\n url = post_res.get(\"url\", f\"{settings.HACKERNEWS_URL}item?id={post_id}\")\n post = {\n \"id\": post_id,\n \"url\": url,\n \"subject\": post_res.get(\"title\"),\n \"rank\": rank,\n \"age\": timeago.format(\n datetime.fromtimestamp(post_res.get(\"time\")), datetime.now()\n ),\n \"score\": post_res.get(\"score\"),\n \"submitted_by\": post_res.get(\"by\"),\n \"num_of_comments\": len(post_res.get(\"kids\", [])),\n }\n\n persist_result = persist.delay(post)\n fetch_sentiment.delay(post)\n return persist_result", "def _retrieve_posts(self, blog_name) -> list:\n posts = []\n offset = 0\n limit = 50\n while True:\n payload = self.client.posts(blog_name, limit=limit, offset=offset, reblog_info=True)\n posts += payload['posts']\n if not payload['posts']:\n break\n offset += limit\n return posts", "async def _set_total_posts_in_domain(self) -> None:\n\n logger.info('Getting total posts in \"vk.com/%s\"...', self.vk_domain)\n\n params = {\n \"v\": settings.VKAPI_VERSION,\n \"access_token\": settings.VKAPI_TOKEN,\n \"count\": 1, # Enough just to get total post in domain.\n \"domain\": self.vk_domain,\n }\n\n # Data fetching.\n response = await vk_asynchronous_request(\n self._url_wall_get,\n params,\n domain=self.vk_domain,\n )\n\n self._total_posts_in_domain = response[\"response\"][\"count\"]\n logger.info(\"Total posts in VK domain: %s\", self._total_posts_in_domain)", "def fetch_from_api() -> None:\n APIFetcherTracker.activate()\n logger.info(\"Fetching from api\")\n try:\n post_ids = requests.get(f\"{settings.HACKERNEWS_API_URL}topstories.json\").json()\n except (\n requests.exceptions.HTTPError,\n requests.exceptions.ConnectionError,\n requests.exceptions.Timeout,\n ) as err:\n APIFetcherTracker.fail()\n raise err\n\n group_result = group(\n fetch_post_from_api.s(post_id, rank) for rank, post_id in enumerate(post_ids, 1)\n ).delay()\n\n max_retries = 10\n wait_time = 1\n with allow_join_result():\n while not all([task_result.ready() for task_result in group_result.get()]):\n if max_retries >= wait_time:\n logger.info(\n \"Waiting {wait_time} seconds for all persist tasks to finish\"\n )\n time.sleep(wait_time)\n wait_time += 1\n else:\n APIFetcherTracker.fail()\n raise Exception(\n \"Maximum retry exceeded while waiting all persist tasks to finish.\"\n \"Backing off.\"\n )\n\n logger.info(f\"Number of posts processed: {len(post_ids)}\")\n APIFetcherTracker.finish()\n Post.objects.exclude(id__in=post_ids).delete()", "def load_posts(self):\n self.execute(f\"\"\"\n SELECT post.id, post.thread_id, post.root_post_id, post.user_id, post.body, post.anonymous_post, post.created_at\n FROM post\n INNER JOIN thread ON post.thread_id = thread.id\n WHERE 
post.thread_id = '{self.id}'\n \"\"\")\n posts = self.fetchall()\n return [Post(post[0], post[1], post[2], post[3], post[4], post[5], post[6]) for post in posts]", "def get_posts(self): #return list of posts that are associated with this blog_id\n return Post.find_posts_for_blog_id(self.blog_id) #this will return a list of posts objects", "def remotePostList(host, posts, public):\n post_list = list()\n posts = posts.get('posts')\n for post in posts:\n author = remoteAddAuthor(post.get('author'))\n title = post.get('title')\n description = post.get('description')\n contentType = post.get('contentType')\n content = post.get('content')\n published = utc.localize(datetime.strptime(post.get('published'), '%Y-%m-%dT%H:%M:%S.%fZ'))\n visibility = post.get('visibility')\n unlisted = post.get('unlisted')\n id = post.get('id')\n origin = post.get('source')\n comments = remoteCommentList(post)\n count = post.get('count')\n next = \"{}/api/posts/{}/comments\".format(DOMAIN, id)\n if host.endswith(\"/\"):\n host = host[:-1]\n source = \"{}/posts/{}\".format(host, post.get('id'))\n\n post_dict = {'author': author, 'title': title, 'description': description,\n 'contentType': contentType, 'content': content, 'published': published,\n 'visibility': visibility, 'unlisted': unlisted, 'id': id,\n 'comments': comments, 'origin': origin,\n 'source': source, 'count': count, 'next': next}\n post_list.append(post_dict)\n return post_list", "def retreive_n_posts(api, group_id: int, count: int) -> List[str]:\n posts = []\n for offset in range(1, count, VK_MAXIMUM_POSTS):\n response = api.wall.get(owner_id=-group_id,\n count=VK_MAXIMUM_POSTS,\n offset=offset)\n posts += [item[\"text\"] for item in response[\"items\"]]\n return posts", "def posts(self):\n if(len(self.getPosts()) <= 0):\n print \"No hi ha cap post\"\n return\n\n for post in self.getPosts().values():\n print post\n print", "def get_posts_data(**kwargs):\n task_instance = kwargs['task_instance']\n # get config passed from previous task\n config = task_instance.xcom_pull(task_ids='get_config', key='config')\n print(config)\n\n search_term = config.get('search query')\n max_results = config.get('max results')\n worksheet = config.get('output sheet')\n\n pages = 1 if ((max_results - 2) <= 0) else math.ceil((max_results - 2) / 4) + 1\n current_time = datetime.now()\n GOOGLE_SHEET_ID = Variable.get(\"GOOGLE_SHEET_ID\")\n\n reactions = ['like', 'love', 'wow', 'haha', 'support', 'anger', 'sorry']\n\n for post in get_posts(search_term, pages=pages, extra_info=True):\n output = [current_time.strftime(\"%Y-%m-%d %H:%M:%S\"), post['post_id'],\n post['user_id'], post['time'].strftime(\"%Y-%m-%d %H:%M:%S\"),\n post['post_text'], post['likes'], post['comments'], post['shares']]\n\n if 'reactions' in post:\n for reaction in reactions:\n value = post['reactions'][reaction] if (reaction in post['reactions']) else 0\n output.append(value)\n\n output = output + [post['post_url'], post['link'], post['shared_text']]\n\n google_sheet.append_to_sheet(GOOGLE_SHEET_ID, worksheet, output)", "def update_all_partner_posts(async=True):\n from partner_feeds.models import Partner\n\n partners = Partner.objects.all()\n for partner in partners:\n # find all the posts in the current partner feeds and update them\n if async:\n update_posts_for_feed.delay(partner)\n else:\n update_posts_for_feed(partner)", "def take_1000_posts():\n\n token = '64774e6764774e6764774e6781641d8a0e6647764774e67389e143fe98e964427a24577'\n version = 5.92\n domain = 'fit4life_official'\n\n # кол-во постов для 
отображения\n count = 100\n\n # \"offset\" смещение, необходимое для выборки определенного подмножества записей,\n # когда спарсили первые 100 постов прибавляем для начала отсчета 101-200 и т.д.\n offset = 0\n all_post = []\n\n # в цикле можно установить какое кол-во постов нужно\n while offset < 1000:\n response = requests.get('https://api.vk.com/method/wall.get',\n params={\n 'access_token': token,\n 'v': version,\n 'domain': domain,\n 'count': count,\n 'offset': offset\n })\n data = response.json()['response']['items']\n offset += 100\n\n # Расширяет список [], добавляя в конец все элементы списка \"data\"\n all_post.extend(data)\n print('{} posts received, OK!'.format(offset))\n time.sleep(0.5)\n return all_post", "def get_posts(self):\n return self.blog_posts.all()", "def get_posts():\n db = connect()\n c = db.cursor()\n c.execute(\"SELECT content, time FROM posts ORDER BY time DESC\")\n posts = c.fetchall()\n db.close()\n return posts", "async def fetch_posts(self, route: str, *, amount: int = 25, params: dict = None) -> List[dict]:\n # Reddit's JSON responses only provide 25 posts at most.\n if not 25 >= amount > 0:\n raise ValueError(\"Invalid amount of subreddit posts requested.\")\n\n # Renew the token if necessary.\n if not self.access_token or self.access_token.expires_at < datetime.utcnow():\n await self.get_access_token()\n\n url = f\"{self.OAUTH_URL}/{route}\"\n for _ in range(self.MAX_RETRIES):\n response = await self.bot.http_session.get(\n url=url,\n headers={**self.HEADERS, \"Authorization\": f\"bearer {self.access_token.token}\"},\n params=params\n )\n if response.status == 200 and response.content_type == 'application/json':\n # Got appropriate response - process and return.\n content = await response.json()\n posts = content[\"data\"][\"children\"]\n return posts[:amount]\n\n await asyncio.sleep(3)\n\n log.debug(f\"Invalid response from: {url} - status code {response.status}, mimetype {response.content_type}\")\n return list() # Failed to get appropriate response within allowed number of retries.", "def posts(conf, dlcs, *urls, **opts):\n\n posts = cached_posts(conf, dlcs, opts['keep_cache'])\n for post in posts['posts']:\n if urls and not post['href'] in urls:\n continue\n else:\n print output('posts', opts, post)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates posts as Pydantic schemas based on post data returned by the VK API.
def posts_as_schemas(posts_from_vk: list[dict]) -> list[Post]: posts = [] for post_from_vk in posts_from_vk: try: post = Post( date=post_from_vk["date"], likes=post_from_vk["likes"]["count"], text=post_from_vk["text"], path=f"wall{post_from_vk['owner_id']}_" f"{post_from_vk['id']}", photos=[], videos=[], ) except KeyError as exc: logger.error("No key %s for post: %s", exc, post_from_vk) continue # Collect attachments (photos, videos etc.). if "attachments" in post_from_vk: attachments = post_from_vk["attachments"] for attachment in attachments: if attachment["type"] == "photo": try: photo = PostPhoto(url="") photo.url = attachment["photo"]["sizes"][-1]["url"] post.photos.append(photo) except KeyError as exc: logger.error("No key %s for photo: %s", exc, post_from_vk) elif attachment["type"] == "video": video = PostVideo(first_frame_url="") video_from_vk = attachment["video"] if "first_frame" in video_from_vk: video.first_frame_url = video_from_vk["first_frame"][-1]["url"] elif "image" in video_from_vk: video.first_frame_url = video_from_vk["image"][-1]["url"] else: logger.error("No video image found: %s", post) continue post.videos.append(video) posts.append(post) return posts
[ "def posts_post():\n data = request.json\n\n try:\n validate(data, post_schema)\n except ValidationError as error:\n data = {\"message\": error.message}\n return Response(json.dumps(data), 422, mimetype=\"application/json\")\n\n post = Post(title=data[\"title\"], body=data[\"body\"])\n session.add(post)\n session.commit()\n\n data = json.dumps(post.as_dictionary())\n headers = {\"Location\": url_for(\"post_get\", id=post.id)}\n\n return Response(data, 201, headers=headers, mimetype=\"application/json\")", "def prepare_post_data(self, response):\n\n # handle post array inconsistenices in other apis\n json_data = response.json()\n if \"posts\" in json_data:\n posts = json_data[\"posts\"]\n else:\n posts = json_data\n\n # marshal and slap on expected metadata\n for post in posts:\n post[\"source\"] = self.host\n post[\"author\"][\"host\"] = self.host\n\n return posts", "def remotePostCreate(host, post):\n post = post.get('posts')[0]\n author = remoteAddAuthor(post.get('author'))\n title = post.get('title')\n description = post.get('description')\n contentType = post.get('contentType')\n content = post.get('content')\n published = utc.localize(datetime.strptime(post.get('published'), '%Y-%m-%dT%H:%M:%S.%fZ'))\n visibility = post.get('visibility')\n unlisted = post.get('unlisted')\n id = post.get('id')\n origin = post.get('origin')\n count = post.get('count')\n comments = remoteCommentList(post)\n source = \"{}/api/posts/{}\".format(DOMAIN, post.get('id'))\n\n post_dict = {'author': author, 'title': title, 'description': description,\n 'contentType': contentType, 'content': content, 'published': published,\n 'visibility': visibility, 'unlisted': unlisted, 'id': id,\n 'comments': comments, 'origin': origin, 'count': count,\n 'source': source}\n return post_dict", "def _put_posts(self, posts, posting):\n parent = self._currentRule.parent()\n putPosts = []\n now = datetime.datetime.utcnow()\n for post in posts :\n urlHash = post.pop('urlHash')\n url = post.pop('url')\n feedEntry=post.pop('feedEntry')\n if feedEntry is not None:\n post['feedId']=feedEntry.id\n if isinstance(feedEntry, deal_util.DealFeedEntry):\n post['extra'] = feedEntry.deal_location_category\n if feedEntry.deal_location_category:\n logging.info(\"TopDeal: marked a top deal '%s' for a post of campaign '%s' of user %s\" % \n (feedEntry.deal_location_category,\n self._currentRule.name, \n self._currentRule.parent().keyNameStrip()))\n if url is not None :\n dataPost = SPost.get_by_key_name(SPost.keyName(urlHash), parent)\n if dataPost is not None:\n logging.error('Post already exist for user %s of rule %s'%(parent.keyNameStrip(),self._currentRule.name))\n logging.error('The post data %s'%str(dataPost))\n post.update(dict(key_name=SPost.keyName(urlHash),url=url,urlHash=urlHash))\n rule = self._currentRule\n if rule.randomize:\n timeWindow = rule.randomizeTimeWindow - 5\n if timeWindow < 0:\n timeWindow = 0\n delta = datetime.timedelta(minutes=random.randint(0, timeWindow))\n scheduleNext = now + delta\n else:\n scheduleNext = now\n post.update(dict(parent=parent,type=self.getPostType(),execution=posting,revision=posting.revision,state=camp_const.EXECUTION_STATE_INIT,campaign=rule,scheduleNext=scheduleNext))\n post_obj=SPost(**post)\n putPosts.append(post_obj)\n try :\n db.put(putPosts)\n self._posts = putPosts\n self._posting = posting\n except Exception, e :\n objList = []\n for obj in putPosts :\n objList.append(str(obj.key().name()))\n msg = \"%s! 
The put object list is: %s.\" % (e.message, \",\".join(objList)) \n logging.error(msg)\n raise e", "def create_post(self, post_json):\n\n response = _create(\n post_json,\n self.base_url,\n self.session,\n resource_type = \"posts\"\n )\n\n \n\n return response", "def create_post():\n data = request.get_json()\n\n print(list(data.keys()))\n\n user_id = data['user_id']\n print(user_id)\n username = data['username']\n print(username)\n message = data['message']\n print(message)\n # creation_dt = data['creation_dt']\n # print(creation_dt)\n\n creation_dt = time.time()\n post_id = red.incr('postid')\n\n pipe = red.pipeline()\n pipe.hset(f'post:{post_id}', 'post_id', post_id)\n pipe.hset(f'post:{post_id}', 'user_id', user_id)\n pipe.hset(f'post:{post_id}', 'username', username)\n pipe.hset(f'post:{post_id}', 'message', message)\n pipe.hset(f'post:{post_id}', 'likes', 0)\n pipe.hset(f'post:{post_id}', 'comments', 0)\n pipe.hset(f'post:{post_id}', 'creation_dt', creation_dt)\n\n mapping = {post_id: creation_dt}\n pipe.zadd('postlist', mapping)\n pipe.zadd(f'userpostlist:{user_id}', mapping)\n pipe.zadd(f'upostlist:{user_id}', mapping)\n\n # TODO: update use profile\n # Update post count in user profile\n\n # TODO: updated post list for followers\n\n # followers = red.zrevrange(f\"follower:{user_id}\", 0, -1)\n # for follower in followers:\n # pipe.zadd(f'upostlist:{follower}', mapping)\n\n pipe.execute()\n\n result = {'post_id': post_id}\n\n response = {'message': 'Success', 'data': result}\n\n return jsonify(response), 201", "def get_post_model(self, data):\n blog_post = Post(\n title=data.get('title', data),\n article=data.get('article', data),\n heading=data.get('heading', data),\n slug=data.get('slug', data),\n date_modified=data.get('date_modified', data),\n date_created=data.get('date_created', data),\n status=data.get('status', data),\n image_URL=data.get('status', data),\n author=data.get('author', data)\n )\n\n return blog_post", "def create(self, validated_data):\n return Post.objects.create(**validated_data)", "def marshal_posts(shard, post_list):\n out = []\n for post in post_list:\n post_dict = dict(\n shardId=shard,\n archiveType=models.Post.ARCHIVE_REVERSE_MAPPING[post.archive_type],\n nickname=post.nickname,\n title=post.title,\n body=post.body,\n postTimeMs=models.datetime_to_stamp_ms(post.post_time),\n sequenceId=getattr(post, 'sequence', None),\n newTopicId=post.new_topic,\n postId=post.post_id)\n out.append(post_dict)\n return out", "def convert_posts_key(cls, posts: list[dict[str, Any]]):\r\n ret = []\r\n\r\n for post in posts:\r\n ret.append({\r\n cls.POSTS_SEQ_ID: post[UnitAnalysisPostKey.SEQ_ID],\r\n cls.POSTS_LANG: post[UnitAnalysisPostKey.LANG_CODE],\r\n cls.POSTS_TYPE: post[UnitAnalysisPostKey.TYPE],\r\n cls.POSTS_UNIT_NAME: post[UnitAnalysisPostKey.UNIT_NAME],\r\n cls.POSTS_LAST_MODIFIED: post[UnitAnalysisPostKey.DT_LAST_MODIFIED],\r\n cls.POSTS_PUBLISHED: post[UnitAnalysisPostKey.DT_PUBLISHED],\r\n cls.POSTS_VIEW_COUNT: post[UnitAnalysisPostKey.VIEW_COUNT],\r\n })\r\n\r\n return ret", "def transform_posts(self, *args):\n # Unpacking Args Tuple:\n posts_dict = args[0]\n \n # Remove for next version:\n \"\"\"\n # Querying existing posts from the database during current day:\n start_date = date.today() - timedelta(days=1) # Today's date range for API.\n end_date = start_date + timedelta(days=2)\n\n existing_posts_id = []\n try:\n existing_posts = self.query_con.get_subreddit_data(\n self.subreddit_name, \n start_date=start_date, \n end_date=end_date)\n \n 
existing_posts_id = existing_posts.index\n except:\n pass\n\n # Extracting unqiue keys from the posts_dict.keys() that are not present in the existing_post_id:\n unique_id_keys = list(set(posts_dict.keys()) - set(existing_posts_id))\n \n # Unpacking the \"Author\" parameter and extending Author derived params to the end of the content\n # list for each dict key-value pair that is unique (not in the database):\n\n # Creating the list of unique post dicts to be passed to the load function:\n unique_posts = [\n\n # Unpacking list for faster appending:\n self._transform_post_content_lst(post_id, content_lst) for post_id, content_lst \n in posts_dict.items() if post_id in unique_id_keys\n \n ]\n \"\"\"\n\n # Transforming post data external of unique post ID filtering:\n unique_posts = [\n self._transform_post_content_lst(post_id, content_lst) for post_id, content_lst in\n posts_dict.items()]\n\n self.logger.info(f\"Raw Post Data Transformed. Formatted posts ({len(unique_posts)}) data being passed to Loading method\", \"reddit\", \"pipeline\", 200)\n yield unique_posts", "def get_posts():\n url = app.config['POSTS_ENDPOINT']\n response = requests.get(url, params={})\n if response.status_code == 200:\n return parse_posts(response.json())\n raise RuntimeError('Error in retrieving posts.')", "async def request_posts(self):\n posts = await database_sync_to_async(self.get_posts)()\n for post in posts:\n await self.send_json(models.post_to_dict(post))", "def post(self):\n data = request.json\n return create_new_blog(data=data)", "def remotePostList(host, posts, public):\n post_list = list()\n posts = posts.get('posts')\n for post in posts:\n author = remoteAddAuthor(post.get('author'))\n title = post.get('title')\n description = post.get('description')\n contentType = post.get('contentType')\n content = post.get('content')\n published = utc.localize(datetime.strptime(post.get('published'), '%Y-%m-%dT%H:%M:%S.%fZ'))\n visibility = post.get('visibility')\n unlisted = post.get('unlisted')\n id = post.get('id')\n origin = post.get('source')\n comments = remoteCommentList(post)\n count = post.get('count')\n next = \"{}/api/posts/{}/comments\".format(DOMAIN, id)\n if host.endswith(\"/\"):\n host = host[:-1]\n source = \"{}/posts/{}\".format(host, post.get('id'))\n\n post_dict = {'author': author, 'title': title, 'description': description,\n 'contentType': contentType, 'content': content, 'published': published,\n 'visibility': visibility, 'unlisted': unlisted, 'id': id,\n 'comments': comments, 'origin': origin,\n 'source': source, 'count': count, 'next': next}\n post_list.append(post_dict)\n return post_list", "def post_schema():\n return StructType([\n StructField(\"Accept\", StringType(), True),\n StructField(\"Host\", StringType(), True),\n StructField(\"User-Agent\", StringType(), True),\n StructField(\"event_type\", StringType(), True),\n StructField(\"Content-Length\", StringType(), True),\n StructField(\"Content-Type\", StringType(), True),\n StructField(\"attributes\", StringType(), True)\n ])", "def save_posts(self, posts):\n return self.collection.insert_many(map(lambda post: post.serialize(), posts))", "def fill_post_dict(posts):\n\n post_dict = {}\n\n # Fills the post_dict with amount of likes, title and path\n for post in posts:\n likes = len(db.execute(\"SELECT post_id FROM likes WHERE post_id=:post_id\", post_id=post[\"id\"]))\n titles = db.execute(\"SELECT title FROM uploads WHERE id=:id\", id=post[\"id\"])\n title = titles[0][\"title\"]\n post_dict[post[\"id\"]] = (post[\"path\"], likes, 
title)\n\n return post_dict", "def post(self):\n data = request.get_json()\n return business.create_blog(data['title'], data['context'], data['user_id'], data['tags'])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Builds an HParam object with default hyperparameters.
def default_hparams():
    raise NotImplementedError('Not implemented')
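A concrete override of this stub might look like the sketch below, using tf.contrib.training.HParams (which assumes TensorFlow 1.x); the field names and values are illustrative assumptions, not the model's real defaults.

import tensorflow as tf  # assumes TF 1.x, where tf.contrib.training.HParams is available

def default_hparams():
    # Illustrative defaults only; a real model defines its own fields and values.
    return tf.contrib.training.HParams(
        batch_size=32,
        learning_rate=0.001,
        max_iter=1000,
    )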
[ "def get_default_hparams():\n return HParams(\n train_epochs=5,\n do_fine_tuning=False,\n batch_size=32,\n learning_rate=0.005,\n momentum=0.9,\n dropout_rate=0.2,\n l1_regularizer=0.0,\n l2_regularizer=0.0001,\n label_smoothing=0.1,\n validation_split=0.2,\n do_data_augmentation=False,\n rotation_range=40,\n horizontal_flip=True,\n width_shift_range=0.2,\n height_shift_range=0.2,\n shear_range=0.2,\n zoom_range=0.2)", "def default_hparams():\n hparams = tf.contrib.training.HParams()\n component_hparams = SparseConvAutoencoderMaxPoolComponent.default_hparams()\n stack_hparams = ['num_layers', 'batch_size', 'sum_norm']\n\n for key, value in component_hparams.values().items():\n if key not in stack_hparams:\n hparams.add_hparam(key, [value])\n\n hparams.add_hparam('num_layers', 1)\n hparams.add_hparam('batch_size', component_hparams.batch_size)\n hparams.add_hparam('sum_norm', [-1])\n hparams.add_hparam('output_features', 'output')\n\n return hparams", "def get_default_hparams():\n hparams_map = base_model.get_default_hparams().values()\n hparams_map.update({\n 'conditional': True,\n 'dec_rnn_size': [512], # Decoder RNN: number of units per layer.\n 'dec_rnn_attn_len': 0, # Decoder RNN: length of attention vector.\n 'enc_rnn_size': [256], # Encoder RNN: number of units per layer per dir.\n 'dropout_keep_prob': 1.0, # Probability all dropout keep.\n 'sampling_schedule': 'constant', # constant, exponential, inverse_sigmoid\n 'sampling_rate': 0.0, # Interpretation is based on `sampling_schedule`.\n })\n return tf.contrib.training.HParams(**hparams_map)", "def default_opts():\n hparams = Workflow.default_opts()\n\n # Training features\n #hparams.add_hparam('random_offsets', True)\n hparams.add_hparam('train_max_sequence_length', 0)\n hparams.add_hparam('test_max_sequence_length', 0)\n hparams.add_hparam('stochastic_forgetting_probability', 0.0)\n\n # Testing & measurement\n hparams.add_hparam('num_validating_batches', 1)\n hparams.add_hparam('num_testing_batches', 1)\n hparams.add_hparam('average_accuracy_interval', 100)\n hparams.add_hparam('perplexity_interval', 100)\n hparams.add_hparam('debug_start', -1)\n hparams.add_hparam('test_primer', None)\n hparams.add_hparam('test_distributions_filename', None)\n\n # Dataset & embedding\n hparams.add_hparam('corpus_train', '')\n hparams.add_hparam('corpus_test', '')\n\n hparams.add_hparam('embedding_file', '')\n hparams.add_hparam('token_file', '')\n hparams.add_hparam('token_delimiter', ',')\n hparams.add_hparam('eos_token', '</s>')\n\n return hparams", "def make_default_hyperparameters(dim):\n return numpy.ones(dim + 1)", "def create_or_load_hparams(default_hparams, hparams_path):\n hparams = utils.maybe_parse_standard_hparams(default_hparams, hparams_path)\n hparams = extend_hparams(hparams)\n # Print HParams\n utils.print_hparams(hparams)\n return hparams", "def init_default_parameters(self):\n \n p = Struct()\n # for Avoid Obstacle testing no need for goal\n # p.goal = Struct()\n # p.goal.x = 0.5 #m\n # p.goal.y = 0.5 #m\n p.velocity = 0.2 #m/sec\n p.gains = Struct()\n p.gains.kp = 4.0\n p.gains.ki = 0.2\n p.gains.kd = 0.0\n \n self.parameters = p", "def set_default_hyperparameters(self):\n self.hyperparameter_space = {\n 'scale_X': hp.choice('scale_X', ['std', 'mm01', 'mm11', None]),\n 'scale_y': hp.choice('scale_y', ['std', 'mm01', 'mm11', None]),\n }\n\n if self.input_obj.keywords['pes_format'] == 'interatomics':\n self.set_hyperparameter('morse_transform', hp.choice('morse_transform',[{'morse': True,'morse_alpha': 
hp.quniform('morse_alpha', 1, 2, 0.1)},{'morse': False}]))\n else:\n self.set_hyperparameter('morse_transform', hp.choice('morse_transform',[{'morse': False}]))\n if self.pip:\n val = hp.choice('pip',[{'pip': True,'degree_reduction': hp.choice('degree_reduction', [True,False])}])\n self.set_hyperparameter('pip', val)\n else:\n self.set_hyperparameter('pip', hp.choice('pip', [{'pip': False}]))\n\n if self.input_obj.keywords['gp_ard'] == 'opt': # auto relevancy determination (independant length scales for each feature)\n self.set_hyperparameter('ARD', hp.choice('ARD', [True,False]))\n #TODO add optional space inclusions, something like: if option: self.hyperparameter_space['newoption'] = hp.choice(..)", "def init_parameters(obj, hyperparameters):\n # Initialize Global Configuration Parameter\n params = hyperparameters['global']\n setattr(obj, 'param', params)\n\n # Initialize Attributes (Pre-Checked Parameters)\n setattr(obj, 'learning_rate', params['learning_rate'])\n setattr(obj, 'loss', params['loss'])\n setattr(obj, 'max_iter', params['max_iter'])\n\n if params['loss'] == 'least_squares':\n setattr(obj, 'num_classes', 1)\n elif params['loss'] in ['binary_crossentropy', 'categorical_crossentropy', 'auto']:\n setattr(obj, 'num_classes', params['num_classes'])\n\n # Initialize Attributes (Optional Values - Based on Default Parameters)\n if 'l2_regularization' not in params or params['l2_regularization'] is None:\n setattr(obj, 'l2_regularization', 0)\n else:\n setattr(obj, 'l2_regularization', params['l2_regularization'])\n\n if 'max_bins' not in params:\n setattr(obj, 'max_bins', 255)\n else:\n setattr(obj, 'max_bins', params['max_bins'])\n\n if 'max_depth' not in params or params['max_depth'] is None:\n setattr(obj, 'max_depth', None)\n else:\n setattr(obj, 'max_depth', params['max_depth'])\n\n if 'max_leaf_nodes' not in params or params['max_leaf_nodes'] is None:\n setattr(obj, 'max_leaf_nodes', 31)\n else:\n setattr(obj, 'max_leaf_nodes', params['max_leaf_nodes'])\n\n if 'min_samples_leaf' not in params or params['min_samples_leaf'] is None:\n setattr(obj, 'min_samples_leaf', 20)\n else:\n setattr(obj, 'min_samples_leaf', params['min_samples_leaf'])\n\n if 'random_state' in params:\n setattr(obj, 'random_state', params['random_state'])\n else:\n setattr(obj, 'random_state', None)\n\n if 'scoring' in params:\n setattr(obj, 'scoring', params['scoring'])\n else:\n setattr(obj, 'scoring', None)\n\n if 'verbose' not in params or params['verbose'] is None:\n setattr(obj, 'verbose', False)\n else:\n setattr(obj, 'verbose', True)\n\n return obj", "def update_hyperopt_params_with_defaults(hyperopt_params: HyperoptConfigDict) -> None:\n from ludwig.hyperopt.execution import executor_registry\n\n set_default_value(hyperopt_params, EXECUTOR, {})\n set_default_value(hyperopt_params, SPLIT, VALIDATION)\n set_default_value(hyperopt_params, \"output_feature\", COMBINED)\n set_default_value(hyperopt_params, METRIC, LOSS)\n set_default_value(hyperopt_params, GOAL, MINIMIZE)\n\n set_default_values(\n hyperopt_params[EXECUTOR],\n {TYPE: RAY, NUM_SAMPLES: 1, MAX_CONCURRENT_TRIALS: AUTO},\n )\n\n if hyperopt_params[EXECUTOR].get(\"trial_driver_resources\") is None:\n hyperopt_params[EXECUTOR][\"trial_driver_resources\"] = {\"CPU\": 1, \"GPU\": 0}\n\n executor = get_from_registry(hyperopt_params[EXECUTOR][TYPE], executor_registry)\n executor_defaults = {k: v for k, v in executor.__dict__.items() if k in get_class_attributes(executor)}\n set_default_values(\n hyperopt_params[EXECUTOR],\n executor_defaults,\n 
)", "def overwrite_hyperparams(self):\n try:\n default_hyperparams = self.hyperparams\n for key in default_hyperparams:\n try:\n flag = self.FLAGS[key]\n param_value = flag.value\n if param_value is not None:\n self.hyperparams[key] = param_value\n except:\n pass\n except:\n pass", "def Params_defaultParams(): # real signature unknown; restored from __doc__\n pass", "def _default_params(self) -> Dict[str, Any]:\n normal_params = {\n \"temperature\": self.temperature,\n \"max_tokens\": self.max_tokens,\n \"top_p\": self.top_p,\n \"frequency_penalty\": self.frequency_penalty,\n \"presence_penalty\": self.presence_penalty,\n \"n\": self.n,\n # \"best_of\": self.best_of,\n \"request_timeout\": self.request_timeout,\n \"logit_bias\": self.logit_bias,\n }\n return {**normal_params, **self.model_kwargs}", "def _sets_default_params(self):\n pass", "def add_default_params(self):\r\n self.params = class_from_string(\r\n BaseFramework._configuration._default_param_type\r\n )()", "def __init__(self, **kwargs):\n # Register the hyperparameters and their type in _hparam_types.\n # _hparam_types maps the parameter name to a tuple (type, bool).\n # The type value is the type of the parameter for scalar hyperparameters,\n # or the type of the list elements for multidimensional hyperparameters.\n # The bool value is True if the value is a list, False otherwise.\n self._hparam_types = {}\n for name, value in six.iteritems(kwargs):\n self.add_hparam(name, value)", "def add_default_params(self, params):\n params['key'] = self.key\n params['format'] = self.format\n #params['unique_id'] = generate_unique_id()\n return params", "def _add_parameter_default(self, msg_param):\n default_types = msg_param.default_types\n while default_types: # iterate over each bit\n def_type = default_types & (~default_types+1)\n default_types ^= def_type\n def_type -= 1\n if def_type not in self._default_parameters:\n self._default_parameters[def_type] = {}\n self._default_parameters[def_type][msg_param.key] = msg_param.value", "def create_or_load_hparams(\n out_dir, default_hparams, hparams_path, save_hparams=True):\n hparams = utils.load_hparams(out_dir)\n if not hparams:\n hparams = default_hparams\n hparams = utils.maybe_parse_standard_hparams(\n hparams, hparams_path)\n else:\n hparams = ensure_compatible_hparams(hparams, default_hparams, hparams_path)\n hparams = extend_hparams(hparams)\n\n # Save HParams\n if save_hparams:\n utils.save_hparams(out_dir, hparams)\n for metric in hparams.metrics:\n utils.save_hparams(getattr(hparams, \"best_\" + metric + \"_dir\"), hparams)\n\n # Print HParams\n utils.print_hparams(hparams)\n return hparams" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Evaluates the trained model using the specified features and labels.
def evaluate(self, features, labels):
    raise NotImplementedError('Not implemented')
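One possible concrete implementation of this abstract method, sketched under the assumption that self._model wraps a fitted scikit-learn classifier and that accuracy is an acceptable metric; the wrapper class and attribute names are hypothetical.

from sklearn.metrics import accuracy_score

class SklearnClassifierModel:
    """Hypothetical wrapper holding a fitted scikit-learn estimator in self._model."""

    def __init__(self, model):
        self._model = model

    def evaluate(self, features, labels):
        # Predict with the wrapped estimator and score against the true labels.
        predictions = self._model.predict(features)
        return accuracy_score(labels, predictions)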
[ "def evaluate(self, trained_model, model_input, *args, **kwargs):\r\n pass", "def evaluate_model(model, scaled_test_images, test_labels):\n return model.evaluate(scaled_test_images, test_labels,verbose=2)", "def evaluate_model(preds, labels):\n ACC, TN, FN, TP, FP = 0, 0, 0, 0, 0\n\n for user in range(num_of_labeled_users):\n user_preds = preds[user]\n user_labels = labels[user][num_of_genuine_segments:]\n ACC += accuracy_score(user_labels, user_preds)\n cm = confusion_matrix(user_labels, user_preds)\n TN += cm[0][0]\n FN += cm[1][0]\n TP += cm[1][1]\n FP += cm[0][1]\n\n print(\"#\" * 10)\n print(\"average accuracy = %s\" % (ACC / num_of_labeled_users))\n print(\"average true_negative = %s\" % (TN / num_of_labeled_users))\n print(\"average false_negative = %s\" % (FN / num_of_labeled_users))\n print(\"average true_positive = %s\" % (TP / num_of_labeled_users))\n print(\"average false_positive = %s\" % (FP / num_of_labeled_users))\n print(\"#\" * 10)", "def evaluate_model(model, X_test, Y_test, category_names):\n Y_pred = model.predict(X_test)\n result_df = get_eval_result(np.array(Y_test), Y_pred, category_names)\n print(result_df);", "def evaluate(self, train_set=\"train_set\", test_set=\"test_set\", targets=\"targets\", k=10):\n\n test_set = self.cache.fetch(test_set) if isinstance(test_set, str) else test_set\n\n # Predict\n preds = self.run(dataset=train_set, targets=targets, k=k)\n\n # Evaluate model\n print(\"evaluating model ...\")\n score = evaluate(preds, test_set)\n print(\"MAP@{}: {:.5f}\\n\".format(k, score))\n\n return score", "def eval_model(\n self, eval_df, multi_label=False, output_dir=None, verbose=False, **kwargs\n ):\n\n if not output_dir:\n output_dir = self.args[\"output_dir\"]\n\n self._move_model_to_device()\n\n result, model_outputs, wrong_preds = self.evaluate(\n eval_df, output_dir, multi_label=multi_label, **kwargs\n )\n self.results.update(result)\n\n if verbose:\n print(self.results)\n\n return result, model_outputs, wrong_preds", "def eval_text(self, features, fnames):\n\t\tfor m in METRICS:\n\t\t\tprint ' Use %s metric...' % m\n\t\t\tfor clf in (LR, DT, SVM):\n\t\t\t\tprint ' Use %s classifier...' % clf.NAME\n\t\t\t\tfeatures_named = zip(fnames, features[m])\n\t\t\t\tfor fname, columns in features_named:\n\t\t\t\t\tprint ' Use %s feature set...' 
% fname\n\t\t\t\t\tprfs_named = zip(('p', 'r', 'f'), get_prf(cv_10fold(clf, columns)))\n\t\t\t\t\tfor sname, scores in prfs_named:\n\t\t\t\t\t\tcol_label = '_'.join([sname, clf.LABEL, m, fname])\n\t\t\t\t\t\tself.results[col_label] = scores", "def test(self, scenes, labels, verbose=1):\n print(\"Evaluation metrics: \")\n prepared_scenes = self.prepare_scenes(scenes)\n print(self.evaluate(prepared_scenes, labels, verbose=verbose))\n results = self.predict(prepared_scenes, verbose=verbose)\n result_labels = [i for i in range(len(self._quantifier_names))\n if any([result[i] for result in results])]\n result_targets = [self._quantifier_names[label] for label in result_labels]\n if result_targets:\n print(\"Classification report: \")\n print(classification_report(labels, results,\n labels=result_labels,\n target_names=result_targets,\n digits=num_symbols))\n # return results to allow further testing if necessary\n else:\n print('No classifications available for report')\n return results, result_labels, result_targets", "def evaluate(model, g, val_nid, device):\n model.eval()\n nfeat = g.ndata['features']\n labels = g.ndata['labels']\n with th.no_grad():\n pred = model.module.inference(g, nfeat, device, args.batch_size, args.num_workers)\n model.train()\n test_acc = Accuracy()\n return test_acc(th.softmax(pred[val_nid], -1), labels[val_nid].to(pred.device))", "def evaluate_model(model, X_test, Y_test, category_names):\n #predict labels for test data\n Y_predicted = model.predict(X_test)\n #Iterate through all 36 possible labels to in order to compare prediction\n for i in range(len(category_names)):\n print(\"Label:\",category_names[i])\n print(classification_report(Y_test.values[:, i], Y_predicted[:, i]))", "def evaluate_model(model, X_test, Y_test, category_names):\n y_pred = model.predict(X_test)\n display_results(Y_test, y_pred)\n print(\"model score: %.3f\" % model.score(X_test, Y_test))\n #Report the f1 score, precision and recall for each output category of the dataset\n print(classification_report(Y_test, y_pred, target_names=category_names))", "def evaluate(self):\n batch_losses = []\n all_predictions = []\n for (inputs, targets) in self._test_data:\n inputs = torch.Tensor(inputs)\n targets = torch.Tensor(targets[:, self._use_testmat_ixs])\n\n if self.use_cuda:\n inputs = inputs.cuda()\n targets = targets.cuda()\n with torch.no_grad():\n predictions = None\n if _is_lua_trained_model(self.model):\n predictions = self.model.forward(\n inputs.transpose(1, 2).contiguous().unsqueeze_(2))\n else:\n predictions = self.model.forward(\n inputs.transpose(1, 2))\n predictions = predictions[:, self._use_ixs]\n loss = self.criterion(predictions, targets)\n\n all_predictions.append(predictions.data.cpu().numpy())\n batch_losses.append(loss.item())\n all_predictions = np.vstack(all_predictions)\n\n average_scores = self._metrics.update(\n all_predictions, self._all_test_targets)\n\n self._metrics.visualize(\n all_predictions, self._all_test_targets, self.output_dir)\n\n np.savez_compressed(\n os.path.join(self.output_dir, \"test_predictions.npz\"),\n data=all_predictions)\n\n loss = np.average(batch_losses)\n logger.info(\"test loss: {0}\".format(loss))\n for name, score in average_scores.items():\n logger.info(\"test {0}: {1}\".format(name, score))\n\n test_performance = os.path.join(\n self.output_dir, \"test_performance.txt\")\n feature_scores_dict = self._metrics.write_feature_scores_to_file(\n test_performance)\n\n return feature_scores_dict", "def evaluate_classifier(data, 
classifier):\r\n\tevaluate_input = tf.estimator.inputs.numpy_input_fn(\r\n\t\tx={\"x\": data[\"inputs\"]},\r\n\t\ty=data[\"labels\"],\r\n\t\tnum_epochs=1,\r\n\t\tshuffle=False)\r\n\tclassifier.evaluate(input_fn=evaluate_input)", "def evaluate(model, test_files):\n print(\"Running predictions.\")\n models = load_model(model)\n predictions = predict(models, test_files)\n\n # # write predictions to file\n # write_predictions(\"evaluate_out.json\",predictions)\n evaluate_individual(predictions, test_files, models)\n evaluate_overall(predictions)", "def evaluate(self, *args, **kwargs):\n\n \n return self.model.evaluate( *args, **kwargs)", "def test(model, test_labels, test_images):\n\n model.evaluate(test_images, test_labels, batch_size=model.batch_size)", "def evaluate_model(model, X_test, Y_test, category_names):\n Y_pred = model.predict(X_test) #Predict\n \n #List to save Evaluation Metrics' Results\n Acc = [] #Accuracy\n Prc = [] #Precision\n Rec = [] #Recall\n F1 = [] #F1-Score\n \n #Evaluate every column\n for ind, col in enumerate(Y_test.columns):\n\n y_true = Y_test[col]\n y_pred = Y_pred[:,ind] \n \n #Metrics \n acc = accuracy_score(y_true, y_pred) #Accuracy\n prc = precision_score(y_true, y_pred) #Precision\n rec = recall_score(y_true, y_pred) #Recall\n f1 = f1_score(y_true, y_pred) #F1-Score\n \n Acc.append(acc)\n Prc.append(prc)\n Rec.append(rec)\n F1.append(f1)\n \n #Create dataset to save evaluation results into a .csv file\n data = np.c_[Acc, Prc, Rec, F1]\n Eval = pd.DataFrame(data, index = category_names,\n columns = ['Accuracy','Precision','Recall', \"F1-Score\"])\n Eval.to_csv('evaluation_results.csv')", "def clf_eval():\n y_true = np.random.randint(2, size=10000)\n y_pred = np.clip(np.random.normal(0.25, 0.3, size=y_true.shape) + y_true * 0.5, 0, 1)\n\n model_eval = ClassificationEvaluation(\n y_true=y_true,\n y_pred=y_pred,\n class_names=['a', 'b'],\n model_name='foo',\n )\n return model_eval", "def evaluate_model(model, X_test, Y_test, category_names):\n\n y_pred = model.predict(X_test)\n Y_test_as_array = np.array(Y_test)\n for i in range(len(category_names)):\n print(\"{} accuracy {} precision {} recall {} f1 {}\".format(\n category_names[i],\n (y_pred[:, i] == Y_test_as_array[:, i]).mean(), # accuracy\n precision_score(Y_test_as_array[:, i], y_pred[:, i], average=None), # precision\n recall_score(Y_test_as_array[:, i], y_pred[:, i], average=None), # recall\n f1_score(Y_test_as_array[:, i], y_pred[:, i], average=None) # f1\n ))\n print(\"mean accuracy {}\".format((y_pred == Y_test_as_array).mean().mean()))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Simple wrapper around scikit-learn's learning_curve function.
def learning_curve(self, features, labels):
    return learning_curve(self._model, features, labels)
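A usage sketch, assuming self._model is a scikit-learn estimator and that the inner call is sklearn.model_selection.learning_curve; the toy dataset and estimator below are placeholders.

from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import learning_curve

X, y = load_iris(return_X_y=True)
estimator = LogisticRegression(max_iter=1000)

# The wrapper simply forwards to sklearn, which returns the training-set sizes
# plus per-fold train/validation scores for each size.
train_sizes, train_scores, valid_scores = learning_curve(estimator, X, y, cv=5)
print(train_sizes)
print(train_scores.mean(axis=1))
print(valid_scores.mean(axis=1))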
[ "def from_sklearn(est):\n return from_sklearn.dispatch(est)", "def learning_curve(X_train_scaled, Y_train, classifier, score, train_sizes: list):\n # initial parameters\n RANDOM_STATE = 42\n TRAIN_SIZES = train_sizes\n K_FOLDS = 5\n SCORER = make_scorer(score)\n SAMPLING_RATIO = 1\n\n # parameters\n X = X_train_scaled\n Y = Y_train\n classifier = classifier\n\n # Make pipeline (non funziona per cambiamkento alla libreria per RandomUnderSampler\n \"\"\"undersampler = imblearn.under_sampling.RandomUnderSampler(random_state=RANDOM_STATE, sampling_strategy=SAMPLING_RATIO)\n pipeline = Pipeline([('und',undersampler), ('cls',classifier)])\n pipeline.fit(X,Y).predict_proba(Y)\"\"\"\n\n train_sizes, train_scores, test_scores, fit_times, _ = skl.model_selection.learning_curve(classifier, X, Y,\n cv=K_FOLDS,\n train_sizes=TRAIN_SIZES,\n return_times=True,\n n_jobs=-1,\n scoring=SCORER)\n print(train_sizes)\n print(train_scores)\n print(test_scores)\n print(\"mean train scores: \\n\", train_scores.mean(axis=1))\n print(\"mean test scores: \\n\", test_scores.mean(axis=1))\n plt.plot(train_sizes, train_scores.mean(axis=1), label=\"train\")\n plt.plot(train_sizes, test_scores.mean(axis=1), label=\"test\")", "def learning_curve(estimator, X_train, X_valid, score=\"f1\", train_sizes=None,\n hparams=None, shuffle=False, random_state=None):\n\n # check model type\n if isinstance(estimator, NamedEntityRecognitionModel):\n annotation_type = \"annotation\"\n if isinstance(estimator, ModelEnsembleNER):\n annotation_labels = set()\n for model in estimator.models:\n annotation_labels.update(model.entity_labels)\n annotation_labels = list(annotation_labels)\n else:\n annotation_labels = estimator.entity_labels\n elif isinstance(estimator, RelationExtractionModel):\n annotation_type = \"relation\"\n if isinstance(estimator, REModelEnsemble):\n annotation_labels = set()\n for model in estimator.models:\n annotation_labels.update(model.relation_labels)\n annotation_labels = list(annotation_labels)\n else:\n annotation_labels = estimator.relation_labels\n else:\n raise TypeError(\"Given estimator is of type '{}' which is not supported\".format(type(estimator)))\n\n # determine annotation label\n if annotation_labels:\n if len(annotation_labels) > 1:\n log.debug(\"Learning curves currently support either one label or all labels: building for all labels\")\n annotation_label = None\n else:\n annotation_label = annotation_labels[0]\n else:\n annotation_label = None\n\n # make default train sizes as fractions\n if not train_sizes:\n train_sizes = [s * 0.1 for s in range(1, 11)]\n\n # shuffle training data if necessary\n if shuffle:\n if random_state:\n random.Random(random_state).shuffle(X_train)\n else:\n random.shuffle(X_train)\n\n # collect scores for each training subset\n train_scores = []\n valid_scores = []\n\n for train_size in train_sizes:\n docs_to_train = X_train[:int(train_size * len(X_train))]\n if not docs_to_train:\n log.debug(\"No documents to train: check your train sizes\")\n\n base_estimator = clone(estimator)\n\n if hparams:\n base_estimator.fit(X=docs_to_train, y=None, **hparams)\n else:\n base_estimator.fit(X=docs_to_train, y=None)\n\n X_train_pred = base_estimator.transform(docs_to_train)\n X_valid_pred = base_estimator.transform(X_valid)\n\n score_train = annotation_precision_recall_f1score(\n X_train_pred, docs_to_train, ann_label=annotation_label, ann_type=annotation_type)\n\n score_valid = annotation_precision_recall_f1score(\n X_valid_pred, X_valid, ann_label=annotation_label, 
ann_type=annotation_type)\n\n if score == \"precision\":\n train_scores.append(score_train[0])\n valid_scores.append(score_valid[0])\n elif score == \"recall\":\n train_scores.append(score_train[1])\n valid_scores.append(score_valid[1])\n elif score == \"f1\":\n train_scores.append(score_train[2])\n valid_scores.append(score_valid[2])\n else:\n raise ValueError(\"Cannot determine the type of scoring '{}'\".format(score))\n\n return train_sizes, train_scores, valid_scores", "def linear_regression():\n return LinearRegression()", "def train_linear(X_train, y_train):\n reg = LinearRegression()\n reg.fit(X_train, y_train)\n return reg", "def test_learning_curves():\n\n p = pipeline.Pipeline(\n FX_TRAIN,\n FX_TEST,\n FX_LOOKUP,\n RESULTS_DIR\n )\n\n data = p.learning_curves()", "def fit_sklearn(self):\n self.model = linear_model.LinearRegression()\n self.model.fit(self.x_train, self.y_train)", "def __train_perceptron(x_train, y_train):\n pass", "def train(self, X, y):", "def train(self, x, y, initial=(1., 1e-6, 1.)):\n self._weights = curve_fit(self.__curve__, x, y, p0=initial)[0]", "def train_model(features, target):\n lr = LinearRegression()\n lr.fit(features, target)\n y_pred = lr.predict(features)\n r2 = lr.score(features, target)\n rsme = mean_squared_error(target, y_pred)\n print('R Squared:' + str(r2), 'RSME:' + str((rsme**.5)))\n return lr", "def __init__(self, reg_penalty='l2', reg_inv=1.0, k_fold=5, random_state=0):\n print(\"Initialize model Logistic Regression\")\n self.reg_penalty = reg_penalty\n self.reg_inv = reg_inv\n self.k_fold = k_fold\n self.random_state = random_state\n self.model = sklearn.linear_model.LogisticRegression(penalty=self.reg_penalty,\n C=self.reg_inv,\n max_iter=1000, \n random_state=self.random_state)", "def plot_learning_curve(model, X_train, X_test, y_train, y_test):\n\n m, train_scores, valid_scores = learning_curve(estimator = model, \n X = X_train, y = y_train.ravel(), train_sizes = np.linspace(0.1,1.0, 80))\n\n train_cv_err = np.mean(train_scores, axis=1)\n test_cv_err = np.mean(valid_scores, axis=1)\n tr, = plt.plot(m, train_cv_err)\n ts, = plt.plot(m, test_cv_err)\n plt.legend((tr, ts), ('training error', 'test error'), loc = 'best')\n plt.title('Learning Curve')\n plt.xlabel('Data Points')\n plt.ylabel('Accuracy')", "def test_sklearn_compatible(estimator):\n check_estimator(estimator)", "def train_linear(train_df, target, alpha=0, l1_ratio=0.5):\n assert 0 <= l1_ratio <= 1\n if alpha == 0:\n lm = sklearn.linear_model.LinearRegression()\n elif l1_ratio == 0:\n lm = sklearn.linear_model.Ridge(alpha=alpha)\n elif l1_ratio == 1:\n lm = sklearn.linear_model.Lasso(alpha=alpha)\n else:\n lm = sklearn.linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio)\n lm.fit(train_df, target)\n return lm", "def plot_learning_curve(estimator, x, y, cv=5, n_jobs=1,\n train_sizes=np.linspace(.05, 1., 20)):\n\n # 套接learning_curve,返回训练集和测试集的score和对应的size\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, x, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n \"\"\"\n eg: train_scores shape = (20, 5)\n array([[ 0.8571, 0.9143, 0.9143, 0.9143, 0.9143],\n [ 0.8169, 0.8732, 0.8732, 0.8732, 0.8732],\n [ 0.8208, 0.8396, 0.8396, 0.8396, 0.8396],\n [ 0.8028, 0.8099, 0.8099, 0.8099, 0.8099],\n [ 0.8146, 0.8202, 0.8146, 0.8146, 0.8146],\n [ 0.8263, 0.8263, 0.8216, 0.8216, 0.8216],\n [ 0.8153, 0.8273, 0.8112, 0.8112, 0.8112],\n [ 0.8063, 0.8169, 0.7993, 0.7993, 0.7993],\n [ 0.8156, 0.8281, 0.8063, 0.8063, 0.8063],\n [ 0.8169, 0.8254, 0.8254, 0.8254, 0.8254],\n [ 
0.8184, 0.8235, 0.8261, 0.8312, 0.8312],\n [ 0.815 , 0.822 , 0.8197, 0.822 , 0.822 ],\n [ 0.816 , 0.8203, 0.8203, 0.8182, 0.8182],\n [ 0.8133, 0.8173, 0.8173, 0.8253, 0.8253],\n [ 0.8109, 0.8127, 0.8146, 0.8202, 0.8221],\n [ 0.8155, 0.819 , 0.8172, 0.8207, 0.8225],\n [ 0.8149, 0.8248, 0.8231, 0.8248, 0.8198],\n [ 0.8187, 0.8281, 0.825 , 0.8328, 0.8219],\n [ 0.8254, 0.8299, 0.8284, 0.8343, 0.8166],\n [ 0.8272, 0.8315, 0.8301, 0.8343, 0.8174]])\n \"\"\"\n train_scores_mean = np.mean(train_scores, axis=1)\n \"\"\"\n eg: train_scores_mean\n array([ 0.9029, 0.862 , 0.8358, 0.8085, 0.8157, 0.8235, 0.8153,\n 0.8042, 0.8125, 0.8237, 0.8261, 0.8201, 0.8186, 0.8197,\n 0.8161, 0.819 , 0.8215, 0.8253, 0.8269, 0.8281])\n \"\"\"\n train_scores_std = np.std(train_scores, axis=1)\n\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n \"\"\"\n eg: test_scores_std\n array([ 0.0751, 0.0607, 0.0314, 0.0059, 0.0047, 0.0066, 0.0074,\n 0.0051, 0.0107, 0.0115, 0.0107, 0.012 , 0.0142, 0.018 ,\n 0.0134, 0.0167, 0.0167, 0.0127, 0.0128, 0.0113])\n \"\"\"\n # 开始可视化学习曲线\n plt.figure()\n plt.title('learning curve')\n plt.xlabel(\"train sizes\")\n plt.ylabel(\"scores\")\n plt.gca().invert_yaxis()\n plt.grid()\n # 对train_scores的均值和方差区域进行填充\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std,\n alpha=0.1, color=\"g\")\n # 对test_scores的均值和方差区域进行填充\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std,\n alpha=0.1, color=\"r\")\n # 把train_scores_mean标注圆圈\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"g\", label=\"train scores\")\n # 把ttest_scores_mean标注圆圈\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"r\", label=\"test scores\")\n plt.legend(loc=\"best\")\n plt.draw()\n plt.gca().invert_yaxis()\n plt.show()", "def fit(self, target):", "def train_logisticRegression(data: np.array, labels: np.array)->None:\n\n n_examples = np.size(data, 0)\n n_features = np.size(data, 1)\n n_categories = np.size(labels, 1)\n\n data = np.hstack((np.ones((n_examples, 1)), data))\n\n print(data[0:5, :])\n\n X_train, X_test, y_train, y_test, idx_test = split_data(data, labels, 0.7)\n\n convergence_goal = 1e-3\n learning_rate = 0.01\n\n theta = np.random.uniform(size=((n_features+1, n_categories)))\n\n for i in range(n_categories):\n\n cost_var = 1\n\n previous_cost = 1e6\n iterations = 0\n cost_to_plot = []\n\n while cost_var > convergence_goal:\n iterations += 1\n cost, grad = costFunction(X_train, y_train[:, i], theta[:, i])\n theta[:, i] = update_theta(theta[:, i], grad, learning_rate)\n cost_var = previous_cost - cost\n previous_cost = cost\n if iterations == 1: cost_var = 1\n cost_to_plot.append(cost)\n # print(cost)\n\n plt.plot(range(iterations), cost_to_plot, 'g-', label = 'cost')\n plt.xlabel('iterations')\n plt.ylabel('cost')\n # plt.show()\n\n predictions = lrPredict(theta, X_test)\n\n print(predictions[0:5, :])\n print(y_test[0:5, :])\n\n accuracy = np.mean([p == l for p, l in zip(predictions, y_test)])\n print(\"Accuracy = {}\".format(accuracy))\n\n pass", "def nnRegression(data):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create an agent membership
def create_agent_membership(self, context, agent_membership):
    am = agent_membership['agent_membership']
    with context.session.begin(subtransactions=True):
        am_db = AgentMembership(id=am['id'], ip_address=am['ip_address'])
        context.session.add(am_db)
    return self._make_agent_membership_dict(am_db)
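The method above unwraps the payload from an 'agent_membership' key before persisting id and ip_address; a minimal sketch of that payload shape (the values are made-up examples) follows.

# Hypothetical request payload; the method above reads the inner dict and
# persists its 'id' and 'ip_address' fields.
agent_membership = {
    'agent_membership': {
        'id': 'f2c3a7a2-9d4e-4c1b-8f0a-1234567890ab',  # example UUID
        'ip_address': '192.0.2.10',                    # example address
    }
}
am = agent_membership['agent_membership']
print(am['id'], am['ip_address'])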
[ "def create_agent_membership(self, body=None):\n return self.post(self.agent_memberships_path, body=body)", "def create_members(client, master_detector_id):\n for account in aws_account_dict.keys():\n # Don't invite yourself, so skip the master account\n if account != master_aws_account_number:\n client.create_members(\n AccountDetails=[\n {\n 'AccountId': account,\n 'Email': aws_account_dict['account']\n }\n ],\n DetectorId=master_detector_id\n )\n\n print('Added Account {monitored} to member list in GuardDuty in Account {master}'.format(\n monitored=account, master=master_aws_account_number))", "def add_member(account, aws_region, master_detector_id):\n gd_client = assume_role(master_aws_account_number, cloudformation_exec_role, aws_region)\n\n gd_client.create_members(\n AccountDetails=[\n {\n 'AccountId': account,\n 'Email': aws_account_dict[account]\n }\n ],\n DetectorId=master_detector_id\n )\n\n print('Added Account {monitored} to member list in GuardDuty master account {master} for region {region}'.format(\n monitored=account, master=master_aws_account_number, region=aws_region))", "def create_agent_group():\n try:\n existing_agent_group = Group.objects.get(name='Agent')\n except ObjectDoesNotExist:\n existing_agent_group = None\n\n if not existing_agent_group:\n\n new_group, created = Group.objects.get_or_create(name='Agent')\n if created:\n # Set up permissions\n content_type = ContentType.objects.get_for_model(User)\n\n change_campaign = Permission.objects.get(content_type=content_type, codename='change_campaign')\n\n add_round = Permission.objects.get(content_type=content_type, codename='add_round')\n change_round = Permission.objects.get(content_type=content_type, codename='change_round')\n\n add_address = Permission.objects.get(content_type=content_type, codename='add_address')\n change_address = Permission.objects.get(content_type=content_type, codename='change_address')\n\n add_street = Permission.objects.get(content_type=content_type, codename='add_street')\n change_street = Permission.objects.get(content_type=content_type, codename='change_street')\n\n # Add permissions to newly-created group.\n new_group.permissions.add(\n change_campaign,\n add_round,\n change_round,\n add_street,\n change_street,\n add_address,\n change_address,\n )\n new_group.save()\n\n return", "def create_agent(self, status):\n x, y = self.random_position()\n\n self.population.append(Agent(x=x, y=y, status=status, \n id = len(self.population)+1))", "def test_projects_agents_create(self):\n pass", "def create_participant(sender, **kw):\n user = kw['instance']\n if kw['created']:\n participant = Participant(user=user, agreed=False)\n participant.save()", "def create_member(self, context, member):\n LOG.info(\"Received request 'Create Member' for Pool:%(pool_id)s \",\n {'pool_id': member['pool_id']})\n arg_dict = {'context': context,\n lb_const.MEMBER: member,\n }\n self._send_event(lb_const.EVENT_CREATE_MEMBER_V2, arg_dict,\n serialize=True,\n binding_key=member[lb_const.POOL]['loadbalancer_id'],\n key=member['id'])", "def create_creator_member(sender, **kwargs):\n if kwargs.get('created'):\n league = kwargs['instance']\n league.members.create(user=league.creator,\n fb_uid=league.creator.fb_uid,\n status='creator')", "def _create_member(self, **kwargs):\n category_name = kwargs.pop('category_name', Category.ACTIVE)\n params = {\n 'category': Category.objects.get(name=category_name),\n 'first_payment_month': 8,\n 'first_payment_year': 2015,\n 'has_student_certificate': False,\n 
'has_subscription_letter': True,\n 'has_collaborator_acceptance': False,\n }\n params = {k: kwargs.pop(k, v) for k, v in params.items()}\n member = Member.objects.create(**params)\n\n # create the related person\n params = {\n 'membership': member,\n 'nickname': 'test-nick',\n 'picture': 'fake-pic',\n }\n params = {k: kwargs.pop(k, v) for k, v in params.items()}\n Person.objects.create(**params)\n\n assert not kwargs, kwargs # would indicate a misuse of the parameters\n return member", "def create_node(self, participant, network):\n return Agent(network=network, participant=participant)", "def create(self, identity, data=None, record=None, **kwargs):\n if system_process in identity.provides:\n return\n\n member = {\n \"type\": \"user\",\n \"id\": str(identity.id),\n }\n self.service.members.add(\n # the user is not yet owner of the community (is being added)\n # therefore we cannot use `identity`\n system_identity,\n record.id,\n {\"members\": [member], \"role\": current_roles.owner_role.name},\n uow=self.uow,\n )\n\n # Invalidate the membership cache\n on_user_membership_change(identity=identity)", "def post(self):\n\n parser = reqparse.RequestParser()\n parser.add_argument('first_name', required=True, type=str)\n parser.add_argument('last_name', required=False, type=str)\n parser.add_argument('birthdate', required=True, type=parse_iso_date)\n parser.add_argument('email', required=True, type=str)\n parser.add_argument('password', required=True, type=str)\n parser.add_argument('login', required=True, type=str)\n parser.add_argument('person_id', required=False, type=int)\n parser.add_argument('room_id', required=False, type=int)\n parser.add_argument('move_in_date', required=False, type=parse_iso_date)\n parser.add_argument('previous_dorm', required=False, type=str)\n args = parser.parse_args()\n\n room = None\n swdd_person_id = None\n\n if args.room_id is not None:\n room = Room.get(args.room_id)\n\n if room is None:\n abort(404, message=\"Invalid room\", code=\"invalid_room\")\n\n if args.person_id is not None:\n swdd_person_id = get_swdd_person_id(args.first_name, args.last_name, args.birthdate)\n\n # some tenants have an additional semicolon added to their last names\n if swdd_person_id is None:\n swdd_person_id = get_swdd_person_id(\n args.first_name, args.last_name + \";\", args.birthdate\n )\n\n if swdd_person_id != args.person_id:\n abort(400, message=\"Person id does not match\", code=\"person_id_mismatch\")\n\n name = get_name_from_first_last(args.first_name, args.last_name)\n\n try:\n mr = create_member_request(name, args.email, args.password, args.login, args.birthdate,\n swdd_person_id, room, args.move_in_date, args.previous_dorm)\n except UserExistsException:\n abort(400, message=\"User already exists\", code=\"user_exists\")\n except UserExistsInRoomException:\n abort(400, message=\"A user with a similar name already lives in this room\",\n code=\"similar_user_exists\")\n except EmailTakenException:\n abort(400, message=\"E-Mail address already in use\", code=\"email_taken\")\n except LoginTakenException:\n abort(400, message=\"Login already in use\", code=\"login_taken\")\n except IllegalEmailError:\n abort(400, message=\"Illegal E-Mail address\", code=\"email_illegal\")\n except IllegalLoginError:\n abort(400, message=\"Illegal login\", code=\"login_illegal\")\n except NoTenancyForRoomException:\n abort(400, message=\"The given person has no tenancy for the room\",\n code=\"no_tenancy_in_room\")\n except MoveInDateInvalidException:\n abort(400, message=\"The move-in date is 
invalid\", code=\"move_in_date_invalid\")\n else:\n session.session.commit()\n\n return mr.id", "def test_admin_create_member(self):\n before_count = len(Member.query.all())\n url = \"/admin/member/new/?url=%2Fadmin%2Fmember%2F\"\n data = {\n \"name\": \"New member\",\n \"current\": \"y\",\n \"monitored\": \"y\",\n \"house\": \"__None\",\n \"party\": \"__None\",\n \"province\": \"__None\",\n \"bio\": \"\",\n \"pa_link\": \"\",\n }\n response = self.make_request(url, self.user, data=data, method=\"POST\")\n after_count = len(Member.query.all())\n self.assertEqual(302, response.status_code)\n self.assertLess(before_count, after_count)\n\n created_member = Member.query.filter(Member.name == data[\"name\"]).scalar()\n self.assertTrue(created_member)\n self.created_objects.append(created_member)", "def test_alert_create_for_site_members(self):\n pass", "def test_agent_creation():\n agent = AgentFactory()\n agent.name = 'agent test name'\n agent.save()\n assert agent.name == 'agent test name'", "def invite_members(aws_region, account, master_detector_id):\n\n gd_client = assume_role(master_aws_account_number, cloudformation_exec_role, aws_region)\n\n gd_client.invite_members(\n AccountIds=[\n account\n ],\n DetectorId=master_detector_id,\n Message=gd_invite_message\n )\n\n print('Invited Account {monitored} to GuardDuty master account {master} in region {region}'.format(\n monitored=account, master=master_aws_account_number, region=aws_region))", "def _generate_agent_node(self, agent):\n agent = URIRef(\"{}{}\".format(PIT[\"agent\"], agent))\n #add agent to graph\n self.graph.add( (agent, RDF.type, PROV.Agent) )\n self.graph.add( (self.entity, PROV.wasAttributedTo, agent) )", "def test_api__create_workspace_member_role__ok_200__new_user(self):\n self.testapp.authorization = (\"Basic\", (\"admin@admin.admin\", \"admin@admin.admin\"))\n # create workspace role\n params = {\n \"user_id\": None,\n \"user_public_name\": None,\n \"user_email\": \"bob@bob.bob\",\n \"role\": \"content-manager\",\n }\n res = self.testapp.post_json(\"/api/v2/workspaces/1/members\", status=200, params=params)\n user_role_found = res.json_body\n assert user_role_found[\"newly_created\"] is True\n assert user_role_found[\"email_sent\"] is False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that load_class works correctly and raises the right exceptions.
def test_load_class():
    full_classname = 'collections.namedtuple'
    cls_ = load_class(full_classname)
    assert cls_ is collections.namedtuple

    with pytest.raises(ValueError):
        full_classname = 'collections.Foobar'
        load_class(full_classname)

    with pytest.raises(ImportError):
        full_classname = 'barfoo.Foobar'
        load_class(full_classname)
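For context, a minimal load_class consistent with these assertions might look like the sketch below (ImportError for a missing module, ValueError for a missing attribute); this is an assumption, not the project's actual implementation.

import importlib

def load_class(full_classname):
    # Split 'package.module.ClassName' into a module path and an attribute name.
    module_name, _, class_name = full_classname.rpartition('.')
    module = importlib.import_module(module_name)  # raises ImportError for 'barfoo'
    try:
        return getattr(module, class_name)
    except AttributeError:
        raise ValueError('No class named %r in module %r' % (class_name, module_name))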
[ "def test_load_from_class():\n cl = ClassLoader()\n\n cf = ClassFile.create('TestClass')\n cl.update(cf)\n\n assert cl.load('TestClass') is cf", "def test_load_testcase(self):\n tests = self.loader.load(\"tests.sampletest.hellotest.HelloTest\")\n self.assertEqual(len(tests), 1)\n from tests.sampletest.hellotest import HelloTest\n\n self.assertEqual(type(tests[0]), HelloTest)", "def test_load_from_directory():\n with tempfile.TemporaryDirectory() as dir:\n shutil.copy(\n os.path.join(\n os.path.dirname(__file__),\n 'data',\n 'HelloWorld.class'\n ),\n dir\n )\n\n cl = ClassLoader()\n cl.update(dir)\n\n assert isinstance(cl.load('HelloWorld'), cl.klass)", "def test_load(self):\n classifier_load_test = Classifier(\"data/train\",\"data/test\",\"test\")\n try:\n classifier_load_test.load(\"test\")\n self.assert_(True)\n except Exception as fallo_abrir:\n self.assert_(False)", "def test_load_data(self):\n pass", "def test_class_errored(self, cls, exception):", "def test_no_classes_found(self):\r\n from rapidsms.backends.base import BackendBase\r\n module = import_module('rapidsms.utils.test_modules')\r\n self.assertRaises(AttributeError, get_class, module, BackendBase)", "def test_class_started(self, cls):", "def test_load(self):\n self.create_label_directories()\n\n class_to_label_map, label_to_class_map = load_class_id_to_label_mapping(PROJECT_NAME,\n Path(MLLIB_TMP_IMAGE_ROOT))\n\n v = sorted(class_to_label_map.values())\n\n actual = v\n expected = [\"Abigail Johnson\", \"Bridget Barnes\", \"Cheryl Cooper\"]\n\n result = actual == expected\n self.assertTrue(result, \"actual does not match expected. \\nActual:\\n%s, \\nExpected:\\n%s\" % (actual, expected))\n\n v = sorted(label_to_class_map.values())\n\n actual = v\n expected = [0, 1, 2]\n\n result = actual == expected\n self.assertTrue(result, \"actual does not match expected. 
\\nActual:\\n%s, \\nExpected:\\n%s\" % (actual, expected))", "def test3(self):\n # arrange\n model_manager = ModelManager()\n\n # act\n exception_raised = False\n exception_message = \"\"\n try:\n model_manager.load_models(configuration=[\n {\n \"module_name\": \"tests.model_manager_test\",\n \"class_name\": \"SomeClass\" # using the class defined at the top of this file to test\n }\n ])\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n\n # assert\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message == \"The ModelManager can only hold references to objects of type MLModel.\")", "def test_downloader_load_class():\n dl = Downloader('http://www.comicextra.com/daredevil-2016/chapter-600/full')\n klass = dl.load_class()\n assert klass == ComicExtra", "def testLoadError(self):\n self.assertRaises(SyntaxError, self.simple.loadFromStream, \"bork bork bork\")\n self.assertRaises(NameError, self.simple.loadFromStream, \"config.f = bork\")", "def test_loader(cls):\r\n return _test_loader_factory(cls)", "def test_multiple_classes_found(self):\r\n module = import_module('rapidsms.utils.test_modules')\r\n self.assertRaises(AttributeError, get_class, module, ParentA)", "def test_initialization(self):\n test_node = class_dependency.JavaClass(self.TEST_PKG, self.TEST_CLS)\n self.assertEqual(test_node.name, f'{self.TEST_PKG}.{self.TEST_CLS}')\n self.assertEqual(test_node.package, self.TEST_PKG)\n self.assertEqual(test_node.class_name, self.TEST_CLS)", "def test_class1_useclass_error(self):\n obj0 = classes.Class1()\n self.assertRaises(TypeError, classes.useclass(obj0))", "def test_import_string_missing_class_or_attribute(self):\n valid_module = 'ttgn.pokedex'\n invalid_class = 'NonexistentClass'\n with pytest.raises(ImportError) as error:\n utils.import_string('{}.{}'.format(valid_module, invalid_class))\n assert 'Module {} has no class or attribute {}'.format(\n valid_module, invalid_class) == str(error.value)", "def test_component_loading(self):\n # we only have two component\n self.assertEqual(1, len(self.component_manager.components))\n self.assertTrue(TestDriver in self.component_manager)\n \n # make sure it is the right one!\n self.assertEqual(self.component, self.component_manager[TestDriver])\n \n def test_not_component_exception():\n \"\"\"\n Raises exception when called\n \"\"\"\n self.component_manager[TestNotLoadableComponant]\n self.assertRaises(core.TicError, test_not_component_exception)\n \n def test_exception_on_init():\n \"\"\"\n TODOC\n \"\"\"\n self.component_manager[TestErrorOnInitComponant]\n self.assertRaises(core.TicError, test_exception_on_init)", "def test_load_model_method(self):\n # arrange\n # instantiating the model manager class\n model_manager = ModelManager()\n\n # adding the model\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n exception_raised = False\n model_object = None\n # accessing the MLModelMock model object\n try:\n model_object = model_manager.get_model(qualified_name=\"qualified_name\")\n except Exception as e:\n exception_raised = True\n print_tb(e)\n\n # assert\n self.assertFalse(exception_raised)\n self.assertTrue(model_object is not None)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
irq_handler contains the code you want to execute when the interrupt occurs. Define your own callback function here by rewriting the code. We make an LED flash in this example.
def irq_handler():
    # open an LED session
    with LEDs() as LED:
        # specify the LED which you want to control
        led = Led.LED1
        # specify the LED status
        led_on_off = True
        # writes values 10 times, which makes LED1 flash for 3 seconds
        for x in range(0, 10):
            # turn LED1 on or off
            LED.write(led, led_on_off)
            # add a short delay
            time.sleep(0.3)
            # if the LED is on, set the parameter to off
            # if the LED is off, set the parameter to on
            led_on_off = not led_on_off
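The handler above still has to be registered with an interrupt source. The original example targets the NI ELVIS LED API; as a hedged illustration of the same callback pattern on a different platform, the sketch below wires a handler up with RPi.GPIO (a swapped-in technique; the pin number, edge, and debounce value are arbitrary assumptions).

import RPi.GPIO as GPIO

IRQ_PIN = 17  # hypothetical input pin wired to the interrupt source

GPIO.setmode(GPIO.BCM)
GPIO.setup(IRQ_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# Fire the handler on a falling edge; bouncetime debounces mechanical switches.
GPIO.add_event_detect(IRQ_PIN, GPIO.FALLING, bouncetime=200)
# RPi.GPIO passes the channel number to the callback; the lambda adapts it to
# the zero-argument irq_handler defined above.
GPIO.add_event_callback(IRQ_PIN, lambda channel: irq_handler())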
[ "def irq(self, handler: Callable, trigger: int, hard: bool = False) -> Callable:", "def enable_irq(state:int):", "def enable_irq(state=True):\n pass", "def disable_irq() -> int:", "def enable_irq(state: bool = True, /) -> None:", "def disable_irq() -> bool:", "def test_interrupt_callback(self, mock_gpio):\n blk = GPIOInterrupts()\n self.configure_block(blk, {})\n blk._callback(0)\n self.assert_num_signals_notified(1)", "def test_interrupt(self):\n with patch('RPi.GPIO.setmode') as mock_setmode:\n gpio = GPIODevice()\n with patch('RPi.GPIO.setup') as mock_setup:\n with patch('RPi.GPIO.add_event_detection') as mock_detection:\n with patch('RPi.GPIO.add_event_callback') as mock_callback:\n gpio.interrupt(self._callback, 0)\n with patch('RPi.GPIO.cleanup') as mock_cleanup:\n gpio.close()\n mock_detection.called_once_with(0, GPIO.BOTH)\n mock_callback.called_once_with(0, self._callback)", "def stopCallback (self):\n GPIO.remove_event_detect (self.IRQ_PIN)\n self.hasCallback = False", "def interrupt_handler(signal, frame):\n print(\"Shutting down IDS\")\n sys.exit(0)", "def interrupt(self, callback, pin, pull_up_down=None, bouncetime=200):\n with self._gpio_lock:\n if pull_up_down is not None:\n if pull_up_down:\n GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n # Use falling detection since we are pulled up\n GPIO.add_event_detect(\n pin, GPIO.FALLING, bouncetime=bouncetime)\n else:\n GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n # Use rising detection since we are pulled down\n GPIO.add_event_detect(\n pin, GPIO.RISING, bouncetime=bouncetime)\n else:\n GPIO.setup(pin, GPIO.IN)\n GPIO.add_event_detect(pin, GPIO.BOTH, bouncetime=bouncetime)\n GPIO.add_event_callback(pin, callback)\n self.logger.debug(\n \"Set interrupt callback of GPIO pin {}\".format(pin))", "def imu_fth_isr(gpio, level, tick):\n isr_time = time.time()\n\n # Sometimes INT1 can trigger again as the FIFO is being read and filled\n # back up at the same time. If the time since the last tick is less than\n # 0.1s then exit the ISR.\n global last_tick\n MIN_TICK_DIFF_US = 10**5 \n tick_diff = pigpio.tickDiff(last_tick, tick)\n print(f\"Time since last tick {tick_diff / 10**6} seconds\")\n if tick_diff < MIN_TICK_DIFF_US:\n return\n\n global fifo_start\n print(f\"Interrupt at {isr_time}\")\n print(f\"FIFO fill time: {isr_time - fifo_start:4.03f} seconds\")\n fifo_start = isr_time\n\n # Read FIFO status\n status1 = imu._fifo_status1\n status2 = imu._fifo_status2\n status3 = imu._fifo_status3\n status4 = imu._fifo_status4\n\n # Number of unread words (16 bits) \n unread_words = ((status2 & 0x0F) << 8) + status1\n print(f\"Words in FIFO: {unread_words}\")\n\n # Pattern index\n # In our case, the accelerometer and gyroscope data rates are equal, so the\n # pattern is in [0:5] where\n # 0 -> Gx\n # 1 -> Gy\n # 2 -> Gz\n # 3 -> Ax\n # 4 -> Ay\n # 5 -> Az\n pattern_index = (status4 << 8) + status3\n print(f\"Index of next reading: {pattern_index}\")\n\n # Read in multiples of 6, the number of readings from Gx to Az\n BYTES_PER_WORD = 2\n WORDS_PER_PATTERN = 6\n words_to_read = unread_words // WORDS_PER_PATTERN * WORDS_PER_PATTERN\n buffer_size = words_to_read * BYTES_PER_WORD\n buffer = bytearray(buffer_size)\n FIFO_DATA_OUT_L = bytearray(b'\\x3E')\n\n # Read FIFO data into buffer\n start_time = time.time()\n imu.i2c_device.write_then_readinto(FIFO_DATA_OUT_L, buffer)\n end_time = time.time()\n total_read_time = end_time - start_time\n print(f\"{buffer_size} bytes read in {total_read_time:.6f} seconds. 
{buffer_size/total_read_time:.0f} bytes/s\")\n\n # Read FIFO status\n status1 = imu._fifo_status1\n status2 = imu._fifo_status2\n status3 = imu._fifo_status3\n status4 = imu._fifo_status4\n unread_words = ((status2 & 0x0F) << 8) + status1\n print(f\"Words in FIFO: {unread_words}\")\n pattern_index = (status4 << 8) + status3\n print(f\"Index of next reading: {pattern_index}\")\n\n last_tick = tick\n\n # Print data\n PREVIEW_BYTES = 12\n print(f\"buffer = {buffer[:PREVIEW_BYTES].hex()} ... {buffer[-PREVIEW_BYTES:].hex()} | Len: {len(buffer)}\")\n data = [parse_fifo_data(buffer[i:i+2]) for i in range(0, len(buffer), 2)]\n print(f\"data = [{', '.join(map(str, data[:PREVIEW_BYTES]))}, ..., {', '.join(map(str, data[-PREVIEW_BYTES:]))}] | Len: {len(data)}\")\n\n print()", "def callback(self, user_gpio, edge=RISING_EDGE, func=None):\n return _callback(self._notify, user_gpio, edge, func)", "def interruptible(fn):\n logger.debug(\"In @interruptible: Decorating method \"+ fn.__name__)\n fn._morse_service_interruptible = True\n\n return fn", "def add_button_callback(self, button, function, event=BUTTON_DOWN, threaded=True):\n\t\tif event == LCD.BUTTON_DOWN:\n\t\t\tedge = 'falling'\n\t\telif event == LCD.BUTTON_UP:\n\t\t\tedge = 'rising'\n\t\telif event == LCD.BUTTON_EITHER:\n\t\t\tedge = 'both'\n\t\tRPIO.add_interrupt_callback(button, function, edge, RPIO.PUD_UP, threaded, 20)", "def startCallback (self):\n if self.hasCallback:\n return\n # set up IRQ interrupt function. GPIO.setmode should alreay have been called\n GPIO.setup(self.IRQ_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n GPIO.add_event_detect (self.IRQ_PIN, GPIO.FALLING)\n GPIO.add_event_callback (self.IRQ_PIN, AHF_LickDetectorCallback) \n self.hasCallack = True\n # state of touches from one invocation to next, used in callback to separate touches from untouches\n self.prevTouches = self.mpr121.touched()", "def irq(self):\n if self._irq is None:\n raise NotImplementedError(\"Peripheral info does not have an IRQ line\"\n .format(self))\n return self._irq", "def led_clicked(arduino, led):\r\n \r\n led.update_state()\r\n led.update_message()\r\n arduino_write(arduino, (str(led.pin) + ':' + str(int(led.state))))", "def callback(self, handler, arg=None):\r\n self.__handler__ = handler\r\n self.__args__ = arg\r\n self.__set_alarm__()\r\n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This property returns the training data, and loads the training data if it doesn't exist. Note that this function returns the training data and labels in the form ([MPS input size, batch, other dimensions], [batch, classifications]), in accordance with how they are used in the MPS and MPSOptimizer classes. If the data is required in the form ([batch, MPS input size, other dimensions], [batch, classifications]), the variable _training_data should be used instead.
def training_data(self):
    if self._training_data is None:
        self._load_training_data()
    if self._swapped_training_data is None:
        self._swapped_training_data = {}
        for key, value in self._training_data.items():
            self._swapped_training_data[key] = value
    return self._swapped_training_data
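The docstring above distinguishes a [batch, MPS input size, other dimensions] layout from the [MPS input size, batch, other dimensions] layout returned here; a hedged NumPy sketch of that axis swap, with made-up shapes, is shown below.

import numpy as np

# Illustrative batch in the [batch, MPS input size, other dimensions] layout.
batch_first = np.random.rand(128, 784, 2)   # 128 samples, 784 sites, local dimension 2

# Swap the first two axes to obtain the [MPS input size, batch, other dimensions]
# layout that the MPS/MPSOptimizer code consumes.
mps_first = np.swapaxes(batch_first, 0, 1)

print(batch_first.shape)  # (128, 784, 2)
print(mps_first.shape)    # (784, 128, 2)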
[ "def load_training_data(self) -> Tuple[List[np.ndarray], np.ndarray]:\n return self._load_set(config.TRAIN_DIR, True)", "def training_data(self) -> List[state_domain.TrainingDataDict]:\n return self._training_data", "def load_training_data(self):\n self.flux_data = pd.read_csv(settings['RAW_TRAINING_PATH'])\n self.meta_data = pd.read_csv(settings[\"RAW_TRAINING_METADATA_PATH\"])\n\n # Label folds\n y = self.meta_data['target']\n folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=1)\n kfold_indices = -1*np.ones(len(y))\n for idx, (fold_train, fold_val) in enumerate(folds.split(y, y)):\n kfold_indices[fold_val] = idx\n self.meta_data['fold'] = kfold_indices\n\n self.dataset_name = 'train'", "def load_data_and_labels(self):\n gen = image.ImageDataGenerator()\n target_size = (224,224)\n if self.preprocess:\n print('Preprocessing data...')\n if not os.path.isdir(self.pproc_dir()):\n os.mkdir(self.pproc_dir())\n \n batch_arr = []\n for ld,segment in [(self.train_dir(), 'train'),\n (self.valid_dir(), 'valid')]:\n # TODO(ness): segment = os.basename(ld)\n flowgen = gen.flow_from_directory(\n ld,\n target_size=target_size,\n shuffle=False,\n class_mode=None,\n batch_size=1)\n # Save the batches using method defined in utils.py\n data = np.concatenate([flowgen.next() for i in range(flowgen.n)])\n batches_dir = self.pproc_dir() + segment + '-bc'\n save_array(batches_dir, data)\n \n # Save the classes.\n cls_dir = self.pproc_dir() + segment + '-cl'\n save_array(cls_dir, flowgen.classes)\n \n batch_arr.append((data, flowgen.classes, flowgen.class_indices))\n \n # Set the data.\n self.training_data = batch_arr[0][0]\n self.validation_data = batch_arr[1][0]\n \n # Classes are zero-indexed and represent a category in\n # numerical form. So if the classes are 'dog' and 'cat',\n # the possible class values will be 0 and 1.\n self.trn_classes = batch_arr[0][1]\n self.val_classes = batch_arr[1][1]\n \n # Labels are the one-hot encoded (i.e. categorical)\n # version of the classes. In other words, if there are\n # 5 classes and an element belongs to class 2,\n # its label will be [0,0,1,0,0] (index 1).\n self.training_labels = to_categorical(batch_arr[0][1])\n self.validation_labels = to_categorical(batch_arr[1][1])\n \n # Class indices are dictionaries of the form\n # {'category_name': 0, 'category_name_2: 1}. They\n # make the mapping between numerical class indices and\n # a human-readable category name. They are (should be...)\n # the same for validation and training, so only load them\n # once, after sanity checking.\n self.cindices = batch_arr[0][2]\n print('Done preprocessing.')\n else:\n print('Loading data...')\n # Load the pre-saved data using methods defined in utils.py. See\n # preprocessing branch for the meaning of the data.\n self.training_data = load_array(self.pproc_dir() + 'train-bc')\n self.validation_data = load_array(self.pproc_dir() + 'valid-bc')\n self.trn_classes = load_array(self.pproc_dir() + 'train-cl')\n self.val_classes = load_array(self.pproc_dir() + 'valid-cl')\n self.training_labels = to_categorical(self.trn_classes)\n self.validation_labels = to_categorical(self.val_classes)\n \n # To get the class indices, we create the generator. 
It's cheap to\n # run since it doesn't actually load all the data.\n flowgen = gen.flow_from_directory(\n self.train_dir(),\n target_size=target_size,\n shuffle=False,\n class_mode=None,\n batch_size=1) \n self.cindices = flowgen.class_indices\n print('Done loading.')", "def get_training_data(self):\n train_data = None\n \n if self.left_data is not None:\n train_data = self.left_data\n \n if self.right_data is not None:\n if train_data is not None:\n train_data = train_data.join(self.right_data)\n else:\n train_data = self.right_data\n \n return train_data", "def load_training_data_generator(self) -> Generator[Tuple[List[np.ndarray], np.ndarray], None, None]:\n return self._load_generator(config.TRAIN_DIR, True)", "def load_data(self):\n\n train_data, eval_data = DataHelper(self.config.train_data_path,\n self.config.eval_data_path,\n self.config.output_path,\n self.config.max_sequence_length).gen_data()\n return train_data, eval_data", "def load_external_training_data(self, data):\n self._train_set = data.to(self.device)\n self.train_loader = torch.utils.data.DataLoader(\n self._train_set, batch_size=self.batch_size)\n print(\"training data loaded\")", "def train(self, trainingData, trainingLabels, validationData, validationLabels):\n self.trainingData = trainingData\n self.trainingLabels = trainingLabels", "def training_set(self):\n return self._training_set", "def set_training_data(self):\n path_outdir = QFileDialog.getExistingDirectory(self, 'Model Directory', self.path_output_classified,\n QFileDialog.DontResolveSymlinks)\n if path_outdir:\n self.path_classified = self.source_training_data.text()\n #print(self.path_classified)", "def make_training_and_validation_data(\n self,\n BatchSize=100,\n TrainingSetInPercent=70,\n ValidationSetInPercent=30,\n NoBatches=False):\n\n if self._DataSet is None:\n raise Exception(\n \"No dataset initialized please call\\\n init_dataset first!\")\n if self._SymmFunSet is None:\n raise Exception(\n \"No symmetry function set initialized please call\\\n create_symmetry_functions or init_dataset first!\")\n\n\n if not NoBatches:\n # Get training data\n self.TrainingBatches = self.get_data(\n BatchSize, TrainingSetInPercent, NoBatches)\n # Get validation data\n self.ValidationBatches = self.get_data(\n BatchSize, ValidationSetInPercent, NoBatches)\n else:\n # Get training data\n temp = self.get_data(BatchSize, TrainingSetInPercent, NoBatches)\n self._TrainingInputs = temp[0][0]\n self._TrainingOutputs = temp[0][1]\n # Get validation data\n temp = self.get_data(BatchSize, ValidationSetInPercent, NoBatches)\n self._ValidationInputs = temp[0][0]\n self._ValidationOutputs = temp[0][0]", "def data_training(self, name_of_file):\n\t\tself.training_inputs = []\n\t\tself.labels = []\n\t\tself.training_inputs, self.labels = self.data_organizer(name_of_file)\n\t\tself.network = Network_manager()\n\t\tself.network.directioner_of_trainning_data(self.training_inputs,self.labels)", "def training(self):\n return getattr(self, '_training', False)", "def init_train(self):\n data = self.loader.load_labelled_data(self.conf.split, 'training')\n\n # Initialise unlabelled data iterator\n num_ul = 0\n if self.conf.ul_mix > 0:\n ul_data = self.loader.load_unlabelled_data(self.conf.split, 'all')\n\n # calculate number of unlabelled images as a proportion of the labelled images\n num_ul = int(data.size() * self.conf.ul_mix)\n num_ul = num_ul if num_ul <= ul_data.size() else ul_data.size()\n log.info('Sampling %d unlabelled images out of total %d.' 
% (num_ul, ul_data.size()))\n ul_data.sample(num_ul)\n self.gen_X_U = data_utils.generator(self.conf.batch_size, 'overflow', ul_data.images)\n\n # Initialise labelled data iterator\n assert self.conf.l_mix >= 0\n\n # calculate number of labelled images\n num_l = int(data.size() * self.conf.l_mix)\n num_l = num_l if num_l <= data.size() else data.size()\n log.info('Using %d labelled images out of total %d.' % (num_l, data.size()))\n train_images = data.images[:num_l]\n train_masks = data.masks[:num_l]\n\n self.conf.unlabelled_image_num = num_ul\n self.conf.labelled_image_num = num_l\n self.conf.data_len = num_ul if num_ul > num_l else num_l\n self.conf.batches = int(np.ceil(self.conf.data_len / self.conf.batch_size))\n self.conf.save()\n\n self.gen_X_L = data_utils.generator(self.conf.batch_size, 'overflow', train_images, train_masks)\n\n # Initialise real masks iterator for discriminator training, using the real masks from the data CV split.\n self.other_masks = data_utils.generator(self.conf.batch_size, 'overflow', data.masks + 0)", "def load_data(self):\n\n if self.opt.isTrain:\n with open(os.path.join(self.opt.csv_path, self.opt.train_filename), 'r') as csvfile:\n csvr = csv.reader(csvfile, delimiter=',')\n for l in csvr:\n train_list = l\n train_set = self.dataset_class(self.opt, train_list)\n\n with open(os.path.join(self.opt.csv_path, self.opt.val_filename), 'r') as csvfile:\n csvr = csv.reader(csvfile, delimiter=',')\n for l in csvr:\n val_list = l\n val_set = self.dataset_class(self.opt, val_list)\n\n self.dataset = train_set\n\n self.len_train_set = len(train_set)\n self.len_val_set = len(val_set)\n\n self.train_loader = torch.utils.data.DataLoader(train_set,\n batch_size=self.opt.batch_size,\n shuffle=self.opt.shuffle_data,\n num_workers=int(self.opt.num_threads))\n self.val_loader = torch.utils.data.DataLoader(val_set,\n batch_size=self.opt.batch_size,\n shuffle=self.opt.shuffle_data,\n num_workers=int(self.opt.num_threads))\n\n self.data_loader = self.train_loader\n else:\n # At test time\n with open(os.path.join(self.opt.csv_path, self.opt.test_filename), 'r') as csvfile:\n csvr = csv.reader(csvfile, delimiter=',')\n for l in csvr:\n test_list = l\n test_set = self.dataset_class(self.opt, test_list)\n\n self.len_test_set = len(test_set)\n\n self.dataset = test_set\n\n self.data_loader = torch.utils.data.DataLoader(test_set,\n batch_size=1,\n shuffle=False,\n num_workers=int(self.opt.num_threads))\n\n return self", "def __load_data(self):\n print(\"loading training data...\")\n training_data = []\n files = glob('data/*.json')\n for file in files:\n print(\"loading\", file)\n with open(file) as data_file:\n training_data.append(json.load(data_file))\n return training_data", "def get_train_dataset(self):\n return self.train_dataset", "def training_documents(self):\n return self._training_documents" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Property giving the number of training samples for each key
def num_train_samples(self):
    if self._num_training_samples is None:
        for key, value in self._training_data.items():
            self._num_training_samples[key] = len(value[0])
    return self._num_training_samples
[ "def train_size(self):", "def num_test_samples(self):\n if self._num_test_samples is None:\n for key, value in self._test_data.items():\n self._num_test_samples[key] = len(value[0])\n return self._num_test_samples", "def _number_of_samples(self):\n return len(self._raw_data.samples)", "def num_train_instances(self):\n raise NotImplementedError()", "def num_train_samples(self):\n if self.train_data is None:\n return 0\n return self.num_train", "def __len__(self):\n return self.num_samples", "def GetNumberOfSamples(self):\n ...", "def get_number_of_loadable_samples(self):\n return None", "def getNumberOfKeys(self) -> int:\n ...", "def test_train_data_length(self):\n total_count = 0\n for batch in self._dataset.get_train():\n total_count += len(batch['label'])\n\n self.assertEqual(total_count, self._dataset.get_train_len())", "def training_set_count(self) -> int:\n return pulumi.get(self, \"training_set_count\")", "def train_size(self):\n train_set = self.train_set()\n if isinstance(train_set, collections.Iterable):\n return len(list(train_set))\n else:\n return None", "def count_trainable_params(model):\n weights = model.trainable_weights\n total_trainable_params = int(sum(np.prod(p.shape) for p in object_identity.ObjectIdentitySet(weights)))\n return total_trainable_params", "def __len__(self):\n \n num_batches_per_epoch = np.floor(len(self.training_data_dict)/self.user_args.batch_size)\n return int(num_batches_per_epoch)", "def test_len_trainset(self):\n self.assertEqual(self.__dataset.get_train_len, 10000)", "def get_train_sizes(self):\n self.p = {}\n try:\n for l in self.X:\n self.p[l] = self.X_train[l].shape[0]\n except AttributeError:\n raise AttributeError('no train/test split created')\n total = float(sum(self.p.values()))\n self.p = {k: v/total for k, v in self.p.items()}\n return", "def get_number_of_loadable_samples(self):\n return len(self.gt_paths)", "def size(self):\r\n return len(self._train_datas)", "def vocab_size(self):\n raise NotImplementedError" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Property giving the number of test samples
def num_test_samples(self):
    if self._num_test_samples is None:
        for key, value in self._test_data.items():
            self._num_test_samples[key] = len(value[0])
    return self._num_test_samples
[ "def num_test_samples(self):\n if self.eval_data is None:\n return 0\n return self.num_test", "def _number_of_samples(self):\n return len(self._raw_data.samples)", "def setTestSampleSize(self, Ntest):\n self.Ntest = Ntest", "def GetNumberOfSamples(self):\n ...", "def __len__(self):\n return self.num_samples", "def num_trials(self):", "def train_size(self):", "def test_batch_size(self):\n return self._test_batch_size", "def test_test_data_length(self):\n total_count = 0\n for batch in self._dataset.get_test():\n total_count += len(batch['label'])\n\n self.assertEqual(total_count, self._dataset.get_test_len())", "def test_getSampleCount(self):\r\n self.assertEqual(self.estimator1.getSampleCount(), 1)", "def test_len_testset(self):\n self.assertEqual(self.__dataset.get_test_len, 1000)", "def nsamples(self) -> int:\n return self.shadow_tree.get_node_nsamples(self.id)", "def num_train_samples(self):\n if self._num_training_samples is None:\n for key, value in self._training_data.items():\n self._num_training_samples[key] = len(value[0])\n return self._num_training_samples", "def test_len_trainset(self):\n self.assertEqual(self.__dataset.get_train_len, 10000)", "def test_size(self) -> int:\n return int(self.data_size * self.__test_fraction)", "def getSampleCount(self): # real signature unknown; restored from __doc__\n pass", "def test_count(self):\n count = 0\n for network in self.config['network']:\n count += len(network['test'])\n return count", "def _get_samples(self) -> int:\n return self._samples", "def get_number_of_loadable_samples(self):\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Highlights currentSelection on stdscr.
def highlightSelection(stdscr, selection):
    s = tuple(list(selection.addStrArgs)+[curses.A_REVERSE])
    stdscr.addstr(*s)
[ "def capture_highlighted_text(self, event):\n highlighted_by_cursor = self.text.get(tk.SEL_FIRST, tk.SEL_LAST)\n self.text_selected.set(highlighted_by_cursor)", "def draw_selected(self):\n if self.get_selected() is not None and not self.check_if_locked(self.get_selected()):\n self.color_cell(pos=self.get_selected(\n ), color=SELECTED_INVALID if self.get_selected() in self.invalid else SELECTED)", "def highlight_color(self):\n return curses.color_pair(4) if self.cycling else curses.color_pair(2)", "def interactive_select(space, current):\n print \"Type an element name, an element index, or an unambiguous prefix to add to your selection.\"\n print \"Type '\" + color_code(MAGENTA) + \"list\" + CLEAR_COLOR +\"' to see the list of valid selections/indices.\"\n print \"Type '\" + color_code(MAGENTA) + \"clear\" + CLEAR_COLOR +\"' to clear selection.\"\n print \"Enter an empty line when done.\\n\"\n \n done = False\n while not done:\n print color_code(BLACK, bold=True), \"\\nCurrent selection\" + CLEAR_COLOR + \":\", (current if current else \"None\")\n tentative = raw_input(color_code(YELLOW) + \"Selection or Command\" + CLEAR_COLOR + \": \")\n matches = [el for el in space if el.startswith(tentative)]\n try: index = int(tentative)\n except ValueError: index = None\n if tentative == 'list':\n for i,el in enumerate(space):\n print \"\\t\", color_code(BLUE, bold=True), i, CLEAR_COLOR, el\n print \"\\n\"\n elif tentative == 'clear':\n current = []\n elif tentative == '':\n if current:\n print color_code(GREEN), \"\\nFinal selection\" + CLEAR_COLOR + \":\", current, \"\\n\\n\"\n done = True\n else:\n print_error(\"Must select at least one\")\n elif len(matches) > 1:\n print_error(\"Multiple matches found for `{}' ({})\".format(tentative, matches))\n elif len(matches):\n if matches[0] in current:\n print_warning(\"{} was already selected\".format(matches[0]))\n else:\n current.append(matches[0])\n elif index is not None:\n if index < 0 or index >= len(space):\n print_error(\"Invalid index {}\".format(index))\n elif space[index] in current:\n print_warning(\"{} was already selected\".format(space[index]))\n else:\n current.append(space[index])\n else:\n print_error(\"Unknown token: {}\".format(tentative))\n \n return current", "def _highlight_current_box(self):\n if self.settings.mouse_x <= self.settings.screen_width - 300:\n #Highlight row selected box appears in\n pygame.draw.rect(self.screen, LIGHTBLUE, (0, self.settings.current_y_coord, self.settings.screen_width - 300, self.settings.cell_size), 0)\n\n #Highlight cloumn selected box appears in\n pygame.draw.rect(self.screen, LIGHTBLUE, (self.settings.current_x_coord, 0, self.settings.cell_size, self.settings.screen_width), 0)\n\n #Highlight selected box\n pygame.draw.rect(self.screen, BLUE, (self.settings.current_x_coord, self.settings.current_y_coord, self.settings.cell_size, self.settings.cell_size), 0)", "def highlight(self):\n\n if self.selected_text_file is None:\n return\n if self.selected_text_file[FULLTEXT] is None:\n return\n format_ = QtGui.QTextCharFormat()\n cursor = self.ui.textBrowser.textCursor()\n for item in self.case_text:\n try:\n cursor.setPosition(int(item['pos0']), QtGui.QTextCursor.MoveMode.MoveAnchor)\n cursor.setPosition(int(item['pos1']), QtGui.QTextCursor.MoveMode.KeepAnchor)\n format_.setFontUnderline(True)\n format_.setUnderlineColor(QtCore.Qt.GlobalColor.red)\n cursor.setCharFormat(format_)\n except Exception as err:\n msg = \"highlight, text length \" + str(len(self.ui.textBrowser.toPlainText()))\n msg += 
\"\\npos0:\" + str(item['pos0']) + \", pos1:\" + str(item['pos1'])\n msg += \"\\n\" + str(err)\n logger.debug(msg)", "def draw_selection(self):\n\n if self.selection:\n x, y = self.selection\n x, y = x * const.BOX_SIZE, y * const.BOX_SIZE\n pyxel.rect(x1=x + 1, x2=x + 14, y1=y + 1, y2=y + 14, col=const.C_SELECTION)", "def active_selection():\r\n\r\n om.MGlobal.getActiveSelectionList()", "def highlight_current_line(self, filename, line):\r\n\t\traise NotImplementedError()", "def set_highlight(*args):\n return _ida_kernwin.set_highlight(*args)", "def capture_selection(self):\n self._selection = self.get_selection()\n self._n = 0", "def highlight(self):\n # if self.vars['highlight_setting'] == 1:\n for match in SYNTAX_GROUPS:\n self.vim.command(f'highlight link {match[\"name\"]} {match[\"link\"]}')", "def unhighlight(self, current=False):\n if current:\n if self.currentEditor is not None:\n self.currentEditor.highlight()\n else:\n for editor in self.editors:\n editor.highlight()", "def clear_current_line_highlighting(self):\r\n\t\traise NotImplementedError()", "def _select(self, drawable):\n drawable.show_selected_highlight(self._canvas)\n self._selected_drawables.add(drawable)\n self._on_selection_changed()", "def highlightCode(self, _event=None):\n count = 0\n if self.text.tag_ranges('sel'):\n self.text.tag_add('color' + str(count), tk.SEL_FIRST, tk.SEL_LAST)\n self.text.tag_configure('color' + str(count), foreground='black', background='yellow')\n count += 1\n else:\n # Do this if you want to overwrite all selection colors when you change color without selection\n # for tag in text.tag_names():\n # text.tag_delete(tag)\n self.text.config(foreground='yellow')\n\n fileContainingText = open(newTextFile, \"a\")\n\n hText = self.text.get(tk.SEL_FIRST, tk.SEL_LAST)\n fileContainingText.write(hText)", "def rehighlight(self):\n start = time.time()\n QtWidgets.QApplication.setOverrideCursor(\n QtGui.QCursor(QtCore.Qt.WaitCursor))\n try:\n super(SyntaxHighlighter, self).rehighlight()\n except RuntimeError:\n # cloned widget, no need to rehighlight the same document twice ;)\n pass\n QtWidgets.QApplication.restoreOverrideCursor()\n end = time.time()\n _logger().debug('rehighlight duration: %fs' % (end - start))", "def highlight(self, highlight):\n self._highlight = highlight", "def highlight(self):\n return self._highlight" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This initializes the DotStars object by setting a buffer, and creating an SPI object. The start and end frames for the SPI communication are created, and the leds are cleared of values.
def __init__(self, leds):
    self.ledcount = leds
    # create a buffer
    self.buffersize = self.ledcount * 4
    self.buffer = bytearray(self.ledcount * 4)
    self.emptybuffer = bytearray(self.ledcount * 4)
    for i in range(0, self.buffersize, 4):
        self.emptybuffer[i] = 0xff
        self.emptybuffer[i + 1] = 0x0
        self.emptybuffer[i + 2] = 0x0
        self.emptybuffer[i + 3] = 0x0
    # Start frame and endframe for the SPI communication (end frame is not
    # needed)
    self.startframe = bytes([0x00, 0x00, 0x00, 0x00])
    self.endframe = bytes([0xff, 0xff, 0xff, 0xff])
    # initialize SPI (needs to be at 45 MHz in order to maximize the speed.
    # This is the limiting factor for the system's speed)
    self.spi = SPI(1, SPI.MASTER, baudrate=45000000, polarity=0, phase=0,
                   bits=8, firstbit=SPI.MSB)
    self.clearleds()
[ "def SPIsetup(self):\n self.writecmd(0x01,0x10,0,self.data); #SPI/SETUP", "def spi_init(self):\n self.lw.ctrl_transfer(bmRequestType=0xC0, bRequest=23,\n wValue=0, wIndex=0,\n data_or_wLength=8, timeout=USB_TIMEOUT)", "def init(\n baudrate=1000000, bits=8, mode=0, sclk=\"pin13\", mosi=\"pin15\", miso=\"pin14\"\n ):\n utils.print_for_unimplemented_functions(SPI.init.__qualname__)\n telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_SPI)", "def _init_config(self, width, height, spi=None, spiMosi= None, spiDC=None, spiCS=None, spiReset=None, spiClk=None):\n self._spi = spi\n self._spi_mosi = spiMosi\n self._spi_dc = spiDC\n self._spi_cs = spiCS\n self._spi_reset = spiReset\n self._spi_clk = spiClk\n\n self.width = width\n self.height = height", "def ADS124_connect(self) :\n self.spi = spidev.SpiDev()\n self.spi.open(0,0)\n self.spi.cshigh = False\n self.spi.mode = 0b01\n self.spi.max_speed_hz = 3814\n return", "def __init__(self, spi=0, CD=36, reset=48):\n this = _pyupm_lcd.new_EBOLED(spi, CD, reset)\n try:\n self.this.append(this)\n except Exception:\n self.this = this", "def initiate():\n\n log = \"Initiate the SPI communication of the OPC-N3\"\n logger.debug(log)\n\n time.sleep(1)\n log = \"Sending bytes to the sensor...\"\n logger.debug(log)\n spi.writebytes([0x5A, 0x01])\n reading = spi.readbytes(3)\n log = \"Data read after sending bytes are: \" + str(reading)\n logger.debug(log)\n time.sleep(wait_between_bytes)\n\n log = \"Sending bytes to the sensor...\"\n logger.debug(log)\n spi.writebytes([0x5A, 0x03])\n reading = spi.readbytes(9)\n log = \"Bytes read after sending bytes are: \" + str(reading)\n logger.debug(log)\n time.sleep(wait_between_bytes)\n\n # SPI conncetion\n log = \"Sending bytes to the sensor...\"\n logger.debug(log)\n spi.writebytes([0x5A, 0x02, 0x92, 0x07])\n reading = spi.readbytes(2)\n log = \"Bytes read after sending bytes are: \" + str(reading)\n logger.debug(log)\n time.sleep(wait_between_bytes)\n\n return", "def __init__(self, bus=0, chip_select=0, spi_callback=None):\n self.bus = bus\n self.chip_select = chip_select\n self.spi_callback = spi_callback\n self.fd = None\n spi_device = \"%s%d.%d\" % (SPIDEV, self.bus, self.chip_select)\n self.open_fd(spi_device)", "def __init__(self, stencil_coefs, loffset, roffset):\n self.stencil_coefs = stencil_coefs\n self.loffset = loffset\n self.roffset = roffset", "def spi_controller(\n # ---[ Module Ports]---\n glbl, # global interface, clock, reset, etc.\n spibus, # external SPI bus\n # optional ports\n fifobus=None, # streaming interface, FIFO bus\n mmbus=None, # memory-mapped bus, contro status access\n cso=None, # control-status object\n \n # ---[ Module Parameters ]---\n include_fifo=True, # include aan 8 byte deep FIFO\n):\n clock, reset = glbl.clock, glbl.reset\n if cso is None:\n cso = spi_controller.cso()\n\n # -- local signals --\n ena = Signal(False)\n clkcnt = Signal(modbv(0, min=0, max=2**12))\n bcnt = Signal(intbv(0, min=0, max=8))\n\n # separate tx and rx shift-registers (could be one in the same)\n treg = Signal(intbv(0)[8:]) # tx shift register\n rreg = Signal(intbv(0)[8:]) # rx shift register\n\n x_sck, x_ss, x_mosi, x_miso = Signals(bool(0), 4)\n\n # internal FIFO bus interfaces\n # external FIFO side (FIFO to external SPI bus)\n itx = FIFOBus(size=fifobus.size, width=fifobus.width)\n # internal FIFO side (FIFO to internal bus)\n irx = FIFOBus(size=fifobus.size, width=fifobus.width)\n \n states = enum('idle', 'wait_hclk', 'data_in', 'data_change',\n 'write_fifo', 'end')\n state = 
Signal(states.idle)\n\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # memory- mapped registers\n # add the peripheral's regfile to the bus (informational only)\n # @todo: the automatic building of the register files is incomplete\n if mmbus is not None:\n # the register-file (rf) will drive all the cso signals\n rf = cso.get_register_file()\n mmbus.add(rf, 'spi')\n\n # FIFO for the wishbone data transfer\n if include_fifo:\n fifo_fast.debug = spi_controller.debug\n fifo_tx_inst = fifo_fast(reset, clock, itx)\n fifo_rx_inst = fifo_fast(reset, clock, irx)\n\n @always_comb\n def rtl_assign():\n cso.tx_fifo_count.next = itx.count\n cso.rx_fifo_count.next = irx.count\n\n if clkcnt > 0:\n ena.next = False\n else:\n ena.next = True\n\n clock_counts = tuple([(2**ii)-1 for ii in range(13)])\n\n @always(clock.posedge)\n def rtl_clk_div():\n if cso.enable and clkcnt != 0 and state != states.idle:\n clkcnt.next = (clkcnt - 1)\n else:\n clkcnt.next = clock_counts[cso.clock_divisor]\n\n @always_seq(clock.posedge, reset=reset)\n def rtl_state_and_more():\n \"\"\"\n Designed to the following timing diagram\n\n SCK CPOL=0 ______/---\\___/---\\___/---\\___/---\\___/---\\___/---\\___/---\\___/---\\___/---\\ \n CPOL=1 ------\\___/---\\___/---\\___/---\\___/---\\___/---\\___/---\\___/---\\___/---\\___/ \n SS ---\\_______________________________________________________________________ \n CPHA=0 MOSI ...|.0....|.1.....|.2.....|.3.....|.4.....|.5.....|.6.....|.7.....|.0.....| \n MISO ...|.0....|.1.....|.2.....|.3.....|.4.....|.5.....|.6.....|.7.....|.0.....| \n CPHA=1 MOSI ...|....0.....|.1.....|.2.....|.3.....|.4.....|.5.....|.6.....|.7.....|.0...\n MISO ......|.0.....|.1.....|.2.....|.3.....|.4.....|.5.....|.6.....|.7.....|.0...\n \"\"\"\n if not cso.enable:\n state.next = states.idle\n bcnt.next = 0\n treg.next = 0\n \n itx.read.next = False\n irx.write.next = False\n\n x_sck.next = False\n x_ss.next = False\n else:\n if not cso.freeze:\n # ~~~~ Idle state ~~~~\n if state == states.idle:\n bcnt.next = 7\n treg.next = itx.read_data\n x_sck.next = cso.clock_polarity\n irx.write.next = False\n \n if not itx.empty and not irx.full:\n itx.read.next = True\n x_ss.next = False\n if cso.clock_phase: # Clock in on second phase\n state.next = states.wait_hclk\n else: # Clock in on first phase\n state.next = states.data_in\n else:\n itx.read.next = False\n x_ss.next = True\n\n # ~~~~ Wait half clock period for cpha=1 ~~~~\n elif state == states.wait_hclk:\n itx.read.next = False\n irx.write.next = False\n if ena:\n x_sck.next = not x_sck\n state.next = states.data_in\n\n # ~~~~ Clock data in (and out) ~~~~\n elif state == states.data_in:\n itx.read.next = False\n irx.write.next = False\n if ena: # clk div\n x_sck.next = not x_sck\n rreg.next = concat(rreg[7:0], x_miso)\n \n if cso.clock_phase and bcnt == 0:\n irx.write.next = True\n if itx.empty or irx.full:\n state.next = states.end\n else:\n state.next = states.data_change\n else:\n state.next = states.data_change\n\n # ~~~~ Get ready for next byte out/in ~~~~\n elif state == states.data_change:\n itx.read.next = False\n irx.write.next = False\n if ena:\n x_sck.next = not x_sck\n if bcnt == 0: \n if not cso.clock_phase:\n irx.write.next = True\n \n if itx.empty or irx.full:\n state.next = states.end\n else: # more data to transfer\n bcnt.next = 7\n state.next = states.data_in\n itx.read.next = True\n treg.next = itx.read_data\n else:\n treg.next = concat(treg[7:0], intbv(0)[1:])\n bcnt.next = bcnt - 1 \n state.next = 
states.data_in\n\n # ~~~~ End state ~~~~\n elif state == states.end:\n itx.read.next = False\n irx.write.next = False\n if ena: # Wait half clock cycle go idle\n state.next = states.idle\n\n # Shouldn't happen, error in logic\n else:\n state.next = states.idle\n assert False, \"SPI Invalid State\"\n\n @always_comb\n def rtl_fifo_sel():\n \"\"\"\n The `itx` and `irx` FIFO interfaces are driven by different\n logic depending on the configuration. This modules accesses\n the `itx` read side and drives the `irx` write side. The\n `itx` write side is driven by the `cso` or the `fifobus` port.\n The `irx` read side is accessed by the `cso` or the `fifobus`\n port.\n \"\"\"\n if cso.bypass_fifo:\n # data comes from the register file\n cso.tx_empty.next = itx.empty\n cso.tx_full.next = itx.full\n itx.write_data.next = cso.tx_byte\n\n cso.rx_empty.next = irx.empty\n cso.rx_full.next = irx.full\n cso.rx_byte.next = irx.read_data\n cso.rx_byte_valid.next = irx.read_valid\n\n # @todo: if cso.tx_byte write signal (written by bus) drive the\n # @todo: FIFO write signals, same if the cso.rx_byte is accessed\n itx.write.next = cso.tx_write\n irx.read.next = cso.rx_read\n\n else:\n # data comes from external FIFO bus interface\n fifobus.full.next = itx.full\n itx.write_data.next = fifobus.write_data\n itx.write.next = fifobus.write\n\n fifobus.empty.next = irx.empty\n fifobus.read_data.next = irx.read_data\n fifobus.read_valid.next = irx.read_valid\n irx.read.next = fifobus.read\n\n # same for all modes\n irx.write_data.next = rreg\n\n @always_comb\n def rtl_x_mosi():\n # @todo lsb control signal\n x_mosi.next = treg[7]\n\n @always_comb\n def rtl_gate_mosi():\n if cso.loopback:\n spibus.mosi.next = False\n else:\n spibus.mosi.next = x_mosi\n\n @always_comb #(clock.posedge)\n def rtl_spi_sigs():\n spibus.sck.next = x_sck\n if cso.loopback:\n x_miso.next = x_mosi\n else:\n x_miso.next = spibus.miso\n\n @always_comb\n def rtl_slave_select():\n if cso.manual_slave_select:\n spibus.ss.next = ~cso.slave_select\n elif x_ss:\n spibus.ss.next = 0xFF\n else:\n spibus.ss.next = ~cso.slave_select\n\n # myhdl generators in the __debug__ conditionals are not converted.\n if spi_controller.debug:\n @instance\n def mon_state():\n print(\" :{:<8d}: initial state {}\".format(\n now(), str(state)))\n \n while True:\n yield state\n print(\" :{:<8d}: state transition --> {}\".format(\n now(), str(state)))\n \n fbidle = intbv('0000')[4:]\n\n @instance\n def mon_trace():\n while True:\n yield clock.posedge\n ccfb = concat(itx.write, itx.read, irx.write, irx.read)\n if ccfb != fbidle:\n fstr = \" :{:<8d}: tx: w{} r{}, f{} e{}, rx: w{} r{} f{} e{}\"\n print(fstr.format(now(),\n int(itx.write), int(itx.read), int(itx.full), int(itx.empty),\n int(irx.write), int(irx.read), int(irx.full), int(irx.empty),)\n )\n \n @always(clock.posedge)\n def mon_tx_fifo_write():\n if itx.write:\n print(\" WRITE tx fifo {:02X}\".format(int(itx.write_data)))\n if itx.read:\n print(\" READ tx fifo {:02X}\".format(int(itx.read_data)))\n \n @always(clock.posedge)\n def mon_rx_fifo_write():\n if irx.write:\n print(\" WRITE rx fifo {:02X}\".format(int(irx.write_data)))\n \n if irx.read:\n print(\" READ rx fifo {:02X}\".format(int(irx.read_data)))\n\n # return the myhdl generators\n gens = myhdl.instances()\n return gens", "def __init__(self, num_leds=25, dev_file='/dev/spidev1.0'):\n\n self.num_leds = num_leds\n self._frame_hold = False\n\n # Open the file\n self._spi = open(dev_file, 'wb')\n\n # Make the underlying byte array\n self._bytes = bytearray(3 * 
self.num_leds)\n\n # Write out bytes to turn off LEDs\n self.refresh()", "def __init__( # pylint: disable=too-many-arguments\n self,\n spi: busio.SPI,\n latch: digitalio.DigitalInOut,\n columns: int,\n lines: int,\n backlight_inverted: bool = False,\n ):\n\n self._shift_register = adafruit_74hc595.ShiftRegister74HC595(spi, latch)\n reset = self._shift_register.get_pin(1)\n enable = self._shift_register.get_pin(2)\n db4 = self._shift_register.get_pin(6)\n db5 = self._shift_register.get_pin(5)\n db6 = self._shift_register.get_pin(4)\n db7 = self._shift_register.get_pin(3)\n backlight_pin = self._shift_register.get_pin(7)\n super().__init__(\n reset,\n enable,\n db4,\n db5,\n db6,\n db7,\n columns,\n lines,\n backlight_pin=backlight_pin,\n backlight_inverted=backlight_inverted,\n )", "def __init__(self):\r\n # Check device ID.\r\n chip_id = self._read_byte(_BME280_REGISTER_CHIPID)\r\n if _BME280_CHIPID != chip_id:\r\n raise RuntimeError('Failed to find BME280! Chip ID 0x%x' % chip_id)\r\n self._write_register_byte(_BME280_REGISTER_SOFTRESET, 0xB6)\r\n time.sleep(0.5)\r\n self._read_coefficients()\r\n self.sea_level_pressure = 1013.25\r\n \"\"\"Pressure in hectoPascals at sea level. Used to calibrate `altitude`.\"\"\"\r\n # turn on humidity oversample 16x\r\n self._write_register_byte(_BME280_REGISTER_CTRL_HUM, 0x03)\r\n self._t_fine = None", "def setup_buffer(self):\n ## Validate ##\n assert self.ChanReady and self.ModeReady, \"The Mode & Channels must be configured before Buffer!\"\n assert len(self.Segments) > 0, \"No Segments defined! Nothing to put in Buffer.\"\n\n ## Gather Information from Board ##\n num_chan = int32(0) # Number of Open Channels\n mem_size = uint64(0) # Total Memory ~ 4.3 GB\n spcm_dwGetParam_i32(self.hCard, SPC_CHCOUNT, byref(num_chan))\n spcm_dwGetParam_i64(self.hCard, SPC_PCIMEMSIZE, byref(mem_size))\n#######################################################################################################################\n ## Configures Memory Size & Divisions ##\n num_segs = int32(2)\n if self.Mode == 'sequential':\n buf_size = max([seg.SampleLength for seg in self.Segments])*2*num_chan.value\n while num_segs.value < len(self.Segments):\n num_segs.value <<= 1\n assert buf_size <= mem_size.value / num_segs.value, \"One of the segments is too large!\"\n\n spcm_dwSetParam_i32(self.hCard, SPC_SEQMODE_MAXSEGMENTS, num_segs)\n spcm_dwSetParam_i32(self.hCard, SPC_SEQMODE_STARTSTEP, 0)\n\n num_segs = len(self.Segments)\n print(\"Num Segments: \", num_segs)\n else:\n buf_size = self.Segments[0].SampleLength*2*num_chan.value\n spcm_dwSetParam_i64(self.hCard, SPC_MEMSIZE, int64(self.Segments[0].SampleLength))\n\n ## Sets up a local Software Buffer for Transfer to Board ##\n pv_buf = pvAllocMemPageAligned(buf_size) # Allocates space on PC\n pn_buf = cast(pv_buf, ptr16) # Casts pointer into something usable\n\n ## Loads each necessary Segment ##\n if self.Mode == 'sequential':\n for i, seg in enumerate(self.Segments):\n spcm_dwSetParam_i32(self.hCard, SPC_SEQMODE_WRITESEGMENT, i)\n spcm_dwSetParam_i32(self.hCard, SPC_SEQMODE_SEGMENTSIZE, seg.SampleLength)\n self._error_check()\n\n buf_size = seg.SampleLength * 2 * num_chan.value # Calculates Segment Size in Bytes\n self._compute_and_load(seg, pn_buf, pv_buf, uint64(buf_size))\n else:\n buf_size = self.Segments[0].SampleLength * 2 * num_chan.value\n self._compute_and_load(self.Segments[0], pn_buf, pv_buf, 
uint64(buf_size))\n#######################################################################################################################\n ## Clock ##\n spcm_dwSetParam_i32(self.hCard, SPC_CLOCKMODE, SPC_CM_INTPLL) # Sets out internal Quarts Clock For Sampling\n spcm_dwSetParam_i64(self.hCard, SPC_SAMPLERATE, int64(int(SAMP_FREQ))) # Sets Sampling Rate\n spcm_dwSetParam_i32(self.hCard, SPC_CLOCKOUT, 0) # Disables Clock Output\n check_clock = int64(0)\n spcm_dwGetParam_i64(self.hCard, SPC_SAMPLERATE, byref(check_clock)) # Checks Sampling Rate\n # print(\"Achieved Sampling Rate: \", check_clock.value)\n\n self._error_check()\n self.BufReady = True", "def __init__(self, spi_rack, module, max_current=50e-3, reset_currents=True):\n self.spi_rack = spi_rack\n self.module = module\n self.span = [np.NaN]*4\n self.currents = [np.NaN]*4\n self.max_current = max_current\n\n for i in range(4):\n self.get_settings(i)\n\n if reset_currents:\n for i in range(4):\n self.change_span(i, S4g_module.range_max_bi)\n self.set_current(i, 0.0)", "def __init__(self):\n self.beep_data = None\n self.health_data = None\n self.gps_data = None", "def initialize_ssp(self, trajectory):\n ssp = self.voc[\"Zero\"]\n for spec in trajectory.object_specs:\n ssp += self.encode_point(spec.x, spec.y, spec.name)\n\n return ssp", "def initialize(self):\n self.logger.debug('Dummy Generic Serial Controller device initialized')\n self._empty_buffer()\n self._is_initialized = True", "def __init__(self):\n self.stm32 = None\n self.configuration = {\n \"port\": os.environ.get(\"STM32LOADER_SERIAL_PORT\"),\n \"baud\": 115200,\n \"parity\": self.PARITY[\"even\"],\n \"family\": os.environ.get(\"STM32LOADER_FAMILY\"),\n \"address\": 0x08000000,\n \"erase\": False,\n \"unprotect\": False,\n \"write\": False,\n \"verify\": False,\n \"read\": False,\n \"go_address\": -1,\n \"swap_rts_dtr\": False,\n \"reset_active_high\": False,\n \"boot0_active_low\": False,\n \"hide_progress_bar\": False,\n \"data_file\": None,\n }\n self.verbosity = DEFAULT_VERBOSITY" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method clears all the LEDs in the DotStar object.
def clearleds(self):
    self.buffer = self.emptybuffer[:]
[ "def clear_lights(self):\n pass", "def clearLEDs(self):\n for pixel in range(self.__striplength):\n self.__pixels[pixel][0:3] = [0, 0, 0]", "def reset_all(self):\n self.led_service.reset(\"AllLeds\")", "def reset(self):\n for tlight in self.trafficLights:\n self.trafficLights[tlight].reset()", "def all_off():\n for led in LEDS:\n if led.is_lit:\n led.off()", "def off(self):\n for light in self.all:\n GPIO.output(light, 0)", "def turn_all_off(self):\n for led_type in LED:\n self.led_off(led_type)\n logging.info('LED: ALL - Status: 0')", "def clear_pitches(cls):\n Pitches.all_pitches.clear()", "def Reset(self):\n for device in self._detected_devices.values():\n device.Reset()", "def reset(self):\n for i in range(self.shapeRow):\n for j in range(self.shapeColumn):\n self.buttons[i][j].setText(\" \")", "def removeAll(self):\n self.storeFuncColor = []\n self.canvas.delete(ALL)", "def clear_bonds(self):\n self.ffi.chfl_frame_clear_bonds(self.mut_ptr)", "def off_all(self):\n for outlet in self.power_switch:\n outlet.off()\n self._verify_state(outlet.name, 'OFF')", "def clear_all(cls):\n del cls.buttons[:]", "def clear_explored(self):\n\n for node in self.nodes.values():\n node.explored = False", "def remove_all_lights(self):\n self.RemoveAllLights()\n self._lights.clear()", "def clear(self):\n for animal in self.animals:\n animal.undraw()\n self.animals = []\n self.canvas.delete('all')", "def remove_all(self):\n self.initial = None\n self.contour = None\n self.control_points = []", "def reset(self):\n for bod in self.points:\n bod.isVisible = False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }