{"blob_id": "8ed98201369c7481a0eeea69651201c3e97d130d", "bodies": ["links = ['/bin', '/sbin', '/lib']\nfor l in links:\n status, output = self.target.run('readlink %s' % l)\n self.assertEqual(status, 0, 'usrmerge error: %s should be a symbolic link' % l)", "binaries = ['/usr/bin/flatpak', '/usr/bin/gpgme-tool', '/usr/bin/gpg']\nfor b in binaries:\n status, output = self.target.run('ls %s' % b)\n self.assertEqual(status, 0, 'flatpak basic binary %s missing' % b)"], "bodies_text": "<|body_start_0|>\n links = ['/bin', '/sbin', '/lib']\n for l in links:\n status, output = self.target.run('readlink %s' % l)\n self.assertEqual(status, 0, 'usrmerge error: %s should be a symbolic link' % l)\n<|end_body_0|>\n\n<|body_start_1|>\n binaries = ['/usr/bin/flatpak', '/usr/bin/gpgme-tool', '/usr/bin/gpg']\n for b in binaries:\n status, output = self.target.run('ls %s' % b)\n self.assertEqual(status, 0, 'flatpak basic binary %s missing' % b)\n<|end_body_1|>\n", "class_docstring": "flatpak sanity tests", "class_name": "SanityTestFlatpak", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SanityTestFlatpak:\n \"\"\"flatpak sanity tests\"\"\"\n\n def test_flatpak_usrmerge(self):\n \"\"\"check if / and /usr are properly merged\"\"\"\n <|body_0|>\n\n def test_basic_binaries(self):\n \"\"\"check if basic flatpak binaries exist\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n links = ['/bin', '/sbin', '/lib']\n for l in links:\n status, output = self.target.run('readlink %s' % l)\n self.assertEqual(status, 0, 'usrmerge error: %s should be a symbolic link' % l)\n<|end_body_0|>\n\n<|body_start_1|>\n binaries = ['/usr/bin/flatpak', '/usr/bin/gpgme-tool', '/usr/bin/gpg']\n for b in binaries:\n status, output = self.target.run('ls %s' % b)\n self.assertEqual(status, 0, 'flatpak basic binary %s missing' % b)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000000", "length_bytes": 1063, "license_type": "permissive", "methods": [{"docstring": "check if / and /usr are properly merged", "name": "test_flatpak_usrmerge", "signature": "def test_flatpak_usrmerge(self)"}, {"docstring": "check if basic flatpak binaries exist", "name": "test_basic_binaries", "signature": "def test_basic_binaries(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_022946", "prompt": "Implement the Python class `SanityTestFlatpak` described below.\n\nClass description:\nflatpak sanity tests\n\nMethod signatures and docstrings:\n- def test_flatpak_usrmerge(self): check if / and /usr are properly merged\n- def test_basic_binaries(self): check if basic flatpak binaries exist", "prompted_full_text": "Implement the Python class `SanityTestFlatpak` described below.\n\nClass description:\nflatpak sanity tests\n\nMethod signatures and docstrings:\n- def test_flatpak_usrmerge(self): check if / and /usr are properly merged\n- def test_basic_binaries(self): check if basic flatpak binaries exist\n\n<|skeleton|>\nclass SanityTestFlatpak:\n \"\"\"flatpak sanity tests\"\"\"\n\n def test_flatpak_usrmerge(self):\n \"\"\"check if / and /usr are properly merged\"\"\"\n <|body_0|>\n\n def test_basic_binaries(self):\n \"\"\"check if basic flatpak binaries exist\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n links = ['/bin', '/sbin', '/lib']\n for l in links:\n status, output = self.target.run('readlink %s' % l)\n self.assertEqual(status, 0, 'usrmerge error: %s should be a symbolic link' % l)\n<|end_body_0|>\n\n<|body_start_1|>\n binaries 
= ['/usr/bin/flatpak', '/usr/bin/gpgme-tool', '/usr/bin/gpg']\n for b in binaries:\n status, output = self.target.run('ls %s' % b)\n self.assertEqual(status, 0, 'flatpak basic binary %s missing' % b)\n<|end_body_1|>\n", "revision_id": "786a4de29c30b47f885d8ad9cb2d110a08919ebd", "skeleton": "<|skeleton|>\nclass SanityTestFlatpak:\n \"\"\"flatpak sanity tests\"\"\"\n\n def test_flatpak_usrmerge(self):\n \"\"\"check if / and /usr are properly merged\"\"\"\n <|body_0|>\n\n def test_basic_binaries(self):\n \"\"\"check if basic flatpak binaries exist\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SanityTestFlatpak:\n \"\"\"flatpak sanity tests\"\"\"\n\n def test_flatpak_usrmerge(self):\n \"\"\"check if / and /usr are properly merged\"\"\"\n links = ['/bin', '/sbin', '/lib']\n for l in links:\n status, output = self.target.run('readlink %s' % l)\n self.assertEqual(status, 0, 'usrmerge error: %s should be a symbolic link' % l)\n\n def test_basic_binaries(self):\n \"\"\"check if basic flatpak binaries exist\"\"\"\n binaries = ['/usr/bin/flatpak', '/usr/bin/gpgme-tool', '/usr/bin/gpg']\n for b in binaries:\n status, output = self.target.run('ls %s' % b)\n self.assertEqual(status, 0, 'flatpak basic binary %s missing' % b)\n", "source": "the_stack_v2_python_sparse", "source_path": "meta-iotqa/lib/oeqa/runtime/sanity/flatpak.py", "source_repo": "intel/intel-iot-refkit", "split": "val", "star_events_count": 38}
{"blob_id": "bf013ec8d893a396bb3d1f12c7df59d0f5ea55ee", "bodies": ["if not table_name or not column_name or (not parent):\n raise ValueError('Missing table_name, column_name or parent value.')\nif row_condition and (not isinstance(row_condition, tuple) or len(row_condition) != 3):\n raise ValueError('Unsupported row_condition not a tuple in the form: (column_name, operator, value).')\nsuper(SQLiteBlobPathSpec, self).__init__(parent=parent, **kwargs)\nself.column_name = column_name\nself.row_condition = row_condition\nself.row_index = row_index\nself.table_name = table_name", "string_parts = []\nstring_parts.append(f'table name: {self.table_name:s}')\nstring_parts.append(f'column name: {self.column_name:s}')\nif self.row_condition is not None:\n row_condition_string = ' '.join([f'{value!s}' for value in self.row_condition])\n string_parts.append(f'row condition: \"{row_condition_string:s}\"')\nif self.row_index is not None:\n string_parts.append(f'row index: {self.row_index:d}')\nreturn self._GetComparable(sub_comparable_string=', '.join(string_parts))"], "bodies_text": "<|body_start_0|>\n if not table_name or not column_name or (not parent):\n raise ValueError('Missing table_name, column_name or parent value.')\n if row_condition and (not isinstance(row_condition, tuple) or len(row_condition) != 3):\n raise ValueError('Unsupported row_condition not a tuple in the form: (column_name, operator, value).')\n super(SQLiteBlobPathSpec, self).__init__(parent=parent, **kwargs)\n self.column_name = column_name\n self.row_condition = row_condition\n self.row_index = row_index\n self.table_name = table_name\n<|end_body_0|>\n\n<|body_start_1|>\n string_parts = []\n string_parts.append(f'table name: {self.table_name:s}')\n string_parts.append(f'column name: {self.column_name:s}')\n if self.row_condition is not None:\n row_condition_string = ' '.join([f'{value!s}' for value in self.row_condition])\n string_parts.append(f'row condition: \"{row_condition_string:s}\"')\n if self.row_index is not None:\n string_parts.append(f'row index: {self.row_index:d}')\n return self._GetComparable(sub_comparable_string=', '.join(string_parts))\n<|end_body_1|>\n", "class_docstring": "SQLite blob file path specification. Attributes: column_name (str): name of the column in which the blob is stored. row_condition (tuple): condition of the row in which the blob is stored. The condition is a tuple in the form: (column_name, operator, value). The condition must yield a single result. row_index (int): index of the row in which the blob is stored. table_name (str): name of the table in which the blob is stored.", "class_name": "SQLiteBlobPathSpec", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SQLiteBlobPathSpec:\n \"\"\"SQLite blob file path specification. Attributes: column_name (str): name of the column in which the blob is stored. row_condition (tuple): condition of the row in which the blob is stored. The condition is a tuple in the form: (column_name, operator, value). The condition must yield a single result. row_index (int): index of the row in which the blob is stored. table_name (str): name of the table in which the blob is stored.\"\"\"\n\n def __init__(self, column_name=None, parent=None, row_condition=None, row_index=None, table_name=None, **kwargs):\n \"\"\"Initializes a path specification. Note that the SQLite blob file path specification must have a parent. 
Args: column_name (Optional[str]): name of the column in which the blob is stored. parent (Optional[PathSpec]): parent path specification. row_condition (Optional[tuple]): condition of the row in which the blob is stored. The condition is a tuple in the form: (column_name, operator, value). The condition must yield a single result. row_index (Optional[int]): index of the row in which the blob is stored. table_name (Optional[str]): name of the table in which the blob is stored. Raises: ValueError: when table_name, column_name, row_condition and row_index, or parent is not set.\"\"\"\n <|body_0|>\n\n def comparable(self):\n \"\"\"str: comparable representation of the path specification.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not table_name or not column_name or (not parent):\n raise ValueError('Missing table_name, column_name or parent value.')\n if row_condition and (not isinstance(row_condition, tuple) or len(row_condition) != 3):\n raise ValueError('Unsupported row_condition not a tuple in the form: (column_name, operator, value).')\n super(SQLiteBlobPathSpec, self).__init__(parent=parent, **kwargs)\n self.column_name = column_name\n self.row_condition = row_condition\n self.row_index = row_index\n self.table_name = table_name\n<|end_body_0|>\n\n<|body_start_1|>\n string_parts = []\n string_parts.append(f'table name: {self.table_name:s}')\n string_parts.append(f'column name: {self.column_name:s}')\n if self.row_condition is not None:\n row_condition_string = ' '.join([f'{value!s}' for value in self.row_condition])\n string_parts.append(f'row condition: \"{row_condition_string:s}\"')\n if self.row_index is not None:\n string_parts.append(f'row index: {self.row_index:d}')\n return self._GetComparable(sub_comparable_string=', '.join(string_parts))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000001", "length_bytes": 2964, "license_type": "permissive", "methods": [{"docstring": "Initializes a path specification. Note that the SQLite blob file path specification must have a parent. Args: column_name (Optional[str]): name of the column in which the blob is stored. parent (Optional[PathSpec]): parent path specification. row_condition (Optional[tuple]): condition of the row in which the blob is stored. The condition is a tuple in the form: (column_name, operator, value). The condition must yield a single result. row_index (Optional[int]): index of the row in which the blob is stored. table_name (Optional[str]): name of the table in which the blob is stored. Raises: ValueError: when table_name, column_name, row_condition and row_index, or parent is not set.", "name": "__init__", "signature": "def __init__(self, column_name=None, parent=None, row_condition=None, row_index=None, table_name=None, **kwargs)"}, {"docstring": "str: comparable representation of the path specification.", "name": "comparable", "signature": "def comparable(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_022280", "prompt": "Implement the Python class `SQLiteBlobPathSpec` described below.\n\nClass description:\nSQLite blob file path specification. Attributes: column_name (str): name of the column in which the blob is stored. row_condition (tuple): condition of the row in which the blob is stored. The condition is a tuple in the form: (column_name, operator, value). The condition must yield a single result. row_index (int): index of the row in which the blob is stored. 
table_name (str): name of the table in which the blob is stored.\n\nMethod signatures and docstrings:\n- def __init__(self, column_name=None, parent=None, row_condition=None, row_index=None, table_name=None, **kwargs): Initializes a path specification. Note that the SQLite blob file path specification must have a parent. Args: column_name (Optional[str]): name of the column in which the blob is stored. parent (Optional[PathSpec]): parent path specification. row_condition (Optional[tuple]): condition of the row in which the blob is stored. The condition is a tuple in the form: (column_name, operator, value). The condition must yield a single result. row_index (Optional[int]): index of the row in which the blob is stored. table_name (Optional[str]): name of the table in which the blob is stored. Raises: ValueError: when table_name, column_name, row_condition and row_index, or parent is not set.\n- def comparable(self): str: comparable representation of the path specification.", "prompted_full_text": "Implement the Python class `SQLiteBlobPathSpec` described below.\n\nClass description:\nSQLite blob file path specification. Attributes: column_name (str): name of the column in which the blob is stored. row_condition (tuple): condition of the row in which the blob is stored. The condition is a tuple in the form: (column_name, operator, value). The condition must yield a single result. row_index (int): index of the row in which the blob is stored. table_name (str): name of the table in which the blob is stored.\n\nMethod signatures and docstrings:\n- def __init__(self, column_name=None, parent=None, row_condition=None, row_index=None, table_name=None, **kwargs): Initializes a path specification. Note that the SQLite blob file path specification must have a parent. Args: column_name (Optional[str]): name of the column in which the blob is stored. parent (Optional[PathSpec]): parent path specification. row_condition (Optional[tuple]): condition of the row in which the blob is stored. The condition is a tuple in the form: (column_name, operator, value). The condition must yield a single result. row_index (Optional[int]): index of the row in which the blob is stored. table_name (Optional[str]): name of the table in which the blob is stored. Raises: ValueError: when table_name, column_name, row_condition and row_index, or parent is not set.\n- def comparable(self): str: comparable representation of the path specification.\n\n<|skeleton|>\nclass SQLiteBlobPathSpec:\n \"\"\"SQLite blob file path specification. Attributes: column_name (str): name of the column in which the blob is stored. row_condition (tuple): condition of the row in which the blob is stored. The condition is a tuple in the form: (column_name, operator, value). The condition must yield a single result. row_index (int): index of the row in which the blob is stored. table_name (str): name of the table in which the blob is stored.\"\"\"\n\n def __init__(self, column_name=None, parent=None, row_condition=None, row_index=None, table_name=None, **kwargs):\n \"\"\"Initializes a path specification. Note that the SQLite blob file path specification must have a parent. Args: column_name (Optional[str]): name of the column in which the blob is stored. parent (Optional[PathSpec]): parent path specification. row_condition (Optional[tuple]): condition of the row in which the blob is stored. The condition is a tuple in the form: (column_name, operator, value). The condition must yield a single result. 
row_index (Optional[int]): index of the row in which the blob is stored. table_name (Optional[str]): name of the table in which the blob is stored. Raises: ValueError: when table_name, column_name, row_condition and row_index, or parent is not set.\"\"\"\n <|body_0|>\n\n def comparable(self):\n \"\"\"str: comparable representation of the path specification.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not table_name or not column_name or (not parent):\n raise ValueError('Missing table_name, column_name or parent value.')\n if row_condition and (not isinstance(row_condition, tuple) or len(row_condition) != 3):\n raise ValueError('Unsupported row_condition not a tuple in the form: (column_name, operator, value).')\n super(SQLiteBlobPathSpec, self).__init__(parent=parent, **kwargs)\n self.column_name = column_name\n self.row_condition = row_condition\n self.row_index = row_index\n self.table_name = table_name\n<|end_body_0|>\n\n<|body_start_1|>\n string_parts = []\n string_parts.append(f'table name: {self.table_name:s}')\n string_parts.append(f'column name: {self.column_name:s}')\n if self.row_condition is not None:\n row_condition_string = ' '.join([f'{value!s}' for value in self.row_condition])\n string_parts.append(f'row condition: \"{row_condition_string:s}\"')\n if self.row_index is not None:\n string_parts.append(f'row index: {self.row_index:d}')\n return self._GetComparable(sub_comparable_string=', '.join(string_parts))\n<|end_body_1|>\n", "revision_id": "28756d910e951a22c5f0b2bcf5184f055a19d544", "skeleton": "<|skeleton|>\nclass SQLiteBlobPathSpec:\n \"\"\"SQLite blob file path specification. Attributes: column_name (str): name of the column in which the blob is stored. row_condition (tuple): condition of the row in which the blob is stored. The condition is a tuple in the form: (column_name, operator, value). The condition must yield a single result. row_index (int): index of the row in which the blob is stored. table_name (str): name of the table in which the blob is stored.\"\"\"\n\n def __init__(self, column_name=None, parent=None, row_condition=None, row_index=None, table_name=None, **kwargs):\n \"\"\"Initializes a path specification. Note that the SQLite blob file path specification must have a parent. Args: column_name (Optional[str]): name of the column in which the blob is stored. parent (Optional[PathSpec]): parent path specification. row_condition (Optional[tuple]): condition of the row in which the blob is stored. The condition is a tuple in the form: (column_name, operator, value). The condition must yield a single result. row_index (Optional[int]): index of the row in which the blob is stored. table_name (Optional[str]): name of the table in which the blob is stored. Raises: ValueError: when table_name, column_name, row_condition and row_index, or parent is not set.\"\"\"\n <|body_0|>\n\n def comparable(self):\n \"\"\"str: comparable representation of the path specification.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SQLiteBlobPathSpec:\n \"\"\"SQLite blob file path specification. Attributes: column_name (str): name of the column in which the blob is stored. row_condition (tuple): condition of the row in which the blob is stored. The condition is a tuple in the form: (column_name, operator, value). The condition must yield a single result. 
row_index (int): index of the row in which the blob is stored. table_name (str): name of the table in which the blob is stored.\"\"\"\n\n def __init__(self, column_name=None, parent=None, row_condition=None, row_index=None, table_name=None, **kwargs):\n \"\"\"Initializes a path specification. Note that the SQLite blob file path specification must have a parent. Args: column_name (Optional[str]): name of the column in which the blob is stored. parent (Optional[PathSpec]): parent path specification. row_condition (Optional[tuple]): condition of the row in which the blob is stored. The condition is a tuple in the form: (column_name, operator, value). The condition must yield a single result. row_index (Optional[int]): index of the row in which the blob is stored. table_name (Optional[str]): name of the table in which the blob is stored. Raises: ValueError: when table_name, column_name, row_condition and row_index, or parent is not set.\"\"\"\n if not table_name or not column_name or (not parent):\n raise ValueError('Missing table_name, column_name or parent value.')\n if row_condition and (not isinstance(row_condition, tuple) or len(row_condition) != 3):\n raise ValueError('Unsupported row_condition not a tuple in the form: (column_name, operator, value).')\n super(SQLiteBlobPathSpec, self).__init__(parent=parent, **kwargs)\n self.column_name = column_name\n self.row_condition = row_condition\n self.row_index = row_index\n self.table_name = table_name\n\n def comparable(self):\n \"\"\"str: comparable representation of the path specification.\"\"\"\n string_parts = []\n string_parts.append(f'table name: {self.table_name:s}')\n string_parts.append(f'column name: {self.column_name:s}')\n if self.row_condition is not None:\n row_condition_string = ' '.join([f'{value!s}' for value in self.row_condition])\n string_parts.append(f'row condition: \"{row_condition_string:s}\"')\n if self.row_index is not None:\n string_parts.append(f'row index: {self.row_index:d}')\n return self._GetComparable(sub_comparable_string=', '.join(string_parts))\n", "source": "the_stack_v2_python_sparse", "source_path": "dfvfs/path/sqlite_blob_path_spec.py", "source_repo": "log2timeline/dfvfs", "split": "val", "star_events_count": 197}
{"blob_id": "46301e88ca00c2414752ddb3e5c5785876cd697b", "bodies": ["self.name = name\nif criteria is None:\n self.criteria = []\nelse:\n self.criteria = criteria\nself.color = color", "all_indices = set()\nself.criteria.sort(key=lambda x: x.and_or, reverse=True)\nfor i, c in enumerate(self.criteria):\n indices = c.select(sca)\n m = c.and_or\n if m == 'and' and i > 0:\n all_indices.intersection_update(indices)\n else:\n all_indices.update(indices)\nreturn np.array(list(all_indices))"], "bodies_text": "<|body_start_0|>\n self.name = name\n if criteria is None:\n self.criteria = []\n else:\n self.criteria = criteria\n self.color = color\n<|end_body_0|>\n\n<|body_start_1|>\n all_indices = set()\n self.criteria.sort(key=lambda x: x.and_or, reverse=True)\n for i, c in enumerate(self.criteria):\n indices = c.select(sca)\n m = c.and_or\n if m == 'and' and i > 0:\n all_indices.intersection_update(indices)\n else:\n all_indices.update(indices)\n return np.array(list(all_indices))\n<|end_body_1|>\n", "class_docstring": "this class represents a single label", "class_name": "CustomLabel", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CustomLabel:\n \"\"\"this class represents a single label\"\"\"\n\n def __init__(self, name, criteria=None, color=None):\n \"\"\"Args: name (str) criteria: list of LabelCriterion objects\"\"\"\n <|body_0|>\n\n def select_cells(self, sca):\n \"\"\"Selects cells corresponding to the given label\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.name = name\n if criteria is None:\n self.criteria = []\n else:\n self.criteria = criteria\n self.color = color\n<|end_body_0|>\n\n<|body_start_1|>\n all_indices = set()\n self.criteria.sort(key=lambda x: x.and_or, reverse=True)\n for i, c in enumerate(self.criteria):\n indices = c.select(sca)\n m = c.and_or\n if m == 'and' and i > 0:\n all_indices.intersection_update(indices)\n else:\n all_indices.update(indices)\n return np.array(list(all_indices))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000002", "length_bytes": 6289, "license_type": "no_license", "methods": [{"docstring": "Args: name (str) criteria: list of LabelCriterion objects", "name": "__init__", "signature": "def __init__(self, name, criteria=None, color=None)"}, {"docstring": "Selects cells corresponding to the given label", "name": "select_cells", "signature": "def select_cells(self, sca)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_023600", "prompt": "Implement the Python class `CustomLabel` described below.\n\nClass description:\nthis class represents a single label\n\nMethod signatures and docstrings:\n- def __init__(self, name, criteria=None, color=None): Args: name (str) criteria: list of LabelCriterion objects\n- def select_cells(self, sca): Selects cells corresponding to the given label", "prompted_full_text": "Implement the Python class `CustomLabel` described below.\n\nClass description:\nthis class represents a single label\n\nMethod signatures and docstrings:\n- def __init__(self, name, criteria=None, color=None): Args: name (str) criteria: list of LabelCriterion objects\n- def select_cells(self, sca): Selects cells corresponding to the given label\n\n<|skeleton|>\nclass CustomLabel:\n \"\"\"this class represents a single label\"\"\"\n\n def __init__(self, name, criteria=None, color=None):\n \"\"\"Args: name (str) criteria: list of LabelCriterion objects\"\"\"\n <|body_0|>\n\n def select_cells(self, sca):\n \"\"\"Selects cells 
corresponding to the given label\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.name = name\n if criteria is None:\n self.criteria = []\n else:\n self.criteria = criteria\n self.color = color\n<|end_body_0|>\n\n<|body_start_1|>\n all_indices = set()\n self.criteria.sort(key=lambda x: x.and_or, reverse=True)\n for i, c in enumerate(self.criteria):\n indices = c.select(sca)\n m = c.and_or\n if m == 'and' and i > 0:\n all_indices.intersection_update(indices)\n else:\n all_indices.update(indices)\n return np.array(list(all_indices))\n<|end_body_1|>\n", "revision_id": "a64425ca5bff57c3fe336e47fddf00fe2bbc1e75", "skeleton": "<|skeleton|>\nclass CustomLabel:\n \"\"\"this class represents a single label\"\"\"\n\n def __init__(self, name, criteria=None, color=None):\n \"\"\"Args: name (str) criteria: list of LabelCriterion objects\"\"\"\n <|body_0|>\n\n def select_cells(self, sca):\n \"\"\"Selects cells corresponding to the given label\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CustomLabel:\n \"\"\"this class represents a single label\"\"\"\n\n def __init__(self, name, criteria=None, color=None):\n \"\"\"Args: name (str) criteria: list of LabelCriterion objects\"\"\"\n self.name = name\n if criteria is None:\n self.criteria = []\n else:\n self.criteria = criteria\n self.color = color\n\n def select_cells(self, sca):\n \"\"\"Selects cells corresponding to the given label\"\"\"\n all_indices = set()\n self.criteria.sort(key=lambda x: x.and_or, reverse=True)\n for i, c in enumerate(self.criteria):\n indices = c.select(sca)\n m = c.and_or\n if m == 'and' and i > 0:\n all_indices.intersection_update(indices)\n else:\n all_indices.update(indices)\n return np.array(list(all_indices))\n", "source": "the_stack_v2_python_sparse", "source_path": "uncurl_analysis/custom_cell_selection.py", "source_repo": "yjzhang/uncurl_analysis", "split": "val", "star_events_count": 1}
{"blob_id": "4c3f94d40e16cbe5307cf6d59cfc6760fe5f1f04", "bodies": ["print('getting user view')\nusers = User.objects.all()\nuser_serializer = UserSerializers(users, many=True)\nreturn Response({'data': user_serializer.data}, status=status.HTTP_200_OK)", "print('posting......')\ndata = request.data\nprint('data is......')\nprint(data)\nsalt = get_salt()\nhashed_password = hash_string(salt, data['password'])\nserializer = UserSerializers(data=data)\nif serializer.is_valid():\n serializer.save(salt=salt, hashed_password=hashed_password)\n print(serializer.data)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\nelif serializer.errors:\n print(serializer.errors)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)"], "bodies_text": "<|body_start_0|>\n print('getting user view')\n users = User.objects.all()\n user_serializer = UserSerializers(users, many=True)\n return Response({'data': user_serializer.data}, status=status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n print('posting......')\n data = request.data\n print('data is......')\n print(data)\n salt = get_salt()\n hashed_password = hash_string(salt, data['password'])\n serializer = UserSerializers(data=data)\n if serializer.is_valid():\n serializer.save(salt=salt, hashed_password=hashed_password)\n print(serializer.data)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n elif serializer.errors:\n print(serializer.errors)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n<|end_body_1|>\n", "class_docstring": "APIView of the user...", "class_name": "user_view", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass user_view:\n \"\"\"APIView of the user...\"\"\"\n\n def get(self, request):\n \"\"\"Get request to list all the users...\"\"\"\n <|body_0|>\n\n def post(self, request):\n \"\"\"Post request to create new users...\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n print('getting user view')\n users = User.objects.all()\n user_serializer = UserSerializers(users, many=True)\n return Response({'data': user_serializer.data}, status=status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n print('posting......')\n data = request.data\n print('data is......')\n print(data)\n salt = get_salt()\n hashed_password = hash_string(salt, data['password'])\n serializer = UserSerializers(data=data)\n if serializer.is_valid():\n serializer.save(salt=salt, hashed_password=hashed_password)\n print(serializer.data)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n elif serializer.errors:\n print(serializer.errors)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000003", "length_bytes": 17182, "license_type": "no_license", "methods": [{"docstring": "Get request to list all the users...", "name": "get", "signature": "def get(self, request)"}, {"docstring": "Post request to create new users...", "name": "post", "signature": "def post(self, request)"}], "n_methods": 2, "prompt": "Implement the Python class `user_view` described below.\n\nClass description:\nAPIView of the user...\n\nMethod signatures and docstrings:\n- def get(self, request): Get request to list all the users...\n- def post(self, request): Post request to create new users...", "prompted_full_text": "Implement the Python class `user_view` described below.\n\nClass description:\nAPIView of the user...\n\nMethod signatures and 
docstrings:\n- def get(self, request): Get request to list all the users...\n- def post(self, request): Post request to create new users...\n\n<|skeleton|>\nclass user_view:\n \"\"\"APIView of the user...\"\"\"\n\n def get(self, request):\n \"\"\"Get request to list all the users...\"\"\"\n <|body_0|>\n\n def post(self, request):\n \"\"\"Post request to create new users...\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n print('getting user view')\n users = User.objects.all()\n user_serializer = UserSerializers(users, many=True)\n return Response({'data': user_serializer.data}, status=status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n print('posting......')\n data = request.data\n print('data is......')\n print(data)\n salt = get_salt()\n hashed_password = hash_string(salt, data['password'])\n serializer = UserSerializers(data=data)\n if serializer.is_valid():\n serializer.save(salt=salt, hashed_password=hashed_password)\n print(serializer.data)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n elif serializer.errors:\n print(serializer.errors)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n<|end_body_1|>\n", "revision_id": "6dc0a0de2e8cfda45440cc5095dcf4b27346e854", "skeleton": "<|skeleton|>\nclass user_view:\n \"\"\"APIView of the user...\"\"\"\n\n def get(self, request):\n \"\"\"Get request to list all the users...\"\"\"\n <|body_0|>\n\n def post(self, request):\n \"\"\"Post request to create new users...\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class user_view:\n \"\"\"APIView of the user...\"\"\"\n\n def get(self, request):\n \"\"\"Get request to list all the users...\"\"\"\n print('getting user view')\n users = User.objects.all()\n user_serializer = UserSerializers(users, many=True)\n return Response({'data': user_serializer.data}, status=status.HTTP_200_OK)\n\n def post(self, request):\n \"\"\"Post request to create new users...\"\"\"\n print('posting......')\n data = request.data\n print('data is......')\n print(data)\n salt = get_salt()\n hashed_password = hash_string(salt, data['password'])\n serializer = UserSerializers(data=data)\n if serializer.is_valid():\n serializer.save(salt=salt, hashed_password=hashed_password)\n print(serializer.data)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n elif serializer.errors:\n print(serializer.errors)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n", "source": "the_stack_v2_python_sparse", "source_path": "apps/account/views.py", "source_repo": "bbkchdhry/django_cms", "split": "val", "star_events_count": 0}
{"blob_id": "a99e3d377ec549f06f0ddcaf41d5e1eb1ce41e27", "bodies": ["cmdlines = os.popen('atq').readlines()\ncmd = []\nfor oneline in cmdlines:\n cmdtemp = oneline.split()\n cmddict = {'num': cmdtemp[0], 'user': cmdtemp[7], 'time': cmdtemp[1] + ' ' + cmdtemp[2] + ' ' + cmdtemp[3] + ' ' + cmdtemp[4] + ' ' + cmdtemp[5]}\n cmd.append(cmddict)\nreturn Response(cmd, status=status.HTTP_200_OK)", "req = request.data\nusername = req['username']\ntime = req['time']\npath = req['path']\ncmd = req['cmd']\nat_file = open('/tmp/at.txt', 'w')\ntry:\n at_file.write(cmd)\nfinally:\n at_file.close()\ncommand = 'cd ' + path + ' && su ' + username + \" -c 'at \" + time + ' -f ' + \"/tmp/at.txt'\"\ncode, info = commands.getstatusoutput(command)\nif os.path.exists('/tmp/at.txt'):\n os.remove('/tmp/at.txt')\nif code != 0:\n return Response(info, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\nelse:\n return Response(info, status=status.HTTP_201_CREATED)"], "bodies_text": "<|body_start_0|>\n cmdlines = os.popen('atq').readlines()\n cmd = []\n for oneline in cmdlines:\n cmdtemp = oneline.split()\n cmddict = {'num': cmdtemp[0], 'user': cmdtemp[7], 'time': cmdtemp[1] + ' ' + cmdtemp[2] + ' ' + cmdtemp[3] + ' ' + cmdtemp[4] + ' ' + cmdtemp[5]}\n cmd.append(cmddict)\n return Response(cmd, status=status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n req = request.data\n username = req['username']\n time = req['time']\n path = req['path']\n cmd = req['cmd']\n at_file = open('/tmp/at.txt', 'w')\n try:\n at_file.write(cmd)\n finally:\n at_file.close()\n command = 'cd ' + path + ' && su ' + username + \" -c 'at \" + time + ' -f ' + \"/tmp/at.txt'\"\n code, info = commands.getstatusoutput(command)\n if os.path.exists('/tmp/at.txt'):\n os.remove('/tmp/at.txt')\n if code != 0:\n return Response(info, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n else:\n return Response(info, status=status.HTTP_201_CREATED)\n<|end_body_1|>\n", "class_docstring": "List all precmd", "class_name": "CmdList", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CmdList:\n \"\"\"List all precmd\"\"\"\n\n def get(self, request, format=None):\n \"\"\"Get precmd list.\"\"\"\n <|body_0|>\n\n def post(self, request, format=None):\n \"\"\"craet one schedual cmd.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cmdlines = os.popen('atq').readlines()\n cmd = []\n for oneline in cmdlines:\n cmdtemp = oneline.split()\n cmddict = {'num': cmdtemp[0], 'user': cmdtemp[7], 'time': cmdtemp[1] + ' ' + cmdtemp[2] + ' ' + cmdtemp[3] + ' ' + cmdtemp[4] + ' ' + cmdtemp[5]}\n cmd.append(cmddict)\n return Response(cmd, status=status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n req = request.data\n username = req['username']\n time = req['time']\n path = req['path']\n cmd = req['cmd']\n at_file = open('/tmp/at.txt', 'w')\n try:\n at_file.write(cmd)\n finally:\n at_file.close()\n command = 'cd ' + path + ' && su ' + username + \" -c 'at \" + time + ' -f ' + \"/tmp/at.txt'\"\n code, info = commands.getstatusoutput(command)\n if os.path.exists('/tmp/at.txt'):\n os.remove('/tmp/at.txt')\n if code != 0:\n return Response(info, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n else:\n return Response(info, status=status.HTTP_201_CREATED)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000004", "length_bytes": 2744, "license_type": "no_license", "methods": [{"docstring": "Get precmd list.", "name": "get", "signature": "def get(self, request, format=None)"}, {"docstring": "craet one 
schedual cmd.", "name": "post", "signature": "def post(self, request, format=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_054008", "prompt": "Implement the Python class `CmdList` described below.\n\nClass description:\nList all precmd\n\nMethod signatures and docstrings:\n- def get(self, request, format=None): Get precmd list.\n- def post(self, request, format=None): craet one schedual cmd.", "prompted_full_text": "Implement the Python class `CmdList` described below.\n\nClass description:\nList all precmd\n\nMethod signatures and docstrings:\n- def get(self, request, format=None): Get precmd list.\n- def post(self, request, format=None): craet one schedual cmd.\n\n<|skeleton|>\nclass CmdList:\n \"\"\"List all precmd\"\"\"\n\n def get(self, request, format=None):\n \"\"\"Get precmd list.\"\"\"\n <|body_0|>\n\n def post(self, request, format=None):\n \"\"\"craet one schedual cmd.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cmdlines = os.popen('atq').readlines()\n cmd = []\n for oneline in cmdlines:\n cmdtemp = oneline.split()\n cmddict = {'num': cmdtemp[0], 'user': cmdtemp[7], 'time': cmdtemp[1] + ' ' + cmdtemp[2] + ' ' + cmdtemp[3] + ' ' + cmdtemp[4] + ' ' + cmdtemp[5]}\n cmd.append(cmddict)\n return Response(cmd, status=status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n req = request.data\n username = req['username']\n time = req['time']\n path = req['path']\n cmd = req['cmd']\n at_file = open('/tmp/at.txt', 'w')\n try:\n at_file.write(cmd)\n finally:\n at_file.close()\n command = 'cd ' + path + ' && su ' + username + \" -c 'at \" + time + ' -f ' + \"/tmp/at.txt'\"\n code, info = commands.getstatusoutput(command)\n if os.path.exists('/tmp/at.txt'):\n os.remove('/tmp/at.txt')\n if code != 0:\n return Response(info, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n else:\n return Response(info, status=status.HTTP_201_CREATED)\n<|end_body_1|>\n", "revision_id": "7f801a569a396a27371d0831752595877c224a6b", "skeleton": "<|skeleton|>\nclass CmdList:\n \"\"\"List all precmd\"\"\"\n\n def get(self, request, format=None):\n \"\"\"Get precmd list.\"\"\"\n <|body_0|>\n\n def post(self, request, format=None):\n \"\"\"craet one schedual cmd.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CmdList:\n \"\"\"List all precmd\"\"\"\n\n def get(self, request, format=None):\n \"\"\"Get precmd list.\"\"\"\n cmdlines = os.popen('atq').readlines()\n cmd = []\n for oneline in cmdlines:\n cmdtemp = oneline.split()\n cmddict = {'num': cmdtemp[0], 'user': cmdtemp[7], 'time': cmdtemp[1] + ' ' + cmdtemp[2] + ' ' + cmdtemp[3] + ' ' + cmdtemp[4] + ' ' + cmdtemp[5]}\n cmd.append(cmddict)\n return Response(cmd, status=status.HTTP_200_OK)\n\n def post(self, request, format=None):\n \"\"\"craet one schedual cmd.\"\"\"\n req = request.data\n username = req['username']\n time = req['time']\n path = req['path']\n cmd = req['cmd']\n at_file = open('/tmp/at.txt', 'w')\n try:\n at_file.write(cmd)\n finally:\n at_file.close()\n command = 'cd ' + path + ' && su ' + username + \" -c 'at \" + time + ' -f ' + \"/tmp/at.txt'\"\n code, info = commands.getstatusoutput(command)\n if os.path.exists('/tmp/at.txt'):\n os.remove('/tmp/at.txt')\n if code != 0:\n return Response(info, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n else:\n return Response(info, status=status.HTTP_201_CREATED)\n", "source": "the_stack_v2_python_sparse", 
"source_path": "Python_projects/flask_projects/unicorn_project/precmd/views.py", "source_repo": "sdtimothy8/Coding", "split": "val", "star_events_count": 0}
{"blob_id": "24ab4d2d697c9b059244d61622355cb2dc520229", "bodies": ["super(RNNDecodeCell, self).__init__()\nself._hidden_size = hidden_size\nself._dropout = dropout\nself._init_scale = init_scale\nself._name = name\nparam = fluid.ParamAttr(initializer=nn_utils.uniform(self._init_scale))\nbias = fluid.ParamAttr(initializer=nn_utils.zero)\nself.rnn_cell = layers.LSTMCell(hidden_size, param, bias, name=name)", "step_feed, step_state = cell_state\nstep_input = layers.concat([step_input, step_feed], 1)\nstep_out, new_state = self.rnn_cell(step_input, step_state)\ndecode_attn = models.Attention('dot_prod', name=self._name + '_attn')\nattn_out = decode_attn.forward(step_out, attn_k, attn_v, padding_mask=padding_mask)\noutput = layers.fc(layers.concat([step_out, attn_out], axis=-1), size=self._hidden_size, num_flatten_dims=1, act='tanh', name=self._name + '_out_fc', **nn_utils.param_attr(self._name + '_out_fc', self._init_scale, need_bias=False))\nif self._dropout > 0.0:\n output = layers.dropout(x=output, dropout_prob=self._dropout, dropout_implementation='upscale_in_train')\nreturn (output, [output, new_state])"], "bodies_text": "<|body_start_0|>\n super(RNNDecodeCell, self).__init__()\n self._hidden_size = hidden_size\n self._dropout = dropout\n self._init_scale = init_scale\n self._name = name\n param = fluid.ParamAttr(initializer=nn_utils.uniform(self._init_scale))\n bias = fluid.ParamAttr(initializer=nn_utils.zero)\n self.rnn_cell = layers.LSTMCell(hidden_size, param, bias, name=name)\n<|end_body_0|>\n\n<|body_start_1|>\n step_feed, step_state = cell_state\n step_input = layers.concat([step_input, step_feed], 1)\n step_out, new_state = self.rnn_cell(step_input, step_state)\n decode_attn = models.Attention('dot_prod', name=self._name + '_attn')\n attn_out = decode_attn.forward(step_out, attn_k, attn_v, padding_mask=padding_mask)\n output = layers.fc(layers.concat([step_out, attn_out], axis=-1), size=self._hidden_size, num_flatten_dims=1, act='tanh', name=self._name + '_out_fc', **nn_utils.param_attr(self._name + '_out_fc', self._init_scale, need_bias=False))\n if self._dropout > 0.0:\n output = layers.dropout(x=output, dropout_prob=self._dropout, dropout_implementation='upscale_in_train')\n return (output, [output, new_state])\n<|end_body_1|>\n", "class_docstring": "LSTM Decoder Cell", "class_name": "RNNDecodeCell", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RNNDecodeCell:\n \"\"\"LSTM Decoder Cell\"\"\"\n\n def __init__(self, hidden_size, dropout=0.0, init_scale=-1, name='rnn_decode_cell'):\n \"\"\"init of class Args: hidden_size (TYPE): NULL dropout (TYPE): Default is 0. init_scale (TYPE): Default is -1, means paddle default initializer is used. 
name (str): param name scope\"\"\"\n <|body_0|>\n\n def call(self, step_input, cell_state, attn_k, attn_v, padding_mask):\n \"\"\"one step call Args: step_input (Variable): [batch_size, hidden_size] cell_state (tuple): (Variable, Variable) Returns: tuple same as input: (Variable, (Variable, Variable)) Raises: NULL\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(RNNDecodeCell, self).__init__()\n self._hidden_size = hidden_size\n self._dropout = dropout\n self._init_scale = init_scale\n self._name = name\n param = fluid.ParamAttr(initializer=nn_utils.uniform(self._init_scale))\n bias = fluid.ParamAttr(initializer=nn_utils.zero)\n self.rnn_cell = layers.LSTMCell(hidden_size, param, bias, name=name)\n<|end_body_0|>\n\n<|body_start_1|>\n step_feed, step_state = cell_state\n step_input = layers.concat([step_input, step_feed], 1)\n step_out, new_state = self.rnn_cell(step_input, step_state)\n decode_attn = models.Attention('dot_prod', name=self._name + '_attn')\n attn_out = decode_attn.forward(step_out, attn_k, attn_v, padding_mask=padding_mask)\n output = layers.fc(layers.concat([step_out, attn_out], axis=-1), size=self._hidden_size, num_flatten_dims=1, act='tanh', name=self._name + '_out_fc', **nn_utils.param_attr(self._name + '_out_fc', self._init_scale, need_bias=False))\n if self._dropout > 0.0:\n output = layers.dropout(x=output, dropout_prob=self._dropout, dropout_implementation='upscale_in_train')\n return (output, [output, new_state])\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000005", "length_bytes": 3791, "license_type": "permissive", "methods": [{"docstring": "init of class Args: hidden_size (TYPE): NULL dropout (TYPE): Default is 0. init_scale (TYPE): Default is -1, means paddle default initializer is used. name (str): param name scope", "name": "__init__", "signature": "def __init__(self, hidden_size, dropout=0.0, init_scale=-1, name='rnn_decode_cell')"}, {"docstring": "one step call Args: step_input (Variable): [batch_size, hidden_size] cell_state (tuple): (Variable, Variable) Returns: tuple same as input: (Variable, (Variable, Variable)) Raises: NULL", "name": "call", "signature": "def call(self, step_input, cell_state, attn_k, attn_v, padding_mask)"}], "n_methods": 2, "prompt": "Implement the Python class `RNNDecodeCell` described below.\n\nClass description:\nLSTM Decoder Cell\n\nMethod signatures and docstrings:\n- def __init__(self, hidden_size, dropout=0.0, init_scale=-1, name='rnn_decode_cell'): init of class Args: hidden_size (TYPE): NULL dropout (TYPE): Default is 0. init_scale (TYPE): Default is -1, means paddle default initializer is used. name (str): param name scope\n- def call(self, step_input, cell_state, attn_k, attn_v, padding_mask): one step call Args: step_input (Variable): [batch_size, hidden_size] cell_state (tuple): (Variable, Variable) Returns: tuple same as input: (Variable, (Variable, Variable)) Raises: NULL", "prompted_full_text": "Implement the Python class `RNNDecodeCell` described below.\n\nClass description:\nLSTM Decoder Cell\n\nMethod signatures and docstrings:\n- def __init__(self, hidden_size, dropout=0.0, init_scale=-1, name='rnn_decode_cell'): init of class Args: hidden_size (TYPE): NULL dropout (TYPE): Default is 0. init_scale (TYPE): Default is -1, means paddle default initializer is used. 
name (str): param name scope\n- def call(self, step_input, cell_state, attn_k, attn_v, padding_mask): one step call Args: step_input (Variable): [batch_size, hidden_size] cell_state (tuple): (Variable, Variable) Returns: tuple same as input: (Variable, (Variable, Variable)) Raises: NULL\n\n<|skeleton|>\nclass RNNDecodeCell:\n \"\"\"LSTM Decoder Cell\"\"\"\n\n def __init__(self, hidden_size, dropout=0.0, init_scale=-1, name='rnn_decode_cell'):\n \"\"\"init of class Args: hidden_size (TYPE): NULL dropout (TYPE): Default is 0. init_scale (TYPE): Default is -1, means paddle default initializer is used. name (str): param name scope\"\"\"\n <|body_0|>\n\n def call(self, step_input, cell_state, attn_k, attn_v, padding_mask):\n \"\"\"one step call Args: step_input (Variable): [batch_size, hidden_size] cell_state (tuple): (Variable, Variable) Returns: tuple same as input: (Variable, (Variable, Variable)) Raises: NULL\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(RNNDecodeCell, self).__init__()\n self._hidden_size = hidden_size\n self._dropout = dropout\n self._init_scale = init_scale\n self._name = name\n param = fluid.ParamAttr(initializer=nn_utils.uniform(self._init_scale))\n bias = fluid.ParamAttr(initializer=nn_utils.zero)\n self.rnn_cell = layers.LSTMCell(hidden_size, param, bias, name=name)\n<|end_body_0|>\n\n<|body_start_1|>\n step_feed, step_state = cell_state\n step_input = layers.concat([step_input, step_feed], 1)\n step_out, new_state = self.rnn_cell(step_input, step_state)\n decode_attn = models.Attention('dot_prod', name=self._name + '_attn')\n attn_out = decode_attn.forward(step_out, attn_k, attn_v, padding_mask=padding_mask)\n output = layers.fc(layers.concat([step_out, attn_out], axis=-1), size=self._hidden_size, num_flatten_dims=1, act='tanh', name=self._name + '_out_fc', **nn_utils.param_attr(self._name + '_out_fc', self._init_scale, need_bias=False))\n if self._dropout > 0.0:\n output = layers.dropout(x=output, dropout_prob=self._dropout, dropout_implementation='upscale_in_train')\n return (output, [output, new_state])\n<|end_body_1|>\n", "revision_id": "e08f3cb7b9db4c837000316c791542580ba02624", "skeleton": "<|skeleton|>\nclass RNNDecodeCell:\n \"\"\"LSTM Decoder Cell\"\"\"\n\n def __init__(self, hidden_size, dropout=0.0, init_scale=-1, name='rnn_decode_cell'):\n \"\"\"init of class Args: hidden_size (TYPE): NULL dropout (TYPE): Default is 0. init_scale (TYPE): Default is -1, means paddle default initializer is used. name (str): param name scope\"\"\"\n <|body_0|>\n\n def call(self, step_input, cell_state, attn_k, attn_v, padding_mask):\n \"\"\"one step call Args: step_input (Variable): [batch_size, hidden_size] cell_state (tuple): (Variable, Variable) Returns: tuple same as input: (Variable, (Variable, Variable)) Raises: NULL\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class RNNDecodeCell:\n \"\"\"LSTM Decoder Cell\"\"\"\n\n def __init__(self, hidden_size, dropout=0.0, init_scale=-1, name='rnn_decode_cell'):\n \"\"\"init of class Args: hidden_size (TYPE): NULL dropout (TYPE): Default is 0. init_scale (TYPE): Default is -1, means paddle default initializer is used. 
name (str): param name scope\"\"\"\n super(RNNDecodeCell, self).__init__()\n self._hidden_size = hidden_size\n self._dropout = dropout\n self._init_scale = init_scale\n self._name = name\n param = fluid.ParamAttr(initializer=nn_utils.uniform(self._init_scale))\n bias = fluid.ParamAttr(initializer=nn_utils.zero)\n self.rnn_cell = layers.LSTMCell(hidden_size, param, bias, name=name)\n\n def call(self, step_input, cell_state, attn_k, attn_v, padding_mask):\n \"\"\"one step call Args: step_input (Variable): [batch_size, hidden_size] cell_state (tuple): (Variable, Variable) Returns: tuple same as input: (Variable, (Variable, Variable)) Raises: NULL\"\"\"\n step_feed, step_state = cell_state\n step_input = layers.concat([step_input, step_feed], 1)\n step_out, new_state = self.rnn_cell(step_input, step_state)\n decode_attn = models.Attention('dot_prod', name=self._name + '_attn')\n attn_out = decode_attn.forward(step_out, attn_k, attn_v, padding_mask=padding_mask)\n output = layers.fc(layers.concat([step_out, attn_out], axis=-1), size=self._hidden_size, num_flatten_dims=1, act='tanh', name=self._name + '_out_fc', **nn_utils.param_attr(self._name + '_out_fc', self._init_scale, need_bias=False))\n if self._dropout > 0.0:\n output = layers.dropout(x=output, dropout_prob=self._dropout, dropout_implementation='upscale_in_train')\n return (output, [output, new_state])\n", "source": "the_stack_v2_python_sparse", "source_path": "NLP/DuSQL-Baseline/text2sql/models/rnn_decode_cell.py", "source_repo": "ajayvbabu/Research", "split": "val", "star_events_count": 0}
{"blob_id": "e1ddc42257570b73de0d727b570cb51153da39df", "bodies": ["def dfs(node):\n if node.left:\n node.left.val = 2 * node.val + 1\n self.num_set.add(node.left.val)\n dfs(node.left)\n if node.right:\n node.right.val = 2 * node.val + 2\n self.num_set.add(node.right.val)\n dfs(node.right)\n return\nself.num_set = set()\nroot.val = 0\nself.num_set.add(0)\ndfs(root)\nself.root = root", "if target in self.num_set:\n return True\nreturn False"], "bodies_text": "<|body_start_0|>\n def dfs(node):\n if node.left:\n node.left.val = 2 * node.val + 1\n self.num_set.add(node.left.val)\n dfs(node.left)\n if node.right:\n node.right.val = 2 * node.val + 2\n self.num_set.add(node.right.val)\n dfs(node.right)\n return\n self.num_set = set()\n root.val = 0\n self.num_set.add(0)\n dfs(root)\n self.root = root\n<|end_body_0|>\n\n<|body_start_1|>\n if target in self.num_set:\n return True\n return False\n<|end_body_1|>\n", "class_docstring": "", "class_name": "FindElements", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FindElements:\n\n def __init__(self, root):\n \"\"\":type root: TreeNode\"\"\"\n <|body_0|>\n\n def find(self, target):\n \"\"\":type target: int :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def dfs(node):\n if node.left:\n node.left.val = 2 * node.val + 1\n self.num_set.add(node.left.val)\n dfs(node.left)\n if node.right:\n node.right.val = 2 * node.val + 2\n self.num_set.add(node.right.val)\n dfs(node.right)\n return\n self.num_set = set()\n root.val = 0\n self.num_set.add(0)\n dfs(root)\n self.root = root\n<|end_body_0|>\n\n<|body_start_1|>\n if target in self.num_set:\n return True\n return False\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000006", "length_bytes": 3159, "license_type": "no_license", "methods": [{"docstring": ":type root: TreeNode", "name": "__init__", "signature": "def __init__(self, root)"}, {"docstring": ":type target: int :rtype: bool", "name": "find", "signature": "def find(self, target)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_032007", "prompt": "Implement the Python class `FindElements` described below.\n\nClass description:\nImplement the FindElements class.\n\nMethod signatures and docstrings:\n- def __init__(self, root): :type root: TreeNode\n- def find(self, target): :type target: int :rtype: bool", "prompted_full_text": "Implement the Python class `FindElements` described below.\n\nClass description:\nImplement the FindElements class.\n\nMethod signatures and docstrings:\n- def __init__(self, root): :type root: TreeNode\n- def find(self, target): :type target: int :rtype: bool\n\n<|skeleton|>\nclass FindElements:\n\n def __init__(self, root):\n \"\"\":type root: TreeNode\"\"\"\n <|body_0|>\n\n def find(self, target):\n \"\"\":type target: int :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def dfs(node):\n if node.left:\n node.left.val = 2 * node.val + 1\n self.num_set.add(node.left.val)\n dfs(node.left)\n if node.right:\n node.right.val = 2 * node.val + 2\n self.num_set.add(node.right.val)\n dfs(node.right)\n return\n self.num_set = set()\n root.val = 0\n self.num_set.add(0)\n dfs(root)\n self.root = root\n<|end_body_0|>\n\n<|body_start_1|>\n if target in self.num_set:\n return True\n return False\n<|end_body_1|>\n", "revision_id": "80940738f9eab7f641efb2df9bce8b7bc888a4eb", "skeleton": "<|skeleton|>\nclass FindElements:\n\n def __init__(self, root):\n \"\"\":type root: TreeNode\"\"\"\n 
<|body_0|>\n\n def find(self, target):\n \"\"\":type target: int :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class FindElements:\n def __init__(self, root):\n \"\"\":type root: TreeNode\"\"\"\n def dfs(node):\n if node.left:\n node.left.val = 2 * node.val + 1\n self.num_set.add(node.left.val)\n dfs(node.left)\n if node.right:\n node.right.val = 2 * node.val + 2\n self.num_set.add(node.right.val)\n dfs(node.right)\n return\n self.num_set = set()\n root.val = 0\n self.num_set.add(0)\n dfs(root)\n self.root = root\n\n def find(self, target):\n \"\"\":type target: int :rtype: bool\"\"\"\n if target in self.num_set:\n return True\n return False\n", "source": "the_stack_v2_python_sparse", "source_path": "1261. 在受污染的二叉树中查找元素.py", "source_repo": "half-empty/LeetCode", "split": "val", "star_events_count": 0}
{"blob_id": "6c2cc2009c99a7814846642daad27a99073c6cd0", "bodies": ["super().save_model(request, obj, form, change)\nfrom celery_tasks.tasks import generate_static_index_html\ngenerate_static_index_html.delay()\ncache.delete('index_page_data')", "super().save_model(request, obj)\nfrom celery_tasks.tasks import generate_static_index_html\ngenerate_static_index_html.delay()\ncache.delete('index_page_data')"], "bodies_text": "<|body_start_0|>\n super().save_model(request, obj, form, change)\n from celery_tasks.tasks import generate_static_index_html\n generate_static_index_html.delay()\n cache.delete('index_page_data')\n<|end_body_0|>\n\n<|body_start_1|>\n super().save_model(request, obj)\n from celery_tasks.tasks import generate_static_index_html\n generate_static_index_html.delay()\n cache.delete('index_page_data')\n<|end_body_1|>\n", "class_docstring": "", "class_name": "BaseModelAdmin", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BaseModelAdmin:\n\n def save_model(self, request, obj, form, change):\n \"\"\"后台新增或更新表中的数据时调用这个方法\"\"\"\n <|body_0|>\n\n def delete_model(self, request, obj):\n \"\"\"删除表中的数据时也使用\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().save_model(request, obj, form, change)\n from celery_tasks.tasks import generate_static_index_html\n generate_static_index_html.delay()\n cache.delete('index_page_data')\n<|end_body_0|>\n\n<|body_start_1|>\n super().save_model(request, obj)\n from celery_tasks.tasks import generate_static_index_html\n generate_static_index_html.delay()\n cache.delete('index_page_data')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000007", "length_bytes": 1872, "license_type": "no_license", "methods": [{"docstring": "后台新增或更新表中的数据时调用这个方法", "name": "save_model", "signature": "def save_model(self, request, obj, form, change)"}, {"docstring": "删除表中的数据时也使用", "name": "delete_model", "signature": "def delete_model(self, request, obj)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_019080", "prompt": "Implement the Python class `BaseModelAdmin` described below.\n\nClass description:\nImplement the BaseModelAdmin class.\n\nMethod signatures and docstrings:\n- def save_model(self, request, obj, form, change): 后台新增或更新表中的数据时调用这个方法\n- def delete_model(self, request, obj): 删除表中的数据时也使用", "prompted_full_text": "Implement the Python class `BaseModelAdmin` described below.\n\nClass description:\nImplement the BaseModelAdmin class.\n\nMethod signatures and docstrings:\n- def save_model(self, request, obj, form, change): 后台新增或更新表中的数据时调用这个方法\n- def delete_model(self, request, obj): 删除表中的数据时也使用\n\n<|skeleton|>\nclass BaseModelAdmin:\n\n def save_model(self, request, obj, form, change):\n \"\"\"后台新增或更新表中的数据时调用这个方法\"\"\"\n <|body_0|>\n\n def delete_model(self, request, obj):\n \"\"\"删除表中的数据时也使用\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().save_model(request, obj, form, change)\n from celery_tasks.tasks import generate_static_index_html\n generate_static_index_html.delay()\n cache.delete('index_page_data')\n<|end_body_0|>\n\n<|body_start_1|>\n super().save_model(request, obj)\n from celery_tasks.tasks import generate_static_index_html\n generate_static_index_html.delay()\n cache.delete('index_page_data')\n<|end_body_1|>\n", "revision_id": "206909fa8ab76de4b2aa5cabc9d76e9977809d46", "skeleton": "<|skeleton|>\nclass BaseModelAdmin:\n\n def save_model(self, request, obj, form, change):\n \"\"\"后台新增或更新表中的数据时调用这个方法\"\"\"\n 
<|body_0|>\n\n def delete_model(self, request, obj):\n \"\"\"删除表中的数据时也使用\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class BaseModelAdmin:\n def save_model(self, request, obj, form, change):\n \"\"\"后台新增或更新表中的数据时调用这个方法\"\"\"\n super().save_model(request, obj, form, change)\n from celery_tasks.tasks import generate_static_index_html\n generate_static_index_html.delay()\n cache.delete('index_page_data')\n\n def delete_model(self, request, obj):\n \"\"\"删除表中的数据时也使用\"\"\"\n super().save_model(request, obj)\n from celery_tasks.tasks import generate_static_index_html\n generate_static_index_html.delay()\n cache.delete('index_page_data')\n", "source": "the_stack_v2_python_sparse", "source_path": "E-commerce/dailyfresh/apps/goods/admin.py", "source_repo": "zxk1994/Project", "split": "val", "star_events_count": 0}
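In the BaseModelAdmin record above, the Chinese docstrings read roughly "called when the admin site creates or updates a row" and "also used when deleting a row". Note that the mined delete_model body delegates to super().save_model(request, obj), which both drops the form/change arguments that save_model requires and never performs the deletion; the conventional Django hook is super().delete_model(request, obj). A corrected sketch, assuming Django's admin.ModelAdmin API and treating celery_tasks.tasks as the repo-local module the record imports:

from django.contrib import admin
from django.core.cache import cache

class BaseModelAdmin(admin.ModelAdmin):
    def _regenerate_index(self):
        # Rebuild the static index page off-process, then drop the cached copy.
        from celery_tasks.tasks import generate_static_index_html  # repo-local task
        generate_static_index_html.delay()
        cache.delete('index_page_data')

    def save_model(self, request, obj, form, change):
        super().save_model(request, obj, form, change)
        self._regenerate_index()

    def delete_model(self, request, obj):
        # Actually delete the row (the mined body called save_model here),
        # then refresh the index exactly as the save path does.
        super().delete_model(request, obj)
        self._regenerate_index()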
{"blob_id": "c7230d97d9d9d88a56995a2e7f5d613e0e5be556", "bodies": ["self._context = zmqcontext\nself._monitor_sock = self._context.socket(zmq.PUSH)\nself._monitor_sock.connect('tcp://127.0.0.101:10002')\nself.total_data_counter = 0\nself.current_data_counter = 0\nself.got_data = False\nself.data_timeout = None\nself.mon_wait = 1.0\nself.name = name\nself.past = None\nloop.create_task(self.sendmon())\nself.loop = loop", "self.total_data_counter += 1\nself.current_data_counter += 1\nself.got_data = True", "now = datetime.datetime.now().timestamp()\nif self.past is None:\n self.past = now\n self.current_data_counter = 0\n return\ndt = now - self.past\nself.past = now\nrate = self.current_data_counter / dt\nself.current_data_counter = 0\nreturn rate", "while True:\n await asyncio.sleep(self.mon_wait)\n mdata = MonitorData()\n tstamp = datetime.datetime.utcnow().timestamp()\n mdata.time.sec = int(tstamp)\n mdata.time.nsec = int((tstamp - mdata.time.sec) * 1000000000.0)\n mdata.reciver.pid = os.getpid()\n mdata.reciver.name = self.name\n rate = self._compute_rates()\n if rate is None:\n continue\n mdata.reciver.data_rate = rate\n mdata.reciver.recv_data = self.got_data\n self.got_data = False\n self._monitor_sock.send(mdata.SerializeToString())"], "bodies_text": "<|body_start_0|>\n self._context = zmqcontext\n self._monitor_sock = self._context.socket(zmq.PUSH)\n self._monitor_sock.connect('tcp://127.0.0.101:10002')\n self.total_data_counter = 0\n self.current_data_counter = 0\n self.got_data = False\n self.data_timeout = None\n self.mon_wait = 1.0\n self.name = name\n self.past = None\n loop.create_task(self.sendmon())\n self.loop = loop\n<|end_body_0|>\n\n<|body_start_1|>\n self.total_data_counter += 1\n self.current_data_counter += 1\n self.got_data = True\n<|end_body_1|>\n\n<|body_start_2|>\n now = datetime.datetime.now().timestamp()\n if self.past is None:\n self.past = now\n self.current_data_counter = 0\n return\n dt = now - self.past\n self.past = now\n rate = self.current_data_counter / dt\n self.current_data_counter = 0\n return rate\n<|end_body_2|>\n\n<|body_start_3|>\n while True:\n await asyncio.sleep(self.mon_wait)\n mdata = MonitorData()\n tstamp = datetime.datetime.utcnow().timestamp()\n mdata.time.sec = int(tstamp)\n mdata.time.nsec = int((tstamp - mdata.time.sec) * 1000000000.0)\n mdata.reciver.pid = os.getpid()\n mdata.reciver.name = self.name\n rate = self._compute_rates()\n if rate is None:\n continue\n mdata.reciver.data_rate = rate\n mdata.reciver.recv_data = self.got_data\n self.got_data = False\n self._monitor_sock.send(mdata.SerializeToString())\n<|end_body_3|>\n", "class_docstring": "", "class_name": "ReceiverMonSender", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ReceiverMonSender:\n\n def __init__(self, name, loop, zmqcontext):\n \"\"\"Summary Args: name (TYPE): Description loop (TYPE): Description zmqcontext (TYPE): Description\"\"\"\n <|body_0|>\n\n def register_data_packet(self):\n \"\"\"Summary\"\"\"\n <|body_1|>\n\n def _compute_rates(self):\n \"\"\"Summary Returns: TYPE: Description\"\"\"\n <|body_2|>\n\n async def sendmon(self):\n \"\"\"Summary\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._context = zmqcontext\n self._monitor_sock = self._context.socket(zmq.PUSH)\n self._monitor_sock.connect('tcp://127.0.0.101:10002')\n self.total_data_counter = 0\n self.current_data_counter = 0\n self.got_data = False\n self.data_timeout = None\n self.mon_wait = 1.0\n 
self.name = name\n self.past = None\n loop.create_task(self.sendmon())\n self.loop = loop\n<|end_body_0|>\n\n<|body_start_1|>\n self.total_data_counter += 1\n self.current_data_counter += 1\n self.got_data = True\n<|end_body_1|>\n\n<|body_start_2|>\n now = datetime.datetime.now().timestamp()\n if self.past is None:\n self.past = now\n self.current_data_counter = 0\n return\n dt = now - self.past\n self.past = now\n rate = self.current_data_counter / dt\n self.current_data_counter = 0\n return rate\n<|end_body_2|>\n\n<|body_start_3|>\n while True:\n await asyncio.sleep(self.mon_wait)\n mdata = MonitorData()\n tstamp = datetime.datetime.utcnow().timestamp()\n mdata.time.sec = int(tstamp)\n mdata.time.nsec = int((tstamp - mdata.time.sec) * 1000000000.0)\n mdata.reciver.pid = os.getpid()\n mdata.reciver.name = self.name\n rate = self._compute_rates()\n if rate is None:\n continue\n mdata.reciver.data_rate = rate\n mdata.reciver.recv_data = self.got_data\n self.got_data = False\n self._monitor_sock.send(mdata.SerializeToString())\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000008", "length_bytes": 2244, "license_type": "permissive", "methods": [{"docstring": "Summary Args: name (TYPE): Description loop (TYPE): Description zmqcontext (TYPE): Description", "name": "__init__", "signature": "def __init__(self, name, loop, zmqcontext)"}, {"docstring": "Summary", "name": "register_data_packet", "signature": "def register_data_packet(self)"}, {"docstring": "Summary Returns: TYPE: Description", "name": "_compute_rates", "signature": "def _compute_rates(self)"}, {"docstring": "Summary", "name": "sendmon", "signature": "async def sendmon(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_val_001686", "prompt": "Implement the Python class `ReceiverMonSender` described below.\n\nClass description:\nImplement the ReceiverMonSender class.\n\nMethod signatures and docstrings:\n- def __init__(self, name, loop, zmqcontext): Summary Args: name (TYPE): Description loop (TYPE): Description zmqcontext (TYPE): Description\n- def register_data_packet(self): Summary\n- def _compute_rates(self): Summary Returns: TYPE: Description\n- async def sendmon(self): Summary", "prompted_full_text": "Implement the Python class `ReceiverMonSender` described below.\n\nClass description:\nImplement the ReceiverMonSender class.\n\nMethod signatures and docstrings:\n- def __init__(self, name, loop, zmqcontext): Summary Args: name (TYPE): Description loop (TYPE): Description zmqcontext (TYPE): Description\n- def register_data_packet(self): Summary\n- def _compute_rates(self): Summary Returns: TYPE: Description\n- async def sendmon(self): Summary\n\n<|skeleton|>\nclass ReceiverMonSender:\n\n def __init__(self, name, loop, zmqcontext):\n \"\"\"Summary Args: name (TYPE): Description loop (TYPE): Description zmqcontext (TYPE): Description\"\"\"\n <|body_0|>\n\n def register_data_packet(self):\n \"\"\"Summary\"\"\"\n <|body_1|>\n\n def _compute_rates(self):\n \"\"\"Summary Returns: TYPE: Description\"\"\"\n <|body_2|>\n\n async def sendmon(self):\n \"\"\"Summary\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._context = zmqcontext\n self._monitor_sock = self._context.socket(zmq.PUSH)\n self._monitor_sock.connect('tcp://127.0.0.101:10002')\n self.total_data_counter = 0\n self.current_data_counter = 0\n self.got_data = False\n self.data_timeout = None\n self.mon_wait = 1.0\n self.name = name\n self.past = None\n loop.create_task(self.sendmon())\n self.loop = 
loop\n<|end_body_0|>\n\n<|body_start_1|>\n self.total_data_counter += 1\n self.current_data_counter += 1\n self.got_data = True\n<|end_body_1|>\n\n<|body_start_2|>\n now = datetime.datetime.now().timestamp()\n if self.past is None:\n self.past = now\n self.current_data_counter = 0\n return\n dt = now - self.past\n self.past = now\n rate = self.current_data_counter / dt\n self.current_data_counter = 0\n return rate\n<|end_body_2|>\n\n<|body_start_3|>\n while True:\n await asyncio.sleep(self.mon_wait)\n mdata = MonitorData()\n tstamp = datetime.datetime.utcnow().timestamp()\n mdata.time.sec = int(tstamp)\n mdata.time.nsec = int((tstamp - mdata.time.sec) * 1000000000.0)\n mdata.reciver.pid = os.getpid()\n mdata.reciver.name = self.name\n rate = self._compute_rates()\n if rate is None:\n continue\n mdata.reciver.data_rate = rate\n mdata.reciver.recv_data = self.got_data\n self.got_data = False\n self._monitor_sock.send(mdata.SerializeToString())\n<|end_body_3|>\n", "revision_id": "4f9a84c34b580548a53719e000dc623f6e1b8a40", "skeleton": "<|skeleton|>\nclass ReceiverMonSender:\n\n def __init__(self, name, loop, zmqcontext):\n \"\"\"Summary Args: name (TYPE): Description loop (TYPE): Description zmqcontext (TYPE): Description\"\"\"\n <|body_0|>\n\n def register_data_packet(self):\n \"\"\"Summary\"\"\"\n <|body_1|>\n\n def _compute_rates(self):\n \"\"\"Summary Returns: TYPE: Description\"\"\"\n <|body_2|>\n\n async def sendmon(self):\n \"\"\"Summary\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ReceiverMonSender:\n def __init__(self, name, loop, zmqcontext):\n \"\"\"Summary Args: name (TYPE): Description loop (TYPE): Description zmqcontext (TYPE): Description\"\"\"\n self._context = zmqcontext\n self._monitor_sock = self._context.socket(zmq.PUSH)\n self._monitor_sock.connect('tcp://127.0.0.101:10002')\n self.total_data_counter = 0\n self.current_data_counter = 0\n self.got_data = False\n self.data_timeout = None\n self.mon_wait = 1.0\n self.name = name\n self.past = None\n loop.create_task(self.sendmon())\n self.loop = loop\n\n def register_data_packet(self):\n \"\"\"Summary\"\"\"\n self.total_data_counter += 1\n self.current_data_counter += 1\n self.got_data = True\n\n def _compute_rates(self):\n \"\"\"Summary Returns: TYPE: Description\"\"\"\n now = datetime.datetime.now().timestamp()\n if self.past is None:\n self.past = now\n self.current_data_counter = 0\n return\n dt = now - self.past\n self.past = now\n rate = self.current_data_counter / dt\n self.current_data_counter = 0\n return rate\n\n async def sendmon(self):\n \"\"\"Summary\"\"\"\n while True:\n await asyncio.sleep(self.mon_wait)\n mdata = MonitorData()\n tstamp = datetime.datetime.utcnow().timestamp()\n mdata.time.sec = int(tstamp)\n mdata.time.nsec = int((tstamp - mdata.time.sec) * 1000000000.0)\n mdata.reciver.pid = os.getpid()\n mdata.reciver.name = self.name\n rate = self._compute_rates()\n if rate is None:\n continue\n mdata.reciver.data_rate = rate\n mdata.reciver.recv_data = self.got_data\n self.got_data = False\n self._monitor_sock.send(mdata.SerializeToString())\n", "source": "the_stack_v2_python_sparse", "source_path": "ssdaq/receivers/mon_sender.py", "source_repo": "sflis/SSDAQ", "split": "val", "star_events_count": 1}
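The _compute_rates body in the ReceiverMonSender record measures packets per second from wall-clock deltas via datetime.now().timestamp(), which can jump when the system clock is adjusted. The same logic isolated from the ZeroMQ/protobuf plumbing, using time.monotonic() instead — a sketch, with RateMeter as a hypothetical name:

import time

class RateMeter:
    """Packet-rate logic from the record above, isolated and made testable."""

    def __init__(self):
        self._count = 0
        self._past = None  # monotonic time of the previous measurement

    def register(self):
        self._count += 1

    def compute_rate(self):
        now = time.monotonic()
        if self._past is None:
            # First call only primes the meter; the record's sendmon() loop
            # likewise skips the sample when the rate is None.
            self._past = now
            self._count = 0
            return None
        dt = now - self._past
        self._past = now
        rate = self._count / dt if dt > 0 else 0.0
        self._count = 0
        return rate

meter = RateMeter()
meter.compute_rate()        # primes the meter, returns None
for _ in range(5):
    meter.register()
time.sleep(0.1)
print(f"{meter.compute_rate():.0f} packets/s")  # roughly 50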
{"blob_id": "390c2c0f453a0a2b419f0e2465d3b9d0d206df42", "bodies": ["self.hidden_size = hidden_size\nself.keep_prob = keep_prob\nself.rnn_cell_fw = tf.contrib.rnn.GRUCell(self.hidden_size)\nself.rnn_cell_fw = tf.contrib.rnn.DropoutWrapper(self.rnn_cell_fw, input_keep_prob=self.keep_prob)\nself.rnn_cell_bw = tf.contrib.rnn.GRUCell(self.hidden_size)\nself.rnn_cell_bw = tf.contrib.rnn.DropoutWrapper(self.rnn_cell_bw, input_keep_prob=self.keep_prob)", "with vs.variable_scope('RNNEncoder'):\n input_lens = tf.reduce_sum(masks, reduction_indices=1)\n (fw_out, bw_out), _ = tf.nn.bidirectional_dynamic_rnn(self.rnn_cell_fw, self.rnn_cell_bw, inputs, input_lens, dtype=tf.float32)\n out = tf.concat([fw_out, bw_out], 2)\n out = tf.nn.dropout(out, self.keep_prob)\n return out"], "bodies_text": "<|body_start_0|>\n self.hidden_size = hidden_size\n self.keep_prob = keep_prob\n self.rnn_cell_fw = tf.contrib.rnn.GRUCell(self.hidden_size)\n self.rnn_cell_fw = tf.contrib.rnn.DropoutWrapper(self.rnn_cell_fw, input_keep_prob=self.keep_prob)\n self.rnn_cell_bw = tf.contrib.rnn.GRUCell(self.hidden_size)\n self.rnn_cell_bw = tf.contrib.rnn.DropoutWrapper(self.rnn_cell_bw, input_keep_prob=self.keep_prob)\n<|end_body_0|>\n\n<|body_start_1|>\n with vs.variable_scope('RNNEncoder'):\n input_lens = tf.reduce_sum(masks, reduction_indices=1)\n (fw_out, bw_out), _ = tf.nn.bidirectional_dynamic_rnn(self.rnn_cell_fw, self.rnn_cell_bw, inputs, input_lens, dtype=tf.float32)\n out = tf.concat([fw_out, bw_out], 2)\n out = tf.nn.dropout(out, self.keep_prob)\n return out\n<|end_body_1|>\n", "class_docstring": "General-purpose module to encode a sequence using a RNN. It feeds the input through a RNN and returns all the hidden states. Note: In lecture 8, we talked about how you might use a RNN as an \"encoder\" to get a single, fixed size vector representation of a sequence (e.g. by taking element-wise max of hidden states). Here, we're using the RNN as an \"encoder\" but we're not taking max; we're just returning all the hidden states. The terminology \"encoder\" still applies because we're getting a different \"encoding\" of each position in the sequence, and we'll use the encodings downstream in the model. This code uses a bidirectional GRU, but you could experiment with other types of RNN.", "class_name": "RNNEncoder", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RNNEncoder:\n \"\"\"General-purpose module to encode a sequence using a RNN. It feeds the input through a RNN and returns all the hidden states. Note: In lecture 8, we talked about how you might use a RNN as an \"encoder\" to get a single, fixed size vector representation of a sequence (e.g. by taking element-wise max of hidden states). Here, we're using the RNN as an \"encoder\" but we're not taking max; we're just returning all the hidden states. The terminology \"encoder\" still applies because we're getting a different \"encoding\" of each position in the sequence, and we'll use the encodings downstream in the model. This code uses a bidirectional GRU, but you could experiment with other types of RNN.\"\"\"\n\n def __init__(self, hidden_size, keep_prob):\n \"\"\"Inputs: hidden_size: int. Hidden size of the RNN keep_prob: Tensor containing a single scalar that is the keep probability (for dropout)\"\"\"\n <|body_0|>\n\n def build_graph(self, inputs, masks):\n \"\"\"Inputs: inputs: Tensor shape (batch_size, seq_len, input_size) masks: Tensor shape (batch_size, seq_len). 
Has 1s where there is real input, 0s where there's padding. This is used to make sure tf.nn.bidirectional_dynamic_rnn doesn't iterate through masked steps. Returns: out: Tensor shape (batch_size, seq_len, hidden_size*2). This is all hidden states (fw and bw hidden states are concatenated).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.hidden_size = hidden_size\n self.keep_prob = keep_prob\n self.rnn_cell_fw = tf.contrib.rnn.GRUCell(self.hidden_size)\n self.rnn_cell_fw = tf.contrib.rnn.DropoutWrapper(self.rnn_cell_fw, input_keep_prob=self.keep_prob)\n self.rnn_cell_bw = tf.contrib.rnn.GRUCell(self.hidden_size)\n self.rnn_cell_bw = tf.contrib.rnn.DropoutWrapper(self.rnn_cell_bw, input_keep_prob=self.keep_prob)\n<|end_body_0|>\n\n<|body_start_1|>\n with vs.variable_scope('RNNEncoder'):\n input_lens = tf.reduce_sum(masks, reduction_indices=1)\n (fw_out, bw_out), _ = tf.nn.bidirectional_dynamic_rnn(self.rnn_cell_fw, self.rnn_cell_bw, inputs, input_lens, dtype=tf.float32)\n out = tf.concat([fw_out, bw_out], 2)\n out = tf.nn.dropout(out, self.keep_prob)\n return out\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000009", "length_bytes": 13280, "license_type": "no_license", "methods": [{"docstring": "Inputs: hidden_size: int. Hidden size of the RNN keep_prob: Tensor containing a single scalar that is the keep probability (for dropout)", "name": "__init__", "signature": "def __init__(self, hidden_size, keep_prob)"}, {"docstring": "Inputs: inputs: Tensor shape (batch_size, seq_len, input_size) masks: Tensor shape (batch_size, seq_len). Has 1s where there is real input, 0s where there's padding. This is used to make sure tf.nn.bidirectional_dynamic_rnn doesn't iterate through masked steps. Returns: out: Tensor shape (batch_size, seq_len, hidden_size*2). This is all hidden states (fw and bw hidden states are concatenated).", "name": "build_graph", "signature": "def build_graph(self, inputs, masks)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_049131", "prompt": "Implement the Python class `RNNEncoder` described below.\n\nClass description:\nGeneral-purpose module to encode a sequence using a RNN. It feeds the input through a RNN and returns all the hidden states. Note: In lecture 8, we talked about how you might use a RNN as an \"encoder\" to get a single, fixed size vector representation of a sequence (e.g. by taking element-wise max of hidden states). Here, we're using the RNN as an \"encoder\" but we're not taking max; we're just returning all the hidden states. The terminology \"encoder\" still applies because we're getting a different \"encoding\" of each position in the sequence, and we'll use the encodings downstream in the model. This code uses a bidirectional GRU, but you could experiment with other types of RNN.\n\nMethod signatures and docstrings:\n- def __init__(self, hidden_size, keep_prob): Inputs: hidden_size: int. Hidden size of the RNN keep_prob: Tensor containing a single scalar that is the keep probability (for dropout)\n- def build_graph(self, inputs, masks): Inputs: inputs: Tensor shape (batch_size, seq_len, input_size) masks: Tensor shape (batch_size, seq_len). Has 1s where there is real input, 0s where there's padding. This is used to make sure tf.nn.bidirectional_dynamic_rnn doesn't iterate through masked steps. Returns: out: Tensor shape (batch_size, seq_len, hidden_size*2). 
This is all hidden states (fw and bw hidden states are concatenated).", "prompted_full_text": "Implement the Python class `RNNEncoder` described below.\n\nClass description:\nGeneral-purpose module to encode a sequence using a RNN. It feeds the input through a RNN and returns all the hidden states. Note: In lecture 8, we talked about how you might use a RNN as an \"encoder\" to get a single, fixed size vector representation of a sequence (e.g. by taking element-wise max of hidden states). Here, we're using the RNN as an \"encoder\" but we're not taking max; we're just returning all the hidden states. The terminology \"encoder\" still applies because we're getting a different \"encoding\" of each position in the sequence, and we'll use the encodings downstream in the model. This code uses a bidirectional GRU, but you could experiment with other types of RNN.\n\nMethod signatures and docstrings:\n- def __init__(self, hidden_size, keep_prob): Inputs: hidden_size: int. Hidden size of the RNN keep_prob: Tensor containing a single scalar that is the keep probability (for dropout)\n- def build_graph(self, inputs, masks): Inputs: inputs: Tensor shape (batch_size, seq_len, input_size) masks: Tensor shape (batch_size, seq_len). Has 1s where there is real input, 0s where there's padding. This is used to make sure tf.nn.bidirectional_dynamic_rnn doesn't iterate through masked steps. Returns: out: Tensor shape (batch_size, seq_len, hidden_size*2). This is all hidden states (fw and bw hidden states are concatenated).\n\n<|skeleton|>\nclass RNNEncoder:\n \"\"\"General-purpose module to encode a sequence using a RNN. It feeds the input through a RNN and returns all the hidden states. Note: In lecture 8, we talked about how you might use a RNN as an \"encoder\" to get a single, fixed size vector representation of a sequence (e.g. by taking element-wise max of hidden states). Here, we're using the RNN as an \"encoder\" but we're not taking max; we're just returning all the hidden states. The terminology \"encoder\" still applies because we're getting a different \"encoding\" of each position in the sequence, and we'll use the encodings downstream in the model. This code uses a bidirectional GRU, but you could experiment with other types of RNN.\"\"\"\n\n def __init__(self, hidden_size, keep_prob):\n \"\"\"Inputs: hidden_size: int. Hidden size of the RNN keep_prob: Tensor containing a single scalar that is the keep probability (for dropout)\"\"\"\n <|body_0|>\n\n def build_graph(self, inputs, masks):\n \"\"\"Inputs: inputs: Tensor shape (batch_size, seq_len, input_size) masks: Tensor shape (batch_size, seq_len). Has 1s where there is real input, 0s where there's padding. This is used to make sure tf.nn.bidirectional_dynamic_rnn doesn't iterate through masked steps. Returns: out: Tensor shape (batch_size, seq_len, hidden_size*2). 
This is all hidden states (fw and bw hidden states are concatenated).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.hidden_size = hidden_size\n self.keep_prob = keep_prob\n self.rnn_cell_fw = tf.contrib.rnn.GRUCell(self.hidden_size)\n self.rnn_cell_fw = tf.contrib.rnn.DropoutWrapper(self.rnn_cell_fw, input_keep_prob=self.keep_prob)\n self.rnn_cell_bw = tf.contrib.rnn.GRUCell(self.hidden_size)\n self.rnn_cell_bw = tf.contrib.rnn.DropoutWrapper(self.rnn_cell_bw, input_keep_prob=self.keep_prob)\n<|end_body_0|>\n\n<|body_start_1|>\n with vs.variable_scope('RNNEncoder'):\n input_lens = tf.reduce_sum(masks, reduction_indices=1)\n (fw_out, bw_out), _ = tf.nn.bidirectional_dynamic_rnn(self.rnn_cell_fw, self.rnn_cell_bw, inputs, input_lens, dtype=tf.float32)\n out = tf.concat([fw_out, bw_out], 2)\n out = tf.nn.dropout(out, self.keep_prob)\n return out\n<|end_body_1|>\n", "revision_id": "5725304cc260ea3ebf7883e9a20ce15a19fe2c20", "skeleton": "<|skeleton|>\nclass RNNEncoder:\n \"\"\"General-purpose module to encode a sequence using a RNN. It feeds the input through a RNN and returns all the hidden states. Note: In lecture 8, we talked about how you might use a RNN as an \"encoder\" to get a single, fixed size vector representation of a sequence (e.g. by taking element-wise max of hidden states). Here, we're using the RNN as an \"encoder\" but we're not taking max; we're just returning all the hidden states. The terminology \"encoder\" still applies because we're getting a different \"encoding\" of each position in the sequence, and we'll use the encodings downstream in the model. This code uses a bidirectional GRU, but you could experiment with other types of RNN.\"\"\"\n\n def __init__(self, hidden_size, keep_prob):\n \"\"\"Inputs: hidden_size: int. Hidden size of the RNN keep_prob: Tensor containing a single scalar that is the keep probability (for dropout)\"\"\"\n <|body_0|>\n\n def build_graph(self, inputs, masks):\n \"\"\"Inputs: inputs: Tensor shape (batch_size, seq_len, input_size) masks: Tensor shape (batch_size, seq_len). Has 1s where there is real input, 0s where there's padding. This is used to make sure tf.nn.bidirectional_dynamic_rnn doesn't iterate through masked steps. Returns: out: Tensor shape (batch_size, seq_len, hidden_size*2). This is all hidden states (fw and bw hidden states are concatenated).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class RNNEncoder:\n \"\"\"General-purpose module to encode a sequence using a RNN. It feeds the input through a RNN and returns all the hidden states. Note: In lecture 8, we talked about how you might use a RNN as an \"encoder\" to get a single, fixed size vector representation of a sequence (e.g. by taking element-wise max of hidden states). Here, we're using the RNN as an \"encoder\" but we're not taking max; we're just returning all the hidden states. The terminology \"encoder\" still applies because we're getting a different \"encoding\" of each position in the sequence, and we'll use the encodings downstream in the model. This code uses a bidirectional GRU, but you could experiment with other types of RNN.\"\"\"\n\n def __init__(self, hidden_size, keep_prob):\n \"\"\"Inputs: hidden_size: int. 
Hidden size of the RNN keep_prob: Tensor containing a single scalar that is the keep probability (for dropout)\"\"\"\n self.hidden_size = hidden_size\n self.keep_prob = keep_prob\n self.rnn_cell_fw = tf.contrib.rnn.GRUCell(self.hidden_size)\n self.rnn_cell_fw = tf.contrib.rnn.DropoutWrapper(self.rnn_cell_fw, input_keep_prob=self.keep_prob)\n self.rnn_cell_bw = tf.contrib.rnn.GRUCell(self.hidden_size)\n self.rnn_cell_bw = tf.contrib.rnn.DropoutWrapper(self.rnn_cell_bw, input_keep_prob=self.keep_prob)\n\n def build_graph(self, inputs, masks):\n \"\"\"Inputs: inputs: Tensor shape (batch_size, seq_len, input_size) masks: Tensor shape (batch_size, seq_len). Has 1s where there is real input, 0s where there's padding. This is used to make sure tf.nn.bidirectional_dynamic_rnn doesn't iterate through masked steps. Returns: out: Tensor shape (batch_size, seq_len, hidden_size*2). This is all hidden states (fw and bw hidden states are concatenated).\"\"\"\n with vs.variable_scope('RNNEncoder'):\n input_lens = tf.reduce_sum(masks, reduction_indices=1)\n (fw_out, bw_out), _ = tf.nn.bidirectional_dynamic_rnn(self.rnn_cell_fw, self.rnn_cell_bw, inputs, input_lens, dtype=tf.float32)\n out = tf.concat([fw_out, bw_out], 2)\n out = tf.nn.dropout(out, self.keep_prob)\n return out\n", "source": "the_stack_v2_python_sparse", "source_path": "project/src/deep_learning_models/modules.py", "source_repo": "laksh9950/cs229-1", "split": "val", "star_events_count": 0}
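The RNNEncoder record targets TensorFlow 1.x: tf.contrib.rnn and variable_scope were removed in TF2. A rough TF2/Keras counterpart is sketched below; treat it as an approximation rather than a drop-in port, since Keras Dropout only loosely corresponds to DropoutWrapper(input_keep_prob=keep_prob), and padding is handled by a boolean mask (e.g. a tf.keras.layers.Masking layer or the call's mask= argument) instead of the explicit input_lens reduction:

import tensorflow as tf

def make_rnn_encoder(hidden_size, dropout_rate):
    # Bidirectional GRU returning all hidden states: (batch, seq, 2*hidden).
    return tf.keras.Sequential([
        tf.keras.layers.Dropout(dropout_rate),   # ~ input_keep_prob on inputs
        tf.keras.layers.Bidirectional(
            tf.keras.layers.GRU(hidden_size, return_sequences=True)),
        tf.keras.layers.Dropout(dropout_rate),   # ~ tf.nn.dropout on outputs
    ])

encoder = make_rnn_encoder(64, 0.2)
x = tf.random.normal([8, 20, 32])   # (batch_size, seq_len, input_size)
out = encoder(x, training=True)
print(out.shape)                    # (8, 20, 128), i.e. hidden_size * 2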
{"blob_id": "9f2ad0541d077cdc022c8560ee1189941ff9936c", "bodies": ["super(ConvNet, self).__init__()\nif norm_layer is None:\n norm_layer = nn.BatchNorm1d\nself._norm_layer = norm_layer\nif act_layer is None:\n act_layer = nn.ReLU\nself._act_layer = act_layer\nself.conv1 = conv3x3(1, 16, stride=1)\nself.bn1 = norm_layer(16)\nself.stack1 = self._make_stack(block=block, num_layers=layers[0], inplanes=16, outplanes=32, kernel_size=3, stride=2)\nself.stack2 = self._make_stack(block=block, num_layers=layers[1], inplanes=32, outplanes=64, kernel_size=3, stride=2)\nself.stack3 = self._make_stack(block=block, num_layers=layers[2], inplanes=64, outplanes=128, kernel_size=3, stride=2)\nself.avgpool = nn.AdaptiveAvgPool1d(1)\nself.fc = nn.Linear(128, latent_dim)\nself.relu = self._act_layer(inplace=True)\nfor m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)", "norm_layer = self._norm_layer\nact_layer = self._act_layer\ndownsample = None\nif stride != 1 or inplanes != outplanes:\n downsample = nn.Sequential(conv1x1(inplanes, outplanes, stride=stride), norm_layer(outplanes))\nlayers = []\nlayers.append(block(inplanes, outplanes, kernel_size=kernel_size, stride=stride, downsample=downsample, norm_layer=norm_layer, act_layer=act_layer))\nfor _ in range(1, num_layers):\n layers.append(block(outplanes, outplanes, kernel_size=kernel_size, stride=1, downsample=None, norm_layer=norm_layer, act_layer=act_layer))\nreturn nn.Sequential(*layers)", "out = self.bn1(self.conv1(x))\nout = self.relu(out)\nout = self.stack1(out)\nout = self.stack2(out)\nout = self.stack3(out)\nout = self.avgpool(out)\nout = torch.flatten(out, 1)\nout = self.fc(out)\nreturn out"], "bodies_text": "<|body_start_0|>\n super(ConvNet, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm1d\n self._norm_layer = norm_layer\n if act_layer is None:\n act_layer = nn.ReLU\n self._act_layer = act_layer\n self.conv1 = conv3x3(1, 16, stride=1)\n self.bn1 = norm_layer(16)\n self.stack1 = self._make_stack(block=block, num_layers=layers[0], inplanes=16, outplanes=32, kernel_size=3, stride=2)\n self.stack2 = self._make_stack(block=block, num_layers=layers[1], inplanes=32, outplanes=64, kernel_size=3, stride=2)\n self.stack3 = self._make_stack(block=block, num_layers=layers[2], inplanes=64, outplanes=128, kernel_size=3, stride=2)\n self.avgpool = nn.AdaptiveAvgPool1d(1)\n self.fc = nn.Linear(128, latent_dim)\n self.relu = self._act_layer(inplace=True)\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n<|end_body_0|>\n\n<|body_start_1|>\n norm_layer = self._norm_layer\n act_layer = self._act_layer\n downsample = None\n if stride != 1 or inplanes != outplanes:\n downsample = nn.Sequential(conv1x1(inplanes, outplanes, stride=stride), norm_layer(outplanes))\n layers = []\n layers.append(block(inplanes, outplanes, kernel_size=kernel_size, stride=stride, downsample=downsample, norm_layer=norm_layer, act_layer=act_layer))\n for _ in range(1, num_layers):\n layers.append(block(outplanes, outplanes, kernel_size=kernel_size, stride=1, downsample=None, norm_layer=norm_layer, act_layer=act_layer))\n return nn.Sequential(*layers)\n<|end_body_1|>\n\n<|body_start_2|>\n out 
= self.bn1(self.conv1(x))\n out = self.relu(out)\n out = self.stack1(out)\n out = self.stack2(out)\n out = self.stack3(out)\n out = self.avgpool(out)\n out = torch.flatten(out, 1)\n out = self.fc(out)\n return out\n<|end_body_2|>\n", "class_docstring": "Basic CNN architecture", "class_name": "ConvNet", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ConvNet:\n \"\"\"Basic CNN architecture\"\"\"\n\n def __init__(self, block, layers, latent_dim=512, norm_layer=None, act_layer=None):\n \"\"\"Constructor Args: block: (nn.Module) building block; e.g., BasicBlock layers: (list of int) a list of integers specifying number of blocks per stack latent_dim: (int) dimension of latent space at network output; default = 512 norm_layer: (nn.Module) normalization layer; default = nn.BatchNorm2d act_layer: (nn.Module) activation layer; default = nn.ReLU\"\"\"\n <|body_0|>\n\n def _make_stack(self, block, num_layers, inplanes, outplanes, kernel_size=3, stride=1):\n \"\"\"Build a stack of blocks - first block in stack can have stride > 1 + downsample for skip connection (if applicable) - other blocks in stack have stride=1, inplanes=outplanes (if applicable) Args: block: (nn.Module) building block num_layers: (int) number of blocks in the stack inplanes: (int) number of input channels to the stack outplanes: (int) number of output channels to the stack kernel_size: (int) conv filter size; 1x1, 3x3, 5x5 stride: (int) number of stride for conv filter in first block; for other blocks stride=1 Returns: (nn.Module) a stack of blocks; returned by calling nn.Sequential()\"\"\"\n <|body_1|>\n\n def forward(self, x):\n \"\"\"forward method\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(ConvNet, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm1d\n self._norm_layer = norm_layer\n if act_layer is None:\n act_layer = nn.ReLU\n self._act_layer = act_layer\n self.conv1 = conv3x3(1, 16, stride=1)\n self.bn1 = norm_layer(16)\n self.stack1 = self._make_stack(block=block, num_layers=layers[0], inplanes=16, outplanes=32, kernel_size=3, stride=2)\n self.stack2 = self._make_stack(block=block, num_layers=layers[1], inplanes=32, outplanes=64, kernel_size=3, stride=2)\n self.stack3 = self._make_stack(block=block, num_layers=layers[2], inplanes=64, outplanes=128, kernel_size=3, stride=2)\n self.avgpool = nn.AdaptiveAvgPool1d(1)\n self.fc = nn.Linear(128, latent_dim)\n self.relu = self._act_layer(inplace=True)\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n<|end_body_0|>\n\n<|body_start_1|>\n norm_layer = self._norm_layer\n act_layer = self._act_layer\n downsample = None\n if stride != 1 or inplanes != outplanes:\n downsample = nn.Sequential(conv1x1(inplanes, outplanes, stride=stride), norm_layer(outplanes))\n layers = []\n layers.append(block(inplanes, outplanes, kernel_size=kernel_size, stride=stride, downsample=downsample, norm_layer=norm_layer, act_layer=act_layer))\n for _ in range(1, num_layers):\n layers.append(block(outplanes, outplanes, kernel_size=kernel_size, stride=1, downsample=None, norm_layer=norm_layer, act_layer=act_layer))\n return nn.Sequential(*layers)\n<|end_body_1|>\n\n<|body_start_2|>\n out = self.bn1(self.conv1(x))\n out = self.relu(out)\n out = self.stack1(out)\n out = self.stack2(out)\n out = 
self.stack3(out)\n out = self.avgpool(out)\n out = torch.flatten(out, 1)\n out = self.fc(out)\n return out\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000010", "length_bytes": 8913, "license_type": "permissive", "methods": [{"docstring": "Constructor Args: block: (nn.Module) building block; e.g., BasicBlock layers: (list of int) a list of integers specifying number of blocks per stack latent_dim: (int) dimension of latent space at network output; default = 512 norm_layer: (nn.Module) normalization layer; default = nn.BatchNorm2d act_layer: (nn.Module) activation layer; default = nn.ReLU", "name": "__init__", "signature": "def __init__(self, block, layers, latent_dim=512, norm_layer=None, act_layer=None)"}, {"docstring": "Build a stack of blocks - first block in stack can have stride > 1 + downsample for skip connection (if applicable) - other blocks in stack have stride=1, inplanes=outplanes (if applicable) Args: block: (nn.Module) building block num_layers: (int) number of blocks in the stack inplanes: (int) number of input channels to the stack outplanes: (int) number of output channels to the stack kernel_size: (int) conv filter size; 1x1, 3x3, 5x5 stride: (int) number of stride for conv filter in first block; for other blocks stride=1 Returns: (nn.Module) a stack of blocks; returned by calling nn.Sequential()", "name": "_make_stack", "signature": "def _make_stack(self, block, num_layers, inplanes, outplanes, kernel_size=3, stride=1)"}, {"docstring": "forward method", "name": "forward", "signature": "def forward(self, x)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_046984", "prompt": "Implement the Python class `ConvNet` described below.\n\nClass description:\nBasic CNN architecture\n\nMethod signatures and docstrings:\n- def __init__(self, block, layers, latent_dim=512, norm_layer=None, act_layer=None): Constructor Args: block: (nn.Module) building block; e.g., BasicBlock layers: (list of int) a list of integers specifying number of blocks per stack latent_dim: (int) dimension of latent space at network output; default = 512 norm_layer: (nn.Module) normalization layer; default = nn.BatchNorm2d act_layer: (nn.Module) activation layer; default = nn.ReLU\n- def _make_stack(self, block, num_layers, inplanes, outplanes, kernel_size=3, stride=1): Build a stack of blocks - first block in stack can have stride > 1 + downsample for skip connection (if applicable) - other blocks in stack have stride=1, inplanes=outplanes (if applicable) Args: block: (nn.Module) building block num_layers: (int) number of blocks in the stack inplanes: (int) number of input channels to the stack outplanes: (int) number of output channels to the stack kernel_size: (int) conv filter size; 1x1, 3x3, 5x5 stride: (int) number of stride for conv filter in first block; for other blocks stride=1 Returns: (nn.Module) a stack of blocks; returned by calling nn.Sequential()\n- def forward(self, x): forward method", "prompted_full_text": "Implement the Python class `ConvNet` described below.\n\nClass description:\nBasic CNN architecture\n\nMethod signatures and docstrings:\n- def __init__(self, block, layers, latent_dim=512, norm_layer=None, act_layer=None): Constructor Args: block: (nn.Module) building block; e.g., BasicBlock layers: (list of int) a list of integers specifying number of blocks per stack latent_dim: (int) dimension of latent space at network output; default = 512 norm_layer: (nn.Module) normalization layer; default = nn.BatchNorm2d act_layer: (nn.Module) activation 
layer; default = nn.ReLU\n- def _make_stack(self, block, num_layers, inplanes, outplanes, kernel_size=3, stride=1): Build a stack of blocks - first block in stack can have stride > 1 + downsample for skip connection (if applicable) - other blocks in stack have stride=1, inplanes=outplanes (if applicable) Args: block: (nn.Module) building block num_layers: (int) number of blocks in the stack inplanes: (int) number of input channels to the stack outplanes: (int) number of output channels to the stack kernel_size: (int) conv filter size; 1x1, 3x3, 5x5 stride: (int) number of stride for conv filter in first block; for other blocks stride=1 Returns: (nn.Module) a stack of blocks; returned by calling nn.Sequential()\n- def forward(self, x): forward method\n\n<|skeleton|>\nclass ConvNet:\n \"\"\"Basic CNN architecture\"\"\"\n\n def __init__(self, block, layers, latent_dim=512, norm_layer=None, act_layer=None):\n \"\"\"Constructor Args: block: (nn.Module) building block; e.g., BasicBlock layers: (list of int) a list of integers specifying number of blocks per stack latent_dim: (int) dimension of latent space at network output; default = 512 norm_layer: (nn.Module) normalization layer; default = nn.BatchNorm2d act_layer: (nn.Module) activation layer; default = nn.ReLU\"\"\"\n <|body_0|>\n\n def _make_stack(self, block, num_layers, inplanes, outplanes, kernel_size=3, stride=1):\n \"\"\"Build a stack of blocks - first block in stack can have stride > 1 + downsample for skip connection (if applicable) - other blocks in stack have stride=1, inplanes=outplanes (if applicable) Args: block: (nn.Module) building block num_layers: (int) number of blocks in the stack inplanes: (int) number of input channels to the stack outplanes: (int) number of output channels to the stack kernel_size: (int) conv filter size; 1x1, 3x3, 5x5 stride: (int) number of stride for conv filter in first block; for other blocks stride=1 Returns: (nn.Module) a stack of blocks; returned by calling nn.Sequential()\"\"\"\n <|body_1|>\n\n def forward(self, x):\n \"\"\"forward method\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(ConvNet, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm1d\n self._norm_layer = norm_layer\n if act_layer is None:\n act_layer = nn.ReLU\n self._act_layer = act_layer\n self.conv1 = conv3x3(1, 16, stride=1)\n self.bn1 = norm_layer(16)\n self.stack1 = self._make_stack(block=block, num_layers=layers[0], inplanes=16, outplanes=32, kernel_size=3, stride=2)\n self.stack2 = self._make_stack(block=block, num_layers=layers[1], inplanes=32, outplanes=64, kernel_size=3, stride=2)\n self.stack3 = self._make_stack(block=block, num_layers=layers[2], inplanes=64, outplanes=128, kernel_size=3, stride=2)\n self.avgpool = nn.AdaptiveAvgPool1d(1)\n self.fc = nn.Linear(128, latent_dim)\n self.relu = self._act_layer(inplace=True)\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n<|end_body_0|>\n\n<|body_start_1|>\n norm_layer = self._norm_layer\n act_layer = self._act_layer\n downsample = None\n if stride != 1 or inplanes != outplanes:\n downsample = nn.Sequential(conv1x1(inplanes, outplanes, stride=stride), norm_layer(outplanes))\n layers = []\n layers.append(block(inplanes, outplanes, kernel_size=kernel_size, stride=stride, downsample=downsample, norm_layer=norm_layer, act_layer=act_layer))\n 
for _ in range(1, num_layers):\n layers.append(block(outplanes, outplanes, kernel_size=kernel_size, stride=1, downsample=None, norm_layer=norm_layer, act_layer=act_layer))\n return nn.Sequential(*layers)\n<|end_body_1|>\n\n<|body_start_2|>\n out = self.bn1(self.conv1(x))\n out = self.relu(out)\n out = self.stack1(out)\n out = self.stack2(out)\n out = self.stack3(out)\n out = self.avgpool(out)\n out = torch.flatten(out, 1)\n out = self.fc(out)\n return out\n<|end_body_2|>\n", "revision_id": "3ad344901c3bb59e0bc16bb70202d2cfd538fd77", "skeleton": "<|skeleton|>\nclass ConvNet:\n \"\"\"Basic CNN architecture\"\"\"\n\n def __init__(self, block, layers, latent_dim=512, norm_layer=None, act_layer=None):\n \"\"\"Constructor Args: block: (nn.Module) building block; e.g., BasicBlock layers: (list of int) a list of integers specifying number of blocks per stack latent_dim: (int) dimension of latent space at network output; default = 512 norm_layer: (nn.Module) normalization layer; default = nn.BatchNorm2d act_layer: (nn.Module) activation layer; default = nn.ReLU\"\"\"\n <|body_0|>\n\n def _make_stack(self, block, num_layers, inplanes, outplanes, kernel_size=3, stride=1):\n \"\"\"Build a stack of blocks - first block in stack can have stride > 1 + downsample for skip connection (if applicable) - other blocks in stack have stride=1, inplanes=outplanes (if applicable) Args: block: (nn.Module) building block num_layers: (int) number of blocks in the stack inplanes: (int) number of input channels to the stack outplanes: (int) number of output channels to the stack kernel_size: (int) conv filter size; 1x1, 3x3, 5x5 stride: (int) number of stride for conv filter in first block; for other blocks stride=1 Returns: (nn.Module) a stack of blocks; returned by calling nn.Sequential()\"\"\"\n <|body_1|>\n\n def forward(self, x):\n \"\"\"forward method\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ConvNet:\n \"\"\"Basic CNN architecture\"\"\"\n\n def __init__(self, block, layers, latent_dim=512, norm_layer=None, act_layer=None):\n \"\"\"Constructor Args: block: (nn.Module) building block; e.g., BasicBlock layers: (list of int) a list of integers specifying number of blocks per stack latent_dim: (int) dimension of latent space at network output; default = 512 norm_layer: (nn.Module) normalization layer; default = nn.BatchNorm2d act_layer: (nn.Module) activation layer; default = nn.ReLU\"\"\"\n super(ConvNet, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm1d\n self._norm_layer = norm_layer\n if act_layer is None:\n act_layer = nn.ReLU\n self._act_layer = act_layer\n self.conv1 = conv3x3(1, 16, stride=1)\n self.bn1 = norm_layer(16)\n self.stack1 = self._make_stack(block=block, num_layers=layers[0], inplanes=16, outplanes=32, kernel_size=3, stride=2)\n self.stack2 = self._make_stack(block=block, num_layers=layers[1], inplanes=32, outplanes=64, kernel_size=3, stride=2)\n self.stack3 = self._make_stack(block=block, num_layers=layers[2], inplanes=64, outplanes=128, kernel_size=3, stride=2)\n self.avgpool = nn.AdaptiveAvgPool1d(1)\n self.fc = nn.Linear(128, latent_dim)\n self.relu = self._act_layer(inplace=True)\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n 
nn.init.constant_(m.bias, 0)\n\n def _make_stack(self, block, num_layers, inplanes, outplanes, kernel_size=3, stride=1):\n \"\"\"Build a stack of blocks - first block in stack can have stride > 1 + downsample for skip connection (if applicable) - other blocks in stack have stride=1, inplanes=outplanes (if applicable) Args: block: (nn.Module) building block num_layers: (int) number of blocks in the stack inplanes: (int) number of input channels to the stack outplanes: (int) number of output channels to the stack kernel_size: (int) conv filter size; 1x1, 3x3, 5x5 stride: (int) number of stride for conv filter in first block; for other blocks stride=1 Returns: (nn.Module) a stack of blocks; returned by calling nn.Sequential()\"\"\"\n norm_layer = self._norm_layer\n act_layer = self._act_layer\n downsample = None\n if stride != 1 or inplanes != outplanes:\n downsample = nn.Sequential(conv1x1(inplanes, outplanes, stride=stride), norm_layer(outplanes))\n layers = []\n layers.append(block(inplanes, outplanes, kernel_size=kernel_size, stride=stride, downsample=downsample, norm_layer=norm_layer, act_layer=act_layer))\n for _ in range(1, num_layers):\n layers.append(block(outplanes, outplanes, kernel_size=kernel_size, stride=1, downsample=None, norm_layer=norm_layer, act_layer=act_layer))\n return nn.Sequential(*layers)\n\n def forward(self, x):\n \"\"\"forward method\"\"\"\n out = self.bn1(self.conv1(x))\n out = self.relu(out)\n out = self.stack1(out)\n out = self.stack2(out)\n out = self.stack3(out)\n out = self.avgpool(out)\n out = torch.flatten(out, 1)\n out = self.fc(out)\n return out\n", "source": "the_stack_v2_python_sparse", "source_path": "baselines/common/networks/cnn.py", "source_repo": "baihuaxie/drl-lib", "split": "val", "star_events_count": 0}
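The ConvNet record leans on pieces outside the snippet: the skeleton declares class ConvNet: with no base class, yet the body calls super().__init__() and self.modules(), which presuppose nn.Module; the helpers conv3x3 and conv1x1 are never shown; and the weight-init loop tests for nn.Conv2d and nn.BatchNorm2d even though every layer in the network is 1-D, so it silently matches nothing. Plausible 1-D helpers, offered as an assumption rather than the repo's actual code:

import torch.nn as nn

def conv3x3(in_planes, out_planes, stride=1):
    # 3-tap 1-D convolution; padding=1 preserves sequence length at stride 1.
    return nn.Conv1d(in_planes, out_planes, kernel_size=3,
                     stride=stride, padding=1, bias=False)

def conv1x1(in_planes, out_planes, stride=1):
    # Pointwise 1-D convolution for the downsampling skip connection.
    return nn.Conv1d(in_planes, out_planes, kernel_size=1,
                     stride=stride, bias=False)

For the Kaiming and constant initializations to actually take effect on this network, the isinstance checks would need nn.Conv1d and (nn.BatchNorm1d, nn.GroupNorm) respectively.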
{"blob_id": "f79ecef14e6da9f9c49379f9f9a7f7f07731d207", "bodies": ["self.name = 'Background'\nself.obsid = None\nself.instrument = 'CTA'\nself.spatial = {'type': 'CTAIrfBackground'}\nself.spectral = {'type': 'PowerLaw', 'param': {'Prefactor': {'value': 1.0, 'free': True}, 'Index': {'value': 0, 'free': True}, 'PivotEnergy': {'value': 1.0 * u.TeV, 'free': False}}}", "if bkg_type == 'CTAIrfBackground':\n self.spatial = {'type': 'CTAIrfBackground'}\nelif bkg_type == 'Gaussian':\n self.spatial = {'type': 'Gaussian', 'param': {'Sigma': {'value': 3.0 * u.deg, 'free': False}}}\nelse:\n raise ValueError('The baground type you are trying to set is not implemented.')", "print('--- name: ' + self.name + ' ---')\nprint('--- ObsID: ' + self.obsid + ' ---')\nprint('--- Spatial model: ' + self.spatial['type'])\nif 'param' in self.spatial.keys():\n for key in self.spatial['param'].keys():\n print(' ' + key + ': ' + str(self.spatial['param'][key]))\nprint('--- Spectral model: ' + self.spectral['type'])\nif 'param' in self.spectral.keys():\n for key in self.spectral['param'].keys():\n print(' ' + key + ': ' + str(self.spectral['param'][key]))"], "bodies_text": "<|body_start_0|>\n self.name = 'Background'\n self.obsid = None\n self.instrument = 'CTA'\n self.spatial = {'type': 'CTAIrfBackground'}\n self.spectral = {'type': 'PowerLaw', 'param': {'Prefactor': {'value': 1.0, 'free': True}, 'Index': {'value': 0, 'free': True}, 'PivotEnergy': {'value': 1.0 * u.TeV, 'free': False}}}\n<|end_body_0|>\n\n<|body_start_1|>\n if bkg_type == 'CTAIrfBackground':\n self.spatial = {'type': 'CTAIrfBackground'}\n elif bkg_type == 'Gaussian':\n self.spatial = {'type': 'Gaussian', 'param': {'Sigma': {'value': 3.0 * u.deg, 'free': False}}}\n else:\n raise ValueError('The baground type you are trying to set is not implemented.')\n<|end_body_1|>\n\n<|body_start_2|>\n print('--- name: ' + self.name + ' ---')\n print('--- ObsID: ' + self.obsid + ' ---')\n print('--- Spatial model: ' + self.spatial['type'])\n if 'param' in self.spatial.keys():\n for key in self.spatial['param'].keys():\n print(' ' + key + ': ' + str(self.spatial['param'][key]))\n print('--- Spectral model: ' + self.spectral['type'])\n if 'param' in self.spectral.keys():\n for key in self.spectral['param'].keys():\n print(' ' + key + ': ' + str(self.spectral['param'][key]))\n<|end_body_2|>\n", "class_docstring": "Background class. This class defines the Background object. The background is defined by its attribute (name, spatial, spectral). Attributes ---------- - name : the label of the given background - obsid (str): to match a given background to its obsID - spatial : the spatial properties of the background - spectral : the spectral properties of the background Methods ---------- - set_spatial_std_bkg - print_bkg", "class_name": "Background", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Background:\n \"\"\"Background class. This class defines the Background object. The background is defined by its attribute (name, spatial, spectral). Attributes ---------- - name : the label of the given background - obsid (str): to match a given background to its obsID - spatial : the spatial properties of the background - spectral : the spectral properties of the background Methods ---------- - set_spatial_std_bkg - print_bkg\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the background object. 
Parameters ---------- - name (str): the label of the given background - spatial (dictionary): contain coordinates of the background - spectral (dictionary): spectral properties of the background\"\"\"\n <|body_0|>\n\n def set_spatial_std_bkg(self, bkg_type='CTAIrfBackground'):\n \"\"\"Set the spatial part of the background to standard model. Available standard models are - CTAIrfBackground - Gaussian see also http://cta.irap.omp.eu/ctools/users/user_manual/models_spatial_bgd.html for further implementation. Parameters ---------- - bkg_type (str): background type\"\"\"\n <|body_1|>\n\n def print_bkg(self):\n \"\"\"Show the background model. Parameters ----------\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.name = 'Background'\n self.obsid = None\n self.instrument = 'CTA'\n self.spatial = {'type': 'CTAIrfBackground'}\n self.spectral = {'type': 'PowerLaw', 'param': {'Prefactor': {'value': 1.0, 'free': True}, 'Index': {'value': 0, 'free': True}, 'PivotEnergy': {'value': 1.0 * u.TeV, 'free': False}}}\n<|end_body_0|>\n\n<|body_start_1|>\n if bkg_type == 'CTAIrfBackground':\n self.spatial = {'type': 'CTAIrfBackground'}\n elif bkg_type == 'Gaussian':\n self.spatial = {'type': 'Gaussian', 'param': {'Sigma': {'value': 3.0 * u.deg, 'free': False}}}\n else:\n raise ValueError('The baground type you are trying to set is not implemented.')\n<|end_body_1|>\n\n<|body_start_2|>\n print('--- name: ' + self.name + ' ---')\n print('--- ObsID: ' + self.obsid + ' ---')\n print('--- Spatial model: ' + self.spatial['type'])\n if 'param' in self.spatial.keys():\n for key in self.spatial['param'].keys():\n print(' ' + key + ': ' + str(self.spatial['param'][key]))\n print('--- Spectral model: ' + self.spectral['type'])\n if 'param' in self.spectral.keys():\n for key in self.spectral['param'].keys():\n print(' ' + key + ': ' + str(self.spectral['param'][key]))\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000011", "length_bytes": 4000, "license_type": "no_license", "methods": [{"docstring": "Initialize the background object. Parameters ---------- - name (str): the label of the given background - spatial (dictionary): contain coordinates of the background - spectral (dictionary): spectral properties of the background", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Set the spatial part of the background to standard model. Available standard models are - CTAIrfBackground - Gaussian see also http://cta.irap.omp.eu/ctools/users/user_manual/models_spatial_bgd.html for further implementation. Parameters ---------- - bkg_type (str): background type", "name": "set_spatial_std_bkg", "signature": "def set_spatial_std_bkg(self, bkg_type='CTAIrfBackground')"}, {"docstring": "Show the background model. Parameters ----------", "name": "print_bkg", "signature": "def print_bkg(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_038452", "prompt": "Implement the Python class `Background` described below.\n\nClass description:\nBackground class. This class defines the Background object. The background is defined by its attribute (name, spatial, spectral). Attributes ---------- - name : the label of the given background - obsid (str): to match a given background to its obsID - spatial : the spatial properties of the background - spectral : the spectral properties of the background Methods ---------- - set_spatial_std_bkg - print_bkg\n\nMethod signatures and docstrings:\n- def __init__(self): Initialize the background object. 
Parameters ---------- - name (str): the label of the given background - spatial (dictionary): contain coordinates of the background - spectral (dictionary): spectral properties of the background\n- def set_spatial_std_bkg(self, bkg_type='CTAIrfBackground'): Set the spatial part of the background to standard model. Available standard models are - CTAIrfBackground - Gaussian see also http://cta.irap.omp.eu/ctools/users/user_manual/models_spatial_bgd.html for further implementation. Parameters ---------- - bkg_type (str): background type\n- def print_bkg(self): Show the background model. Parameters ----------", "prompted_full_text": "Implement the Python class `Background` described below.\n\nClass description:\nBackground class. This class defines the Background object. The background is defined by its attribute (name, spatial, spectral). Attributes ---------- - name : the label of the given background - obsid (str): to match a given background to its obsID - spatial : the spatial properties of the background - spectral : the spectral properties of the background Methods ---------- - set_spatial_std_bkg - print_bkg\n\nMethod signatures and docstrings:\n- def __init__(self): Initialize the background object. Parameters ---------- - name (str): the label of the given background - spatial (dictionary): contain coordinates of the background - spectral (dictionary): spectral properties of the background\n- def set_spatial_std_bkg(self, bkg_type='CTAIrfBackground'): Set the spatial part of the background to standard model. Available standard models are - CTAIrfBackground - Gaussian see also http://cta.irap.omp.eu/ctools/users/user_manual/models_spatial_bgd.html for further implementation. Parameters ---------- - bkg_type (str): background type\n- def print_bkg(self): Show the background model. Parameters ----------\n\n<|skeleton|>\nclass Background:\n \"\"\"Background class. This class defines the Background object. The background is defined by its attribute (name, spatial, spectral). Attributes ---------- - name : the label of the given background - obsid (str): to match a given background to its obsID - spatial : the spatial properties of the background - spectral : the spectral properties of the background Methods ---------- - set_spatial_std_bkg - print_bkg\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the background object. Parameters ---------- - name (str): the label of the given background - spatial (dictionary): contain coordinates of the background - spectral (dictionary): spectral properties of the background\"\"\"\n <|body_0|>\n\n def set_spatial_std_bkg(self, bkg_type='CTAIrfBackground'):\n \"\"\"Set the spatial part of the background to standard model. Available standard models are - CTAIrfBackground - Gaussian see also http://cta.irap.omp.eu/ctools/users/user_manual/models_spatial_bgd.html for further implementation. Parameters ---------- - bkg_type (str): background type\"\"\"\n <|body_1|>\n\n def print_bkg(self):\n \"\"\"Show the background model. 
Parameters ----------\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.name = 'Background'\n self.obsid = None\n self.instrument = 'CTA'\n self.spatial = {'type': 'CTAIrfBackground'}\n self.spectral = {'type': 'PowerLaw', 'param': {'Prefactor': {'value': 1.0, 'free': True}, 'Index': {'value': 0, 'free': True}, 'PivotEnergy': {'value': 1.0 * u.TeV, 'free': False}}}\n<|end_body_0|>\n\n<|body_start_1|>\n if bkg_type == 'CTAIrfBackground':\n self.spatial = {'type': 'CTAIrfBackground'}\n elif bkg_type == 'Gaussian':\n self.spatial = {'type': 'Gaussian', 'param': {'Sigma': {'value': 3.0 * u.deg, 'free': False}}}\n else:\n raise ValueError('The baground type you are trying to set is not implemented.')\n<|end_body_1|>\n\n<|body_start_2|>\n print('--- name: ' + self.name + ' ---')\n print('--- ObsID: ' + self.obsid + ' ---')\n print('--- Spatial model: ' + self.spatial['type'])\n if 'param' in self.spatial.keys():\n for key in self.spatial['param'].keys():\n print(' ' + key + ': ' + str(self.spatial['param'][key]))\n print('--- Spectral model: ' + self.spectral['type'])\n if 'param' in self.spectral.keys():\n for key in self.spectral['param'].keys():\n print(' ' + key + ': ' + str(self.spectral['param'][key]))\n<|end_body_2|>\n", "revision_id": "660125b16820e6b0dc85fc201595d21648e058d7", "skeleton": "<|skeleton|>\nclass Background:\n \"\"\"Background class. This class defines the Background object. The background is defined by its attribute (name, spatial, spectral). Attributes ---------- - name : the label of the given background - obsid (str): to match a given background to its obsID - spatial : the spatial properties of the background - spectral : the spectral properties of the background Methods ---------- - set_spatial_std_bkg - print_bkg\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the background object. Parameters ---------- - name (str): the label of the given background - spatial (dictionary): contain coordinates of the background - spectral (dictionary): spectral properties of the background\"\"\"\n <|body_0|>\n\n def set_spatial_std_bkg(self, bkg_type='CTAIrfBackground'):\n \"\"\"Set the spatial part of the background to standard model. Available standard models are - CTAIrfBackground - Gaussian see also http://cta.irap.omp.eu/ctools/users/user_manual/models_spatial_bgd.html for further implementation. Parameters ---------- - bkg_type (str): background type\"\"\"\n <|body_1|>\n\n def print_bkg(self):\n \"\"\"Show the background model. Parameters ----------\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Background:\n \"\"\"Background class. This class defines the Background object. The background is defined by its attribute (name, spatial, spectral). Attributes ---------- - name : the label of the given background - obsid (str): to match a given background to its obsID - spatial : the spatial properties of the background - spectral : the spectral properties of the background Methods ---------- - set_spatial_std_bkg - print_bkg\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the background object. 
Parameters ---------- - name (str): the label of the given background - spatial (dictionary): contain coordinates of the background - spectral (dictionary): spectral properties of the background\"\"\"\n self.name = 'Background'\n self.obsid = None\n self.instrument = 'CTA'\n self.spatial = {'type': 'CTAIrfBackground'}\n self.spectral = {'type': 'PowerLaw', 'param': {'Prefactor': {'value': 1.0, 'free': True}, 'Index': {'value': 0, 'free': True}, 'PivotEnergy': {'value': 1.0 * u.TeV, 'free': False}}}\n\n def set_spatial_std_bkg(self, bkg_type='CTAIrfBackground'):\n \"\"\"Set the spatial part of the background to standard model. Available standard models are - CTAIrfBackground - Gaussian see also http://cta.irap.omp.eu/ctools/users/user_manual/models_spatial_bgd.html for further implementation. Parameters ---------- - bkg_type (str): background type\"\"\"\n if bkg_type == 'CTAIrfBackground':\n self.spatial = {'type': 'CTAIrfBackground'}\n elif bkg_type == 'Gaussian':\n self.spatial = {'type': 'Gaussian', 'param': {'Sigma': {'value': 3.0 * u.deg, 'free': False}}}\n else:\n raise ValueError('The baground type you are trying to set is not implemented.')\n\n def print_bkg(self):\n \"\"\"Show the background model. Parameters ----------\"\"\"\n print('--- name: ' + self.name + ' ---')\n print('--- ObsID: ' + self.obsid + ' ---')\n print('--- Spatial model: ' + self.spatial['type'])\n if 'param' in self.spatial.keys():\n for key in self.spatial['param'].keys():\n print(' ' + key + ': ' + str(self.spatial['param'][key]))\n print('--- Spectral model: ' + self.spectral['type'])\n if 'param' in self.spectral.keys():\n for key in self.spectral['param'].keys():\n print(' ' + key + ': ' + str(self.spectral['param'][key]))\n", "source": "the_stack_v2_python_sparse", "source_path": "Tools/background.py", "source_repo": "astroolka/ClusterPipe", "split": "val", "star_events_count": 0}
{"blob_id": "0c74402a627d820f22efd863c4204679d185e67b", "bodies": ["try:\n repo_type = RepoType[str(repo).upper()]\nexcept Exception:\n return (empty_result('error', 'Invalid repository type'), 400)\nreturn empty_result('success', get_repo_status(repo_type))", "json_data = request.get_json()\ntry:\n repo_type = RepoType[str(repo).upper()]\nexcept Exception:\n return (empty_result('error', 'Invalid repository type'), 400)\nif 'action' in json_data:\n if str(json_data['action']).upper() == 'REFRESH':\n try:\n res = refresh_repo(repo_type, get_jwt_identity())\n return empty_result('success', res)\n except VerifyPathException as e:\n return (empty_result('error', 'Repository structure is invalid ({}): {}'.format(type(e).__name__, str(e))), 400)\n except JoblockError as e:\n return (empty_result('error', 'Another job is locking configuration of devices, try again later ({})'.format(str(e))), 503)\n except SettingsSyntaxError as e:\n return (empty_result('error', 'Syntax error in repository: {}'.format(str(e))), 400)\n except Exception as e:\n return (empty_result('error', 'Error in repository: {}'.format(str(e))), 500)\n else:\n return (empty_result('error', 'Invalid action'), 400)\nelse:\n return (empty_result('error', 'No action specified'), 400)"], "bodies_text": "<|body_start_0|>\n try:\n repo_type = RepoType[str(repo).upper()]\n except Exception:\n return (empty_result('error', 'Invalid repository type'), 400)\n return empty_result('success', get_repo_status(repo_type))\n<|end_body_0|>\n\n<|body_start_1|>\n json_data = request.get_json()\n try:\n repo_type = RepoType[str(repo).upper()]\n except Exception:\n return (empty_result('error', 'Invalid repository type'), 400)\n if 'action' in json_data:\n if str(json_data['action']).upper() == 'REFRESH':\n try:\n res = refresh_repo(repo_type, get_jwt_identity())\n return empty_result('success', res)\n except VerifyPathException as e:\n return (empty_result('error', 'Repository structure is invalid ({}): {}'.format(type(e).__name__, str(e))), 400)\n except JoblockError as e:\n return (empty_result('error', 'Another job is locking configuration of devices, try again later ({})'.format(str(e))), 503)\n except SettingsSyntaxError as e:\n return (empty_result('error', 'Syntax error in repository: {}'.format(str(e))), 400)\n except Exception as e:\n return (empty_result('error', 'Error in repository: {}'.format(str(e))), 500)\n else:\n return (empty_result('error', 'Invalid action'), 400)\n else:\n return (empty_result('error', 'No action specified'), 400)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "RepositoryApi", "detected_licenses": ["BSD-2-Clause-Views", "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RepositoryApi:\n\n def get(self, repo):\n \"\"\"Get repository information\"\"\"\n <|body_0|>\n\n def put(self, repo):\n \"\"\"Modify repository\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n repo_type = RepoType[str(repo).upper()]\n except Exception:\n return (empty_result('error', 'Invalid repository type'), 400)\n return empty_result('success', get_repo_status(repo_type))\n<|end_body_0|>\n\n<|body_start_1|>\n json_data = request.get_json()\n try:\n repo_type = RepoType[str(repo).upper()]\n except Exception:\n return (empty_result('error', 'Invalid repository type'), 400)\n if 'action' in json_data:\n if str(json_data['action']).upper() == 'REFRESH':\n try:\n res = refresh_repo(repo_type, get_jwt_identity())\n 
return empty_result('success', res)\n except VerifyPathException as e:\n return (empty_result('error', 'Repository structure is invalid ({}): {}'.format(type(e).__name__, str(e))), 400)\n except JoblockError as e:\n return (empty_result('error', 'Another job is locking configuration of devices, try again later ({})'.format(str(e))), 503)\n except SettingsSyntaxError as e:\n return (empty_result('error', 'Syntax error in repository: {}'.format(str(e))), 400)\n except Exception as e:\n return (empty_result('error', 'Error in repository: {}'.format(str(e))), 500)\n else:\n return (empty_result('error', 'Invalid action'), 400)\n else:\n return (empty_result('error', 'No action specified'), 400)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000012", "length_bytes": 2805, "license_type": "permissive", "methods": [{"docstring": "Get repository information", "name": "get", "signature": "def get(self, repo)"}, {"docstring": "Modify repository", "name": "put", "signature": "def put(self, repo)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_007355", "prompt": "Implement the Python class `RepositoryApi` described below.\n\nClass description:\nImplement the RepositoryApi class.\n\nMethod signatures and docstrings:\n- def get(self, repo): Get repository information\n- def put(self, repo): Modify repository", "prompted_full_text": "Implement the Python class `RepositoryApi` described below.\n\nClass description:\nImplement the RepositoryApi class.\n\nMethod signatures and docstrings:\n- def get(self, repo): Get repository information\n- def put(self, repo): Modify repository\n\n<|skeleton|>\nclass RepositoryApi:\n\n def get(self, repo):\n \"\"\"Get repository information\"\"\"\n <|body_0|>\n\n def put(self, repo):\n \"\"\"Modify repository\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n repo_type = RepoType[str(repo).upper()]\n except Exception:\n return (empty_result('error', 'Invalid repository type'), 400)\n return empty_result('success', get_repo_status(repo_type))\n<|end_body_0|>\n\n<|body_start_1|>\n json_data = request.get_json()\n try:\n repo_type = RepoType[str(repo).upper()]\n except Exception:\n return (empty_result('error', 'Invalid repository type'), 400)\n if 'action' in json_data:\n if str(json_data['action']).upper() == 'REFRESH':\n try:\n res = refresh_repo(repo_type, get_jwt_identity())\n return empty_result('success', res)\n except VerifyPathException as e:\n return (empty_result('error', 'Repository structure is invalid ({}): {}'.format(type(e).__name__, str(e))), 400)\n except JoblockError as e:\n return (empty_result('error', 'Another job is locking configuration of devices, try again later ({})'.format(str(e))), 503)\n except SettingsSyntaxError as e:\n return (empty_result('error', 'Syntax error in repository: {}'.format(str(e))), 400)\n except Exception as e:\n return (empty_result('error', 'Error in repository: {}'.format(str(e))), 500)\n else:\n return (empty_result('error', 'Invalid action'), 400)\n else:\n return (empty_result('error', 'No action specified'), 400)\n<|end_body_1|>\n", "revision_id": "d755dfed69bebe0c7bea66ad1802cba2cd89fec8", "skeleton": "<|skeleton|>\nclass RepositoryApi:\n\n def get(self, repo):\n \"\"\"Get repository information\"\"\"\n <|body_0|>\n\n def put(self, repo):\n \"\"\"Modify repository\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class 
RepositoryApi:\n def get(self, repo):\n \"\"\"Get repository information\"\"\"\n try:\n repo_type = RepoType[str(repo).upper()]\n except Exception:\n return (empty_result('error', 'Invalid repository type'), 400)\n return empty_result('success', get_repo_status(repo_type))\n\n def put(self, repo):\n \"\"\"Modify repository\"\"\"\n json_data = request.get_json()\n try:\n repo_type = RepoType[str(repo).upper()]\n except Exception:\n return (empty_result('error', 'Invalid repository type'), 400)\n if 'action' in json_data:\n if str(json_data['action']).upper() == 'REFRESH':\n try:\n res = refresh_repo(repo_type, get_jwt_identity())\n return empty_result('success', res)\n except VerifyPathException as e:\n return (empty_result('error', 'Repository structure is invalid ({}): {}'.format(type(e).__name__, str(e))), 400)\n except JoblockError as e:\n return (empty_result('error', 'Another job is locking configuration of devices, try again later ({})'.format(str(e))), 503)\n except SettingsSyntaxError as e:\n return (empty_result('error', 'Syntax error in repository: {}'.format(str(e))), 400)\n except Exception as e:\n return (empty_result('error', 'Error in repository: {}'.format(str(e))), 500)\n else:\n return (empty_result('error', 'Invalid action'), 400)\n else:\n return (empty_result('error', 'No action specified'), 400)\n", "source": "the_stack_v2_python_sparse", "source_path": "src/cnaas_nms/api/repository.py", "source_repo": "SUNET/cnaas-nms", "split": "val", "star_events_count": 67}
{"blob_id": "48e12b5eb1948002bd14772ff0ef0052008e8426", "bodies": ["self.view = view\nself.settings = settings.ViewSettings(view)\nself.busy = False\nself.events = 0\nself.latest_time = 0.0\nself.delay = 0", "if event_id & ACTIVATED:\n if not (self.settings.get('live_mode') or self.settings.get('focus_change_mode')):\n return\nelif event_id & MODIFIED:\n if not self.settings.get('live_mode'):\n return\nself.latest_time = time.time()\nself.events |= event_id\nif not self.busy:\n self.delay = max(200, self.settings.get('debounce_delay', 1000))\n self.start_timer(200)", "start_time = self.latest_time\n\ndef worker():\n \"\"\"The function called after some idle time.\"\"\"\n if start_time < self.latest_time:\n self.start_timer(self.delay)\n return\n self.busy = False\n if not self.is_view_visible():\n return\n self.view.run_command('git_gutter', {'events': self.events})\n self.events = 0\nself.busy = True\nsublime.set_timeout(worker, delay)", "window = self.view.window()\nif window:\n view_id = self.view.id()\n for group in range(window.num_groups()):\n active_view = window.active_view_in_group(group)\n if active_view and active_view.id() == view_id:\n return True\nreturn False"], "bodies_text": "<|body_start_0|>\n self.view = view\n self.settings = settings.ViewSettings(view)\n self.busy = False\n self.events = 0\n self.latest_time = 0.0\n self.delay = 0\n<|end_body_0|>\n\n<|body_start_1|>\n if event_id & ACTIVATED:\n if not (self.settings.get('live_mode') or self.settings.get('focus_change_mode')):\n return\n elif event_id & MODIFIED:\n if not self.settings.get('live_mode'):\n return\n self.latest_time = time.time()\n self.events |= event_id\n if not self.busy:\n self.delay = max(200, self.settings.get('debounce_delay', 1000))\n self.start_timer(200)\n<|end_body_1|>\n\n<|body_start_2|>\n start_time = self.latest_time\n\n def worker():\n \"\"\"The function called after some idle time.\"\"\"\n if start_time < self.latest_time:\n self.start_timer(self.delay)\n return\n self.busy = False\n if not self.is_view_visible():\n return\n self.view.run_command('git_gutter', {'events': self.events})\n self.events = 0\n self.busy = True\n sublime.set_timeout(worker, delay)\n<|end_body_2|>\n\n<|body_start_3|>\n window = self.view.window()\n if window:\n view_id = self.view.id()\n for group in range(window.num_groups()):\n active_view = window.active_view_in_group(group)\n if active_view and active_view.id() == view_id:\n return True\n return False\n<|end_body_3|>\n", "class_docstring": "The class queues and forwards view events to GitGutterCommand. A ViewEventListener object queues all events received from a view and starts a single sublime timer to forward the event to GitGutterCommand after some idle time. This ensures not to bloat sublime API due to dozens of timers running for debouncing events.", "class_name": "ViewEventListener", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ViewEventListener:\n \"\"\"The class queues and forwards view events to GitGutterCommand. A ViewEventListener object queues all events received from a view and starts a single sublime timer to forward the event to GitGutterCommand after some idle time. This ensures not to bloat sublime API due to dozens of timers running for debouncing events.\"\"\"\n\n def __init__(self, view):\n \"\"\"Initialize ViewEventListener object. 
Arguments: view (View): The view the object is created for.\"\"\"\n <|body_0|>\n\n def push(self, event_id):\n \"\"\"Push the event to the queue and start idle timer. Add the event identifier to 'events' and update the 'latest_time'. This marks an event to be received rather than counting the number of received events. The idle timer is started only, if no other one is already in flight. Arguments: event_id (int): One of the event identifiers.\"\"\"\n <|body_1|>\n\n def start_timer(self, delay):\n \"\"\"Run GitGutterCommand after some idle time. Check if no more events were received during idle time and run GitGutterCommand if not. Restart timer to check later, otherwise. Timer is stopped without calling GitGutterCommand, if a view is not visible to save some resources. Evaluation will be triggered by activating the view next time. Arguments: delay (int): The delay in milliseconds to wait until probably forward the events, if no other event was received in the meanwhile.\"\"\"\n <|body_2|>\n\n def is_view_visible(self):\n \"\"\"Determine if the view is visible. Only an active view of a group is visible. Returns: bool: True if the view is visible in any window.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.view = view\n self.settings = settings.ViewSettings(view)\n self.busy = False\n self.events = 0\n self.latest_time = 0.0\n self.delay = 0\n<|end_body_0|>\n\n<|body_start_1|>\n if event_id & ACTIVATED:\n if not (self.settings.get('live_mode') or self.settings.get('focus_change_mode')):\n return\n elif event_id & MODIFIED:\n if not self.settings.get('live_mode'):\n return\n self.latest_time = time.time()\n self.events |= event_id\n if not self.busy:\n self.delay = max(200, self.settings.get('debounce_delay', 1000))\n self.start_timer(200)\n<|end_body_1|>\n\n<|body_start_2|>\n start_time = self.latest_time\n\n def worker():\n \"\"\"The function called after some idle time.\"\"\"\n if start_time < self.latest_time:\n self.start_timer(self.delay)\n return\n self.busy = False\n if not self.is_view_visible():\n return\n self.view.run_command('git_gutter', {'events': self.events})\n self.events = 0\n self.busy = True\n sublime.set_timeout(worker, delay)\n<|end_body_2|>\n\n<|body_start_3|>\n window = self.view.window()\n if window:\n view_id = self.view.id()\n for group in range(window.num_groups()):\n active_view = window.active_view_in_group(group)\n if active_view and active_view.id() == view_id:\n return True\n return False\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000013", "length_bytes": 10270, "license_type": "permissive", "methods": [{"docstring": "Initialize ViewEventListener object. Arguments: view (View): The view the object is created for.", "name": "__init__", "signature": "def __init__(self, view)"}, {"docstring": "Push the event to the queue and start idle timer. Add the event identifier to 'events' and update the 'latest_time'. This marks an event to be received rather than counting the number of received events. The idle timer is started only, if no other one is already in flight. Arguments: event_id (int): One of the event identifiers.", "name": "push", "signature": "def push(self, event_id)"}, {"docstring": "Run GitGutterCommand after some idle time. Check if no more events were received during idle time and run GitGutterCommand if not. Restart timer to check later, otherwise. Timer is stopped without calling GitGutterCommand, if a view is not visible to save some resources. Evaluation will be triggered by activating the view next time. 
Arguments: delay (int): The delay in milliseconds to wait until probably forward the events, if no other event was received in the meanwhile.", "name": "start_timer", "signature": "def start_timer(self, delay)"}, {"docstring": "Determine if the view is visible. Only an active view of a group is visible. Returns: bool: True if the view is visible in any window.", "name": "is_view_visible", "signature": "def is_view_visible(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_042516", "prompt": "Implement the Python class `ViewEventListener` described below.\n\nClass description:\nThe class queues and forwards view events to GitGutterCommand. A ViewEventListener object queues all events received from a view and starts a single sublime timer to forward the event to GitGutterCommand after some idle time. This ensures not to bloat sublime API due to dozens of timers running for debouncing events.\n\nMethod signatures and docstrings:\n- def __init__(self, view): Initialize ViewEventListener object. Arguments: view (View): The view the object is created for.\n- def push(self, event_id): Push the event to the queue and start idle timer. Add the event identifier to 'events' and update the 'latest_time'. This marks an event to be received rather than counting the number of received events. The idle timer is started only, if no other one is already in flight. Arguments: event_id (int): One of the event identifiers.\n- def start_timer(self, delay): Run GitGutterCommand after some idle time. Check if no more events were received during idle time and run GitGutterCommand if not. Restart timer to check later, otherwise. Timer is stopped without calling GitGutterCommand, if a view is not visible to save some resources. Evaluation will be triggered by activating the view next time. Arguments: delay (int): The delay in milliseconds to wait until probably forward the events, if no other event was received in the meanwhile.\n- def is_view_visible(self): Determine if the view is visible. Only an active view of a group is visible. Returns: bool: True if the view is visible in any window.", "prompted_full_text": "Implement the Python class `ViewEventListener` described below.\n\nClass description:\nThe class queues and forwards view events to GitGutterCommand. A ViewEventListener object queues all events received from a view and starts a single sublime timer to forward the event to GitGutterCommand after some idle time. This ensures not to bloat sublime API due to dozens of timers running for debouncing events.\n\nMethod signatures and docstrings:\n- def __init__(self, view): Initialize ViewEventListener object. Arguments: view (View): The view the object is created for.\n- def push(self, event_id): Push the event to the queue and start idle timer. Add the event identifier to 'events' and update the 'latest_time'. This marks an event to be received rather than counting the number of received events. The idle timer is started only, if no other one is already in flight. Arguments: event_id (int): One of the event identifiers.\n- def start_timer(self, delay): Run GitGutterCommand after some idle time. Check if no more events were received during idle time and run GitGutterCommand if not. Restart timer to check later, otherwise. Timer is stopped without calling GitGutterCommand, if a view is not visible to save some resources. Evaluation will be triggered by activating the view next time. 
Arguments: delay (int): The delay in milliseconds to wait until probably forward the events, if no other event was received in the meanwhile.\n- def is_view_visible(self): Determine if the view is visible. Only an active view of a group is visible. Returns: bool: True if the view is visible in any window.\n\n<|skeleton|>\nclass ViewEventListener:\n \"\"\"The class queues and forwards view events to GitGutterCommand. A ViewEventListener object queues all events received from a view and starts a single sublime timer to forward the event to GitGutterCommand after some idle time. This ensures not to bloat sublime API due to dozens of timers running for debouncing events.\"\"\"\n\n def __init__(self, view):\n \"\"\"Initialize ViewEventListener object. Arguments: view (View): The view the object is created for.\"\"\"\n <|body_0|>\n\n def push(self, event_id):\n \"\"\"Push the event to the queue and start idle timer. Add the event identifier to 'events' and update the 'latest_time'. This marks an event to be received rather than counting the number of received events. The idle timer is started only, if no other one is already in flight. Arguments: event_id (int): One of the event identifiers.\"\"\"\n <|body_1|>\n\n def start_timer(self, delay):\n \"\"\"Run GitGutterCommand after some idle time. Check if no more events were received during idle time and run GitGutterCommand if not. Restart timer to check later, otherwise. Timer is stopped without calling GitGutterCommand, if a view is not visible to save some resources. Evaluation will be triggered by activating the view next time. Arguments: delay (int): The delay in milliseconds to wait until probably forward the events, if no other event was received in the meanwhile.\"\"\"\n <|body_2|>\n\n def is_view_visible(self):\n \"\"\"Determine if the view is visible. Only an active view of a group is visible. Returns: bool: True if the view is visible in any window.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.view = view\n self.settings = settings.ViewSettings(view)\n self.busy = False\n self.events = 0\n self.latest_time = 0.0\n self.delay = 0\n<|end_body_0|>\n\n<|body_start_1|>\n if event_id & ACTIVATED:\n if not (self.settings.get('live_mode') or self.settings.get('focus_change_mode')):\n return\n elif event_id & MODIFIED:\n if not self.settings.get('live_mode'):\n return\n self.latest_time = time.time()\n self.events |= event_id\n if not self.busy:\n self.delay = max(200, self.settings.get('debounce_delay', 1000))\n self.start_timer(200)\n<|end_body_1|>\n\n<|body_start_2|>\n start_time = self.latest_time\n\n def worker():\n \"\"\"The function called after some idle time.\"\"\"\n if start_time < self.latest_time:\n self.start_timer(self.delay)\n return\n self.busy = False\n if not self.is_view_visible():\n return\n self.view.run_command('git_gutter', {'events': self.events})\n self.events = 0\n self.busy = True\n sublime.set_timeout(worker, delay)\n<|end_body_2|>\n\n<|body_start_3|>\n window = self.view.window()\n if window:\n view_id = self.view.id()\n for group in range(window.num_groups()):\n active_view = window.active_view_in_group(group)\n if active_view and active_view.id() == view_id:\n return True\n return False\n<|end_body_3|>\n", "revision_id": "5246bcebbf7545e1d9ec68b7f2769252bc42afea", "skeleton": "<|skeleton|>\nclass ViewEventListener:\n \"\"\"The class queues and forwards view events to GitGutterCommand. 
A ViewEventListener object queues all events received from a view and starts a single sublime timer to forward the event to GitGutterCommand after some idle time. This ensures not to bloat sublime API due to dozens of timers running for debouncing events.\"\"\"\n\n def __init__(self, view):\n \"\"\"Initialize ViewEventListener object. Arguments: view (View): The view the object is created for.\"\"\"\n <|body_0|>\n\n def push(self, event_id):\n \"\"\"Push the event to the queue and start idle timer. Add the event identifier to 'events' and update the 'latest_time'. This marks an event to be received rather than counting the number of received events. The idle timer is started only, if no other one is already in flight. Arguments: event_id (int): One of the event identifiers.\"\"\"\n <|body_1|>\n\n def start_timer(self, delay):\n \"\"\"Run GitGutterCommand after some idle time. Check if no more events were received during idle time and run GitGutterCommand if not. Restart timer to check later, otherwise. Timer is stopped without calling GitGutterCommand, if a view is not visible to save some resources. Evaluation will be triggered by activating the view next time. Arguments: delay (int): The delay in milliseconds to wait until probably forward the events, if no other event was received in the meanwhile.\"\"\"\n <|body_2|>\n\n def is_view_visible(self):\n \"\"\"Determine if the view is visible. Only an active view of a group is visible. Returns: bool: True if the view is visible in any window.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ViewEventListener:\n \"\"\"The class queues and forwards view events to GitGutterCommand. A ViewEventListener object queues all events received from a view and starts a single sublime timer to forward the event to GitGutterCommand after some idle time. This ensures not to bloat sublime API due to dozens of timers running for debouncing events.\"\"\"\n\n def __init__(self, view):\n \"\"\"Initialize ViewEventListener object. Arguments: view (View): The view the object is created for.\"\"\"\n self.view = view\n self.settings = settings.ViewSettings(view)\n self.busy = False\n self.events = 0\n self.latest_time = 0.0\n self.delay = 0\n\n def push(self, event_id):\n \"\"\"Push the event to the queue and start idle timer. Add the event identifier to 'events' and update the 'latest_time'. This marks an event to be received rather than counting the number of received events. The idle timer is started only, if no other one is already in flight. Arguments: event_id (int): One of the event identifiers.\"\"\"\n if event_id & ACTIVATED:\n if not (self.settings.get('live_mode') or self.settings.get('focus_change_mode')):\n return\n elif event_id & MODIFIED:\n if not self.settings.get('live_mode'):\n return\n self.latest_time = time.time()\n self.events |= event_id\n if not self.busy:\n self.delay = max(200, self.settings.get('debounce_delay', 1000))\n self.start_timer(200)\n\n def start_timer(self, delay):\n \"\"\"Run GitGutterCommand after some idle time. Check if no more events were received during idle time and run GitGutterCommand if not. Restart timer to check later, otherwise. Timer is stopped without calling GitGutterCommand, if a view is not visible to save some resources. Evaluation will be triggered by activating the view next time. 
Arguments: delay (int): The delay in milliseconds to wait until probably forward the events, if no other event was received in the meanwhile.\"\"\"\n start_time = self.latest_time\n\n def worker():\n \"\"\"The function called after some idle time.\"\"\"\n if start_time < self.latest_time:\n self.start_timer(self.delay)\n return\n self.busy = False\n if not self.is_view_visible():\n return\n self.view.run_command('git_gutter', {'events': self.events})\n self.events = 0\n self.busy = True\n sublime.set_timeout(worker, delay)\n\n def is_view_visible(self):\n \"\"\"Determine if the view is visible. Only an active view of a group is visible. Returns: bool: True if the view is visible in any window.\"\"\"\n window = self.view.window()\n if window:\n view_id = self.view.id()\n for group in range(window.num_groups()):\n active_view = window.active_view_in_group(group)\n if active_view and active_view.id() == view_id:\n return True\n return False\n", "source": "the_stack_v2_python_sparse", "source_path": "modules/events.py", "source_repo": "jisaacks/GitGutter", "split": "val", "star_events_count": 1903}
{"blob_id": "c1ed945590b8e6007dc6137ecad43fdbab680693", "bodies": ["super(EncodingDetectFilter, self).__init__(builder)\nself._normalize = self.builder.decoder.normalize\nself._meta = self._normalize('meta')", "normalize = self._normalize\niname = normalize(name)\nif iname == self._meta:\n adict = dict([(normalize(key), val) for key, val in attr])\n value = str(adict.get(normalize('charset')) or '')\n if value.startswith('\"') or value.startswith(\"'\"):\n value = value[1:-1].strip()\n if value:\n self.builder.handle_encoding(value)\n else:\n value = (adict.get(normalize('http-equiv')) or '').lower()\n if value.startswith('\"') or value.startswith(\"'\"):\n value = value[1:-1].strip()\n if value == 'content-type':\n ctype = adict.get(normalize('content'))\n if ctype:\n if ctype.startswith('\"') or ctype.startswith(\"'\"):\n ctype = ctype[1:-1].strip()\n parsed = _parse_content_type(ctype)\n if parsed is not None:\n encoding = parsed[1].get('charset')\n if encoding:\n self.builder.handle_encoding(encoding[0].strip())\nself.builder.handle_starttag(name, attr, closed, data)", "match = self._PI_MATCH(str(data))\nif match:\n encoding = 'utf-8'\n for match in self._PI_ATT_ITER(match.group('attr')):\n key, value = match.group('name', 'value')\n if key or value:\n if key == 'encoding':\n value = value.strip()\n if value.startswith('\"') or value.startswith(\"'\"):\n value = value[1:-1].strip()\n if value:\n encoding = value\n break\n else:\n break\n self.builder.handle_encoding(encoding)\nself.builder.handle_pi(data)"], "bodies_text": "<|body_start_0|>\n super(EncodingDetectFilter, self).__init__(builder)\n self._normalize = self.builder.decoder.normalize\n self._meta = self._normalize('meta')\n<|end_body_0|>\n\n<|body_start_1|>\n normalize = self._normalize\n iname = normalize(name)\n if iname == self._meta:\n adict = dict([(normalize(key), val) for key, val in attr])\n value = str(adict.get(normalize('charset')) or '')\n if value.startswith('\"') or value.startswith(\"'\"):\n value = value[1:-1].strip()\n if value:\n self.builder.handle_encoding(value)\n else:\n value = (adict.get(normalize('http-equiv')) or '').lower()\n if value.startswith('\"') or value.startswith(\"'\"):\n value = value[1:-1].strip()\n if value == 'content-type':\n ctype = adict.get(normalize('content'))\n if ctype:\n if ctype.startswith('\"') or ctype.startswith(\"'\"):\n ctype = ctype[1:-1].strip()\n parsed = _parse_content_type(ctype)\n if parsed is not None:\n encoding = parsed[1].get('charset')\n if encoding:\n self.builder.handle_encoding(encoding[0].strip())\n self.builder.handle_starttag(name, attr, closed, data)\n<|end_body_1|>\n\n<|body_start_2|>\n match = self._PI_MATCH(str(data))\n if match:\n encoding = 'utf-8'\n for match in self._PI_ATT_ITER(match.group('attr')):\n key, value = match.group('name', 'value')\n if key or value:\n if key == 'encoding':\n value = value.strip()\n if value.startswith('\"') or value.startswith(\"'\"):\n value = value[1:-1].strip()\n if value:\n encoding = value\n break\n else:\n break\n self.builder.handle_encoding(encoding)\n self.builder.handle_pi(data)\n<|end_body_2|>\n", "class_docstring": "Extract template encoding and pass it properly to the builder", "class_name": "EncodingDetectFilter", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass EncodingDetectFilter:\n \"\"\"Extract template encoding and pass it properly to the builder\"\"\"\n\n def __init__(self, builder):\n \"\"\"Initialization\"\"\"\n 
<|body_0|>\n\n def handle_starttag(self, name, attr, closed, data):\n \"\"\"Extract encoding from HTML meta element Here are samples for the expected formats:: The event is passed to the builder nevertheless. :See: `BuildingListenerInterface`\"\"\"\n <|body_1|>\n\n def handle_pi(self, data):\n \"\"\"Extract encoding from xml declaration Here's a sample for the expected format:: The event is passed to the builder nevertheless. :See: `BuildingListenerInterface`\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(EncodingDetectFilter, self).__init__(builder)\n self._normalize = self.builder.decoder.normalize\n self._meta = self._normalize('meta')\n<|end_body_0|>\n\n<|body_start_1|>\n normalize = self._normalize\n iname = normalize(name)\n if iname == self._meta:\n adict = dict([(normalize(key), val) for key, val in attr])\n value = str(adict.get(normalize('charset')) or '')\n if value.startswith('\"') or value.startswith(\"'\"):\n value = value[1:-1].strip()\n if value:\n self.builder.handle_encoding(value)\n else:\n value = (adict.get(normalize('http-equiv')) or '').lower()\n if value.startswith('\"') or value.startswith(\"'\"):\n value = value[1:-1].strip()\n if value == 'content-type':\n ctype = adict.get(normalize('content'))\n if ctype:\n if ctype.startswith('\"') or ctype.startswith(\"'\"):\n ctype = ctype[1:-1].strip()\n parsed = _parse_content_type(ctype)\n if parsed is not None:\n encoding = parsed[1].get('charset')\n if encoding:\n self.builder.handle_encoding(encoding[0].strip())\n self.builder.handle_starttag(name, attr, closed, data)\n<|end_body_1|>\n\n<|body_start_2|>\n match = self._PI_MATCH(str(data))\n if match:\n encoding = 'utf-8'\n for match in self._PI_ATT_ITER(match.group('attr')):\n key, value = match.group('name', 'value')\n if key or value:\n if key == 'encoding':\n value = value.strip()\n if value.startswith('\"') or value.startswith(\"'\"):\n value = value[1:-1].strip()\n if value:\n encoding = value\n break\n else:\n break\n self.builder.handle_encoding(encoding)\n self.builder.handle_pi(data)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000014", "length_bytes": 6907, "license_type": "permissive", "methods": [{"docstring": "Initialization", "name": "__init__", "signature": "def __init__(self, builder)"}, {"docstring": "Extract encoding from HTML meta element Here are samples for the expected formats:: The event is passed to the builder nevertheless. :See: `BuildingListenerInterface`", "name": "handle_starttag", "signature": "def handle_starttag(self, name, attr, closed, data)"}, {"docstring": "Extract encoding from xml declaration Here's a sample for the expected format:: The event is passed to the builder nevertheless. :See: `BuildingListenerInterface`", "name": "handle_pi", "signature": "def handle_pi(self, data)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_006028", "prompt": "Implement the Python class `EncodingDetectFilter` described below.\n\nClass description:\nExtract template encoding and pass it properly to the builder\n\nMethod signatures and docstrings:\n- def __init__(self, builder): Initialization\n- def handle_starttag(self, name, attr, closed, data): Extract encoding from HTML meta element Here are samples for the expected formats:: The event is passed to the builder nevertheless. :See: `BuildingListenerInterface`\n- def handle_pi(self, data): Extract encoding from xml declaration Here's a sample for the expected format:: The event is passed to the builder nevertheless. 
:See: `BuildingListenerInterface`", "prompted_full_text": "Implement the Python class `EncodingDetectFilter` described below.\n\nClass description:\nExtract template encoding and pass it properly to the builder\n\nMethod signatures and docstrings:\n- def __init__(self, builder): Initialization\n- def handle_starttag(self, name, attr, closed, data): Extract encoding from HTML meta element Here are samples for the expected formats:: The event is passed to the builder nevertheless. :See: `BuildingListenerInterface`\n- def handle_pi(self, data): Extract encoding from xml declaration Here's a sample for the expected format:: The event is passed to the builder nevertheless. :See: `BuildingListenerInterface`\n\n<|skeleton|>\nclass EncodingDetectFilter:\n \"\"\"Extract template encoding and pass it properly to the builder\"\"\"\n\n def __init__(self, builder):\n \"\"\"Initialization\"\"\"\n <|body_0|>\n\n def handle_starttag(self, name, attr, closed, data):\n \"\"\"Extract encoding from HTML meta element Here are samples for the expected formats:: The event is passed to the builder nevertheless. :See: `BuildingListenerInterface`\"\"\"\n <|body_1|>\n\n def handle_pi(self, data):\n \"\"\"Extract encoding from xml declaration Here's a sample for the expected format:: The event is passed to the builder nevertheless. :See: `BuildingListenerInterface`\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(EncodingDetectFilter, self).__init__(builder)\n self._normalize = self.builder.decoder.normalize\n self._meta = self._normalize('meta')\n<|end_body_0|>\n\n<|body_start_1|>\n normalize = self._normalize\n iname = normalize(name)\n if iname == self._meta:\n adict = dict([(normalize(key), val) for key, val in attr])\n value = str(adict.get(normalize('charset')) or '')\n if value.startswith('\"') or value.startswith(\"'\"):\n value = value[1:-1].strip()\n if value:\n self.builder.handle_encoding(value)\n else:\n value = (adict.get(normalize('http-equiv')) or '').lower()\n if value.startswith('\"') or value.startswith(\"'\"):\n value = value[1:-1].strip()\n if value == 'content-type':\n ctype = adict.get(normalize('content'))\n if ctype:\n if ctype.startswith('\"') or ctype.startswith(\"'\"):\n ctype = ctype[1:-1].strip()\n parsed = _parse_content_type(ctype)\n if parsed is not None:\n encoding = parsed[1].get('charset')\n if encoding:\n self.builder.handle_encoding(encoding[0].strip())\n self.builder.handle_starttag(name, attr, closed, data)\n<|end_body_1|>\n\n<|body_start_2|>\n match = self._PI_MATCH(str(data))\n if match:\n encoding = 'utf-8'\n for match in self._PI_ATT_ITER(match.group('attr')):\n key, value = match.group('name', 'value')\n if key or value:\n if key == 'encoding':\n value = value.strip()\n if value.startswith('\"') or value.startswith(\"'\"):\n value = value[1:-1].strip()\n if value:\n encoding = value\n break\n else:\n break\n self.builder.handle_encoding(encoding)\n self.builder.handle_pi(data)\n<|end_body_2|>\n", "revision_id": "65a93080281f9ce5c0379e9dbb111f14965a8613", "skeleton": "<|skeleton|>\nclass EncodingDetectFilter:\n \"\"\"Extract template encoding and pass it properly to the builder\"\"\"\n\n def __init__(self, builder):\n \"\"\"Initialization\"\"\"\n <|body_0|>\n\n def handle_starttag(self, name, attr, closed, data):\n \"\"\"Extract encoding from HTML meta element Here are samples for the expected formats:: The event is passed to the builder nevertheless. 
:See: `BuildingListenerInterface`\"\"\"\n <|body_1|>\n\n def handle_pi(self, data):\n \"\"\"Extract encoding from xml declaration Here's a sample for the expected format:: The event is passed to the builder nevertheless. :See: `BuildingListenerInterface`\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class EncodingDetectFilter:\n \"\"\"Extract template encoding and pass it properly to the builder\"\"\"\n\n def __init__(self, builder):\n \"\"\"Initialization\"\"\"\n super(EncodingDetectFilter, self).__init__(builder)\n self._normalize = self.builder.decoder.normalize\n self._meta = self._normalize('meta')\n\n def handle_starttag(self, name, attr, closed, data):\n \"\"\"Extract encoding from HTML meta element Here are samples for the expected formats:: The event is passed to the builder nevertheless. :See: `BuildingListenerInterface`\"\"\"\n normalize = self._normalize\n iname = normalize(name)\n if iname == self._meta:\n adict = dict([(normalize(key), val) for key, val in attr])\n value = str(adict.get(normalize('charset')) or '')\n if value.startswith('\"') or value.startswith(\"'\"):\n value = value[1:-1].strip()\n if value:\n self.builder.handle_encoding(value)\n else:\n value = (adict.get(normalize('http-equiv')) or '').lower()\n if value.startswith('\"') or value.startswith(\"'\"):\n value = value[1:-1].strip()\n if value == 'content-type':\n ctype = adict.get(normalize('content'))\n if ctype:\n if ctype.startswith('\"') or ctype.startswith(\"'\"):\n ctype = ctype[1:-1].strip()\n parsed = _parse_content_type(ctype)\n if parsed is not None:\n encoding = parsed[1].get('charset')\n if encoding:\n self.builder.handle_encoding(encoding[0].strip())\n self.builder.handle_starttag(name, attr, closed, data)\n\n def handle_pi(self, data):\n \"\"\"Extract encoding from xml declaration Here's a sample for the expected format:: The event is passed to the builder nevertheless. :See: `BuildingListenerInterface`\"\"\"\n match = self._PI_MATCH(str(data))\n if match:\n encoding = 'utf-8'\n for match in self._PI_ATT_ITER(match.group('attr')):\n key, value = match.group('name', 'value')\n if key or value:\n if key == 'encoding':\n value = value.strip()\n if value.startswith('\"') or value.startswith(\"'\"):\n value = value[1:-1].strip()\n if value:\n encoding = value\n break\n else:\n break\n self.builder.handle_encoding(encoding)\n self.builder.handle_pi(data)\n", "source": "the_stack_v2_python_sparse", "source_path": "tdi/markup/soup/filters.py", "source_repo": "ndparker/tdi", "split": "val", "star_events_count": 4}
{"blob_id": "9408a845744a6aee42996211d96a781aa228a71d", "bodies": ["self.args = arg_cls.parse()\nself.register_func = register_func\nself.mode = TaskMode(mode)\nfrom torch import distributed\nself.is_distributed = False\nif distributed.is_available():\n if len(self.args.use_gpus) > 1:\n if not self.args.use_data_parallel:\n self.is_distributed = True\ntask_dict = load_task_option_yaml(self.args.task_option_file)\ntask_dict = self.modify_task_dict(task_dict)\nself.register_func()\nfrom ..settings.spaces import Spaces\nself.task_option = Spaces.build(Spaces.NAME.TASK_OPTION, task_dict['ref'], task_dict)\nprint(self.task_option)", "task_dict['mode'] = self.mode\ntask_dict['resume'] = self.args.resume\nif self.args.pth_path:\n task_dict['model']['pth_path'] = self.args.pth_path\nif self.args.dataset_path:\n task_dict['dataset_path'] = self.args.dataset_path\nif self.args.output_path:\n task_dict['output_path'] = self.args.output_path\ntask_dict['is_distributed'] = self.is_distributed\nif self.args.test_option_file:\n if not os.path.isfile(self.args.test_option_file):\n raise FileNotFoundError(f'{self.args.test_option_file} does not exist or is not a file')\n task_dict['test_option'] = load_yaml(self.args.test_option_file)\ntask_dict['profiling'] = self.args.profiling\ntask_dict['profile_tool'] = self.args.profile_tool\ntask_dict['profile_memory'] = self.args.profile_memory\nreturn task_dict", "if run_task_func is None:\n run_task_func = run_task\nif self.is_distributed:\n import torch.multiprocessing as pt_mp\n from .ddp import launch_ddp_task\n n_gpus = len(self.args.use_gpus)\n pt_mp.spawn(fn=launch_ddp_task, nprocs=n_gpus, join=True, args=(n_gpus, run_task_func, self.args, self.task_option, self.register_func, *run_task_func_args))\nelse:\n run_task_func(self.args.use_gpus, self.args, self.task_option, self.register_func)"], "bodies_text": "<|body_start_0|>\n self.args = arg_cls.parse()\n self.register_func = register_func\n self.mode = TaskMode(mode)\n from torch import distributed\n self.is_distributed = False\n if distributed.is_available():\n if len(self.args.use_gpus) > 1:\n if not self.args.use_data_parallel:\n self.is_distributed = True\n task_dict = load_task_option_yaml(self.args.task_option_file)\n task_dict = self.modify_task_dict(task_dict)\n self.register_func()\n from ..settings.spaces import Spaces\n self.task_option = Spaces.build(Spaces.NAME.TASK_OPTION, task_dict['ref'], task_dict)\n print(self.task_option)\n<|end_body_0|>\n\n<|body_start_1|>\n task_dict['mode'] = self.mode\n task_dict['resume'] = self.args.resume\n if self.args.pth_path:\n task_dict['model']['pth_path'] = self.args.pth_path\n if self.args.dataset_path:\n task_dict['dataset_path'] = self.args.dataset_path\n if self.args.output_path:\n task_dict['output_path'] = self.args.output_path\n task_dict['is_distributed'] = self.is_distributed\n if self.args.test_option_file:\n if not os.path.isfile(self.args.test_option_file):\n raise FileNotFoundError(f'{self.args.test_option_file} does not exist or is not a file')\n task_dict['test_option'] = load_yaml(self.args.test_option_file)\n task_dict['profiling'] = self.args.profiling\n task_dict['profile_tool'] = self.args.profile_tool\n task_dict['profile_memory'] = self.args.profile_memory\n return task_dict\n<|end_body_1|>\n\n<|body_start_2|>\n if run_task_func is None:\n run_task_func = run_task\n if self.is_distributed:\n import torch.multiprocessing as pt_mp\n from .ddp import launch_ddp_task\n n_gpus = len(self.args.use_gpus)\n pt_mp.spawn(fn=launch_ddp_task, 
nprocs=n_gpus, join=True, args=(n_gpus, run_task_func, self.args, self.task_option, self.register_func, *run_task_func_args))\n else:\n run_task_func(self.args.use_gpus, self.args, self.task_option, self.register_func)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Launcher", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Launcher:\n\n def __init__(self, arg_cls: Type[MainArg], register_func: Callable, mode):\n \"\"\"Base class of launchers for building and running a task properly :param arg_cls: :param register_func: Callable to setup `settings.space.Spaces`, used before building the task :param mode: TaskMode\"\"\"\n <|body_0|>\n\n def modify_task_dict(self, task_dict: dict) -> dict:\n \"\"\"modify the task option dict before building the task option :param task_dict: Dict used to build the task option :return: modified task_dict\"\"\"\n <|body_1|>\n\n def run(self, run_task_func: Callable=None, *run_task_func_args):\n \"\"\"run the task by `run_task_func` in a proper method: single-gpu, multi-gpu DataParallel or multi-gpu DistributedDataParallel :param run_task_func: Callable to run the task :param run_task_func_args: arguments for `run_task_func`: (gpus: Sequence[int], main_args, task_option: TaskOption, register_func: Callable)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.args = arg_cls.parse()\n self.register_func = register_func\n self.mode = TaskMode(mode)\n from torch import distributed\n self.is_distributed = False\n if distributed.is_available():\n if len(self.args.use_gpus) > 1:\n if not self.args.use_data_parallel:\n self.is_distributed = True\n task_dict = load_task_option_yaml(self.args.task_option_file)\n task_dict = self.modify_task_dict(task_dict)\n self.register_func()\n from ..settings.spaces import Spaces\n self.task_option = Spaces.build(Spaces.NAME.TASK_OPTION, task_dict['ref'], task_dict)\n print(self.task_option)\n<|end_body_0|>\n\n<|body_start_1|>\n task_dict['mode'] = self.mode\n task_dict['resume'] = self.args.resume\n if self.args.pth_path:\n task_dict['model']['pth_path'] = self.args.pth_path\n if self.args.dataset_path:\n task_dict['dataset_path'] = self.args.dataset_path\n if self.args.output_path:\n task_dict['output_path'] = self.args.output_path\n task_dict['is_distributed'] = self.is_distributed\n if self.args.test_option_file:\n if not os.path.isfile(self.args.test_option_file):\n raise FileNotFoundError(f'{self.args.test_option_file} does not exist or is not a file')\n task_dict['test_option'] = load_yaml(self.args.test_option_file)\n task_dict['profiling'] = self.args.profiling\n task_dict['profile_tool'] = self.args.profile_tool\n task_dict['profile_memory'] = self.args.profile_memory\n return task_dict\n<|end_body_1|>\n\n<|body_start_2|>\n if run_task_func is None:\n run_task_func = run_task\n if self.is_distributed:\n import torch.multiprocessing as pt_mp\n from .ddp import launch_ddp_task\n n_gpus = len(self.args.use_gpus)\n pt_mp.spawn(fn=launch_ddp_task, nprocs=n_gpus, join=True, args=(n_gpus, run_task_func, self.args, self.task_option, self.register_func, *run_task_func_args))\n else:\n run_task_func(self.args.use_gpus, self.args, self.task_option, self.register_func)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000015", "length_bytes": 6217, "license_type": "no_license", "methods": [{"docstring": "Base class of launchers for building and running a task properly :param arg_cls: :param register_func: Callable to setup 
`settings.space.Spaces`, used before building the task :param mode: TaskMode", "name": "__init__", "signature": "def __init__(self, arg_cls: Type[MainArg], register_func: Callable, mode)"}, {"docstring": "modify the task option dict before building the task option :param task_dict: Dict used to build the task option :return: modified task_dict", "name": "modify_task_dict", "signature": "def modify_task_dict(self, task_dict: dict) -> dict"}, {"docstring": "run the task by `run_task_func` in a proper method: single-gpu, multi-gpu DataParallel or multi-gpu DistributedDataParallel :param run_task_func: Callable to run the task :param run_task_func_args: arguments for `run_task_func`: (gpus: Sequence[int], main_args, task_option: TaskOption, register_func: Callable)", "name": "run", "signature": "def run(self, run_task_func: Callable=None, *run_task_func_args)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_003794", "prompt": "Implement the Python class `Launcher` described below.\n\nClass description:\nImplement the Launcher class.\n\nMethod signatures and docstrings:\n- def __init__(self, arg_cls: Type[MainArg], register_func: Callable, mode): Base class of launchers for building and running a task properly :param arg_cls: :param register_func: Callable to setup `settings.space.Spaces`, used before building the task :param mode: TaskMode\n- def modify_task_dict(self, task_dict: dict) -> dict: modify the task option dict before building the task option :param task_dict: Dict used to build the task option :return: modified task_dict\n- def run(self, run_task_func: Callable=None, *run_task_func_args): run the task by `run_task_func` in a proper method: single-gpu, multi-gpu DataParallel or multi-gpu DistributedDataParallel :param run_task_func: Callable to run the task :param run_task_func_args: arguments for `run_task_func`: (gpus: Sequence[int], main_args, task_option: TaskOption, register_func: Callable)", "prompted_full_text": "Implement the Python class `Launcher` described below.\n\nClass description:\nImplement the Launcher class.\n\nMethod signatures and docstrings:\n- def __init__(self, arg_cls: Type[MainArg], register_func: Callable, mode): Base class of launchers for building and running a task properly :param arg_cls: :param register_func: Callable to setup `settings.space.Spaces`, used before building the task :param mode: TaskMode\n- def modify_task_dict(self, task_dict: dict) -> dict: modify the task option dict before building the task option :param task_dict: Dict used to build the task option :return: modified task_dict\n- def run(self, run_task_func: Callable=None, *run_task_func_args): run the task by `run_task_func` in a proper method: single-gpu, multi-gpu DataParallel or multi-gpu DistributedDataParallel :param run_task_func: Callable to run the task :param run_task_func_args: arguments for `run_task_func`: (gpus: Sequence[int], main_args, task_option: TaskOption, register_func: Callable)\n\n<|skeleton|>\nclass Launcher:\n\n def __init__(self, arg_cls: Type[MainArg], register_func: Callable, mode):\n \"\"\"Base class of launchers for building and running a task properly :param arg_cls: :param register_func: Callable to setup `settings.space.Spaces`, used before building the task :param mode: TaskMode\"\"\"\n <|body_0|>\n\n def modify_task_dict(self, task_dict: dict) -> dict:\n \"\"\"modify the task option dict before building the task option :param task_dict: Dict used to build the task option :return: modified task_dict\"\"\"\n <|body_1|>\n\n def 
run(self, run_task_func: Callable=None, *run_task_func_args):\n \"\"\"run the task by `run_task_func` in a proper method: single-gpu, multi-gpu DataParallel or multi-gpu DistributedDataParallel :param run_task_func: Callable to run the task :param run_task_func_args: arguments for `run_task_func`: (gpus: Sequence[int], main_args, task_option: TaskOption, register_func: Callable)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.args = arg_cls.parse()\n self.register_func = register_func\n self.mode = TaskMode(mode)\n from torch import distributed\n self.is_distributed = False\n if distributed.is_available():\n if len(self.args.use_gpus) > 1:\n if not self.args.use_data_parallel:\n self.is_distributed = True\n task_dict = load_task_option_yaml(self.args.task_option_file)\n task_dict = self.modify_task_dict(task_dict)\n self.register_func()\n from ..settings.spaces import Spaces\n self.task_option = Spaces.build(Spaces.NAME.TASK_OPTION, task_dict['ref'], task_dict)\n print(self.task_option)\n<|end_body_0|>\n\n<|body_start_1|>\n task_dict['mode'] = self.mode\n task_dict['resume'] = self.args.resume\n if self.args.pth_path:\n task_dict['model']['pth_path'] = self.args.pth_path\n if self.args.dataset_path:\n task_dict['dataset_path'] = self.args.dataset_path\n if self.args.output_path:\n task_dict['output_path'] = self.args.output_path\n task_dict['is_distributed'] = self.is_distributed\n if self.args.test_option_file:\n if not os.path.isfile(self.args.test_option_file):\n raise FileNotFoundError(f'{self.args.test_option_file} does not exist or is not a file')\n task_dict['test_option'] = load_yaml(self.args.test_option_file)\n task_dict['profiling'] = self.args.profiling\n task_dict['profile_tool'] = self.args.profile_tool\n task_dict['profile_memory'] = self.args.profile_memory\n return task_dict\n<|end_body_1|>\n\n<|body_start_2|>\n if run_task_func is None:\n run_task_func = run_task\n if self.is_distributed:\n import torch.multiprocessing as pt_mp\n from .ddp import launch_ddp_task\n n_gpus = len(self.args.use_gpus)\n pt_mp.spawn(fn=launch_ddp_task, nprocs=n_gpus, join=True, args=(n_gpus, run_task_func, self.args, self.task_option, self.register_func, *run_task_func_args))\n else:\n run_task_func(self.args.use_gpus, self.args, self.task_option, self.register_func)\n<|end_body_2|>\n", "revision_id": "c9c2e32b484687ef5b110af3dd39f86ecfcb5337", "skeleton": "<|skeleton|>\nclass Launcher:\n\n def __init__(self, arg_cls: Type[MainArg], register_func: Callable, mode):\n \"\"\"Base class of launchers for building and running a task properly :param arg_cls: :param register_func: Callable to setup `settings.space.Spaces`, used before building the task :param mode: TaskMode\"\"\"\n <|body_0|>\n\n def modify_task_dict(self, task_dict: dict) -> dict:\n \"\"\"modify the task option dict before building the task option :param task_dict: Dict used to build the task option :return: modified task_dict\"\"\"\n <|body_1|>\n\n def run(self, run_task_func: Callable=None, *run_task_func_args):\n \"\"\"run the task by `run_task_func` in a proper method: single-gpu, multi-gpu DataParallel or multi-gpu DistributedDataParallel :param run_task_func: Callable to run the task :param run_task_func_args: arguments for `run_task_func`: (gpus: Sequence[int], main_args, task_option: TaskOption, register_func: Callable)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, 
"solution": "class Launcher:\n def __init__(self, arg_cls: Type[MainArg], register_func: Callable, mode):\n \"\"\"Base class of launchers for building and running a task properly :param arg_cls: :param register_func: Callable to setup `settings.space.Spaces`, used before building the task :param mode: TaskMode\"\"\"\n self.args = arg_cls.parse()\n self.register_func = register_func\n self.mode = TaskMode(mode)\n from torch import distributed\n self.is_distributed = False\n if distributed.is_available():\n if len(self.args.use_gpus) > 1:\n if not self.args.use_data_parallel:\n self.is_distributed = True\n task_dict = load_task_option_yaml(self.args.task_option_file)\n task_dict = self.modify_task_dict(task_dict)\n self.register_func()\n from ..settings.spaces import Spaces\n self.task_option = Spaces.build(Spaces.NAME.TASK_OPTION, task_dict['ref'], task_dict)\n print(self.task_option)\n\n def modify_task_dict(self, task_dict: dict) -> dict:\n \"\"\"modify the task option dict before building the task option :param task_dict: Dict used to build the task option :return: modified task_dict\"\"\"\n task_dict['mode'] = self.mode\n task_dict['resume'] = self.args.resume\n if self.args.pth_path:\n task_dict['model']['pth_path'] = self.args.pth_path\n if self.args.dataset_path:\n task_dict['dataset_path'] = self.args.dataset_path\n if self.args.output_path:\n task_dict['output_path'] = self.args.output_path\n task_dict['is_distributed'] = self.is_distributed\n if self.args.test_option_file:\n if not os.path.isfile(self.args.test_option_file):\n raise FileNotFoundError(f'{self.args.test_option_file} does not exist or is not a file')\n task_dict['test_option'] = load_yaml(self.args.test_option_file)\n task_dict['profiling'] = self.args.profiling\n task_dict['profile_tool'] = self.args.profile_tool\n task_dict['profile_memory'] = self.args.profile_memory\n return task_dict\n\n def run(self, run_task_func: Callable=None, *run_task_func_args):\n \"\"\"run the task by `run_task_func` in a proper method: single-gpu, multi-gpu DataParallel or multi-gpu DistributedDataParallel :param run_task_func: Callable to run the task :param run_task_func_args: arguments for `run_task_func`: (gpus: Sequence[int], main_args, task_option: TaskOption, register_func: Callable)\"\"\"\n if run_task_func is None:\n run_task_func = run_task\n if self.is_distributed:\n import torch.multiprocessing as pt_mp\n from .ddp import launch_ddp_task\n n_gpus = len(self.args.use_gpus)\n pt_mp.spawn(fn=launch_ddp_task, nprocs=n_gpus, join=True, args=(n_gpus, run_task_func, self.args, self.task_option, self.register_func, *run_task_func_args))\n else:\n run_task_func(self.args.use_gpus, self.args, self.task_option, self.register_func)\n", "source": "the_stack_v2_python_sparse", "source_path": "src/pytorch_helper/launcher/launcher.py", "source_repo": "Aaronswei/BEVNet", "split": "val", "star_events_count": 0}
{"blob_id": "b468362ab150b41326c9f328203652541a2d3b9d", "bodies": ["while left >= 0 and right < len(s) and (s[left] == s[right]):\n left -= 1\n right += 1\nreturn right - left - 1", "if not s or len(s) < 1:\n return ''\nleft = right = 0\nfor index in range(len(s)):\n odd_len = self.expand_center(s, index, index)\n even_len = self.expand_center(s, index, index + 1)\n length = max(odd_len, even_len)\n if length > left + right:\n left = index - (length - 1) // 2\n right = index + length // 2\nreturn s[left:right + 1]"], "bodies_text": "<|body_start_0|>\n while left >= 0 and right < len(s) and (s[left] == s[right]):\n left -= 1\n right += 1\n return right - left - 1\n<|end_body_0|>\n\n<|body_start_1|>\n if not s or len(s) < 1:\n return ''\n left = right = 0\n for index in range(len(s)):\n odd_len = self.expand_center(s, index, index)\n even_len = self.expand_center(s, index, index + 1)\n length = max(odd_len, even_len)\n if length > left + right:\n left = index - (length - 1) // 2\n right = index + length // 2\n return s[left:right + 1]\n<|end_body_1|>\n", "class_docstring": "", "class_name": "String", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass String:\n\n def expand_center(self, s: str, left: int, right: int) -> int:\n \"\"\":param s: :param l: :param r: :return:\"\"\"\n <|body_0|>\n\n def longest_palindromic_substring(self, s: str) -> str:\n \"\"\"Approach: Expand Center Time Complexity: O(N^2) Space Complexity: O(1) :param s: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n while left >= 0 and right < len(s) and (s[left] == s[right]):\n left -= 1\n right += 1\n return right - left - 1\n<|end_body_0|>\n\n<|body_start_1|>\n if not s or len(s) < 1:\n return ''\n left = right = 0\n for index in range(len(s)):\n odd_len = self.expand_center(s, index, index)\n even_len = self.expand_center(s, index, index + 1)\n length = max(odd_len, even_len)\n if length > left + right:\n left = index - (length - 1) // 2\n right = index + length // 2\n return s[left:right + 1]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000016", "length_bytes": 1441, "license_type": "no_license", "methods": [{"docstring": ":param s: :param l: :param r: :return:", "name": "expand_center", "signature": "def expand_center(self, s: str, left: int, right: int) -> int"}, {"docstring": "Approach: Expand Center Time Complexity: O(N^2) Space Complexity: O(1) :param s: :return:", "name": "longest_palindromic_substring", "signature": "def longest_palindromic_substring(self, s: str) -> str"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_039251", "prompt": "Implement the Python class `String` described below.\n\nClass description:\nImplement the String class.\n\nMethod signatures and docstrings:\n- def expand_center(self, s: str, left: int, right: int) -> int: :param s: :param l: :param r: :return:\n- def longest_palindromic_substring(self, s: str) -> str: Approach: Expand Center Time Complexity: O(N^2) Space Complexity: O(1) :param s: :return:", "prompted_full_text": "Implement the Python class `String` described below.\n\nClass description:\nImplement the String class.\n\nMethod signatures and docstrings:\n- def expand_center(self, s: str, left: int, right: int) -> int: :param s: :param l: :param r: :return:\n- def longest_palindromic_substring(self, s: str) -> str: Approach: Expand Center Time Complexity: O(N^2) Space Complexity: O(1) :param s: :return:\n\n<|skeleton|>\nclass String:\n\n def 
expand_center(self, s: str, left: int, right: int) -> int:\n \"\"\":param s: :param l: :param r: :return:\"\"\"\n <|body_0|>\n\n def longest_palindromic_substring(self, s: str) -> str:\n \"\"\"Approach: Expand Center Time Complexity: O(N^2) Space Complexity: O(1) :param s: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n while left >= 0 and right < len(s) and (s[left] == s[right]):\n left -= 1\n right += 1\n return right - left - 1\n<|end_body_0|>\n\n<|body_start_1|>\n if not s or len(s) < 1:\n return ''\n left = right = 0\n for index in range(len(s)):\n odd_len = self.expand_center(s, index, index)\n even_len = self.expand_center(s, index, index + 1)\n length = max(odd_len, even_len)\n if length > left + right:\n left = index - (length - 1) // 2\n right = index + length // 2\n return s[left:right + 1]\n<|end_body_1|>\n", "revision_id": "65cc78b5afa0db064f9fe8f06597e3e120f7363d", "skeleton": "<|skeleton|>\nclass String:\n\n def expand_center(self, s: str, left: int, right: int) -> int:\n \"\"\":param s: :param l: :param r: :return:\"\"\"\n <|body_0|>\n\n def longest_palindromic_substring(self, s: str) -> str:\n \"\"\"Approach: Expand Center Time Complexity: O(N^2) Space Complexity: O(1) :param s: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class String:\n def expand_center(self, s: str, left: int, right: int) -> int:\n \"\"\":param s: :param l: :param r: :return:\"\"\"\n while left >= 0 and right < len(s) and (s[left] == s[right]):\n left -= 1\n right += 1\n return right - left - 1\n\n def longest_palindromic_substring(self, s: str) -> str:\n \"\"\"Approach: Expand Center Time Complexity: O(N^2) Space Complexity: O(1) :param s: :return:\"\"\"\n if not s or len(s) < 1:\n return ''\n left = right = 0\n for index in range(len(s)):\n odd_len = self.expand_center(s, index, index)\n even_len = self.expand_center(s, index, index + 1)\n length = max(odd_len, even_len)\n if length > left + right:\n left = index - (length - 1) // 2\n right = index + length // 2\n return s[left:right + 1]\n", "source": "the_stack_v2_python_sparse", "source_path": "revisited__2021/math_and_string/longest_palindromic_substring.py", "source_repo": "Shiv2157k/leet_code", "split": "val", "star_events_count": 1}
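The `String` record above implements expand-around-center. A self-contained sketch of the same technique, using the conventional width test `length > end - start` (the record's `left + right` comparison agrees with it only while `left` is 0, so the two can diverge on longer inputs):

def expand(s: str, left: int, right: int) -> int:
    # Grow the window while both ends match; return the palindrome length.
    while left >= 0 and right < len(s) and s[left] == s[right]:
        left -= 1
        right += 1
    return right - left - 1

def longest_palindrome(s: str) -> str:
    if not s:
        return ''
    start = end = 0
    for i in range(len(s)):
        # Odd-length palindromes center on i; even-length on the gap (i, i+1).
        length = max(expand(s, i, i), expand(s, i, i + 1))
        if length > end - start:  # compare against the current window width
            start = i - (length - 1) // 2
            end = i + length // 2
    return s[start:end + 1]

assert longest_palindrome('babad') in ('bab', 'aba')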
{"blob_id": "fa8c85e32cb877e06d1df3183a229012768b1197", "bodies": ["self.CsvLocations = []\nself.ApiLocations = []\nself.columns = ['Address', 'Key', 'URL', 'Location']\nself.ApiURL = 'https://www.vaccinespotter.org/api/v0/stores/WA/pharmaca.json'\nself.csvFile = 'PharmacaLoc.csv'\nself.headers = {'if-modified-since': ''}\nself.ReadLocations()", "now = datetime.utcnow()\ntimeString = now.strftime('%a, %d %B %Y %H:%M:%S GMT')\nself.headers['if-modified-since'] = timeString\nr = requests.get(self.ApiURL, headers=self.headers)\nself.ApiLocations = r.json()\nwith open(self.csvFile) as csvfile:\n locReader = csv.reader(csvfile)\n for row in locReader:\n locationDic = {}\n if row[0] == self.columns[0]:\n continue\n for i, col in enumerate(self.columns):\n locationDic[col] = row[i]\n self.CsvLocations.append(locationDic)\nfor ApiLoc in self.ApiLocations:\n match = False\n for CsvLoc in self.CsvLocations:\n if ApiLoc['address'] == CsvLoc['Address']:\n CsvLoc['available'] = ApiLoc['appointments_available']\n match = True\n if not match:\n logging.info('Pharmaca location {} has no match in CSV'.format(ApiLoc['address']))\nreturn", "cols = self.columns\nfor loc in self.CsvLocations:\n scraper = Pharmaca(loc)\n keys, case, text = scraper.MakeGetRequest()\n logging.debug(f'keys={keys} case={case}')\nreturn"], "bodies_text": "<|body_start_0|>\n self.CsvLocations = []\n self.ApiLocations = []\n self.columns = ['Address', 'Key', 'URL', 'Location']\n self.ApiURL = 'https://www.vaccinespotter.org/api/v0/stores/WA/pharmaca.json'\n self.csvFile = 'PharmacaLoc.csv'\n self.headers = {'if-modified-since': ''}\n self.ReadLocations()\n<|end_body_0|>\n\n<|body_start_1|>\n now = datetime.utcnow()\n timeString = now.strftime('%a, %d %B %Y %H:%M:%S GMT')\n self.headers['if-modified-since'] = timeString\n r = requests.get(self.ApiURL, headers=self.headers)\n self.ApiLocations = r.json()\n with open(self.csvFile) as csvfile:\n locReader = csv.reader(csvfile)\n for row in locReader:\n locationDic = {}\n if row[0] == self.columns[0]:\n continue\n for i, col in enumerate(self.columns):\n locationDic[col] = row[i]\n self.CsvLocations.append(locationDic)\n for ApiLoc in self.ApiLocations:\n match = False\n for CsvLoc in self.CsvLocations:\n if ApiLoc['address'] == CsvLoc['Address']:\n CsvLoc['available'] = ApiLoc['appointments_available']\n match = True\n if not match:\n logging.info('Pharmaca location {} has no match in CSV'.format(ApiLoc['address']))\n return\n<|end_body_1|>\n\n<|body_start_2|>\n cols = self.columns\n for loc in self.CsvLocations:\n scraper = Pharmaca(loc)\n keys, case, text = scraper.MakeGetRequest()\n logging.debug(f'keys={keys} case={case}')\n return\n<|end_body_2|>\n", "class_docstring": "wrapper function for Pharmaca locations. Instantiates an instance of the Pharmaca class for each location", "class_name": "PharmacaWrapper", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PharmacaWrapper:\n \"\"\"wrapper function for Pharmaca locations. Instantiates an instance of the Pharmaca class for each location\"\"\"\n\n def __init__(self):\n \"\"\"initialize the Pharmaca class and call a method to load the csv with different location data\"\"\"\n <|body_0|>\n\n def ReadLocations(self):\n \"\"\"open the csv storing location data and read into self.CsvLocations. Also call the API to read availability information from vaccine-finder site. 
Combine these two dictionaries to get full info on each site\"\"\"\n <|body_1|>\n\n def MakeGetRequest(self):\n \"\"\"create a Pharmaca scraper class for each location and call method to scrape location and update air table\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.CsvLocations = []\n self.ApiLocations = []\n self.columns = ['Address', 'Key', 'URL', 'Location']\n self.ApiURL = 'https://www.vaccinespotter.org/api/v0/stores/WA/pharmaca.json'\n self.csvFile = 'PharmacaLoc.csv'\n self.headers = {'if-modified-since': ''}\n self.ReadLocations()\n<|end_body_0|>\n\n<|body_start_1|>\n now = datetime.utcnow()\n timeString = now.strftime('%a, %d %B %Y %H:%M:%S GMT')\n self.headers['if-modified-since'] = timeString\n r = requests.get(self.ApiURL, headers=self.headers)\n self.ApiLocations = r.json()\n with open(self.csvFile) as csvfile:\n locReader = csv.reader(csvfile)\n for row in locReader:\n locationDic = {}\n if row[0] == self.columns[0]:\n continue\n for i, col in enumerate(self.columns):\n locationDic[col] = row[i]\n self.CsvLocations.append(locationDic)\n for ApiLoc in self.ApiLocations:\n match = False\n for CsvLoc in self.CsvLocations:\n if ApiLoc['address'] == CsvLoc['Address']:\n CsvLoc['available'] = ApiLoc['appointments_available']\n match = True\n if not match:\n logging.info('Pharmaca location {} has no match in CSV'.format(ApiLoc['address']))\n return\n<|end_body_1|>\n\n<|body_start_2|>\n cols = self.columns\n for loc in self.CsvLocations:\n scraper = Pharmaca(loc)\n keys, case, text = scraper.MakeGetRequest()\n logging.debug(f'keys={keys} case={case}')\n return\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000017", "length_bytes": 3430, "license_type": "permissive", "methods": [{"docstring": "initialize the Pharmaca class and call a method to load the csv with different location data", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "open the csv storing location data and read into self.CsvLocations. Also call the API to read availability information from vaccine-finder site. Combine these two dictionaries to get full info on each site", "name": "ReadLocations", "signature": "def ReadLocations(self)"}, {"docstring": "create a Pharmaca scraper class for each location and call method to scrape location and update air table", "name": "MakeGetRequest", "signature": "def MakeGetRequest(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_021805", "prompt": "Implement the Python class `PharmacaWrapper` described below.\n\nClass description:\nwrapper function for Pharmaca locations. Instantiates an instance of the Pharmaca class for each location\n\nMethod signatures and docstrings:\n- def __init__(self): initialize the Pharmaca class and call a method to load the csv with different location data\n- def ReadLocations(self): open the csv storing location data and read into self.CsvLocations. Also call the API to read availability information from vaccine-finder site. Combine these two dictionaries to get full info on each site\n- def MakeGetRequest(self): create a Pharmaca scraper class for each location and call method to scrape location and update air table", "prompted_full_text": "Implement the Python class `PharmacaWrapper` described below.\n\nClass description:\nwrapper function for Pharmaca locations. 
Instantiates an instance of the Pharmaca class for each location\n\nMethod signatures and docstrings:\n- def __init__(self): initialize the Pharmaca class and call a method to load the csv with different location data\n- def ReadLocations(self): open the csv storing location data and read into self.CsvLocations. Also call the API to read availability information from vaccine-finder site. Combine these two dictionaries to get full info on each site\n- def MakeGetRequest(self): create a Pharmaca scraper class for each location and call method to scrape location and update air table\n\n<|skeleton|>\nclass PharmacaWrapper:\n \"\"\"wrapper function for Pharmaca locations. Instantiates an instance of the Pharmaca class for each location\"\"\"\n\n def __init__(self):\n \"\"\"initialize the Pharmaca class and call a method to load the csv with different location data\"\"\"\n <|body_0|>\n\n def ReadLocations(self):\n \"\"\"open the csv storing location data and read into self.CsvLocations. Also call the API to read availability information from vaccine-finder site. Combine these two dictionaries to get full info on each site\"\"\"\n <|body_1|>\n\n def MakeGetRequest(self):\n \"\"\"create a Pharmaca scraper class for each location and call method to scrape location and update air table\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.CsvLocations = []\n self.ApiLocations = []\n self.columns = ['Address', 'Key', 'URL', 'Location']\n self.ApiURL = 'https://www.vaccinespotter.org/api/v0/stores/WA/pharmaca.json'\n self.csvFile = 'PharmacaLoc.csv'\n self.headers = {'if-modified-since': ''}\n self.ReadLocations()\n<|end_body_0|>\n\n<|body_start_1|>\n now = datetime.utcnow()\n timeString = now.strftime('%a, %d %B %Y %H:%M:%S GMT')\n self.headers['if-modified-since'] = timeString\n r = requests.get(self.ApiURL, headers=self.headers)\n self.ApiLocations = r.json()\n with open(self.csvFile) as csvfile:\n locReader = csv.reader(csvfile)\n for row in locReader:\n locationDic = {}\n if row[0] == self.columns[0]:\n continue\n for i, col in enumerate(self.columns):\n locationDic[col] = row[i]\n self.CsvLocations.append(locationDic)\n for ApiLoc in self.ApiLocations:\n match = False\n for CsvLoc in self.CsvLocations:\n if ApiLoc['address'] == CsvLoc['Address']:\n CsvLoc['available'] = ApiLoc['appointments_available']\n match = True\n if not match:\n logging.info('Pharmaca location {} has no match in CSV'.format(ApiLoc['address']))\n return\n<|end_body_1|>\n\n<|body_start_2|>\n cols = self.columns\n for loc in self.CsvLocations:\n scraper = Pharmaca(loc)\n keys, case, text = scraper.MakeGetRequest()\n logging.debug(f'keys={keys} case={case}')\n return\n<|end_body_2|>\n", "revision_id": "28248155c136f9b267f0ada7749d30848de0981f", "skeleton": "<|skeleton|>\nclass PharmacaWrapper:\n \"\"\"wrapper function for Pharmaca locations. Instantiates an instance of the Pharmaca class for each location\"\"\"\n\n def __init__(self):\n \"\"\"initialize the Pharmaca class and call a method to load the csv with different location data\"\"\"\n <|body_0|>\n\n def ReadLocations(self):\n \"\"\"open the csv storing location data and read into self.CsvLocations. Also call the API to read availability information from vaccine-finder site. 
Combine these two dictionaries to get full info on each site\"\"\"\n <|body_1|>\n\n def MakeGetRequest(self):\n \"\"\"create a Pharmaca scraper class for each location and call method to scrape location and update air table\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class PharmacaWrapper:\n \"\"\"wrapper function for Pharmaca locations. Instantiates an instance of the Pharmaca class for each location\"\"\"\n\n def __init__(self):\n \"\"\"initialize the Pharmaca class and call a method to load the csv with different location data\"\"\"\n self.CsvLocations = []\n self.ApiLocations = []\n self.columns = ['Address', 'Key', 'URL', 'Location']\n self.ApiURL = 'https://www.vaccinespotter.org/api/v0/stores/WA/pharmaca.json'\n self.csvFile = 'PharmacaLoc.csv'\n self.headers = {'if-modified-since': ''}\n self.ReadLocations()\n\n def ReadLocations(self):\n \"\"\"open the csv storing location data and read into self.CsvLocations. Also call the API to read availability information from vaccine-finder site. Combine these two dictionaries to get full info on each site\"\"\"\n now = datetime.utcnow()\n timeString = now.strftime('%a, %d %B %Y %H:%M:%S GMT')\n self.headers['if-modified-since'] = timeString\n r = requests.get(self.ApiURL, headers=self.headers)\n self.ApiLocations = r.json()\n with open(self.csvFile) as csvfile:\n locReader = csv.reader(csvfile)\n for row in locReader:\n locationDic = {}\n if row[0] == self.columns[0]:\n continue\n for i, col in enumerate(self.columns):\n locationDic[col] = row[i]\n self.CsvLocations.append(locationDic)\n for ApiLoc in self.ApiLocations:\n match = False\n for CsvLoc in self.CsvLocations:\n if ApiLoc['address'] == CsvLoc['Address']:\n CsvLoc['available'] = ApiLoc['appointments_available']\n match = True\n if not match:\n logging.info('Pharmaca location {} has no match in CSV'.format(ApiLoc['address']))\n return\n\n def MakeGetRequest(self):\n \"\"\"create a Pharmaca scraper class for each location and call method to scrape location and update air table\"\"\"\n cols = self.columns\n for loc in self.CsvLocations:\n scraper = Pharmaca(loc)\n keys, case, text = scraper.MakeGetRequest()\n logging.debug(f'keys={keys} case={case}')\n return\n", "source": "the_stack_v2_python_sparse", "source_path": "python/PharmacaWrapper.py", "source_repo": "CovidWA/scrapers-oss", "split": "val", "star_events_count": 0}
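The `PharmacaWrapper` record above issues a conditional GET with an `if-modified-since` header. A hedged sketch of that request follows; note that HTTP dates use the abbreviated month (`%b`, per RFC 7231), where the record's body formats with `%B`, and that a 304 response carries no body, so `r.json()` is only safe on a 200:

from datetime import datetime, timezone
import requests

# URL taken from the record; timeout is an addition for illustration.
url = 'https://www.vaccinespotter.org/api/v0/stores/WA/pharmaca.json'
stamp = datetime.now(timezone.utc).strftime('%a, %d %b %Y %H:%M:%S GMT')
r = requests.get(url, headers={'if-modified-since': stamp}, timeout=10)
locations = r.json() if r.status_code == 200 else []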
{"blob_id": "c0cb427eaba296d5e957836e51d2581d61d561b4", "bodies": ["self.general_segmentator = Segmentator(ADE20K)\nself.face_detection = OpenCVFaceDetector()\nself.face_estimation = {name: model() for name, model in FACE_ESTIMATOR_MODELS.items()}\nself.face_segmentator = Segmentator(FACE_PARSING)\nself.object_counter = YOLOv5()", "details = {}\ngeneral_segmentation_mask = self.general_segmentator.predict(rgb_uint8_image=rgb_uint8_img)\ndetails[FIELDS.GENERAL_PARTS] = self.general_segmentator.get_elements_in_image(rgb_uint8_image=rgb_uint8_img, segmentation_mask=general_segmentation_mask)\ndetails[FIELDS.OBJECT_COUNTING] = self.object_counter.get_details(rgb_uint8_image=rgb_uint8_img)\nsimplified_colors_image = self.general_segmentator.get_simplified_colors_segmented_prediction(img=rgb_uint8_img, segmentation_mask=general_segmentation_mask, with_borders=True)\nrgb_uint8_img = resize(image=rgb_uint8_img, output_shape=general_segmentation_mask.shape, preserve_range=True).astype(np.uint8)\nfaces, boxes = self.face_detection.get_cropped_faces(rgb_uint8_image=rgb_uint8_img, margin=0.0, return_box_position=True)\nif len(faces) > 0:\n faces = self.face_detection.get_cropped_faces(rgb_uint8_image=rgb_uint8_img, margin=0.2, boxes=boxes)\n margin_faces, margin_boxes = self.face_detection.get_cropped_faces(rgb_uint8_image=rgb_uint8_img, margin=0.3, boxes=boxes, return_box_position=True)\n details[FIELDS.FACES] = []\n for i, (face, margin_face, margin_box, strict_box) in enumerate(zip(faces, margin_faces, margin_boxes, boxes)):\n x1, y1, x2, y2 = strict_box\n estimations = {aspect: estimator.predict(face=face) for aspect, estimator in self.face_estimation.items()}\n estimations[FIELDS.TOTAL_AREA] = (y2 - y1) * (x2 - x1) / (rgb_uint8_img.shape[0] * rgb_uint8_img.shape[1])\n segmented_face_mask = self.face_segmentator.predict(rgb_uint8_image=margin_face)\n simplified_colors_face = self.face_segmentator.get_simplified_colors_segmented_prediction(img=margin_face, segmentation_mask=segmented_face_mask, with_borders=True, dilate_borders=True)\n simplified_colors_face = resize(image=simplified_colors_face, output_shape=margin_face.shape, preserve_range=True, order=3).astype(np.uint8)\n estimations[FIELDS.FACE_PARTS] = self.face_segmentator.get_elements_in_image(rgb_uint8_image=margin_face, segmentation_mask=segmented_face_mask)\n fuse_images_without_background(img=simplified_colors_image, img_to_insert=simplified_colors_face, img_to_insert_segmentation_mask=segmented_face_mask, box=margin_box)\n details[FIELDS.FACES].append(estimations)\nelse:\n details[FIELDS.FACES] = None\nif save_anonymized_img_as is not None:\n dirname = os.path.dirname(save_anonymized_img_as)\n if not os.path.isdir(dirname):\n os.makedirs(dirname)\n Image.fromarray(simplified_colors_image).save(fp=save_anonymized_img_as)\nreturn details"], "bodies_text": "<|body_start_0|>\n self.general_segmentator = Segmentator(ADE20K)\n self.face_detection = OpenCVFaceDetector()\n self.face_estimation = {name: model() for name, model in FACE_ESTIMATOR_MODELS.items()}\n self.face_segmentator = Segmentator(FACE_PARSING)\n self.object_counter = YOLOv5()\n<|end_body_0|>\n\n<|body_start_1|>\n details = {}\n general_segmentation_mask = self.general_segmentator.predict(rgb_uint8_image=rgb_uint8_img)\n details[FIELDS.GENERAL_PARTS] = self.general_segmentator.get_elements_in_image(rgb_uint8_image=rgb_uint8_img, segmentation_mask=general_segmentation_mask)\n details[FIELDS.OBJECT_COUNTING] = self.object_counter.get_details(rgb_uint8_image=rgb_uint8_img)\n 
simplified_colors_image = self.general_segmentator.get_simplified_colors_segmented_prediction(img=rgb_uint8_img, segmentation_mask=general_segmentation_mask, with_borders=True)\n rgb_uint8_img = resize(image=rgb_uint8_img, output_shape=general_segmentation_mask.shape, preserve_range=True).astype(np.uint8)\n faces, boxes = self.face_detection.get_cropped_faces(rgb_uint8_image=rgb_uint8_img, margin=0.0, return_box_position=True)\n if len(faces) > 0:\n faces = self.face_detection.get_cropped_faces(rgb_uint8_image=rgb_uint8_img, margin=0.2, boxes=boxes)\n margin_faces, margin_boxes = self.face_detection.get_cropped_faces(rgb_uint8_image=rgb_uint8_img, margin=0.3, boxes=boxes, return_box_position=True)\n details[FIELDS.FACES] = []\n for i, (face, margin_face, margin_box, strict_box) in enumerate(zip(faces, margin_faces, margin_boxes, boxes)):\n x1, y1, x2, y2 = strict_box\n estimations = {aspect: estimator.predict(face=face) for aspect, estimator in self.face_estimation.items()}\n estimations[FIELDS.TOTAL_AREA] = (y2 - y1) * (x2 - x1) / (rgb_uint8_img.shape[0] * rgb_uint8_img.shape[1])\n segmented_face_mask = self.face_segmentator.predict(rgb_uint8_image=margin_face)\n simplified_colors_face = self.face_segmentator.get_simplified_colors_segmented_prediction(img=margin_face, segmentation_mask=segmented_face_mask, with_borders=True, dilate_borders=True)\n simplified_colors_face = resize(image=simplified_colors_face, output_shape=margin_face.shape, preserve_range=True, order=3).astype(np.uint8)\n estimations[FIELDS.FACE_PARTS] = self.face_segmentator.get_elements_in_image(rgb_uint8_image=margin_face, segmentation_mask=segmented_face_mask)\n fuse_images_without_background(img=simplified_colors_image, img_to_insert=simplified_colors_face, img_to_insert_segmentation_mask=segmented_face_mask, box=margin_box)\n details[FIELDS.FACES].append(estimations)\n else:\n details[FIELDS.FACES] = None\n if save_anonymized_img_as is not None:\n dirname = os.path.dirname(save_anonymized_img_as)\n if not os.path.isdir(dirname):\n os.makedirs(dirname)\n Image.fromarray(simplified_colors_image).save(fp=save_anonymized_img_as)\n return details\n<|end_body_1|>\n", "class_docstring": "Feature extraction pipeline which implement all the Feature Extraction modules defined into Models in order to extract all the details within an image and to return them into a standard dictionary form. Note that, as this project is intended to be centered in Data Gathering and Data Science purposes, most Feature Extraction models are not trained from scratch, but borrowed by different authors and hubs that offered pretrained versions of them as open source tools. Every one of them is cited into the correspondent Citation.md. Please take a look at their open repositories if you also want to download them (and star them). Note that some of their implementations also does not come from scratch", "class_name": "Pipeline", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Pipeline:\n \"\"\"Feature extraction pipeline which implement all the Feature Extraction modules defined into Models in order to extract all the details within an image and to return them into a standard dictionary form. Note that, as this project is intended to be centered in Data Gathering and Data Science purposes, most Feature Extraction models are not trained from scratch, but borrowed by different authors and hubs that offered pretrained versions of them as open source tools. 
Every one of them is cited into the correspondent Citation.md. Please take a look at their open repositories if you also want to download them (and star them). Note that some of their implementations also does not come from scratch\"\"\"\n\n def __init__(self):\n \"\"\"Initiate all the Feature extraction models. Including ADE20K segmentator (Of keras-segmentation library), a Face Detector (offered by OpenCV), different estimators of face characteristics (Gender, Age and Expression (Oferred by their correspondent authors)), a Face Parsing Segmentator model (adapted to Keras and pretrained by its correspondent author), and the YOLOv5 object detector (offered its author through torch hub).\"\"\"\n <|body_0|>\n\n def get_details_of_image(self, rgb_uint8_img, save_anonymized_img_as=None):\n \"\"\"Process the image with all the feature extractors within the Pipeline for squeezing all the information within it. :param rgb_uint8_img: HxWx3 numpy image in uint8 format. Image from which squeeze all information. :param save_anonymized_img_as: String Path. Path where to save the anonymized segmentation map. If no path is given, the segmentation map is not saved. Default: None. :return: Dictionary with all details of the image. Being this details: For each object detected by the general segmentator: The object, its area, its median basic color and its median xkcd color. Ordered by area. For each face detected: The apparent age, the apparent gender and the apparent expression. And for each pa\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.general_segmentator = Segmentator(ADE20K)\n self.face_detection = OpenCVFaceDetector()\n self.face_estimation = {name: model() for name, model in FACE_ESTIMATOR_MODELS.items()}\n self.face_segmentator = Segmentator(FACE_PARSING)\n self.object_counter = YOLOv5()\n<|end_body_0|>\n\n<|body_start_1|>\n details = {}\n general_segmentation_mask = self.general_segmentator.predict(rgb_uint8_image=rgb_uint8_img)\n details[FIELDS.GENERAL_PARTS] = self.general_segmentator.get_elements_in_image(rgb_uint8_image=rgb_uint8_img, segmentation_mask=general_segmentation_mask)\n details[FIELDS.OBJECT_COUNTING] = self.object_counter.get_details(rgb_uint8_image=rgb_uint8_img)\n simplified_colors_image = self.general_segmentator.get_simplified_colors_segmented_prediction(img=rgb_uint8_img, segmentation_mask=general_segmentation_mask, with_borders=True)\n rgb_uint8_img = resize(image=rgb_uint8_img, output_shape=general_segmentation_mask.shape, preserve_range=True).astype(np.uint8)\n faces, boxes = self.face_detection.get_cropped_faces(rgb_uint8_image=rgb_uint8_img, margin=0.0, return_box_position=True)\n if len(faces) > 0:\n faces = self.face_detection.get_cropped_faces(rgb_uint8_image=rgb_uint8_img, margin=0.2, boxes=boxes)\n margin_faces, margin_boxes = self.face_detection.get_cropped_faces(rgb_uint8_image=rgb_uint8_img, margin=0.3, boxes=boxes, return_box_position=True)\n details[FIELDS.FACES] = []\n for i, (face, margin_face, margin_box, strict_box) in enumerate(zip(faces, margin_faces, margin_boxes, boxes)):\n x1, y1, x2, y2 = strict_box\n estimations = {aspect: estimator.predict(face=face) for aspect, estimator in self.face_estimation.items()}\n estimations[FIELDS.TOTAL_AREA] = (y2 - y1) * (x2 - x1) / (rgb_uint8_img.shape[0] * rgb_uint8_img.shape[1])\n segmented_face_mask = self.face_segmentator.predict(rgb_uint8_image=margin_face)\n simplified_colors_face = self.face_segmentator.get_simplified_colors_segmented_prediction(img=margin_face, 
segmentation_mask=segmented_face_mask, with_borders=True, dilate_borders=True)\n simplified_colors_face = resize(image=simplified_colors_face, output_shape=margin_face.shape, preserve_range=True, order=3).astype(np.uint8)\n estimations[FIELDS.FACE_PARTS] = self.face_segmentator.get_elements_in_image(rgb_uint8_image=margin_face, segmentation_mask=segmented_face_mask)\n fuse_images_without_background(img=simplified_colors_image, img_to_insert=simplified_colors_face, img_to_insert_segmentation_mask=segmented_face_mask, box=margin_box)\n details[FIELDS.FACES].append(estimations)\n else:\n details[FIELDS.FACES] = None\n if save_anonymized_img_as is not None:\n dirname = os.path.dirname(save_anonymized_img_as)\n if not os.path.isdir(dirname):\n os.makedirs(dirname)\n Image.fromarray(simplified_colors_image).save(fp=save_anonymized_img_as)\n return details\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000018", "length_bytes": 8431, "license_type": "no_license", "methods": [{"docstring": "Initiate all the Feature extraction models. Including ADE20K segmentator (Of keras-segmentation library), a Face Detector (offered by OpenCV), different estimators of face characteristics (Gender, Age and Expression (Oferred by their correspondent authors)), a Face Parsing Segmentator model (adapted to Keras and pretrained by its correspondent author), and the YOLOv5 object detector (offered its author through torch hub).", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Process the image with all the feature extractors within the Pipeline for squeezing all the information within it. :param rgb_uint8_img: HxWx3 numpy image in uint8 format. Image from which squeeze all information. :param save_anonymized_img_as: String Path. Path where to save the anonymized segmentation map. If no path is given, the segmentation map is not saved. Default: None. :return: Dictionary with all details of the image. Being this details: For each object detected by the general segmentator: The object, its area, its median basic color and its median xkcd color. Ordered by area. For each face detected: The apparent age, the apparent gender and the apparent expression. And for each pa", "name": "get_details_of_image", "signature": "def get_details_of_image(self, rgb_uint8_img, save_anonymized_img_as=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_test_000347", "prompt": "Implement the Python class `Pipeline` described below.\n\nClass description:\nFeature extraction pipeline which implement all the Feature Extraction modules defined into Models in order to extract all the details within an image and to return them into a standard dictionary form. Note that, as this project is intended to be centered in Data Gathering and Data Science purposes, most Feature Extraction models are not trained from scratch, but borrowed by different authors and hubs that offered pretrained versions of them as open source tools. Every one of them is cited into the correspondent Citation.md. Please take a look at their open repositories if you also want to download them (and star them). Note that some of their implementations also does not come from scratch\n\nMethod signatures and docstrings:\n- def __init__(self): Initiate all the Feature extraction models. 
Including ADE20K segmentator (Of keras-segmentation library), a Face Detector (offered by OpenCV), different estimators of face characteristics (Gender, Age and Expression (Oferred by their correspondent authors)), a Face Parsing Segmentator model (adapted to Keras and pretrained by its correspondent author), and the YOLOv5 object detector (offered its author through torch hub).\n- def get_details_of_image(self, rgb_uint8_img, save_anonymized_img_as=None): Process the image with all the feature extractors within the Pipeline for squeezing all the information within it. :param rgb_uint8_img: HxWx3 numpy image in uint8 format. Image from which squeeze all information. :param save_anonymized_img_as: String Path. Path where to save the anonymized segmentation map. If no path is given, the segmentation map is not saved. Default: None. :return: Dictionary with all details of the image. Being this details: For each object detected by the general segmentator: The object, its area, its median basic color and its median xkcd color. Ordered by area. For each face detected: The apparent age, the apparent gender and the apparent expression. And for each pa", "prompted_full_text": "Implement the Python class `Pipeline` described below.\n\nClass description:\nFeature extraction pipeline which implement all the Feature Extraction modules defined into Models in order to extract all the details within an image and to return them into a standard dictionary form. Note that, as this project is intended to be centered in Data Gathering and Data Science purposes, most Feature Extraction models are not trained from scratch, but borrowed by different authors and hubs that offered pretrained versions of them as open source tools. Every one of them is cited into the correspondent Citation.md. Please take a look at their open repositories if you also want to download them (and star them). Note that some of their implementations also does not come from scratch\n\nMethod signatures and docstrings:\n- def __init__(self): Initiate all the Feature extraction models. Including ADE20K segmentator (Of keras-segmentation library), a Face Detector (offered by OpenCV), different estimators of face characteristics (Gender, Age and Expression (Oferred by their correspondent authors)), a Face Parsing Segmentator model (adapted to Keras and pretrained by its correspondent author), and the YOLOv5 object detector (offered its author through torch hub).\n- def get_details_of_image(self, rgb_uint8_img, save_anonymized_img_as=None): Process the image with all the feature extractors within the Pipeline for squeezing all the information within it. :param rgb_uint8_img: HxWx3 numpy image in uint8 format. Image from which squeeze all information. :param save_anonymized_img_as: String Path. Path where to save the anonymized segmentation map. If no path is given, the segmentation map is not saved. Default: None. :return: Dictionary with all details of the image. Being this details: For each object detected by the general segmentator: The object, its area, its median basic color and its median xkcd color. Ordered by area. For each face detected: The apparent age, the apparent gender and the apparent expression. And for each pa\n\n<|skeleton|>\nclass Pipeline:\n \"\"\"Feature extraction pipeline which implement all the Feature Extraction modules defined into Models in order to extract all the details within an image and to return them into a standard dictionary form. 
Note that, as this project is intended to be centered in Data Gathering and Data Science purposes, most Feature Extraction models are not trained from scratch, but borrowed by different authors and hubs that offered pretrained versions of them as open source tools. Every one of them is cited into the correspondent Citation.md. Please take a look at their open repositories if you also want to download them (and star them). Note that some of their implementations also does not come from scratch\"\"\"\n\n def __init__(self):\n \"\"\"Initiate all the Feature extraction models. Including ADE20K segmentator (Of keras-segmentation library), a Face Detector (offered by OpenCV), different estimators of face characteristics (Gender, Age and Expression (Oferred by their correspondent authors)), a Face Parsing Segmentator model (adapted to Keras and pretrained by its correspondent author), and the YOLOv5 object detector (offered its author through torch hub).\"\"\"\n <|body_0|>\n\n def get_details_of_image(self, rgb_uint8_img, save_anonymized_img_as=None):\n \"\"\"Process the image with all the feature extractors within the Pipeline for squeezing all the information within it. :param rgb_uint8_img: HxWx3 numpy image in uint8 format. Image from which squeeze all information. :param save_anonymized_img_as: String Path. Path where to save the anonymized segmentation map. If no path is given, the segmentation map is not saved. Default: None. :return: Dictionary with all details of the image. Being this details: For each object detected by the general segmentator: The object, its area, its median basic color and its median xkcd color. Ordered by area. For each face detected: The apparent age, the apparent gender and the apparent expression. And for each pa\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.general_segmentator = Segmentator(ADE20K)\n self.face_detection = OpenCVFaceDetector()\n self.face_estimation = {name: model() for name, model in FACE_ESTIMATOR_MODELS.items()}\n self.face_segmentator = Segmentator(FACE_PARSING)\n self.object_counter = YOLOv5()\n<|end_body_0|>\n\n<|body_start_1|>\n details = {}\n general_segmentation_mask = self.general_segmentator.predict(rgb_uint8_image=rgb_uint8_img)\n details[FIELDS.GENERAL_PARTS] = self.general_segmentator.get_elements_in_image(rgb_uint8_image=rgb_uint8_img, segmentation_mask=general_segmentation_mask)\n details[FIELDS.OBJECT_COUNTING] = self.object_counter.get_details(rgb_uint8_image=rgb_uint8_img)\n simplified_colors_image = self.general_segmentator.get_simplified_colors_segmented_prediction(img=rgb_uint8_img, segmentation_mask=general_segmentation_mask, with_borders=True)\n rgb_uint8_img = resize(image=rgb_uint8_img, output_shape=general_segmentation_mask.shape, preserve_range=True).astype(np.uint8)\n faces, boxes = self.face_detection.get_cropped_faces(rgb_uint8_image=rgb_uint8_img, margin=0.0, return_box_position=True)\n if len(faces) > 0:\n faces = self.face_detection.get_cropped_faces(rgb_uint8_image=rgb_uint8_img, margin=0.2, boxes=boxes)\n margin_faces, margin_boxes = self.face_detection.get_cropped_faces(rgb_uint8_image=rgb_uint8_img, margin=0.3, boxes=boxes, return_box_position=True)\n details[FIELDS.FACES] = []\n for i, (face, margin_face, margin_box, strict_box) in enumerate(zip(faces, margin_faces, margin_boxes, boxes)):\n x1, y1, x2, y2 = strict_box\n estimations = {aspect: estimator.predict(face=face) for aspect, estimator in self.face_estimation.items()}\n estimations[FIELDS.TOTAL_AREA] = (y2 - y1) * (x2 - x1) / 
(rgb_uint8_img.shape[0] * rgb_uint8_img.shape[1])\n segmented_face_mask = self.face_segmentator.predict(rgb_uint8_image=margin_face)\n simplified_colors_face = self.face_segmentator.get_simplified_colors_segmented_prediction(img=margin_face, segmentation_mask=segmented_face_mask, with_borders=True, dilate_borders=True)\n simplified_colors_face = resize(image=simplified_colors_face, output_shape=margin_face.shape, preserve_range=True, order=3).astype(np.uint8)\n estimations[FIELDS.FACE_PARTS] = self.face_segmentator.get_elements_in_image(rgb_uint8_image=margin_face, segmentation_mask=segmented_face_mask)\n fuse_images_without_background(img=simplified_colors_image, img_to_insert=simplified_colors_face, img_to_insert_segmentation_mask=segmented_face_mask, box=margin_box)\n details[FIELDS.FACES].append(estimations)\n else:\n details[FIELDS.FACES] = None\n if save_anonymized_img_as is not None:\n dirname = os.path.dirname(save_anonymized_img_as)\n if not os.path.isdir(dirname):\n os.makedirs(dirname)\n Image.fromarray(simplified_colors_image).save(fp=save_anonymized_img_as)\n return details\n<|end_body_1|>\n", "revision_id": "7c24c0182d9bb5d092bab5eb9e4785f1c4b5a604", "skeleton": "<|skeleton|>\nclass Pipeline:\n \"\"\"Feature extraction pipeline which implement all the Feature Extraction modules defined into Models in order to extract all the details within an image and to return them into a standard dictionary form. Note that, as this project is intended to be centered in Data Gathering and Data Science purposes, most Feature Extraction models are not trained from scratch, but borrowed by different authors and hubs that offered pretrained versions of them as open source tools. Every one of them is cited into the correspondent Citation.md. Please take a look at their open repositories if you also want to download them (and star them). Note that some of their implementations also does not come from scratch\"\"\"\n\n def __init__(self):\n \"\"\"Initiate all the Feature extraction models. Including ADE20K segmentator (Of keras-segmentation library), a Face Detector (offered by OpenCV), different estimators of face characteristics (Gender, Age and Expression (Oferred by their correspondent authors)), a Face Parsing Segmentator model (adapted to Keras and pretrained by its correspondent author), and the YOLOv5 object detector (offered its author through torch hub).\"\"\"\n <|body_0|>\n\n def get_details_of_image(self, rgb_uint8_img, save_anonymized_img_as=None):\n \"\"\"Process the image with all the feature extractors within the Pipeline for squeezing all the information within it. :param rgb_uint8_img: HxWx3 numpy image in uint8 format. Image from which squeeze all information. :param save_anonymized_img_as: String Path. Path where to save the anonymized segmentation map. If no path is given, the segmentation map is not saved. Default: None. :return: Dictionary with all details of the image. Being this details: For each object detected by the general segmentator: The object, its area, its median basic color and its median xkcd color. Ordered by area. For each face detected: The apparent age, the apparent gender and the apparent expression. 
And for each pa\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Pipeline:\n \"\"\"Feature extraction pipeline which implement all the Feature Extraction modules defined into Models in order to extract all the details within an image and to return them into a standard dictionary form. Note that, as this project is intended to be centered in Data Gathering and Data Science purposes, most Feature Extraction models are not trained from scratch, but borrowed by different authors and hubs that offered pretrained versions of them as open source tools. Every one of them is cited into the correspondent Citation.md. Please take a look at their open repositories if you also want to download them (and star them). Note that some of their implementations also does not come from scratch\"\"\"\n\n def __init__(self):\n \"\"\"Initiate all the Feature extraction models. Including ADE20K segmentator (Of keras-segmentation library), a Face Detector (offered by OpenCV), different estimators of face characteristics (Gender, Age and Expression (Oferred by their correspondent authors)), a Face Parsing Segmentator model (adapted to Keras and pretrained by its correspondent author), and the YOLOv5 object detector (offered its author through torch hub).\"\"\"\n self.general_segmentator = Segmentator(ADE20K)\n self.face_detection = OpenCVFaceDetector()\n self.face_estimation = {name: model() for name, model in FACE_ESTIMATOR_MODELS.items()}\n self.face_segmentator = Segmentator(FACE_PARSING)\n self.object_counter = YOLOv5()\n\n def get_details_of_image(self, rgb_uint8_img, save_anonymized_img_as=None):\n \"\"\"Process the image with all the feature extractors within the Pipeline for squeezing all the information within it. :param rgb_uint8_img: HxWx3 numpy image in uint8 format. Image from which squeeze all information. :param save_anonymized_img_as: String Path. Path where to save the anonymized segmentation map. If no path is given, the segmentation map is not saved. Default: None. :return: Dictionary with all details of the image. Being this details: For each object detected by the general segmentator: The object, its area, its median basic color and its median xkcd color. Ordered by area. For each face detected: The apparent age, the apparent gender and the apparent expression. 
And for each pa\"\"\"\n details = {}\n general_segmentation_mask = self.general_segmentator.predict(rgb_uint8_image=rgb_uint8_img)\n details[FIELDS.GENERAL_PARTS] = self.general_segmentator.get_elements_in_image(rgb_uint8_image=rgb_uint8_img, segmentation_mask=general_segmentation_mask)\n details[FIELDS.OBJECT_COUNTING] = self.object_counter.get_details(rgb_uint8_image=rgb_uint8_img)\n simplified_colors_image = self.general_segmentator.get_simplified_colors_segmented_prediction(img=rgb_uint8_img, segmentation_mask=general_segmentation_mask, with_borders=True)\n rgb_uint8_img = resize(image=rgb_uint8_img, output_shape=general_segmentation_mask.shape, preserve_range=True).astype(np.uint8)\n faces, boxes = self.face_detection.get_cropped_faces(rgb_uint8_image=rgb_uint8_img, margin=0.0, return_box_position=True)\n if len(faces) > 0:\n faces = self.face_detection.get_cropped_faces(rgb_uint8_image=rgb_uint8_img, margin=0.2, boxes=boxes)\n margin_faces, margin_boxes = self.face_detection.get_cropped_faces(rgb_uint8_image=rgb_uint8_img, margin=0.3, boxes=boxes, return_box_position=True)\n details[FIELDS.FACES] = []\n for i, (face, margin_face, margin_box, strict_box) in enumerate(zip(faces, margin_faces, margin_boxes, boxes)):\n x1, y1, x2, y2 = strict_box\n estimations = {aspect: estimator.predict(face=face) for aspect, estimator in self.face_estimation.items()}\n estimations[FIELDS.TOTAL_AREA] = (y2 - y1) * (x2 - x1) / (rgb_uint8_img.shape[0] * rgb_uint8_img.shape[1])\n segmented_face_mask = self.face_segmentator.predict(rgb_uint8_image=margin_face)\n simplified_colors_face = self.face_segmentator.get_simplified_colors_segmented_prediction(img=margin_face, segmentation_mask=segmented_face_mask, with_borders=True, dilate_borders=True)\n simplified_colors_face = resize(image=simplified_colors_face, output_shape=margin_face.shape, preserve_range=True, order=3).astype(np.uint8)\n estimations[FIELDS.FACE_PARTS] = self.face_segmentator.get_elements_in_image(rgb_uint8_image=margin_face, segmentation_mask=segmented_face_mask)\n fuse_images_without_background(img=simplified_colors_image, img_to_insert=simplified_colors_face, img_to_insert_segmentation_mask=segmented_face_mask, box=margin_box)\n details[FIELDS.FACES].append(estimations)\n else:\n details[FIELDS.FACES] = None\n if save_anonymized_img_as is not None:\n dirname = os.path.dirname(save_anonymized_img_as)\n if not os.path.isdir(dirname):\n os.makedirs(dirname)\n Image.fromarray(simplified_colors_image).save(fp=save_anonymized_img_as)\n return details\n", "source": "the_stack_v2_python_sparse", "source_path": "BDA_Project_EricCañas_AlbaGarcía/src/ImageDetectors/VisualAnalysisPipeline.py", "source_repo": "Akvamarin/MAI", "split": "val", "star_events_count": 0}
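The `Pipeline` record above resizes with `skimage.transform.resize` and normalizes each detected face box by the full image area. A small sketch of both steps, with illustrative shapes and box coordinates that are not taken from the record:

import numpy as np
from skimage.transform import resize

img = np.zeros((480, 640, 3), dtype=np.uint8)  # hypothetical RGB frame

# preserve_range=True keeps the 0..255 scale; otherwise skimage converts the
# uint8 input to floats in [0, 1], which the later astype(np.uint8) would flatten.
small = resize(img, output_shape=(240, 320), preserve_range=True).astype(np.uint8)

# Fraction of the frame covered by an (x1, y1, x2, y2) box, as in TOTAL_AREA.
x1, y1, x2, y2 = 100, 120, 180, 220
relative_area = (y2 - y1) * (x2 - x1) / (img.shape[0] * img.shape[1])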
{"blob_id": "60cfdbe48aae286d02fda3f6c3359b1a9d9a7663", "bodies": ["context = self.context\nrequest = context['request']\nif not request.user.check_password(value):\n raise serializers.ValidationError('Password is incorrect.')\nreturn value", "data = self.get_initial()\npassword1 = value\npassword2 = data.get('new_password2')\nif password1 != password2:\n raise serializers.ValidationError('The two Passwords must match.')\nreturn value", "password = validated_data['new_password1']\ninstance.set_password(password)\ninstance.save()\nreturn validated_data"], "bodies_text": "<|body_start_0|>\n context = self.context\n request = context['request']\n if not request.user.check_password(value):\n raise serializers.ValidationError('Password is incorrect.')\n return value\n<|end_body_0|>\n\n<|body_start_1|>\n data = self.get_initial()\n password1 = value\n password2 = data.get('new_password2')\n if password1 != password2:\n raise serializers.ValidationError('The two Passwords must match.')\n return value\n<|end_body_1|>\n\n<|body_start_2|>\n password = validated_data['new_password1']\n instance.set_password(password)\n instance.save()\n return validated_data\n<|end_body_2|>\n", "class_docstring": "A serializer for password change.", "class_name": "PassowordChangeSerializer", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PassowordChangeSerializer:\n \"\"\"A serializer for password change.\"\"\"\n\n def validate_old_password(self, value):\n \"\"\"Validate old password.\"\"\"\n <|body_0|>\n\n def validate_new_password1(self, value):\n \"\"\"Validate passwords.\"\"\"\n <|body_1|>\n\n def update(self, instance, validated_data):\n \"\"\"Update user's password.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context = self.context\n request = context['request']\n if not request.user.check_password(value):\n raise serializers.ValidationError('Password is incorrect.')\n return value\n<|end_body_0|>\n\n<|body_start_1|>\n data = self.get_initial()\n password1 = value\n password2 = data.get('new_password2')\n if password1 != password2:\n raise serializers.ValidationError('The two Passwords must match.')\n return value\n<|end_body_1|>\n\n<|body_start_2|>\n password = validated_data['new_password1']\n instance.set_password(password)\n instance.save()\n return validated_data\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000019", "length_bytes": 9392, "license_type": "no_license", "methods": [{"docstring": "Validate old password.", "name": "validate_old_password", "signature": "def validate_old_password(self, value)"}, {"docstring": "Validate passwords.", "name": "validate_new_password1", "signature": "def validate_new_password1(self, value)"}, {"docstring": "Update user's password.", "name": "update", "signature": "def update(self, instance, validated_data)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_013173", "prompt": "Implement the Python class `PassowordChangeSerializer` described below.\n\nClass description:\nA serializer for password change.\n\nMethod signatures and docstrings:\n- def validate_old_password(self, value): Validate old password.\n- def validate_new_password1(self, value): Validate passwords.\n- def update(self, instance, validated_data): Update user's password.", "prompted_full_text": "Implement the Python class `PassowordChangeSerializer` described below.\n\nClass description:\nA serializer for password change.\n\nMethod signatures and docstrings:\n- def 
validate_old_password(self, value): Validate old password.\n- def validate_new_password1(self, value): Validate passwords.\n- def update(self, instance, validated_data): Update user's password.\n\n<|skeleton|>\nclass PassowordChangeSerializer:\n \"\"\"A serializer for password change.\"\"\"\n\n def validate_old_password(self, value):\n \"\"\"Validate old password.\"\"\"\n <|body_0|>\n\n def validate_new_password1(self, value):\n \"\"\"Validate passwords.\"\"\"\n <|body_1|>\n\n def update(self, instance, validated_data):\n \"\"\"Update user's password.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context = self.context\n request = context['request']\n if not request.user.check_password(value):\n raise serializers.ValidationError('Password is incorrect.')\n return value\n<|end_body_0|>\n\n<|body_start_1|>\n data = self.get_initial()\n password1 = value\n password2 = data.get('new_password2')\n if password1 != password2:\n raise serializers.ValidationError('The two Passwords must match.')\n return value\n<|end_body_1|>\n\n<|body_start_2|>\n password = validated_data['new_password1']\n instance.set_password(password)\n instance.save()\n return validated_data\n<|end_body_2|>\n", "revision_id": "6c2e8bc6b0a172ff34d0f3191dfdebbd85584525", "skeleton": "<|skeleton|>\nclass PassowordChangeSerializer:\n \"\"\"A serializer for password change.\"\"\"\n\n def validate_old_password(self, value):\n \"\"\"Validate old password.\"\"\"\n <|body_0|>\n\n def validate_new_password1(self, value):\n \"\"\"Validate passwords.\"\"\"\n <|body_1|>\n\n def update(self, instance, validated_data):\n \"\"\"Update user's password.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class PassowordChangeSerializer:\n \"\"\"A serializer for password change.\"\"\"\n\n def validate_old_password(self, value):\n \"\"\"Validate old password.\"\"\"\n context = self.context\n request = context['request']\n if not request.user.check_password(value):\n raise serializers.ValidationError('Password is incorrect.')\n return value\n\n def validate_new_password1(self, value):\n \"\"\"Validate passwords.\"\"\"\n data = self.get_initial()\n password1 = value\n password2 = data.get('new_password2')\n if password1 != password2:\n raise serializers.ValidationError('The two Passwords must match.')\n return value\n\n def update(self, instance, validated_data):\n \"\"\"Update user's password.\"\"\"\n password = validated_data['new_password1']\n instance.set_password(password)\n instance.save()\n return validated_data\n", "source": "the_stack_v2_python_sparse", "source_path": "accounts/serializers.py", "source_repo": "OmarFateh/api-ecommerce", "split": "val", "star_events_count": 1}
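The `PassowordChangeSerializer` record above leans on Django's password primitives. A minimal sketch of the `check_password`/`set_password`/`save` sequence it performs, wrapped in a hypothetical `change_password` helper (`user` is assumed to be a `django.contrib.auth` user instance):

def change_password(user, old_raw: str, new_raw: str) -> bool:
    # check_password hashes old_raw and compares it against the stored hash.
    if not user.check_password(old_raw):
        return False
    # set_password stores a new salted hash; save() persists it.
    user.set_password(new_raw)
    user.save()
    return True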
{"blob_id": "f20ed0c35271315c81ba179f5f36dfcc56d7ab27", "bodies": ["MOD = int(1000000000.0 + 7)\ncnt = Counter(arr)\nkeys = list(sorted(cnt.keys()))\n\n@lru_cache(None)\ndef comb(n, r):\n r = min(r, n - r)\n if r == 0:\n return 1\n return int(comb(n - 1, r - 1) * n / r) % MOD\n\ndef backtrack(i, t, cur):\n if t == 0 and sum(cur.values()) == 3:\n ret = 1\n for k, c in cur.items():\n ret *= comb(cnt[k], c)\n ret %= MOD\n return ret\n if t < 0 or i < 0 or len(cur) >= 3:\n return 0\n ret = backtrack(i - 1, t, cur)\n for c in range(1, cnt[keys[i]] + 1):\n if keys[i] * c > t:\n break\n cur[keys[i]] = c\n ret += backtrack(i - 1, t - c * keys[i], cur)\n cur.pop(keys[i])\n return ret % MOD\nreturn backtrack(len(keys) - 1, target, {})", "MOD = int(1000000000.0 + 7)\ncnt = Counter(arr)\nkeys = list(sorted(cnt.keys()))\nret = 0\nfor i in range(len(keys)):\n for j in range(i, len(keys)):\n remainder = target - keys[i] - keys[j]\n if remainder not in cnt or remainder < keys[j]:\n continue\n cases = 1\n for k, c in Counter([remainder, keys[i], keys[j]]).items():\n numerator = 1\n denominator = 1\n for _c in range(c):\n numerator *= cnt[k] - _c\n denominator *= _c + 1\n cases *= numerator // denominator\n cases %= MOD\n ret += cases\n ret %= MOD\nreturn ret"], "bodies_text": "<|body_start_0|>\n MOD = int(1000000000.0 + 7)\n cnt = Counter(arr)\n keys = list(sorted(cnt.keys()))\n\n @lru_cache(None)\n def comb(n, r):\n r = min(r, n - r)\n if r == 0:\n return 1\n return int(comb(n - 1, r - 1) * n / r) % MOD\n\n def backtrack(i, t, cur):\n if t == 0 and sum(cur.values()) == 3:\n ret = 1\n for k, c in cur.items():\n ret *= comb(cnt[k], c)\n ret %= MOD\n return ret\n if t < 0 or i < 0 or len(cur) >= 3:\n return 0\n ret = backtrack(i - 1, t, cur)\n for c in range(1, cnt[keys[i]] + 1):\n if keys[i] * c > t:\n break\n cur[keys[i]] = c\n ret += backtrack(i - 1, t - c * keys[i], cur)\n cur.pop(keys[i])\n return ret % MOD\n return backtrack(len(keys) - 1, target, {})\n<|end_body_0|>\n\n<|body_start_1|>\n MOD = int(1000000000.0 + 7)\n cnt = Counter(arr)\n keys = list(sorted(cnt.keys()))\n ret = 0\n for i in range(len(keys)):\n for j in range(i, len(keys)):\n remainder = target - keys[i] - keys[j]\n if remainder not in cnt or remainder < keys[j]:\n continue\n cases = 1\n for k, c in Counter([remainder, keys[i], keys[j]]).items():\n numerator = 1\n denominator = 1\n for _c in range(c):\n numerator *= cnt[k] - _c\n denominator *= _c + 1\n cases *= numerator // denominator\n cases %= MOD\n ret += cases\n ret %= MOD\n return ret\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def threeSumMulti(self, arr: List[int], target: int) -> int:\n \"\"\"TLE. 
This can be a generic solution with some modification.\"\"\"\n <|body_0|>\n\n def threeSumMulti(self, arr: List[int], target: int) -> int:\n \"\"\"Time complexity: O(n^2) Space complexity: O(n)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n MOD = int(1000000000.0 + 7)\n cnt = Counter(arr)\n keys = list(sorted(cnt.keys()))\n\n @lru_cache(None)\n def comb(n, r):\n r = min(r, n - r)\n if r == 0:\n return 1\n return int(comb(n - 1, r - 1) * n / r) % MOD\n\n def backtrack(i, t, cur):\n if t == 0 and sum(cur.values()) == 3:\n ret = 1\n for k, c in cur.items():\n ret *= comb(cnt[k], c)\n ret %= MOD\n return ret\n if t < 0 or i < 0 or len(cur) >= 3:\n return 0\n ret = backtrack(i - 1, t, cur)\n for c in range(1, cnt[keys[i]] + 1):\n if keys[i] * c > t:\n break\n cur[keys[i]] = c\n ret += backtrack(i - 1, t - c * keys[i], cur)\n cur.pop(keys[i])\n return ret % MOD\n return backtrack(len(keys) - 1, target, {})\n<|end_body_0|>\n\n<|body_start_1|>\n MOD = int(1000000000.0 + 7)\n cnt = Counter(arr)\n keys = list(sorted(cnt.keys()))\n ret = 0\n for i in range(len(keys)):\n for j in range(i, len(keys)):\n remainder = target - keys[i] - keys[j]\n if remainder not in cnt or remainder < keys[j]:\n continue\n cases = 1\n for k, c in Counter([remainder, keys[i], keys[j]]).items():\n numerator = 1\n denominator = 1\n for _c in range(c):\n numerator *= cnt[k] - _c\n denominator *= _c + 1\n cases *= numerator // denominator\n cases %= MOD\n ret += cases\n ret %= MOD\n return ret\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000020", "length_bytes": 3954, "license_type": "no_license", "methods": [{"docstring": "TLE. This can be a generic solution with some modification.", "name": "threeSumMulti", "signature": "def threeSumMulti(self, arr: List[int], target: int) -> int"}, {"docstring": "Time complexity: O(n^2) Space complexity: O(n)", "name": "threeSumMulti", "signature": "def threeSumMulti(self, arr: List[int], target: int) -> int"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_012903", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def threeSumMulti(self, arr: List[int], target: int) -> int: TLE. This can be a generic solution with some modification.\n- def threeSumMulti(self, arr: List[int], target: int) -> int: Time complexity: O(n^2) Space complexity: O(n)", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def threeSumMulti(self, arr: List[int], target: int) -> int: TLE. This can be a generic solution with some modification.\n- def threeSumMulti(self, arr: List[int], target: int) -> int: Time complexity: O(n^2) Space complexity: O(n)\n\n<|skeleton|>\nclass Solution:\n\n def threeSumMulti(self, arr: List[int], target: int) -> int:\n \"\"\"TLE. 
This can be a generic solution with some modification.\"\"\"\n <|body_0|>\n\n def threeSumMulti(self, arr: List[int], target: int) -> int:\n \"\"\"Time complexity: O(n^2) Space complexity: O(n)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n MOD = int(1000000000.0 + 7)\n cnt = Counter(arr)\n keys = list(sorted(cnt.keys()))\n\n @lru_cache(None)\n def comb(n, r):\n r = min(r, n - r)\n if r == 0:\n return 1\n return int(comb(n - 1, r - 1) * n / r) % MOD\n\n def backtrack(i, t, cur):\n if t == 0 and sum(cur.values()) == 3:\n ret = 1\n for k, c in cur.items():\n ret *= comb(cnt[k], c)\n ret %= MOD\n return ret\n if t < 0 or i < 0 or len(cur) >= 3:\n return 0\n ret = backtrack(i - 1, t, cur)\n for c in range(1, cnt[keys[i]] + 1):\n if keys[i] * c > t:\n break\n cur[keys[i]] = c\n ret += backtrack(i - 1, t - c * keys[i], cur)\n cur.pop(keys[i])\n return ret % MOD\n return backtrack(len(keys) - 1, target, {})\n<|end_body_0|>\n\n<|body_start_1|>\n MOD = int(1000000000.0 + 7)\n cnt = Counter(arr)\n keys = list(sorted(cnt.keys()))\n ret = 0\n for i in range(len(keys)):\n for j in range(i, len(keys)):\n remainder = target - keys[i] - keys[j]\n if remainder not in cnt or remainder < keys[j]:\n continue\n cases = 1\n for k, c in Counter([remainder, keys[i], keys[j]]).items():\n numerator = 1\n denominator = 1\n for _c in range(c):\n numerator *= cnt[k] - _c\n denominator *= _c + 1\n cases *= numerator // denominator\n cases %= MOD\n ret += cases\n ret %= MOD\n return ret\n<|end_body_1|>\n", "revision_id": "1389a009a02e90e8700a7a00e0b7f797c129cdf4", "skeleton": "<|skeleton|>\nclass Solution:\n\n def threeSumMulti(self, arr: List[int], target: int) -> int:\n \"\"\"TLE. This can be a generic solution with some modification.\"\"\"\n <|body_0|>\n\n def threeSumMulti(self, arr: List[int], target: int) -> int:\n \"\"\"Time complexity: O(n^2) Space complexity: O(n)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def threeSumMulti(self, arr: List[int], target: int) -> int:\n \"\"\"TLE. 
This can be a generic solution with some modification.\"\"\"\n MOD = int(1000000000.0 + 7)\n cnt = Counter(arr)\n keys = list(sorted(cnt.keys()))\n\n @lru_cache(None)\n def comb(n, r):\n r = min(r, n - r)\n if r == 0:\n return 1\n return int(comb(n - 1, r - 1) * n / r) % MOD\n\n def backtrack(i, t, cur):\n if t == 0 and sum(cur.values()) == 3:\n ret = 1\n for k, c in cur.items():\n ret *= comb(cnt[k], c)\n ret %= MOD\n return ret\n if t < 0 or i < 0 or len(cur) >= 3:\n return 0\n ret = backtrack(i - 1, t, cur)\n for c in range(1, cnt[keys[i]] + 1):\n if keys[i] * c > t:\n break\n cur[keys[i]] = c\n ret += backtrack(i - 1, t - c * keys[i], cur)\n cur.pop(keys[i])\n return ret % MOD\n return backtrack(len(keys) - 1, target, {})\n\n def threeSumMulti(self, arr: List[int], target: int) -> int:\n \"\"\"Time complexity: O(n^2) Space complexity: O(n)\"\"\"\n MOD = int(1000000000.0 + 7)\n cnt = Counter(arr)\n keys = list(sorted(cnt.keys()))\n ret = 0\n for i in range(len(keys)):\n for j in range(i, len(keys)):\n remainder = target - keys[i] - keys[j]\n if remainder not in cnt or remainder < keys[j]:\n continue\n cases = 1\n for k, c in Counter([remainder, keys[i], keys[j]]).items():\n numerator = 1\n denominator = 1\n for _c in range(c):\n numerator *= cnt[k] - _c\n denominator *= _c + 1\n cases *= numerator // denominator\n cases %= MOD\n ret += cases\n ret %= MOD\n return ret\n", "source": "the_stack_v2_python_sparse", "source_path": "leetcode/solved/959_3Sum_With_Multiplicity/solution.py", "source_repo": "sungminoh/algorithms", "split": "val", "star_events_count": 0}
{"blob_id": "658f89a78052844d178937b959d34a0b1e1849e2", "bodies": ["output_mediator = self._CreateOutputMediator()\nself._output_writer = cli_test_lib.TestOutputWriter()\nself._output_module = json_out.JsonOutputModule(output_mediator, output_writer=self._output_writer)\nself._event_object = JsonTestEvent()", "expected_header = b'{'\nself._output_module.WriteHeader()\nheader = self._output_writer.ReadOutput()\nself.assertEqual(header, expected_header)", "expected_footer = b'\"event_foo\": \"{}\"}'\nself._output_module.WriteFooter()\nfooter = self._output_writer.ReadOutput()\nself.assertEqual(footer, expected_footer)", "self._output_module.WriteEventBody(self._event_object)\nexpected_uuid = self._event_object.uuid.encode(u'utf-8')\nexpected_timestamp = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:01')\nif sys.platform.startswith(u'win'):\n expected_os_location = u'C:\\\\{0:s}'.format(os.path.join(u'cases', u'image.dd'))\n expected_os_location = expected_os_location.replace(u'\\\\', u'\\\\\\\\')\n expected_os_location = expected_os_location.replace(u'\\\\', u'\\\\\\\\')\n expected_os_location = expected_os_location.replace(u'\\\\', u'\\\\\\\\')\nelse:\n expected_os_location = u'{0:s}{1:s}'.format(os.path.sep, os.path.join(u'cases', u'image.dd'))\nexpected_os_location = expected_os_location.encode(u'utf-8')\nexpected_event_body = b'\"event_0\": {{\"username\": \"root\", \"display_name\": \"OS: /var/log/syslog.1\", \"uuid\": \"{0:s}\", \"data_type\": \"test:l2tjson\", \"timestamp\": {1:d}, \"hostname\": \"ubuntu\", \"text\": \"Reporter PID: |8442| (pam_unix(cron:session): session\\\\n closed for user root)\", \"pathspec\": \"{{\\\\\"type_indicator\\\\\": \\\\\"TSK\\\\\", \\\\\"inode\\\\\": 15, \\\\\"location\\\\\": \\\\\"/var/log/syslog.1\\\\\", \\\\\"parent\\\\\": \\\\\"{{\\\\\\\\\\\\\"type_indicator\\\\\\\\\\\\\": \\\\\\\\\\\\\"OS\\\\\\\\\\\\\", \\\\\\\\\\\\\"location\\\\\\\\\\\\\": \\\\\\\\\\\\\"{2:s}\\\\\\\\\\\\\"}}\\\\\"}}\", \"inode\": 12345678}},\\n'.format(expected_uuid, expected_timestamp, expected_os_location)\nevent_body = self._output_writer.ReadOutput()\nself.assertEqual(event_body, expected_event_body)"], "bodies_text": "<|body_start_0|>\n output_mediator = self._CreateOutputMediator()\n self._output_writer = cli_test_lib.TestOutputWriter()\n self._output_module = json_out.JsonOutputModule(output_mediator, output_writer=self._output_writer)\n self._event_object = JsonTestEvent()\n<|end_body_0|>\n\n<|body_start_1|>\n expected_header = b'{'\n self._output_module.WriteHeader()\n header = self._output_writer.ReadOutput()\n self.assertEqual(header, expected_header)\n<|end_body_1|>\n\n<|body_start_2|>\n expected_footer = b'\"event_foo\": \"{}\"}'\n self._output_module.WriteFooter()\n footer = self._output_writer.ReadOutput()\n self.assertEqual(footer, expected_footer)\n<|end_body_2|>\n\n<|body_start_3|>\n self._output_module.WriteEventBody(self._event_object)\n expected_uuid = self._event_object.uuid.encode(u'utf-8')\n expected_timestamp = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:01')\n if sys.platform.startswith(u'win'):\n expected_os_location = u'C:\\\\{0:s}'.format(os.path.join(u'cases', u'image.dd'))\n expected_os_location = expected_os_location.replace(u'\\\\', u'\\\\\\\\')\n expected_os_location = expected_os_location.replace(u'\\\\', u'\\\\\\\\')\n expected_os_location = expected_os_location.replace(u'\\\\', u'\\\\\\\\')\n else:\n expected_os_location = u'{0:s}{1:s}'.format(os.path.sep, os.path.join(u'cases', u'image.dd'))\n expected_os_location = 
expected_os_location.encode(u'utf-8')\n expected_event_body = b'\"event_0\": {{\"username\": \"root\", \"display_name\": \"OS: /var/log/syslog.1\", \"uuid\": \"{0:s}\", \"data_type\": \"test:l2tjson\", \"timestamp\": {1:d}, \"hostname\": \"ubuntu\", \"text\": \"Reporter PID: |8442| (pam_unix(cron:session): session\\\\n closed for user root)\", \"pathspec\": \"{{\\\\\"type_indicator\\\\\": \\\\\"TSK\\\\\", \\\\\"inode\\\\\": 15, \\\\\"location\\\\\": \\\\\"/var/log/syslog.1\\\\\", \\\\\"parent\\\\\": \\\\\"{{\\\\\\\\\\\\\"type_indicator\\\\\\\\\\\\\": \\\\\\\\\\\\\"OS\\\\\\\\\\\\\", \\\\\\\\\\\\\"location\\\\\\\\\\\\\": \\\\\\\\\\\\\"{2:s}\\\\\\\\\\\\\"}}\\\\\"}}\", \"inode\": 12345678}},\\n'.format(expected_uuid, expected_timestamp, expected_os_location)\n event_body = self._output_writer.ReadOutput()\n self.assertEqual(event_body, expected_event_body)\n<|end_body_3|>\n", "class_docstring": "Tests for the JSON outputter.", "class_name": "JsonOutputTest", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass JsonOutputTest:\n \"\"\"Tests for the JSON outputter.\"\"\"\n\n def setUp(self):\n \"\"\"Sets up the objects needed for this test.\"\"\"\n <|body_0|>\n\n def testWriteHeader(self):\n \"\"\"Tests the WriteHeader functions.\"\"\"\n <|body_1|>\n\n def testWriteFooter(self):\n \"\"\"Tests the WriteFooter functions.\"\"\"\n <|body_2|>\n\n def testWriteEventBody(self):\n \"\"\"Tests the WriteEventBody function.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n output_mediator = self._CreateOutputMediator()\n self._output_writer = cli_test_lib.TestOutputWriter()\n self._output_module = json_out.JsonOutputModule(output_mediator, output_writer=self._output_writer)\n self._event_object = JsonTestEvent()\n<|end_body_0|>\n\n<|body_start_1|>\n expected_header = b'{'\n self._output_module.WriteHeader()\n header = self._output_writer.ReadOutput()\n self.assertEqual(header, expected_header)\n<|end_body_1|>\n\n<|body_start_2|>\n expected_footer = b'\"event_foo\": \"{}\"}'\n self._output_module.WriteFooter()\n footer = self._output_writer.ReadOutput()\n self.assertEqual(footer, expected_footer)\n<|end_body_2|>\n\n<|body_start_3|>\n self._output_module.WriteEventBody(self._event_object)\n expected_uuid = self._event_object.uuid.encode(u'utf-8')\n expected_timestamp = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:01')\n if sys.platform.startswith(u'win'):\n expected_os_location = u'C:\\\\{0:s}'.format(os.path.join(u'cases', u'image.dd'))\n expected_os_location = expected_os_location.replace(u'\\\\', u'\\\\\\\\')\n expected_os_location = expected_os_location.replace(u'\\\\', u'\\\\\\\\')\n expected_os_location = expected_os_location.replace(u'\\\\', u'\\\\\\\\')\n else:\n expected_os_location = u'{0:s}{1:s}'.format(os.path.sep, os.path.join(u'cases', u'image.dd'))\n expected_os_location = expected_os_location.encode(u'utf-8')\n expected_event_body = b'\"event_0\": {{\"username\": \"root\", \"display_name\": \"OS: /var/log/syslog.1\", \"uuid\": \"{0:s}\", \"data_type\": \"test:l2tjson\", \"timestamp\": {1:d}, \"hostname\": \"ubuntu\", \"text\": \"Reporter PID: |8442| (pam_unix(cron:session): session\\\\n closed for user root)\", \"pathspec\": \"{{\\\\\"type_indicator\\\\\": \\\\\"TSK\\\\\", \\\\\"inode\\\\\": 15, \\\\\"location\\\\\": \\\\\"/var/log/syslog.1\\\\\", \\\\\"parent\\\\\": \\\\\"{{\\\\\\\\\\\\\"type_indicator\\\\\\\\\\\\\": \\\\\\\\\\\\\"OS\\\\\\\\\\\\\", \\\\\\\\\\\\\"location\\\\\\\\\\\\\": 
\\\\\\\\\\\\\"{2:s}\\\\\\\\\\\\\"}}\\\\\"}}\", \"inode\": 12345678}},\\n'.format(expected_uuid, expected_timestamp, expected_os_location)\n event_body = self._output_writer.ReadOutput()\n self.assertEqual(event_body, expected_event_body)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000021", "length_bytes": 3943, "license_type": "permissive", "methods": [{"docstring": "Sets up the objects needed for this test.", "name": "setUp", "signature": "def setUp(self)"}, {"docstring": "Tests the WriteHeader functions.", "name": "testWriteHeader", "signature": "def testWriteHeader(self)"}, {"docstring": "Tests the WriteFooter functions.", "name": "testWriteFooter", "signature": "def testWriteFooter(self)"}, {"docstring": "Tests the WriteEventBody function.", "name": "testWriteEventBody", "signature": "def testWriteEventBody(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_val_002117", "prompt": "Implement the Python class `JsonOutputTest` described below.\n\nClass description:\nTests for the JSON outputter.\n\nMethod signatures and docstrings:\n- def setUp(self): Sets up the objects needed for this test.\n- def testWriteHeader(self): Tests the WriteHeader functions.\n- def testWriteFooter(self): Tests the WriteFooter functions.\n- def testWriteEventBody(self): Tests the WriteEventBody function.", "prompted_full_text": "Implement the Python class `JsonOutputTest` described below.\n\nClass description:\nTests for the JSON outputter.\n\nMethod signatures and docstrings:\n- def setUp(self): Sets up the objects needed for this test.\n- def testWriteHeader(self): Tests the WriteHeader functions.\n- def testWriteFooter(self): Tests the WriteFooter functions.\n- def testWriteEventBody(self): Tests the WriteEventBody function.\n\n<|skeleton|>\nclass JsonOutputTest:\n \"\"\"Tests for the JSON outputter.\"\"\"\n\n def setUp(self):\n \"\"\"Sets up the objects needed for this test.\"\"\"\n <|body_0|>\n\n def testWriteHeader(self):\n \"\"\"Tests the WriteHeader functions.\"\"\"\n <|body_1|>\n\n def testWriteFooter(self):\n \"\"\"Tests the WriteFooter functions.\"\"\"\n <|body_2|>\n\n def testWriteEventBody(self):\n \"\"\"Tests the WriteEventBody function.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n output_mediator = self._CreateOutputMediator()\n self._output_writer = cli_test_lib.TestOutputWriter()\n self._output_module = json_out.JsonOutputModule(output_mediator, output_writer=self._output_writer)\n self._event_object = JsonTestEvent()\n<|end_body_0|>\n\n<|body_start_1|>\n expected_header = b'{'\n self._output_module.WriteHeader()\n header = self._output_writer.ReadOutput()\n self.assertEqual(header, expected_header)\n<|end_body_1|>\n\n<|body_start_2|>\n expected_footer = b'\"event_foo\": \"{}\"}'\n self._output_module.WriteFooter()\n footer = self._output_writer.ReadOutput()\n self.assertEqual(footer, expected_footer)\n<|end_body_2|>\n\n<|body_start_3|>\n self._output_module.WriteEventBody(self._event_object)\n expected_uuid = self._event_object.uuid.encode(u'utf-8')\n expected_timestamp = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:01')\n if sys.platform.startswith(u'win'):\n expected_os_location = u'C:\\\\{0:s}'.format(os.path.join(u'cases', u'image.dd'))\n expected_os_location = expected_os_location.replace(u'\\\\', u'\\\\\\\\')\n expected_os_location = expected_os_location.replace(u'\\\\', u'\\\\\\\\')\n expected_os_location = expected_os_location.replace(u'\\\\', u'\\\\\\\\')\n else:\n expected_os_location = u'{0:s}{1:s}'.format(os.path.sep, 
os.path.join(u'cases', u'image.dd'))\n expected_os_location = expected_os_location.encode(u'utf-8')\n expected_event_body = b'\"event_0\": {{\"username\": \"root\", \"display_name\": \"OS: /var/log/syslog.1\", \"uuid\": \"{0:s}\", \"data_type\": \"test:l2tjson\", \"timestamp\": {1:d}, \"hostname\": \"ubuntu\", \"text\": \"Reporter PID: |8442| (pam_unix(cron:session): session\\\\n closed for user root)\", \"pathspec\": \"{{\\\\\"type_indicator\\\\\": \\\\\"TSK\\\\\", \\\\\"inode\\\\\": 15, \\\\\"location\\\\\": \\\\\"/var/log/syslog.1\\\\\", \\\\\"parent\\\\\": \\\\\"{{\\\\\\\\\\\\\"type_indicator\\\\\\\\\\\\\": \\\\\\\\\\\\\"OS\\\\\\\\\\\\\", \\\\\\\\\\\\\"location\\\\\\\\\\\\\": \\\\\\\\\\\\\"{2:s}\\\\\\\\\\\\\"}}\\\\\"}}\", \"inode\": 12345678}},\\n'.format(expected_uuid, expected_timestamp, expected_os_location)\n event_body = self._output_writer.ReadOutput()\n self.assertEqual(event_body, expected_event_body)\n<|end_body_3|>\n", "revision_id": "f525298bb1dd8f0fecd16d28acc443785ffe88c3", "skeleton": "<|skeleton|>\nclass JsonOutputTest:\n \"\"\"Tests for the JSON outputter.\"\"\"\n\n def setUp(self):\n \"\"\"Sets up the objects needed for this test.\"\"\"\n <|body_0|>\n\n def testWriteHeader(self):\n \"\"\"Tests the WriteHeader functions.\"\"\"\n <|body_1|>\n\n def testWriteFooter(self):\n \"\"\"Tests the WriteFooter functions.\"\"\"\n <|body_2|>\n\n def testWriteEventBody(self):\n \"\"\"Tests the WriteEventBody function.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class JsonOutputTest:\n \"\"\"Tests for the JSON outputter.\"\"\"\n\n def setUp(self):\n \"\"\"Sets up the objects needed for this test.\"\"\"\n output_mediator = self._CreateOutputMediator()\n self._output_writer = cli_test_lib.TestOutputWriter()\n self._output_module = json_out.JsonOutputModule(output_mediator, output_writer=self._output_writer)\n self._event_object = JsonTestEvent()\n\n def testWriteHeader(self):\n \"\"\"Tests the WriteHeader functions.\"\"\"\n expected_header = b'{'\n self._output_module.WriteHeader()\n header = self._output_writer.ReadOutput()\n self.assertEqual(header, expected_header)\n\n def testWriteFooter(self):\n \"\"\"Tests the WriteFooter functions.\"\"\"\n expected_footer = b'\"event_foo\": \"{}\"}'\n self._output_module.WriteFooter()\n footer = self._output_writer.ReadOutput()\n self.assertEqual(footer, expected_footer)\n\n def testWriteEventBody(self):\n \"\"\"Tests the WriteEventBody function.\"\"\"\n self._output_module.WriteEventBody(self._event_object)\n expected_uuid = self._event_object.uuid.encode(u'utf-8')\n expected_timestamp = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:01')\n if sys.platform.startswith(u'win'):\n expected_os_location = u'C:\\\\{0:s}'.format(os.path.join(u'cases', u'image.dd'))\n expected_os_location = expected_os_location.replace(u'\\\\', u'\\\\\\\\')\n expected_os_location = expected_os_location.replace(u'\\\\', u'\\\\\\\\')\n expected_os_location = expected_os_location.replace(u'\\\\', u'\\\\\\\\')\n else:\n expected_os_location = u'{0:s}{1:s}'.format(os.path.sep, os.path.join(u'cases', u'image.dd'))\n expected_os_location = expected_os_location.encode(u'utf-8')\n expected_event_body = b'\"event_0\": {{\"username\": \"root\", \"display_name\": \"OS: /var/log/syslog.1\", \"uuid\": \"{0:s}\", \"data_type\": \"test:l2tjson\", \"timestamp\": {1:d}, \"hostname\": \"ubuntu\", \"text\": \"Reporter PID: |8442| 
(pam_unix(cron:session): session\\\\n closed for user root)\", \"pathspec\": \"{{\\\\\"type_indicator\\\\\": \\\\\"TSK\\\\\", \\\\\"inode\\\\\": 15, \\\\\"location\\\\\": \\\\\"/var/log/syslog.1\\\\\", \\\\\"parent\\\\\": \\\\\"{{\\\\\\\\\\\\\"type_indicator\\\\\\\\\\\\\": \\\\\\\\\\\\\"OS\\\\\\\\\\\\\", \\\\\\\\\\\\\"location\\\\\\\\\\\\\": \\\\\\\\\\\\\"{2:s}\\\\\\\\\\\\\"}}\\\\\"}}\", \"inode\": 12345678}},\\n'.format(expected_uuid, expected_timestamp, expected_os_location)\n event_body = self._output_writer.ReadOutput()\n self.assertEqual(event_body, expected_event_body)\n", "source": "the_stack_v2_python_sparse", "source_path": "plaso/output/json_out_test.py", "source_repo": "cnbird1999/plaso", "split": "val", "star_events_count": 0}
{"blob_id": "baf805206c9f377705c1288d0e95895b89490361", "bodies": ["left = 0\nwhile left <= right:\n mid = left + (right - left >> 1)\n if nums[mid] == target:\n return mid\n if nums[mid] > target:\n right = mid - 1\n else:\n left = mid + 1\nreturn -1", "n = len(arr)\nret = 0\ndp = [[0 for i in range(n)] for j in range(n)]\nfor i in range(1, n):\n for j in range(0, i):\n tmp = self.binary_search(arr, j - 1, arr[i] - arr[j])\n dp[i][j] = dp[i][j] = dp[j][tmp] + 1 if tmp != -1 else 2\n ret = max(ret, dp[i][j])\nreturn ret if ret > 2 else 0"], "bodies_text": "<|body_start_0|>\n left = 0\n while left <= right:\n mid = left + (right - left >> 1)\n if nums[mid] == target:\n return mid\n if nums[mid] > target:\n right = mid - 1\n else:\n left = mid + 1\n return -1\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(arr)\n ret = 0\n dp = [[0 for i in range(n)] for j in range(n)]\n for i in range(1, n):\n for j in range(0, i):\n tmp = self.binary_search(arr, j - 1, arr[i] - arr[j])\n dp[i][j] = dp[i][j] = dp[j][tmp] + 1 if tmp != -1 else 2\n ret = max(ret, dp[i][j])\n return ret if ret > 2 else 0\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def binary_search(self, nums, right, target):\n \"\"\"二分查找\"\"\"\n <|body_0|>\n\n def lenLongestFibSubseq(self, arr):\n \"\"\":type arr: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n left = 0\n while left <= right:\n mid = left + (right - left >> 1)\n if nums[mid] == target:\n return mid\n if nums[mid] > target:\n right = mid - 1\n else:\n left = mid + 1\n return -1\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(arr)\n ret = 0\n dp = [[0 for i in range(n)] for j in range(n)]\n for i in range(1, n):\n for j in range(0, i):\n tmp = self.binary_search(arr, j - 1, arr[i] - arr[j])\n dp[i][j] = dp[i][j] = dp[j][tmp] + 1 if tmp != -1 else 2\n ret = max(ret, dp[i][j])\n return ret if ret > 2 else 0\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000022", "length_bytes": 860, "license_type": "no_license", "methods": [{"docstring": "二分查找", "name": "binary_search", "signature": "def binary_search(self, nums, right, target)"}, {"docstring": ":type arr: List[int] :rtype: int", "name": "lenLongestFibSubseq", "signature": "def lenLongestFibSubseq(self, arr)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_011846", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def binary_search(self, nums, right, target): 二分查找\n- def lenLongestFibSubseq(self, arr): :type arr: List[int] :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def binary_search(self, nums, right, target): 二分查找\n- def lenLongestFibSubseq(self, arr): :type arr: List[int] :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def binary_search(self, nums, right, target):\n \"\"\"二分查找\"\"\"\n <|body_0|>\n\n def lenLongestFibSubseq(self, arr):\n \"\"\":type arr: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n left = 0\n while left <= right:\n mid = left + (right - left >> 1)\n if nums[mid] == target:\n return mid\n if nums[mid] > target:\n right = mid - 1\n else:\n left = mid + 1\n return -1\n<|end_body_0|>\n\n<|body_start_1|>\n n = 
len(arr)\n ret = 0\n dp = [[0 for i in range(n)] for j in range(n)]\n for i in range(1, n):\n for j in range(0, i):\n tmp = self.binary_search(arr, j - 1, arr[i] - arr[j])\n dp[i][j] = dp[i][j] = dp[j][tmp] + 1 if tmp != -1 else 2\n ret = max(ret, dp[i][j])\n return ret if ret > 2 else 0\n<|end_body_1|>\n", "revision_id": "4b30dd6a3f683c8dc71a85f7b947232613a28dc1", "skeleton": "<|skeleton|>\nclass Solution:\n\n def binary_search(self, nums, right, target):\n \"\"\"二分查找\"\"\"\n <|body_0|>\n\n def lenLongestFibSubseq(self, arr):\n \"\"\":type arr: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def binary_search(self, nums, right, target):\n \"\"\"二分查找\"\"\"\n left = 0\n while left <= right:\n mid = left + (right - left >> 1)\n if nums[mid] == target:\n return mid\n if nums[mid] > target:\n right = mid - 1\n else:\n left = mid + 1\n return -1\n\n def lenLongestFibSubseq(self, arr):\n \"\"\":type arr: List[int] :rtype: int\"\"\"\n n = len(arr)\n ret = 0\n dp = [[0 for i in range(n)] for j in range(n)]\n for i in range(1, n):\n for j in range(0, i):\n tmp = self.binary_search(arr, j - 1, arr[i] - arr[j])\n dp[i][j] = dp[i][j] = dp[j][tmp] + 1 if tmp != -1 else 2\n ret = max(ret, dp[i][j])\n return ret if ret > 2 else 0\n", "source": "the_stack_v2_python_sparse", "source_path": "最长斐波那契数列__时间超时.py", "source_repo": "saintifly/leetcode", "split": "val", "star_events_count": 0}
{"blob_id": "c55960c80f23835a9bafb2dce4d22c3ae89bd214", "bodies": ["request_payload = {}\nc = commons.parse_coordinates(coordinates).transform_to('icrs')\nra_dec_str = str(c.ra.hour) + ' ' + str(c.dec.degree)\nrequest_payload['RA'] = ra_dec_str\nrequest_payload['Equinox'] = 'J2000'\nrequest_payload['ImageSize'] = coord.Angle(image_size).arcmin\nrequest_payload['ImageType'] = 'FITS File'\nrequest_payload['MaxImSize'] = self.maximsize if maximsize is None else maximsize\nreturn request_payload", "response = self.get_images_async(coordinates, image_size=image_size, get_query_payload=get_query_payload)\nif get_query_payload:\n return response\nS = BytesIO(response.content)\ntry:\n return fits.open(S, ignore_missing_end=True)\nexcept OSError:\n raise InvalidQueryError(response.content)", "request_payload = self._args_to_payload(coordinates, image_size=image_size)\nif get_query_payload:\n return request_payload\nresponse = self._request('POST', url=self.URL, data=request_payload, timeout=self.TIMEOUT)\nreturn response"], "bodies_text": "<|body_start_0|>\n request_payload = {}\n c = commons.parse_coordinates(coordinates).transform_to('icrs')\n ra_dec_str = str(c.ra.hour) + ' ' + str(c.dec.degree)\n request_payload['RA'] = ra_dec_str\n request_payload['Equinox'] = 'J2000'\n request_payload['ImageSize'] = coord.Angle(image_size).arcmin\n request_payload['ImageType'] = 'FITS File'\n request_payload['MaxImSize'] = self.maximsize if maximsize is None else maximsize\n return request_payload\n<|end_body_0|>\n\n<|body_start_1|>\n response = self.get_images_async(coordinates, image_size=image_size, get_query_payload=get_query_payload)\n if get_query_payload:\n return response\n S = BytesIO(response.content)\n try:\n return fits.open(S, ignore_missing_end=True)\n except OSError:\n raise InvalidQueryError(response.content)\n<|end_body_1|>\n\n<|body_start_2|>\n request_payload = self._args_to_payload(coordinates, image_size=image_size)\n if get_query_payload:\n return request_payload\n response = self._request('POST', url=self.URL, data=request_payload, timeout=self.TIMEOUT)\n return response\n<|end_body_2|>\n", "class_docstring": "", "class_name": "FirstClass", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FirstClass:\n\n def _args_to_payload(self, coordinates, *, image_size=1 * u.arcmin, maximsize=None):\n \"\"\"Fetches image cutouts from FIRST survey. Parameters ---------- coordinates : str or `astropy.coordinates` object The target around which to search. It may be specified as a string in which case it is resolved using online services or as the appropriate `astropy.coordinates` object. ICRS coordinates may also be entered as strings as specified in the `astropy.coordinates` module. image_size : str or `~astropy.units.Quantity` object, optional The string must be parsable by `astropy.coordinates.Angle`. The appropriate `~astropy.units.Quantity` object from `astropy.units` may also be used. Specifies the symmetric size of the image. Defaults to 1 arcmin. maximsize : int, optional Specify the maxim\"\"\"\n <|body_0|>\n\n def get_images(self, coordinates, *, image_size=1 * u.arcmin, get_query_payload=False):\n \"\"\"get_query_payload : bool, optional if set to `True` then returns the dictionary sent as the HTTP request. 
Defaults to `False` Returns ------- A list of `~astropy.io.fits.HDUList` objects\"\"\"\n <|body_1|>\n\n def get_images_async(self, coordinates, *, image_size=1 * u.arcmin, get_query_payload=False):\n \"\"\"get_query_payload : bool, optional if set to `True` then returns the dictionary sent as the HTTP request. Defaults to `False` Returns ------- response : `requests.Response` The HTTP response returned from the service\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n request_payload = {}\n c = commons.parse_coordinates(coordinates).transform_to('icrs')\n ra_dec_str = str(c.ra.hour) + ' ' + str(c.dec.degree)\n request_payload['RA'] = ra_dec_str\n request_payload['Equinox'] = 'J2000'\n request_payload['ImageSize'] = coord.Angle(image_size).arcmin\n request_payload['ImageType'] = 'FITS File'\n request_payload['MaxImSize'] = self.maximsize if maximsize is None else maximsize\n return request_payload\n<|end_body_0|>\n\n<|body_start_1|>\n response = self.get_images_async(coordinates, image_size=image_size, get_query_payload=get_query_payload)\n if get_query_payload:\n return response\n S = BytesIO(response.content)\n try:\n return fits.open(S, ignore_missing_end=True)\n except OSError:\n raise InvalidQueryError(response.content)\n<|end_body_1|>\n\n<|body_start_2|>\n request_payload = self._args_to_payload(coordinates, image_size=image_size)\n if get_query_payload:\n return request_payload\n response = self._request('POST', url=self.URL, data=request_payload, timeout=self.TIMEOUT)\n return response\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000023", "length_bytes": 3737, "license_type": "permissive", "methods": [{"docstring": "Fetches image cutouts from FIRST survey. Parameters ---------- coordinates : str or `astropy.coordinates` object The target around which to search. It may be specified as a string in which case it is resolved using online services or as the appropriate `astropy.coordinates` object. ICRS coordinates may also be entered as strings as specified in the `astropy.coordinates` module. image_size : str or `~astropy.units.Quantity` object, optional The string must be parsable by `astropy.coordinates.Angle`. The appropriate `~astropy.units.Quantity` object from `astropy.units` may also be used. Specifies the symmetric size of the image. Defaults to 1 arcmin. maximsize : int, optional Specify the maxim", "name": "_args_to_payload", "signature": "def _args_to_payload(self, coordinates, *, image_size=1 * u.arcmin, maximsize=None)"}, {"docstring": "get_query_payload : bool, optional if set to `True` then returns the dictionary sent as the HTTP request. Defaults to `False` Returns ------- A list of `~astropy.io.fits.HDUList` objects", "name": "get_images", "signature": "def get_images(self, coordinates, *, image_size=1 * u.arcmin, get_query_payload=False)"}, {"docstring": "get_query_payload : bool, optional if set to `True` then returns the dictionary sent as the HTTP request. 
Defaults to `False` Returns ------- response : `requests.Response` The HTTP response returned from the service", "name": "get_images_async", "signature": "def get_images_async(self, coordinates, *, image_size=1 * u.arcmin, get_query_payload=False)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_029817", "prompt": "Implement the Python class `FirstClass` described below.\n\nClass description:\nImplement the FirstClass class.\n\nMethod signatures and docstrings:\n- def _args_to_payload(self, coordinates, *, image_size=1 * u.arcmin, maximsize=None): Fetches image cutouts from FIRST survey. Parameters ---------- coordinates : str or `astropy.coordinates` object The target around which to search. It may be specified as a string in which case it is resolved using online services or as the appropriate `astropy.coordinates` object. ICRS coordinates may also be entered as strings as specified in the `astropy.coordinates` module. image_size : str or `~astropy.units.Quantity` object, optional The string must be parsable by `astropy.coordinates.Angle`. The appropriate `~astropy.units.Quantity` object from `astropy.units` may also be used. Specifies the symmetric size of the image. Defaults to 1 arcmin. maximsize : int, optional Specify the maxim\n- def get_images(self, coordinates, *, image_size=1 * u.arcmin, get_query_payload=False): get_query_payload : bool, optional if set to `True` then returns the dictionary sent as the HTTP request. Defaults to `False` Returns ------- A list of `~astropy.io.fits.HDUList` objects\n- def get_images_async(self, coordinates, *, image_size=1 * u.arcmin, get_query_payload=False): get_query_payload : bool, optional if set to `True` then returns the dictionary sent as the HTTP request. Defaults to `False` Returns ------- response : `requests.Response` The HTTP response returned from the service", "prompted_full_text": "Implement the Python class `FirstClass` described below.\n\nClass description:\nImplement the FirstClass class.\n\nMethod signatures and docstrings:\n- def _args_to_payload(self, coordinates, *, image_size=1 * u.arcmin, maximsize=None): Fetches image cutouts from FIRST survey. Parameters ---------- coordinates : str or `astropy.coordinates` object The target around which to search. It may be specified as a string in which case it is resolved using online services or as the appropriate `astropy.coordinates` object. ICRS coordinates may also be entered as strings as specified in the `astropy.coordinates` module. image_size : str or `~astropy.units.Quantity` object, optional The string must be parsable by `astropy.coordinates.Angle`. The appropriate `~astropy.units.Quantity` object from `astropy.units` may also be used. Specifies the symmetric size of the image. Defaults to 1 arcmin. maximsize : int, optional Specify the maxim\n- def get_images(self, coordinates, *, image_size=1 * u.arcmin, get_query_payload=False): get_query_payload : bool, optional if set to `True` then returns the dictionary sent as the HTTP request. Defaults to `False` Returns ------- A list of `~astropy.io.fits.HDUList` objects\n- def get_images_async(self, coordinates, *, image_size=1 * u.arcmin, get_query_payload=False): get_query_payload : bool, optional if set to `True` then returns the dictionary sent as the HTTP request. 
Defaults to `False` Returns ------- response : `requests.Response` The HTTP response returned from the service\n\n<|skeleton|>\nclass FirstClass:\n\n def _args_to_payload(self, coordinates, *, image_size=1 * u.arcmin, maximsize=None):\n \"\"\"Fetches image cutouts from FIRST survey. Parameters ---------- coordinates : str or `astropy.coordinates` object The target around which to search. It may be specified as a string in which case it is resolved using online services or as the appropriate `astropy.coordinates` object. ICRS coordinates may also be entered as strings as specified in the `astropy.coordinates` module. image_size : str or `~astropy.units.Quantity` object, optional The string must be parsable by `astropy.coordinates.Angle`. The appropriate `~astropy.units.Quantity` object from `astropy.units` may also be used. Specifies the symmetric size of the image. Defaults to 1 arcmin. maximsize : int, optional Specify the maxim\"\"\"\n <|body_0|>\n\n def get_images(self, coordinates, *, image_size=1 * u.arcmin, get_query_payload=False):\n \"\"\"get_query_payload : bool, optional if set to `True` then returns the dictionary sent as the HTTP request. Defaults to `False` Returns ------- A list of `~astropy.io.fits.HDUList` objects\"\"\"\n <|body_1|>\n\n def get_images_async(self, coordinates, *, image_size=1 * u.arcmin, get_query_payload=False):\n \"\"\"get_query_payload : bool, optional if set to `True` then returns the dictionary sent as the HTTP request. Defaults to `False` Returns ------- response : `requests.Response` The HTTP response returned from the service\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n request_payload = {}\n c = commons.parse_coordinates(coordinates).transform_to('icrs')\n ra_dec_str = str(c.ra.hour) + ' ' + str(c.dec.degree)\n request_payload['RA'] = ra_dec_str\n request_payload['Equinox'] = 'J2000'\n request_payload['ImageSize'] = coord.Angle(image_size).arcmin\n request_payload['ImageType'] = 'FITS File'\n request_payload['MaxImSize'] = self.maximsize if maximsize is None else maximsize\n return request_payload\n<|end_body_0|>\n\n<|body_start_1|>\n response = self.get_images_async(coordinates, image_size=image_size, get_query_payload=get_query_payload)\n if get_query_payload:\n return response\n S = BytesIO(response.content)\n try:\n return fits.open(S, ignore_missing_end=True)\n except OSError:\n raise InvalidQueryError(response.content)\n<|end_body_1|>\n\n<|body_start_2|>\n request_payload = self._args_to_payload(coordinates, image_size=image_size)\n if get_query_payload:\n return request_payload\n response = self._request('POST', url=self.URL, data=request_payload, timeout=self.TIMEOUT)\n return response\n<|end_body_2|>\n", "revision_id": "51316d7417d7daf01a8b29d1df99037b9227c2bc", "skeleton": "<|skeleton|>\nclass FirstClass:\n\n def _args_to_payload(self, coordinates, *, image_size=1 * u.arcmin, maximsize=None):\n \"\"\"Fetches image cutouts from FIRST survey. Parameters ---------- coordinates : str or `astropy.coordinates` object The target around which to search. It may be specified as a string in which case it is resolved using online services or as the appropriate `astropy.coordinates` object. ICRS coordinates may also be entered as strings as specified in the `astropy.coordinates` module. image_size : str or `~astropy.units.Quantity` object, optional The string must be parsable by `astropy.coordinates.Angle`. The appropriate `~astropy.units.Quantity` object from `astropy.units` may also be used. Specifies the symmetric size of the image. 
Defaults to 1 arcmin. maximsize : int, optional Specify the maxim\"\"\"\n <|body_0|>\n\n def get_images(self, coordinates, *, image_size=1 * u.arcmin, get_query_payload=False):\n \"\"\"get_query_payload : bool, optional if set to `True` then returns the dictionary sent as the HTTP request. Defaults to `False` Returns ------- A list of `~astropy.io.fits.HDUList` objects\"\"\"\n <|body_1|>\n\n def get_images_async(self, coordinates, *, image_size=1 * u.arcmin, get_query_payload=False):\n \"\"\"get_query_payload : bool, optional if set to `True` then returns the dictionary sent as the HTTP request. Defaults to `False` Returns ------- response : `requests.Response` The HTTP response returned from the service\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class FirstClass:\n def _args_to_payload(self, coordinates, *, image_size=1 * u.arcmin, maximsize=None):\n \"\"\"Fetches image cutouts from FIRST survey. Parameters ---------- coordinates : str or `astropy.coordinates` object The target around which to search. It may be specified as a string in which case it is resolved using online services or as the appropriate `astropy.coordinates` object. ICRS coordinates may also be entered as strings as specified in the `astropy.coordinates` module. image_size : str or `~astropy.units.Quantity` object, optional The string must be parsable by `astropy.coordinates.Angle`. The appropriate `~astropy.units.Quantity` object from `astropy.units` may also be used. Specifies the symmetric size of the image. Defaults to 1 arcmin. maximsize : int, optional Specify the maxim\"\"\"\n request_payload = {}\n c = commons.parse_coordinates(coordinates).transform_to('icrs')\n ra_dec_str = str(c.ra.hour) + ' ' + str(c.dec.degree)\n request_payload['RA'] = ra_dec_str\n request_payload['Equinox'] = 'J2000'\n request_payload['ImageSize'] = coord.Angle(image_size).arcmin\n request_payload['ImageType'] = 'FITS File'\n request_payload['MaxImSize'] = self.maximsize if maximsize is None else maximsize\n return request_payload\n\n def get_images(self, coordinates, *, image_size=1 * u.arcmin, get_query_payload=False):\n \"\"\"get_query_payload : bool, optional if set to `True` then returns the dictionary sent as the HTTP request. Defaults to `False` Returns ------- A list of `~astropy.io.fits.HDUList` objects\"\"\"\n response = self.get_images_async(coordinates, image_size=image_size, get_query_payload=get_query_payload)\n if get_query_payload:\n return response\n S = BytesIO(response.content)\n try:\n return fits.open(S, ignore_missing_end=True)\n except OSError:\n raise InvalidQueryError(response.content)\n\n def get_images_async(self, coordinates, *, image_size=1 * u.arcmin, get_query_payload=False):\n \"\"\"get_query_payload : bool, optional if set to `True` then returns the dictionary sent as the HTTP request. Defaults to `False` Returns ------- response : `requests.Response` The HTTP response returned from the service\"\"\"\n request_payload = self._args_to_payload(coordinates, image_size=image_size)\n if get_query_payload:\n return request_payload\n response = self._request('POST', url=self.URL, data=request_payload, timeout=self.TIMEOUT)\n return response\n", "source": "the_stack_v2_python_sparse", "source_path": "astroquery/image_cutouts/first/core.py", "source_repo": "astropy/astroquery", "split": "val", "star_events_count": 636}
{"blob_id": "b1533059914fe35fe2e2945004707eceaf61e077", "bodies": ["super().__init__(*args, **kwargs)\nself._add_function('T')\nself._add_function('q')\nself._add_function('a')\nself._add_function('L')\nself.bcs = []\nself._has_boundary = None", "dispatcher = {'dirichlet': self.add_dirichlet, 'robin': self.add_robin}\nfor name, func in dispatcher.items():\n if name.startswith(boundary_type):\n func(**kwargs)\n break\nelse:\n raise ValueError('{} boundaries are not supported. Supported boundaries are: {}.'.format(boundary_type, ', '.join(dispatcher.keys())))", "if self._has_boundary is False:\n raise AttributeError('Cannot add boundary after declaring that there are no boundaries')\nnorm = FacetNormal(self.mesh)\nif isinstance(g, (float, int)):\n g = Constant(g)\nintegrand = -1 * self.v * dot(self.q, norm)\nif surface == 'all':\n dbc = DirichletBC(V=self.V, g=g, sub_domain='on_boundary')\n self.a += integrand * ds\nelse:\n dbc = DirichletBC(V=self.V, g=g, sub_domain=surface)\n try:\n self.a += sum((integrand * ds(s) for s in surface))\n except TypeError:\n self.a += integrand * ds(surface)\nself.bcs.append(dbc)\nself._has_boundary = True", "if self._has_boundary is False:\n raise AttributeError('Cannot add boundary after declaring that there are no boundaries')\nif isinstance(g, (float, int)):\n g = Constant(g)\nif isinstance(alpha, (float, int)):\n alpha = Constant(alpha)\na_integrand = alpha * self.v * self.T\nL_integrand = self.v * g\nif surface == 'all':\n self.a += a_integrand * ds\n self.L += L_integrand * ds\nelse:\n try:\n self.a += sum((a_integrand * ds(s) for s in surface))\n self.L += sum((L_integrand * ds(s) for s in surface))\n except TypeError:\n self.a += a_integrand * ds(surface)\n self.L += L_integrand * ds(surface)\nself._has_boundary = True", "if self._has_boundary is True:\n raise AttributeError('Cannot set no boundary after boundaries have been set')\nnorm = FacetNormal(self.mesh)\nself.a += -1 * self.v * dot(self.q, norm) * ds\nself._has_boundary = False"], "bodies_text": "<|body_start_0|>\n super().__init__(*args, **kwargs)\n self._add_function('T')\n self._add_function('q')\n self._add_function('a')\n self._add_function('L')\n self.bcs = []\n self._has_boundary = None\n<|end_body_0|>\n\n<|body_start_1|>\n dispatcher = {'dirichlet': self.add_dirichlet, 'robin': self.add_robin}\n for name, func in dispatcher.items():\n if name.startswith(boundary_type):\n func(**kwargs)\n break\n else:\n raise ValueError('{} boundaries are not supported. 
Supported boundaries are: {}.'.format(boundary_type, ', '.join(dispatcher.keys())))\n<|end_body_1|>\n\n<|body_start_2|>\n if self._has_boundary is False:\n raise AttributeError('Cannot add boundary after declaring that there are no boundaries')\n norm = FacetNormal(self.mesh)\n if isinstance(g, (float, int)):\n g = Constant(g)\n integrand = -1 * self.v * dot(self.q, norm)\n if surface == 'all':\n dbc = DirichletBC(V=self.V, g=g, sub_domain='on_boundary')\n self.a += integrand * ds\n else:\n dbc = DirichletBC(V=self.V, g=g, sub_domain=surface)\n try:\n self.a += sum((integrand * ds(s) for s in surface))\n except TypeError:\n self.a += integrand * ds(surface)\n self.bcs.append(dbc)\n self._has_boundary = True\n<|end_body_2|>\n\n<|body_start_3|>\n if self._has_boundary is False:\n raise AttributeError('Cannot add boundary after declaring that there are no boundaries')\n if isinstance(g, (float, int)):\n g = Constant(g)\n if isinstance(alpha, (float, int)):\n alpha = Constant(alpha)\n a_integrand = alpha * self.v * self.T\n L_integrand = self.v * g\n if surface == 'all':\n self.a += a_integrand * ds\n self.L += L_integrand * ds\n else:\n try:\n self.a += sum((a_integrand * ds(s) for s in surface))\n self.L += sum((L_integrand * ds(s) for s in surface))\n except TypeError:\n self.a += a_integrand * ds(surface)\n self.L += L_integrand * ds(surface)\n self._has_boundary = True\n<|end_body_3|>\n\n<|body_start_4|>\n if self._has_boundary is True:\n raise AttributeError('Cannot set no boundary after boundaries have been set')\n norm = FacetNormal(self.mesh)\n self.a += -1 * self.v * dot(self.q, norm) * ds\n self._has_boundary = False\n<|end_body_4|>\n", "class_docstring": "Class to extend Problems as a mixin. This mixin extends the problem to allow boundary values by adding the `K dot(grad(T), n) ds` term. To use, define a new class with this in the inheritance chain. i.e:: class NewProblem(BoundaryMixin, Problem): pass Required Attributes (for mixin): mesh (firedrake.Mesh): The mesh that the problem is defined for. V (firedrake.FunctionSpace): A function space on which functions will be defined. v (firedrake.Function): The test function for the problem. Attributes: T (firedrake.Function): The trial function for the problem. q (firedrake.Function): A function holding the heat flux. a (firedrake.Function): The section containing the combination of terms involvi", "class_name": "BoundaryMixin", "detected_licenses": ["BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BoundaryMixin:\n \"\"\"Class to extend Problems as a mixin. This mixin extends the problem to allow boundary values by adding the `K dot(grad(T), n) ds` term. To use, define a new class with this in the inheritance chain. i.e:: class NewProblem(BoundaryMixin, Problem): pass Required Attributes (for mixin): mesh (firedrake.Mesh): The mesh that the problem is defined for. V (firedrake.FunctionSpace): A function space on which functions will be defined. v (firedrake.Function): The test function for the problem. Attributes: T (firedrake.Function): The trial function for the problem. q (firedrake.Function): A function holding the heat flux. a (firedrake.Function): The section containing the combination of terms involvi\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialiser for BoundaryMixin. Boundaries are added in the specific funcs. There is no way to remove a boundary. 
Also creates empty bcs list.\"\"\"\n <|body_0|>\n\n def add_boundary(self, boundary_type, **kwargs):\n \"\"\"Generic method for adding boundaries to the problem. To view documentation for boundary types, see the appropriate function. Args: boundary_type (string): The name of the boundary condition: - \"dirichlet\" - \"robin\" - \"neuman\" (in progress) These can be given in full, or as shortened names (e.g. \"dir\") Raises: ValueError: When the boundary is not in the supported names list.\"\"\"\n <|body_1|>\n\n def add_dirichlet(self, g, surface):\n \"\"\"Adds dirichlet boundary conditions to the problem. Args: g (Function, int, or float): The function to apply on the boundary. surface (int or list of int): The index of the boundary to apply the condition to.\"\"\"\n <|body_2|>\n\n def add_robin(self, alpha, g, surface):\n \"\"\"Adds robin boundary conditions to the problem. The Robin condition is a mixed codition and is defined by: dot(grad(T), n) = g - alpha*T Args: g (Function, int, or float): The function to apply on the boundary as per the above formula. alpha (Function, int, or float): The function to apply on the boundary as per the above formula. surface (int or list of int): The index of the boundary to apply the condition to.\"\"\"\n <|body_3|>\n\n def set_no_boundary(self):\n \"\"\"Declare that the problem will have no boundaries. Raises: AttributeError: If boundaries have already been added.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(*args, **kwargs)\n self._add_function('T')\n self._add_function('q')\n self._add_function('a')\n self._add_function('L')\n self.bcs = []\n self._has_boundary = None\n<|end_body_0|>\n\n<|body_start_1|>\n dispatcher = {'dirichlet': self.add_dirichlet, 'robin': self.add_robin}\n for name, func in dispatcher.items():\n if name.startswith(boundary_type):\n func(**kwargs)\n break\n else:\n raise ValueError('{} boundaries are not supported. 
Supported boundaries are: {}.'.format(boundary_type, ', '.join(dispatcher.keys())))\n<|end_body_1|>\n\n<|body_start_2|>\n if self._has_boundary is False:\n raise AttributeError('Cannot add boundary after declaring that there are no boundaries')\n norm = FacetNormal(self.mesh)\n if isinstance(g, (float, int)):\n g = Constant(g)\n integrand = -1 * self.v * dot(self.q, norm)\n if surface == 'all':\n dbc = DirichletBC(V=self.V, g=g, sub_domain='on_boundary')\n self.a += integrand * ds\n else:\n dbc = DirichletBC(V=self.V, g=g, sub_domain=surface)\n try:\n self.a += sum((integrand * ds(s) for s in surface))\n except TypeError:\n self.a += integrand * ds(surface)\n self.bcs.append(dbc)\n self._has_boundary = True\n<|end_body_2|>\n\n<|body_start_3|>\n if self._has_boundary is False:\n raise AttributeError('Cannot add boundary after declaring that there are no boundaries')\n if isinstance(g, (float, int)):\n g = Constant(g)\n if isinstance(alpha, (float, int)):\n alpha = Constant(alpha)\n a_integrand = alpha * self.v * self.T\n L_integrand = self.v * g\n if surface == 'all':\n self.a += a_integrand * ds\n self.L += L_integrand * ds\n else:\n try:\n self.a += sum((a_integrand * ds(s) for s in surface))\n self.L += sum((L_integrand * ds(s) for s in surface))\n except TypeError:\n self.a += a_integrand * ds(surface)\n self.L += L_integrand * ds(surface)\n self._has_boundary = True\n<|end_body_3|>\n\n<|body_start_4|>\n if self._has_boundary is True:\n raise AttributeError('Cannot set no boundary after boundaries have been set')\n norm = FacetNormal(self.mesh)\n self.a += -1 * self.v * dot(self.q, norm) * ds\n self._has_boundary = False\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000024", "length_bytes": 6330, "license_type": "permissive", "methods": [{"docstring": "Initialiser for BoundaryMixin. Boundaries are added in the specific funcs. There is no way to remove a boundary. Also creates empty bcs list.", "name": "__init__", "signature": "def __init__(self, *args, **kwargs)"}, {"docstring": "Generic method for adding boundaries to the problem. To view documentation for boundary types, see the appropriate function. Args: boundary_type (string): The name of the boundary condition: - \"dirichlet\" - \"robin\" - \"neuman\" (in progress) These can be given in full, or as shortened names (e.g. \"dir\") Raises: ValueError: When the boundary is not in the supported names list.", "name": "add_boundary", "signature": "def add_boundary(self, boundary_type, **kwargs)"}, {"docstring": "Adds dirichlet boundary conditions to the problem. Args: g (Function, int, or float): The function to apply on the boundary. surface (int or list of int): The index of the boundary to apply the condition to.", "name": "add_dirichlet", "signature": "def add_dirichlet(self, g, surface)"}, {"docstring": "Adds robin boundary conditions to the problem. The Robin condition is a mixed codition and is defined by: dot(grad(T), n) = g - alpha*T Args: g (Function, int, or float): The function to apply on the boundary as per the above formula. alpha (Function, int, or float): The function to apply on the boundary as per the above formula. surface (int or list of int): The index of the boundary to apply the condition to.", "name": "add_robin", "signature": "def add_robin(self, alpha, g, surface)"}, {"docstring": "Declare that the problem will have no boundaries. 
Raises: AttributeError: If boundaries have already been added.", "name": "set_no_boundary", "signature": "def set_no_boundary(self)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_037667", "prompt": "Implement the Python class `BoundaryMixin` described below.\n\nClass description:\nClass to extend Problems as a mixin. This mixin extends the problem to allow boundary values by adding the `K dot(grad(T), n) ds` term. To use, define a new class with this in the inheritance chain. i.e:: class NewProblem(BoundaryMixin, Problem): pass Required Attributes (for mixin): mesh (firedrake.Mesh): The mesh that the problem is defined for. V (firedrake.FunctionSpace): A function space on which functions will be defined. v (firedrake.Function): The test function for the problem. Attributes: T (firedrake.Function): The trial function for the problem. q (firedrake.Function): A function holding the heat flux. a (firedrake.Function): The section containing the combination of terms involvi\n\nMethod signatures and docstrings:\n- def __init__(self, *args, **kwargs): Initialiser for BoundaryMixin. Boundaries are added in the specific funcs. There is no way to remove a boundary. Also creates empty bcs list.\n- def add_boundary(self, boundary_type, **kwargs): Generic method for adding boundaries to the problem. To view documentation for boundary types, see the appropriate function. Args: boundary_type (string): The name of the boundary condition: - \"dirichlet\" - \"robin\" - \"neuman\" (in progress) These can be given in full, or as shortened names (e.g. \"dir\") Raises: ValueError: When the boundary is not in the supported names list.\n- def add_dirichlet(self, g, surface): Adds dirichlet boundary conditions to the problem. Args: g (Function, int, or float): The function to apply on the boundary. surface (int or list of int): The index of the boundary to apply the condition to.\n- def add_robin(self, alpha, g, surface): Adds robin boundary conditions to the problem. The Robin condition is a mixed codition and is defined by: dot(grad(T), n) = g - alpha*T Args: g (Function, int, or float): The function to apply on the boundary as per the above formula. alpha (Function, int, or float): The function to apply on the boundary as per the above formula. surface (int or list of int): The index of the boundary to apply the condition to.\n- def set_no_boundary(self): Declare that the problem will have no boundaries. Raises: AttributeError: If boundaries have already been added.", "prompted_full_text": "Implement the Python class `BoundaryMixin` described below.\n\nClass description:\nClass to extend Problems as a mixin. This mixin extends the problem to allow boundary values by adding the `K dot(grad(T), n) ds` term. To use, define a new class with this in the inheritance chain. i.e:: class NewProblem(BoundaryMixin, Problem): pass Required Attributes (for mixin): mesh (firedrake.Mesh): The mesh that the problem is defined for. V (firedrake.FunctionSpace): A function space on which functions will be defined. v (firedrake.Function): The test function for the problem. Attributes: T (firedrake.Function): The trial function for the problem. q (firedrake.Function): A function holding the heat flux. a (firedrake.Function): The section containing the combination of terms involvi\n\nMethod signatures and docstrings:\n- def __init__(self, *args, **kwargs): Initialiser for BoundaryMixin. Boundaries are added in the specific funcs. There is no way to remove a boundary. 
Also creates empty bcs list.\n- def add_boundary(self, boundary_type, **kwargs): Generic method for adding boundaries to the problem. To view documentation for boundary types, see the appropriate function. Args: boundary_type (string): The name of the boundary condition: - \"dirichlet\" - \"robin\" - \"neuman\" (in progress) These can be given in full, or as shortened names (e.g. \"dir\") Raises: ValueError: When the boundary is not in the supported names list.\n- def add_dirichlet(self, g, surface): Adds dirichlet boundary conditions to the problem. Args: g (Function, int, or float): The function to apply on the boundary. surface (int or list of int): The index of the boundary to apply the condition to.\n- def add_robin(self, alpha, g, surface): Adds robin boundary conditions to the problem. The Robin condition is a mixed codition and is defined by: dot(grad(T), n) = g - alpha*T Args: g (Function, int, or float): The function to apply on the boundary as per the above formula. alpha (Function, int, or float): The function to apply on the boundary as per the above formula. surface (int or list of int): The index of the boundary to apply the condition to.\n- def set_no_boundary(self): Declare that the problem will have no boundaries. Raises: AttributeError: If boundaries have already been added.\n\n<|skeleton|>\nclass BoundaryMixin:\n \"\"\"Class to extend Problems as a mixin. This mixin extends the problem to allow boundary values by adding the `K dot(grad(T), n) ds` term. To use, define a new class with this in the inheritance chain. i.e:: class NewProblem(BoundaryMixin, Problem): pass Required Attributes (for mixin): mesh (firedrake.Mesh): The mesh that the problem is defined for. V (firedrake.FunctionSpace): A function space on which functions will be defined. v (firedrake.Function): The test function for the problem. Attributes: T (firedrake.Function): The trial function for the problem. q (firedrake.Function): A function holding the heat flux. a (firedrake.Function): The section containing the combination of terms involvi\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialiser for BoundaryMixin. Boundaries are added in the specific funcs. There is no way to remove a boundary. Also creates empty bcs list.\"\"\"\n <|body_0|>\n\n def add_boundary(self, boundary_type, **kwargs):\n \"\"\"Generic method for adding boundaries to the problem. To view documentation for boundary types, see the appropriate function. Args: boundary_type (string): The name of the boundary condition: - \"dirichlet\" - \"robin\" - \"neuman\" (in progress) These can be given in full, or as shortened names (e.g. \"dir\") Raises: ValueError: When the boundary is not in the supported names list.\"\"\"\n <|body_1|>\n\n def add_dirichlet(self, g, surface):\n \"\"\"Adds dirichlet boundary conditions to the problem. Args: g (Function, int, or float): The function to apply on the boundary. surface (int or list of int): The index of the boundary to apply the condition to.\"\"\"\n <|body_2|>\n\n def add_robin(self, alpha, g, surface):\n \"\"\"Adds robin boundary conditions to the problem. The Robin condition is a mixed codition and is defined by: dot(grad(T), n) = g - alpha*T Args: g (Function, int, or float): The function to apply on the boundary as per the above formula. alpha (Function, int, or float): The function to apply on the boundary as per the above formula. 
surface (int or list of int): The index of the boundary to apply the condition to.\"\"\"\n <|body_3|>\n\n def set_no_boundary(self):\n \"\"\"Declare that the problem will have no boundaries. Raises: AttributeError: If boundaries have already been added.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(*args, **kwargs)\n self._add_function('T')\n self._add_function('q')\n self._add_function('a')\n self._add_function('L')\n self.bcs = []\n self._has_boundary = None\n<|end_body_0|>\n\n<|body_start_1|>\n dispatcher = {'dirichlet': self.add_dirichlet, 'robin': self.add_robin}\n for name, func in dispatcher.items():\n if name.startswith(boundary_type):\n func(**kwargs)\n break\n else:\n raise ValueError('{} boundaries are not supported. Supported boundaries are: {}.'.format(boundary_type, ', '.join(dispatcher.keys())))\n<|end_body_1|>\n\n<|body_start_2|>\n if self._has_boundary is False:\n raise AttributeError('Cannot add boundary after declaring that there are no boundaries')\n norm = FacetNormal(self.mesh)\n if isinstance(g, (float, int)):\n g = Constant(g)\n integrand = -1 * self.v * dot(self.q, norm)\n if surface == 'all':\n dbc = DirichletBC(V=self.V, g=g, sub_domain='on_boundary')\n self.a += integrand * ds\n else:\n dbc = DirichletBC(V=self.V, g=g, sub_domain=surface)\n try:\n self.a += sum((integrand * ds(s) for s in surface))\n except TypeError:\n self.a += integrand * ds(surface)\n self.bcs.append(dbc)\n self._has_boundary = True\n<|end_body_2|>\n\n<|body_start_3|>\n if self._has_boundary is False:\n raise AttributeError('Cannot add boundary after declaring that there are no boundaries')\n if isinstance(g, (float, int)):\n g = Constant(g)\n if isinstance(alpha, (float, int)):\n alpha = Constant(alpha)\n a_integrand = alpha * self.v * self.T\n L_integrand = self.v * g\n if surface == 'all':\n self.a += a_integrand * ds\n self.L += L_integrand * ds\n else:\n try:\n self.a += sum((a_integrand * ds(s) for s in surface))\n self.L += sum((L_integrand * ds(s) for s in surface))\n except TypeError:\n self.a += a_integrand * ds(surface)\n self.L += L_integrand * ds(surface)\n self._has_boundary = True\n<|end_body_3|>\n\n<|body_start_4|>\n if self._has_boundary is True:\n raise AttributeError('Cannot set no boundary after boundaries have been set')\n norm = FacetNormal(self.mesh)\n self.a += -1 * self.v * dot(self.q, norm) * ds\n self._has_boundary = False\n<|end_body_4|>\n", "revision_id": "cc4e7f7b9abb498893aaa05e2b25416f513905b0", "skeleton": "<|skeleton|>\nclass BoundaryMixin:\n \"\"\"Class to extend Problems as a mixin. This mixin extends the problem to allow boundary values by adding the `K dot(grad(T), n) ds` term. To use, define a new class with this in the inheritance chain. i.e:: class NewProblem(BoundaryMixin, Problem): pass Required Attributes (for mixin): mesh (firedrake.Mesh): The mesh that the problem is defined for. V (firedrake.FunctionSpace): A function space on which functions will be defined. v (firedrake.Function): The test function for the problem. Attributes: T (firedrake.Function): The trial function for the problem. q (firedrake.Function): A function holding the heat flux. a (firedrake.Function): The section containing the combination of terms involvi\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialiser for BoundaryMixin. Boundaries are added in the specific funcs. There is no way to remove a boundary. 
Also creates empty bcs list.\"\"\"\n <|body_0|>\n\n def add_boundary(self, boundary_type, **kwargs):\n \"\"\"Generic method for adding boundaries to the problem. To view documentation for boundary types, see the appropriate function. Args: boundary_type (string): The name of the boundary condition: - \"dirichlet\" - \"robin\" - \"neuman\" (in progress) These can be given in full, or as shortened names (e.g. \"dir\") Raises: ValueError: When the boundary is not in the supported names list.\"\"\"\n <|body_1|>\n\n def add_dirichlet(self, g, surface):\n \"\"\"Adds dirichlet boundary conditions to the problem. Args: g (Function, int, or float): The function to apply on the boundary. surface (int or list of int): The index of the boundary to apply the condition to.\"\"\"\n <|body_2|>\n\n def add_robin(self, alpha, g, surface):\n \"\"\"Adds robin boundary conditions to the problem. The Robin condition is a mixed codition and is defined by: dot(grad(T), n) = g - alpha*T Args: g (Function, int, or float): The function to apply on the boundary as per the above formula. alpha (Function, int, or float): The function to apply on the boundary as per the above formula. surface (int or list of int): The index of the boundary to apply the condition to.\"\"\"\n <|body_3|>\n\n def set_no_boundary(self):\n \"\"\"Declare that the problem will have no boundaries. Raises: AttributeError: If boundaries have already been added.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class BoundaryMixin:\n \"\"\"Class to extend Problems as a mixin. This mixin extends the problem to allow boundary values by adding the `K dot(grad(T), n) ds` term. To use, define a new class with this in the inheritance chain. i.e:: class NewProblem(BoundaryMixin, Problem): pass Required Attributes (for mixin): mesh (firedrake.Mesh): The mesh that the problem is defined for. V (firedrake.FunctionSpace): A function space on which functions will be defined. v (firedrake.Function): The test function for the problem. Attributes: T (firedrake.Function): The trial function for the problem. q (firedrake.Function): A function holding the heat flux. a (firedrake.Function): The section containing the combination of terms involvi\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialiser for BoundaryMixin. Boundaries are added in the specific funcs. There is no way to remove a boundary. Also creates empty bcs list.\"\"\"\n super().__init__(*args, **kwargs)\n self._add_function('T')\n self._add_function('q')\n self._add_function('a')\n self._add_function('L')\n self.bcs = []\n self._has_boundary = None\n\n def add_boundary(self, boundary_type, **kwargs):\n \"\"\"Generic method for adding boundaries to the problem. To view documentation for boundary types, see the appropriate function. Args: boundary_type (string): The name of the boundary condition: - \"dirichlet\" - \"robin\" - \"neuman\" (in progress) These can be given in full, or as shortened names (e.g. \"dir\") Raises: ValueError: When the boundary is not in the supported names list.\"\"\"\n dispatcher = {'dirichlet': self.add_dirichlet, 'robin': self.add_robin}\n for name, func in dispatcher.items():\n if name.startswith(boundary_type):\n func(**kwargs)\n break\n else:\n raise ValueError('{} boundaries are not supported. 
Supported boundaries are: {}.'.format(boundary_type, ', '.join(dispatcher.keys())))\n\n def add_dirichlet(self, g, surface):\n \"\"\"Adds dirichlet boundary conditions to the problem. Args: g (Function, int, or float): The function to apply on the boundary. surface (int or list of int): The index of the boundary to apply the condition to.\"\"\"\n if self._has_boundary is False:\n raise AttributeError('Cannot add boundary after declaring that there are no boundaries')\n norm = FacetNormal(self.mesh)\n if isinstance(g, (float, int)):\n g = Constant(g)\n integrand = -1 * self.v * dot(self.q, norm)\n if surface == 'all':\n dbc = DirichletBC(V=self.V, g=g, sub_domain='on_boundary')\n self.a += integrand * ds\n else:\n dbc = DirichletBC(V=self.V, g=g, sub_domain=surface)\n try:\n self.a += sum((integrand * ds(s) for s in surface))\n except TypeError:\n self.a += integrand * ds(surface)\n self.bcs.append(dbc)\n self._has_boundary = True\n\n def add_robin(self, alpha, g, surface):\n \"\"\"Adds robin boundary conditions to the problem. The Robin condition is a mixed codition and is defined by: dot(grad(T), n) = g - alpha*T Args: g (Function, int, or float): The function to apply on the boundary as per the above formula. alpha (Function, int, or float): The function to apply on the boundary as per the above formula. surface (int or list of int): The index of the boundary to apply the condition to.\"\"\"\n if self._has_boundary is False:\n raise AttributeError('Cannot add boundary after declaring that there are no boundaries')\n if isinstance(g, (float, int)):\n g = Constant(g)\n if isinstance(alpha, (float, int)):\n alpha = Constant(alpha)\n a_integrand = alpha * self.v * self.T\n L_integrand = self.v * g\n if surface == 'all':\n self.a += a_integrand * ds\n self.L += L_integrand * ds\n else:\n try:\n self.a += sum((a_integrand * ds(s) for s in surface))\n self.L += sum((L_integrand * ds(s) for s in surface))\n except TypeError:\n self.a += a_integrand * ds(surface)\n self.L += L_integrand * ds(surface)\n self._has_boundary = True\n\n def set_no_boundary(self):\n \"\"\"Declare that the problem will have no boundaries. Raises: AttributeError: If boundaries have already been added.\"\"\"\n if self._has_boundary is True:\n raise AttributeError('Cannot set no boundary after boundaries have been set')\n norm = FacetNormal(self.mesh)\n self.a += -1 * self.v * dot(self.q, norm) * ds\n self._has_boundary = False\n", "source": "the_stack_v2_python_sparse", "source_path": "TTiP/problem_mixins/boundaries_mixin.py", "source_repo": "AndrewLister-STFC/TTiP", "split": "val", "star_events_count": 0}
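The `add_boundary` method in the BoundaryMixin record above resolves shortened boundary names ("dir", "rob") by prefix matching over a dict of handlers, falling through to a ValueError. Below is a minimal, firedrake-free sketch of that dispatch; the stub handlers are assumptions that just echo their arguments, standing in for the record's real UFL form assembly.

```python
# Firedrake-free sketch of the prefix dispatch used by add_boundary.
# The handler signatures mirror the record; the stub bodies are placeholders.

def add_dirichlet(g, surface):
    return ('dirichlet', g, surface)

def add_robin(alpha, g, surface):
    return ('robin', alpha, g, surface)

def add_boundary(boundary_type, **kwargs):
    dispatcher = {'dirichlet': add_dirichlet, 'robin': add_robin}
    for name, func in dispatcher.items():
        if name.startswith(boundary_type):
            return func(**kwargs)
    raise ValueError('{} boundaries are not supported. Supported boundaries '
                     'are: {}.'.format(boundary_type, ', '.join(dispatcher)))

print(add_boundary('dir', g=1.0, surface='all'))
print(add_boundary('rob', alpha=2.0, g=0.5, surface=[1, 2]))
# Caveat of prefix matching: every name starts with '', so an empty
# boundary_type silently dispatches to whichever dict entry iterates first.
```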
{"blob_id": "d5248ef4b524621538e7af886947d13b71a01b66", "bodies": ["self.magnitude = magnitude\nself.c_i = c_i\nself.nsamples = nsamples\nself.stdev_spread = stdev_spread\nloss_c = model.output[0][c_i]\ngrad_symbolic = K.gradients(loss_c, model.input)[0]\nself.iterate = K.function([model.input], grad_symbolic)", "total_gradients = np.zeros_like(x_value)\nstdev = self.stdev_spread * (np.max(x_value) - np.min(x_value))\nmean_of_image = np.mean(x_value)\ntotal_gradients = np.zeros_like(x_value)\nx_shape = list(x_value.shape)\nx_shape[0] = self.nsamples\nnoise = np.random.normal(mean_of_image, stdev, x_value.shape)\nx_plus_noise = x_value + noise\ngrad = self.iterate(x_plus_noise)\nif self.magnitude:\n grad = grad * grad\ntotal_gradients = np.mean(grad, axis=0, keepdims=True)\nreturn total_gradients"], "bodies_text": "<|body_start_0|>\n self.magnitude = magnitude\n self.c_i = c_i\n self.nsamples = nsamples\n self.stdev_spread = stdev_spread\n loss_c = model.output[0][c_i]\n grad_symbolic = K.gradients(loss_c, model.input)[0]\n self.iterate = K.function([model.input], grad_symbolic)\n<|end_body_0|>\n\n<|body_start_1|>\n total_gradients = np.zeros_like(x_value)\n stdev = self.stdev_spread * (np.max(x_value) - np.min(x_value))\n mean_of_image = np.mean(x_value)\n total_gradients = np.zeros_like(x_value)\n x_shape = list(x_value.shape)\n x_shape[0] = self.nsamples\n noise = np.random.normal(mean_of_image, stdev, x_value.shape)\n x_plus_noise = x_value + noise\n grad = self.iterate(x_plus_noise)\n if self.magnitude:\n grad = grad * grad\n total_gradients = np.mean(grad, axis=0, keepdims=True)\n return total_gradients\n<|end_body_1|>\n", "class_docstring": "", "class_name": "SmoothedMask", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SmoothedMask:\n\n def __init__(self, model, c_i, stdev_spread=0.15, nsamples=25, magnitude=True):\n \"\"\"Define the smoothGrad Mask class to return the smooth grad mask model : the deep model used c_i : the index of the class concerned stdev_spread: Amount of noise to add to the input, as fraction of the total spread (x_max - x_min). Defaults to 15%. Level of noise nsamples: Number of samples to average across to get the smooth gradient. magnitude: If true, computes the sum of squares of gradients instead of just the sum. Defaults to true.\"\"\"\n <|body_0|>\n\n def GetMask(self, x_value):\n \"\"\"Returns a mask that is smoothed with the SmoothGrad method. The average of gradient of noisy image Args: x_value: Input value, not batched. 
Ie the input image\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.magnitude = magnitude\n self.c_i = c_i\n self.nsamples = nsamples\n self.stdev_spread = stdev_spread\n loss_c = model.output[0][c_i]\n grad_symbolic = K.gradients(loss_c, model.input)[0]\n self.iterate = K.function([model.input], grad_symbolic)\n<|end_body_0|>\n\n<|body_start_1|>\n total_gradients = np.zeros_like(x_value)\n stdev = self.stdev_spread * (np.max(x_value) - np.min(x_value))\n mean_of_image = np.mean(x_value)\n total_gradients = np.zeros_like(x_value)\n x_shape = list(x_value.shape)\n x_shape[0] = self.nsamples\n noise = np.random.normal(mean_of_image, stdev, x_value.shape)\n x_plus_noise = x_value + noise\n grad = self.iterate(x_plus_noise)\n if self.magnitude:\n grad = grad * grad\n total_gradients = np.mean(grad, axis=0, keepdims=True)\n return total_gradients\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000025", "length_bytes": 9618, "license_type": "no_license", "methods": [{"docstring": "Define the smoothGrad Mask class to return the smooth grad mask model : the deep model used c_i : the index of the class concerned stdev_spread: Amount of noise to add to the input, as fraction of the total spread (x_max - x_min). Defaults to 15%. Level of noise nsamples: Number of samples to average across to get the smooth gradient. magnitude: If true, computes the sum of squares of gradients instead of just the sum. Defaults to true.", "name": "__init__", "signature": "def __init__(self, model, c_i, stdev_spread=0.15, nsamples=25, magnitude=True)"}, {"docstring": "Returns a mask that is smoothed with the SmoothGrad method. The average of gradient of noisy image Args: x_value: Input value, not batched. Ie the input image", "name": "GetMask", "signature": "def GetMask(self, x_value)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_030771", "prompt": "Implement the Python class `SmoothedMask` described below.\n\nClass description:\nImplement the SmoothedMask class.\n\nMethod signatures and docstrings:\n- def __init__(self, model, c_i, stdev_spread=0.15, nsamples=25, magnitude=True): Define the smoothGrad Mask class to return the smooth grad mask model : the deep model used c_i : the index of the class concerned stdev_spread: Amount of noise to add to the input, as fraction of the total spread (x_max - x_min). Defaults to 15%. Level of noise nsamples: Number of samples to average across to get the smooth gradient. magnitude: If true, computes the sum of squares of gradients instead of just the sum. Defaults to true.\n- def GetMask(self, x_value): Returns a mask that is smoothed with the SmoothGrad method. The average of gradient of noisy image Args: x_value: Input value, not batched. Ie the input image", "prompted_full_text": "Implement the Python class `SmoothedMask` described below.\n\nClass description:\nImplement the SmoothedMask class.\n\nMethod signatures and docstrings:\n- def __init__(self, model, c_i, stdev_spread=0.15, nsamples=25, magnitude=True): Define the smoothGrad Mask class to return the smooth grad mask model : the deep model used c_i : the index of the class concerned stdev_spread: Amount of noise to add to the input, as fraction of the total spread (x_max - x_min). Defaults to 15%. Level of noise nsamples: Number of samples to average across to get the smooth gradient. magnitude: If true, computes the sum of squares of gradients instead of just the sum. 
Defaults to true.\n- def GetMask(self, x_value): Returns a mask that is smoothed with the SmoothGrad method. The average of gradient of noisy image Args: x_value: Input value, not batched. Ie the input image\n\n<|skeleton|>\nclass SmoothedMask:\n\n def __init__(self, model, c_i, stdev_spread=0.15, nsamples=25, magnitude=True):\n \"\"\"Define the smoothGrad Mask class to return the smooth grad mask model : the deep model used c_i : the index of the class concerned stdev_spread: Amount of noise to add to the input, as fraction of the total spread (x_max - x_min). Defaults to 15%. Level of noise nsamples: Number of samples to average across to get the smooth gradient. magnitude: If true, computes the sum of squares of gradients instead of just the sum. Defaults to true.\"\"\"\n <|body_0|>\n\n def GetMask(self, x_value):\n \"\"\"Returns a mask that is smoothed with the SmoothGrad method. The average of gradient of noisy image Args: x_value: Input value, not batched. Ie the input image\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.magnitude = magnitude\n self.c_i = c_i\n self.nsamples = nsamples\n self.stdev_spread = stdev_spread\n loss_c = model.output[0][c_i]\n grad_symbolic = K.gradients(loss_c, model.input)[0]\n self.iterate = K.function([model.input], grad_symbolic)\n<|end_body_0|>\n\n<|body_start_1|>\n total_gradients = np.zeros_like(x_value)\n stdev = self.stdev_spread * (np.max(x_value) - np.min(x_value))\n mean_of_image = np.mean(x_value)\n total_gradients = np.zeros_like(x_value)\n x_shape = list(x_value.shape)\n x_shape[0] = self.nsamples\n noise = np.random.normal(mean_of_image, stdev, x_value.shape)\n x_plus_noise = x_value + noise\n grad = self.iterate(x_plus_noise)\n if self.magnitude:\n grad = grad * grad\n total_gradients = np.mean(grad, axis=0, keepdims=True)\n return total_gradients\n<|end_body_1|>\n", "revision_id": "60da35f58ffe9e24e99b6b20dd7a46b02815ad79", "skeleton": "<|skeleton|>\nclass SmoothedMask:\n\n def __init__(self, model, c_i, stdev_spread=0.15, nsamples=25, magnitude=True):\n \"\"\"Define the smoothGrad Mask class to return the smooth grad mask model : the deep model used c_i : the index of the class concerned stdev_spread: Amount of noise to add to the input, as fraction of the total spread (x_max - x_min). Defaults to 15%. Level of noise nsamples: Number of samples to average across to get the smooth gradient. magnitude: If true, computes the sum of squares of gradients instead of just the sum. Defaults to true.\"\"\"\n <|body_0|>\n\n def GetMask(self, x_value):\n \"\"\"Returns a mask that is smoothed with the SmoothGrad method. The average of gradient of noisy image Args: x_value: Input value, not batched. Ie the input image\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SmoothedMask:\n def __init__(self, model, c_i, stdev_spread=0.15, nsamples=25, magnitude=True):\n \"\"\"Define the smoothGrad Mask class to return the smooth grad mask model : the deep model used c_i : the index of the class concerned stdev_spread: Amount of noise to add to the input, as fraction of the total spread (x_max - x_min). Defaults to 15%. Level of noise nsamples: Number of samples to average across to get the smooth gradient. magnitude: If true, computes the sum of squares of gradients instead of just the sum. 
Defaults to true.\"\"\"\n self.magnitude = magnitude\n self.c_i = c_i\n self.nsamples = nsamples\n self.stdev_spread = stdev_spread\n loss_c = model.output[0][c_i]\n grad_symbolic = K.gradients(loss_c, model.input)[0]\n self.iterate = K.function([model.input], grad_symbolic)\n\n def GetMask(self, x_value):\n \"\"\"Returns a mask that is smoothed with the SmoothGrad method. The average of gradient of noisy image Args: x_value: Input value, not batched. Ie the input image\"\"\"\n total_gradients = np.zeros_like(x_value)\n stdev = self.stdev_spread * (np.max(x_value) - np.min(x_value))\n mean_of_image = np.mean(x_value)\n total_gradients = np.zeros_like(x_value)\n x_shape = list(x_value.shape)\n x_shape[0] = self.nsamples\n noise = np.random.normal(mean_of_image, stdev, x_value.shape)\n x_plus_noise = x_value + noise\n grad = self.iterate(x_plus_noise)\n if self.magnitude:\n grad = grad * grad\n total_gradients = np.mean(grad, axis=0, keepdims=True)\n return total_gradients\n", "source": "the_stack_v2_python_sparse", "source_path": "Classif_Paintings/saliencyMaps.py", "source_repo": "ngonthier/Icono_Art_Analysis", "split": "val", "star_events_count": 2}
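Worth noting about the SmoothedMask record above: `GetMask` builds `x_shape` with `nsamples` as its leading dimension but then draws noise with `x_value.shape`, so only one noisy copy is actually averaged. The sketch below averages gradients over all `nsamples` noisy copies, which is the usual SmoothGrad loop; `grad_fn` is a hypothetical stand-in for the record's `K.function` gradient evaluator, and zero-mean noise is assumed here whereas the record centres noise on the image mean.

```python
import numpy as np

def smooth_grad(grad_fn, x_value, stdev_spread=0.15, nsamples=25, magnitude=True):
    # Noise scale as a fraction of the input's dynamic range, as in the record.
    stdev = stdev_spread * (np.max(x_value) - np.min(x_value))
    total = np.zeros_like(x_value, dtype=float)
    for _ in range(nsamples):
        noise = np.random.normal(0.0, stdev, x_value.shape)
        grad = grad_fn(x_value + noise)            # grad_fn: assumed callable
        total += grad * grad if magnitude else grad
    return total / nsamples

# Toy stand-in for the model gradient: grad of sum(x**2) is 2*x.
mask = smooth_grad(lambda x: 2 * x, np.linspace(0.0, 1.0, 8))
print(np.round(mask, 3))
```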
{"blob_id": "583831e75af3bcc0eb02847a81b3def88139a26a", "bodies": ["self.head = head\ncheckPointer = head\nself.llLength = 0\nwhile checkPointer:\n self.llLength += 1\n checkPointer = checkPointer.next", "pointer = self.head\nfor i in range(1, randint(1, self.llLength)):\n pointer = pointer.next\nreturn pointer.val"], "bodies_text": "<|body_start_0|>\n self.head = head\n checkPointer = head\n self.llLength = 0\n while checkPointer:\n self.llLength += 1\n checkPointer = checkPointer.next\n<|end_body_0|>\n\n<|body_start_1|>\n pointer = self.head\n for i in range(1, randint(1, self.llLength)):\n pointer = pointer.next\n return pointer.val\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def __init__(self, head):\n \"\"\"@param head The linked list's head. Note that the head is guaranteed to be not null, so it contains at least one node. :type head: ListNode\"\"\"\n <|body_0|>\n\n def getRandom(self):\n \"\"\"Returns a random node's value. :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.head = head\n checkPointer = head\n self.llLength = 0\n while checkPointer:\n self.llLength += 1\n checkPointer = checkPointer.next\n<|end_body_0|>\n\n<|body_start_1|>\n pointer = self.head\n for i in range(1, randint(1, self.llLength)):\n pointer = pointer.next\n return pointer.val\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000026", "length_bytes": 1107, "license_type": "no_license", "methods": [{"docstring": "@param head The linked list's head. Note that the head is guaranteed to be not null, so it contains at least one node. :type head: ListNode", "name": "__init__", "signature": "def __init__(self, head)"}, {"docstring": "Returns a random node's value. :rtype: int", "name": "getRandom", "signature": "def getRandom(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_008968", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def __init__(self, head): @param head The linked list's head. Note that the head is guaranteed to be not null, so it contains at least one node. :type head: ListNode\n- def getRandom(self): Returns a random node's value. :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def __init__(self, head): @param head The linked list's head. Note that the head is guaranteed to be not null, so it contains at least one node. :type head: ListNode\n- def getRandom(self): Returns a random node's value. :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def __init__(self, head):\n \"\"\"@param head The linked list's head. Note that the head is guaranteed to be not null, so it contains at least one node. :type head: ListNode\"\"\"\n <|body_0|>\n\n def getRandom(self):\n \"\"\"Returns a random node's value. 
:rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.head = head\n checkPointer = head\n self.llLength = 0\n while checkPointer:\n self.llLength += 1\n checkPointer = checkPointer.next\n<|end_body_0|>\n\n<|body_start_1|>\n pointer = self.head\n for i in range(1, randint(1, self.llLength)):\n pointer = pointer.next\n return pointer.val\n<|end_body_1|>\n", "revision_id": "5deff070bb9f6b19a1cfc0a6086ac155496fbb78", "skeleton": "<|skeleton|>\nclass Solution:\n\n def __init__(self, head):\n \"\"\"@param head The linked list's head. Note that the head is guaranteed to be not null, so it contains at least one node. :type head: ListNode\"\"\"\n <|body_0|>\n\n def getRandom(self):\n \"\"\"Returns a random node's value. :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def __init__(self, head):\n \"\"\"@param head The linked list's head. Note that the head is guaranteed to be not null, so it contains at least one node. :type head: ListNode\"\"\"\n self.head = head\n checkPointer = head\n self.llLength = 0\n while checkPointer:\n self.llLength += 1\n checkPointer = checkPointer.next\n\n def getRandom(self):\n \"\"\"Returns a random node's value. :rtype: int\"\"\"\n pointer = self.head\n for i in range(1, randint(1, self.llLength)):\n pointer = pointer.next\n return pointer.val\n", "source": "the_stack_v2_python_sparse", "source_path": "lc_linked_list_random_node.py", "source_repo": "vincentt117/coding_challenge", "split": "val", "star_events_count": 1}
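The random-node record above is uniform because `randint(1, L)` is inclusive and `range(1, k)` advances `k - 1` times, so the walk length is uniform over 0..L-1. A self-contained check of that argument, with a minimal `ListNode` assumed here since the record relies on one supplied by the judging harness:

```python
from collections import Counter
from random import randint

class ListNode:                              # assumed node shape, not in record
    def __init__(self, val, nxt=None):
        self.val, self.next = val, nxt

class Solution:
    def __init__(self, head):
        self.head = head
        self.llLength = 0
        node = head
        while node:                          # one O(n) pass to count nodes
            self.llLength += 1
            node = node.next

    def getRandom(self):
        pointer = self.head
        # randint(1, L) yields 1..L; range(1, k) walks k - 1 steps, covering
        # offsets 0..L-1, so every node is equally likely.
        for _ in range(1, randint(1, self.llLength)):
            pointer = pointer.next
        return pointer.val

s = Solution(ListNode(1, ListNode(2, ListNode(3))))
print(Counter(s.getRandom() for _ in range(30000)))  # counts near 10000 each
```

The record trades the O(1)-space reservoir-sampling alternative for a precomputed length and O(n) walks per query.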
{"blob_id": "ad9a0bf76501399b44d95bf3604cd7c19aeee241", "bodies": ["m = len(matrix)\nif m == 0:\n self.sums = []\n return\nn = len(matrix[0])\nself.sums = [[0] * (n + 1) for _ in range(m + 1)]\nsums = self.sums\nfor i in range(1, m + 1):\n for j in range(1, n + 1):\n sums[i][j] = matrix[i - 1][j - 1] + sums[i - 1][j] + sums[i][j - 1] - sums[i - 1][j - 1]", "sums = self.sums\nr = sums[row2 + 1][col2 + 1]\nr -= sums[row2 + 1][col1]\nr -= sums[row1][col2 + 1]\nr += sums[row1][col1]\nreturn r"], "bodies_text": "<|body_start_0|>\n m = len(matrix)\n if m == 0:\n self.sums = []\n return\n n = len(matrix[0])\n self.sums = [[0] * (n + 1) for _ in range(m + 1)]\n sums = self.sums\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n sums[i][j] = matrix[i - 1][j - 1] + sums[i - 1][j] + sums[i][j - 1] - sums[i - 1][j - 1]\n<|end_body_0|>\n\n<|body_start_1|>\n sums = self.sums\n r = sums[row2 + 1][col2 + 1]\n r -= sums[row2 + 1][col1]\n r -= sums[row1][col2 + 1]\n r += sums[row1][col1]\n return r\n<|end_body_1|>\n", "class_docstring": "", "class_name": "NumMatrix", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass NumMatrix:\n\n def __init__(self, matrix):\n \"\"\":type matrix: List[List[int]]\"\"\"\n <|body_0|>\n\n def sumRegion(self, row1, col1, row2, col2):\n \"\"\":type row1: int :type col1: int :type row2: int :type col2: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n m = len(matrix)\n if m == 0:\n self.sums = []\n return\n n = len(matrix[0])\n self.sums = [[0] * (n + 1) for _ in range(m + 1)]\n sums = self.sums\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n sums[i][j] = matrix[i - 1][j - 1] + sums[i - 1][j] + sums[i][j - 1] - sums[i - 1][j - 1]\n<|end_body_0|>\n\n<|body_start_1|>\n sums = self.sums\n r = sums[row2 + 1][col2 + 1]\n r -= sums[row2 + 1][col1]\n r -= sums[row1][col2 + 1]\n r += sums[row1][col1]\n return r\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000027", "length_bytes": 46454, "license_type": "no_license", "methods": [{"docstring": ":type matrix: List[List[int]]", "name": "__init__", "signature": "def __init__(self, matrix)"}, {"docstring": ":type row1: int :type col1: int :type row2: int :type col2: int :rtype: int", "name": "sumRegion", "signature": "def sumRegion(self, row1, col1, row2, col2)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004705", "prompt": "Implement the Python class `NumMatrix` described below.\n\nClass description:\nImplement the NumMatrix class.\n\nMethod signatures and docstrings:\n- def __init__(self, matrix): :type matrix: List[List[int]]\n- def sumRegion(self, row1, col1, row2, col2): :type row1: int :type col1: int :type row2: int :type col2: int :rtype: int", "prompted_full_text": "Implement the Python class `NumMatrix` described below.\n\nClass description:\nImplement the NumMatrix class.\n\nMethod signatures and docstrings:\n- def __init__(self, matrix): :type matrix: List[List[int]]\n- def sumRegion(self, row1, col1, row2, col2): :type row1: int :type col1: int :type row2: int :type col2: int :rtype: int\n\n<|skeleton|>\nclass NumMatrix:\n\n def __init__(self, matrix):\n \"\"\":type matrix: List[List[int]]\"\"\"\n <|body_0|>\n\n def sumRegion(self, row1, col1, row2, col2):\n \"\"\":type row1: int :type col1: int :type row2: int :type col2: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n m = len(matrix)\n if m == 0:\n self.sums = []\n return\n n = len(matrix[0])\n 
self.sums = [[0] * (n + 1) for _ in range(m + 1)]\n sums = self.sums\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n sums[i][j] = matrix[i - 1][j - 1] + sums[i - 1][j] + sums[i][j - 1] - sums[i - 1][j - 1]\n<|end_body_0|>\n\n<|body_start_1|>\n sums = self.sums\n r = sums[row2 + 1][col2 + 1]\n r -= sums[row2 + 1][col1]\n r -= sums[row1][col2 + 1]\n r += sums[row1][col1]\n return r\n<|end_body_1|>\n", "revision_id": "426e09e75309e3ae8be0e946b31d5804f64bc915", "skeleton": "<|skeleton|>\nclass NumMatrix:\n\n def __init__(self, matrix):\n \"\"\":type matrix: List[List[int]]\"\"\"\n <|body_0|>\n\n def sumRegion(self, row1, col1, row2, col2):\n \"\"\":type row1: int :type col1: int :type row2: int :type col2: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class NumMatrix:\n def __init__(self, matrix):\n \"\"\":type matrix: List[List[int]]\"\"\"\n m = len(matrix)\n if m == 0:\n self.sums = []\n return\n n = len(matrix[0])\n self.sums = [[0] * (n + 1) for _ in range(m + 1)]\n sums = self.sums\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n sums[i][j] = matrix[i - 1][j - 1] + sums[i - 1][j] + sums[i][j - 1] - sums[i - 1][j - 1]\n\n def sumRegion(self, row1, col1, row2, col2):\n \"\"\":type row1: int :type col1: int :type row2: int :type col2: int :rtype: int\"\"\"\n sums = self.sums\n r = sums[row2 + 1][col2 + 1]\n r -= sums[row2 + 1][col1]\n r -= sums[row1][col2 + 1]\n r += sums[row1][col1]\n return r\n", "source": "the_stack_v2_python_sparse", "source_path": "py/leetcode11.py", "source_repo": "rzwei/leetcode", "split": "val", "star_events_count": 0}
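The NumMatrix record above precomputes an (m+1) x (n+1) prefix table so every rectangle query is O(1) by inclusion-exclusion: S(r1..r2, c1..c2) = P[r2+1][c2+1] - P[r2+1][c1] - P[r1][c2+1] + P[r1][c1]. A condensed version with a brute-force cross-check (the sample grid is illustrative only):

```python
class NumMatrix:
    def __init__(self, matrix):
        m, n = len(matrix), (len(matrix[0]) if matrix else 0)
        # One extra row/column of zeros removes boundary special-casing.
        self.sums = [[0] * (n + 1) for _ in range(m + 1)]
        for i in range(1, m + 1):
            for j in range(1, n + 1):
                self.sums[i][j] = (matrix[i - 1][j - 1] + self.sums[i - 1][j]
                                   + self.sums[i][j - 1] - self.sums[i - 1][j - 1])

    def sumRegion(self, row1, col1, row2, col2):
        s = self.sums
        return s[row2 + 1][col2 + 1] - s[row2 + 1][col1] - s[row1][col2 + 1] + s[row1][col1]

grid = [[3, 0, 1], [5, 6, 3], [1, 2, 0]]
nm = NumMatrix(grid)
brute = sum(grid[r][c] for r in range(1, 3) for c in range(0, 2))
assert nm.sumRegion(1, 0, 2, 1) == brute == 14
print(nm.sumRegion(1, 0, 2, 1))
```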
{"blob_id": "09bd071654afa01c1525f68682e0d7b694667452", "bodies": ["super().__init__(*args, **kwargs)\nself.__diary = DiaryDict()\nself.__diary.name = name\nself.title(self.__diary.name)\nself.geometry('900x400')\nself.resizable(width=False, height=False)\nself.__e_target, self.__e_day, self.__e_notes = (tk.StringVar(), tk.StringVar(), tk.StringVar())\ntk.Label(self, text='target ').grid(row=0, column=0, sticky=tk.W)\ntk.Label(self, text='date [dd.mm.yyyy] ').grid(row=1, column=0, sticky=tk.W)\ntk.Label(self, text='notes ').grid(row=2, column=0, sticky=tk.W)\ne_1 = tk.Entry(self, textvariable=self.__e_target, bd=5, width=50)\ne_2 = tk.Entry(self, textvariable=self.__e_day, bd=5, width=10)\ne_3 = tk.Entry(self, textvariable=self.__e_notes, bd=5, width=100)\ne_1.grid(row=0, column=1, sticky=tk.W)\ne_2.grid(row=1, column=1, sticky=tk.W)\ne_3.grid(row=2, column=1, sticky=tk.W)\nb = tk.Button(self, text='add', command=self.__readvalues)\nb.grid(row=3, column=1, sticky=tk.E + tk.W)\nself.__result = tk.StringVar()\ntk.Label(self, textvariable=self.__result, justify=tk.LEFT).grid(row=4, column=1, sticky=tk.E + tk.W)", "d = str(self.__e_day.get().strip()).split('.')\nif len(d) == 3:\n day = date(int(d[2]), int(d[1]), int(d[0]))\nelse:\n day = date.today()\nself.__diary.add(len(self.__diary.observations), self.__e_target.get().strip(), day, self.__e_notes.get().strip())\nself.__result.set(self.__diary)"], "bodies_text": "<|body_start_0|>\n super().__init__(*args, **kwargs)\n self.__diary = DiaryDict()\n self.__diary.name = name\n self.title(self.__diary.name)\n self.geometry('900x400')\n self.resizable(width=False, height=False)\n self.__e_target, self.__e_day, self.__e_notes = (tk.StringVar(), tk.StringVar(), tk.StringVar())\n tk.Label(self, text='target ').grid(row=0, column=0, sticky=tk.W)\n tk.Label(self, text='date [dd.mm.yyyy] ').grid(row=1, column=0, sticky=tk.W)\n tk.Label(self, text='notes ').grid(row=2, column=0, sticky=tk.W)\n e_1 = tk.Entry(self, textvariable=self.__e_target, bd=5, width=50)\n e_2 = tk.Entry(self, textvariable=self.__e_day, bd=5, width=10)\n e_3 = tk.Entry(self, textvariable=self.__e_notes, bd=5, width=100)\n e_1.grid(row=0, column=1, sticky=tk.W)\n e_2.grid(row=1, column=1, sticky=tk.W)\n e_3.grid(row=2, column=1, sticky=tk.W)\n b = tk.Button(self, text='add', command=self.__readvalues)\n b.grid(row=3, column=1, sticky=tk.E + tk.W)\n self.__result = tk.StringVar()\n tk.Label(self, textvariable=self.__result, justify=tk.LEFT).grid(row=4, column=1, sticky=tk.E + tk.W)\n<|end_body_0|>\n\n<|body_start_1|>\n d = str(self.__e_day.get().strip()).split('.')\n if len(d) == 3:\n day = date(int(d[2]), int(d[1]), int(d[0]))\n else:\n day = date.today()\n self.__diary.add(len(self.__diary.observations), self.__e_target.get().strip(), day, self.__e_notes.get().strip())\n self.__result.set(self.__diary)\n<|end_body_1|>\n", "class_docstring": "Diary class keeps track of observations, is also a TK GUI", "class_name": "Diary", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Diary:\n \"\"\"Diary class keeps track of observations, is also a TK GUI\"\"\"\n\n def __init__(self, name=str(), *args, **kwargs):\n \"\"\"that passes the diary name to DiaryDict type object who checks the validity and sets the title to be diarydict objects name attribute Created GUI\"\"\"\n <|body_0|>\n\n def __readvalues(self):\n \"\"\"reads entries values and if ok calls add, calls DiaryDict object's services\"\"\"\n 
<|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(*args, **kwargs)\n self.__diary = DiaryDict()\n self.__diary.name = name\n self.title(self.__diary.name)\n self.geometry('900x400')\n self.resizable(width=False, height=False)\n self.__e_target, self.__e_day, self.__e_notes = (tk.StringVar(), tk.StringVar(), tk.StringVar())\n tk.Label(self, text='target ').grid(row=0, column=0, sticky=tk.W)\n tk.Label(self, text='date [dd.mm.yyyy] ').grid(row=1, column=0, sticky=tk.W)\n tk.Label(self, text='notes ').grid(row=2, column=0, sticky=tk.W)\n e_1 = tk.Entry(self, textvariable=self.__e_target, bd=5, width=50)\n e_2 = tk.Entry(self, textvariable=self.__e_day, bd=5, width=10)\n e_3 = tk.Entry(self, textvariable=self.__e_notes, bd=5, width=100)\n e_1.grid(row=0, column=1, sticky=tk.W)\n e_2.grid(row=1, column=1, sticky=tk.W)\n e_3.grid(row=2, column=1, sticky=tk.W)\n b = tk.Button(self, text='add', command=self.__readvalues)\n b.grid(row=3, column=1, sticky=tk.E + tk.W)\n self.__result = tk.StringVar()\n tk.Label(self, textvariable=self.__result, justify=tk.LEFT).grid(row=4, column=1, sticky=tk.E + tk.W)\n<|end_body_0|>\n\n<|body_start_1|>\n d = str(self.__e_day.get().strip()).split('.')\n if len(d) == 3:\n day = date(int(d[2]), int(d[1]), int(d[0]))\n else:\n day = date.today()\n self.__diary.add(len(self.__diary.observations), self.__e_target.get().strip(), day, self.__e_notes.get().strip())\n self.__result.set(self.__diary)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000028", "length_bytes": 5371, "license_type": "no_license", "methods": [{"docstring": "that passes the diary name to DiaryDict type object who checks the validity and sets the title to be diarydict objects name attribute Created GUI", "name": "__init__", "signature": "def __init__(self, name=str(), *args, **kwargs)"}, {"docstring": "reads entries values and if ok calls add, calls DiaryDict object's services", "name": "__readvalues", "signature": "def __readvalues(self)"}], "n_methods": 2, "prompt": "Implement the Python class `Diary` described below.\n\nClass description:\nDiary class keeps track of observations, is also a TK GUI\n\nMethod signatures and docstrings:\n- def __init__(self, name=str(), *args, **kwargs): that passes the diary name to DiaryDict type object who checks the validity and sets the title to be diarydict objects name attribute Created GUI\n- def __readvalues(self): reads entries values and if ok calls add, calls DiaryDict object's services", "prompted_full_text": "Implement the Python class `Diary` described below.\n\nClass description:\nDiary class keeps track of observations, is also a TK GUI\n\nMethod signatures and docstrings:\n- def __init__(self, name=str(), *args, **kwargs): that passes the diary name to DiaryDict type object who checks the validity and sets the title to be diarydict objects name attribute Created GUI\n- def __readvalues(self): reads entries values and if ok calls add, calls DiaryDict object's services\n\n<|skeleton|>\nclass Diary:\n \"\"\"Diary class keeps track of observations, is also a TK GUI\"\"\"\n\n def __init__(self, name=str(), *args, **kwargs):\n \"\"\"that passes the diary name to DiaryDict type object who checks the validity and sets the title to be diarydict objects name attribute Created GUI\"\"\"\n <|body_0|>\n\n def __readvalues(self):\n \"\"\"reads entries values and if ok calls add, calls DiaryDict object's services\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(*args, **kwargs)\n self.__diary = 
DiaryDict()\n self.__diary.name = name\n self.title(self.__diary.name)\n self.geometry('900x400')\n self.resizable(width=False, height=False)\n self.__e_target, self.__e_day, self.__e_notes = (tk.StringVar(), tk.StringVar(), tk.StringVar())\n tk.Label(self, text='target ').grid(row=0, column=0, sticky=tk.W)\n tk.Label(self, text='date [dd.mm.yyyy] ').grid(row=1, column=0, sticky=tk.W)\n tk.Label(self, text='notes ').grid(row=2, column=0, sticky=tk.W)\n e_1 = tk.Entry(self, textvariable=self.__e_target, bd=5, width=50)\n e_2 = tk.Entry(self, textvariable=self.__e_day, bd=5, width=10)\n e_3 = tk.Entry(self, textvariable=self.__e_notes, bd=5, width=100)\n e_1.grid(row=0, column=1, sticky=tk.W)\n e_2.grid(row=1, column=1, sticky=tk.W)\n e_3.grid(row=2, column=1, sticky=tk.W)\n b = tk.Button(self, text='add', command=self.__readvalues)\n b.grid(row=3, column=1, sticky=tk.E + tk.W)\n self.__result = tk.StringVar()\n tk.Label(self, textvariable=self.__result, justify=tk.LEFT).grid(row=4, column=1, sticky=tk.E + tk.W)\n<|end_body_0|>\n\n<|body_start_1|>\n d = str(self.__e_day.get().strip()).split('.')\n if len(d) == 3:\n day = date(int(d[2]), int(d[1]), int(d[0]))\n else:\n day = date.today()\n self.__diary.add(len(self.__diary.observations), self.__e_target.get().strip(), day, self.__e_notes.get().strip())\n self.__result.set(self.__diary)\n<|end_body_1|>\n", "revision_id": "2b3f2317673ffbb6352dd8c8a01e4fed18c2f6f2", "skeleton": "<|skeleton|>\nclass Diary:\n \"\"\"Diary class keeps track of observations, is also a TK GUI\"\"\"\n\n def __init__(self, name=str(), *args, **kwargs):\n \"\"\"that passes the diary name to DiaryDict type object who checks the validity and sets the title to be diarydict objects name attribute Created GUI\"\"\"\n <|body_0|>\n\n def __readvalues(self):\n \"\"\"reads entries values and if ok calls add, calls DiaryDict object's services\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Diary:\n \"\"\"Diary class keeps track of observations, is also a TK GUI\"\"\"\n\n def __init__(self, name=str(), *args, **kwargs):\n \"\"\"that passes the diary name to DiaryDict type object who checks the validity and sets the title to be diarydict objects name attribute Created GUI\"\"\"\n super().__init__(*args, **kwargs)\n self.__diary = DiaryDict()\n self.__diary.name = name\n self.title(self.__diary.name)\n self.geometry('900x400')\n self.resizable(width=False, height=False)\n self.__e_target, self.__e_day, self.__e_notes = (tk.StringVar(), tk.StringVar(), tk.StringVar())\n tk.Label(self, text='target ').grid(row=0, column=0, sticky=tk.W)\n tk.Label(self, text='date [dd.mm.yyyy] ').grid(row=1, column=0, sticky=tk.W)\n tk.Label(self, text='notes ').grid(row=2, column=0, sticky=tk.W)\n e_1 = tk.Entry(self, textvariable=self.__e_target, bd=5, width=50)\n e_2 = tk.Entry(self, textvariable=self.__e_day, bd=5, width=10)\n e_3 = tk.Entry(self, textvariable=self.__e_notes, bd=5, width=100)\n e_1.grid(row=0, column=1, sticky=tk.W)\n e_2.grid(row=1, column=1, sticky=tk.W)\n e_3.grid(row=2, column=1, sticky=tk.W)\n b = tk.Button(self, text='add', command=self.__readvalues)\n b.grid(row=3, column=1, sticky=tk.E + tk.W)\n self.__result = tk.StringVar()\n tk.Label(self, textvariable=self.__result, justify=tk.LEFT).grid(row=4, column=1, sticky=tk.E + tk.W)\n\n def __readvalues(self):\n \"\"\"reads entries values and if ok calls add, calls DiaryDict 
object's services\"\"\"\n d = str(self.__e_day.get().strip()).split('.')\n if len(d) == 3:\n day = date(int(d[2]), int(d[1]), int(d[0]))\n else:\n day = date.today()\n self.__diary.add(len(self.__diary.observations), self.__e_target.get().strip(), day, self.__e_notes.get().strip())\n self.__result.set(self.__diary)\n", "source": "the_stack_v2_python_sparse", "source_path": "Object-oriented-programming/Diary-exercises/diary_0_3.py", "source_repo": "nooraelina/school-work", "split": "val", "star_events_count": 0}
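The Diary record above calls `tk.Tk` methods (`title`, `geometry`, `resizable`) right after `super().__init__`, so the captured bare `class Diary:` skeleton presumably dropped a `tk.Tk` base. The date handling in `__readvalues` is separable from the GUI; a standalone sketch follows, where `parse_day` is a hypothetical helper name, not something the record defines.

```python
from datetime import date

def parse_day(text):
    # Mirrors __readvalues: a "dd.mm.yyyy" string is split on dots and
    # reordered into date(yyyy, mm, dd); anything else falls back to today.
    d = text.strip().split('.')
    if len(d) == 3:
        return date(int(d[2]), int(d[1]), int(d[0]))
    return date.today()

print(parse_day('24.12.2023'))  # 2023-12-24
print(parse_day(''))            # today's date (fallback branch)
```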
{"blob_id": "83989873999dd3dfea6e1e92a8afd1fd4c748b5a", "bodies": ["self.hmap = {}\nself.head = None\nself.last = None", "if timestamp in self.hmap:\n self.hmap[timestamp].count += 1\nelse:\n self.hmap[timestamp] = DoublyNode(timestamp, 1)\n self.hmap[timestamp].next = None\n if self.last:\n self.last.next = self.hmap[timestamp]\n self.hmap[timestamp].prev = self.last\n self.last = self.hmap[timestamp]", "count = 0\ncurr = self.last\nif timestamp in self.hmap:\n curr = self.hmap[timestamp]\ni = 0\nwhile curr and curr.time > timestamp - 300:\n count += curr.count\n curr = curr.prev\nreturn count"], "bodies_text": "<|body_start_0|>\n self.hmap = {}\n self.head = None\n self.last = None\n<|end_body_0|>\n\n<|body_start_1|>\n if timestamp in self.hmap:\n self.hmap[timestamp].count += 1\n else:\n self.hmap[timestamp] = DoublyNode(timestamp, 1)\n self.hmap[timestamp].next = None\n if self.last:\n self.last.next = self.hmap[timestamp]\n self.hmap[timestamp].prev = self.last\n self.last = self.hmap[timestamp]\n<|end_body_1|>\n\n<|body_start_2|>\n count = 0\n curr = self.last\n if timestamp in self.hmap:\n curr = self.hmap[timestamp]\n i = 0\n while curr and curr.time > timestamp - 300:\n count += curr.count\n curr = curr.prev\n return count\n<|end_body_2|>\n", "class_docstring": "", "class_name": "HitCounter", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass HitCounter:\n\n def __init__(self):\n \"\"\"Initialize your data structure here.\"\"\"\n <|body_0|>\n\n def hit(self, timestamp: int) -> None:\n \"\"\"Record a hit. @param timestamp - The current timestamp (in seconds granularity).\"\"\"\n <|body_1|>\n\n def getHits(self, timestamp: int) -> int:\n \"\"\"Return the number of hits in the past 5 minutes. @param timestamp - The current timestamp (in seconds granularity).\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.hmap = {}\n self.head = None\n self.last = None\n<|end_body_0|>\n\n<|body_start_1|>\n if timestamp in self.hmap:\n self.hmap[timestamp].count += 1\n else:\n self.hmap[timestamp] = DoublyNode(timestamp, 1)\n self.hmap[timestamp].next = None\n if self.last:\n self.last.next = self.hmap[timestamp]\n self.hmap[timestamp].prev = self.last\n self.last = self.hmap[timestamp]\n<|end_body_1|>\n\n<|body_start_2|>\n count = 0\n curr = self.last\n if timestamp in self.hmap:\n curr = self.hmap[timestamp]\n i = 0\n while curr and curr.time > timestamp - 300:\n count += curr.count\n curr = curr.prev\n return count\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000029", "length_bytes": 1659, "license_type": "no_license", "methods": [{"docstring": "Initialize your data structure here.", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Record a hit. @param timestamp - The current timestamp (in seconds granularity).", "name": "hit", "signature": "def hit(self, timestamp: int) -> None"}, {"docstring": "Return the number of hits in the past 5 minutes. @param timestamp - The current timestamp (in seconds granularity).", "name": "getHits", "signature": "def getHits(self, timestamp: int) -> int"}], "n_methods": 3, "prompt": "Implement the Python class `HitCounter` described below.\n\nClass description:\nImplement the HitCounter class.\n\nMethod signatures and docstrings:\n- def __init__(self): Initialize your data structure here.\n- def hit(self, timestamp: int) -> None: Record a hit. 
@param timestamp - The current timestamp (in seconds granularity).\n- def getHits(self, timestamp: int) -> int: Return the number of hits in the past 5 minutes. @param timestamp - The current timestamp (in seconds granularity).", "prompted_full_text": "Implement the Python class `HitCounter` described below.\n\nClass description:\nImplement the HitCounter class.\n\nMethod signatures and docstrings:\n- def __init__(self): Initialize your data structure here.\n- def hit(self, timestamp: int) -> None: Record a hit. @param timestamp - The current timestamp (in seconds granularity).\n- def getHits(self, timestamp: int) -> int: Return the number of hits in the past 5 minutes. @param timestamp - The current timestamp (in seconds granularity).\n\n<|skeleton|>\nclass HitCounter:\n\n def __init__(self):\n \"\"\"Initialize your data structure here.\"\"\"\n <|body_0|>\n\n def hit(self, timestamp: int) -> None:\n \"\"\"Record a hit. @param timestamp - The current timestamp (in seconds granularity).\"\"\"\n <|body_1|>\n\n def getHits(self, timestamp: int) -> int:\n \"\"\"Return the number of hits in the past 5 minutes. @param timestamp - The current timestamp (in seconds granularity).\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.hmap = {}\n self.head = None\n self.last = None\n<|end_body_0|>\n\n<|body_start_1|>\n if timestamp in self.hmap:\n self.hmap[timestamp].count += 1\n else:\n self.hmap[timestamp] = DoublyNode(timestamp, 1)\n self.hmap[timestamp].next = None\n if self.last:\n self.last.next = self.hmap[timestamp]\n self.hmap[timestamp].prev = self.last\n self.last = self.hmap[timestamp]\n<|end_body_1|>\n\n<|body_start_2|>\n count = 0\n curr = self.last\n if timestamp in self.hmap:\n curr = self.hmap[timestamp]\n i = 0\n while curr and curr.time > timestamp - 300:\n count += curr.count\n curr = curr.prev\n return count\n<|end_body_2|>\n", "revision_id": "d71327e16fdf2702542ec585fd6eb48a9a0dc8d0", "skeleton": "<|skeleton|>\nclass HitCounter:\n\n def __init__(self):\n \"\"\"Initialize your data structure here.\"\"\"\n <|body_0|>\n\n def hit(self, timestamp: int) -> None:\n \"\"\"Record a hit. @param timestamp - The current timestamp (in seconds granularity).\"\"\"\n <|body_1|>\n\n def getHits(self, timestamp: int) -> int:\n \"\"\"Return the number of hits in the past 5 minutes. @param timestamp - The current timestamp (in seconds granularity).\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class HitCounter:\n def __init__(self):\n \"\"\"Initialize your data structure here.\"\"\"\n self.hmap = {}\n self.head = None\n self.last = None\n\n def hit(self, timestamp: int) -> None:\n \"\"\"Record a hit. @param timestamp - The current timestamp (in seconds granularity).\"\"\"\n if timestamp in self.hmap:\n self.hmap[timestamp].count += 1\n else:\n self.hmap[timestamp] = DoublyNode(timestamp, 1)\n self.hmap[timestamp].next = None\n if self.last:\n self.last.next = self.hmap[timestamp]\n self.hmap[timestamp].prev = self.last\n self.last = self.hmap[timestamp]\n\n def getHits(self, timestamp: int) -> int:\n \"\"\"Return the number of hits in the past 5 minutes. 
@param timestamp - The current timestamp (in seconds granularity).\"\"\"\n count = 0\n curr = self.last\n if timestamp in self.hmap:\n curr = self.hmap[timestamp]\n i = 0\n while curr and curr.time > timestamp - 300:\n count += curr.count\n curr = curr.prev\n return count\n", "source": "the_stack_v2_python_sparse", "source_path": "src/M362_DesignHitCounter.py", "source_repo": "varshajayaraman/SheCodesInPython", "split": "val", "star_events_count": 1}
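The HitCounter record above keys a dict by timestamp and threads the nodes into a doubly linked list, so repeated hits within one second only bump a counter and `getHits` walks backwards through at most 300 nodes. `DoublyNode` is referenced but never defined in the record; the minimal shape below is an assumption. A runnable sketch:

```python
class DoublyNode:                      # assumed node layout, not in the record
    def __init__(self, time, count):
        self.time, self.count = time, count
        self.prev = self.next = None

class HitCounter:
    def __init__(self):
        self.hmap = {}
        self.last = None               # newest node in the linked list

    def hit(self, timestamp):
        if timestamp in self.hmap:
            self.hmap[timestamp].count += 1
        else:
            node = DoublyNode(timestamp, 1)
            node.prev = self.last
            if self.last:
                self.last.next = node
            self.hmap[timestamp] = self.last = node

    def getHits(self, timestamp):
        # Start from the queried second if recorded, else from the newest
        # node, and walk backwards while inside the 300-second window.
        curr = self.hmap.get(timestamp, self.last)
        count = 0
        while curr and curr.time > timestamp - 300:
            count += curr.count
            curr = curr.prev
        return count

hc = HitCounter()
for t in (1, 2, 3, 300, 300):
    hc.hit(t)
print(hc.getHits(300))  # 5, window is (0, 300]
print(hc.getHits(301))  # 4, the hit at t=1 has expired
```

Like the record, this assumes the LeetCode guarantee of non-decreasing timestamps; out-of-order queries older than `self.last` would otherwise overcount.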
{"blob_id": "5f6ebde8c092cdb35eb286f1b37680831bddc529", "bodies": ["if jobject is None:\n jobject = Loader.new_instance(classname)\nself.enforce_type(jobject, 'weka.core.converters.Loader')\nsuper(Loader, self).__init__(jobject=jobject, options=options)\nself.incremental = False\nself.structure = None", "if not self.incremental:\n raise Exception('Not in incremental mode, cannot iterate!')\nreturn IncrementalLoaderIterator(self, self.structure)", "self.enforce_type(self.jobject, 'weka.core.converters.FileSourcedConverter')\nself.incremental = incremental\nif not javabridge.is_instance_of(dfile, 'Ljava/io/File;'):\n dfile = javabridge.make_instance('Ljava/io/File;', '(Ljava/lang/String;)V', javabridge.get_env().new_string_utf(str(dfile)))\njavabridge.call(self.jobject, 'reset', '()V')\njavabridge.call(self.jobject, 'setFile', '(Ljava/io/File;)V', dfile)\nif incremental:\n self.structure = Instances(javabridge.call(self.jobject, 'getStructure', '()Lweka/core/Instances;'))\n return self.structure\nelse:\n return Instances(javabridge.call(self.jobject, 'getDataSet', '()Lweka/core/Instances;'))", "self.enforce_type(self.jobject, 'weka.core.converters.URLSourcedLoader')\nself.incremental = incremental\njavabridge.call(self.jobject, 'reset', '()V')\njavabridge.call(self.jobject, 'setURL', '(Ljava/lang/String;)V', str(url))\nif incremental:\n self.structure = Instances(javabridge.call(self.jobject, 'getStructure', '()Lweka/core/Instances;'))\n return self.structure\nelse:\n return Instances(javabridge.call(self.jobject, 'getDataSet', '()Lweka/core/Instances;'))"], "bodies_text": "<|body_start_0|>\n if jobject is None:\n jobject = Loader.new_instance(classname)\n self.enforce_type(jobject, 'weka.core.converters.Loader')\n super(Loader, self).__init__(jobject=jobject, options=options)\n self.incremental = False\n self.structure = None\n<|end_body_0|>\n\n<|body_start_1|>\n if not self.incremental:\n raise Exception('Not in incremental mode, cannot iterate!')\n return IncrementalLoaderIterator(self, self.structure)\n<|end_body_1|>\n\n<|body_start_2|>\n self.enforce_type(self.jobject, 'weka.core.converters.FileSourcedConverter')\n self.incremental = incremental\n if not javabridge.is_instance_of(dfile, 'Ljava/io/File;'):\n dfile = javabridge.make_instance('Ljava/io/File;', '(Ljava/lang/String;)V', javabridge.get_env().new_string_utf(str(dfile)))\n javabridge.call(self.jobject, 'reset', '()V')\n javabridge.call(self.jobject, 'setFile', '(Ljava/io/File;)V', dfile)\n if incremental:\n self.structure = Instances(javabridge.call(self.jobject, 'getStructure', '()Lweka/core/Instances;'))\n return self.structure\n else:\n return Instances(javabridge.call(self.jobject, 'getDataSet', '()Lweka/core/Instances;'))\n<|end_body_2|>\n\n<|body_start_3|>\n self.enforce_type(self.jobject, 'weka.core.converters.URLSourcedLoader')\n self.incremental = incremental\n javabridge.call(self.jobject, 'reset', '()V')\n javabridge.call(self.jobject, 'setURL', '(Ljava/lang/String;)V', str(url))\n if incremental:\n self.structure = Instances(javabridge.call(self.jobject, 'getStructure', '()Lweka/core/Instances;'))\n return self.structure\n else:\n return Instances(javabridge.call(self.jobject, 'getDataSet', '()Lweka/core/Instances;'))\n<|end_body_3|>\n", "class_docstring": "Wrapper class for Loaders.", "class_name": "Loader", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Loader:\n \"\"\"Wrapper class for Loaders.\"\"\"\n\n def __init__(self, 
classname='weka.core.converters.ArffLoader', jobject=None, options=None):\n \"\"\"Initializes the specified loader either using the classname or the JB_Object. :param classname: the classname of the loader :type classname: str :param jobject: the JB_Object to use :type jobject: JB_Object :param options: the list of commandline options to set :type options: list\"\"\"\n <|body_0|>\n\n def __iter__(self):\n \"\"\"Returns an iterator in case the loader was instantiated in incremental mode, otherwise an Exception is raised. :return: the iterator :rtype: IncrementalLoaderIterator\"\"\"\n <|body_1|>\n\n def load_file(self, dfile, incremental=False):\n \"\"\"Loads the specified file and returns the Instances object. In case of incremental loading, only the structure. :param dfile: the file to load :type dfile: str :param incremental: whether to load the dataset incrementally :type incremental: bool :return: the full dataset or the header (if incremental) :rtype: Instances\"\"\"\n <|body_2|>\n\n def load_url(self, url, incremental=False):\n \"\"\"Loads the specified URL and returns the Instances object. In case of incremental loading, only the structure. :param url: the URL to load the data from :type url: str :param incremental: whether to load the dataset incrementally :type incremental: bool :return: the full dataset or the header (if incremental) :rtype: Instances\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if jobject is None:\n jobject = Loader.new_instance(classname)\n self.enforce_type(jobject, 'weka.core.converters.Loader')\n super(Loader, self).__init__(jobject=jobject, options=options)\n self.incremental = False\n self.structure = None\n<|end_body_0|>\n\n<|body_start_1|>\n if not self.incremental:\n raise Exception('Not in incremental mode, cannot iterate!')\n return IncrementalLoaderIterator(self, self.structure)\n<|end_body_1|>\n\n<|body_start_2|>\n self.enforce_type(self.jobject, 'weka.core.converters.FileSourcedConverter')\n self.incremental = incremental\n if not javabridge.is_instance_of(dfile, 'Ljava/io/File;'):\n dfile = javabridge.make_instance('Ljava/io/File;', '(Ljava/lang/String;)V', javabridge.get_env().new_string_utf(str(dfile)))\n javabridge.call(self.jobject, 'reset', '()V')\n javabridge.call(self.jobject, 'setFile', '(Ljava/io/File;)V', dfile)\n if incremental:\n self.structure = Instances(javabridge.call(self.jobject, 'getStructure', '()Lweka/core/Instances;'))\n return self.structure\n else:\n return Instances(javabridge.call(self.jobject, 'getDataSet', '()Lweka/core/Instances;'))\n<|end_body_2|>\n\n<|body_start_3|>\n self.enforce_type(self.jobject, 'weka.core.converters.URLSourcedLoader')\n self.incremental = incremental\n javabridge.call(self.jobject, 'reset', '()V')\n javabridge.call(self.jobject, 'setURL', '(Ljava/lang/String;)V', str(url))\n if incremental:\n self.structure = Instances(javabridge.call(self.jobject, 'getStructure', '()Lweka/core/Instances;'))\n return self.structure\n else:\n return Instances(javabridge.call(self.jobject, 'getDataSet', '()Lweka/core/Instances;'))\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000030", "length_bytes": 8342, "license_type": "no_license", "methods": [{"docstring": "Initializes the specified loader either using the classname or the JB_Object. 
:param classname: the classname of the loader :type classname: str :param jobject: the JB_Object to use :type jobject: JB_Object :param options: the list of commandline options to set :type options: list", "name": "__init__", "signature": "def __init__(self, classname='weka.core.converters.ArffLoader', jobject=None, options=None)"}, {"docstring": "Returns an iterator in case the loader was instantiated in incremental mode, otherwise an Exception is raised. :return: the iterator :rtype: IncrementalLoaderIterator", "name": "__iter__", "signature": "def __iter__(self)"}, {"docstring": "Loads the specified file and returns the Instances object. In case of incremental loading, only the structure. :param dfile: the file to load :type dfile: str :param incremental: whether to load the dataset incrementally :type incremental: bool :return: the full dataset or the header (if incremental) :rtype: Instances", "name": "load_file", "signature": "def load_file(self, dfile, incremental=False)"}, {"docstring": "Loads the specified URL and returns the Instances object. In case of incremental loading, only the structure. :param url: the URL to load the data from :type url: str :param incremental: whether to load the dataset incrementally :type incremental: bool :return: the full dataset or the header (if incremental) :rtype: Instances", "name": "load_url", "signature": "def load_url(self, url, incremental=False)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_041505", "prompt": "Implement the Python class `Loader` described below.\n\nClass description:\nWrapper class for Loaders.\n\nMethod signatures and docstrings:\n- def __init__(self, classname='weka.core.converters.ArffLoader', jobject=None, options=None): Initializes the specified loader either using the classname or the JB_Object. :param classname: the classname of the loader :type classname: str :param jobject: the JB_Object to use :type jobject: JB_Object :param options: the list of commandline options to set :type options: list\n- def __iter__(self): Returns an iterator in case the loader was instantiated in incremental mode, otherwise an Exception is raised. :return: the iterator :rtype: IncrementalLoaderIterator\n- def load_file(self, dfile, incremental=False): Loads the specified file and returns the Instances object. In case of incremental loading, only the structure. :param dfile: the file to load :type dfile: str :param incremental: whether to load the dataset incrementally :type incremental: bool :return: the full dataset or the header (if incremental) :rtype: Instances\n- def load_url(self, url, incremental=False): Loads the specified URL and returns the Instances object. In case of incremental loading, only the structure. :param url: the URL to load the data from :type url: str :param incremental: whether to load the dataset incrementally :type incremental: bool :return: the full dataset or the header (if incremental) :rtype: Instances", "prompted_full_text": "Implement the Python class `Loader` described below.\n\nClass description:\nWrapper class for Loaders.\n\nMethod signatures and docstrings:\n- def __init__(self, classname='weka.core.converters.ArffLoader', jobject=None, options=None): Initializes the specified loader either using the classname or the JB_Object. 
:param classname: the classname of the loader :type classname: str :param jobject: the JB_Object to use :type jobject: JB_Object :param options: the list of commandline options to set :type options: list\n- def __iter__(self): Returns an iterator in case the loader was instantiated in incremental mode, otherwise an Exception is raised. :return: the iterator :rtype: IncrementalLoaderIterator\n- def load_file(self, dfile, incremental=False): Loads the specified file and returns the Instances object. In case of incremental loading, only the structure. :param dfile: the file to load :type dfile: str :param incremental: whether to load the dataset incrementally :type incremental: bool :return: the full dataset or the header (if incremental) :rtype: Instances\n- def load_url(self, url, incremental=False): Loads the specified URL and returns the Instances object. In case of incremental loading, only the structure. :param url: the URL to load the data from :type url: str :param incremental: whether to load the dataset incrementally :type incremental: bool :return: the full dataset or the header (if incremental) :rtype: Instances\n\n<|skeleton|>\nclass Loader:\n \"\"\"Wrapper class for Loaders.\"\"\"\n\n def __init__(self, classname='weka.core.converters.ArffLoader', jobject=None, options=None):\n \"\"\"Initializes the specified loader either using the classname or the JB_Object. :param classname: the classname of the loader :type classname: str :param jobject: the JB_Object to use :type jobject: JB_Object :param options: the list of commandline options to set :type options: list\"\"\"\n <|body_0|>\n\n def __iter__(self):\n \"\"\"Returns an iterator in case the loader was instantiated in incremental mode, otherwise an Exception is raised. :return: the iterator :rtype: IncrementalLoaderIterator\"\"\"\n <|body_1|>\n\n def load_file(self, dfile, incremental=False):\n \"\"\"Loads the specified file and returns the Instances object. In case of incremental loading, only the structure. :param dfile: the file to load :type dfile: str :param incremental: whether to load the dataset incrementally :type incremental: bool :return: the full dataset or the header (if incremental) :rtype: Instances\"\"\"\n <|body_2|>\n\n def load_url(self, url, incremental=False):\n \"\"\"Loads the specified URL and returns the Instances object. In case of incremental loading, only the structure. 
:param url: the URL to load the data from :type url: str :param incremental: whether to load the dataset incrementally :type incremental: bool :return: the full dataset or the header (if incremental) :rtype: Instances\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if jobject is None:\n jobject = Loader.new_instance(classname)\n self.enforce_type(jobject, 'weka.core.converters.Loader')\n super(Loader, self).__init__(jobject=jobject, options=options)\n self.incremental = False\n self.structure = None\n<|end_body_0|>\n\n<|body_start_1|>\n if not self.incremental:\n raise Exception('Not in incremental mode, cannot iterate!')\n return IncrementalLoaderIterator(self, self.structure)\n<|end_body_1|>\n\n<|body_start_2|>\n self.enforce_type(self.jobject, 'weka.core.converters.FileSourcedConverter')\n self.incremental = incremental\n if not javabridge.is_instance_of(dfile, 'Ljava/io/File;'):\n dfile = javabridge.make_instance('Ljava/io/File;', '(Ljava/lang/String;)V', javabridge.get_env().new_string_utf(str(dfile)))\n javabridge.call(self.jobject, 'reset', '()V')\n javabridge.call(self.jobject, 'setFile', '(Ljava/io/File;)V', dfile)\n if incremental:\n self.structure = Instances(javabridge.call(self.jobject, 'getStructure', '()Lweka/core/Instances;'))\n return self.structure\n else:\n return Instances(javabridge.call(self.jobject, 'getDataSet', '()Lweka/core/Instances;'))\n<|end_body_2|>\n\n<|body_start_3|>\n self.enforce_type(self.jobject, 'weka.core.converters.URLSourcedLoader')\n self.incremental = incremental\n javabridge.call(self.jobject, 'reset', '()V')\n javabridge.call(self.jobject, 'setURL', '(Ljava/lang/String;)V', str(url))\n if incremental:\n self.structure = Instances(javabridge.call(self.jobject, 'getStructure', '()Lweka/core/Instances;'))\n return self.structure\n else:\n return Instances(javabridge.call(self.jobject, 'getDataSet', '()Lweka/core/Instances;'))\n<|end_body_3|>\n", "revision_id": "8e67a1c7653532fdf71b98cda1bf8c6bafef49f3", "skeleton": "<|skeleton|>\nclass Loader:\n \"\"\"Wrapper class for Loaders.\"\"\"\n\n def __init__(self, classname='weka.core.converters.ArffLoader', jobject=None, options=None):\n \"\"\"Initializes the specified loader either using the classname or the JB_Object. :param classname: the classname of the loader :type classname: str :param jobject: the JB_Object to use :type jobject: JB_Object :param options: the list of commandline options to set :type options: list\"\"\"\n <|body_0|>\n\n def __iter__(self):\n \"\"\"Returns an iterator in case the loader was instantiated in incremental mode, otherwise an Exception is raised. :return: the iterator :rtype: IncrementalLoaderIterator\"\"\"\n <|body_1|>\n\n def load_file(self, dfile, incremental=False):\n \"\"\"Loads the specified file and returns the Instances object. In case of incremental loading, only the structure. :param dfile: the file to load :type dfile: str :param incremental: whether to load the dataset incrementally :type incremental: bool :return: the full dataset or the header (if incremental) :rtype: Instances\"\"\"\n <|body_2|>\n\n def load_url(self, url, incremental=False):\n \"\"\"Loads the specified URL and returns the Instances object. In case of incremental loading, only the structure. 
:param url: the URL to load the data from :type url: str :param incremental: whether to load the dataset incrementally :type incremental: bool :return: the full dataset or the header (if incremental) :rtype: Instances\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Loader:\n \"\"\"Wrapper class for Loaders.\"\"\"\n\n def __init__(self, classname='weka.core.converters.ArffLoader', jobject=None, options=None):\n \"\"\"Initializes the specified loader either using the classname or the JB_Object. :param classname: the classname of the loader :type classname: str :param jobject: the JB_Object to use :type jobject: JB_Object :param options: the list of commandline options to set :type options: list\"\"\"\n if jobject is None:\n jobject = Loader.new_instance(classname)\n self.enforce_type(jobject, 'weka.core.converters.Loader')\n super(Loader, self).__init__(jobject=jobject, options=options)\n self.incremental = False\n self.structure = None\n\n def __iter__(self):\n \"\"\"Returns an iterator in case the loader was instantiated in incremental mode, otherwise an Exception is raised. :return: the iterator :rtype: IncrementalLoaderIterator\"\"\"\n if not self.incremental:\n raise Exception('Not in incremental mode, cannot iterate!')\n return IncrementalLoaderIterator(self, self.structure)\n\n def load_file(self, dfile, incremental=False):\n \"\"\"Loads the specified file and returns the Instances object. In case of incremental loading, only the structure. :param dfile: the file to load :type dfile: str :param incremental: whether to load the dataset incrementally :type incremental: bool :return: the full dataset or the header (if incremental) :rtype: Instances\"\"\"\n self.enforce_type(self.jobject, 'weka.core.converters.FileSourcedConverter')\n self.incremental = incremental\n if not javabridge.is_instance_of(dfile, 'Ljava/io/File;'):\n dfile = javabridge.make_instance('Ljava/io/File;', '(Ljava/lang/String;)V', javabridge.get_env().new_string_utf(str(dfile)))\n javabridge.call(self.jobject, 'reset', '()V')\n javabridge.call(self.jobject, 'setFile', '(Ljava/io/File;)V', dfile)\n if incremental:\n self.structure = Instances(javabridge.call(self.jobject, 'getStructure', '()Lweka/core/Instances;'))\n return self.structure\n else:\n return Instances(javabridge.call(self.jobject, 'getDataSet', '()Lweka/core/Instances;'))\n\n def load_url(self, url, incremental=False):\n \"\"\"Loads the specified URL and returns the Instances object. In case of incremental loading, only the structure. 
:param url: the URL to load the data from :type url: str :param incremental: whether to load the dataset incrementally :type incremental: bool :return: the full dataset or the header (if incremental) :rtype: Instances\"\"\"\n self.enforce_type(self.jobject, 'weka.core.converters.URLSourcedLoader')\n self.incremental = incremental\n javabridge.call(self.jobject, 'reset', '()V')\n javabridge.call(self.jobject, 'setURL', '(Ljava/lang/String;)V', str(url))\n if incremental:\n self.structure = Instances(javabridge.call(self.jobject, 'getStructure', '()Lweka/core/Instances;'))\n return self.structure\n else:\n return Instances(javabridge.call(self.jobject, 'getDataSet', '()Lweka/core/Instances;'))\n", "source": "the_stack_v2_python_sparse", "source_path": "search_project/weka/core/converters.py", "source_repo": "Joel-Venzke/Automated-nmr-assignment", "split": "val", "star_events_count": 2}
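The Loader record above wraps Weka's converter classes through javabridge. A minimal usage sketch, following the python-weka-wrapper conventions the class comes from; the jvm helper module, the 'iris.arff' file name, and the num_instances property are assumptions drawn from that library's documented API, not from the record itself.

import weka.core.jvm as jvm
from weka.core.converters import Loader

jvm.start()
try:
    loader = Loader(classname='weka.core.converters.ArffLoader')
    # Batch mode: load_file returns the fully materialised dataset.
    data = loader.load_file('iris.arff')
    print(data.num_instances)
    # Incremental mode: load_file returns only the header (structure),
    # and the loader itself becomes iterable, yielding one row at a time.
    structure = loader.load_file('iris.arff', incremental=True)
    for inst in loader:
        print(inst)
finally:
    jvm.stop()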
{"blob_id": "57cd7073e4893858dde4984269dd35e862c175dd", "bodies": ["seen = set()\n\ndef helper(pos):\n if not 0 <= pos < len(arr) or pos in seen:\n return False\n if not arr[pos]:\n return True\n seen.add(pos)\n return helper(pos + arr[pos]) or helper(pos - arr[pos])\nreturn helper(start)", "from collections import deque\nqueue, seen = (deque([start]), {start})\nwhile queue:\n curr = queue.popleft()\n if not arr[curr]:\n return True\n for nxt in [curr + arr[curr], curr - arr[curr]]:\n if 0 <= nxt < len(arr) and nxt not in seen:\n seen.add(nxt)\n queue.append(nxt)\nreturn False"], "bodies_text": "<|body_start_0|>\n seen = set()\n\n def helper(pos):\n if not 0 <= pos < len(arr) or pos in seen:\n return False\n if not arr[pos]:\n return True\n seen.add(pos)\n return helper(pos + arr[pos]) or helper(pos - arr[pos])\n return helper(start)\n<|end_body_0|>\n\n<|body_start_1|>\n from collections import deque\n queue, seen = (deque([start]), {start})\n while queue:\n curr = queue.popleft()\n if not arr[curr]:\n return True\n for nxt in [curr + arr[curr], curr - arr[curr]]:\n if 0 <= nxt < len(arr) and nxt not in seen:\n seen.add(nxt)\n queue.append(nxt)\n return False\n<|end_body_1|>\n", "class_docstring": "BFS", "class_name": "Solution", "detected_licenses": ["WTFPL"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n \"\"\"BFS\"\"\"\n\n def canReach_1(self, arr: List[int], start: int) -> bool:\n \"\"\"Recursively.\"\"\"\n <|body_0|>\n\n def canReach_2(self, arr: List[int], start: int) -> bool:\n \"\"\"Iteratively\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n seen = set()\n\n def helper(pos):\n if not 0 <= pos < len(arr) or pos in seen:\n return False\n if not arr[pos]:\n return True\n seen.add(pos)\n return helper(pos + arr[pos]) or helper(pos - arr[pos])\n return helper(start)\n<|end_body_0|>\n\n<|body_start_1|>\n from collections import deque\n queue, seen = (deque([start]), {start})\n while queue:\n curr = queue.popleft()\n if not arr[curr]:\n return True\n for nxt in [curr + arr[curr], curr - arr[curr]]:\n if 0 <= nxt < len(arr) and nxt not in seen:\n seen.add(nxt)\n queue.append(nxt)\n return False\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000031", "length_bytes": 1019, "license_type": "permissive", "methods": [{"docstring": "Recursively.", "name": "canReach_1", "signature": "def canReach_1(self, arr: List[int], start: int) -> bool"}, {"docstring": "Iteratively", "name": "canReach_2", "signature": "def canReach_2(self, arr: List[int], start: int) -> bool"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_049808", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nBFS\n\nMethod signatures and docstrings:\n- def canReach_1(self, arr: List[int], start: int) -> bool: Recursively.\n- def canReach_2(self, arr: List[int], start: int) -> bool: Iteratively", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nBFS\n\nMethod signatures and docstrings:\n- def canReach_1(self, arr: List[int], start: int) -> bool: Recursively.\n- def canReach_2(self, arr: List[int], start: int) -> bool: Iteratively\n\n<|skeleton|>\nclass Solution:\n \"\"\"BFS\"\"\"\n\n def canReach_1(self, arr: List[int], start: int) -> bool:\n \"\"\"Recursively.\"\"\"\n <|body_0|>\n\n def canReach_2(self, arr: List[int], start: int) -> bool:\n \"\"\"Iteratively\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n seen = set()\n\n def 
helper(pos):\n if not 0 <= pos < len(arr) or pos in seen:\n return False\n if not arr[pos]:\n return True\n seen.add(pos)\n return helper(pos + arr[pos]) or helper(pos - arr[pos])\n return helper(start)\n<|end_body_0|>\n\n<|body_start_1|>\n from collections import deque\n queue, seen = (deque([start]), {start})\n while queue:\n curr = queue.popleft()\n if not arr[curr]:\n return True\n for nxt in [curr + arr[curr], curr - arr[curr]]:\n if 0 <= nxt < len(arr) and nxt not in seen:\n seen.add(nxt)\n queue.append(nxt)\n return False\n<|end_body_1|>\n", "revision_id": "5e5e7098d2310c972314c9c9895aafd048047fe6", "skeleton": "<|skeleton|>\nclass Solution:\n \"\"\"BFS\"\"\"\n\n def canReach_1(self, arr: List[int], start: int) -> bool:\n \"\"\"Recursively.\"\"\"\n <|body_0|>\n\n def canReach_2(self, arr: List[int], start: int) -> bool:\n \"\"\"Iteratively\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n \"\"\"BFS\"\"\"\n\n def canReach_1(self, arr: List[int], start: int) -> bool:\n \"\"\"Recursively.\"\"\"\n seen = set()\n\n def helper(pos):\n if not 0 <= pos < len(arr) or pos in seen:\n return False\n if not arr[pos]:\n return True\n seen.add(pos)\n return helper(pos + arr[pos]) or helper(pos - arr[pos])\n return helper(start)\n\n def canReach_2(self, arr: List[int], start: int) -> bool:\n \"\"\"Iteratively\"\"\"\n from collections import deque\n queue, seen = (deque([start]), {start})\n while queue:\n curr = queue.popleft()\n if not arr[curr]:\n return True\n for nxt in [curr + arr[curr], curr - arr[curr]]:\n if 0 <= nxt < len(arr) and nxt not in seen:\n seen.add(nxt)\n queue.append(nxt)\n return False\n", "source": "the_stack_v2_python_sparse", "source_path": "1306_Jump_Game_III.py", "source_repo": "imguozr/LC-Solutions", "split": "val", "star_events_count": 0}
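For context, the Solution record above is Jump Game III (LeetCode 1306, per its source_path). A dependency-free restatement of the same BFS, smoke-tested against that problem's public examples:

from collections import deque
from typing import List

def can_reach(arr: List[int], start: int) -> bool:
    # Breadth-first search over indices; landing on a zero value means success.
    queue, seen = deque([start]), {start}
    while queue:
        curr = queue.popleft()
        if arr[curr] == 0:
            return True
        for nxt in (curr + arr[curr], curr - arr[curr]):
            if 0 <= nxt < len(arr) and nxt not in seen:
                seen.add(nxt)
                queue.append(nxt)
    return False

assert can_reach([4, 2, 3, 0, 3, 1, 2], start=5)   # reachable: 5 -> 4 -> 1 -> 3
assert not can_reach([3, 0, 2, 1, 2], start=2)     # index 1 (the only zero) is unreachable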
{"blob_id": "1fc9343321a42dd58bc3075d1f31a8e606a8538c", "bodies": ["if self.request.user.is_superuser:\n return super().retrieve(*args, **kwargs)\nelse:\n return cache_page(self.cache_timeout)(super().retrieve)(*args, **kwargs)", "if self.request.user.is_superuser:\n return super().list(*args, **kwargs)\nelse:\n return cache_page(self.cache_timeout)(super().list)(*args, **kwargs)"], "bodies_text": "<|body_start_0|>\n if self.request.user.is_superuser:\n return super().retrieve(*args, **kwargs)\n else:\n return cache_page(self.cache_timeout)(super().retrieve)(*args, **kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.request.user.is_superuser:\n return super().list(*args, **kwargs)\n else:\n return cache_page(self.cache_timeout)(super().list)(*args, **kwargs)\n<|end_body_1|>\n", "class_docstring": "Caches retrieve and list methods for non-admin users only.", "class_name": "CacheMixin", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CacheMixin:\n \"\"\"Caches retrieve and list methods for non-admin users only.\"\"\"\n\n def retrieve(self, *args, **kwargs):\n \"\"\"Active cache behavior only for non-admin users.\"\"\"\n <|body_0|>\n\n def list(self, *args, **kwargs):\n \"\"\"Active cache behavior only for non-admin users.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.request.user.is_superuser:\n return super().retrieve(*args, **kwargs)\n else:\n return cache_page(self.cache_timeout)(super().retrieve)(*args, **kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.request.user.is_superuser:\n return super().list(*args, **kwargs)\n else:\n return cache_page(self.cache_timeout)(super().list)(*args, **kwargs)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000032", "length_bytes": 760, "license_type": "no_license", "methods": [{"docstring": "Active cache behavior only for non-admin users.", "name": "retrieve", "signature": "def retrieve(self, *args, **kwargs)"}, {"docstring": "Active cache behavior only for non-admin users.", "name": "list", "signature": "def list(self, *args, **kwargs)"}], "n_methods": 2, "prompt": "Implement the Python class `CacheMixin` described below.\n\nClass description:\nCaches retrieve and list methods for non-admin users only.\n\nMethod signatures and docstrings:\n- def retrieve(self, *args, **kwargs): Active cache behavior only for non-admin users.\n- def list(self, *args, **kwargs): Active cache behavior only for non-admin users.", "prompted_full_text": "Implement the Python class `CacheMixin` described below.\n\nClass description:\nCaches retrieve and list methods for non-admin users only.\n\nMethod signatures and docstrings:\n- def retrieve(self, *args, **kwargs): Active cache behavior only for non-admin users.\n- def list(self, *args, **kwargs): Active cache behavior only for non-admin users.\n\n<|skeleton|>\nclass CacheMixin:\n \"\"\"Caches retrieve and list methods for non-admin users only.\"\"\"\n\n def retrieve(self, *args, **kwargs):\n \"\"\"Active cache behavior only for non-admin users.\"\"\"\n <|body_0|>\n\n def list(self, *args, **kwargs):\n \"\"\"Active cache behavior only for non-admin users.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.request.user.is_superuser:\n return super().retrieve(*args, **kwargs)\n else:\n return cache_page(self.cache_timeout)(super().retrieve)(*args, **kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.request.user.is_superuser:\n return super().list(*args, **kwargs)\n else:\n return 
cache_page(self.cache_timeout)(super().list)(*args, **kwargs)\n<|end_body_1|>\n", "revision_id": "617f6c990845d233efa64c9f0b309f5afef17590", "skeleton": "<|skeleton|>\nclass CacheMixin:\n \"\"\"Caches retrieve and list methods for non-admin users only.\"\"\"\n\n def retrieve(self, *args, **kwargs):\n \"\"\"Active cache behavior only for non-admin users.\"\"\"\n <|body_0|>\n\n def list(self, *args, **kwargs):\n \"\"\"Active cache behavior only for non-admin users.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CacheMixin:\n \"\"\"Caches retrieve and list methods for non-admin users only.\"\"\"\n\n def retrieve(self, *args, **kwargs):\n \"\"\"Active cache behavior only for non-admin users.\"\"\"\n if self.request.user.is_superuser:\n return super().retrieve(*args, **kwargs)\n else:\n return cache_page(self.cache_timeout)(super().retrieve)(*args, **kwargs)\n\n def list(self, *args, **kwargs):\n \"\"\"Active cache behavior only for non-admin users.\"\"\"\n if self.request.user.is_superuser:\n return super().list(*args, **kwargs)\n else:\n return cache_page(self.cache_timeout)(super().list)(*args, **kwargs)\n", "source": "the_stack_v2_python_sparse", "source_path": "common/drf/mixins.py", "source_repo": "patate-et-cornichon/patateetcornichon-api", "split": "val", "star_events_count": 3}
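The CacheMixin record above only does something when mixed into a Django REST Framework viewset. A sketch of the usual wiring; the Recipe model, its serializer, and the timeout value are illustrative assumptions:

from rest_framework import viewsets

class RecipeViewSet(CacheMixin, viewsets.ReadOnlyModelViewSet):
    # The mixin must come first in the MRO so its retrieve/list run
    # before the viewset's, falling through via super() for superusers.
    queryset = Recipe.objects.all()
    serializer_class = RecipeSerializer
    cache_timeout = 60 * 15  # seconds, forwarded to cache_page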
{"blob_id": "cfb6f615097ca851c2d08b06acc6f8d9b1df136e", "bodies": ["super().__init__()\n\ndef discriminator_block(in_filters, out_filters, bn=True):\n block = [torch.nn.Conv2d(in_filters, out_filters, 3, 2, 1), torch.nn.LeakyReLU(0.2, inplace=True), torch.nn.Dropout2d(0.25)]\n if bn:\n block.append(torch.nn.BatchNorm2d(out_filters, 0.8))\n return block\nself.model = torch.nn.Sequential(*discriminator_block(num_channels, 16, bn=False), *discriminator_block(16, 32), *discriminator_block(32, 64), *discriminator_block(64, 128))\nds_size = self.model(torch.rand(1, num_channels, img_size, img_size)).size(2)\nself.adv_layer = torch.nn.Sequential(torch.nn.Linear(128 * ds_size ** 2, 1), torch.nn.Sigmoid())", "out = self.model(img)\nout = out.view(out.shape[0], -1)\nvalidity = self.adv_layer(out)\nreturn validity"], "bodies_text": "<|body_start_0|>\n super().__init__()\n\n def discriminator_block(in_filters, out_filters, bn=True):\n block = [torch.nn.Conv2d(in_filters, out_filters, 3, 2, 1), torch.nn.LeakyReLU(0.2, inplace=True), torch.nn.Dropout2d(0.25)]\n if bn:\n block.append(torch.nn.BatchNorm2d(out_filters, 0.8))\n return block\n self.model = torch.nn.Sequential(*discriminator_block(num_channels, 16, bn=False), *discriminator_block(16, 32), *discriminator_block(32, 64), *discriminator_block(64, 128))\n ds_size = self.model(torch.rand(1, num_channels, img_size, img_size)).size(2)\n self.adv_layer = torch.nn.Sequential(torch.nn.Linear(128 * ds_size ** 2, 1), torch.nn.Sigmoid())\n<|end_body_0|>\n\n<|body_start_1|>\n out = self.model(img)\n out = out.view(out.shape[0], -1)\n validity = self.adv_layer(out)\n return validity\n<|end_body_1|>\n", "class_docstring": "A convolutional discriminator model", "class_name": "Discriminator", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Discriminator:\n \"\"\"A convolutional discriminator model\"\"\"\n\n def __init__(self, num_channels, img_size):\n \"\"\"Parameters ---------- num_channels : int number of input channels img_size : int size of input images\"\"\"\n <|body_0|>\n\n def forward(self, img):\n \"\"\"Forwards an image batch through the discriminator network Parameters ---------- img : :class:`torch.Tensor` the image batch Returns ------- :class:`torch.Tensor` the discriminative result\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n\n def discriminator_block(in_filters, out_filters, bn=True):\n block = [torch.nn.Conv2d(in_filters, out_filters, 3, 2, 1), torch.nn.LeakyReLU(0.2, inplace=True), torch.nn.Dropout2d(0.25)]\n if bn:\n block.append(torch.nn.BatchNorm2d(out_filters, 0.8))\n return block\n self.model = torch.nn.Sequential(*discriminator_block(num_channels, 16, bn=False), *discriminator_block(16, 32), *discriminator_block(32, 64), *discriminator_block(64, 128))\n ds_size = self.model(torch.rand(1, num_channels, img_size, img_size)).size(2)\n self.adv_layer = torch.nn.Sequential(torch.nn.Linear(128 * ds_size ** 2, 1), torch.nn.Sigmoid())\n<|end_body_0|>\n\n<|body_start_1|>\n out = self.model(img)\n out = out.view(out.shape[0], -1)\n validity = self.adv_layer(out)\n return validity\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000033", "length_bytes": 3368, "license_type": "permissive", "methods": [{"docstring": "Parameters ---------- num_channels : int number of input channels img_size : int size of input images", "name": "__init__", "signature": "def __init__(self, num_channels, img_size)"}, {"docstring": "Forwards an image 
batch through the discriminator network Parameters ---------- img : :class:`torch.Tensor` the image batch Returns ------- :class:`torch.Tensor` the discriminative result", "name": "forward", "signature": "def forward(self, img)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_043065", "prompt": "Implement the Python class `Discriminator` described below.\n\nClass description:\nA convolutional discriminator model\n\nMethod signatures and docstrings:\n- def __init__(self, num_channels, img_size): Parameters ---------- num_channels : int number of input channels img_size : int size of input images\n- def forward(self, img): Forwards an image batch through the discriminator network Parameters ---------- img : :class:`torch.Tensor` the image batch Returns ------- :class:`torch.Tensor` the discriminative result", "prompted_full_text": "Implement the Python class `Discriminator` described below.\n\nClass description:\nA convolutional discriminator model\n\nMethod signatures and docstrings:\n- def __init__(self, num_channels, img_size): Parameters ---------- num_channels : int number of input channels img_size : int size of input images\n- def forward(self, img): Forwards an image batch through the discriminator network Parameters ---------- img : :class:`torch.Tensor` the image batch Returns ------- :class:`torch.Tensor` the discriminative result\n\n<|skeleton|>\nclass Discriminator:\n \"\"\"A convolutional discriminator model\"\"\"\n\n def __init__(self, num_channels, img_size):\n \"\"\"Parameters ---------- num_channels : int number of input channels img_size : int size of input images\"\"\"\n <|body_0|>\n\n def forward(self, img):\n \"\"\"Forwards an image batch through the discriminator network Parameters ---------- img : :class:`torch.Tensor` the image batch Returns ------- :class:`torch.Tensor` the discriminative result\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n\n def discriminator_block(in_filters, out_filters, bn=True):\n block = [torch.nn.Conv2d(in_filters, out_filters, 3, 2, 1), torch.nn.LeakyReLU(0.2, inplace=True), torch.nn.Dropout2d(0.25)]\n if bn:\n block.append(torch.nn.BatchNorm2d(out_filters, 0.8))\n return block\n self.model = torch.nn.Sequential(*discriminator_block(num_channels, 16, bn=False), *discriminator_block(16, 32), *discriminator_block(32, 64), *discriminator_block(64, 128))\n ds_size = self.model(torch.rand(1, num_channels, img_size, img_size)).size(2)\n self.adv_layer = torch.nn.Sequential(torch.nn.Linear(128 * ds_size ** 2, 1), torch.nn.Sigmoid())\n<|end_body_0|>\n\n<|body_start_1|>\n out = self.model(img)\n out = out.view(out.shape[0], -1)\n validity = self.adv_layer(out)\n return validity\n<|end_body_1|>\n", "revision_id": "1078f5030b8aac2bf022daf5fa14d66f74c3c893", "skeleton": "<|skeleton|>\nclass Discriminator:\n \"\"\"A convolutional discriminator model\"\"\"\n\n def __init__(self, num_channels, img_size):\n \"\"\"Parameters ---------- num_channels : int number of input channels img_size : int size of input images\"\"\"\n <|body_0|>\n\n def forward(self, img):\n \"\"\"Forwards an image batch through the discriminator network Parameters ---------- img : :class:`torch.Tensor` the image batch Returns ------- :class:`torch.Tensor` the discriminative result\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Discriminator:\n \"\"\"A convolutional discriminator 
model\"\"\"\n\n def __init__(self, num_channels, img_size):\n \"\"\"Parameters ---------- num_channels : int number of input channels img_size : int size of input images\"\"\"\n super().__init__()\n\n def discriminator_block(in_filters, out_filters, bn=True):\n block = [torch.nn.Conv2d(in_filters, out_filters, 3, 2, 1), torch.nn.LeakyReLU(0.2, inplace=True), torch.nn.Dropout2d(0.25)]\n if bn:\n block.append(torch.nn.BatchNorm2d(out_filters, 0.8))\n return block\n self.model = torch.nn.Sequential(*discriminator_block(num_channels, 16, bn=False), *discriminator_block(16, 32), *discriminator_block(32, 64), *discriminator_block(64, 128))\n ds_size = self.model(torch.rand(1, num_channels, img_size, img_size)).size(2)\n self.adv_layer = torch.nn.Sequential(torch.nn.Linear(128 * ds_size ** 2, 1), torch.nn.Sigmoid())\n\n def forward(self, img):\n \"\"\"Forwards an image batch through the discriminator network Parameters ---------- img : :class:`torch.Tensor` the image batch Returns ------- :class:`torch.Tensor` the discriminative result\"\"\"\n out = self.model(img)\n out = out.view(out.shape[0], -1)\n validity = self.adv_layer(out)\n return validity\n", "source": "the_stack_v2_python_sparse", "source_path": "dlutils/models/gans/dragan/models.py", "source_repo": "justusschock/dl-utils", "split": "val", "star_events_count": 15}
{"blob_id": "0d755dfb475ed89d97409204ec3ece184759e6ed", "bodies": ["context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')"], "bodies_text": "<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n\n<|body_start_2|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_2|>\n", "class_docstring": "navService.Map 导航地图 仅无线导航版本支持", "class_name": "MapServicer", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MapServicer:\n \"\"\"navService.Map 导航地图 仅无线导航版本支持\"\"\"\n\n def GetMapInfo(self, request, context):\n \"\"\"GetMapInfo 获取地图信息\"\"\"\n <|body_0|>\n\n def MapPositionStream(self, request, context):\n \"\"\"MapPositionStream 监听设备的地图定位信息\"\"\"\n <|body_1|>\n\n def LocationReset(self, request, context):\n \"\"\"LocationReset 重置当前定位 某些情况下此方法返回成功时可能出现实际位置与地图位置不相符, 在有明确参照物附近调用此方法可提高准确定位成功率。 用于发生定位异常/错误状态,重新初始化导航定位 重定位错误:定位状态超时|无地图|UWB错误 重定位超时判断: 默认10s,仅在非错误状态下重置\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n\n<|body_start_2|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000034", "length_bytes": 5685, "license_type": "permissive", "methods": [{"docstring": "GetMapInfo 获取地图信息", "name": "GetMapInfo", "signature": "def GetMapInfo(self, request, context)"}, {"docstring": "MapPositionStream 监听设备的地图定位信息", "name": "MapPositionStream", "signature": "def MapPositionStream(self, request, context)"}, {"docstring": "LocationReset 重置当前定位 某些情况下此方法返回成功时可能出现实际位置与地图位置不相符, 在有明确参照物附近调用此方法可提高准确定位成功率。 用于发生定位异常/错误状态,重新初始化导航定位 重定位错误:定位状态超时|无地图|UWB错误 重定位超时判断: 默认10s,仅在非错误状态下重置", "name": "LocationReset", "signature": "def LocationReset(self, request, context)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_010527", "prompt": "Implement the Python class `MapServicer` described below.\n\nClass description:\nnavService.Map 导航地图 仅无线导航版本支持\n\nMethod signatures and docstrings:\n- def GetMapInfo(self, request, context): GetMapInfo 获取地图信息\n- def MapPositionStream(self, request, context): MapPositionStream 监听设备的地图定位信息\n- def LocationReset(self, request, context): LocationReset 重置当前定位 某些情况下此方法返回成功时可能出现实际位置与地图位置不相符, 在有明确参照物附近调用此方法可提高准确定位成功率。 用于发生定位异常/错误状态,重新初始化导航定位 重定位错误:定位状态超时|无地图|UWB错误 
重定位超时判断: 默认10s,仅在非错误状态下重置", "prompted_full_text": "Implement the Python class `MapServicer` described below.\n\nClass description:\nnavService.Map 导航地图 仅无线导航版本支持\n\nMethod signatures and docstrings:\n- def GetMapInfo(self, request, context): GetMapInfo 获取地图信息\n- def MapPositionStream(self, request, context): MapPositionStream 监听设备的地图定位信息\n- def LocationReset(self, request, context): LocationReset 重置当前定位 某些情况下此方法返回成功时可能出现实际位置与地图位置不相符, 在有明确参照物附近调用此方法可提高准确定位成功率。 用于发生定位异常/错误状态,重新初始化导航定位 重定位错误:定位状态超时|无地图|UWB错误 重定位超时判断: 默认10s,仅在非错误状态下重置\n\n<|skeleton|>\nclass MapServicer:\n \"\"\"navService.Map 导航地图 仅无线导航版本支持\"\"\"\n\n def GetMapInfo(self, request, context):\n \"\"\"GetMapInfo 获取地图信息\"\"\"\n <|body_0|>\n\n def MapPositionStream(self, request, context):\n \"\"\"MapPositionStream 监听设备的地图定位信息\"\"\"\n <|body_1|>\n\n def LocationReset(self, request, context):\n \"\"\"LocationReset 重置当前定位 某些情况下此方法返回成功时可能出现实际位置与地图位置不相符, 在有明确参照物附近调用此方法可提高准确定位成功率。 用于发生定位异常/错误状态,重新初始化导航定位 重定位错误:定位状态超时|无地图|UWB错误 重定位超时判断: 默认10s,仅在非错误状态下重置\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n\n<|body_start_2|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_2|>\n", "revision_id": "4a0cb57aa5f318a3099fbfe6198620555b3a45af", "skeleton": "<|skeleton|>\nclass MapServicer:\n \"\"\"navService.Map 导航地图 仅无线导航版本支持\"\"\"\n\n def GetMapInfo(self, request, context):\n \"\"\"GetMapInfo 获取地图信息\"\"\"\n <|body_0|>\n\n def MapPositionStream(self, request, context):\n \"\"\"MapPositionStream 监听设备的地图定位信息\"\"\"\n <|body_1|>\n\n def LocationReset(self, request, context):\n \"\"\"LocationReset 重置当前定位 某些情况下此方法返回成功时可能出现实际位置与地图位置不相符, 在有明确参照物附近调用此方法可提高准确定位成功率。 用于发生定位异常/错误状态,重新初始化导航定位 重定位错误:定位状态超时|无地图|UWB错误 重定位超时判断: 默认10s,仅在非错误状态下重置\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class MapServicer:\n \"\"\"navService.Map 导航地图 仅无线导航版本支持\"\"\"\n\n def GetMapInfo(self, request, context):\n \"\"\"GetMapInfo 获取地图信息\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def MapPositionStream(self, request, context):\n \"\"\"MapPositionStream 监听设备的地图定位信息\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def LocationReset(self, request, context):\n \"\"\"LocationReset 重置当前定位 某些情况下此方法返回成功时可能出现实际位置与地图位置不相符, 在有明确参照物附近调用此方法可提高准确定位成功率。 用于发生定位异常/错误状态,重新初始化导航定位 重定位错误:定位状态超时|无地图|UWB错误 重定位超时判断: 默认10s,仅在非错误状态下重置\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n", "source": "the_stack_v2_python_sparse", "source_path": "pythonsdk/nav/map_pb2_grpc.py", "source_repo": "jjrobotcn/andy4", "split": "val", "star_events_count": 0}
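The MapServicer above is generated gRPC stub code whose methods all answer UNIMPLEMENTED; a real backend subclasses it, overrides what it supports, and registers with a server. The map_pb2.MapInfo constructor and the add_MapServicer_to_server helper follow grpc's standard codegen naming and are assumptions here:

from concurrent import futures
import grpc

class LiveMapServicer(MapServicer):
    def GetMapInfo(self, request, context):
        # Override with a real response instead of raising NotImplementedError.
        return map_pb2.MapInfo()

server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
add_MapServicer_to_server(LiveMapServicer(), server)
server.add_insecure_port('[::]:50051')
server.start()
server.wait_for_termination()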
{"blob_id": "1e63c91d278413eb12cadaca3ef4d4dba82b82d9", "bodies": ["self._logger = logging.getLogger(__name__)\n'A logger for this object.'\nself._job_store = job_store\n'The job store to act on.'\nself._steps_requirements = dict()\n'Requirements per step, keyed by step name and requirement\\n name.\\n '\nself._get_steps_resource_requirements(local_api_dir)", "with self._job_store:\n job = self._job_store.get_job(job_id)\n steps = get_workflow_step_names(cast(bytes, job.workflow_content))\n for step in steps:\n if step not in self._steps_requirements:\n job.error('Found invalid step {} in workflow'.format(step))\n raise InvalidJobError('Invalid step in workflow')\n job.required_num_cores = get_required_num_cores(cast(bytes, job.workflow_content))\n num_cores_steps = [self._steps_requirements[step]['num_cores'] for step in steps]\n if max(num_cores_steps) > 0:\n job.required_num_cores = max(num_cores_steps)\n job.time_limit = get_time_limit(cast(bytes, job.workflow_content))\n time_limit_steps = [self._steps_requirements[step]['time_limit'] for step in steps]\n job.time_limit = max(job.time_limit, sum(time_limit_steps))", "for project_dir in local_api_dir.iterdir():\n local_steps_dir = project_dir / 'steps'\n for this_dir, _, files in local_steps_dir.walk():\n for filename in files:\n if filename.endswith('.cwl'):\n self._logger.debug('Scanning file for requirements: {}'.format(this_dir / filename))\n rel_this_dir = this_dir.relative_to(str(local_steps_dir))\n step_name = str(rel_this_dir / filename)\n step_contents = (this_dir / filename).read_bytes()\n step_num_cores = get_required_num_cores(step_contents)\n step_time_limit = get_time_limit(step_contents)\n if step_name not in self._steps_requirements:\n self._steps_requirements[step_name] = dict()\n self._steps_requirements[step_name]['num_cores'] = step_num_cores\n self._steps_requirements[step_name]['time_limit'] = step_time_limit\n self._logger.debug('Step {} requires {} cores'.format(step_name, step_num_cores))"], "bodies_text": "<|body_start_0|>\n self._logger = logging.getLogger(__name__)\n 'A logger for this object.'\n self._job_store = job_store\n 'The job store to act on.'\n self._steps_requirements = dict()\n 'Requirements per step, keyed by step name and requirement\\n name.\\n '\n self._get_steps_resource_requirements(local_api_dir)\n<|end_body_0|>\n\n<|body_start_1|>\n with self._job_store:\n job = self._job_store.get_job(job_id)\n steps = get_workflow_step_names(cast(bytes, job.workflow_content))\n for step in steps:\n if step not in self._steps_requirements:\n job.error('Found invalid step {} in workflow'.format(step))\n raise InvalidJobError('Invalid step in workflow')\n job.required_num_cores = get_required_num_cores(cast(bytes, job.workflow_content))\n num_cores_steps = [self._steps_requirements[step]['num_cores'] for step in steps]\n if max(num_cores_steps) > 0:\n job.required_num_cores = max(num_cores_steps)\n job.time_limit = get_time_limit(cast(bytes, job.workflow_content))\n time_limit_steps = [self._steps_requirements[step]['time_limit'] for step in steps]\n job.time_limit = max(job.time_limit, sum(time_limit_steps))\n<|end_body_1|>\n\n<|body_start_2|>\n for project_dir in local_api_dir.iterdir():\n local_steps_dir = project_dir / 'steps'\n for this_dir, _, files in local_steps_dir.walk():\n for filename in files:\n if filename.endswith('.cwl'):\n self._logger.debug('Scanning file for requirements: {}'.format(this_dir / filename))\n rel_this_dir = this_dir.relative_to(str(local_steps_dir))\n step_name = 
str(rel_this_dir / filename)\n step_contents = (this_dir / filename).read_bytes()\n step_num_cores = get_required_num_cores(step_contents)\n step_time_limit = get_time_limit(step_contents)\n if step_name not in self._steps_requirements:\n self._steps_requirements[step_name] = dict()\n self._steps_requirements[step_name]['num_cores'] = step_num_cores\n self._steps_requirements[step_name]['time_limit'] = step_time_limit\n self._logger.debug('Step {} requires {} cores'.format(step_name, step_num_cores))\n<|end_body_2|>\n", "class_docstring": "Handles workflow execution requirements. This class keeps track of which hardware is needed for each available step, then analyses a workflow and decides which resources it needs based on this.", "class_name": "JobPlanner", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass JobPlanner:\n \"\"\"Handles workflow execution requirements. This class keeps track of which hardware is needed for each available step, then analyses a workflow and decides which resources it needs based on this.\"\"\"\n\n def __init__(self, job_store: SQLiteJobStore, local_api_dir: cerulean.Path):\n \"\"\"Create a JobPlanner. Args: job_store: The job store to act on. local_api_dir: Path of local api directory.\"\"\"\n <|body_0|>\n\n def plan_job(self, job_id: str) -> None:\n \"\"\"Figures out which resources a job needs. Resources are identified by strings. Currently, there is ``num_cores``, the number of cores to run on, and ``time_limit``, the amount of time to reserve in seconds. Args: job_id: Id of the job to plan.\"\"\"\n <|body_1|>\n\n def _get_steps_resource_requirements(self, local_api_dir: cerulean.Path) -> None:\n \"\"\"Scan CWL steps and extract resource requirements. 
Args: local_api_dir: The local directory with the API\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._logger = logging.getLogger(__name__)\n 'A logger for this object.'\n self._job_store = job_store\n 'The job store to act on.'\n self._steps_requirements = dict()\n 'Requirements per step, keyed by step name and requirement\\n name.\\n '\n self._get_steps_resource_requirements(local_api_dir)\n<|end_body_0|>\n\n<|body_start_1|>\n with self._job_store:\n job = self._job_store.get_job(job_id)\n steps = get_workflow_step_names(cast(bytes, job.workflow_content))\n for step in steps:\n if step not in self._steps_requirements:\n job.error('Found invalid step {} in workflow'.format(step))\n raise InvalidJobError('Invalid step in workflow')\n job.required_num_cores = get_required_num_cores(cast(bytes, job.workflow_content))\n num_cores_steps = [self._steps_requirements[step]['num_cores'] for step in steps]\n if max(num_cores_steps) > 0:\n job.required_num_cores = max(num_cores_steps)\n job.time_limit = get_time_limit(cast(bytes, job.workflow_content))\n time_limit_steps = [self._steps_requirements[step]['time_limit'] for step in steps]\n job.time_limit = max(job.time_limit, sum(time_limit_steps))\n<|end_body_1|>\n\n<|body_start_2|>\n for project_dir in local_api_dir.iterdir():\n local_steps_dir = project_dir / 'steps'\n for this_dir, _, files in local_steps_dir.walk():\n for filename in files:\n if filename.endswith('.cwl'):\n self._logger.debug('Scanning file for requirements: {}'.format(this_dir / filename))\n rel_this_dir = this_dir.relative_to(str(local_steps_dir))\n step_name = str(rel_this_dir / filename)\n step_contents = (this_dir / filename).read_bytes()\n step_num_cores = get_required_num_cores(step_contents)\n step_time_limit = get_time_limit(step_contents)\n if step_name not in self._steps_requirements:\n self._steps_requirements[step_name] = dict()\n self._steps_requirements[step_name]['num_cores'] = step_num_cores\n self._steps_requirements[step_name]['time_limit'] = step_time_limit\n self._logger.debug('Step {} requires {} cores'.format(step_name, step_num_cores))\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000035", "length_bytes": 4211, "license_type": "permissive", "methods": [{"docstring": "Create a JobPlanner. Args: job_store: The job store to act on. local_api_dir: Path of local api directory.", "name": "__init__", "signature": "def __init__(self, job_store: SQLiteJobStore, local_api_dir: cerulean.Path)"}, {"docstring": "Figures out which resources a job needs. Resources are identified by strings. Currently, there is ``num_cores``, the number of cores to run on, and ``time_limit``, the amount of time to reserve in seconds. Args: job_id: Id of the job to plan.", "name": "plan_job", "signature": "def plan_job(self, job_id: str) -> None"}, {"docstring": "Scan CWL steps and extract resource requirements. Args: local_api_dir: The local directory with the API", "name": "_get_steps_resource_requirements", "signature": "def _get_steps_resource_requirements(self, local_api_dir: cerulean.Path) -> None"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_052098", "prompt": "Implement the Python class `JobPlanner` described below.\n\nClass description:\nHandles workflow execution requirements. 
This class keeps track of which hardware is needed for each available step, then analyses a workflow and decides which resources it needs based on this.\n\nMethod signatures and docstrings:\n- def __init__(self, job_store: SQLiteJobStore, local_api_dir: cerulean.Path): Create a JobPlanner. Args: job_store: The job store to act on. local_api_dir: Path of local api directory.\n- def plan_job(self, job_id: str) -> None: Figures out which resources a job needs. Resources are identified by strings. Currently, there is ``num_cores``, the number of cores to run on, and ``time_limit``, the amount of time to reserve in seconds. Args: job_id: Id of the job to plan.\n- def _get_steps_resource_requirements(self, local_api_dir: cerulean.Path) -> None: Scan CWL steps and extract resource requirements. Args: local_api_dir: The local directory with the API", "prompted_full_text": "Implement the Python class `JobPlanner` described below.\n\nClass description:\nHandles workflow execution requirements. This class keeps track of which hardware is needed for each available step, then analyses a workflow and decides which resources it needs based on this.\n\nMethod signatures and docstrings:\n- def __init__(self, job_store: SQLiteJobStore, local_api_dir: cerulean.Path): Create a JobPlanner. Args: job_store: The job store to act on. local_api_dir: Path of local api directory.\n- def plan_job(self, job_id: str) -> None: Figures out which resources a job needs. Resources are identified by strings. Currently, there is ``num_cores``, the number of cores to run on, and ``time_limit``, the amount of time to reserve in seconds. Args: job_id: Id of the job to plan.\n- def _get_steps_resource_requirements(self, local_api_dir: cerulean.Path) -> None: Scan CWL steps and extract resource requirements. Args: local_api_dir: The local directory with the API\n\n<|skeleton|>\nclass JobPlanner:\n \"\"\"Handles workflow execution requirements. This class keeps track of which hardware is needed for each available step, then analyses a workflow and decides which resources it needs based on this.\"\"\"\n\n def __init__(self, job_store: SQLiteJobStore, local_api_dir: cerulean.Path):\n \"\"\"Create a JobPlanner. Args: job_store: The job store to act on. local_api_dir: Path of local api directory.\"\"\"\n <|body_0|>\n\n def plan_job(self, job_id: str) -> None:\n \"\"\"Figures out which resources a job needs. Resources are identified by strings. Currently, there is ``num_cores``, the number of cores to run on, and ``time_limit``, the amount of time to reserve in seconds. Args: job_id: Id of the job to plan.\"\"\"\n <|body_1|>\n\n def _get_steps_resource_requirements(self, local_api_dir: cerulean.Path) -> None:\n \"\"\"Scan CWL steps and extract resource requirements. 
Args: local_api_dir: The local directory with the API\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._logger = logging.getLogger(__name__)\n 'A logger for this object.'\n self._job_store = job_store\n 'The job store to act on.'\n self._steps_requirements = dict()\n 'Requirements per step, keyed by step name and requirement\\n name.\\n '\n self._get_steps_resource_requirements(local_api_dir)\n<|end_body_0|>\n\n<|body_start_1|>\n with self._job_store:\n job = self._job_store.get_job(job_id)\n steps = get_workflow_step_names(cast(bytes, job.workflow_content))\n for step in steps:\n if step not in self._steps_requirements:\n job.error('Found invalid step {} in workflow'.format(step))\n raise InvalidJobError('Invalid step in workflow')\n job.required_num_cores = get_required_num_cores(cast(bytes, job.workflow_content))\n num_cores_steps = [self._steps_requirements[step]['num_cores'] for step in steps]\n if max(num_cores_steps) > 0:\n job.required_num_cores = max(num_cores_steps)\n job.time_limit = get_time_limit(cast(bytes, job.workflow_content))\n time_limit_steps = [self._steps_requirements[step]['time_limit'] for step in steps]\n job.time_limit = max(job.time_limit, sum(time_limit_steps))\n<|end_body_1|>\n\n<|body_start_2|>\n for project_dir in local_api_dir.iterdir():\n local_steps_dir = project_dir / 'steps'\n for this_dir, _, files in local_steps_dir.walk():\n for filename in files:\n if filename.endswith('.cwl'):\n self._logger.debug('Scanning file for requirements: {}'.format(this_dir / filename))\n rel_this_dir = this_dir.relative_to(str(local_steps_dir))\n step_name = str(rel_this_dir / filename)\n step_contents = (this_dir / filename).read_bytes()\n step_num_cores = get_required_num_cores(step_contents)\n step_time_limit = get_time_limit(step_contents)\n if step_name not in self._steps_requirements:\n self._steps_requirements[step_name] = dict()\n self._steps_requirements[step_name]['num_cores'] = step_num_cores\n self._steps_requirements[step_name]['time_limit'] = step_time_limit\n self._logger.debug('Step {} requires {} cores'.format(step_name, step_num_cores))\n<|end_body_2|>\n", "revision_id": "f8ff51629d1198200bd84d59e78ca456321af940", "skeleton": "<|skeleton|>\nclass JobPlanner:\n \"\"\"Handles workflow execution requirements. This class keeps track of which hardware is needed for each available step, then analyses a workflow and decides which resources it needs based on this.\"\"\"\n\n def __init__(self, job_store: SQLiteJobStore, local_api_dir: cerulean.Path):\n \"\"\"Create a JobPlanner. Args: job_store: The job store to act on. local_api_dir: Path of local api directory.\"\"\"\n <|body_0|>\n\n def plan_job(self, job_id: str) -> None:\n \"\"\"Figures out which resources a job needs. Resources are identified by strings. Currently, there is ``num_cores``, the number of cores to run on, and ``time_limit``, the amount of time to reserve in seconds. Args: job_id: Id of the job to plan.\"\"\"\n <|body_1|>\n\n def _get_steps_resource_requirements(self, local_api_dir: cerulean.Path) -> None:\n \"\"\"Scan CWL steps and extract resource requirements. Args: local_api_dir: The local directory with the API\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class JobPlanner:\n \"\"\"Handles workflow execution requirements. 
This class keeps track of which hardware is needed for each available step, then analyses a workflow and decides which resources it needs based on this.\"\"\"\n\n def __init__(self, job_store: SQLiteJobStore, local_api_dir: cerulean.Path):\n \"\"\"Create a JobPlanner. Args: job_store: The job store to act on. local_api_dir: Path of local api directory.\"\"\"\n self._logger = logging.getLogger(__name__)\n 'A logger for this object.'\n self._job_store = job_store\n 'The job store to act on.'\n self._steps_requirements = dict()\n 'Requirements per step, keyed by step name and requirement\\n name.\\n '\n self._get_steps_resource_requirements(local_api_dir)\n\n def plan_job(self, job_id: str) -> None:\n \"\"\"Figures out which resources a job needs. Resources are identified by strings. Currently, there is ``num_cores``, the number of cores to run on, and ``time_limit``, the amount of time to reserve in seconds. Args: job_id: Id of the job to plan.\"\"\"\n with self._job_store:\n job = self._job_store.get_job(job_id)\n steps = get_workflow_step_names(cast(bytes, job.workflow_content))\n for step in steps:\n if step not in self._steps_requirements:\n job.error('Found invalid step {} in workflow'.format(step))\n raise InvalidJobError('Invalid step in workflow')\n job.required_num_cores = get_required_num_cores(cast(bytes, job.workflow_content))\n num_cores_steps = [self._steps_requirements[step]['num_cores'] for step in steps]\n if max(num_cores_steps) > 0:\n job.required_num_cores = max(num_cores_steps)\n job.time_limit = get_time_limit(cast(bytes, job.workflow_content))\n time_limit_steps = [self._steps_requirements[step]['time_limit'] for step in steps]\n job.time_limit = max(job.time_limit, sum(time_limit_steps))\n\n def _get_steps_resource_requirements(self, local_api_dir: cerulean.Path) -> None:\n \"\"\"Scan CWL steps and extract resource requirements. Args: local_api_dir: The local directory with the API\"\"\"\n for project_dir in local_api_dir.iterdir():\n local_steps_dir = project_dir / 'steps'\n for this_dir, _, files in local_steps_dir.walk():\n for filename in files:\n if filename.endswith('.cwl'):\n self._logger.debug('Scanning file for requirements: {}'.format(this_dir / filename))\n rel_this_dir = this_dir.relative_to(str(local_steps_dir))\n step_name = str(rel_this_dir / filename)\n step_contents = (this_dir / filename).read_bytes()\n step_num_cores = get_required_num_cores(step_contents)\n step_time_limit = get_time_limit(step_contents)\n if step_name not in self._steps_requirements:\n self._steps_requirements[step_name] = dict()\n self._steps_requirements[step_name]['num_cores'] = step_num_cores\n self._steps_requirements[step_name]['time_limit'] = step_time_limit\n self._logger.debug('Step {} requires {} cores'.format(step_name, step_num_cores))\n", "source": "the_stack_v2_python_sparse", "source_path": "cerise/back_end/job_planner.py", "source_repo": "MD-Studio/cerise", "split": "val", "star_events_count": 10}
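The aggregation rule in plan_job above reduces to: a workflow needs the maximum per-step core count and at least the sum of per-step time limits (the real method also folds in workflow-level overrides from the CWL document). A dependency-free illustration with invented step data:

steps_requirements = {
    'align/bwa-mem.cwl': {'num_cores': 8, 'time_limit': 3600},
    'sort/samtools.cwl': {'num_cores': 2, 'time_limit': 600},
}
workflow_steps = ['align/bwa-mem.cwl', 'sort/samtools.cwl']

# Cores are shared across steps, so take the maximum; wall-clock time
# accumulates step after step, so take the sum.
required_num_cores = max(steps_requirements[s]['num_cores'] for s in workflow_steps)
time_limit = sum(steps_requirements[s]['time_limit'] for s in workflow_steps)
assert (required_num_cores, time_limit) == (8, 4200)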
{"blob_id": "5355c008a3f20ff6c45e0280146d4861a43ff007", "bodies": ["bunsi, bunbo = (3, 7)\nfra = Fraction(bunsi, bunbo)\nself.assertEqual(bunsi, fra.numerator)\nself.assertEqual(bunbo, fra.denominator)", "fra_base = Fraction(1, 2)\nfra_add = Fraction(1, 3)\nfra_exp = Fraction(5, 6)\nfra_calc = fra_base + fra_add\nself.assertEqual(fra_exp.numerator, fra_calc.numerator)\nself.assertEqual(fra_exp.denominator, fra_calc.denominator)\nfra_base += fra_add\nself.assertEqual(fra_exp.numerator, fra_base.numerator)\nself.assertEqual(fra_exp.denominator, fra_base.denominator)", "fra_base = Fraction(1, 2)\nfra_sub = Fraction(1, 3)\nfra_exp = Fraction(1, 6)\nfra_calc = fra_base - fra_sub\nself.assertEqual(fra_exp.numerator, fra_calc.numerator)\nself.assertEqual(fra_exp.denominator, fra_calc.denominator)\nfra_base -= fra_sub\nself.assertEqual(fra_exp.numerator, fra_base.numerator)\nself.assertEqual(fra_exp.denominator, fra_base.denominator)", "fra_base = Fraction(1, 2)\nfra_mul = Fraction(1, 3)\nfra_exp = Fraction(1, 6)\nfra_calc = fra_base * fra_mul\nself.assertEqual(fra_exp.numerator, fra_calc.numerator)\nself.assertEqual(fra_exp.denominator, fra_calc.denominator)\nfra_base *= fra_mul\nself.assertEqual(fra_exp.numerator, fra_base.numerator)\nself.assertEqual(fra_exp.denominator, fra_base.denominator)", "fra_base = Fraction(1, 2)\nfra_truediv = Fraction(1, 3)\nfra_exp = Fraction(3, 2)\nfra_calc = fra_base / fra_truediv\nself.assertEqual(fra_exp.numerator, fra_calc.numerator)\nself.assertEqual(fra_exp.denominator, fra_calc.denominator)\nfra_base /= fra_truediv\nself.assertEqual(fra_exp.numerator, fra_base.numerator)\nself.assertEqual(fra_exp.denominator, fra_base.denominator)"], "bodies_text": "<|body_start_0|>\n bunsi, bunbo = (3, 7)\n fra = Fraction(bunsi, bunbo)\n self.assertEqual(bunsi, fra.numerator)\n self.assertEqual(bunbo, fra.denominator)\n<|end_body_0|>\n\n<|body_start_1|>\n fra_base = Fraction(1, 2)\n fra_add = Fraction(1, 3)\n fra_exp = Fraction(5, 6)\n fra_calc = fra_base + fra_add\n self.assertEqual(fra_exp.numerator, fra_calc.numerator)\n self.assertEqual(fra_exp.denominator, fra_calc.denominator)\n fra_base += fra_add\n self.assertEqual(fra_exp.numerator, fra_base.numerator)\n self.assertEqual(fra_exp.denominator, fra_base.denominator)\n<|end_body_1|>\n\n<|body_start_2|>\n fra_base = Fraction(1, 2)\n fra_sub = Fraction(1, 3)\n fra_exp = Fraction(1, 6)\n fra_calc = fra_base - fra_sub\n self.assertEqual(fra_exp.numerator, fra_calc.numerator)\n self.assertEqual(fra_exp.denominator, fra_calc.denominator)\n fra_base -= fra_sub\n self.assertEqual(fra_exp.numerator, fra_base.numerator)\n self.assertEqual(fra_exp.denominator, fra_base.denominator)\n<|end_body_2|>\n\n<|body_start_3|>\n fra_base = Fraction(1, 2)\n fra_mul = Fraction(1, 3)\n fra_exp = Fraction(1, 6)\n fra_calc = fra_base * fra_mul\n self.assertEqual(fra_exp.numerator, fra_calc.numerator)\n self.assertEqual(fra_exp.denominator, fra_calc.denominator)\n fra_base *= fra_mul\n self.assertEqual(fra_exp.numerator, fra_base.numerator)\n self.assertEqual(fra_exp.denominator, fra_base.denominator)\n<|end_body_3|>\n\n<|body_start_4|>\n fra_base = Fraction(1, 2)\n fra_truediv = Fraction(1, 3)\n fra_exp = Fraction(3, 2)\n fra_calc = fra_base / fra_truediv\n self.assertEqual(fra_exp.numerator, fra_calc.numerator)\n self.assertEqual(fra_exp.denominator, fra_calc.denominator)\n fra_base /= fra_truediv\n self.assertEqual(fra_exp.numerator, fra_base.numerator)\n self.assertEqual(fra_exp.denominator, fra_base.denominator)\n<|end_body_4|>\n", 
"class_docstring": "[summary] Fractionクラスのユニットテストクラス", "class_name": "Fractiontest", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Fractiontest:\n \"\"\"[summary] Fractionクラスのユニットテストクラス\"\"\"\n\n def test_constructor(self):\n \"\"\"[summary] コンストラクタのテストメソッド\"\"\"\n <|body_0|>\n\n def test_add(self):\n \"\"\"[summary] __add__と__iadd__のテストメソッド\"\"\"\n <|body_1|>\n\n def test_sub(self):\n \"\"\"[summary] __sub__と__isub__のテストメソッド\"\"\"\n <|body_2|>\n\n def test_mul(self):\n \"\"\"[summary] __mul__と__imul__のテストメソッド\"\"\"\n <|body_3|>\n\n def test_truediv(self):\n \"\"\"[summary] __truediv__と__itruediv__のテストメソッド\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n bunsi, bunbo = (3, 7)\n fra = Fraction(bunsi, bunbo)\n self.assertEqual(bunsi, fra.numerator)\n self.assertEqual(bunbo, fra.denominator)\n<|end_body_0|>\n\n<|body_start_1|>\n fra_base = Fraction(1, 2)\n fra_add = Fraction(1, 3)\n fra_exp = Fraction(5, 6)\n fra_calc = fra_base + fra_add\n self.assertEqual(fra_exp.numerator, fra_calc.numerator)\n self.assertEqual(fra_exp.denominator, fra_calc.denominator)\n fra_base += fra_add\n self.assertEqual(fra_exp.numerator, fra_base.numerator)\n self.assertEqual(fra_exp.denominator, fra_base.denominator)\n<|end_body_1|>\n\n<|body_start_2|>\n fra_base = Fraction(1, 2)\n fra_sub = Fraction(1, 3)\n fra_exp = Fraction(1, 6)\n fra_calc = fra_base - fra_sub\n self.assertEqual(fra_exp.numerator, fra_calc.numerator)\n self.assertEqual(fra_exp.denominator, fra_calc.denominator)\n fra_base -= fra_sub\n self.assertEqual(fra_exp.numerator, fra_base.numerator)\n self.assertEqual(fra_exp.denominator, fra_base.denominator)\n<|end_body_2|>\n\n<|body_start_3|>\n fra_base = Fraction(1, 2)\n fra_mul = Fraction(1, 3)\n fra_exp = Fraction(1, 6)\n fra_calc = fra_base * fra_mul\n self.assertEqual(fra_exp.numerator, fra_calc.numerator)\n self.assertEqual(fra_exp.denominator, fra_calc.denominator)\n fra_base *= fra_mul\n self.assertEqual(fra_exp.numerator, fra_base.numerator)\n self.assertEqual(fra_exp.denominator, fra_base.denominator)\n<|end_body_3|>\n\n<|body_start_4|>\n fra_base = Fraction(1, 2)\n fra_truediv = Fraction(1, 3)\n fra_exp = Fraction(3, 2)\n fra_calc = fra_base / fra_truediv\n self.assertEqual(fra_exp.numerator, fra_calc.numerator)\n self.assertEqual(fra_exp.denominator, fra_calc.denominator)\n fra_base /= fra_truediv\n self.assertEqual(fra_exp.numerator, fra_base.numerator)\n self.assertEqual(fra_exp.denominator, fra_base.denominator)\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000036", "length_bytes": 2728, "license_type": "no_license", "methods": [{"docstring": "[summary] コンストラクタのテストメソッド", "name": "test_constructor", "signature": "def test_constructor(self)"}, {"docstring": "[summary] __add__と__iadd__のテストメソッド", "name": "test_add", "signature": "def test_add(self)"}, {"docstring": "[summary] __sub__と__isub__のテストメソッド", "name": "test_sub", "signature": "def test_sub(self)"}, {"docstring": "[summary] __mul__と__imul__のテストメソッド", "name": "test_mul", "signature": "def test_mul(self)"}, {"docstring": "[summary] __truediv__と__itruediv__のテストメソッド", "name": "test_truediv", "signature": "def test_truediv(self)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_044375", "prompt": "Implement the Python class `Fractiontest` described below.\n\nClass description:\n[summary] Fractionクラスのユニットテストクラス\n\nMethod signatures and docstrings:\n- def test_constructor(self): [summary] コンストラクタのテストメソッド\n- def 
test_add(self): [summary] __add__と__iadd__のテストメソッド\n- def test_sub(self): [summary] __sub__と__isub__のテストメソッド\n- def test_mul(self): [summary] __mul__と__imul__のテストメソッド\n- def test_truediv(self): [summary] __truediv__と__itruediv__のテストメソッド", "prompted_full_text": "Implement the Python class `Fractiontest` described below.\n\nClass description:\n[summary] Fractionクラスのユニットテストクラス\n\nMethod signatures and docstrings:\n- def test_constructor(self): [summary] コンストラクタのテストメソッド\n- def test_add(self): [summary] __add__と__iadd__のテストメソッド\n- def test_sub(self): [summary] __sub__と__isub__のテストメソッド\n- def test_mul(self): [summary] __mul__と__imul__のテストメソッド\n- def test_truediv(self): [summary] __truediv__と__itruediv__のテストメソッド\n\n<|skeleton|>\nclass Fractiontest:\n \"\"\"[summary] Fractionクラスのユニットテストクラス\"\"\"\n\n def test_constructor(self):\n \"\"\"[summary] コンストラクタのテストメソッド\"\"\"\n <|body_0|>\n\n def test_add(self):\n \"\"\"[summary] __add__と__iadd__のテストメソッド\"\"\"\n <|body_1|>\n\n def test_sub(self):\n \"\"\"[summary] __sub__と__isub__のテストメソッド\"\"\"\n <|body_2|>\n\n def test_mul(self):\n \"\"\"[summary] __mul__と__imul__のテストメソッド\"\"\"\n <|body_3|>\n\n def test_truediv(self):\n \"\"\"[summary] __truediv__と__itruediv__のテストメソッド\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n bunsi, bunbo = (3, 7)\n fra = Fraction(bunsi, bunbo)\n self.assertEqual(bunsi, fra.numerator)\n self.assertEqual(bunbo, fra.denominator)\n<|end_body_0|>\n\n<|body_start_1|>\n fra_base = Fraction(1, 2)\n fra_add = Fraction(1, 3)\n fra_exp = Fraction(5, 6)\n fra_calc = fra_base + fra_add\n self.assertEqual(fra_exp.numerator, fra_calc.numerator)\n self.assertEqual(fra_exp.denominator, fra_calc.denominator)\n fra_base += fra_add\n self.assertEqual(fra_exp.numerator, fra_base.numerator)\n self.assertEqual(fra_exp.denominator, fra_base.denominator)\n<|end_body_1|>\n\n<|body_start_2|>\n fra_base = Fraction(1, 2)\n fra_sub = Fraction(1, 3)\n fra_exp = Fraction(1, 6)\n fra_calc = fra_base - fra_sub\n self.assertEqual(fra_exp.numerator, fra_calc.numerator)\n self.assertEqual(fra_exp.denominator, fra_calc.denominator)\n fra_base -= fra_sub\n self.assertEqual(fra_exp.numerator, fra_base.numerator)\n self.assertEqual(fra_exp.denominator, fra_base.denominator)\n<|end_body_2|>\n\n<|body_start_3|>\n fra_base = Fraction(1, 2)\n fra_mul = Fraction(1, 3)\n fra_exp = Fraction(1, 6)\n fra_calc = fra_base * fra_mul\n self.assertEqual(fra_exp.numerator, fra_calc.numerator)\n self.assertEqual(fra_exp.denominator, fra_calc.denominator)\n fra_base *= fra_mul\n self.assertEqual(fra_exp.numerator, fra_base.numerator)\n self.assertEqual(fra_exp.denominator, fra_base.denominator)\n<|end_body_3|>\n\n<|body_start_4|>\n fra_base = Fraction(1, 2)\n fra_truediv = Fraction(1, 3)\n fra_exp = Fraction(3, 2)\n fra_calc = fra_base / fra_truediv\n self.assertEqual(fra_exp.numerator, fra_calc.numerator)\n self.assertEqual(fra_exp.denominator, fra_calc.denominator)\n fra_base /= fra_truediv\n self.assertEqual(fra_exp.numerator, fra_base.numerator)\n self.assertEqual(fra_exp.denominator, fra_base.denominator)\n<|end_body_4|>\n", "revision_id": "1742785de215902db2807d5aa829bc3a6bf0b456", "skeleton": "<|skeleton|>\nclass Fractiontest:\n \"\"\"[summary] Fractionクラスのユニットテストクラス\"\"\"\n\n def test_constructor(self):\n \"\"\"[summary] コンストラクタのテストメソッド\"\"\"\n <|body_0|>\n\n def test_add(self):\n \"\"\"[summary] __add__と__iadd__のテストメソッド\"\"\"\n <|body_1|>\n\n def test_sub(self):\n \"\"\"[summary] __sub__と__isub__のテストメソッド\"\"\"\n <|body_2|>\n\n def test_mul(self):\n \"\"\"[summary] 
__mul__と__imul__のテストメソッド\"\"\"\n <|body_3|>\n\n def test_truediv(self):\n \"\"\"[summary] __truediv__と__itruediv__のテストメソッド\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Fractiontest:\n \"\"\"[summary] Fractionクラスのユニットテストクラス\"\"\"\n\n def test_constructor(self):\n \"\"\"[summary] コンストラクタのテストメソッド\"\"\"\n bunsi, bunbo = (3, 7)\n fra = Fraction(bunsi, bunbo)\n self.assertEqual(bunsi, fra.numerator)\n self.assertEqual(bunbo, fra.denominator)\n\n def test_add(self):\n \"\"\"[summary] __add__と__iadd__のテストメソッド\"\"\"\n fra_base = Fraction(1, 2)\n fra_add = Fraction(1, 3)\n fra_exp = Fraction(5, 6)\n fra_calc = fra_base + fra_add\n self.assertEqual(fra_exp.numerator, fra_calc.numerator)\n self.assertEqual(fra_exp.denominator, fra_calc.denominator)\n fra_base += fra_add\n self.assertEqual(fra_exp.numerator, fra_base.numerator)\n self.assertEqual(fra_exp.denominator, fra_base.denominator)\n\n def test_sub(self):\n \"\"\"[summary] __sub__と__isub__のテストメソッド\"\"\"\n fra_base = Fraction(1, 2)\n fra_sub = Fraction(1, 3)\n fra_exp = Fraction(1, 6)\n fra_calc = fra_base - fra_sub\n self.assertEqual(fra_exp.numerator, fra_calc.numerator)\n self.assertEqual(fra_exp.denominator, fra_calc.denominator)\n fra_base -= fra_sub\n self.assertEqual(fra_exp.numerator, fra_base.numerator)\n self.assertEqual(fra_exp.denominator, fra_base.denominator)\n\n def test_mul(self):\n \"\"\"[summary] __mul__と__imul__のテストメソッド\"\"\"\n fra_base = Fraction(1, 2)\n fra_mul = Fraction(1, 3)\n fra_exp = Fraction(1, 6)\n fra_calc = fra_base * fra_mul\n self.assertEqual(fra_exp.numerator, fra_calc.numerator)\n self.assertEqual(fra_exp.denominator, fra_calc.denominator)\n fra_base *= fra_mul\n self.assertEqual(fra_exp.numerator, fra_base.numerator)\n self.assertEqual(fra_exp.denominator, fra_base.denominator)\n\n def test_truediv(self):\n \"\"\"[summary] __truediv__と__itruediv__のテストメソッド\"\"\"\n fra_base = Fraction(1, 2)\n fra_truediv = Fraction(1, 3)\n fra_exp = Fraction(3, 2)\n fra_calc = fra_base / fra_truediv\n self.assertEqual(fra_exp.numerator, fra_calc.numerator)\n self.assertEqual(fra_exp.denominator, fra_calc.denominator)\n fra_base /= fra_truediv\n self.assertEqual(fra_exp.numerator, fra_base.numerator)\n self.assertEqual(fra_exp.denominator, fra_base.denominator)\n", "source": "the_stack_v2_python_sparse", "source_path": "20200718_specialmethod/Fractiontest.py", "source_repo": "otowmoyarng/FreeStudyRepo", "split": "val", "star_events_count": 0}
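The Fractiontest record never shows the Fraction implementation under test, and as written the class would also need to inherit from unittest.TestCase for self.assertEqual to resolve. Its assertions happen to match the semantics of the standard library's fractions.Fraction, so a quick stand-in check (an assumption; the repo presumably defines its own Fraction) looks like this:

from fractions import Fraction

# Using the stdlib Fraction as a stand-in for the implementation under test.
fra = Fraction(1, 2) + Fraction(1, 3)
assert (fra.numerator, fra.denominator) == (5, 6)
fra /= Fraction(1, 3)          # (5/6) / (1/3) = 5/2
assert (fra.numerator, fra.denominator) == (5, 2)
print(fra)                     # 5/2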
{"blob_id": "dd24ee8c8805e85348974c914551a1525393978c", "bodies": ["def wrapper(*arg):\n t1 = time.clock()\n res = func(*arg)\n t2 = time.clock()\n print('%0.3fms' % ((t2 - t1) * 1000.0))\n return res\nreturn wrapper", "@wraps(f)\ndef wrapp(*args, **kwargs):\n time_start = round(time.time(), 5)\n result = f(*args, **kwargs)\n time_end = round(time.time(), 5)\n print('Time for func {} is {}'.format(f.__name__, time_end - time_start))\n print('The', f.__name__, 'result is: ', result)\n return result\nreturn wrapp", "def wrapp(*args, **kwargs):\n time_start = round(time.time(), 5)\n result = f(*args, **kwargs)\n time_end = round(time.time(), 5)\n with open('decor_data.txt', 'w') as fl:\n fl.write(' Func name is: {}\\n Work time is: {}\\n Args, kwargs are: {}\\n The result: {}'.format(f.__name__, time_end - time_start, str(*args) + ', ' + str(kwargs), result))\n return result\nreturn wrapp", "@wraps(f)\ndef wrapp(*args, **kwargs):\n result = f(*args, **kwargs)\n print(*args)\n print('Type of args, values for func {} are args: {},\\n values:{}'.format(f.__name__, type(*args), [type(v) for v in kwargs.values()]))\n return result\nreturn wrapp", "@wraps(f)\ndef wrapp(*args, **kwargs):\n if wrapp.value is False:\n print(f.__name__ + ' исполняется')\n f(*args, **kwargs)\n print(f.__name__ + ' была исполнена')\n wrapp.value = True\n return wrapp.value\n return print(\"It's already finished\")\nwrapp.value = False\nreturn wrapp"], "bodies_text": "<|body_start_0|>\n def wrapper(*arg):\n t1 = time.clock()\n res = func(*arg)\n t2 = time.clock()\n print('%0.3fms' % ((t2 - t1) * 1000.0))\n return res\n return wrapper\n<|end_body_0|>\n\n<|body_start_1|>\n @wraps(f)\n def wrapp(*args, **kwargs):\n time_start = round(time.time(), 5)\n result = f(*args, **kwargs)\n time_end = round(time.time(), 5)\n print('Time for func {} is {}'.format(f.__name__, time_end - time_start))\n print('The', f.__name__, 'result is: ', result)\n return result\n return wrapp\n<|end_body_1|>\n\n<|body_start_2|>\n def wrapp(*args, **kwargs):\n time_start = round(time.time(), 5)\n result = f(*args, **kwargs)\n time_end = round(time.time(), 5)\n with open('decor_data.txt', 'w') as fl:\n fl.write(' Func name is: {}\\n Work time is: {}\\n Args, kwargs are: {}\\n The result: {}'.format(f.__name__, time_end - time_start, str(*args) + ', ' + str(kwargs), result))\n return result\n return wrapp\n<|end_body_2|>\n\n<|body_start_3|>\n @wraps(f)\n def wrapp(*args, **kwargs):\n result = f(*args, **kwargs)\n print(*args)\n print('Type of args, values for func {} are args: {},\\n values:{}'.format(f.__name__, type(*args), [type(v) for v in kwargs.values()]))\n return result\n return wrapp\n<|end_body_3|>\n\n<|body_start_4|>\n @wraps(f)\n def wrapp(*args, **kwargs):\n if wrapp.value is False:\n print(f.__name__ + ' исполняется')\n f(*args, **kwargs)\n print(f.__name__ + ' была исполнена')\n wrapp.value = True\n return wrapp.value\n return print(\"It's already finished\")\n wrapp.value = False\n return wrapp\n<|end_body_4|>\n", "class_docstring": "", "class_name": "Decorators", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Decorators:\n\n def decor0(self, func):\n \"\"\"декоратор 0, позволяющий вместе с результатом функции возвращать время ее работы\"\"\"\n <|body_0|>\n\n def decor1(self, f):\n \"\"\"декоратор 1, позволяющий вместе с результатом функции возвращать время ее работы\"\"\"\n <|body_1|>\n\n def decor2(self, f):\n \"\"\"декоратор, позволяющий записывать время работы 
функции, имя функции и переданные ей параметры в текстовый файл\"\"\"\n <|body_2|>\n\n def decor3(self, f):\n \"\"\"декоратор, проверяющий типы, переданных декорируемой функции, аргументов\"\"\"\n <|body_3|>\n\n def decor4(self, f):\n \"\"\"декоратор, который кэширует результат работы функции, тем самым обеспечивает единственный вызов функции\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def wrapper(*arg):\n t1 = time.clock()\n res = func(*arg)\n t2 = time.clock()\n print('%0.3fms' % ((t2 - t1) * 1000.0))\n return res\n return wrapper\n<|end_body_0|>\n\n<|body_start_1|>\n @wraps(f)\n def wrapp(*args, **kwargs):\n time_start = round(time.time(), 5)\n result = f(*args, **kwargs)\n time_end = round(time.time(), 5)\n print('Time for func {} is {}'.format(f.__name__, time_end - time_start))\n print('The', f.__name__, 'result is: ', result)\n return result\n return wrapp\n<|end_body_1|>\n\n<|body_start_2|>\n def wrapp(*args, **kwargs):\n time_start = round(time.time(), 5)\n result = f(*args, **kwargs)\n time_end = round(time.time(), 5)\n with open('decor_data.txt', 'w') as fl:\n fl.write(' Func name is: {}\\n Work time is: {}\\n Args, kwargs are: {}\\n The result: {}'.format(f.__name__, time_end - time_start, str(*args) + ', ' + str(kwargs), result))\n return result\n return wrapp\n<|end_body_2|>\n\n<|body_start_3|>\n @wraps(f)\n def wrapp(*args, **kwargs):\n result = f(*args, **kwargs)\n print(*args)\n print('Type of args, values for func {} are args: {},\\n values:{}'.format(f.__name__, type(*args), [type(v) for v in kwargs.values()]))\n return result\n return wrapp\n<|end_body_3|>\n\n<|body_start_4|>\n @wraps(f)\n def wrapp(*args, **kwargs):\n if wrapp.value is False:\n print(f.__name__ + ' исполняется')\n f(*args, **kwargs)\n print(f.__name__ + ' была исполнена')\n wrapp.value = True\n return wrapp.value\n return print(\"It's already finished\")\n wrapp.value = False\n return wrapp\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000037", "length_bytes": 3283, "license_type": "no_license", "methods": [{"docstring": "декоратор 0, позволяющий вместе с результатом функции возвращать время ее работы", "name": "decor0", "signature": "def decor0(self, func)"}, {"docstring": "декоратор 1, позволяющий вместе с результатом функции возвращать время ее работы", "name": "decor1", "signature": "def decor1(self, f)"}, {"docstring": "декоратор, позволяющий записывать время работы функции, имя функции и переданные ей параметры в текстовый файл", "name": "decor2", "signature": "def decor2(self, f)"}, {"docstring": "декоратор, проверяющий типы, переданных декорируемой функции, аргументов", "name": "decor3", "signature": "def decor3(self, f)"}, {"docstring": "декоратор, который кэширует результат работы функции, тем самым обеспечивает единственный вызов функции", "name": "decor4", "signature": "def decor4(self, f)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_034737", "prompt": "Implement the Python class `Decorators` described below.\n\nClass description:\nImplement the Decorators class.\n\nMethod signatures and docstrings:\n- def decor0(self, func): декоратор 0, позволяющий вместе с результатом функции возвращать время ее работы\n- def decor1(self, f): декоратор 1, позволяющий вместе с результатом функции возвращать время ее работы\n- def decor2(self, f): декоратор, позволяющий записывать время работы функции, имя функции и переданные ей параметры в текстовый файл\n- def decor3(self, f): декоратор, проверяющий типы, переданных декорируемой функции, 
аргументов\n- def decor4(self, f): декоратор, который кэширует результат работы функции, тем самым обеспечивает единственный вызов функции", "prompted_full_text": "Implement the Python class `Decorators` described below.\n\nClass description:\nImplement the Decorators class.\n\nMethod signatures and docstrings:\n- def decor0(self, func): декоратор 0, позволяющий вместе с результатом функции возвращать время ее работы\n- def decor1(self, f): декоратор 1, позволяющий вместе с результатом функции возвращать время ее работы\n- def decor2(self, f): декоратор, позволяющий записывать время работы функции, имя функции и переданные ей параметры в текстовый файл\n- def decor3(self, f): декоратор, проверяющий типы, переданных декорируемой функции, аргументов\n- def decor4(self, f): декоратор, который кэширует результат работы функции, тем самым обеспечивает единственный вызов функции\n\n<|skeleton|>\nclass Decorators:\n\n def decor0(self, func):\n \"\"\"декоратор 0, позволяющий вместе с результатом функции возвращать время ее работы\"\"\"\n <|body_0|>\n\n def decor1(self, f):\n \"\"\"декоратор 1, позволяющий вместе с результатом функции возвращать время ее работы\"\"\"\n <|body_1|>\n\n def decor2(self, f):\n \"\"\"декоратор, позволяющий записывать время работы функции, имя функции и переданные ей параметры в текстовый файл\"\"\"\n <|body_2|>\n\n def decor3(self, f):\n \"\"\"декоратор, проверяющий типы, переданных декорируемой функции, аргументов\"\"\"\n <|body_3|>\n\n def decor4(self, f):\n \"\"\"декоратор, который кэширует результат работы функции, тем самым обеспечивает единственный вызов функции\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def wrapper(*arg):\n t1 = time.clock()\n res = func(*arg)\n t2 = time.clock()\n print('%0.3fms' % ((t2 - t1) * 1000.0))\n return res\n return wrapper\n<|end_body_0|>\n\n<|body_start_1|>\n @wraps(f)\n def wrapp(*args, **kwargs):\n time_start = round(time.time(), 5)\n result = f(*args, **kwargs)\n time_end = round(time.time(), 5)\n print('Time for func {} is {}'.format(f.__name__, time_end - time_start))\n print('The', f.__name__, 'result is: ', result)\n return result\n return wrapp\n<|end_body_1|>\n\n<|body_start_2|>\n def wrapp(*args, **kwargs):\n time_start = round(time.time(), 5)\n result = f(*args, **kwargs)\n time_end = round(time.time(), 5)\n with open('decor_data.txt', 'w') as fl:\n fl.write(' Func name is: {}\\n Work time is: {}\\n Args, kwargs are: {}\\n The result: {}'.format(f.__name__, time_end - time_start, str(*args) + ', ' + str(kwargs), result))\n return result\n return wrapp\n<|end_body_2|>\n\n<|body_start_3|>\n @wraps(f)\n def wrapp(*args, **kwargs):\n result = f(*args, **kwargs)\n print(*args)\n print('Type of args, values for func {} are args: {},\\n values:{}'.format(f.__name__, type(*args), [type(v) for v in kwargs.values()]))\n return result\n return wrapp\n<|end_body_3|>\n\n<|body_start_4|>\n @wraps(f)\n def wrapp(*args, **kwargs):\n if wrapp.value is False:\n print(f.__name__ + ' исполняется')\n f(*args, **kwargs)\n print(f.__name__ + ' была исполнена')\n wrapp.value = True\n return wrapp.value\n return print(\"It's already finished\")\n wrapp.value = False\n return wrapp\n<|end_body_4|>\n", "revision_id": "c3225516640d872b97139a5c2919d216d5370b17", "skeleton": "<|skeleton|>\nclass Decorators:\n\n def decor0(self, func):\n \"\"\"декоратор 0, позволяющий вместе с результатом функции возвращать время ее работы\"\"\"\n <|body_0|>\n\n def decor1(self, f):\n \"\"\"декоратор 1, позволяющий вместе с результатом функции 
возвращать время ее работы\"\"\"\n <|body_1|>\n\n def decor2(self, f):\n \"\"\"декоратор, позволяющий записывать время работы функции, имя функции и переданные ей параметры в текстовый файл\"\"\"\n <|body_2|>\n\n def decor3(self, f):\n \"\"\"декоратор, проверяющий типы, переданных декорируемой функции, аргументов\"\"\"\n <|body_3|>\n\n def decor4(self, f):\n \"\"\"декоратор, который кэширует результат работы функции, тем самым обеспечивает единственный вызов функции\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Decorators:\n def decor0(self, func):\n \"\"\"декоратор 0, позволяющий вместе с результатом функции возвращать время ее работы\"\"\"\n def wrapper(*arg):\n t1 = time.clock()\n res = func(*arg)\n t2 = time.clock()\n print('%0.3fms' % ((t2 - t1) * 1000.0))\n return res\n return wrapper\n\n def decor1(self, f):\n \"\"\"декоратор 1, позволяющий вместе с результатом функции возвращать время ее работы\"\"\"\n @wraps(f)\n def wrapp(*args, **kwargs):\n time_start = round(time.time(), 5)\n result = f(*args, **kwargs)\n time_end = round(time.time(), 5)\n print('Time for func {} is {}'.format(f.__name__, time_end - time_start))\n print('The', f.__name__, 'result is: ', result)\n return result\n return wrapp\n\n def decor2(self, f):\n \"\"\"декоратор, позволяющий записывать время работы функции, имя функции и переданные ей параметры в текстовый файл\"\"\"\n def wrapp(*args, **kwargs):\n time_start = round(time.time(), 5)\n result = f(*args, **kwargs)\n time_end = round(time.time(), 5)\n with open('decor_data.txt', 'w') as fl:\n fl.write(' Func name is: {}\\n Work time is: {}\\n Args, kwargs are: {}\\n The result: {}'.format(f.__name__, time_end - time_start, str(*args) + ', ' + str(kwargs), result))\n return result\n return wrapp\n\n def decor3(self, f):\n \"\"\"декоратор, проверяющий типы, переданных декорируемой функции, аргументов\"\"\"\n @wraps(f)\n def wrapp(*args, **kwargs):\n result = f(*args, **kwargs)\n print(*args)\n print('Type of args, values for func {} are args: {},\\n values:{}'.format(f.__name__, type(*args), [type(v) for v in kwargs.values()]))\n return result\n return wrapp\n\n def decor4(self, f):\n \"\"\"декоратор, который кэширует результат работы функции, тем самым обеспечивает единственный вызов функции\"\"\"\n @wraps(f)\n def wrapp(*args, **kwargs):\n if wrapp.value is False:\n print(f.__name__ + ' исполняется')\n f(*args, **kwargs)\n print(f.__name__ + ' была исполнена')\n wrapp.value = True\n return wrapp.value\n return print(\"It's already finished\")\n wrapp.value = False\n return wrapp\n", "source": "the_stack_v2_python_sparse", "source_path": "Homework11-20+22.03/Task0(decors).py", "source_repo": "Twicer/Homeworks", "split": "val", "star_events_count": 0}
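Note that decor4's docstring promises result caching, but its body only records that a call happened and returns True thereafter; the first call's return value is discarded. A minimal sketch of a decorator that actually caches and replays the first result, assuming only the standard library (call_once and slow_setup are invented names for illustration):

import time
from functools import wraps

def call_once(f):
    # Cache the first call's result and return it on every later call.
    @wraps(f)
    def wrapper(*args, **kwargs):
        if not hasattr(wrapper, '_result'):
            wrapper._result = f(*args, **kwargs)
        return wrapper._result
    return wrapper

@call_once
def slow_setup():
    time.sleep(0.1)
    return 42

print(slow_setup())  # runs the body once, prints 42
print(slow_setup())  # cached: prints 42 immediately

For functions whose result depends on their arguments, functools.lru_cache is the stock alternative.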
{"blob_id": "2956d2d74217145dd1d67c1eccd9079417977140", "bodies": ["if not parse_node:\n raise TypeError('parse_node cannot be null.')\nreturn AccessPackageAssignmentRequest()", "from .access_package import AccessPackage\nfrom .access_package_answer import AccessPackageAnswer\nfrom .access_package_assignment import AccessPackageAssignment\nfrom .access_package_request_state import AccessPackageRequestState\nfrom .access_package_request_type import AccessPackageRequestType\nfrom .access_package_subject import AccessPackageSubject\nfrom .custom_extension_callout_instance import CustomExtensionCalloutInstance\nfrom .entitlement_management_schedule import EntitlementManagementSchedule\nfrom .entity import Entity\nfrom .access_package import AccessPackage\nfrom .access_package_answer import AccessPackageAnswer\nfrom .access_package_assignment import AccessPackageAssignment\nfrom .access_package_request_state import AccessPackageRequestState\nfrom .access_package_request_type import AccessPackageRequestType\nfrom .access_package_subject import AccessPackageSubject\nfrom .custom_extension_callout_instance import CustomExtensionCalloutInstance\nfrom .entitlement_management_schedule import EntitlementManagementSchedule\nfrom .entity import Entity\nfields: Dict[str, Callable[[Any], None]] = {'accessPackage': lambda n: setattr(self, 'access_package', n.get_object_value(AccessPackage)), 'answers': lambda n: setattr(self, 'answers', n.get_collection_of_object_values(AccessPackageAnswer)), 'assignment': lambda n: setattr(self, 'assignment', n.get_object_value(AccessPackageAssignment)), 'completedDateTime': lambda n: setattr(self, 'completed_date_time', n.get_datetime_value()), 'createdDateTime': lambda n: setattr(self, 'created_date_time', n.get_datetime_value()), 'customExtensionCalloutInstances': lambda n: setattr(self, 'custom_extension_callout_instances', n.get_collection_of_object_values(CustomExtensionCalloutInstance)), 'requestType': lambda n: setattr(self, 'request_type', n.get_enum_value(AccessPackageRequestType)), 'requestor': lambda n: setattr(self, 'requestor', n.get_object_value(AccessPackageSubject)), 'schedule': lambda n: setattr(self, 'schedule', n.get_object_value(EntitlementManagementSchedule)), 'state': lambda n: setattr(self, 'state', n.get_enum_value(AccessPackageRequestState)), 'status': lambda n: setattr(self, 'status', n.get_str_value())}\nsuper_fields = super().get_field_deserializers()\nfields.update(super_fields)\nreturn fields", "if not writer:\n raise TypeError('writer cannot be null.')\nsuper().serialize(writer)\nwriter.write_object_value('accessPackage', self.access_package)\nwriter.write_collection_of_object_values('answers', self.answers)\nwriter.write_object_value('assignment', self.assignment)\nwriter.write_datetime_value('completedDateTime', self.completed_date_time)\nwriter.write_datetime_value('createdDateTime', self.created_date_time)\nwriter.write_collection_of_object_values('customExtensionCalloutInstances', self.custom_extension_callout_instances)\nwriter.write_enum_value('requestType', self.request_type)\nwriter.write_object_value('requestor', self.requestor)\nwriter.write_object_value('schedule', self.schedule)\nwriter.write_enum_value('state', self.state)\nwriter.write_str_value('status', self.status)"], "bodies_text": "<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return AccessPackageAssignmentRequest()\n<|end_body_0|>\n\n<|body_start_1|>\n from .access_package import AccessPackage\n from .access_package_answer 
import AccessPackageAnswer\n from .access_package_assignment import AccessPackageAssignment\n from .access_package_request_state import AccessPackageRequestState\n from .access_package_request_type import AccessPackageRequestType\n from .access_package_subject import AccessPackageSubject\n from .custom_extension_callout_instance import CustomExtensionCalloutInstance\n from .entitlement_management_schedule import EntitlementManagementSchedule\n from .entity import Entity\n from .access_package import AccessPackage\n from .access_package_answer import AccessPackageAnswer\n from .access_package_assignment import AccessPackageAssignment\n from .access_package_request_state import AccessPackageRequestState\n from .access_package_request_type import AccessPackageRequestType\n from .access_package_subject import AccessPackageSubject\n from .custom_extension_callout_instance import CustomExtensionCalloutInstance\n from .entitlement_management_schedule import EntitlementManagementSchedule\n from .entity import Entity\n fields: Dict[str, Callable[[Any], None]] = {'accessPackage': lambda n: setattr(self, 'access_package', n.get_object_value(AccessPackage)), 'answers': lambda n: setattr(self, 'answers', n.get_collection_of_object_values(AccessPackageAnswer)), 'assignment': lambda n: setattr(self, 'assignment', n.get_object_value(AccessPackageAssignment)), 'completedDateTime': lambda n: setattr(self, 'completed_date_time', n.get_datetime_value()), 'createdDateTime': lambda n: setattr(self, 'created_date_time', n.get_datetime_value()), 'customExtensionCalloutInstances': lambda n: setattr(self, 'custom_extension_callout_instances', n.get_collection_of_object_values(CustomExtensionCalloutInstance)), 'requestType': lambda n: setattr(self, 'request_type', n.get_enum_value(AccessPackageRequestType)), 'requestor': lambda n: setattr(self, 'requestor', n.get_object_value(AccessPackageSubject)), 'schedule': lambda n: setattr(self, 'schedule', n.get_object_value(EntitlementManagementSchedule)), 'state': lambda n: setattr(self, 'state', n.get_enum_value(AccessPackageRequestState)), 'status': lambda n: setattr(self, 'status', n.get_str_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_object_value('accessPackage', self.access_package)\n writer.write_collection_of_object_values('answers', self.answers)\n writer.write_object_value('assignment', self.assignment)\n writer.write_datetime_value('completedDateTime', self.completed_date_time)\n writer.write_datetime_value('createdDateTime', self.created_date_time)\n writer.write_collection_of_object_values('customExtensionCalloutInstances', self.custom_extension_callout_instances)\n writer.write_enum_value('requestType', self.request_type)\n writer.write_object_value('requestor', self.requestor)\n writer.write_object_value('schedule', self.schedule)\n writer.write_enum_value('state', self.state)\n writer.write_str_value('status', self.status)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "AccessPackageAssignmentRequest", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AccessPackageAssignmentRequest:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> AccessPackageAssignmentRequest:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: 
parse_node: The parse node to use to read the discriminator value and create the object Returns: AccessPackageAssignmentRequest\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return AccessPackageAssignmentRequest()\n<|end_body_0|>\n\n<|body_start_1|>\n from .access_package import AccessPackage\n from .access_package_answer import AccessPackageAnswer\n from .access_package_assignment import AccessPackageAssignment\n from .access_package_request_state import AccessPackageRequestState\n from .access_package_request_type import AccessPackageRequestType\n from .access_package_subject import AccessPackageSubject\n from .custom_extension_callout_instance import CustomExtensionCalloutInstance\n from .entitlement_management_schedule import EntitlementManagementSchedule\n from .entity import Entity\n from .access_package import AccessPackage\n from .access_package_answer import AccessPackageAnswer\n from .access_package_assignment import AccessPackageAssignment\n from .access_package_request_state import AccessPackageRequestState\n from .access_package_request_type import AccessPackageRequestType\n from .access_package_subject import AccessPackageSubject\n from .custom_extension_callout_instance import CustomExtensionCalloutInstance\n from .entitlement_management_schedule import EntitlementManagementSchedule\n from .entity import Entity\n fields: Dict[str, Callable[[Any], None]] = {'accessPackage': lambda n: setattr(self, 'access_package', n.get_object_value(AccessPackage)), 'answers': lambda n: setattr(self, 'answers', n.get_collection_of_object_values(AccessPackageAnswer)), 'assignment': lambda n: setattr(self, 'assignment', n.get_object_value(AccessPackageAssignment)), 'completedDateTime': lambda n: setattr(self, 'completed_date_time', n.get_datetime_value()), 'createdDateTime': lambda n: setattr(self, 'created_date_time', n.get_datetime_value()), 'customExtensionCalloutInstances': lambda n: setattr(self, 'custom_extension_callout_instances', n.get_collection_of_object_values(CustomExtensionCalloutInstance)), 'requestType': lambda n: setattr(self, 'request_type', n.get_enum_value(AccessPackageRequestType)), 'requestor': lambda n: setattr(self, 'requestor', n.get_object_value(AccessPackageSubject)), 'schedule': lambda n: setattr(self, 'schedule', n.get_object_value(EntitlementManagementSchedule)), 'state': lambda n: setattr(self, 'state', n.get_enum_value(AccessPackageRequestState)), 'status': lambda n: setattr(self, 'status', n.get_str_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_object_value('accessPackage', self.access_package)\n writer.write_collection_of_object_values('answers', self.answers)\n writer.write_object_value('assignment', self.assignment)\n writer.write_datetime_value('completedDateTime', self.completed_date_time)\n writer.write_datetime_value('createdDateTime', self.created_date_time)\n 
writer.write_collection_of_object_values('customExtensionCalloutInstances', self.custom_extension_callout_instances)\n writer.write_enum_value('requestType', self.request_type)\n writer.write_object_value('requestor', self.requestor)\n writer.write_object_value('schedule', self.schedule)\n writer.write_enum_value('state', self.state)\n writer.write_str_value('status', self.status)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000038", "length_bytes": 8264, "license_type": "permissive", "methods": [{"docstring": "Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: AccessPackageAssignmentRequest", "name": "create_from_discriminator_value", "signature": "def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> AccessPackageAssignmentRequest"}, {"docstring": "The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]", "name": "get_field_deserializers", "signature": "def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]"}, {"docstring": "Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "name": "serialize", "signature": "def serialize(self, writer: SerializationWriter) -> None"}], "n_methods": 3, "prompt": "Implement the Python class `AccessPackageAssignmentRequest` described below.\n\nClass description:\nImplement the AccessPackageAssignmentRequest class.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> AccessPackageAssignmentRequest: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: AccessPackageAssignmentRequest\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "prompted_full_text": "Implement the Python class `AccessPackageAssignmentRequest` described below.\n\nClass description:\nImplement the AccessPackageAssignmentRequest class.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> AccessPackageAssignmentRequest: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: AccessPackageAssignmentRequest\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model\n\n<|skeleton|>\nclass AccessPackageAssignmentRequest:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> AccessPackageAssignmentRequest:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: 
AccessPackageAssignmentRequest\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return AccessPackageAssignmentRequest()\n<|end_body_0|>\n\n<|body_start_1|>\n from .access_package import AccessPackage\n from .access_package_answer import AccessPackageAnswer\n from .access_package_assignment import AccessPackageAssignment\n from .access_package_request_state import AccessPackageRequestState\n from .access_package_request_type import AccessPackageRequestType\n from .access_package_subject import AccessPackageSubject\n from .custom_extension_callout_instance import CustomExtensionCalloutInstance\n from .entitlement_management_schedule import EntitlementManagementSchedule\n from .entity import Entity\n from .access_package import AccessPackage\n from .access_package_answer import AccessPackageAnswer\n from .access_package_assignment import AccessPackageAssignment\n from .access_package_request_state import AccessPackageRequestState\n from .access_package_request_type import AccessPackageRequestType\n from .access_package_subject import AccessPackageSubject\n from .custom_extension_callout_instance import CustomExtensionCalloutInstance\n from .entitlement_management_schedule import EntitlementManagementSchedule\n from .entity import Entity\n fields: Dict[str, Callable[[Any], None]] = {'accessPackage': lambda n: setattr(self, 'access_package', n.get_object_value(AccessPackage)), 'answers': lambda n: setattr(self, 'answers', n.get_collection_of_object_values(AccessPackageAnswer)), 'assignment': lambda n: setattr(self, 'assignment', n.get_object_value(AccessPackageAssignment)), 'completedDateTime': lambda n: setattr(self, 'completed_date_time', n.get_datetime_value()), 'createdDateTime': lambda n: setattr(self, 'created_date_time', n.get_datetime_value()), 'customExtensionCalloutInstances': lambda n: setattr(self, 'custom_extension_callout_instances', n.get_collection_of_object_values(CustomExtensionCalloutInstance)), 'requestType': lambda n: setattr(self, 'request_type', n.get_enum_value(AccessPackageRequestType)), 'requestor': lambda n: setattr(self, 'requestor', n.get_object_value(AccessPackageSubject)), 'schedule': lambda n: setattr(self, 'schedule', n.get_object_value(EntitlementManagementSchedule)), 'state': lambda n: setattr(self, 'state', n.get_enum_value(AccessPackageRequestState)), 'status': lambda n: setattr(self, 'status', n.get_str_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_object_value('accessPackage', self.access_package)\n writer.write_collection_of_object_values('answers', self.answers)\n writer.write_object_value('assignment', self.assignment)\n writer.write_datetime_value('completedDateTime', self.completed_date_time)\n writer.write_datetime_value('createdDateTime', self.created_date_time)\n writer.write_collection_of_object_values('customExtensionCalloutInstances', 
self.custom_extension_callout_instances)\n writer.write_enum_value('requestType', self.request_type)\n writer.write_object_value('requestor', self.requestor)\n writer.write_object_value('schedule', self.schedule)\n writer.write_enum_value('state', self.state)\n writer.write_str_value('status', self.status)\n<|end_body_2|>\n", "revision_id": "27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949", "skeleton": "<|skeleton|>\nclass AccessPackageAssignmentRequest:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> AccessPackageAssignmentRequest:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: AccessPackageAssignmentRequest\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class AccessPackageAssignmentRequest:\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> AccessPackageAssignmentRequest:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: AccessPackageAssignmentRequest\"\"\"\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return AccessPackageAssignmentRequest()\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n from .access_package import AccessPackage\n from .access_package_answer import AccessPackageAnswer\n from .access_package_assignment import AccessPackageAssignment\n from .access_package_request_state import AccessPackageRequestState\n from .access_package_request_type import AccessPackageRequestType\n from .access_package_subject import AccessPackageSubject\n from .custom_extension_callout_instance import CustomExtensionCalloutInstance\n from .entitlement_management_schedule import EntitlementManagementSchedule\n from .entity import Entity\n from .access_package import AccessPackage\n from .access_package_answer import AccessPackageAnswer\n from .access_package_assignment import AccessPackageAssignment\n from .access_package_request_state import AccessPackageRequestState\n from .access_package_request_type import AccessPackageRequestType\n from .access_package_subject import AccessPackageSubject\n from .custom_extension_callout_instance import CustomExtensionCalloutInstance\n from .entitlement_management_schedule import EntitlementManagementSchedule\n from .entity import Entity\n fields: Dict[str, Callable[[Any], None]] = {'accessPackage': lambda n: setattr(self, 'access_package', n.get_object_value(AccessPackage)), 'answers': lambda n: setattr(self, 'answers', n.get_collection_of_object_values(AccessPackageAnswer)), 'assignment': lambda n: setattr(self, 'assignment', n.get_object_value(AccessPackageAssignment)), 'completedDateTime': lambda n: setattr(self, 
'completed_date_time', n.get_datetime_value()), 'createdDateTime': lambda n: setattr(self, 'created_date_time', n.get_datetime_value()), 'customExtensionCalloutInstances': lambda n: setattr(self, 'custom_extension_callout_instances', n.get_collection_of_object_values(CustomExtensionCalloutInstance)), 'requestType': lambda n: setattr(self, 'request_type', n.get_enum_value(AccessPackageRequestType)), 'requestor': lambda n: setattr(self, 'requestor', n.get_object_value(AccessPackageSubject)), 'schedule': lambda n: setattr(self, 'schedule', n.get_object_value(EntitlementManagementSchedule)), 'state': lambda n: setattr(self, 'state', n.get_enum_value(AccessPackageRequestState)), 'status': lambda n: setattr(self, 'status', n.get_str_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_object_value('accessPackage', self.access_package)\n writer.write_collection_of_object_values('answers', self.answers)\n writer.write_object_value('assignment', self.assignment)\n writer.write_datetime_value('completedDateTime', self.completed_date_time)\n writer.write_datetime_value('createdDateTime', self.created_date_time)\n writer.write_collection_of_object_values('customExtensionCalloutInstances', self.custom_extension_callout_instances)\n writer.write_enum_value('requestType', self.request_type)\n writer.write_object_value('requestor', self.requestor)\n writer.write_object_value('schedule', self.schedule)\n writer.write_enum_value('state', self.state)\n writer.write_str_value('status', self.status)\n", "source": "the_stack_v2_python_sparse", "source_path": "msgraph/generated/models/access_package_assignment_request.py", "source_repo": "microsoftgraph/msgraph-sdk-python", "split": "val", "star_events_count": 135}
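The get_field_deserializers pattern in the record above maps each wire-format property name to a setter callback, and serialize mirrors it with typed write_* calls. Stripped of the Microsoft Graph types, the deserializer half reduces to the sketch below; Node, Model, and the payload are invented stand-ins, not the Kiota API.

from typing import Any, Callable, Dict

class Node:
    # Stand-in for a parse node: it wraps one raw JSON value.
    def __init__(self, value: Any) -> None:
        self.value = value

    def get_str_value(self) -> str:
        return str(self.value)

class Model:
    def __init__(self) -> None:
        self.status: str = ''

    def get_field_deserializers(self) -> Dict[str, Callable[[Node], None]]:
        # Each wire-format key maps to a callback that writes the parsed
        # value onto the matching snake_case attribute.
        return {'status': lambda n: setattr(self, 'status', n.get_str_value())}

m = Model()
payload = {'status': 'delivered'}
for key, raw in payload.items():
    deserializer = m.get_field_deserializers().get(key)
    if deserializer:
        deserializer(Node(raw))
print(m.status)  # delivered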
{"blob_id": "8acd69ce4395f10c12a77464b9bec06718746c32", "bodies": ["self.d = deque()\nself.size = size\nself.presum = 0", "if len(self.d) < self.size:\n self.d.append(val)\n self.presum = self.presum + val\n return self.presum / len(self.d)\nelse:\n pop = self.d.popleft()\n self.presum = self.presum - pop\n self.d.append(val)\n self.presum = self.presum + val\n return self.presum / len(self.d)"], "bodies_text": "<|body_start_0|>\n self.d = deque()\n self.size = size\n self.presum = 0\n<|end_body_0|>\n\n<|body_start_1|>\n if len(self.d) < self.size:\n self.d.append(val)\n self.presum = self.presum + val\n return self.presum / len(self.d)\n else:\n pop = self.d.popleft()\n self.presum = self.presum - pop\n self.d.append(val)\n self.presum = self.presum + val\n return self.presum / len(self.d)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "MovingAverage", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MovingAverage:\n\n def __init__(self, size):\n \"\"\"Initialize your data structure here. :type size: int\"\"\"\n <|body_0|>\n\n def next(self, val):\n \"\"\":type val: int :rtype: float\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.d = deque()\n self.size = size\n self.presum = 0\n<|end_body_0|>\n\n<|body_start_1|>\n if len(self.d) < self.size:\n self.d.append(val)\n self.presum = self.presum + val\n return self.presum / len(self.d)\n else:\n pop = self.d.popleft()\n self.presum = self.presum - pop\n self.d.append(val)\n self.presum = self.presum + val\n return self.presum / len(self.d)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000039", "length_bytes": 855, "license_type": "no_license", "methods": [{"docstring": "Initialize your data structure here. :type size: int", "name": "__init__", "signature": "def __init__(self, size)"}, {"docstring": ":type val: int :rtype: float", "name": "next", "signature": "def next(self, val)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_009807", "prompt": "Implement the Python class `MovingAverage` described below.\n\nClass description:\nImplement the MovingAverage class.\n\nMethod signatures and docstrings:\n- def __init__(self, size): Initialize your data structure here. :type size: int\n- def next(self, val): :type val: int :rtype: float", "prompted_full_text": "Implement the Python class `MovingAverage` described below.\n\nClass description:\nImplement the MovingAverage class.\n\nMethod signatures and docstrings:\n- def __init__(self, size): Initialize your data structure here. :type size: int\n- def next(self, val): :type val: int :rtype: float\n\n<|skeleton|>\nclass MovingAverage:\n\n def __init__(self, size):\n \"\"\"Initialize your data structure here. :type size: int\"\"\"\n <|body_0|>\n\n def next(self, val):\n \"\"\":type val: int :rtype: float\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.d = deque()\n self.size = size\n self.presum = 0\n<|end_body_0|>\n\n<|body_start_1|>\n if len(self.d) < self.size:\n self.d.append(val)\n self.presum = self.presum + val\n return self.presum / len(self.d)\n else:\n pop = self.d.popleft()\n self.presum = self.presum - pop\n self.d.append(val)\n self.presum = self.presum + val\n return self.presum / len(self.d)\n<|end_body_1|>\n", "revision_id": "fb4d09746c9fe4f8b173dd1648825856ff7fa6d3", "skeleton": "<|skeleton|>\nclass MovingAverage:\n\n def __init__(self, size):\n \"\"\"Initialize your data structure here. 
:type size: int\"\"\"\n <|body_0|>\n\n def next(self, val):\n \"\"\":type val: int :rtype: float\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class MovingAverage:\n def __init__(self, size):\n \"\"\"Initialize your data structure here. :type size: int\"\"\"\n self.d = deque()\n self.size = size\n self.presum = 0\n\n def next(self, val):\n \"\"\":type val: int :rtype: float\"\"\"\n if len(self.d) < self.size:\n self.d.append(val)\n self.presum = self.presum + val\n return self.presum / len(self.d)\n else:\n pop = self.d.popleft()\n self.presum = self.presum - pop\n self.d.append(val)\n self.presum = self.presum + val\n return self.presum / len(self.d)\n", "source": "the_stack_v2_python_sparse", "source_path": "346. Moving Average from Data Stream.py", "source_repo": "jingwenh/Leetcode2", "split": "val", "star_events_count": 0}
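The MovingAverage record keeps a running sum alongside a deque so each next call is O(1): the evicted element is subtracted rather than re-summing the window. Restated compactly with a demo (behaviorally equivalent to the record's solution):

from collections import deque

class MovingAverage:
    def __init__(self, size):
        self.d = deque()
        self.size = size
        self.presum = 0

    def next(self, val):
        if len(self.d) == self.size:
            self.presum -= self.d.popleft()  # evict the oldest value
        self.d.append(val)
        self.presum += val
        return self.presum / len(self.d)

m = MovingAverage(3)
print(m.next(1))   # 1.0
print(m.next(10))  # 5.5
print(m.next(3))   # 4.666666666666667
print(m.next(5))   # 6.0  (the 1 was evicted)

A deque(maxlen=size) would evict automatically, but the evicted value would be lost before it could be subtracted from the running sum, which is why the record pops explicitly.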
{"blob_id": "0330ecb29cb5147d7b89b049b4b5f11874795c51", "bodies": ["if s is None:\n return t is None\nif t is None:\n return False\nif s.val != t.val:\n return False\nreturn self.isSameTree(s.left, t.left) and self.isSameTree(s.right, t.right)", "if s is None:\n return t is None\nif t is None:\n return False\nif s.val == t.val:\n return self.isSameTree(s, t) or self.isSubtree(s.left, t) or self.isSubtree(s.right, t)\nelse:\n return self.isSubtree(s.left, t) or self.isSubtree(s.right, t)"], "bodies_text": "<|body_start_0|>\n if s is None:\n return t is None\n if t is None:\n return False\n if s.val != t.val:\n return False\n return self.isSameTree(s.left, t.left) and self.isSameTree(s.right, t.right)\n<|end_body_0|>\n\n<|body_start_1|>\n if s is None:\n return t is None\n if t is None:\n return False\n if s.val == t.val:\n return self.isSameTree(s, t) or self.isSubtree(s.left, t) or self.isSubtree(s.right, t)\n else:\n return self.isSubtree(s.left, t) or self.isSubtree(s.right, t)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def isSameTree(self, s, t):\n \"\"\":type s: TreeNode :type t: TreeNode :rtype: bool\"\"\"\n <|body_0|>\n\n def isSubtree(self, s, t):\n \"\"\":type s: TreeNode :type t: TreeNode :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if s is None:\n return t is None\n if t is None:\n return False\n if s.val != t.val:\n return False\n return self.isSameTree(s.left, t.left) and self.isSameTree(s.right, t.right)\n<|end_body_0|>\n\n<|body_start_1|>\n if s is None:\n return t is None\n if t is None:\n return False\n if s.val == t.val:\n return self.isSameTree(s, t) or self.isSubtree(s.left, t) or self.isSubtree(s.right, t)\n else:\n return self.isSubtree(s.left, t) or self.isSubtree(s.right, t)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000040", "length_bytes": 1317, "license_type": "no_license", "methods": [{"docstring": ":type s: TreeNode :type t: TreeNode :rtype: bool", "name": "isSameTree", "signature": "def isSameTree(self, s, t)"}, {"docstring": ":type s: TreeNode :type t: TreeNode :rtype: bool", "name": "isSubtree", "signature": "def isSubtree(self, s, t)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_020232", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def isSameTree(self, s, t): :type s: TreeNode :type t: TreeNode :rtype: bool\n- def isSubtree(self, s, t): :type s: TreeNode :type t: TreeNode :rtype: bool", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def isSameTree(self, s, t): :type s: TreeNode :type t: TreeNode :rtype: bool\n- def isSubtree(self, s, t): :type s: TreeNode :type t: TreeNode :rtype: bool\n\n<|skeleton|>\nclass Solution:\n\n def isSameTree(self, s, t):\n \"\"\":type s: TreeNode :type t: TreeNode :rtype: bool\"\"\"\n <|body_0|>\n\n def isSubtree(self, s, t):\n \"\"\":type s: TreeNode :type t: TreeNode :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if s is None:\n return t is None\n if t is None:\n return False\n if s.val != t.val:\n return False\n return self.isSameTree(s.left, t.left) and self.isSameTree(s.right, t.right)\n<|end_body_0|>\n\n<|body_start_1|>\n if s is None:\n 
return t is None\n if t is None:\n return False\n if s.val == t.val:\n return self.isSameTree(s, t) or self.isSubtree(s.left, t) or self.isSubtree(s.right, t)\n else:\n return self.isSubtree(s.left, t) or self.isSubtree(s.right, t)\n<|end_body_1|>\n", "revision_id": "f8b35179b980e55f61bbcd2631fa3a9bf30c40ec", "skeleton": "<|skeleton|>\nclass Solution:\n\n def isSameTree(self, s, t):\n \"\"\":type s: TreeNode :type t: TreeNode :rtype: bool\"\"\"\n <|body_0|>\n\n def isSubtree(self, s, t):\n \"\"\":type s: TreeNode :type t: TreeNode :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def isSameTree(self, s, t):\n \"\"\":type s: TreeNode :type t: TreeNode :rtype: bool\"\"\"\n if s is None:\n return t is None\n if t is None:\n return False\n if s.val != t.val:\n return False\n return self.isSameTree(s.left, t.left) and self.isSameTree(s.right, t.right)\n\n def isSubtree(self, s, t):\n \"\"\":type s: TreeNode :type t: TreeNode :rtype: bool\"\"\"\n if s is None:\n return t is None\n if t is None:\n return False\n if s.val == t.val:\n return self.isSameTree(s, t) or self.isSubtree(s.left, t) or self.isSubtree(s.right, t)\n else:\n return self.isSubtree(s.left, t) or self.isSubtree(s.right, t)\n", "source": "the_stack_v2_python_sparse", "source_path": "Python Solutions/572 Subtree of Another Tree.py", "source_repo": "Sue9/Leetcode", "split": "val", "star_events_count": 0}
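The Solution record answers the subtree question by trying isSameTree at every node of s. A self-contained check, restating the record's two methods over a minimal TreeNode (the trees below are the usual example for this problem):

class TreeNode:
    # Minimal node type assumed by the Solution class.
    def __init__(self, val, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

class Solution:
    # Restated from the record above.
    def isSameTree(self, s, t):
        if s is None:
            return t is None
        if t is None or s.val != t.val:
            return False
        return self.isSameTree(s.left, t.left) and self.isSameTree(s.right, t.right)

    def isSubtree(self, s, t):
        if s is None:
            return t is None
        if t is None:
            return False
        return self.isSameTree(s, t) or self.isSubtree(s.left, t) or self.isSubtree(s.right, t)

# s = 3(4(1, 2), 5); t = 4(1, 2) matches the subtree rooted at 4.
s = TreeNode(3, TreeNode(4, TreeNode(1), TreeNode(2)), TreeNode(5))
t = TreeNode(4, TreeNode(1), TreeNode(2))
print(Solution().isSubtree(s, t))                         # True
print(Solution().isSubtree(s, TreeNode(4, TreeNode(1))))  # False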
{"blob_id": "949ff2e5f05636420f35c8a6af33a435937b1420", "bodies": ["data = queries.get_all_model_train_data(reverse=True)\npaginator = Paginator(data, 10)\npage_number = request.GET.get('page')\npage_obj = paginator.get_page(page_number)\nreturn render(request, 'data_manager_app/modelTrainData.html', {'form': self.form_class, 'datas': page_obj, 'search_form': self.search_form})", "url_name = return_url_name(request.path)\nif not has_permission(request.user.role_id, url_name, request.method):\n return JsonResponse({'error': 'No permission'}, status=403)\nif request.is_ajax():\n form = self.form_class(request.POST)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.added_by = request.user\n instance.save()\n ser_instance = serializers.serialize('json', [instance])\n return JsonResponse({'Added data': ser_instance}, status=200)\n else:\n str_error = ''\n for field, errors in form.errors.items():\n str_error += field + ': '\n for error in errors:\n str_error += error + ', '\n return JsonResponse({'error': str_error[:-2]}, status=400)\nelse:\n return JsonResponse({'Error': 'Something when terribly wrong'}, status=400)"], "bodies_text": "<|body_start_0|>\n data = queries.get_all_model_train_data(reverse=True)\n paginator = Paginator(data, 10)\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n return render(request, 'data_manager_app/modelTrainData.html', {'form': self.form_class, 'datas': page_obj, 'search_form': self.search_form})\n<|end_body_0|>\n\n<|body_start_1|>\n url_name = return_url_name(request.path)\n if not has_permission(request.user.role_id, url_name, request.method):\n return JsonResponse({'error': 'No permission'}, status=403)\n if request.is_ajax():\n form = self.form_class(request.POST)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.added_by = request.user\n instance.save()\n ser_instance = serializers.serialize('json', [instance])\n return JsonResponse({'Added data': ser_instance}, status=200)\n else:\n str_error = ''\n for field, errors in form.errors.items():\n str_error += field + ': '\n for error in errors:\n str_error += error + ', '\n return JsonResponse({'error': str_error[:-2]}, status=400)\n else:\n return JsonResponse({'Error': 'Something when terribly wrong'}, status=400)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "ModelTrainData", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ModelTrainData:\n\n def get(self, request):\n \"\"\"Return all data from ModelTrainData :param request: :return:\"\"\"\n <|body_0|>\n\n def post(self, request):\n \"\"\"Create record in model_train_data :param request: :return: Record if created, otherwise error\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n data = queries.get_all_model_train_data(reverse=True)\n paginator = Paginator(data, 10)\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n return render(request, 'data_manager_app/modelTrainData.html', {'form': self.form_class, 'datas': page_obj, 'search_form': self.search_form})\n<|end_body_0|>\n\n<|body_start_1|>\n url_name = return_url_name(request.path)\n if not has_permission(request.user.role_id, url_name, request.method):\n return JsonResponse({'error': 'No permission'}, status=403)\n if request.is_ajax():\n form = self.form_class(request.POST)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.added_by = request.user\n instance.save()\n ser_instance = 
serializers.serialize('json', [instance])\n return JsonResponse({'Added data': ser_instance}, status=200)\n else:\n str_error = ''\n for field, errors in form.errors.items():\n str_error += field + ': '\n for error in errors:\n str_error += error + ', '\n return JsonResponse({'error': str_error[:-2]}, status=400)\n else:\n return JsonResponse({'Error': 'Something when terribly wrong'}, status=400)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000041", "length_bytes": 9663, "license_type": "no_license", "methods": [{"docstring": "Return all data from ModelTrainData :param request: :return:", "name": "get", "signature": "def get(self, request)"}, {"docstring": "Create record in model_train_data :param request: :return: Record if created, otherwise error", "name": "post", "signature": "def post(self, request)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_053736", "prompt": "Implement the Python class `ModelTrainData` described below.\n\nClass description:\nImplement the ModelTrainData class.\n\nMethod signatures and docstrings:\n- def get(self, request): Return all data from ModelTrainData :param request: :return:\n- def post(self, request): Create record in model_train_data :param request: :return: Record if created, otherwise error", "prompted_full_text": "Implement the Python class `ModelTrainData` described below.\n\nClass description:\nImplement the ModelTrainData class.\n\nMethod signatures and docstrings:\n- def get(self, request): Return all data from ModelTrainData :param request: :return:\n- def post(self, request): Create record in model_train_data :param request: :return: Record if created, otherwise error\n\n<|skeleton|>\nclass ModelTrainData:\n\n def get(self, request):\n \"\"\"Return all data from ModelTrainData :param request: :return:\"\"\"\n <|body_0|>\n\n def post(self, request):\n \"\"\"Create record in model_train_data :param request: :return: Record if created, otherwise error\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n data = queries.get_all_model_train_data(reverse=True)\n paginator = Paginator(data, 10)\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n return render(request, 'data_manager_app/modelTrainData.html', {'form': self.form_class, 'datas': page_obj, 'search_form': self.search_form})\n<|end_body_0|>\n\n<|body_start_1|>\n url_name = return_url_name(request.path)\n if not has_permission(request.user.role_id, url_name, request.method):\n return JsonResponse({'error': 'No permission'}, status=403)\n if request.is_ajax():\n form = self.form_class(request.POST)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.added_by = request.user\n instance.save()\n ser_instance = serializers.serialize('json', [instance])\n return JsonResponse({'Added data': ser_instance}, status=200)\n else:\n str_error = ''\n for field, errors in form.errors.items():\n str_error += field + ': '\n for error in errors:\n str_error += error + ', '\n return JsonResponse({'error': str_error[:-2]}, status=400)\n else:\n return JsonResponse({'Error': 'Something when terribly wrong'}, status=400)\n<|end_body_1|>\n", "revision_id": "2dedee10bded628a0eaecacc4554b421cc6f0ddd", "skeleton": "<|skeleton|>\nclass ModelTrainData:\n\n def get(self, request):\n \"\"\"Return all data from ModelTrainData :param request: :return:\"\"\"\n <|body_0|>\n\n def post(self, request):\n \"\"\"Create record in model_train_data :param request: :return: Record if created, otherwise error\"\"\"\n 
<|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ModelTrainData:\n def get(self, request):\n \"\"\"Return all data from ModelTrainData :param request: :return:\"\"\"\n data = queries.get_all_model_train_data(reverse=True)\n paginator = Paginator(data, 10)\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n return render(request, 'data_manager_app/modelTrainData.html', {'form': self.form_class, 'datas': page_obj, 'search_form': self.search_form})\n\n def post(self, request):\n \"\"\"Create record in model_train_data :param request: :return: Record if created, otherwise error\"\"\"\n url_name = return_url_name(request.path)\n if not has_permission(request.user.role_id, url_name, request.method):\n return JsonResponse({'error': 'No permission'}, status=403)\n if request.is_ajax():\n form = self.form_class(request.POST)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.added_by = request.user\n instance.save()\n ser_instance = serializers.serialize('json', [instance])\n return JsonResponse({'Added data': ser_instance}, status=200)\n else:\n str_error = ''\n for field, errors in form.errors.items():\n str_error += field + ': '\n for error in errors:\n str_error += error + ', '\n return JsonResponse({'error': str_error[:-2]}, status=400)\n else:\n return JsonResponse({'Error': 'Something when terribly wrong'}, status=400)\n", "source": "the_stack_v2_python_sparse", "source_path": "data_model_manager_app/views/model_train_data_manager_view.py", "source_repo": "SonThanhNguyen13/django_data_manager", "split": "val", "star_events_count": 0}
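Two notes on the `ModelTrainData` record above. Its `post()` gates the AJAX branch on `request.is_ajax()`, which was deprecated in Django 3.1 and removed in Django 4.0, and its fallback message reads "Something when terribly wrong" (presumably "went"). The removed method amounts to a header check, and the manual error-string building (concatenate, then slice off the trailing ", ") can be expressed with joins; a sketch, with `is_ajax` and `flatten_errors` as illustrative helper names:

    def is_ajax(request):
        # Equivalent of the removed HttpRequest.is_ajax()
        return request.headers.get('x-requested-with') == 'XMLHttpRequest'

    def flatten_errors(form):
        # Same output as the record's loop, without the trailing-slice trick
        return ', '.join(
            '{}: {}'.format(field, ', '.join(errors))
            for field, errors in form.errors.items()
        )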
{"blob_id": "1d47be557f70f7d32c8a19155413ff945f364c3b", "bodies": ["self.user_id = str(user_id)\nself.click_data = None\nself.recall_data = []\nself.recommend_newsid = []\nself.n_article = n_article", "sql = 'select newsid from user_click where userid=%s limit 1 ' % self.user_id\nfor item1 in MYSQL_CLIENT.execute_query(sql):\n newsid = item1['newsid']\n sql = 'select content from article where newsid=%s;' % newsid\n for item2 in MYSQL_CLIENT.execute_query(sql):\n self.click_data = (item1['newsid'], item2['content'])", "mysql_of_actions = 'user_click'\nuser_news = get_click_action(mysql_of_actions)\ndata = item_cf_recommend(item_similarity(user_news), [self.user_id], user_news)\nfor newsid in data[self.user_id]:\n sql = 'select content from article where newsid=%s;' % newsid\n for item in MYSQL_CLIENT.execute_query(sql):\n self.recall_data.append({'newsid': newsid, 'content': item['content']})", "score_list = []\nfor recall_data in self.recall_data:\n sim = NewsSimLda.calc_similar(recall_data['content'], self.click_data[-1])\n score_list.append((recall_data['newsid'], sim))\nscore_list.sort(key=lambda x: x[-1], reverse=True)\nself.recommend_newsid = [newsid for newsid, _ in score_list[0:self.n_article]]", "self._get_user_action()\nself._recall()\nself._rank()"], "bodies_text": "<|body_start_0|>\n self.user_id = str(user_id)\n self.click_data = None\n self.recall_data = []\n self.recommend_newsid = []\n self.n_article = n_article\n<|end_body_0|>\n\n<|body_start_1|>\n sql = 'select newsid from user_click where userid=%s limit 1 ' % self.user_id\n for item1 in MYSQL_CLIENT.execute_query(sql):\n newsid = item1['newsid']\n sql = 'select content from article where newsid=%s;' % newsid\n for item2 in MYSQL_CLIENT.execute_query(sql):\n self.click_data = (item1['newsid'], item2['content'])\n<|end_body_1|>\n\n<|body_start_2|>\n mysql_of_actions = 'user_click'\n user_news = get_click_action(mysql_of_actions)\n data = item_cf_recommend(item_similarity(user_news), [self.user_id], user_news)\n for newsid in data[self.user_id]:\n sql = 'select content from article where newsid=%s;' % newsid\n for item in MYSQL_CLIENT.execute_query(sql):\n self.recall_data.append({'newsid': newsid, 'content': item['content']})\n<|end_body_2|>\n\n<|body_start_3|>\n score_list = []\n for recall_data in self.recall_data:\n sim = NewsSimLda.calc_similar(recall_data['content'], self.click_data[-1])\n score_list.append((recall_data['newsid'], sim))\n score_list.sort(key=lambda x: x[-1], reverse=True)\n self.recommend_newsid = [newsid for newsid, _ in score_list[0:self.n_article]]\n<|end_body_3|>\n\n<|body_start_4|>\n self._get_user_action()\n self._recall()\n self._rank()\n<|end_body_4|>\n", "class_docstring": "", "class_name": "RecommendEngine", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RecommendEngine:\n\n def __init__(self, user_id, n_article=3):\n \"\"\":param user_id: 用户ID :param n_article:int, 推荐n_article新闻\"\"\"\n <|body_0|>\n\n def _get_user_action(self):\n \"\"\"从msyql获取用户的用户点击行为 :return:\"\"\"\n <|body_1|>\n\n def _recall(self):\n \"\"\"召回模块 :return:\"\"\"\n <|body_2|>\n\n def _rank(self):\n \"\"\"排序模块 :return:\"\"\"\n <|body_3|>\n\n def recommend_news(self):\n \"\"\"给用户推荐新闻 :return:\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.user_id = str(user_id)\n self.click_data = None\n self.recall_data = []\n self.recommend_newsid = []\n self.n_article = n_article\n<|end_body_0|>\n\n<|body_start_1|>\n sql = 'select newsid from 
user_click where userid=%s limit 1 ' % self.user_id\n for item1 in MYSQL_CLIENT.execute_query(sql):\n newsid = item1['newsid']\n sql = 'select content from article where newsid=%s;' % newsid\n for item2 in MYSQL_CLIENT.execute_query(sql):\n self.click_data = (item1['newsid'], item2['content'])\n<|end_body_1|>\n\n<|body_start_2|>\n mysql_of_actions = 'user_click'\n user_news = get_click_action(mysql_of_actions)\n data = item_cf_recommend(item_similarity(user_news), [self.user_id], user_news)\n for newsid in data[self.user_id]:\n sql = 'select content from article where newsid=%s;' % newsid\n for item in MYSQL_CLIENT.execute_query(sql):\n self.recall_data.append({'newsid': newsid, 'content': item['content']})\n<|end_body_2|>\n\n<|body_start_3|>\n score_list = []\n for recall_data in self.recall_data:\n sim = NewsSimLda.calc_similar(recall_data['content'], self.click_data[-1])\n score_list.append((recall_data['newsid'], sim))\n score_list.sort(key=lambda x: x[-1], reverse=True)\n self.recommend_newsid = [newsid for newsid, _ in score_list[0:self.n_article]]\n<|end_body_3|>\n\n<|body_start_4|>\n self._get_user_action()\n self._recall()\n self._rank()\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000042", "length_bytes": 3487, "license_type": "no_license", "methods": [{"docstring": ":param user_id: 用户ID :param n_article:int, 推荐n_article新闻", "name": "__init__", "signature": "def __init__(self, user_id, n_article=3)"}, {"docstring": "从msyql获取用户的用户点击行为 :return:", "name": "_get_user_action", "signature": "def _get_user_action(self)"}, {"docstring": "召回模块 :return:", "name": "_recall", "signature": "def _recall(self)"}, {"docstring": "排序模块 :return:", "name": "_rank", "signature": "def _rank(self)"}, {"docstring": "给用户推荐新闻 :return:", "name": "recommend_news", "signature": "def recommend_news(self)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_018894", "prompt": "Implement the Python class `RecommendEngine` described below.\n\nClass description:\nImplement the RecommendEngine class.\n\nMethod signatures and docstrings:\n- def __init__(self, user_id, n_article=3): :param user_id: 用户ID :param n_article:int, 推荐n_article新闻\n- def _get_user_action(self): 从msyql获取用户的用户点击行为 :return:\n- def _recall(self): 召回模块 :return:\n- def _rank(self): 排序模块 :return:\n- def recommend_news(self): 给用户推荐新闻 :return:", "prompted_full_text": "Implement the Python class `RecommendEngine` described below.\n\nClass description:\nImplement the RecommendEngine class.\n\nMethod signatures and docstrings:\n- def __init__(self, user_id, n_article=3): :param user_id: 用户ID :param n_article:int, 推荐n_article新闻\n- def _get_user_action(self): 从msyql获取用户的用户点击行为 :return:\n- def _recall(self): 召回模块 :return:\n- def _rank(self): 排序模块 :return:\n- def recommend_news(self): 给用户推荐新闻 :return:\n\n<|skeleton|>\nclass RecommendEngine:\n\n def __init__(self, user_id, n_article=3):\n \"\"\":param user_id: 用户ID :param n_article:int, 推荐n_article新闻\"\"\"\n <|body_0|>\n\n def _get_user_action(self):\n \"\"\"从msyql获取用户的用户点击行为 :return:\"\"\"\n <|body_1|>\n\n def _recall(self):\n \"\"\"召回模块 :return:\"\"\"\n <|body_2|>\n\n def _rank(self):\n \"\"\"排序模块 :return:\"\"\"\n <|body_3|>\n\n def recommend_news(self):\n \"\"\"给用户推荐新闻 :return:\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.user_id = str(user_id)\n self.click_data = None\n self.recall_data = []\n self.recommend_newsid = []\n self.n_article = n_article\n<|end_body_0|>\n\n<|body_start_1|>\n sql = 'select newsid from user_click where userid=%s limit 1 ' % 
self.user_id\n for item1 in MYSQL_CLIENT.execute_query(sql):\n newsid = item1['newsid']\n sql = 'select content from article where newsid=%s;' % newsid\n for item2 in MYSQL_CLIENT.execute_query(sql):\n self.click_data = (item1['newsid'], item2['content'])\n<|end_body_1|>\n\n<|body_start_2|>\n mysql_of_actions = 'user_click'\n user_news = get_click_action(mysql_of_actions)\n data = item_cf_recommend(item_similarity(user_news), [self.user_id], user_news)\n for newsid in data[self.user_id]:\n sql = 'select content from article where newsid=%s;' % newsid\n for item in MYSQL_CLIENT.execute_query(sql):\n self.recall_data.append({'newsid': newsid, 'content': item['content']})\n<|end_body_2|>\n\n<|body_start_3|>\n score_list = []\n for recall_data in self.recall_data:\n sim = NewsSimLda.calc_similar(recall_data['content'], self.click_data[-1])\n score_list.append((recall_data['newsid'], sim))\n score_list.sort(key=lambda x: x[-1], reverse=True)\n self.recommend_newsid = [newsid for newsid, _ in score_list[0:self.n_article]]\n<|end_body_3|>\n\n<|body_start_4|>\n self._get_user_action()\n self._recall()\n self._rank()\n<|end_body_4|>\n", "revision_id": "b5389f4bf3ced1496a00a5263cd94cdc1f29aad8", "skeleton": "<|skeleton|>\nclass RecommendEngine:\n\n def __init__(self, user_id, n_article=3):\n \"\"\":param user_id: 用户ID :param n_article:int, 推荐n_article新闻\"\"\"\n <|body_0|>\n\n def _get_user_action(self):\n \"\"\"从msyql获取用户的用户点击行为 :return:\"\"\"\n <|body_1|>\n\n def _recall(self):\n \"\"\"召回模块 :return:\"\"\"\n <|body_2|>\n\n def _rank(self):\n \"\"\"排序模块 :return:\"\"\"\n <|body_3|>\n\n def recommend_news(self):\n \"\"\"给用户推荐新闻 :return:\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class RecommendEngine:\n def __init__(self, user_id, n_article=3):\n \"\"\":param user_id: 用户ID :param n_article:int, 推荐n_article新闻\"\"\"\n self.user_id = str(user_id)\n self.click_data = None\n self.recall_data = []\n self.recommend_newsid = []\n self.n_article = n_article\n\n def _get_user_action(self):\n \"\"\"从msyql获取用户的用户点击行为 :return:\"\"\"\n sql = 'select newsid from user_click where userid=%s limit 1 ' % self.user_id\n for item1 in MYSQL_CLIENT.execute_query(sql):\n newsid = item1['newsid']\n sql = 'select content from article where newsid=%s;' % newsid\n for item2 in MYSQL_CLIENT.execute_query(sql):\n self.click_data = (item1['newsid'], item2['content'])\n\n def _recall(self):\n \"\"\"召回模块 :return:\"\"\"\n mysql_of_actions = 'user_click'\n user_news = get_click_action(mysql_of_actions)\n data = item_cf_recommend(item_similarity(user_news), [self.user_id], user_news)\n for newsid in data[self.user_id]:\n sql = 'select content from article where newsid=%s;' % newsid\n for item in MYSQL_CLIENT.execute_query(sql):\n self.recall_data.append({'newsid': newsid, 'content': item['content']})\n\n def _rank(self):\n \"\"\"排序模块 :return:\"\"\"\n score_list = []\n for recall_data in self.recall_data:\n sim = NewsSimLda.calc_similar(recall_data['content'], self.click_data[-1])\n score_list.append((recall_data['newsid'], sim))\n score_list.sort(key=lambda x: x[-1], reverse=True)\n self.recommend_newsid = [newsid for newsid, _ in score_list[0:self.n_article]]\n\n def recommend_news(self):\n \"\"\"给用户推荐新闻 :return:\"\"\"\n self._get_user_action()\n self._recall()\n self._rank()\n", "source": "the_stack_v2_python_sparse", "source_path": "11-Recommender_System/recommend.py", "source_repo": 
"GAOYANGAU/AIBigdata", "split": "val", "star_events_count": 5}
{"blob_id": "01d2a0e9624894f835cba384993e5225bfce2811", "bodies": ["self.record = record\nself.current_action = action\nrecord_needs = self.collect_needs()\nsuper().__init__(*record_needs)", "if self.current_action == 'read':\n return self.read_permissions()\nelif self.current_action == 'create':\n return [create_records_action, backoffice_access_action]\nelif self.current_action == 'update':\n return self.record_needs() + [backoffice_access_action]\nelse:\n return self.record_needs()", "if self.is_public():\n return [any_user]\nelse:\n return self.record_needs() + [backoffice_access_action]", "if current_app.config.get('ILS_RECORDS_EXPLICIT_PERMISSIONS_ENABLED'):\n return self.record.get('_access', {}).get(self.current_action, [])\nreturn []", "needs = []\nfor access_entity in self.record_explicit_restrictions():\n try:\n if isinstance(access_entity, string_types):\n needs.append(UserNeed(int(access_entity)))\n elif isinstance(access_entity, int):\n needs.append(UserNeed(access_entity))\n except ValueError:\n needs.append(RoleNeed(access_entity.lower()))\nreturn needs", "has_explicit_perm = current_app.config.get('ILS_RECORDS_EXPLICIT_PERMISSIONS_ENABLED') and self.record.get('_access', {}).get('read', [])\nif not has_explicit_perm:\n return self.record.get('restricted', False) is False\nreturn False"], "bodies_text": "<|body_start_0|>\n self.record = record\n self.current_action = action\n record_needs = self.collect_needs()\n super().__init__(*record_needs)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.current_action == 'read':\n return self.read_permissions()\n elif self.current_action == 'create':\n return [create_records_action, backoffice_access_action]\n elif self.current_action == 'update':\n return self.record_needs() + [backoffice_access_action]\n else:\n return self.record_needs()\n<|end_body_1|>\n\n<|body_start_2|>\n if self.is_public():\n return [any_user]\n else:\n return self.record_needs() + [backoffice_access_action]\n<|end_body_2|>\n\n<|body_start_3|>\n if current_app.config.get('ILS_RECORDS_EXPLICIT_PERMISSIONS_ENABLED'):\n return self.record.get('_access', {}).get(self.current_action, [])\n return []\n<|end_body_3|>\n\n<|body_start_4|>\n needs = []\n for access_entity in self.record_explicit_restrictions():\n try:\n if isinstance(access_entity, string_types):\n needs.append(UserNeed(int(access_entity)))\n elif isinstance(access_entity, int):\n needs.append(UserNeed(access_entity))\n except ValueError:\n needs.append(RoleNeed(access_entity.lower()))\n return needs\n<|end_body_4|>\n\n<|body_start_5|>\n has_explicit_perm = current_app.config.get('ILS_RECORDS_EXPLICIT_PERMISSIONS_ENABLED') and self.record.get('_access', {}).get('read', [])\n if not has_explicit_perm:\n return self.record.get('restricted', False) is False\n return False\n<|end_body_5|>\n", "class_docstring": "Record permission. - Create action given to librarian, admin and specified users. - Read access given to everyone with possibility to hide. - Delete access to admin and specified users.", "class_name": "RecordPermission", "detected_licenses": ["MIT", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RecordPermission:\n \"\"\"Record permission. - Create action given to librarian, admin and specified users. - Read access given to everyone with possibility to hide. 
- Delete access to admin and specified users.\"\"\"\n\n def __init__(self, record, action):\n \"\"\"Constructor.\"\"\"\n <|body_0|>\n\n def collect_needs(self):\n \"\"\"Collect permission policy per action.\"\"\"\n <|body_1|>\n\n def read_permissions(self):\n \"\"\"Define read permission policy per record.\"\"\"\n <|body_2|>\n\n def record_explicit_restrictions(self):\n \"\"\"Return the list of user ids/roles allowed for the given action.\"\"\"\n <|body_3|>\n\n def record_needs(self):\n \"\"\"Create needs of the record.\"\"\"\n <|body_4|>\n\n def is_public(self):\n \"\"\"Check if the record is fully public. Explicit permission = `_access` field Implicit permission = `restricted` field Explicit, when defined, takes precedence over implicit which is ignored. The record is public when `_access.read` is not defined and `restricted` is False.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.record = record\n self.current_action = action\n record_needs = self.collect_needs()\n super().__init__(*record_needs)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.current_action == 'read':\n return self.read_permissions()\n elif self.current_action == 'create':\n return [create_records_action, backoffice_access_action]\n elif self.current_action == 'update':\n return self.record_needs() + [backoffice_access_action]\n else:\n return self.record_needs()\n<|end_body_1|>\n\n<|body_start_2|>\n if self.is_public():\n return [any_user]\n else:\n return self.record_needs() + [backoffice_access_action]\n<|end_body_2|>\n\n<|body_start_3|>\n if current_app.config.get('ILS_RECORDS_EXPLICIT_PERMISSIONS_ENABLED'):\n return self.record.get('_access', {}).get(self.current_action, [])\n return []\n<|end_body_3|>\n\n<|body_start_4|>\n needs = []\n for access_entity in self.record_explicit_restrictions():\n try:\n if isinstance(access_entity, string_types):\n needs.append(UserNeed(int(access_entity)))\n elif isinstance(access_entity, int):\n needs.append(UserNeed(access_entity))\n except ValueError:\n needs.append(RoleNeed(access_entity.lower()))\n return needs\n<|end_body_4|>\n\n<|body_start_5|>\n has_explicit_perm = current_app.config.get('ILS_RECORDS_EXPLICIT_PERMISSIONS_ENABLED') and self.record.get('_access', {}).get('read', [])\n if not has_explicit_perm:\n return self.record.get('restricted', False) is False\n return False\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000043", "length_bytes": 3773, "license_type": "permissive", "methods": [{"docstring": "Constructor.", "name": "__init__", "signature": "def __init__(self, record, action)"}, {"docstring": "Collect permission policy per action.", "name": "collect_needs", "signature": "def collect_needs(self)"}, {"docstring": "Define read permission policy per record.", "name": "read_permissions", "signature": "def read_permissions(self)"}, {"docstring": "Return the list of user ids/roles allowed for the given action.", "name": "record_explicit_restrictions", "signature": "def record_explicit_restrictions(self)"}, {"docstring": "Create needs of the record.", "name": "record_needs", "signature": "def record_needs(self)"}, {"docstring": "Check if the record is fully public. Explicit permission = `_access` field Implicit permission = `restricted` field Explicit, when defined, takes precedence over implicit which is ignored. 
The record is public when `_access.read` is not defined and `restricted` is False.", "name": "is_public", "signature": "def is_public(self)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_train_002011", "prompt": "Implement the Python class `RecordPermission` described below.\n\nClass description:\nRecord permission. - Create action given to librarian, admin and specified users. - Read access given to everyone with possibility to hide. - Delete access to admin and specified users.\n\nMethod signatures and docstrings:\n- def __init__(self, record, action): Constructor.\n- def collect_needs(self): Collect permission policy per action.\n- def read_permissions(self): Define read permission policy per record.\n- def record_explicit_restrictions(self): Return the list of user ids/roles allowed for the given action.\n- def record_needs(self): Create needs of the record.\n- def is_public(self): Check if the record is fully public. Explicit permission = `_access` field Implicit permission = `restricted` field Explicit, when defined, takes precedence over implicit which is ignored. The record is public when `_access.read` is not defined and `restricted` is False.", "prompted_full_text": "Implement the Python class `RecordPermission` described below.\n\nClass description:\nRecord permission. - Create action given to librarian, admin and specified users. - Read access given to everyone with possibility to hide. - Delete access to admin and specified users.\n\nMethod signatures and docstrings:\n- def __init__(self, record, action): Constructor.\n- def collect_needs(self): Collect permission policy per action.\n- def read_permissions(self): Define read permission policy per record.\n- def record_explicit_restrictions(self): Return the list of user ids/roles allowed for the given action.\n- def record_needs(self): Create needs of the record.\n- def is_public(self): Check if the record is fully public. Explicit permission = `_access` field Implicit permission = `restricted` field Explicit, when defined, takes precedence over implicit which is ignored. The record is public when `_access.read` is not defined and `restricted` is False.\n\n<|skeleton|>\nclass RecordPermission:\n \"\"\"Record permission. - Create action given to librarian, admin and specified users. - Read access given to everyone with possibility to hide. - Delete access to admin and specified users.\"\"\"\n\n def __init__(self, record, action):\n \"\"\"Constructor.\"\"\"\n <|body_0|>\n\n def collect_needs(self):\n \"\"\"Collect permission policy per action.\"\"\"\n <|body_1|>\n\n def read_permissions(self):\n \"\"\"Define read permission policy per record.\"\"\"\n <|body_2|>\n\n def record_explicit_restrictions(self):\n \"\"\"Return the list of user ids/roles allowed for the given action.\"\"\"\n <|body_3|>\n\n def record_needs(self):\n \"\"\"Create needs of the record.\"\"\"\n <|body_4|>\n\n def is_public(self):\n \"\"\"Check if the record is fully public. Explicit permission = `_access` field Implicit permission = `restricted` field Explicit, when defined, takes precedence over implicit which is ignored. 
The record is public when `_access.read` is not defined and `restricted` is False.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.record = record\n self.current_action = action\n record_needs = self.collect_needs()\n super().__init__(*record_needs)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.current_action == 'read':\n return self.read_permissions()\n elif self.current_action == 'create':\n return [create_records_action, backoffice_access_action]\n elif self.current_action == 'update':\n return self.record_needs() + [backoffice_access_action]\n else:\n return self.record_needs()\n<|end_body_1|>\n\n<|body_start_2|>\n if self.is_public():\n return [any_user]\n else:\n return self.record_needs() + [backoffice_access_action]\n<|end_body_2|>\n\n<|body_start_3|>\n if current_app.config.get('ILS_RECORDS_EXPLICIT_PERMISSIONS_ENABLED'):\n return self.record.get('_access', {}).get(self.current_action, [])\n return []\n<|end_body_3|>\n\n<|body_start_4|>\n needs = []\n for access_entity in self.record_explicit_restrictions():\n try:\n if isinstance(access_entity, string_types):\n needs.append(UserNeed(int(access_entity)))\n elif isinstance(access_entity, int):\n needs.append(UserNeed(access_entity))\n except ValueError:\n needs.append(RoleNeed(access_entity.lower()))\n return needs\n<|end_body_4|>\n\n<|body_start_5|>\n has_explicit_perm = current_app.config.get('ILS_RECORDS_EXPLICIT_PERMISSIONS_ENABLED') and self.record.get('_access', {}).get('read', [])\n if not has_explicit_perm:\n return self.record.get('restricted', False) is False\n return False\n<|end_body_5|>\n", "revision_id": "1c36526e85510100c5f64059518d1b716d87ac10", "skeleton": "<|skeleton|>\nclass RecordPermission:\n \"\"\"Record permission. - Create action given to librarian, admin and specified users. - Read access given to everyone with possibility to hide. - Delete access to admin and specified users.\"\"\"\n\n def __init__(self, record, action):\n \"\"\"Constructor.\"\"\"\n <|body_0|>\n\n def collect_needs(self):\n \"\"\"Collect permission policy per action.\"\"\"\n <|body_1|>\n\n def read_permissions(self):\n \"\"\"Define read permission policy per record.\"\"\"\n <|body_2|>\n\n def record_explicit_restrictions(self):\n \"\"\"Return the list of user ids/roles allowed for the given action.\"\"\"\n <|body_3|>\n\n def record_needs(self):\n \"\"\"Create needs of the record.\"\"\"\n <|body_4|>\n\n def is_public(self):\n \"\"\"Check if the record is fully public. Explicit permission = `_access` field Implicit permission = `restricted` field Explicit, when defined, takes precedence over implicit which is ignored. The record is public when `_access.read` is not defined and `restricted` is False.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class RecordPermission:\n \"\"\"Record permission. - Create action given to librarian, admin and specified users. - Read access given to everyone with possibility to hide. 
- Delete access to admin and specified users.\"\"\"\n\n def __init__(self, record, action):\n \"\"\"Constructor.\"\"\"\n self.record = record\n self.current_action = action\n record_needs = self.collect_needs()\n super().__init__(*record_needs)\n\n def collect_needs(self):\n \"\"\"Collect permission policy per action.\"\"\"\n if self.current_action == 'read':\n return self.read_permissions()\n elif self.current_action == 'create':\n return [create_records_action, backoffice_access_action]\n elif self.current_action == 'update':\n return self.record_needs() + [backoffice_access_action]\n else:\n return self.record_needs()\n\n def read_permissions(self):\n \"\"\"Define read permission policy per record.\"\"\"\n if self.is_public():\n return [any_user]\n else:\n return self.record_needs() + [backoffice_access_action]\n\n def record_explicit_restrictions(self):\n \"\"\"Return the list of user ids/roles allowed for the given action.\"\"\"\n if current_app.config.get('ILS_RECORDS_EXPLICIT_PERMISSIONS_ENABLED'):\n return self.record.get('_access', {}).get(self.current_action, [])\n return []\n\n def record_needs(self):\n \"\"\"Create needs of the record.\"\"\"\n needs = []\n for access_entity in self.record_explicit_restrictions():\n try:\n if isinstance(access_entity, string_types):\n needs.append(UserNeed(int(access_entity)))\n elif isinstance(access_entity, int):\n needs.append(UserNeed(access_entity))\n except ValueError:\n needs.append(RoleNeed(access_entity.lower()))\n return needs\n\n def is_public(self):\n \"\"\"Check if the record is fully public. Explicit permission = `_access` field Implicit permission = `restricted` field Explicit, when defined, takes precedence over implicit which is ignored. The record is public when `_access.read` is not defined and `restricted` is False.\"\"\"\n has_explicit_perm = current_app.config.get('ILS_RECORDS_EXPLICIT_PERMISSIONS_ENABLED') and self.record.get('_access', {}).get('read', [])\n if not has_explicit_perm:\n return self.record.get('restricted', False) is False\n return False\n", "source": "the_stack_v2_python_sparse", "source_path": "invenio_app_ils/records/permissions.py", "source_repo": "inveniosoftware/invenio-app-ils", "split": "val", "star_events_count": 64}
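One structural point about the `RecordPermission` record: `__init__` ends with `super().__init__(*record_needs)`, so the class cannot be the bare `class RecordPermission:` shown in the skeleton; with `object` as the only base, that call raises `TypeError` as soon as any needs are passed. In the upstream invenio-app-ils code the base is presumably the Invenio/Flask-Principal `Permission`, whose constructor does accept `*needs`; a sketch of that assumption:

    from flask_principal import Permission  # assumed base; invenio_access.Permission builds on this

    class RecordPermission(Permission):
        ...  # methods exactly as in the record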
{"blob_id": "1e1deda27442496174bdb709366361aff9212404", "bodies": ["with tf.keras.utils.CustomObjectScope({'tf': tf}):\n self.model = tf.keras.models.load_model(model)\nself.database = database\nself.identities = identities", "em = np.zeros((images.shape[0], 128))\nfor i, img in enumerate(images):\n em[i] = self.model.predict(np.expand_dims(img, axis=0))[0]\nreturn np.array(em)", "em = self.model.predict(np.expand_dims(image, axis=0))[0]\ndist = []\nlenght = len(self.identities)\nfor i in range(lenght):\n dist.append(np.sum(np.square(em, self.database[i])))\ndist = np.array(dist)\nidx = np.argmin(dist)\nif dist[idx] < tau:\n return (self.identities[idx], dist[idx])\nelse:\n return (None, None)"], "bodies_text": "<|body_start_0|>\n with tf.keras.utils.CustomObjectScope({'tf': tf}):\n self.model = tf.keras.models.load_model(model)\n self.database = database\n self.identities = identities\n<|end_body_0|>\n\n<|body_start_1|>\n em = np.zeros((images.shape[0], 128))\n for i, img in enumerate(images):\n em[i] = self.model.predict(np.expand_dims(img, axis=0))[0]\n return np.array(em)\n<|end_body_1|>\n\n<|body_start_2|>\n em = self.model.predict(np.expand_dims(image, axis=0))[0]\n dist = []\n lenght = len(self.identities)\n for i in range(lenght):\n dist.append(np.sum(np.square(em, self.database[i])))\n dist = np.array(dist)\n idx = np.argmin(dist)\n if dist[idx] < tau:\n return (self.identities[idx], dist[idx])\n else:\n return (None, None)\n<|end_body_2|>\n", "class_docstring": "FaceVerification class", "class_name": "FaceVerification", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FaceVerification:\n \"\"\"FaceVerification class\"\"\"\n\n def __init__(self, model_path, database, identities):\n \"\"\"Class constructor Argumetns: - model_path is the path to where the face verification embedding model is stored - database is a numpy.ndarray of shape (d, e) containing all the face embeddings in the database * d is the number of images in the database * e is the dimensionality of the embedding - identities is a list of length d containing the identities corresponding to the embeddings in database\"\"\"\n <|body_0|>\n\n def embedding(self, images):\n \"\"\"Public instance method that calculates the face embedding of images Arguments: - images is a numpy.ndarray of shape (i, n, n, 3) containing the aligned images * i is the number of images * n is the size of the aligned images Returns: A numpy.ndarray of shape (i, e) containing the embeddings where e is the dimensionality of the embeddings\"\"\"\n <|body_1|>\n\n def verify(self, image, tau=0.5):\n \"\"\"Public instance method Arguments: - image is a numpy.ndarray of shape (n, n, 3) containing the aligned image of the face to be verify * n is the shape of the aligned image - tau is the maximum euclidean distance used for verification Returns: (identity, distance), or (None, None) on failure - identity is a string containing the identity of the verified face - distance is the euclidean distance between the verified face embedding and the identified database embedding\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n with tf.keras.utils.CustomObjectScope({'tf': tf}):\n self.model = tf.keras.models.load_model(model)\n self.database = database\n self.identities = identities\n<|end_body_0|>\n\n<|body_start_1|>\n em = np.zeros((images.shape[0], 128))\n for i, img in enumerate(images):\n em[i] = self.model.predict(np.expand_dims(img, axis=0))[0]\n return 
np.array(em)\n<|end_body_1|>\n\n<|body_start_2|>\n em = self.model.predict(np.expand_dims(image, axis=0))[0]\n dist = []\n lenght = len(self.identities)\n for i in range(lenght):\n dist.append(np.sum(np.square(em, self.database[i])))\n dist = np.array(dist)\n idx = np.argmin(dist)\n if dist[idx] < tau:\n return (self.identities[idx], dist[idx])\n else:\n return (None, None)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000044", "length_bytes": 2739, "license_type": "no_license", "methods": [{"docstring": "Class constructor Argumetns: - model_path is the path to where the face verification embedding model is stored - database is a numpy.ndarray of shape (d, e) containing all the face embeddings in the database * d is the number of images in the database * e is the dimensionality of the embedding - identities is a list of length d containing the identities corresponding to the embeddings in database", "name": "__init__", "signature": "def __init__(self, model_path, database, identities)"}, {"docstring": "Public instance method that calculates the face embedding of images Arguments: - images is a numpy.ndarray of shape (i, n, n, 3) containing the aligned images * i is the number of images * n is the size of the aligned images Returns: A numpy.ndarray of shape (i, e) containing the embeddings where e is the dimensionality of the embeddings", "name": "embedding", "signature": "def embedding(self, images)"}, {"docstring": "Public instance method Arguments: - image is a numpy.ndarray of shape (n, n, 3) containing the aligned image of the face to be verify * n is the shape of the aligned image - tau is the maximum euclidean distance used for verification Returns: (identity, distance), or (None, None) on failure - identity is a string containing the identity of the verified face - distance is the euclidean distance between the verified face embedding and the identified database embedding", "name": "verify", "signature": "def verify(self, image, tau=0.5)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_test_002084", "prompt": "Implement the Python class `FaceVerification` described below.\n\nClass description:\nFaceVerification class\n\nMethod signatures and docstrings:\n- def __init__(self, model_path, database, identities): Class constructor Argumetns: - model_path is the path to where the face verification embedding model is stored - database is a numpy.ndarray of shape (d, e) containing all the face embeddings in the database * d is the number of images in the database * e is the dimensionality of the embedding - identities is a list of length d containing the identities corresponding to the embeddings in database\n- def embedding(self, images): Public instance method that calculates the face embedding of images Arguments: - images is a numpy.ndarray of shape (i, n, n, 3) containing the aligned images * i is the number of images * n is the size of the aligned images Returns: A numpy.ndarray of shape (i, e) containing the embeddings where e is the dimensionality of the embeddings\n- def verify(self, image, tau=0.5): Public instance method Arguments: - image is a numpy.ndarray of shape (n, n, 3) containing the aligned image of the face to be verify * n is the shape of the aligned image - tau is the maximum euclidean distance used for verification Returns: (identity, distance), or (None, None) on failure - identity is a string containing the identity of the verified face - distance is the euclidean distance between the verified face embedding and the identified 
database embedding", "prompted_full_text": "Implement the Python class `FaceVerification` described below.\n\nClass description:\nFaceVerification class\n\nMethod signatures and docstrings:\n- def __init__(self, model_path, database, identities): Class constructor Argumetns: - model_path is the path to where the face verification embedding model is stored - database is a numpy.ndarray of shape (d, e) containing all the face embeddings in the database * d is the number of images in the database * e is the dimensionality of the embedding - identities is a list of length d containing the identities corresponding to the embeddings in database\n- def embedding(self, images): Public instance method that calculates the face embedding of images Arguments: - images is a numpy.ndarray of shape (i, n, n, 3) containing the aligned images * i is the number of images * n is the size of the aligned images Returns: A numpy.ndarray of shape (i, e) containing the embeddings where e is the dimensionality of the embeddings\n- def verify(self, image, tau=0.5): Public instance method Arguments: - image is a numpy.ndarray of shape (n, n, 3) containing the aligned image of the face to be verify * n is the shape of the aligned image - tau is the maximum euclidean distance used for verification Returns: (identity, distance), or (None, None) on failure - identity is a string containing the identity of the verified face - distance is the euclidean distance between the verified face embedding and the identified database embedding\n\n<|skeleton|>\nclass FaceVerification:\n \"\"\"FaceVerification class\"\"\"\n\n def __init__(self, model_path, database, identities):\n \"\"\"Class constructor Argumetns: - model_path is the path to where the face verification embedding model is stored - database is a numpy.ndarray of shape (d, e) containing all the face embeddings in the database * d is the number of images in the database * e is the dimensionality of the embedding - identities is a list of length d containing the identities corresponding to the embeddings in database\"\"\"\n <|body_0|>\n\n def embedding(self, images):\n \"\"\"Public instance method that calculates the face embedding of images Arguments: - images is a numpy.ndarray of shape (i, n, n, 3) containing the aligned images * i is the number of images * n is the size of the aligned images Returns: A numpy.ndarray of shape (i, e) containing the embeddings where e is the dimensionality of the embeddings\"\"\"\n <|body_1|>\n\n def verify(self, image, tau=0.5):\n \"\"\"Public instance method Arguments: - image is a numpy.ndarray of shape (n, n, 3) containing the aligned image of the face to be verify * n is the shape of the aligned image - tau is the maximum euclidean distance used for verification Returns: (identity, distance), or (None, None) on failure - identity is a string containing the identity of the verified face - distance is the euclidean distance between the verified face embedding and the identified database embedding\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n with tf.keras.utils.CustomObjectScope({'tf': tf}):\n self.model = tf.keras.models.load_model(model)\n self.database = database\n self.identities = identities\n<|end_body_0|>\n\n<|body_start_1|>\n em = np.zeros((images.shape[0], 128))\n for i, img in enumerate(images):\n em[i] = self.model.predict(np.expand_dims(img, axis=0))[0]\n return np.array(em)\n<|end_body_1|>\n\n<|body_start_2|>\n em = self.model.predict(np.expand_dims(image, axis=0))[0]\n dist = []\n lenght = 
len(self.identities)\n for i in range(lenght):\n dist.append(np.sum(np.square(em, self.database[i])))\n dist = np.array(dist)\n idx = np.argmin(dist)\n if dist[idx] < tau:\n return (self.identities[idx], dist[idx])\n else:\n return (None, None)\n<|end_body_2|>\n", "revision_id": "fc2cec306961f7ca2448965ddd3a2f656bbe10c7", "skeleton": "<|skeleton|>\nclass FaceVerification:\n \"\"\"FaceVerification class\"\"\"\n\n def __init__(self, model_path, database, identities):\n \"\"\"Class constructor Argumetns: - model_path is the path to where the face verification embedding model is stored - database is a numpy.ndarray of shape (d, e) containing all the face embeddings in the database * d is the number of images in the database * e is the dimensionality of the embedding - identities is a list of length d containing the identities corresponding to the embeddings in database\"\"\"\n <|body_0|>\n\n def embedding(self, images):\n \"\"\"Public instance method that calculates the face embedding of images Arguments: - images is a numpy.ndarray of shape (i, n, n, 3) containing the aligned images * i is the number of images * n is the size of the aligned images Returns: A numpy.ndarray of shape (i, e) containing the embeddings where e is the dimensionality of the embeddings\"\"\"\n <|body_1|>\n\n def verify(self, image, tau=0.5):\n \"\"\"Public instance method Arguments: - image is a numpy.ndarray of shape (n, n, 3) containing the aligned image of the face to be verify * n is the shape of the aligned image - tau is the maximum euclidean distance used for verification Returns: (identity, distance), or (None, None) on failure - identity is a string containing the identity of the verified face - distance is the euclidean distance between the verified face embedding and the identified database embedding\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class FaceVerification:\n \"\"\"FaceVerification class\"\"\"\n\n def __init__(self, model_path, database, identities):\n \"\"\"Class constructor Argumetns: - model_path is the path to where the face verification embedding model is stored - database is a numpy.ndarray of shape (d, e) containing all the face embeddings in the database * d is the number of images in the database * e is the dimensionality of the embedding - identities is a list of length d containing the identities corresponding to the embeddings in database\"\"\"\n with tf.keras.utils.CustomObjectScope({'tf': tf}):\n self.model = tf.keras.models.load_model(model)\n self.database = database\n self.identities = identities\n\n def embedding(self, images):\n \"\"\"Public instance method that calculates the face embedding of images Arguments: - images is a numpy.ndarray of shape (i, n, n, 3) containing the aligned images * i is the number of images * n is the size of the aligned images Returns: A numpy.ndarray of shape (i, e) containing the embeddings where e is the dimensionality of the embeddings\"\"\"\n em = np.zeros((images.shape[0], 128))\n for i, img in enumerate(images):\n em[i] = self.model.predict(np.expand_dims(img, axis=0))[0]\n return np.array(em)\n\n def verify(self, image, tau=0.5):\n \"\"\"Public instance method Arguments: - image is a numpy.ndarray of shape (n, n, 3) containing the aligned image of the face to be verify * n is the shape of the aligned image - tau is the maximum euclidean distance used for verification Returns: (identity, 
distance), or (None, None) on failure - identity is a string containing the identity of the verified face - distance is the euclidean distance between the verified face embedding and the identified database embedding\"\"\"\n em = self.model.predict(np.expand_dims(image, axis=0))[0]\n dist = []\n lenght = len(self.identities)\n for i in range(lenght):\n dist.append(np.sum(np.square(em, self.database[i])))\n dist = np.array(dist)\n idx = np.argmin(dist)\n if dist[idx] < tau:\n return (self.identities[idx], dist[idx])\n else:\n return (None, None)\n", "source": "the_stack_v2_python_sparse", "source_path": "supervised_learning/0x0B-face_verification/verification.py", "source_repo": "dalexach/holbertonschool-machine_learning", "split": "val", "star_events_count": 2}
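Two apparent bugs in the `FaceVerification` record are worth flagging. First, `__init__` calls `tf.keras.models.load_model(model)` but the parameter is named `model_path`, a `NameError` as written. Second, `np.square(em, self.database[i])` does not square a difference: NumPy ufuncs treat the second positional argument as the `out` array, so this overwrites the database row with `em ** 2` and the summed "distance" never depends on the database entry at all. The docstring also defines `tau` as a euclidean distance, while the code would compare a squared sum against it. A vectorized sketch of what was presumably intended (`nearest_identity` is an illustrative name):

    import numpy as np

    def nearest_identity(em, database, identities, tau=0.5):
        # em: (e,) query embedding; database: (d, e) stored embeddings
        dist = np.linalg.norm(em - database, axis=1)  # euclidean distance per row
        idx = int(np.argmin(dist))
        return (identities[idx], dist[idx]) if dist[idx] < tau else (None, None)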
{"blob_id": "5584496503813e833453ca9cc99e83c9b7ad70ad", "bodies": ["self.__videopath = videopath\nself.__savepath = savepath\nself.__resizeheight = resizeheight\nself.__resizewidth = resizewidth\nself.__frames = []", "if os.path.isdir(self.__videopath):\n for dirpath, dirnames, filenames in os.walk(self.__videopath):\n for filename in filenames:\n filepathandname = os.path.join(dirpath, filename)\n self.__extractkeyframe(filepathandname)", "if os.path.exists(filepathandname):\n videocapture = cv2.VideoCapture(filepathandname)\n if videocapture.isOpened():\n wholeframenum = int(videocapture.get(cv2.CAP_PROP_FRAME_COUNT))\n if wholeframenum < 2:\n print('the image you inputted has not enougth frames!')\n middleframenum = math.ceil(wholeframenum / 2)\n success, frame = videocapture.read()\n frame = cv2.resize(frame, (self.__resizewidth, self.__resizeheight))\n self.__frames = [frame]\n count = 0\n while success:\n count += 1\n success, frame = videocapture.read()\n if success:\n frame = cv2.resize(frame, (self.__resizewidth, self.__resizeheight))\n if count == middleframenum:\n self.__frames.append(frame)\n elif count == wholeframenum - 1:\n self.__frames.append(frame)\n if self.__frames is not None:\n for keyindex in range(len(self.__frames)):\n currentframe = self.__frames[keyindex]\n if os.path.isdir(self.__savepath):\n framename = os.path.abspath(self.__savepath) + os.sep + os.path.basename(os.path.dirname(filepathandname)) + '_' + os.path.basename(filepathandname).split('.')[0] + '_keyFrame_' + str(keyindex) + '.jpg'\n cv2.imwrite(framename, currentframe)\n else:\n print(' Please input the correct save path!')\nelse:\n print(' you inputted file is not existed!')"], "bodies_text": "<|body_start_0|>\n self.__videopath = videopath\n self.__savepath = savepath\n self.__resizeheight = resizeheight\n self.__resizewidth = resizewidth\n self.__frames = []\n<|end_body_0|>\n\n<|body_start_1|>\n if os.path.isdir(self.__videopath):\n for dirpath, dirnames, filenames in os.walk(self.__videopath):\n for filename in filenames:\n filepathandname = os.path.join(dirpath, filename)\n self.__extractkeyframe(filepathandname)\n<|end_body_1|>\n\n<|body_start_2|>\n if os.path.exists(filepathandname):\n videocapture = cv2.VideoCapture(filepathandname)\n if videocapture.isOpened():\n wholeframenum = int(videocapture.get(cv2.CAP_PROP_FRAME_COUNT))\n if wholeframenum < 2:\n print('the image you inputted has not enougth frames!')\n middleframenum = math.ceil(wholeframenum / 2)\n success, frame = videocapture.read()\n frame = cv2.resize(frame, (self.__resizewidth, self.__resizeheight))\n self.__frames = [frame]\n count = 0\n while success:\n count += 1\n success, frame = videocapture.read()\n if success:\n frame = cv2.resize(frame, (self.__resizewidth, self.__resizeheight))\n if count == middleframenum:\n self.__frames.append(frame)\n elif count == wholeframenum - 1:\n self.__frames.append(frame)\n if self.__frames is not None:\n for keyindex in range(len(self.__frames)):\n currentframe = self.__frames[keyindex]\n if os.path.isdir(self.__savepath):\n framename = os.path.abspath(self.__savepath) + os.sep + os.path.basename(os.path.dirname(filepathandname)) + '_' + os.path.basename(filepathandname).split('.')[0] + '_keyFrame_' + str(keyindex) + '.jpg'\n cv2.imwrite(framename, currentframe)\n else:\n print(' Please input the correct save path!')\n else:\n print(' you inputted file is not existed!')\n<|end_body_2|>\n", "class_docstring": "", "class_name": "KeyFrameExtractor", "detected_licenses": [], "format_version": 
"stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass KeyFrameExtractor:\n\n def __init__(self, videopath, savepath, resizeheight=240, resizewidth=320):\n \"\"\"初始化方法 :param videopath: 视频输入路径 :param savepath: 关键帧保存路径 :param resizeheight: 重置视频的大小的高 :param resizewidth: 重置视频的大小的宽\"\"\"\n <|body_0|>\n\n def keyframeextractor(self):\n \"\"\"遍历文件夹下所有的视频,进行关键帧提取 :return:\"\"\"\n <|body_1|>\n\n def __extractkeyframe(self, filepathandname):\n \"\"\"提取关键帧\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.__videopath = videopath\n self.__savepath = savepath\n self.__resizeheight = resizeheight\n self.__resizewidth = resizewidth\n self.__frames = []\n<|end_body_0|>\n\n<|body_start_1|>\n if os.path.isdir(self.__videopath):\n for dirpath, dirnames, filenames in os.walk(self.__videopath):\n for filename in filenames:\n filepathandname = os.path.join(dirpath, filename)\n self.__extractkeyframe(filepathandname)\n<|end_body_1|>\n\n<|body_start_2|>\n if os.path.exists(filepathandname):\n videocapture = cv2.VideoCapture(filepathandname)\n if videocapture.isOpened():\n wholeframenum = int(videocapture.get(cv2.CAP_PROP_FRAME_COUNT))\n if wholeframenum < 2:\n print('the image you inputted has not enougth frames!')\n middleframenum = math.ceil(wholeframenum / 2)\n success, frame = videocapture.read()\n frame = cv2.resize(frame, (self.__resizewidth, self.__resizeheight))\n self.__frames = [frame]\n count = 0\n while success:\n count += 1\n success, frame = videocapture.read()\n if success:\n frame = cv2.resize(frame, (self.__resizewidth, self.__resizeheight))\n if count == middleframenum:\n self.__frames.append(frame)\n elif count == wholeframenum - 1:\n self.__frames.append(frame)\n if self.__frames is not None:\n for keyindex in range(len(self.__frames)):\n currentframe = self.__frames[keyindex]\n if os.path.isdir(self.__savepath):\n framename = os.path.abspath(self.__savepath) + os.sep + os.path.basename(os.path.dirname(filepathandname)) + '_' + os.path.basename(filepathandname).split('.')[0] + '_keyFrame_' + str(keyindex) + '.jpg'\n cv2.imwrite(framename, currentframe)\n else:\n print(' Please input the correct save path!')\n else:\n print(' you inputted file is not existed!')\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000045", "length_bytes": 4295, "license_type": "no_license", "methods": [{"docstring": "初始化方法 :param videopath: 视频输入路径 :param savepath: 关键帧保存路径 :param resizeheight: 重置视频的大小的高 :param resizewidth: 重置视频的大小的宽", "name": "__init__", "signature": "def __init__(self, videopath, savepath, resizeheight=240, resizewidth=320)"}, {"docstring": "遍历文件夹下所有的视频,进行关键帧提取 :return:", "name": "keyframeextractor", "signature": "def keyframeextractor(self)"}, {"docstring": "提取关键帧", "name": "__extractkeyframe", "signature": "def __extractkeyframe(self, filepathandname)"}], "n_methods": 3, "prompt": "Implement the Python class `KeyFrameExtractor` described below.\n\nClass description:\nImplement the KeyFrameExtractor class.\n\nMethod signatures and docstrings:\n- def __init__(self, videopath, savepath, resizeheight=240, resizewidth=320): 初始化方法 :param videopath: 视频输入路径 :param savepath: 关键帧保存路径 :param resizeheight: 重置视频的大小的高 :param resizewidth: 重置视频的大小的宽\n- def keyframeextractor(self): 遍历文件夹下所有的视频,进行关键帧提取 :return:\n- def __extractkeyframe(self, filepathandname): 提取关键帧", "prompted_full_text": "Implement the Python class `KeyFrameExtractor` described below.\n\nClass description:\nImplement the KeyFrameExtractor class.\n\nMethod signatures and docstrings:\n- def 
__init__(self, videopath, savepath, resizeheight=240, resizewidth=320): 初始化方法 :param videopath: 视频输入路径 :param savepath: 关键帧保存路径 :param resizeheight: 重置视频的大小的高 :param resizewidth: 重置视频的大小的宽\n- def keyframeextractor(self): 遍历文件夹下所有的视频,进行关键帧提取 :return:\n- def __extractkeyframe(self, filepathandname): 提取关键帧\n\n<|skeleton|>\nclass KeyFrameExtractor:\n\n def __init__(self, videopath, savepath, resizeheight=240, resizewidth=320):\n \"\"\"初始化方法 :param videopath: 视频输入路径 :param savepath: 关键帧保存路径 :param resizeheight: 重置视频的大小的高 :param resizewidth: 重置视频的大小的宽\"\"\"\n <|body_0|>\n\n def keyframeextractor(self):\n \"\"\"遍历文件夹下所有的视频,进行关键帧提取 :return:\"\"\"\n <|body_1|>\n\n def __extractkeyframe(self, filepathandname):\n \"\"\"提取关键帧\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.__videopath = videopath\n self.__savepath = savepath\n self.__resizeheight = resizeheight\n self.__resizewidth = resizewidth\n self.__frames = []\n<|end_body_0|>\n\n<|body_start_1|>\n if os.path.isdir(self.__videopath):\n for dirpath, dirnames, filenames in os.walk(self.__videopath):\n for filename in filenames:\n filepathandname = os.path.join(dirpath, filename)\n self.__extractkeyframe(filepathandname)\n<|end_body_1|>\n\n<|body_start_2|>\n if os.path.exists(filepathandname):\n videocapture = cv2.VideoCapture(filepathandname)\n if videocapture.isOpened():\n wholeframenum = int(videocapture.get(cv2.CAP_PROP_FRAME_COUNT))\n if wholeframenum < 2:\n print('the image you inputted has not enougth frames!')\n middleframenum = math.ceil(wholeframenum / 2)\n success, frame = videocapture.read()\n frame = cv2.resize(frame, (self.__resizewidth, self.__resizeheight))\n self.__frames = [frame]\n count = 0\n while success:\n count += 1\n success, frame = videocapture.read()\n if success:\n frame = cv2.resize(frame, (self.__resizewidth, self.__resizeheight))\n if count == middleframenum:\n self.__frames.append(frame)\n elif count == wholeframenum - 1:\n self.__frames.append(frame)\n if self.__frames is not None:\n for keyindex in range(len(self.__frames)):\n currentframe = self.__frames[keyindex]\n if os.path.isdir(self.__savepath):\n framename = os.path.abspath(self.__savepath) + os.sep + os.path.basename(os.path.dirname(filepathandname)) + '_' + os.path.basename(filepathandname).split('.')[0] + '_keyFrame_' + str(keyindex) + '.jpg'\n cv2.imwrite(framename, currentframe)\n else:\n print(' Please input the correct save path!')\n else:\n print(' you inputted file is not existed!')\n<|end_body_2|>\n", "revision_id": "805ae46ab3a6585b89c5360e55f42108e4b66fd5", "skeleton": "<|skeleton|>\nclass KeyFrameExtractor:\n\n def __init__(self, videopath, savepath, resizeheight=240, resizewidth=320):\n \"\"\"初始化方法 :param videopath: 视频输入路径 :param savepath: 关键帧保存路径 :param resizeheight: 重置视频的大小的高 :param resizewidth: 重置视频的大小的宽\"\"\"\n <|body_0|>\n\n def keyframeextractor(self):\n \"\"\"遍历文件夹下所有的视频,进行关键帧提取 :return:\"\"\"\n <|body_1|>\n\n def __extractkeyframe(self, filepathandname):\n \"\"\"提取关键帧\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class KeyFrameExtractor:\n def __init__(self, videopath, savepath, resizeheight=240, resizewidth=320):\n \"\"\"初始化方法 :param videopath: 视频输入路径 :param savepath: 关键帧保存路径 :param resizeheight: 重置视频的大小的高 :param resizewidth: 重置视频的大小的宽\"\"\"\n self.__videopath = videopath\n self.__savepath = savepath\n self.__resizeheight = resizeheight\n self.__resizewidth = 
resizewidth\n self.__frames = []\n\n def keyframeextractor(self):\n \"\"\"遍历文件夹下所有的视频,进行关键帧提取 :return:\"\"\"\n if os.path.isdir(self.__videopath):\n for dirpath, dirnames, filenames in os.walk(self.__videopath):\n for filename in filenames:\n filepathandname = os.path.join(dirpath, filename)\n self.__extractkeyframe(filepathandname)\n\n def __extractkeyframe(self, filepathandname):\n \"\"\"提取关键帧\"\"\"\n if os.path.exists(filepathandname):\n videocapture = cv2.VideoCapture(filepathandname)\n if videocapture.isOpened():\n wholeframenum = int(videocapture.get(cv2.CAP_PROP_FRAME_COUNT))\n if wholeframenum < 2:\n print('the image you inputted has not enougth frames!')\n middleframenum = math.ceil(wholeframenum / 2)\n success, frame = videocapture.read()\n frame = cv2.resize(frame, (self.__resizewidth, self.__resizeheight))\n self.__frames = [frame]\n count = 0\n while success:\n count += 1\n success, frame = videocapture.read()\n if success:\n frame = cv2.resize(frame, (self.__resizewidth, self.__resizeheight))\n if count == middleframenum:\n self.__frames.append(frame)\n elif count == wholeframenum - 1:\n self.__frames.append(frame)\n if self.__frames is not None:\n for keyindex in range(len(self.__frames)):\n currentframe = self.__frames[keyindex]\n if os.path.isdir(self.__savepath):\n framename = os.path.abspath(self.__savepath) + os.sep + os.path.basename(os.path.dirname(filepathandname)) + '_' + os.path.basename(filepathandname).split('.')[0] + '_keyFrame_' + str(keyindex) + '.jpg'\n cv2.imwrite(framename, currentframe)\n else:\n print(' Please input the correct save path!')\n else:\n print(' you inputted file is not existed!')\n", "source": "the_stack_v2_python_sparse", "source_path": "KeyFrameExtractorSaveToHDFS.py", "source_repo": "SunBite/ProvincialProject", "split": "val", "star_events_count": 1}
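The `KeyFrameExtractor` record (its Chinese docstrings describe an init taking the video input path, the key-frame save path, and the resize height/width; a walker that extracts key frames from every video under a folder; and the per-file extraction itself) decodes every frame of each video just to keep the first, middle, and last ones. OpenCV can seek to a frame index instead; a sketch of the same first/middle/last selection via `CAP_PROP_POS_FRAMES` (`grab_key_frames` is an illustrative name, and frame-accurate seeking depends on the container/codec):

    import math
    import cv2

    def grab_key_frames(path, width=320, height=240):
        cap = cv2.VideoCapture(path)
        total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        frames = []
        if total >= 2:
            # Same indices the record keeps: first, middle, last
            for i in (0, math.ceil(total / 2), total - 1):
                cap.set(cv2.CAP_PROP_POS_FRAMES, i)  # jump instead of decoding everything
                ok, frame = cap.read()
                if ok:
                    frames.append(cv2.resize(frame, (width, height)))
        cap.release()
        return frames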
{"blob_id": "d55f0ade72d9745fa73ebe8221bb6a5fb76f540b", "bodies": ["super().__init__(graphical_params, game_params)\nself._player = player\nself._mvt = []", "if len(self._mvt) == 0:\n mvts = self._player(self.get_current_game_state())\n if type(mvts) is Controls:\n self._mvt = [mvts]\n else:\n self._mvt = mvts\nif len(self._mvt) != 0:\n mvt = self._mvt.pop(0)\nelse:\n mvt = Controls.NOTHING\nself.tick(mvt)"], "bodies_text": "<|body_start_0|>\n super().__init__(graphical_params, game_params)\n self._player = player\n self._mvt = []\n<|end_body_0|>\n\n<|body_start_1|>\n if len(self._mvt) == 0:\n mvts = self._player(self.get_current_game_state())\n if type(mvts) is Controls:\n self._mvt = [mvts]\n else:\n self._mvt = mvts\n if len(self._mvt) != 0:\n mvt = self._mvt.pop(0)\n else:\n mvt = Controls.NOTHING\n self.tick(mvt)\n<|end_body_1|>\n", "class_docstring": "Binds an external player (or set of command) to the tetris graphics class", "class_name": "TetrisGraphicsBinder", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TetrisGraphicsBinder:\n \"\"\"Binds an external player (or set of command) to the tetris graphics class\"\"\"\n\n def __init__(self, player, graphical_params, game_params):\n \"\"\"Ctor Parameters ---------- player: function A function that takes the current board as parameters and returns the corresponding action (Controls) to perform graphical_params: GraphicalParametersEu The display parameters for the game. See src.tetris.graphics.GraphicalParameters for more information about those parameters. game_params: GameParameters Parameters for the game. See class documentation for more details\"\"\"\n <|body_0|>\n\n def on_update(self, events=''):\n \"\"\"Overrides on_update method, ignoring second parameter Parameters ---------- events: any Not used, just here for the sake of compatibility with main class\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(graphical_params, game_params)\n self._player = player\n self._mvt = []\n<|end_body_0|>\n\n<|body_start_1|>\n if len(self._mvt) == 0:\n mvts = self._player(self.get_current_game_state())\n if type(mvts) is Controls:\n self._mvt = [mvts]\n else:\n self._mvt = mvts\n if len(self._mvt) != 0:\n mvt = self._mvt.pop(0)\n else:\n mvt = Controls.NOTHING\n self.tick(mvt)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000046", "length_bytes": 1887, "license_type": "no_license", "methods": [{"docstring": "Ctor Parameters ---------- player: function A function that takes the current board as parameters and returns the corresponding action (Controls) to perform graphical_params: GraphicalParametersEu The display parameters for the game. See src.tetris.graphics.GraphicalParameters for more information about those parameters. game_params: GameParameters Parameters for the game. 
See class documentation for more details", "name": "__init__", "signature": "def __init__(self, player, graphical_params, game_params)"}, {"docstring": "Overrides on_update method, ignoring second parameter Parameters ---------- events: any Not used, just here for the sake of compatibility with main class", "name": "on_update", "signature": "def on_update(self, events='')"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_013069", "prompt": "Implement the Python class `TetrisGraphicsBinder` described below.\n\nClass description:\nBinds an external player (or set of command) to the tetris graphics class\n\nMethod signatures and docstrings:\n- def __init__(self, player, graphical_params, game_params): Ctor Parameters ---------- player: function A function that takes the current board as parameters and returns the corresponding action (Controls) to perform graphical_params: GraphicalParametersEu The display parameters for the game. See src.tetris.graphics.GraphicalParameters for more information about those parameters. game_params: GameParameters Parameters for the game. See class documentation for more details\n- def on_update(self, events=''): Overrides on_update method, ignoring second parameter Parameters ---------- events: any Not used, just here for the sake of compatibility with main class", "prompted_full_text": "Implement the Python class `TetrisGraphicsBinder` described below.\n\nClass description:\nBinds an external player (or set of command) to the tetris graphics class\n\nMethod signatures and docstrings:\n- def __init__(self, player, graphical_params, game_params): Ctor Parameters ---------- player: function A function that takes the current board as parameters and returns the corresponding action (Controls) to perform graphical_params: GraphicalParametersEu The display parameters for the game. See src.tetris.graphics.GraphicalParameters for more information about those parameters. game_params: GameParameters Parameters for the game. See class documentation for more details\n- def on_update(self, events=''): Overrides on_update method, ignoring second parameter Parameters ---------- events: any Not used, just here for the sake of compatibility with main class\n\n<|skeleton|>\nclass TetrisGraphicsBinder:\n \"\"\"Binds an external player (or set of command) to the tetris graphics class\"\"\"\n\n def __init__(self, player, graphical_params, game_params):\n \"\"\"Ctor Parameters ---------- player: function A function that takes the current board as parameters and returns the corresponding action (Controls) to perform graphical_params: GraphicalParametersEu The display parameters for the game. See src.tetris.graphics.GraphicalParameters for more information about those parameters. game_params: GameParameters Parameters for the game. 
See class documentation for more details\"\"\"\n <|body_0|>\n\n def on_update(self, events=''):\n \"\"\"Overrides on_update method, ignoring second parameter Parameters ---------- events: any Not used, just here for the sake of compatibility with main class\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(graphical_params, game_params)\n self._player = player\n self._mvt = []\n<|end_body_0|>\n\n<|body_start_1|>\n if len(self._mvt) == 0:\n mvts = self._player(self.get_current_game_state())\n if type(mvts) is Controls:\n self._mvt = [mvts]\n else:\n self._mvt = mvts\n if len(self._mvt) != 0:\n mvt = self._mvt.pop(0)\n else:\n mvt = Controls.NOTHING\n self.tick(mvt)\n<|end_body_1|>\n", "revision_id": "2202df3bd490aa4e006655b45fc0a854d7814a59", "skeleton": "<|skeleton|>\nclass TetrisGraphicsBinder:\n \"\"\"Binds an external player (or set of command) to the tetris graphics class\"\"\"\n\n def __init__(self, player, graphical_params, game_params):\n \"\"\"Ctor Parameters ---------- player: function A function that takes the current board as parameters and returns the corresponding action (Controls) to perform graphical_params: GraphicalParametersEu The display parameters for the game. See src.tetris.graphics.GraphicalParameters for more information about those parameters. game_params: GameParameters Parameters for the game. See class documentation for more details\"\"\"\n <|body_0|>\n\n def on_update(self, events=''):\n \"\"\"Overrides on_update method, ignoring second parameter Parameters ---------- events: any Not used, just here for the sake of compatibility with main class\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TetrisGraphicsBinder:\n \"\"\"Binds an external player (or set of command) to the tetris graphics class\"\"\"\n\n def __init__(self, player, graphical_params, game_params):\n \"\"\"Ctor Parameters ---------- player: function A function that takes the current board as parameters and returns the corresponding action (Controls) to perform graphical_params: GraphicalParametersEu The display parameters for the game. See src.tetris.graphics.GraphicalParameters for more information about those parameters. game_params: GameParameters Parameters for the game. See class documentation for more details\"\"\"\n super().__init__(graphical_params, game_params)\n self._player = player\n self._mvt = []\n\n def on_update(self, events=''):\n \"\"\"Overrides on_update method, ignoring second parameter Parameters ---------- events: any Not used, just here for the sake of compatibility with main class\"\"\"\n if len(self._mvt) == 0:\n mvts = self._player(self.get_current_game_state())\n if type(mvts) is Controls:\n self._mvt = [mvts]\n else:\n self._mvt = mvts\n if len(self._mvt) != 0:\n mvt = self._mvt.pop(0)\n else:\n mvt = Controls.NOTHING\n self.tick(mvt)\n", "source": "the_stack_v2_python_sparse", "source_path": "src/tetris/graphics/TetrisGraphicsBinder.py", "source_repo": "MohamedGassem/ai_for_tetris", "split": "val", "star_events_count": 0}
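Per the TetrisGraphicsBinder record above, on_update() accepts either a single Controls value or a list of them from the player callback and queues the list, popping one move per call. A sketch of a conforming player; Controls, the graphics base class, and both parameter objects come from the repo's src.tetris package and are not shown in the record, and member names other than Controls.NOTHING are hypothetical:

    # Hypothetical player callback: inspects the game state and returns either
    # one Controls value or a list of Controls to be played in order.
    def scripted_player(game_state):
        return [Controls.LEFT, Controls.ROTATE]  # hypothetical member names

    binder = TetrisGraphicsBinder(scripted_player, graphical_params, game_params)
    binder.on_update()  # fills the queue, ticks with Controls.LEFT
    binder.on_update()  # ticks with Controls.ROTATE; the next call polls the player again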
{"blob_id": "50d6549c16564cfa90fe7757a6b328154f41d339", "bodies": ["if sum_ <= 0:\n raise ValueError('Sum must be positive.')\nif size < 2:\n raise ValueError('Size must be greater than 1.')\nself.dp = np.zeros((sum_ + 1, size + 1), dtype=object)\nself.combs = self.get_combinations(sum_, size)", "if self.dp[sum][n]:\n return self.dp[sum][n]\ntmp_list = []\nif n == 1:\n tmp_list = [[sum]]\nelif sum == 0:\n tmp_list = [[0] * n]\nelse:\n for i in range(sum, -1, -1):\n for l in self.get_combinations(sum - i, n - 1):\n tmp_list.append([i] + l)\nself.dp[sum][n] = tmp_list\nreturn tmp_list"], "bodies_text": "<|body_start_0|>\n if sum_ <= 0:\n raise ValueError('Sum must be positive.')\n if size < 2:\n raise ValueError('Size must be greater than 1.')\n self.dp = np.zeros((sum_ + 1, size + 1), dtype=object)\n self.combs = self.get_combinations(sum_, size)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.dp[sum][n]:\n return self.dp[sum][n]\n tmp_list = []\n if n == 1:\n tmp_list = [[sum]]\n elif sum == 0:\n tmp_list = [[0] * n]\n else:\n for i in range(sum, -1, -1):\n for l in self.get_combinations(sum - i, n - 1):\n tmp_list.append([i] + l)\n self.dp[sum][n] = tmp_list\n return tmp_list\n<|end_body_1|>\n", "class_docstring": "Class to generate all combinations of non-negative integers that have equal sum.", "class_name": "EqualSumCombinations", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass EqualSumCombinations:\n \"\"\"Class to generate all combinations of non-negative integers that have equal sum.\"\"\"\n\n def __init__(self, sum_, size):\n \"\"\"Constructor to initialize the variables. Parameters ---------- sum_ : int Desired sum (must be greater than 0). size : int Number of integers (must be greater than 1). Raises ------ ValueError If sum is less than 2.\"\"\"\n <|body_0|>\n\n def get_combinations(self, sum, n):\n \"\"\"A recursive function to generate the list of all non-negative integer combinations of a given size that have a given sum. Parameters ---------- sum : int Desired sum. n : int Desired size. Returns ------- type : array-like A list containing the lists of combinations.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if sum_ <= 0:\n raise ValueError('Sum must be positive.')\n if size < 2:\n raise ValueError('Size must be greater than 1.')\n self.dp = np.zeros((sum_ + 1, size + 1), dtype=object)\n self.combs = self.get_combinations(sum_, size)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.dp[sum][n]:\n return self.dp[sum][n]\n tmp_list = []\n if n == 1:\n tmp_list = [[sum]]\n elif sum == 0:\n tmp_list = [[0] * n]\n else:\n for i in range(sum, -1, -1):\n for l in self.get_combinations(sum - i, n - 1):\n tmp_list.append([i] + l)\n self.dp[sum][n] = tmp_list\n return tmp_list\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000047", "length_bytes": 1889, "license_type": "permissive", "methods": [{"docstring": "Constructor to initialize the variables. Parameters ---------- sum_ : int Desired sum (must be greater than 0). size : int Number of integers (must be greater than 1). Raises ------ ValueError If sum is less than 2.", "name": "__init__", "signature": "def __init__(self, sum_, size)"}, {"docstring": "A recursive function to generate the list of all non-negative integer combinations of a given size that have a given sum. Parameters ---------- sum : int Desired sum. n : int Desired size. 
Returns ------- type : array-like A list containing the lists of combinations.", "name": "get_combinations", "signature": "def get_combinations(self, sum, n)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_002947", "prompt": "Implement the Python class `EqualSumCombinations` described below.\n\nClass description:\nClass to generate all combinations of non-negative integers that have equal sum.\n\nMethod signatures and docstrings:\n- def __init__(self, sum_, size): Constructor to initialize the variables. Parameters ---------- sum_ : int Desired sum (must be greater than 0). size : int Number of integers (must be greater than 1). Raises ------ ValueError If sum is less than 2.\n- def get_combinations(self, sum, n): A recursive function to generate the list of all non-negative integer combinations of a given size that have a given sum. Parameters ---------- sum : int Desired sum. n : int Desired size. Returns ------- type : array-like A list containing the lists of combinations.", "prompted_full_text": "Implement the Python class `EqualSumCombinations` described below.\n\nClass description:\nClass to generate all combinations of non-negative integers that have equal sum.\n\nMethod signatures and docstrings:\n- def __init__(self, sum_, size): Constructor to initialize the variables. Parameters ---------- sum_ : int Desired sum (must be greater than 0). size : int Number of integers (must be greater than 1). Raises ------ ValueError If sum is less than 2.\n- def get_combinations(self, sum, n): A recursive function to generate the list of all non-negative integer combinations of a given size that have a given sum. Parameters ---------- sum : int Desired sum. n : int Desired size. Returns ------- type : array-like A list containing the lists of combinations.\n\n<|skeleton|>\nclass EqualSumCombinations:\n \"\"\"Class to generate all combinations of non-negative integers that have equal sum.\"\"\"\n\n def __init__(self, sum_, size):\n \"\"\"Constructor to initialize the variables. Parameters ---------- sum_ : int Desired sum (must be greater than 0). size : int Number of integers (must be greater than 1). Raises ------ ValueError If sum is less than 2.\"\"\"\n <|body_0|>\n\n def get_combinations(self, sum, n):\n \"\"\"A recursive function to generate the list of all non-negative integer combinations of a given size that have a given sum. Parameters ---------- sum : int Desired sum. n : int Desired size. Returns ------- type : array-like A list containing the lists of combinations.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if sum_ <= 0:\n raise ValueError('Sum must be positive.')\n if size < 2:\n raise ValueError('Size must be greater than 1.')\n self.dp = np.zeros((sum_ + 1, size + 1), dtype=object)\n self.combs = self.get_combinations(sum_, size)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.dp[sum][n]:\n return self.dp[sum][n]\n tmp_list = []\n if n == 1:\n tmp_list = [[sum]]\n elif sum == 0:\n tmp_list = [[0] * n]\n else:\n for i in range(sum, -1, -1):\n for l in self.get_combinations(sum - i, n - 1):\n tmp_list.append([i] + l)\n self.dp[sum][n] = tmp_list\n return tmp_list\n<|end_body_1|>\n", "revision_id": "d511d91500f757de46162d2f8331e353a68de6a0", "skeleton": "<|skeleton|>\nclass EqualSumCombinations:\n \"\"\"Class to generate all combinations of non-negative integers that have equal sum.\"\"\"\n\n def __init__(self, sum_, size):\n \"\"\"Constructor to initialize the variables. Parameters ---------- sum_ : int Desired sum (must be greater than 0). 
size : int Number of integers (must be greater than 1). Raises ------ ValueError If sum is less than 2.\"\"\"\n <|body_0|>\n\n def get_combinations(self, sum, n):\n \"\"\"A recursive function to generate the list of all non-negative integer combinations of a given size that have a given sum. Parameters ---------- sum : int Desired sum. n : int Desired size. Returns ------- type : array-like A list containing the lists of combinations.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class EqualSumCombinations:\n \"\"\"Class to generate all combinations of non-negative integers that have equal sum.\"\"\"\n\n def __init__(self, sum_, size):\n \"\"\"Constructor to initialize the variables. Parameters ---------- sum_ : int Desired sum (must be greater than 0). size : int Number of integers (must be greater than 1). Raises ------ ValueError If sum is less than 2.\"\"\"\n if sum_ <= 0:\n raise ValueError('Sum must be positive.')\n if size < 2:\n raise ValueError('Size must be greater than 1.')\n self.dp = np.zeros((sum_ + 1, size + 1), dtype=object)\n self.combs = self.get_combinations(sum_, size)\n\n def get_combinations(self, sum, n):\n \"\"\"A recursive function to generate the list of all non-negative integer combinations of a given size that have a given sum. Parameters ---------- sum : int Desired sum. n : int Desired size. Returns ------- type : array-like A list containing the lists of combinations.\"\"\"\n if self.dp[sum][n]:\n return self.dp[sum][n]\n tmp_list = []\n if n == 1:\n tmp_list = [[sum]]\n elif sum == 0:\n tmp_list = [[0] * n]\n else:\n for i in range(sum, -1, -1):\n for l in self.get_combinations(sum - i, n - 1):\n tmp_list.append([i] + l)\n self.dp[sum][n] = tmp_list\n return tmp_list\n", "source": "the_stack_v2_python_sparse", "source_path": "chemml/chem/magpie_python/utility/EqualSumCombinations.py", "source_repo": "hachmannlab/chemml", "split": "val", "star_events_count": 140}
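The EqualSumCombinations bodies above fully determine the output, so a concrete run can be checked by hand. A short sketch assuming the module-level `import numpy as np` the constructor relies on:

    import numpy as np  # assumed import; the constructor builds an object-dtype memo table

    ec = EqualSumCombinations(2, 2)
    print(ec.combs)  # [[2, 0], [1, 1], [0, 2]] -- every pair of non-negative ints summing to 2
    # Intermediate results are memoized in ec.dp, so repeated calls to
    # ec.get_combinations(s, n) for s <= 2, n <= 2 are constant time.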
{"blob_id": "c271a343c58153dda279e8a017bab3a959ced606", "bodies": ["self.public_ip = public_ip\nself.uplink = uplink\nself.port_rules = port_rules", "if dictionary is None:\n return None\npublic_ip = dictionary.get('publicIp')\nuplink = dictionary.get('uplink')\nport_rules = None\nif dictionary.get('portRules') != None:\n port_rules = list()\n for structure in dictionary.get('portRules'):\n port_rules.append(meraki_sdk.models.port_rule_model.PortRuleModel.from_dictionary(structure))\nreturn cls(public_ip, uplink, port_rules)"], "bodies_text": "<|body_start_0|>\n self.public_ip = public_ip\n self.uplink = uplink\n self.port_rules = port_rules\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n public_ip = dictionary.get('publicIp')\n uplink = dictionary.get('uplink')\n port_rules = None\n if dictionary.get('portRules') != None:\n port_rules = list()\n for structure in dictionary.get('portRules'):\n port_rules.append(meraki_sdk.models.port_rule_model.PortRuleModel.from_dictionary(structure))\n return cls(public_ip, uplink, port_rules)\n<|end_body_1|>\n", "class_docstring": "Implementation of the 'Rule7' model. TODO: type model description here. Attributes: public_ip (string): The IP address that will be used to access the internal resource from the WAN uplink (Uplink1Enum): The physical WAN interface on which the traffic will arrive ('internet1' or, if available, 'internet2') port_rules (list of PortRuleModel): An array of associated forwarding rules", "class_name": "Rule7Model", "detected_licenses": ["MIT", "Python-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Rule7Model:\n \"\"\"Implementation of the 'Rule7' model. TODO: type model description here. Attributes: public_ip (string): The IP address that will be used to access the internal resource from the WAN uplink (Uplink1Enum): The physical WAN interface on which the traffic will arrive ('internet1' or, if available, 'internet2') port_rules (list of PortRuleModel): An array of associated forwarding rules\"\"\"\n\n def __init__(self, public_ip=None, uplink=None, port_rules=None):\n \"\"\"Constructor for the Rule7Model class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.public_ip = public_ip\n self.uplink = uplink\n self.port_rules = port_rules\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n public_ip = dictionary.get('publicIp')\n uplink = dictionary.get('uplink')\n port_rules = None\n if dictionary.get('portRules') != None:\n port_rules = list()\n for structure in dictionary.get('portRules'):\n port_rules.append(meraki_sdk.models.port_rule_model.PortRuleModel.from_dictionary(structure))\n return cls(public_ip, uplink, port_rules)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000048", "length_bytes": 2338, "license_type": "permissive", "methods": [{"docstring": "Constructor for the Rule7Model class", "name": "__init__", "signature": "def __init__(self, public_ip=None, uplink=None, port_rules=None)"}, {"docstring": "Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "name": "from_dictionary", "signature": "def from_dictionary(cls, dictionary)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_013154", "prompt": "Implement the Python class `Rule7Model` described below.\n\nClass description:\nImplementation of the 'Rule7' model. TODO: type model description here. Attributes: public_ip (string): The IP address that will be used to access the internal resource from the WAN uplink (Uplink1Enum): The physical WAN interface on which the traffic will arrive ('internet1' or, if available, 'internet2') port_rules (list of PortRuleModel): An array of associated forwarding rules\n\nMethod signatures and docstrings:\n- def __init__(self, public_ip=None, uplink=None, port_rules=None): Constructor for the Rule7Model class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "prompted_full_text": "Implement the Python class `Rule7Model` described below.\n\nClass description:\nImplementation of the 'Rule7' model. TODO: type model description here. Attributes: public_ip (string): The IP address that will be used to access the internal resource from the WAN uplink (Uplink1Enum): The physical WAN interface on which the traffic will arrive ('internet1' or, if available, 'internet2') port_rules (list of PortRuleModel): An array of associated forwarding rules\n\nMethod signatures and docstrings:\n- def __init__(self, public_ip=None, uplink=None, port_rules=None): Constructor for the Rule7Model class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\n\n<|skeleton|>\nclass Rule7Model:\n \"\"\"Implementation of the 'Rule7' model. TODO: type model description here. 
Attributes: public_ip (string): The IP address that will be used to access the internal resource from the WAN uplink (Uplink1Enum): The physical WAN interface on which the traffic will arrive ('internet1' or, if available, 'internet2') port_rules (list of PortRuleModel): An array of associated forwarding rules\"\"\"\n\n def __init__(self, public_ip=None, uplink=None, port_rules=None):\n \"\"\"Constructor for the Rule7Model class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.public_ip = public_ip\n self.uplink = uplink\n self.port_rules = port_rules\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n public_ip = dictionary.get('publicIp')\n uplink = dictionary.get('uplink')\n port_rules = None\n if dictionary.get('portRules') != None:\n port_rules = list()\n for structure in dictionary.get('portRules'):\n port_rules.append(meraki_sdk.models.port_rule_model.PortRuleModel.from_dictionary(structure))\n return cls(public_ip, uplink, port_rules)\n<|end_body_1|>\n", "revision_id": "9894089eb013318243ae48869cc5130eb37f80c0", "skeleton": "<|skeleton|>\nclass Rule7Model:\n \"\"\"Implementation of the 'Rule7' model. TODO: type model description here. Attributes: public_ip (string): The IP address that will be used to access the internal resource from the WAN uplink (Uplink1Enum): The physical WAN interface on which the traffic will arrive ('internet1' or, if available, 'internet2') port_rules (list of PortRuleModel): An array of associated forwarding rules\"\"\"\n\n def __init__(self, public_ip=None, uplink=None, port_rules=None):\n \"\"\"Constructor for the Rule7Model class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Rule7Model:\n \"\"\"Implementation of the 'Rule7' model. TODO: type model description here. Attributes: public_ip (string): The IP address that will be used to access the internal resource from the WAN uplink (Uplink1Enum): The physical WAN interface on which the traffic will arrive ('internet1' or, if available, 'internet2') port_rules (list of PortRuleModel): An array of associated forwarding rules\"\"\"\n\n def __init__(self, public_ip=None, uplink=None, port_rules=None):\n \"\"\"Constructor for the Rule7Model class\"\"\"\n self.public_ip = public_ip\n self.uplink = uplink\n self.port_rules = port_rules\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n if dictionary is None:\n return None\n public_ip = dictionary.get('publicIp')\n uplink = dictionary.get('uplink')\n port_rules = None\n if dictionary.get('portRules') != None:\n port_rules = list()\n for structure in dictionary.get('portRules'):\n port_rules.append(meraki_sdk.models.port_rule_model.PortRuleModel.from_dictionary(structure))\n return cls(public_ip, uplink, port_rules)\n", "source": "the_stack_v2_python_sparse", "source_path": "meraki_sdk/models/rule_7_model.py", "source_repo": "RaulCatalano/meraki-python-sdk", "split": "val", "star_events_count": 1}
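In the Rule7Model record, from_dictionary takes cls as its first parameter, so it is presumably a @classmethod in the upstream meraki_sdk source even though the skeleton omits the decorator. A usage sketch under that assumption, with a made-up payload using the camelCase keys the body reads:

    # Made-up API payload; 'portRules' is deliberately absent.
    payload = {'publicIp': '203.0.113.7', 'uplink': 'internet1'}
    rule = Rule7Model.from_dictionary(payload)  # assumes @classmethod upstream
    print(rule.public_ip, rule.uplink, rule.port_rules)  # 203.0.113.7 internet1 None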
{"blob_id": "57f7e866063648c135fbc3b3d1c1ffdfc6556a8d", "bodies": ["self.arr_voxels = arr_voxels\nself.arr_xyz = arr_xyz\nself.subj_name = subj_name\nself.cls = cls", "arr_voxels = np.genfromtxt(file_voxels, dtype=np.float, delimiter=' ')\narr_xyz = np.genfromtxt(file_xyz, dtype=np.float, delimiter=' ')\nreturn cls(arr_voxels, arr_xyz)"], "bodies_text": "<|body_start_0|>\n self.arr_voxels = arr_voxels\n self.arr_xyz = arr_xyz\n self.subj_name = subj_name\n self.cls = cls\n<|end_body_0|>\n\n<|body_start_1|>\n arr_voxels = np.genfromtxt(file_voxels, dtype=np.float, delimiter=' ')\n arr_xyz = np.genfromtxt(file_xyz, dtype=np.float, delimiter=' ')\n return cls(arr_voxels, arr_xyz)\n<|end_body_1|>\n", "class_docstring": "Intermediate representation for all kinds of brain data. Notes ----- notes here...", "class_name": "IntermRep", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass IntermRep:\n \"\"\"Intermediate representation for all kinds of brain data. Notes ----- notes here...\"\"\"\n\n def __init__(self, arr_voxels, arr_xyz, subj_name=None, cls=None):\n \"\"\"The intermediate representation is based on two arrays. arr_voxels: containing a list of voxels arr_xyz: containing the x,y,z coordinates for each voxel.\"\"\"\n <|body_0|>\n\n def load_from_file(cls, file_voxels, file_xyz):\n \"\"\"Creates an IntermRep object from a file: @file_voxels: A file with the time series. Rows are voxels, and columns are time points. @file_xyz: files are the corresponding x, y, z coordinates of each voxel.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.arr_voxels = arr_voxels\n self.arr_xyz = arr_xyz\n self.subj_name = subj_name\n self.cls = cls\n<|end_body_0|>\n\n<|body_start_1|>\n arr_voxels = np.genfromtxt(file_voxels, dtype=np.float, delimiter=' ')\n arr_xyz = np.genfromtxt(file_xyz, dtype=np.float, delimiter=' ')\n return cls(arr_voxels, arr_xyz)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000049", "length_bytes": 1291, "license_type": "no_license", "methods": [{"docstring": "The intermediate representation is based on two arrays. arr_voxels: containing a list of voxels arr_xyz: containing the x,y,z coordinates for each voxel.", "name": "__init__", "signature": "def __init__(self, arr_voxels, arr_xyz, subj_name=None, cls=None)"}, {"docstring": "Creates an IntermRep object from a file: @file_voxels: A file with the time series. Rows are voxels, and columns are time points. @file_xyz: files are the corresponding x, y, z coordinates of each voxel.", "name": "load_from_file", "signature": "def load_from_file(cls, file_voxels, file_xyz)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_043515", "prompt": "Implement the Python class `IntermRep` described below.\n\nClass description:\nIntermediate representation for all kinds of brain data. Notes ----- notes here...\n\nMethod signatures and docstrings:\n- def __init__(self, arr_voxels, arr_xyz, subj_name=None, cls=None): The intermediate representation is based on two arrays. arr_voxels: containing a list of voxels arr_xyz: containing the x,y,z coordinates for each voxel.\n- def load_from_file(cls, file_voxels, file_xyz): Creates an IntermRep object from a file: @file_voxels: A file with the time series. Rows are voxels, and columns are time points. 
@file_xyz: files are the corresponding x, y, z coordinates of each voxel.", "prompted_full_text": "Implement the Python class `IntermRep` described below.\n\nClass description:\nIntermediate representation for all kinds of brain data. Notes ----- notes here...\n\nMethod signatures and docstrings:\n- def __init__(self, arr_voxels, arr_xyz, subj_name=None, cls=None): The intermediate representation is based on two arrays. arr_voxels: containing a list of voxels arr_xyz: containing the x,y,z coordinates for each voxel.\n- def load_from_file(cls, file_voxels, file_xyz): Creates an IntermRep object from a file: @file_voxels: A file with the time series. Rows are voxels, and columns are time points. @file_xyz: files are the corresponding x, y, z coordinates of each voxel.\n\n<|skeleton|>\nclass IntermRep:\n \"\"\"Intermediate representation for all kinds of brain data. Notes ----- notes here...\"\"\"\n\n def __init__(self, arr_voxels, arr_xyz, subj_name=None, cls=None):\n \"\"\"The intermediate representation is based on two arrays. arr_voxels: containing a list of voxels arr_xyz: containing the x,y,z coordinates for each voxel.\"\"\"\n <|body_0|>\n\n def load_from_file(cls, file_voxels, file_xyz):\n \"\"\"Creates an IntermRep object from a file: @file_voxels: A file with the time series. Rows are voxels, and columns are time points. @file_xyz: files are the corresponding x, y, z coordinates of each voxel.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.arr_voxels = arr_voxels\n self.arr_xyz = arr_xyz\n self.subj_name = subj_name\n self.cls = cls\n<|end_body_0|>\n\n<|body_start_1|>\n arr_voxels = np.genfromtxt(file_voxels, dtype=np.float, delimiter=' ')\n arr_xyz = np.genfromtxt(file_xyz, dtype=np.float, delimiter=' ')\n return cls(arr_voxels, arr_xyz)\n<|end_body_1|>\n", "revision_id": "de6a748c4ebcf56daa834e353b8540e1f219732d", "skeleton": "<|skeleton|>\nclass IntermRep:\n \"\"\"Intermediate representation for all kinds of brain data. Notes ----- notes here...\"\"\"\n\n def __init__(self, arr_voxels, arr_xyz, subj_name=None, cls=None):\n \"\"\"The intermediate representation is based on two arrays. arr_voxels: containing a list of voxels arr_xyz: containing the x,y,z coordinates for each voxel.\"\"\"\n <|body_0|>\n\n def load_from_file(cls, file_voxels, file_xyz):\n \"\"\"Creates an IntermRep object from a file: @file_voxels: A file with the time series. Rows are voxels, and columns are time points. @file_xyz: files are the corresponding x, y, z coordinates of each voxel.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class IntermRep:\n \"\"\"Intermediate representation for all kinds of brain data. Notes ----- notes here...\"\"\"\n\n def __init__(self, arr_voxels, arr_xyz, subj_name=None, cls=None):\n \"\"\"The intermediate representation is based on two arrays. arr_voxels: containing a list of voxels arr_xyz: containing the x,y,z coordinates for each voxel.\"\"\"\n self.arr_voxels = arr_voxels\n self.arr_xyz = arr_xyz\n self.subj_name = subj_name\n self.cls = cls\n\n def load_from_file(cls, file_voxels, file_xyz):\n \"\"\"Creates an IntermRep object from a file: @file_voxels: A file with the time series. Rows are voxels, and columns are time points. 
@file_xyz: files are the corresponding x, y, z coordinates of each voxel.\"\"\"\n arr_voxels = np.genfromtxt(file_voxels, dtype=np.float, delimiter=' ')\n arr_xyz = np.genfromtxt(file_xyz, dtype=np.float, delimiter=' ')\n return cls(arr_voxels, arr_xyz)\n", "source": "the_stack_v2_python_sparse", "source_path": "IntermRepresentation/IntermRepresentation.py", "source_repo": "svegapons/PyBDGK", "split": "val", "star_events_count": 5}
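IntermRep's load_from_file likewise takes cls first and is presumably a @classmethod upstream. Note the stored bodies pass dtype=np.float, an alias NumPy deprecated in 1.20 and removed in 1.24, so this sketch assumes an older NumPy (or np.float patched to float); the file names are hypothetical whitespace-delimited inputs with one row per voxel:

    import numpy as np  # assumed module-level import

    rep = IntermRep.load_from_file('voxels.txt', 'xyz.txt')  # assumes @classmethod
    print(rep.arr_voxels.shape, rep.arr_xyz.shape)  # time series vs. coordinates
    print(rep.subj_name, rep.cls)  # both None -- load_from_file never sets them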
{"blob_id": "573233193e49e1872caf77f3c8192e8dc27eeee3", "bodies": ["if x is None:\n n = len(y)\n model = match\nelse:\n x = asfactor(x)\n n = x._cellsize()\n if isinstance(n, dict):\n raise NotImplementedError('SEM for unequal cell sizes')\n model = x if match is None else x + match\nsem = residual_mean_square(y, model)\nsem /= n\nnp.sqrt(sem, sem)\nself.n = n\nself.model = model\nself.sem = sem", "if self.model is None:\n df = self.n - 1\nelif isinstance(self.model, Model):\n df = self.model.df_error\nelse:\n df = Model(self.model).df_error\nreturn self.sem * scipy.stats.t.isf((1 - confidence) / 2, df)"], "bodies_text": "<|body_start_0|>\n if x is None:\n n = len(y)\n model = match\n else:\n x = asfactor(x)\n n = x._cellsize()\n if isinstance(n, dict):\n raise NotImplementedError('SEM for unequal cell sizes')\n model = x if match is None else x + match\n sem = residual_mean_square(y, model)\n sem /= n\n np.sqrt(sem, sem)\n self.n = n\n self.model = model\n self.sem = sem\n<|end_body_0|>\n\n<|body_start_1|>\n if self.model is None:\n df = self.n - 1\n elif isinstance(self.model, Model):\n df = self.model.df_error\n else:\n df = Model(self.model).df_error\n return self.sem * scipy.stats.t.isf((1 - confidence) / 2, df)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "SEM", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SEM:\n\n def __init__(self, y, x=None, match=None):\n \"\"\"Standard error of the mean (SEM) Parameters ---------- y : array (n, ...) Data, first dimension reflecting cases. x : Categorial Categorial predictor for using pooled variance. match : Categorial Within-subject SEM. Notes ----- See Loftus and Masson (1994).\"\"\"\n <|body_0|>\n\n def ci(self, confidence):\n \"\"\"Confidence interval based on the inverse t-test Parameters ---------- confidence : scalar Confidence in the interval (i.e., .95 for 95% CI). Returns ------- ci : array [...] Confidence interval (i.e., the mean of y lies within m +- ci with the specified confidence). Notes ----- See ``_\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if x is None:\n n = len(y)\n model = match\n else:\n x = asfactor(x)\n n = x._cellsize()\n if isinstance(n, dict):\n raise NotImplementedError('SEM for unequal cell sizes')\n model = x if match is None else x + match\n sem = residual_mean_square(y, model)\n sem /= n\n np.sqrt(sem, sem)\n self.n = n\n self.model = model\n self.sem = sem\n<|end_body_0|>\n\n<|body_start_1|>\n if self.model is None:\n df = self.n - 1\n elif isinstance(self.model, Model):\n df = self.model.df_error\n else:\n df = Model(self.model).df_error\n return self.sem * scipy.stats.t.isf((1 - confidence) / 2, df)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000050", "length_bytes": 11346, "license_type": "permissive", "methods": [{"docstring": "Standard error of the mean (SEM) Parameters ---------- y : array (n, ...) Data, first dimension reflecting cases. x : Categorial Categorial predictor for using pooled variance. match : Categorial Within-subject SEM. Notes ----- See Loftus and Masson (1994).", "name": "__init__", "signature": "def __init__(self, y, x=None, match=None)"}, {"docstring": "Confidence interval based on the inverse t-test Parameters ---------- confidence : scalar Confidence in the interval (i.e., .95 for 95% CI). Returns ------- ci : array [...] Confidence interval (i.e., the mean of y lies within m +- ci with the specified confidence). 
Notes ----- See ``_", "name": "ci", "signature": "def ci(self, confidence)"}], "n_methods": 2, "prompt": "Implement the Python class `SEM` described below.\n\nClass description:\nImplement the SEM class.\n\nMethod signatures and docstrings:\n- def __init__(self, y, x=None, match=None): Standard error of the mean (SEM) Parameters ---------- y : array (n, ...) Data, first dimension reflecting cases. x : Categorial Categorial predictor for using pooled variance. match : Categorial Within-subject SEM. Notes ----- See Loftus and Masson (1994).\n- def ci(self, confidence): Confidence interval based on the inverse t-test Parameters ---------- confidence : scalar Confidence in the interval (i.e., .95 for 95% CI). Returns ------- ci : array [...] Confidence interval (i.e., the mean of y lies within m +- ci with the specified confidence). Notes ----- See ``_", "prompted_full_text": "Implement the Python class `SEM` described below.\n\nClass description:\nImplement the SEM class.\n\nMethod signatures and docstrings:\n- def __init__(self, y, x=None, match=None): Standard error of the mean (SEM) Parameters ---------- y : array (n, ...) Data, first dimension reflecting cases. x : Categorial Categorial predictor for using pooled variance. match : Categorial Within-subject SEM. Notes ----- See Loftus and Masson (1994).\n- def ci(self, confidence): Confidence interval based on the inverse t-test Parameters ---------- confidence : scalar Confidence in the interval (i.e., .95 for 95% CI). Returns ------- ci : array [...] Confidence interval (i.e., the mean of y lies within m +- ci with the specified confidence). Notes ----- See ``_\n\n<|skeleton|>\nclass SEM:\n\n def __init__(self, y, x=None, match=None):\n \"\"\"Standard error of the mean (SEM) Parameters ---------- y : array (n, ...) Data, first dimension reflecting cases. x : Categorial Categorial predictor for using pooled variance. match : Categorial Within-subject SEM. Notes ----- See Loftus and Masson (1994).\"\"\"\n <|body_0|>\n\n def ci(self, confidence):\n \"\"\"Confidence interval based on the inverse t-test Parameters ---------- confidence : scalar Confidence in the interval (i.e., .95 for 95% CI). Returns ------- ci : array [...] Confidence interval (i.e., the mean of y lies within m +- ci with the specified confidence). Notes ----- See ``_\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if x is None:\n n = len(y)\n model = match\n else:\n x = asfactor(x)\n n = x._cellsize()\n if isinstance(n, dict):\n raise NotImplementedError('SEM for unequal cell sizes')\n model = x if match is None else x + match\n sem = residual_mean_square(y, model)\n sem /= n\n np.sqrt(sem, sem)\n self.n = n\n self.model = model\n self.sem = sem\n<|end_body_0|>\n\n<|body_start_1|>\n if self.model is None:\n df = self.n - 1\n elif isinstance(self.model, Model):\n df = self.model.df_error\n else:\n df = Model(self.model).df_error\n return self.sem * scipy.stats.t.isf((1 - confidence) / 2, df)\n<|end_body_1|>\n", "revision_id": "feb9bdec2a99aca3077e44f318aef1c85a2e4730", "skeleton": "<|skeleton|>\nclass SEM:\n\n def __init__(self, y, x=None, match=None):\n \"\"\"Standard error of the mean (SEM) Parameters ---------- y : array (n, ...) Data, first dimension reflecting cases. x : Categorial Categorial predictor for using pooled variance. match : Categorial Within-subject SEM. 
Notes ----- See Loftus and Masson (1994).\"\"\"\n <|body_0|>\n\n def ci(self, confidence):\n \"\"\"Confidence interval based on the inverse t-test Parameters ---------- confidence : scalar Confidence in the interval (i.e., .95 for 95% CI). Returns ------- ci : array [...] Confidence interval (i.e., the mean of y lies within m +- ci with the specified confidence). Notes ----- See ``_\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SEM:\n def __init__(self, y, x=None, match=None):\n \"\"\"Standard error of the mean (SEM) Parameters ---------- y : array (n, ...) Data, first dimension reflecting cases. x : Categorial Categorial predictor for using pooled variance. match : Categorial Within-subject SEM. Notes ----- See Loftus and Masson (1994).\"\"\"\n if x is None:\n n = len(y)\n model = match\n else:\n x = asfactor(x)\n n = x._cellsize()\n if isinstance(n, dict):\n raise NotImplementedError('SEM for unequal cell sizes')\n model = x if match is None else x + match\n sem = residual_mean_square(y, model)\n sem /= n\n np.sqrt(sem, sem)\n self.n = n\n self.model = model\n self.sem = sem\n\n def ci(self, confidence):\n \"\"\"Confidence interval based on the inverse t-test Parameters ---------- confidence : scalar Confidence in the interval (i.e., .95 for 95% CI). Returns ------- ci : array [...] Confidence interval (i.e., the mean of y lies within m +- ci with the specified confidence). Notes ----- See ``_\"\"\"\n if self.model is None:\n df = self.n - 1\n elif isinstance(self.model, Model):\n df = self.model.df_error\n else:\n df = Model(self.model).df_error\n return self.sem * scipy.stats.t.isf((1 - confidence) / 2, df)\n", "source": "the_stack_v2_python_sparse", "source_path": "eelbrain/_stats/stats.py", "source_repo": "weilongzheng/Eelbrain", "split": "val", "star_events_count": 0}
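For the simplest case of the SEM record above (x=None, match=None), the two bodies appear to reduce to the textbook t-based interval: assuming residual_mean_square(y, None) is the sample variance, sem becomes s/sqrt(n), and ci() scales it by the inverse t quantile with n-1 degrees of freedom. A self-contained check with plain numpy/scipy, bypassing the Eelbrain Model/asfactor machinery:

    import numpy as np
    import scipy.stats

    y = np.array([4.1, 3.9, 4.3, 4.0, 4.2])
    sem = y.std(ddof=1) / np.sqrt(len(y))                        # s / sqrt(n)
    half = sem * scipy.stats.t.isf((1 - 0.95) / 2, len(y) - 1)   # same formula as ci(0.95)
    print(f'{y.mean():.3f} +/- {half:.3f}')                      # 95% CI around the mean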
{"blob_id": "074723f526b205c1a680fccc6c0c61f2f2fc7011", "bodies": ["certPath = '..\\\\testCerts\\\\caBasicConstNotCrit.pem'\nlint_basic_constraints_not_critical.init()\nwith open(certPath, 'rb') as f:\n cert = x509.load_pem_x509_certificate(f.read(), default_backend())\n out = base.Lints['e_basic_constraints_not_critical'].Execute(cert)\n self.assertEqual(base.LintStatus.Error, out.Status)", "certPath = '..\\\\testCerts\\\\caBasicConstCrit.pem'\nlint_basic_constraints_not_critical.init()\nwith open(certPath, 'rb') as f:\n cert = x509.load_pem_x509_certificate(f.read(), default_backend())\n out = base.Lints['e_basic_constraints_not_critical'].Execute(cert)\n self.assertEqual(base.LintStatus.Pass, out.Status)"], "bodies_text": "<|body_start_0|>\n certPath = '..\\\\testCerts\\\\caBasicConstNotCrit.pem'\n lint_basic_constraints_not_critical.init()\n with open(certPath, 'rb') as f:\n cert = x509.load_pem_x509_certificate(f.read(), default_backend())\n out = base.Lints['e_basic_constraints_not_critical'].Execute(cert)\n self.assertEqual(base.LintStatus.Error, out.Status)\n<|end_body_0|>\n\n<|body_start_1|>\n certPath = '..\\\\testCerts\\\\caBasicConstCrit.pem'\n lint_basic_constraints_not_critical.init()\n with open(certPath, 'rb') as f:\n cert = x509.load_pem_x509_certificate(f.read(), default_backend())\n out = base.Lints['e_basic_constraints_not_critical'].Execute(cert)\n self.assertEqual(base.LintStatus.Pass, out.Status)\n<|end_body_1|>\n", "class_docstring": "Test lint_basic_constraints_not_critical.py", "class_name": "test_lint_basic_constraints_not_critical", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass test_lint_basic_constraints_not_critical:\n \"\"\"Test lint_basic_constraints_not_critical.py\"\"\"\n\n def test_BasicConstNotCrit(self):\n \"\"\"Test BasicConstNotCrit\"\"\"\n <|body_0|>\n\n def test_BasicConstCrit(self):\n \"\"\"Test lint_basic_constraints_critical.py\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n certPath = '..\\\\testCerts\\\\caBasicConstNotCrit.pem'\n lint_basic_constraints_not_critical.init()\n with open(certPath, 'rb') as f:\n cert = x509.load_pem_x509_certificate(f.read(), default_backend())\n out = base.Lints['e_basic_constraints_not_critical'].Execute(cert)\n self.assertEqual(base.LintStatus.Error, out.Status)\n<|end_body_0|>\n\n<|body_start_1|>\n certPath = '..\\\\testCerts\\\\caBasicConstCrit.pem'\n lint_basic_constraints_not_critical.init()\n with open(certPath, 'rb') as f:\n cert = x509.load_pem_x509_certificate(f.read(), default_backend())\n out = base.Lints['e_basic_constraints_not_critical'].Execute(cert)\n self.assertEqual(base.LintStatus.Pass, out.Status)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000051", "length_bytes": 1324, "license_type": "no_license", "methods": [{"docstring": "Test BasicConstNotCrit", "name": "test_BasicConstNotCrit", "signature": "def test_BasicConstNotCrit(self)"}, {"docstring": "Test lint_basic_constraints_critical.py", "name": "test_BasicConstCrit", "signature": "def test_BasicConstCrit(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_016547", "prompt": "Implement the Python class `test_lint_basic_constraints_not_critical` described below.\n\nClass description:\nTest lint_basic_constraints_not_critical.py\n\nMethod signatures and docstrings:\n- def test_BasicConstNotCrit(self): Test BasicConstNotCrit\n- def test_BasicConstCrit(self): Test lint_basic_constraints_critical.py", "prompted_full_text": 
"Implement the Python class `test_lint_basic_constraints_not_critical` described below.\n\nClass description:\nTest lint_basic_constraints_not_critical.py\n\nMethod signatures and docstrings:\n- def test_BasicConstNotCrit(self): Test BasicConstNotCrit\n- def test_BasicConstCrit(self): Test lint_basic_constraints_critical.py\n\n<|skeleton|>\nclass test_lint_basic_constraints_not_critical:\n \"\"\"Test lint_basic_constraints_not_critical.py\"\"\"\n\n def test_BasicConstNotCrit(self):\n \"\"\"Test BasicConstNotCrit\"\"\"\n <|body_0|>\n\n def test_BasicConstCrit(self):\n \"\"\"Test lint_basic_constraints_critical.py\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n certPath = '..\\\\testCerts\\\\caBasicConstNotCrit.pem'\n lint_basic_constraints_not_critical.init()\n with open(certPath, 'rb') as f:\n cert = x509.load_pem_x509_certificate(f.read(), default_backend())\n out = base.Lints['e_basic_constraints_not_critical'].Execute(cert)\n self.assertEqual(base.LintStatus.Error, out.Status)\n<|end_body_0|>\n\n<|body_start_1|>\n certPath = '..\\\\testCerts\\\\caBasicConstCrit.pem'\n lint_basic_constraints_not_critical.init()\n with open(certPath, 'rb') as f:\n cert = x509.load_pem_x509_certificate(f.read(), default_backend())\n out = base.Lints['e_basic_constraints_not_critical'].Execute(cert)\n self.assertEqual(base.LintStatus.Pass, out.Status)\n<|end_body_1|>\n", "revision_id": "c7e7ca27e5d04bbaa4e7ad71d8e86ec5c9388987", "skeleton": "<|skeleton|>\nclass test_lint_basic_constraints_not_critical:\n \"\"\"Test lint_basic_constraints_not_critical.py\"\"\"\n\n def test_BasicConstNotCrit(self):\n \"\"\"Test BasicConstNotCrit\"\"\"\n <|body_0|>\n\n def test_BasicConstCrit(self):\n \"\"\"Test lint_basic_constraints_critical.py\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class test_lint_basic_constraints_not_critical:\n \"\"\"Test lint_basic_constraints_not_critical.py\"\"\"\n\n def test_BasicConstNotCrit(self):\n \"\"\"Test BasicConstNotCrit\"\"\"\n certPath = '..\\\\testCerts\\\\caBasicConstNotCrit.pem'\n lint_basic_constraints_not_critical.init()\n with open(certPath, 'rb') as f:\n cert = x509.load_pem_x509_certificate(f.read(), default_backend())\n out = base.Lints['e_basic_constraints_not_critical'].Execute(cert)\n self.assertEqual(base.LintStatus.Error, out.Status)\n\n def test_BasicConstCrit(self):\n \"\"\"Test lint_basic_constraints_critical.py\"\"\"\n certPath = '..\\\\testCerts\\\\caBasicConstCrit.pem'\n lint_basic_constraints_not_critical.init()\n with open(certPath, 'rb') as f:\n cert = x509.load_pem_x509_certificate(f.read(), default_backend())\n out = base.Lints['e_basic_constraints_not_critical'].Execute(cert)\n self.assertEqual(base.LintStatus.Pass, out.Status)\n", "source": "the_stack_v2_python_sparse", "source_path": "testlints/test_lint_basic_constraints_not_critical.py", "source_repo": "846468230/Plint", "split": "val", "star_events_count": 1}
{"blob_id": "52329cc8b9e800c1e8ce899e7c8ca04941251a4b", "bodies": ["if phone_num and code:\n params = {}\n params['number'] = phone_num\n params['templateId'] = templateId\n params['templateParams'] = [code, time]\n json_str_data = self.client.send(params)\n return json_str_data\nelse:\n raise Exception('手机号和验证码不能为空')", "result = json.loads(self.client.balance())\ncode = result['code']\ndata = result['data']\nif not code:\n print('短信剩余{}条'.format(data))\nelse:\n print('错误代码: {}, {}'.format(code, data))"], "bodies_text": "<|body_start_0|>\n if phone_num and code:\n params = {}\n params['number'] = phone_num\n params['templateId'] = templateId\n params['templateParams'] = [code, time]\n json_str_data = self.client.send(params)\n return json_str_data\n else:\n raise Exception('手机号和验证码不能为空')\n<|end_body_0|>\n\n<|body_start_1|>\n result = json.loads(self.client.balance())\n code = result['code']\n data = result['data']\n if not code:\n print('短信剩余{}条'.format(data))\n else:\n print('错误代码: {}, {}'.format(code, data))\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Message", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Message:\n\n def send_message(self, phone_num, code, templateId='141', time='五'):\n \"\"\"发送短信验证码 :param phone_num: str 收件人手机号 :param code: str 6位随机激活码 :param templateId: str 榛子云短信平台模板id :param time: str 过期时间,仅用于显示 :return: json_str 短信发送结果\"\"\"\n <|body_0|>\n\n def number_of_message(self):\n \"\"\"查询剩余短语条数 返回结果是json格式的字符串, code: 查询状态,0为成功, data: 为剩余短信条数。 非0为查询失败,可从data中查看错误信息\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if phone_num and code:\n params = {}\n params['number'] = phone_num\n params['templateId'] = templateId\n params['templateParams'] = [code, time]\n json_str_data = self.client.send(params)\n return json_str_data\n else:\n raise Exception('手机号和验证码不能为空')\n<|end_body_0|>\n\n<|body_start_1|>\n result = json.loads(self.client.balance())\n code = result['code']\n data = result['data']\n if not code:\n print('短信剩余{}条'.format(data))\n else:\n print('错误代码: {}, {}'.format(code, data))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000052", "length_bytes": 1720, "license_type": "no_license", "methods": [{"docstring": "发送短信验证码 :param phone_num: str 收件人手机号 :param code: str 6位随机激活码 :param templateId: str 榛子云短信平台模板id :param time: str 过期时间,仅用于显示 :return: json_str 短信发送结果", "name": "send_message", "signature": "def send_message(self, phone_num, code, templateId='141', time='五')"}, {"docstring": "查询剩余短语条数 返回结果是json格式的字符串, code: 查询状态,0为成功, data: 为剩余短信条数。 非0为查询失败,可从data中查看错误信息", "name": "number_of_message", "signature": "def number_of_message(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004852", "prompt": "Implement the Python class `Message` described below.\n\nClass description:\nImplement the Message class.\n\nMethod signatures and docstrings:\n- def send_message(self, phone_num, code, templateId='141', time='五'): 发送短信验证码 :param phone_num: str 收件人手机号 :param code: str 6位随机激活码 :param templateId: str 榛子云短信平台模板id :param time: str 过期时间,仅用于显示 :return: json_str 短信发送结果\n- def number_of_message(self): 查询剩余短语条数 返回结果是json格式的字符串, code: 查询状态,0为成功, data: 为剩余短信条数。 非0为查询失败,可从data中查看错误信息", "prompted_full_text": "Implement the Python class `Message` described below.\n\nClass description:\nImplement the Message class.\n\nMethod signatures and docstrings:\n- def send_message(self, phone_num, code, templateId='141', time='五'): 发送短信验证码 :param phone_num: str 收件人手机号 
:param code: str 6位随机激活码 :param templateId: str 榛子云短信平台模板id :param time: str 过期时间,仅用于显示 :return: json_str 短信发送结果\n- def number_of_message(self): 查询剩余短语条数 返回结果是json格式的字符串, code: 查询状态,0为成功, data: 为剩余短信条数。 非0为查询失败,可从data中查看错误信息\n\n<|skeleton|>\nclass Message:\n\n def send_message(self, phone_num, code, templateId='141', time='五'):\n \"\"\"发送短信验证码 :param phone_num: str 收件人手机号 :param code: str 6位随机激活码 :param templateId: str 榛子云短信平台模板id :param time: str 过期时间,仅用于显示 :return: json_str 短信发送结果\"\"\"\n <|body_0|>\n\n def number_of_message(self):\n \"\"\"查询剩余短语条数 返回结果是json格式的字符串, code: 查询状态,0为成功, data: 为剩余短信条数。 非0为查询失败,可从data中查看错误信息\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if phone_num and code:\n params = {}\n params['number'] = phone_num\n params['templateId'] = templateId\n params['templateParams'] = [code, time]\n json_str_data = self.client.send(params)\n return json_str_data\n else:\n raise Exception('手机号和验证码不能为空')\n<|end_body_0|>\n\n<|body_start_1|>\n result = json.loads(self.client.balance())\n code = result['code']\n data = result['data']\n if not code:\n print('短信剩余{}条'.format(data))\n else:\n print('错误代码: {}, {}'.format(code, data))\n<|end_body_1|>\n", "revision_id": "9870f9402c9416594409be974c640f890b46a41b", "skeleton": "<|skeleton|>\nclass Message:\n\n def send_message(self, phone_num, code, templateId='141', time='五'):\n \"\"\"发送短信验证码 :param phone_num: str 收件人手机号 :param code: str 6位随机激活码 :param templateId: str 榛子云短信平台模板id :param time: str 过期时间,仅用于显示 :return: json_str 短信发送结果\"\"\"\n <|body_0|>\n\n def number_of_message(self):\n \"\"\"查询剩余短语条数 返回结果是json格式的字符串, code: 查询状态,0为成功, data: 为剩余短信条数。 非0为查询失败,可从data中查看错误信息\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Message:\n def send_message(self, phone_num, code, templateId='141', time='五'):\n \"\"\"发送短信验证码 :param phone_num: str 收件人手机号 :param code: str 6位随机激活码 :param templateId: str 榛子云短信平台模板id :param time: str 过期时间,仅用于显示 :return: json_str 短信发送结果\"\"\"\n if phone_num and code:\n params = {}\n params['number'] = phone_num\n params['templateId'] = templateId\n params['templateParams'] = [code, time]\n json_str_data = self.client.send(params)\n return json_str_data\n else:\n raise Exception('手机号和验证码不能为空')\n\n def number_of_message(self):\n \"\"\"查询剩余短语条数 返回结果是json格式的字符串, code: 查询状态,0为成功, data: 为剩余短信条数。 非0为查询失败,可从data中查看错误信息\"\"\"\n result = json.loads(self.client.balance())\n code = result['code']\n data = result['data']\n if not code:\n print('短信剩余{}条'.format(data))\n else:\n print('错误代码: {}, {}'.format(code, data))\n", "source": "the_stack_v2_python_sparse", "source_path": "PYMARA 2/Public/message/send_msg.py", "source_repo": "yuanzuliang/PyMara", "split": "val", "star_events_count": 1}
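The Message record shows no __init__, so self.client (a 榛子云/Zhenzi Cloud SMS client, per the docstrings) must be created elsewhere; the validation path below never touches it and is runnable as recorded:

    msg = Message()  # self.client is assumed to be attached elsewhere for real sends
    try:
        msg.send_message('', '')   # empty inputs raise before self.client is used
    except Exception as exc:
        print(exc)                 # 手机号和验证码不能为空 ("phone number and code must not be empty")
    # The happy path, msg.send_message('138...', '123456'), would call
    # self.client.send(params) and return the platform's JSON string.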
{"blob_id": "e4f4685c554dc1c8009df12448e6c85ce5e46aa1", "bodies": ["SSHComputingElement.__init__(self, ceUniqueID)\nself.ceType = CE_NAME\nself.controlScript = 'sgece'\nself.mandatoryParameters = MANDATORY_PARAMETERS", "result = pfnparse(jobID)\nif not result['OK']:\n return result\njobStamp = result['Value']['FileName']\nhost = result['Value']['Host']\noutput = '%s/DIRACPilot.o%s' % (self.batchOutput, jobStamp)\nerror = '%s/DIRACPilot.e%s' % (self.batchError, jobStamp)\nreturn S_OK((jobStamp, host, output, error))"], "bodies_text": "<|body_start_0|>\n SSHComputingElement.__init__(self, ceUniqueID)\n self.ceType = CE_NAME\n self.controlScript = 'sgece'\n self.mandatoryParameters = MANDATORY_PARAMETERS\n<|end_body_0|>\n\n<|body_start_1|>\n result = pfnparse(jobID)\n if not result['OK']:\n return result\n jobStamp = result['Value']['FileName']\n host = result['Value']['Host']\n output = '%s/DIRACPilot.o%s' % (self.batchOutput, jobStamp)\n error = '%s/DIRACPilot.e%s' % (self.batchError, jobStamp)\n return S_OK((jobStamp, host, output, error))\n<|end_body_1|>\n", "class_docstring": "", "class_name": "SSHGEComputingElement", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SSHGEComputingElement:\n\n def __init__(self, ceUniqueID):\n \"\"\"Standard constructor.\"\"\"\n <|body_0|>\n\n def _getJobOutputFiles(self, jobID):\n \"\"\"Get output file names for the specific CE\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n SSHComputingElement.__init__(self, ceUniqueID)\n self.ceType = CE_NAME\n self.controlScript = 'sgece'\n self.mandatoryParameters = MANDATORY_PARAMETERS\n<|end_body_0|>\n\n<|body_start_1|>\n result = pfnparse(jobID)\n if not result['OK']:\n return result\n jobStamp = result['Value']['FileName']\n host = result['Value']['Host']\n output = '%s/DIRACPilot.o%s' % (self.batchOutput, jobStamp)\n error = '%s/DIRACPilot.e%s' % (self.batchError, jobStamp)\n return S_OK((jobStamp, host, output, error))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000053", "length_bytes": 1618, "license_type": "no_license", "methods": [{"docstring": "Standard constructor.", "name": "__init__", "signature": "def __init__(self, ceUniqueID)"}, {"docstring": "Get output file names for the specific CE", "name": "_getJobOutputFiles", "signature": "def _getJobOutputFiles(self, jobID)"}], "n_methods": 2, "prompt": "Implement the Python class `SSHGEComputingElement` described below.\n\nClass description:\nImplement the SSHGEComputingElement class.\n\nMethod signatures and docstrings:\n- def __init__(self, ceUniqueID): Standard constructor.\n- def _getJobOutputFiles(self, jobID): Get output file names for the specific CE", "prompted_full_text": "Implement the Python class `SSHGEComputingElement` described below.\n\nClass description:\nImplement the SSHGEComputingElement class.\n\nMethod signatures and docstrings:\n- def __init__(self, ceUniqueID): Standard constructor.\n- def _getJobOutputFiles(self, jobID): Get output file names for the specific CE\n\n<|skeleton|>\nclass SSHGEComputingElement:\n\n def __init__(self, ceUniqueID):\n \"\"\"Standard constructor.\"\"\"\n <|body_0|>\n\n def _getJobOutputFiles(self, jobID):\n \"\"\"Get output file names for the specific CE\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n SSHComputingElement.__init__(self, ceUniqueID)\n self.ceType = CE_NAME\n self.controlScript = 'sgece'\n self.mandatoryParameters = MANDATORY_PARAMETERS\n<|end_body_0|>\n\n<|body_start_1|>\n result = 
pfnparse(jobID)\n if not result['OK']:\n return result\n jobStamp = result['Value']['FileName']\n host = result['Value']['Host']\n output = '%s/DIRACPilot.o%s' % (self.batchOutput, jobStamp)\n error = '%s/DIRACPilot.e%s' % (self.batchError, jobStamp)\n return S_OK((jobStamp, host, output, error))\n<|end_body_1|>\n", "revision_id": "cb6b5db20eff2a8c5f04e9983221850f475b38e3", "skeleton": "<|skeleton|>\nclass SSHGEComputingElement:\n\n def __init__(self, ceUniqueID):\n \"\"\"Standard constructor.\"\"\"\n <|body_0|>\n\n def _getJobOutputFiles(self, jobID):\n \"\"\"Get output file names for the specific CE\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SSHGEComputingElement:\n def __init__(self, ceUniqueID):\n \"\"\"Standard constructor.\"\"\"\n SSHComputingElement.__init__(self, ceUniqueID)\n self.ceType = CE_NAME\n self.controlScript = 'sgece'\n self.mandatoryParameters = MANDATORY_PARAMETERS\n\n def _getJobOutputFiles(self, jobID):\n \"\"\"Get output file names for the specific CE\"\"\"\n result = pfnparse(jobID)\n if not result['OK']:\n return result\n jobStamp = result['Value']['FileName']\n host = result['Value']['Host']\n output = '%s/DIRACPilot.o%s' % (self.batchOutput, jobStamp)\n error = '%s/DIRACPilot.e%s' % (self.batchError, jobStamp)\n return S_OK((jobStamp, host, output, error))\n", "source": "the_stack_v2_python_sparse", "source_path": "Resources/Computing/SSHGEComputingElement.py", "source_repo": "alexanderrichards/DIRAC", "split": "val", "star_events_count": 0}
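Per its source_path, the `SSHGEComputingElement` record comes from DIRAC, where `S_OK`, `pfnparse`, `CE_NAME`, and the `SSHComputingElement` base are imported from the surrounding package. The sketch below substitutes simplified stand-ins for the first two so `_getJobOutputFiles` can be exercised on its own; the toy `pfnparse` only splits a `host/stamp` string and is not DIRAC's real PFN parser, and the batch paths are illustrative.

def S_OK(value=None):
    # Simplified stand-in for DIRAC's S_OK result wrapper.
    return {'OK': True, 'Value': value}

def pfnparse(pfn):
    # Toy parser: everything before the last '/' is the host, the rest is
    # the file name. DIRAC's real pfnparse handles full PFN URLs.
    host, _, filename = pfn.rpartition('/')
    return S_OK({'Host': host, 'FileName': filename})

class OutputFileSketch:
    batchOutput = '/opt/dirac/batch/output'  # illustrative paths only
    batchError = '/opt/dirac/batch/error'

    def _getJobOutputFiles(self, jobID):
        result = pfnparse(jobID)
        if not result['OK']:
            return result
        jobStamp = result['Value']['FileName']
        host = result['Value']['Host']
        output = '%s/DIRACPilot.o%s' % (self.batchOutput, jobStamp)
        error = '%s/DIRACPilot.e%s' % (self.batchError, jobStamp)
        return S_OK((jobStamp, host, output, error))

print(OutputFileSketch()._getJobOutputFiles('ce.example.org/12345'))
# -> jobStamp '12345', host 'ce.example.org', plus the .o/.e pilot file paths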
{"blob_id": "36470304f09349f82486c2750a018a157f4a8f71", "bodies": ["self.vk = vk\ndata = vk('messages.getLongPollServer')\nif data.get('error'):\n if data['error']['error_code'] == 5:\n raise Exception('tokenfail')\nself.server = data['server']\nself.key = data['key']\nself.ts = data['ts']\nself.wait = wait", "response = requests.get(f'http://{self.server}?act=a_check&key={self.key}&ts={self.ts}&wait={self.wait}&version=3&mode=2')\nif response.status_code != 200:\n logger.error('Ошибка сети')\n return []\nself.time = datetime.now().timestamp()\ndata = response.json()\nif 'failed' in data.keys():\n if data['failed'] == 1:\n logger.error('Ошибка истории событий')\n self.ts = data['ts']\n elif data['failed'] == 2:\n self.key = self.vk('messages.getLongPollServer')['key']\n else:\n raise Exception('Информация о пользователе утрачена')\n return []\nelse:\n self.ts = data['ts']\n return data['updates']"], "bodies_text": "<|body_start_0|>\n self.vk = vk\n data = vk('messages.getLongPollServer')\n if data.get('error'):\n if data['error']['error_code'] == 5:\n raise Exception('tokenfail')\n self.server = data['server']\n self.key = data['key']\n self.ts = data['ts']\n self.wait = wait\n<|end_body_0|>\n\n<|body_start_1|>\n response = requests.get(f'http://{self.server}?act=a_check&key={self.key}&ts={self.ts}&wait={self.wait}&version=3&mode=2')\n if response.status_code != 200:\n logger.error('Ошибка сети')\n return []\n self.time = datetime.now().timestamp()\n data = response.json()\n if 'failed' in data.keys():\n if data['failed'] == 1:\n logger.error('Ошибка истории событий')\n self.ts = data['ts']\n elif data['failed'] == 2:\n self.key = self.vk('messages.getLongPollServer')['key']\n else:\n raise Exception('Информация о пользователе утрачена')\n return []\n else:\n self.ts = data['ts']\n return data['updates']\n<|end_body_1|>\n", "class_docstring": "", "class_name": "LP", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LP:\n\n def __init__(self, vk, wait=25):\n \"\"\"vk - экземпляр VkApi\"\"\"\n <|body_0|>\n\n def check(self):\n \"\"\"Возвращает список событий (updates)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.vk = vk\n data = vk('messages.getLongPollServer')\n if data.get('error'):\n if data['error']['error_code'] == 5:\n raise Exception('tokenfail')\n self.server = data['server']\n self.key = data['key']\n self.ts = data['ts']\n self.wait = wait\n<|end_body_0|>\n\n<|body_start_1|>\n response = requests.get(f'http://{self.server}?act=a_check&key={self.key}&ts={self.ts}&wait={self.wait}&version=3&mode=2')\n if response.status_code != 200:\n logger.error('Ошибка сети')\n return []\n self.time = datetime.now().timestamp()\n data = response.json()\n if 'failed' in data.keys():\n if data['failed'] == 1:\n logger.error('Ошибка истории событий')\n self.ts = data['ts']\n elif data['failed'] == 2:\n self.key = self.vk('messages.getLongPollServer')['key']\n else:\n raise Exception('Информация о пользователе утрачена')\n return []\n else:\n self.ts = data['ts']\n return data['updates']\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000054", "length_bytes": 1570, "license_type": "permissive", "methods": [{"docstring": "vk - экземпляр VkApi", "name": "__init__", "signature": "def __init__(self, vk, wait=25)"}, {"docstring": "Возвращает список событий (updates)", "name": "check", "signature": "def check(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_019936", "prompt": 
"Implement the Python class `LP` described below.\n\nClass description:\nImplement the LP class.\n\nMethod signatures and docstrings:\n- def __init__(self, vk, wait=25): vk - экземпляр VkApi\n- def check(self): Возвращает список событий (updates)", "prompted_full_text": "Implement the Python class `LP` described below.\n\nClass description:\nImplement the LP class.\n\nMethod signatures and docstrings:\n- def __init__(self, vk, wait=25): vk - экземпляр VkApi\n- def check(self): Возвращает список событий (updates)\n\n<|skeleton|>\nclass LP:\n\n def __init__(self, vk, wait=25):\n \"\"\"vk - экземпляр VkApi\"\"\"\n <|body_0|>\n\n def check(self):\n \"\"\"Возвращает список событий (updates)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.vk = vk\n data = vk('messages.getLongPollServer')\n if data.get('error'):\n if data['error']['error_code'] == 5:\n raise Exception('tokenfail')\n self.server = data['server']\n self.key = data['key']\n self.ts = data['ts']\n self.wait = wait\n<|end_body_0|>\n\n<|body_start_1|>\n response = requests.get(f'http://{self.server}?act=a_check&key={self.key}&ts={self.ts}&wait={self.wait}&version=3&mode=2')\n if response.status_code != 200:\n logger.error('Ошибка сети')\n return []\n self.time = datetime.now().timestamp()\n data = response.json()\n if 'failed' in data.keys():\n if data['failed'] == 1:\n logger.error('Ошибка истории событий')\n self.ts = data['ts']\n elif data['failed'] == 2:\n self.key = self.vk('messages.getLongPollServer')['key']\n else:\n raise Exception('Информация о пользователе утрачена')\n return []\n else:\n self.ts = data['ts']\n return data['updates']\n<|end_body_1|>\n", "revision_id": "128fddea9824ea00cc0f61e79e9e77797fdce04f", "skeleton": "<|skeleton|>\nclass LP:\n\n def __init__(self, vk, wait=25):\n \"\"\"vk - экземпляр VkApi\"\"\"\n <|body_0|>\n\n def check(self):\n \"\"\"Возвращает список событий (updates)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class LP:\n def __init__(self, vk, wait=25):\n \"\"\"vk - экземпляр VkApi\"\"\"\n self.vk = vk\n data = vk('messages.getLongPollServer')\n if data.get('error'):\n if data['error']['error_code'] == 5:\n raise Exception('tokenfail')\n self.server = data['server']\n self.key = data['key']\n self.ts = data['ts']\n self.wait = wait\n\n def check(self):\n \"\"\"Возвращает список событий (updates)\"\"\"\n response = requests.get(f'http://{self.server}?act=a_check&key={self.key}&ts={self.ts}&wait={self.wait}&version=3&mode=2')\n if response.status_code != 200:\n logger.error('Ошибка сети')\n return []\n self.time = datetime.now().timestamp()\n data = response.json()\n if 'failed' in data.keys():\n if data['failed'] == 1:\n logger.error('Ошибка истории событий')\n self.ts = data['ts']\n elif data['failed'] == 2:\n self.key = self.vk('messages.getLongPollServer')['key']\n else:\n raise Exception('Информация о пользователе утрачена')\n return []\n else:\n self.ts = data['ts']\n return data['updates']\n", "source": "the_stack_v2_python_sparse", "source_path": "microvk/user_longpoll.py", "source_repo": "Yazawasempai/IrCA-Duty", "split": "val", "star_events_count": 0}
{"blob_id": "512bc44045c9ba17c996b53087f81e1f6d970ec3", "bodies": ["EasyFrame.__init__(self)\nself.addLabel(text='Input', row=0, column=0)\nself.inputField = self.addTextField(text='', row=0, column=1)\nself.addLabel(text='Output', row=1, column=0)\nself.outputField = self.addTextField(text='', row=1, column=1, state='readonly')\nself.button = self.addButton(text='Convert', row=2, column=0, columnspan=2, command=self.convert)", "text = self.inputField.getText()\nresult = text.upper()\nself.outputField.setText(result)"], "bodies_text": "<|body_start_0|>\n EasyFrame.__init__(self)\n self.addLabel(text='Input', row=0, column=0)\n self.inputField = self.addTextField(text='', row=0, column=1)\n self.addLabel(text='Output', row=1, column=0)\n self.outputField = self.addTextField(text='', row=1, column=1, state='readonly')\n self.button = self.addButton(text='Convert', row=2, column=0, columnspan=2, command=self.convert)\n<|end_body_0|>\n\n<|body_start_1|>\n text = self.inputField.getText()\n result = text.upper()\n self.outputField.setText(result)\n<|end_body_1|>\n", "class_docstring": "Converts an input string to uppercase and displays the result.", "class_name": "TextFieldDemo", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TextFieldDemo:\n \"\"\"Converts an input string to uppercase and displays the result.\"\"\"\n\n def __init__(self):\n \"\"\"Sets up the window and widgets.\"\"\"\n <|body_0|>\n\n def convert(self):\n \"\"\"Inputs the string, converts it to uppercase, and outputs the result.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n EasyFrame.__init__(self)\n self.addLabel(text='Input', row=0, column=0)\n self.inputField = self.addTextField(text='', row=0, column=1)\n self.addLabel(text='Output', row=1, column=0)\n self.outputField = self.addTextField(text='', row=1, column=1, state='readonly')\n self.button = self.addButton(text='Convert', row=2, column=0, columnspan=2, command=self.convert)\n<|end_body_0|>\n\n<|body_start_1|>\n text = self.inputField.getText()\n result = text.upper()\n self.outputField.setText(result)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000055", "length_bytes": 1579, "license_type": "no_license", "methods": [{"docstring": "Sets up the window and widgets.", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Inputs the string, converts it to uppercase, and outputs the result.", "name": "convert", "signature": "def convert(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_050604", "prompt": "Implement the Python class `TextFieldDemo` described below.\n\nClass description:\nConverts an input string to uppercase and displays the result.\n\nMethod signatures and docstrings:\n- def __init__(self): Sets up the window and widgets.\n- def convert(self): Inputs the string, converts it to uppercase, and outputs the result.", "prompted_full_text": "Implement the Python class `TextFieldDemo` described below.\n\nClass description:\nConverts an input string to uppercase and displays the result.\n\nMethod signatures and docstrings:\n- def __init__(self): Sets up the window and widgets.\n- def convert(self): Inputs the string, converts it to uppercase, and outputs the result.\n\n<|skeleton|>\nclass TextFieldDemo:\n \"\"\"Converts an input string to uppercase and displays the result.\"\"\"\n\n def __init__(self):\n \"\"\"Sets up the window and widgets.\"\"\"\n <|body_0|>\n\n def convert(self):\n \"\"\"Inputs the string, converts it to 
uppercase, and outputs the result.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n EasyFrame.__init__(self)\n self.addLabel(text='Input', row=0, column=0)\n self.inputField = self.addTextField(text='', row=0, column=1)\n self.addLabel(text='Output', row=1, column=0)\n self.outputField = self.addTextField(text='', row=1, column=1, state='readonly')\n self.button = self.addButton(text='Convert', row=2, column=0, columnspan=2, command=self.convert)\n<|end_body_0|>\n\n<|body_start_1|>\n text = self.inputField.getText()\n result = text.upper()\n self.outputField.setText(result)\n<|end_body_1|>\n", "revision_id": "eca69d000dc77681a30734b073b2383c97ccc02e", "skeleton": "<|skeleton|>\nclass TextFieldDemo:\n \"\"\"Converts an input string to uppercase and displays the result.\"\"\"\n\n def __init__(self):\n \"\"\"Sets up the window and widgets.\"\"\"\n <|body_0|>\n\n def convert(self):\n \"\"\"Inputs the string, converts it to uppercase, and outputs the result.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TextFieldDemo:\n \"\"\"Converts an input string to uppercase and displays the result.\"\"\"\n\n def __init__(self):\n \"\"\"Sets up the window and widgets.\"\"\"\n EasyFrame.__init__(self)\n self.addLabel(text='Input', row=0, column=0)\n self.inputField = self.addTextField(text='', row=0, column=1)\n self.addLabel(text='Output', row=1, column=0)\n self.outputField = self.addTextField(text='', row=1, column=1, state='readonly')\n self.button = self.addButton(text='Convert', row=2, column=0, columnspan=2, command=self.convert)\n\n def convert(self):\n \"\"\"Inputs the string, converts it to uppercase, and outputs the result.\"\"\"\n text = self.inputField.getText()\n result = text.upper()\n self.outputField.setText(result)\n", "source": "the_stack_v2_python_sparse", "source_path": "gui/breezy/textfielddemo.py", "source_repo": "lforet/robomow", "split": "val", "star_events_count": 11}
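The `TextFieldDemo` record matches breezypythongui's stock text-field demo (its source_path points at `gui/breezy/`), so the missing context is just the import, the base class, and a main guard. Assuming the `breezypythongui` package is installed, the complete program is:

from breezypythongui import EasyFrame

class TextFieldDemo(EasyFrame):
    """Converts an input string to uppercase and displays the result."""

    def __init__(self):
        EasyFrame.__init__(self)
        self.addLabel(text='Input', row=0, column=0)
        self.inputField = self.addTextField(text='', row=0, column=1)
        self.addLabel(text='Output', row=1, column=0)
        self.outputField = self.addTextField(text='', row=1, column=1,
                                             state='readonly')
        self.button = self.addButton(text='Convert', row=2, column=0,
                                     columnspan=2, command=self.convert)

    def convert(self):
        text = self.inputField.getText()
        self.outputField.setText(text.upper())

if __name__ == '__main__':
    TextFieldDemo().mainloop()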
{"blob_id": "71e50c38afdc7295860d9856c6864bb78a759dab", "bodies": ["self.__serverport = int(serverport)\nself.shutdown = shutdown\nt = threading.Thread(target=self.__mainloop, args=[add_message_to_list])\nt.start()", "s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\ns.bind(('', self.__serverport))\ns.listen(5)\ns.settimeout(2)\nwhile not self.shutdown[0]:\n try:\n clientsock, clientaddr = s.accept()\n clientsock.settimeout(None)\n msg = clientsock.recv(2048)\n add_message_to_list(msg)\n self.clientsock.close()\n self.clientsock = None\n except:\n continue\ns.close()", "try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, int(self.__serverport)))\n s.sendall(msg)\n s.close()\nexcept:\n traceback.print_exc()\n return False\nreturn True"], "bodies_text": "<|body_start_0|>\n self.__serverport = int(serverport)\n self.shutdown = shutdown\n t = threading.Thread(target=self.__mainloop, args=[add_message_to_list])\n t.start()\n<|end_body_0|>\n\n<|body_start_1|>\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n s.bind(('', self.__serverport))\n s.listen(5)\n s.settimeout(2)\n while not self.shutdown[0]:\n try:\n clientsock, clientaddr = s.accept()\n clientsock.settimeout(None)\n msg = clientsock.recv(2048)\n add_message_to_list(msg)\n self.clientsock.close()\n self.clientsock = None\n except:\n continue\n s.close()\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, int(self.__serverport)))\n s.sendall(msg)\n s.close()\n except:\n traceback.print_exc()\n return False\n return True\n<|end_body_2|>\n", "class_docstring": "This class is used for receiving and sending messages using TCP. Therefore a seperated thread is started which will be listening for incoming messages at the specified port. Any message received will be added to a list.", "class_name": "Peer", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Peer:\n \"\"\"This class is used for receiving and sending messages using TCP. Therefore a seperated thread is started which will be listening for incoming messages at the specified port. Any message received will be added to a list.\"\"\"\n\n def __init__(self, serverport, add_message_to_list, shutdown):\n \"\"\"Initializing of an object of the class Peer. serverport : Port number which is used for listening and sending messages using TCP. Any port number can be chosen but make sure all clients are using the same port number for TCP communication. add_message_to_list : A function adding a received message to the list of messages. shutdown : A parameter used for stopping the infinite loop of the main thread which is listening for incoming messages.\"\"\"\n <|body_0|>\n\n def __mainloop(self, add_message_to_list):\n \"\"\"This method creates a socket at the specified port. An infinite loop is used for listening for incoming messages. If a message is received it will be added to a list and the loop continues listening for new messages. This method is private. add_message_to_list : A function adding a received message to the list of messages.\"\"\"\n <|body_1|>\n\n def connectandsend(self, host, msg):\n \"\"\"This method is used for sending messages using TCP. The method returns True after successfully sending a message. 
If an error occures during sending the message, the method returns False and an error message is printed within the console. This method is public. host : contains the address of the receiver msg : contains the content of the message\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.__serverport = int(serverport)\n self.shutdown = shutdown\n t = threading.Thread(target=self.__mainloop, args=[add_message_to_list])\n t.start()\n<|end_body_0|>\n\n<|body_start_1|>\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n s.bind(('', self.__serverport))\n s.listen(5)\n s.settimeout(2)\n while not self.shutdown[0]:\n try:\n clientsock, clientaddr = s.accept()\n clientsock.settimeout(None)\n msg = clientsock.recv(2048)\n add_message_to_list(msg)\n self.clientsock.close()\n self.clientsock = None\n except:\n continue\n s.close()\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, int(self.__serverport)))\n s.sendall(msg)\n s.close()\n except:\n traceback.print_exc()\n return False\n return True\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000056", "length_bytes": 4540, "license_type": "no_license", "methods": [{"docstring": "Initializing of an object of the class Peer. serverport : Port number which is used for listening and sending messages using TCP. Any port number can be chosen but make sure all clients are using the same port number for TCP communication. add_message_to_list : A function adding a received message to the list of messages. shutdown : A parameter used for stopping the infinite loop of the main thread which is listening for incoming messages.", "name": "__init__", "signature": "def __init__(self, serverport, add_message_to_list, shutdown)"}, {"docstring": "This method creates a socket at the specified port. An infinite loop is used for listening for incoming messages. If a message is received it will be added to a list and the loop continues listening for new messages. This method is private. add_message_to_list : A function adding a received message to the list of messages.", "name": "__mainloop", "signature": "def __mainloop(self, add_message_to_list)"}, {"docstring": "This method is used for sending messages using TCP. The method returns True after successfully sending a message. If an error occures during sending the message, the method returns False and an error message is printed within the console. This method is public. host : contains the address of the receiver msg : contains the content of the message", "name": "connectandsend", "signature": "def connectandsend(self, host, msg)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_049968", "prompt": "Implement the Python class `Peer` described below.\n\nClass description:\nThis class is used for receiving and sending messages using TCP. Therefore a seperated thread is started which will be listening for incoming messages at the specified port. Any message received will be added to a list.\n\nMethod signatures and docstrings:\n- def __init__(self, serverport, add_message_to_list, shutdown): Initializing of an object of the class Peer. serverport : Port number which is used for listening and sending messages using TCP. Any port number can be chosen but make sure all clients are using the same port number for TCP communication. add_message_to_list : A function adding a received message to the list of messages. 
shutdown : A parameter used for stopping the infinite loop of the main thread which is listening for incoming messages.\n- def __mainloop(self, add_message_to_list): This method creates a socket at the specified port. An infinite loop is used for listening for incoming messages. If a message is received it will be added to a list and the loop continues listening for new messages. This method is private. add_message_to_list : A function adding a received message to the list of messages.\n- def connectandsend(self, host, msg): This method is used for sending messages using TCP. The method returns True after successfully sending a message. If an error occures during sending the message, the method returns False and an error message is printed within the console. This method is public. host : contains the address of the receiver msg : contains the content of the message", "prompted_full_text": "Implement the Python class `Peer` described below.\n\nClass description:\nThis class is used for receiving and sending messages using TCP. Therefore a seperated thread is started which will be listening for incoming messages at the specified port. Any message received will be added to a list.\n\nMethod signatures and docstrings:\n- def __init__(self, serverport, add_message_to_list, shutdown): Initializing of an object of the class Peer. serverport : Port number which is used for listening and sending messages using TCP. Any port number can be chosen but make sure all clients are using the same port number for TCP communication. add_message_to_list : A function adding a received message to the list of messages. shutdown : A parameter used for stopping the infinite loop of the main thread which is listening for incoming messages.\n- def __mainloop(self, add_message_to_list): This method creates a socket at the specified port. An infinite loop is used for listening for incoming messages. If a message is received it will be added to a list and the loop continues listening for new messages. This method is private. add_message_to_list : A function adding a received message to the list of messages.\n- def connectandsend(self, host, msg): This method is used for sending messages using TCP. The method returns True after successfully sending a message. If an error occures during sending the message, the method returns False and an error message is printed within the console. This method is public. host : contains the address of the receiver msg : contains the content of the message\n\n<|skeleton|>\nclass Peer:\n \"\"\"This class is used for receiving and sending messages using TCP. Therefore a seperated thread is started which will be listening for incoming messages at the specified port. Any message received will be added to a list.\"\"\"\n\n def __init__(self, serverport, add_message_to_list, shutdown):\n \"\"\"Initializing of an object of the class Peer. serverport : Port number which is used for listening and sending messages using TCP. Any port number can be chosen but make sure all clients are using the same port number for TCP communication. add_message_to_list : A function adding a received message to the list of messages. shutdown : A parameter used for stopping the infinite loop of the main thread which is listening for incoming messages.\"\"\"\n <|body_0|>\n\n def __mainloop(self, add_message_to_list):\n \"\"\"This method creates a socket at the specified port. An infinite loop is used for listening for incoming messages. 
If a message is received it will be added to a list and the loop continues listening for new messages. This method is private. add_message_to_list : A function adding a received message to the list of messages.\"\"\"\n <|body_1|>\n\n def connectandsend(self, host, msg):\n \"\"\"This method is used for sending messages using TCP. The method returns True after successfully sending a message. If an error occures during sending the message, the method returns False and an error message is printed within the console. This method is public. host : contains the address of the receiver msg : contains the content of the message\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.__serverport = int(serverport)\n self.shutdown = shutdown\n t = threading.Thread(target=self.__mainloop, args=[add_message_to_list])\n t.start()\n<|end_body_0|>\n\n<|body_start_1|>\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n s.bind(('', self.__serverport))\n s.listen(5)\n s.settimeout(2)\n while not self.shutdown[0]:\n try:\n clientsock, clientaddr = s.accept()\n clientsock.settimeout(None)\n msg = clientsock.recv(2048)\n add_message_to_list(msg)\n self.clientsock.close()\n self.clientsock = None\n except:\n continue\n s.close()\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, int(self.__serverport)))\n s.sendall(msg)\n s.close()\n except:\n traceback.print_exc()\n return False\n return True\n<|end_body_2|>\n", "revision_id": "f3fe9ef9a14d775b97b5944deab0bd05ae07d4a2", "skeleton": "<|skeleton|>\nclass Peer:\n \"\"\"This class is used for receiving and sending messages using TCP. Therefore a seperated thread is started which will be listening for incoming messages at the specified port. Any message received will be added to a list.\"\"\"\n\n def __init__(self, serverport, add_message_to_list, shutdown):\n \"\"\"Initializing of an object of the class Peer. serverport : Port number which is used for listening and sending messages using TCP. Any port number can be chosen but make sure all clients are using the same port number for TCP communication. add_message_to_list : A function adding a received message to the list of messages. shutdown : A parameter used for stopping the infinite loop of the main thread which is listening for incoming messages.\"\"\"\n <|body_0|>\n\n def __mainloop(self, add_message_to_list):\n \"\"\"This method creates a socket at the specified port. An infinite loop is used for listening for incoming messages. If a message is received it will be added to a list and the loop continues listening for new messages. This method is private. add_message_to_list : A function adding a received message to the list of messages.\"\"\"\n <|body_1|>\n\n def connectandsend(self, host, msg):\n \"\"\"This method is used for sending messages using TCP. The method returns True after successfully sending a message. If an error occures during sending the message, the method returns False and an error message is printed within the console. This method is public. host : contains the address of the receiver msg : contains the content of the message\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Peer:\n \"\"\"This class is used for receiving and sending messages using TCP. 
Therefore a seperated thread is started which will be listening for incoming messages at the specified port. Any message received will be added to a list.\"\"\"\n\n def __init__(self, serverport, add_message_to_list, shutdown):\n \"\"\"Initializing of an object of the class Peer. serverport : Port number which is used for listening and sending messages using TCP. Any port number can be chosen but make sure all clients are using the same port number for TCP communication. add_message_to_list : A function adding a received message to the list of messages. shutdown : A parameter used for stopping the infinite loop of the main thread which is listening for incoming messages.\"\"\"\n self.__serverport = int(serverport)\n self.shutdown = shutdown\n t = threading.Thread(target=self.__mainloop, args=[add_message_to_list])\n t.start()\n\n def __mainloop(self, add_message_to_list):\n \"\"\"This method creates a socket at the specified port. An infinite loop is used for listening for incoming messages. If a message is received it will be added to a list and the loop continues listening for new messages. This method is private. add_message_to_list : A function adding a received message to the list of messages.\"\"\"\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n s.bind(('', self.__serverport))\n s.listen(5)\n s.settimeout(2)\n while not self.shutdown[0]:\n try:\n clientsock, clientaddr = s.accept()\n clientsock.settimeout(None)\n msg = clientsock.recv(2048)\n add_message_to_list(msg)\n self.clientsock.close()\n self.clientsock = None\n except:\n continue\n s.close()\n\n def connectandsend(self, host, msg):\n \"\"\"This method is used for sending messages using TCP. The method returns True after successfully sending a message. If an error occures during sending the message, the method returns False and an error message is printed within the console. This method is public. host : contains the address of the receiver msg : contains the content of the message\"\"\"\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, int(self.__serverport)))\n s.sendall(msg)\n s.close()\n except:\n traceback.print_exc()\n return False\n return True\n", "source": "the_stack_v2_python_sparse", "source_path": "common/tcp_framework.py", "source_repo": "Abdullah-Gouda/LabControlSoftware", "split": "val", "star_events_count": 0}
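One fix worth flagging in the `Peer` record: `__mainloop` calls `self.clientsock.close()`, but `accept()` bound the socket to the *local* name `clientsock`, so the close raises `AttributeError`, the bare `except` swallows it, and client sockets are left to the garbage collector. A corrected loop, extracted as a standalone function (the port in the demo is arbitrary):

import socket
import threading

def mainloop_fixed(serverport, add_message_to_list, shutdown):
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.bind(('', serverport))
    s.listen(5)
    s.settimeout(2)  # periodic wake-up so shutdown[0] is re-checked
    while not shutdown[0]:
        try:
            clientsock, _ = s.accept()
        except socket.timeout:
            continue  # no connection this interval; loop and re-check shutdown
        try:
            clientsock.settimeout(None)
            add_message_to_list(clientsock.recv(2048))
        finally:
            clientsock.close()  # close the local socket, not self.clientsock
    s.close()

if __name__ == '__main__':
    shutdown = [False]
    t = threading.Thread(target=mainloop_fixed, args=(12001, print, shutdown))
    t.start()
    # ... peers would call connectandsend('localhost', b'hello') here ...
    shutdown[0] = True
    t.join()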
{"blob_id": "4cc074404f8caee5afdc49f2ff33890ecae9061a", "bodies": ["data = request.json\ndb = database.DBClass()\ntry:\n query = 'INSERT INTO users(email, password, name)\\n VALUES(%(email)s, %(password)s, %(name)s);\\n '\n db.execute(query, data)\nexcept err.IntegrityError:\n return ({'status': 'Failed', 'message': 'Email Duplicated'}, 400)\nfinally:\n db.commit()\n db.close()\nreturn ({'status': 'Success'}, 201)", "userEmail = get_jwt_identity()\njti = get_jwt()['jti']\ndb = database.DBClass()\nquery = '\\n insert into revoked_tokens(jti) values(%s);\\n '\ndb.execute(query, (jti,))\nquery = '\\n select email from users WHERE email=(%s)\\n '\ndbdata = db.executeOne(query, (userEmail,))\ndbdata = dbdata['email']\nif dbdata is None:\n return ({'status': 'Failed', 'message': \"The email could not be found. It doesn't seem to be registered.\"}, 400)\nelse:\n query = '\\n DELETE FROM users WHERE email=(%s);\\n '\n db.execute(query, (dbdata,))\n db.commit()\n return ({'status': 'Success'}, 200)", "data = request.json\nuserEmail = get_jwt_identity()\nnew_password = data['new_password']\ndb = database.DBClass()\nif data['new_password'] != data['new_password_again']:\n return ({'status': 'Failed', 'message': 'The two passwords entered are different'}, 400)\nquery_list = [f'select * from users where email = \"{userEmail}\";', f'update users set password = \"{new_password}\" where email = \"{userEmail}\";']\nif db.executeOne(query_list[0]):\n db.execute_and_commit(query_list[1])\n return ({'status': 'Success'}, 200)\nelse:\n return ({'status': 'Failed', 'message': 'Wrong email'}, 400)"], "bodies_text": "<|body_start_0|>\n data = request.json\n db = database.DBClass()\n try:\n query = 'INSERT INTO users(email, password, name)\\n VALUES(%(email)s, %(password)s, %(name)s);\\n '\n db.execute(query, data)\n except err.IntegrityError:\n return ({'status': 'Failed', 'message': 'Email Duplicated'}, 400)\n finally:\n db.commit()\n db.close()\n return ({'status': 'Success'}, 201)\n<|end_body_0|>\n\n<|body_start_1|>\n userEmail = get_jwt_identity()\n jti = get_jwt()['jti']\n db = database.DBClass()\n query = '\\n insert into revoked_tokens(jti) values(%s);\\n '\n db.execute(query, (jti,))\n query = '\\n select email from users WHERE email=(%s)\\n '\n dbdata = db.executeOne(query, (userEmail,))\n dbdata = dbdata['email']\n if dbdata is None:\n return ({'status': 'Failed', 'message': \"The email could not be found. 
It doesn't seem to be registered.\"}, 400)\n else:\n query = '\\n DELETE FROM users WHERE email=(%s);\\n '\n db.execute(query, (dbdata,))\n db.commit()\n return ({'status': 'Success'}, 200)\n<|end_body_1|>\n\n<|body_start_2|>\n data = request.json\n userEmail = get_jwt_identity()\n new_password = data['new_password']\n db = database.DBClass()\n if data['new_password'] != data['new_password_again']:\n return ({'status': 'Failed', 'message': 'The two passwords entered are different'}, 400)\n query_list = [f'select * from users where email = \"{userEmail}\";', f'update users set password = \"{new_password}\" where email = \"{userEmail}\";']\n if db.executeOne(query_list[0]):\n db.execute_and_commit(query_list[1])\n return ({'status': 'Success'}, 200)\n else:\n return ({'status': 'Failed', 'message': 'Wrong email'}, 400)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "register", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass register:\n\n def post(self, *args):\n \"\"\"클라이언트로부터 회원 정보를 받아 회원가입을 수행하고 결과를 반환한다.\"\"\"\n <|body_0|>\n\n def delete(self, *args):\n \"\"\"클라이언트로부터 받은 jwt토큰에서 이메일을 분리하여 회원탈퇴를 수행하고 결과를 반환한다.\"\"\"\n <|body_1|>\n\n def put(self, *args):\n \"\"\"클라이언트로부터 비밀번호를 받아서 비밀번호 변경을 수행한다.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n data = request.json\n db = database.DBClass()\n try:\n query = 'INSERT INTO users(email, password, name)\\n VALUES(%(email)s, %(password)s, %(name)s);\\n '\n db.execute(query, data)\n except err.IntegrityError:\n return ({'status': 'Failed', 'message': 'Email Duplicated'}, 400)\n finally:\n db.commit()\n db.close()\n return ({'status': 'Success'}, 201)\n<|end_body_0|>\n\n<|body_start_1|>\n userEmail = get_jwt_identity()\n jti = get_jwt()['jti']\n db = database.DBClass()\n query = '\\n insert into revoked_tokens(jti) values(%s);\\n '\n db.execute(query, (jti,))\n query = '\\n select email from users WHERE email=(%s)\\n '\n dbdata = db.executeOne(query, (userEmail,))\n dbdata = dbdata['email']\n if dbdata is None:\n return ({'status': 'Failed', 'message': \"The email could not be found. 
It doesn't seem to be registered.\"}, 400)\n else:\n query = '\\n DELETE FROM users WHERE email=(%s);\\n '\n db.execute(query, (dbdata,))\n db.commit()\n return ({'status': 'Success'}, 200)\n<|end_body_1|>\n\n<|body_start_2|>\n data = request.json\n userEmail = get_jwt_identity()\n new_password = data['new_password']\n db = database.DBClass()\n if data['new_password'] != data['new_password_again']:\n return ({'status': 'Failed', 'message': 'The two passwords entered are different'}, 400)\n query_list = [f'select * from users where email = \"{userEmail}\";', f'update users set password = \"{new_password}\" where email = \"{userEmail}\";']\n if db.executeOne(query_list[0]):\n db.execute_and_commit(query_list[1])\n return ({'status': 'Success'}, 200)\n else:\n return ({'status': 'Failed', 'message': 'Wrong email'}, 400)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000057", "length_bytes": 7155, "license_type": "no_license", "methods": [{"docstring": "클라이언트로부터 회원 정보를 받아 회원가입을 수행하고 결과를 반환한다.", "name": "post", "signature": "def post(self, *args)"}, {"docstring": "클라이언트로부터 받은 jwt토큰에서 이메일을 분리하여 회원탈퇴를 수행하고 결과를 반환한다.", "name": "delete", "signature": "def delete(self, *args)"}, {"docstring": "클라이언트로부터 비밀번호를 받아서 비밀번호 변경을 수행한다.", "name": "put", "signature": "def put(self, *args)"}], "n_methods": 3, "prompt": "Implement the Python class `register` described below.\n\nClass description:\nImplement the register class.\n\nMethod signatures and docstrings:\n- def post(self, *args): 클라이언트로부터 회원 정보를 받아 회원가입을 수행하고 결과를 반환한다.\n- def delete(self, *args): 클라이언트로부터 받은 jwt토큰에서 이메일을 분리하여 회원탈퇴를 수행하고 결과를 반환한다.\n- def put(self, *args): 클라이언트로부터 비밀번호를 받아서 비밀번호 변경을 수행한다.", "prompted_full_text": "Implement the Python class `register` described below.\n\nClass description:\nImplement the register class.\n\nMethod signatures and docstrings:\n- def post(self, *args): 클라이언트로부터 회원 정보를 받아 회원가입을 수행하고 결과를 반환한다.\n- def delete(self, *args): 클라이언트로부터 받은 jwt토큰에서 이메일을 분리하여 회원탈퇴를 수행하고 결과를 반환한다.\n- def put(self, *args): 클라이언트로부터 비밀번호를 받아서 비밀번호 변경을 수행한다.\n\n<|skeleton|>\nclass register:\n\n def post(self, *args):\n \"\"\"클라이언트로부터 회원 정보를 받아 회원가입을 수행하고 결과를 반환한다.\"\"\"\n <|body_0|>\n\n def delete(self, *args):\n \"\"\"클라이언트로부터 받은 jwt토큰에서 이메일을 분리하여 회원탈퇴를 수행하고 결과를 반환한다.\"\"\"\n <|body_1|>\n\n def put(self, *args):\n \"\"\"클라이언트로부터 비밀번호를 받아서 비밀번호 변경을 수행한다.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n data = request.json\n db = database.DBClass()\n try:\n query = 'INSERT INTO users(email, password, name)\\n VALUES(%(email)s, %(password)s, %(name)s);\\n '\n db.execute(query, data)\n except err.IntegrityError:\n return ({'status': 'Failed', 'message': 'Email Duplicated'}, 400)\n finally:\n db.commit()\n db.close()\n return ({'status': 'Success'}, 201)\n<|end_body_0|>\n\n<|body_start_1|>\n userEmail = get_jwt_identity()\n jti = get_jwt()['jti']\n db = database.DBClass()\n query = '\\n insert into revoked_tokens(jti) values(%s);\\n '\n db.execute(query, (jti,))\n query = '\\n select email from users WHERE email=(%s)\\n '\n dbdata = db.executeOne(query, (userEmail,))\n dbdata = dbdata['email']\n if dbdata is None:\n return ({'status': 'Failed', 'message': \"The email could not be found. 
It doesn't seem to be registered.\"}, 400)\n else:\n query = '\\n DELETE FROM users WHERE email=(%s);\\n '\n db.execute(query, (dbdata,))\n db.commit()\n return ({'status': 'Success'}, 200)\n<|end_body_1|>\n\n<|body_start_2|>\n data = request.json\n userEmail = get_jwt_identity()\n new_password = data['new_password']\n db = database.DBClass()\n if data['new_password'] != data['new_password_again']:\n return ({'status': 'Failed', 'message': 'The two passwords entered are different'}, 400)\n query_list = [f'select * from users where email = \"{userEmail}\";', f'update users set password = \"{new_password}\" where email = \"{userEmail}\";']\n if db.executeOne(query_list[0]):\n db.execute_and_commit(query_list[1])\n return ({'status': 'Success'}, 200)\n else:\n return ({'status': 'Failed', 'message': 'Wrong email'}, 400)\n<|end_body_2|>\n", "revision_id": "ee499c9741c6b7caaae3037262327d6e4c886755", "skeleton": "<|skeleton|>\nclass register:\n\n def post(self, *args):\n \"\"\"클라이언트로부터 회원 정보를 받아 회원가입을 수행하고 결과를 반환한다.\"\"\"\n <|body_0|>\n\n def delete(self, *args):\n \"\"\"클라이언트로부터 받은 jwt토큰에서 이메일을 분리하여 회원탈퇴를 수행하고 결과를 반환한다.\"\"\"\n <|body_1|>\n\n def put(self, *args):\n \"\"\"클라이언트로부터 비밀번호를 받아서 비밀번호 변경을 수행한다.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class register:\n def post(self, *args):\n \"\"\"클라이언트로부터 회원 정보를 받아 회원가입을 수행하고 결과를 반환한다.\"\"\"\n data = request.json\n db = database.DBClass()\n try:\n query = 'INSERT INTO users(email, password, name)\\n VALUES(%(email)s, %(password)s, %(name)s);\\n '\n db.execute(query, data)\n except err.IntegrityError:\n return ({'status': 'Failed', 'message': 'Email Duplicated'}, 400)\n finally:\n db.commit()\n db.close()\n return ({'status': 'Success'}, 201)\n\n def delete(self, *args):\n \"\"\"클라이언트로부터 받은 jwt토큰에서 이메일을 분리하여 회원탈퇴를 수행하고 결과를 반환한다.\"\"\"\n userEmail = get_jwt_identity()\n jti = get_jwt()['jti']\n db = database.DBClass()\n query = '\\n insert into revoked_tokens(jti) values(%s);\\n '\n db.execute(query, (jti,))\n query = '\\n select email from users WHERE email=(%s)\\n '\n dbdata = db.executeOne(query, (userEmail,))\n dbdata = dbdata['email']\n if dbdata is None:\n return ({'status': 'Failed', 'message': \"The email could not be found. It doesn't seem to be registered.\"}, 400)\n else:\n query = '\\n DELETE FROM users WHERE email=(%s);\\n '\n db.execute(query, (dbdata,))\n db.commit()\n return ({'status': 'Success'}, 200)\n\n def put(self, *args):\n \"\"\"클라이언트로부터 비밀번호를 받아서 비밀번호 변경을 수행한다.\"\"\"\n data = request.json\n userEmail = get_jwt_identity()\n new_password = data['new_password']\n db = database.DBClass()\n if data['new_password'] != data['new_password_again']:\n return ({'status': 'Failed', 'message': 'The two passwords entered are different'}, 400)\n query_list = [f'select * from users where email = \"{userEmail}\";', f'update users set password = \"{new_password}\" where email = \"{userEmail}\";']\n if db.executeOne(query_list[0]):\n db.execute_and_commit(query_list[1])\n return ({'status': 'Success'}, 200)\n else:\n return ({'status': 'Failed', 'message': 'Wrong email'}, 400)\n", "source": "the_stack_v2_python_sparse", "source_path": "2021_2_backEnd/backEnd/user/register.py", "source_repo": "ghwns82/GFW1", "split": "val", "star_events_count": 0}
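`register.post` and `register.delete` use parameterized queries, but `register.put` interpolates the email and the new password into SQL with f-strings, which is an injection risk (the password is also stored unhashed, which the sketch leaves alone to stay close to the record). A parameterized rewrite of just those two statements, assuming `DBClass.executeOne` and `execute_and_commit` forward bind parameters the way `execute` does:

def change_password(db, user_email, new_password):
    # db is assumed to expose the record's DBClass interface; only the
    # queries change -- user input now travels as bind parameters.
    select_query = 'SELECT * FROM users WHERE email = %s;'
    update_query = 'UPDATE users SET password = %s WHERE email = %s;'
    if db.executeOne(select_query, (user_email,)):
        db.execute_and_commit(update_query, (new_password, user_email))
        return True
    return False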
{"blob_id": "46dd4e6be3f394ac89dbd6b31f5f942478641273", "bodies": ["sums = sum(nums)\nif sums & 1 == 1:\n return False\nhalf = sums // 2\ndp = [False] * (half + 1)\ndp[0] = True\nfor n in nums:\n for i in range(half, n - 1, -1):\n dp[i] |= dp[i - n]\nreturn dp[half]", "def rec(curr, nums):\n if sum(curr) == sum(nums):\n return True\n elif sum(curr) > sum(nums):\n return False\n for i in range(len(nums)):\n if nums[i] == 0:\n continue\n curr += [nums[i]]\n nums[i] = 0\n if rec(curr, nums):\n return True\n nums[i] = curr.pop()\n return False\nans = rec([], nums)\nreturn ans"], "bodies_text": "<|body_start_0|>\n sums = sum(nums)\n if sums & 1 == 1:\n return False\n half = sums // 2\n dp = [False] * (half + 1)\n dp[0] = True\n for n in nums:\n for i in range(half, n - 1, -1):\n dp[i] |= dp[i - n]\n return dp[half]\n<|end_body_0|>\n\n<|body_start_1|>\n def rec(curr, nums):\n if sum(curr) == sum(nums):\n return True\n elif sum(curr) > sum(nums):\n return False\n for i in range(len(nums)):\n if nums[i] == 0:\n continue\n curr += [nums[i]]\n nums[i] = 0\n if rec(curr, nums):\n return True\n nums[i] = curr.pop()\n return False\n ans = rec([], nums)\n return ans\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def canPartition(self, nums):\n \"\"\":type nums: List[int] :rtype: bool\"\"\"\n <|body_0|>\n\n def canPartition_TLE(self, nums):\n \"\"\":type nums: List[int] :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n sums = sum(nums)\n if sums & 1 == 1:\n return False\n half = sums // 2\n dp = [False] * (half + 1)\n dp[0] = True\n for n in nums:\n for i in range(half, n - 1, -1):\n dp[i] |= dp[i - n]\n return dp[half]\n<|end_body_0|>\n\n<|body_start_1|>\n def rec(curr, nums):\n if sum(curr) == sum(nums):\n return True\n elif sum(curr) > sum(nums):\n return False\n for i in range(len(nums)):\n if nums[i] == 0:\n continue\n curr += [nums[i]]\n nums[i] = 0\n if rec(curr, nums):\n return True\n nums[i] = curr.pop()\n return False\n ans = rec([], nums)\n return ans\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000058", "length_bytes": 3632, "license_type": "no_license", "methods": [{"docstring": ":type nums: List[int] :rtype: bool", "name": "canPartition", "signature": "def canPartition(self, nums)"}, {"docstring": ":type nums: List[int] :rtype: bool", "name": "canPartition_TLE", "signature": "def canPartition_TLE(self, nums)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_041704", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def canPartition(self, nums): :type nums: List[int] :rtype: bool\n- def canPartition_TLE(self, nums): :type nums: List[int] :rtype: bool", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def canPartition(self, nums): :type nums: List[int] :rtype: bool\n- def canPartition_TLE(self, nums): :type nums: List[int] :rtype: bool\n\n<|skeleton|>\nclass Solution:\n\n def canPartition(self, nums):\n \"\"\":type nums: List[int] :rtype: bool\"\"\"\n <|body_0|>\n\n def canPartition_TLE(self, nums):\n \"\"\":type nums: List[int] :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n sums = sum(nums)\n if sums & 1 == 1:\n return False\n 
half = sums // 2\n dp = [False] * (half + 1)\n dp[0] = True\n for n in nums:\n for i in range(half, n - 1, -1):\n dp[i] |= dp[i - n]\n return dp[half]\n<|end_body_0|>\n\n<|body_start_1|>\n def rec(curr, nums):\n if sum(curr) == sum(nums):\n return True\n elif sum(curr) > sum(nums):\n return False\n for i in range(len(nums)):\n if nums[i] == 0:\n continue\n curr += [nums[i]]\n nums[i] = 0\n if rec(curr, nums):\n return True\n nums[i] = curr.pop()\n return False\n ans = rec([], nums)\n return ans\n<|end_body_1|>\n", "revision_id": "2d5fa4cd696d5035ea8859befeadc5cc436959c9", "skeleton": "<|skeleton|>\nclass Solution:\n\n def canPartition(self, nums):\n \"\"\":type nums: List[int] :rtype: bool\"\"\"\n <|body_0|>\n\n def canPartition_TLE(self, nums):\n \"\"\":type nums: List[int] :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def canPartition(self, nums):\n \"\"\":type nums: List[int] :rtype: bool\"\"\"\n sums = sum(nums)\n if sums & 1 == 1:\n return False\n half = sums // 2\n dp = [False] * (half + 1)\n dp[0] = True\n for n in nums:\n for i in range(half, n - 1, -1):\n dp[i] |= dp[i - n]\n return dp[half]\n\n def canPartition_TLE(self, nums):\n \"\"\":type nums: List[int] :rtype: bool\"\"\"\n def rec(curr, nums):\n if sum(curr) == sum(nums):\n return True\n elif sum(curr) > sum(nums):\n return False\n for i in range(len(nums)):\n if nums[i] == 0:\n continue\n curr += [nums[i]]\n nums[i] = 0\n if rec(curr, nums):\n return True\n nums[i] = curr.pop()\n return False\n ans = rec([], nums)\n return ans\n", "source": "the_stack_v2_python_sparse", "source_path": "SourceCode/Python/Problem/00416.Partition Equal Subset Sum.py", "source_repo": "roger6blog/LeetCode", "split": "val", "star_events_count": 0}
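The accepted `canPartition` in the `Solution` record is the classic 0/1 subset-sum DP: look for a subset summing to half the total, scanning capacities from `half` down to `n` so each number is used at most once (a forward scan over the same 1-D array would let one element be counted twice). A standalone version with two quick checks:

def can_partition(nums):
    # 0/1 subset-sum over a 1-D boolean table; dp[i] means "some subset
    # of the numbers seen so far sums to exactly i".
    total = sum(nums)
    if total % 2:
        return False  # an odd total can never split into two equal halves
    half = total // 2
    dp = [False] * (half + 1)
    dp[0] = True
    for n in nums:
        for i in range(half, n - 1, -1):  # downward: use each n at most once
            dp[i] = dp[i] or dp[i - n]
    return dp[half]

assert can_partition([1, 5, 11, 5])      # {1, 5, 5} vs {11}
assert not can_partition([1, 2, 3, 5])   # total 11 is odd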
{"blob_id": "4a0978c96700ce9bed0eab343272b9747366f6a1", "bodies": ["super().__init__(*args, **kwargs)\nself.fields['code'].widget.attrs['placeholder'] = _('Item code')\nself.fields['desciption'].widget.attrs['placeholder'] = _('Item desciption')\nself.fields['barcode'].widget.attrs['placeholder'] = _('item barcode')\nself.fields['price'].help_text = _(\"Enter itme's selling unit price\")\nself.fields['price'].widget.attrs['placeholder'] = _('item price')\nself.fields['stock_limit'].widget.attrs['placeholder'] = _('Limit')\nself.fields['stock_limit'].help_text = _('Enter the warning stock limit to re-order item')\nself.fields['photo'].help_text = _('Item photo must be at least 500x500')", "form = super().save(commit=True)\nif form.photo:\n try:\n x = self.cleaned_data.get('x')\n y = self.cleaned_data.get('y')\n w = self.cleaned_data.get('width')\n h = self.cleaned_data.get('height')\n image = Image.open(form.photo.path)\n cropped_image = image.crop((x, y, w + x, h + y))\n if cropped_image.size[0] * cropped_image.size[1] < 0.5 * 1024 * 1024:\n raise forms.ValidationError(_('Image file too small ( < 500kb )'))\n else:\n resized_image = cropped_image.resize((500, 500), Image.ANTIALIAS)\n resized_image.save(form.photo.path)\n return form\n except Exception as error_type:\n print(error_type)\n form.save()\n return form\nelse:\n form.save()\n return form"], "bodies_text": "<|body_start_0|>\n super().__init__(*args, **kwargs)\n self.fields['code'].widget.attrs['placeholder'] = _('Item code')\n self.fields['desciption'].widget.attrs['placeholder'] = _('Item desciption')\n self.fields['barcode'].widget.attrs['placeholder'] = _('item barcode')\n self.fields['price'].help_text = _(\"Enter itme's selling unit price\")\n self.fields['price'].widget.attrs['placeholder'] = _('item price')\n self.fields['stock_limit'].widget.attrs['placeholder'] = _('Limit')\n self.fields['stock_limit'].help_text = _('Enter the warning stock limit to re-order item')\n self.fields['photo'].help_text = _('Item photo must be at least 500x500')\n<|end_body_0|>\n\n<|body_start_1|>\n form = super().save(commit=True)\n if form.photo:\n try:\n x = self.cleaned_data.get('x')\n y = self.cleaned_data.get('y')\n w = self.cleaned_data.get('width')\n h = self.cleaned_data.get('height')\n image = Image.open(form.photo.path)\n cropped_image = image.crop((x, y, w + x, h + y))\n if cropped_image.size[0] * cropped_image.size[1] < 0.5 * 1024 * 1024:\n raise forms.ValidationError(_('Image file too small ( < 500kb )'))\n else:\n resized_image = cropped_image.resize((500, 500), Image.ANTIALIAS)\n resized_image.save(form.photo.path)\n return form\n except Exception as error_type:\n print(error_type)\n form.save()\n return form\n else:\n form.save()\n return form\n<|end_body_1|>\n", "class_docstring": "Form for items", "class_name": "ItemForm", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ItemForm:\n \"\"\"Form for items\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Method for initial values and functions\"\"\"\n <|body_0|>\n\n def save(self, commit=True):\n \"\"\"save for with cropped image\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(*args, **kwargs)\n self.fields['code'].widget.attrs['placeholder'] = _('Item code')\n self.fields['desciption'].widget.attrs['placeholder'] = _('Item desciption')\n self.fields['barcode'].widget.attrs['placeholder'] = _('item barcode')\n self.fields['price'].help_text = _(\"Enter itme's selling unit 
price\")\n self.fields['price'].widget.attrs['placeholder'] = _('item price')\n self.fields['stock_limit'].widget.attrs['placeholder'] = _('Limit')\n self.fields['stock_limit'].help_text = _('Enter the warning stock limit to re-order item')\n self.fields['photo'].help_text = _('Item photo must be at least 500x500')\n<|end_body_0|>\n\n<|body_start_1|>\n form = super().save(commit=True)\n if form.photo:\n try:\n x = self.cleaned_data.get('x')\n y = self.cleaned_data.get('y')\n w = self.cleaned_data.get('width')\n h = self.cleaned_data.get('height')\n image = Image.open(form.photo.path)\n cropped_image = image.crop((x, y, w + x, h + y))\n if cropped_image.size[0] * cropped_image.size[1] < 0.5 * 1024 * 1024:\n raise forms.ValidationError(_('Image file too small ( < 500kb )'))\n else:\n resized_image = cropped_image.resize((500, 500), Image.ANTIALIAS)\n resized_image.save(form.photo.path)\n return form\n except Exception as error_type:\n print(error_type)\n form.save()\n return form\n else:\n form.save()\n return form\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000059", "length_bytes": 8200, "license_type": "permissive", "methods": [{"docstring": "Method for initial values and functions", "name": "__init__", "signature": "def __init__(self, *args, **kwargs)"}, {"docstring": "save for with cropped image", "name": "save", "signature": "def save(self, commit=True)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005720", "prompt": "Implement the Python class `ItemForm` described below.\n\nClass description:\nForm for items\n\nMethod signatures and docstrings:\n- def __init__(self, *args, **kwargs): Method for initial values and functions\n- def save(self, commit=True): save for with cropped image", "prompted_full_text": "Implement the Python class `ItemForm` described below.\n\nClass description:\nForm for items\n\nMethod signatures and docstrings:\n- def __init__(self, *args, **kwargs): Method for initial values and functions\n- def save(self, commit=True): save for with cropped image\n\n<|skeleton|>\nclass ItemForm:\n \"\"\"Form for items\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Method for initial values and functions\"\"\"\n <|body_0|>\n\n def save(self, commit=True):\n \"\"\"save for with cropped image\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(*args, **kwargs)\n self.fields['code'].widget.attrs['placeholder'] = _('Item code')\n self.fields['desciption'].widget.attrs['placeholder'] = _('Item desciption')\n self.fields['barcode'].widget.attrs['placeholder'] = _('item barcode')\n self.fields['price'].help_text = _(\"Enter itme's selling unit price\")\n self.fields['price'].widget.attrs['placeholder'] = _('item price')\n self.fields['stock_limit'].widget.attrs['placeholder'] = _('Limit')\n self.fields['stock_limit'].help_text = _('Enter the warning stock limit to re-order item')\n self.fields['photo'].help_text = _('Item photo must be at least 500x500')\n<|end_body_0|>\n\n<|body_start_1|>\n form = super().save(commit=True)\n if form.photo:\n try:\n x = self.cleaned_data.get('x')\n y = self.cleaned_data.get('y')\n w = self.cleaned_data.get('width')\n h = self.cleaned_data.get('height')\n image = Image.open(form.photo.path)\n cropped_image = image.crop((x, y, w + x, h + y))\n if cropped_image.size[0] * cropped_image.size[1] < 0.5 * 1024 * 1024:\n raise forms.ValidationError(_('Image file too small ( < 500kb )'))\n else:\n resized_image = cropped_image.resize((500, 500), Image.ANTIALIAS)\n 
resized_image.save(form.photo.path)\n return form\n except Exception as error_type:\n print(error_type)\n form.save()\n return form\n else:\n form.save()\n return form\n<|end_body_1|>\n", "revision_id": "f3f8354bf164fcfe86d597cdbc28b0e3b7b73bd1", "skeleton": "<|skeleton|>\nclass ItemForm:\n \"\"\"Form for items\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Method for initial values and functions\"\"\"\n <|body_0|>\n\n def save(self, commit=True):\n \"\"\"save for with cropped image\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ItemForm:\n \"\"\"Form for items\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Method for initial values and functions\"\"\"\n super().__init__(*args, **kwargs)\n self.fields['code'].widget.attrs['placeholder'] = _('Item code')\n self.fields['desciption'].widget.attrs['placeholder'] = _('Item desciption')\n self.fields['barcode'].widget.attrs['placeholder'] = _('item barcode')\n self.fields['price'].help_text = _(\"Enter itme's selling unit price\")\n self.fields['price'].widget.attrs['placeholder'] = _('item price')\n self.fields['stock_limit'].widget.attrs['placeholder'] = _('Limit')\n self.fields['stock_limit'].help_text = _('Enter the warning stock limit to re-order item')\n self.fields['photo'].help_text = _('Item photo must be at least 500x500')\n\n def save(self, commit=True):\n \"\"\"save for with cropped image\"\"\"\n form = super().save(commit=True)\n if form.photo:\n try:\n x = self.cleaned_data.get('x')\n y = self.cleaned_data.get('y')\n w = self.cleaned_data.get('width')\n h = self.cleaned_data.get('height')\n image = Image.open(form.photo.path)\n cropped_image = image.crop((x, y, w + x, h + y))\n if cropped_image.size[0] * cropped_image.size[1] < 0.5 * 1024 * 1024:\n raise forms.ValidationError(_('Image file too small ( < 500kb )'))\n else:\n resized_image = cropped_image.resize((500, 500), Image.ANTIALIAS)\n resized_image.save(form.photo.path)\n return form\n except Exception as error_type:\n print(error_type)\n form.save()\n return form\n else:\n form.save()\n return form\n", "source": "the_stack_v2_python_sparse", "source_path": "seshat/stock/forms.py", "source_repo": "XecusM/SESHAT", "split": "val", "star_events_count": 0}
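The ItemForm record above keeps its repo's own spellings ('desciption', "itme's"): they mirror the model's field names and message strings at the recorded revision, so they are left verbatim. Functionally, though, the save body has two latent issues: cropped_image.size[0] * cropped_image.size[1] is a pixel count, yet it is compared against 0.5 * 1024 * 1024 with an error message about 500 kB, and Image.ANTIALIAS was removed in Pillow 10. A minimal standalone sketch of just the crop-and-resize step, assuming only Pillow (the crop_and_resize name and the explicit pixel guard are this sketch's, not the form's):

from PIL import Image

MIN_PIXELS = 500 * 500  # matches the "at least 500x500" help text above

def crop_and_resize(path, x, y, width, height, size=(500, 500)):
    """Crop the rectangle (x, y, x+width, y+height) and resize in place."""
    image = Image.open(path)
    cropped = image.crop((x, y, x + width, y + height))
    # Guard on pixel area, which is what the product of the two
    # dimensions actually measures (not a file size in bytes).
    if cropped.size[0] * cropped.size[1] < MIN_PIXELS:
        raise ValueError('Cropped image is smaller than 500x500 pixels')
    # Image.ANTIALIAS is gone in Pillow 10; LANCZOS is the same filter.
    cropped.resize(size, Image.LANCZOS).save(path)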
{"blob_id": "e375fc78f8c458d3ebad669c3137d8bdee0201be", "bodies": ["self.big = big\nself.medium = medium\nself.small = small\n'\\n self.carType = {\\n 1: \"big\",\\n 2: \"medium\",\\n 3: \"small\"\\n }\\n '", "if carType == 1:\n if self.big > 0:\n self.big -= 1\n return True\n else:\n return False\nelif carType == 2:\n if self.medium > 0:\n self.medium -= 1\n return True\n else:\n return False\nelif self.small > 0:\n self.small -= 1\n return True\nelse:\n return False\n'\\n if self.carType[carType] == \"big\":\\n if self.big == 0:\\n return False\\n self.big-=1\\n elif self.carType[carType] == \"medium\":\\n if self.medium == 0:\\n return False\\n self.medium-=1\\n elif self.carType[carType] == \"small\":\\n if self.small == 0:\\n return False\\n self.small-=1 \\n \\n return True\\n '"], "bodies_text": "<|body_start_0|>\n self.big = big\n self.medium = medium\n self.small = small\n '\\n self.carType = {\\n 1: \"big\",\\n 2: \"medium\",\\n 3: \"small\"\\n }\\n '\n<|end_body_0|>\n\n<|body_start_1|>\n if carType == 1:\n if self.big > 0:\n self.big -= 1\n return True\n else:\n return False\n elif carType == 2:\n if self.medium > 0:\n self.medium -= 1\n return True\n else:\n return False\n elif self.small > 0:\n self.small -= 1\n return True\n else:\n return False\n '\\n if self.carType[carType] == \"big\":\\n if self.big == 0:\\n return False\\n self.big-=1\\n elif self.carType[carType] == \"medium\":\\n if self.medium == 0:\\n return False\\n self.medium-=1\\n elif self.carType[carType] == \"small\":\\n if self.small == 0:\\n return False\\n self.small-=1 \\n \\n return True\\n '\n<|end_body_1|>\n", "class_docstring": "", "class_name": "ParkingSystem", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ParkingSystem:\n\n def __init__(self, big, medium, small):\n \"\"\":type big: int :type medium: int :type small: int\"\"\"\n <|body_0|>\n\n def addCar(self, carType):\n \"\"\":type carType: int :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.big = big\n self.medium = medium\n self.small = small\n '\\n self.carType = {\\n 1: \"big\",\\n 2: \"medium\",\\n 3: \"small\"\\n }\\n '\n<|end_body_0|>\n\n<|body_start_1|>\n if carType == 1:\n if self.big > 0:\n self.big -= 1\n return True\n else:\n return False\n elif carType == 2:\n if self.medium > 0:\n self.medium -= 1\n return True\n else:\n return False\n elif self.small > 0:\n self.small -= 1\n return True\n else:\n return False\n '\\n if self.carType[carType] == \"big\":\\n if self.big == 0:\\n return False\\n self.big-=1\\n elif self.carType[carType] == \"medium\":\\n if self.medium == 0:\\n return False\\n self.medium-=1\\n elif self.carType[carType] == \"small\":\\n if self.small == 0:\\n return False\\n self.small-=1 \\n \\n return True\\n '\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000060", "length_bytes": 2955, "license_type": "no_license", "methods": [{"docstring": ":type big: int :type medium: int :type small: int", "name": "__init__", "signature": "def __init__(self, big, medium, small)"}, {"docstring": ":type carType: int :rtype: bool", "name": "addCar", "signature": "def addCar(self, carType)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_053841", "prompt": "Implement the Python class `ParkingSystem` described below.\n\nClass description:\nImplement the ParkingSystem class.\n\nMethod signatures and docstrings:\n- def __init__(self, big, medium, small): :type big: int :type medium: int :type small: 
int\n- def addCar(self, carType): :type carType: int :rtype: bool", "prompted_full_text": "Implement the Python class `ParkingSystem` described below.\n\nClass description:\nImplement the ParkingSystem class.\n\nMethod signatures and docstrings:\n- def __init__(self, big, medium, small): :type big: int :type medium: int :type small: int\n- def addCar(self, carType): :type carType: int :rtype: bool\n\n<|skeleton|>\nclass ParkingSystem:\n\n def __init__(self, big, medium, small):\n \"\"\":type big: int :type medium: int :type small: int\"\"\"\n <|body_0|>\n\n def addCar(self, carType):\n \"\"\":type carType: int :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.big = big\n self.medium = medium\n self.small = small\n '\\n self.carType = {\\n 1: \"big\",\\n 2: \"medium\",\\n 3: \"small\"\\n }\\n '\n<|end_body_0|>\n\n<|body_start_1|>\n if carType == 1:\n if self.big > 0:\n self.big -= 1\n return True\n else:\n return False\n elif carType == 2:\n if self.medium > 0:\n self.medium -= 1\n return True\n else:\n return False\n elif self.small > 0:\n self.small -= 1\n return True\n else:\n return False\n '\\n if self.carType[carType] == \"big\":\\n if self.big == 0:\\n return False\\n self.big-=1\\n elif self.carType[carType] == \"medium\":\\n if self.medium == 0:\\n return False\\n self.medium-=1\\n elif self.carType[carType] == \"small\":\\n if self.small == 0:\\n return False\\n self.small-=1 \\n \\n return True\\n '\n<|end_body_1|>\n", "revision_id": "f0f0ae1373cc83efa6de4f97498792fa5628b8c3", "skeleton": "<|skeleton|>\nclass ParkingSystem:\n\n def __init__(self, big, medium, small):\n \"\"\":type big: int :type medium: int :type small: int\"\"\"\n <|body_0|>\n\n def addCar(self, carType):\n \"\"\":type carType: int :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ParkingSystem:\n def __init__(self, big, medium, small):\n \"\"\":type big: int :type medium: int :type small: int\"\"\"\n self.big = big\n self.medium = medium\n self.small = small\n '\\n self.carType = {\\n 1: \"big\",\\n 2: \"medium\",\\n 3: \"small\"\\n }\\n '\n\n def addCar(self, carType):\n \"\"\":type carType: int :rtype: bool\"\"\"\n if carType == 1:\n if self.big > 0:\n self.big -= 1\n return True\n else:\n return False\n elif carType == 2:\n if self.medium > 0:\n self.medium -= 1\n return True\n else:\n return False\n elif self.small > 0:\n self.small -= 1\n return True\n else:\n return False\n '\\n if self.carType[carType] == \"big\":\\n if self.big == 0:\\n return False\\n self.big-=1\\n elif self.carType[carType] == \"medium\":\\n if self.medium == 0:\\n return False\\n self.medium-=1\\n elif self.carType[carType] == \"small\":\\n if self.small == 0:\\n return False\\n self.small-=1 \\n \\n return True\\n '\n", "source": "the_stack_v2_python_sparse", "source_path": "parkingSystem.py", "source_repo": "trishalapiz/Python-practice", "split": "val", "star_events_count": 0}
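The string literal trailing each body in the ParkingSystem record is the earlier dictionary-based attempt left behind as a bare expression; it evaluates to a constant and has no effect at runtime. A compact sketch of the same logic under the usual interface, with one list indexed by carType replacing the three-way branch:

class ParkingSystem:
    def __init__(self, big, medium, small):
        # index 0 is unused so that carType (1..3) indexes directly
        self.slots = [0, big, medium, small]

    def addCar(self, carType):
        if self.slots[carType] > 0:
            self.slots[carType] -= 1
            return True
        return False

ps = ParkingSystem(1, 1, 0)
assert ps.addCar(1) is True   # one big slot available
assert ps.addCar(1) is False  # big lot now full
assert ps.addCar(3) is False  # never had a small slot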
{"blob_id": "1b1f6d36acc2f6bfdeb4eb77017c94610fba38ef", "bodies": ["self.x = x_center\nself.y = y_center\nself.r = radius", "while True:\n x = uniform(-1, 1)\n y = uniform(-1, 1)\n if x ** 2 + y ** 2 <= 1:\n break\nreturn (self.x + x * self.r, self.y + y * self.r)"], "bodies_text": "<|body_start_0|>\n self.x = x_center\n self.y = y_center\n self.r = radius\n<|end_body_0|>\n\n<|body_start_1|>\n while True:\n x = uniform(-1, 1)\n y = uniform(-1, 1)\n if x ** 2 + y ** 2 <= 1:\n break\n return (self.x + x * self.r, self.y + y * self.r)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def __init__(self, radius, x_center, y_center):\n \"\"\":type radius: float :type x_center: float :type y_center: float\"\"\"\n <|body_0|>\n\n def randPoint(self):\n \"\"\":rtype: List[float]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.x = x_center\n self.y = y_center\n self.r = radius\n<|end_body_0|>\n\n<|body_start_1|>\n while True:\n x = uniform(-1, 1)\n y = uniform(-1, 1)\n if x ** 2 + y ** 2 <= 1:\n break\n return (self.x + x * self.r, self.y + y * self.r)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000061", "length_bytes": 666, "license_type": "no_license", "methods": [{"docstring": ":type radius: float :type x_center: float :type y_center: float", "name": "__init__", "signature": "def __init__(self, radius, x_center, y_center)"}, {"docstring": ":rtype: List[float]", "name": "randPoint", "signature": "def randPoint(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_030613", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def __init__(self, radius, x_center, y_center): :type radius: float :type x_center: float :type y_center: float\n- def randPoint(self): :rtype: List[float]", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def __init__(self, radius, x_center, y_center): :type radius: float :type x_center: float :type y_center: float\n- def randPoint(self): :rtype: List[float]\n\n<|skeleton|>\nclass Solution:\n\n def __init__(self, radius, x_center, y_center):\n \"\"\":type radius: float :type x_center: float :type y_center: float\"\"\"\n <|body_0|>\n\n def randPoint(self):\n \"\"\":rtype: List[float]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.x = x_center\n self.y = y_center\n self.r = radius\n<|end_body_0|>\n\n<|body_start_1|>\n while True:\n x = uniform(-1, 1)\n y = uniform(-1, 1)\n if x ** 2 + y ** 2 <= 1:\n break\n return (self.x + x * self.r, self.y + y * self.r)\n<|end_body_1|>\n", "revision_id": "97533d53c8892b6519e99f344489fa4fd4c9ab93", "skeleton": "<|skeleton|>\nclass Solution:\n\n def __init__(self, radius, x_center, y_center):\n \"\"\":type radius: float :type x_center: float :type y_center: float\"\"\"\n <|body_0|>\n\n def randPoint(self):\n \"\"\":rtype: List[float]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def __init__(self, radius, x_center, y_center):\n \"\"\":type radius: float :type x_center: float :type y_center: float\"\"\"\n self.x = x_center\n self.y = 
y_center\n self.r = radius\n\n def randPoint(self):\n \"\"\":rtype: List[float]\"\"\"\n while True:\n x = uniform(-1, 1)\n y = uniform(-1, 1)\n if x ** 2 + y ** 2 <= 1:\n break\n return (self.x + x * self.r, self.y + y * self.r)\n", "source": "the_stack_v2_python_sparse", "source_path": "19. Random/478.py", "source_repo": "proTao/leetcode", "split": "val", "star_events_count": 0}
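randPoint in the record above uses rejection sampling: draw (x, y) uniformly from the square [-1, 1]^2, retry until the point lands inside the unit disk (probability pi/4, about 0.785, so roughly 1.27 draws on average), then scale by the radius and translate to the center. A self-contained version with the uniform import the body assumes, returning a list to match the :rtype: List[float] docstring:

from random import uniform

class Solution:
    def __init__(self, radius, x_center, y_center):
        self.r, self.x, self.y = radius, x_center, y_center

    def randPoint(self):
        while True:
            x, y = uniform(-1, 1), uniform(-1, 1)
            if x * x + y * y <= 1:
                # scaling a uniform point in the unit disk keeps it uniform
                return [self.x + x * self.r, self.y + y * self.r]

sol = Solution(2.0, 1.0, -1.0)
px, py = sol.randPoint()
assert (px - 1.0) ** 2 + (py + 1.0) ** 2 <= 4.0 + 1e-9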
{"blob_id": "b79dce4b868720e39e382175dfaa5ed642edb5aa", "bodies": ["self._model = model\nself._module = module\nself._forward_fn = forward_fn or self.default_forward_fn", "def _hook_to_collect_inp_out_data(_, inp, out):\n \"\"\"\n hook to collect input and output data\n \"\"\"\n if collect_input:\n inp_data_list.append(inp[0])\n if collect_output:\n out_data_list.append(out)\n raise StopForwardException\ninp_data_list = []\nout_data_list = []\nhandle = self._module.register_forward_hook(_hook_to_collect_inp_out_data)\ndevice = get_device(self._model)\nmodel_input = change_tensor_device_placement(model_input, device)\ntry:\n with in_eval_mode(self._model), torch.no_grad():\n _ = self._forward_fn(self._model, model_input)\nexcept StopForwardException:\n pass\nhandle.remove()\ninp_data, out_data = (None, None)\nif inp_data_list and isinstance(inp_data_list[0], torch.Tensor):\n inp_data = inp_data_list[0].detach()\nif out_data_list and isinstance(out_data_list[0], torch.Tensor):\n out_data = out_data_list[0].detach()\nreturn (inp_data, out_data)", "if isinstance(inputs, (list, tuple)):\n inputs, _ = inputs\nif isinstance(inputs, torch.Tensor):\n inputs = [inputs]\nmodel(*inputs)"], "bodies_text": "<|body_start_0|>\n self._model = model\n self._module = module\n self._forward_fn = forward_fn or self.default_forward_fn\n<|end_body_0|>\n\n<|body_start_1|>\n def _hook_to_collect_inp_out_data(_, inp, out):\n \"\"\"\n hook to collect input and output data\n \"\"\"\n if collect_input:\n inp_data_list.append(inp[0])\n if collect_output:\n out_data_list.append(out)\n raise StopForwardException\n inp_data_list = []\n out_data_list = []\n handle = self._module.register_forward_hook(_hook_to_collect_inp_out_data)\n device = get_device(self._model)\n model_input = change_tensor_device_placement(model_input, device)\n try:\n with in_eval_mode(self._model), torch.no_grad():\n _ = self._forward_fn(self._model, model_input)\n except StopForwardException:\n pass\n handle.remove()\n inp_data, out_data = (None, None)\n if inp_data_list and isinstance(inp_data_list[0], torch.Tensor):\n inp_data = inp_data_list[0].detach()\n if out_data_list and isinstance(out_data_list[0], torch.Tensor):\n out_data = out_data_list[0].detach()\n return (inp_data, out_data)\n<|end_body_1|>\n\n<|body_start_2|>\n if isinstance(inputs, (list, tuple)):\n inputs, _ = inputs\n if isinstance(inputs, torch.Tensor):\n inputs = [inputs]\n model(*inputs)\n<|end_body_2|>\n", "class_docstring": "Collect input and output data to and from module", "class_name": "ModuleData", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ModuleData:\n \"\"\"Collect input and output data to and from module\"\"\"\n\n def __init__(self, model: torch.nn.Module, module: torch.nn.Module, forward_fn: Callable[[torch.nn.Module, Any], Any]=None):\n \"\"\":param model: Pytorch model :param module: Module reference :param forward_fn: Adapter function that performs forward pass given a model and inputs yielded from the data loader.\"\"\"\n <|body_0|>\n\n def collect_inp_out_data(self, model_input: Union[torch.tensor, List[torch.Tensor], Tuple[torch.Tensor]], collect_input: bool, collect_output: bool) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Collect input and output data depending on the collect_input and collect_output flag :param model_input: Input to model, Can be a single tensor or a list/tuple of tensors :param collect_input: Boolean to collect input or not :param collect_output: Boolean 
to collect output or not :return: Module's input and output data\"\"\"\n <|body_1|>\n\n def default_forward_fn(model: torch.nn.Module, inputs: Union[torch.tensor, List[torch.Tensor], Tuple[torch.Tensor]]):\n \"\"\"Default forward function that performs forward pass given a model and inputs yielded from the data loader. Data loader which yields torch.Tensor object that can be directly passed into the model, or a data loader which yields a tuple of length two where its first element can be directly passed into the model. :param model: PyTorch model. :param inputs: Inputs passed to model.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._model = model\n self._module = module\n self._forward_fn = forward_fn or self.default_forward_fn\n<|end_body_0|>\n\n<|body_start_1|>\n def _hook_to_collect_inp_out_data(_, inp, out):\n \"\"\"\n hook to collect input and output data\n \"\"\"\n if collect_input:\n inp_data_list.append(inp[0])\n if collect_output:\n out_data_list.append(out)\n raise StopForwardException\n inp_data_list = []\n out_data_list = []\n handle = self._module.register_forward_hook(_hook_to_collect_inp_out_data)\n device = get_device(self._model)\n model_input = change_tensor_device_placement(model_input, device)\n try:\n with in_eval_mode(self._model), torch.no_grad():\n _ = self._forward_fn(self._model, model_input)\n except StopForwardException:\n pass\n handle.remove()\n inp_data, out_data = (None, None)\n if inp_data_list and isinstance(inp_data_list[0], torch.Tensor):\n inp_data = inp_data_list[0].detach()\n if out_data_list and isinstance(out_data_list[0], torch.Tensor):\n out_data = out_data_list[0].detach()\n return (inp_data, out_data)\n<|end_body_1|>\n\n<|body_start_2|>\n if isinstance(inputs, (list, tuple)):\n inputs, _ = inputs\n if isinstance(inputs, torch.Tensor):\n inputs = [inputs]\n model(*inputs)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000062", "length_bytes": 37283, "license_type": "permissive", "methods": [{"docstring": ":param model: Pytorch model :param module: Module reference :param forward_fn: Adapter function that performs forward pass given a model and inputs yielded from the data loader.", "name": "__init__", "signature": "def __init__(self, model: torch.nn.Module, module: torch.nn.Module, forward_fn: Callable[[torch.nn.Module, Any], Any]=None)"}, {"docstring": "Collect input and output data depending on the collect_input and collect_output flag :param model_input: Input to model, Can be a single tensor or a list/tuple of tensors :param collect_input: Boolean to collect input or not :param collect_output: Boolean to collect output or not :return: Module's input and output data", "name": "collect_inp_out_data", "signature": "def collect_inp_out_data(self, model_input: Union[torch.tensor, List[torch.Tensor], Tuple[torch.Tensor]], collect_input: bool, collect_output: bool) -> Tuple[torch.Tensor, torch.Tensor]"}, {"docstring": "Default forward function that performs forward pass given a model and inputs yielded from the data loader. Data loader which yields torch.Tensor object that can be directly passed into the model, or a data loader which yields a tuple of length two where its first element can be directly passed into the model. :param model: PyTorch model. 
:param inputs: Inputs passed to model.", "name": "default_forward_fn", "signature": "def default_forward_fn(model: torch.nn.Module, inputs: Union[torch.tensor, List[torch.Tensor], Tuple[torch.Tensor]])"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_050196", "prompt": "Implement the Python class `ModuleData` described below.\n\nClass description:\nCollect input and output data to and from module\n\nMethod signatures and docstrings:\n- def __init__(self, model: torch.nn.Module, module: torch.nn.Module, forward_fn: Callable[[torch.nn.Module, Any], Any]=None): :param model: Pytorch model :param module: Module reference :param forward_fn: Adapter function that performs forward pass given a model and inputs yielded from the data loader.\n- def collect_inp_out_data(self, model_input: Union[torch.tensor, List[torch.Tensor], Tuple[torch.Tensor]], collect_input: bool, collect_output: bool) -> Tuple[torch.Tensor, torch.Tensor]: Collect input and output data depending on the collect_input and collect_output flag :param model_input: Input to model, Can be a single tensor or a list/tuple of tensors :param collect_input: Boolean to collect input or not :param collect_output: Boolean to collect output or not :return: Module's input and output data\n- def default_forward_fn(model: torch.nn.Module, inputs: Union[torch.tensor, List[torch.Tensor], Tuple[torch.Tensor]]): Default forward function that performs forward pass given a model and inputs yielded from the data loader. Data loader which yields torch.Tensor object that can be directly passed into the model, or a data loader which yields a tuple of length two where its first element can be directly passed into the model. :param model: PyTorch model. :param inputs: Inputs passed to model.", "prompted_full_text": "Implement the Python class `ModuleData` described below.\n\nClass description:\nCollect input and output data to and from module\n\nMethod signatures and docstrings:\n- def __init__(self, model: torch.nn.Module, module: torch.nn.Module, forward_fn: Callable[[torch.nn.Module, Any], Any]=None): :param model: Pytorch model :param module: Module reference :param forward_fn: Adapter function that performs forward pass given a model and inputs yielded from the data loader.\n- def collect_inp_out_data(self, model_input: Union[torch.tensor, List[torch.Tensor], Tuple[torch.Tensor]], collect_input: bool, collect_output: bool) -> Tuple[torch.Tensor, torch.Tensor]: Collect input and output data depending on the collect_input and collect_output flag :param model_input: Input to model, Can be a single tensor or a list/tuple of tensors :param collect_input: Boolean to collect input or not :param collect_output: Boolean to collect output or not :return: Module's input and output data\n- def default_forward_fn(model: torch.nn.Module, inputs: Union[torch.tensor, List[torch.Tensor], Tuple[torch.Tensor]]): Default forward function that performs forward pass given a model and inputs yielded from the data loader. Data loader which yields torch.Tensor object that can be directly passed into the model, or a data loader which yields a tuple of length two where its first element can be directly passed into the model. :param model: PyTorch model. 
:param inputs: Inputs passed to model.\n\n<|skeleton|>\nclass ModuleData:\n \"\"\"Collect input and output data to and from module\"\"\"\n\n def __init__(self, model: torch.nn.Module, module: torch.nn.Module, forward_fn: Callable[[torch.nn.Module, Any], Any]=None):\n \"\"\":param model: Pytorch model :param module: Module reference :param forward_fn: Adapter function that performs forward pass given a model and inputs yielded from the data loader.\"\"\"\n <|body_0|>\n\n def collect_inp_out_data(self, model_input: Union[torch.tensor, List[torch.Tensor], Tuple[torch.Tensor]], collect_input: bool, collect_output: bool) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Collect input and output data depending on the collect_input and collect_output flag :param model_input: Input to model, Can be a single tensor or a list/tuple of tensors :param collect_input: Boolean to collect input or not :param collect_output: Boolean to collect output or not :return: Module's input and output data\"\"\"\n <|body_1|>\n\n def default_forward_fn(model: torch.nn.Module, inputs: Union[torch.tensor, List[torch.Tensor], Tuple[torch.Tensor]]):\n \"\"\"Default forward function that performs forward pass given a model and inputs yielded from the data loader. Data loader which yields torch.Tensor object that can be directly passed into the model, or a data loader which yields a tuple of length two where its first element can be directly passed into the model. :param model: PyTorch model. :param inputs: Inputs passed to model.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._model = model\n self._module = module\n self._forward_fn = forward_fn or self.default_forward_fn\n<|end_body_0|>\n\n<|body_start_1|>\n def _hook_to_collect_inp_out_data(_, inp, out):\n \"\"\"\n hook to collect input and output data\n \"\"\"\n if collect_input:\n inp_data_list.append(inp[0])\n if collect_output:\n out_data_list.append(out)\n raise StopForwardException\n inp_data_list = []\n out_data_list = []\n handle = self._module.register_forward_hook(_hook_to_collect_inp_out_data)\n device = get_device(self._model)\n model_input = change_tensor_device_placement(model_input, device)\n try:\n with in_eval_mode(self._model), torch.no_grad():\n _ = self._forward_fn(self._model, model_input)\n except StopForwardException:\n pass\n handle.remove()\n inp_data, out_data = (None, None)\n if inp_data_list and isinstance(inp_data_list[0], torch.Tensor):\n inp_data = inp_data_list[0].detach()\n if out_data_list and isinstance(out_data_list[0], torch.Tensor):\n out_data = out_data_list[0].detach()\n return (inp_data, out_data)\n<|end_body_1|>\n\n<|body_start_2|>\n if isinstance(inputs, (list, tuple)):\n inputs, _ = inputs\n if isinstance(inputs, torch.Tensor):\n inputs = [inputs]\n model(*inputs)\n<|end_body_2|>\n", "revision_id": "5a406e657082b6a4f6e4bf48f0e46e085cb1e351", "skeleton": "<|skeleton|>\nclass ModuleData:\n \"\"\"Collect input and output data to and from module\"\"\"\n\n def __init__(self, model: torch.nn.Module, module: torch.nn.Module, forward_fn: Callable[[torch.nn.Module, Any], Any]=None):\n \"\"\":param model: Pytorch model :param module: Module reference :param forward_fn: Adapter function that performs forward pass given a model and inputs yielded from the data loader.\"\"\"\n <|body_0|>\n\n def collect_inp_out_data(self, model_input: Union[torch.tensor, List[torch.Tensor], Tuple[torch.Tensor]], collect_input: bool, collect_output: bool) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Collect input and output data depending on 
the collect_input and collect_output flag :param model_input: Input to model, Can be a single tensor or a list/tuple of tensors :param collect_input: Boolean to collect input or not :param collect_output: Boolean to collect output or not :return: Module's input and output data\"\"\"\n <|body_1|>\n\n def default_forward_fn(model: torch.nn.Module, inputs: Union[torch.tensor, List[torch.Tensor], Tuple[torch.Tensor]]):\n \"\"\"Default forward function that performs forward pass given a model and inputs yielded from the data loader. Data loader which yields torch.Tensor object that can be directly passed into the model, or a data loader which yields a tuple of length two where its first element can be directly passed into the model. :param model: PyTorch model. :param inputs: Inputs passed to model.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ModuleData:\n \"\"\"Collect input and output data to and from module\"\"\"\n\n def __init__(self, model: torch.nn.Module, module: torch.nn.Module, forward_fn: Callable[[torch.nn.Module, Any], Any]=None):\n \"\"\":param model: Pytorch model :param module: Module reference :param forward_fn: Adapter function that performs forward pass given a model and inputs yielded from the data loader.\"\"\"\n self._model = model\n self._module = module\n self._forward_fn = forward_fn or self.default_forward_fn\n\n def collect_inp_out_data(self, model_input: Union[torch.tensor, List[torch.Tensor], Tuple[torch.Tensor]], collect_input: bool, collect_output: bool) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Collect input and output data depending on the collect_input and collect_output flag :param model_input: Input to model, Can be a single tensor or a list/tuple of tensors :param collect_input: Boolean to collect input or not :param collect_output: Boolean to collect output or not :return: Module's input and output data\"\"\"\n def _hook_to_collect_inp_out_data(_, inp, out):\n \"\"\"\n hook to collect input and output data\n \"\"\"\n if collect_input:\n inp_data_list.append(inp[0])\n if collect_output:\n out_data_list.append(out)\n raise StopForwardException\n inp_data_list = []\n out_data_list = []\n handle = self._module.register_forward_hook(_hook_to_collect_inp_out_data)\n device = get_device(self._model)\n model_input = change_tensor_device_placement(model_input, device)\n try:\n with in_eval_mode(self._model), torch.no_grad():\n _ = self._forward_fn(self._model, model_input)\n except StopForwardException:\n pass\n handle.remove()\n inp_data, out_data = (None, None)\n if inp_data_list and isinstance(inp_data_list[0], torch.Tensor):\n inp_data = inp_data_list[0].detach()\n if out_data_list and isinstance(out_data_list[0], torch.Tensor):\n out_data = out_data_list[0].detach()\n return (inp_data, out_data)\n\n def default_forward_fn(model: torch.nn.Module, inputs: Union[torch.tensor, List[torch.Tensor], Tuple[torch.Tensor]]):\n \"\"\"Default forward function that performs forward pass given a model and inputs yielded from the data loader. Data loader which yields torch.Tensor object that can be directly passed into the model, or a data loader which yields a tuple of length two where its first element can be directly passed into the model. :param model: PyTorch model. 
:param inputs: Inputs passed to model.\"\"\"\n if isinstance(inputs, (list, tuple)):\n inputs, _ = inputs\n if isinstance(inputs, torch.Tensor):\n inputs = [inputs]\n model(*inputs)\n", "source": "the_stack_v2_python_sparse", "source_path": "TrainingExtensions/torch/src/python/aimet_torch/utils.py", "source_repo": "quic/aimet", "split": "val", "star_events_count": 1676}
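ModuleData's collect_inp_out_data is built on a standard PyTorch pattern: register a temporary forward hook on the module of interest, run one forward pass, and stop as soon as the data is captured. A minimal sketch of that pattern with plain torch, without the project helpers (get_device, change_tensor_device_placement, in_eval_mode, StopForwardException) the class relies on; net and probe are illustrative names:

import torch
import torch.nn as nn

net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
captured = {}

def probe(module, inputs, output):
    # inputs is a tuple of the module's positional arguments;
    # detach() drops autograd history, mirroring the class above
    captured['inp'] = inputs[0].detach()
    captured['out'] = output.detach()

handle = net[2].register_forward_hook(probe)
with torch.no_grad():
    net(torch.randn(3, 4))
handle.remove()  # always remove the hook so later passes stay clean

print(captured['inp'].shape)  # torch.Size([3, 8])
print(captured['out'].shape)  # torch.Size([3, 2])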
{"blob_id": "c6b61b1ccc655e553605ee2fb68683d9add0b6f2", "bodies": ["self.availability_zone_id = availability_zone_id\nself.db_instance_id = db_instance_id\nself.db_option_group_id = db_option_group_id\nself.db_parameter_group_id = db_parameter_group_id\nself.db_port = db_port\nself.enable_auto_minor_version_upgrade = enable_auto_minor_version_upgrade\nself.enable_copy_tags_to_snapshots = enable_copy_tags_to_snapshots\nself.enable_db_authentication = enable_db_authentication\nself.enable_public_accessibility = enable_public_accessibility\nself.is_multi_az_deployment = is_multi_az_deployment", "if dictionary is None:\n return None\navailability_zone_id = dictionary.get('availabilityZoneId')\ndb_instance_id = dictionary.get('dbInstanceId')\ndb_option_group_id = dictionary.get('dbOptionGroupId')\ndb_parameter_group_id = dictionary.get('dbParameterGroupId')\ndb_port = dictionary.get('dbPort')\nenable_auto_minor_version_upgrade = dictionary.get('enableAutoMinorVersionUpgrade')\nenable_copy_tags_to_snapshots = dictionary.get('enableCopyTagsToSnapshots')\nenable_db_authentication = dictionary.get('enableDbAuthentication')\nenable_public_accessibility = dictionary.get('enablePublicAccessibility')\nis_multi_az_deployment = dictionary.get('isMultiAzDeployment')\nreturn cls(availability_zone_id, db_instance_id, db_option_group_id, db_parameter_group_id, db_port, enable_auto_minor_version_upgrade, enable_copy_tags_to_snapshots, enable_db_authentication, enable_public_accessibility, is_multi_az_deployment)"], "bodies_text": "<|body_start_0|>\n self.availability_zone_id = availability_zone_id\n self.db_instance_id = db_instance_id\n self.db_option_group_id = db_option_group_id\n self.db_parameter_group_id = db_parameter_group_id\n self.db_port = db_port\n self.enable_auto_minor_version_upgrade = enable_auto_minor_version_upgrade\n self.enable_copy_tags_to_snapshots = enable_copy_tags_to_snapshots\n self.enable_db_authentication = enable_db_authentication\n self.enable_public_accessibility = enable_public_accessibility\n self.is_multi_az_deployment = is_multi_az_deployment\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n availability_zone_id = dictionary.get('availabilityZoneId')\n db_instance_id = dictionary.get('dbInstanceId')\n db_option_group_id = dictionary.get('dbOptionGroupId')\n db_parameter_group_id = dictionary.get('dbParameterGroupId')\n db_port = dictionary.get('dbPort')\n enable_auto_minor_version_upgrade = dictionary.get('enableAutoMinorVersionUpgrade')\n enable_copy_tags_to_snapshots = dictionary.get('enableCopyTagsToSnapshots')\n enable_db_authentication = dictionary.get('enableDbAuthentication')\n enable_public_accessibility = dictionary.get('enablePublicAccessibility')\n is_multi_az_deployment = dictionary.get('isMultiAzDeployment')\n return cls(availability_zone_id, db_instance_id, db_option_group_id, db_parameter_group_id, db_port, enable_auto_minor_version_upgrade, enable_copy_tags_to_snapshots, enable_db_authentication, enable_public_accessibility, is_multi_az_deployment)\n<|end_body_1|>\n", "class_docstring": "Implementation of the 'RdsParams' model. Specifies rds params for the restore operation. Attributes: availability_zone_id (long|int): Entity representing the availability zone to use while restoring the DB. db_instance_id (string, required): The DB instance identifier to use for the restored DB. This field is required. db_option_group_id (long|int): Entity representing the RDS option group to use while restoring the DB. 
db_parameter_group_id (long|int): Entity representing the RDS parameter group to use while restoring the DB. db_port (int): Port to use for the DB in the restored RDS instance. enable_auto_minor_version_upgrade (bool): Whether to enable auto minor version upgrade in the resto", "class_name": "RdsParams", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RdsParams:\n \"\"\"Implementation of the 'RdsParams' model. Specifies rds params for the restore operation. Attributes: availability_zone_id (long|int): Entity representing the availability zone to use while restoring the DB. db_instance_id (string, required): The DB instance identifier to use for the restored DB. This field is required. db_option_group_id (long|int): Entity representing the RDS option group to use while restoring the DB. db_parameter_group_id (long|int): Entity representing the RDS parameter group to use while restoring the DB. db_port (int): Port to use for the DB in the restored RDS instance. enable_auto_minor_version_upgrade (bool): Whether to enable auto minor version upgrade in the resto\"\"\"\n\n def __init__(self, availability_zone_id=None, db_instance_id=None, db_option_group_id=None, db_parameter_group_id=None, db_port=None, enable_auto_minor_version_upgrade=None, enable_copy_tags_to_snapshots=None, enable_db_authentication=None, enable_public_accessibility=None, is_multi_az_deployment=None):\n \"\"\"Constructor for the RdsParams class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.availability_zone_id = availability_zone_id\n self.db_instance_id = db_instance_id\n self.db_option_group_id = db_option_group_id\n self.db_parameter_group_id = db_parameter_group_id\n self.db_port = db_port\n self.enable_auto_minor_version_upgrade = enable_auto_minor_version_upgrade\n self.enable_copy_tags_to_snapshots = enable_copy_tags_to_snapshots\n self.enable_db_authentication = enable_db_authentication\n self.enable_public_accessibility = enable_public_accessibility\n self.is_multi_az_deployment = is_multi_az_deployment\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n availability_zone_id = dictionary.get('availabilityZoneId')\n db_instance_id = dictionary.get('dbInstanceId')\n db_option_group_id = dictionary.get('dbOptionGroupId')\n db_parameter_group_id = dictionary.get('dbParameterGroupId')\n db_port = dictionary.get('dbPort')\n enable_auto_minor_version_upgrade = dictionary.get('enableAutoMinorVersionUpgrade')\n enable_copy_tags_to_snapshots = dictionary.get('enableCopyTagsToSnapshots')\n enable_db_authentication = dictionary.get('enableDbAuthentication')\n enable_public_accessibility = dictionary.get('enablePublicAccessibility')\n is_multi_az_deployment = dictionary.get('isMultiAzDeployment')\n return cls(availability_zone_id, db_instance_id, db_option_group_id, db_parameter_group_id, db_port, enable_auto_minor_version_upgrade, enable_copy_tags_to_snapshots, enable_db_authentication, enable_public_accessibility, is_multi_az_deployment)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000063", "length_bytes": 4883, "license_type": "permissive", "methods": [{"docstring": "Constructor for the RdsParams class", "name": "__init__", "signature": "def __init__(self, availability_zone_id=None, db_instance_id=None, db_option_group_id=None, db_parameter_group_id=None, db_port=None, enable_auto_minor_version_upgrade=None, enable_copy_tags_to_snapshots=None, enable_db_authentication=None, enable_public_accessibility=None, is_multi_az_deployment=None)"}, {"docstring": "Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "name": "from_dictionary", "signature": "def from_dictionary(cls, dictionary)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_031300", "prompt": "Implement the Python class `RdsParams` described below.\n\nClass description:\nImplementation of the 'RdsParams' model. Specifies rds params for the restore operation. Attributes: availability_zone_id (long|int): Entity representing the availability zone to use while restoring the DB. db_instance_id (string, required): The DB instance identifier to use for the restored DB. This field is required. db_option_group_id (long|int): Entity representing the RDS option group to use while restoring the DB. db_parameter_group_id (long|int): Entity representing the RDS parameter group to use while restoring the DB. db_port (int): Port to use for the DB in the restored RDS instance. 
enable_auto_minor_version_upgrade (bool): Whether to enable auto minor version upgrade in the resto\n\nMethod signatures and docstrings:\n- def __init__(self, availability_zone_id=None, db_instance_id=None, db_option_group_id=None, db_parameter_group_id=None, db_port=None, enable_auto_minor_version_upgrade=None, enable_copy_tags_to_snapshots=None, enable_db_authentication=None, enable_public_accessibility=None, is_multi_az_deployment=None): Constructor for the RdsParams class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "prompted_full_text": "Implement the Python class `RdsParams` described below.\n\nClass description:\nImplementation of the 'RdsParams' model. Specifies rds params for the restore operation. Attributes: availability_zone_id (long|int): Entity representing the availability zone to use while restoring the DB. db_instance_id (string, required): The DB instance identifier to use for the restored DB. This field is required. db_option_group_id (long|int): Entity representing the RDS option group to use while restoring the DB. db_parameter_group_id (long|int): Entity representing the RDS parameter group to use while restoring the DB. db_port (int): Port to use for the DB in the restored RDS instance. enable_auto_minor_version_upgrade (bool): Whether to enable auto minor version upgrade in the resto\n\nMethod signatures and docstrings:\n- def __init__(self, availability_zone_id=None, db_instance_id=None, db_option_group_id=None, db_parameter_group_id=None, db_port=None, enable_auto_minor_version_upgrade=None, enable_copy_tags_to_snapshots=None, enable_db_authentication=None, enable_public_accessibility=None, is_multi_az_deployment=None): Constructor for the RdsParams class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\n\n<|skeleton|>\nclass RdsParams:\n \"\"\"Implementation of the 'RdsParams' model. Specifies rds params for the restore operation. Attributes: availability_zone_id (long|int): Entity representing the availability zone to use while restoring the DB. db_instance_id (string, required): The DB instance identifier to use for the restored DB. This field is required. db_option_group_id (long|int): Entity representing the RDS option group to use while restoring the DB. db_parameter_group_id (long|int): Entity representing the RDS parameter group to use while restoring the DB. db_port (int): Port to use for the DB in the restored RDS instance. 
enable_auto_minor_version_upgrade (bool): Whether to enable auto minor version upgrade in the resto\"\"\"\n\n def __init__(self, availability_zone_id=None, db_instance_id=None, db_option_group_id=None, db_parameter_group_id=None, db_port=None, enable_auto_minor_version_upgrade=None, enable_copy_tags_to_snapshots=None, enable_db_authentication=None, enable_public_accessibility=None, is_multi_az_deployment=None):\n \"\"\"Constructor for the RdsParams class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.availability_zone_id = availability_zone_id\n self.db_instance_id = db_instance_id\n self.db_option_group_id = db_option_group_id\n self.db_parameter_group_id = db_parameter_group_id\n self.db_port = db_port\n self.enable_auto_minor_version_upgrade = enable_auto_minor_version_upgrade\n self.enable_copy_tags_to_snapshots = enable_copy_tags_to_snapshots\n self.enable_db_authentication = enable_db_authentication\n self.enable_public_accessibility = enable_public_accessibility\n self.is_multi_az_deployment = is_multi_az_deployment\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n availability_zone_id = dictionary.get('availabilityZoneId')\n db_instance_id = dictionary.get('dbInstanceId')\n db_option_group_id = dictionary.get('dbOptionGroupId')\n db_parameter_group_id = dictionary.get('dbParameterGroupId')\n db_port = dictionary.get('dbPort')\n enable_auto_minor_version_upgrade = dictionary.get('enableAutoMinorVersionUpgrade')\n enable_copy_tags_to_snapshots = dictionary.get('enableCopyTagsToSnapshots')\n enable_db_authentication = dictionary.get('enableDbAuthentication')\n enable_public_accessibility = dictionary.get('enablePublicAccessibility')\n is_multi_az_deployment = dictionary.get('isMultiAzDeployment')\n return cls(availability_zone_id, db_instance_id, db_option_group_id, db_parameter_group_id, db_port, enable_auto_minor_version_upgrade, enable_copy_tags_to_snapshots, enable_db_authentication, enable_public_accessibility, is_multi_az_deployment)\n<|end_body_1|>\n", "revision_id": "e4973dfeb836266904d0369ea845513c7acf261e", "skeleton": "<|skeleton|>\nclass RdsParams:\n \"\"\"Implementation of the 'RdsParams' model. Specifies rds params for the restore operation. Attributes: availability_zone_id (long|int): Entity representing the availability zone to use while restoring the DB. db_instance_id (string, required): The DB instance identifier to use for the restored DB. This field is required. db_option_group_id (long|int): Entity representing the RDS option group to use while restoring the DB. db_parameter_group_id (long|int): Entity representing the RDS parameter group to use while restoring the DB. db_port (int): Port to use for the DB in the restored RDS instance. 
enable_auto_minor_version_upgrade (bool): Whether to enable auto minor version upgrade in the resto\"\"\"\n\n def __init__(self, availability_zone_id=None, db_instance_id=None, db_option_group_id=None, db_parameter_group_id=None, db_port=None, enable_auto_minor_version_upgrade=None, enable_copy_tags_to_snapshots=None, enable_db_authentication=None, enable_public_accessibility=None, is_multi_az_deployment=None):\n \"\"\"Constructor for the RdsParams class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class RdsParams:\n \"\"\"Implementation of the 'RdsParams' model. Specifies rds params for the restore operation. Attributes: availability_zone_id (long|int): Entity representing the availability zone to use while restoring the DB. db_instance_id (string, required): The DB instance identifier to use for the restored DB. This field is required. db_option_group_id (long|int): Entity representing the RDS option group to use while restoring the DB. db_parameter_group_id (long|int): Entity representing the RDS parameter group to use while restoring the DB. db_port (int): Port to use for the DB in the restored RDS instance. enable_auto_minor_version_upgrade (bool): Whether to enable auto minor version upgrade in the resto\"\"\"\n\n def __init__(self, availability_zone_id=None, db_instance_id=None, db_option_group_id=None, db_parameter_group_id=None, db_port=None, enable_auto_minor_version_upgrade=None, enable_copy_tags_to_snapshots=None, enable_db_authentication=None, enable_public_accessibility=None, is_multi_az_deployment=None):\n \"\"\"Constructor for the RdsParams class\"\"\"\n self.availability_zone_id = availability_zone_id\n self.db_instance_id = db_instance_id\n self.db_option_group_id = db_option_group_id\n self.db_parameter_group_id = db_parameter_group_id\n self.db_port = db_port\n self.enable_auto_minor_version_upgrade = enable_auto_minor_version_upgrade\n self.enable_copy_tags_to_snapshots = enable_copy_tags_to_snapshots\n self.enable_db_authentication = enable_db_authentication\n self.enable_public_accessibility = enable_public_accessibility\n self.is_multi_az_deployment = is_multi_az_deployment\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n if dictionary is None:\n return None\n availability_zone_id = dictionary.get('availabilityZoneId')\n db_instance_id = dictionary.get('dbInstanceId')\n db_option_group_id = dictionary.get('dbOptionGroupId')\n db_parameter_group_id = dictionary.get('dbParameterGroupId')\n db_port = dictionary.get('dbPort')\n enable_auto_minor_version_upgrade = dictionary.get('enableAutoMinorVersionUpgrade')\n enable_copy_tags_to_snapshots = dictionary.get('enableCopyTagsToSnapshots')\n enable_db_authentication = dictionary.get('enableDbAuthentication')\n enable_public_accessibility = dictionary.get('enablePublicAccessibility')\n is_multi_az_deployment = dictionary.get('isMultiAzDeployment')\n return cls(availability_zone_id, db_instance_id, db_option_group_id, db_parameter_group_id, db_port, enable_auto_minor_version_upgrade, enable_copy_tags_to_snapshots, enable_db_authentication, enable_public_accessibility, is_multi_az_deployment)\n", "source": "the_stack_v2_python_sparse", "source_path": "cohesity_management_sdk/models/rds_params.py", "source_repo": "cohesity/management-sdk-python", "split": "val", "star_events_count": 24}
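from_dictionary in the record above takes cls and is used as a factory, which only binds correctly if it is a classmethod; the skeleton format omits decorators, so the decorator is presumably present in the original source. A trimmed sketch of the pattern with the decorator explicit and two representative fields (the camelCase keys come from the API description):

class RdsParams:
    def __init__(self, db_instance_id=None, db_port=None):
        self.db_instance_id = db_instance_id
        self.db_port = db_port

    @classmethod
    def from_dictionary(cls, dictionary):
        # API payloads use camelCase keys; attributes are snake_case.
        # .get() leaves absent keys as None, matching the defaults.
        if dictionary is None:
            return None
        return cls(
            db_instance_id=dictionary.get('dbInstanceId'),
            db_port=dictionary.get('dbPort'),
        )

params = RdsParams.from_dictionary({'dbInstanceId': 'restored-db', 'dbPort': 5432})
assert params.db_port == 5432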
{"blob_id": "ba57ea447a2fa3862b55e840f1aa777691cde3a1", "bodies": ["if channel > 2 or channel < 1:\n print('DAC channel needs to be 1 or 2')\nif voltage >= 0.0 and voltage < 2.048:\n rawval = voltage / 2.048 * 4096\n self.set_dac_raw(channel, int(rawval))\nreturn", "lowByte = value & 255\nhighByte = value >> 8 & 255 | channel - 1 << 7 | 1 << 5 | 1 << 4\nself.__spiDAC.xfer2([highByte, lowByte])\nreturn"], "bodies_text": "<|body_start_0|>\n if channel > 2 or channel < 1:\n print('DAC channel needs to be 1 or 2')\n if voltage >= 0.0 and voltage < 2.048:\n rawval = voltage / 2.048 * 4096\n self.set_dac_raw(channel, int(rawval))\n return\n<|end_body_0|>\n\n<|body_start_1|>\n lowByte = value & 255\n highByte = value >> 8 & 255 | channel - 1 << 7 | 1 << 5 | 1 << 4\n self.__spiDAC.xfer2([highByte, lowByte])\n return\n<|end_body_1|>\n", "class_docstring": "Based on the Microchip MCP4822 Define SPI bus and init", "class_name": "DAC", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DAC:\n \"\"\"Based on the Microchip MCP4822 Define SPI bus and init\"\"\"\n\n def set_dac_voltage(self, channel, voltage):\n \"\"\"set the voltage for the selected channel on the DAC voltage can be between 0 and 2.047 volts\"\"\"\n <|body_0|>\n\n def set_dac_raw(self, channel, value):\n \"\"\"Set the raw value from the selected channel on the DAC Channel = 1 or 2 Value between 0 and 4095\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if channel > 2 or channel < 1:\n print('DAC channel needs to be 1 or 2')\n if voltage >= 0.0 and voltage < 2.048:\n rawval = voltage / 2.048 * 4096\n self.set_dac_raw(channel, int(rawval))\n return\n<|end_body_0|>\n\n<|body_start_1|>\n lowByte = value & 255\n highByte = value >> 8 & 255 | channel - 1 << 7 | 1 << 5 | 1 << 4\n self.__spiDAC.xfer2([highByte, lowByte])\n return\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000064", "length_bytes": 23619, "license_type": "permissive", "methods": [{"docstring": "set the voltage for the selected channel on the DAC voltage can be between 0 and 2.047 volts", "name": "set_dac_voltage", "signature": "def set_dac_voltage(self, channel, voltage)"}, {"docstring": "Set the raw value from the selected channel on the DAC Channel = 1 or 2 Value between 0 and 4095", "name": "set_dac_raw", "signature": "def set_dac_raw(self, channel, value)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_024727", "prompt": "Implement the Python class `DAC` described below.\n\nClass description:\nBased on the Microchip MCP4822 Define SPI bus and init\n\nMethod signatures and docstrings:\n- def set_dac_voltage(self, channel, voltage): set the voltage for the selected channel on the DAC voltage can be between 0 and 2.047 volts\n- def set_dac_raw(self, channel, value): Set the raw value from the selected channel on the DAC Channel = 1 or 2 Value between 0 and 4095", "prompted_full_text": "Implement the Python class `DAC` described below.\n\nClass description:\nBased on the Microchip MCP4822 Define SPI bus and init\n\nMethod signatures and docstrings:\n- def set_dac_voltage(self, channel, voltage): set the voltage for the selected channel on the DAC voltage can be between 0 and 2.047 volts\n- def set_dac_raw(self, channel, value): Set the raw value from the selected channel on the DAC Channel = 1 or 2 Value between 0 and 4095\n\n<|skeleton|>\nclass DAC:\n \"\"\"Based on the Microchip MCP4822 Define SPI bus and init\"\"\"\n\n def set_dac_voltage(self, channel, 
voltage):\n \"\"\"set the voltage for the selected channel on the DAC voltage can be between 0 and 2.047 volts\"\"\"\n <|body_0|>\n\n def set_dac_raw(self, channel, value):\n \"\"\"Set the raw value from the selected channel on the DAC Channel = 1 or 2 Value between 0 and 4095\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if channel > 2 or channel < 1:\n print('DAC channel needs to be 1 or 2')\n if voltage >= 0.0 and voltage < 2.048:\n rawval = voltage / 2.048 * 4096\n self.set_dac_raw(channel, int(rawval))\n return\n<|end_body_0|>\n\n<|body_start_1|>\n lowByte = value & 255\n highByte = value >> 8 & 255 | channel - 1 << 7 | 1 << 5 | 1 << 4\n self.__spiDAC.xfer2([highByte, lowByte])\n return\n<|end_body_1|>\n", "revision_id": "a5d2d255eba7d2b922b3f1effaf56ba18743e1d3", "skeleton": "<|skeleton|>\nclass DAC:\n \"\"\"Based on the Microchip MCP4822 Define SPI bus and init\"\"\"\n\n def set_dac_voltage(self, channel, voltage):\n \"\"\"set the voltage for the selected channel on the DAC voltage can be between 0 and 2.047 volts\"\"\"\n <|body_0|>\n\n def set_dac_raw(self, channel, value):\n \"\"\"Set the raw value from the selected channel on the DAC Channel = 1 or 2 Value between 0 and 4095\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DAC:\n \"\"\"Based on the Microchip MCP4822 Define SPI bus and init\"\"\"\n\n def set_dac_voltage(self, channel, voltage):\n \"\"\"set the voltage for the selected channel on the DAC voltage can be between 0 and 2.047 volts\"\"\"\n if channel > 2 or channel < 1:\n print('DAC channel needs to be 1 or 2')\n if voltage >= 0.0 and voltage < 2.048:\n rawval = voltage / 2.048 * 4096\n self.set_dac_raw(channel, int(rawval))\n return\n\n def set_dac_raw(self, channel, value):\n \"\"\"Set the raw value from the selected channel on the DAC Channel = 1 or 2 Value between 0 and 4095\"\"\"\n lowByte = value & 255\n highByte = value >> 8 & 255 | channel - 1 << 7 | 1 << 5 | 1 << 4\n self.__spiDAC.xfer2([highByte, lowByte])\n return\n", "source": "the_stack_v2_python_sparse", "source_path": "AB Electronics/ABElectronics_Python3_Libraries-master/ExpanderPi/ABE_ExpanderPi.py", "source_repo": "NuclearPi/NuclearPi-Source-Code", "split": "val", "star_events_count": 32}
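set_dac_raw in the record above packs a 16-bit MCP4822 command word and sends it as two SPI bytes. Assuming the datasheet layout: bit 15 selects channel A/B, bit 13 set to 1 selects 1x gain (2.048 V full scale, which is why set_dac_voltage divides by 2.048), bit 12 set to 1 keeps the output active, and bits 11..0 carry the 12-bit value. A standalone sketch of just the packing (mcp4822_frame is this sketch's name, not the library's):

def mcp4822_frame(channel, value):
    """Pack channel (1 or 2) and a 12-bit value into [high, low] SPI bytes."""
    if channel not in (1, 2):
        raise ValueError('DAC channel needs to be 1 or 2')
    value &= 0x0FFF  # clamp to 12 bits
    high = (value >> 8) | ((channel - 1) << 7)  # top bit: channel A/B
    high |= (1 << 5) | (1 << 4)  # gain = 1x, output active (word bits 13/12)
    return [high, value & 0xFF]

# 0x0ABC on channel 1 -> high 0x3A (0b00111010), low 0xBC
assert mcp4822_frame(1, 0x0ABC) == [0x3A, 0xBC]
assert mcp4822_frame(2, 0x0ABC)[0] == 0xBA  # bit 7 set selects channel B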
{"blob_id": "d99e502bc5f6206ee4a11314ea262e493442e03d", "bodies": ["self.elements = elements\nself._lookup = {}\nfor element in elements:\n self._lookup[element.number] = element\n self._lookup[element.symbol.lower()] = element", "result = self._lookup.get(index)\nif result is None and isinstance(index, str):\n index = index.strip()\n result = self._lookup.get(index.lower())\n if result is None and index.isdigit():\n result = self._lookup.get(int(index))\n if result is None:\n raise KeyError('Could not find element %s.' % index)\nreturn result"], "bodies_text": "<|body_start_0|>\n self.elements = elements\n self._lookup = {}\n for element in elements:\n self._lookup[element.number] = element\n self._lookup[element.symbol.lower()] = element\n<|end_body_0|>\n\n<|body_start_1|>\n result = self._lookup.get(index)\n if result is None and isinstance(index, str):\n index = index.strip()\n result = self._lookup.get(index.lower())\n if result is None and index.isdigit():\n result = self._lookup.get(int(index))\n if result is None:\n raise KeyError('Could not find element %s.' % index)\n return result\n<|end_body_1|>\n", "class_docstring": "A periodic table data structure.", "class_name": "Periodic", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Periodic:\n \"\"\"A periodic table data structure.\"\"\"\n\n def __init__(self, elements):\n \"\"\"Initialize the instance Parameters ---------- elements A list of :class:`Element` instances.\"\"\"\n <|body_0|>\n\n def __getitem__(self, index):\n \"\"\"Get an element from the table based on a flexible index. Parameters ---------- index This can be either an integer atomic number, a string with the elemental symbol (any case), or a string with the atomic number. Returns ------- result : The corresponding :class:`Element` instance.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.elements = elements\n self._lookup = {}\n for element in elements:\n self._lookup[element.number] = element\n self._lookup[element.symbol.lower()] = element\n<|end_body_0|>\n\n<|body_start_1|>\n result = self._lookup.get(index)\n if result is None and isinstance(index, str):\n index = index.strip()\n result = self._lookup.get(index.lower())\n if result is None and index.isdigit():\n result = self._lookup.get(int(index))\n if result is None:\n raise KeyError('Could not find element %s.' % index)\n return result\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000065", "length_bytes": 22041, "license_type": "permissive", "methods": [{"docstring": "Initialize the instance Parameters ---------- elements A list of :class:`Element` instances.", "name": "__init__", "signature": "def __init__(self, elements)"}, {"docstring": "Get an element from the table based on a flexible index. Parameters ---------- index This can be either an integer atomic number, a string with the elemental symbol (any case), or a string with the atomic number. 
Returns ------- result : The corresponding :class:`Element` instance.", "name": "__getitem__", "signature": "def __getitem__(self, index)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_042176", "prompt": "Implement the Python class `Periodic` described below.\n\nClass description:\nA periodic table data structure.\n\nMethod signatures and docstrings:\n- def __init__(self, elements): Initialize the instance Parameters ---------- elements A list of :class:`Element` instances.\n- def __getitem__(self, index): Get an element from the table based on a flexible index. Parameters ---------- index This can be either an integer atomic number, a string with the elemental symbol (any case), or a string with the atomic number. Returns ------- result : The corresponding :class:`Element` instance.", "prompted_full_text": "Implement the Python class `Periodic` described below.\n\nClass description:\nA periodic table data structure.\n\nMethod signatures and docstrings:\n- def __init__(self, elements): Initialize the instance Parameters ---------- elements A list of :class:`Element` instances.\n- def __getitem__(self, index): Get an element from the table based on a flexible index. Parameters ---------- index This can be either an integer atomic number, a string with the elemental symbol (any case), or a string with the atomic number. Returns ------- result : The corresponding :class:`Element` instance.\n\n<|skeleton|>\nclass Periodic:\n \"\"\"A periodic table data structure.\"\"\"\n\n def __init__(self, elements):\n \"\"\"Initialize the instance Parameters ---------- elements A list of :class:`Element` instances.\"\"\"\n <|body_0|>\n\n def __getitem__(self, index):\n \"\"\"Get an element from the table based on a flexible index. Parameters ---------- index This can be either an integer atomic number, a string with the elemental symbol (any case), or a string with the atomic number. Returns ------- result : The corresponding :class:`Element` instance.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.elements = elements\n self._lookup = {}\n for element in elements:\n self._lookup[element.number] = element\n self._lookup[element.symbol.lower()] = element\n<|end_body_0|>\n\n<|body_start_1|>\n result = self._lookup.get(index)\n if result is None and isinstance(index, str):\n index = index.strip()\n result = self._lookup.get(index.lower())\n if result is None and index.isdigit():\n result = self._lookup.get(int(index))\n if result is None:\n raise KeyError('Could not find element %s.' % index)\n return result\n<|end_body_1|>\n", "revision_id": "dc6c21b016f7d55009832957f5654b7c3d464b8b", "skeleton": "<|skeleton|>\nclass Periodic:\n \"\"\"A periodic table data structure.\"\"\"\n\n def __init__(self, elements):\n \"\"\"Initialize the instance Parameters ---------- elements A list of :class:`Element` instances.\"\"\"\n <|body_0|>\n\n def __getitem__(self, index):\n \"\"\"Get an element from the table based on a flexible index. Parameters ---------- index This can be either an integer atomic number, a string with the elemental symbol (any case), or a string with the atomic number. 
Returns ------- result : The corresponding :class:`Element` instance.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Periodic:\n \"\"\"A periodic table data structure.\"\"\"\n\n def __init__(self, elements):\n \"\"\"Initialize the instance Parameters ---------- elements A list of :class:`Element` instances.\"\"\"\n self.elements = elements\n self._lookup = {}\n for element in elements:\n self._lookup[element.number] = element\n self._lookup[element.symbol.lower()] = element\n\n def __getitem__(self, index):\n \"\"\"Get an element from the table based on a flexible index. Parameters ---------- index This can be either an integer atomic number, a string with the elemental symbol (any case), or a string with the atomic number. Returns ------- result : The corresponding :class:`Element` instance.\"\"\"\n result = self._lookup.get(index)\n if result is None and isinstance(index, str):\n index = index.strip()\n result = self._lookup.get(index.lower())\n if result is None and index.isdigit():\n result = self._lookup.get(int(index))\n if result is None:\n raise KeyError('Could not find element %s.' % index)\n return result\n", "source": "the_stack_v2_python_sparse", "source_path": "moha/system/periodic.py", "source_repo": "ZhaoYilin/moha", "split": "val", "star_events_count": 16}
{"blob_id": "2ad9d67ca21f0197286f0dbc264529615b671d36", "bodies": ["self.inputs = inputs\nself.layer_dims = layer_dims\nself.test_phase = test_phase\nif output_dim is not None:\n self.output_dim = output_dim\nelse:\n self.output_dim = inputs.get_shape()[-1]\nself._build_model()", "start_trainable_variables = tf.trainable_variables()\nnet = self.inputs\nfor layer_dim in layer_dims:\n net = tf.layers.dense(inputs=net, units=layer_dim, activation=tf.nn.leaky_relu)\n if not self.test_phase:\n net = tf.dropout(net, 0.5)\nself.outputs = tf.layers.dense(inputs=net, units=self.output_dim, activation=tf.identity)\nend_trainable_variables = tf.trainable_variables()\nself.L_params = [param for param in end_trainable_variables if param not in start_trainable_variables]"], "bodies_text": "<|body_start_0|>\n self.inputs = inputs\n self.layer_dims = layer_dims\n self.test_phase = test_phase\n if output_dim is not None:\n self.output_dim = output_dim\n else:\n self.output_dim = inputs.get_shape()[-1]\n self._build_model()\n<|end_body_0|>\n\n<|body_start_1|>\n start_trainable_variables = tf.trainable_variables()\n net = self.inputs\n for layer_dim in layer_dims:\n net = tf.layers.dense(inputs=net, units=layer_dim, activation=tf.nn.leaky_relu)\n if not self.test_phase:\n net = tf.dropout(net, 0.5)\n self.outputs = tf.layers.dense(inputs=net, units=self.output_dim, activation=tf.identity)\n end_trainable_variables = tf.trainable_variables()\n self.L_params = [param for param in end_trainable_variables if param not in start_trainable_variables]\n<|end_body_1|>\n", "class_docstring": "An instance of this class contains everything with the model being applied to an input tensor. It features more than just the weights of the model. Multiple applications are handled through scope sharing.", "class_name": "ModelMLP", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ModelMLP:\n \"\"\"An instance of this class contains everything with the model being applied to an input tensor. It features more than just the weights of the model. 
Multiple applications are handled through scope sharing.\"\"\"\n\n def __init__(self, inputs, layer_dims, test_phase=False, output_dim=None):\n \"\"\"We really expect this constructor call to be made within a proper scope so that variable reuse is properly used.\"\"\"\n <|body_0|>\n\n def _build_model(self):\n \"\"\"It does not want arguments because it is going to pick whatever it needs.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.inputs = inputs\n self.layer_dims = layer_dims\n self.test_phase = test_phase\n if output_dim is not None:\n self.output_dim = output_dim\n else:\n self.output_dim = inputs.get_shape()[-1]\n self._build_model()\n<|end_body_0|>\n\n<|body_start_1|>\n start_trainable_variables = tf.trainable_variables()\n net = self.inputs\n for layer_dim in layer_dims:\n net = tf.layers.dense(inputs=net, units=layer_dim, activation=tf.nn.leaky_relu)\n if not self.test_phase:\n net = tf.dropout(net, 0.5)\n self.outputs = tf.layers.dense(inputs=net, units=self.output_dim, activation=tf.identity)\n end_trainable_variables = tf.trainable_variables()\n self.L_params = [param for param in end_trainable_variables if param not in start_trainable_variables]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000066", "length_bytes": 4780, "license_type": "no_license", "methods": [{"docstring": "We really expect this constructor call to be made within a proper scope so that variable reuse is properly used.", "name": "__init__", "signature": "def __init__(self, inputs, layer_dims, test_phase=False, output_dim=None)"}, {"docstring": "It does not want arguments because it is going to pick whatever it needs.", "name": "_build_model", "signature": "def _build_model(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_test_001876", "prompt": "Implement the Python class `ModelMLP` described below.\n\nClass description:\nAn instance of this class contains everything with the model being applied to an input tensor. It features more than just the weights of the model. Multiple applications are handled through scope sharing.\n\nMethod signatures and docstrings:\n- def __init__(self, inputs, layer_dims, test_phase=False, output_dim=None): We really expect this constructor call to be made within a proper scope so that variable reuse is properly used.\n- def _build_model(self): It does not want arguments because it is going to pick whatever it needs.", "prompted_full_text": "Implement the Python class `ModelMLP` described below.\n\nClass description:\nAn instance of this class contains everything with the model being applied to an input tensor. It features more than just the weights of the model. Multiple applications are handled through scope sharing.\n\nMethod signatures and docstrings:\n- def __init__(self, inputs, layer_dims, test_phase=False, output_dim=None): We really expect this constructor call to be made within a proper scope so that variable reuse is properly used.\n- def _build_model(self): It does not want arguments because it is going to pick whatever it needs.\n\n<|skeleton|>\nclass ModelMLP:\n \"\"\"An instance of this class contains everything with the model being applied to an input tensor. It features more than just the weights of the model. 
Multiple applications are handled through scope sharing.\"\"\"\n\n def __init__(self, inputs, layer_dims, test_phase=False, output_dim=None):\n \"\"\"We really expect this constructor call to be made within a proper scope so that variable reuse is properly used.\"\"\"\n <|body_0|>\n\n def _build_model(self):\n \"\"\"It does not want arguments because it is going to pick whatever it needs.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.inputs = inputs\n self.layer_dims = layer_dims\n self.test_phase = test_phase\n if output_dim is not None:\n self.output_dim = output_dim\n else:\n self.output_dim = inputs.get_shape()[-1]\n self._build_model()\n<|end_body_0|>\n\n<|body_start_1|>\n start_trainable_variables = tf.trainable_variables()\n net = self.inputs\n for layer_dim in layer_dims:\n net = tf.layers.dense(inputs=net, units=layer_dim, activation=tf.nn.leaky_relu)\n if not self.test_phase:\n net = tf.dropout(net, 0.5)\n self.outputs = tf.layers.dense(inputs=net, units=self.output_dim, activation=tf.identity)\n end_trainable_variables = tf.trainable_variables()\n self.L_params = [param for param in end_trainable_variables if param not in start_trainable_variables]\n<|end_body_1|>\n", "revision_id": "e2bd4eeff078c8ad91df11119fe8372b28c8fd0e", "skeleton": "<|skeleton|>\nclass ModelMLP:\n \"\"\"An instance of this class contains everything with the model being applied to an input tensor. It features more than just the weights of the model. Multiple applications are handled through scope sharing.\"\"\"\n\n def __init__(self, inputs, layer_dims, test_phase=False, output_dim=None):\n \"\"\"We really expect this constructor call to be made within a proper scope so that variable reuse is properly used.\"\"\"\n <|body_0|>\n\n def _build_model(self):\n \"\"\"It does not want arguments because it is going to pick whatever it needs.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ModelMLP:\n \"\"\"An instance of this class contains everything with the model being applied to an input tensor. It features more than just the weights of the model. Multiple applications are handled through scope sharing.\"\"\"\n\n def __init__(self, inputs, layer_dims, test_phase=False, output_dim=None):\n \"\"\"We really expect this constructor call to be made within a proper scope so that variable reuse is properly used.\"\"\"\n self.inputs = inputs\n self.layer_dims = layer_dims\n self.test_phase = test_phase\n if output_dim is not None:\n self.output_dim = output_dim\n else:\n self.output_dim = inputs.get_shape()[-1]\n self._build_model()\n\n def _build_model(self):\n \"\"\"It does not want arguments because it is going to pick whatever it needs.\"\"\"\n start_trainable_variables = tf.trainable_variables()\n net = self.inputs\n for layer_dim in layer_dims:\n net = tf.layers.dense(inputs=net, units=layer_dim, activation=tf.nn.leaky_relu)\n if not self.test_phase:\n net = tf.dropout(net, 0.5)\n self.outputs = tf.layers.dense(inputs=net, units=self.output_dim, activation=tf.identity)\n end_trainable_variables = tf.trainable_variables()\n self.L_params = [param for param in end_trainable_variables if param not in start_trainable_variables]\n", "source": "the_stack_v2_python_sparse", "source_path": "2019_spiral/src/denoising_autoencoder/models.py", "source_repo": "gyom/denoising_autoencoder", "split": "val", "star_events_count": 0}
{"blob_id": "db3783d67fbbcb1ffff347cbdb34916823c4a666", "bodies": ["data = []\nwith open(file, 'r') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n for i, row in enumerate(reader):\n if i == 0:\n header = row\n else:\n data.append(dict(zip(header, row)))\nreturn data", "with MongoDB() as database:\n collections = ({'database': 'products', 'file': product_file, 'records': 0, 'errors': 0}, {'database': 'customers', 'file': customer_file, 'records': 0, 'errors': 0}, {'database': 'rentals', 'file': rental_file, 'records': 0, 'errors': 0})\n for collection in collections:\n try:\n data = self.get_data(collection['file'])\n try:\n database[collection['database']].insert_many(data)\n collection['records'] = database[collection['database']].count_documents({})\n except BulkWriteError:\n collection['errors'] += 1\n except DuplicateKeyError:\n collection['errors'] += 1\n except FileNotFoundError:\n collection['errors'] += 1\n return ((collections[0]['records'], collections[1]['records'], collections[2]['records']), (collections[0]['errors'], collections[1]['errors'], collections[2]['errors']))", "prods = {}\nwith MongoDB() as database:\n for prod in database['products'].find({'qty_avail': {'$gt': '0'}}):\n prods[prod['prod_id']] = {'desc': prod['description'], 'prod_type': prod['prod_type'], 'qty_avail': prod['qty_avail']}\n return prods", "renters = {}\nwith MongoDB() as database:\n id_list = [item['user_id'] for item in database['rentals'].find({'prod_id': product_id})]\n for user_id in id_list:\n user = database['customers'].find_one({'user_id': user_id})\n renters[user_id] = {'name': user['name'], 'address': user['address'], 'phone': user['phone'], 'email': user['email']}\n return renters", "with MongoDB() as database:\n database['products'].drop()\n database['customers'].drop()\n database['rentals'].drop()"], "bodies_text": "<|body_start_0|>\n data = []\n with open(file, 'r') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n for i, row in enumerate(reader):\n if i == 0:\n header = row\n else:\n data.append(dict(zip(header, row)))\n return data\n<|end_body_0|>\n\n<|body_start_1|>\n with MongoDB() as database:\n collections = ({'database': 'products', 'file': product_file, 'records': 0, 'errors': 0}, {'database': 'customers', 'file': customer_file, 'records': 0, 'errors': 0}, {'database': 'rentals', 'file': rental_file, 'records': 0, 'errors': 0})\n for collection in collections:\n try:\n data = self.get_data(collection['file'])\n try:\n database[collection['database']].insert_many(data)\n collection['records'] = database[collection['database']].count_documents({})\n except BulkWriteError:\n collection['errors'] += 1\n except DuplicateKeyError:\n collection['errors'] += 1\n except FileNotFoundError:\n collection['errors'] += 1\n return ((collections[0]['records'], collections[1]['records'], collections[2]['records']), (collections[0]['errors'], collections[1]['errors'], collections[2]['errors']))\n<|end_body_1|>\n\n<|body_start_2|>\n prods = {}\n with MongoDB() as database:\n for prod in database['products'].find({'qty_avail': {'$gt': '0'}}):\n prods[prod['prod_id']] = {'desc': prod['description'], 'prod_type': prod['prod_type'], 'qty_avail': prod['qty_avail']}\n return prods\n<|end_body_2|>\n\n<|body_start_3|>\n renters = {}\n with MongoDB() as database:\n id_list = [item['user_id'] for item in database['rentals'].find({'prod_id': product_id})]\n for user_id in id_list:\n user = database['customers'].find_one({'user_id': user_id})\n renters[user_id] = {'name': user['name'], 
'address': user['address'], 'phone': user['phone'], 'email': user['email']}\n return renters\n<|end_body_3|>\n\n<|body_start_4|>\n with MongoDB() as database:\n database['products'].drop()\n database['customers'].drop()\n database['rentals'].drop()\n<|end_body_4|>\n", "class_docstring": "HP Norton application functionality", "class_name": "HPNortonApp", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass HPNortonApp:\n \"\"\"HP Norton application functionality\"\"\"\n\n def get_data(self, file):\n \"\"\"Gets data from csv file Returns: List, consisting of Dicts with data in each row\"\"\"\n <|body_0|>\n\n def import_data(self, product_file, customer_file, rental_file):\n \"\"\"Creates and populates 3 collections in MongoDB database Inputs: path to data for: products, customers, rentals (in that order) Returns: Tuple, with number of entries added; Tuple, with number of errors occurred\"\"\"\n <|body_1|>\n\n def show_available_products(self):\n \"\"\"Queries product database for available items Returns: Dict, with available product data\"\"\"\n <|body_2|>\n\n def show_rentals(self, product_id):\n \"\"\"Queries rental database for customer by product ID Returns: Dict, user info for those who have rented product\"\"\"\n <|body_3|>\n\n def clear_collections(self):\n \"\"\"Clears all collections in database\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n data = []\n with open(file, 'r') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n for i, row in enumerate(reader):\n if i == 0:\n header = row\n else:\n data.append(dict(zip(header, row)))\n return data\n<|end_body_0|>\n\n<|body_start_1|>\n with MongoDB() as database:\n collections = ({'database': 'products', 'file': product_file, 'records': 0, 'errors': 0}, {'database': 'customers', 'file': customer_file, 'records': 0, 'errors': 0}, {'database': 'rentals', 'file': rental_file, 'records': 0, 'errors': 0})\n for collection in collections:\n try:\n data = self.get_data(collection['file'])\n try:\n database[collection['database']].insert_many(data)\n collection['records'] = database[collection['database']].count_documents({})\n except BulkWriteError:\n collection['errors'] += 1\n except DuplicateKeyError:\n collection['errors'] += 1\n except FileNotFoundError:\n collection['errors'] += 1\n return ((collections[0]['records'], collections[1]['records'], collections[2]['records']), (collections[0]['errors'], collections[1]['errors'], collections[2]['errors']))\n<|end_body_1|>\n\n<|body_start_2|>\n prods = {}\n with MongoDB() as database:\n for prod in database['products'].find({'qty_avail': {'$gt': '0'}}):\n prods[prod['prod_id']] = {'desc': prod['description'], 'prod_type': prod['prod_type'], 'qty_avail': prod['qty_avail']}\n return prods\n<|end_body_2|>\n\n<|body_start_3|>\n renters = {}\n with MongoDB() as database:\n id_list = [item['user_id'] for item in database['rentals'].find({'prod_id': product_id})]\n for user_id in id_list:\n user = database['customers'].find_one({'user_id': user_id})\n renters[user_id] = {'name': user['name'], 'address': user['address'], 'phone': user['phone'], 'email': user['email']}\n return renters\n<|end_body_3|>\n\n<|body_start_4|>\n with MongoDB() as database:\n database['products'].drop()\n database['customers'].drop()\n database['rentals'].drop()\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000067", "length_bytes": 6767, "license_type": "no_license", "methods": [{"docstring": "Gets data from csv file Returns: 
List, consisting of Dicts with data in each row", "name": "get_data", "signature": "def get_data(self, file)"}, {"docstring": "Creates and populates 3 collections in MongoDB database Inputs: path to data for: products, customers, rentals (in that order) Returns: Tuple, with number of entries added; Tuple, with number of errors occurred", "name": "import_data", "signature": "def import_data(self, product_file, customer_file, rental_file)"}, {"docstring": "Queries product database for available items Returns: Dict, with available product data", "name": "show_available_products", "signature": "def show_available_products(self)"}, {"docstring": "Queries rental database for customer by product ID Returns: Dict, user info for those who have rented product", "name": "show_rentals", "signature": "def show_rentals(self, product_id)"}, {"docstring": "Clears all collections in database", "name": "clear_collections", "signature": "def clear_collections(self)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_049441", "prompt": "Implement the Python class `HPNortonApp` described below.\n\nClass description:\nHP Norton application functionality\n\nMethod signatures and docstrings:\n- def get_data(self, file): Gets data from csv file Returns: List, consisting of Dicts with data in each row\n- def import_data(self, product_file, customer_file, rental_file): Creates and populates 3 collections in MongoDB database Inputs: path to data for: products, customers, rentals (in that order) Returns: Tuple, with number of entries added; Tuple, with number of errors occurred\n- def show_available_products(self): Queries product database for available items Returns: Dict, with available product data\n- def show_rentals(self, product_id): Queries rental database for customer by product ID Returns: Dict, user info for those who have rented product\n- def clear_collections(self): Clears all collections in database", "prompted_full_text": "Implement the Python class `HPNortonApp` described below.\n\nClass description:\nHP Norton application functionality\n\nMethod signatures and docstrings:\n- def get_data(self, file): Gets data from csv file Returns: List, consisting of Dicts with data in each row\n- def import_data(self, product_file, customer_file, rental_file): Creates and populates 3 collections in MongoDB database Inputs: path to data for: products, customers, rentals (in that order) Returns: Tuple, with number of entries added; Tuple, with number of errors occurred\n- def show_available_products(self): Queries product database for available items Returns: Dict, with available product data\n- def show_rentals(self, product_id): Queries rental database for customer by product ID Returns: Dict, user info for those who have rented product\n- def clear_collections(self): Clears all collections in database\n\n<|skeleton|>\nclass HPNortonApp:\n \"\"\"HP Norton application functionality\"\"\"\n\n def get_data(self, file):\n \"\"\"Gets data from csv file Returns: List, consisting of Dicts with data in each row\"\"\"\n <|body_0|>\n\n def import_data(self, product_file, customer_file, rental_file):\n \"\"\"Creates and populates 3 collections in MongoDB database Inputs: path to data for: products, customers, rentals (in that order) Returns: Tuple, with number of entries added; Tuple, with number of errors occurred\"\"\"\n <|body_1|>\n\n def show_available_products(self):\n \"\"\"Queries product database for available items Returns: Dict, with available product data\"\"\"\n <|body_2|>\n\n def 
show_rentals(self, product_id):\n \"\"\"Queries rental database for customer by product ID Returns: Dict, user info for those who have rented product\"\"\"\n <|body_3|>\n\n def clear_collections(self):\n \"\"\"Clears all collections in database\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n data = []\n with open(file, 'r') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n for i, row in enumerate(reader):\n if i == 0:\n header = row\n else:\n data.append(dict(zip(header, row)))\n return data\n<|end_body_0|>\n\n<|body_start_1|>\n with MongoDB() as database:\n collections = ({'database': 'products', 'file': product_file, 'records': 0, 'errors': 0}, {'database': 'customers', 'file': customer_file, 'records': 0, 'errors': 0}, {'database': 'rentals', 'file': rental_file, 'records': 0, 'errors': 0})\n for collection in collections:\n try:\n data = self.get_data(collection['file'])\n try:\n database[collection['database']].insert_many(data)\n collection['records'] = database[collection['database']].count_documents({})\n except BulkWriteError:\n collection['errors'] += 1\n except DuplicateKeyError:\n collection['errors'] += 1\n except FileNotFoundError:\n collection['errors'] += 1\n return ((collections[0]['records'], collections[1]['records'], collections[2]['records']), (collections[0]['errors'], collections[1]['errors'], collections[2]['errors']))\n<|end_body_1|>\n\n<|body_start_2|>\n prods = {}\n with MongoDB() as database:\n for prod in database['products'].find({'qty_avail': {'$gt': '0'}}):\n prods[prod['prod_id']] = {'desc': prod['description'], 'prod_type': prod['prod_type'], 'qty_avail': prod['qty_avail']}\n return prods\n<|end_body_2|>\n\n<|body_start_3|>\n renters = {}\n with MongoDB() as database:\n id_list = [item['user_id'] for item in database['rentals'].find({'prod_id': product_id})]\n for user_id in id_list:\n user = database['customers'].find_one({'user_id': user_id})\n renters[user_id] = {'name': user['name'], 'address': user['address'], 'phone': user['phone'], 'email': user['email']}\n return renters\n<|end_body_3|>\n\n<|body_start_4|>\n with MongoDB() as database:\n database['products'].drop()\n database['customers'].drop()\n database['rentals'].drop()\n<|end_body_4|>\n", "revision_id": "5dac60f39e3909ff05b26721d602ed20f14d6be3", "skeleton": "<|skeleton|>\nclass HPNortonApp:\n \"\"\"HP Norton application functionality\"\"\"\n\n def get_data(self, file):\n \"\"\"Gets data from csv file Returns: List, consisting of Dicts with data in each row\"\"\"\n <|body_0|>\n\n def import_data(self, product_file, customer_file, rental_file):\n \"\"\"Creates and populates 3 collections in MongoDB database Inputs: path to data for: products, customers, rentals (in that order) Returns: Tuple, with number of entries added; Tuple, with number of errors occurred\"\"\"\n <|body_1|>\n\n def show_available_products(self):\n \"\"\"Queries product database for available items Returns: Dict, with available product data\"\"\"\n <|body_2|>\n\n def show_rentals(self, product_id):\n \"\"\"Queries rental database for customer by product ID Returns: Dict, user info for those who have rented product\"\"\"\n <|body_3|>\n\n def clear_collections(self):\n \"\"\"Clears all collections in database\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class HPNortonApp:\n \"\"\"HP Norton application functionality\"\"\"\n\n def get_data(self, file):\n 
\"\"\"Gets data from csv file Returns: List, consisting of Dicts with data in each row\"\"\"\n data = []\n with open(file, 'r') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n for i, row in enumerate(reader):\n if i == 0:\n header = row\n else:\n data.append(dict(zip(header, row)))\n return data\n\n def import_data(self, product_file, customer_file, rental_file):\n \"\"\"Creates and populates 3 collections in MongoDB database Inputs: path to data for: products, customers, rentals (in that order) Returns: Tuple, with number of entries added; Tuple, with number of errors occurred\"\"\"\n with MongoDB() as database:\n collections = ({'database': 'products', 'file': product_file, 'records': 0, 'errors': 0}, {'database': 'customers', 'file': customer_file, 'records': 0, 'errors': 0}, {'database': 'rentals', 'file': rental_file, 'records': 0, 'errors': 0})\n for collection in collections:\n try:\n data = self.get_data(collection['file'])\n try:\n database[collection['database']].insert_many(data)\n collection['records'] = database[collection['database']].count_documents({})\n except BulkWriteError:\n collection['errors'] += 1\n except DuplicateKeyError:\n collection['errors'] += 1\n except FileNotFoundError:\n collection['errors'] += 1\n return ((collections[0]['records'], collections[1]['records'], collections[2]['records']), (collections[0]['errors'], collections[1]['errors'], collections[2]['errors']))\n\n def show_available_products(self):\n \"\"\"Queries product database for available items Returns: Dict, with available product data\"\"\"\n prods = {}\n with MongoDB() as database:\n for prod in database['products'].find({'qty_avail': {'$gt': '0'}}):\n prods[prod['prod_id']] = {'desc': prod['description'], 'prod_type': prod['prod_type'], 'qty_avail': prod['qty_avail']}\n return prods\n\n def show_rentals(self, product_id):\n \"\"\"Queries rental database for customer by product ID Returns: Dict, user info for those who have rented product\"\"\"\n renters = {}\n with MongoDB() as database:\n id_list = [item['user_id'] for item in database['rentals'].find({'prod_id': product_id})]\n for user_id in id_list:\n user = database['customers'].find_one({'user_id': user_id})\n renters[user_id] = {'name': user['name'], 'address': user['address'], 'phone': user['phone'], 'email': user['email']}\n return renters\n\n def clear_collections(self):\n \"\"\"Clears all collections in database\"\"\"\n with MongoDB() as database:\n database['products'].drop()\n database['customers'].drop()\n database['rentals'].drop()\n", "source": "the_stack_v2_python_sparse", "source_path": "students/joli-u/lesson10/database.py", "source_repo": "JavaRod/SP_Python220B_2019", "split": "val", "star_events_count": 1}
{"blob_id": "59cece10aba217187613c3dacd8e9f1874a846a2", "bodies": ["self.data = data\nself.template: Template = data.pop(ATTR_MEDIA_CONTENT_ID, None)\nif self.template:\n self.template.hass = hass", "if self.template:\n kwargs[ATTR_MESSAGE] = message\n message = self.template.async_render(kwargs)\nservice_data = self.data.copy()\nservice_data[ATTR_MEDIA_CONTENT_ID] = message\nif kwargs.get(ATTR_DATA):\n service_data.update(kwargs[ATTR_DATA])\nservice_data = cv.make_entity_service_schema(MEDIA_PLAYER_PLAY_MEDIA_SCHEMA, extra=vol.REMOVE_EXTRA)(service_data)\nreturn await self.hass.services.async_call('media_player', 'play_media', service_data)"], "bodies_text": "<|body_start_0|>\n self.data = data\n self.template: Template = data.pop(ATTR_MEDIA_CONTENT_ID, None)\n if self.template:\n self.template.hass = hass\n<|end_body_0|>\n\n<|body_start_1|>\n if self.template:\n kwargs[ATTR_MESSAGE] = message\n message = self.template.async_render(kwargs)\n service_data = self.data.copy()\n service_data[ATTR_MEDIA_CONTENT_ID] = message\n if kwargs.get(ATTR_DATA):\n service_data.update(kwargs[ATTR_DATA])\n service_data = cv.make_entity_service_schema(MEDIA_PLAYER_PLAY_MEDIA_SCHEMA, extra=vol.REMOVE_EXTRA)(service_data)\n return await self.hass.services.async_call('media_player', 'play_media', service_data)\n<|end_body_1|>\n", "class_docstring": "Implement the notification service for Yandex Station.", "class_name": "YandexStationNotificationService", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass YandexStationNotificationService:\n \"\"\"Implement the notification service for Yandex Station.\"\"\"\n\n def __init__(self, hass, data: dict):\n \"\"\"Initialize the service.\"\"\"\n <|body_0|>\n\n async def async_send_message(self, message: str, **kwargs):\n \"\"\"Send a TTS message to the speaker.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.data = data\n self.template: Template = data.pop(ATTR_MEDIA_CONTENT_ID, None)\n if self.template:\n self.template.hass = hass\n<|end_body_0|>\n\n<|body_start_1|>\n if self.template:\n kwargs[ATTR_MESSAGE] = message\n message = self.template.async_render(kwargs)\n service_data = self.data.copy()\n service_data[ATTR_MEDIA_CONTENT_ID] = message\n if kwargs.get(ATTR_DATA):\n service_data.update(kwargs[ATTR_DATA])\n service_data = cv.make_entity_service_schema(MEDIA_PLAYER_PLAY_MEDIA_SCHEMA, extra=vol.REMOVE_EXTRA)(service_data)\n return await self.hass.services.async_call('media_player', 'play_media', service_data)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000068", "length_bytes": 2258, "license_type": "no_license", "methods": [{"docstring": "Initialize the service.", "name": "__init__", "signature": "def __init__(self, hass, data: dict)"}, {"docstring": "Send a TTS message to the speaker.", "name": "async_send_message", "signature": "async def async_send_message(self, message: str, **kwargs)"}], "n_methods": 2, "prompt": "Implement the Python class `YandexStationNotificationService` described below.\n\nClass description:\nImplement the notification service for Yandex Station.\n\nMethod signatures and docstrings:\n- def __init__(self, hass, data: dict): Initialize the service.\n- async def async_send_message(self, message: str, **kwargs): Send a TTS message to the speaker.", "prompted_full_text": "Implement the Python class `YandexStationNotificationService` described below.\n\nClass description:\nImplement the notification service for Yandex Station.\n\nMethod 
signatures and docstrings:\n- def __init__(self, hass, data: dict): Initialize the service.\n- async def async_send_message(self, message: str, **kwargs): Send a TTS message to the speaker.\n\n<|skeleton|>\nclass YandexStationNotificationService:\n \"\"\"Implement the notification service for Yandex Station.\"\"\"\n\n def __init__(self, hass, data: dict):\n \"\"\"Initialize the service.\"\"\"\n <|body_0|>\n\n async def async_send_message(self, message: str, **kwargs):\n \"\"\"Send a TTS message to the speaker.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.data = data\n self.template: Template = data.pop(ATTR_MEDIA_CONTENT_ID, None)\n if self.template:\n self.template.hass = hass\n<|end_body_0|>\n\n<|body_start_1|>\n if self.template:\n kwargs[ATTR_MESSAGE] = message\n message = self.template.async_render(kwargs)\n service_data = self.data.copy()\n service_data[ATTR_MEDIA_CONTENT_ID] = message\n if kwargs.get(ATTR_DATA):\n service_data.update(kwargs[ATTR_DATA])\n service_data = cv.make_entity_service_schema(MEDIA_PLAYER_PLAY_MEDIA_SCHEMA, extra=vol.REMOVE_EXTRA)(service_data)\n return await self.hass.services.async_call('media_player', 'play_media', service_data)\n<|end_body_1|>\n", "revision_id": "9966a647d9f1d385ac6f0365b5e0ed0b516686a6", "skeleton": "<|skeleton|>\nclass YandexStationNotificationService:\n \"\"\"Implement the notification service for Yandex Station.\"\"\"\n\n def __init__(self, hass, data: dict):\n \"\"\"Initialize the service.\"\"\"\n <|body_0|>\n\n async def async_send_message(self, message: str, **kwargs):\n \"\"\"Send a TTS message to the speaker.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class YandexStationNotificationService:\n \"\"\"Implement the notification service for Yandex Station.\"\"\"\n\n def __init__(self, hass, data: dict):\n \"\"\"Initialize the service.\"\"\"\n self.data = data\n self.template: Template = data.pop(ATTR_MEDIA_CONTENT_ID, None)\n if self.template:\n self.template.hass = hass\n\n async def async_send_message(self, message: str, **kwargs):\n \"\"\"Send a TTS message to the speaker.\"\"\"\n if self.template:\n kwargs[ATTR_MESSAGE] = message\n message = self.template.async_render(kwargs)\n service_data = self.data.copy()\n service_data[ATTR_MEDIA_CONTENT_ID] = message\n if kwargs.get(ATTR_DATA):\n service_data.update(kwargs[ATTR_DATA])\n service_data = cv.make_entity_service_schema(MEDIA_PLAYER_PLAY_MEDIA_SCHEMA, extra=vol.REMOVE_EXTRA)(service_data)\n return await self.hass.services.async_call('media_player', 'play_media', service_data)\n", "source": "the_stack_v2_python_sparse", "source_path": "custom_components/yandex_station/notify.py", "source_repo": "AlexxIT/YandexStation", "split": "val", "star_events_count": 1018}
{"blob_id": "6166b17eddcf70121e34086c42d5bd80cfc4c2b1", "bodies": ["Thread.__init__(self)\nself.address = (host, port)\nself.myView = myView\nself.isRunning = True", "try:\n self.server = socket(AF_INET, SOCK_STREAM)\n self.server.bind(self.address)\n self.server.listen(5)\n while True:\n self.myView.updateStatus('Waiting for connection ...')\n client, address = self.server.accept()\n patientName = decode(client.recv(BUFSIZE), CODE)\n self.myView.updateStatus('... connected from ' + patientName + ' at ' + str(address))\n doctor = Doctor(patientName)\n handler = ClientHandler(client, doctor, self.myView)\n handler.start()\nexcept Exception as message:\n self.myView.updateStatus(message)\nself.server.close()\nself.myView.updateStatus('Server shutting down.')"], "bodies_text": "<|body_start_0|>\n Thread.__init__(self)\n self.address = (host, port)\n self.myView = myView\n self.isRunning = True\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n self.server = socket(AF_INET, SOCK_STREAM)\n self.server.bind(self.address)\n self.server.listen(5)\n while True:\n self.myView.updateStatus('Waiting for connection ...')\n client, address = self.server.accept()\n patientName = decode(client.recv(BUFSIZE), CODE)\n self.myView.updateStatus('... connected from ' + patientName + ' at ' + str(address))\n doctor = Doctor(patientName)\n handler = ClientHandler(client, doctor, self.myView)\n handler.start()\n except Exception as message:\n self.myView.updateStatus(message)\n self.server.close()\n self.myView.updateStatus('Server shutting down.')\n<|end_body_1|>\n", "class_docstring": "Represents a server to handle multiple clients.", "class_name": "DoctorServer", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DoctorServer:\n \"\"\"Represents a server to handle multiple clients.\"\"\"\n\n def __init__(self, host, port, myView):\n \"\"\"Sets the initial state of the server.\"\"\"\n <|body_0|>\n\n def run(self):\n \"\"\"Opens the server's socket, waits for connections from clients, and serves them.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Thread.__init__(self)\n self.address = (host, port)\n self.myView = myView\n self.isRunning = True\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n self.server = socket(AF_INET, SOCK_STREAM)\n self.server.bind(self.address)\n self.server.listen(5)\n while True:\n self.myView.updateStatus('Waiting for connection ...')\n client, address = self.server.accept()\n patientName = decode(client.recv(BUFSIZE), CODE)\n self.myView.updateStatus('... 
connected from ' + patientName + ' at ' + str(address))\n doctor = Doctor(patientName)\n handler = ClientHandler(client, doctor, self.myView)\n handler.start()\n except Exception as message:\n self.myView.updateStatus(message)\n self.server.close()\n self.myView.updateStatus('Server shutting down.')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000069", "length_bytes": 2272, "license_type": "no_license", "methods": [{"docstring": "Sets the initial state of the server.", "name": "__init__", "signature": "def __init__(self, host, port, myView)"}, {"docstring": "Opens the server's socket, waits for connections from clients, and serves them.", "name": "run", "signature": "def run(self)"}], "n_methods": 2, "prompt": "Implement the Python class `DoctorServer` described below.\n\nClass description:\nRepresents a server to handle multiple clients.\n\nMethod signatures and docstrings:\n- def __init__(self, host, port, myView): Sets the initial state of the server.\n- def run(self): Opens the server's socket, waits for connections from clients, and serves them.", "prompted_full_text": "Implement the Python class `DoctorServer` described below.\n\nClass description:\nRepresents a server to handle multiple clients.\n\nMethod signatures and docstrings:\n- def __init__(self, host, port, myView): Sets the initial state of the server.\n- def run(self): Opens the server's socket, waits for connections from clients, and serves them.\n\n<|skeleton|>\nclass DoctorServer:\n \"\"\"Represents a server to handle multiple clients.\"\"\"\n\n def __init__(self, host, port, myView):\n \"\"\"Sets the initial state of the server.\"\"\"\n <|body_0|>\n\n def run(self):\n \"\"\"Opens the server's socket, waits for connections from clients, and serves them.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Thread.__init__(self)\n self.address = (host, port)\n self.myView = myView\n self.isRunning = True\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n self.server = socket(AF_INET, SOCK_STREAM)\n self.server.bind(self.address)\n self.server.listen(5)\n while True:\n self.myView.updateStatus('Waiting for connection ...')\n client, address = self.server.accept()\n patientName = decode(client.recv(BUFSIZE), CODE)\n self.myView.updateStatus('... 
connected from ' + patientName + ' at ' + str(address))\n doctor = Doctor(patientName)\n handler = ClientHandler(client, doctor, self.myView)\n handler.start()\n except Exception as message:\n self.myView.updateStatus(message)\n self.server.close()\n self.myView.updateStatus('Server shutting down.')\n<|end_body_1|>\n", "revision_id": "4b6b49098b533ef799e53d0c55b2176f62b0f1f6", "skeleton": "<|skeleton|>\nclass DoctorServer:\n \"\"\"Represents a server to handle multiple clients.\"\"\"\n\n def __init__(self, host, port, myView):\n \"\"\"Sets the initial state of the server.\"\"\"\n <|body_0|>\n\n def run(self):\n \"\"\"Opens the server's socket, waits for connections from clients, and serves them.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DoctorServer:\n \"\"\"Represents a server to handle multiple clients.\"\"\"\n\n def __init__(self, host, port, myView):\n \"\"\"Sets the initial state of the server.\"\"\"\n Thread.__init__(self)\n self.address = (host, port)\n self.myView = myView\n self.isRunning = True\n\n def run(self):\n \"\"\"Opens the server's socket, waits for connections from clients, and serves them.\"\"\"\n try:\n self.server = socket(AF_INET, SOCK_STREAM)\n self.server.bind(self.address)\n self.server.listen(5)\n while True:\n self.myView.updateStatus('Waiting for connection ...')\n client, address = self.server.accept()\n patientName = decode(client.recv(BUFSIZE), CODE)\n self.myView.updateStatus('... connected from ' + patientName + ' at ' + str(address))\n doctor = Doctor(patientName)\n handler = ClientHandler(client, doctor, self.myView)\n handler.start()\n except Exception as message:\n self.myView.updateStatus(message)\n self.server.close()\n self.myView.updateStatus('Server shutting down.')\n", "source": "the_stack_v2_python_sparse", "source_path": "doctorserver.py", "source_repo": "staufferl16/Programming111", "split": "val", "star_events_count": 0}
{"blob_id": "3a90de517da9199757efad8a7b2b029e182f8c73", "bodies": ["out = {'type': type, 'content': content}\nif id:\n out['id'] = id\nif in_response:\n out['in_response'] = in_response\ntry:\n await super().send_json(out)\nexcept (ConnectionClosed, RuntimeError) as e:\n if not silence_errors:\n raise e", "try:\n jsonschema.validate(content, schema)\nexcept jsonschema.ValidationError as err:\n try:\n in_response = content['id']\n except (TypeError, KeyError):\n in_response = None\n await self.send_json(type='error', content=str(err), in_response=in_response)\n return\nawait websocket_client_messages[content['type']].receive_content(self, content['content'], id=content['id'])"], "bodies_text": "<|body_start_0|>\n out = {'type': type, 'content': content}\n if id:\n out['id'] = id\n if in_response:\n out['in_response'] = in_response\n try:\n await super().send_json(out)\n except (ConnectionClosed, RuntimeError) as e:\n if not silence_errors:\n raise e\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n jsonschema.validate(content, schema)\n except jsonschema.ValidationError as err:\n try:\n in_response = content['id']\n except (TypeError, KeyError):\n in_response = None\n await self.send_json(type='error', content=str(err), in_response=in_response)\n return\n await websocket_client_messages[content['type']].receive_content(self, content['content'], id=content['id'])\n<|end_body_1|>\n", "class_docstring": "Mixin for JSONWebsocketConsumers, that speaks the a special protocol.", "class_name": "ProtocollAsyncJsonWebsocketConsumer", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ProtocollAsyncJsonWebsocketConsumer:\n \"\"\"Mixin for JSONWebsocketConsumers, that speaks the a special protocol.\"\"\"\n\n async def send_json(self, type: str, content: Any, id: Optional[str]=None, in_response: Optional[str]=None, silence_errors: Optional[bool]=True) -> None:\n \"\"\"Sends the data with the type. If silence_errors is True (default), all ConnectionClosed and runtime errors during sending will be ignored.\"\"\"\n <|body_0|>\n\n async def receive_json(self, content: Any) -> None:\n \"\"\"Receives the json data, parses it and calls receive_content.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n out = {'type': type, 'content': content}\n if id:\n out['id'] = id\n if in_response:\n out['in_response'] = in_response\n try:\n await super().send_json(out)\n except (ConnectionClosed, RuntimeError) as e:\n if not silence_errors:\n raise e\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n jsonschema.validate(content, schema)\n except jsonschema.ValidationError as err:\n try:\n in_response = content['id']\n except (TypeError, KeyError):\n in_response = None\n await self.send_json(type='error', content=str(err), in_response=in_response)\n return\n await websocket_client_messages[content['type']].receive_content(self, content['content'], id=content['id'])\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000070", "length_bytes": 6258, "license_type": "permissive", "methods": [{"docstring": "Sends the data with the type. 
If silence_errors is True (default), all ConnectionClosed and runtime errors during sending will be ignored.", "name": "send_json", "signature": "async def send_json(self, type: str, content: Any, id: Optional[str]=None, in_response: Optional[str]=None, silence_errors: Optional[bool]=True) -> None"}, {"docstring": "Receives the json data, parses it and calls receive_content.", "name": "receive_json", "signature": "async def receive_json(self, content: Any) -> None"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_043573", "prompt": "Implement the Python class `ProtocollAsyncJsonWebsocketConsumer` described below.\n\nClass description:\nMixin for JSONWebsocketConsumers, that speaks a special protocol.\n\nMethod signatures and docstrings:\n- async def send_json(self, type: str, content: Any, id: Optional[str]=None, in_response: Optional[str]=None, silence_errors: Optional[bool]=True) -> None: Sends the data with the type. If silence_errors is True (default), all ConnectionClosed and runtime errors during sending will be ignored.\n- async def receive_json(self, content: Any) -> None: Receives the json data, parses it and calls receive_content.", "prompted_full_text": "Implement the Python class `ProtocollAsyncJsonWebsocketConsumer` described below.\n\nClass description:\nMixin for JSONWebsocketConsumers, that speaks a special protocol.\n\nMethod signatures and docstrings:\n- async def send_json(self, type: str, content: Any, id: Optional[str]=None, in_response: Optional[str]=None, silence_errors: Optional[bool]=True) -> None: Sends the data with the type. 
If silence_errors is True (default), all ConnectionClosed and runtime errors during sending will be ignored.\n- async def receive_json(self, content: Any) -> None: Receives the json data, parses it and calls receive_content.\n\n<|skeleton|>\nclass ProtocollAsyncJsonWebsocketConsumer:\n \"\"\"Mixin for JSONWebsocketConsumers, that speaks a special protocol.\"\"\"\n\n async def send_json(self, type: str, content: Any, id: Optional[str]=None, in_response: Optional[str]=None, silence_errors: Optional[bool]=True) -> None:\n \"\"\"Sends the data with the type. 
If silence_errors is True (default), all ConnectionClosed and runtime errors during sending will be ignored.\"\"\"\n <|body_0|>\n\n async def receive_json(self, content: Any) -> None:\n \"\"\"Receives the json data, parses it and calls receive_content.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n out = {'type': type, 'content': content}\n if id:\n out['id'] = id\n if in_response:\n out['in_response'] = in_response\n try:\n await super().send_json(out)\n except (ConnectionClosed, RuntimeError) as e:\n if not silence_errors:\n raise e\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n jsonschema.validate(content, schema)\n except jsonschema.ValidationError as err:\n try:\n in_response = content['id']\n except (TypeError, KeyError):\n in_response = None\n await self.send_json(type='error', content=str(err), in_response=in_response)\n return\n await websocket_client_messages[content['type']].receive_content(self, content['content'], id=content['id'])\n<|end_body_1|>\n", "revision_id": "4495985d4c752d9e56d1011a4396a7cb444070a6", "skeleton": "<|skeleton|>\nclass ProtocollAsyncJsonWebsocketConsumer:\n \"\"\"Mixin for JSONWebsocketConsumers, that speaks a special protocol.\"\"\"\n\n async def send_json(self, type: str, content: Any, id: Optional[str]=None, in_response: Optional[str]=None, silence_errors: Optional[bool]=True) -> None:\n \"\"\"Sends the data with the type. If silence_errors is True (default), all ConnectionClosed and runtime errors during sending will be ignored.\"\"\"\n <|body_0|>\n\n async def receive_json(self, content: Any) -> None:\n \"\"\"Receives the json data, parses it and calls receive_content.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ProtocollAsyncJsonWebsocketConsumer:\n \"\"\"Mixin for JSONWebsocketConsumers, that speaks a special protocol.\"\"\"\n\n async def send_json(self, type: str, content: Any, id: Optional[str]=None, in_response: Optional[str]=None, silence_errors: Optional[bool]=True) -> None:\n \"\"\"Sends the data with the type. If silence_errors is True (default), all ConnectionClosed and runtime errors during sending will be ignored.\"\"\"\n out = {'type': type, 'content': content}\n if id:\n out['id'] = id\n if in_response:\n out['in_response'] = in_response\n try:\n await super().send_json(out)\n except (ConnectionClosed, RuntimeError) as e:\n if not silence_errors:\n raise e\n\n async def receive_json(self, content: Any) -> None:\n \"\"\"Receives the json data, parses it and calls receive_content.\"\"\"\n try:\n jsonschema.validate(content, schema)\n except jsonschema.ValidationError as err:\n try:\n in_response = content['id']\n except (TypeError, KeyError):\n in_response = None\n await self.send_json(type='error', content=str(err), in_response=in_response)\n return\n await websocket_client_messages[content['type']].receive_content(self, content['content'], id=content['id'])\n", "source": "the_stack_v2_python_sparse", "source_path": "openslides/utils/websocket.py", "source_repo": "Intevation/OpenSlides", "split": "val", "star_events_count": 0}
{"blob_id": "a39c08e24f2cf72a209282b18c8cac1e0a9ec213", "bodies": ["self._parameters = parameters\nif not hasattr(self, '_mapper'):\n self._mapper = AzureProviderMap(provider=self.provider, report_type=parameters.report_type)\nif parameters.get_filter('enabled') is None:\n parameters.set_filter(**{'enabled': True})\nsuper().__init__(parameters)", "enabled_parameter = self._parameters.get_filter('enabled') in (None, True)\nfilter_map = deepcopy(TagQueryHandler.FILTER_MAP)\nif self._parameters.get_filter('value'):\n filter_map.update({'subscription_guid': {'field': 'subscription_guids', 'operation': 'icontains'}, 'enabled': {'field': 'enabled', 'operation': 'exact', 'parameter': enabled_parameter}})\nelse:\n filter_map.update({'subscription_guid': {'field': 'subscription_guid', 'operation': 'icontains'}, 'enabled': {'field': 'enabled', 'operation': 'exact', 'parameter': enabled_parameter}})\nreturn filter_map"], "bodies_text": "<|body_start_0|>\n self._parameters = parameters\n if not hasattr(self, '_mapper'):\n self._mapper = AzureProviderMap(provider=self.provider, report_type=parameters.report_type)\n if parameters.get_filter('enabled') is None:\n parameters.set_filter(**{'enabled': True})\n super().__init__(parameters)\n<|end_body_0|>\n\n<|body_start_1|>\n enabled_parameter = self._parameters.get_filter('enabled') in (None, True)\n filter_map = deepcopy(TagQueryHandler.FILTER_MAP)\n if self._parameters.get_filter('value'):\n filter_map.update({'subscription_guid': {'field': 'subscription_guids', 'operation': 'icontains'}, 'enabled': {'field': 'enabled', 'operation': 'exact', 'parameter': enabled_parameter}})\n else:\n filter_map.update({'subscription_guid': {'field': 'subscription_guid', 'operation': 'icontains'}, 'enabled': {'field': 'enabled', 'operation': 'exact', 'parameter': enabled_parameter}})\n return filter_map\n<|end_body_1|>\n", "class_docstring": "Handles tag queries and responses for Azure.", "class_name": "AzureTagQueryHandler", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AzureTagQueryHandler:\n \"\"\"Handles tag queries and responses for Azure.\"\"\"\n\n def __init__(self, parameters):\n \"\"\"Establish Azure report query handler. 
Args: parameters (QueryParameters): parameter object for query\"\"\"\n <|body_0|>\n\n def filter_map(self):\n \"\"\"Establish which filter map to use based on tag API.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._parameters = parameters\n if not hasattr(self, '_mapper'):\n self._mapper = AzureProviderMap(provider=self.provider, report_type=parameters.report_type)\n if parameters.get_filter('enabled') is None:\n parameters.set_filter(**{'enabled': True})\n super().__init__(parameters)\n<|end_body_0|>\n\n<|body_start_1|>\n enabled_parameter = self._parameters.get_filter('enabled') in (None, True)\n filter_map = deepcopy(TagQueryHandler.FILTER_MAP)\n if self._parameters.get_filter('value'):\n filter_map.update({'subscription_guid': {'field': 'subscription_guids', 'operation': 'icontains'}, 'enabled': {'field': 'enabled', 'operation': 'exact', 'parameter': enabled_parameter}})\n else:\n filter_map.update({'subscription_guid': {'field': 'subscription_guid', 'operation': 'icontains'}, 'enabled': {'field': 'enabled', 'operation': 'exact', 'parameter': enabled_parameter}})\n return filter_map\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000071", "length_bytes": 2630, "license_type": "permissive", "methods": [{"docstring": "Establish Azure report query handler. Args: parameters (QueryParameters): parameter object for query", "name": "__init__", "signature": "def __init__(self, parameters)"}, {"docstring": "Establish which filter map to use based on tag API.", "name": "filter_map", "signature": "def filter_map(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_045236", "prompt": "Implement the Python class `AzureTagQueryHandler` described below.\n\nClass description:\nHandles tag queries and responses for Azure.\n\nMethod signatures and docstrings:\n- def __init__(self, parameters): Establish Azure report query handler. Args: parameters (QueryParameters): parameter object for query\n- def filter_map(self): Establish which filter map to use based on tag API.", "prompted_full_text": "Implement the Python class `AzureTagQueryHandler` described below.\n\nClass description:\nHandles tag queries and responses for Azure.\n\nMethod signatures and docstrings:\n- def __init__(self, parameters): Establish Azure report query handler. Args: parameters (QueryParameters): parameter object for query\n- def filter_map(self): Establish which filter map to use based on tag API.\n\n<|skeleton|>\nclass AzureTagQueryHandler:\n \"\"\"Handles tag queries and responses for Azure.\"\"\"\n\n def __init__(self, parameters):\n \"\"\"Establish Azure report query handler. 
Args: parameters (QueryParameters): parameter object for query\"\"\"\n <|body_0|>\n\n def filter_map(self):\n \"\"\"Establish which filter map to use based on tag API.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._parameters = parameters\n if not hasattr(self, '_mapper'):\n self._mapper = AzureProviderMap(provider=self.provider, report_type=parameters.report_type)\n if parameters.get_filter('enabled') is None:\n parameters.set_filter(**{'enabled': True})\n super().__init__(parameters)\n<|end_body_0|>\n\n<|body_start_1|>\n enabled_parameter = self._parameters.get_filter('enabled') in (None, True)\n filter_map = deepcopy(TagQueryHandler.FILTER_MAP)\n if self._parameters.get_filter('value'):\n filter_map.update({'subscription_guid': {'field': 'subscription_guids', 'operation': 'icontains'}, 'enabled': {'field': 'enabled', 'operation': 'exact', 'parameter': enabled_parameter}})\n else:\n filter_map.update({'subscription_guid': {'field': 'subscription_guid', 'operation': 'icontains'}, 'enabled': {'field': 'enabled', 'operation': 'exact', 'parameter': enabled_parameter}})\n return filter_map\n<|end_body_1|>\n", "revision_id": "0416e5216eb1ec4b41c8dd4999adde218b1ab2e1", "skeleton": "<|skeleton|>\nclass AzureTagQueryHandler:\n \"\"\"Handles tag queries and responses for Azure.\"\"\"\n\n def __init__(self, parameters):\n \"\"\"Establish Azure report query handler. Args: parameters (QueryParameters): parameter object for query\"\"\"\n <|body_0|>\n\n def filter_map(self):\n \"\"\"Establish which filter map to use based on tag API.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class AzureTagQueryHandler:\n \"\"\"Handles tag queries and responses for Azure.\"\"\"\n\n def __init__(self, parameters):\n \"\"\"Establish Azure report query handler. Args: parameters (QueryParameters): parameter object for query\"\"\"\n self._parameters = parameters\n if not hasattr(self, '_mapper'):\n self._mapper = AzureProviderMap(provider=self.provider, report_type=parameters.report_type)\n if parameters.get_filter('enabled') is None:\n parameters.set_filter(**{'enabled': True})\n super().__init__(parameters)\n\n def filter_map(self):\n \"\"\"Establish which filter map to use based on tag API.\"\"\"\n enabled_parameter = self._parameters.get_filter('enabled') in (None, True)\n filter_map = deepcopy(TagQueryHandler.FILTER_MAP)\n if self._parameters.get_filter('value'):\n filter_map.update({'subscription_guid': {'field': 'subscription_guids', 'operation': 'icontains'}, 'enabled': {'field': 'enabled', 'operation': 'exact', 'parameter': enabled_parameter}})\n else:\n filter_map.update({'subscription_guid': {'field': 'subscription_guid', 'operation': 'icontains'}, 'enabled': {'field': 'enabled', 'operation': 'exact', 'parameter': enabled_parameter}})\n return filter_map\n", "source": "the_stack_v2_python_sparse", "source_path": "koku/api/tags/azure/queries.py", "source_repo": "project-koku/koku", "split": "val", "star_events_count": 225}
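The record above ends here. Its filter_map body switches the subscription_guid filter between a singular and a plural field depending on whether a tag-value filter is active. A minimal, self-contained sketch of that switching pattern follows; the base map, the build_filter_map helper, and the plain-dict filters argument are hypothetical stand-ins for the koku classes (TagQueryHandler.FILTER_MAP, QueryParameters) that the record assumes, not the project's real API.

# Sketch only: BASE_FILTER_MAP and build_filter_map are illustrative names.
from copy import deepcopy

BASE_FILTER_MAP = {'account': {'field': 'account', 'operation': 'icontains'}}  # assumed base map


def build_filter_map(filters: dict) -> dict:
    """Return a copy of the base map, pointing subscription_guid at the
    plural field when a tag-value filter is present."""
    enabled_parameter = filters.get('enabled') in (None, True)
    filter_map = deepcopy(BASE_FILTER_MAP)
    guid_field = 'subscription_guids' if filters.get('value') else 'subscription_guid'
    filter_map.update({
        'subscription_guid': {'field': guid_field, 'operation': 'icontains'},
        'enabled': {'field': 'enabled', 'operation': 'exact', 'parameter': enabled_parameter},
    })
    return filter_map


print(build_filter_map({'value': 'env'})['subscription_guid']['field'])  # subscription_guids
print(build_filter_map({})['subscription_guid']['field'])                # subscription_guid

Deep-copying the shared class-level map before mutating it is the load-bearing detail: updating TagQueryHandler.FILTER_MAP in place would leak one request's field choice into every later query.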
{"blob_id": "709c06bf4af386421f56b6f0753e2dd2007f1d9d", "bodies": ["self._dep = dependency\nself._name = component_name\nself._dirs = dirs or {}\nself._flags = flags or {}", "context = {'dep': self._dep, 'comp_name': self._name, 'dep_name': _makefy(self._dep.ref.name), 'name': _makefy(self._name), 'cpp_info_dirs': self._dirs, 'cpp_info_flags': self._flags}\ntemplate = Template(_jinja_format_list_values() + self.template, trim_blocks=True, lstrip_blocks=True, undefined=StrictUndefined)\nreturn template.render(context)"], "bodies_text": "<|body_start_0|>\n self._dep = dependency\n self._name = component_name\n self._dirs = dirs or {}\n self._flags = flags or {}\n<|end_body_0|>\n\n<|body_start_1|>\n context = {'dep': self._dep, 'comp_name': self._name, 'dep_name': _makefy(self._dep.ref.name), 'name': _makefy(self._name), 'cpp_info_dirs': self._dirs, 'cpp_info_flags': self._flags}\n template = Template(_jinja_format_list_values() + self.template, trim_blocks=True, lstrip_blocks=True, undefined=StrictUndefined)\n return template.render(context)\n<|end_body_1|>\n", "class_docstring": "Generates Makefile content for each dependency component", "class_name": "DepComponentContentGenerator", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DepComponentContentGenerator:\n \"\"\"Generates Makefile content for each dependency component\"\"\"\n\n def __init__(self, dependency, component_name: str, dirs: dict, flags: dict):\n \"\"\":param dependency: The dependency object that owns the component :param component_name: component raw name e.g. poco::poco_json :param dirs: The component cpp_info folders :param flags: The component cpp_info variables\"\"\"\n <|body_0|>\n\n def content(self) -> str:\n \"\"\"Format template and generate Makefile component\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._dep = dependency\n self._name = component_name\n self._dirs = dirs or {}\n self._flags = flags or {}\n<|end_body_0|>\n\n<|body_start_1|>\n context = {'dep': self._dep, 'comp_name': self._name, 'dep_name': _makefy(self._dep.ref.name), 'name': _makefy(self._name), 'cpp_info_dirs': self._dirs, 'cpp_info_flags': self._flags}\n template = Template(_jinja_format_list_values() + self.template, trim_blocks=True, lstrip_blocks=True, undefined=StrictUndefined)\n return template.render(context)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000072", "length_bytes": 27066, "license_type": "permissive", "methods": [{"docstring": ":param dependency: The dependency object that owns the component :param component_name: component raw name e.g. poco::poco_json :param dirs: The component cpp_info folders :param flags: The component cpp_info variables", "name": "__init__", "signature": "def __init__(self, dependency, component_name: str, dirs: dict, flags: dict)"}, {"docstring": "Format template and generate Makefile component", "name": "content", "signature": "def content(self) -> str"}], "n_methods": 2, "prompt": "Implement the Python class `DepComponentContentGenerator` described below.\n\nClass description:\nGenerates Makefile content for each dependency component\n\nMethod signatures and docstrings:\n- def __init__(self, dependency, component_name: str, dirs: dict, flags: dict): :param dependency: The dependency object that owns the component :param component_name: component raw name e.g. 
poco::poco_json :param dirs: The component cpp_info folders :param flags: The component cpp_info variables\n- def content(self) -> str: Format template and generate Makefile component", "prompted_full_text": "Implement the Python class `DepComponentContentGenerator` described below.\n\nClass description:\nGenerates Makefile content for each dependency component\n\nMethod signatures and docstrings:\n- def __init__(self, dependency, component_name: str, dirs: dict, flags: dict): :param dependency: The dependency object that owns the component :param component_name: component raw name e.g. poco::poco_json :param dirs: The component cpp_info folders :param flags: The component cpp_info variables\n- def content(self) -> str: Format template and generate Makefile component\n\n<|skeleton|>\nclass DepComponentContentGenerator:\n \"\"\"Generates Makefile content for each dependency component\"\"\"\n\n def __init__(self, dependency, component_name: str, dirs: dict, flags: dict):\n \"\"\":param dependency: The dependency object that owns the component :param component_name: component raw name e.g. poco::poco_json :param dirs: The component cpp_info folders :param flags: The component cpp_info variables\"\"\"\n <|body_0|>\n\n def content(self) -> str:\n \"\"\"Format template and generate Makefile component\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._dep = dependency\n self._name = component_name\n self._dirs = dirs or {}\n self._flags = flags or {}\n<|end_body_0|>\n\n<|body_start_1|>\n context = {'dep': self._dep, 'comp_name': self._name, 'dep_name': _makefy(self._dep.ref.name), 'name': _makefy(self._name), 'cpp_info_dirs': self._dirs, 'cpp_info_flags': self._flags}\n template = Template(_jinja_format_list_values() + self.template, trim_blocks=True, lstrip_blocks=True, undefined=StrictUndefined)\n return template.render(context)\n<|end_body_1|>\n", "revision_id": "bac455d1329b6744cdc41747354a727c9233179f", "skeleton": "<|skeleton|>\nclass DepComponentContentGenerator:\n \"\"\"Generates Makefile content for each dependency component\"\"\"\n\n def __init__(self, dependency, component_name: str, dirs: dict, flags: dict):\n \"\"\":param dependency: The dependency object that owns the component :param component_name: component raw name e.g. poco::poco_json :param dirs: The component cpp_info folders :param flags: The component cpp_info variables\"\"\"\n <|body_0|>\n\n def content(self) -> str:\n \"\"\"Format template and generate Makefile component\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DepComponentContentGenerator:\n \"\"\"Generates Makefile content for each dependency component\"\"\"\n\n def __init__(self, dependency, component_name: str, dirs: dict, flags: dict):\n \"\"\":param dependency: The dependency object that owns the component :param component_name: component raw name e.g. 
poco::poco_json :param dirs: The component cpp_info folders :param flags: The component cpp_info variables\"\"\"\n self._dep = dependency\n self._name = component_name\n self._dirs = dirs or {}\n self._flags = flags or {}\n\n def content(self) -> str:\n \"\"\"Format template and generate Makefile component\"\"\"\n context = {'dep': self._dep, 'comp_name': self._name, 'dep_name': _makefy(self._dep.ref.name), 'name': _makefy(self._name), 'cpp_info_dirs': self._dirs, 'cpp_info_flags': self._flags}\n template = Template(_jinja_format_list_values() + self.template, trim_blocks=True, lstrip_blocks=True, undefined=StrictUndefined)\n return template.render(context)\n", "source": "the_stack_v2_python_sparse", "source_path": "conan/tools/gnu/makedeps.py", "source_repo": "conan-io/conan", "split": "val", "star_events_count": 7754}
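The content() body in the record above builds a Jinja2 Template with trim_blocks, lstrip_blocks, and StrictUndefined. The sketch below demonstrates that construction with a toy Makefile-style template; the template text and context values are illustrative, not Conan's real ones, and only the Jinja2 calls themselves are taken as given.

# Sketch only: TEMPLATE and the context dict are made up for illustration.
from jinja2 import StrictUndefined, Template

TEMPLATE = """\
# {{ dep_name }} component {{ comp_name }}
{{ name }}_INCLUDE_DIRS = {{ cpp_info_dirs.get('includedirs', []) | join(' ') }}
"""

context = {
    'dep_name': 'poco',
    'comp_name': 'poco::poco_json',
    'name': 'poco_poco_json',
    'cpp_info_dirs': {'includedirs': ['include']},
}
template = Template(TEMPLATE, trim_blocks=True, lstrip_blocks=True, undefined=StrictUndefined)
print(template.render(context))

StrictUndefined is the notable choice: with the default Undefined, a context key missing from render() silently becomes an empty string in the generated Makefile, whereas StrictUndefined raises at render time, which is usually what a code generator wants.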
{"blob_id": "58ca928a4e957b60816a63faaa60fc28dfcc0090", "bodies": ["self.preprocess_function = preprocess_function\nself.axis_labels = axis_labels if axis_labels is not None else []\nself.explainer = LimeImageExplainer(kernel_width, kernel, verbose, feature_selection, random_state)", "input_data, full_preprocess_function = self._prepare_image_data(input_data)\nrunner = utils.get_function(model_or_function, preprocess_function=full_preprocess_function)\nexplain_instance_kwargs = utils.get_kwargs_applicable_to_function(self.explainer.explain_instance, kwargs)\nexplanation = self.explainer.explain_instance(input_data, runner, labels=labels, top_labels=top_labels, num_features=num_features, num_samples=num_samples, **explain_instance_kwargs)\nif return_masks:\n get_image_and_mask_kwargs = utils.get_kwargs_applicable_to_function(explanation.get_image_and_mask, kwargs)\n maps = [explanation.get_image_and_mask(label, positive_only=positive_only, hide_rest=hide_rest, num_features=num_features, **get_image_and_mask_kwargs)[1] for label in labels]\nelse:\n maps = [self._get_explanation_values(label, explanation) for label in labels]\nreturn maps", "axis_label_names = self.axis_labels.values() if isinstance(self.axis_labels, dict) else self.axis_labels\nif not axis_label_names:\n channels_axis_index = utils.locate_channels_axis(input_data.shape)\n self.axis_labels = {channels_axis_index: 'channels'}\nelif 'channels' not in axis_label_names:\n raise ValueError('When providing axis_labels it is required to provide the location of the channels axis')\ninput_data = utils.to_xarray(input_data, self.axis_labels)\nchannels_axis_index = input_data.dims.index('channels')\ninput_data = utils.move_axis(input_data, 'channels', -1)\ngreyscale = False\nif len(input_data['channels']) == 1:\n greyscale = True\n input_data = input_data.sel(channels=0).expand_dims({'channels': 3}, axis=input_data.dims.index('channels'))\nfull_preprocess_function = self._get_full_preprocess_function(channels_axis_index, input_data.dtype, greyscale)\nreturn (input_data.values.astype(np.float64), full_preprocess_function)", "if greyscale:\n\n def moveaxis_function(data):\n return np.moveaxis(data[..., [0]], -1, channel_axis_index + 1).astype(dtype)\nelse:\n\n def moveaxis_function(data):\n return np.moveaxis(data, -1, channel_axis_index + 1).astype(dtype)\nif self.preprocess_function is None:\n return moveaxis_function\nreturn lambda data: self.preprocess_function(moveaxis_function(data))", "class_explanation = explanation.local_exp[label]\nsalience_map = np.zeros(explanation.segments.shape, dtype=class_explanation[0][1].dtype)\nfor segment_id, segment_val in class_explanation:\n salience_map[segment_id == explanation.segments] = segment_val\nreturn salience_map"], "bodies_text": "<|body_start_0|>\n self.preprocess_function = preprocess_function\n self.axis_labels = axis_labels if axis_labels is not None else []\n self.explainer = LimeImageExplainer(kernel_width, kernel, verbose, feature_selection, random_state)\n<|end_body_0|>\n\n<|body_start_1|>\n input_data, full_preprocess_function = self._prepare_image_data(input_data)\n runner = utils.get_function(model_or_function, preprocess_function=full_preprocess_function)\n explain_instance_kwargs = utils.get_kwargs_applicable_to_function(self.explainer.explain_instance, kwargs)\n explanation = self.explainer.explain_instance(input_data, runner, labels=labels, top_labels=top_labels, num_features=num_features, num_samples=num_samples, **explain_instance_kwargs)\n if return_masks:\n 
get_image_and_mask_kwargs = utils.get_kwargs_applicable_to_function(explanation.get_image_and_mask, kwargs)\n maps = [explanation.get_image_and_mask(label, positive_only=positive_only, hide_rest=hide_rest, num_features=num_features, **get_image_and_mask_kwargs)[1] for label in labels]\n else:\n maps = [self._get_explanation_values(label, explanation) for label in labels]\n return maps\n<|end_body_1|>\n\n<|body_start_2|>\n axis_label_names = self.axis_labels.values() if isinstance(self.axis_labels, dict) else self.axis_labels\n if not axis_label_names:\n channels_axis_index = utils.locate_channels_axis(input_data.shape)\n self.axis_labels = {channels_axis_index: 'channels'}\n elif 'channels' not in axis_label_names:\n raise ValueError('When providing axis_labels it is required to provide the location of the channels axis')\n input_data = utils.to_xarray(input_data, self.axis_labels)\n channels_axis_index = input_data.dims.index('channels')\n input_data = utils.move_axis(input_data, 'channels', -1)\n greyscale = False\n if len(input_data['channels']) == 1:\n greyscale = True\n input_data = input_data.sel(channels=0).expand_dims({'channels': 3}, axis=input_data.dims.index('channels'))\n full_preprocess_function = self._get_full_preprocess_function(channels_axis_index, input_data.dtype, greyscale)\n return (input_data.values.astype(np.float64), full_preprocess_function)\n<|end_body_2|>\n\n<|body_start_3|>\n if greyscale:\n\n def moveaxis_function(data):\n return np.moveaxis(data[..., [0]], -1, channel_axis_index + 1).astype(dtype)\n else:\n\n def moveaxis_function(data):\n return np.moveaxis(data, -1, channel_axis_index + 1).astype(dtype)\n if self.preprocess_function is None:\n return moveaxis_function\n return lambda data: self.preprocess_function(moveaxis_function(data))\n<|end_body_3|>\n\n<|body_start_4|>\n class_explanation = explanation.local_exp[label]\n salience_map = np.zeros(explanation.segments.shape, dtype=class_explanation[0][1].dtype)\n for segment_id, segment_val in class_explanation:\n salience_map[segment_id == explanation.segments] = segment_val\n return salience_map\n<|end_body_4|>\n", "class_docstring": "Wrapper around the LIME explainer. Lime explainer byMarco Tulio Correia Ribeiro (https://github.com/marcotcr/lime).", "class_name": "LIMEImage", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LIMEImage:\n \"\"\"Wrapper around the LIME explainer. Lime explainer byMarco Tulio Correia Ribeiro (https://github.com/marcotcr/lime).\"\"\"\n\n def __init__(self, kernel_width=25, kernel=None, verbose=False, feature_selection='auto', random_state=None, axis_labels=None, preprocess_function=None):\n \"\"\"Initializes Lime explainer. Args: kernel_width (int, optional): kernel width kernel (callable, optional): kernel verbose (bool, optional): verbose feature_selection (str, optional): feature selection random_state (int or np.RandomState, optional): seed or random state axis_labels (dict/list, optional): If a dict, key,value pairs of axis index, name. If a list, the name of each axis where the index in the list is the axis index preprocess_function (callable, optional): Function to preprocess input data with\"\"\"\n <|body_0|>\n\n def explain(self, model_or_function, input_data, labels, top_labels=None, num_features=10, num_samples=5000, return_masks=True, positive_only=False, hide_rest=True, **kwargs):\n \"\"\"Run the LIME explainer. 
Args: model_or_function (callable or str): The function that runs the model to be explained _or_ the path to a ONNX model on disk. input_data (np.ndarray): Data to be explained. Must be an \"RGB image\", i.e. with values in the [0,255] range. labels (Iterable(int)): Indices of classes to be explained top_labels: Top labels num_features (int): Number of features num_samples (int): Number of samples return_masks (bool): If true, return discretized masks. Otherwise, return LIME scores positive_only (bool): Positive only hide_rest (bool): Hide rest kwargs: These parameters are passed on Other keyword arguments: see the LIME documentation for LimeImageExplainer.explain_insta\"\"\"\n <|body_1|>\n\n def _prepare_image_data(self, input_data):\n \"\"\"Transforms the data to be of the shape and type LIME expects. Args: input_data (NumPy-compatible array): Data to be explained Returns: transformed input data, preprocessing function to use with utils.get_function()\"\"\"\n <|body_2|>\n\n def _get_full_preprocess_function(self, channel_axis_index, dtype, greyscale=False):\n \"\"\"Creates a full preprocessing function. Creates a preprocessing function that incorporates both the (optional) user's preprocessing function, as well as any needed dtype and shape conversions Args: channel_axis_index (int): Axis index of the channels in the input data dtype (type): Data type of the input data (e.g. np.float32) greyscale (bool): Whether or not the data is greyscale (i.e. one channel) Returns: Function that first ensures the data has the same shape and type as the input data, then runs the users' preprocessing function\"\"\"\n <|body_3|>\n\n def _get_explanation_values(self, label: int, explanation: ImageExplanation) -> np.array:\n \"\"\"Get the importance scores from LIME in a salience map. Leverages the `ImageExplanation` class from LIME to generate salience maps. These salience maps are constructed using the segmentation masks from the explanation and fills these with the scores from the surrogate model (default for LIME is Ridge regression) used for the explanation. 
Args: label: The class label for the given explanation explanation: An Image Explanation generated by LIME Returns: A salience map containing the feature importances from LIME\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.preprocess_function = preprocess_function\n self.axis_labels = axis_labels if axis_labels is not None else []\n self.explainer = LimeImageExplainer(kernel_width, kernel, verbose, feature_selection, random_state)\n<|end_body_0|>\n\n<|body_start_1|>\n input_data, full_preprocess_function = self._prepare_image_data(input_data)\n runner = utils.get_function(model_or_function, preprocess_function=full_preprocess_function)\n explain_instance_kwargs = utils.get_kwargs_applicable_to_function(self.explainer.explain_instance, kwargs)\n explanation = self.explainer.explain_instance(input_data, runner, labels=labels, top_labels=top_labels, num_features=num_features, num_samples=num_samples, **explain_instance_kwargs)\n if return_masks:\n get_image_and_mask_kwargs = utils.get_kwargs_applicable_to_function(explanation.get_image_and_mask, kwargs)\n maps = [explanation.get_image_and_mask(label, positive_only=positive_only, hide_rest=hide_rest, num_features=num_features, **get_image_and_mask_kwargs)[1] for label in labels]\n else:\n maps = [self._get_explanation_values(label, explanation) for label in labels]\n return maps\n<|end_body_1|>\n\n<|body_start_2|>\n axis_label_names = self.axis_labels.values() if isinstance(self.axis_labels, dict) else self.axis_labels\n if not axis_label_names:\n channels_axis_index = utils.locate_channels_axis(input_data.shape)\n self.axis_labels = {channels_axis_index: 'channels'}\n elif 'channels' not in axis_label_names:\n raise ValueError('When providing axis_labels it is required to provide the location of the channels axis')\n input_data = utils.to_xarray(input_data, self.axis_labels)\n channels_axis_index = input_data.dims.index('channels')\n input_data = utils.move_axis(input_data, 'channels', -1)\n greyscale = False\n if len(input_data['channels']) == 1:\n greyscale = True\n input_data = input_data.sel(channels=0).expand_dims({'channels': 3}, axis=input_data.dims.index('channels'))\n full_preprocess_function = self._get_full_preprocess_function(channels_axis_index, input_data.dtype, greyscale)\n return (input_data.values.astype(np.float64), full_preprocess_function)\n<|end_body_2|>\n\n<|body_start_3|>\n if greyscale:\n\n def moveaxis_function(data):\n return np.moveaxis(data[..., [0]], -1, channel_axis_index + 1).astype(dtype)\n else:\n\n def moveaxis_function(data):\n return np.moveaxis(data, -1, channel_axis_index + 1).astype(dtype)\n if self.preprocess_function is None:\n return moveaxis_function\n return lambda data: self.preprocess_function(moveaxis_function(data))\n<|end_body_3|>\n\n<|body_start_4|>\n class_explanation = explanation.local_exp[label]\n salience_map = np.zeros(explanation.segments.shape, dtype=class_explanation[0][1].dtype)\n for segment_id, segment_val in class_explanation:\n salience_map[segment_id == explanation.segments] = segment_val\n return salience_map\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000073", "length_bytes": 14834, "license_type": "permissive", "methods": [{"docstring": "Initializes Lime explainer. 
Args: kernel_width (int, optional): kernel width kernel (callable, optional): kernel verbose (bool, optional): verbose feature_selection (str, optional): feature selection random_state (int or np.RandomState, optional): seed or random state axis_labels (dict/list, optional): If a dict, key,value pairs of axis index, name. If a list, the name of each axis where the index in the list is the axis index preprocess_function (callable, optional): Function to preprocess input data with", "name": "__init__", "signature": "def __init__(self, kernel_width=25, kernel=None, verbose=False, feature_selection='auto', random_state=None, axis_labels=None, preprocess_function=None)"}, {"docstring": "Run the LIME explainer. Args: model_or_function (callable or str): The function that runs the model to be explained _or_ the path to a ONNX model on disk. input_data (np.ndarray): Data to be explained. Must be an \"RGB image\", i.e. with values in the [0,255] range. labels (Iterable(int)): Indices of classes to be explained top_labels: Top labels num_features (int): Number of features num_samples (int): Number of samples return_masks (bool): If true, return discretized masks. Otherwise, return LIME scores positive_only (bool): Positive only hide_rest (bool): Hide rest kwargs: These parameters are passed on Other keyword arguments: see the LIME documentation for LimeImageExplainer.explain_insta", "name": "explain", "signature": "def explain(self, model_or_function, input_data, labels, top_labels=None, num_features=10, num_samples=5000, return_masks=True, positive_only=False, hide_rest=True, **kwargs)"}, {"docstring": "Transforms the data to be of the shape and type LIME expects. Args: input_data (NumPy-compatible array): Data to be explained Returns: transformed input data, preprocessing function to use with utils.get_function()", "name": "_prepare_image_data", "signature": "def _prepare_image_data(self, input_data)"}, {"docstring": "Creates a full preprocessing function. Creates a preprocessing function that incorporates both the (optional) user's preprocessing function, as well as any needed dtype and shape conversions Args: channel_axis_index (int): Axis index of the channels in the input data dtype (type): Data type of the input data (e.g. np.float32) greyscale (bool): Whether or not the data is greyscale (i.e. one channel) Returns: Function that first ensures the data has the same shape and type as the input data, then runs the users' preprocessing function", "name": "_get_full_preprocess_function", "signature": "def _get_full_preprocess_function(self, channel_axis_index, dtype, greyscale=False)"}, {"docstring": "Get the importance scores from LIME in a salience map. Leverages the `ImageExplanation` class from LIME to generate salience maps. These salience maps are constructed using the segmentation masks from the explanation and fills these with the scores from the surrogate model (default for LIME is Ridge regression) used for the explanation. Args: label: The class label for the given explanation explanation: An Image Explanation generated by LIME Returns: A salience map containing the feature importances from LIME", "name": "_get_explanation_values", "signature": "def _get_explanation_values(self, label: int, explanation: ImageExplanation) -> np.array"}], "n_methods": 5, "prompt": "Implement the Python class `LIMEImage` described below.\n\nClass description:\nWrapper around the LIME explainer. 
Lime explainer byMarco Tulio Correia Ribeiro (https://github.com/marcotcr/lime).\n\nMethod signatures and docstrings:\n- def __init__(self, kernel_width=25, kernel=None, verbose=False, feature_selection='auto', random_state=None, axis_labels=None, preprocess_function=None): Initializes Lime explainer. Args: kernel_width (int, optional): kernel width kernel (callable, optional): kernel verbose (bool, optional): verbose feature_selection (str, optional): feature selection random_state (int or np.RandomState, optional): seed or random state axis_labels (dict/list, optional): If a dict, key,value pairs of axis index, name. If a list, the name of each axis where the index in the list is the axis index preprocess_function (callable, optional): Function to preprocess input data with\n- def explain(self, model_or_function, input_data, labels, top_labels=None, num_features=10, num_samples=5000, return_masks=True, positive_only=False, hide_rest=True, **kwargs): Run the LIME explainer. Args: model_or_function (callable or str): The function that runs the model to be explained _or_ the path to a ONNX model on disk. input_data (np.ndarray): Data to be explained. Must be an \"RGB image\", i.e. with values in the [0,255] range. labels (Iterable(int)): Indices of classes to be explained top_labels: Top labels num_features (int): Number of features num_samples (int): Number of samples return_masks (bool): If true, return discretized masks. Otherwise, return LIME scores positive_only (bool): Positive only hide_rest (bool): Hide rest kwargs: These parameters are passed on Other keyword arguments: see the LIME documentation for LimeImageExplainer.explain_insta\n- def _prepare_image_data(self, input_data): Transforms the data to be of the shape and type LIME expects. Args: input_data (NumPy-compatible array): Data to be explained Returns: transformed input data, preprocessing function to use with utils.get_function()\n- def _get_full_preprocess_function(self, channel_axis_index, dtype, greyscale=False): Creates a full preprocessing function. Creates a preprocessing function that incorporates both the (optional) user's preprocessing function, as well as any needed dtype and shape conversions Args: channel_axis_index (int): Axis index of the channels in the input data dtype (type): Data type of the input data (e.g. np.float32) greyscale (bool): Whether or not the data is greyscale (i.e. one channel) Returns: Function that first ensures the data has the same shape and type as the input data, then runs the users' preprocessing function\n- def _get_explanation_values(self, label: int, explanation: ImageExplanation) -> np.array: Get the importance scores from LIME in a salience map. Leverages the `ImageExplanation` class from LIME to generate salience maps. These salience maps are constructed using the segmentation masks from the explanation and fills these with the scores from the surrogate model (default for LIME is Ridge regression) used for the explanation. Args: label: The class label for the given explanation explanation: An Image Explanation generated by LIME Returns: A salience map containing the feature importances from LIME", "prompted_full_text": "Implement the Python class `LIMEImage` described below.\n\nClass description:\nWrapper around the LIME explainer. 
Lime explainer byMarco Tulio Correia Ribeiro (https://github.com/marcotcr/lime).\n\nMethod signatures and docstrings:\n- def __init__(self, kernel_width=25, kernel=None, verbose=False, feature_selection='auto', random_state=None, axis_labels=None, preprocess_function=None): Initializes Lime explainer. Args: kernel_width (int, optional): kernel width kernel (callable, optional): kernel verbose (bool, optional): verbose feature_selection (str, optional): feature selection random_state (int or np.RandomState, optional): seed or random state axis_labels (dict/list, optional): If a dict, key,value pairs of axis index, name. If a list, the name of each axis where the index in the list is the axis index preprocess_function (callable, optional): Function to preprocess input data with\n- def explain(self, model_or_function, input_data, labels, top_labels=None, num_features=10, num_samples=5000, return_masks=True, positive_only=False, hide_rest=True, **kwargs): Run the LIME explainer. Args: model_or_function (callable or str): The function that runs the model to be explained _or_ the path to a ONNX model on disk. input_data (np.ndarray): Data to be explained. Must be an \"RGB image\", i.e. with values in the [0,255] range. labels (Iterable(int)): Indices of classes to be explained top_labels: Top labels num_features (int): Number of features num_samples (int): Number of samples return_masks (bool): If true, return discretized masks. Otherwise, return LIME scores positive_only (bool): Positive only hide_rest (bool): Hide rest kwargs: These parameters are passed on Other keyword arguments: see the LIME documentation for LimeImageExplainer.explain_insta\n- def _prepare_image_data(self, input_data): Transforms the data to be of the shape and type LIME expects. Args: input_data (NumPy-compatible array): Data to be explained Returns: transformed input data, preprocessing function to use with utils.get_function()\n- def _get_full_preprocess_function(self, channel_axis_index, dtype, greyscale=False): Creates a full preprocessing function. Creates a preprocessing function that incorporates both the (optional) user's preprocessing function, as well as any needed dtype and shape conversions Args: channel_axis_index (int): Axis index of the channels in the input data dtype (type): Data type of the input data (e.g. np.float32) greyscale (bool): Whether or not the data is greyscale (i.e. one channel) Returns: Function that first ensures the data has the same shape and type as the input data, then runs the users' preprocessing function\n- def _get_explanation_values(self, label: int, explanation: ImageExplanation) -> np.array: Get the importance scores from LIME in a salience map. Leverages the `ImageExplanation` class from LIME to generate salience maps. These salience maps are constructed using the segmentation masks from the explanation and fills these with the scores from the surrogate model (default for LIME is Ridge regression) used for the explanation. Args: label: The class label for the given explanation explanation: An Image Explanation generated by LIME Returns: A salience map containing the feature importances from LIME\n\n<|skeleton|>\nclass LIMEImage:\n \"\"\"Wrapper around the LIME explainer. Lime explainer byMarco Tulio Correia Ribeiro (https://github.com/marcotcr/lime).\"\"\"\n\n def __init__(self, kernel_width=25, kernel=None, verbose=False, feature_selection='auto', random_state=None, axis_labels=None, preprocess_function=None):\n \"\"\"Initializes Lime explainer. 
Args: kernel_width (int, optional): kernel width kernel (callable, optional): kernel verbose (bool, optional): verbose feature_selection (str, optional): feature selection random_state (int or np.RandomState, optional): seed or random state axis_labels (dict/list, optional): If a dict, key,value pairs of axis index, name. If a list, the name of each axis where the index in the list is the axis index preprocess_function (callable, optional): Function to preprocess input data with\"\"\"\n <|body_0|>\n\n def explain(self, model_or_function, input_data, labels, top_labels=None, num_features=10, num_samples=5000, return_masks=True, positive_only=False, hide_rest=True, **kwargs):\n \"\"\"Run the LIME explainer. Args: model_or_function (callable or str): The function that runs the model to be explained _or_ the path to a ONNX model on disk. input_data (np.ndarray): Data to be explained. Must be an \"RGB image\", i.e. with values in the [0,255] range. labels (Iterable(int)): Indices of classes to be explained top_labels: Top labels num_features (int): Number of features num_samples (int): Number of samples return_masks (bool): If true, return discretized masks. Otherwise, return LIME scores positive_only (bool): Positive only hide_rest (bool): Hide rest kwargs: These parameters are passed on Other keyword arguments: see the LIME documentation for LimeImageExplainer.explain_insta\"\"\"\n <|body_1|>\n\n def _prepare_image_data(self, input_data):\n \"\"\"Transforms the data to be of the shape and type LIME expects. Args: input_data (NumPy-compatible array): Data to be explained Returns: transformed input data, preprocessing function to use with utils.get_function()\"\"\"\n <|body_2|>\n\n def _get_full_preprocess_function(self, channel_axis_index, dtype, greyscale=False):\n \"\"\"Creates a full preprocessing function. Creates a preprocessing function that incorporates both the (optional) user's preprocessing function, as well as any needed dtype and shape conversions Args: channel_axis_index (int): Axis index of the channels in the input data dtype (type): Data type of the input data (e.g. np.float32) greyscale (bool): Whether or not the data is greyscale (i.e. one channel) Returns: Function that first ensures the data has the same shape and type as the input data, then runs the users' preprocessing function\"\"\"\n <|body_3|>\n\n def _get_explanation_values(self, label: int, explanation: ImageExplanation) -> np.array:\n \"\"\"Get the importance scores from LIME in a salience map. Leverages the `ImageExplanation` class from LIME to generate salience maps. These salience maps are constructed using the segmentation masks from the explanation and fills these with the scores from the surrogate model (default for LIME is Ridge regression) used for the explanation. 
Args: label: The class label for the given explanation explanation: An Image Explanation generated by LIME Returns: A salience map containing the feature importances from LIME\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.preprocess_function = preprocess_function\n self.axis_labels = axis_labels if axis_labels is not None else []\n self.explainer = LimeImageExplainer(kernel_width, kernel, verbose, feature_selection, random_state)\n<|end_body_0|>\n\n<|body_start_1|>\n input_data, full_preprocess_function = self._prepare_image_data(input_data)\n runner = utils.get_function(model_or_function, preprocess_function=full_preprocess_function)\n explain_instance_kwargs = utils.get_kwargs_applicable_to_function(self.explainer.explain_instance, kwargs)\n explanation = self.explainer.explain_instance(input_data, runner, labels=labels, top_labels=top_labels, num_features=num_features, num_samples=num_samples, **explain_instance_kwargs)\n if return_masks:\n get_image_and_mask_kwargs = utils.get_kwargs_applicable_to_function(explanation.get_image_and_mask, kwargs)\n maps = [explanation.get_image_and_mask(label, positive_only=positive_only, hide_rest=hide_rest, num_features=num_features, **get_image_and_mask_kwargs)[1] for label in labels]\n else:\n maps = [self._get_explanation_values(label, explanation) for label in labels]\n return maps\n<|end_body_1|>\n\n<|body_start_2|>\n axis_label_names = self.axis_labels.values() if isinstance(self.axis_labels, dict) else self.axis_labels\n if not axis_label_names:\n channels_axis_index = utils.locate_channels_axis(input_data.shape)\n self.axis_labels = {channels_axis_index: 'channels'}\n elif 'channels' not in axis_label_names:\n raise ValueError('When providing axis_labels it is required to provide the location of the channels axis')\n input_data = utils.to_xarray(input_data, self.axis_labels)\n channels_axis_index = input_data.dims.index('channels')\n input_data = utils.move_axis(input_data, 'channels', -1)\n greyscale = False\n if len(input_data['channels']) == 1:\n greyscale = True\n input_data = input_data.sel(channels=0).expand_dims({'channels': 3}, axis=input_data.dims.index('channels'))\n full_preprocess_function = self._get_full_preprocess_function(channels_axis_index, input_data.dtype, greyscale)\n return (input_data.values.astype(np.float64), full_preprocess_function)\n<|end_body_2|>\n\n<|body_start_3|>\n if greyscale:\n\n def moveaxis_function(data):\n return np.moveaxis(data[..., [0]], -1, channel_axis_index + 1).astype(dtype)\n else:\n\n def moveaxis_function(data):\n return np.moveaxis(data, -1, channel_axis_index + 1).astype(dtype)\n if self.preprocess_function is None:\n return moveaxis_function\n return lambda data: self.preprocess_function(moveaxis_function(data))\n<|end_body_3|>\n\n<|body_start_4|>\n class_explanation = explanation.local_exp[label]\n salience_map = np.zeros(explanation.segments.shape, dtype=class_explanation[0][1].dtype)\n for segment_id, segment_val in class_explanation:\n salience_map[segment_id == explanation.segments] = segment_val\n return salience_map\n<|end_body_4|>\n", "revision_id": "3284afe6aee489afecb3754aba9fad851b66e56f", "skeleton": "<|skeleton|>\nclass LIMEImage:\n \"\"\"Wrapper around the LIME explainer. Lime explainer byMarco Tulio Correia Ribeiro (https://github.com/marcotcr/lime).\"\"\"\n\n def __init__(self, kernel_width=25, kernel=None, verbose=False, feature_selection='auto', random_state=None, axis_labels=None, preprocess_function=None):\n \"\"\"Initializes Lime explainer. 
Args: kernel_width (int, optional): kernel width kernel (callable, optional): kernel verbose (bool, optional): verbose feature_selection (str, optional): feature selection random_state (int or np.RandomState, optional): seed or random state axis_labels (dict/list, optional): If a dict, key,value pairs of axis index, name. If a list, the name of each axis where the index in the list is the axis index preprocess_function (callable, optional): Function to preprocess input data with\"\"\"\n <|body_0|>\n\n def explain(self, model_or_function, input_data, labels, top_labels=None, num_features=10, num_samples=5000, return_masks=True, positive_only=False, hide_rest=True, **kwargs):\n \"\"\"Run the LIME explainer. Args: model_or_function (callable or str): The function that runs the model to be explained _or_ the path to a ONNX model on disk. input_data (np.ndarray): Data to be explained. Must be an \"RGB image\", i.e. with values in the [0,255] range. labels (Iterable(int)): Indices of classes to be explained top_labels: Top labels num_features (int): Number of features num_samples (int): Number of samples return_masks (bool): If true, return discretized masks. Otherwise, return LIME scores positive_only (bool): Positive only hide_rest (bool): Hide rest kwargs: These parameters are passed on Other keyword arguments: see the LIME documentation for LimeImageExplainer.explain_insta\"\"\"\n <|body_1|>\n\n def _prepare_image_data(self, input_data):\n \"\"\"Transforms the data to be of the shape and type LIME expects. Args: input_data (NumPy-compatible array): Data to be explained Returns: transformed input data, preprocessing function to use with utils.get_function()\"\"\"\n <|body_2|>\n\n def _get_full_preprocess_function(self, channel_axis_index, dtype, greyscale=False):\n \"\"\"Creates a full preprocessing function. Creates a preprocessing function that incorporates both the (optional) user's preprocessing function, as well as any needed dtype and shape conversions Args: channel_axis_index (int): Axis index of the channels in the input data dtype (type): Data type of the input data (e.g. np.float32) greyscale (bool): Whether or not the data is greyscale (i.e. one channel) Returns: Function that first ensures the data has the same shape and type as the input data, then runs the users' preprocessing function\"\"\"\n <|body_3|>\n\n def _get_explanation_values(self, label: int, explanation: ImageExplanation) -> np.array:\n \"\"\"Get the importance scores from LIME in a salience map. Leverages the `ImageExplanation` class from LIME to generate salience maps. These salience maps are constructed using the segmentation masks from the explanation and fills these with the scores from the surrogate model (default for LIME is Ridge regression) used for the explanation. Args: label: The class label for the given explanation explanation: An Image Explanation generated by LIME Returns: A salience map containing the feature importances from LIME\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class LIMEImage:\n \"\"\"Wrapper around the LIME explainer. Lime explainer byMarco Tulio Correia Ribeiro (https://github.com/marcotcr/lime).\"\"\"\n\n def __init__(self, kernel_width=25, kernel=None, verbose=False, feature_selection='auto', random_state=None, axis_labels=None, preprocess_function=None):\n \"\"\"Initializes Lime explainer. 
Args: kernel_width (int, optional): kernel width kernel (callable, optional): kernel verbose (bool, optional): verbose feature_selection (str, optional): feature selection random_state (int or np.RandomState, optional): seed or random state axis_labels (dict/list, optional): If a dict, key,value pairs of axis index, name. If a list, the name of each axis where the index in the list is the axis index preprocess_function (callable, optional): Function to preprocess input data with\"\"\"\n self.preprocess_function = preprocess_function\n self.axis_labels = axis_labels if axis_labels is not None else []\n self.explainer = LimeImageExplainer(kernel_width, kernel, verbose, feature_selection, random_state)\n\n def explain(self, model_or_function, input_data, labels, top_labels=None, num_features=10, num_samples=5000, return_masks=True, positive_only=False, hide_rest=True, **kwargs):\n \"\"\"Run the LIME explainer. Args: model_or_function (callable or str): The function that runs the model to be explained _or_ the path to a ONNX model on disk. input_data (np.ndarray): Data to be explained. Must be an \"RGB image\", i.e. with values in the [0,255] range. labels (Iterable(int)): Indices of classes to be explained top_labels: Top labels num_features (int): Number of features num_samples (int): Number of samples return_masks (bool): If true, return discretized masks. Otherwise, return LIME scores positive_only (bool): Positive only hide_rest (bool): Hide rest kwargs: These parameters are passed on Other keyword arguments: see the LIME documentation for LimeImageExplainer.explain_insta\"\"\"\n input_data, full_preprocess_function = self._prepare_image_data(input_data)\n runner = utils.get_function(model_or_function, preprocess_function=full_preprocess_function)\n explain_instance_kwargs = utils.get_kwargs_applicable_to_function(self.explainer.explain_instance, kwargs)\n explanation = self.explainer.explain_instance(input_data, runner, labels=labels, top_labels=top_labels, num_features=num_features, num_samples=num_samples, **explain_instance_kwargs)\n if return_masks:\n get_image_and_mask_kwargs = utils.get_kwargs_applicable_to_function(explanation.get_image_and_mask, kwargs)\n maps = [explanation.get_image_and_mask(label, positive_only=positive_only, hide_rest=hide_rest, num_features=num_features, **get_image_and_mask_kwargs)[1] for label in labels]\n else:\n maps = [self._get_explanation_values(label, explanation) for label in labels]\n return maps\n\n def _prepare_image_data(self, input_data):\n \"\"\"Transforms the data to be of the shape and type LIME expects. 
Args: input_data (NumPy-compatible array): Data to be explained Returns: transformed input data, preprocessing function to use with utils.get_function()\"\"\"\n axis_label_names = self.axis_labels.values() if isinstance(self.axis_labels, dict) else self.axis_labels\n if not axis_label_names:\n channels_axis_index = utils.locate_channels_axis(input_data.shape)\n self.axis_labels = {channels_axis_index: 'channels'}\n elif 'channels' not in axis_label_names:\n raise ValueError('When providing axis_labels it is required to provide the location of the channels axis')\n input_data = utils.to_xarray(input_data, self.axis_labels)\n channels_axis_index = input_data.dims.index('channels')\n input_data = utils.move_axis(input_data, 'channels', -1)\n greyscale = False\n if len(input_data['channels']) == 1:\n greyscale = True\n input_data = input_data.sel(channels=0).expand_dims({'channels': 3}, axis=input_data.dims.index('channels'))\n full_preprocess_function = self._get_full_preprocess_function(channels_axis_index, input_data.dtype, greyscale)\n return (input_data.values.astype(np.float64), full_preprocess_function)\n\n def _get_full_preprocess_function(self, channel_axis_index, dtype, greyscale=False):\n \"\"\"Creates a full preprocessing function. Creates a preprocessing function that incorporates both the (optional) user's preprocessing function, as well as any needed dtype and shape conversions Args: channel_axis_index (int): Axis index of the channels in the input data dtype (type): Data type of the input data (e.g. np.float32) greyscale (bool): Whether or not the data is greyscale (i.e. one channel) Returns: Function that first ensures the data has the same shape and type as the input data, then runs the users' preprocessing function\"\"\"\n if greyscale:\n\n def moveaxis_function(data):\n return np.moveaxis(data[..., [0]], -1, channel_axis_index + 1).astype(dtype)\n else:\n\n def moveaxis_function(data):\n return np.moveaxis(data, -1, channel_axis_index + 1).astype(dtype)\n if self.preprocess_function is None:\n return moveaxis_function\n return lambda data: self.preprocess_function(moveaxis_function(data))\n\n def _get_explanation_values(self, label: int, explanation: ImageExplanation) -> np.array:\n \"\"\"Get the importance scores from LIME in a salience map. Leverages the `ImageExplanation` class from LIME to generate salience maps. These salience maps are constructed using the segmentation masks from the explanation and fills these with the scores from the surrogate model (default for LIME is Ridge regression) used for the explanation. Args: label: The class label for the given explanation explanation: An Image Explanation generated by LIME Returns: A salience map containing the feature importances from LIME\"\"\"\n class_explanation = explanation.local_exp[label]\n salience_map = np.zeros(explanation.segments.shape, dtype=class_explanation[0][1].dtype)\n for segment_id, segment_val in class_explanation:\n salience_map[segment_id == explanation.segments] = segment_val\n return salience_map\n", "source": "the_stack_v2_python_sparse", "source_path": "dianna/methods/lime.py", "source_repo": "dianna-ai/dianna", "split": "val", "star_events_count": 37}
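The _get_explanation_values body in the record above turns LIME's per-segment weights into a per-pixel salience map. Below is a standalone sketch of that loop with synthetic data standing in for the ImageExplanation object: segments holds a segment id per pixel, class_explanation holds LIME's (segment_id, weight) pairs, and each weight is broadcast to the pixels of its segment.

# Sketch only: segments and class_explanation are synthetic stand-ins for
# explanation.segments and explanation.local_exp[label].
import numpy as np

segments = np.array([[0, 0, 1],
                     [2, 2, 1]])                     # per-pixel segment ids
class_explanation = [(0, 0.8), (1, -0.3), (2, 0.1)]  # (segment_id, weight)

salience_map = np.zeros(segments.shape, dtype=np.float64)
for segment_id, segment_val in class_explanation:
    salience_map[segments == segment_id] = segment_val
print(salience_map)
# [[ 0.8  0.8 -0.3]
#  [ 0.1  0.1 -0.3]]

The boolean mask segments == segment_id selects every pixel of one superpixel at once, so the loop runs over segments rather than pixels, exactly as in the record's body_4.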
{"blob_id": "b19dbfcc6a11c3cd0f15c2efaaa23fd8ab27c5b6", "bodies": ["max_cardinality = 4\ntrainable = False\nargs = {'device': 'cpu'}\nannotation_model = model.AnnotationModel(args, max_cardinality, trainable)\nannotation_tensor = torch.tensor([[1, 2, 1, 3, 0], [3, 3, 3, 3, 3], [2, 1, 0, 0, 0]])\nalignment_tensor = None\noutput = annotation_model((annotation_tensor, alignment_tensor))\nbatch_size = 3\nseq_len = 5\nself.assertEqual(output.shape, (batch_size, seq_len, max_cardinality))\nself.assert_annotations_equal([0, 1, 0, 0], output[0, 0])\nself.assert_annotations_equal([0, 0, 0, 1], output[1, 4])", "max_cardinality = 4\ntrainable = False\nargs = {'device': 'cpu'}\nannotation_model = model.AnnotationModel(args, max_cardinality, trainable)\nannotation_tensor = torch.tensor([[1, 2, 1, 3, 0], [3, 3, 3, 3, 3], [2, 1, 0, 0, 0]])\nalignment_tensor = None\noutput = annotation_model((annotation_tensor, alignment_tensor))\nself.assert_annotations_equal([0, 0, 0, 0], output[0, 4])\nself.assert_annotations_equal([0, 0, 0, 0], output[2, 2])", "max_cardinality = 4\nargs = {'device': 'cpu'}\nfor trainable in (False, True):\n annotation_model = model.AnnotationModel(args, max_cardinality, trainable)\n optimizer = torch.optim.SGD(annotation_model.embeddings.parameters(), lr=0.01)\n weights = torch.nn.Parameter(torch.zeros(max_cardinality))\n annotation_tensor = torch.tensor([[1, 2, 1, 3, 0], [3, 3, 3, 3, 3], [2, 1, 0, 0, 0]])\n alignment_tensor = None\n output = annotation_model((annotation_tensor, alignment_tensor))\n nn.init.uniform_(weights)\n prediction = torch.dot(weights, output[0, 0])\n loss = (prediction - torch.tensor(1)) ** 2\n loss.backward()\n optimizer.step()\n new_output = annotation_model((annotation_tensor, alignment_tensor))\n new_prediction = torch.dot(weights, new_output[0, 0])\n new_loss = (new_prediction - torch.tensor(1)) ** 2\n if not trainable:\n self.assert_annotations_equal([0, 1, 0, 0], annotation_model.embeddings.weight.data[1, :])\n self.assertAlmostEqual(new_loss, loss, places=7)\n else:\n self.assertLess(new_loss, loss)"], "bodies_text": "<|body_start_0|>\n max_cardinality = 4\n trainable = False\n args = {'device': 'cpu'}\n annotation_model = model.AnnotationModel(args, max_cardinality, trainable)\n annotation_tensor = torch.tensor([[1, 2, 1, 3, 0], [3, 3, 3, 3, 3], [2, 1, 0, 0, 0]])\n alignment_tensor = None\n output = annotation_model((annotation_tensor, alignment_tensor))\n batch_size = 3\n seq_len = 5\n self.assertEqual(output.shape, (batch_size, seq_len, max_cardinality))\n self.assert_annotations_equal([0, 1, 0, 0], output[0, 0])\n self.assert_annotations_equal([0, 0, 0, 1], output[1, 4])\n<|end_body_0|>\n\n<|body_start_1|>\n max_cardinality = 4\n trainable = False\n args = {'device': 'cpu'}\n annotation_model = model.AnnotationModel(args, max_cardinality, trainable)\n annotation_tensor = torch.tensor([[1, 2, 1, 3, 0], [3, 3, 3, 3, 3], [2, 1, 0, 0, 0]])\n alignment_tensor = None\n output = annotation_model((annotation_tensor, alignment_tensor))\n self.assert_annotations_equal([0, 0, 0, 0], output[0, 4])\n self.assert_annotations_equal([0, 0, 0, 0], output[2, 2])\n<|end_body_1|>\n\n<|body_start_2|>\n max_cardinality = 4\n args = {'device': 'cpu'}\n for trainable in (False, True):\n annotation_model = model.AnnotationModel(args, max_cardinality, trainable)\n optimizer = torch.optim.SGD(annotation_model.embeddings.parameters(), lr=0.01)\n weights = torch.nn.Parameter(torch.zeros(max_cardinality))\n annotation_tensor = torch.tensor([[1, 2, 1, 3, 0], [3, 3, 3, 3, 3], [2, 
1, 0, 0, 0]])\n alignment_tensor = None\n output = annotation_model((annotation_tensor, alignment_tensor))\n nn.init.uniform_(weights)\n prediction = torch.dot(weights, output[0, 0])\n loss = (prediction - torch.tensor(1)) ** 2\n loss.backward()\n optimizer.step()\n new_output = annotation_model((annotation_tensor, alignment_tensor))\n new_prediction = torch.dot(weights, new_output[0, 0])\n new_loss = (new_prediction - torch.tensor(1)) ** 2\n if not trainable:\n self.assert_annotations_equal([0, 1, 0, 0], annotation_model.embeddings.weight.data[1, :])\n self.assertAlmostEqual(new_loss, loss, places=7)\n else:\n self.assertLess(new_loss, loss)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "AnnotationModelTest", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AnnotationModelTest:\n\n def test_forward_wellformedness(self):\n \"\"\"Tests whether the outout of the forward function of AnnotationModel is the one-hot representation of the indices passed to it\"\"\"\n <|body_0|>\n\n def test_pad_token_zeroed(self):\n \"\"\"Tests whether the pad token (0) is given the zero vector, unlike all other token indices\"\"\"\n <|body_1|>\n\n def test_trainable_is_respected(self):\n \"\"\"Tests whether the trainable flag, which is True if a models should be fine-tuned during training, and False otherwise, is respected. That is, the embedding weights are trained (or not trained) depending on the boolean value of trainable\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n max_cardinality = 4\n trainable = False\n args = {'device': 'cpu'}\n annotation_model = model.AnnotationModel(args, max_cardinality, trainable)\n annotation_tensor = torch.tensor([[1, 2, 1, 3, 0], [3, 3, 3, 3, 3], [2, 1, 0, 0, 0]])\n alignment_tensor = None\n output = annotation_model((annotation_tensor, alignment_tensor))\n batch_size = 3\n seq_len = 5\n self.assertEqual(output.shape, (batch_size, seq_len, max_cardinality))\n self.assert_annotations_equal([0, 1, 0, 0], output[0, 0])\n self.assert_annotations_equal([0, 0, 0, 1], output[1, 4])\n<|end_body_0|>\n\n<|body_start_1|>\n max_cardinality = 4\n trainable = False\n args = {'device': 'cpu'}\n annotation_model = model.AnnotationModel(args, max_cardinality, trainable)\n annotation_tensor = torch.tensor([[1, 2, 1, 3, 0], [3, 3, 3, 3, 3], [2, 1, 0, 0, 0]])\n alignment_tensor = None\n output = annotation_model((annotation_tensor, alignment_tensor))\n self.assert_annotations_equal([0, 0, 0, 0], output[0, 4])\n self.assert_annotations_equal([0, 0, 0, 0], output[2, 2])\n<|end_body_1|>\n\n<|body_start_2|>\n max_cardinality = 4\n args = {'device': 'cpu'}\n for trainable in (False, True):\n annotation_model = model.AnnotationModel(args, max_cardinality, trainable)\n optimizer = torch.optim.SGD(annotation_model.embeddings.parameters(), lr=0.01)\n weights = torch.nn.Parameter(torch.zeros(max_cardinality))\n annotation_tensor = torch.tensor([[1, 2, 1, 3, 0], [3, 3, 3, 3, 3], [2, 1, 0, 0, 0]])\n alignment_tensor = None\n output = annotation_model((annotation_tensor, alignment_tensor))\n nn.init.uniform_(weights)\n prediction = torch.dot(weights, output[0, 0])\n loss = (prediction - torch.tensor(1)) ** 2\n loss.backward()\n optimizer.step()\n new_output = annotation_model((annotation_tensor, alignment_tensor))\n new_prediction = torch.dot(weights, new_output[0, 0])\n new_loss = (new_prediction - torch.tensor(1)) ** 2\n if not trainable:\n self.assert_annotations_equal([0, 1, 0, 0], 
annotation_model.embeddings.weight.data[1, :])\n self.assertAlmostEqual(new_loss, loss, places=7)\n else:\n self.assertLess(new_loss, loss)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000074", "length_bytes": 42833, "license_type": "permissive", "methods": [{"docstring": "Tests whether the outout of the forward function of AnnotationModel is the one-hot representation of the indices passed to it", "name": "test_forward_wellformedness", "signature": "def test_forward_wellformedness(self)"}, {"docstring": "Tests whether the pad token (0) is given the zero vector, unlike all other token indices", "name": "test_pad_token_zeroed", "signature": "def test_pad_token_zeroed(self)"}, {"docstring": "Tests whether the trainable flag, which is True if a models should be fine-tuned during training, and False otherwise, is respected. That is, the embedding weights are trained (or not trained) depending on the boolean value of trainable", "name": "test_trainable_is_respected", "signature": "def test_trainable_is_respected(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_test_000723", "prompt": "Implement the Python class `AnnotationModelTest` described below.\n\nClass description:\nImplement the AnnotationModelTest class.\n\nMethod signatures and docstrings:\n- def test_forward_wellformedness(self): Tests whether the outout of the forward function of AnnotationModel is the one-hot representation of the indices passed to it\n- def test_pad_token_zeroed(self): Tests whether the pad token (0) is given the zero vector, unlike all other token indices\n- def test_trainable_is_respected(self): Tests whether the trainable flag, which is True if a models should be fine-tuned during training, and False otherwise, is respected. That is, the embedding weights are trained (or not trained) depending on the boolean value of trainable", "prompted_full_text": "Implement the Python class `AnnotationModelTest` described below.\n\nClass description:\nImplement the AnnotationModelTest class.\n\nMethod signatures and docstrings:\n- def test_forward_wellformedness(self): Tests whether the outout of the forward function of AnnotationModel is the one-hot representation of the indices passed to it\n- def test_pad_token_zeroed(self): Tests whether the pad token (0) is given the zero vector, unlike all other token indices\n- def test_trainable_is_respected(self): Tests whether the trainable flag, which is True if a models should be fine-tuned during training, and False otherwise, is respected. That is, the embedding weights are trained (or not trained) depending on the boolean value of trainable\n\n<|skeleton|>\nclass AnnotationModelTest:\n\n def test_forward_wellformedness(self):\n \"\"\"Tests whether the outout of the forward function of AnnotationModel is the one-hot representation of the indices passed to it\"\"\"\n <|body_0|>\n\n def test_pad_token_zeroed(self):\n \"\"\"Tests whether the pad token (0) is given the zero vector, unlike all other token indices\"\"\"\n <|body_1|>\n\n def test_trainable_is_respected(self):\n \"\"\"Tests whether the trainable flag, which is True if a models should be fine-tuned during training, and False otherwise, is respected. 
That is, the embedding weights are trained (or not trained) depending on the boolean value of trainable\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n max_cardinality = 4\n trainable = False\n args = {'device': 'cpu'}\n annotation_model = model.AnnotationModel(args, max_cardinality, trainable)\n annotation_tensor = torch.tensor([[1, 2, 1, 3, 0], [3, 3, 3, 3, 3], [2, 1, 0, 0, 0]])\n alignment_tensor = None\n output = annotation_model((annotation_tensor, alignment_tensor))\n batch_size = 3\n seq_len = 5\n self.assertEqual(output.shape, (batch_size, seq_len, max_cardinality))\n self.assert_annotations_equal([0, 1, 0, 0], output[0, 0])\n self.assert_annotations_equal([0, 0, 0, 1], output[1, 4])\n<|end_body_0|>\n\n<|body_start_1|>\n max_cardinality = 4\n trainable = False\n args = {'device': 'cpu'}\n annotation_model = model.AnnotationModel(args, max_cardinality, trainable)\n annotation_tensor = torch.tensor([[1, 2, 1, 3, 0], [3, 3, 3, 3, 3], [2, 1, 0, 0, 0]])\n alignment_tensor = None\n output = annotation_model((annotation_tensor, alignment_tensor))\n self.assert_annotations_equal([0, 0, 0, 0], output[0, 4])\n self.assert_annotations_equal([0, 0, 0, 0], output[2, 2])\n<|end_body_1|>\n\n<|body_start_2|>\n max_cardinality = 4\n args = {'device': 'cpu'}\n for trainable in (False, True):\n annotation_model = model.AnnotationModel(args, max_cardinality, trainable)\n optimizer = torch.optim.SGD(annotation_model.embeddings.parameters(), lr=0.01)\n weights = torch.nn.Parameter(torch.zeros(max_cardinality))\n annotation_tensor = torch.tensor([[1, 2, 1, 3, 0], [3, 3, 3, 3, 3], [2, 1, 0, 0, 0]])\n alignment_tensor = None\n output = annotation_model((annotation_tensor, alignment_tensor))\n nn.init.uniform_(weights)\n prediction = torch.dot(weights, output[0, 0])\n loss = (prediction - torch.tensor(1)) ** 2\n loss.backward()\n optimizer.step()\n new_output = annotation_model((annotation_tensor, alignment_tensor))\n new_prediction = torch.dot(weights, new_output[0, 0])\n new_loss = (new_prediction - torch.tensor(1)) ** 2\n if not trainable:\n self.assert_annotations_equal([0, 1, 0, 0], annotation_model.embeddings.weight.data[1, :])\n self.assertAlmostEqual(new_loss, loss, places=7)\n else:\n self.assertLess(new_loss, loss)\n<|end_body_2|>\n", "revision_id": "bebc90aa0c910395e2370910409076a945279fe0", "skeleton": "<|skeleton|>\nclass AnnotationModelTest:\n\n def test_forward_wellformedness(self):\n \"\"\"Tests whether the outout of the forward function of AnnotationModel is the one-hot representation of the indices passed to it\"\"\"\n <|body_0|>\n\n def test_pad_token_zeroed(self):\n \"\"\"Tests whether the pad token (0) is given the zero vector, unlike all other token indices\"\"\"\n <|body_1|>\n\n def test_trainable_is_respected(self):\n \"\"\"Tests whether the trainable flag, which is True if a models should be fine-tuned during training, and False otherwise, is respected. 
That is, the embedding weights are trained (or not trained) depending on the boolean value of trainable\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class AnnotationModelTest:\n def test_forward_wellformedness(self):\n \"\"\"Tests whether the outout of the forward function of AnnotationModel is the one-hot representation of the indices passed to it\"\"\"\n max_cardinality = 4\n trainable = False\n args = {'device': 'cpu'}\n annotation_model = model.AnnotationModel(args, max_cardinality, trainable)\n annotation_tensor = torch.tensor([[1, 2, 1, 3, 0], [3, 3, 3, 3, 3], [2, 1, 0, 0, 0]])\n alignment_tensor = None\n output = annotation_model((annotation_tensor, alignment_tensor))\n batch_size = 3\n seq_len = 5\n self.assertEqual(output.shape, (batch_size, seq_len, max_cardinality))\n self.assert_annotations_equal([0, 1, 0, 0], output[0, 0])\n self.assert_annotations_equal([0, 0, 0, 1], output[1, 4])\n\n def test_pad_token_zeroed(self):\n \"\"\"Tests whether the pad token (0) is given the zero vector, unlike all other token indices\"\"\"\n max_cardinality = 4\n trainable = False\n args = {'device': 'cpu'}\n annotation_model = model.AnnotationModel(args, max_cardinality, trainable)\n annotation_tensor = torch.tensor([[1, 2, 1, 3, 0], [3, 3, 3, 3, 3], [2, 1, 0, 0, 0]])\n alignment_tensor = None\n output = annotation_model((annotation_tensor, alignment_tensor))\n self.assert_annotations_equal([0, 0, 0, 0], output[0, 4])\n self.assert_annotations_equal([0, 0, 0, 0], output[2, 2])\n\n def test_trainable_is_respected(self):\n \"\"\"Tests whether the trainable flag, which is True if a models should be fine-tuned during training, and False otherwise, is respected. That is, the embedding weights are trained (or not trained) depending on the boolean value of trainable\"\"\"\n max_cardinality = 4\n args = {'device': 'cpu'}\n for trainable in (False, True):\n annotation_model = model.AnnotationModel(args, max_cardinality, trainable)\n optimizer = torch.optim.SGD(annotation_model.embeddings.parameters(), lr=0.01)\n weights = torch.nn.Parameter(torch.zeros(max_cardinality))\n annotation_tensor = torch.tensor([[1, 2, 1, 3, 0], [3, 3, 3, 3, 3], [2, 1, 0, 0, 0]])\n alignment_tensor = None\n output = annotation_model((annotation_tensor, alignment_tensor))\n nn.init.uniform_(weights)\n prediction = torch.dot(weights, output[0, 0])\n loss = (prediction - torch.tensor(1)) ** 2\n loss.backward()\n optimizer.step()\n new_output = annotation_model((annotation_tensor, alignment_tensor))\n new_prediction = torch.dot(weights, new_output[0, 0])\n new_loss = (new_prediction - torch.tensor(1)) ** 2\n if not trainable:\n self.assert_annotations_equal([0, 1, 0, 0], annotation_model.embeddings.weight.data[1, :])\n self.assertAlmostEqual(new_loss, loss, places=7)\n else:\n self.assertLess(new_loss, loss)\n", "source": "the_stack_v2_python_sparse", "source_path": "vinfo/testing.py", "source_repo": "inimah/conditional-probing", "split": "val", "star_events_count": 0}
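Editor's note: the AnnotationModelTest record above exercises a model.AnnotationModel that the record itself never shows. A minimal sketch that would satisfy all three test bodies, assuming a one-hot nn.Embedding with index 0 reserved as the pad token and the trainable flag mapped onto requires_grad (none of which the record confirms):

import torch
import torch.nn as nn

class AnnotationModel(nn.Module):
    # Minimal sketch of the module the tests exercise. Assumptions not in
    # the record: rows are one-hot via torch.eye, index 0 is the pad token,
    # and `trainable` simply toggles requires_grad on the embedding table.

    def __init__(self, args, max_cardinality, trainable):
        super().__init__()
        self.device = args['device']
        self.embeddings = nn.Embedding(max_cardinality, max_cardinality,
                                       padding_idx=0)
        with torch.no_grad():
            self.embeddings.weight.copy_(torch.eye(max_cardinality))
            self.embeddings.weight[0].zero_()  # pad row must stay all zeros
        self.embeddings.weight.requires_grad = trainable

    def forward(self, batch):
        annotation_tensor, _alignment = batch  # alignment unused in these tests
        return self.embeddings(annotation_tensor.to(self.device))

Under this sketch, index 1 maps to [0, 1, 0, 0] and index 3 to [0, 0, 0, 1] as test_forward_wellformedness asserts, padding_idx keeps the pad row at zero during training, and with trainable=False the SGD step in test_trainable_is_respected leaves the weights (and hence the loss) unchanged.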
{"blob_id": "a13a2ecb0cd7c0bebe278951c7f0406fad3582aa", "bodies": ["super(subMemberObjectNode, self).__init__(subMember, tree, container)\nself._treeItemId = tree.AppendItem(parentTreeItemId, '')\nif tree.isAutoVisible():\n tree.EnsureVisible(self._treeItemId)", "nodeData = self._tree.GetItemPyData(self._treeItemId)\nif nodeData is None:\n raise AttributeError('No data attached to tree node!')\nreturn nodeData.getObject()", "subObject = self._member.getObject(object)\ntry:\n currentObject = self.getObject()\n if PyUtils.sameObject(currentObject, subObject):\n return\nexcept AttributeError:\n pass\nnodeData = NodeData(self._member.name, subObject, self._tree, self._treeItemId, self._container)"], "bodies_text": "<|body_start_0|>\n super(subMemberObjectNode, self).__init__(subMember, tree, container)\n self._treeItemId = tree.AppendItem(parentTreeItemId, '')\n if tree.isAutoVisible():\n tree.EnsureVisible(self._treeItemId)\n<|end_body_0|>\n\n<|body_start_1|>\n nodeData = self._tree.GetItemPyData(self._treeItemId)\n if nodeData is None:\n raise AttributeError('No data attached to tree node!')\n return nodeData.getObject()\n<|end_body_1|>\n\n<|body_start_2|>\n subObject = self._member.getObject(object)\n try:\n currentObject = self.getObject()\n if PyUtils.sameObject(currentObject, subObject):\n return\n except AttributeError:\n pass\n nodeData = NodeData(self._member.name, subObject, self._tree, self._treeItemId, self._container)\n<|end_body_2|>\n", "class_docstring": "Private class. Information for a submember node that wraps a single object.", "class_name": "subMemberObjectNode", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass subMemberObjectNode:\n \"\"\"Private class. Information for a submember node that wraps a single object.\"\"\"\n\n def __init__(self, subMember, tree, container, parentTreeItemId):\n \"\"\"Create a new node inside the tree that wraps the specified submember\"\"\"\n <|body_0|>\n\n def getObject(self):\n \"\"\"Returns the object associated with this node, or None if no object is associated.\"\"\"\n <|body_1|>\n\n def update(self, object):\n \"\"\"Updates the node so that it contains the specified object.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(subMemberObjectNode, self).__init__(subMember, tree, container)\n self._treeItemId = tree.AppendItem(parentTreeItemId, '')\n if tree.isAutoVisible():\n tree.EnsureVisible(self._treeItemId)\n<|end_body_0|>\n\n<|body_start_1|>\n nodeData = self._tree.GetItemPyData(self._treeItemId)\n if nodeData is None:\n raise AttributeError('No data attached to tree node!')\n return nodeData.getObject()\n<|end_body_1|>\n\n<|body_start_2|>\n subObject = self._member.getObject(object)\n try:\n currentObject = self.getObject()\n if PyUtils.sameObject(currentObject, subObject):\n return\n except AttributeError:\n pass\n nodeData = NodeData(self._member.name, subObject, self._tree, self._treeItemId, self._container)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000075", "length_bytes": 11787, "license_type": "no_license", "methods": [{"docstring": "Create a new node inside the tree that wraps the specified submember", "name": "__init__", "signature": "def __init__(self, subMember, tree, container, parentTreeItemId)"}, {"docstring": "Returns the object associated with this node, or None if no object is associated.", "name": "getObject", "signature": "def getObject(self)"}, {"docstring": "Updates the node so that it contains the specified 
object.", "name": "update", "signature": "def update(self, object)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_019476", "prompt": "Implement the Python class `subMemberObjectNode` described below.\n\nClass description:\nPrivate class. Information for a submember node that wraps a single object.\n\nMethod signatures and docstrings:\n- def __init__(self, subMember, tree, container, parentTreeItemId): Create a new node inside the tree that wraps the specified submember\n- def getObject(self): Returns the object associated with this node, or None if no object is associated.\n- def update(self, object): Updates the node so that it contains the specified object.", "prompted_full_text": "Implement the Python class `subMemberObjectNode` described below.\n\nClass description:\nPrivate class. Information for a submember node that wraps a single object.\n\nMethod signatures and docstrings:\n- def __init__(self, subMember, tree, container, parentTreeItemId): Create a new node inside the tree that wraps the specified submember\n- def getObject(self): Returns the object associated with this node, or None if no object is associated.\n- def update(self, object): Updates the node so that it contains the specified object.\n\n<|skeleton|>\nclass subMemberObjectNode:\n \"\"\"Private class. Information for a submember node that wraps a single object.\"\"\"\n\n def __init__(self, subMember, tree, container, parentTreeItemId):\n \"\"\"Create a new node inside the tree that wraps the specified submember\"\"\"\n <|body_0|>\n\n def getObject(self):\n \"\"\"Returns the object associated with this node, or None if no object is associated.\"\"\"\n <|body_1|>\n\n def update(self, object):\n \"\"\"Updates the node so that it contains the specified object.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(subMemberObjectNode, self).__init__(subMember, tree, container)\n self._treeItemId = tree.AppendItem(parentTreeItemId, '')\n if tree.isAutoVisible():\n tree.EnsureVisible(self._treeItemId)\n<|end_body_0|>\n\n<|body_start_1|>\n nodeData = self._tree.GetItemPyData(self._treeItemId)\n if nodeData is None:\n raise AttributeError('No data attached to tree node!')\n return nodeData.getObject()\n<|end_body_1|>\n\n<|body_start_2|>\n subObject = self._member.getObject(object)\n try:\n currentObject = self.getObject()\n if PyUtils.sameObject(currentObject, subObject):\n return\n except AttributeError:\n pass\n nodeData = NodeData(self._member.name, subObject, self._tree, self._treeItemId, self._container)\n<|end_body_2|>\n", "revision_id": "f5ecde937663091fd324c9d22fd72542d4eb1e16", "skeleton": "<|skeleton|>\nclass subMemberObjectNode:\n \"\"\"Private class. Information for a submember node that wraps a single object.\"\"\"\n\n def __init__(self, subMember, tree, container, parentTreeItemId):\n \"\"\"Create a new node inside the tree that wraps the specified submember\"\"\"\n <|body_0|>\n\n def getObject(self):\n \"\"\"Returns the object associated with this node, or None if no object is associated.\"\"\"\n <|body_1|>\n\n def update(self, object):\n \"\"\"Updates the node so that it contains the specified object.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class subMemberObjectNode:\n \"\"\"Private class. 
Information for a submember node that wraps a single object.\"\"\"\n\n def __init__(self, subMember, tree, container, parentTreeItemId):\n \"\"\"Create a new node inside the tree that wraps the specified submember\"\"\"\n super(subMemberObjectNode, self).__init__(subMember, tree, container)\n self._treeItemId = tree.AppendItem(parentTreeItemId, '')\n if tree.isAutoVisible():\n tree.EnsureVisible(self._treeItemId)\n\n def getObject(self):\n \"\"\"Returns the object associated with this node, or None if no object is associated.\"\"\"\n nodeData = self._tree.GetItemPyData(self._treeItemId)\n if nodeData is None:\n raise AttributeError('No data attached to tree node!')\n return nodeData.getObject()\n\n def update(self, object):\n \"\"\"Updates the node so that it contains the specified object.\"\"\"\n subObject = self._member.getObject(object)\n try:\n currentObject = self.getObject()\n if PyUtils.sameObject(currentObject, subObject):\n return\n except AttributeError:\n pass\n nodeData = NodeData(self._member.name, subObject, self._tree, self._treeItemId, self._container)\n", "source": "the_stack_v2_python_sparse", "source_path": "Python/App/Proxys/NodeData.py", "source_repo": "kujira70/simbicon", "split": "val", "star_events_count": 3}
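Editor's note: in the subMemberObjectNode record, update() builds a NodeData but the stored body ends before attaching it to the tree item. Since getObject() reads the payload back with GetItemPyData, a plausible final line (an assumption, not part of the record) uses the classic-wxPython counterpart:

# Hypothetical completion of update(): attach the freshly built payload so
# that the GetItemPyData call in getObject() can find it again. Whether the
# original file does exactly this is an assumption.
self._tree.SetItemPyData(self._treeItemId, nodeData)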
{"blob_id": "a48883f09211e47ee5e4d5d20ef29982c22cab34", "bodies": ["def helper(node: TreeNode, lower=float('-inf'), upper=float('inf')) -> bool:\n if node is None:\n return True\n val = node.val\n if val <= lower or val >= upper:\n return False\n if not helper(node.left, lower, val):\n return False\n if not helper(node.right, val, upper):\n return False\n return True\nreturn helper(root)", "stack, inorder = ([], float('-inf'))\nwhile len(stack) > 0 or root is not None:\n while root is not None:\n stack.append(root)\n root = root.left\n root = stack.pop()\n if root.val <= inorder:\n return False\n inorder = root.val\n root = root.right\nreturn True"], "bodies_text": "<|body_start_0|>\n def helper(node: TreeNode, lower=float('-inf'), upper=float('inf')) -> bool:\n if node is None:\n return True\n val = node.val\n if val <= lower or val >= upper:\n return False\n if not helper(node.left, lower, val):\n return False\n if not helper(node.right, val, upper):\n return False\n return True\n return helper(root)\n<|end_body_0|>\n\n<|body_start_1|>\n stack, inorder = ([], float('-inf'))\n while len(stack) > 0 or root is not None:\n while root is not None:\n stack.append(root)\n root = root.left\n root = stack.pop()\n if root.val <= inorder:\n return False\n inorder = root.val\n root = root.right\n return True\n<|end_body_1|>\n", "class_docstring": "", "class_name": "OfficialSolution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass OfficialSolution:\n\n def is_valid_BST(self, root: TreeNode) -> bool:\n \"\"\"递归\"\"\"\n <|body_0|>\n\n def is_valid_BST(self, root: TreeNode) -> bool:\n \"\"\"中序遍历\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def helper(node: TreeNode, lower=float('-inf'), upper=float('inf')) -> bool:\n if node is None:\n return True\n val = node.val\n if val <= lower or val >= upper:\n return False\n if not helper(node.left, lower, val):\n return False\n if not helper(node.right, val, upper):\n return False\n return True\n return helper(root)\n<|end_body_0|>\n\n<|body_start_1|>\n stack, inorder = ([], float('-inf'))\n while len(stack) > 0 or root is not None:\n while root is not None:\n stack.append(root)\n root = root.left\n root = stack.pop()\n if root.val <= inorder:\n return False\n inorder = root.val\n root = root.right\n return True\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000076", "length_bytes": 4006, "license_type": "no_license", "methods": [{"docstring": "递归", "name": "is_valid_BST", "signature": "def is_valid_BST(self, root: TreeNode) -> bool"}, {"docstring": "中序遍历", "name": "is_valid_BST", "signature": "def is_valid_BST(self, root: TreeNode) -> bool"}], "n_methods": 2, "prompt": "Implement the Python class `OfficialSolution` described below.\n\nClass description:\nImplement the OfficialSolution class.\n\nMethod signatures and docstrings:\n- def is_valid_BST(self, root: TreeNode) -> bool: 递归\n- def is_valid_BST(self, root: TreeNode) -> bool: 中序遍历", "prompted_full_text": "Implement the Python class `OfficialSolution` described below.\n\nClass description:\nImplement the OfficialSolution class.\n\nMethod signatures and docstrings:\n- def is_valid_BST(self, root: TreeNode) -> bool: 递归\n- def is_valid_BST(self, root: TreeNode) -> bool: 中序遍历\n\n<|skeleton|>\nclass OfficialSolution:\n\n def is_valid_BST(self, root: TreeNode) -> bool:\n \"\"\"递归\"\"\"\n <|body_0|>\n\n def is_valid_BST(self, root: TreeNode) -> bool:\n \"\"\"中序遍历\"\"\"\n 
<|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def helper(node: TreeNode, lower=float('-inf'), upper=float('inf')) -> bool:\n if node is None:\n return True\n val = node.val\n if val <= lower or val >= upper:\n return False\n if not helper(node.left, lower, val):\n return False\n if not helper(node.right, val, upper):\n return False\n return True\n return helper(root)\n<|end_body_0|>\n\n<|body_start_1|>\n stack, inorder = ([], float('-inf'))\n while len(stack) > 0 or root is not None:\n while root is not None:\n stack.append(root)\n root = root.left\n root = stack.pop()\n if root.val <= inorder:\n return False\n inorder = root.val\n root = root.right\n return True\n<|end_body_1|>\n", "revision_id": "6932d69353b94ec824dd0ddc86a92453f6673232", "skeleton": "<|skeleton|>\nclass OfficialSolution:\n\n def is_valid_BST(self, root: TreeNode) -> bool:\n \"\"\"递归\"\"\"\n <|body_0|>\n\n def is_valid_BST(self, root: TreeNode) -> bool:\n \"\"\"中序遍历\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class OfficialSolution:\n def is_valid_BST(self, root: TreeNode) -> bool:\n \"\"\"递归\"\"\"\n def helper(node: TreeNode, lower=float('-inf'), upper=float('inf')) -> bool:\n if node is None:\n return True\n val = node.val\n if val <= lower or val >= upper:\n return False\n if not helper(node.left, lower, val):\n return False\n if not helper(node.right, val, upper):\n return False\n return True\n return helper(root)\n\n def is_valid_BST(self, root: TreeNode) -> bool:\n \"\"\"中序遍历\"\"\"\n stack, inorder = ([], float('-inf'))\n while len(stack) > 0 or root is not None:\n while root is not None:\n stack.append(root)\n root = root.left\n root = stack.pop()\n if root.val <= inorder:\n return False\n inorder = root.val\n root = root.right\n return True\n", "source": "the_stack_v2_python_sparse", "source_path": "0098_validate-binary-search-tree.py", "source_repo": "Nigirimeshi/leetcode", "split": "val", "star_events_count": 0}
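Editor's note: both OfficialSolution bodies assume a TreeNode type the record never defines, and because the two methods share a name, only the second (inorder-traversal) version survives on the class. A minimal TreeNode plus a smoke test, written as a sketch rather than the original repository's definitions:

class TreeNode:
    # Minimal binary-tree node assumed by the solutions above.
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

# (2, left=1, right=3) is a valid BST; (5, 1, (4, 3, 6)) violates the
# upper bound of 5 inside its right subtree.
valid = TreeNode(2, TreeNode(1), TreeNode(3))
invalid = TreeNode(5, TreeNode(1), TreeNode(4, TreeNode(3), TreeNode(6)))
solver = OfficialSolution()
assert solver.is_valid_BST(valid) is True
assert solver.is_valid_BST(invalid) is False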
{"blob_id": "9f2f7bdbe644fc84c68066666508221cd7e6e9bc", "bodies": ["super().__init__(**kwargs)\nself.fc1 = keras.layers.Dense(hidden_dim)\nself.fcs = [keras.layers.Dense(dim) for dim in output_dims]", "x = tf.nn.relu(self.fc1(x))\nxs = [fc(x) for fc in self.fcs]\nreturn xs"], "bodies_text": "<|body_start_0|>\n super().__init__(**kwargs)\n self.fc1 = keras.layers.Dense(hidden_dim)\n self.fcs = [keras.layers.Dense(dim) for dim in output_dims]\n<|end_body_0|>\n\n<|body_start_1|>\n x = tf.nn.relu(self.fc1(x))\n xs = [fc(x) for fc in self.fcs]\n return xs\n<|end_body_1|>\n", "class_docstring": "ADULT decoder used in the Counterfactual with Reinforcement Learning experiments. The model consists of of a fully connected layer with ReLU nonlinearity, and a multiheaded layer, one for each categorical feature and a single head for the rest of numerical features. The hidden dimension used in the paper is 128.", "class_name": "ADULTDecoder", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ADULTDecoder:\n \"\"\"ADULT decoder used in the Counterfactual with Reinforcement Learning experiments. The model consists of of a fully connected layer with ReLU nonlinearity, and a multiheaded layer, one for each categorical feature and a single head for the rest of numerical features. The hidden dimension used in the paper is 128.\"\"\"\n\n def __init__(self, hidden_dim: int, output_dims: List[int], **kwargs):\n \"\"\"Constructor. Parameters ---------- hidden_dim Hidden dimension. output_dim List of output dimensions.\"\"\"\n <|body_0|>\n\n def call(self, x: tf.Tensor, **kwargs) -> List[tf.Tensor]:\n \"\"\"Forward pass. Parameters ---------- x Input tensor. **kwargs Other arguments. Not used. Returns ------- List of reconstruction of the input tensor. First element corresponds to the reconstruction of all the numerical features if they exist, and the rest of the elements correspond to each categorical feature.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(**kwargs)\n self.fc1 = keras.layers.Dense(hidden_dim)\n self.fcs = [keras.layers.Dense(dim) for dim in output_dims]\n<|end_body_0|>\n\n<|body_start_1|>\n x = tf.nn.relu(self.fc1(x))\n xs = [fc(x) for fc in self.fcs]\n return xs\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000077", "length_bytes": 8692, "license_type": "permissive", "methods": [{"docstring": "Constructor. Parameters ---------- hidden_dim Hidden dimension. output_dim List of output dimensions.", "name": "__init__", "signature": "def __init__(self, hidden_dim: int, output_dims: List[int], **kwargs)"}, {"docstring": "Forward pass. Parameters ---------- x Input tensor. **kwargs Other arguments. Not used. Returns ------- List of reconstruction of the input tensor. First element corresponds to the reconstruction of all the numerical features if they exist, and the rest of the elements correspond to each categorical feature.", "name": "call", "signature": "def call(self, x: tf.Tensor, **kwargs) -> List[tf.Tensor]"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_039132", "prompt": "Implement the Python class `ADULTDecoder` described below.\n\nClass description:\nADULT decoder used in the Counterfactual with Reinforcement Learning experiments. The model consists of of a fully connected layer with ReLU nonlinearity, and a multiheaded layer, one for each categorical feature and a single head for the rest of numerical features. 
The hidden dimension used in the paper is 128.\n\nMethod signatures and docstrings:\n- def __init__(self, hidden_dim: int, output_dims: List[int], **kwargs): Constructor. Parameters ---------- hidden_dim Hidden dimension. output_dim List of output dimensions.\n- def call(self, x: tf.Tensor, **kwargs) -> List[tf.Tensor]: Forward pass. Parameters ---------- x Input tensor. **kwargs Other arguments. Not used. Returns ------- List of reconstruction of the input tensor. First element corresponds to the reconstruction of all the numerical features if they exist, and the rest of the elements correspond to each categorical feature.", "prompted_full_text": "Implement the Python class `ADULTDecoder` described below.\n\nClass description:\nADULT decoder used in the Counterfactual with Reinforcement Learning experiments. The model consists of of a fully connected layer with ReLU nonlinearity, and a multiheaded layer, one for each categorical feature and a single head for the rest of numerical features. The hidden dimension used in the paper is 128.\n\nMethod signatures and docstrings:\n- def __init__(self, hidden_dim: int, output_dims: List[int], **kwargs): Constructor. Parameters ---------- hidden_dim Hidden dimension. output_dim List of output dimensions.\n- def call(self, x: tf.Tensor, **kwargs) -> List[tf.Tensor]: Forward pass. Parameters ---------- x Input tensor. **kwargs Other arguments. Not used. Returns ------- List of reconstruction of the input tensor. First element corresponds to the reconstruction of all the numerical features if they exist, and the rest of the elements correspond to each categorical feature.\n\n<|skeleton|>\nclass ADULTDecoder:\n \"\"\"ADULT decoder used in the Counterfactual with Reinforcement Learning experiments. The model consists of of a fully connected layer with ReLU nonlinearity, and a multiheaded layer, one for each categorical feature and a single head for the rest of numerical features. The hidden dimension used in the paper is 128.\"\"\"\n\n def __init__(self, hidden_dim: int, output_dims: List[int], **kwargs):\n \"\"\"Constructor. Parameters ---------- hidden_dim Hidden dimension. output_dim List of output dimensions.\"\"\"\n <|body_0|>\n\n def call(self, x: tf.Tensor, **kwargs) -> List[tf.Tensor]:\n \"\"\"Forward pass. Parameters ---------- x Input tensor. **kwargs Other arguments. Not used. Returns ------- List of reconstruction of the input tensor. First element corresponds to the reconstruction of all the numerical features if they exist, and the rest of the elements correspond to each categorical feature.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(**kwargs)\n self.fc1 = keras.layers.Dense(hidden_dim)\n self.fcs = [keras.layers.Dense(dim) for dim in output_dims]\n<|end_body_0|>\n\n<|body_start_1|>\n x = tf.nn.relu(self.fc1(x))\n xs = [fc(x) for fc in self.fcs]\n return xs\n<|end_body_1|>\n", "revision_id": "54d0c957fb01c7ebba4e2a0d28fcbde52d9c6718", "skeleton": "<|skeleton|>\nclass ADULTDecoder:\n \"\"\"ADULT decoder used in the Counterfactual with Reinforcement Learning experiments. The model consists of of a fully connected layer with ReLU nonlinearity, and a multiheaded layer, one for each categorical feature and a single head for the rest of numerical features. The hidden dimension used in the paper is 128.\"\"\"\n\n def __init__(self, hidden_dim: int, output_dims: List[int], **kwargs):\n \"\"\"Constructor. Parameters ---------- hidden_dim Hidden dimension. 
output_dim List of output dimensions.\"\"\"\n <|body_0|>\n\n def call(self, x: tf.Tensor, **kwargs) -> List[tf.Tensor]:\n \"\"\"Forward pass. Parameters ---------- x Input tensor. **kwargs Other arguments. Not used. Returns ------- List of reconstruction of the input tensor. First element corresponds to the reconstruction of all the numerical features if they exist, and the rest of the elements correspond to each categorical feature.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ADULTDecoder:\n \"\"\"ADULT decoder used in the Counterfactual with Reinforcement Learning experiments. The model consists of of a fully connected layer with ReLU nonlinearity, and a multiheaded layer, one for each categorical feature and a single head for the rest of numerical features. The hidden dimension used in the paper is 128.\"\"\"\n\n def __init__(self, hidden_dim: int, output_dims: List[int], **kwargs):\n \"\"\"Constructor. Parameters ---------- hidden_dim Hidden dimension. output_dim List of output dimensions.\"\"\"\n super().__init__(**kwargs)\n self.fc1 = keras.layers.Dense(hidden_dim)\n self.fcs = [keras.layers.Dense(dim) for dim in output_dims]\n\n def call(self, x: tf.Tensor, **kwargs) -> List[tf.Tensor]:\n \"\"\"Forward pass. Parameters ---------- x Input tensor. **kwargs Other arguments. Not used. Returns ------- List of reconstruction of the input tensor. First element corresponds to the reconstruction of all the numerical features if they exist, and the rest of the elements correspond to each categorical feature.\"\"\"\n x = tf.nn.relu(self.fc1(x))\n xs = [fc(x) for fc in self.fcs]\n return xs\n", "source": "the_stack_v2_python_sparse", "source_path": "alibi/models/tensorflow/cfrl_models.py", "source_repo": "SeldonIO/alibi", "split": "val", "star_events_count": 2143}
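Editor's note: a short usage sketch for the ADULTDecoder record. The stored skeleton shows no base class, but the super().__init__(**kwargs) call and the keras layers imply it subclasses keras.Model in the original file; the dimensions below are illustrative, not taken from the record:

import tensorflow as tf

# Illustrative head widths: one numerical head of width 4 and two
# categorical heads of widths 9 and 7; hidden_dim=128 matches the
# "hidden dimension used in the paper" noted in the class docstring.
decoder = ADULTDecoder(hidden_dim=128, output_dims=[4, 9, 7])
z = tf.random.normal((32, 15))   # batch of 32 latent codes (width assumed)
outputs = decoder(z)             # relies on the assumed keras.Model base class
print([tuple(o.shape) for o in outputs])  # [(32, 4), (32, 9), (32, 7)]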
{"blob_id": "46ef8270f95a390447d1b040233d4689fcfcb1dc", "bodies": ["try:\n session = session or _Session()\n return session.query(cls).filter(cls.key == key).one()\nexcept NoResultFound as error:\n raise Client.NotFound(f'No client with key={key}') from error", "try:\n session = session or _Session()\n return session.query(cls).filter(cls.user_id == user_id).one()\nexcept NoResultFound as error:\n raise Client.NotFound(f'No client with user_id={user_id}') from error", "session = _Session()\nuser = User.from_id(user_id, session)\nkey, secret = (Key.generate(), Secret.generate())\nclient = Client(user_id=user.id, level=level, key=key.value, secret=secret.hashed().value, valid=True)\nsession.add(client)\nsession.commit()\nlog.info(f'Added client for user ({user.id})')\nreturn (key, secret, client)", "session = _Session()\nclient = Client.from_user(user_id, session)\nsecret = Secret.generate()\nclient.secret = secret.hashed().value\nsession.commit()\nlog.info(f'Updated client secret for user ({client.user_id})')\nreturn (Key(client.key), secret)", "session = _Session()\nclient = Client.from_user(user_id, session)\nkey, secret = (Key.generate(), Secret.generate())\nclient.key = key.value\nclient.secret = secret.hashed().value\nsession.commit()\nlog.info(f'Updated client key and secret for user ({client.user_id})')\nreturn (key, secret)"], "bodies_text": "<|body_start_0|>\n try:\n session = session or _Session()\n return session.query(cls).filter(cls.key == key).one()\n except NoResultFound as error:\n raise Client.NotFound(f'No client with key={key}') from error\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n session = session or _Session()\n return session.query(cls).filter(cls.user_id == user_id).one()\n except NoResultFound as error:\n raise Client.NotFound(f'No client with user_id={user_id}') from error\n<|end_body_1|>\n\n<|body_start_2|>\n session = _Session()\n user = User.from_id(user_id, session)\n key, secret = (Key.generate(), Secret.generate())\n client = Client(user_id=user.id, level=level, key=key.value, secret=secret.hashed().value, valid=True)\n session.add(client)\n session.commit()\n log.info(f'Added client for user ({user.id})')\n return (key, secret, client)\n<|end_body_2|>\n\n<|body_start_3|>\n session = _Session()\n client = Client.from_user(user_id, session)\n secret = Secret.generate()\n client.secret = secret.hashed().value\n session.commit()\n log.info(f'Updated client secret for user ({client.user_id})')\n return (Key(client.key), secret)\n<|end_body_3|>\n\n<|body_start_4|>\n session = _Session()\n client = Client.from_user(user_id, session)\n key, secret = (Key.generate(), Secret.generate())\n client.key = key.value\n client.secret = secret.hashed().value\n session.commit()\n log.info(f'Updated client key and secret for user ({client.user_id})')\n return (key, secret)\n<|end_body_4|>\n", "class_docstring": "Client stores user authorization and authentication.", "class_name": "Client", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Client:\n \"\"\"Client stores user authorization and authentication.\"\"\"\n\n def from_key(cls, key: str, session: _Session=None) -> Client:\n \"\"\"Query by unique `key`.\"\"\"\n <|body_0|>\n\n def from_user(cls, user_id: int, session: _Session=None) -> Client:\n \"\"\"Query by unique `user_id`.\"\"\"\n <|body_1|>\n\n def new(cls, user_id: int, level: int=DEFAULT_CLIENT_LEVEL) -> Tuple[Key, Secret, Client]:\n \"\"\"Create client credentials for `user` with `level`. 
Args: user_id (int or `User`): An existing user. level (int): Authorization level (default: `DEFAULT_CLIENT_LEVEL`).\"\"\"\n <|body_2|>\n\n def new_secret(cls, user_id: int) -> Tuple[Key, Secret]:\n \"\"\"Generate a new secret (store the hashed value).\"\"\"\n <|body_3|>\n\n def new_key(cls, user_id: int) -> Tuple[Key, Secret]:\n \"\"\"Generate a new key and secret (store the hashed value).\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n session = session or _Session()\n return session.query(cls).filter(cls.key == key).one()\n except NoResultFound as error:\n raise Client.NotFound(f'No client with key={key}') from error\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n session = session or _Session()\n return session.query(cls).filter(cls.user_id == user_id).one()\n except NoResultFound as error:\n raise Client.NotFound(f'No client with user_id={user_id}') from error\n<|end_body_1|>\n\n<|body_start_2|>\n session = _Session()\n user = User.from_id(user_id, session)\n key, secret = (Key.generate(), Secret.generate())\n client = Client(user_id=user.id, level=level, key=key.value, secret=secret.hashed().value, valid=True)\n session.add(client)\n session.commit()\n log.info(f'Added client for user ({user.id})')\n return (key, secret, client)\n<|end_body_2|>\n\n<|body_start_3|>\n session = _Session()\n client = Client.from_user(user_id, session)\n secret = Secret.generate()\n client.secret = secret.hashed().value\n session.commit()\n log.info(f'Updated client secret for user ({client.user_id})')\n return (Key(client.key), secret)\n<|end_body_3|>\n\n<|body_start_4|>\n session = _Session()\n client = Client.from_user(user_id, session)\n key, secret = (Key.generate(), Secret.generate())\n client.key = key.value\n client.secret = secret.hashed().value\n session.commit()\n log.info(f'Updated client key and secret for user ({client.user_id})')\n return (key, secret)\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000078", "length_bytes": 49365, "license_type": "permissive", "methods": [{"docstring": "Query by unique `key`.", "name": "from_key", "signature": "def from_key(cls, key: str, session: _Session=None) -> Client"}, {"docstring": "Query by unique `user_id`.", "name": "from_user", "signature": "def from_user(cls, user_id: int, session: _Session=None) -> Client"}, {"docstring": "Create client credentials for `user` with `level`. Args: user_id (int or `User`): An existing user. level (int): Authorization level (default: `DEFAULT_CLIENT_LEVEL`).", "name": "new", "signature": "def new(cls, user_id: int, level: int=DEFAULT_CLIENT_LEVEL) -> Tuple[Key, Secret, Client]"}, {"docstring": "Generate a new secret (store the hashed value).", "name": "new_secret", "signature": "def new_secret(cls, user_id: int) -> Tuple[Key, Secret]"}, {"docstring": "Generate a new key and secret (store the hashed value).", "name": "new_key", "signature": "def new_key(cls, user_id: int) -> Tuple[Key, Secret]"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_037072", "prompt": "Implement the Python class `Client` described below.\n\nClass description:\nClient stores user authorization and authentication.\n\nMethod signatures and docstrings:\n- def from_key(cls, key: str, session: _Session=None) -> Client: Query by unique `key`.\n- def from_user(cls, user_id: int, session: _Session=None) -> Client: Query by unique `user_id`.\n- def new(cls, user_id: int, level: int=DEFAULT_CLIENT_LEVEL) -> Tuple[Key, Secret, Client]: Create client credentials for `user` with `level`. 
Args: user_id (int or `User`): An existing user. level (int): Authorization level (default: `DEFAULT_CLIENT_LEVEL`).\n- def new_secret(cls, user_id: int) -> Tuple[Key, Secret]: Generate a new secret (store the hashed value).\n- def new_key(cls, user_id: int) -> Tuple[Key, Secret]: Generate a new key and secret (store the hashed value).", "prompted_full_text": "Implement the Python class `Client` described below.\n\nClass description:\nClient stores user authorization and authentication.\n\nMethod signatures and docstrings:\n- def from_key(cls, key: str, session: _Session=None) -> Client: Query by unique `key`.\n- def from_user(cls, user_id: int, session: _Session=None) -> Client: Query by unique `user_id`.\n- def new(cls, user_id: int, level: int=DEFAULT_CLIENT_LEVEL) -> Tuple[Key, Secret, Client]: Create client credentials for `user` with `level`. Args: user_id (int or `User`): An existing user. level (int): Authorization level (default: `DEFAULT_CLIENT_LEVEL`).\n- def new_secret(cls, user_id: int) -> Tuple[Key, Secret]: Generate a new secret (store the hashed value).\n- def new_key(cls, user_id: int) -> Tuple[Key, Secret]: Generate a new key and secret (store the hashed value).\n\n<|skeleton|>\nclass Client:\n \"\"\"Client stores user authorization and authentication.\"\"\"\n\n def from_key(cls, key: str, session: _Session=None) -> Client:\n \"\"\"Query by unique `key`.\"\"\"\n <|body_0|>\n\n def from_user(cls, user_id: int, session: _Session=None) -> Client:\n \"\"\"Query by unique `user_id`.\"\"\"\n <|body_1|>\n\n def new(cls, user_id: int, level: int=DEFAULT_CLIENT_LEVEL) -> Tuple[Key, Secret, Client]:\n \"\"\"Create client credentials for `user` with `level`. Args: user_id (int or `User`): An existing user. level (int): Authorization level (default: `DEFAULT_CLIENT_LEVEL`).\"\"\"\n <|body_2|>\n\n def new_secret(cls, user_id: int) -> Tuple[Key, Secret]:\n \"\"\"Generate a new secret (store the hashed value).\"\"\"\n <|body_3|>\n\n def new_key(cls, user_id: int) -> Tuple[Key, Secret]:\n \"\"\"Generate a new key and secret (store the hashed value).\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n session = session or _Session()\n return session.query(cls).filter(cls.key == key).one()\n except NoResultFound as error:\n raise Client.NotFound(f'No client with key={key}') from error\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n session = session or _Session()\n return session.query(cls).filter(cls.user_id == user_id).one()\n except NoResultFound as error:\n raise Client.NotFound(f'No client with user_id={user_id}') from error\n<|end_body_1|>\n\n<|body_start_2|>\n session = _Session()\n user = User.from_id(user_id, session)\n key, secret = (Key.generate(), Secret.generate())\n client = Client(user_id=user.id, level=level, key=key.value, secret=secret.hashed().value, valid=True)\n session.add(client)\n session.commit()\n log.info(f'Added client for user ({user.id})')\n return (key, secret, client)\n<|end_body_2|>\n\n<|body_start_3|>\n session = _Session()\n client = Client.from_user(user_id, session)\n secret = Secret.generate()\n client.secret = secret.hashed().value\n session.commit()\n log.info(f'Updated client secret for user ({client.user_id})')\n return (Key(client.key), secret)\n<|end_body_3|>\n\n<|body_start_4|>\n session = _Session()\n client = Client.from_user(user_id, session)\n key, secret = (Key.generate(), Secret.generate())\n client.key = key.value\n client.secret = secret.hashed().value\n session.commit()\n log.info(f'Updated client key and secret for user 
({client.user_id})')\n return (key, secret)\n<|end_body_4|>\n", "revision_id": "3f226912ec8d303a067b7c2b794afbb15af00cf9", "skeleton": "<|skeleton|>\nclass Client:\n \"\"\"Client stores user authorization and authentication.\"\"\"\n\n def from_key(cls, key: str, session: _Session=None) -> Client:\n \"\"\"Query by unique `key`.\"\"\"\n <|body_0|>\n\n def from_user(cls, user_id: int, session: _Session=None) -> Client:\n \"\"\"Query by unique `user_id`.\"\"\"\n <|body_1|>\n\n def new(cls, user_id: int, level: int=DEFAULT_CLIENT_LEVEL) -> Tuple[Key, Secret, Client]:\n \"\"\"Create client credentials for `user` with `level`. Args: user_id (int or `User`): An existing user. level (int): Authorization level (default: `DEFAULT_CLIENT_LEVEL`).\"\"\"\n <|body_2|>\n\n def new_secret(cls, user_id: int) -> Tuple[Key, Secret]:\n \"\"\"Generate a new secret (store the hashed value).\"\"\"\n <|body_3|>\n\n def new_key(cls, user_id: int) -> Tuple[Key, Secret]:\n \"\"\"Generate a new key and secret (store the hashed value).\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Client:\n \"\"\"Client stores user authorization and authentication.\"\"\"\n\n def from_key(cls, key: str, session: _Session=None) -> Client:\n \"\"\"Query by unique `key`.\"\"\"\n try:\n session = session or _Session()\n return session.query(cls).filter(cls.key == key).one()\n except NoResultFound as error:\n raise Client.NotFound(f'No client with key={key}') from error\n\n def from_user(cls, user_id: int, session: _Session=None) -> Client:\n \"\"\"Query by unique `user_id`.\"\"\"\n try:\n session = session or _Session()\n return session.query(cls).filter(cls.user_id == user_id).one()\n except NoResultFound as error:\n raise Client.NotFound(f'No client with user_id={user_id}') from error\n\n def new(cls, user_id: int, level: int=DEFAULT_CLIENT_LEVEL) -> Tuple[Key, Secret, Client]:\n \"\"\"Create client credentials for `user` with `level`. Args: user_id (int or `User`): An existing user. level (int): Authorization level (default: `DEFAULT_CLIENT_LEVEL`).\"\"\"\n session = _Session()\n user = User.from_id(user_id, session)\n key, secret = (Key.generate(), Secret.generate())\n client = Client(user_id=user.id, level=level, key=key.value, secret=secret.hashed().value, valid=True)\n session.add(client)\n session.commit()\n log.info(f'Added client for user ({user.id})')\n return (key, secret, client)\n\n def new_secret(cls, user_id: int) -> Tuple[Key, Secret]:\n \"\"\"Generate a new secret (store the hashed value).\"\"\"\n session = _Session()\n client = Client.from_user(user_id, session)\n secret = Secret.generate()\n client.secret = secret.hashed().value\n session.commit()\n log.info(f'Updated client secret for user ({client.user_id})')\n return (Key(client.key), secret)\n\n def new_key(cls, user_id: int) -> Tuple[Key, Secret]:\n \"\"\"Generate a new key and secret (store the hashed value).\"\"\"\n session = _Session()\n client = Client.from_user(user_id, session)\n key, secret = (Key.generate(), Secret.generate())\n client.key = key.value\n client.secret = secret.hashed().value\n session.commit()\n log.info(f'Updated client key and secret for user ({client.user_id})')\n return (key, secret)\n", "source": "the_stack_v2_python_sparse", "source_path": "refitt/database/model.py", "source_repo": "Feliconut/refitt", "split": "val", "star_events_count": 0}
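Editor's note: the Client record calls Key.generate(), Secret.generate(), and Secret.hashed() without showing those helpers. The stand-ins below are hypothetical but make the five method bodies self-consistent; the real refitt types may differ in token length and hashing scheme:

import hashlib
import secrets
from dataclasses import dataclass

@dataclass(frozen=True)
class Key:
    # Hypothetical stand-in for the key type the Client methods use.
    value: str

    @classmethod
    def generate(cls) -> 'Key':
        return cls(secrets.token_hex(16))

@dataclass(frozen=True)
class Secret:
    # Hypothetical stand-in; Client.new stores secret.hashed().value,
    # i.e. only the digest ever reaches the database.
    value: str

    @classmethod
    def generate(cls) -> 'Secret':
        return cls(secrets.token_hex(32))

    def hashed(self) -> 'Secret':
        return Secret(hashlib.sha256(self.value.encode()).hexdigest())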
{"blob_id": "cc91dcfe018f9c70e8dc56a0976872594bc5960f", "bodies": ["need, have = ([0] * 128, [0] * 128)\nfor ch in t:\n need[ord(ch)] += 1\nl, r, start = (0, 0, 0)\nmin_len, count = (float('inf'), 0)\nwhile r < len(s):\n s_r = ord(s[r])\n if need[s_r] == 0:\n r += 1\n continue\n if have[s_r] < need[s_r]:\n count += 1\n have[s_r] += 1\n r += 1\n while count == len(t):\n if r - l < min_len:\n min_len = r - l\n start = l\n s_l = ord(s[l])\n if need[s_l] == 0:\n l += 1\n continue\n if have[s_l] == need[s_l]:\n count -= 1\n have[s_l] -= 1\n l += 1\nreturn '' if min_len == float('inf') else s[start:start + min_len]", "dict_t, counter = (Counter(t), len(t))\nl, r, head = (0, 0, 0)\nmin_len = float('inf')\nwhile r < len(s):\n if s[r] in dict_t:\n if dict_t[s[r]] > 0:\n counter -= 1\n dict_t[s[r]] -= 1\n r += 1\n while counter == 0:\n if r - l < min_len:\n min_len = r - l\n head = l\n if s[l] in dict_t:\n dict_t[s[l]] += 1\n if dict_t[s[l]] > 0:\n counter += 1\n l += 1\nreturn '' if min_len == float('inf') else s[head:head + min_len]"], "bodies_text": "<|body_start_0|>\n need, have = ([0] * 128, [0] * 128)\n for ch in t:\n need[ord(ch)] += 1\n l, r, start = (0, 0, 0)\n min_len, count = (float('inf'), 0)\n while r < len(s):\n s_r = ord(s[r])\n if need[s_r] == 0:\n r += 1\n continue\n if have[s_r] < need[s_r]:\n count += 1\n have[s_r] += 1\n r += 1\n while count == len(t):\n if r - l < min_len:\n min_len = r - l\n start = l\n s_l = ord(s[l])\n if need[s_l] == 0:\n l += 1\n continue\n if have[s_l] == need[s_l]:\n count -= 1\n have[s_l] -= 1\n l += 1\n return '' if min_len == float('inf') else s[start:start + min_len]\n<|end_body_0|>\n\n<|body_start_1|>\n dict_t, counter = (Counter(t), len(t))\n l, r, head = (0, 0, 0)\n min_len = float('inf')\n while r < len(s):\n if s[r] in dict_t:\n if dict_t[s[r]] > 0:\n counter -= 1\n dict_t[s[r]] -= 1\n r += 1\n while counter == 0:\n if r - l < min_len:\n min_len = r - l\n head = l\n if s[l] in dict_t:\n dict_t[s[l]] += 1\n if dict_t[s[l]] > 0:\n counter += 1\n l += 1\n return '' if min_len == float('inf') else s[head:head + min_len]\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def minWindow(self, s, t):\n \"\"\":type s: str :type t: str :rtype: str\"\"\"\n <|body_0|>\n\n def minWindow(self, s, t):\n \"\"\":type s: str :type t: str :rtype: str\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n need, have = ([0] * 128, [0] * 128)\n for ch in t:\n need[ord(ch)] += 1\n l, r, start = (0, 0, 0)\n min_len, count = (float('inf'), 0)\n while r < len(s):\n s_r = ord(s[r])\n if need[s_r] == 0:\n r += 1\n continue\n if have[s_r] < need[s_r]:\n count += 1\n have[s_r] += 1\n r += 1\n while count == len(t):\n if r - l < min_len:\n min_len = r - l\n start = l\n s_l = ord(s[l])\n if need[s_l] == 0:\n l += 1\n continue\n if have[s_l] == need[s_l]:\n count -= 1\n have[s_l] -= 1\n l += 1\n return '' if min_len == float('inf') else s[start:start + min_len]\n<|end_body_0|>\n\n<|body_start_1|>\n dict_t, counter = (Counter(t), len(t))\n l, r, head = (0, 0, 0)\n min_len = float('inf')\n while r < len(s):\n if s[r] in dict_t:\n if dict_t[s[r]] > 0:\n counter -= 1\n dict_t[s[r]] -= 1\n r += 1\n while counter == 0:\n if r - l < min_len:\n min_len = r - l\n head = l\n if s[l] in dict_t:\n dict_t[s[l]] += 1\n if dict_t[s[l]] > 0:\n counter += 1\n l += 1\n return '' if min_len == float('inf') else s[head:head + 
min_len]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000079", "length_bytes": 6972, "license_type": "no_license", "methods": [{"docstring": ":type s: str :type t: str :rtype: str", "name": "minWindow", "signature": "def minWindow(self, s, t)"}, {"docstring": ":type s: str :type t: str :rtype: str", "name": "minWindow", "signature": "def minWindow(self, s, t)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000336", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def minWindow(self, s, t): :type s: str :type t: str :rtype: str\n- def minWindow(self, s, t): :type s: str :type t: str :rtype: str", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def minWindow(self, s, t): :type s: str :type t: str :rtype: str\n- def minWindow(self, s, t): :type s: str :type t: str :rtype: str\n\n<|skeleton|>\nclass Solution:\n\n def minWindow(self, s, t):\n \"\"\":type s: str :type t: str :rtype: str\"\"\"\n <|body_0|>\n\n def minWindow(self, s, t):\n \"\"\":type s: str :type t: str :rtype: str\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n need, have = ([0] * 128, [0] * 128)\n for ch in t:\n need[ord(ch)] += 1\n l, r, start = (0, 0, 0)\n min_len, count = (float('inf'), 0)\n while r < len(s):\n s_r = ord(s[r])\n if need[s_r] == 0:\n r += 1\n continue\n if have[s_r] < need[s_r]:\n count += 1\n have[s_r] += 1\n r += 1\n while count == len(t):\n if r - l < min_len:\n min_len = r - l\n start = l\n s_l = ord(s[l])\n if need[s_l] == 0:\n l += 1\n continue\n if have[s_l] == need[s_l]:\n count -= 1\n have[s_l] -= 1\n l += 1\n return '' if min_len == float('inf') else s[start:start + min_len]\n<|end_body_0|>\n\n<|body_start_1|>\n dict_t, counter = (Counter(t), len(t))\n l, r, head = (0, 0, 0)\n min_len = float('inf')\n while r < len(s):\n if s[r] in dict_t:\n if dict_t[s[r]] > 0:\n counter -= 1\n dict_t[s[r]] -= 1\n r += 1\n while counter == 0:\n if r - l < min_len:\n min_len = r - l\n head = l\n if s[l] in dict_t:\n dict_t[s[l]] += 1\n if dict_t[s[l]] > 0:\n counter += 1\n l += 1\n return '' if min_len == float('inf') else s[head:head + min_len]\n<|end_body_1|>\n", "revision_id": "860590239da0618c52967a55eda8d6bbe00bfa96", "skeleton": "<|skeleton|>\nclass Solution:\n\n def minWindow(self, s, t):\n \"\"\":type s: str :type t: str :rtype: str\"\"\"\n <|body_0|>\n\n def minWindow(self, s, t):\n \"\"\":type s: str :type t: str :rtype: str\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def minWindow(self, s, t):\n \"\"\":type s: str :type t: str :rtype: str\"\"\"\n need, have = ([0] * 128, [0] * 128)\n for ch in t:\n need[ord(ch)] += 1\n l, r, start = (0, 0, 0)\n min_len, count = (float('inf'), 0)\n while r < len(s):\n s_r = ord(s[r])\n if need[s_r] == 0:\n r += 1\n continue\n if have[s_r] < need[s_r]:\n count += 1\n have[s_r] += 1\n r += 1\n while count == len(t):\n if r - l < min_len:\n min_len = r - l\n start = l\n s_l = ord(s[l])\n if need[s_l] == 0:\n l += 1\n continue\n if have[s_l] == need[s_l]:\n count -= 1\n have[s_l] -= 1\n l += 1\n return '' if min_len == float('inf') else s[start:start + min_len]\n\n def minWindow(self, s, t):\n \"\"\":type s: str :type t: str 
:rtype: str\"\"\"\n dict_t, counter = (Counter(t), len(t))\n l, r, head = (0, 0, 0)\n min_len = float('inf')\n while r < len(s):\n if s[r] in dict_t:\n if dict_t[s[r]] > 0:\n counter -= 1\n dict_t[s[r]] -= 1\n r += 1\n while counter == 0:\n if r - l < min_len:\n min_len = r - l\n head = l\n if s[l] in dict_t:\n dict_t[s[l]] += 1\n if dict_t[s[l]] > 0:\n counter += 1\n l += 1\n return '' if min_len == float('inf') else s[head:head + min_len]\n", "source": "the_stack_v2_python_sparse", "source_path": "LeetCode/p0076/I/minimum-window-substring.py", "source_repo": "Ynjxsjmh/PracticeMakesPerfect", "split": "val", "star_events_count": 0}
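Editor's note: both minWindow bodies are the standard sliding-window scan over s, running in O(|s| + |t|) time. The second definition shadows the first, so calls reach the Counter-based version, whose import the record leaves implicit. A quick check under that assumption:

from collections import Counter  # needed by the second (surviving) body

solver = Solution()
assert solver.minWindow('ADOBECODEBANC', 'ABC') == 'BANC'
assert solver.minWindow('a', 'aa') == ''  # no window can cover both 'a's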
{"blob_id": "8a75873b555a7cda7e8d1c7deb7290b7f9614f7d", "bodies": ["day = timezone.now() - datetime.timedelta(days=5)\npast_post = Donation(end_date=day)\nself.assertIs(past_post.date_in_future(), False)", "day = timezone.now()\npast_post = Donation(end_date=day)\nself.assertIs(past_post.date_in_future(), True)", "day = timezone.now() + datetime.timedelta(days=123)\npast_post = Donation(end_date=day)\nself.assertIs(past_post.date_in_future(), True)"], "bodies_text": "<|body_start_0|>\n day = timezone.now() - datetime.timedelta(days=5)\n past_post = Donation(end_date=day)\n self.assertIs(past_post.date_in_future(), False)\n<|end_body_0|>\n\n<|body_start_1|>\n day = timezone.now()\n past_post = Donation(end_date=day)\n self.assertIs(past_post.date_in_future(), True)\n<|end_body_1|>\n\n<|body_start_2|>\n day = timezone.now() + datetime.timedelta(days=123)\n past_post = Donation(end_date=day)\n self.assertIs(past_post.date_in_future(), True)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "DonationPostTestCase", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DonationPostTestCase:\n\n def test_donation_end_date_in_past(self):\n \"\"\"date_in_future() returns False for donations with end dates in the past\"\"\"\n <|body_0|>\n\n def test_donation_end_date_in_present(self):\n \"\"\"date_in_future() returns True for donations with end dates in the present\"\"\"\n <|body_1|>\n\n def test_donation_end_date_in_future(self):\n \"\"\"date_in_future() returns False for donations with end dates in the future\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n day = timezone.now() - datetime.timedelta(days=5)\n past_post = Donation(end_date=day)\n self.assertIs(past_post.date_in_future(), False)\n<|end_body_0|>\n\n<|body_start_1|>\n day = timezone.now()\n past_post = Donation(end_date=day)\n self.assertIs(past_post.date_in_future(), True)\n<|end_body_1|>\n\n<|body_start_2|>\n day = timezone.now() + datetime.timedelta(days=123)\n past_post = Donation(end_date=day)\n self.assertIs(past_post.date_in_future(), True)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000080", "length_bytes": 7315, "license_type": "no_license", "methods": [{"docstring": "date_in_future() returns False for donations with end dates in the past", "name": "test_donation_end_date_in_past", "signature": "def test_donation_end_date_in_past(self)"}, {"docstring": "date_in_future() returns True for donations with end dates in the present", "name": "test_donation_end_date_in_present", "signature": "def test_donation_end_date_in_present(self)"}, {"docstring": "date_in_future() returns False for donations with end dates in the future", "name": "test_donation_end_date_in_future", "signature": "def test_donation_end_date_in_future(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_035018", "prompt": "Implement the Python class `DonationPostTestCase` described below.\n\nClass description:\nImplement the DonationPostTestCase class.\n\nMethod signatures and docstrings:\n- def test_donation_end_date_in_past(self): date_in_future() returns False for donations with end dates in the past\n- def test_donation_end_date_in_present(self): date_in_future() returns True for donations with end dates in the present\n- def test_donation_end_date_in_future(self): date_in_future() returns False for donations with end dates in the future", "prompted_full_text": "Implement the Python class `DonationPostTestCase` described below.\n\nClass 
description:\nImplement the DonationPostTestCase class.\n\nMethod signatures and docstrings:\n- def test_donation_end_date_in_past(self): date_in_future() returns False for donations with end dates in the past\n- def test_donation_end_date_in_present(self): date_in_future() returns True for donations with end dates in the present\n- def test_donation_end_date_in_future(self): date_in_future() returns False for donations with end dates in the future\n\n<|skeleton|>\nclass DonationPostTestCase:\n\n def test_donation_end_date_in_past(self):\n \"\"\"date_in_future() returns False for donations with end dates in the past\"\"\"\n <|body_0|>\n\n def test_donation_end_date_in_present(self):\n \"\"\"date_in_future() returns True for donations with end dates in the present\"\"\"\n <|body_1|>\n\n def test_donation_end_date_in_future(self):\n \"\"\"date_in_future() returns False for donations with end dates in the future\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n day = timezone.now() - datetime.timedelta(days=5)\n past_post = Donation(end_date=day)\n self.assertIs(past_post.date_in_future(), False)\n<|end_body_0|>\n\n<|body_start_1|>\n day = timezone.now()\n past_post = Donation(end_date=day)\n self.assertIs(past_post.date_in_future(), True)\n<|end_body_1|>\n\n<|body_start_2|>\n day = timezone.now() + datetime.timedelta(days=123)\n past_post = Donation(end_date=day)\n self.assertIs(past_post.date_in_future(), True)\n<|end_body_2|>\n", "revision_id": "c4cdde7be0f2550f52cef6173c5530abae24ac67", "skeleton": "<|skeleton|>\nclass DonationPostTestCase:\n\n def test_donation_end_date_in_past(self):\n \"\"\"date_in_future() returns False for donations with end dates in the past\"\"\"\n <|body_0|>\n\n def test_donation_end_date_in_present(self):\n \"\"\"date_in_future() returns True for donations with end dates in the present\"\"\"\n <|body_1|>\n\n def test_donation_end_date_in_future(self):\n \"\"\"date_in_future() returns False for donations with end dates in the future\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DonationPostTestCase:\n def test_donation_end_date_in_past(self):\n \"\"\"date_in_future() returns False for donations with end dates in the past\"\"\"\n day = timezone.now() - datetime.timedelta(days=5)\n past_post = Donation(end_date=day)\n self.assertIs(past_post.date_in_future(), False)\n\n def test_donation_end_date_in_present(self):\n \"\"\"date_in_future() returns True for donations with end dates in the present\"\"\"\n day = timezone.now()\n past_post = Donation(end_date=day)\n self.assertIs(past_post.date_in_future(), True)\n\n def test_donation_end_date_in_future(self):\n \"\"\"date_in_future() returns False for donations with end dates in the future\"\"\"\n day = timezone.now() + datetime.timedelta(days=123)\n past_post = Donation(end_date=day)\n self.assertIs(past_post.date_in_future(), True)\n", "source": "the_stack_v2_python_sparse", "source_path": "donations/tests.py", "source_repo": "alex5947/mdmv-application", "split": "val", "star_events_count": 0}
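Editor's note: the DonationPostTestCase record tests a date_in_future() helper that is never shown, and its third docstring says "returns False" for future end dates while the body asserts True, so the docstring rather than the assertion looks like the slip. A minimal sketch consistent with all three assertions, assuming the comparison is at day granularity (a strict datetime comparison would fail the "present" case by the microseconds elapsed between the two timezone.now() calls):

from django.utils import timezone

class Donation:
    # Hypothetical minimal model; the real class is presumably a Django
    # model whose end_date is a DateTimeField.
    def __init__(self, end_date):
        self.end_date = end_date

    def date_in_future(self):
        # Compare at day granularity so an end date of "now" still counts
        # as in the future, matching test_donation_end_date_in_present.
        return self.end_date.date() >= timezone.now().date()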
{"blob_id": "444c7a8c9d0123ef42f255adbb6c84bc444c8f86", "bodies": ["m = len(matrix)\nn = len(matrix[0])\nr = []\nc = []\nfor x in range(m):\n for i in range(n):\n if matrix[x][i] == 0:\n r.append(x)\n c.append(i)\nfor x in r:\n for i in range(n):\n matrix[x][i] = 0\nfor x in c:\n for i in range(m):\n matrix[i][x] = 0", "m = len(matrix)\nn = len(matrix[0])\nr = []\nc = []\nfor x in range(m):\n for i in range(n):\n if matrix[x][i] == 0:\n r.append(x)\n c.append(i)\nfor x in r:\n for i in range(n):\n matrix[x][i] = 0\nfor x in c:\n for i in range(m):\n matrix[i][x] = 0"], "bodies_text": "<|body_start_0|>\n m = len(matrix)\n n = len(matrix[0])\n r = []\n c = []\n for x in range(m):\n for i in range(n):\n if matrix[x][i] == 0:\n r.append(x)\n c.append(i)\n for x in r:\n for i in range(n):\n matrix[x][i] = 0\n for x in c:\n for i in range(m):\n matrix[i][x] = 0\n<|end_body_0|>\n\n<|body_start_1|>\n m = len(matrix)\n n = len(matrix[0])\n r = []\n c = []\n for x in range(m):\n for i in range(n):\n if matrix[x][i] == 0:\n r.append(x)\n c.append(i)\n for x in r:\n for i in range(n):\n matrix[x][i] = 0\n for x in c:\n for i in range(m):\n matrix[i][x] = 0\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def setZeroes(self, matrix) -> None:\n \"\"\"Do not return anything, modify matrix in-place instead.\"\"\"\n <|body_0|>\n\n def setZeroes(self, matrix) -> None:\n \"\"\"Do not return anything, modify matrix in-place instead.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n m = len(matrix)\n n = len(matrix[0])\n r = []\n c = []\n for x in range(m):\n for i in range(n):\n if matrix[x][i] == 0:\n r.append(x)\n c.append(i)\n for x in r:\n for i in range(n):\n matrix[x][i] = 0\n for x in c:\n for i in range(m):\n matrix[i][x] = 0\n<|end_body_0|>\n\n<|body_start_1|>\n m = len(matrix)\n n = len(matrix[0])\n r = []\n c = []\n for x in range(m):\n for i in range(n):\n if matrix[x][i] == 0:\n r.append(x)\n c.append(i)\n for x in r:\n for i in range(n):\n matrix[x][i] = 0\n for x in c:\n for i in range(m):\n matrix[i][x] = 0\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000081", "length_bytes": 1206, "license_type": "no_license", "methods": [{"docstring": "Do not return anything, modify matrix in-place instead.", "name": "setZeroes", "signature": "def setZeroes(self, matrix) -> None"}, {"docstring": "Do not return anything, modify matrix in-place instead.", "name": "setZeroes", "signature": "def setZeroes(self, matrix) -> None"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def setZeroes(self, matrix) -> None: Do not return anything, modify matrix in-place instead.\n- def setZeroes(self, matrix) -> None: Do not return anything, modify matrix in-place instead.", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def setZeroes(self, matrix) -> None: Do not return anything, modify matrix in-place instead.\n- def setZeroes(self, matrix) -> None: Do not return anything, modify matrix in-place instead.\n\n<|skeleton|>\nclass Solution:\n\n def setZeroes(self, matrix) -> None:\n \"\"\"Do not return anything, modify matrix in-place instead.\"\"\"\n <|body_0|>\n\n def setZeroes(self, matrix) -> None:\n 
\"\"\"Do not return anything, modify matrix in-place instead.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n m = len(matrix)\n n = len(matrix[0])\n r = []\n c = []\n for x in range(m):\n for i in range(n):\n if matrix[x][i] == 0:\n r.append(x)\n c.append(i)\n for x in r:\n for i in range(n):\n matrix[x][i] = 0\n for x in c:\n for i in range(m):\n matrix[i][x] = 0\n<|end_body_0|>\n\n<|body_start_1|>\n m = len(matrix)\n n = len(matrix[0])\n r = []\n c = []\n for x in range(m):\n for i in range(n):\n if matrix[x][i] == 0:\n r.append(x)\n c.append(i)\n for x in r:\n for i in range(n):\n matrix[x][i] = 0\n for x in c:\n for i in range(m):\n matrix[i][x] = 0\n<|end_body_1|>\n", "revision_id": "a7b31b2deaaac24470e6337690b5d6df481bc04b", "skeleton": "<|skeleton|>\nclass Solution:\n\n def setZeroes(self, matrix) -> None:\n \"\"\"Do not return anything, modify matrix in-place instead.\"\"\"\n <|body_0|>\n\n def setZeroes(self, matrix) -> None:\n \"\"\"Do not return anything, modify matrix in-place instead.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def setZeroes(self, matrix) -> None:\n \"\"\"Do not return anything, modify matrix in-place instead.\"\"\"\n m = len(matrix)\n n = len(matrix[0])\n r = []\n c = []\n for x in range(m):\n for i in range(n):\n if matrix[x][i] == 0:\n r.append(x)\n c.append(i)\n for x in r:\n for i in range(n):\n matrix[x][i] = 0\n for x in c:\n for i in range(m):\n matrix[i][x] = 0\n\n def setZeroes(self, matrix) -> None:\n \"\"\"Do not return anything, modify matrix in-place instead.\"\"\"\n m = len(matrix)\n n = len(matrix[0])\n r = []\n c = []\n for x in range(m):\n for i in range(n):\n if matrix[x][i] == 0:\n r.append(x)\n c.append(i)\n for x in r:\n for i in range(n):\n matrix[x][i] = 0\n for x in c:\n for i in range(m):\n matrix[i][x] = 0\n", "source": "the_stack_v2_python_sparse", "source_path": "Archive/setMatrixZeros_73.py", "source_repo": "adalloul0928/Leetcode_Hell", "split": "val", "star_events_count": 0}
{"blob_id": "3406fbd6c2d2a9ba10d76786ab6543f5989d89b5", "bodies": ["if len(nums) <= 1:\n return False\nbuff_dict = {}\nfor i in range(len(nums)):\n if i == leftnum:\n continue\n if nums[i] in buff_dict:\n return [buff_dict[nums[i]], i]\n else:\n buff_dict[target - nums[i]] = i", "leftnum = 0\nnumstack = []\nfor index in range(len(nums)):\n numstack.append(self.twoSum(Solution, nums, -nums[index], index))\nreturn numstack"], "bodies_text": "<|body_start_0|>\n if len(nums) <= 1:\n return False\n buff_dict = {}\n for i in range(len(nums)):\n if i == leftnum:\n continue\n if nums[i] in buff_dict:\n return [buff_dict[nums[i]], i]\n else:\n buff_dict[target - nums[i]] = i\n<|end_body_0|>\n\n<|body_start_1|>\n leftnum = 0\n numstack = []\n for index in range(len(nums)):\n numstack.append(self.twoSum(Solution, nums, -nums[index], index))\n return numstack\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def twoSum(self, nums, target, leftnum):\n \"\"\":type nums: List[int] :type target: int :rtype: List[int]\"\"\"\n <|body_0|>\n\n def threeSum(self, nums):\n \"\"\":type nums: List[int] :rtype: List[List[int]]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if len(nums) <= 1:\n return False\n buff_dict = {}\n for i in range(len(nums)):\n if i == leftnum:\n continue\n if nums[i] in buff_dict:\n return [buff_dict[nums[i]], i]\n else:\n buff_dict[target - nums[i]] = i\n<|end_body_0|>\n\n<|body_start_1|>\n leftnum = 0\n numstack = []\n for index in range(len(nums)):\n numstack.append(self.twoSum(Solution, nums, -nums[index], index))\n return numstack\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000082", "length_bytes": 863, "license_type": "no_license", "methods": [{"docstring": ":type nums: List[int] :type target: int :rtype: List[int]", "name": "twoSum", "signature": "def twoSum(self, nums, target, leftnum)"}, {"docstring": ":type nums: List[int] :rtype: List[List[int]]", "name": "threeSum", "signature": "def threeSum(self, nums)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_044548", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def twoSum(self, nums, target, leftnum): :type nums: List[int] :type target: int :rtype: List[int]\n- def threeSum(self, nums): :type nums: List[int] :rtype: List[List[int]]", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def twoSum(self, nums, target, leftnum): :type nums: List[int] :type target: int :rtype: List[int]\n- def threeSum(self, nums): :type nums: List[int] :rtype: List[List[int]]\n\n<|skeleton|>\nclass Solution:\n\n def twoSum(self, nums, target, leftnum):\n \"\"\":type nums: List[int] :type target: int :rtype: List[int]\"\"\"\n <|body_0|>\n\n def threeSum(self, nums):\n \"\"\":type nums: List[int] :rtype: List[List[int]]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if len(nums) <= 1:\n return False\n buff_dict = {}\n for i in range(len(nums)):\n if i == leftnum:\n continue\n if nums[i] in buff_dict:\n return [buff_dict[nums[i]], i]\n else:\n buff_dict[target - nums[i]] = i\n<|end_body_0|>\n\n<|body_start_1|>\n leftnum = 0\n numstack = []\n for index in range(len(nums)):\n numstack.append(self.twoSum(Solution, 
nums, -nums[index], index))\n return numstack\n<|end_body_1|>\n", "revision_id": "90d95d72fb4fa0659a2f4861b65bc4f98647ab37", "skeleton": "<|skeleton|>\nclass Solution:\n\n def twoSum(self, nums, target, leftnum):\n \"\"\":type nums: List[int] :type target: int :rtype: List[int]\"\"\"\n <|body_0|>\n\n def threeSum(self, nums):\n \"\"\":type nums: List[int] :rtype: List[List[int]]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def twoSum(self, nums, target, leftnum):\n \"\"\":type nums: List[int] :type target: int :rtype: List[int]\"\"\"\n if len(nums) <= 1:\n return False\n buff_dict = {}\n for i in range(len(nums)):\n if i == leftnum:\n continue\n if nums[i] in buff_dict:\n return [buff_dict[nums[i]], i]\n else:\n buff_dict[target - nums[i]] = i\n\n def threeSum(self, nums):\n \"\"\":type nums: List[int] :rtype: List[List[int]]\"\"\"\n leftnum = 0\n numstack = []\n for index in range(len(nums)):\n numstack.append(self.twoSum(Solution, nums, -nums[index], index))\n return numstack\n", "source": "the_stack_v2_python_sparse", "source_path": "3 sum.py", "source_repo": "hyperion-mk2/git", "split": "val", "star_events_count": 0}
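Editor's note: as stored, the record's threeSum cannot work -- it calls self.twoSum(Solution, nums, -nums[index], index), so the class object itself is bound to the nums parameter, and no deduplication or filtering of failed lookups is attempted. Below is a self-contained sketch of the usual sort-and-two-pointer 3-sum, shown as an alternative rather than a repair of the record.

def three_sum(nums):
    # Sort once, then anchor each index i and scan the suffix with two pointers.
    nums = sorted(nums)
    out = []
    for i in range(len(nums) - 2):
        if i > 0 and nums[i] == nums[i - 1]:
            continue  # skip duplicate anchors
        lo, hi = i + 1, len(nums) - 1
        while lo < hi:
            s = nums[i] + nums[lo] + nums[hi]
            if s < 0:
                lo += 1
            elif s > 0:
                hi -= 1
            else:
                out.append([nums[i], nums[lo], nums[hi]])
                while lo < hi and nums[lo] == nums[lo + 1]:
                    lo += 1  # skip duplicate second elements
                while lo < hi and nums[hi] == nums[hi - 1]:
                    hi -= 1  # skip duplicate third elements
                lo += 1
                hi -= 1
    return out

print(three_sum([-1, 0, 1, 2, -1, -4]))  # [[-1, -1, 2], [-1, 0, 1]]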
{"blob_id": "2bf78e04b1dbf4dd052f9358942a9a23d3c0f141", "bodies": ["self.dn = np.zeros((nsteps, nfrac))\nself.nav = float(natoms) / frac ** 2\nreturn", "for j in range(nfrac):\n hist, xedges, yedges = np.histogram2d(xs, ys, bins=frac[j], range=[[0, 1], [0, 1]])\n std = np.std(hist)\n self.dn[step, j] = std\nif plot == 'True':\n plt.loglog(self.nav, self.dn[step], marker='o')\n plt.show()\n plt.close()\nreturn"], "bodies_text": "<|body_start_0|>\n self.dn = np.zeros((nsteps, nfrac))\n self.nav = float(natoms) / frac ** 2\n return\n<|end_body_0|>\n\n<|body_start_1|>\n for j in range(nfrac):\n hist, xedges, yedges = np.histogram2d(xs, ys, bins=frac[j], range=[[0, 1], [0, 1]])\n std = np.std(hist)\n self.dn[step, j] = std\n if plot == 'True':\n plt.loglog(self.nav, self.dn[step], marker='o')\n plt.show()\n plt.close()\n return\n<|end_body_1|>\n", "class_docstring": "", "class_name": "NumberFluctuation", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass NumberFluctuation:\n\n def __init__(self, nsteps, natoms, line):\n \"\"\"initialize: allocate arrays to store results\"\"\"\n <|body_0|>\n\n def compute(self, step, xs, ys, plot='False'):\n \"\"\"compute number fluctuations\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.dn = np.zeros((nsteps, nfrac))\n self.nav = float(natoms) / frac ** 2\n return\n<|end_body_0|>\n\n<|body_start_1|>\n for j in range(nfrac):\n hist, xedges, yedges = np.histogram2d(xs, ys, bins=frac[j], range=[[0, 1], [0, 1]])\n std = np.std(hist)\n self.dn[step, j] = std\n if plot == 'True':\n plt.loglog(self.nav, self.dn[step], marker='o')\n plt.show()\n plt.close()\n return\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000083", "length_bytes": 1343, "license_type": "no_license", "methods": [{"docstring": "initialize: allocate arrays to store results", "name": "__init__", "signature": "def __init__(self, nsteps, natoms, line)"}, {"docstring": "compute number fluctuations", "name": "compute", "signature": "def compute(self, step, xs, ys, plot='False')"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_054387", "prompt": "Implement the Python class `NumberFluctuation` described below.\n\nClass description:\nImplement the NumberFluctuation class.\n\nMethod signatures and docstrings:\n- def __init__(self, nsteps, natoms, line): initialize: allocate arrays to store results\n- def compute(self, step, xs, ys, plot='False'): compute number fluctuations", "prompted_full_text": "Implement the Python class `NumberFluctuation` described below.\n\nClass description:\nImplement the NumberFluctuation class.\n\nMethod signatures and docstrings:\n- def __init__(self, nsteps, natoms, line): initialize: allocate arrays to store results\n- def compute(self, step, xs, ys, plot='False'): compute number fluctuations\n\n<|skeleton|>\nclass NumberFluctuation:\n\n def __init__(self, nsteps, natoms, line):\n \"\"\"initialize: allocate arrays to store results\"\"\"\n <|body_0|>\n\n def compute(self, step, xs, ys, plot='False'):\n \"\"\"compute number fluctuations\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.dn = np.zeros((nsteps, nfrac))\n self.nav = float(natoms) / frac ** 2\n return\n<|end_body_0|>\n\n<|body_start_1|>\n for j in range(nfrac):\n hist, xedges, yedges = np.histogram2d(xs, ys, bins=frac[j], range=[[0, 1], [0, 1]])\n std = np.std(hist)\n self.dn[step, j] = std\n if plot == 'True':\n plt.loglog(self.nav, self.dn[step], marker='o')\n plt.show()\n 
plt.close()\n return\n<|end_body_1|>\n", "revision_id": "7d2659bee85c955c680eda019cbff6e2b93ecff2", "skeleton": "<|skeleton|>\nclass NumberFluctuation:\n\n def __init__(self, nsteps, natoms, line):\n \"\"\"initialize: allocate arrays to store results\"\"\"\n <|body_0|>\n\n def compute(self, step, xs, ys, plot='False'):\n \"\"\"compute number fluctuations\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class NumberFluctuation:\n def __init__(self, nsteps, natoms, line):\n \"\"\"initialize: allocate arrays to store results\"\"\"\n self.dn = np.zeros((nsteps, nfrac))\n self.nav = float(natoms) / frac ** 2\n return\n\n def compute(self, step, xs, ys, plot='False'):\n \"\"\"compute number fluctuations\"\"\"\n for j in range(nfrac):\n hist, xedges, yedges = np.histogram2d(xs, ys, bins=frac[j], range=[[0, 1], [0, 1]])\n std = np.std(hist)\n self.dn[step, j] = std\n if plot == 'True':\n plt.loglog(self.nav, self.dn[step], marker='o')\n plt.show()\n plt.close()\n return\n", "source": "the_stack_v2_python_sparse", "source_path": "analyse_collective/numberfluctuation.py", "source_repo": "melampyge/CollectiveFilament", "split": "val", "star_events_count": 0}
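Editor's note: NumberFluctuation reads module-level names (np, plt, frac, nfrac) that this blob never defines. A sketch of the globals a driver would have to supply before instantiating the class; the grid resolutions in frac are purely illustrative.

import numpy as np
import matplotlib.pyplot as plt  # only exercised when compute(..., plot='True')

# Box-counting grid resolutions the class indexes as frac[j]; invented values.
frac = np.array([2, 4, 8, 16])
nfrac = len(frac)

# With the record's class in scope:
# nf = NumberFluctuation(nsteps=100, natoms=5000, line=None)
# nf.compute(step=0, xs=np.random.rand(5000), ys=np.random.rand(5000))

Note that compute() tests plot against the string 'True', not a boolean, so a caller must pass plot='True' verbatim to get the log-log plot.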
{"blob_id": "c4cbb75a4fc0deb33fbada77ea48c1ca933116b3", "bodies": ["try:\n self._client = mongo.MongoClient(configuration['hostname'], configuration['port'])\n self._db = self._client[configuration['bdname']]\nexcept mongo.errors.PyMongoError:\n raise Exception('Error initializing database')", "try:\n mongo.collection.Collection(self._db, collection_name, create=True)\nexcept mongo.errors.PyMongoError:\n raise Exception('collection already exist')", "try:\n return self._db[collection_name].insert_one(item)\nexcept mongo.errors.PyMongoError:\n raise Exception('failed to insert data')", "try:\n return self._db[collection_name].find({'videoId': video_id}, projection)\nexcept mongo.errors.PyMongoError:\n raise Exception('failed to find video by id')"], "bodies_text": "<|body_start_0|>\n try:\n self._client = mongo.MongoClient(configuration['hostname'], configuration['port'])\n self._db = self._client[configuration['bdname']]\n except mongo.errors.PyMongoError:\n raise Exception('Error initializing database')\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n mongo.collection.Collection(self._db, collection_name, create=True)\n except mongo.errors.PyMongoError:\n raise Exception('collection already exist')\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n return self._db[collection_name].insert_one(item)\n except mongo.errors.PyMongoError:\n raise Exception('failed to insert data')\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n return self._db[collection_name].find({'videoId': video_id}, projection)\n except mongo.errors.PyMongoError:\n raise Exception('failed to find video by id')\n<|end_body_3|>\n", "class_docstring": "", "class_name": "Database", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Database:\n\n def __init__(self, configuration):\n \"\"\":param configuration: :param logger:\"\"\"\n <|body_0|>\n\n def create_store(self, collection_name):\n \"\"\"this method create a collection in mongodb database :param collection_name: the collection name\"\"\"\n <|body_1|>\n\n def insert(self, collection_name, item):\n \"\"\":param collection_name: :param item: :return :generated for keys of inserted elements if success else false\"\"\"\n <|body_2|>\n\n def find_by_video_id(self, collection_name, video_id, projection={}):\n \"\"\":param collection_name: :param video_id: :param kvargs: {\"key\":val,...}possible key=\"comment\",\"videoId\",\"created_at\",\"author\", \"lang\", \"likes\",\"_id\" possible val:1 to fetch,0 to ignore :return:\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n self._client = mongo.MongoClient(configuration['hostname'], configuration['port'])\n self._db = self._client[configuration['bdname']]\n except mongo.errors.PyMongoError:\n raise Exception('Error initializing database')\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n mongo.collection.Collection(self._db, collection_name, create=True)\n except mongo.errors.PyMongoError:\n raise Exception('collection already exist')\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n return self._db[collection_name].insert_one(item)\n except mongo.errors.PyMongoError:\n raise Exception('failed to insert data')\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n return self._db[collection_name].find({'videoId': video_id}, projection)\n except mongo.errors.PyMongoError:\n raise Exception('failed to find video by id')\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000084", "length_bytes": 1706, "license_type": "no_license", "methods": [{"docstring": ":param 
configuration: :param logger:", "name": "__init__", "signature": "def __init__(self, configuration)"}, {"docstring": "this method create a collection in mongodb database :param collection_name: the collection name", "name": "create_store", "signature": "def create_store(self, collection_name)"}, {"docstring": ":param collection_name: :param item: :return :generated for keys of inserted elements if success else false", "name": "insert", "signature": "def insert(self, collection_name, item)"}, {"docstring": ":param collection_name: :param video_id: :param kvargs: {\"key\":val,...}possible key=\"comment\",\"videoId\",\"created_at\",\"author\", \"lang\", \"likes\",\"_id\" possible val:1 to fetch,0 to ignore :return:", "name": "find_by_video_id", "signature": "def find_by_video_id(self, collection_name, video_id, projection={})"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_036208", "prompt": "Implement the Python class `Database` described below.\n\nClass description:\nImplement the Database class.\n\nMethod signatures and docstrings:\n- def __init__(self, configuration): :param configuration: :param logger:\n- def create_store(self, collection_name): this method create a collection in mongodb database :param collection_name: the collection name\n- def insert(self, collection_name, item): :param collection_name: :param item: :return :generated for keys of inserted elements if success else false\n- def find_by_video_id(self, collection_name, video_id, projection={}): :param collection_name: :param video_id: :param kvargs: {\"key\":val,...}possible key=\"comment\",\"videoId\",\"created_at\",\"author\", \"lang\", \"likes\",\"_id\" possible val:1 to fetch,0 to ignore :return:", "prompted_full_text": "Implement the Python class `Database` described below.\n\nClass description:\nImplement the Database class.\n\nMethod signatures and docstrings:\n- def __init__(self, configuration): :param configuration: :param logger:\n- def create_store(self, collection_name): this method create a collection in mongodb database :param collection_name: the collection name\n- def insert(self, collection_name, item): :param collection_name: :param item: :return :generated for keys of inserted elements if success else false\n- def find_by_video_id(self, collection_name, video_id, projection={}): :param collection_name: :param video_id: :param kvargs: {\"key\":val,...}possible key=\"comment\",\"videoId\",\"created_at\",\"author\", \"lang\", \"likes\",\"_id\" possible val:1 to fetch,0 to ignore :return:\n\n<|skeleton|>\nclass Database:\n\n def __init__(self, configuration):\n \"\"\":param configuration: :param logger:\"\"\"\n <|body_0|>\n\n def create_store(self, collection_name):\n \"\"\"this method create a collection in mongodb database :param collection_name: the collection name\"\"\"\n <|body_1|>\n\n def insert(self, collection_name, item):\n \"\"\":param collection_name: :param item: :return :generated for keys of inserted elements if success else false\"\"\"\n <|body_2|>\n\n def find_by_video_id(self, collection_name, video_id, projection={}):\n \"\"\":param collection_name: :param video_id: :param kvargs: {\"key\":val,...}possible key=\"comment\",\"videoId\",\"created_at\",\"author\", \"lang\", \"likes\",\"_id\" possible val:1 to fetch,0 to ignore :return:\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n self._client = mongo.MongoClient(configuration['hostname'], configuration['port'])\n self._db = self._client[configuration['bdname']]\n except 
mongo.errors.PyMongoError:\n raise Exception('Error initializing database')\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n mongo.collection.Collection(self._db, collection_name, create=True)\n except mongo.errors.PyMongoError:\n raise Exception('collection already exist')\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n return self._db[collection_name].insert_one(item)\n except mongo.errors.PyMongoError:\n raise Exception('failed to insert data')\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n return self._db[collection_name].find({'videoId': video_id}, projection)\n except mongo.errors.PyMongoError:\n raise Exception('failed to find video by id')\n<|end_body_3|>\n", "revision_id": "a7eb1019e62a0bdce52a4547e1c829aa0cfd28c2", "skeleton": "<|skeleton|>\nclass Database:\n\n def __init__(self, configuration):\n \"\"\":param configuration: :param logger:\"\"\"\n <|body_0|>\n\n def create_store(self, collection_name):\n \"\"\"this method create a collection in mongodb database :param collection_name: the collection name\"\"\"\n <|body_1|>\n\n def insert(self, collection_name, item):\n \"\"\":param collection_name: :param item: :return :generated for keys of inserted elements if success else false\"\"\"\n <|body_2|>\n\n def find_by_video_id(self, collection_name, video_id, projection={}):\n \"\"\":param collection_name: :param video_id: :param kvargs: {\"key\":val,...}possible key=\"comment\",\"videoId\",\"created_at\",\"author\", \"lang\", \"likes\",\"_id\" possible val:1 to fetch,0 to ignore :return:\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Database:\n def __init__(self, configuration):\n \"\"\":param configuration: :param logger:\"\"\"\n try:\n self._client = mongo.MongoClient(configuration['hostname'], configuration['port'])\n self._db = self._client[configuration['bdname']]\n except mongo.errors.PyMongoError:\n raise Exception('Error initializing database')\n\n def create_store(self, collection_name):\n \"\"\"this method create a collection in mongodb database :param collection_name: the collection name\"\"\"\n try:\n mongo.collection.Collection(self._db, collection_name, create=True)\n except mongo.errors.PyMongoError:\n raise Exception('collection already exist')\n\n def insert(self, collection_name, item):\n \"\"\":param collection_name: :param item: :return :generated for keys of inserted elements if success else false\"\"\"\n try:\n return self._db[collection_name].insert_one(item)\n except mongo.errors.PyMongoError:\n raise Exception('failed to insert data')\n\n def find_by_video_id(self, collection_name, video_id, projection={}):\n \"\"\":param collection_name: :param video_id: :param kvargs: {\"key\":val,...}possible key=\"comment\",\"videoId\",\"created_at\",\"author\", \"lang\", \"likes\",\"_id\" possible val:1 to fetch,0 to ignore :return:\"\"\"\n try:\n return self._db[collection_name].find({'videoId': video_id}, projection)\n except mongo.errors.PyMongoError:\n raise Exception('failed to find video by id')\n", "source": "the_stack_v2_python_sparse", "source_path": "src/datastore/database.py", "source_repo": "mhamedLmarbouh/yt-Gender-Resolver", "split": "val", "star_events_count": 0}
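Editor's note: a usage sketch for the Database wrapper above. The configuration keys (hostname, port, bdname) come from the record; the values are invented, and a running MongoDB instance is assumed. Two quirks worth knowing: pymongo's MongoClient connects lazily, so the try/except in __init__ will rarely fire, and the mutable default projection={} is shared across calls to find_by_video_id.

import pymongo as mongo  # the record refers to the pymongo package under the alias `mongo`

config = {'hostname': 'localhost', 'port': 27017, 'bdname': 'videos'}  # illustrative values

# With the record's Database class in scope:
# db = Database(config)
# db.create_store('comments')
# db.insert('comments', {'videoId': 'abc123', 'comment': 'nice video', 'likes': 3})
# for doc in db.find_by_video_id('comments', 'abc123', projection={'_id': 0, 'comment': 1}):
#     print(doc)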
{"blob_id": "55a24d63dd81ce9593d2616a51371e28b489d4be", "bodies": ["if len(s) != len(t):\n return False\nd = {}\nfor c in s:\n if c in d:\n d[c] += 1\n else:\n d[c] = 1\nfor c in t:\n if c not in d:\n return False\n else:\n d[c] -= 1\n if d[c] < 0:\n return False\nreturn True", "if len(s) != len(t):\n return False\nd = dict.fromkeys(string.ascii_lowercase, 0)\nfor e in s:\n if e in d:\n d[e] += 1\nfor i in t:\n if i in d:\n d[i] -= 1\nfor ele in d:\n if d[ele] != 0:\n return False\nreturn True"], "bodies_text": "<|body_start_0|>\n if len(s) != len(t):\n return False\n d = {}\n for c in s:\n if c in d:\n d[c] += 1\n else:\n d[c] = 1\n for c in t:\n if c not in d:\n return False\n else:\n d[c] -= 1\n if d[c] < 0:\n return False\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n if len(s) != len(t):\n return False\n d = dict.fromkeys(string.ascii_lowercase, 0)\n for e in s:\n if e in d:\n d[e] += 1\n for i in t:\n if i in d:\n d[i] -= 1\n for ele in d:\n if d[ele] != 0:\n return False\n return True\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def isAnagram1(self, s, t):\n \"\"\":type s: str :type t: str :rtype: bool\"\"\"\n <|body_0|>\n\n def isAnagram2(self, s, t):\n \"\"\":type s: str :type t: str :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if len(s) != len(t):\n return False\n d = {}\n for c in s:\n if c in d:\n d[c] += 1\n else:\n d[c] = 1\n for c in t:\n if c not in d:\n return False\n else:\n d[c] -= 1\n if d[c] < 0:\n return False\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n if len(s) != len(t):\n return False\n d = dict.fromkeys(string.ascii_lowercase, 0)\n for e in s:\n if e in d:\n d[e] += 1\n for i in t:\n if i in d:\n d[i] -= 1\n for ele in d:\n if d[ele] != 0:\n return False\n return True\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000085", "length_bytes": 1919, "license_type": "no_license", "methods": [{"docstring": ":type s: str :type t: str :rtype: bool", "name": "isAnagram1", "signature": "def isAnagram1(self, s, t)"}, {"docstring": ":type s: str :type t: str :rtype: bool", "name": "isAnagram2", "signature": "def isAnagram2(self, s, t)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_009150", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def isAnagram1(self, s, t): :type s: str :type t: str :rtype: bool\n- def isAnagram2(self, s, t): :type s: str :type t: str :rtype: bool", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def isAnagram1(self, s, t): :type s: str :type t: str :rtype: bool\n- def isAnagram2(self, s, t): :type s: str :type t: str :rtype: bool\n\n<|skeleton|>\nclass Solution:\n\n def isAnagram1(self, s, t):\n \"\"\":type s: str :type t: str :rtype: bool\"\"\"\n <|body_0|>\n\n def isAnagram2(self, s, t):\n \"\"\":type s: str :type t: str :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if len(s) != len(t):\n return False\n d = {}\n for c in s:\n if c in d:\n d[c] += 1\n else:\n d[c] = 1\n for c in t:\n if c not in d:\n return False\n else:\n d[c] -= 1\n if d[c] < 0:\n return False\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n if len(s) != len(t):\n return False\n d = 
dict.fromkeys(string.ascii_lowercase, 0)\n for e in s:\n if e in d:\n d[e] += 1\n for i in t:\n if i in d:\n d[i] -= 1\n for ele in d:\n if d[ele] != 0:\n return False\n return True\n<|end_body_1|>\n", "revision_id": "813235789ce422a3bab198317aafc46fbc61625e", "skeleton": "<|skeleton|>\nclass Solution:\n\n def isAnagram1(self, s, t):\n \"\"\":type s: str :type t: str :rtype: bool\"\"\"\n <|body_0|>\n\n def isAnagram2(self, s, t):\n \"\"\":type s: str :type t: str :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def isAnagram1(self, s, t):\n \"\"\":type s: str :type t: str :rtype: bool\"\"\"\n if len(s) != len(t):\n return False\n d = {}\n for c in s:\n if c in d:\n d[c] += 1\n else:\n d[c] = 1\n for c in t:\n if c not in d:\n return False\n else:\n d[c] -= 1\n if d[c] < 0:\n return False\n return True\n\n def isAnagram2(self, s, t):\n \"\"\":type s: str :type t: str :rtype: bool\"\"\"\n if len(s) != len(t):\n return False\n d = dict.fromkeys(string.ascii_lowercase, 0)\n for e in s:\n if e in d:\n d[e] += 1\n for i in t:\n if i in d:\n d[i] -= 1\n for ele in d:\n if d[ele] != 0:\n return False\n return True\n", "source": "the_stack_v2_python_sparse", "source_path": "13.HASH MAP/242_valid_anagram/solution.py", "source_repo": "kimmyoo/python_leetcode", "split": "val", "star_events_count": 1}
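Editor's note: both hand-rolled variants above collapse to a standard-library multiset comparison. Unlike isAnagram2, which silently ignores any character outside string.ascii_lowercase, Counter compares the full character multisets.

from collections import Counter

def is_anagram(s, t):
    # Two strings are anagrams iff their character counts match exactly.
    return Counter(s) == Counter(t)

print(is_anagram('anagram', 'nagaram'))  # True
print(is_anagram('rat', 'car'))          # False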
{"blob_id": "451a1a199b2308ee8cbb82c3d62745e1c1504f0a", "bodies": ["rob_record = {}\n\ndef rob_tree(root):\n if not root:\n return 0\n if root in rob_record:\n return rob_record[root]\n result = max(rob_child(root), root.val + rob_child(root.left) + rob_child(root.right))\n rob_record[root] = result\n return result\n\ndef rob_child(node):\n if not node:\n return 0\n return rob_tree(node.left) + rob_tree(node.right)\nreturn rob_tree(root)", "def rob3(root):\n if not root:\n return (0, 0, 0)\n l, ll, lr = rob3(root.left)\n r, rl, rr = rob3(root.right)\n return (max(root.val + ll + lr + rl + rr, l + r), l, r)\nreturn rob3(root)[0]"], "bodies_text": "<|body_start_0|>\n rob_record = {}\n\n def rob_tree(root):\n if not root:\n return 0\n if root in rob_record:\n return rob_record[root]\n result = max(rob_child(root), root.val + rob_child(root.left) + rob_child(root.right))\n rob_record[root] = result\n return result\n\n def rob_child(node):\n if not node:\n return 0\n return rob_tree(node.left) + rob_tree(node.right)\n return rob_tree(root)\n<|end_body_0|>\n\n<|body_start_1|>\n def rob3(root):\n if not root:\n return (0, 0, 0)\n l, ll, lr = rob3(root.left)\n r, rl, rr = rob3(root.right)\n return (max(root.val + ll + lr + rl + rr, l + r), l, r)\n return rob3(root)[0]\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def rob(self, root):\n \"\"\":type root: TreeNode :rtype: int\"\"\"\n <|body_0|>\n\n def rob(self, root):\n \"\"\":type root: TreeNode :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n rob_record = {}\n\n def rob_tree(root):\n if not root:\n return 0\n if root in rob_record:\n return rob_record[root]\n result = max(rob_child(root), root.val + rob_child(root.left) + rob_child(root.right))\n rob_record[root] = result\n return result\n\n def rob_child(node):\n if not node:\n return 0\n return rob_tree(node.left) + rob_tree(node.right)\n return rob_tree(root)\n<|end_body_0|>\n\n<|body_start_1|>\n def rob3(root):\n if not root:\n return (0, 0, 0)\n l, ll, lr = rob3(root.left)\n r, rl, rr = rob3(root.right)\n return (max(root.val + ll + lr + rl + rr, l + r), l, r)\n return rob3(root)[0]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000086", "length_bytes": 1727, "license_type": "no_license", "methods": [{"docstring": ":type root: TreeNode :rtype: int", "name": "rob", "signature": "def rob(self, root)"}, {"docstring": ":type root: TreeNode :rtype: int", "name": "rob", "signature": "def rob(self, root)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_017892", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def rob(self, root): :type root: TreeNode :rtype: int\n- def rob(self, root): :type root: TreeNode :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def rob(self, root): :type root: TreeNode :rtype: int\n- def rob(self, root): :type root: TreeNode :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def rob(self, root):\n \"\"\":type root: TreeNode :rtype: int\"\"\"\n <|body_0|>\n\n def rob(self, root):\n \"\"\":type root: TreeNode :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n rob_record = {}\n\n def rob_tree(root):\n if 
not root:\n return 0\n if root in rob_record:\n return rob_record[root]\n result = max(rob_child(root), root.val + rob_child(root.left) + rob_child(root.right))\n rob_record[root] = result\n return result\n\n def rob_child(node):\n if not node:\n return 0\n return rob_tree(node.left) + rob_tree(node.right)\n return rob_tree(root)\n<|end_body_0|>\n\n<|body_start_1|>\n def rob3(root):\n if not root:\n return (0, 0, 0)\n l, ll, lr = rob3(root.left)\n r, rl, rr = rob3(root.right)\n return (max(root.val + ll + lr + rl + rr, l + r), l, r)\n return rob3(root)[0]\n<|end_body_1|>\n", "revision_id": "6475851d21ef5312727f93b9f4e85a3ca1e79bb8", "skeleton": "<|skeleton|>\nclass Solution:\n\n def rob(self, root):\n \"\"\":type root: TreeNode :rtype: int\"\"\"\n <|body_0|>\n\n def rob(self, root):\n \"\"\":type root: TreeNode :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def rob(self, root):\n \"\"\":type root: TreeNode :rtype: int\"\"\"\n rob_record = {}\n\n def rob_tree(root):\n if not root:\n return 0\n if root in rob_record:\n return rob_record[root]\n result = max(rob_child(root), root.val + rob_child(root.left) + rob_child(root.right))\n rob_record[root] = result\n return result\n\n def rob_child(node):\n if not node:\n return 0\n return rob_tree(node.left) + rob_tree(node.right)\n return rob_tree(root)\n\n def rob(self, root):\n \"\"\":type root: TreeNode :rtype: int\"\"\"\n def rob3(root):\n if not root:\n return (0, 0, 0)\n l, ll, lr = rob3(root.left)\n r, rl, rr = rob3(root.right)\n return (max(root.val + ll + lr + rl + rr, l + r), l, r)\n return rob3(root)[0]\n", "source": "the_stack_v2_python_sparse", "source_path": "src/0337-house-robber-iii.py", "source_repo": "chaneyzorn/LeetCode-Python", "split": "val", "star_events_count": 0}
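Editor's note: both rob variants above assume a TreeNode with val/left/right that the blob omits (the first additionally memoizes on node identity, so nodes must be hashable). Below is a minimal stand-in plus the standard skip/take pair formulation, which avoids both the memo dict of the first variant and the triple-threading of the second.

class TreeNode:
    # Minimal stand-in for the LeetCode node the record assumes.
    def __init__(self, val, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

def rob(root):
    # For each node return (best if node skipped, best if node taken):
    # taking a node forbids taking its children.
    def walk(node):
        if not node:
            return 0, 0
        skip_l, take_l = walk(node.left)
        skip_r, take_r = walk(node.right)
        skip = max(skip_l, take_l) + max(skip_r, take_r)
        take = node.val + skip_l + skip_r
        return skip, take
    return max(walk(root))

tree = TreeNode(3, TreeNode(2, right=TreeNode(3)), TreeNode(3, right=TreeNode(1)))
print(rob(tree))  # 7  (rob the root plus both grandchildren: 3 + 3 + 1)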
{"blob_id": "c919106cbc0d924e5fd62bf8f1e30203b427ff16", "bodies": ["self.Wf = np.random.normal(size=(h + i, h))\nself.Wu = np.random.normal(size=(h + i, h))\nself.Wc = np.random.normal(size=(h + i, h))\nself.Wo = np.random.normal(size=(h + i, h))\nself.Wy = np.random.normal(size=(h, o))\nself.bo = np.zeros((1, h))\nself.bc = np.zeros((1, h))\nself.bu = np.zeros((1, h))\nself.bf = np.zeros((1, h))\nself.by = np.zeros((1, o))", "con = np.concatenate((h_prev, x_t), axis=1)\nx = np.matmul(con, self.Wf) + self.bf\nforget = 1 / (1 + np.exp(-x))\nx = np.matmul(con, self.Wu) + self.bu\nupdate = 1 / (1 + np.exp(-x))\nx = np.matmul(con, self.Wo) + self.bo\nout = 1 / (1 + np.exp(-x))\nc = np.tanh(np.matmul(con, self.Wc) + self.bc)\nc_next = forget * c_prev + c * update\nh_next = out * np.tanh(c_next)\nsoft = np.matmul(h_next, self.Wy) + self.by\nx_max = np.max(soft, axis=-1, keepdims=True)\nx_exp = np.exp(soft - x_max)\ny = x_exp / x_exp.sum(axis=-1, keepdims=True)\nreturn (h_next, c_next, y)"], "bodies_text": "<|body_start_0|>\n self.Wf = np.random.normal(size=(h + i, h))\n self.Wu = np.random.normal(size=(h + i, h))\n self.Wc = np.random.normal(size=(h + i, h))\n self.Wo = np.random.normal(size=(h + i, h))\n self.Wy = np.random.normal(size=(h, o))\n self.bo = np.zeros((1, h))\n self.bc = np.zeros((1, h))\n self.bu = np.zeros((1, h))\n self.bf = np.zeros((1, h))\n self.by = np.zeros((1, o))\n<|end_body_0|>\n\n<|body_start_1|>\n con = np.concatenate((h_prev, x_t), axis=1)\n x = np.matmul(con, self.Wf) + self.bf\n forget = 1 / (1 + np.exp(-x))\n x = np.matmul(con, self.Wu) + self.bu\n update = 1 / (1 + np.exp(-x))\n x = np.matmul(con, self.Wo) + self.bo\n out = 1 / (1 + np.exp(-x))\n c = np.tanh(np.matmul(con, self.Wc) + self.bc)\n c_next = forget * c_prev + c * update\n h_next = out * np.tanh(c_next)\n soft = np.matmul(h_next, self.Wy) + self.by\n x_max = np.max(soft, axis=-1, keepdims=True)\n x_exp = np.exp(soft - x_max)\n y = x_exp / x_exp.sum(axis=-1, keepdims=True)\n return (h_next, c_next, y)\n<|end_body_1|>\n", "class_docstring": "epresents an LSTM unit", "class_name": "LSTMCell", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LSTMCell:\n \"\"\"epresents an LSTM unit\"\"\"\n\n def __init__(self, i, h, o):\n \"\"\"Constructor @i: dimensionality of the data @h: dimensionality of the hidden state @o: dimensionality of the outputs public instance attributes Wf, Wu, Wc, Wo, Wy, bf, bu, bc, bo, by weights and biases @Wf and bf are for the forget gate @Wu and bu are for the update gate @Wc and bc are for the intermediate cell state @Wo and bo are for the output gate @Wy and by are for the outputs\"\"\"\n <|body_0|>\n\n def forward(self, h_prev, c_prev, x_t):\n \"\"\"performs forward propagation for one time step @h_prev: np.ndarray shape(m, h) containing the previous hidden state @x_t: np.ndarray of shape(m, i) containing the data input for the cell @m: batche size for the data Retrun: h_next, c_next, y @h_next is the next hidden state @y: output of the cell\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.Wf = np.random.normal(size=(h + i, h))\n self.Wu = np.random.normal(size=(h + i, h))\n self.Wc = np.random.normal(size=(h + i, h))\n self.Wo = np.random.normal(size=(h + i, h))\n self.Wy = np.random.normal(size=(h, o))\n self.bo = np.zeros((1, h))\n self.bc = np.zeros((1, h))\n self.bu = np.zeros((1, h))\n self.bf = np.zeros((1, h))\n self.by = np.zeros((1, o))\n<|end_body_0|>\n\n<|body_start_1|>\n con = 
np.concatenate((h_prev, x_t), axis=1)\n x = np.matmul(con, self.Wf) + self.bf\n forget = 1 / (1 + np.exp(-x))\n x = np.matmul(con, self.Wu) + self.bu\n update = 1 / (1 + np.exp(-x))\n x = np.matmul(con, self.Wo) + self.bo\n out = 1 / (1 + np.exp(-x))\n c = np.tanh(np.matmul(con, self.Wc) + self.bc)\n c_next = forget * c_prev + c * update\n h_next = out * np.tanh(c_next)\n soft = np.matmul(h_next, self.Wy) + self.by\n x_max = np.max(soft, axis=-1, keepdims=True)\n x_exp = np.exp(soft - x_max)\n y = x_exp / x_exp.sum(axis=-1, keepdims=True)\n return (h_next, c_next, y)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000087", "length_bytes": 2339, "license_type": "no_license", "methods": [{"docstring": "Constructor @i: dimensionality of the data @h: dimensionality of the hidden state @o: dimensionality of the outputs public instance attributes Wf, Wu, Wc, Wo, Wy, bf, bu, bc, bo, by weights and biases @Wf and bf are for the forget gate @Wu and bu are for the update gate @Wc and bc are for the intermediate cell state @Wo and bo are for the output gate @Wy and by are for the outputs", "name": "__init__", "signature": "def __init__(self, i, h, o)"}, {"docstring": "performs forward propagation for one time step @h_prev: np.ndarray shape(m, h) containing the previous hidden state @x_t: np.ndarray of shape(m, i) containing the data input for the cell @m: batche size for the data Retrun: h_next, c_next, y @h_next is the next hidden state @y: output of the cell", "name": "forward", "signature": "def forward(self, h_prev, c_prev, x_t)"}], "n_methods": 2, "prompt": "Implement the Python class `LSTMCell` described below.\n\nClass description:\nepresents an LSTM unit\n\nMethod signatures and docstrings:\n- def __init__(self, i, h, o): Constructor @i: dimensionality of the data @h: dimensionality of the hidden state @o: dimensionality of the outputs public instance attributes Wf, Wu, Wc, Wo, Wy, bf, bu, bc, bo, by weights and biases @Wf and bf are for the forget gate @Wu and bu are for the update gate @Wc and bc are for the intermediate cell state @Wo and bo are for the output gate @Wy and by are for the outputs\n- def forward(self, h_prev, c_prev, x_t): performs forward propagation for one time step @h_prev: np.ndarray shape(m, h) containing the previous hidden state @x_t: np.ndarray of shape(m, i) containing the data input for the cell @m: batche size for the data Retrun: h_next, c_next, y @h_next is the next hidden state @y: output of the cell", "prompted_full_text": "Implement the Python class `LSTMCell` described below.\n\nClass description:\nepresents an LSTM unit\n\nMethod signatures and docstrings:\n- def __init__(self, i, h, o): Constructor @i: dimensionality of the data @h: dimensionality of the hidden state @o: dimensionality of the outputs public instance attributes Wf, Wu, Wc, Wo, Wy, bf, bu, bc, bo, by weights and biases @Wf and bf are for the forget gate @Wu and bu are for the update gate @Wc and bc are for the intermediate cell state @Wo and bo are for the output gate @Wy and by are for the outputs\n- def forward(self, h_prev, c_prev, x_t): performs forward propagation for one time step @h_prev: np.ndarray shape(m, h) containing the previous hidden state @x_t: np.ndarray of shape(m, i) containing the data input for the cell @m: batche size for the data Retrun: h_next, c_next, y @h_next is the next hidden state @y: output of the cell\n\n<|skeleton|>\nclass LSTMCell:\n \"\"\"epresents an LSTM unit\"\"\"\n\n def __init__(self, i, h, o):\n \"\"\"Constructor @i: dimensionality of 
the data @h: dimensionality of the hidden state @o: dimensionality of the outputs public instance attributes Wf, Wu, Wc, Wo, Wy, bf, bu, bc, bo, by weights and biases @Wf and bf are for the forget gate @Wu and bu are for the update gate @Wc and bc are for the intermediate cell state @Wo and bo are for the output gate @Wy and by are for the outputs\"\"\"\n <|body_0|>\n\n def forward(self, h_prev, c_prev, x_t):\n \"\"\"performs forward propagation for one time step @h_prev: np.ndarray shape(m, h) containing the previous hidden state @x_t: np.ndarray of shape(m, i) containing the data input for the cell @m: batche size for the data Retrun: h_next, c_next, y @h_next is the next hidden state @y: output of the cell\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.Wf = np.random.normal(size=(h + i, h))\n self.Wu = np.random.normal(size=(h + i, h))\n self.Wc = np.random.normal(size=(h + i, h))\n self.Wo = np.random.normal(size=(h + i, h))\n self.Wy = np.random.normal(size=(h, o))\n self.bo = np.zeros((1, h))\n self.bc = np.zeros((1, h))\n self.bu = np.zeros((1, h))\n self.bf = np.zeros((1, h))\n self.by = np.zeros((1, o))\n<|end_body_0|>\n\n<|body_start_1|>\n con = np.concatenate((h_prev, x_t), axis=1)\n x = np.matmul(con, self.Wf) + self.bf\n forget = 1 / (1 + np.exp(-x))\n x = np.matmul(con, self.Wu) + self.bu\n update = 1 / (1 + np.exp(-x))\n x = np.matmul(con, self.Wo) + self.bo\n out = 1 / (1 + np.exp(-x))\n c = np.tanh(np.matmul(con, self.Wc) + self.bc)\n c_next = forget * c_prev + c * update\n h_next = out * np.tanh(c_next)\n soft = np.matmul(h_next, self.Wy) + self.by\n x_max = np.max(soft, axis=-1, keepdims=True)\n x_exp = np.exp(soft - x_max)\n y = x_exp / x_exp.sum(axis=-1, keepdims=True)\n return (h_next, c_next, y)\n<|end_body_1|>\n", "revision_id": "e20b284d5f1841952104d7d9a0274cff80eb304d", "skeleton": "<|skeleton|>\nclass LSTMCell:\n \"\"\"epresents an LSTM unit\"\"\"\n\n def __init__(self, i, h, o):\n \"\"\"Constructor @i: dimensionality of the data @h: dimensionality of the hidden state @o: dimensionality of the outputs public instance attributes Wf, Wu, Wc, Wo, Wy, bf, bu, bc, bo, by weights and biases @Wf and bf are for the forget gate @Wu and bu are for the update gate @Wc and bc are for the intermediate cell state @Wo and bo are for the output gate @Wy and by are for the outputs\"\"\"\n <|body_0|>\n\n def forward(self, h_prev, c_prev, x_t):\n \"\"\"performs forward propagation for one time step @h_prev: np.ndarray shape(m, h) containing the previous hidden state @x_t: np.ndarray of shape(m, i) containing the data input for the cell @m: batche size for the data Retrun: h_next, c_next, y @h_next is the next hidden state @y: output of the cell\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class LSTMCell:\n \"\"\"epresents an LSTM unit\"\"\"\n\n def __init__(self, i, h, o):\n \"\"\"Constructor @i: dimensionality of the data @h: dimensionality of the hidden state @o: dimensionality of the outputs public instance attributes Wf, Wu, Wc, Wo, Wy, bf, bu, bc, bo, by weights and biases @Wf and bf are for the forget gate @Wu and bu are for the update gate @Wc and bc are for the intermediate cell state @Wo and bo are for the output gate @Wy and by are for the outputs\"\"\"\n self.Wf = np.random.normal(size=(h + i, h))\n self.Wu = np.random.normal(size=(h + i, h))\n self.Wc = np.random.normal(size=(h + i, h))\n self.Wo = 
np.random.normal(size=(h + i, h))\n self.Wy = np.random.normal(size=(h, o))\n self.bo = np.zeros((1, h))\n self.bc = np.zeros((1, h))\n self.bu = np.zeros((1, h))\n self.bf = np.zeros((1, h))\n self.by = np.zeros((1, o))\n\n def forward(self, h_prev, c_prev, x_t):\n \"\"\"performs forward propagation for one time step @h_prev: np.ndarray shape(m, h) containing the previous hidden state @x_t: np.ndarray of shape(m, i) containing the data input for the cell @m: batche size for the data Retrun: h_next, c_next, y @h_next is the next hidden state @y: output of the cell\"\"\"\n con = np.concatenate((h_prev, x_t), axis=1)\n x = np.matmul(con, self.Wf) + self.bf\n forget = 1 / (1 + np.exp(-x))\n x = np.matmul(con, self.Wu) + self.bu\n update = 1 / (1 + np.exp(-x))\n x = np.matmul(con, self.Wo) + self.bo\n out = 1 / (1 + np.exp(-x))\n c = np.tanh(np.matmul(con, self.Wc) + self.bc)\n c_next = forget * c_prev + c * update\n h_next = out * np.tanh(c_next)\n soft = np.matmul(h_next, self.Wy) + self.by\n x_max = np.max(soft, axis=-1, keepdims=True)\n x_exp = np.exp(soft - x_max)\n y = x_exp / x_exp.sum(axis=-1, keepdims=True)\n return (h_next, c_next, y)\n", "source": "the_stack_v2_python_sparse", "source_path": "supervised_learning/0x0D-RNNs/3-lstm_cell.py", "source_repo": "jgadelugo/holbertonschool-machine_learning", "split": "val", "star_events_count": 1}
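Editor's note: a quick shape walk-through of the weight layout used by the LSTMCell record above -- gate weights are (h + i, h) and act on the concatenation [h_prev, x_t], while Wy maps the hidden state to o output classes. The sketch reproduces one forget-gate step with invented sizes (np is assumed, as in the record).

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

m, i, h, o = 8, 10, 16, 4                        # batch, input, hidden, output dims
rng = np.random.default_rng(0)
Wf, bf = rng.normal(size=(h + i, h)), np.zeros((1, h))
Wy, by = rng.normal(size=(h, o)), np.zeros((1, o))
h_prev, x_t = np.zeros((m, h)), rng.normal(size=(m, i))

con = np.concatenate((h_prev, x_t), axis=1)      # (m, h + i), as in forward()
forget = sigmoid(np.matmul(con, Wf) + bf)        # (m, h), matches the record's gate
logits = np.matmul(forget, Wy) + by              # (m, o), pre-softmax
print(con.shape, forget.shape, logits.shape)     # (8, 26) (8, 16) (8, 4)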
{"blob_id": "d208ca4db22c8cf8466a505e4ff0d86271a9748e", "bodies": ["BaseDustNode.__init__(self, xml_node)\nself._value = utils.parse_number(xml_node.text)\nself._columns = [Column(name=col_name, unit=units)]", "base_string = BaseDustNode.__str__(self)\nstring = '[NumberNode: ' + base_string + ', value: ' + str(self._value) + ']'\nreturn string"], "bodies_text": "<|body_start_0|>\n BaseDustNode.__init__(self, xml_node)\n self._value = utils.parse_number(xml_node.text)\n self._columns = [Column(name=col_name, unit=units)]\n<|end_body_0|>\n\n<|body_start_1|>\n base_string = BaseDustNode.__str__(self)\n string = '[NumberNode: ' + base_string + ', value: ' + str(self._value) + ']'\n return string\n<|end_body_1|>\n", "class_docstring": "A node that contains a number. Outputs a single column containing the number.", "class_name": "NumberNode", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass NumberNode:\n \"\"\"A node that contains a number. Outputs a single column containing the number.\"\"\"\n\n def __init__(self, xml_node, col_name, *, units=None):\n \"\"\"Parameters ---------- xml_node : `xml.etree.ElementTree` the xml node that provides the raw data for this DustNode col_name : str the name of the column associated with this item units : `~astropy.units.Unit` the units associated with this item\"\"\"\n <|body_0|>\n\n def __str__(self):\n \"\"\"Return a string representation of the item.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n BaseDustNode.__init__(self, xml_node)\n self._value = utils.parse_number(xml_node.text)\n self._columns = [Column(name=col_name, unit=units)]\n<|end_body_0|>\n\n<|body_start_1|>\n base_string = BaseDustNode.__str__(self)\n string = '[NumberNode: ' + base_string + ', value: ' + str(self._value) + ']'\n return string\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000088", "length_bytes": 41056, "license_type": "permissive", "methods": [{"docstring": "Parameters ---------- xml_node : `xml.etree.ElementTree` the xml node that provides the raw data for this DustNode col_name : str the name of the column associated with this item units : `~astropy.units.Unit` the units associated with this item", "name": "__init__", "signature": "def __init__(self, xml_node, col_name, *, units=None)"}, {"docstring": "Return a string representation of the item.", "name": "__str__", "signature": "def __str__(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_024983", "prompt": "Implement the Python class `NumberNode` described below.\n\nClass description:\nA node that contains a number. Outputs a single column containing the number.\n\nMethod signatures and docstrings:\n- def __init__(self, xml_node, col_name, *, units=None): Parameters ---------- xml_node : `xml.etree.ElementTree` the xml node that provides the raw data for this DustNode col_name : str the name of the column associated with this item units : `~astropy.units.Unit` the units associated with this item\n- def __str__(self): Return a string representation of the item.", "prompted_full_text": "Implement the Python class `NumberNode` described below.\n\nClass description:\nA node that contains a number. 
Outputs a single column containing the number.\n\nMethod signatures and docstrings:\n- def __init__(self, xml_node, col_name, *, units=None): Parameters ---------- xml_node : `xml.etree.ElementTree` the xml node that provides the raw data for this DustNode col_name : str the name of the column associated with this item units : `~astropy.units.Unit` the units associated with this item\n- def __str__(self): Return a string representation of the item.\n\n<|skeleton|>\nclass NumberNode:\n \"\"\"A node that contains a number. Outputs a single column containing the number.\"\"\"\n\n def __init__(self, xml_node, col_name, *, units=None):\n \"\"\"Parameters ---------- xml_node : `xml.etree.ElementTree` the xml node that provides the raw data for this DustNode col_name : str the name of the column associated with this item units : `~astropy.units.Unit` the units associated with this item\"\"\"\n <|body_0|>\n\n def __str__(self):\n \"\"\"Return a string representation of the item.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n BaseDustNode.__init__(self, xml_node)\n self._value = utils.parse_number(xml_node.text)\n self._columns = [Column(name=col_name, unit=units)]\n<|end_body_0|>\n\n<|body_start_1|>\n base_string = BaseDustNode.__str__(self)\n string = '[NumberNode: ' + base_string + ', value: ' + str(self._value) + ']'\n return string\n<|end_body_1|>\n", "revision_id": "51316d7417d7daf01a8b29d1df99037b9227c2bc", "skeleton": "<|skeleton|>\nclass NumberNode:\n \"\"\"A node that contains a number. Outputs a single column containing the number.\"\"\"\n\n def __init__(self, xml_node, col_name, *, units=None):\n \"\"\"Parameters ---------- xml_node : `xml.etree.ElementTree` the xml node that provides the raw data for this DustNode col_name : str the name of the column associated with this item units : `~astropy.units.Unit` the units associated with this item\"\"\"\n <|body_0|>\n\n def __str__(self):\n \"\"\"Return a string representation of the item.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class NumberNode:\n \"\"\"A node that contains a number. Outputs a single column containing the number.\"\"\"\n\n def __init__(self, xml_node, col_name, *, units=None):\n \"\"\"Parameters ---------- xml_node : `xml.etree.ElementTree` the xml node that provides the raw data for this DustNode col_name : str the name of the column associated with this item units : `~astropy.units.Unit` the units associated with this item\"\"\"\n BaseDustNode.__init__(self, xml_node)\n self._value = utils.parse_number(xml_node.text)\n self._columns = [Column(name=col_name, unit=units)]\n\n def __str__(self):\n \"\"\"Return a string representation of the item.\"\"\"\n base_string = BaseDustNode.__str__(self)\n string = '[NumberNode: ' + base_string + ', value: ' + str(self._value) + ']'\n return string\n", "source": "the_stack_v2_python_sparse", "source_path": "astroquery/ipac/irsa/irsa_dust/core.py", "source_repo": "astropy/astroquery", "split": "val", "star_events_count": 636}
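Editor's note: NumberNode leans on astroquery internals (BaseDustNode, utils.parse_number) outside this blob, so it is not runnable in isolation. Below is a sketch of just the parsing step it performs, using the stdlib ElementTree and astropy's Column; the XML tag name, the sample value, and the float() stand-in for utils.parse_number are all assumptions.

import xml.etree.ElementTree as ET
from astropy.table import Column

node = ET.fromstring('<refPixelValue>0.0478 (mag)</refPixelValue>')  # invented sample
value = float(node.text.split()[0])        # crude stand-in for utils.parse_number
col = Column(name='ext SandF ref', unit='mag')  # empty named column, as in __init__
print(value, col.name, col.unit)           # 0.0478 ext SandF ref mag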
{"blob_id": "1785389d5818d508743bd63d5053077ab499dba3", "bodies": ["super(LstmSeqClassificationModel, self).__init__()\nself.padding_idx = padding_idx\nself.embedder = nn.Embedding(vocab_size, emb_dim, padding_idx=padding_idx)\nself.layer_norm = nn.LayerNorm(normalized_shape=emb_dim, epsilon=epsilon)\nself.dropout = nn.Dropout(p=dropout_rate)\ndirection = 'bidirectional' if is_bidirectory else 'forward'\nself.lstm_encoder = nn.LSTM(emb_dim, hidden_size, num_layers=n_lstm_layer, direction=direction)\nin_channels = hidden_size * 2 if is_bidirectory else hidden_size\nself.conv_encoder = nn.Conv1D(in_channels=in_channels, out_channels=hidden_size, kernel_size=5, padding=2)\nself.output_layer = nn.Conv1D(in_channels=hidden_size, out_channels=num_class, kernel_size=3, padding=1)", "embedded_text = self.embedder(tokens)\nembedded_text = self.layer_norm(embedded_text)\nembedded_text = self.dropout(embedded_text)\nreturn embedded_text", "conv_out = self.conv_encoder(lstm_output)\nconv_out = F.relu(conv_out)\nlogits = self.output_layer(conv_out).transpose(perm=(0, 2, 1))\nreturn logits", "embedded_text = self._prepare_emb(tokens)\nlstm_output, (last_hidden, last_cell) = self.lstm_encoder(embedded_text)\nlstm_output = lstm_output.transpose(perm=(0, 2, 1))\nlogits = self._seq_classification_task(lstm_output)\nreturn logits"], "bodies_text": "<|body_start_0|>\n super(LstmSeqClassificationModel, self).__init__()\n self.padding_idx = padding_idx\n self.embedder = nn.Embedding(vocab_size, emb_dim, padding_idx=padding_idx)\n self.layer_norm = nn.LayerNorm(normalized_shape=emb_dim, epsilon=epsilon)\n self.dropout = nn.Dropout(p=dropout_rate)\n direction = 'bidirectional' if is_bidirectory else 'forward'\n self.lstm_encoder = nn.LSTM(emb_dim, hidden_size, num_layers=n_lstm_layer, direction=direction)\n in_channels = hidden_size * 2 if is_bidirectory else hidden_size\n self.conv_encoder = nn.Conv1D(in_channels=in_channels, out_channels=hidden_size, kernel_size=5, padding=2)\n self.output_layer = nn.Conv1D(in_channels=hidden_size, out_channels=num_class, kernel_size=3, padding=1)\n<|end_body_0|>\n\n<|body_start_1|>\n embedded_text = self.embedder(tokens)\n embedded_text = self.layer_norm(embedded_text)\n embedded_text = self.dropout(embedded_text)\n return embedded_text\n<|end_body_1|>\n\n<|body_start_2|>\n conv_out = self.conv_encoder(lstm_output)\n conv_out = F.relu(conv_out)\n logits = self.output_layer(conv_out).transpose(perm=(0, 2, 1))\n return logits\n<|end_body_2|>\n\n<|body_start_3|>\n embedded_text = self._prepare_emb(tokens)\n lstm_output, (last_hidden, last_cell) = self.lstm_encoder(embedded_text)\n lstm_output = lstm_output.transpose(perm=(0, 2, 1))\n logits = self._seq_classification_task(lstm_output)\n return logits\n<|end_body_3|>\n", "class_docstring": "Lstm model for seq classification task.", "class_name": "LstmSeqClassificationModel", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LstmSeqClassificationModel:\n \"\"\"Lstm model for seq classification task.\"\"\"\n\n def __init__(self, vocab_size, num_class, emb_dim=512, hidden_size=512, n_lstm_layer=3, is_bidirectory=True, padding_idx=0, epsilon=1e-05, dropout_rate=0.1):\n \"\"\"Init model Args: vocab_size (int): vocab size. num_class (int): num of classes. emb_dim (int, optional): embedding dimmension. Defaults to 512. hidden_size (int, optional): hidden size. Defaults to 512. n_lstm_layer (int, optional): number of lstm layer. Defaults to 3. 
is_bidirectory (bool, optional): use bidirect lstm. Defaults to True. padding_idx (int, optional): padding index. Defaults to 0. epsilon (float, optional): epsilon. Defaults to 1e-5. dropout_rate (float, optional): dropout rate. Defaults to 0.1.\"\"\"\n <|body_0|>\n\n def _prepare_emb(self, tokens):\n \"\"\"prepare emb\"\"\"\n <|body_1|>\n\n def _seq_classification_task(self, lstm_output):\n \"\"\"calc seq class loss\"\"\"\n <|body_2|>\n\n def forward(self, tokens, seq_lens):\n \"\"\"model forward\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(LstmSeqClassificationModel, self).__init__()\n self.padding_idx = padding_idx\n self.embedder = nn.Embedding(vocab_size, emb_dim, padding_idx=padding_idx)\n self.layer_norm = nn.LayerNorm(normalized_shape=emb_dim, epsilon=epsilon)\n self.dropout = nn.Dropout(p=dropout_rate)\n direction = 'bidirectional' if is_bidirectory else 'forward'\n self.lstm_encoder = nn.LSTM(emb_dim, hidden_size, num_layers=n_lstm_layer, direction=direction)\n in_channels = hidden_size * 2 if is_bidirectory else hidden_size\n self.conv_encoder = nn.Conv1D(in_channels=in_channels, out_channels=hidden_size, kernel_size=5, padding=2)\n self.output_layer = nn.Conv1D(in_channels=hidden_size, out_channels=num_class, kernel_size=3, padding=1)\n<|end_body_0|>\n\n<|body_start_1|>\n embedded_text = self.embedder(tokens)\n embedded_text = self.layer_norm(embedded_text)\n embedded_text = self.dropout(embedded_text)\n return embedded_text\n<|end_body_1|>\n\n<|body_start_2|>\n conv_out = self.conv_encoder(lstm_output)\n conv_out = F.relu(conv_out)\n logits = self.output_layer(conv_out).transpose(perm=(0, 2, 1))\n return logits\n<|end_body_2|>\n\n<|body_start_3|>\n embedded_text = self._prepare_emb(tokens)\n lstm_output, (last_hidden, last_cell) = self.lstm_encoder(embedded_text)\n lstm_output = lstm_output.transpose(perm=(0, 2, 1))\n logits = self._seq_classification_task(lstm_output)\n return logits\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000089", "length_bytes": 10112, "license_type": "permissive", "methods": [{"docstring": "Init model Args: vocab_size (int): vocab size. num_class (int): num of classes. emb_dim (int, optional): embedding dimmension. Defaults to 512. hidden_size (int, optional): hidden size. Defaults to 512. n_lstm_layer (int, optional): number of lstm layer. Defaults to 3. is_bidirectory (bool, optional): use bidirect lstm. Defaults to True. padding_idx (int, optional): padding index. Defaults to 0. epsilon (float, optional): epsilon. Defaults to 1e-5. dropout_rate (float, optional): dropout rate. 
Defaults to 0.1.", "name": "__init__", "signature": "def __init__(self, vocab_size, num_class, emb_dim=512, hidden_size=512, n_lstm_layer=3, is_bidirectory=True, padding_idx=0, epsilon=1e-05, dropout_rate=0.1)"}, {"docstring": "prepare emb", "name": "_prepare_emb", "signature": "def _prepare_emb(self, tokens)"}, {"docstring": "calc seq class loss", "name": "_seq_classification_task", "signature": "def _seq_classification_task(self, lstm_output)"}, {"docstring": "model forward", "name": "forward", "signature": "def forward(self, tokens, seq_lens)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_050277", "prompt": "Implement the Python class `LstmSeqClassificationModel` described below.\n\nClass description:\nLstm model for seq classification task.\n\nMethod signatures and docstrings:\n- def __init__(self, vocab_size, num_class, emb_dim=512, hidden_size=512, n_lstm_layer=3, is_bidirectory=True, padding_idx=0, epsilon=1e-05, dropout_rate=0.1): Init model Args: vocab_size (int): vocab size. num_class (int): num of classes. emb_dim (int, optional): embedding dimmension. Defaults to 512. hidden_size (int, optional): hidden size. Defaults to 512. n_lstm_layer (int, optional): number of lstm layer. Defaults to 3. is_bidirectory (bool, optional): use bidirect lstm. Defaults to True. padding_idx (int, optional): padding index. Defaults to 0. epsilon (float, optional): epsilon. Defaults to 1e-5. dropout_rate (float, optional): dropout rate. Defaults to 0.1.\n- def _prepare_emb(self, tokens): prepare emb\n- def _seq_classification_task(self, lstm_output): calc seq class loss\n- def forward(self, tokens, seq_lens): model forward", "prompted_full_text": "Implement the Python class `LstmSeqClassificationModel` described below.\n\nClass description:\nLstm model for seq classification task.\n\nMethod signatures and docstrings:\n- def __init__(self, vocab_size, num_class, emb_dim=512, hidden_size=512, n_lstm_layer=3, is_bidirectory=True, padding_idx=0, epsilon=1e-05, dropout_rate=0.1): Init model Args: vocab_size (int): vocab size. num_class (int): num of classes. emb_dim (int, optional): embedding dimmension. Defaults to 512. hidden_size (int, optional): hidden size. Defaults to 512. n_lstm_layer (int, optional): number of lstm layer. Defaults to 3. is_bidirectory (bool, optional): use bidirect lstm. Defaults to True. padding_idx (int, optional): padding index. Defaults to 0. epsilon (float, optional): epsilon. Defaults to 1e-5. dropout_rate (float, optional): dropout rate. Defaults to 0.1.\n- def _prepare_emb(self, tokens): prepare emb\n- def _seq_classification_task(self, lstm_output): calc seq class loss\n- def forward(self, tokens, seq_lens): model forward\n\n<|skeleton|>\nclass LstmSeqClassificationModel:\n \"\"\"Lstm model for seq classification task.\"\"\"\n\n def __init__(self, vocab_size, num_class, emb_dim=512, hidden_size=512, n_lstm_layer=3, is_bidirectory=True, padding_idx=0, epsilon=1e-05, dropout_rate=0.1):\n \"\"\"Init model Args: vocab_size (int): vocab size. num_class (int): num of classes. emb_dim (int, optional): embedding dimmension. Defaults to 512. hidden_size (int, optional): hidden size. Defaults to 512. n_lstm_layer (int, optional): number of lstm layer. Defaults to 3. is_bidirectory (bool, optional): use bidirect lstm. Defaults to True. padding_idx (int, optional): padding index. Defaults to 0. epsilon (float, optional): epsilon. Defaults to 1e-5. dropout_rate (float, optional): dropout rate. 
Defaults to 0.1.\"\"\"\n <|body_0|>\n\n def _prepare_emb(self, tokens):\n \"\"\"prepare emb\"\"\"\n <|body_1|>\n\n def _seq_classification_task(self, lstm_output):\n \"\"\"calc seq class loss\"\"\"\n <|body_2|>\n\n def forward(self, tokens, seq_lens):\n \"\"\"model forward\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(LstmSeqClassificationModel, self).__init__()\n self.padding_idx = padding_idx\n self.embedder = nn.Embedding(vocab_size, emb_dim, padding_idx=padding_idx)\n self.layer_norm = nn.LayerNorm(normalized_shape=emb_dim, epsilon=epsilon)\n self.dropout = nn.Dropout(p=dropout_rate)\n direction = 'bidirectional' if is_bidirectory else 'forward'\n self.lstm_encoder = nn.LSTM(emb_dim, hidden_size, num_layers=n_lstm_layer, direction=direction)\n in_channels = hidden_size * 2 if is_bidirectory else hidden_size\n self.conv_encoder = nn.Conv1D(in_channels=in_channels, out_channels=hidden_size, kernel_size=5, padding=2)\n self.output_layer = nn.Conv1D(in_channels=hidden_size, out_channels=num_class, kernel_size=3, padding=1)\n<|end_body_0|>\n\n<|body_start_1|>\n embedded_text = self.embedder(tokens)\n embedded_text = self.layer_norm(embedded_text)\n embedded_text = self.dropout(embedded_text)\n return embedded_text\n<|end_body_1|>\n\n<|body_start_2|>\n conv_out = self.conv_encoder(lstm_output)\n conv_out = F.relu(conv_out)\n logits = self.output_layer(conv_out).transpose(perm=(0, 2, 1))\n return logits\n<|end_body_2|>\n\n<|body_start_3|>\n embedded_text = self._prepare_emb(tokens)\n lstm_output, (last_hidden, last_cell) = self.lstm_encoder(embedded_text)\n lstm_output = lstm_output.transpose(perm=(0, 2, 1))\n logits = self._seq_classification_task(lstm_output)\n return logits\n<|end_body_3|>\n", "revision_id": "1c84ea6d51625d2d66b3eef1d9a7cc9a87c99e0e", "skeleton": "<|skeleton|>\nclass LstmSeqClassificationModel:\n \"\"\"Lstm model for seq classification task.\"\"\"\n\n def __init__(self, vocab_size, num_class, emb_dim=512, hidden_size=512, n_lstm_layer=3, is_bidirectory=True, padding_idx=0, epsilon=1e-05, dropout_rate=0.1):\n \"\"\"Init model Args: vocab_size (int): vocab size. num_class (int): num of classes. emb_dim (int, optional): embedding dimmension. Defaults to 512. hidden_size (int, optional): hidden size. Defaults to 512. n_lstm_layer (int, optional): number of lstm layer. Defaults to 3. is_bidirectory (bool, optional): use bidirect lstm. Defaults to True. padding_idx (int, optional): padding index. Defaults to 0. epsilon (float, optional): epsilon. Defaults to 1e-5. dropout_rate (float, optional): dropout rate. Defaults to 0.1.\"\"\"\n <|body_0|>\n\n def _prepare_emb(self, tokens):\n \"\"\"prepare emb\"\"\"\n <|body_1|>\n\n def _seq_classification_task(self, lstm_output):\n \"\"\"calc seq class loss\"\"\"\n <|body_2|>\n\n def forward(self, tokens, seq_lens):\n \"\"\"model forward\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class LstmSeqClassificationModel:\n \"\"\"Lstm model for seq classification task.\"\"\"\n\n def __init__(self, vocab_size, num_class, emb_dim=512, hidden_size=512, n_lstm_layer=3, is_bidirectory=True, padding_idx=0, epsilon=1e-05, dropout_rate=0.1):\n \"\"\"Init model Args: vocab_size (int): vocab size. num_class (int): num of classes. emb_dim (int, optional): embedding dimmension. Defaults to 512. hidden_size (int, optional): hidden size. Defaults to 512. 
n_lstm_layer (int, optional): number of lstm layer. Defaults to 3. is_bidirectory (bool, optional): use bidirect lstm. Defaults to True. padding_idx (int, optional): padding index. Defaults to 0. epsilon (float, optional): epsilon. Defaults to 1e-5. dropout_rate (float, optional): dropout rate. Defaults to 0.1.\"\"\"\n super(LstmSeqClassificationModel, self).__init__()\n self.padding_idx = padding_idx\n self.embedder = nn.Embedding(vocab_size, emb_dim, padding_idx=padding_idx)\n self.layer_norm = nn.LayerNorm(normalized_shape=emb_dim, epsilon=epsilon)\n self.dropout = nn.Dropout(p=dropout_rate)\n direction = 'bidirectional' if is_bidirectory else 'forward'\n self.lstm_encoder = nn.LSTM(emb_dim, hidden_size, num_layers=n_lstm_layer, direction=direction)\n in_channels = hidden_size * 2 if is_bidirectory else hidden_size\n self.conv_encoder = nn.Conv1D(in_channels=in_channels, out_channels=hidden_size, kernel_size=5, padding=2)\n self.output_layer = nn.Conv1D(in_channels=hidden_size, out_channels=num_class, kernel_size=3, padding=1)\n\n def _prepare_emb(self, tokens):\n \"\"\"prepare emb\"\"\"\n embedded_text = self.embedder(tokens)\n embedded_text = self.layer_norm(embedded_text)\n embedded_text = self.dropout(embedded_text)\n return embedded_text\n\n def _seq_classification_task(self, lstm_output):\n \"\"\"calc seq class loss\"\"\"\n conv_out = self.conv_encoder(lstm_output)\n conv_out = F.relu(conv_out)\n logits = self.output_layer(conv_out).transpose(perm=(0, 2, 1))\n return logits\n\n def forward(self, tokens, seq_lens):\n \"\"\"model forward\"\"\"\n embedded_text = self._prepare_emb(tokens)\n lstm_output, (last_hidden, last_cell) = self.lstm_encoder(embedded_text)\n lstm_output = lstm_output.transpose(perm=(0, 2, 1))\n logits = self._seq_classification_task(lstm_output)\n return logits\n", "source": "the_stack_v2_python_sparse", "source_path": "apps/pretrained_protein/tape_dynamic/protein_sequence_model_dynamic.py", "source_repo": "RuikangSun/PaddleHelix", "split": "val", "star_events_count": 0}
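The LstmSeqClassificationModel record above is written against the PaddlePaddle API (nn.Conv1D, direction='bidirectional', transpose(perm=...)), not PyTorch. Two details in the body are worth spelling out: a bidirectional LSTM emits 2 * hidden_size features per step, which is why in_channels doubles, and Paddle's Conv1D defaults to channels-first (N, C, L) input while the LSTM emits (N, L, C), which is why forward transposes before the convolutions and _seq_classification_task transposes back. A framework-free NumPy sketch of that shape bookkeeping:

import numpy as np

batch, steps, hidden = 4, 100, 512
# Bidirectional LSTM output: features from both directions are concatenated.
lstm_out = np.zeros((batch, steps, 2 * hidden))   # (N, L, 2C)
conv_in = lstm_out.transpose(0, 2, 1)             # (N, 2C, L) for Conv1D
assert conv_in.shape == (batch, 2 * hidden, steps)

# After the 1D convs over length L, logits come back as (N, num_class, L)
# and are transposed to (N, L, num_class) for a per-step classification.
num_class = 3
logits = np.zeros((batch, num_class, steps)).transpose(0, 2, 1)
assert logits.shape == (batch, steps, num_class)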
{"blob_id": "fb0db1158596d67d48e243b098d9cb6f22020dbb", "bodies": ["data = {}\nfor key, value in entry.items():\n converter = self.convert.get(key, str)\n data[key] = converter(value)\nreturn data", "with open(os.path.join(directory, self.filename)) as raw:\n reader = csv.DictReader(raw, fieldnames=self.header)\n seen = False\n for row in reader:\n converted = self.__convert__(row)\n if 'name' in converted:\n name = converted.pop('name')\n yield (name, converted)\n else:\n yield converted\n seen = True\n if not seen:\n raise core.InvalidState('No rows for %s' % self.filename)"], "bodies_text": "<|body_start_0|>\n data = {}\n for key, value in entry.items():\n converter = self.convert.get(key, str)\n data[key] = converter(value)\n return data\n<|end_body_0|>\n\n<|body_start_1|>\n with open(os.path.join(directory, self.filename)) as raw:\n reader = csv.DictReader(raw, fieldnames=self.header)\n seen = False\n for row in reader:\n converted = self.__convert__(row)\n if 'name' in converted:\n name = converted.pop('name')\n yield (name, converted)\n else:\n yield converted\n seen = True\n if not seen:\n raise core.InvalidState('No rows for %s' % self.filename)\n<|end_body_1|>\n", "class_docstring": "A base parser for all motif csv files. This is callable and will produce a generator of dictonaries. If the row has a 'name' property we will yield the name and dictonary, otherwise we just yield the dictonary. This will also convert the field values specified by convert property.", "class_name": "BaseParser", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BaseParser:\n \"\"\"A base parser for all motif csv files. This is callable and will produce a generator of dictonaries. If the row has a 'name' property we will yield the name and dictonary, otherwise we just yield the dictonary. This will also convert the field values specified by convert property.\"\"\"\n\n def __convert__(self, entry):\n \"\"\"Converts the values as requested. :param dict entry: The dictonary to convert.\"\"\"\n <|body_0|>\n\n def __call__(self, directory):\n \"\"\"Parse the file in the given directory. :param str directory: The directory to get the file in. :yields: Each row in the file, after converting.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n data = {}\n for key, value in entry.items():\n converter = self.convert.get(key, str)\n data[key] = converter(value)\n return data\n<|end_body_0|>\n\n<|body_start_1|>\n with open(os.path.join(directory, self.filename)) as raw:\n reader = csv.DictReader(raw, fieldnames=self.header)\n seen = False\n for row in reader:\n converted = self.__convert__(row)\n if 'name' in converted:\n name = converted.pop('name')\n yield (name, converted)\n else:\n yield converted\n seen = True\n if not seen:\n raise core.InvalidState('No rows for %s' % self.filename)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000090", "length_bytes": 12261, "license_type": "no_license", "methods": [{"docstring": "Converts the values as requested. :param dict entry: The dictonary to convert.", "name": "__convert__", "signature": "def __convert__(self, entry)"}, {"docstring": "Parse the file in the given directory. :param str directory: The directory to get the file in. 
:yields: Each row in the file, after converting.", "name": "__call__", "signature": "def __call__(self, directory)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_018958", "prompt": "Implement the Python class `BaseParser` described below.\n\nClass description:\nA base parser for all motif csv files. This is callable and will produce a generator of dictonaries. If the row has a 'name' property we will yield the name and dictonary, otherwise we just yield the dictonary. This will also convert the field values specified by convert property.\n\nMethod signatures and docstrings:\n- def __convert__(self, entry): Converts the values as requested. :param dict entry: The dictonary to convert.\n- def __call__(self, directory): Parse the file in the given directory. :param str directory: The directory to get the file in. :yields: Each row in the file, after converting.", "prompted_full_text": "Implement the Python class `BaseParser` described below.\n\nClass description:\nA base parser for all motif csv files. This is callable and will produce a generator of dictonaries. If the row has a 'name' property we will yield the name and dictonary, otherwise we just yield the dictonary. This will also convert the field values specified by convert property.\n\nMethod signatures and docstrings:\n- def __convert__(self, entry): Converts the values as requested. :param dict entry: The dictonary to convert.\n- def __call__(self, directory): Parse the file in the given directory. :param str directory: The directory to get the file in. :yields: Each row in the file, after converting.\n\n<|skeleton|>\nclass BaseParser:\n \"\"\"A base parser for all motif csv files. This is callable and will produce a generator of dictonaries. If the row has a 'name' property we will yield the name and dictonary, otherwise we just yield the dictonary. This will also convert the field values specified by convert property.\"\"\"\n\n def __convert__(self, entry):\n \"\"\"Converts the values as requested. :param dict entry: The dictonary to convert.\"\"\"\n <|body_0|>\n\n def __call__(self, directory):\n \"\"\"Parse the file in the given directory. :param str directory: The directory to get the file in. :yields: Each row in the file, after converting.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n data = {}\n for key, value in entry.items():\n converter = self.convert.get(key, str)\n data[key] = converter(value)\n return data\n<|end_body_0|>\n\n<|body_start_1|>\n with open(os.path.join(directory, self.filename)) as raw:\n reader = csv.DictReader(raw, fieldnames=self.header)\n seen = False\n for row in reader:\n converted = self.__convert__(row)\n if 'name' in converted:\n name = converted.pop('name')\n yield (name, converted)\n else:\n yield converted\n seen = True\n if not seen:\n raise core.InvalidState('No rows for %s' % self.filename)\n<|end_body_1|>\n", "revision_id": "1982e10a56885e56d79aac69365b9ff78c0e3d92", "skeleton": "<|skeleton|>\nclass BaseParser:\n \"\"\"A base parser for all motif csv files. This is callable and will produce a generator of dictonaries. If the row has a 'name' property we will yield the name and dictonary, otherwise we just yield the dictonary. This will also convert the field values specified by convert property.\"\"\"\n\n def __convert__(self, entry):\n \"\"\"Converts the values as requested. :param dict entry: The dictonary to convert.\"\"\"\n <|body_0|>\n\n def __call__(self, directory):\n \"\"\"Parse the file in the given directory. 
:param str directory: The directory to get the file in. :yields: Each row in the file, after converting.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class BaseParser:\n \"\"\"A base parser for all motif csv files. This is callable and will produce a generator of dictonaries. If the row has a 'name' property we will yield the name and dictonary, otherwise we just yield the dictonary. This will also convert the field values specified by convert property.\"\"\"\n\n def __convert__(self, entry):\n \"\"\"Converts the values as requested. :param dict entry: The dictonary to convert.\"\"\"\n data = {}\n for key, value in entry.items():\n converter = self.convert.get(key, str)\n data[key] = converter(value)\n return data\n\n def __call__(self, directory):\n \"\"\"Parse the file in the given directory. :param str directory: The directory to get the file in. :yields: Each row in the file, after converting.\"\"\"\n with open(os.path.join(directory, self.filename)) as raw:\n reader = csv.DictReader(raw, fieldnames=self.header)\n seen = False\n for row in reader:\n converted = self.__convert__(row)\n if 'name' in converted:\n name = converted.pop('name')\n yield (name, converted)\n else:\n yield converted\n seen = True\n if not seen:\n raise core.InvalidState('No rows for %s' % self.filename)\n", "source": "the_stack_v2_python_sparse", "source_path": "pymotifs/motifs/builder.py", "source_repo": "BGSU-RNA/RNA-3D-Hub-core", "split": "val", "star_events_count": 3}
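The BaseParser record above relies on three class attributes that subclasses must supply -- filename, header, and convert -- which are only implied by the method bodies. The self-contained mini version below makes that contract explicit; the subclass name and its fields are hypothetical, not taken from the source repository:

import csv
import io

class BaseParser:
    filename = None   # CSV file name, joined onto the directory
    header = None     # field names handed to csv.DictReader
    convert = {}      # per-field converters; anything else falls back to str

    def __convert__(self, entry):
        return {key: self.convert.get(key, str)(value)
                for key, value in entry.items()}

class LoopParser(BaseParser):  # hypothetical subclass for illustration
    filename = 'loops.csv'
    header = ['name', 'rank']
    convert = {'rank': int}

reader = csv.DictReader(io.StringIO('IL_001,1\nIL_002,2\n'),
                        fieldnames=LoopParser.header)
for row in reader:
    print(LoopParser().__convert__(row))   # {'name': 'IL_001', 'rank': 1} ...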
{"blob_id": "10de189ba019e30eda51e23ebd5fe27301e2989d", "bodies": ["self._entry_id = entry_id\nself._device_id = device_id\nself.sonarr = sonarr\nself.host_config = host_config\nself.system_status = system_status", "if self._device_id is None:\n return None\nreturn DeviceInfo(identifiers={(DOMAIN, self._device_id)}, name='Activity Sensor', manufacturer='Sonarr', sw_version=self.system_status.version, entry_type=DeviceEntryType.SERVICE, configuration_url=self.host_config.base_url)"], "bodies_text": "<|body_start_0|>\n self._entry_id = entry_id\n self._device_id = device_id\n self.sonarr = sonarr\n self.host_config = host_config\n self.system_status = system_status\n<|end_body_0|>\n\n<|body_start_1|>\n if self._device_id is None:\n return None\n return DeviceInfo(identifiers={(DOMAIN, self._device_id)}, name='Activity Sensor', manufacturer='Sonarr', sw_version=self.system_status.version, entry_type=DeviceEntryType.SERVICE, configuration_url=self.host_config.base_url)\n<|end_body_1|>\n", "class_docstring": "Defines a base Sonarr entity.", "class_name": "SonarrEntity", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SonarrEntity:\n \"\"\"Defines a base Sonarr entity.\"\"\"\n\n def __init__(self, *, sonarr: SonarrClient, host_config: PyArrHostConfiguration, system_status: SystemStatus, entry_id: str, device_id: str) -> None:\n \"\"\"Initialize the Sonarr entity.\"\"\"\n <|body_0|>\n\n def device_info(self) -> DeviceInfo | None:\n \"\"\"Return device information about the application.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._entry_id = entry_id\n self._device_id = device_id\n self.sonarr = sonarr\n self.host_config = host_config\n self.system_status = system_status\n<|end_body_0|>\n\n<|body_start_1|>\n if self._device_id is None:\n return None\n return DeviceInfo(identifiers={(DOMAIN, self._device_id)}, name='Activity Sensor', manufacturer='Sonarr', sw_version=self.system_status.version, entry_type=DeviceEntryType.SERVICE, configuration_url=self.host_config.base_url)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000091", "length_bytes": 1389, "license_type": "permissive", "methods": [{"docstring": "Initialize the Sonarr entity.", "name": "__init__", "signature": "def __init__(self, *, sonarr: SonarrClient, host_config: PyArrHostConfiguration, system_status: SystemStatus, entry_id: str, device_id: str) -> None"}, {"docstring": "Return device information about the application.", "name": "device_info", "signature": "def device_info(self) -> DeviceInfo | None"}], "n_methods": 2, "prompt": "Implement the Python class `SonarrEntity` described below.\n\nClass description:\nDefines a base Sonarr entity.\n\nMethod signatures and docstrings:\n- def __init__(self, *, sonarr: SonarrClient, host_config: PyArrHostConfiguration, system_status: SystemStatus, entry_id: str, device_id: str) -> None: Initialize the Sonarr entity.\n- def device_info(self) -> DeviceInfo | None: Return device information about the application.", "prompted_full_text": "Implement the Python class `SonarrEntity` described below.\n\nClass description:\nDefines a base Sonarr entity.\n\nMethod signatures and docstrings:\n- def __init__(self, *, sonarr: SonarrClient, host_config: PyArrHostConfiguration, system_status: SystemStatus, entry_id: str, device_id: str) -> None: Initialize the Sonarr entity.\n- def device_info(self) -> DeviceInfo | None: Return device information about the application.\n\n<|skeleton|>\nclass 
SonarrEntity:\n \"\"\"Defines a base Sonarr entity.\"\"\"\n\n def __init__(self, *, sonarr: SonarrClient, host_config: PyArrHostConfiguration, system_status: SystemStatus, entry_id: str, device_id: str) -> None:\n \"\"\"Initialize the Sonarr entity.\"\"\"\n <|body_0|>\n\n def device_info(self) -> DeviceInfo | None:\n \"\"\"Return device information about the application.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._entry_id = entry_id\n self._device_id = device_id\n self.sonarr = sonarr\n self.host_config = host_config\n self.system_status = system_status\n<|end_body_0|>\n\n<|body_start_1|>\n if self._device_id is None:\n return None\n return DeviceInfo(identifiers={(DOMAIN, self._device_id)}, name='Activity Sensor', manufacturer='Sonarr', sw_version=self.system_status.version, entry_type=DeviceEntryType.SERVICE, configuration_url=self.host_config.base_url)\n<|end_body_1|>\n", "revision_id": "4a53121b58b77a318f08c64ad2c5372a16b800e0", "skeleton": "<|skeleton|>\nclass SonarrEntity:\n \"\"\"Defines a base Sonarr entity.\"\"\"\n\n def __init__(self, *, sonarr: SonarrClient, host_config: PyArrHostConfiguration, system_status: SystemStatus, entry_id: str, device_id: str) -> None:\n \"\"\"Initialize the Sonarr entity.\"\"\"\n <|body_0|>\n\n def device_info(self) -> DeviceInfo | None:\n \"\"\"Return device information about the application.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SonarrEntity:\n \"\"\"Defines a base Sonarr entity.\"\"\"\n\n def __init__(self, *, sonarr: SonarrClient, host_config: PyArrHostConfiguration, system_status: SystemStatus, entry_id: str, device_id: str) -> None:\n \"\"\"Initialize the Sonarr entity.\"\"\"\n self._entry_id = entry_id\n self._device_id = device_id\n self.sonarr = sonarr\n self.host_config = host_config\n self.system_status = system_status\n\n def device_info(self) -> DeviceInfo | None:\n \"\"\"Return device information about the application.\"\"\"\n if self._device_id is None:\n return None\n return DeviceInfo(identifiers={(DOMAIN, self._device_id)}, name='Activity Sensor', manufacturer='Sonarr', sw_version=self.system_status.version, entry_type=DeviceEntryType.SERVICE, configuration_url=self.host_config.base_url)\n", "source": "the_stack_v2_python_sparse", "source_path": "homeassistant/components/sonarr/entity.py", "source_repo": "robert-alfaro/home-assistant", "split": "val", "star_events_count": 4}
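In the SonarrEntity record above, device_info() returning None is meaningful: Home Assistant only registers a device for the entity when device information is available. A stdlib sketch of that guard pattern, with DeviceInfo reduced to a plain dataclass (the real one is a richer Home Assistant structure):

from dataclasses import dataclass
from typing import Optional, Set, Tuple

@dataclass
class DeviceInfo:          # stand-in for Home Assistant's DeviceInfo
    identifiers: Set[Tuple[str, str]]
    name: str

class Entity:
    def __init__(self, device_id: Optional[str]) -> None:
        self._device_id = device_id

    def device_info(self) -> Optional[DeviceInfo]:
        if self._device_id is None:
            return None    # no device id: the entity stays device-less
        return DeviceInfo({('sonarr', self._device_id)}, 'Activity Sensor')

print(Entity(None).device_info())      # None
print(Entity('abc123').device_info())  # DeviceInfo(...)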
{"blob_id": "3c875d131c594ef952aa23bbd6e2681ff12ba75d", "bodies": ["r = self.client.put('/api/company/', LOGIN_COMPANY_OBJECT_SUCCESS, content_type='application/json')\ntoken = r.data['data']['access_token']\nheader = {'HTTP_AUTHORIZATION': 'Bearer {token}'.format(token=token)}\nr = self.client.get('/api/my-service-area/efcb0080-1ce9-47a0-8299-15c126b84dd4/', content_type='application/json', **header)\nself.assertTrue(r.data['success'])\nself.assertEqual(r.status_code, status.HTTP_200_OK)", "r = self.client.put('/api/company/', LOGIN_COMPANY_OBJECT_SUCCESS, content_type='application/json')\ntoken = r.data['data']['access_token']\nheader = {'HTTP_AUTHORIZATION': 'Bearer {token}'.format(token=token)}\nr = self.client.get('/api/my-service-area/efcb0080-1ce9-47a0-8299-15c126b84d32/', content_type='application/json', **header)\nself.assertFalse(r.data['success'])\nself.assertEqual(r.status_code, status.HTTP_204_NO_CONTENT)"], "bodies_text": "<|body_start_0|>\n r = self.client.put('/api/company/', LOGIN_COMPANY_OBJECT_SUCCESS, content_type='application/json')\n token = r.data['data']['access_token']\n header = {'HTTP_AUTHORIZATION': 'Bearer {token}'.format(token=token)}\n r = self.client.get('/api/my-service-area/efcb0080-1ce9-47a0-8299-15c126b84dd4/', content_type='application/json', **header)\n self.assertTrue(r.data['success'])\n self.assertEqual(r.status_code, status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n r = self.client.put('/api/company/', LOGIN_COMPANY_OBJECT_SUCCESS, content_type='application/json')\n token = r.data['data']['access_token']\n header = {'HTTP_AUTHORIZATION': 'Bearer {token}'.format(token=token)}\n r = self.client.get('/api/my-service-area/efcb0080-1ce9-47a0-8299-15c126b84d32/', content_type='application/json', **header)\n self.assertFalse(r.data['success'])\n self.assertEqual(r.status_code, status.HTTP_204_NO_CONTENT)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "GetServiceArea", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GetServiceArea:\n\n def test_success(self):\n \"\"\"Test Success\"\"\"\n <|body_0|>\n\n def test_failure(self):\n \"\"\"Test Failure\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n r = self.client.put('/api/company/', LOGIN_COMPANY_OBJECT_SUCCESS, content_type='application/json')\n token = r.data['data']['access_token']\n header = {'HTTP_AUTHORIZATION': 'Bearer {token}'.format(token=token)}\n r = self.client.get('/api/my-service-area/efcb0080-1ce9-47a0-8299-15c126b84dd4/', content_type='application/json', **header)\n self.assertTrue(r.data['success'])\n self.assertEqual(r.status_code, status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n r = self.client.put('/api/company/', LOGIN_COMPANY_OBJECT_SUCCESS, content_type='application/json')\n token = r.data['data']['access_token']\n header = {'HTTP_AUTHORIZATION': 'Bearer {token}'.format(token=token)}\n r = self.client.get('/api/my-service-area/efcb0080-1ce9-47a0-8299-15c126b84d32/', content_type='application/json', **header)\n self.assertFalse(r.data['success'])\n self.assertEqual(r.status_code, status.HTTP_204_NO_CONTENT)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000092", "length_bytes": 11905, "license_type": "permissive", "methods": [{"docstring": "Test Success", "name": "test_success", "signature": "def test_success(self)"}, {"docstring": "Test Failure", "name": "test_failure", "signature": "def test_failure(self)"}], "n_methods": 2, "original_id": 
"stack_v2_sparse_classes_30k_test_002065", "prompt": "Implement the Python class `GetServiceArea` described below.\n\nClass description:\nImplement the GetServiceArea class.\n\nMethod signatures and docstrings:\n- def test_success(self): Test Success\n- def test_failure(self): Test Failure", "prompted_full_text": "Implement the Python class `GetServiceArea` described below.\n\nClass description:\nImplement the GetServiceArea class.\n\nMethod signatures and docstrings:\n- def test_success(self): Test Success\n- def test_failure(self): Test Failure\n\n<|skeleton|>\nclass GetServiceArea:\n\n def test_success(self):\n \"\"\"Test Success\"\"\"\n <|body_0|>\n\n def test_failure(self):\n \"\"\"Test Failure\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n r = self.client.put('/api/company/', LOGIN_COMPANY_OBJECT_SUCCESS, content_type='application/json')\n token = r.data['data']['access_token']\n header = {'HTTP_AUTHORIZATION': 'Bearer {token}'.format(token=token)}\n r = self.client.get('/api/my-service-area/efcb0080-1ce9-47a0-8299-15c126b84dd4/', content_type='application/json', **header)\n self.assertTrue(r.data['success'])\n self.assertEqual(r.status_code, status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n r = self.client.put('/api/company/', LOGIN_COMPANY_OBJECT_SUCCESS, content_type='application/json')\n token = r.data['data']['access_token']\n header = {'HTTP_AUTHORIZATION': 'Bearer {token}'.format(token=token)}\n r = self.client.get('/api/my-service-area/efcb0080-1ce9-47a0-8299-15c126b84d32/', content_type='application/json', **header)\n self.assertFalse(r.data['success'])\n self.assertEqual(r.status_code, status.HTTP_204_NO_CONTENT)\n<|end_body_1|>\n", "revision_id": "c667e1bc2163ba49591e367bd5c882fe1b1f8df0", "skeleton": "<|skeleton|>\nclass GetServiceArea:\n\n def test_success(self):\n \"\"\"Test Success\"\"\"\n <|body_0|>\n\n def test_failure(self):\n \"\"\"Test Failure\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class GetServiceArea:\n def test_success(self):\n \"\"\"Test Success\"\"\"\n r = self.client.put('/api/company/', LOGIN_COMPANY_OBJECT_SUCCESS, content_type='application/json')\n token = r.data['data']['access_token']\n header = {'HTTP_AUTHORIZATION': 'Bearer {token}'.format(token=token)}\n r = self.client.get('/api/my-service-area/efcb0080-1ce9-47a0-8299-15c126b84dd4/', content_type='application/json', **header)\n self.assertTrue(r.data['success'])\n self.assertEqual(r.status_code, status.HTTP_200_OK)\n\n def test_failure(self):\n \"\"\"Test Failure\"\"\"\n r = self.client.put('/api/company/', LOGIN_COMPANY_OBJECT_SUCCESS, content_type='application/json')\n token = r.data['data']['access_token']\n header = {'HTTP_AUTHORIZATION': 'Bearer {token}'.format(token=token)}\n r = self.client.get('/api/my-service-area/efcb0080-1ce9-47a0-8299-15c126b84d32/', content_type='application/json', **header)\n self.assertFalse(r.data['success'])\n self.assertEqual(r.status_code, status.HTTP_204_NO_CONTENT)\n", "source": "the_stack_v2_python_sparse", "source_path": "service_area/tests.py", "source_repo": "jinayshah86/service-provider-locator", "split": "val", "star_events_count": 0}
{"blob_id": "e9d4d3849ca4fdaa8c3d437098f472d2ad00c4bb", "bodies": ["alto_path = os.path.join(self.path, 'ALTO')\nif not os.path.exists(alto_path):\n logger.critical(f'Could not find pages for {self.id}')\npage_file_names = [file for file in os.listdir(alto_path) if not file.startswith('.') and '.xml' in file]\npage_numbers = []\nfor fname in page_file_names:\n page_no = fname.split('.')[0]\n page_numbers.append(int(page_no))\npage_canonical_names = ['{}-p{}'.format(self.id, str(page_n).zfill(4)) for page_n in page_numbers]\nself.pages = []\nfor filename, page_no, page_id in zip(page_file_names, page_numbers, page_canonical_names):\n try:\n self.pages.append(ReroNewspaperPage(page_id, page_no, filename, alto_path))\n except Exception as e:\n logger.error(f'Adding page {page_no} {page_id} {filename}', f'raised following exception: {e}')\n raise e", "parts = []\nfor child in content_div.children:\n if isinstance(child, NavigableString):\n continue\n elif isinstance(child, Tag):\n type_attr = child.get('TYPE')\n comp_role = type_attr.lower() if type_attr else None\n areas = child.findAll('area')\n for area in areas:\n comp_id = area.get('BEGIN')\n comp_fileid = area.get('FILEID')\n comp_page_no = int(comp_fileid.replace('ALTO', ''))\n parts.append({'comp_role': comp_role, 'comp_id': comp_id, 'comp_fileid': comp_fileid, 'comp_page_no': comp_page_no})\nreturn parts", "div_type = item_div.get('TYPE').lower()\nif div_type == 'picture':\n div_type = CONTENTITEM_TYPE_IMAGE\nif div_type not in CONTENTITEM_TYPES:\n logger.warning(f'Found new content item type: {div_type}')\nmetadata = {'id': '{}-i{}'.format(self.id, str(counter).zfill(4)), 'tp': div_type, 'pp': [], 't': item_div.get('LABEL')}\ncontent_item = {'m': metadata, 'l': {'id': item_div.get('ID'), 'parts': self._parse_content_parts(item_div)}}\nfor p in content_item['l']['parts']:\n pge_no = p['comp_page_no']\n if pge_no not in content_item['m']['pp']:\n content_item['m']['pp'].append(pge_no)\nreturn content_item", "content_items = []\ndivs = mets_doc.find('div', {'TYPE': 'CONTENT'}).findChildren('div', recursive=False)\nsorted_divs = sorted(divs, key=lambda x: x.get('ID').lower())\nfound_types = set((x.get('TYPE') for x in sorted_divs))\nprint(f'Found types {found_types} for content items')\ncounter = 1\nfor div in sorted_divs:\n content_items.append(self._parse_content_item(div, counter))\n counter += 1\nreturn content_items", "mets_doc = self.xml\nself.image_properties = parse_mets_amdsec(mets_doc, x_res='XphysScanResolution', y_res='YphysScanResolution')\ncontent_items = self._parse_content_items(mets_doc)\nself.issue_data = {'cdt': strftime('%Y-%m-%d %H:%M:%S'), 'i': content_items, 'id': self.id, 'ar': self.rights, 'pp': [p.id for p in self.pages]}"], "bodies_text": "<|body_start_0|>\n alto_path = os.path.join(self.path, 'ALTO')\n if not os.path.exists(alto_path):\n logger.critical(f'Could not find pages for {self.id}')\n page_file_names = [file for file in os.listdir(alto_path) if not file.startswith('.') and '.xml' in file]\n page_numbers = []\n for fname in page_file_names:\n page_no = fname.split('.')[0]\n page_numbers.append(int(page_no))\n page_canonical_names = ['{}-p{}'.format(self.id, str(page_n).zfill(4)) for page_n in page_numbers]\n self.pages = []\n for filename, page_no, page_id in zip(page_file_names, page_numbers, page_canonical_names):\n try:\n self.pages.append(ReroNewspaperPage(page_id, page_no, filename, alto_path))\n except Exception as e:\n logger.error(f'Adding page {page_no} {page_id} {filename}', f'raised 
following exception: {e}')\n raise e\n<|end_body_0|>\n\n<|body_start_1|>\n parts = []\n for child in content_div.children:\n if isinstance(child, NavigableString):\n continue\n elif isinstance(child, Tag):\n type_attr = child.get('TYPE')\n comp_role = type_attr.lower() if type_attr else None\n areas = child.findAll('area')\n for area in areas:\n comp_id = area.get('BEGIN')\n comp_fileid = area.get('FILEID')\n comp_page_no = int(comp_fileid.replace('ALTO', ''))\n parts.append({'comp_role': comp_role, 'comp_id': comp_id, 'comp_fileid': comp_fileid, 'comp_page_no': comp_page_no})\n return parts\n<|end_body_1|>\n\n<|body_start_2|>\n div_type = item_div.get('TYPE').lower()\n if div_type == 'picture':\n div_type = CONTENTITEM_TYPE_IMAGE\n if div_type not in CONTENTITEM_TYPES:\n logger.warning(f'Found new content item type: {div_type}')\n metadata = {'id': '{}-i{}'.format(self.id, str(counter).zfill(4)), 'tp': div_type, 'pp': [], 't': item_div.get('LABEL')}\n content_item = {'m': metadata, 'l': {'id': item_div.get('ID'), 'parts': self._parse_content_parts(item_div)}}\n for p in content_item['l']['parts']:\n pge_no = p['comp_page_no']\n if pge_no not in content_item['m']['pp']:\n content_item['m']['pp'].append(pge_no)\n return content_item\n<|end_body_2|>\n\n<|body_start_3|>\n content_items = []\n divs = mets_doc.find('div', {'TYPE': 'CONTENT'}).findChildren('div', recursive=False)\n sorted_divs = sorted(divs, key=lambda x: x.get('ID').lower())\n found_types = set((x.get('TYPE') for x in sorted_divs))\n print(f'Found types {found_types} for content items')\n counter = 1\n for div in sorted_divs:\n content_items.append(self._parse_content_item(div, counter))\n counter += 1\n return content_items\n<|end_body_3|>\n\n<|body_start_4|>\n mets_doc = self.xml\n self.image_properties = parse_mets_amdsec(mets_doc, x_res='XphysScanResolution', y_res='YphysScanResolution')\n content_items = self._parse_content_items(mets_doc)\n self.issue_data = {'cdt': strftime('%Y-%m-%d %H:%M:%S'), 'i': content_items, 'id': self.id, 'ar': self.rights, 'pp': [p.id for p in self.pages]}\n<|end_body_4|>\n", "class_docstring": "Class representing an issue in RERO (Mets/Alto) data. All functions defined in this child class are specific to parsing RERO Mets/Alto format", "class_name": "ReroNewspaperIssue", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ReroNewspaperIssue:\n \"\"\"Class representing an issue in RERO (Mets/Alto) data. All functions defined in this child class are specific to parsing RERO Mets/Alto format\"\"\"\n\n def _find_pages(self):\n \"\"\"Detects the Alto XML page files for a newspaper issue and initializes page objects.\"\"\"\n <|body_0|>\n\n def _parse_content_parts(self, content_div) -> List[Dict[str, str]]:\n \"\"\"Given the div of a content item, this function parses the children and constructs the legacy `parts` component :param content_div: The div containing the content item :return: list[dict] of different parts for this content item (role, id, fileid, page)\"\"\"\n <|body_1|>\n\n def _parse_content_item(self, item_div, counter: int):\n \"\"\"Parses a content item div and returns the dictionary representing it. :param item_div: Div of content item :param counter: Number of content items already added (needed to generate canonical id). :return: dict, of the resulting content item\"\"\"\n <|body_2|>\n\n def _parse_content_items(self, mets_doc: BeautifulSoup):\n \"\"\"Extract content item elements from a Mets XML file. 
:param BeautifulSoup mets_doc: :return:\"\"\"\n <|body_3|>\n\n def _parse_mets(self):\n \"\"\"Parses the Mets XML file of the newspaper issue.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n alto_path = os.path.join(self.path, 'ALTO')\n if not os.path.exists(alto_path):\n logger.critical(f'Could not find pages for {self.id}')\n page_file_names = [file for file in os.listdir(alto_path) if not file.startswith('.') and '.xml' in file]\n page_numbers = []\n for fname in page_file_names:\n page_no = fname.split('.')[0]\n page_numbers.append(int(page_no))\n page_canonical_names = ['{}-p{}'.format(self.id, str(page_n).zfill(4)) for page_n in page_numbers]\n self.pages = []\n for filename, page_no, page_id in zip(page_file_names, page_numbers, page_canonical_names):\n try:\n self.pages.append(ReroNewspaperPage(page_id, page_no, filename, alto_path))\n except Exception as e:\n logger.error(f'Adding page {page_no} {page_id} {filename}', f'raised following exception: {e}')\n raise e\n<|end_body_0|>\n\n<|body_start_1|>\n parts = []\n for child in content_div.children:\n if isinstance(child, NavigableString):\n continue\n elif isinstance(child, Tag):\n type_attr = child.get('TYPE')\n comp_role = type_attr.lower() if type_attr else None\n areas = child.findAll('area')\n for area in areas:\n comp_id = area.get('BEGIN')\n comp_fileid = area.get('FILEID')\n comp_page_no = int(comp_fileid.replace('ALTO', ''))\n parts.append({'comp_role': comp_role, 'comp_id': comp_id, 'comp_fileid': comp_fileid, 'comp_page_no': comp_page_no})\n return parts\n<|end_body_1|>\n\n<|body_start_2|>\n div_type = item_div.get('TYPE').lower()\n if div_type == 'picture':\n div_type = CONTENTITEM_TYPE_IMAGE\n if div_type not in CONTENTITEM_TYPES:\n logger.warning(f'Found new content item type: {div_type}')\n metadata = {'id': '{}-i{}'.format(self.id, str(counter).zfill(4)), 'tp': div_type, 'pp': [], 't': item_div.get('LABEL')}\n content_item = {'m': metadata, 'l': {'id': item_div.get('ID'), 'parts': self._parse_content_parts(item_div)}}\n for p in content_item['l']['parts']:\n pge_no = p['comp_page_no']\n if pge_no not in content_item['m']['pp']:\n content_item['m']['pp'].append(pge_no)\n return content_item\n<|end_body_2|>\n\n<|body_start_3|>\n content_items = []\n divs = mets_doc.find('div', {'TYPE': 'CONTENT'}).findChildren('div', recursive=False)\n sorted_divs = sorted(divs, key=lambda x: x.get('ID').lower())\n found_types = set((x.get('TYPE') for x in sorted_divs))\n print(f'Found types {found_types} for content items')\n counter = 1\n for div in sorted_divs:\n content_items.append(self._parse_content_item(div, counter))\n counter += 1\n return content_items\n<|end_body_3|>\n\n<|body_start_4|>\n mets_doc = self.xml\n self.image_properties = parse_mets_amdsec(mets_doc, x_res='XphysScanResolution', y_res='YphysScanResolution')\n content_items = self._parse_content_items(mets_doc)\n self.issue_data = {'cdt': strftime('%Y-%m-%d %H:%M:%S'), 'i': content_items, 'id': self.id, 'ar': self.rights, 'pp': [p.id for p in self.pages]}\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000093", "length_bytes": 7435, "license_type": "permissive", "methods": [{"docstring": "Detects the Alto XML page files for a newspaper issue and initializes page objects.", "name": "_find_pages", "signature": "def _find_pages(self)"}, {"docstring": "Given the div of a content item, this function parses the children and constructs the legacy `parts` component :param content_div: The div containing the content item :return: list[dict] of 
different parts for this content item (role, id, fileid, page)", "name": "_parse_content_parts", "signature": "def _parse_content_parts(self, content_div) -> List[Dict[str, str]]"}, {"docstring": "Parses a content item div and returns the dictionary representing it. :param item_div: Div of content item :param counter: Number of content items already added (needed to generate canonical id). :return: dict, of the resulting content item", "name": "_parse_content_item", "signature": "def _parse_content_item(self, item_div, counter: int)"}, {"docstring": "Extract content item elements from a Mets XML file. :param BeautifulSoup mets_doc: :return:", "name": "_parse_content_items", "signature": "def _parse_content_items(self, mets_doc: BeautifulSoup)"}, {"docstring": "Parses the Mets XML file of the newspaper issue.", "name": "_parse_mets", "signature": "def _parse_mets(self)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_031946", "prompt": "Implement the Python class `ReroNewspaperIssue` described below.\n\nClass description:\nClass representing an issue in RERO (Mets/Alto) data. All functions defined in this child class are specific to parsing RERO Mets/Alto format\n\nMethod signatures and docstrings:\n- def _find_pages(self): Detects the Alto XML page files for a newspaper issue and initializes page objects.\n- def _parse_content_parts(self, content_div) -> List[Dict[str, str]]: Given the div of a content item, this function parses the children and constructs the legacy `parts` component :param content_div: The div containing the content item :return: list[dict] of different parts for this content item (role, id, fileid, page)\n- def _parse_content_item(self, item_div, counter: int): Parses a content item div and returns the dictionary representing it. :param item_div: Div of content item :param counter: Number of content items already added (needed to generate canonical id). :return: dict, of the resulting content item\n- def _parse_content_items(self, mets_doc: BeautifulSoup): Extract content item elements from a Mets XML file. :param BeautifulSoup mets_doc: :return:\n- def _parse_mets(self): Parses the Mets XML file of the newspaper issue.", "prompted_full_text": "Implement the Python class `ReroNewspaperIssue` described below.\n\nClass description:\nClass representing an issue in RERO (Mets/Alto) data. All functions defined in this child class are specific to parsing RERO Mets/Alto format\n\nMethod signatures and docstrings:\n- def _find_pages(self): Detects the Alto XML page files for a newspaper issue and initializes page objects.\n- def _parse_content_parts(self, content_div) -> List[Dict[str, str]]: Given the div of a content item, this function parses the children and constructs the legacy `parts` component :param content_div: The div containing the content item :return: list[dict] of different parts for this content item (role, id, fileid, page)\n- def _parse_content_item(self, item_div, counter: int): Parses a content item div and returns the dictionary representing it. :param item_div: Div of content item :param counter: Number of content items already added (needed to generate canonical id). :return: dict, of the resulting content item\n- def _parse_content_items(self, mets_doc: BeautifulSoup): Extract content item elements from a Mets XML file. 
:param BeautifulSoup mets_doc: :return:\n- def _parse_mets(self): Parses the Mets XML file of the newspaper issue.\n\n<|skeleton|>\nclass ReroNewspaperIssue:\n \"\"\"Class representing an issue in RERO (Mets/Alto) data. All functions defined in this child class are specific to parsing RERO Mets/Alto format\"\"\"\n\n def _find_pages(self):\n \"\"\"Detects the Alto XML page files for a newspaper issue and initializes page objects.\"\"\"\n <|body_0|>\n\n def _parse_content_parts(self, content_div) -> List[Dict[str, str]]:\n \"\"\"Given the div of a content item, this function parses the children and constructs the legacy `parts` component :param content_div: The div containing the content item :return: list[dict] of different parts for this content item (role, id, fileid, page)\"\"\"\n <|body_1|>\n\n def _parse_content_item(self, item_div, counter: int):\n \"\"\"Parses a content item div and returns the dictionary representing it. :param item_div: Div of content item :param counter: Number of content items already added (needed to generate canonical id). :return: dict, of the resulting content item\"\"\"\n <|body_2|>\n\n def _parse_content_items(self, mets_doc: BeautifulSoup):\n \"\"\"Extract content item elements from a Mets XML file. :param BeautifulSoup mets_doc: :return:\"\"\"\n <|body_3|>\n\n def _parse_mets(self):\n \"\"\"Parses the Mets XML file of the newspaper issue.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n alto_path = os.path.join(self.path, 'ALTO')\n if not os.path.exists(alto_path):\n logger.critical(f'Could not find pages for {self.id}')\n page_file_names = [file for file in os.listdir(alto_path) if not file.startswith('.') and '.xml' in file]\n page_numbers = []\n for fname in page_file_names:\n page_no = fname.split('.')[0]\n page_numbers.append(int(page_no))\n page_canonical_names = ['{}-p{}'.format(self.id, str(page_n).zfill(4)) for page_n in page_numbers]\n self.pages = []\n for filename, page_no, page_id in zip(page_file_names, page_numbers, page_canonical_names):\n try:\n self.pages.append(ReroNewspaperPage(page_id, page_no, filename, alto_path))\n except Exception as e:\n logger.error(f'Adding page {page_no} {page_id} {filename}', f'raised following exception: {e}')\n raise e\n<|end_body_0|>\n\n<|body_start_1|>\n parts = []\n for child in content_div.children:\n if isinstance(child, NavigableString):\n continue\n elif isinstance(child, Tag):\n type_attr = child.get('TYPE')\n comp_role = type_attr.lower() if type_attr else None\n areas = child.findAll('area')\n for area in areas:\n comp_id = area.get('BEGIN')\n comp_fileid = area.get('FILEID')\n comp_page_no = int(comp_fileid.replace('ALTO', ''))\n parts.append({'comp_role': comp_role, 'comp_id': comp_id, 'comp_fileid': comp_fileid, 'comp_page_no': comp_page_no})\n return parts\n<|end_body_1|>\n\n<|body_start_2|>\n div_type = item_div.get('TYPE').lower()\n if div_type == 'picture':\n div_type = CONTENTITEM_TYPE_IMAGE\n if div_type not in CONTENTITEM_TYPES:\n logger.warning(f'Found new content item type: {div_type}')\n metadata = {'id': '{}-i{}'.format(self.id, str(counter).zfill(4)), 'tp': div_type, 'pp': [], 't': item_div.get('LABEL')}\n content_item = {'m': metadata, 'l': {'id': item_div.get('ID'), 'parts': self._parse_content_parts(item_div)}}\n for p in content_item['l']['parts']:\n pge_no = p['comp_page_no']\n if pge_no not in content_item['m']['pp']:\n content_item['m']['pp'].append(pge_no)\n return content_item\n<|end_body_2|>\n\n<|body_start_3|>\n content_items = []\n divs = mets_doc.find('div', 
{'TYPE': 'CONTENT'}).findChildren('div', recursive=False)\n sorted_divs = sorted(divs, key=lambda x: x.get('ID').lower())\n found_types = set((x.get('TYPE') for x in sorted_divs))\n print(f'Found types {found_types} for content items')\n counter = 1\n for div in sorted_divs:\n content_items.append(self._parse_content_item(div, counter))\n counter += 1\n return content_items\n<|end_body_3|>\n\n<|body_start_4|>\n mets_doc = self.xml\n self.image_properties = parse_mets_amdsec(mets_doc, x_res='XphysScanResolution', y_res='YphysScanResolution')\n content_items = self._parse_content_items(mets_doc)\n self.issue_data = {'cdt': strftime('%Y-%m-%d %H:%M:%S'), 'i': content_items, 'id': self.id, 'ar': self.rights, 'pp': [p.id for p in self.pages]}\n<|end_body_4|>\n", "revision_id": "ed8f0586ed6a4f7de94b1504b292570bce1f51c5", "skeleton": "<|skeleton|>\nclass ReroNewspaperIssue:\n \"\"\"Class representing an issue in RERO (Mets/Alto) data. All functions defined in this child class are specific to parsing RERO Mets/Alto format\"\"\"\n\n def _find_pages(self):\n \"\"\"Detects the Alto XML page files for a newspaper issue and initializes page objects.\"\"\"\n <|body_0|>\n\n def _parse_content_parts(self, content_div) -> List[Dict[str, str]]:\n \"\"\"Given the div of a content item, this function parses the children and constructs the legacy `parts` component :param content_div: The div containing the content item :return: list[dict] of different parts for this content item (role, id, fileid, page)\"\"\"\n <|body_1|>\n\n def _parse_content_item(self, item_div, counter: int):\n \"\"\"Parses a content item div and returns the dictionary representing it. :param item_div: Div of content item :param counter: Number of content items already added (needed to generate canonical id). :return: dict, of the resulting content item\"\"\"\n <|body_2|>\n\n def _parse_content_items(self, mets_doc: BeautifulSoup):\n \"\"\"Extract content item elements from a Mets XML file. :param BeautifulSoup mets_doc: :return:\"\"\"\n <|body_3|>\n\n def _parse_mets(self):\n \"\"\"Parses the Mets XML file of the newspaper issue.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ReroNewspaperIssue:\n \"\"\"Class representing an issue in RERO (Mets/Alto) data. 
All functions defined in this child class are specific to parsing RERO Mets/Alto format\"\"\"\n\n def _find_pages(self):\n \"\"\"Detects the Alto XML page files for a newspaper issue and initializes page objects.\"\"\"\n alto_path = os.path.join(self.path, 'ALTO')\n if not os.path.exists(alto_path):\n logger.critical(f'Could not find pages for {self.id}')\n page_file_names = [file for file in os.listdir(alto_path) if not file.startswith('.') and '.xml' in file]\n page_numbers = []\n for fname in page_file_names:\n page_no = fname.split('.')[0]\n page_numbers.append(int(page_no))\n page_canonical_names = ['{}-p{}'.format(self.id, str(page_n).zfill(4)) for page_n in page_numbers]\n self.pages = []\n for filename, page_no, page_id in zip(page_file_names, page_numbers, page_canonical_names):\n try:\n self.pages.append(ReroNewspaperPage(page_id, page_no, filename, alto_path))\n except Exception as e:\n logger.error(f'Adding page {page_no} {page_id} {filename}', f'raised following exception: {e}')\n raise e\n\n def _parse_content_parts(self, content_div) -> List[Dict[str, str]]:\n \"\"\"Given the div of a content item, this function parses the children and constructs the legacy `parts` component :param content_div: The div containing the content item :return: list[dict] of different parts for this content item (role, id, fileid, page)\"\"\"\n parts = []\n for child in content_div.children:\n if isinstance(child, NavigableString):\n continue\n elif isinstance(child, Tag):\n type_attr = child.get('TYPE')\n comp_role = type_attr.lower() if type_attr else None\n areas = child.findAll('area')\n for area in areas:\n comp_id = area.get('BEGIN')\n comp_fileid = area.get('FILEID')\n comp_page_no = int(comp_fileid.replace('ALTO', ''))\n parts.append({'comp_role': comp_role, 'comp_id': comp_id, 'comp_fileid': comp_fileid, 'comp_page_no': comp_page_no})\n return parts\n\n def _parse_content_item(self, item_div, counter: int):\n \"\"\"Parses a content item div and returns the dictionary representing it. :param item_div: Div of content item :param counter: Number of content items already added (needed to generate canonical id). :return: dict, of the resulting content item\"\"\"\n div_type = item_div.get('TYPE').lower()\n if div_type == 'picture':\n div_type = CONTENTITEM_TYPE_IMAGE\n if div_type not in CONTENTITEM_TYPES:\n logger.warning(f'Found new content item type: {div_type}')\n metadata = {'id': '{}-i{}'.format(self.id, str(counter).zfill(4)), 'tp': div_type, 'pp': [], 't': item_div.get('LABEL')}\n content_item = {'m': metadata, 'l': {'id': item_div.get('ID'), 'parts': self._parse_content_parts(item_div)}}\n for p in content_item['l']['parts']:\n pge_no = p['comp_page_no']\n if pge_no not in content_item['m']['pp']:\n content_item['m']['pp'].append(pge_no)\n return content_item\n\n def _parse_content_items(self, mets_doc: BeautifulSoup):\n \"\"\"Extract content item elements from a Mets XML file. 
:param BeautifulSoup mets_doc: :return:\"\"\"\n content_items = []\n divs = mets_doc.find('div', {'TYPE': 'CONTENT'}).findChildren('div', recursive=False)\n sorted_divs = sorted(divs, key=lambda x: x.get('ID').lower())\n found_types = set((x.get('TYPE') for x in sorted_divs))\n print(f'Found types {found_types} for content items')\n counter = 1\n for div in sorted_divs:\n content_items.append(self._parse_content_item(div, counter))\n counter += 1\n return content_items\n\n def _parse_mets(self):\n \"\"\"Parses the Mets XML file of the newspaper issue.\"\"\"\n mets_doc = self.xml\n self.image_properties = parse_mets_amdsec(mets_doc, x_res='XphysScanResolution', y_res='YphysScanResolution')\n content_items = self._parse_content_items(mets_doc)\n self.issue_data = {'cdt': strftime('%Y-%m-%d %H:%M:%S'), 'i': content_items, 'id': self.id, 'ar': self.rights, 'pp': [p.id for p in self.pages]}\n", "source": "the_stack_v2_python_sparse", "source_path": "text_importer/importers/rero/classes.py", "source_repo": "aflueckiger/impresso-text-acquisition", "split": "val", "star_events_count": 0}
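One detail in the ReroNewspaperIssue record above deserves a flag: _find_pages calls logger.error() with two positional strings. The logging module treats the second string as a %-format argument, and since the message contains no '%s' placeholder the call fails to format internally (logging reports "--- Logging error ---") and the second string never reaches the log. A runnable demonstration, followed by the single-message form that was presumably intended:

import logging

logging.basicConfig()
logger = logging.getLogger('demo')
page_no, page_id, filename = 1, 'issue-p0001', '1.xml'
e = ValueError('boom')

# As written in the record: the second string becomes a %-format arg and
# logging prints a formatting-error traceback instead of the message.
logger.error(f'Adding page {page_no} {page_id} {filename}',
             f'raised following exception: {e}')

# Presumed intent: one message string.
logger.error(f'Adding page {page_no} {page_id} {filename} '
             f'raised following exception: {e}')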
{"blob_id": "c2daf9ca0daea960f472adf3cb13effc82f6abd8", "bodies": ["def perm(cnt):\n if len(cnt) == 1:\n k, v = cnt.popitem()\n return [[k] * v]\n ret = []\n for k in cnt:\n sub = perm({x: c if x != k else c - 1 for x, c in cnt.items() if (c if x != k else c - 1) > 0})\n ret.extend([[k, *y] for y in sub])\n return ret\nreturn perm(Counter(nums))", "def perm(cnt):\n if len(cnt) == 1:\n k = next(iter(cnt))\n return [[k] * cnt[k]]\n ret = []\n keys = list(cnt.keys())\n for k in keys:\n if cnt[k] == 0:\n continue\n cnt[k] -= 1\n if cnt[k] == 0:\n cnt.pop(k)\n sub = perm(cnt)\n cnt.setdefault(k, 0)\n cnt[k] += 1\n ret.extend([[k, *y] for y in sub])\n return ret\nreturn perm(Counter(nums))", "class HashableDict(dict):\n\n def __hash__(self):\n return hash((frozenset(self), frozenset(self.values())))\n\n@lru_cache(None)\ndef perm(cnt: HashableDict):\n if len(cnt) == 1:\n k, v = cnt.popitem()\n return [[k] * v]\n ret = []\n for k in cnt:\n sub = perm(HashableDict({x: c if x != k else c - 1 for x, c in cnt.items() if (c if x != k else c - 1) > 0}))\n ret.extend([[k, *y] for y in sub])\n return ret\nreturn perm(HashableDict(Counter(nums)))", "nums.sort()\n\ndef dfs(cur, remain):\n if remain == 0:\n return [cur]\n ret = []\n pv = None\n for i in range(len(nums)):\n if pv == nums[i]:\n continue\n if remain & 1 << i == 0:\n continue\n pv = nums[i]\n ret.extend(dfs(cur + [nums[i]], remain ^ 1 << i))\n return ret\nreturn dfs([], (1 << len(nums)) - 1)"], "bodies_text": "<|body_start_0|>\n def perm(cnt):\n if len(cnt) == 1:\n k, v = cnt.popitem()\n return [[k] * v]\n ret = []\n for k in cnt:\n sub = perm({x: c if x != k else c - 1 for x, c in cnt.items() if (c if x != k else c - 1) > 0})\n ret.extend([[k, *y] for y in sub])\n return ret\n return perm(Counter(nums))\n<|end_body_0|>\n\n<|body_start_1|>\n def perm(cnt):\n if len(cnt) == 1:\n k = next(iter(cnt))\n return [[k] * cnt[k]]\n ret = []\n keys = list(cnt.keys())\n for k in keys:\n if cnt[k] == 0:\n continue\n cnt[k] -= 1\n if cnt[k] == 0:\n cnt.pop(k)\n sub = perm(cnt)\n cnt.setdefault(k, 0)\n cnt[k] += 1\n ret.extend([[k, *y] for y in sub])\n return ret\n return perm(Counter(nums))\n<|end_body_1|>\n\n<|body_start_2|>\n class HashableDict(dict):\n\n def __hash__(self):\n return hash((frozenset(self), frozenset(self.values())))\n\n @lru_cache(None)\n def perm(cnt: HashableDict):\n if len(cnt) == 1:\n k, v = cnt.popitem()\n return [[k] * v]\n ret = []\n for k in cnt:\n sub = perm(HashableDict({x: c if x != k else c - 1 for x, c in cnt.items() if (c if x != k else c - 1) > 0}))\n ret.extend([[k, *y] for y in sub])\n return ret\n return perm(HashableDict(Counter(nums)))\n<|end_body_2|>\n\n<|body_start_3|>\n nums.sort()\n\n def dfs(cur, remain):\n if remain == 0:\n return [cur]\n ret = []\n pv = None\n for i in range(len(nums)):\n if pv == nums[i]:\n continue\n if remain & 1 << i == 0:\n continue\n pv = nums[i]\n ret.extend(dfs(cur + [nums[i]], remain ^ 1 << i))\n return ret\n return dfs([], (1 << len(nums)) - 1)\n<|end_body_3|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def permuteUnique(self, nums: List[int]) -> List[List[int]]:\n \"\"\"05/22/2022 16:32\"\"\"\n <|body_0|>\n\n def permuteUnique(self, nums: List[int]) -> List[List[int]]:\n \"\"\"05/22/2022 16:55 Not to recreate counter\"\"\"\n <|body_1|>\n\n def permuteUnique(self, nums: List[int]) -> List[List[int]]:\n \"\"\"05/22/2022 16:41 Cache previous 
result\"\"\"\n <|body_2|>\n\n def permuteUnique(self, nums: List[int]) -> List[List[int]]:\n \"\"\"05/22/2022 19:25 Backtrack\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def perm(cnt):\n if len(cnt) == 1:\n k, v = cnt.popitem()\n return [[k] * v]\n ret = []\n for k in cnt:\n sub = perm({x: c if x != k else c - 1 for x, c in cnt.items() if (c if x != k else c - 1) > 0})\n ret.extend([[k, *y] for y in sub])\n return ret\n return perm(Counter(nums))\n<|end_body_0|>\n\n<|body_start_1|>\n def perm(cnt):\n if len(cnt) == 1:\n k = next(iter(cnt))\n return [[k] * cnt[k]]\n ret = []\n keys = list(cnt.keys())\n for k in keys:\n if cnt[k] == 0:\n continue\n cnt[k] -= 1\n if cnt[k] == 0:\n cnt.pop(k)\n sub = perm(cnt)\n cnt.setdefault(k, 0)\n cnt[k] += 1\n ret.extend([[k, *y] for y in sub])\n return ret\n return perm(Counter(nums))\n<|end_body_1|>\n\n<|body_start_2|>\n class HashableDict(dict):\n\n def __hash__(self):\n return hash((frozenset(self), frozenset(self.values())))\n\n @lru_cache(None)\n def perm(cnt: HashableDict):\n if len(cnt) == 1:\n k, v = cnt.popitem()\n return [[k] * v]\n ret = []\n for k in cnt:\n sub = perm(HashableDict({x: c if x != k else c - 1 for x, c in cnt.items() if (c if x != k else c - 1) > 0}))\n ret.extend([[k, *y] for y in sub])\n return ret\n return perm(HashableDict(Counter(nums)))\n<|end_body_2|>\n\n<|body_start_3|>\n nums.sort()\n\n def dfs(cur, remain):\n if remain == 0:\n return [cur]\n ret = []\n pv = None\n for i in range(len(nums)):\n if pv == nums[i]:\n continue\n if remain & 1 << i == 0:\n continue\n pv = nums[i]\n ret.extend(dfs(cur + [nums[i]], remain ^ 1 << i))\n return ret\n return dfs([], (1 << len(nums)) - 1)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000094", "length_bytes": 3681, "license_type": "no_license", "methods": [{"docstring": "05/22/2022 16:32", "name": "permuteUnique", "signature": "def permuteUnique(self, nums: List[int]) -> List[List[int]]"}, {"docstring": "05/22/2022 16:55 Not to recreate counter", "name": "permuteUnique", "signature": "def permuteUnique(self, nums: List[int]) -> List[List[int]]"}, {"docstring": "05/22/2022 16:41 Cache previous result", "name": "permuteUnique", "signature": "def permuteUnique(self, nums: List[int]) -> List[List[int]]"}, {"docstring": "05/22/2022 19:25 Backtrack", "name": "permuteUnique", "signature": "def permuteUnique(self, nums: List[int]) -> List[List[int]]"}], "n_methods": 4, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def permuteUnique(self, nums: List[int]) -> List[List[int]]: 05/22/2022 16:32\n- def permuteUnique(self, nums: List[int]) -> List[List[int]]: 05/22/2022 16:55 Not to recreate counter\n- def permuteUnique(self, nums: List[int]) -> List[List[int]]: 05/22/2022 16:41 Cache previous result\n- def permuteUnique(self, nums: List[int]) -> List[List[int]]: 05/22/2022 19:25 Backtrack", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def permuteUnique(self, nums: List[int]) -> List[List[int]]: 05/22/2022 16:32\n- def permuteUnique(self, nums: List[int]) -> List[List[int]]: 05/22/2022 16:55 Not to recreate counter\n- def permuteUnique(self, nums: List[int]) -> List[List[int]]: 05/22/2022 16:41 Cache previous result\n- def permuteUnique(self, nums: List[int]) -> List[List[int]]: 05/22/2022 19:25 Backtrack\n\n<|skeleton|>\nclass 
Solution:\n\n def permuteUnique(self, nums: List[int]) -> List[List[int]]:\n \"\"\"05/22/2022 16:32\"\"\"\n <|body_0|>\n\n def permuteUnique(self, nums: List[int]) -> List[List[int]]:\n \"\"\"05/22/2022 16:55 Not to recreate counter\"\"\"\n <|body_1|>\n\n def permuteUnique(self, nums: List[int]) -> List[List[int]]:\n \"\"\"05/22/2022 16:41 Cache previous result\"\"\"\n <|body_2|>\n\n def permuteUnique(self, nums: List[int]) -> List[List[int]]:\n \"\"\"05/22/2022 19:25 Backtrack\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def perm(cnt):\n if len(cnt) == 1:\n k, v = cnt.popitem()\n return [[k] * v]\n ret = []\n for k in cnt:\n sub = perm({x: c if x != k else c - 1 for x, c in cnt.items() if (c if x != k else c - 1) > 0})\n ret.extend([[k, *y] for y in sub])\n return ret\n return perm(Counter(nums))\n<|end_body_0|>\n\n<|body_start_1|>\n def perm(cnt):\n if len(cnt) == 1:\n k = next(iter(cnt))\n return [[k] * cnt[k]]\n ret = []\n keys = list(cnt.keys())\n for k in keys:\n if cnt[k] == 0:\n continue\n cnt[k] -= 1\n if cnt[k] == 0:\n cnt.pop(k)\n sub = perm(cnt)\n cnt.setdefault(k, 0)\n cnt[k] += 1\n ret.extend([[k, *y] for y in sub])\n return ret\n return perm(Counter(nums))\n<|end_body_1|>\n\n<|body_start_2|>\n class HashableDict(dict):\n\n def __hash__(self):\n return hash((frozenset(self), frozenset(self.values())))\n\n @lru_cache(None)\n def perm(cnt: HashableDict):\n if len(cnt) == 1:\n k, v = cnt.popitem()\n return [[k] * v]\n ret = []\n for k in cnt:\n sub = perm(HashableDict({x: c if x != k else c - 1 for x, c in cnt.items() if (c if x != k else c - 1) > 0}))\n ret.extend([[k, *y] for y in sub])\n return ret\n return perm(HashableDict(Counter(nums)))\n<|end_body_2|>\n\n<|body_start_3|>\n nums.sort()\n\n def dfs(cur, remain):\n if remain == 0:\n return [cur]\n ret = []\n pv = None\n for i in range(len(nums)):\n if pv == nums[i]:\n continue\n if remain & 1 << i == 0:\n continue\n pv = nums[i]\n ret.extend(dfs(cur + [nums[i]], remain ^ 1 << i))\n return ret\n return dfs([], (1 << len(nums)) - 1)\n<|end_body_3|>\n", "revision_id": "1389a009a02e90e8700a7a00e0b7f797c129cdf4", "skeleton": "<|skeleton|>\nclass Solution:\n\n def permuteUnique(self, nums: List[int]) -> List[List[int]]:\n \"\"\"05/22/2022 16:32\"\"\"\n <|body_0|>\n\n def permuteUnique(self, nums: List[int]) -> List[List[int]]:\n \"\"\"05/22/2022 16:55 Not to recreate counter\"\"\"\n <|body_1|>\n\n def permuteUnique(self, nums: List[int]) -> List[List[int]]:\n \"\"\"05/22/2022 16:41 Cache previous result\"\"\"\n <|body_2|>\n\n def permuteUnique(self, nums: List[int]) -> List[List[int]]:\n \"\"\"05/22/2022 19:25 Backtrack\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def permuteUnique(self, nums: List[int]) -> List[List[int]]:\n \"\"\"05/22/2022 16:32\"\"\"\n def perm(cnt):\n if len(cnt) == 1:\n k, v = cnt.popitem()\n return [[k] * v]\n ret = []\n for k in cnt:\n sub = perm({x: c if x != k else c - 1 for x, c in cnt.items() if (c if x != k else c - 1) > 0})\n ret.extend([[k, *y] for y in sub])\n return ret\n return perm(Counter(nums))\n\n def permuteUnique(self, nums: List[int]) -> List[List[int]]:\n \"\"\"05/22/2022 16:55 Not to recreate counter\"\"\"\n def perm(cnt):\n if len(cnt) == 1:\n k = next(iter(cnt))\n return [[k] * cnt[k]]\n ret = []\n keys = list(cnt.keys())\n for k in keys:\n if cnt[k] == 0:\n continue\n cnt[k] -= 1\n if cnt[k] == 0:\n 
cnt.pop(k)\n sub = perm(cnt)\n cnt.setdefault(k, 0)\n cnt[k] += 1\n ret.extend([[k, *y] for y in sub])\n return ret\n return perm(Counter(nums))\n\n def permuteUnique(self, nums: List[int]) -> List[List[int]]:\n \"\"\"05/22/2022 16:41 Cache previous result\"\"\"\n class HashableDict(dict):\n\n def __hash__(self):\n return hash((frozenset(self), frozenset(self.values())))\n\n @lru_cache(None)\n def perm(cnt: HashableDict):\n if len(cnt) == 1:\n k, v = cnt.popitem()\n return [[k] * v]\n ret = []\n for k in cnt:\n sub = perm(HashableDict({x: c if x != k else c - 1 for x, c in cnt.items() if (c if x != k else c - 1) > 0}))\n ret.extend([[k, *y] for y in sub])\n return ret\n return perm(HashableDict(Counter(nums)))\n\n def permuteUnique(self, nums: List[int]) -> List[List[int]]:\n \"\"\"05/22/2022 19:25 Backtrack\"\"\"\n nums.sort()\n\n def dfs(cur, remain):\n if remain == 0:\n return [cur]\n ret = []\n pv = None\n for i in range(len(nums)):\n if pv == nums[i]:\n continue\n if remain & 1 << i == 0:\n continue\n pv = nums[i]\n ret.extend(dfs(cur + [nums[i]], remain ^ 1 << i))\n return ret\n return dfs([], (1 << len(nums)) - 1)\n", "source": "the_stack_v2_python_sparse", "source_path": "leetcode/solved/47_Permutations_II/solution.py", "source_repo": "sungminoh/algorithms", "split": "val", "star_events_count": 0}
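Of the four permuteUnique variants in the record above, the bitmask backtracking one is the easiest to lift out and run on its own. A standalone copy with a usage example (the function name and test input are mine; the body follows body_3 verbatim):

from typing import List

def permute_unique(nums: List[int]) -> List[List[int]]:
    # Sort so equal values are adjacent; at each depth, `pv` remembers the
    # last value tried there, and equal values are skipped to avoid duplicates.
    nums.sort()

    def dfs(cur, remain):
        if remain == 0:
            return [cur]
        ret = []
        pv = None
        for i in range(len(nums)):
            if pv == nums[i]:         # same value already tried at this depth
                continue
            if remain & 1 << i == 0:  # index i already used on this path
                continue
            pv = nums[i]
            ret.extend(dfs(cur + [nums[i]], remain ^ 1 << i))
        return ret

    return dfs([], (1 << len(nums)) - 1)

print(permute_unique([1, 1, 2]))
# [[1, 1, 2], [1, 2, 1], [2, 1, 1]]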
{"blob_id": "356ebfcd60869fc27de89048607b7b6f36641a54", "bodies": ["with mock.patch('fuzz_target.FuzzTarget.is_reproducible', side_effect=[True, False]):\n with tempfile.TemporaryDirectory() as tmp_dir:\n workspace = os.path.join(tmp_dir, 'workspace')\n shutil.copytree(TEST_DATA_PATH, workspace)\n config = test_helpers.create_run_config(fuzz_seconds=FUZZ_SECONDS, workspace=workspace, oss_fuzz_project_name=EXAMPLE_PROJECT)\n result = run_fuzzers.run_fuzzers(config)\n self.assertEqual(result, run_fuzzers.RunFuzzersResult.BUG_FOUND)", "with tempfile.TemporaryDirectory() as tmp_dir:\n workspace = os.path.join(tmp_dir, 'workspace')\n shutil.copytree(TEST_DATA_PATH, workspace)\n config = test_helpers.create_run_config(fuzz_seconds=FUZZ_SECONDS, workspace=workspace, oss_fuzz_project_name=EXAMPLE_PROJECT)\n result = run_fuzzers.run_fuzzers(config)\n self.assertEqual(result, run_fuzzers.RunFuzzersResult.NO_BUG_FOUND)", "with tempfile.TemporaryDirectory() as tmp_dir:\n out_path = os.path.join(tmp_dir, 'build-out')\n os.mkdir(out_path)\n config = test_helpers.create_run_config(fuzz_seconds=FUZZ_SECONDS, workspace=tmp_dir, oss_fuzz_project_name=EXAMPLE_PROJECT)\n result = run_fuzzers.run_fuzzers(config)\nself.assertEqual(result, run_fuzzers.RunFuzzersResult.ERROR)"], "bodies_text": "<|body_start_0|>\n with mock.patch('fuzz_target.FuzzTarget.is_reproducible', side_effect=[True, False]):\n with tempfile.TemporaryDirectory() as tmp_dir:\n workspace = os.path.join(tmp_dir, 'workspace')\n shutil.copytree(TEST_DATA_PATH, workspace)\n config = test_helpers.create_run_config(fuzz_seconds=FUZZ_SECONDS, workspace=workspace, oss_fuzz_project_name=EXAMPLE_PROJECT)\n result = run_fuzzers.run_fuzzers(config)\n self.assertEqual(result, run_fuzzers.RunFuzzersResult.BUG_FOUND)\n<|end_body_0|>\n\n<|body_start_1|>\n with tempfile.TemporaryDirectory() as tmp_dir:\n workspace = os.path.join(tmp_dir, 'workspace')\n shutil.copytree(TEST_DATA_PATH, workspace)\n config = test_helpers.create_run_config(fuzz_seconds=FUZZ_SECONDS, workspace=workspace, oss_fuzz_project_name=EXAMPLE_PROJECT)\n result = run_fuzzers.run_fuzzers(config)\n self.assertEqual(result, run_fuzzers.RunFuzzersResult.NO_BUG_FOUND)\n<|end_body_1|>\n\n<|body_start_2|>\n with tempfile.TemporaryDirectory() as tmp_dir:\n out_path = os.path.join(tmp_dir, 'build-out')\n os.mkdir(out_path)\n config = test_helpers.create_run_config(fuzz_seconds=FUZZ_SECONDS, workspace=tmp_dir, oss_fuzz_project_name=EXAMPLE_PROJECT)\n result = run_fuzzers.run_fuzzers(config)\n self.assertEqual(result, run_fuzzers.RunFuzzersResult.ERROR)\n<|end_body_2|>\n", "class_docstring": "Integration tests for build_fuzzers with an ASAN build.", "class_name": "RunAddressFuzzersIntegrationTest", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RunAddressFuzzersIntegrationTest:\n \"\"\"Integration tests for build_fuzzers with an ASAN build.\"\"\"\n\n def test_new_bug_found(self):\n \"\"\"Tests run_fuzzers with a valid ASAN build.\"\"\"\n <|body_0|>\n\n def test_old_bug_found(self, _):\n \"\"\"Tests run_fuzzers with a bug found in OSS-Fuzz before.\"\"\"\n <|body_1|>\n\n def test_invalid_build(self):\n \"\"\"Tests run_fuzzers with an invalid ASAN build.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n with mock.patch('fuzz_target.FuzzTarget.is_reproducible', side_effect=[True, False]):\n with tempfile.TemporaryDirectory() as tmp_dir:\n workspace = os.path.join(tmp_dir, 'workspace')\n shutil.copytree(TEST_DATA_PATH, 
workspace)\n config = test_helpers.create_run_config(fuzz_seconds=FUZZ_SECONDS, workspace=workspace, oss_fuzz_project_name=EXAMPLE_PROJECT)\n result = run_fuzzers.run_fuzzers(config)\n self.assertEqual(result, run_fuzzers.RunFuzzersResult.BUG_FOUND)\n<|end_body_0|>\n\n<|body_start_1|>\n with tempfile.TemporaryDirectory() as tmp_dir:\n workspace = os.path.join(tmp_dir, 'workspace')\n shutil.copytree(TEST_DATA_PATH, workspace)\n config = test_helpers.create_run_config(fuzz_seconds=FUZZ_SECONDS, workspace=workspace, oss_fuzz_project_name=EXAMPLE_PROJECT)\n result = run_fuzzers.run_fuzzers(config)\n self.assertEqual(result, run_fuzzers.RunFuzzersResult.NO_BUG_FOUND)\n<|end_body_1|>\n\n<|body_start_2|>\n with tempfile.TemporaryDirectory() as tmp_dir:\n out_path = os.path.join(tmp_dir, 'build-out')\n os.mkdir(out_path)\n config = test_helpers.create_run_config(fuzz_seconds=FUZZ_SECONDS, workspace=tmp_dir, oss_fuzz_project_name=EXAMPLE_PROJECT)\n result = run_fuzzers.run_fuzzers(config)\n self.assertEqual(result, run_fuzzers.RunFuzzersResult.ERROR)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000095", "length_bytes": 19890, "license_type": "permissive", "methods": [{"docstring": "Tests run_fuzzers with a valid ASAN build.", "name": "test_new_bug_found", "signature": "def test_new_bug_found(self)"}, {"docstring": "Tests run_fuzzers with a bug found in OSS-Fuzz before.", "name": "test_old_bug_found", "signature": "def test_old_bug_found(self, _)"}, {"docstring": "Tests run_fuzzers with an invalid ASAN build.", "name": "test_invalid_build", "signature": "def test_invalid_build(self)"}], "n_methods": 3, "prompt": "Implement the Python class `RunAddressFuzzersIntegrationTest` described below.\n\nClass description:\nIntegration tests for build_fuzzers with an ASAN build.\n\nMethod signatures and docstrings:\n- def test_new_bug_found(self): Tests run_fuzzers with a valid ASAN build.\n- def test_old_bug_found(self, _): Tests run_fuzzers with a bug found in OSS-Fuzz before.\n- def test_invalid_build(self): Tests run_fuzzers with an invalid ASAN build.", "prompted_full_text": "Implement the Python class `RunAddressFuzzersIntegrationTest` described below.\n\nClass description:\nIntegration tests for build_fuzzers with an ASAN build.\n\nMethod signatures and docstrings:\n- def test_new_bug_found(self): Tests run_fuzzers with a valid ASAN build.\n- def test_old_bug_found(self, _): Tests run_fuzzers with a bug found in OSS-Fuzz before.\n- def test_invalid_build(self): Tests run_fuzzers with an invalid ASAN build.\n\n<|skeleton|>\nclass RunAddressFuzzersIntegrationTest:\n \"\"\"Integration tests for build_fuzzers with an ASAN build.\"\"\"\n\n def test_new_bug_found(self):\n \"\"\"Tests run_fuzzers with a valid ASAN build.\"\"\"\n <|body_0|>\n\n def test_old_bug_found(self, _):\n \"\"\"Tests run_fuzzers with a bug found in OSS-Fuzz before.\"\"\"\n <|body_1|>\n\n def test_invalid_build(self):\n \"\"\"Tests run_fuzzers with an invalid ASAN build.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n with mock.patch('fuzz_target.FuzzTarget.is_reproducible', side_effect=[True, False]):\n with tempfile.TemporaryDirectory() as tmp_dir:\n workspace = os.path.join(tmp_dir, 'workspace')\n shutil.copytree(TEST_DATA_PATH, workspace)\n config = test_helpers.create_run_config(fuzz_seconds=FUZZ_SECONDS, workspace=workspace, oss_fuzz_project_name=EXAMPLE_PROJECT)\n result = run_fuzzers.run_fuzzers(config)\n self.assertEqual(result, 
run_fuzzers.RunFuzzersResult.BUG_FOUND)\n<|end_body_0|>\n\n<|body_start_1|>\n with tempfile.TemporaryDirectory() as tmp_dir:\n workspace = os.path.join(tmp_dir, 'workspace')\n shutil.copytree(TEST_DATA_PATH, workspace)\n config = test_helpers.create_run_config(fuzz_seconds=FUZZ_SECONDS, workspace=workspace, oss_fuzz_project_name=EXAMPLE_PROJECT)\n result = run_fuzzers.run_fuzzers(config)\n self.assertEqual(result, run_fuzzers.RunFuzzersResult.NO_BUG_FOUND)\n<|end_body_1|>\n\n<|body_start_2|>\n with tempfile.TemporaryDirectory() as tmp_dir:\n out_path = os.path.join(tmp_dir, 'build-out')\n os.mkdir(out_path)\n config = test_helpers.create_run_config(fuzz_seconds=FUZZ_SECONDS, workspace=tmp_dir, oss_fuzz_project_name=EXAMPLE_PROJECT)\n result = run_fuzzers.run_fuzzers(config)\n self.assertEqual(result, run_fuzzers.RunFuzzersResult.ERROR)\n<|end_body_2|>\n", "revision_id": "f0275421f84b8f80ee767fb9230134ac97cb687b", "skeleton": "<|skeleton|>\nclass RunAddressFuzzersIntegrationTest:\n \"\"\"Integration tests for build_fuzzers with an ASAN build.\"\"\"\n\n def test_new_bug_found(self):\n \"\"\"Tests run_fuzzers with a valid ASAN build.\"\"\"\n <|body_0|>\n\n def test_old_bug_found(self, _):\n \"\"\"Tests run_fuzzers with a bug found in OSS-Fuzz before.\"\"\"\n <|body_1|>\n\n def test_invalid_build(self):\n \"\"\"Tests run_fuzzers with an invalid ASAN build.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class RunAddressFuzzersIntegrationTest:\n \"\"\"Integration tests for build_fuzzers with an ASAN build.\"\"\"\n\n def test_new_bug_found(self):\n \"\"\"Tests run_fuzzers with a valid ASAN build.\"\"\"\n with mock.patch('fuzz_target.FuzzTarget.is_reproducible', side_effect=[True, False]):\n with tempfile.TemporaryDirectory() as tmp_dir:\n workspace = os.path.join(tmp_dir, 'workspace')\n shutil.copytree(TEST_DATA_PATH, workspace)\n config = test_helpers.create_run_config(fuzz_seconds=FUZZ_SECONDS, workspace=workspace, oss_fuzz_project_name=EXAMPLE_PROJECT)\n result = run_fuzzers.run_fuzzers(config)\n self.assertEqual(result, run_fuzzers.RunFuzzersResult.BUG_FOUND)\n\n def test_old_bug_found(self, _):\n \"\"\"Tests run_fuzzers with a bug found in OSS-Fuzz before.\"\"\"\n with tempfile.TemporaryDirectory() as tmp_dir:\n workspace = os.path.join(tmp_dir, 'workspace')\n shutil.copytree(TEST_DATA_PATH, workspace)\n config = test_helpers.create_run_config(fuzz_seconds=FUZZ_SECONDS, workspace=workspace, oss_fuzz_project_name=EXAMPLE_PROJECT)\n result = run_fuzzers.run_fuzzers(config)\n self.assertEqual(result, run_fuzzers.RunFuzzersResult.NO_BUG_FOUND)\n\n def test_invalid_build(self):\n \"\"\"Tests run_fuzzers with an invalid ASAN build.\"\"\"\n with tempfile.TemporaryDirectory() as tmp_dir:\n out_path = os.path.join(tmp_dir, 'build-out')\n os.mkdir(out_path)\n config = test_helpers.create_run_config(fuzz_seconds=FUZZ_SECONDS, workspace=tmp_dir, oss_fuzz_project_name=EXAMPLE_PROJECT)\n result = run_fuzzers.run_fuzzers(config)\n self.assertEqual(result, run_fuzzers.RunFuzzersResult.ERROR)\n", "source": "the_stack_v2_python_sparse", "source_path": "infra/cifuzz/run_fuzzers_test.py", "source_repo": "google/oss-fuzz", "split": "val", "star_events_count": 9438}
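The one non-obvious piece of test_new_bug_found above is mock.patch with side_effect=[True, False]: the first call to is_reproducible (against the fresh build) reports the crash as reproducible, the second (against the OSS-Fuzz build) does not, which is exactly the "new bug" condition. A tiny illustration of that call-sequencing behaviour, using a stand-in class rather than fuzz_target.FuzzTarget:

from unittest import mock

class Target:
    def is_reproducible(self):
        raise NotImplementedError  # never reached while patched

with mock.patch.object(Target, 'is_reproducible', side_effect=[True, False]):
    t = Target()
    print(t.is_reproducible())  # True  -> crashes on the new build
    print(t.is_reproducible())  # False -> does not crash on the old build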
{"blob_id": "00caa4a1925c26b087b2375c382d02818aa00af0", "bodies": ["obsolete_content = []\nobsolete_translation_suggestion_error_report: List[Dict[str, Union[str, List[Dict[str, str]]]]] = []\ntranslatable_content_ids = exploration.get_translatable_content_ids()\nfor suggestion in suggestions:\n suggestion_change = suggestion.change_cmd\n if not suggestion_change['content_id'] in translatable_content_ids:\n obsolete_content.append({'content_id': suggestion_change['content_id'], 'state_name': suggestion_change['state_name']})\nobsolete_translation_suggestion_error_report.append({'exp_id': exploration.id, 'obsolete_content': obsolete_content})\nreturn obsolete_translation_suggestion_error_report", "suggestion_dicts = _get_suggestion_dicts(self.pipeline)\ntotal_processed_suggestions_count_job_run_results = suggestion_dicts | 'Get suggestions' >> beam.FlatMap(lambda suggestions_dict: suggestions_dict['suggestions']) | 'Total processed suggestion count' >> job_result_transforms.CountObjectsToJobRunResult('TOTAL PROCESSED SUGGESTIONS COUNT')\nsuggestion_results = suggestion_dicts | 'Report obsolete suggestions' >> beam.Map(lambda suggestions_dict: self._report_suggestions_with_missing_content_ids(suggestions_dict['suggestions'], suggestions_dict['exploration'])) | 'Flatten reports' >> beam.FlatMap(lambda x: x) | 'Filter out reports with no obsolete suggestions' >> beam.Filter(lambda report: len(report['obsolete_content']) > 0)\njob_run_results = suggestion_results | 'Report the obsolete suggestions' >> beam.Map(lambda result: job_run_result.JobRunResult.as_stdout(f'Results are - {result}'))\nobsolete_suggestions_count_job_run_results = suggestion_results | 'Flatten obsolete suggestions' >> beam.FlatMap(lambda report: report['obsolete_content']) | 'Report the obsolete suggestions count' >> job_result_transforms.CountObjectsToJobRunResult('OBSOLETE SUGGESTIONS COUNT')\nreturn (job_run_results, total_processed_suggestions_count_job_run_results, obsolete_suggestions_count_job_run_results) | 'Combine results' >> beam.Flatten()"], "bodies_text": "<|body_start_0|>\n obsolete_content = []\n obsolete_translation_suggestion_error_report: List[Dict[str, Union[str, List[Dict[str, str]]]]] = []\n translatable_content_ids = exploration.get_translatable_content_ids()\n for suggestion in suggestions:\n suggestion_change = suggestion.change_cmd\n if not suggestion_change['content_id'] in translatable_content_ids:\n obsolete_content.append({'content_id': suggestion_change['content_id'], 'state_name': suggestion_change['state_name']})\n obsolete_translation_suggestion_error_report.append({'exp_id': exploration.id, 'obsolete_content': obsolete_content})\n return obsolete_translation_suggestion_error_report\n<|end_body_0|>\n\n<|body_start_1|>\n suggestion_dicts = _get_suggestion_dicts(self.pipeline)\n total_processed_suggestions_count_job_run_results = suggestion_dicts | 'Get suggestions' >> beam.FlatMap(lambda suggestions_dict: suggestions_dict['suggestions']) | 'Total processed suggestion count' >> job_result_transforms.CountObjectsToJobRunResult('TOTAL PROCESSED SUGGESTIONS COUNT')\n suggestion_results = suggestion_dicts | 'Report obsolete suggestions' >> beam.Map(lambda suggestions_dict: self._report_suggestions_with_missing_content_ids(suggestions_dict['suggestions'], suggestions_dict['exploration'])) | 'Flatten reports' >> beam.FlatMap(lambda x: x) | 'Filter out reports with no obsolete suggestions' >> beam.Filter(lambda report: len(report['obsolete_content']) > 0)\n job_run_results = suggestion_results | 
'Report the obsolete suggestions' >> beam.Map(lambda result: job_run_result.JobRunResult.as_stdout(f'Results are - {result}'))\n obsolete_suggestions_count_job_run_results = suggestion_results | 'Flatten obsolete suggestions' >> beam.FlatMap(lambda report: report['obsolete_content']) | 'Report the obsolete suggestions count' >> job_result_transforms.CountObjectsToJobRunResult('OBSOLETE SUGGESTIONS COUNT')\n return (job_run_results, total_processed_suggestions_count_job_run_results, obsolete_suggestions_count_job_run_results) | 'Combine results' >> beam.Flatten()\n<|end_body_1|>\n", "class_docstring": "Audits translation suggestions for missing content IDs.", "class_name": "AuditTranslationSuggestionsWithMissingContentIdJob", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AuditTranslationSuggestionsWithMissingContentIdJob:\n \"\"\"Audits translation suggestions for missing content IDs.\"\"\"\n\n def _report_suggestions_with_missing_content_ids(suggestions: List[suggestion_models.GeneralSuggestionModel], exploration: exp_domain.Exploration) -> List[Dict[str, Union[str, List[Dict[str, str]]]]]:\n \"\"\"Audits translation suggestion models for missing content IDs. Reports the following for each exploration: - exploration ID - list of missing content IDs and corresponding state names. Args: suggestions: list(GeneralSuggestionModel). A list of translation suggestion models corresponding to the given exploration. exploration: Exploration. The corresponding exploration domain object. Returns: list(dict). Audit report result.\"\"\"\n <|body_0|>\n\n def run(self) -> beam.PCollection[job_run_result.JobRunResult]:\n \"\"\"Returns a PCollection of audit job run results. Returns: PCollection. 
A PCollection of results.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n obsolete_content = []\n obsolete_translation_suggestion_error_report: List[Dict[str, Union[str, List[Dict[str, str]]]]] = []\n translatable_content_ids = exploration.get_translatable_content_ids()\n for suggestion in suggestions:\n suggestion_change = suggestion.change_cmd\n if not suggestion_change['content_id'] in translatable_content_ids:\n obsolete_content.append({'content_id': suggestion_change['content_id'], 'state_name': suggestion_change['state_name']})\n obsolete_translation_suggestion_error_report.append({'exp_id': exploration.id, 'obsolete_content': obsolete_content})\n return obsolete_translation_suggestion_error_report\n<|end_body_0|>\n\n<|body_start_1|>\n suggestion_dicts = _get_suggestion_dicts(self.pipeline)\n total_processed_suggestions_count_job_run_results = suggestion_dicts | 'Get suggestions' >> beam.FlatMap(lambda suggestions_dict: suggestions_dict['suggestions']) | 'Total processed suggestion count' >> job_result_transforms.CountObjectsToJobRunResult('TOTAL PROCESSED SUGGESTIONS COUNT')\n suggestion_results = suggestion_dicts | 'Report obsolete suggestions' >> beam.Map(lambda suggestions_dict: self._report_suggestions_with_missing_content_ids(suggestions_dict['suggestions'], suggestions_dict['exploration'])) | 'Flatten reports' >> beam.FlatMap(lambda x: x) | 'Filter out reports with no obsolete suggestions' >> beam.Filter(lambda report: len(report['obsolete_content']) > 0)\n job_run_results = suggestion_results | 'Report the obsolete suggestions' >> beam.Map(lambda result: job_run_result.JobRunResult.as_stdout(f'Results are - {result}'))\n obsolete_suggestions_count_job_run_results = suggestion_results | 'Flatten obsolete suggestions' >> beam.FlatMap(lambda report: report['obsolete_content']) | 'Report the obsolete suggestions count' >> job_result_transforms.CountObjectsToJobRunResult('OBSOLETE SUGGESTIONS COUNT')\n return (job_run_results, total_processed_suggestions_count_job_run_results, obsolete_suggestions_count_job_run_results) | 'Combine results' >> beam.Flatten()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000096", "length_bytes": 11707, "license_type": "permissive", "methods": [{"docstring": "Audits translation suggestion models for missing content IDs. Reports the following for each exploration: - exploration ID - list of missing content IDs and corresponding state names. Args: suggestions: list(GeneralSuggestionModel). A list of translation suggestion models corresponding to the given exploration. exploration: Exploration. The corresponding exploration domain object. Returns: list(dict). Audit report result.", "name": "_report_suggestions_with_missing_content_ids", "signature": "def _report_suggestions_with_missing_content_ids(suggestions: List[suggestion_models.GeneralSuggestionModel], exploration: exp_domain.Exploration) -> List[Dict[str, Union[str, List[Dict[str, str]]]]]"}, {"docstring": "Returns a PCollection of audit job run results. Returns: PCollection. 
A PCollection of results.", "name": "run", "signature": "def run(self) -> beam.PCollection[job_run_result.JobRunResult]"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_054459", "prompt": "Implement the Python class `AuditTranslationSuggestionsWithMissingContentIdJob` described below.\n\nClass description:\nAudits translation suggestions for missing content IDs.\n\nMethod signatures and docstrings:\n- def _report_suggestions_with_missing_content_ids(suggestions: List[suggestion_models.GeneralSuggestionModel], exploration: exp_domain.Exploration) -> List[Dict[str, Union[str, List[Dict[str, str]]]]]: Audits translation suggestion models for missing content IDs. Reports the following for each exploration: - exploration ID - list of missing content IDs and corresponding state names. Args: suggestions: list(GeneralSuggestionModel). A list of translation suggestion models corresponding to the given exploration. exploration: Exploration. The corresponding exploration domain object. Returns: list(dict). Audit report result.\n- def run(self) -> beam.PCollection[job_run_result.JobRunResult]: Returns a PCollection of audit job run results. Returns: PCollection. A PCollection of results.", "prompted_full_text": "Implement the Python class `AuditTranslationSuggestionsWithMissingContentIdJob` described below.\n\nClass description:\nAudits translation suggestions for missing content IDs.\n\nMethod signatures and docstrings:\n- def _report_suggestions_with_missing_content_ids(suggestions: List[suggestion_models.GeneralSuggestionModel], exploration: exp_domain.Exploration) -> List[Dict[str, Union[str, List[Dict[str, str]]]]]: Audits translation suggestion models for missing content IDs. Reports the following for each exploration: - exploration ID - list of missing content IDs and corresponding state names. Args: suggestions: list(GeneralSuggestionModel). A list of translation suggestion models corresponding to the given exploration. exploration: Exploration. The corresponding exploration domain object. Returns: list(dict). Audit report result.\n- def run(self) -> beam.PCollection[job_run_result.JobRunResult]: Returns a PCollection of audit job run results. Returns: PCollection. A PCollection of results.\n\n<|skeleton|>\nclass AuditTranslationSuggestionsWithMissingContentIdJob:\n \"\"\"Audits translation suggestions for missing content IDs.\"\"\"\n\n def _report_suggestions_with_missing_content_ids(suggestions: List[suggestion_models.GeneralSuggestionModel], exploration: exp_domain.Exploration) -> List[Dict[str, Union[str, List[Dict[str, str]]]]]:\n \"\"\"Audits translation suggestion models for missing content IDs. Reports the following for each exploration: - exploration ID - list of missing content IDs and corresponding state names. Args: suggestions: list(GeneralSuggestionModel). A list of translation suggestion models corresponding to the given exploration. exploration: Exploration. The corresponding exploration domain object. Returns: list(dict). Audit report result.\"\"\"\n <|body_0|>\n\n def run(self) -> beam.PCollection[job_run_result.JobRunResult]:\n \"\"\"Returns a PCollection of audit job run results. Returns: PCollection. 
A PCollection of results.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n obsolete_content = []\n obsolete_translation_suggestion_error_report: List[Dict[str, Union[str, List[Dict[str, str]]]]] = []\n translatable_content_ids = exploration.get_translatable_content_ids()\n for suggestion in suggestions:\n suggestion_change = suggestion.change_cmd\n if not suggestion_change['content_id'] in translatable_content_ids:\n obsolete_content.append({'content_id': suggestion_change['content_id'], 'state_name': suggestion_change['state_name']})\n obsolete_translation_suggestion_error_report.append({'exp_id': exploration.id, 'obsolete_content': obsolete_content})\n return obsolete_translation_suggestion_error_report\n<|end_body_0|>\n\n<|body_start_1|>\n suggestion_dicts = _get_suggestion_dicts(self.pipeline)\n total_processed_suggestions_count_job_run_results = suggestion_dicts | 'Get suggestions' >> beam.FlatMap(lambda suggestions_dict: suggestions_dict['suggestions']) | 'Total processed suggestion count' >> job_result_transforms.CountObjectsToJobRunResult('TOTAL PROCESSED SUGGESTIONS COUNT')\n suggestion_results = suggestion_dicts | 'Report obsolete suggestions' >> beam.Map(lambda suggestions_dict: self._report_suggestions_with_missing_content_ids(suggestions_dict['suggestions'], suggestions_dict['exploration'])) | 'Flatten reports' >> beam.FlatMap(lambda x: x) | 'Filter out reports with no obsolete suggestions' >> beam.Filter(lambda report: len(report['obsolete_content']) > 0)\n job_run_results = suggestion_results | 'Report the obsolete suggestions' >> beam.Map(lambda result: job_run_result.JobRunResult.as_stdout(f'Results are - {result}'))\n obsolete_suggestions_count_job_run_results = suggestion_results | 'Flatten obsolete suggestions' >> beam.FlatMap(lambda report: report['obsolete_content']) | 'Report the obsolete suggestions count' >> job_result_transforms.CountObjectsToJobRunResult('OBSOLETE SUGGESTIONS COUNT')\n return (job_run_results, total_processed_suggestions_count_job_run_results, obsolete_suggestions_count_job_run_results) | 'Combine results' >> beam.Flatten()\n<|end_body_1|>\n", "revision_id": "d16fdf23d790eafd63812bd7239532256e30a21d", "skeleton": "<|skeleton|>\nclass AuditTranslationSuggestionsWithMissingContentIdJob:\n \"\"\"Audits translation suggestions for missing content IDs.\"\"\"\n\n def _report_suggestions_with_missing_content_ids(suggestions: List[suggestion_models.GeneralSuggestionModel], exploration: exp_domain.Exploration) -> List[Dict[str, Union[str, List[Dict[str, str]]]]]:\n \"\"\"Audits translation suggestion models for missing content IDs. Reports the following for each exploration: - exploration ID - list of missing content IDs and corresponding state names. Args: suggestions: list(GeneralSuggestionModel). A list of translation suggestion models corresponding to the given exploration. exploration: Exploration. The corresponding exploration domain object. Returns: list(dict). Audit report result.\"\"\"\n <|body_0|>\n\n def run(self) -> beam.PCollection[job_run_result.JobRunResult]:\n \"\"\"Returns a PCollection of audit job run results. Returns: PCollection. 
A PCollection of results.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class AuditTranslationSuggestionsWithMissingContentIdJob:\n \"\"\"Audits translation suggestions for missing content IDs.\"\"\"\n\n def _report_suggestions_with_missing_content_ids(suggestions: List[suggestion_models.GeneralSuggestionModel], exploration: exp_domain.Exploration) -> List[Dict[str, Union[str, List[Dict[str, str]]]]]:\n \"\"\"Audits translation suggestion models for missing content IDs. Reports the following for each exploration: - exploration ID - list of missing content IDs and corresponding state names. Args: suggestions: list(GeneralSuggestionModel). A list of translation suggestion models corresponding to the given exploration. exploration: Exploration. The corresponding exploration domain object. Returns: list(dict). Audit report result.\"\"\"\n obsolete_content = []\n obsolete_translation_suggestion_error_report: List[Dict[str, Union[str, List[Dict[str, str]]]]] = []\n translatable_content_ids = exploration.get_translatable_content_ids()\n for suggestion in suggestions:\n suggestion_change = suggestion.change_cmd\n if not suggestion_change['content_id'] in translatable_content_ids:\n obsolete_content.append({'content_id': suggestion_change['content_id'], 'state_name': suggestion_change['state_name']})\n obsolete_translation_suggestion_error_report.append({'exp_id': exploration.id, 'obsolete_content': obsolete_content})\n return obsolete_translation_suggestion_error_report\n\n def run(self) -> beam.PCollection[job_run_result.JobRunResult]:\n \"\"\"Returns a PCollection of audit job run results. Returns: PCollection. A PCollection of results.\"\"\"\n suggestion_dicts = _get_suggestion_dicts(self.pipeline)\n total_processed_suggestions_count_job_run_results = suggestion_dicts | 'Get suggestions' >> beam.FlatMap(lambda suggestions_dict: suggestions_dict['suggestions']) | 'Total processed suggestion count' >> job_result_transforms.CountObjectsToJobRunResult('TOTAL PROCESSED SUGGESTIONS COUNT')\n suggestion_results = suggestion_dicts | 'Report obsolete suggestions' >> beam.Map(lambda suggestions_dict: self._report_suggestions_with_missing_content_ids(suggestions_dict['suggestions'], suggestions_dict['exploration'])) | 'Flatten reports' >> beam.FlatMap(lambda x: x) | 'Filter out reports with no obsolete suggestions' >> beam.Filter(lambda report: len(report['obsolete_content']) > 0)\n job_run_results = suggestion_results | 'Report the obsolete suggestions' >> beam.Map(lambda result: job_run_result.JobRunResult.as_stdout(f'Results are - {result}'))\n obsolete_suggestions_count_job_run_results = suggestion_results | 'Flatten obsolete suggestions' >> beam.FlatMap(lambda report: report['obsolete_content']) | 'Report the obsolete suggestions count' >> job_result_transforms.CountObjectsToJobRunResult('OBSOLETE SUGGESTIONS COUNT')\n return (job_run_results, total_processed_suggestions_count_job_run_results, obsolete_suggestions_count_job_run_results) | 'Combine results' >> beam.Flatten()\n", "source": "the_stack_v2_python_sparse", "source_path": "core/jobs/batch_jobs/rejecting_suggestion_for_invalid_content_ids_jobs.py", "source_repo": "oppia/oppia", "split": "val", "star_events_count": 6172}
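The run() body above is a linear Beam pipeline: build per-exploration reports, drop the ones with no obsolete content, then fan the survivors out both as stdout results and as a count. A minimal runnable sketch of that Filter/FlatMap shape on the direct runner, assuming apache_beam is installed and using toy report dicts in place of Oppia's suggestion models:

import apache_beam as beam

reports = [
    {'exp_id': 'exp1', 'obsolete_content': [{'content_id': 'c1', 'state_name': 'Intro'}]},
    {'exp_id': 'exp2', 'obsolete_content': []},  # filtered out below
]

with beam.Pipeline() as p:
    _ = (
        p
        | 'Create reports' >> beam.Create(reports)
        | 'Keep non-empty' >> beam.Filter(lambda r: len(r['obsolete_content']) > 0)
        | 'Flatten content' >> beam.FlatMap(lambda r: r['obsolete_content'])
        | 'Print' >> beam.Map(print)  # {'content_id': 'c1', 'state_name': 'Intro'}
    )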
{"blob_id": "fde04d10bf4764eb29313f3fdaa20fbe61529cf3", "bodies": ["threading.Thread.__init__(self)\nself.log = logging.getLogger('Asset Loader')\nself.loader_queue = loader_queue\nself.loaded_queue = loaded_queue\nself.exception_queue = exception_queue\nself.thread_stopper = thread_stopper\nself.name = 'asset_loader'", "try:\n while not self.thread_stopper.is_set():\n try:\n asset = self.loader_queue.get(block=True, timeout=1)\n except Empty:\n asset = None\n if asset:\n with asset.lock:\n if not asset.loaded:\n try:\n asset.do_load()\n except Exception as e:\n raise ConfigFileError(\"Error while loading {} asset file '{}'\".format(asset.attribute, asset.file), 1, self.log.name, asset.name) from e\n self.loaded_queue.put((asset, True))\n else:\n self.loaded_queue.put((asset, False))\n return\nexcept Exception:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n lines = traceback.format_exception(exc_type, exc_value, exc_traceback)\n msg = ''.join((line for line in lines))\n self.exception_queue.put(msg)\n raise"], "bodies_text": "<|body_start_0|>\n threading.Thread.__init__(self)\n self.log = logging.getLogger('Asset Loader')\n self.loader_queue = loader_queue\n self.loaded_queue = loaded_queue\n self.exception_queue = exception_queue\n self.thread_stopper = thread_stopper\n self.name = 'asset_loader'\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n while not self.thread_stopper.is_set():\n try:\n asset = self.loader_queue.get(block=True, timeout=1)\n except Empty:\n asset = None\n if asset:\n with asset.lock:\n if not asset.loaded:\n try:\n asset.do_load()\n except Exception as e:\n raise ConfigFileError(\"Error while loading {} asset file '{}'\".format(asset.attribute, asset.file), 1, self.log.name, asset.name) from e\n self.loaded_queue.put((asset, True))\n else:\n self.loaded_queue.put((asset, False))\n return\n except Exception:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n lines = traceback.format_exception(exc_type, exc_value, exc_traceback)\n msg = ''.join((line for line in lines))\n self.exception_queue.put(msg)\n raise\n<|end_body_1|>\n", "class_docstring": "Base class for the Asset Loader thread and actually loads the assets from disk. Args: loader_queue: A reference to the asset manager's loader_queue which holds assets waiting to be loaded. Items are automatically sorted in reverse order by priority, then creation ID. loaded_queue: A reference to the asset manager's loaded_queue which holds assets that have just been loaded. Entries are Asset instances. exception_queue: Send a reference to self.machine.crash_queue. This way if the asset loader crashes, it will write the crash to that queue and cause an exception in the main thread. Otherwise it fails silently which is super annoying. :)", "class_name": "AssetLoader", "detected_licenses": ["MIT", "CC-BY-4.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AssetLoader:\n \"\"\"Base class for the Asset Loader thread and actually loads the assets from disk. Args: loader_queue: A reference to the asset manager's loader_queue which holds assets waiting to be loaded. Items are automatically sorted in reverse order by priority, then creation ID. loaded_queue: A reference to the asset manager's loaded_queue which holds assets that have just been loaded. Entries are Asset instances. exception_queue: Send a reference to self.machine.crash_queue. This way if the asset loader crashes, it will write the crash to that queue and cause an exception in the main thread. 
Otherwise it fails silently which is super annoying. :)\"\"\"\n\n def __init__(self, loader_queue, loaded_queue, exception_queue, thread_stopper):\n \"\"\"Initialise asset loader.\"\"\"\n <|body_0|>\n\n def run(self):\n \"\"\"Run loop for the loader thread.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n threading.Thread.__init__(self)\n self.log = logging.getLogger('Asset Loader')\n self.loader_queue = loader_queue\n self.loaded_queue = loaded_queue\n self.exception_queue = exception_queue\n self.thread_stopper = thread_stopper\n self.name = 'asset_loader'\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n while not self.thread_stopper.is_set():\n try:\n asset = self.loader_queue.get(block=True, timeout=1)\n except Empty:\n asset = None\n if asset:\n with asset.lock:\n if not asset.loaded:\n try:\n asset.do_load()\n except Exception as e:\n raise ConfigFileError(\"Error while loading {} asset file '{}'\".format(asset.attribute, asset.file), 1, self.log.name, asset.name) from e\n self.loaded_queue.put((asset, True))\n else:\n self.loaded_queue.put((asset, False))\n return\n except Exception:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n lines = traceback.format_exception(exc_type, exc_value, exc_traceback)\n msg = ''.join((line for line in lines))\n self.exception_queue.put(msg)\n raise\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000097", "length_bytes": 5327, "license_type": "permissive", "methods": [{"docstring": "Initialise asset loader.", "name": "__init__", "signature": "def __init__(self, loader_queue, loaded_queue, exception_queue, thread_stopper)"}, {"docstring": "Run loop for the loader thread.", "name": "run", "signature": "def run(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_035838", "prompt": "Implement the Python class `AssetLoader` described below.\n\nClass description:\nBase class for the Asset Loader thread and actually loads the assets from disk. Args: loader_queue: A reference to the asset manager's loader_queue which holds assets waiting to be loaded. Items are automatically sorted in reverse order by priority, then creation ID. loaded_queue: A reference to the asset manager's loaded_queue which holds assets that have just been loaded. Entries are Asset instances. exception_queue: Send a reference to self.machine.crash_queue. This way if the asset loader crashes, it will write the crash to that queue and cause an exception in the main thread. Otherwise it fails silently which is super annoying. :)\n\nMethod signatures and docstrings:\n- def __init__(self, loader_queue, loaded_queue, exception_queue, thread_stopper): Initialise asset loader.\n- def run(self): Run loop for the loader thread.", "prompted_full_text": "Implement the Python class `AssetLoader` described below.\n\nClass description:\nBase class for the Asset Loader thread and actually loads the assets from disk. Args: loader_queue: A reference to the asset manager's loader_queue which holds assets waiting to be loaded. Items are automatically sorted in reverse order by priority, then creation ID. loaded_queue: A reference to the asset manager's loaded_queue which holds assets that have just been loaded. Entries are Asset instances. exception_queue: Send a reference to self.machine.crash_queue. This way if the asset loader crashes, it will write the crash to that queue and cause an exception in the main thread. Otherwise it fails silently which is super annoying. 
:)\n\nMethod signatures and docstrings:\n- def __init__(self, loader_queue, loaded_queue, exception_queue, thread_stopper): Initialise asset loader.\n- def run(self): Run loop for the loader thread.\n\n<|skeleton|>\nclass AssetLoader:\n \"\"\"Base class for the Asset Loader thread and actually loads the assets from disk. Args: loader_queue: A reference to the asset manager's loader_queue which holds assets waiting to be loaded. Items are automatically sorted in reverse order by priority, then creation ID. loaded_queue: A reference to the asset manager's loaded_queue which holds assets that have just been loaded. Entries are Asset instances. exception_queue: Send a reference to self.machine.crash_queue. This way if the asset loader crashes, it will write the crash to that queue and cause an exception in the main thread. Otherwise it fails silently which is super annoying. :)\"\"\"\n\n def __init__(self, loader_queue, loaded_queue, exception_queue, thread_stopper):\n \"\"\"Initialise asset loader.\"\"\"\n <|body_0|>\n\n def run(self):\n \"\"\"Run loop for the loader thread.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n threading.Thread.__init__(self)\n self.log = logging.getLogger('Asset Loader')\n self.loader_queue = loader_queue\n self.loaded_queue = loaded_queue\n self.exception_queue = exception_queue\n self.thread_stopper = thread_stopper\n self.name = 'asset_loader'\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n while not self.thread_stopper.is_set():\n try:\n asset = self.loader_queue.get(block=True, timeout=1)\n except Empty:\n asset = None\n if asset:\n with asset.lock:\n if not asset.loaded:\n try:\n asset.do_load()\n except Exception as e:\n raise ConfigFileError(\"Error while loading {} asset file '{}'\".format(asset.attribute, asset.file), 1, self.log.name, asset.name) from e\n self.loaded_queue.put((asset, True))\n else:\n self.loaded_queue.put((asset, False))\n return\n except Exception:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n lines = traceback.format_exception(exc_type, exc_value, exc_traceback)\n msg = ''.join((line for line in lines))\n self.exception_queue.put(msg)\n raise\n<|end_body_1|>\n", "revision_id": "d38bbe12d530f5f05c9e0f37877a19878876920a", "skeleton": "<|skeleton|>\nclass AssetLoader:\n \"\"\"Base class for the Asset Loader thread and actually loads the assets from disk. Args: loader_queue: A reference to the asset manager's loader_queue which holds assets waiting to be loaded. Items are automatically sorted in reverse order by priority, then creation ID. loaded_queue: A reference to the asset manager's loaded_queue which holds assets that have just been loaded. Entries are Asset instances. exception_queue: Send a reference to self.machine.crash_queue. This way if the asset loader crashes, it will write the crash to that queue and cause an exception in the main thread. Otherwise it fails silently which is super annoying. :)\"\"\"\n\n def __init__(self, loader_queue, loaded_queue, exception_queue, thread_stopper):\n \"\"\"Initialise asset loader.\"\"\"\n <|body_0|>\n\n def run(self):\n \"\"\"Run loop for the loader thread.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class AssetLoader:\n \"\"\"Base class for the Asset Loader thread and actually loads the assets from disk. Args: loader_queue: A reference to the asset manager's loader_queue which holds assets waiting to be loaded. 
Items are automatically sorted in reverse order by priority, then creation ID. loaded_queue: A reference to the asset manager's loaded_queue which holds assets that have just been loaded. Entries are Asset instances. exception_queue: Send a reference to self.machine.crash_queue. This way if the asset loader crashes, it will write the crash to that queue and cause an exception in the main thread. Otherwise it fails silently which is super annoying. :)\"\"\"\n\n def __init__(self, loader_queue, loaded_queue, exception_queue, thread_stopper):\n \"\"\"Initialise asset loader.\"\"\"\n threading.Thread.__init__(self)\n self.log = logging.getLogger('Asset Loader')\n self.loader_queue = loader_queue\n self.loaded_queue = loaded_queue\n self.exception_queue = exception_queue\n self.thread_stopper = thread_stopper\n self.name = 'asset_loader'\n\n def run(self):\n \"\"\"Run loop for the loader thread.\"\"\"\n try:\n while not self.thread_stopper.is_set():\n try:\n asset = self.loader_queue.get(block=True, timeout=1)\n except Empty:\n asset = None\n if asset:\n with asset.lock:\n if not asset.loaded:\n try:\n asset.do_load()\n except Exception as e:\n raise ConfigFileError(\"Error while loading {} asset file '{}'\".format(asset.attribute, asset.file), 1, self.log.name, asset.name) from e\n self.loaded_queue.put((asset, True))\n else:\n self.loaded_queue.put((asset, False))\n return\n except Exception:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n lines = traceback.format_exception(exc_type, exc_value, exc_traceback)\n msg = ''.join((line for line in lines))\n self.exception_queue.put(msg)\n raise\n", "source": "the_stack_v2_python_sparse", "source_path": "mpfmc/core/assets.py", "source_repo": "missionpinball/mpf-mc", "split": "val", "star_events_count": 19}
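AssetLoader.run above is the standard stoppable-consumer idiom: block on the queue with a short timeout so the thread re-checks the stop event about once a second instead of hanging forever on get(). A stripped-down, self-contained version of that loop (the names and the q.join() handshake are mine, not from mpf-mc):

import threading
from queue import Queue, Empty

def consumer(work_queue, stopper):
    while not stopper.is_set():
        try:
            item = work_queue.get(block=True, timeout=1)  # wake up once a second
        except Empty:
            continue  # nothing to do; re-check the stop flag
        print('processed', item)
        work_queue.task_done()

q, stop = Queue(), threading.Event()
t = threading.Thread(target=consumer, args=(q, stop), name='asset_loader')
t.start()
q.put('asset-1')
q.join()    # wait until the item has been processed
stop.set()  # then ask the loop to exit
t.join()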
{"blob_id": "d6f624a359025df9cc0dc018b8dc46e9a10d389f", "bodies": ["count = 0\nfor i in range(len(nums) - 1):\n for j in range(1, len(nums)):\n tmp = sum(nums[i:j + 1]) - goal\n if tmp < 0:\n continue\n elif tmp == 0:\n count += 1\n else:\n break\nreturn count", "n = len(nums)\nans = l1 = l2 = s1 = s2 = 0\nfor r in range(n):\n s1 += nums[r]\n s2 += nums[r]\n while l1 <= r and s1 > goal:\n s1 -= nums[l1]\n l1 += 1\n while l2 <= r and s2 >= goal:\n s2 -= nums[l2]\n l2 += 1\n ans += l2 - l1\nreturn ans"], "bodies_text": "<|body_start_0|>\n count = 0\n for i in range(len(nums) - 1):\n for j in range(1, len(nums)):\n tmp = sum(nums[i:j + 1]) - goal\n if tmp < 0:\n continue\n elif tmp == 0:\n count += 1\n else:\n break\n return count\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(nums)\n ans = l1 = l2 = s1 = s2 = 0\n for r in range(n):\n s1 += nums[r]\n s2 += nums[r]\n while l1 <= r and s1 > goal:\n s1 -= nums[l1]\n l1 += 1\n while l2 <= r and s2 >= goal:\n s2 -= nums[l2]\n l2 += 1\n ans += l2 - l1\n return ans\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def numSubarraysWithSum(self, nums: List[int], goal: int) -> int:\n \"\"\"双指针 超时 :param nums: :param goal: :return:\"\"\"\n <|body_0|>\n\n def numSubarraysWithSum(self, nums: List[int], goal: int) -> int:\n \"\"\"双指针 :param nums: :param goal: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n count = 0\n for i in range(len(nums) - 1):\n for j in range(1, len(nums)):\n tmp = sum(nums[i:j + 1]) - goal\n if tmp < 0:\n continue\n elif tmp == 0:\n count += 1\n else:\n break\n return count\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(nums)\n ans = l1 = l2 = s1 = s2 = 0\n for r in range(n):\n s1 += nums[r]\n s2 += nums[r]\n while l1 <= r and s1 > goal:\n s1 -= nums[l1]\n l1 += 1\n while l2 <= r and s2 >= goal:\n s2 -= nums[l2]\n l2 += 1\n ans += l2 - l1\n return ans\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000098", "length_bytes": 1932, "license_type": "no_license", "methods": [{"docstring": "双指针 超时 :param nums: :param goal: :return:", "name": "numSubarraysWithSum", "signature": "def numSubarraysWithSum(self, nums: List[int], goal: int) -> int"}, {"docstring": "双指针 :param nums: :param goal: :return:", "name": "numSubarraysWithSum", "signature": "def numSubarraysWithSum(self, nums: List[int], goal: int) -> int"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def numSubarraysWithSum(self, nums: List[int], goal: int) -> int: 双指针 超时 :param nums: :param goal: :return:\n- def numSubarraysWithSum(self, nums: List[int], goal: int) -> int: 双指针 :param nums: :param goal: :return:", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def numSubarraysWithSum(self, nums: List[int], goal: int) -> int: 双指针 超时 :param nums: :param goal: :return:\n- def numSubarraysWithSum(self, nums: List[int], goal: int) -> int: 双指针 :param nums: :param goal: :return:\n\n<|skeleton|>\nclass Solution:\n\n def numSubarraysWithSum(self, nums: List[int], goal: int) -> int:\n \"\"\"双指针 超时 :param nums: :param goal: :return:\"\"\"\n <|body_0|>\n\n def numSubarraysWithSum(self, nums: List[int], goal: int) -> int:\n \"\"\"双指针 :param nums: :param goal: 
:return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n count = 0\n for i in range(len(nums) - 1):\n for j in range(1, len(nums)):\n tmp = sum(nums[i:j + 1]) - goal\n if tmp < 0:\n continue\n elif tmp == 0:\n count += 1\n else:\n break\n return count\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(nums)\n ans = l1 = l2 = s1 = s2 = 0\n for r in range(n):\n s1 += nums[r]\n s2 += nums[r]\n while l1 <= r and s1 > goal:\n s1 -= nums[l1]\n l1 += 1\n while l2 <= r and s2 >= goal:\n s2 -= nums[l2]\n l2 += 1\n ans += l2 - l1\n return ans\n<|end_body_1|>\n", "revision_id": "b1680014ce3f55ba952a1e64241c0cbb783cc436", "skeleton": "<|skeleton|>\nclass Solution:\n\n def numSubarraysWithSum(self, nums: List[int], goal: int) -> int:\n \"\"\"双指针 超时 :param nums: :param goal: :return:\"\"\"\n <|body_0|>\n\n def numSubarraysWithSum(self, nums: List[int], goal: int) -> int:\n \"\"\"双指针 :param nums: :param goal: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def numSubarraysWithSum(self, nums: List[int], goal: int) -> int:\n \"\"\"双指针 超时 :param nums: :param goal: :return:\"\"\"\n count = 0\n for i in range(len(nums) - 1):\n for j in range(1, len(nums)):\n tmp = sum(nums[i:j + 1]) - goal\n if tmp < 0:\n continue\n elif tmp == 0:\n count += 1\n else:\n break\n return count\n\n def numSubarraysWithSum(self, nums: List[int], goal: int) -> int:\n \"\"\"双指针 :param nums: :param goal: :return:\"\"\"\n n = len(nums)\n ans = l1 = l2 = s1 = s2 = 0\n for r in range(n):\n s1 += nums[r]\n s2 += nums[r]\n while l1 <= r and s1 > goal:\n s1 -= nums[l1]\n l1 += 1\n while l2 <= r and s2 >= goal:\n s2 -= nums[l2]\n l2 += 1\n ans += l2 - l1\n return ans\n", "source": "the_stack_v2_python_sparse", "source_path": "a_930.py", "source_repo": "sun510001/leetcode_jianzhi_offer_2", "split": "val", "star_events_count": 0}
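The second numSubarraysWithSum body above is the classic two-window trick: for each right endpoint r, l1 is the smallest left index whose window sums to at most goal, and l2 the smallest whose window sums to strictly less than goal, so exactly l2 - l1 subarrays ending at r sum to goal. A standalone copy with the LeetCode 930 example as a check:

from typing import List

def num_subarrays_with_sum(nums: List[int], goal: int) -> int:
    # O(n) two-window count: left endpoints in [l1, l2) give sum == goal.
    ans = l1 = l2 = s1 = s2 = 0
    for r in range(len(nums)):
        s1 += nums[r]
        s2 += nums[r]
        while l1 <= r and s1 > goal:   # shrink until window sum <= goal
            s1 -= nums[l1]
            l1 += 1
        while l2 <= r and s2 >= goal:  # shrink until window sum < goal
            s2 -= nums[l2]
            l2 += 1
        ans += l2 - l1
    return ans

print(num_subarrays_with_sum([1, 0, 1, 0, 1], 2))  # 4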
{"blob_id": "a93985f62cb809f5cfa8b50b693db9caef6b93e9", "bodies": ["super().set_params(params)\nparams = dict_to_namespace(params)\nself.params.name = getattr(params, 'name', 'UsBaxAcqFunction')", "super().__call__(x_list)\nacq_list = self.acq_vars['std']\nreturn acq_list"], "bodies_text": "<|body_start_0|>\n super().set_params(params)\n params = dict_to_namespace(params)\n self.params.name = getattr(params, 'name', 'UsBaxAcqFunction')\n<|end_body_0|>\n\n<|body_start_1|>\n super().__call__(x_list)\n acq_list = self.acq_vars['std']\n return acq_list\n<|end_body_1|>\n", "class_docstring": "Wrapper on BaxAcqFunction for uncertainty sampling acquisition, when we still want various BaxAcqFunction variables for visualizations.", "class_name": "UsBaxAcqFunction", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass UsBaxAcqFunction:\n \"\"\"Wrapper on BaxAcqFunction for uncertainty sampling acquisition, when we still want various BaxAcqFunction variables for visualizations.\"\"\"\n\n def set_params(self, params):\n \"\"\"Set self.params, the parameters for the AcqFunction.\"\"\"\n <|body_0|>\n\n def __call__(self, x_list):\n \"\"\"Class is callable and returns acquisition function on x_list.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().set_params(params)\n params = dict_to_namespace(params)\n self.params.name = getattr(params, 'name', 'UsBaxAcqFunction')\n<|end_body_0|>\n\n<|body_start_1|>\n super().__call__(x_list)\n acq_list = self.acq_vars['std']\n return acq_list\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000099", "length_bytes": 26407, "license_type": "no_license", "methods": [{"docstring": "Set self.params, the parameters for the AcqFunction.", "name": "set_params", "signature": "def set_params(self, params)"}, {"docstring": "Class is callable and returns acquisition function on x_list.", "name": "__call__", "signature": "def __call__(self, x_list)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_038254", "prompt": "Implement the Python class `UsBaxAcqFunction` described below.\n\nClass description:\nWrapper on BaxAcqFunction for uncertainty sampling acquisition, when we still want various BaxAcqFunction variables for visualizations.\n\nMethod signatures and docstrings:\n- def set_params(self, params): Set self.params, the parameters for the AcqFunction.\n- def __call__(self, x_list): Class is callable and returns acquisition function on x_list.", "prompted_full_text": "Implement the Python class `UsBaxAcqFunction` described below.\n\nClass description:\nWrapper on BaxAcqFunction for uncertainty sampling acquisition, when we still want various BaxAcqFunction variables for visualizations.\n\nMethod signatures and docstrings:\n- def set_params(self, params): Set self.params, the parameters for the AcqFunction.\n- def __call__(self, x_list): Class is callable and returns acquisition function on x_list.\n\n<|skeleton|>\nclass UsBaxAcqFunction:\n \"\"\"Wrapper on BaxAcqFunction for uncertainty sampling acquisition, when we still want various BaxAcqFunction variables for visualizations.\"\"\"\n\n def set_params(self, params):\n \"\"\"Set self.params, the parameters for the AcqFunction.\"\"\"\n <|body_0|>\n\n def __call__(self, x_list):\n \"\"\"Class is callable and returns acquisition function on x_list.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().set_params(params)\n params = dict_to_namespace(params)\n self.params.name = getattr(params, 'name', 
'UsBaxAcqFunction')\n<|end_body_0|>\n\n<|body_start_1|>\n super().__call__(x_list)\n acq_list = self.acq_vars['std']\n return acq_list\n<|end_body_1|>\n", "revision_id": "d75d1a89bb566e62662e4d010d91893bfe1ee9f4", "skeleton": "<|skeleton|>\nclass UsBaxAcqFunction:\n \"\"\"Wrapper on BaxAcqFunction for uncertainty sampling acquisition, when we still want various BaxAcqFunction variables for visualizations.\"\"\"\n\n def set_params(self, params):\n \"\"\"Set self.params, the parameters for the AcqFunction.\"\"\"\n <|body_0|>\n\n def __call__(self, x_list):\n \"\"\"Class is callable and returns acquisition function on x_list.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class UsBaxAcqFunction:\n \"\"\"Wrapper on BaxAcqFunction for uncertainty sampling acquisition, when we still want various BaxAcqFunction variables for visualizations.\"\"\"\n\n def set_params(self, params):\n \"\"\"Set self.params, the parameters for the AcqFunction.\"\"\"\n super().set_params(params)\n params = dict_to_namespace(params)\n self.params.name = getattr(params, 'name', 'UsBaxAcqFunction')\n\n def __call__(self, x_list):\n \"\"\"Class is callable and returns acquisition function on x_list.\"\"\"\n super().__call__(x_list)\n acq_list = self.acq_vars['std']\n return acq_list\n", "source": "the_stack_v2_python_sparse", "source_path": "bax/acq/acquisition.py", "source_repo": "willieneis/bayesian-algorithm-execution", "split": "val", "star_events_count": 45}
{"blob_id": "253a981759823d8ac21a2d7eb77ea636818e352b", "bodies": ["try:\n super(PostUpgradeCmd, self).__init__(config, services)\nexcept Exception as e:\n raise e", "self.logger.info(f'Processing phase = {self.name}, config = {self.url}, service = {self.services}')\ntry:\n self.logger.info('validations started')\n self.phase_prereqs_validate(self.name)\n self.logger.info('validations completed')\n self.logger.info('Copy .ample and unsafe attribute files started')\n self.copy_config_files()\n self.logger.info('Copy .ample and unsafe attribute files completed')\n self.logger.info('Delete config file started')\n self.delete_config_files()\n self.logger.info('Delete config file completed')\n self.logger.info('merge configs started')\n config_file_path = '/etc/cortx'\n merge_configs(config_file_path, self.s3_tmp_dir)\n self.logger.info('merge configs completed')\n self.logger.info('Remove sample.old files started')\n regex = '*.sample.old'\n self.DeleteFileOrDirWithRegex(self.s3_tmp_dir, regex)\n self.logger.info('Remove sample.old files completed')\n self.logger.info('config file validations started')\n self.validate_config_files(self.name)\n self.logger.info('config file validations completed')\nexcept Exception as e:\n raise S3PROVError(f'process: {self.name} failed with exception: {e}')", "config_files = [self.get_confkey('S3_CONFIG_SAMPLE_FILE'), self.get_confkey('S3_CONFIG_UNSAFE_ATTR_FILE'), self.get_confkey('S3_AUTHSERVER_CONFIG_SAMPLE_FILE'), self.get_confkey('S3_AUTHSERVER_CONFIG_UNSAFE_ATTR_FILE'), self.get_confkey('S3_KEYSTORE_CONFIG_SAMPLE_FILE'), self.get_confkey('S3_KEYSTORE_CONFIG_UNSAFE_ATTR_FILE'), self.get_confkey('S3_BGDELETE_CONFIG_SAMPLE_FILE'), self.get_confkey('S3_BGDELETE_CONFIG_UNSAFE_ATTR_FILE'), self.get_confkey('S3_CLUSTER_CONFIG_SAMPLE_FILE'), self.get_confkey('S3_CLUSTER_CONFIG_UNSAFE_ATTR_FILE')]\nfor config_file in config_files:\n self.logger.info(f'Source config file: {config_file}')\n dest_config_file = config_file.replace('/opt/seagate/cortx', '/etc/cortx')\n self.logger.info(f'Dest config file: {dest_config_file}')\n os.makedirs(os.path.dirname(dest_config_file), exist_ok=True)\n shutil.move(config_file, dest_config_file)\n self.logger.info('Config file copied successfully to /etc/cortx')", "config_files = [self.get_confkey('S3_CONFIG_FILE'), self.get_confkey('S3_AUTHSERVER_CONFIG_FILE'), self.get_confkey('S3_KEYSTORE_CONFIG_FILE'), self.get_confkey('S3_BGDELETE_CONFIG_FILE'), self.get_confkey('S3_CLUSTER_CONFIG_FILE')]\nfor config_file in config_files:\n self.DeleteFile(config_file)\n self.logger.info(f'Config file {config_file} deleted successfully')"], "bodies_text": "<|body_start_0|>\n try:\n super(PostUpgradeCmd, self).__init__(config, services)\n except Exception as e:\n raise e\n<|end_body_0|>\n\n<|body_start_1|>\n self.logger.info(f'Processing phase = {self.name}, config = {self.url}, service = {self.services}')\n try:\n self.logger.info('validations started')\n self.phase_prereqs_validate(self.name)\n self.logger.info('validations completed')\n self.logger.info('Copy .ample and unsafe attribute files started')\n self.copy_config_files()\n self.logger.info('Copy .ample and unsafe attribute files completed')\n self.logger.info('Delete config file started')\n self.delete_config_files()\n self.logger.info('Delete config file completed')\n self.logger.info('merge configs started')\n config_file_path = '/etc/cortx'\n merge_configs(config_file_path, self.s3_tmp_dir)\n self.logger.info('merge configs completed')\n self.logger.info('Remove sample.old files 
started')\n regex = '*.sample.old'\n self.DeleteFileOrDirWithRegex(self.s3_tmp_dir, regex)\n self.logger.info('Remove sample.old files completed')\n self.logger.info('config file validations started')\n self.validate_config_files(self.name)\n self.logger.info('config file validations completed')\n except Exception as e:\n raise S3PROVError(f'process: {self.name} failed with exception: {e}')\n<|end_body_1|>\n\n<|body_start_2|>\n config_files = [self.get_confkey('S3_CONFIG_SAMPLE_FILE'), self.get_confkey('S3_CONFIG_UNSAFE_ATTR_FILE'), self.get_confkey('S3_AUTHSERVER_CONFIG_SAMPLE_FILE'), self.get_confkey('S3_AUTHSERVER_CONFIG_UNSAFE_ATTR_FILE'), self.get_confkey('S3_KEYSTORE_CONFIG_SAMPLE_FILE'), self.get_confkey('S3_KEYSTORE_CONFIG_UNSAFE_ATTR_FILE'), self.get_confkey('S3_BGDELETE_CONFIG_SAMPLE_FILE'), self.get_confkey('S3_BGDELETE_CONFIG_UNSAFE_ATTR_FILE'), self.get_confkey('S3_CLUSTER_CONFIG_SAMPLE_FILE'), self.get_confkey('S3_CLUSTER_CONFIG_UNSAFE_ATTR_FILE')]\n for config_file in config_files:\n self.logger.info(f'Source config file: {config_file}')\n dest_config_file = config_file.replace('/opt/seagate/cortx', '/etc/cortx')\n self.logger.info(f'Dest config file: {dest_config_file}')\n os.makedirs(os.path.dirname(dest_config_file), exist_ok=True)\n shutil.move(config_file, dest_config_file)\n self.logger.info('Config file copied successfully to /etc/cortx')\n<|end_body_2|>\n\n<|body_start_3|>\n config_files = [self.get_confkey('S3_CONFIG_FILE'), self.get_confkey('S3_AUTHSERVER_CONFIG_FILE'), self.get_confkey('S3_KEYSTORE_CONFIG_FILE'), self.get_confkey('S3_BGDELETE_CONFIG_FILE'), self.get_confkey('S3_CLUSTER_CONFIG_FILE')]\n for config_file in config_files:\n self.DeleteFile(config_file)\n self.logger.info(f'Config file {config_file} deleted successfully')\n<|end_body_3|>\n", "class_docstring": "Post Upgrade Setup Cmd.", "class_name": "PostUpgradeCmd", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PostUpgradeCmd:\n \"\"\"Post Upgrade Setup Cmd.\"\"\"\n\n def __init__(self, config: str, services: str=None):\n \"\"\"Constructor.\"\"\"\n <|body_0|>\n\n def process(self):\n \"\"\"Main processing function.\"\"\"\n <|body_1|>\n\n def copy_config_files(self):\n \"\"\"Copy sample and unsafe attribute config files from /opt/seagate/cortx to /etc/cortx.\"\"\"\n <|body_2|>\n\n def delete_config_files(self):\n \"\"\"delete config file which are installed by rpm\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n super(PostUpgradeCmd, self).__init__(config, services)\n except Exception as e:\n raise e\n<|end_body_0|>\n\n<|body_start_1|>\n self.logger.info(f'Processing phase = {self.name}, config = {self.url}, service = {self.services}')\n try:\n self.logger.info('validations started')\n self.phase_prereqs_validate(self.name)\n self.logger.info('validations completed')\n self.logger.info('Copy .ample and unsafe attribute files started')\n self.copy_config_files()\n self.logger.info('Copy .ample and unsafe attribute files completed')\n self.logger.info('Delete config file started')\n self.delete_config_files()\n self.logger.info('Delete config file completed')\n self.logger.info('merge configs started')\n config_file_path = '/etc/cortx'\n merge_configs(config_file_path, self.s3_tmp_dir)\n self.logger.info('merge configs completed')\n self.logger.info('Remove sample.old files started')\n regex = '*.sample.old'\n self.DeleteFileOrDirWithRegex(self.s3_tmp_dir, regex)\n self.logger.info('Remove sample.old files 
completed')\n self.logger.info('config file validations started')\n self.validate_config_files(self.name)\n self.logger.info('config file validations completed')\n except Exception as e:\n raise S3PROVError(f'process: {self.name} failed with exception: {e}')\n<|end_body_1|>\n\n<|body_start_2|>\n config_files = [self.get_confkey('S3_CONFIG_SAMPLE_FILE'), self.get_confkey('S3_CONFIG_UNSAFE_ATTR_FILE'), self.get_confkey('S3_AUTHSERVER_CONFIG_SAMPLE_FILE'), self.get_confkey('S3_AUTHSERVER_CONFIG_UNSAFE_ATTR_FILE'), self.get_confkey('S3_KEYSTORE_CONFIG_SAMPLE_FILE'), self.get_confkey('S3_KEYSTORE_CONFIG_UNSAFE_ATTR_FILE'), self.get_confkey('S3_BGDELETE_CONFIG_SAMPLE_FILE'), self.get_confkey('S3_BGDELETE_CONFIG_UNSAFE_ATTR_FILE'), self.get_confkey('S3_CLUSTER_CONFIG_SAMPLE_FILE'), self.get_confkey('S3_CLUSTER_CONFIG_UNSAFE_ATTR_FILE')]\n for config_file in config_files:\n self.logger.info(f'Source config file: {config_file}')\n dest_config_file = config_file.replace('/opt/seagate/cortx', '/etc/cortx')\n self.logger.info(f'Dest config file: {dest_config_file}')\n os.makedirs(os.path.dirname(dest_config_file), exist_ok=True)\n shutil.move(config_file, dest_config_file)\n self.logger.info('Config file copied successfully to /etc/cortx')\n<|end_body_2|>\n\n<|body_start_3|>\n config_files = [self.get_confkey('S3_CONFIG_FILE'), self.get_confkey('S3_AUTHSERVER_CONFIG_FILE'), self.get_confkey('S3_KEYSTORE_CONFIG_FILE'), self.get_confkey('S3_BGDELETE_CONFIG_FILE'), self.get_confkey('S3_CLUSTER_CONFIG_FILE')]\n for config_file in config_files:\n self.DeleteFile(config_file)\n self.logger.info(f'Config file {config_file} deleted successfully')\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000100", "length_bytes": 4830, "license_type": "permissive", "methods": [{"docstring": "Constructor.", "name": "__init__", "signature": "def __init__(self, config: str, services: str=None)"}, {"docstring": "Main processing function.", "name": "process", "signature": "def process(self)"}, {"docstring": "Copy sample and unsafe attribute config files from /opt/seagate/cortx to /etc/cortx.", "name": "copy_config_files", "signature": "def copy_config_files(self)"}, {"docstring": "delete config file which are installed by rpm", "name": "delete_config_files", "signature": "def delete_config_files(self)"}], "n_methods": 4, "prompt": "Implement the Python class `PostUpgradeCmd` described below.\n\nClass description:\nPost Upgrade Setup Cmd.\n\nMethod signatures and docstrings:\n- def __init__(self, config: str, services: str=None): Constructor.\n- def process(self): Main processing function.\n- def copy_config_files(self): Copy sample and unsafe attribute config files from /opt/seagate/cortx to /etc/cortx.\n- def delete_config_files(self): delete config file which are installed by rpm", "prompted_full_text": "Implement the Python class `PostUpgradeCmd` described below.\n\nClass description:\nPost Upgrade Setup Cmd.\n\nMethod signatures and docstrings:\n- def __init__(self, config: str, services: str=None): Constructor.\n- def process(self): Main processing function.\n- def copy_config_files(self): Copy sample and unsafe attribute config files from /opt/seagate/cortx to /etc/cortx.\n- def delete_config_files(self): delete config file which are installed by rpm\n\n<|skeleton|>\nclass PostUpgradeCmd:\n \"\"\"Post Upgrade Setup Cmd.\"\"\"\n\n def __init__(self, config: str, services: str=None):\n \"\"\"Constructor.\"\"\"\n <|body_0|>\n\n def process(self):\n \"\"\"Main processing function.\"\"\"\n <|body_1|>\n\n def 
copy_config_files(self):\n \"\"\"Copy sample and unsafe attribute config files from /opt/seagate/cortx to /etc/cortx.\"\"\"\n <|body_2|>\n\n def delete_config_files(self):\n \"\"\"delete config file which are installed by rpm\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n super(PostUpgradeCmd, self).__init__(config, services)\n except Exception as e:\n raise e\n<|end_body_0|>\n\n<|body_start_1|>\n self.logger.info(f'Processing phase = {self.name}, config = {self.url}, service = {self.services}')\n try:\n self.logger.info('validations started')\n self.phase_prereqs_validate(self.name)\n self.logger.info('validations completed')\n self.logger.info('Copy .ample and unsafe attribute files started')\n self.copy_config_files()\n self.logger.info('Copy .ample and unsafe attribute files completed')\n self.logger.info('Delete config file started')\n self.delete_config_files()\n self.logger.info('Delete config file completed')\n self.logger.info('merge configs started')\n config_file_path = '/etc/cortx'\n merge_configs(config_file_path, self.s3_tmp_dir)\n self.logger.info('merge configs completed')\n self.logger.info('Remove sample.old files started')\n regex = '*.sample.old'\n self.DeleteFileOrDirWithRegex(self.s3_tmp_dir, regex)\n self.logger.info('Remove sample.old files completed')\n self.logger.info('config file validations started')\n self.validate_config_files(self.name)\n self.logger.info('config file validations completed')\n except Exception as e:\n raise S3PROVError(f'process: {self.name} failed with exception: {e}')\n<|end_body_1|>\n\n<|body_start_2|>\n config_files = [self.get_confkey('S3_CONFIG_SAMPLE_FILE'), self.get_confkey('S3_CONFIG_UNSAFE_ATTR_FILE'), self.get_confkey('S3_AUTHSERVER_CONFIG_SAMPLE_FILE'), self.get_confkey('S3_AUTHSERVER_CONFIG_UNSAFE_ATTR_FILE'), self.get_confkey('S3_KEYSTORE_CONFIG_SAMPLE_FILE'), self.get_confkey('S3_KEYSTORE_CONFIG_UNSAFE_ATTR_FILE'), self.get_confkey('S3_BGDELETE_CONFIG_SAMPLE_FILE'), self.get_confkey('S3_BGDELETE_CONFIG_UNSAFE_ATTR_FILE'), self.get_confkey('S3_CLUSTER_CONFIG_SAMPLE_FILE'), self.get_confkey('S3_CLUSTER_CONFIG_UNSAFE_ATTR_FILE')]\n for config_file in config_files:\n self.logger.info(f'Source config file: {config_file}')\n dest_config_file = config_file.replace('/opt/seagate/cortx', '/etc/cortx')\n self.logger.info(f'Dest config file: {dest_config_file}')\n os.makedirs(os.path.dirname(dest_config_file), exist_ok=True)\n shutil.move(config_file, dest_config_file)\n self.logger.info('Config file copied successfully to /etc/cortx')\n<|end_body_2|>\n\n<|body_start_3|>\n config_files = [self.get_confkey('S3_CONFIG_FILE'), self.get_confkey('S3_AUTHSERVER_CONFIG_FILE'), self.get_confkey('S3_KEYSTORE_CONFIG_FILE'), self.get_confkey('S3_BGDELETE_CONFIG_FILE'), self.get_confkey('S3_CLUSTER_CONFIG_FILE')]\n for config_file in config_files:\n self.DeleteFile(config_file)\n self.logger.info(f'Config file {config_file} deleted successfully')\n<|end_body_3|>\n", "revision_id": "b1987967aec7e24530c9703db6f100d2c8289624", "skeleton": "<|skeleton|>\nclass PostUpgradeCmd:\n \"\"\"Post Upgrade Setup Cmd.\"\"\"\n\n def __init__(self, config: str, services: str=None):\n \"\"\"Constructor.\"\"\"\n <|body_0|>\n\n def process(self):\n \"\"\"Main processing function.\"\"\"\n <|body_1|>\n\n def copy_config_files(self):\n \"\"\"Copy sample and unsafe attribute config files from /opt/seagate/cortx to /etc/cortx.\"\"\"\n <|body_2|>\n\n def delete_config_files(self):\n \"\"\"delete config file which are installed by rpm\"\"\"\n 
<|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class PostUpgradeCmd:\n \"\"\"Post Upgrade Setup Cmd.\"\"\"\n\n def __init__(self, config: str, services: str=None):\n \"\"\"Constructor.\"\"\"\n try:\n super(PostUpgradeCmd, self).__init__(config, services)\n except Exception as e:\n raise e\n\n def process(self):\n \"\"\"Main processing function.\"\"\"\n self.logger.info(f'Processing phase = {self.name}, config = {self.url}, service = {self.services}')\n try:\n self.logger.info('validations started')\n self.phase_prereqs_validate(self.name)\n self.logger.info('validations completed')\n self.logger.info('Copy .ample and unsafe attribute files started')\n self.copy_config_files()\n self.logger.info('Copy .ample and unsafe attribute files completed')\n self.logger.info('Delete config file started')\n self.delete_config_files()\n self.logger.info('Delete config file completed')\n self.logger.info('merge configs started')\n config_file_path = '/etc/cortx'\n merge_configs(config_file_path, self.s3_tmp_dir)\n self.logger.info('merge configs completed')\n self.logger.info('Remove sample.old files started')\n regex = '*.sample.old'\n self.DeleteFileOrDirWithRegex(self.s3_tmp_dir, regex)\n self.logger.info('Remove sample.old files completed')\n self.logger.info('config file validations started')\n self.validate_config_files(self.name)\n self.logger.info('config file validations completed')\n except Exception as e:\n raise S3PROVError(f'process: {self.name} failed with exception: {e}')\n\n def copy_config_files(self):\n \"\"\"Copy sample and unsafe attribute config files from /opt/seagate/cortx to /etc/cortx.\"\"\"\n config_files = [self.get_confkey('S3_CONFIG_SAMPLE_FILE'), self.get_confkey('S3_CONFIG_UNSAFE_ATTR_FILE'), self.get_confkey('S3_AUTHSERVER_CONFIG_SAMPLE_FILE'), self.get_confkey('S3_AUTHSERVER_CONFIG_UNSAFE_ATTR_FILE'), self.get_confkey('S3_KEYSTORE_CONFIG_SAMPLE_FILE'), self.get_confkey('S3_KEYSTORE_CONFIG_UNSAFE_ATTR_FILE'), self.get_confkey('S3_BGDELETE_CONFIG_SAMPLE_FILE'), self.get_confkey('S3_BGDELETE_CONFIG_UNSAFE_ATTR_FILE'), self.get_confkey('S3_CLUSTER_CONFIG_SAMPLE_FILE'), self.get_confkey('S3_CLUSTER_CONFIG_UNSAFE_ATTR_FILE')]\n for config_file in config_files:\n self.logger.info(f'Source config file: {config_file}')\n dest_config_file = config_file.replace('/opt/seagate/cortx', '/etc/cortx')\n self.logger.info(f'Dest config file: {dest_config_file}')\n os.makedirs(os.path.dirname(dest_config_file), exist_ok=True)\n shutil.move(config_file, dest_config_file)\n self.logger.info('Config file copied successfully to /etc/cortx')\n\n def delete_config_files(self):\n \"\"\"delete config file which are installed by rpm\"\"\"\n config_files = [self.get_confkey('S3_CONFIG_FILE'), self.get_confkey('S3_AUTHSERVER_CONFIG_FILE'), self.get_confkey('S3_KEYSTORE_CONFIG_FILE'), self.get_confkey('S3_BGDELETE_CONFIG_FILE'), self.get_confkey('S3_CLUSTER_CONFIG_FILE')]\n for config_file in config_files:\n self.DeleteFile(config_file)\n self.logger.info(f'Config file {config_file} deleted successfully')\n", "source": "the_stack_v2_python_sparse", "source_path": "scripts/provisioning/postupgradecmd.py", "source_repo": "Seagate/cortx-s3server", "split": "val", "star_events_count": 38}
{"blob_id": "cfdee13135ccde9794ca75eab0c93f95ba1e0d9b", "bodies": ["if isinstance(key, int):\n return TOS_THR(key)\nif key not in TOS_THR._member_map_:\n extend_enum(TOS_THR, key, default)\nreturn TOS_THR[key]", "if not (isinstance(value, int) and 0 <= value <= 1):\n raise ValueError('%r is not a valid %s' % (value, cls.__name__))\nextend_enum(cls, 'Unassigned [%d]' % value, value)\nreturn cls(value)\nsuper()._missing_(value)"], "bodies_text": "<|body_start_0|>\n if isinstance(key, int):\n return TOS_THR(key)\n if key not in TOS_THR._member_map_:\n extend_enum(TOS_THR, key, default)\n return TOS_THR[key]\n<|end_body_0|>\n\n<|body_start_1|>\n if not (isinstance(value, int) and 0 <= value <= 1):\n raise ValueError('%r is not a valid %s' % (value, cls.__name__))\n extend_enum(cls, 'Unassigned [%d]' % value, value)\n return cls(value)\n super()._missing_(value)\n<|end_body_1|>\n", "class_docstring": "Enumeration class for TOS_THR.", "class_name": "TOS_THR", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TOS_THR:\n \"\"\"Enumeration class for TOS_THR.\"\"\"\n\n def get(key, default=-1):\n \"\"\"Backport support for original codes.\"\"\"\n <|body_0|>\n\n def _missing_(cls, value):\n \"\"\"Lookup function used when value is not found.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if isinstance(key, int):\n return TOS_THR(key)\n if key not in TOS_THR._member_map_:\n extend_enum(TOS_THR, key, default)\n return TOS_THR[key]\n<|end_body_0|>\n\n<|body_start_1|>\n if not (isinstance(value, int) and 0 <= value <= 1):\n raise ValueError('%r is not a valid %s' % (value, cls.__name__))\n extend_enum(cls, 'Unassigned [%d]' % value, value)\n return cls(value)\n super()._missing_(value)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000101", "length_bytes": 911, "license_type": "no_license", "methods": [{"docstring": "Backport support for original codes.", "name": "get", "signature": "def get(key, default=-1)"}, {"docstring": "Lookup function used when value is not found.", "name": "_missing_", "signature": "def _missing_(cls, value)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_020181", "prompt": "Implement the Python class `TOS_THR` described below.\n\nClass description:\nEnumeration class for TOS_THR.\n\nMethod signatures and docstrings:\n- def get(key, default=-1): Backport support for original codes.\n- def _missing_(cls, value): Lookup function used when value is not found.", "prompted_full_text": "Implement the Python class `TOS_THR` described below.\n\nClass description:\nEnumeration class for TOS_THR.\n\nMethod signatures and docstrings:\n- def get(key, default=-1): Backport support for original codes.\n- def _missing_(cls, value): Lookup function used when value is not found.\n\n<|skeleton|>\nclass TOS_THR:\n \"\"\"Enumeration class for TOS_THR.\"\"\"\n\n def get(key, default=-1):\n \"\"\"Backport support for original codes.\"\"\"\n <|body_0|>\n\n def _missing_(cls, value):\n \"\"\"Lookup function used when value is not found.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if isinstance(key, int):\n return TOS_THR(key)\n if key not in TOS_THR._member_map_:\n extend_enum(TOS_THR, key, default)\n return TOS_THR[key]\n<|end_body_0|>\n\n<|body_start_1|>\n if not (isinstance(value, int) and 0 <= value <= 1):\n raise ValueError('%r is not a valid %s' % (value, cls.__name__))\n extend_enum(cls, 'Unassigned [%d]' % value, value)\n return cls(value)\n 
super()._missing_(value)\n<|end_body_1|>\n", "revision_id": "fd43ccca1d032f8f230c4467dcb5df757669ef13", "skeleton": "<|skeleton|>\nclass TOS_THR:\n \"\"\"Enumeration class for TOS_THR.\"\"\"\n\n def get(key, default=-1):\n \"\"\"Backport support for original codes.\"\"\"\n <|body_0|>\n\n def _missing_(cls, value):\n \"\"\"Lookup function used when value is not found.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TOS_THR:\n \"\"\"Enumeration class for TOS_THR.\"\"\"\n\n def get(key, default=-1):\n \"\"\"Backport support for original codes.\"\"\"\n if isinstance(key, int):\n return TOS_THR(key)\n if key not in TOS_THR._member_map_:\n extend_enum(TOS_THR, key, default)\n return TOS_THR[key]\n\n def _missing_(cls, value):\n \"\"\"Lookup function used when value is not found.\"\"\"\n if not (isinstance(value, int) and 0 <= value <= 1):\n raise ValueError('%r is not a valid %s' % (value, cls.__name__))\n extend_enum(cls, 'Unassigned [%d]' % value, value)\n return cls(value)\n super()._missing_(value)\n", "source": "the_stack_v2_python_sparse", "source_path": "venv/lib/python3.6/site-packages/pcapkit/const/ipv4/tos_thr.py", "source_repo": "IvanLetteri/MLfeaturesExtractor", "split": "val", "star_events_count": 0}
{"blob_id": "43ec64247b00cd74b4f7d33347c62f59636b06b8", "bodies": ["super(GenericModelForm, self).__init__(*args, **kwargs)\nfor field in self._meta.model._meta.virtual_fields:\n self.initial[field.name] = getattr(self.instance, field.name, None)", "super(GenericModelForm, self)._post_clean()\nfor field in self._meta.model._meta.virtual_fields:\n value = self.cleaned_data.get(field.name, None)\n if value:\n setattr(self.instance, field.name, value)", "for field in self._meta.model._meta.virtual_fields:\n if isinstance(field, GenericForeignKey):\n value = self.cleaned_data.get(field.name, None)\n if not value:\n continue\n setattr(self.instance, field.ct_field, ContentType.objects.get_for_model(value))\n setattr(self.instance, field.fk_field, value.pk)\nreturn super(GenericModelForm, self).save(commit)"], "bodies_text": "<|body_start_0|>\n super(GenericModelForm, self).__init__(*args, **kwargs)\n for field in self._meta.model._meta.virtual_fields:\n self.initial[field.name] = getattr(self.instance, field.name, None)\n<|end_body_0|>\n\n<|body_start_1|>\n super(GenericModelForm, self)._post_clean()\n for field in self._meta.model._meta.virtual_fields:\n value = self.cleaned_data.get(field.name, None)\n if value:\n setattr(self.instance, field.name, value)\n<|end_body_1|>\n\n<|body_start_2|>\n for field in self._meta.model._meta.virtual_fields:\n if isinstance(field, GenericForeignKey):\n value = self.cleaned_data.get(field.name, None)\n if not value:\n continue\n setattr(self.instance, field.ct_field, ContentType.objects.get_for_model(value))\n setattr(self.instance, field.fk_field, value.pk)\n return super(GenericModelForm, self).save(commit)\n<|end_body_2|>\n", "class_docstring": "This simple subclass of ModelForm fixes a couple of issues with django's ModelForm. - treat virtual fields like GenericForeignKey as normal fields, Django should already do that but it doesn't, - when setting a GenericForeignKey value, also set the object id and content type id fields, again Django could probably afford to do that.", "class_name": "GenericModelForm", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GenericModelForm:\n \"\"\"This simple subclass of ModelForm fixes a couple of issues with django's ModelForm. - treat virtual fields like GenericForeignKey as normal fields, Django should already do that but it doesn't, - when setting a GenericForeignKey value, also set the object id and content type id fields, again Django could probably afford to do that.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"What ModelForm does, but also add virtual field values to self.initial.\"\"\"\n <|body_0|>\n\n def _post_clean(self):\n \"\"\"What ModelForm does, but also set virtual field values from cleaned_data.\"\"\"\n <|body_1|>\n\n def save(self, commit=True):\n \"\"\"What ModelForm does, but also set GFK.ct_field and GFK.fk_field if such a virtual field has a value. 
This should probably be done in the GFK field itself, but it's here for convenience until Django fixes that.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(GenericModelForm, self).__init__(*args, **kwargs)\n for field in self._meta.model._meta.virtual_fields:\n self.initial[field.name] = getattr(self.instance, field.name, None)\n<|end_body_0|>\n\n<|body_start_1|>\n super(GenericModelForm, self)._post_clean()\n for field in self._meta.model._meta.virtual_fields:\n value = self.cleaned_data.get(field.name, None)\n if value:\n setattr(self.instance, field.name, value)\n<|end_body_1|>\n\n<|body_start_2|>\n for field in self._meta.model._meta.virtual_fields:\n if isinstance(field, GenericForeignKey):\n value = self.cleaned_data.get(field.name, None)\n if not value:\n continue\n setattr(self.instance, field.ct_field, ContentType.objects.get_for_model(value))\n setattr(self.instance, field.fk_field, value.pk)\n return super(GenericModelForm, self).save(commit)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000102", "length_bytes": 3887, "license_type": "permissive", "methods": [{"docstring": "What ModelForm does, but also add virtual field values to self.initial.", "name": "__init__", "signature": "def __init__(self, *args, **kwargs)"}, {"docstring": "What ModelForm does, but also set virtual field values from cleaned_data.", "name": "_post_clean", "signature": "def _post_clean(self)"}, {"docstring": "What ModelForm does, but also set GFK.ct_field and GFK.fk_field if such a virtual field has a value. This should probably be done in the GFK field itself, but it's here for convenience until Django fixes that.", "name": "save", "signature": "def save(self, commit=True)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_015396", "prompt": "Implement the Python class `GenericModelForm` described below.\n\nClass description:\nThis simple subclass of ModelForm fixes a couple of issues with django's ModelForm. - treat virtual fields like GenericForeignKey as normal fields, Django should already do that but it doesn't, - when setting a GenericForeignKey value, also set the object id and content type id fields, again Django could probably afford to do that.\n\nMethod signatures and docstrings:\n- def __init__(self, *args, **kwargs): What ModelForm does, but also add virtual field values to self.initial.\n- def _post_clean(self): What ModelForm does, but also set virtual field values from cleaned_data.\n- def save(self, commit=True): What ModelForm does, but also set GFK.ct_field and GFK.fk_field if such a virtual field has a value. This should probably be done in the GFK field itself, but it's here for convenience until Django fixes that.", "prompted_full_text": "Implement the Python class `GenericModelForm` described below.\n\nClass description:\nThis simple subclass of ModelForm fixes a couple of issues with django's ModelForm. - treat virtual fields like GenericForeignKey as normal fields, Django should already do that but it doesn't, - when setting a GenericForeignKey value, also set the object id and content type id fields, again Django could probably afford to do that.\n\nMethod signatures and docstrings:\n- def __init__(self, *args, **kwargs): What ModelForm does, but also add virtual field values to self.initial.\n- def _post_clean(self): What ModelForm does, but also set virtual field values from cleaned_data.\n- def save(self, commit=True): What ModelForm does, but also set GFK.ct_field and GFK.fk_field if such a virtual field has a value. 
This should probably be done in the GFK field itself, but it's here for convenience until Django fixes that.\n\n<|skeleton|>\nclass GenericModelForm:\n \"\"\"This simple subclass of ModelForm fixes a couple of issues with django's ModelForm. - treat virtual fields like GenericForeignKey as normal fields, Django should already do that but it doesn't, - when setting a GenericForeignKey value, also set the object id and content type id fields, again Django could probably afford to do that.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"What ModelForm does, but also add virtual field values to self.initial.\"\"\"\n <|body_0|>\n\n def _post_clean(self):\n \"\"\"What ModelForm does, but also set virtual field values from cleaned_data.\"\"\"\n <|body_1|>\n\n def save(self, commit=True):\n \"\"\"What ModelForm does, but also set GFK.ct_field and GFK.fk_field if such a virtual field has a value. This should probably be done in the GFK field itself, but it's here for convenience until Django fixes that.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(GenericModelForm, self).__init__(*args, **kwargs)\n for field in self._meta.model._meta.virtual_fields:\n self.initial[field.name] = getattr(self.instance, field.name, None)\n<|end_body_0|>\n\n<|body_start_1|>\n super(GenericModelForm, self)._post_clean()\n for field in self._meta.model._meta.virtual_fields:\n value = self.cleaned_data.get(field.name, None)\n if value:\n setattr(self.instance, field.name, value)\n<|end_body_1|>\n\n<|body_start_2|>\n for field in self._meta.model._meta.virtual_fields:\n if isinstance(field, GenericForeignKey):\n value = self.cleaned_data.get(field.name, None)\n if not value:\n continue\n setattr(self.instance, field.ct_field, ContentType.objects.get_for_model(value))\n setattr(self.instance, field.fk_field, value.pk)\n return super(GenericModelForm, self).save(commit)\n<|end_body_2|>\n", "revision_id": "362314e35fdbdf2d0eabb2bec89e3eed060f8c46", "skeleton": "<|skeleton|>\nclass GenericModelForm:\n \"\"\"This simple subclass of ModelForm fixes a couple of issues with django's ModelForm. - treat virtual fields like GenericForeignKey as normal fields, Django should already do that but it doesn't, - when setting a GenericForeignKey value, also set the object id and content type id fields, again Django could probably afford to do that.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"What ModelForm does, but also add virtual field values to self.initial.\"\"\"\n <|body_0|>\n\n def _post_clean(self):\n \"\"\"What ModelForm does, but also set virtual field values from cleaned_data.\"\"\"\n <|body_1|>\n\n def save(self, commit=True):\n \"\"\"What ModelForm does, but also set GFK.ct_field and GFK.fk_field if such a virtual field has a value. This should probably be done in the GFK field itself, but it's here for convenience until Django fixes that.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class GenericModelForm:\n \"\"\"This simple subclass of ModelForm fixes a couple of issues with django's ModelForm. 
- treat virtual fields like GenericForeignKey as normal fields, Django should already do that but it doesn't, - when setting a GenericForeignKey value, also set the object id and content type id fields, again Django could probably afford to do that.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"What ModelForm does, but also add virtual field values to self.initial.\"\"\"\n super(GenericModelForm, self).__init__(*args, **kwargs)\n for field in self._meta.model._meta.virtual_fields:\n self.initial[field.name] = getattr(self.instance, field.name, None)\n\n def _post_clean(self):\n \"\"\"What ModelForm does, but also set virtual field values from cleaned_data.\"\"\"\n super(GenericModelForm, self)._post_clean()\n for field in self._meta.model._meta.virtual_fields:\n value = self.cleaned_data.get(field.name, None)\n if value:\n setattr(self.instance, field.name, value)\n\n def save(self, commit=True):\n \"\"\"What ModelForm does, but also set GFK.ct_field and GFK.fk_field if such a virtual field has a value. This should probably be done in the GFK field itself, but it's here for convenience until Django fixes that.\"\"\"\n for field in self._meta.model._meta.virtual_fields:\n if isinstance(field, GenericForeignKey):\n value = self.cleaned_data.get(field.name, None)\n if not value:\n continue\n setattr(self.instance, field.ct_field, ContentType.objects.get_for_model(value))\n setattr(self.instance, field.fk_field, value.pk)\n return super(GenericModelForm, self).save(commit)\n", "source": "the_stack_v2_python_sparse", "source_path": "autocomplete_light/generic.py", "source_repo": "papalagichen/django-autocomplete-light", "split": "val", "star_events_count": 0}
{"blob_id": "76233e39db5a0bb5352a9188dc77ae22aa6116c4", "bodies": ["Inferencia.__init__(self)\nself.hallazgo = hallazgo\nself.norma = norma\nself.explicacion = u''", "diferencia = float(self.hallazgo.valor.replace(',', '.')) - float(self.norma.valor)\nif self.norma.tipo == '<=':\n if float(self.hallazgo.valor) <= float(self.norma.valor):\n return (True, abs(diferencia))\n else:\n return (False, abs(diferencia))\nelif self.norma.tipo == '>=':\n if float(self.hallazgo.valor) >= float(self.norma.valor):\n return (True, abs(diferencia))\n else:\n return (False, abs(diferencia))"], "bodies_text": "<|body_start_0|>\n Inferencia.__init__(self)\n self.hallazgo = hallazgo\n self.norma = norma\n self.explicacion = u''\n<|end_body_0|>\n\n<|body_start_1|>\n diferencia = float(self.hallazgo.valor.replace(',', '.')) - float(self.norma.valor)\n if self.norma.tipo == '<=':\n if float(self.hallazgo.valor) <= float(self.norma.valor):\n return (True, abs(diferencia))\n else:\n return (False, abs(diferencia))\n elif self.norma.tipo == '>=':\n if float(self.hallazgo.valor) >= float(self.norma.valor):\n return (True, abs(diferencia))\n else:\n return (False, abs(diferencia))\n<|end_body_1|>\n", "class_docstring": "Clase representativa de la inferencia encargada de comparar el hallazgo recibido con la norma del parametro seleccionado :author: Michael Castillo Polo y Luis Miguel López Coleto :date: 10/06/2015", "class_name": "Comparar", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Comparar:\n \"\"\"Clase representativa de la inferencia encargada de comparar el hallazgo recibido con la norma del parametro seleccionado :author: Michael Castillo Polo y Luis Miguel López Coleto :date: 10/06/2015\"\"\"\n\n def __init__(self, hallazgo, norma):\n \"\"\"Constructor de la clase :param hallazgo: Informacion recibida del exterior :param norma: Valor 'normal' del parametro :author: Michael Castillo Polo y Luis Miguel López Coleto :date: 10/06/2015\"\"\"\n <|body_0|>\n\n def execute(self):\n \"\"\"Metodo encargado de llevar a cabo la comparacion del hallazgo con la norma :return: Resultado de la equiparacion [True o False], seguido de su respectiva explicacion :author: Manuel Alejandro Luque León :date 06/06/2020\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Inferencia.__init__(self)\n self.hallazgo = hallazgo\n self.norma = norma\n self.explicacion = u''\n<|end_body_0|>\n\n<|body_start_1|>\n diferencia = float(self.hallazgo.valor.replace(',', '.')) - float(self.norma.valor)\n if self.norma.tipo == '<=':\n if float(self.hallazgo.valor) <= float(self.norma.valor):\n return (True, abs(diferencia))\n else:\n return (False, abs(diferencia))\n elif self.norma.tipo == '>=':\n if float(self.hallazgo.valor) >= float(self.norma.valor):\n return (True, abs(diferencia))\n else:\n return (False, abs(diferencia))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000103", "length_bytes": 7195, "license_type": "no_license", "methods": [{"docstring": "Constructor de la clase :param hallazgo: Informacion recibida del exterior :param norma: Valor 'normal' del parametro :author: Michael Castillo Polo y Luis Miguel López Coleto :date: 10/06/2015", "name": "__init__", "signature": "def __init__(self, hallazgo, norma)"}, {"docstring": "Metodo encargado de llevar a cabo la comparacion del hallazgo con la norma :return: Resultado de la equiparacion [True o False], seguido de su respectiva explicacion :author: Manuel Alejandro Luque León :date 
06/06/2020", "name": "execute", "signature": "def execute(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_test_002676", "prompt": "Implement the Python class `Comparar` described below.\n\nClass description:\nClase representativa de la inferencia encargada de comparar el hallazgo recibido con la norma del parametro seleccionado :author: Michael Castillo Polo y Luis Miguel López Coleto :date: 10/06/2015\n\nMethod signatures and docstrings:\n- def __init__(self, hallazgo, norma): Constructor de la clase :param hallazgo: Informacion recibida del exterior :param norma: Valor 'normal' del parametro :author: Michael Castillo Polo y Luis Miguel López Coleto :date: 10/06/2015\n- def execute(self): Metodo encargado de llevar a cabo la comparacion del hallazgo con la norma :return: Resultado de la equiparacion [True o False], seguido de su respectiva explicacion :author: Manuel Alejandro Luque León :date 06/06/2020", "prompted_full_text": "Implement the Python class `Comparar` described below.\n\nClass description:\nClase representativa de la inferencia encargada de comparar el hallazgo recibido con la norma del parametro seleccionado :author: Michael Castillo Polo y Luis Miguel López Coleto :date: 10/06/2015\n\nMethod signatures and docstrings:\n- def __init__(self, hallazgo, norma): Constructor de la clase :param hallazgo: Informacion recibida del exterior :param norma: Valor 'normal' del parametro :author: Michael Castillo Polo y Luis Miguel López Coleto :date: 10/06/2015\n- def execute(self): Metodo encargado de llevar a cabo la comparacion del hallazgo con la norma :return: Resultado de la equiparacion [True o False], seguido de su respectiva explicacion :author: Manuel Alejandro Luque León :date 06/06/2020\n\n<|skeleton|>\nclass Comparar:\n \"\"\"Clase representativa de la inferencia encargada de comparar el hallazgo recibido con la norma del parametro seleccionado :author: Michael Castillo Polo y Luis Miguel López Coleto :date: 10/06/2015\"\"\"\n\n def __init__(self, hallazgo, norma):\n \"\"\"Constructor de la clase :param hallazgo: Informacion recibida del exterior :param norma: Valor 'normal' del parametro :author: Michael Castillo Polo y Luis Miguel López Coleto :date: 10/06/2015\"\"\"\n <|body_0|>\n\n def execute(self):\n \"\"\"Metodo encargado de llevar a cabo la comparacion del hallazgo con la norma :return: Resultado de la equiparacion [True o False], seguido de su respectiva explicacion :author: Manuel Alejandro Luque León :date 06/06/2020\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Inferencia.__init__(self)\n self.hallazgo = hallazgo\n self.norma = norma\n self.explicacion = u''\n<|end_body_0|>\n\n<|body_start_1|>\n diferencia = float(self.hallazgo.valor.replace(',', '.')) - float(self.norma.valor)\n if self.norma.tipo == '<=':\n if float(self.hallazgo.valor) <= float(self.norma.valor):\n return (True, abs(diferencia))\n else:\n return (False, abs(diferencia))\n elif self.norma.tipo == '>=':\n if float(self.hallazgo.valor) >= float(self.norma.valor):\n return (True, abs(diferencia))\n else:\n return (False, abs(diferencia))\n<|end_body_1|>\n", "revision_id": "2679bb833d4b849c109b2242af9417908c7bea21", "skeleton": "<|skeleton|>\nclass Comparar:\n \"\"\"Clase representativa de la inferencia encargada de comparar el hallazgo recibido con la norma del parametro seleccionado :author: Michael Castillo Polo y Luis Miguel López Coleto :date: 10/06/2015\"\"\"\n\n def __init__(self, hallazgo, norma):\n \"\"\"Constructor de la clase :param hallazgo: 
Informacion recibida del exterior :param norma: Valor 'normal' del parametro :author: Michael Castillo Polo y Luis Miguel López Coleto :date: 10/06/2015\"\"\"\n <|body_0|>\n\n def execute(self):\n \"\"\"Metodo encargado de llevar a cabo la comparacion del hallazgo con la norma :return: Resultado de la equiparacion [True o False], seguido de su respectiva explicacion :author: Manuel Alejandro Luque León :date 06/06/2020\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Comparar:\n \"\"\"Clase representativa de la inferencia encargada de comparar el hallazgo recibido con la norma del parametro seleccionado :author: Michael Castillo Polo y Luis Miguel López Coleto :date: 10/06/2015\"\"\"\n\n def __init__(self, hallazgo, norma):\n \"\"\"Constructor de la clase :param hallazgo: Informacion recibida del exterior :param norma: Valor 'normal' del parametro :author: Michael Castillo Polo y Luis Miguel López Coleto :date: 10/06/2015\"\"\"\n Inferencia.__init__(self)\n self.hallazgo = hallazgo\n self.norma = norma\n self.explicacion = u''\n\n def execute(self):\n \"\"\"Metodo encargado de llevar a cabo la comparacion del hallazgo con la norma :return: Resultado de la equiparacion [True o False], seguido de su respectiva explicacion :author: Manuel Alejandro Luque León :date 06/06/2020\"\"\"\n diferencia = float(self.hallazgo.valor.replace(',', '.')) - float(self.norma.valor)\n if self.norma.tipo == '<=':\n if float(self.hallazgo.valor) <= float(self.norma.valor):\n return (True, abs(diferencia))\n else:\n return (False, abs(diferencia))\n elif self.norma.tipo == '>=':\n if float(self.hallazgo.valor) >= float(self.norma.valor):\n return (True, abs(diferencia))\n else:\n return (False, abs(diferencia))\n", "source": "the_stack_v2_python_sparse", "source_path": "Third_year/ISSBC/TrabajoFinal_Monitorizacion/source/ckModMonitorizacion.py", "source_repo": "AlexTheMagnus/UCO-Practices", "split": "val", "star_events_count": 0}
{"blob_id": "901f186b3f1e72c9cff7f350bdf0ed0c9a62beb6", "bodies": ["rmgpy_path = os.path.normpath(os.path.join(get_path(), '..'))\nqm = QMCalculator(software='mopac', method='pm7', fileStore=os.path.join(rmgpy_path, 'testing', 'qm', 'QMfiles'), scratchDirectory=os.path.join(rmgpy_path, 'testing', 'qm', 'QMscratch'))\nif not os.path.exists(qm.settings.fileStore):\n os.makedirs(qm.settings.fileStore)\nmol1 = Molecule().from_smiles('C1=CC=C2C=CC=CC2=C1')\nself.qmmol1 = MopacMolPM7(mol1, qm.settings)", "for directory in (self.qmmol1.settings.fileStore, self.qmmol1.settings.scratchDirectory):\n shutil.rmtree(directory, ignore_errors=True)\nself.qmmol1.generate_thermo_data()\nresult = self.qmmol1.qm_data\nself.assertTrue(self.qmmol1.verify_output_file())\nself.assertTrue(self.qmmol1.thermo.comment.startswith('QM MopacMolPM7 calculation'))\nself.assertEqual(result.numberOfAtoms, 18)\nself.assertIsInstance(result.atomicNumbers, np.ndarray)\nif result.molecularMass.units == 'amu':\n self.assertAlmostEqual(result.molecularMass.value, 128.173, 2)\nself.assertAlmostEqual(self.qmmol1.thermo.H298.value_si, 166168.9863, 0)\nself.assertAlmostEqual(self.qmmol1.thermo.S298.value_si, 336.3330406, 1)", "self.qmmol1.generate_thermo_data()\nresult = self.qmmol1.qm_data\nself.assertTrue(self.qmmol1.thermo.comment.startswith('QM MopacMolPM7 calculation'))\nself.assertEqual(result.numberOfAtoms, 18)\nself.assertIsInstance(result.atomicNumbers, np.ndarray)\nif result.molecularMass.units == 'amu':\n self.assertAlmostEqual(result.molecularMass.value, 128.173, 2)\nself.assertAlmostEqual(self.qmmol1.thermo.H298.value_si, 166168.8571, 0)\nself.assertAlmostEqual(self.qmmol1.thermo.S298.value_si, 336.3330406, 1)"], "bodies_text": "<|body_start_0|>\n rmgpy_path = os.path.normpath(os.path.join(get_path(), '..'))\n qm = QMCalculator(software='mopac', method='pm7', fileStore=os.path.join(rmgpy_path, 'testing', 'qm', 'QMfiles'), scratchDirectory=os.path.join(rmgpy_path, 'testing', 'qm', 'QMscratch'))\n if not os.path.exists(qm.settings.fileStore):\n os.makedirs(qm.settings.fileStore)\n mol1 = Molecule().from_smiles('C1=CC=C2C=CC=CC2=C1')\n self.qmmol1 = MopacMolPM7(mol1, qm.settings)\n<|end_body_0|>\n\n<|body_start_1|>\n for directory in (self.qmmol1.settings.fileStore, self.qmmol1.settings.scratchDirectory):\n shutil.rmtree(directory, ignore_errors=True)\n self.qmmol1.generate_thermo_data()\n result = self.qmmol1.qm_data\n self.assertTrue(self.qmmol1.verify_output_file())\n self.assertTrue(self.qmmol1.thermo.comment.startswith('QM MopacMolPM7 calculation'))\n self.assertEqual(result.numberOfAtoms, 18)\n self.assertIsInstance(result.atomicNumbers, np.ndarray)\n if result.molecularMass.units == 'amu':\n self.assertAlmostEqual(result.molecularMass.value, 128.173, 2)\n self.assertAlmostEqual(self.qmmol1.thermo.H298.value_si, 166168.9863, 0)\n self.assertAlmostEqual(self.qmmol1.thermo.S298.value_si, 336.3330406, 1)\n<|end_body_1|>\n\n<|body_start_2|>\n self.qmmol1.generate_thermo_data()\n result = self.qmmol1.qm_data\n self.assertTrue(self.qmmol1.thermo.comment.startswith('QM MopacMolPM7 calculation'))\n self.assertEqual(result.numberOfAtoms, 18)\n self.assertIsInstance(result.atomicNumbers, np.ndarray)\n if result.molecularMass.units == 'amu':\n self.assertAlmostEqual(result.molecularMass.value, 128.173, 2)\n self.assertAlmostEqual(self.qmmol1.thermo.H298.value_si, 166168.8571, 0)\n self.assertAlmostEqual(self.qmmol1.thermo.S298.value_si, 336.3330406, 1)\n<|end_body_2|>\n", "class_docstring": "Contains unit tests for the Geometry 
class.", "class_name": "TestMopacMolPM7", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestMopacMolPM7:\n \"\"\"Contains unit tests for the Geometry class.\"\"\"\n\n def setUp(self):\n \"\"\"A function run before each unit test in this class.\"\"\"\n <|body_0|>\n\n def test_generate_thermo_data(self):\n \"\"\"Test that generate_thermo_data() works correctly for MOPAC PM7\"\"\"\n <|body_1|>\n\n def test_load_thermo_data(self):\n \"\"\"Test that generate_thermo_data() can load thermo from the previous MOPAC PM7 run. Check that it loaded, and the values are the same as above.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n rmgpy_path = os.path.normpath(os.path.join(get_path(), '..'))\n qm = QMCalculator(software='mopac', method='pm7', fileStore=os.path.join(rmgpy_path, 'testing', 'qm', 'QMfiles'), scratchDirectory=os.path.join(rmgpy_path, 'testing', 'qm', 'QMscratch'))\n if not os.path.exists(qm.settings.fileStore):\n os.makedirs(qm.settings.fileStore)\n mol1 = Molecule().from_smiles('C1=CC=C2C=CC=CC2=C1')\n self.qmmol1 = MopacMolPM7(mol1, qm.settings)\n<|end_body_0|>\n\n<|body_start_1|>\n for directory in (self.qmmol1.settings.fileStore, self.qmmol1.settings.scratchDirectory):\n shutil.rmtree(directory, ignore_errors=True)\n self.qmmol1.generate_thermo_data()\n result = self.qmmol1.qm_data\n self.assertTrue(self.qmmol1.verify_output_file())\n self.assertTrue(self.qmmol1.thermo.comment.startswith('QM MopacMolPM7 calculation'))\n self.assertEqual(result.numberOfAtoms, 18)\n self.assertIsInstance(result.atomicNumbers, np.ndarray)\n if result.molecularMass.units == 'amu':\n self.assertAlmostEqual(result.molecularMass.value, 128.173, 2)\n self.assertAlmostEqual(self.qmmol1.thermo.H298.value_si, 166168.9863, 0)\n self.assertAlmostEqual(self.qmmol1.thermo.S298.value_si, 336.3330406, 1)\n<|end_body_1|>\n\n<|body_start_2|>\n self.qmmol1.generate_thermo_data()\n result = self.qmmol1.qm_data\n self.assertTrue(self.qmmol1.thermo.comment.startswith('QM MopacMolPM7 calculation'))\n self.assertEqual(result.numberOfAtoms, 18)\n self.assertIsInstance(result.atomicNumbers, np.ndarray)\n if result.molecularMass.units == 'amu':\n self.assertAlmostEqual(result.molecularMass.value, 128.173, 2)\n self.assertAlmostEqual(self.qmmol1.thermo.H298.value_si, 166168.8571, 0)\n self.assertAlmostEqual(self.qmmol1.thermo.S298.value_si, 336.3330406, 1)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000104", "length_bytes": 11517, "license_type": "permissive", "methods": [{"docstring": "A function run before each unit test in this class.", "name": "setUp", "signature": "def setUp(self)"}, {"docstring": "Test that generate_thermo_data() works correctly for MOPAC PM7", "name": "test_generate_thermo_data", "signature": "def test_generate_thermo_data(self)"}, {"docstring": "Test that generate_thermo_data() can load thermo from the previous MOPAC PM7 run. 
Check that it loaded, and the values are the same as above.", "name": "test_load_thermo_data", "signature": "def test_load_thermo_data(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_014871", "prompt": "Implement the Python class `TestMopacMolPM7` described below.\n\nClass description:\nContains unit tests for the Geometry class.\n\nMethod signatures and docstrings:\n- def setUp(self): A function run before each unit test in this class.\n- def test_generate_thermo_data(self): Test that generate_thermo_data() works correctly for MOPAC PM7\n- def test_load_thermo_data(self): Test that generate_thermo_data() can load thermo from the previous MOPAC PM7 run. Check that it loaded, and the values are the same as above.", "prompted_full_text": "Implement the Python class `TestMopacMolPM7` described below.\n\nClass description:\nContains unit tests for the Geometry class.\n\nMethod signatures and docstrings:\n- def setUp(self): A function run before each unit test in this class.\n- def test_generate_thermo_data(self): Test that generate_thermo_data() works correctly for MOPAC PM7\n- def test_load_thermo_data(self): Test that generate_thermo_data() can load thermo from the previous MOPAC PM7 run. Check that it loaded, and the values are the same as above.\n\n<|skeleton|>\nclass TestMopacMolPM7:\n \"\"\"Contains unit tests for the Geometry class.\"\"\"\n\n def setUp(self):\n \"\"\"A function run before each unit test in this class.\"\"\"\n <|body_0|>\n\n def test_generate_thermo_data(self):\n \"\"\"Test that generate_thermo_data() works correctly for MOPAC PM7\"\"\"\n <|body_1|>\n\n def test_load_thermo_data(self):\n \"\"\"Test that generate_thermo_data() can load thermo from the previous MOPAC PM7 run. Check that it loaded, and the values are the same as above.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n rmgpy_path = os.path.normpath(os.path.join(get_path(), '..'))\n qm = QMCalculator(software='mopac', method='pm7', fileStore=os.path.join(rmgpy_path, 'testing', 'qm', 'QMfiles'), scratchDirectory=os.path.join(rmgpy_path, 'testing', 'qm', 'QMscratch'))\n if not os.path.exists(qm.settings.fileStore):\n os.makedirs(qm.settings.fileStore)\n mol1 = Molecule().from_smiles('C1=CC=C2C=CC=CC2=C1')\n self.qmmol1 = MopacMolPM7(mol1, qm.settings)\n<|end_body_0|>\n\n<|body_start_1|>\n for directory in (self.qmmol1.settings.fileStore, self.qmmol1.settings.scratchDirectory):\n shutil.rmtree(directory, ignore_errors=True)\n self.qmmol1.generate_thermo_data()\n result = self.qmmol1.qm_data\n self.assertTrue(self.qmmol1.verify_output_file())\n self.assertTrue(self.qmmol1.thermo.comment.startswith('QM MopacMolPM7 calculation'))\n self.assertEqual(result.numberOfAtoms, 18)\n self.assertIsInstance(result.atomicNumbers, np.ndarray)\n if result.molecularMass.units == 'amu':\n self.assertAlmostEqual(result.molecularMass.value, 128.173, 2)\n self.assertAlmostEqual(self.qmmol1.thermo.H298.value_si, 166168.9863, 0)\n self.assertAlmostEqual(self.qmmol1.thermo.S298.value_si, 336.3330406, 1)\n<|end_body_1|>\n\n<|body_start_2|>\n self.qmmol1.generate_thermo_data()\n result = self.qmmol1.qm_data\n self.assertTrue(self.qmmol1.thermo.comment.startswith('QM MopacMolPM7 calculation'))\n self.assertEqual(result.numberOfAtoms, 18)\n self.assertIsInstance(result.atomicNumbers, np.ndarray)\n if result.molecularMass.units == 'amu':\n self.assertAlmostEqual(result.molecularMass.value, 128.173, 2)\n self.assertAlmostEqual(self.qmmol1.thermo.H298.value_si, 166168.8571, 0)\n 
self.assertAlmostEqual(self.qmmol1.thermo.S298.value_si, 336.3330406, 1)\n<|end_body_2|>\n", "revision_id": "349a4af759cf8877197772cd7eaca1e51d46eff5", "skeleton": "<|skeleton|>\nclass TestMopacMolPM7:\n \"\"\"Contains unit tests for the Geometry class.\"\"\"\n\n def setUp(self):\n \"\"\"A function run before each unit test in this class.\"\"\"\n <|body_0|>\n\n def test_generate_thermo_data(self):\n \"\"\"Test that generate_thermo_data() works correctly for MOPAC PM7\"\"\"\n <|body_1|>\n\n def test_load_thermo_data(self):\n \"\"\"Test that generate_thermo_data() can load thermo from the previous MOPAC PM7 run. Check that it loaded, and the values are the same as above.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TestMopacMolPM7:\n \"\"\"Contains unit tests for the Geometry class.\"\"\"\n\n def setUp(self):\n \"\"\"A function run before each unit test in this class.\"\"\"\n rmgpy_path = os.path.normpath(os.path.join(get_path(), '..'))\n qm = QMCalculator(software='mopac', method='pm7', fileStore=os.path.join(rmgpy_path, 'testing', 'qm', 'QMfiles'), scratchDirectory=os.path.join(rmgpy_path, 'testing', 'qm', 'QMscratch'))\n if not os.path.exists(qm.settings.fileStore):\n os.makedirs(qm.settings.fileStore)\n mol1 = Molecule().from_smiles('C1=CC=C2C=CC=CC2=C1')\n self.qmmol1 = MopacMolPM7(mol1, qm.settings)\n\n def test_generate_thermo_data(self):\n \"\"\"Test that generate_thermo_data() works correctly for MOPAC PM7\"\"\"\n for directory in (self.qmmol1.settings.fileStore, self.qmmol1.settings.scratchDirectory):\n shutil.rmtree(directory, ignore_errors=True)\n self.qmmol1.generate_thermo_data()\n result = self.qmmol1.qm_data\n self.assertTrue(self.qmmol1.verify_output_file())\n self.assertTrue(self.qmmol1.thermo.comment.startswith('QM MopacMolPM7 calculation'))\n self.assertEqual(result.numberOfAtoms, 18)\n self.assertIsInstance(result.atomicNumbers, np.ndarray)\n if result.molecularMass.units == 'amu':\n self.assertAlmostEqual(result.molecularMass.value, 128.173, 2)\n self.assertAlmostEqual(self.qmmol1.thermo.H298.value_si, 166168.9863, 0)\n self.assertAlmostEqual(self.qmmol1.thermo.S298.value_si, 336.3330406, 1)\n\n def test_load_thermo_data(self):\n \"\"\"Test that generate_thermo_data() can load thermo from the previous MOPAC PM7 run. Check that it loaded, and the values are the same as above.\"\"\"\n self.qmmol1.generate_thermo_data()\n result = self.qmmol1.qm_data\n self.assertTrue(self.qmmol1.thermo.comment.startswith('QM MopacMolPM7 calculation'))\n self.assertEqual(result.numberOfAtoms, 18)\n self.assertIsInstance(result.atomicNumbers, np.ndarray)\n if result.molecularMass.units == 'amu':\n self.assertAlmostEqual(result.molecularMass.value, 128.173, 2)\n self.assertAlmostEqual(self.qmmol1.thermo.H298.value_si, 166168.8571, 0)\n self.assertAlmostEqual(self.qmmol1.thermo.S298.value_si, 336.3330406, 1)\n", "source": "the_stack_v2_python_sparse", "source_path": "rmgpy/qm/mopacTest.py", "source_repo": "CanePan-cc/CanePanWorkshop", "split": "val", "star_events_count": 2}
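The TestMopacMolPM7 record above exercises a compute-then-reload pattern: the first test wipes the file store and scratch directory before running the MOPAC job, while the second calls generate_thermo_data() again and expects the thermo to come back from the files the first run wrote (note the slightly different cached H298 value). RMG-Py's QMCalculator and MopacMolPM7 are not available here, so the sketch below reproduces only the test structure; FakeThermoJob and every other name in it are invented for illustration.

import json
import os
import shutil
import tempfile
import unittest

class FakeThermoJob:
    # hypothetical stand-in for MopacMolPM7: compute once, persist, reload later
    def __init__(self, file_store):
        self.file_store = file_store
        self.ran_calculation = False

    def generate_thermo_data(self):
        path = os.path.join(self.file_store, 'thermo.json')
        if os.path.exists(path):
            with open(path) as f:          # a previous run left results behind
                self.h298 = json.load(f)['H298']
        else:
            self.ran_calculation = True    # pretend to run the QM calculation
            self.h298 = 166168.99
            os.makedirs(self.file_store, exist_ok=True)
            with open(path, 'w') as f:
                json.dump({'H298': self.h298}, f)

class TestCachePattern(unittest.TestCase):
    def setUp(self):
        self.store = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.store, ignore_errors=True)

    def test_generate_then_load(self):
        first = FakeThermoJob(self.store)
        first.generate_thermo_data()                  # fresh store: computes and caches
        self.assertTrue(first.ran_calculation)
        second = FakeThermoJob(self.store)
        second.generate_thermo_data()                 # same store: loads from disk
        self.assertFalse(second.ran_calculation)
        self.assertAlmostEqual(second.h298, first.h298, 0)

if __name__ == '__main__':
    unittest.main()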
{"blob_id": "2eca0b74a07af37e3cd383654d823cb0a5458d97", "bodies": ["super(LayerNorm, self).__init__()\nself.a_2 = nn.Parameter(torch.ones(features))\nself.b_2 = nn.Parameter(torch.zeros(features))\nself.eps = eps", "mean = x.mean(-1, keepdim=True)\nstd = x.std(-1, keepdim=True)\nreturn self.a_2 * (x - mean) / (std + self.eps) + self.b_2"], "bodies_text": "<|body_start_0|>\n super(LayerNorm, self).__init__()\n self.a_2 = nn.Parameter(torch.ones(features))\n self.b_2 = nn.Parameter(torch.zeros(features))\n self.eps = eps\n<|end_body_0|>\n\n<|body_start_1|>\n mean = x.mean(-1, keepdim=True)\n std = x.std(-1, keepdim=True)\n return self.a_2 * (x - mean) / (std + self.eps) + self.b_2\n<|end_body_1|>\n", "class_docstring": "Module that perform normalization of the input x. Attributes: a_2 (nn.Parameter): learnable parameter used to project the normalization of the batch. b_2 (nn.Parameter): learnable parameter corresponding to the bias of the projection on the normalization. eps (float): epsilon float to use when dividing by the std.", "class_name": "LayerNorm", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LayerNorm:\n \"\"\"Module that perform normalization of the input x. Attributes: a_2 (nn.Parameter): learnable parameter used to project the normalization of the batch. b_2 (nn.Parameter): learnable parameter corresponding to the bias of the projection on the normalization. eps (float): epsilon float to use when dividing by the std.\"\"\"\n\n def __init__(self, features: int, eps: float=1e-06) -> None:\n \"\"\"Initialize the layer normalization module. Args: features (int): dimension of the features / model to normalize. eps (float): epsilon float to use when dividing by the std.\"\"\"\n <|body_0|>\n\n def forward(self, x: Tensor) -> Tensor:\n \"\"\"Perform the normalization on `x`. Args: x (Tensor): input tensor of shape `(batch, seq_len, size)`. Returns: Tensor: output normalized tensor of the same shape.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(LayerNorm, self).__init__()\n self.a_2 = nn.Parameter(torch.ones(features))\n self.b_2 = nn.Parameter(torch.zeros(features))\n self.eps = eps\n<|end_body_0|>\n\n<|body_start_1|>\n mean = x.mean(-1, keepdim=True)\n std = x.std(-1, keepdim=True)\n return self.a_2 * (x - mean) / (std + self.eps) + self.b_2\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000105", "length_bytes": 11321, "license_type": "no_license", "methods": [{"docstring": "Initialize the layer normalization module. Args: features (int): dimension of the features / model to normalize. eps (float): epsilon float to use when dividing by the std.", "name": "__init__", "signature": "def __init__(self, features: int, eps: float=1e-06) -> None"}, {"docstring": "Perform the normalization on `x`. Args: x (Tensor): input tensor of shape `(batch, seq_len, size)`. Returns: Tensor: output normalized tensor of the same shape.", "name": "forward", "signature": "def forward(self, x: Tensor) -> Tensor"}], "n_methods": 2, "prompt": "Implement the Python class `LayerNorm` described below.\n\nClass description:\nModule that perform normalization of the input x. Attributes: a_2 (nn.Parameter): learnable parameter used to project the normalization of the batch. b_2 (nn.Parameter): learnable parameter corresponding to the bias of the projection on the normalization. 
eps (float): epsilon float to use when dividing by the std.\n\nMethod signatures and docstrings:\n- def __init__(self, features: int, eps: float=1e-06) -> None: Initialize the layer normalization module. Args: features (int): dimension of the features / model to normalize. eps (float): epsilon float to use when dividing by the std.\n- def forward(self, x: Tensor) -> Tensor: Perform the normalization on `x`. Args: x (Tensor): input tensor of shape `(batch, seq_len, size)`. Returns: Tensor: output normalized tensor of the same shape.", "prompted_full_text": "Implement the Python class `LayerNorm` described below.\n\nClass description:\nModule that perform normalization of the input x. Attributes: a_2 (nn.Parameter): learnable parameter used to project the normalization of the batch. b_2 (nn.Parameter): learnable parameter corresponding to the bias of the projection on the normalization. eps (float): epsilon float to use when dividing by the std.\n\nMethod signatures and docstrings:\n- def __init__(self, features: int, eps: float=1e-06) -> None: Initialize the layer normalization module. Args: features (int): dimension of the features / model to normalize. eps (float): epsilon float to use when dividing by the std.\n- def forward(self, x: Tensor) -> Tensor: Perform the normalization on `x`. Args: x (Tensor): input tensor of shape `(batch, seq_len, size)`. Returns: Tensor: output normalized tensor of the same shape.\n\n<|skeleton|>\nclass LayerNorm:\n \"\"\"Module that perform normalization of the input x. Attributes: a_2 (nn.Parameter): learnable parameter used to project the normalization of the batch. b_2 (nn.Parameter): learnable parameter corresponding to the bias of the projection on the normalization. eps (float): epsilon float to use when dividing by the std.\"\"\"\n\n def __init__(self, features: int, eps: float=1e-06) -> None:\n \"\"\"Initialize the layer normalization module. Args: features (int): dimension of the features / model to normalize. eps (float): epsilon float to use when dividing by the std.\"\"\"\n <|body_0|>\n\n def forward(self, x: Tensor) -> Tensor:\n \"\"\"Perform the normalization on `x`. Args: x (Tensor): input tensor of shape `(batch, seq_len, size)`. Returns: Tensor: output normalized tensor of the same shape.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(LayerNorm, self).__init__()\n self.a_2 = nn.Parameter(torch.ones(features))\n self.b_2 = nn.Parameter(torch.zeros(features))\n self.eps = eps\n<|end_body_0|>\n\n<|body_start_1|>\n mean = x.mean(-1, keepdim=True)\n std = x.std(-1, keepdim=True)\n return self.a_2 * (x - mean) / (std + self.eps) + self.b_2\n<|end_body_1|>\n", "revision_id": "896c4d5bb17bcbf00832a04e2844c5c02270f76f", "skeleton": "<|skeleton|>\nclass LayerNorm:\n \"\"\"Module that perform normalization of the input x. Attributes: a_2 (nn.Parameter): learnable parameter used to project the normalization of the batch. b_2 (nn.Parameter): learnable parameter corresponding to the bias of the projection on the normalization. eps (float): epsilon float to use when dividing by the std.\"\"\"\n\n def __init__(self, features: int, eps: float=1e-06) -> None:\n \"\"\"Initialize the layer normalization module. Args: features (int): dimension of the features / model to normalize. eps (float): epsilon float to use when dividing by the std.\"\"\"\n <|body_0|>\n\n def forward(self, x: Tensor) -> Tensor:\n \"\"\"Perform the normalization on `x`. Args: x (Tensor): input tensor of shape `(batch, seq_len, size)`. 
Returns: Tensor: output normalized tensor of the same shape.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class LayerNorm:\n \"\"\"Module that perform normalization of the input x. Attributes: a_2 (nn.Parameter): learnable parameter used to project the normalization of the batch. b_2 (nn.Parameter): learnable parameter corresponding to the bias of the projection on the normalization. eps (float): epsilon float to use when dividing by the std.\"\"\"\n\n def __init__(self, features: int, eps: float=1e-06) -> None:\n \"\"\"Initialize the layer normalization module. Args: features (int): dimension of the features / model to normalize. eps (float): epsilon float to use when dividing by the std.\"\"\"\n super(LayerNorm, self).__init__()\n self.a_2 = nn.Parameter(torch.ones(features))\n self.b_2 = nn.Parameter(torch.zeros(features))\n self.eps = eps\n\n def forward(self, x: Tensor) -> Tensor:\n \"\"\"Perform the normalization on `x`. Args: x (Tensor): input tensor of shape `(batch, seq_len, size)`. Returns: Tensor: output normalized tensor of the same shape.\"\"\"\n mean = x.mean(-1, keepdim=True)\n std = x.std(-1, keepdim=True)\n return self.a_2 * (x - mean) / (std + self.eps) + self.b_2\n", "source": "the_stack_v2_python_sparse", "source_path": "edit_transformer/modules.py", "source_repo": "grll/edit-transformer", "split": "val", "star_events_count": 3}
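As a usage check, the LayerNorm record can be made runnable by restoring the nn.Module base class that the skeleton format strips from every record. One detail worth flagging: this variant divides by (std + eps), whereas torch.nn.LayerNorm divides by sqrt(var + eps); the sketch below follows the record's bodies verbatim and only adds imports and a smoke test.

import torch
import torch.nn as nn
from torch import Tensor

class LayerNorm(nn.Module):
    def __init__(self, features: int, eps: float = 1e-06) -> None:
        super().__init__()
        self.a_2 = nn.Parameter(torch.ones(features))   # learnable scale
        self.b_2 = nn.Parameter(torch.zeros(features))  # learnable bias
        self.eps = eps

    def forward(self, x: Tensor) -> Tensor:
        mean = x.mean(-1, keepdim=True)                 # per-position feature mean
        std = x.std(-1, keepdim=True)                   # per-position feature std
        return self.a_2 * (x - mean) / (std + self.eps) + self.b_2

x = torch.randn(2, 5, 8)                                # (batch, seq_len, size)
y = LayerNorm(8)(x)
print(y.shape)                                          # torch.Size([2, 5, 8])
print(float(y.mean(-1).abs().max()))                    # per-position means are ~0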
{"blob_id": "1b9577295917d359cb23943cefa8009ae8f92165", "bodies": ["self.wl_centre = wl_centre\nself.fwhm = fwhm\nself.peak_tx = peak_tx\nself.type = type\nself._tx_values = np.array([1e-05, 0.0001, 0.001, 0.01, 0.1, 0.5, 0.9]) * self.peak_tx\nself.fwhm_coeffs = {2: np.array([45, 15, 6.3, 3.5, 2, 1, 0.5]), 3: np.array([15, 5.4, 3.2, 2.2, 1.5, 1, 0.65]), 4: np.array([12, 4.25, 2.25, 1.8, 1.3, 1, 0.8])}\nwl, tx = self._get_semicustom_tx()\nsuper().__init__(wl, tx, ref_index)", "tx = np.concatenate([self._tx_values, [1 * self.peak_tx], self._tx_values[::-1]])\nwl = np.zeros_like(tx)\nfor idx in range(len(self._tx_values)):\n half_width = self.fwhm_coeffs[self.type][idx] * self.fwhm / 2\n wl[idx] = self.wl_centre - half_width\n wl[-(idx + 1)] = self.wl_centre + half_width\nwl[len(self._tx_values)] = self.wl_centre\nreturn (wl, tx)", "no_fwhm = 10\nwl_lo = self.wl_centre - no_fwhm * self.fwhm\nwl_hi = self.wl_centre + no_fwhm * self.fwhm\ninterp_wavelength_axis = np.linspace(wl_lo, wl_hi, 100)\nf = scipy.interpolate.InterpolatedUnivariateSpline(self.wls, self.tx, k=1, ext='zeros')\ninterp_transmission = f(interp_wavelength_axis)\nreturn (interp_wavelength_axis, interp_transmission)"], "bodies_text": "<|body_start_0|>\n self.wl_centre = wl_centre\n self.fwhm = fwhm\n self.peak_tx = peak_tx\n self.type = type\n self._tx_values = np.array([1e-05, 0.0001, 0.001, 0.01, 0.1, 0.5, 0.9]) * self.peak_tx\n self.fwhm_coeffs = {2: np.array([45, 15, 6.3, 3.5, 2, 1, 0.5]), 3: np.array([15, 5.4, 3.2, 2.2, 1.5, 1, 0.65]), 4: np.array([12, 4.25, 2.25, 1.8, 1.3, 1, 0.8])}\n wl, tx = self._get_semicustom_tx()\n super().__init__(wl, tx, ref_index)\n<|end_body_0|>\n\n<|body_start_1|>\n tx = np.concatenate([self._tx_values, [1 * self.peak_tx], self._tx_values[::-1]])\n wl = np.zeros_like(tx)\n for idx in range(len(self._tx_values)):\n half_width = self.fwhm_coeffs[self.type][idx] * self.fwhm / 2\n wl[idx] = self.wl_centre - half_width\n wl[-(idx + 1)] = self.wl_centre + half_width\n wl[len(self._tx_values)] = self.wl_centre\n return (wl, tx)\n<|end_body_1|>\n\n<|body_start_2|>\n no_fwhm = 10\n wl_lo = self.wl_centre - no_fwhm * self.fwhm\n wl_hi = self.wl_centre + no_fwhm * self.fwhm\n interp_wavelength_axis = np.linspace(wl_lo, wl_hi, 100)\n f = scipy.interpolate.InterpolatedUnivariateSpline(self.wls, self.tx, k=1, ext='zeros')\n interp_transmission = f(interp_wavelength_axis)\n return (interp_wavelength_axis, interp_transmission)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "AndoverSemiCustomFilter", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AndoverSemiCustomFilter:\n\n def __init__(self, wl_centre, fwhm, peak_tx, type, ref_index):\n \"\"\"set the normalised transmission values, and the corresponding fwhm coefficients based on info from: https://www.andovercorp.com/technical/bandpass-filters-fundamentals/filters-types/ :param wl_centre: centre wavelength [m] :param fwhm: full width half max. wavelength [m] :param peak_tx: transmission at centre, as a fraction. :param type: type of semi-custom filters, according to Andover, basically the number of cavities. :param ref_index\"\"\"\n <|body_0|>\n\n def _get_semicustom_tx(self):\n \"\"\"use the info on the filters profiles from https://www.andovercorp.com/technical/bandpass-filters-fundamentals/filters-types/ to calculate the rough tx profile. 
:return: tx\"\"\"\n <|body_1|>\n\n def get_interp_profile(self):\n \"\"\"interpolate the few points provided by the manufacturer onto a finer wavelength grid.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.wl_centre = wl_centre\n self.fwhm = fwhm\n self.peak_tx = peak_tx\n self.type = type\n self._tx_values = np.array([1e-05, 0.0001, 0.001, 0.01, 0.1, 0.5, 0.9]) * self.peak_tx\n self.fwhm_coeffs = {2: np.array([45, 15, 6.3, 3.5, 2, 1, 0.5]), 3: np.array([15, 5.4, 3.2, 2.2, 1.5, 1, 0.65]), 4: np.array([12, 4.25, 2.25, 1.8, 1.3, 1, 0.8])}\n wl, tx = self._get_semicustom_tx()\n super().__init__(wl, tx, ref_index)\n<|end_body_0|>\n\n<|body_start_1|>\n tx = np.concatenate([self._tx_values, [1 * self.peak_tx], self._tx_values[::-1]])\n wl = np.zeros_like(tx)\n for idx in range(len(self._tx_values)):\n half_width = self.fwhm_coeffs[self.type][idx] * self.fwhm / 2\n wl[idx] = self.wl_centre - half_width\n wl[-(idx + 1)] = self.wl_centre + half_width\n wl[len(self._tx_values)] = self.wl_centre\n return (wl, tx)\n<|end_body_1|>\n\n<|body_start_2|>\n no_fwhm = 10\n wl_lo = self.wl_centre - no_fwhm * self.fwhm\n wl_hi = self.wl_centre + no_fwhm * self.fwhm\n interp_wavelength_axis = np.linspace(wl_lo, wl_hi, 100)\n f = scipy.interpolate.InterpolatedUnivariateSpline(self.wls, self.tx, k=1, ext='zeros')\n interp_transmission = f(interp_wavelength_axis)\n return (interp_wavelength_axis, interp_transmission)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000106", "length_bytes": 9129, "license_type": "no_license", "methods": [{"docstring": "set the normalised transmission values, and the corresponding fwhm coefficients based on info from: https://www.andovercorp.com/technical/bandpass-filters-fundamentals/filters-types/ :param wl_centre: centre wavelength [m] :param fwhm: full width half max. wavelength [m] :param peak_tx: transmission at centre, as a fraction. :param type: type of semi-custom filters, according to Andover, basically the number of cavities. :param ref_index", "name": "__init__", "signature": "def __init__(self, wl_centre, fwhm, peak_tx, type, ref_index)"}, {"docstring": "use the info on the filters profiles from https://www.andovercorp.com/technical/bandpass-filters-fundamentals/filters-types/ to calculate the rough tx profile. :return: tx", "name": "_get_semicustom_tx", "signature": "def _get_semicustom_tx(self)"}, {"docstring": "interpolate the few points provided by the manufacturer onto a finer wavelength grid.", "name": "get_interp_profile", "signature": "def get_interp_profile(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_012217", "prompt": "Implement the Python class `AndoverSemiCustomFilter` described below.\n\nClass description:\nImplement the AndoverSemiCustomFilter class.\n\nMethod signatures and docstrings:\n- def __init__(self, wl_centre, fwhm, peak_tx, type, ref_index): set the normalised transmission values, and the corresponding fwhm coefficients based on info from: https://www.andovercorp.com/technical/bandpass-filters-fundamentals/filters-types/ :param wl_centre: centre wavelength [m] :param fwhm: full width half max. wavelength [m] :param peak_tx: transmission at centre, as a fraction. :param type: type of semi-custom filters, according to Andover, basically the number of cavities. :param ref_index\n- def _get_semicustom_tx(self): use the info on the filters profiles from https://www.andovercorp.com/technical/bandpass-filters-fundamentals/filters-types/ to calculate the rough tx profile. 
:return: tx\n- def get_interp_profile(self): interpolate the few points provided by the manufacturer onto a finer wavelength grid.", "prompted_full_text": "Implement the Python class `AndoverSemiCustomFilter` described below.\n\nClass description:\nImplement the AndoverSemiCustomFilter class.\n\nMethod signatures and docstrings:\n- def __init__(self, wl_centre, fwhm, peak_tx, type, ref_index): set the normalised transmission values, and the corresponding fwhm coefficients based on info from: https://www.andovercorp.com/technical/bandpass-filters-fundamentals/filters-types/ :param wl_centre: centre wavelength [m] :param fwhm: full width half max. wavelength [m] :param peak_tx: transmission at centre, as a fraction. :param type: type of semi-custom filters, according to Andover, basically the number of cavities. :param ref_index\n- def _get_semicustom_tx(self): use the info on the filters profiles from https://www.andovercorp.com/technical/bandpass-filters-fundamentals/filters-types/ to calculate the rough tx profile. :return: tx\n- def get_interp_profile(self): interpolate the few points provided by the manufacturer onto a finer wavelength grid.\n\n<|skeleton|>\nclass AndoverSemiCustomFilter:\n\n def __init__(self, wl_centre, fwhm, peak_tx, type, ref_index):\n \"\"\"set the normalised transmission values, and the corresponding fwhm coefficients based on info from: https://www.andovercorp.com/technical/bandpass-filters-fundamentals/filters-types/ :param wl_centre: centre wavelength [m] :param fwhm: full width half max. wavelength [m] :param peak_tx: transmission at centre, as a fraction. :param type: type of semi-custom filters, according to Andover, basically the number of cavities. :param ref_index\"\"\"\n <|body_0|>\n\n def _get_semicustom_tx(self):\n \"\"\"use the info on the filters profiles from https://www.andovercorp.com/technical/bandpass-filters-fundamentals/filters-types/ to calculate the rough tx profile. 
:return: tx\"\"\"\n <|body_1|>\n\n def get_interp_profile(self):\n \"\"\"interpolate the few points provided by the manufacturer onto a finer wavelength grid.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.wl_centre = wl_centre\n self.fwhm = fwhm\n self.peak_tx = peak_tx\n self.type = type\n self._tx_values = np.array([1e-05, 0.0001, 0.001, 0.01, 0.1, 0.5, 0.9]) * self.peak_tx\n self.fwhm_coeffs = {2: np.array([45, 15, 6.3, 3.5, 2, 1, 0.5]), 3: np.array([15, 5.4, 3.2, 2.2, 1.5, 1, 0.65]), 4: np.array([12, 4.25, 2.25, 1.8, 1.3, 1, 0.8])}\n wl, tx = self._get_semicustom_tx()\n super().__init__(wl, tx, ref_index)\n<|end_body_0|>\n\n<|body_start_1|>\n tx = np.concatenate([self._tx_values, [1 * self.peak_tx], self._tx_values[::-1]])\n wl = np.zeros_like(tx)\n for idx in range(len(self._tx_values)):\n half_width = self.fwhm_coeffs[self.type][idx] * self.fwhm / 2\n wl[idx] = self.wl_centre - half_width\n wl[-(idx + 1)] = self.wl_centre + half_width\n wl[len(self._tx_values)] = self.wl_centre\n return (wl, tx)\n<|end_body_1|>\n\n<|body_start_2|>\n no_fwhm = 10\n wl_lo = self.wl_centre - no_fwhm * self.fwhm\n wl_hi = self.wl_centre + no_fwhm * self.fwhm\n interp_wavelength_axis = np.linspace(wl_lo, wl_hi, 100)\n f = scipy.interpolate.InterpolatedUnivariateSpline(self.wls, self.tx, k=1, ext='zeros')\n interp_transmission = f(interp_wavelength_axis)\n return (interp_wavelength_axis, interp_transmission)\n<|end_body_2|>\n", "revision_id": "845893fcefd23a1f9f0d9dbdc94788fccbb17379", "skeleton": "<|skeleton|>\nclass AndoverSemiCustomFilter:\n\n def __init__(self, wl_centre, fwhm, peak_tx, type, ref_index):\n \"\"\"set the normalised transmission values, and the corresponding fwhm coefficients based on info from: https://www.andovercorp.com/technical/bandpass-filters-fundamentals/filters-types/ :param wl_centre: centre wavelength [m] :param fwhm: full width half max. wavelength [m] :param peak_tx: transmission at centre, as a fraction. :param type: type of semi-custom filters, according to Andover, basically the number of cavities. :param ref_index\"\"\"\n <|body_0|>\n\n def _get_semicustom_tx(self):\n \"\"\"use the info on the filters profiles from https://www.andovercorp.com/technical/bandpass-filters-fundamentals/filters-types/ to calculate the rough tx profile. :return: tx\"\"\"\n <|body_1|>\n\n def get_interp_profile(self):\n \"\"\"interpolate the few points provided by the manufacturer onto a finer wavelength grid.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class AndoverSemiCustomFilter:\n def __init__(self, wl_centre, fwhm, peak_tx, type, ref_index):\n \"\"\"set the normalised transmission values, and the corresponding fwhm coefficients based on info from: https://www.andovercorp.com/technical/bandpass-filters-fundamentals/filters-types/ :param wl_centre: centre wavelength [m] :param fwhm: full width half max. wavelength [m] :param peak_tx: transmission at centre, as a fraction. :param type: type of semi-custom filters, according to Andover, basically the number of cavities. 
:param ref_index\"\"\"\n self.wl_centre = wl_centre\n self.fwhm = fwhm\n self.peak_tx = peak_tx\n self.type = type\n self._tx_values = np.array([1e-05, 0.0001, 0.001, 0.01, 0.1, 0.5, 0.9]) * self.peak_tx\n self.fwhm_coeffs = {2: np.array([45, 15, 6.3, 3.5, 2, 1, 0.5]), 3: np.array([15, 5.4, 3.2, 2.2, 1.5, 1, 0.65]), 4: np.array([12, 4.25, 2.25, 1.8, 1.3, 1, 0.8])}\n wl, tx = self._get_semicustom_tx()\n super().__init__(wl, tx, ref_index)\n\n def _get_semicustom_tx(self):\n \"\"\"use the info on the filters profiles from https://www.andovercorp.com/technical/bandpass-filters-fundamentals/filters-types/ to calculate the rough tx profile. :return: tx\"\"\"\n tx = np.concatenate([self._tx_values, [1 * self.peak_tx], self._tx_values[::-1]])\n wl = np.zeros_like(tx)\n for idx in range(len(self._tx_values)):\n half_width = self.fwhm_coeffs[self.type][idx] * self.fwhm / 2\n wl[idx] = self.wl_centre - half_width\n wl[-(idx + 1)] = self.wl_centre + half_width\n wl[len(self._tx_values)] = self.wl_centre\n return (wl, tx)\n\n def get_interp_profile(self):\n \"\"\"interpolate the few points provided by the manufacturer onto a finer wavelength grid.\"\"\"\n no_fwhm = 10\n wl_lo = self.wl_centre - no_fwhm * self.fwhm\n wl_hi = self.wl_centre + no_fwhm * self.fwhm\n interp_wavelength_axis = np.linspace(wl_lo, wl_hi, 100)\n f = scipy.interpolate.InterpolatedUnivariateSpline(self.wls, self.tx, k=1, ext='zeros')\n interp_transmission = f(interp_wavelength_axis)\n return (interp_wavelength_axis, interp_transmission)\n", "source": "the_stack_v2_python_sparse", "source_path": "pycis/temp/bandpass_filter.py", "source_repo": "jsallcock/pycis", "split": "val", "star_events_count": 5}
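The heart of the AndoverSemiCustomFilter record is _get_semicustom_tx: for each normalised transmission level the manufacturer's table gives a full width as a multiple of the FWHM, and the method mirrors those points about the centre wavelength. Below is a standalone sketch of that construction with the same tables; the Filter base class that __init__ hands the (wl, tx) points to is not part of the record, so it is left out, and the example wavelengths are made up.

import numpy as np

def semicustom_profile(wl_centre, fwhm, peak_tx, n_cavities):
    # normalised transmission levels and their cavity-dependent FWHM multiples
    tx_values = np.array([1e-05, 0.0001, 0.001, 0.01, 0.1, 0.5, 0.9]) * peak_tx
    fwhm_coeffs = {2: np.array([45, 15, 6.3, 3.5, 2, 1, 0.5]),
                   3: np.array([15, 5.4, 3.2, 2.2, 1.5, 1, 0.65]),
                   4: np.array([12, 4.25, 2.25, 1.8, 1.3, 1, 0.8])}[n_cavities]
    tx = np.concatenate([tx_values, [peak_tx], tx_values[::-1]])
    wl = np.zeros_like(tx)
    for idx, coeff in enumerate(fwhm_coeffs):
        half_width = coeff * fwhm / 2            # half the width at this tx level
        wl[idx] = wl_centre - half_width         # left flank point
        wl[-(idx + 1)] = wl_centre + half_width  # mirrored right flank point
    wl[len(tx_values)] = wl_centre               # the peak sits at the centre
    return wl, tx

wl, tx = semicustom_profile(656.3e-9, 1.0e-9, 0.65, n_cavities=3)
print(np.round(wl * 1e9, 2))                     # monotonic wavelength grid in nm
print(tx)                                        # symmetric transmission profile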
{"blob_id": "2ed50d8ed0d3a3130c8d78802ec8676923107d79", "bodies": ["response: UserOutputData | list[UserOutputData] | None\nif id:\n response = self.__find_user_by_id(id)\nelse:\n response = self.__find_users()\nreturn response", "data = self.repository.find_users()\nresponse = []\nfor d in data:\n response.append(parse_obj_as(UserOutputData, d))\nreturn response", "user = UserBase(id=id)\ndata = self.repository.find_user_by_id(user.id)\nreturn parse_obj_as(UserOutputData, data)"], "bodies_text": "<|body_start_0|>\n response: UserOutputData | list[UserOutputData] | None\n if id:\n response = self.__find_user_by_id(id)\n else:\n response = self.__find_users()\n return response\n<|end_body_0|>\n\n<|body_start_1|>\n data = self.repository.find_users()\n response = []\n for d in data:\n response.append(parse_obj_as(UserOutputData, d))\n return response\n<|end_body_1|>\n\n<|body_start_2|>\n user = UserBase(id=id)\n data = self.repository.find_user_by_id(user.id)\n return parse_obj_as(UserOutputData, data)\n<|end_body_2|>\n", "class_docstring": "UserGetInteractorImpl", "class_name": "UserGetInteractorImpl", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass UserGetInteractorImpl:\n \"\"\"UserGetInteractorImpl\"\"\"\n\n def handle(self, id: int=0) -> UserOutputData | list[UserOutputData] | None:\n \"\"\"handle\"\"\"\n <|body_0|>\n\n def __find_users(self) -> list[UserOutputData] | None:\n \"\"\"__find_users\"\"\"\n <|body_1|>\n\n def __find_user_by_id(self, id: int) -> UserOutputData | None:\n \"\"\"__find_user_by_id\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n response: UserOutputData | list[UserOutputData] | None\n if id:\n response = self.__find_user_by_id(id)\n else:\n response = self.__find_users()\n return response\n<|end_body_0|>\n\n<|body_start_1|>\n data = self.repository.find_users()\n response = []\n for d in data:\n response.append(parse_obj_as(UserOutputData, d))\n return response\n<|end_body_1|>\n\n<|body_start_2|>\n user = UserBase(id=id)\n data = self.repository.find_user_by_id(user.id)\n return parse_obj_as(UserOutputData, data)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000107", "length_bytes": 1563, "license_type": "no_license", "methods": [{"docstring": "handle", "name": "handle", "signature": "def handle(self, id: int=0) -> UserOutputData | list[UserOutputData] | None"}, {"docstring": "__find_users", "name": "__find_users", "signature": "def __find_users(self) -> list[UserOutputData] | None"}, {"docstring": "__find_user_by_id", "name": "__find_user_by_id", "signature": "def __find_user_by_id(self, id: int) -> UserOutputData | None"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_053476", "prompt": "Implement the Python class `UserGetInteractorImpl` described below.\n\nClass description:\nUserGetInteractorImpl\n\nMethod signatures and docstrings:\n- def handle(self, id: int=0) -> UserOutputData | list[UserOutputData] | None: handle\n- def __find_users(self) -> list[UserOutputData] | None: __find_users\n- def __find_user_by_id(self, id: int) -> UserOutputData | None: __find_user_by_id", "prompted_full_text": "Implement the Python class `UserGetInteractorImpl` described below.\n\nClass description:\nUserGetInteractorImpl\n\nMethod signatures and docstrings:\n- def handle(self, id: int=0) -> UserOutputData | list[UserOutputData] | None: handle\n- def __find_users(self) -> list[UserOutputData] | None: __find_users\n- def __find_user_by_id(self, id: int) -> 
UserOutputData | None: __find_user_by_id\n\n<|skeleton|>\nclass UserGetInteractorImpl:\n \"\"\"UserGetInteractorImpl\"\"\"\n\n def handle(self, id: int=0) -> UserOutputData | list[UserOutputData] | None:\n \"\"\"handle\"\"\"\n <|body_0|>\n\n def __find_users(self) -> list[UserOutputData] | None:\n \"\"\"__find_users\"\"\"\n <|body_1|>\n\n def __find_user_by_id(self, id: int) -> UserOutputData | None:\n \"\"\"__find_user_by_id\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n response: UserOutputData | list[UserOutputData] | None\n if id:\n response = self.__find_user_by_id(id)\n else:\n response = self.__find_users()\n return response\n<|end_body_0|>\n\n<|body_start_1|>\n data = self.repository.find_users()\n response = []\n for d in data:\n response.append(parse_obj_as(UserOutputData, d))\n return response\n<|end_body_1|>\n\n<|body_start_2|>\n user = UserBase(id=id)\n data = self.repository.find_user_by_id(user.id)\n return parse_obj_as(UserOutputData, data)\n<|end_body_2|>\n", "revision_id": "b2740384608b7724238a2b426ec79f6ced057e9d", "skeleton": "<|skeleton|>\nclass UserGetInteractorImpl:\n \"\"\"UserGetInteractorImpl\"\"\"\n\n def handle(self, id: int=0) -> UserOutputData | list[UserOutputData] | None:\n \"\"\"handle\"\"\"\n <|body_0|>\n\n def __find_users(self) -> list[UserOutputData] | None:\n \"\"\"__find_users\"\"\"\n <|body_1|>\n\n def __find_user_by_id(self, id: int) -> UserOutputData | None:\n \"\"\"__find_user_by_id\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class UserGetInteractorImpl:\n \"\"\"UserGetInteractorImpl\"\"\"\n\n def handle(self, id: int=0) -> UserOutputData | list[UserOutputData] | None:\n \"\"\"handle\"\"\"\n response: UserOutputData | list[UserOutputData] | None\n if id:\n response = self.__find_user_by_id(id)\n else:\n response = self.__find_users()\n return response\n\n def __find_users(self) -> list[UserOutputData] | None:\n \"\"\"__find_users\"\"\"\n data = self.repository.find_users()\n response = []\n for d in data:\n response.append(parse_obj_as(UserOutputData, d))\n return response\n\n def __find_user_by_id(self, id: int) -> UserOutputData | None:\n \"\"\"__find_user_by_id\"\"\"\n user = UserBase(id=id)\n data = self.repository.find_user_by_id(user.id)\n return parse_obj_as(UserOutputData, data)\n", "source": "the_stack_v2_python_sparse", "source_path": "app/usecases/users/user_get_usercase.py", "source_repo": "massa423/clean-architecture-fastapi", "split": "val", "star_events_count": 6}
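UserGetInteractorImpl is a clean-architecture interactor: handle() routes on whether an id was supplied (id=0 is falsy, so the default lists every user), the repository does the data access, and pydantic v1's parse_obj_as coerces raw rows into the output DTO. The record never shows how self.repository is injected, so the self-contained sketch below adds a hypothetical in-memory repository and a constructor, and drops the UserBase validation step for brevity.

from pydantic import BaseModel, parse_obj_as   # pydantic v1 API, as in the record

class UserOutputData(BaseModel):
    id: int
    name: str

class InMemoryUserRepository:
    # hypothetical repository; the record leaves the data source unspecified
    rows = [{'id': 1, 'name': 'alice'}, {'id': 2, 'name': 'bob'}]

    def find_users(self):
        return self.rows

    def find_user_by_id(self, id):
        return next(r for r in self.rows if r['id'] == id)

class UserGetInteractorImpl:
    def __init__(self, repository):
        self.repository = repository

    def handle(self, id: int = 0):
        if id:  # a truthy id means "fetch one user"
            return parse_obj_as(UserOutputData, self.repository.find_user_by_id(id))
        return [parse_obj_as(UserOutputData, r) for r in self.repository.find_users()]

interactor = UserGetInteractorImpl(InMemoryUserRepository())
print(interactor.handle())       # all users
print(interactor.handle(id=2))   # a single user by id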
{"blob_id": "eb5066d69e984c8da5f976e450883b8e4bbb69d0", "bodies": ["self.evaluator = Metrics_Evaluator()\nself.model = model\nself.perturb_generator = perturb_generator\nself.folder_path = os.path.join(folder_path, data_name, os.path.split(os.path.split(self.model.save_path)[0])[1])", "self.evaluator = Metrics_Evaluator()\nself.model.logits_test = {}\nself.model.labels_test = {}", "logits, labels = self.model.logits(data, self.perturb_generator, perturb_type, epsilon, from_cache=from_cache, to_cache=to_cache, save_to_file=save_to_file)\ntest_values = test_metric(logits, labels, bins_calibration=bins_calibration)\nperturb_type = perturb_type.replace('/', '_')\nself.evaluator.add(test_metric.__name__, perturb_type, epsilon, test_values)", "if folder_path is None:\n folder_path = self.folder_path\nself.evaluator.save(folder_path)", "if folder_path is None:\n folder_path = self.folder_path\nself.evaluator.load(folder_path)"], "bodies_text": "<|body_start_0|>\n self.evaluator = Metrics_Evaluator()\n self.model = model\n self.perturb_generator = perturb_generator\n self.folder_path = os.path.join(folder_path, data_name, os.path.split(os.path.split(self.model.save_path)[0])[1])\n<|end_body_0|>\n\n<|body_start_1|>\n self.evaluator = Metrics_Evaluator()\n self.model.logits_test = {}\n self.model.labels_test = {}\n<|end_body_1|>\n\n<|body_start_2|>\n logits, labels = self.model.logits(data, self.perturb_generator, perturb_type, epsilon, from_cache=from_cache, to_cache=to_cache, save_to_file=save_to_file)\n test_values = test_metric(logits, labels, bins_calibration=bins_calibration)\n perturb_type = perturb_type.replace('/', '_')\n self.evaluator.add(test_metric.__name__, perturb_type, epsilon, test_values)\n<|end_body_2|>\n\n<|body_start_3|>\n if folder_path is None:\n folder_path = self.folder_path\n self.evaluator.save(folder_path)\n<|end_body_3|>\n\n<|body_start_4|>\n if folder_path is None:\n folder_path = self.folder_path\n self.evaluator.load(folder_path)\n<|end_body_4|>\n", "class_docstring": "evaluate model and calculate metrics", "class_name": "Evaluator", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Evaluator:\n \"\"\"evaluate model and calculate metrics\"\"\"\n\n def __init__(self, model, perturb_generator, data_name='Data', folder_path='results/Evaluation/'):\n \"\"\"Args: model: object of Model class perturb_generator: object of PerturbationGenerator data_name: string folder_path: string, determines path where Evaluator is stored\"\"\"\n <|body_0|>\n\n def reset(self):\n \"\"\"Delete old evaluator object (and its storage) and replaces it with a newly initialized one. Delete also logits in model. Careful: No save or backup is happening, so be sure nothing is lost.\"\"\"\n <|body_1|>\n\n def evaluate(self, data, test_metric, perturb_type, epsilon, from_cache=True, to_cache=True, save_to_file=False, bins_calibration=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9], **params):\n \"\"\"evaluate metrics Args: data: tf.data.Dataset object of (x_data, y_labels), prepared with batch_size, shuffle etc. 
test_metric: metric from utilsevaluation.measures perturb_type: string, in case dataset should be perturbed epsilon: int, level of perturbation from_cache: if False the logits and labels are calculated via the Tensorflow graph, if True the logits are restored from cache to_cache: bool, if True the logits and labels are stored to cache save_to_file: bool, if True the logits and labels are saved to file bins_calibration: list of floats, limits of calibration bins\"\"\"\n <|body_2|>\n\n def save(self, folder_path=None):\n \"\"\"Method to save evaluation storage\"\"\"\n <|body_3|>\n\n def load(self, folder_path=None):\n \"\"\"Method to load evaluation storage\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.evaluator = Metrics_Evaluator()\n self.model = model\n self.perturb_generator = perturb_generator\n self.folder_path = os.path.join(folder_path, data_name, os.path.split(os.path.split(self.model.save_path)[0])[1])\n<|end_body_0|>\n\n<|body_start_1|>\n self.evaluator = Metrics_Evaluator()\n self.model.logits_test = {}\n self.model.labels_test = {}\n<|end_body_1|>\n\n<|body_start_2|>\n logits, labels = self.model.logits(data, self.perturb_generator, perturb_type, epsilon, from_cache=from_cache, to_cache=to_cache, save_to_file=save_to_file)\n test_values = test_metric(logits, labels, bins_calibration=bins_calibration)\n perturb_type = perturb_type.replace('/', '_')\n self.evaluator.add(test_metric.__name__, perturb_type, epsilon, test_values)\n<|end_body_2|>\n\n<|body_start_3|>\n if folder_path is None:\n folder_path = self.folder_path\n self.evaluator.save(folder_path)\n<|end_body_3|>\n\n<|body_start_4|>\n if folder_path is None:\n folder_path = self.folder_path\n self.evaluator.load(folder_path)\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000108", "length_bytes": 3272, "license_type": "no_license", "methods": [{"docstring": "Args: model: object of Model class perturb_generator: object of PerturbationGenerator data_name: string folder_path: string, determines path where Evaluator is stored", "name": "__init__", "signature": "def __init__(self, model, perturb_generator, data_name='Data', folder_path='results/Evaluation/')"}, {"docstring": "Delete old evaluator object (and its storage) and replaces it with a newly initialized one. Delete also logits in model. Careful: No save or backup is happening, so be sure nothing is lost.", "name": "reset", "signature": "def reset(self)"}, {"docstring": "evaluate metrics Args: data: tf.data.Dataset object of (x_data, y_labels), prepared with batch_size, shuffle etc. 
test_metric: metric from utilsevaluation.measures perturb_type: string, in case dataset should be perturbed epsilon: int, level of perturbation from_cache: if False the logits and labels are calculated via the Tensorflow graph, if True the logits are restored from cache to_cache: bool, if True the logits and labels are stored to cache save_to_file: bool, if True the logits and labels are saved to file bins_calibration: list of floats, limits of calibration bins", "name": "evaluate", "signature": "def evaluate(self, data, test_metric, perturb_type, epsilon, from_cache=True, to_cache=True, save_to_file=False, bins_calibration=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9], **params)"}, {"docstring": "Method to save evaluation storage", "name": "save", "signature": "def save(self, folder_path=None)"}, {"docstring": "Method to load evaluation storage", "name": "load", "signature": "def load(self, folder_path=None)"}], "n_methods": 5, "prompt": "Implement the Python class `Evaluator` described below.\n\nClass description:\nevaluate model and calculate metrics\n\nMethod signatures and docstrings:\n- def __init__(self, model, perturb_generator, data_name='Data', folder_path='results/Evaluation/'): Args: model: object of Model class perturb_generator: object of PerturbationGenerator data_name: string folder_path: string, determines path where Evaluator is stored\n- def reset(self): Delete old evaluator object (and its storage) and replaces it with a newly initialized one. Delete also logits in model. Careful: No save or backup is happening, so be sure nothing is lost.\n- def evaluate(self, data, test_metric, perturb_type, epsilon, from_cache=True, to_cache=True, save_to_file=False, bins_calibration=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9], **params): evaluate metrics Args: data: tf.data.Dataset object of (x_data, y_labels), prepared with batch_size, shuffle etc. test_metric: metric from utilsevaluation.measures perturb_type: string, in case dataset should be perturbed epsilon: int, level of perturbation from_cache: if False the logits and labels are calculated via the Tensorflow graph, if True the logits are restored from cache to_cache: bool, if True the logits and labels are stored to cache save_to_file: bool, if True the logits and labels are saved to file bins_calibration: list of floats, limits of calibration bins\n- def save(self, folder_path=None): Method to save evaluation storage\n- def load(self, folder_path=None): Method to load evaluation storage", "prompted_full_text": "Implement the Python class `Evaluator` described below.\n\nClass description:\nevaluate model and calculate metrics\n\nMethod signatures and docstrings:\n- def __init__(self, model, perturb_generator, data_name='Data', folder_path='results/Evaluation/'): Args: model: object of Model class perturb_generator: object of PerturbationGenerator data_name: string folder_path: string, determines path where Evaluator is stored\n- def reset(self): Delete old evaluator object (and its storage) and replaces it with a newly initialized one. Delete also logits in model. Careful: No save or backup is happening, so be sure nothing is lost.\n- def evaluate(self, data, test_metric, perturb_type, epsilon, from_cache=True, to_cache=True, save_to_file=False, bins_calibration=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9], **params): evaluate metrics Args: data: tf.data.Dataset object of (x_data, y_labels), prepared with batch_size, shuffle etc. 
test_metric: metric from utilsevaluation.measures perturb_type: string, in case dataset should be perturbed epsilon: int, level of perturbation from_cache: if False the logits and labels are calculated via the Tensorflow graph, if True the logits are restored from cache to_cache: bool, if True the logits and labels are stored to cache save_to_file: bool, if True the logits and labels are saved to file bins_calibration: list of floats, limits of calibration bins\n- def save(self, folder_path=None): Method to save evaluation storage\n- def load(self, folder_path=None): Method to load evaluation storage\n\n<|skeleton|>\nclass Evaluator:\n \"\"\"evaluate model and calculate metrics\"\"\"\n\n def __init__(self, model, perturb_generator, data_name='Data', folder_path='results/Evaluation/'):\n \"\"\"Args: model: object of Model class perturb_generator: object of PerturbationGenerator data_name: string folder_path: string, determines path where Evaluator is stored\"\"\"\n <|body_0|>\n\n def reset(self):\n \"\"\"Delete old evaluator object (and its storage) and replaces it with a newly initialized one. Delete also logits in model. Careful: No save or backup is happening, so be sure nothing is lost.\"\"\"\n <|body_1|>\n\n def evaluate(self, data, test_metric, perturb_type, epsilon, from_cache=True, to_cache=True, save_to_file=False, bins_calibration=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9], **params):\n \"\"\"evaluate metrics Args: data: tf.data.Dataset object of (x_data, y_labels), prepared with batch_size, shuffle etc. test_metric: metric from utilsevaluation.measures perturb_type: string, in case dataset should be perturbed epsilon: int, level of perturbation from_cache: if False the logits and labels are calculated via the Tensorflow graph, if True the logits are restored from cache to_cache: bool, if True the logits and labels are stored to cache save_to_file: bool, if True the logits and labels are saved to file bins_calibration: list of floats, limits of calibration bins\"\"\"\n <|body_2|>\n\n def save(self, folder_path=None):\n \"\"\"Method to save evaluation storage\"\"\"\n <|body_3|>\n\n def load(self, folder_path=None):\n \"\"\"Method to load evaluation storage\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.evaluator = Metrics_Evaluator()\n self.model = model\n self.perturb_generator = perturb_generator\n self.folder_path = os.path.join(folder_path, data_name, os.path.split(os.path.split(self.model.save_path)[0])[1])\n<|end_body_0|>\n\n<|body_start_1|>\n self.evaluator = Metrics_Evaluator()\n self.model.logits_test = {}\n self.model.labels_test = {}\n<|end_body_1|>\n\n<|body_start_2|>\n logits, labels = self.model.logits(data, self.perturb_generator, perturb_type, epsilon, from_cache=from_cache, to_cache=to_cache, save_to_file=save_to_file)\n test_values = test_metric(logits, labels, bins_calibration=bins_calibration)\n perturb_type = perturb_type.replace('/', '_')\n self.evaluator.add(test_metric.__name__, perturb_type, epsilon, test_values)\n<|end_body_2|>\n\n<|body_start_3|>\n if folder_path is None:\n folder_path = self.folder_path\n self.evaluator.save(folder_path)\n<|end_body_3|>\n\n<|body_start_4|>\n if folder_path is None:\n folder_path = self.folder_path\n self.evaluator.load(folder_path)\n<|end_body_4|>\n", "revision_id": "6e0ff0f4359b6b8e8b4c843d7e6eb097f6e1a39f", "skeleton": "<|skeleton|>\nclass Evaluator:\n \"\"\"evaluate model and calculate metrics\"\"\"\n\n def __init__(self, model, perturb_generator, data_name='Data', 
folder_path='results/Evaluation/'):\n \"\"\"Args: model: object of Model class perturb_generator: object of PerturbationGenerator data_name: string folder_path: string, determines path where Evaluator is stored\"\"\"\n <|body_0|>\n\n def reset(self):\n \"\"\"Delete old evaluator object (and its storage) and replaces it with a newly initialized one. Delete also logits in model. Careful: No save or backup is happening, so be sure nothing is lost.\"\"\"\n <|body_1|>\n\n def evaluate(self, data, test_metric, perturb_type, epsilon, from_cache=True, to_cache=True, save_to_file=False, bins_calibration=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9], **params):\n \"\"\"evaluate metrics Args: data: tf.data.Dataset object of (x_data, y_labels), prepared with batch_size, shuffle etc. test_metric: metric from utilsevaluation.measures perturb_type: string, in case dataset should be perturbed epsilon: int, level of perturbation from_cache: if False the logits and labels are calculated via the Tensorflow graph, if True the logits are restored from cache to_cache: bool, if True the logits and labels are stored to cache save_to_file: bool, if True the logits and labels are saved to file bins_calibration: list of floats, limits of calibration bins\"\"\"\n <|body_2|>\n\n def save(self, folder_path=None):\n \"\"\"Method to save evaluation storage\"\"\"\n <|body_3|>\n\n def load(self, folder_path=None):\n \"\"\"Method to load evaluation storage\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Evaluator:\n \"\"\"evaluate model and calculate metrics\"\"\"\n\n def __init__(self, model, perturb_generator, data_name='Data', folder_path='results/Evaluation/'):\n \"\"\"Args: model: object of Model class perturb_generator: object of PerturbationGenerator data_name: string folder_path: string, determines path where Evaluator is stored\"\"\"\n self.evaluator = Metrics_Evaluator()\n self.model = model\n self.perturb_generator = perturb_generator\n self.folder_path = os.path.join(folder_path, data_name, os.path.split(os.path.split(self.model.save_path)[0])[1])\n\n def reset(self):\n \"\"\"Delete old evaluator object (and its storage) and replaces it with a newly initialized one. Delete also logits in model. Careful: No save or backup is happening, so be sure nothing is lost.\"\"\"\n self.evaluator = Metrics_Evaluator()\n self.model.logits_test = {}\n self.model.labels_test = {}\n\n def evaluate(self, data, test_metric, perturb_type, epsilon, from_cache=True, to_cache=True, save_to_file=False, bins_calibration=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9], **params):\n \"\"\"evaluate metrics Args: data: tf.data.Dataset object of (x_data, y_labels), prepared with batch_size, shuffle etc. 
test_metric: metric from utilsevaluation.measures perturb_type: string, in case dataset should be perturbed epsilon: int, level of perturbation from_cache: if False the logits and labels are calculated via the Tensorflow graph, if True the logits are restored from cache to_cache: bool, if True the logits and labels are stored to cache save_to_file: bool, if True the logits and labels are saved to file bins_calibration: list of floats, limits of calibration bins\"\"\"\n logits, labels = self.model.logits(data, self.perturb_generator, perturb_type, epsilon, from_cache=from_cache, to_cache=to_cache, save_to_file=save_to_file)\n test_values = test_metric(logits, labels, bins_calibration=bins_calibration)\n perturb_type = perturb_type.replace('/', '_')\n self.evaluator.add(test_metric.__name__, perturb_type, epsilon, test_values)\n\n def save(self, folder_path=None):\n \"\"\"Method to save evaluation storage\"\"\"\n if folder_path is None:\n folder_path = self.folder_path\n self.evaluator.save(folder_path)\n\n def load(self, folder_path=None):\n \"\"\"Method to load evaluation storage\"\"\"\n if folder_path is None:\n folder_path = self.folder_path\n self.evaluator.load(folder_path)\n", "source": "the_stack_v2_python_sparse", "source_path": "source/Evaluator.py", "source_repo": "tochris/falcon", "split": "val", "star_events_count": 6}
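The Evaluator record delegates all bookkeeping to a Metrics_Evaluator whose implementation is not included. Judging from the calls it receives (add(metric, perturb_type, epsilon, values), save(folder), load(folder)), a minimal stand-in could be a nested mapping persisted as JSON; the sketch below is such a guess, not the falcon repository's actual class.

import json
import os

class MetricsStore:
    # assumed interface: add/save/load, keyed by metric, perturbation and epsilon
    def __init__(self):
        self.storage = {}

    def add(self, metric_name, perturb_type, epsilon, values):
        by_metric = self.storage.setdefault(metric_name, {})
        by_metric.setdefault(perturb_type, {})[str(epsilon)] = values

    def save(self, folder_path):
        os.makedirs(folder_path, exist_ok=True)
        with open(os.path.join(folder_path, 'metrics.json'), 'w') as f:
            json.dump(self.storage, f, indent=2)

    def load(self, folder_path):
        with open(os.path.join(folder_path, 'metrics.json')) as f:
            self.storage = json.load(f)

store = MetricsStore()
store.add('accuracy', 'gaussian_noise', 1, 0.87)   # mirrors self.evaluator.add(...)
store.save('results/Evaluation/demo')
store.load('results/Evaluation/demo')
print(store.storage)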
{"blob_id": "63baf4ae34a81d2efa875f074c6dd316f07a7b3f", "bodies": ["n, result = (x ^ y, 0)\nwhile n:\n result += 1 if n & 1 else 0\n n = n >> 1\nreturn result", "n, result = (x ^ y, 0)\nwhile n:\n n = n & n - 1\n result += 1\nreturn result"], "bodies_text": "<|body_start_0|>\n n, result = (x ^ y, 0)\n while n:\n result += 1 if n & 1 else 0\n n = n >> 1\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n n, result = (x ^ y, 0)\n while n:\n n = n & n - 1\n result += 1\n return result\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def hammingDistance(self, x, y):\n \"\"\":type x: int :type y: int :rtype: int 移位实现计数 我们可以不断地检查 s 的最低位,如果最低位为 1,那么令计数器加一 然后我们令 s 整体右移一位这样 s 的最低位将被舍去,原本的次低位就变成了新的最低位。 我们重复这个过程直到 s=0 为止。这样计数器中就累计了 s 的二进制表示中 1 的数量。 时间击败95.89%,内存击败43.84%\"\"\"\n <|body_0|>\n\n def hammingDistance1(self, x, y):\n \"\"\":type x: int :type y: int :rtype: int Brian Kernighan 算法 在方法二中,对于 s=(10001100)2的情况,我们需要循环右移 8 次才能得到答案。 而实际上如果我们可以跳过两个 1 之间的 0,直接对 1 进行计数,那么就只需要循环 3 次即可。 我们可以使用 Brian Kernighan 算法进行优化, 具体地,该算法可以被描述为这样一个结论:记 f(x) 表示 x 和 x−1 进行与运算所得的结果(即 f(x)=x & (x−1)) 那么 f(x) 恰为 x 删去其二进制表示中最右侧的 1 的结果。 基于该算法,当我们计算出 s=x⊕y,只需要不断让 s=f(s),直到 s=0 即可。 这样每循环一次,s 都会删去其二进制表示中最右侧的 1,最终循环的次数即为 s 的二进制表示中 1 的数量。 时间击败87.51%,内存击败8.22%\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n n, result = (x ^ y, 0)\n while n:\n result += 1 if n & 1 else 0\n n = n >> 1\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n n, result = (x ^ y, 0)\n while n:\n n = n & n - 1\n result += 1\n return result\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000109", "length_bytes": 2400, "license_type": "no_license", "methods": [{"docstring": ":type x: int :type y: int :rtype: int 移位实现计数 我们可以不断地检查 s 的最低位,如果最低位为 1,那么令计数器加一 然后我们令 s 整体右移一位这样 s 的最低位将被舍去,原本的次低位就变成了新的最低位。 我们重复这个过程直到 s=0 为止。这样计数器中就累计了 s 的二进制表示中 1 的数量。 时间击败95.89%,内存击败43.84%", "name": "hammingDistance", "signature": "def hammingDistance(self, x, y)"}, {"docstring": ":type x: int :type y: int :rtype: int Brian Kernighan 算法 在方法二中,对于 s=(10001100)2的情况,我们需要循环右移 8 次才能得到答案。 而实际上如果我们可以跳过两个 1 之间的 0,直接对 1 进行计数,那么就只需要循环 3 次即可。 我们可以使用 Brian Kernighan 算法进行优化, 具体地,该算法可以被描述为这样一个结论:记 f(x) 表示 x 和 x−1 进行与运算所得的结果(即 f(x)=x & (x−1)) 那么 f(x) 恰为 x 删去其二进制表示中最右侧的 1 的结果。 基于该算法,当我们计算出 s=x⊕y,只需要不断让 s=f(s),直到 s=0 即可。 这样每循环一次,s 都会删去其二进制表示中最右侧的 1,最终循环的次数即为 s 的二进制表示中 1 的数量。 时间击败87.51%,内存击败8.22%", "name": "hammingDistance1", "signature": "def hammingDistance1(self, x, y)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001792", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def hammingDistance(self, x, y): :type x: int :type y: int :rtype: int 移位实现计数 我们可以不断地检查 s 的最低位,如果最低位为 1,那么令计数器加一 然后我们令 s 整体右移一位这样 s 的最低位将被舍去,原本的次低位就变成了新的最低位。 我们重复这个过程直到 s=0 为止。这样计数器中就累计了 s 的二进制表示中 1 的数量。 时间击败95.89%,内存击败43.84%\n- def hammingDistance1(self, x, y): :type x: int :type y: int :rtype: int Brian Kernighan 算法 在方法二中,对于 s=(10001100)2的情况,我们需要循环右移 8 次才能得到答案。 而实际上如果我们可以跳过两个 1 之间的 0,直接对 1 进行计数,那么就只需要循环 3 次即可。 我们可以使用 Brian Kernighan 算法进行优化, 具体地,该算法可以被描述为这样一个结论:记 f(x) 表示 x 和 x−1 进行与运算所得的结果(即 f(x)=x & (x−1)) 那么 f(x) 恰为 x 删去其二进制表示中最右侧的 1 的结果。 基于该算法,当我们计算出 s=x⊕y,只需要不断让 s=f(s),直到 s=0 即可。 这样每循环一次,s 都会删去其二进制表示中最右侧的 1,最终循环的次数即为 s 的二进制表示中 1 的数量。 时间击败87.51%,内存击败8.22%", "prompted_full_text": "Implement the Python class `Solution` 
described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def hammingDistance(self, x, y): :type x: int :type y: int :rtype: int 移位实现计数 我们可以不断地检查 s 的最低位,如果最低位为 1,那么令计数器加一 然后我们令 s 整体右移一位这样 s 的最低位将被舍去,原本的次低位就变成了新的最低位。 我们重复这个过程直到 s=0 为止。这样计数器中就累计了 s 的二进制表示中 1 的数量。 时间击败95.89%,内存击败43.84%\n- def hammingDistance1(self, x, y): :type x: int :type y: int :rtype: int Brian Kernighan 算法 在方法二中,对于 s=(10001100)2的情况,我们需要循环右移 8 次才能得到答案。 而实际上如果我们可以跳过两个 1 之间的 0,直接对 1 进行计数,那么就只需要循环 3 次即可。 我们可以使用 Brian Kernighan 算法进行优化, 具体地,该算法可以被描述为这样一个结论:记 f(x) 表示 x 和 x−1 进行与运算所得的结果(即 f(x)=x & (x−1)) 那么 f(x) 恰为 x 删去其二进制表示中最右侧的 1 的结果。 基于该算法,当我们计算出 s=x⊕y,只需要不断让 s=f(s),直到 s=0 即可。 这样每循环一次,s 都会删去其二进制表示中最右侧的 1,最终循环的次数即为 s 的二进制表示中 1 的数量。 时间击败87.51%,内存击败8.22%\n\n<|skeleton|>\nclass Solution:\n\n def hammingDistance(self, x, y):\n \"\"\":type x: int :type y: int :rtype: int 移位实现计数 我们可以不断地检查 s 的最低位,如果最低位为 1,那么令计数器加一 然后我们令 s 整体右移一位这样 s 的最低位将被舍去,原本的次低位就变成了新的最低位。 我们重复这个过程直到 s=0 为止。这样计数器中就累计了 s 的二进制表示中 1 的数量。 时间击败95.89%,内存击败43.84%\"\"\"\n <|body_0|>\n\n def hammingDistance1(self, x, y):\n \"\"\":type x: int :type y: int :rtype: int Brian Kernighan 算法 在方法二中,对于 s=(10001100)2的情况,我们需要循环右移 8 次才能得到答案。 而实际上如果我们可以跳过两个 1 之间的 0,直接对 1 进行计数,那么就只需要循环 3 次即可。 我们可以使用 Brian Kernighan 算法进行优化, 具体地,该算法可以被描述为这样一个结论:记 f(x) 表示 x 和 x−1 进行与运算所得的结果(即 f(x)=x & (x−1)) 那么 f(x) 恰为 x 删去其二进制表示中最右侧的 1 的结果。 基于该算法,当我们计算出 s=x⊕y,只需要不断让 s=f(s),直到 s=0 即可。 这样每循环一次,s 都会删去其二进制表示中最右侧的 1,最终循环的次数即为 s 的二进制表示中 1 的数量。 时间击败87.51%,内存击败8.22%\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n n, result = (x ^ y, 0)\n while n:\n result += 1 if n & 1 else 0\n n = n >> 1\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n n, result = (x ^ y, 0)\n while n:\n n = n & n - 1\n result += 1\n return result\n<|end_body_1|>\n", "revision_id": "2dc982e690b153c33bc7e27a63604f754a0df90c", "skeleton": "<|skeleton|>\nclass Solution:\n\n def hammingDistance(self, x, y):\n \"\"\":type x: int :type y: int :rtype: int 移位实现计数 我们可以不断地检查 s 的最低位,如果最低位为 1,那么令计数器加一 然后我们令 s 整体右移一位这样 s 的最低位将被舍去,原本的次低位就变成了新的最低位。 我们重复这个过程直到 s=0 为止。这样计数器中就累计了 s 的二进制表示中 1 的数量。 时间击败95.89%,内存击败43.84%\"\"\"\n <|body_0|>\n\n def hammingDistance1(self, x, y):\n \"\"\":type x: int :type y: int :rtype: int Brian Kernighan 算法 在方法二中,对于 s=(10001100)2的情况,我们需要循环右移 8 次才能得到答案。 而实际上如果我们可以跳过两个 1 之间的 0,直接对 1 进行计数,那么就只需要循环 3 次即可。 我们可以使用 Brian Kernighan 算法进行优化, 具体地,该算法可以被描述为这样一个结论:记 f(x) 表示 x 和 x−1 进行与运算所得的结果(即 f(x)=x & (x−1)) 那么 f(x) 恰为 x 删去其二进制表示中最右侧的 1 的结果。 基于该算法,当我们计算出 s=x⊕y,只需要不断让 s=f(s),直到 s=0 即可。 这样每循环一次,s 都会删去其二进制表示中最右侧的 1,最终循环的次数即为 s 的二进制表示中 1 的数量。 时间击败87.51%,内存击败8.22%\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def hammingDistance(self, x, y):\n \"\"\":type x: int :type y: int :rtype: int 移位实现计数 我们可以不断地检查 s 的最低位,如果最低位为 1,那么令计数器加一 然后我们令 s 整体右移一位这样 s 的最低位将被舍去,原本的次低位就变成了新的最低位。 我们重复这个过程直到 s=0 为止。这样计数器中就累计了 s 的二进制表示中 1 的数量。 时间击败95.89%,内存击败43.84%\"\"\"\n n, result = (x ^ y, 0)\n while n:\n result += 1 if n & 1 else 0\n n = n >> 1\n return result\n\n def hammingDistance1(self, x, y):\n \"\"\":type x: int :type y: int :rtype: int Brian Kernighan 算法 在方法二中,对于 s=(10001100)2的情况,我们需要循环右移 8 次才能得到答案。 而实际上如果我们可以跳过两个 1 之间的 0,直接对 1 进行计数,那么就只需要循环 3 次即可。 我们可以使用 Brian Kernighan 算法进行优化, 具体地,该算法可以被描述为这样一个结论:记 f(x) 表示 x 和 x−1 进行与运算所得的结果(即 f(x)=x & (x−1)) 那么 f(x) 恰为 x 删去其二进制表示中最右侧的 1 的结果。 基于该算法,当我们计算出 s=x⊕y,只需要不断让 s=f(s),直到 s=0 即可。 这样每循环一次,s 
都会删去其二进制表示中最右侧的 1,最终循环的次数即为 s 的二进制表示中 1 的数量。 时间击败87.51%,内存击败8.22%\"\"\"\n n, result = (x ^ y, 0)\n while n:\n n = n & n - 1\n result += 1\n return result\n", "source": "the_stack_v2_python_sparse", "source_path": "461_hamming-distance.py", "source_repo": "95275059/Algorithm", "split": "val", "star_events_count": 0}
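Both Hamming-distance bodies are easy to cross-check against Python's own popcount; the loop below confirms that the shift-and-count variant and the Brian Kernighan variant agree with bin(x ^ y).count('1') on every pair of small inputs.

def hamming_shift(x, y):
    n, result = x ^ y, 0
    while n:
        result += n & 1   # count the lowest bit if it is set
        n >>= 1           # then drop it
    return result

def hamming_kernighan(x, y):
    n, result = x ^ y, 0
    while n:
        n &= n - 1        # clear the rightmost set bit
        result += 1       # one iteration per set bit
    return result

for x in range(64):
    for y in range(64):
        expected = bin(x ^ y).count('1')
        assert hamming_shift(x, y) == expected == hamming_kernighan(x, y)
print('both variants match bin(x ^ y).count("1") on all pairs')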
{"blob_id": "5b4fc43de983474802fc405ba99a40a4a47776a2", "bodies": ["ordered_data = np.sort(z)\nextension = (ordered_data[1] - ordered_data[0]) / 2\nordered_data = np.insert(ordered_data, 0, ordered_data[0] - extension)\nordered_data = np.append(ordered_data, ordered_data[-1] + extension)\nn = ordered_data.shape[0]\nF = np.arange(n + 1) / n\nordered, i = np.unique(ordered_data, return_index=True)\nF = F[i]\nself.z = ordered\nself.cdf = F\nself.y = norm.ppf(F)", "u = np.interp(z, self.z, self.cdf)\ny = norm.ppf(u)\nreturn y", "u = norm.cdf(y)\nz = np.interp(u, self.cdf, self.z)\nreturn z"], "bodies_text": "<|body_start_0|>\n ordered_data = np.sort(z)\n extension = (ordered_data[1] - ordered_data[0]) / 2\n ordered_data = np.insert(ordered_data, 0, ordered_data[0] - extension)\n ordered_data = np.append(ordered_data, ordered_data[-1] + extension)\n n = ordered_data.shape[0]\n F = np.arange(n + 1) / n\n ordered, i = np.unique(ordered_data, return_index=True)\n F = F[i]\n self.z = ordered\n self.cdf = F\n self.y = norm.ppf(F)\n<|end_body_0|>\n\n<|body_start_1|>\n u = np.interp(z, self.z, self.cdf)\n y = norm.ppf(u)\n return y\n<|end_body_1|>\n\n<|body_start_2|>\n u = norm.cdf(y)\n z = np.interp(u, self.cdf, self.z)\n return z\n<|end_body_2|>\n", "class_docstring": "Class for defining empirical normal score transform based on data. Attributes: z: ordered and processed data cdf: cumulative distribution function y: inverse cdf", "class_name": "transform", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass transform:\n \"\"\"Class for defining empirical normal score transform based on data. Attributes: z: ordered and processed data cdf: cumulative distribution function y: inverse cdf\"\"\"\n\n def __init__(self, z):\n \"\"\"Constructs empirical normal score transform for a given data. Arguments: z: array containing measurements\"\"\"\n <|body_0|>\n\n def direct(self, z):\n \"\"\"Returns normal score transformed data\"\"\"\n <|body_1|>\n\n def back(self, y):\n \"\"\"Inverse normal score transform\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ordered_data = np.sort(z)\n extension = (ordered_data[1] - ordered_data[0]) / 2\n ordered_data = np.insert(ordered_data, 0, ordered_data[0] - extension)\n ordered_data = np.append(ordered_data, ordered_data[-1] + extension)\n n = ordered_data.shape[0]\n F = np.arange(n + 1) / n\n ordered, i = np.unique(ordered_data, return_index=True)\n F = F[i]\n self.z = ordered\n self.cdf = F\n self.y = norm.ppf(F)\n<|end_body_0|>\n\n<|body_start_1|>\n u = np.interp(z, self.z, self.cdf)\n y = norm.ppf(u)\n return y\n<|end_body_1|>\n\n<|body_start_2|>\n u = norm.cdf(y)\n z = np.interp(u, self.cdf, self.z)\n return z\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000110", "length_bytes": 1283, "license_type": "permissive", "methods": [{"docstring": "Constructs empirical normal score transform for a given data. Arguments: z: array containing measurements", "name": "__init__", "signature": "def __init__(self, z)"}, {"docstring": "Returns normal score transformed data", "name": "direct", "signature": "def direct(self, z)"}, {"docstring": "Inverse normal score transform", "name": "back", "signature": "def back(self, y)"}], "n_methods": 3, "prompt": "Implement the Python class `transform` described below.\n\nClass description:\nClass for defining empirical normal score transform based on data. 
Attributes: z: ordered and processed data cdf: cumulative distribution function y: inverse cdf\n\nMethod signatures and docstrings:\n- def __init__(self, z): Constructs empirical normal score transform for a given data. Arguments: z: array containing measurements\n- def direct(self, z): Returns normal score transformed data\n- def back(self, y): Inverse normal score transform", "prompted_full_text": "Implement the Python class `transform` described below.\n\nClass description:\nClass for defining empirical normal score transform based on data. Attributes: z: ordered and processed data cdf: cumulative distribution function y: inverse cdf\n\nMethod signatures and docstrings:\n- def __init__(self, z): Constructs empirical normal score transform for a given data. Arguments: z: array containing measurements\n- def direct(self, z): Returns normal score transformed data\n- def back(self, y): Inverse normal score transform\n\n<|skeleton|>\nclass transform:\n \"\"\"Class for defining empirical normal score transform based on data. Attributes: z: ordered and processed data cdf: cumulative distribution function y: inverse cdf\"\"\"\n\n def __init__(self, z):\n \"\"\"Constructs empirical normal score transform for a given data. Arguments: z: array containing measurements\"\"\"\n <|body_0|>\n\n def direct(self, z):\n \"\"\"Returns normal score transformed data\"\"\"\n <|body_1|>\n\n def back(self, y):\n \"\"\"Inverse normal score transform\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ordered_data = np.sort(z)\n extension = (ordered_data[1] - ordered_data[0]) / 2\n ordered_data = np.insert(ordered_data, 0, ordered_data[0] - extension)\n ordered_data = np.append(ordered_data, ordered_data[-1] + extension)\n n = ordered_data.shape[0]\n F = np.arange(n + 1) / n\n ordered, i = np.unique(ordered_data, return_index=True)\n F = F[i]\n self.z = ordered\n self.cdf = F\n self.y = norm.ppf(F)\n<|end_body_0|>\n\n<|body_start_1|>\n u = np.interp(z, self.z, self.cdf)\n y = norm.ppf(u)\n return y\n<|end_body_1|>\n\n<|body_start_2|>\n u = norm.cdf(y)\n z = np.interp(u, self.cdf, self.z)\n return z\n<|end_body_2|>\n", "revision_id": "1fdc84fdf256281938925ead0f2dfe8fbf2a1a37", "skeleton": "<|skeleton|>\nclass transform:\n \"\"\"Class for defining empirical normal score transform based on data. Attributes: z: ordered and processed data cdf: cumulative distribution function y: inverse cdf\"\"\"\n\n def __init__(self, z):\n \"\"\"Constructs empirical normal score transform for a given data. Arguments: z: array containing measurements\"\"\"\n <|body_0|>\n\n def direct(self, z):\n \"\"\"Returns normal score transformed data\"\"\"\n <|body_1|>\n\n def back(self, y):\n \"\"\"Inverse normal score transform\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class transform:\n \"\"\"Class for defining empirical normal score transform based on data. Attributes: z: ordered and processed data cdf: cumulative distribution function y: inverse cdf\"\"\"\n\n def __init__(self, z):\n \"\"\"Constructs empirical normal score transform for a given data. 
Arguments: z: array containing measurements\"\"\"\n ordered_data = np.sort(z)\n extension = (ordered_data[1] - ordered_data[0]) / 2\n ordered_data = np.insert(ordered_data, 0, ordered_data[0] - extension)\n ordered_data = np.append(ordered_data, ordered_data[-1] + extension)\n n = ordered_data.shape[0]\n F = np.arange(n + 1) / n\n ordered, i = np.unique(ordered_data, return_index=True)\n F = F[i]\n self.z = ordered\n self.cdf = F\n self.y = norm.ppf(F)\n\n def direct(self, z):\n \"\"\"Returns normal score transformed data\"\"\"\n u = np.interp(z, self.z, self.cdf)\n y = norm.ppf(u)\n return y\n\n def back(self, y):\n \"\"\"Inverse normal score transform\"\"\"\n u = norm.cdf(y)\n z = np.interp(u, self.cdf, self.z)\n return z\n", "source": "the_stack_v2_python_sparse", "source_path": "day2/normal_score_transform.py", "source_repo": "pjuda/inverse-modelling-workshop", "split": "val", "star_events_count": 0}
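A usage sketch for the transform record above, assuming np is numpy and norm is scipy.stats.norm (the bodies reference both names but the record carries no imports). One subtlety: self.y picks up a -inf at its first knot, since F starts at 0 and norm.ppf(0) is -inf; direct and back never touch it, because np.interp works on the finite cdf knots and the measurements sit strictly inside the extended range. Round-tripping should recover the inputs:

import numpy as np
from scipy.stats import norm  # assumed imports; the record never declares them

data = np.random.default_rng(0).lognormal(size=100)
t = transform(data)            # the class from the record above
scores = t.direct(data)        # empirical normal-score transform
recovered = t.back(scores)     # inverse transform
print(np.allclose(recovered, data))  # True: both directions interpolate on the same knots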
{"blob_id": "acbe14189d65bca3d54d8bc9059c2a98b55f308d", "bodies": ["if not (self.project and self.analysis):\n return\ndest_path = join(settings.STATIC_ROOT, settings.STATIC_METRIC_LOCATION, self.project.name, self.analysis.name)\ndest_link = join(dest_path, self.name)\nsource = join(self.project.path, settings.PROCESSED_DIR_NAME, self.analysis.name, settings.RESULTS_DIR_NAME, self.name)\nif exists(dest_link):\n unlink(dest_link)\nif not exists(dest_path):\n makedirs(dest_path)\nsymlink(source, dest_link)\nself.static_path = dest_link\nself.save()", "if not self.static_path:\n return\nanalysis_path, name = split(self.static_path)\nproject_path = dirname(analysis_path)\nif exists(self.static_path):\n unlink(self.static_path)\nif not listdir(analysis_path):\n rmdir(analysis_path)\nif not listdir(project_path):\n rmdir(project_path)"], "bodies_text": "<|body_start_0|>\n if not (self.project and self.analysis):\n return\n dest_path = join(settings.STATIC_ROOT, settings.STATIC_METRIC_LOCATION, self.project.name, self.analysis.name)\n dest_link = join(dest_path, self.name)\n source = join(self.project.path, settings.PROCESSED_DIR_NAME, self.analysis.name, settings.RESULTS_DIR_NAME, self.name)\n if exists(dest_link):\n unlink(dest_link)\n if not exists(dest_path):\n makedirs(dest_path)\n symlink(source, dest_link)\n self.static_path = dest_link\n self.save()\n<|end_body_0|>\n\n<|body_start_1|>\n if not self.static_path:\n return\n analysis_path, name = split(self.static_path)\n project_path = dirname(analysis_path)\n if exists(self.static_path):\n unlink(self.static_path)\n if not listdir(analysis_path):\n rmdir(analysis_path)\n if not listdir(project_path):\n rmdir(project_path)\n<|end_body_1|>\n", "class_docstring": "A Metric file. Because a metric can be viewed from the web, we can place it in the /static/ directory. However, we don't want to copy the file (too big) so we instead provide a symlink to its original path. The static_path argument is a path to the place in the /static/ directory where the symlink sits.", "class_name": "Metric", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Metric:\n \"\"\"A Metric file. Because a metric can be viewed from the web, we can place it in the /static/ directory. However, we don't want to copy the file (too big) so we instead provide a symlink to its original path. The static_path argument is a path to the place in the /static/ directory where the symlink sits.\"\"\"\n\n def create_static_path(self):\n \"\"\"Creates a symbolic link to the metric from the static directory, where it can be served to the user. 
If the link already exists, this function deletes the previous link and replaces it with a new one.\"\"\"\n <|body_0|>\n\n def remove_static_path(self):\n \"\"\"Deletes a the symbolic link associated with this Metric.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not (self.project and self.analysis):\n return\n dest_path = join(settings.STATIC_ROOT, settings.STATIC_METRIC_LOCATION, self.project.name, self.analysis.name)\n dest_link = join(dest_path, self.name)\n source = join(self.project.path, settings.PROCESSED_DIR_NAME, self.analysis.name, settings.RESULTS_DIR_NAME, self.name)\n if exists(dest_link):\n unlink(dest_link)\n if not exists(dest_path):\n makedirs(dest_path)\n symlink(source, dest_link)\n self.static_path = dest_link\n self.save()\n<|end_body_0|>\n\n<|body_start_1|>\n if not self.static_path:\n return\n analysis_path, name = split(self.static_path)\n project_path = dirname(analysis_path)\n if exists(self.static_path):\n unlink(self.static_path)\n if not listdir(analysis_path):\n rmdir(analysis_path)\n if not listdir(project_path):\n rmdir(project_path)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000111", "length_bytes": 8263, "license_type": "no_license", "methods": [{"docstring": "Creates a symbolic link to the metric from the static directory, where it can be served to the user. If the link already exists, this function deletes the previous link and replaces it with a new one.", "name": "create_static_path", "signature": "def create_static_path(self)"}, {"docstring": "Deletes a the symbolic link associated with this Metric.", "name": "remove_static_path", "signature": "def remove_static_path(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_040730", "prompt": "Implement the Python class `Metric` described below.\n\nClass description:\nA Metric file. Because a metric can be viewed from the web, we can place it in the /static/ directory. However, we don't want to copy the file (too big) so we instead provide a symlink to its original path. The static_path argument is a path to the place in the /static/ directory where the symlink sits.\n\nMethod signatures and docstrings:\n- def create_static_path(self): Creates a symbolic link to the metric from the static directory, where it can be served to the user. If the link already exists, this function deletes the previous link and replaces it with a new one.\n- def remove_static_path(self): Deletes a the symbolic link associated with this Metric.", "prompted_full_text": "Implement the Python class `Metric` described below.\n\nClass description:\nA Metric file. Because a metric can be viewed from the web, we can place it in the /static/ directory. However, we don't want to copy the file (too big) so we instead provide a symlink to its original path. The static_path argument is a path to the place in the /static/ directory where the symlink sits.\n\nMethod signatures and docstrings:\n- def create_static_path(self): Creates a symbolic link to the metric from the static directory, where it can be served to the user. If the link already exists, this function deletes the previous link and replaces it with a new one.\n- def remove_static_path(self): Deletes a the symbolic link associated with this Metric.\n\n<|skeleton|>\nclass Metric:\n \"\"\"A Metric file. Because a metric can be viewed from the web, we can place it in the /static/ directory. However, we don't want to copy the file (too big) so we instead provide a symlink to its original path. 
The static_path argument is a path to the place in the /static/ directory where the symlink sits.\"\"\"\n\n def create_static_path(self):\n \"\"\"Creates a symbolic link to the metric from the static directory, where it can be served to the user. If the link already exists, this function deletes the previous link and replaces it with a new one.\"\"\"\n <|body_0|>\n\n def remove_static_path(self):\n \"\"\"Deletes a the symbolic link associated with this Metric.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not (self.project and self.analysis):\n return\n dest_path = join(settings.STATIC_ROOT, settings.STATIC_METRIC_LOCATION, self.project.name, self.analysis.name)\n dest_link = join(dest_path, self.name)\n source = join(self.project.path, settings.PROCESSED_DIR_NAME, self.analysis.name, settings.RESULTS_DIR_NAME, self.name)\n if exists(dest_link):\n unlink(dest_link)\n if not exists(dest_path):\n makedirs(dest_path)\n symlink(source, dest_link)\n self.static_path = dest_link\n self.save()\n<|end_body_0|>\n\n<|body_start_1|>\n if not self.static_path:\n return\n analysis_path, name = split(self.static_path)\n project_path = dirname(analysis_path)\n if exists(self.static_path):\n unlink(self.static_path)\n if not listdir(analysis_path):\n rmdir(analysis_path)\n if not listdir(project_path):\n rmdir(project_path)\n<|end_body_1|>\n", "revision_id": "c863c79c0cbc784834534a8ce894f9ff6b5ed4c1", "skeleton": "<|skeleton|>\nclass Metric:\n \"\"\"A Metric file. Because a metric can be viewed from the web, we can place it in the /static/ directory. However, we don't want to copy the file (too big) so we instead provide a symlink to its original path. The static_path argument is a path to the place in the /static/ directory where the symlink sits.\"\"\"\n\n def create_static_path(self):\n \"\"\"Creates a symbolic link to the metric from the static directory, where it can be served to the user. If the link already exists, this function deletes the previous link and replaces it with a new one.\"\"\"\n <|body_0|>\n\n def remove_static_path(self):\n \"\"\"Deletes a the symbolic link associated with this Metric.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Metric:\n \"\"\"A Metric file. Because a metric can be viewed from the web, we can place it in the /static/ directory. However, we don't want to copy the file (too big) so we instead provide a symlink to its original path. The static_path argument is a path to the place in the /static/ directory where the symlink sits.\"\"\"\n\n def create_static_path(self):\n \"\"\"Creates a symbolic link to the metric from the static directory, where it can be served to the user. 
If the link already exists, this function deletes the previous link and replaces it with a new one.\"\"\"\n if not (self.project and self.analysis):\n return\n dest_path = join(settings.STATIC_ROOT, settings.STATIC_METRIC_LOCATION, self.project.name, self.analysis.name)\n dest_link = join(dest_path, self.name)\n source = join(self.project.path, settings.PROCESSED_DIR_NAME, self.analysis.name, settings.RESULTS_DIR_NAME, self.name)\n if exists(dest_link):\n unlink(dest_link)\n if not exists(dest_path):\n makedirs(dest_path)\n symlink(source, dest_link)\n self.static_path = dest_link\n self.save()\n\n def remove_static_path(self):\n \"\"\"Deletes a the symbolic link associated with this Metric.\"\"\"\n if not self.static_path:\n return\n analysis_path, name = split(self.static_path)\n project_path = dirname(analysis_path)\n if exists(self.static_path):\n unlink(self.static_path)\n if not listdir(analysis_path):\n rmdir(analysis_path)\n if not listdir(project_path):\n rmdir(project_path)\n", "source": "the_stack_v2_python_sparse", "source_path": "pbg/apps/projects/models.py", "source_repo": "mdschramm/dashboardngs", "split": "val", "star_events_count": 0}
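One caveat worth flagging in create_static_path above: os.path.exists returns False for a dangling symlink, so the exists(dest_link) guard can miss a stale link and let os.symlink raise FileExistsError. A hedged standalone sketch of the replace-then-link pattern using lexists instead (the helper name is mine, not the record's):

import os

def relink(source: str, dest_dir: str, name: str) -> str:
    """Replace or create the symlink dest_dir/name -> source."""
    dest_link = os.path.join(dest_dir, name)
    if os.path.lexists(dest_link):  # unlike exists(), also True for dangling links
        os.unlink(dest_link)
    os.makedirs(dest_dir, exist_ok=True)
    os.symlink(source, dest_link)
    return dest_link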
{"blob_id": "2125a96c457e30f7a521b353492202f6b2507c2b", "bodies": ["if not no_staleness:\n self.addStalenessPeriods()\nimporter = ImportCmd()\nfor filename in glob.glob(os.path.join(EGG_DIR, '*.json')):\n importer.run(filename, listName=self.getListName(filename))", "previousPeriod = None\nfor days in [180, 90, 30, 14, 7]:\n if previousPeriod is not None:\n period = StalenessPeriod(days=days, next=previousPeriod)\n else:\n period = StalenessPeriod(days=days)\n previousPeriod = period\n server.db.session.add(period)\nperiod = StalenessPeriod(days=3, next=previousPeriod, first=True)\nserver.db.session.add(period)\nserver.db.session.commit()", "fullPath = os.path.abspath(filename)\nbasename = os.path.basename(fullPath)\nfilenameWithoutExtension = os.path.splitext(basename)[0]\npieces = filenameWithoutExtension.split('_')\ncapitalizedPieces = [piece.capitalize() for piece in pieces]\nreturn ' '.join(capitalizedPieces)"], "bodies_text": "<|body_start_0|>\n if not no_staleness:\n self.addStalenessPeriods()\n importer = ImportCmd()\n for filename in glob.glob(os.path.join(EGG_DIR, '*.json')):\n importer.run(filename, listName=self.getListName(filename))\n<|end_body_0|>\n\n<|body_start_1|>\n previousPeriod = None\n for days in [180, 90, 30, 14, 7]:\n if previousPeriod is not None:\n period = StalenessPeriod(days=days, next=previousPeriod)\n else:\n period = StalenessPeriod(days=days)\n previousPeriod = period\n server.db.session.add(period)\n period = StalenessPeriod(days=3, next=previousPeriod, first=True)\n server.db.session.add(period)\n server.db.session.commit()\n<|end_body_1|>\n\n<|body_start_2|>\n fullPath = os.path.abspath(filename)\n basename = os.path.basename(fullPath)\n filenameWithoutExtension = os.path.splitext(basename)[0]\n pieces = filenameWithoutExtension.split('_')\n capitalizedPieces = [piece.capitalize() for piece in pieces]\n return ' '.join(capitalizedPieces)\n<|end_body_2|>\n", "class_docstring": "Command to seed the database with all available resource files", "class_name": "Seed", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Seed:\n \"\"\"Command to seed the database with all available resource files\"\"\"\n\n def run(self, *, no_staleness=False):\n \"\"\"Create the new log entry\"\"\"\n <|body_0|>\n\n def addStalenessPeriods(self):\n \"\"\"Add the staleness periods\"\"\"\n <|body_1|>\n\n def getListName(self, filename):\n \"\"\"Return the list name for the given file\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not no_staleness:\n self.addStalenessPeriods()\n importer = ImportCmd()\n for filename in glob.glob(os.path.join(EGG_DIR, '*.json')):\n importer.run(filename, listName=self.getListName(filename))\n<|end_body_0|>\n\n<|body_start_1|>\n previousPeriod = None\n for days in [180, 90, 30, 14, 7]:\n if previousPeriod is not None:\n period = StalenessPeriod(days=days, next=previousPeriod)\n else:\n period = StalenessPeriod(days=days)\n previousPeriod = period\n server.db.session.add(period)\n period = StalenessPeriod(days=3, next=previousPeriod, first=True)\n server.db.session.add(period)\n server.db.session.commit()\n<|end_body_1|>\n\n<|body_start_2|>\n fullPath = os.path.abspath(filename)\n basename = os.path.basename(fullPath)\n filenameWithoutExtension = os.path.splitext(basename)[0]\n pieces = filenameWithoutExtension.split('_')\n capitalizedPieces = [piece.capitalize() for piece in pieces]\n return ' '.join(capitalizedPieces)\n<|end_body_2|>\n", "id": 
"stack_v2_sparse_classes_75kplus_val_000112", "length_bytes": 1798, "license_type": "no_license", "methods": [{"docstring": "Create the new log entry", "name": "run", "signature": "def run(self, *, no_staleness=False)"}, {"docstring": "Add the staleness periods", "name": "addStalenessPeriods", "signature": "def addStalenessPeriods(self)"}, {"docstring": "Return the list name for the given file", "name": "getListName", "signature": "def getListName(self, filename)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_020758", "prompt": "Implement the Python class `Seed` described below.\n\nClass description:\nCommand to seed the database with all available resource files\n\nMethod signatures and docstrings:\n- def run(self, *, no_staleness=False): Create the new log entry\n- def addStalenessPeriods(self): Add the staleness periods\n- def getListName(self, filename): Return the list name for the given file", "prompted_full_text": "Implement the Python class `Seed` described below.\n\nClass description:\nCommand to seed the database with all available resource files\n\nMethod signatures and docstrings:\n- def run(self, *, no_staleness=False): Create the new log entry\n- def addStalenessPeriods(self): Add the staleness periods\n- def getListName(self, filename): Return the list name for the given file\n\n<|skeleton|>\nclass Seed:\n \"\"\"Command to seed the database with all available resource files\"\"\"\n\n def run(self, *, no_staleness=False):\n \"\"\"Create the new log entry\"\"\"\n <|body_0|>\n\n def addStalenessPeriods(self):\n \"\"\"Add the staleness periods\"\"\"\n <|body_1|>\n\n def getListName(self, filename):\n \"\"\"Return the list name for the given file\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not no_staleness:\n self.addStalenessPeriods()\n importer = ImportCmd()\n for filename in glob.glob(os.path.join(EGG_DIR, '*.json')):\n importer.run(filename, listName=self.getListName(filename))\n<|end_body_0|>\n\n<|body_start_1|>\n previousPeriod = None\n for days in [180, 90, 30, 14, 7]:\n if previousPeriod is not None:\n period = StalenessPeriod(days=days, next=previousPeriod)\n else:\n period = StalenessPeriod(days=days)\n previousPeriod = period\n server.db.session.add(period)\n period = StalenessPeriod(days=3, next=previousPeriod, first=True)\n server.db.session.add(period)\n server.db.session.commit()\n<|end_body_1|>\n\n<|body_start_2|>\n fullPath = os.path.abspath(filename)\n basename = os.path.basename(fullPath)\n filenameWithoutExtension = os.path.splitext(basename)[0]\n pieces = filenameWithoutExtension.split('_')\n capitalizedPieces = [piece.capitalize() for piece in pieces]\n return ' '.join(capitalizedPieces)\n<|end_body_2|>\n", "revision_id": "f08dc4465b7e4fb32235e1647c46edd4472f9093", "skeleton": "<|skeleton|>\nclass Seed:\n \"\"\"Command to seed the database with all available resource files\"\"\"\n\n def run(self, *, no_staleness=False):\n \"\"\"Create the new log entry\"\"\"\n <|body_0|>\n\n def addStalenessPeriods(self):\n \"\"\"Add the staleness periods\"\"\"\n <|body_1|>\n\n def getListName(self, filename):\n \"\"\"Return the list name for the given file\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Seed:\n \"\"\"Command to seed the database with all available resource files\"\"\"\n\n def run(self, *, no_staleness=False):\n \"\"\"Create the new log entry\"\"\"\n if not 
no_staleness:\n self.addStalenessPeriods()\n importer = ImportCmd()\n for filename in glob.glob(os.path.join(EGG_DIR, '*.json')):\n importer.run(filename, listName=self.getListName(filename))\n\n def addStalenessPeriods(self):\n \"\"\"Add the staleness periods\"\"\"\n previousPeriod = None\n for days in [180, 90, 30, 14, 7]:\n if previousPeriod is not None:\n period = StalenessPeriod(days=days, next=previousPeriod)\n else:\n period = StalenessPeriod(days=days)\n previousPeriod = period\n server.db.session.add(period)\n period = StalenessPeriod(days=3, next=previousPeriod, first=True)\n server.db.session.add(period)\n server.db.session.commit()\n\n def getListName(self, filename):\n \"\"\"Return the list name for the given file\"\"\"\n fullPath = os.path.abspath(filename)\n basename = os.path.basename(fullPath)\n filenameWithoutExtension = os.path.splitext(basename)[0]\n pieces = filenameWithoutExtension.split('_')\n capitalizedPieces = [piece.capitalize() for piece in pieces]\n return ' '.join(capitalizedPieces)\n", "source": "the_stack_v2_python_sparse", "source_path": "src/Driver/Commands/seed.py", "source_repo": "cloew/VocabTester", "split": "val", "star_events_count": 0}
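Two details in the Seed record are easy to misread. addStalenessPeriods iterates longest-first, so the resulting chain is 3 -> 7 -> 14 -> 30 -> 90 -> 180 days, with only the 3-day period flagged first=True. And getListName is just a filename-to-title convention, which a standalone re-implementation makes concrete (names below are mine):

import os

def list_name(filename: str) -> str:
    """'path/to/basic_verbs.json' -> 'Basic Verbs', mirroring Seed.getListName."""
    stem = os.path.splitext(os.path.basename(os.path.abspath(filename)))[0]
    return ' '.join(piece.capitalize() for piece in stem.split('_'))

assert list_name('/eggs/basic_verbs.json') == 'Basic Verbs'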
{"blob_id": "5125724af01874cdd1bb09bc025a2db7a43bd10a", "bodies": ["if request.user.is_authenticated:\n return redirect('research:index')\nus_form = UserForm()\ncontext = {'us_form': us_form}\nreturn render(request, 'user/register.html', context)", "us_form = UserForm(request.POST)\nif us_form.is_valid():\n name, password = (us_form.cleaned_data['username'], us_form.cleaned_data['password'])\n success, message = add_new_user(name, password)\n messages.info(request, message)\n if success:\n return redirect('user:connection')\n return redirect('user:register')\nreturn HttpResponse('Problème dans le formulaire !')"], "bodies_text": "<|body_start_0|>\n if request.user.is_authenticated:\n return redirect('research:index')\n us_form = UserForm()\n context = {'us_form': us_form}\n return render(request, 'user/register.html', context)\n<|end_body_0|>\n\n<|body_start_1|>\n us_form = UserForm(request.POST)\n if us_form.is_valid():\n name, password = (us_form.cleaned_data['username'], us_form.cleaned_data['password'])\n success, message = add_new_user(name, password)\n messages.info(request, message)\n if success:\n return redirect('user:connection')\n return redirect('user:register')\n return HttpResponse('Problème dans le formulaire !')\n<|end_body_1|>\n", "class_docstring": "This class deals with registration get > loads a registration page post > analyses datas to try to create a new user and profile", "class_name": "RegisterView", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RegisterView:\n \"\"\"This class deals with registration get > loads a registration page post > analyses datas to try to create a new user and profile\"\"\"\n\n def get(self, request):\n \"\"\"manages the get request for the register page\"\"\"\n <|body_0|>\n\n def post(self, request):\n \"\"\"manages the post request for the register page : get datas in order to try to create a new user\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if request.user.is_authenticated:\n return redirect('research:index')\n us_form = UserForm()\n context = {'us_form': us_form}\n return render(request, 'user/register.html', context)\n<|end_body_0|>\n\n<|body_start_1|>\n us_form = UserForm(request.POST)\n if us_form.is_valid():\n name, password = (us_form.cleaned_data['username'], us_form.cleaned_data['password'])\n success, message = add_new_user(name, password)\n messages.info(request, message)\n if success:\n return redirect('user:connection')\n return redirect('user:register')\n return HttpResponse('Problème dans le formulaire !')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000113", "length_bytes": 6872, "license_type": "no_license", "methods": [{"docstring": "manages the get request for the register page", "name": "get", "signature": "def get(self, request)"}, {"docstring": "manages the post request for the register page : get datas in order to try to create a new user", "name": "post", "signature": "def post(self, request)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_031365", "prompt": "Implement the Python class `RegisterView` described below.\n\nClass description:\nThis class deals with registration get > loads a registration page post > analyses datas to try to create a new user and profile\n\nMethod signatures and docstrings:\n- def get(self, request): manages the get request for the register page\n- def post(self, request): manages the post request for the register page : get datas in order to try to create a 
new user", "prompted_full_text": "Implement the Python class `RegisterView` described below.\n\nClass description:\nThis class deals with registration get > loads a registration page post > analyses datas to try to create a new user and profile\n\nMethod signatures and docstrings:\n- def get(self, request): manages the get request for the register page\n- def post(self, request): manages the post request for the register page : get datas in order to try to create a new user\n\n<|skeleton|>\nclass RegisterView:\n \"\"\"This class deals with registration get > loads a registration page post > analyses datas to try to create a new user and profile\"\"\"\n\n def get(self, request):\n \"\"\"manages the get request for the register page\"\"\"\n <|body_0|>\n\n def post(self, request):\n \"\"\"manages the post request for the register page : get datas in order to try to create a new user\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if request.user.is_authenticated:\n return redirect('research:index')\n us_form = UserForm()\n context = {'us_form': us_form}\n return render(request, 'user/register.html', context)\n<|end_body_0|>\n\n<|body_start_1|>\n us_form = UserForm(request.POST)\n if us_form.is_valid():\n name, password = (us_form.cleaned_data['username'], us_form.cleaned_data['password'])\n success, message = add_new_user(name, password)\n messages.info(request, message)\n if success:\n return redirect('user:connection')\n return redirect('user:register')\n return HttpResponse('Problème dans le formulaire !')\n<|end_body_1|>\n", "revision_id": "378244474186a2fe25f91377f3628a1479329f99", "skeleton": "<|skeleton|>\nclass RegisterView:\n \"\"\"This class deals with registration get > loads a registration page post > analyses datas to try to create a new user and profile\"\"\"\n\n def get(self, request):\n \"\"\"manages the get request for the register page\"\"\"\n <|body_0|>\n\n def post(self, request):\n \"\"\"manages the post request for the register page : get datas in order to try to create a new user\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class RegisterView:\n \"\"\"This class deals with registration get > loads a registration page post > analyses datas to try to create a new user and profile\"\"\"\n\n def get(self, request):\n \"\"\"manages the get request for the register page\"\"\"\n if request.user.is_authenticated:\n return redirect('research:index')\n us_form = UserForm()\n context = {'us_form': us_form}\n return render(request, 'user/register.html', context)\n\n def post(self, request):\n \"\"\"manages the post request for the register page : get datas in order to try to create a new user\"\"\"\n us_form = UserForm(request.POST)\n if us_form.is_valid():\n name, password = (us_form.cleaned_data['username'], us_form.cleaned_data['password'])\n success, message = add_new_user(name, password)\n messages.info(request, message)\n if success:\n return redirect('user:connection')\n return redirect('user:register')\n return HttpResponse('Problème dans le formulaire !')\n", "source": "the_stack_v2_python_sparse", "source_path": "user/views.py", "source_repo": "blingstand/projet8", "split": "val", "star_events_count": 0}
{"blob_id": "162248bb0e324e8d7467fba03e58935f797dc7de", "bodies": ["if not builder:\n raise ValueError('Builder is not specified')\nself.__builder = builder", "if not containerOsh:\n raise ValueError('Container is not specified')\nosh = self.__builder.buildFile(file_)\nosh.setContainer(containerOsh)\nreturn osh"], "bodies_text": "<|body_start_0|>\n if not builder:\n raise ValueError('Builder is not specified')\n self.__builder = builder\n<|end_body_0|>\n\n<|body_start_1|>\n if not containerOsh:\n raise ValueError('Container is not specified')\n osh = self.__builder.buildFile(file_)\n osh.setContainer(containerOsh)\n return osh\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Reporter", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Reporter:\n\n def __init__(self, builder):\n \"\"\"@types: file_topology.Builder @raise ValueError: Builder is not specified\"\"\"\n <|body_0|>\n\n def report(self, file_, containerOsh):\n \"\"\"@types: File, ObjectStateHolder -> ObjectStateHolder @raise ValueError: Container is not specified\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not builder:\n raise ValueError('Builder is not specified')\n self.__builder = builder\n<|end_body_0|>\n\n<|body_start_1|>\n if not containerOsh:\n raise ValueError('Container is not specified')\n osh = self.__builder.buildFile(file_)\n osh.setContainer(containerOsh)\n return osh\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000114", "length_bytes": 12587, "license_type": "no_license", "methods": [{"docstring": "@types: file_topology.Builder @raise ValueError: Builder is not specified", "name": "__init__", "signature": "def __init__(self, builder)"}, {"docstring": "@types: File, ObjectStateHolder -> ObjectStateHolder @raise ValueError: Container is not specified", "name": "report", "signature": "def report(self, file_, containerOsh)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003783", "prompt": "Implement the Python class `Reporter` described below.\n\nClass description:\nImplement the Reporter class.\n\nMethod signatures and docstrings:\n- def __init__(self, builder): @types: file_topology.Builder @raise ValueError: Builder is not specified\n- def report(self, file_, containerOsh): @types: File, ObjectStateHolder -> ObjectStateHolder @raise ValueError: Container is not specified", "prompted_full_text": "Implement the Python class `Reporter` described below.\n\nClass description:\nImplement the Reporter class.\n\nMethod signatures and docstrings:\n- def __init__(self, builder): @types: file_topology.Builder @raise ValueError: Builder is not specified\n- def report(self, file_, containerOsh): @types: File, ObjectStateHolder -> ObjectStateHolder @raise ValueError: Container is not specified\n\n<|skeleton|>\nclass Reporter:\n\n def __init__(self, builder):\n \"\"\"@types: file_topology.Builder @raise ValueError: Builder is not specified\"\"\"\n <|body_0|>\n\n def report(self, file_, containerOsh):\n \"\"\"@types: File, ObjectStateHolder -> ObjectStateHolder @raise ValueError: Container is not specified\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not builder:\n raise ValueError('Builder is not specified')\n self.__builder = builder\n<|end_body_0|>\n\n<|body_start_1|>\n if not containerOsh:\n raise ValueError('Container is not specified')\n osh = self.__builder.buildFile(file_)\n osh.setContainer(containerOsh)\n return osh\n<|end_body_1|>\n", "revision_id": 
"c431e809e8d0f82e1bca7e3429dd0245560b5680", "skeleton": "<|skeleton|>\nclass Reporter:\n\n def __init__(self, builder):\n \"\"\"@types: file_topology.Builder @raise ValueError: Builder is not specified\"\"\"\n <|body_0|>\n\n def report(self, file_, containerOsh):\n \"\"\"@types: File, ObjectStateHolder -> ObjectStateHolder @raise ValueError: Container is not specified\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Reporter:\n def __init__(self, builder):\n \"\"\"@types: file_topology.Builder @raise ValueError: Builder is not specified\"\"\"\n if not builder:\n raise ValueError('Builder is not specified')\n self.__builder = builder\n\n def report(self, file_, containerOsh):\n \"\"\"@types: File, ObjectStateHolder -> ObjectStateHolder @raise ValueError: Container is not specified\"\"\"\n if not containerOsh:\n raise ValueError('Container is not specified')\n osh = self.__builder.buildFile(file_)\n osh.setContainer(containerOsh)\n return osh\n", "source": "the_stack_v2_python_sparse", "source_path": "reference/ucmdb/discovery/file_topology.py", "source_repo": "madmonkyang/cda-record", "split": "val", "star_events_count": 0}
{"blob_id": "89c33ee2a4abd2880f7cef7a2507ba252bbbc89d", "bodies": ["try:\n limit = await super().get(limit_id)\nexcept SQLAlchemyError as err:\n LOGGER.error('Could not retrieve budget limit by id=%s. Error: %s', limit_id, err)\n raise DatabaseError('Failed to retrieve requested budget limit.')\nif not limit:\n raise DBNoResultFoundError('The requested budget limit does not exist.')\nreturn limit", "try:\n limits = await db.select([cls.id, cast(cls.amount, db.String).label('balance'), MCCCategory.name, MCCCategory.info]).select_from(cls.join(MCCCategory)).where(cls.user_id == user_id).gino.all()\nexcept SQLAlchemyError as err:\n LOGGER.error('Could not retrieve budget limits for user=%s. Error: %s', user_id, err)\n raise DatabaseError('Failed to retrieve budget limits for requested user.')\nreturn [dict(limit) for limit in limits]", "try:\n return await super().create(user_id=user_id, category_id=category_id, amount=amount)\nexcept exceptions.UniqueViolationError:\n raise DatabaseError('A limit with such category for requested user already exists.')\nexcept SQLAlchemyError as err:\n LOGGER.error('Could not create limit with category=%s for user=%s. Error: %s', category_id, user_id, err)\n raise DatabaseError('Failed to create limit budget for requested user.')", "try:\n status, _ = await super().update.values(amount=amount).where(cls.id == limit_id).gino.status()\nexcept SQLAlchemyError as err:\n LOGGER.error('Could not update budget limit=%s. Error: %s', limit_id, err)\n raise DatabaseError('Failed to update budget limit.')\nupdated = parse_status(status)\nif not updated:\n raise DatabaseError('The budget limit was not updated.')", "try:\n status, _ = await super().delete.where(cls.id == limit_id).gino.status()\nexcept SQLAlchemyError as err:\n LOGGER.error('Could not delete budget limit by id=%s. Error: %s', limit_id, err)\n raise DatabaseError('Failed to delete budget limit.')\ndeleted = parse_status(status)\nif not deleted:\n raise DatabaseError('The budget category limit was not deleted.')"], "bodies_text": "<|body_start_0|>\n try:\n limit = await super().get(limit_id)\n except SQLAlchemyError as err:\n LOGGER.error('Could not retrieve budget limit by id=%s. Error: %s', limit_id, err)\n raise DatabaseError('Failed to retrieve requested budget limit.')\n if not limit:\n raise DBNoResultFoundError('The requested budget limit does not exist.')\n return limit\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n limits = await db.select([cls.id, cast(cls.amount, db.String).label('balance'), MCCCategory.name, MCCCategory.info]).select_from(cls.join(MCCCategory)).where(cls.user_id == user_id).gino.all()\n except SQLAlchemyError as err:\n LOGGER.error('Could not retrieve budget limits for user=%s. Error: %s', user_id, err)\n raise DatabaseError('Failed to retrieve budget limits for requested user.')\n return [dict(limit) for limit in limits]\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n return await super().create(user_id=user_id, category_id=category_id, amount=amount)\n except exceptions.UniqueViolationError:\n raise DatabaseError('A limit with such category for requested user already exists.')\n except SQLAlchemyError as err:\n LOGGER.error('Could not create limit with category=%s for user=%s. 
Error: %s', category_id, user_id, err)\n raise DatabaseError('Failed to create limit budget for requested user.')\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n status, _ = await super().update.values(amount=amount).where(cls.id == limit_id).gino.status()\n except SQLAlchemyError as err:\n LOGGER.error('Could not update budget limit=%s. Error: %s', limit_id, err)\n raise DatabaseError('Failed to update budget limit.')\n updated = parse_status(status)\n if not updated:\n raise DatabaseError('The budget limit was not updated.')\n<|end_body_3|>\n\n<|body_start_4|>\n try:\n status, _ = await super().delete.where(cls.id == limit_id).gino.status()\n except SQLAlchemyError as err:\n LOGGER.error('Could not delete budget limit by id=%s. Error: %s', limit_id, err)\n raise DatabaseError('Failed to delete budget limit.')\n deleted = parse_status(status)\n if not deleted:\n raise DatabaseError('The budget category limit was not deleted.')\n<|end_body_4|>\n", "class_docstring": "Class that represents Budget Limit in system.", "class_name": "Limit", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Limit:\n \"\"\"Class that represents Budget Limit in system.\"\"\"\n\n async def get_by_id(cls, limit_id):\n \"\"\"Return queried budget limit by provided id.\"\"\"\n <|body_0|>\n\n async def get_user_limits(cls, user_id):\n \"\"\"Return queried user`s budget limits.\"\"\"\n <|body_1|>\n\n async def create(cls, user_id, category_id, amount):\n \"\"\"Create a new budget limit in database.\"\"\"\n <|body_2|>\n\n async def update(cls, limit_id, amount):\n \"\"\"Update budget limit instance in database.\"\"\"\n <|body_3|>\n\n async def delete(cls, limit_id):\n \"\"\"Delete budget limit by provided id.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n limit = await super().get(limit_id)\n except SQLAlchemyError as err:\n LOGGER.error('Could not retrieve budget limit by id=%s. Error: %s', limit_id, err)\n raise DatabaseError('Failed to retrieve requested budget limit.')\n if not limit:\n raise DBNoResultFoundError('The requested budget limit does not exist.')\n return limit\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n limits = await db.select([cls.id, cast(cls.amount, db.String).label('balance'), MCCCategory.name, MCCCategory.info]).select_from(cls.join(MCCCategory)).where(cls.user_id == user_id).gino.all()\n except SQLAlchemyError as err:\n LOGGER.error('Could not retrieve budget limits for user=%s. Error: %s', user_id, err)\n raise DatabaseError('Failed to retrieve budget limits for requested user.')\n return [dict(limit) for limit in limits]\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n return await super().create(user_id=user_id, category_id=category_id, amount=amount)\n except exceptions.UniqueViolationError:\n raise DatabaseError('A limit with such category for requested user already exists.')\n except SQLAlchemyError as err:\n LOGGER.error('Could not create limit with category=%s for user=%s. Error: %s', category_id, user_id, err)\n raise DatabaseError('Failed to create limit budget for requested user.')\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n status, _ = await super().update.values(amount=amount).where(cls.id == limit_id).gino.status()\n except SQLAlchemyError as err:\n LOGGER.error('Could not update budget limit=%s. 
Error: %s', limit_id, err)\n raise DatabaseError('Failed to update budget limit.')\n updated = parse_status(status)\n if not updated:\n raise DatabaseError('The budget limit was not updated.')\n<|end_body_3|>\n\n<|body_start_4|>\n try:\n status, _ = await super().delete.where(cls.id == limit_id).gino.status()\n except SQLAlchemyError as err:\n LOGGER.error('Could not delete budget limit by id=%s. Error: %s', limit_id, err)\n raise DatabaseError('Failed to delete budget limit.')\n deleted = parse_status(status)\n if not deleted:\n raise DatabaseError('The budget category limit was not deleted.')\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000115", "length_bytes": 4202, "license_type": "permissive", "methods": [{"docstring": "Return queried budget limit by provided id.", "name": "get_by_id", "signature": "async def get_by_id(cls, limit_id)"}, {"docstring": "Return queried user`s budget limits.", "name": "get_user_limits", "signature": "async def get_user_limits(cls, user_id)"}, {"docstring": "Create a new budget limit in database.", "name": "create", "signature": "async def create(cls, user_id, category_id, amount)"}, {"docstring": "Update budget limit instance in database.", "name": "update", "signature": "async def update(cls, limit_id, amount)"}, {"docstring": "Delete budget limit by provided id.", "name": "delete", "signature": "async def delete(cls, limit_id)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_015915", "prompt": "Implement the Python class `Limit` described below.\n\nClass description:\nClass that represents Budget Limit in system.\n\nMethod signatures and docstrings:\n- async def get_by_id(cls, limit_id): Return queried budget limit by provided id.\n- async def get_user_limits(cls, user_id): Return queried user`s budget limits.\n- async def create(cls, user_id, category_id, amount): Create a new budget limit in database.\n- async def update(cls, limit_id, amount): Update budget limit instance in database.\n- async def delete(cls, limit_id): Delete budget limit by provided id.", "prompted_full_text": "Implement the Python class `Limit` described below.\n\nClass description:\nClass that represents Budget Limit in system.\n\nMethod signatures and docstrings:\n- async def get_by_id(cls, limit_id): Return queried budget limit by provided id.\n- async def get_user_limits(cls, user_id): Return queried user`s budget limits.\n- async def create(cls, user_id, category_id, amount): Create a new budget limit in database.\n- async def update(cls, limit_id, amount): Update budget limit instance in database.\n- async def delete(cls, limit_id): Delete budget limit by provided id.\n\n<|skeleton|>\nclass Limit:\n \"\"\"Class that represents Budget Limit in system.\"\"\"\n\n async def get_by_id(cls, limit_id):\n \"\"\"Return queried budget limit by provided id.\"\"\"\n <|body_0|>\n\n async def get_user_limits(cls, user_id):\n \"\"\"Return queried user`s budget limits.\"\"\"\n <|body_1|>\n\n async def create(cls, user_id, category_id, amount):\n \"\"\"Create a new budget limit in database.\"\"\"\n <|body_2|>\n\n async def update(cls, limit_id, amount):\n \"\"\"Update budget limit instance in database.\"\"\"\n <|body_3|>\n\n async def delete(cls, limit_id):\n \"\"\"Delete budget limit by provided id.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n limit = await super().get(limit_id)\n except SQLAlchemyError as err:\n LOGGER.error('Could not retrieve budget limit by id=%s. 
Error: %s', limit_id, err)\n raise DatabaseError('Failed to retrieve requested budget limit.')\n if not limit:\n raise DBNoResultFoundError('The requested budget limit does not exist.')\n return limit\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n limits = await db.select([cls.id, cast(cls.amount, db.String).label('balance'), MCCCategory.name, MCCCategory.info]).select_from(cls.join(MCCCategory)).where(cls.user_id == user_id).gino.all()\n except SQLAlchemyError as err:\n LOGGER.error('Could not retrieve budget limits for user=%s. Error: %s', user_id, err)\n raise DatabaseError('Failed to retrieve budget limits for requested user.')\n return [dict(limit) for limit in limits]\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n return await super().create(user_id=user_id, category_id=category_id, amount=amount)\n except exceptions.UniqueViolationError:\n raise DatabaseError('A limit with such category for requested user already exists.')\n except SQLAlchemyError as err:\n LOGGER.error('Could not create limit with category=%s for user=%s. Error: %s', category_id, user_id, err)\n raise DatabaseError('Failed to create limit budget for requested user.')\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n status, _ = await super().update.values(amount=amount).where(cls.id == limit_id).gino.status()\n except SQLAlchemyError as err:\n LOGGER.error('Could not update budget limit=%s. Error: %s', limit_id, err)\n raise DatabaseError('Failed to update budget limit.')\n updated = parse_status(status)\n if not updated:\n raise DatabaseError('The budget limit was not updated.')\n<|end_body_3|>\n\n<|body_start_4|>\n try:\n status, _ = await super().delete.where(cls.id == limit_id).gino.status()\n except SQLAlchemyError as err:\n LOGGER.error('Could not delete budget limit by id=%s. Error: %s', limit_id, err)\n raise DatabaseError('Failed to delete budget limit.')\n deleted = parse_status(status)\n if not deleted:\n raise DatabaseError('The budget category limit was not deleted.')\n<|end_body_4|>\n", "revision_id": "16b7154188f08b33f84d88caea217673cf989b2b", "skeleton": "<|skeleton|>\nclass Limit:\n \"\"\"Class that represents Budget Limit in system.\"\"\"\n\n async def get_by_id(cls, limit_id):\n \"\"\"Return queried budget limit by provided id.\"\"\"\n <|body_0|>\n\n async def get_user_limits(cls, user_id):\n \"\"\"Return queried user`s budget limits.\"\"\"\n <|body_1|>\n\n async def create(cls, user_id, category_id, amount):\n \"\"\"Create a new budget limit in database.\"\"\"\n <|body_2|>\n\n async def update(cls, limit_id, amount):\n \"\"\"Update budget limit instance in database.\"\"\"\n <|body_3|>\n\n async def delete(cls, limit_id):\n \"\"\"Delete budget limit by provided id.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Limit:\n \"\"\"Class that represents Budget Limit in system.\"\"\"\n\n async def get_by_id(cls, limit_id):\n \"\"\"Return queried budget limit by provided id.\"\"\"\n try:\n limit = await super().get(limit_id)\n except SQLAlchemyError as err:\n LOGGER.error('Could not retrieve budget limit by id=%s. 
Error: %s', limit_id, err)\n raise DatabaseError('Failed to retrieve requested budget limit.')\n if not limit:\n raise DBNoResultFoundError('The requested budget limit does not exist.')\n return limit\n\n async def get_user_limits(cls, user_id):\n \"\"\"Return queried user`s budget limits.\"\"\"\n try:\n limits = await db.select([cls.id, cast(cls.amount, db.String).label('balance'), MCCCategory.name, MCCCategory.info]).select_from(cls.join(MCCCategory)).where(cls.user_id == user_id).gino.all()\n except SQLAlchemyError as err:\n LOGGER.error('Could not retrieve budget limits for user=%s. Error: %s', user_id, err)\n raise DatabaseError('Failed to retrieve budget limits for requested user.')\n return [dict(limit) for limit in limits]\n\n async def create(cls, user_id, category_id, amount):\n \"\"\"Create a new budget limit in database.\"\"\"\n try:\n return await super().create(user_id=user_id, category_id=category_id, amount=amount)\n except exceptions.UniqueViolationError:\n raise DatabaseError('A limit with such category for requested user already exists.')\n except SQLAlchemyError as err:\n LOGGER.error('Could not create limit with category=%s for user=%s. Error: %s', category_id, user_id, err)\n raise DatabaseError('Failed to create limit budget for requested user.')\n\n async def update(cls, limit_id, amount):\n \"\"\"Update budget limit instance in database.\"\"\"\n try:\n status, _ = await super().update.values(amount=amount).where(cls.id == limit_id).gino.status()\n except SQLAlchemyError as err:\n LOGGER.error('Could not update budget limit=%s. Error: %s', limit_id, err)\n raise DatabaseError('Failed to update budget limit.')\n updated = parse_status(status)\n if not updated:\n raise DatabaseError('The budget limit was not updated.')\n\n async def delete(cls, limit_id):\n \"\"\"Delete budget limit by provided id.\"\"\"\n try:\n status, _ = await super().delete.where(cls.id == limit_id).gino.status()\n except SQLAlchemyError as err:\n LOGGER.error('Could not delete budget limit by id=%s. Error: %s', limit_id, err)\n raise DatabaseError('Failed to delete budget limit.')\n deleted = parse_status(status)\n if not deleted:\n raise DatabaseError('The budget category limit was not deleted.')\n", "source": "the_stack_v2_python_sparse", "source_path": "server/app/models/limit.py", "source_repo": "SpentlessInc/spentless-server", "split": "val", "star_events_count": 0}
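The Limit record leans on a parse_status helper it never defines. Gino's .gino.status() surfaces asyncpg command tags such as 'UPDATE 1' or 'DELETE 0', so a purely illustrative guess at the helper (not the repo's actual code) is:

def parse_status(status: str) -> int:
    """Illustrative only: pull the affected-row count out of a command tag.

    'UPDATE 1' -> 1 (truthy) and 'DELETE 0' -> 0 (falsy), matching the
    record's `if not updated:` / `if not deleted:` checks.
    """
    return int(status.rsplit(' ', 1)[-1])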
{"blob_id": "5241b46b0a221dbd11fb94c487e41e37e2946931", "bodies": ["super().__init__(shape, low, high, descriptions, default_action)\nself.scale_action_space = scale_action_space\nself.scaled_up_action_space_bounds = scaled_up_action_space_bounds", "if not self.scale_action_space:\n return actions\nscaled_actions = list()\nfor action, low, high, scaled_low, scaled_high in zip(actions, self.low, self.high, self.scaled_up_action_space_bounds[ModelMetadataKeys.LOW.value], self.scaled_up_action_space_bounds[ModelMetadataKeys.HIGH.value]):\n scaled_actions.append(self._scale_value(action, low, high, scaled_low, scaled_high))\nreturn scaled_actions", "if max_old == min_old:\n log_and_exit('Unsupported minimum and maximum action space bounds for scaling values. min_old: {}; max_old: {}'.format(min_old, max_old), SIMAPP_SIMULATION_WORKER_EXCEPTION, SIMAPP_EVENT_ERROR_CODE_500)\nreturn (max_new - min_new) / (max_old - min_old) * (action - min_old) + min_new"], "bodies_text": "<|body_start_0|>\n super().__init__(shape, low, high, descriptions, default_action)\n self.scale_action_space = scale_action_space\n self.scaled_up_action_space_bounds = scaled_up_action_space_bounds\n<|end_body_0|>\n\n<|body_start_1|>\n if not self.scale_action_space:\n return actions\n scaled_actions = list()\n for action, low, high, scaled_low, scaled_high in zip(actions, self.low, self.high, self.scaled_up_action_space_bounds[ModelMetadataKeys.LOW.value], self.scaled_up_action_space_bounds[ModelMetadataKeys.HIGH.value]):\n scaled_actions.append(self._scale_value(action, low, high, scaled_low, scaled_high))\n return scaled_actions\n<|end_body_1|>\n\n<|body_start_2|>\n if max_old == min_old:\n log_and_exit('Unsupported minimum and maximum action space bounds for scaling values. min_old: {}; max_old: {}'.format(min_old, max_old), SIMAPP_SIMULATION_WORKER_EXCEPTION, SIMAPP_EVENT_ERROR_CODE_500)\n return (max_new - min_new) / (max_old - min_old) * (action - min_old) + min_new\n<|end_body_2|>\n", "class_docstring": "", "class_name": "ScalableBoxActionSpace", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ScalableBoxActionSpace:\n\n def __init__(self, shape: Union[int, np.ndarray], low: Union[None, int, float, np.ndarray]=-np.inf, high: Union[None, int, float, np.ndarray]=np.inf, descriptions: Union[None, List, Dict]=None, default_action: np.ndarray=None, scale_action_space: bool=False, scaled_up_action_space_bounds: Dict={ModelMetadataKeys.LOW.value: -np.inf, ModelMetadataKeys.HIGH.value: np.inf}):\n \"\"\"This class extends BoxActionSpace and adds ability to scale the actions Args: shape (Union[int, np.ndarray]): int or array value of the shape of the action space low (Union[None, int, float, np.ndarray], optional): higher bound of the action space. Defaults to -np.inf. high (Union[None, int, float, np.ndarray], optional): higher bound of the action space. Defaults to np.inf. descriptions (Union[None, List, Dict], optional): description set for each action value. Defaults to None. default_action (np.ndarray, optional): default action value. Defaults to None. scale_action_space (bool, optional): boolean value to indicate if scaling needs to be done. Defaults to False. 
scaled_up_action_space_bo\"\"\"\n <|body_0|>\n\n def scale_action_values(self, actions):\n \"\"\"Return the action space for the training algorithm Args: actions (list(float)): The list of actions that need to be scaled Returns: list(float): scaled/unscaled actions depending on the scale_action_space value set\"\"\"\n <|body_1|>\n\n def _scale_value(self, action, min_old, max_old, min_new, max_new):\n \"\"\"Return the scaled action value from min_old,max_old to min_new,max_new Args: action (float): The action value to be scaled min_old (float): The minimum bound value before scaling max_old (float): The maximum bound value before scaling min_new (float): The minimum bound value after scaling max_new (float): The maximum bound value after scaling Returns: (float): scaled action value\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(shape, low, high, descriptions, default_action)\n self.scale_action_space = scale_action_space\n self.scaled_up_action_space_bounds = scaled_up_action_space_bounds\n<|end_body_0|>\n\n<|body_start_1|>\n if not self.scale_action_space:\n return actions\n scaled_actions = list()\n for action, low, high, scaled_low, scaled_high in zip(actions, self.low, self.high, self.scaled_up_action_space_bounds[ModelMetadataKeys.LOW.value], self.scaled_up_action_space_bounds[ModelMetadataKeys.HIGH.value]):\n scaled_actions.append(self._scale_value(action, low, high, scaled_low, scaled_high))\n return scaled_actions\n<|end_body_1|>\n\n<|body_start_2|>\n if max_old == min_old:\n log_and_exit('Unsupported minimum and maximum action space bounds for scaling values. min_old: {}; max_old: {}'.format(min_old, max_old), SIMAPP_SIMULATION_WORKER_EXCEPTION, SIMAPP_EVENT_ERROR_CODE_500)\n return (max_new - min_new) / (max_old - min_old) * (action - min_old) + min_new\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000116", "length_bytes": 4346, "license_type": "permissive", "methods": [{"docstring": "This class extends BoxActionSpace and adds ability to scale the actions Args: shape (Union[int, np.ndarray]): int or array value of the shape of the action space low (Union[None, int, float, np.ndarray], optional): higher bound of the action space. Defaults to -np.inf. high (Union[None, int, float, np.ndarray], optional): higher bound of the action space. Defaults to np.inf. descriptions (Union[None, List, Dict], optional): description set for each action value. Defaults to None. default_action (np.ndarray, optional): default action value. Defaults to None. scale_action_space (bool, optional): boolean value to indicate if scaling needs to be done. Defaults to False. 
scaled_up_action_space_bo", "name": "__init__", "signature": "def __init__(self, shape: Union[int, np.ndarray], low: Union[None, int, float, np.ndarray]=-np.inf, high: Union[None, int, float, np.ndarray]=np.inf, descriptions: Union[None, List, Dict]=None, default_action: np.ndarray=None, scale_action_space: bool=False, scaled_up_action_space_bounds: Dict={ModelMetadataKeys.LOW.value: -np.inf, ModelMetadataKeys.HIGH.value: np.inf})"}, {"docstring": "Return the action space for the training algorithm Args: actions (list(float)): The list of actions that need to be scaled Returns: list(float): scaled/unscaled actions depending on the scale_action_space value set", "name": "scale_action_values", "signature": "def scale_action_values(self, actions)"}, {"docstring": "Return the scaled action value from min_old,max_old to min_new,max_new Args: action (float): The action value to be scaled min_old (float): The minimum bound value before scaling max_old (float): The maximum bound value before scaling min_new (float): The minimum bound value after scaling max_new (float): The maximum bound value after scaling Returns: (float): scaled action value", "name": "_scale_value", "signature": "def _scale_value(self, action, min_old, max_old, min_new, max_new)"}], "n_methods": 3, "prompt": "Implement the Python class `ScalableBoxActionSpace` described below.\n\nClass description:\nImplement the ScalableBoxActionSpace class.\n\nMethod signatures and docstrings:\n- def __init__(self, shape: Union[int, np.ndarray], low: Union[None, int, float, np.ndarray]=-np.inf, high: Union[None, int, float, np.ndarray]=np.inf, descriptions: Union[None, List, Dict]=None, default_action: np.ndarray=None, scale_action_space: bool=False, scaled_up_action_space_bounds: Dict={ModelMetadataKeys.LOW.value: -np.inf, ModelMetadataKeys.HIGH.value: np.inf}): This class extends BoxActionSpace and adds ability to scale the actions Args: shape (Union[int, np.ndarray]): int or array value of the shape of the action space low (Union[None, int, float, np.ndarray], optional): higher bound of the action space. Defaults to -np.inf. high (Union[None, int, float, np.ndarray], optional): higher bound of the action space. Defaults to np.inf. descriptions (Union[None, List, Dict], optional): description set for each action value. Defaults to None. default_action (np.ndarray, optional): default action value. Defaults to None. scale_action_space (bool, optional): boolean value to indicate if scaling needs to be done. Defaults to False. 
scaled_up_action_space_bo\n- def scale_action_values(self, actions): Return the action space for the training algorithm Args: actions (list(float)): The list of actions that need to be scaled Returns: list(float): scaled/unscaled actions depending on the scale_action_space value set\n- def _scale_value(self, action, min_old, max_old, min_new, max_new): Return the scaled action value from min_old,max_old to min_new,max_new Args: action (float): The action value to be scaled min_old (float): The minimum bound value before scaling max_old (float): The maximum bound value before scaling min_new (float): The minimum bound value after scaling max_new (float): The maximum bound value after scaling Returns: (float): scaled action value", "prompted_full_text": "Implement the Python class `ScalableBoxActionSpace` described below.\n\nClass description:\nImplement the ScalableBoxActionSpace class.\n\nMethod signatures and docstrings:\n- def __init__(self, shape: Union[int, np.ndarray], low: Union[None, int, float, np.ndarray]=-np.inf, high: Union[None, int, float, np.ndarray]=np.inf, descriptions: Union[None, List, Dict]=None, default_action: np.ndarray=None, scale_action_space: bool=False, scaled_up_action_space_bounds: Dict={ModelMetadataKeys.LOW.value: -np.inf, ModelMetadataKeys.HIGH.value: np.inf}): This class extends BoxActionSpace and adds ability to scale the actions Args: shape (Union[int, np.ndarray]): int or array value of the shape of the action space low (Union[None, int, float, np.ndarray], optional): higher bound of the action space. Defaults to -np.inf. high (Union[None, int, float, np.ndarray], optional): higher bound of the action space. Defaults to np.inf. descriptions (Union[None, List, Dict], optional): description set for each action value. Defaults to None. default_action (np.ndarray, optional): default action value. Defaults to None. scale_action_space (bool, optional): boolean value to indicate if scaling needs to be done. Defaults to False. scaled_up_action_space_bo\n- def scale_action_values(self, actions): Return the action space for the training algorithm Args: actions (list(float)): The list of actions that need to be scaled Returns: list(float): scaled/unscaled actions depending on the scale_action_space value set\n- def _scale_value(self, action, min_old, max_old, min_new, max_new): Return the scaled action value from min_old,max_old to min_new,max_new Args: action (float): The action value to be scaled min_old (float): The minimum bound value before scaling max_old (float): The maximum bound value before scaling min_new (float): The minimum bound value after scaling max_new (float): The maximum bound value after scaling Returns: (float): scaled action value\n\n<|skeleton|>\nclass ScalableBoxActionSpace:\n\n def __init__(self, shape: Union[int, np.ndarray], low: Union[None, int, float, np.ndarray]=-np.inf, high: Union[None, int, float, np.ndarray]=np.inf, descriptions: Union[None, List, Dict]=None, default_action: np.ndarray=None, scale_action_space: bool=False, scaled_up_action_space_bounds: Dict={ModelMetadataKeys.LOW.value: -np.inf, ModelMetadataKeys.HIGH.value: np.inf}):\n \"\"\"This class extends BoxActionSpace and adds ability to scale the actions Args: shape (Union[int, np.ndarray]): int or array value of the shape of the action space low (Union[None, int, float, np.ndarray], optional): higher bound of the action space. Defaults to -np.inf. high (Union[None, int, float, np.ndarray], optional): higher bound of the action space. Defaults to np.inf. 
descriptions (Union[None, List, Dict], optional): description set for each action value. Defaults to None. default_action (np.ndarray, optional): default action value. Defaults to None. scale_action_space (bool, optional): boolean value to indicate if scaling needs to be done. Defaults to False. scaled_up_action_space_bo\"\"\"\n <|body_0|>\n\n def scale_action_values(self, actions):\n \"\"\"Return the action space for the training algorithm Args: actions (list(float)): The list of actions that need to be scaled Returns: list(float): scaled/unscaled actions depending on the scale_action_space value set\"\"\"\n <|body_1|>\n\n def _scale_value(self, action, min_old, max_old, min_new, max_new):\n \"\"\"Return the scaled action value from min_old,max_old to min_new,max_new Args: action (float): The action value to be scaled min_old (float): The minimum bound value before scaling max_old (float): The maximum bound value before scaling min_new (float): The minimum bound value after scaling max_new (float): The maximum bound value after scaling Returns: (float): scaled action value\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(shape, low, high, descriptions, default_action)\n self.scale_action_space = scale_action_space\n self.scaled_up_action_space_bounds = scaled_up_action_space_bounds\n<|end_body_0|>\n\n<|body_start_1|>\n if not self.scale_action_space:\n return actions\n scaled_actions = list()\n for action, low, high, scaled_low, scaled_high in zip(actions, self.low, self.high, self.scaled_up_action_space_bounds[ModelMetadataKeys.LOW.value], self.scaled_up_action_space_bounds[ModelMetadataKeys.HIGH.value]):\n scaled_actions.append(self._scale_value(action, low, high, scaled_low, scaled_high))\n return scaled_actions\n<|end_body_1|>\n\n<|body_start_2|>\n if max_old == min_old:\n log_and_exit('Unsupported minimum and maximum action space bounds for scaling values. min_old: {}; max_old: {}'.format(min_old, max_old), SIMAPP_SIMULATION_WORKER_EXCEPTION, SIMAPP_EVENT_ERROR_CODE_500)\n return (max_new - min_new) / (max_old - min_old) * (action - min_old) + min_new\n<|end_body_2|>\n", "revision_id": "2ce50508dd4100eaef7f8729436549a801505705", "skeleton": "<|skeleton|>\nclass ScalableBoxActionSpace:\n\n def __init__(self, shape: Union[int, np.ndarray], low: Union[None, int, float, np.ndarray]=-np.inf, high: Union[None, int, float, np.ndarray]=np.inf, descriptions: Union[None, List, Dict]=None, default_action: np.ndarray=None, scale_action_space: bool=False, scaled_up_action_space_bounds: Dict={ModelMetadataKeys.LOW.value: -np.inf, ModelMetadataKeys.HIGH.value: np.inf}):\n \"\"\"This class extends BoxActionSpace and adds ability to scale the actions Args: shape (Union[int, np.ndarray]): int or array value of the shape of the action space low (Union[None, int, float, np.ndarray], optional): higher bound of the action space. Defaults to -np.inf. high (Union[None, int, float, np.ndarray], optional): higher bound of the action space. Defaults to np.inf. descriptions (Union[None, List, Dict], optional): description set for each action value. Defaults to None. default_action (np.ndarray, optional): default action value. Defaults to None. scale_action_space (bool, optional): boolean value to indicate if scaling needs to be done. Defaults to False. 
scaled_up_action_space_bo\"\"\"\n <|body_0|>\n\n def scale_action_values(self, actions):\n \"\"\"Return the action space for the training algorithm Args: actions (list(float)): The list of actions that need to be scaled Returns: list(float): scaled/unscaled actions depending on the scale_action_space value set\"\"\"\n <|body_1|>\n\n def _scale_value(self, action, min_old, max_old, min_new, max_new):\n \"\"\"Return the scaled action value from min_old,max_old to min_new,max_new Args: action (float): The action value to be scaled min_old (float): The minimum bound value before scaling max_old (float): The maximum bound value before scaling min_new (float): The minimum bound value after scaling max_new (float): The maximum bound value after scaling Returns: (float): scaled action value\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ScalableBoxActionSpace:\n def __init__(self, shape: Union[int, np.ndarray], low: Union[None, int, float, np.ndarray]=-np.inf, high: Union[None, int, float, np.ndarray]=np.inf, descriptions: Union[None, List, Dict]=None, default_action: np.ndarray=None, scale_action_space: bool=False, scaled_up_action_space_bounds: Dict={ModelMetadataKeys.LOW.value: -np.inf, ModelMetadataKeys.HIGH.value: np.inf}):\n \"\"\"This class extends BoxActionSpace and adds ability to scale the actions Args: shape (Union[int, np.ndarray]): int or array value of the shape of the action space low (Union[None, int, float, np.ndarray], optional): higher bound of the action space. Defaults to -np.inf. high (Union[None, int, float, np.ndarray], optional): higher bound of the action space. Defaults to np.inf. descriptions (Union[None, List, Dict], optional): description set for each action value. Defaults to None. default_action (np.ndarray, optional): default action value. Defaults to None. scale_action_space (bool, optional): boolean value to indicate if scaling needs to be done. Defaults to False. scaled_up_action_space_bo\"\"\"\n super().__init__(shape, low, high, descriptions, default_action)\n self.scale_action_space = scale_action_space\n self.scaled_up_action_space_bounds = scaled_up_action_space_bounds\n\n def scale_action_values(self, actions):\n \"\"\"Return the action space for the training algorithm Args: actions (list(float)): The list of actions that need to be scaled Returns: list(float): scaled/unscaled actions depending on the scale_action_space value set\"\"\"\n if not self.scale_action_space:\n return actions\n scaled_actions = list()\n for action, low, high, scaled_low, scaled_high in zip(actions, self.low, self.high, self.scaled_up_action_space_bounds[ModelMetadataKeys.LOW.value], self.scaled_up_action_space_bounds[ModelMetadataKeys.HIGH.value]):\n scaled_actions.append(self._scale_value(action, low, high, scaled_low, scaled_high))\n return scaled_actions\n\n def _scale_value(self, action, min_old, max_old, min_new, max_new):\n \"\"\"Return the scaled action value from min_old,max_old to min_new,max_new Args: action (float): The action value to be scaled min_old (float): The minimum bound value before scaling max_old (float): The maximum bound value before scaling min_new (float): The minimum bound value after scaling max_new (float): The maximum bound value after scaling Returns: (float): scaled action value\"\"\"\n if max_old == min_old:\n log_and_exit('Unsupported minimum and maximum action space bounds for scaling values. 
min_old: {}; max_old: {}'.format(min_old, max_old), SIMAPP_SIMULATION_WORKER_EXCEPTION, SIMAPP_EVENT_ERROR_CODE_500)\n return (max_new - min_new) / (max_old - min_old) * (action - min_old) + min_new\n", "source": "the_stack_v2_python_sparse", "source_path": "bundle/markov/multi_agent_coach/spaces.py", "source_repo": "aws-deepracer-community/deepracer-simapp", "split": "val", "star_events_count": 83}
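Aside: the record above centres on one piece of arithmetic. `_scale_value` maps an action linearly from [min_old, max_old] onto [min_new, max_new], and `scale_action_values` simply applies it element-wise when scaling is enabled. A minimal standalone sketch of that mapping follows; `log_and_exit` and the SIMAPP error constants belong to the DeepRacer simulation app, so a plain ValueError stands in here, and the example bounds are illustrative rather than taken from the record.

def scale_value(action, min_old, max_old, min_new, max_new):
    # Guard against a degenerate source interval, which would divide by zero
    # (the original calls log_and_exit with SIMAPP error codes instead).
    if max_old == min_old:
        raise ValueError(
            'cannot scale: min_old ({}) equals max_old ({})'.format(min_old, max_old))
    # Linear interpolation: shift into [0, max_old - min_old], rescale, shift back.
    return (max_new - min_new) / (max_old - min_old) * (action - min_old) + min_new

# e.g. a normalised steering action in [-1, 1] scaled up to [-30, 30] degrees:
assert scale_value(0.5, -1.0, 1.0, -30.0, 30.0) == 15.0

Note that when scale_action_space is False the action list passes through untouched, so callers can toggle scaling purely via the model-metadata bounds.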
{"blob_id": "109a7f4b043dc9bb993cd3ba83c004b66adc1b9c", "bodies": ["plugin = NeighbourSelection()\nx_points = np.array([0])\ny_points = np.array([0])\nresult = plugin.geocentric_cartesian(self.global_orography, x_points, y_points)\nradius = self.global_orography.coord_system().semi_major_axis\nexpected = [[radius, 0, 0]]\nself.assertArrayAlmostEqual(result, expected)", "plugin = NeighbourSelection()\nx_points = np.array([0])\ny_points = np.array([90])\nresult = plugin.geocentric_cartesian(self.global_orography, x_points, y_points)\nradius = self.global_orography.coord_system().semi_major_axis\nexpected = [[0, 0, radius]]\nself.assertArrayAlmostEqual(result, expected)", "plugin = NeighbourSelection()\nx_points = np.array([0])\ny_points = np.array([45])\nresult = plugin.geocentric_cartesian(self.global_orography, x_points, y_points)\nradius = self.global_orography.coord_system().semi_major_axis\ncomponent = radius / np.sqrt(2.0)\nexpected = [[component, 0, component]]\nself.assertArrayAlmostEqual(result, expected)", "plugin = NeighbourSelection()\nx_points = np.array([45])\ny_points = np.array([0])\nresult = plugin.geocentric_cartesian(self.global_orography, x_points, y_points)\nradius = self.global_orography.coord_system().semi_major_axis\ncomponent = radius / np.sqrt(2.0)\nexpected = [[component, component, 0]]\nself.assertArrayAlmostEqual(result, expected)", "plugin = NeighbourSelection()\nx_points = np.array([45])\ny_points = np.array([45])\nresult = plugin.geocentric_cartesian(self.global_orography, x_points, y_points)\nradius = self.global_orography.coord_system().semi_major_axis\ncomponent = radius / np.sqrt(2.0)\nsub_component = component / np.sqrt(2.0)\nexpected = [[sub_component, sub_component, component]]\nself.assertArrayAlmostEqual(result, expected)", "plugin = NeighbourSelection()\nx_points = np.array([-45])\ny_points = np.array([-45])\nresult = plugin.geocentric_cartesian(self.global_orography, x_points, y_points)\nradius = self.global_orography.coord_system().semi_major_axis\ncomponent = radius / np.sqrt(2.0)\nsub_component = component / np.sqrt(2.0)\nexpected = [[sub_component, -sub_component, -component]]\nself.assertArrayAlmostEqual(result, expected)"], "bodies_text": "<|body_start_0|>\n plugin = NeighbourSelection()\n x_points = np.array([0])\n y_points = np.array([0])\n result = plugin.geocentric_cartesian(self.global_orography, x_points, y_points)\n radius = self.global_orography.coord_system().semi_major_axis\n expected = [[radius, 0, 0]]\n self.assertArrayAlmostEqual(result, expected)\n<|end_body_0|>\n\n<|body_start_1|>\n plugin = NeighbourSelection()\n x_points = np.array([0])\n y_points = np.array([90])\n result = plugin.geocentric_cartesian(self.global_orography, x_points, y_points)\n radius = self.global_orography.coord_system().semi_major_axis\n expected = [[0, 0, radius]]\n self.assertArrayAlmostEqual(result, expected)\n<|end_body_1|>\n\n<|body_start_2|>\n plugin = NeighbourSelection()\n x_points = np.array([0])\n y_points = np.array([45])\n result = plugin.geocentric_cartesian(self.global_orography, x_points, y_points)\n radius = self.global_orography.coord_system().semi_major_axis\n component = radius / np.sqrt(2.0)\n expected = [[component, 0, component]]\n self.assertArrayAlmostEqual(result, expected)\n<|end_body_2|>\n\n<|body_start_3|>\n plugin = NeighbourSelection()\n x_points = np.array([45])\n y_points = np.array([0])\n result = plugin.geocentric_cartesian(self.global_orography, x_points, y_points)\n radius = 
self.global_orography.coord_system().semi_major_axis\n component = radius / np.sqrt(2.0)\n expected = [[component, component, 0]]\n self.assertArrayAlmostEqual(result, expected)\n<|end_body_3|>\n\n<|body_start_4|>\n plugin = NeighbourSelection()\n x_points = np.array([45])\n y_points = np.array([45])\n result = plugin.geocentric_cartesian(self.global_orography, x_points, y_points)\n radius = self.global_orography.coord_system().semi_major_axis\n component = radius / np.sqrt(2.0)\n sub_component = component / np.sqrt(2.0)\n expected = [[sub_component, sub_component, component]]\n self.assertArrayAlmostEqual(result, expected)\n<|end_body_4|>\n\n<|body_start_5|>\n plugin = NeighbourSelection()\n x_points = np.array([-45])\n y_points = np.array([-45])\n result = plugin.geocentric_cartesian(self.global_orography, x_points, y_points)\n radius = self.global_orography.coord_system().semi_major_axis\n component = radius / np.sqrt(2.0)\n sub_component = component / np.sqrt(2.0)\n expected = [[sub_component, -sub_component, -component]]\n self.assertArrayAlmostEqual(result, expected)\n<|end_body_5|>\n", "class_docstring": "Test conversion of global coordinates to geocentric cartesians. In this coordinate system, x and y are in the equitorial plane, and z is towards the poles.", "class_name": "Test_geocentric_cartesian", "detected_licenses": ["BSD-3-Clause", "LicenseRef-scancode-proprietary-license"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Test_geocentric_cartesian:\n \"\"\"Test conversion of global coordinates to geocentric cartesians. In this coordinate system, x and y are in the equitorial plane, and z is towards the poles.\"\"\"\n\n def test_basic(self):\n \"\"\"Test a (0, 0) coordinate conversion to geocentric cartesian. This is expected to give an x coordinate which is the semi-major axis of the globe defined in the global coordinate system.\"\"\"\n <|body_0|>\n\n def test_north_pole(self):\n \"\"\"Test a (0, 90) coordinate conversion to geocentric cartesian, this being the north pole. This is expected to give an x coordinate which 0 and a z coordinate equivalent to the semi-major axis of the globe defined in the global coordinate system.\"\"\"\n <|body_1|>\n\n def test_45_degrees_latitude(self):\n \"\"\"Test a (0, 45) coordinate conversion to geocentric cartesian. In this case the components of the semi-major axis of the globe defined in the global coordinate system should be shared between the resulting x and z coordinates.\"\"\"\n <|body_2|>\n\n def test_45_degrees_longitude(self):\n \"\"\"Test a (45, 0) coordinate conversion to geocentric cartesian. In this case the components of the semi-major axis of the globe defined in the global coordinate system should be shared between the resulting x and y coordinates.\"\"\"\n <|body_3|>\n\n def test_45_degrees_latitude_and_longitude(self):\n \"\"\"Test a (45, 45) coordinate conversion to geocentric cartesian. In this case the z component should be a cos(45) component of the semi-major axis of the globe defined in the global coordinate system. The x and y coordinates should be cos(45) components of the remaining cos(45) component of the semi-major axis.\"\"\"\n <|body_4|>\n\n def test_negative_45_degrees_latitude_and_longitude(self):\n \"\"\"Test a (-45, -45) coordinate conversion to geocentric cartesian. 
In this case the x is expected to remain positive, whilst y and z become negative.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n plugin = NeighbourSelection()\n x_points = np.array([0])\n y_points = np.array([0])\n result = plugin.geocentric_cartesian(self.global_orography, x_points, y_points)\n radius = self.global_orography.coord_system().semi_major_axis\n expected = [[radius, 0, 0]]\n self.assertArrayAlmostEqual(result, expected)\n<|end_body_0|>\n\n<|body_start_1|>\n plugin = NeighbourSelection()\n x_points = np.array([0])\n y_points = np.array([90])\n result = plugin.geocentric_cartesian(self.global_orography, x_points, y_points)\n radius = self.global_orography.coord_system().semi_major_axis\n expected = [[0, 0, radius]]\n self.assertArrayAlmostEqual(result, expected)\n<|end_body_1|>\n\n<|body_start_2|>\n plugin = NeighbourSelection()\n x_points = np.array([0])\n y_points = np.array([45])\n result = plugin.geocentric_cartesian(self.global_orography, x_points, y_points)\n radius = self.global_orography.coord_system().semi_major_axis\n component = radius / np.sqrt(2.0)\n expected = [[component, 0, component]]\n self.assertArrayAlmostEqual(result, expected)\n<|end_body_2|>\n\n<|body_start_3|>\n plugin = NeighbourSelection()\n x_points = np.array([45])\n y_points = np.array([0])\n result = plugin.geocentric_cartesian(self.global_orography, x_points, y_points)\n radius = self.global_orography.coord_system().semi_major_axis\n component = radius / np.sqrt(2.0)\n expected = [[component, component, 0]]\n self.assertArrayAlmostEqual(result, expected)\n<|end_body_3|>\n\n<|body_start_4|>\n plugin = NeighbourSelection()\n x_points = np.array([45])\n y_points = np.array([45])\n result = plugin.geocentric_cartesian(self.global_orography, x_points, y_points)\n radius = self.global_orography.coord_system().semi_major_axis\n component = radius / np.sqrt(2.0)\n sub_component = component / np.sqrt(2.0)\n expected = [[sub_component, sub_component, component]]\n self.assertArrayAlmostEqual(result, expected)\n<|end_body_4|>\n\n<|body_start_5|>\n plugin = NeighbourSelection()\n x_points = np.array([-45])\n y_points = np.array([-45])\n result = plugin.geocentric_cartesian(self.global_orography, x_points, y_points)\n radius = self.global_orography.coord_system().semi_major_axis\n component = radius / np.sqrt(2.0)\n sub_component = component / np.sqrt(2.0)\n expected = [[sub_component, -sub_component, -component]]\n self.assertArrayAlmostEqual(result, expected)\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000117", "length_bytes": 40371, "license_type": "permissive", "methods": [{"docstring": "Test a (0, 0) coordinate conversion to geocentric cartesian. This is expected to give an x coordinate which is the semi-major axis of the globe defined in the global coordinate system.", "name": "test_basic", "signature": "def test_basic(self)"}, {"docstring": "Test a (0, 90) coordinate conversion to geocentric cartesian, this being the north pole. This is expected to give an x coordinate which 0 and a z coordinate equivalent to the semi-major axis of the globe defined in the global coordinate system.", "name": "test_north_pole", "signature": "def test_north_pole(self)"}, {"docstring": "Test a (0, 45) coordinate conversion to geocentric cartesian. 
In this case the components of the semi-major axis of the globe defined in the global coordinate system should be shared between the resulting x and z coordinates.", "name": "test_45_degrees_latitude", "signature": "def test_45_degrees_latitude(self)"}, {"docstring": "Test a (45, 0) coordinate conversion to geocentric cartesian. In this case the components of the semi-major axis of the globe defined in the global coordinate system should be shared between the resulting x and y coordinates.", "name": "test_45_degrees_longitude", "signature": "def test_45_degrees_longitude(self)"}, {"docstring": "Test a (45, 45) coordinate conversion to geocentric cartesian. In this case the z component should be a cos(45) component of the semi-major axis of the globe defined in the global coordinate system. The x and y coordinates should be cos(45) components of the remaining cos(45) component of the semi-major axis.", "name": "test_45_degrees_latitude_and_longitude", "signature": "def test_45_degrees_latitude_and_longitude(self)"}, {"docstring": "Test a (-45, -45) coordinate conversion to geocentric cartesian. In this case the x is expected to remain positive, whilst y and z become negative.", "name": "test_negative_45_degrees_latitude_and_longitude", "signature": "def test_negative_45_degrees_latitude_and_longitude(self)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_val_002660", "prompt": "Implement the Python class `Test_geocentric_cartesian` described below.\n\nClass description:\nTest conversion of global coordinates to geocentric cartesians. In this coordinate system, x and y are in the equitorial plane, and z is towards the poles.\n\nMethod signatures and docstrings:\n- def test_basic(self): Test a (0, 0) coordinate conversion to geocentric cartesian. This is expected to give an x coordinate which is the semi-major axis of the globe defined in the global coordinate system.\n- def test_north_pole(self): Test a (0, 90) coordinate conversion to geocentric cartesian, this being the north pole. This is expected to give an x coordinate which 0 and a z coordinate equivalent to the semi-major axis of the globe defined in the global coordinate system.\n- def test_45_degrees_latitude(self): Test a (0, 45) coordinate conversion to geocentric cartesian. In this case the components of the semi-major axis of the globe defined in the global coordinate system should be shared between the resulting x and z coordinates.\n- def test_45_degrees_longitude(self): Test a (45, 0) coordinate conversion to geocentric cartesian. In this case the components of the semi-major axis of the globe defined in the global coordinate system should be shared between the resulting x and y coordinates.\n- def test_45_degrees_latitude_and_longitude(self): Test a (45, 45) coordinate conversion to geocentric cartesian. In this case the z component should be a cos(45) component of the semi-major axis of the globe defined in the global coordinate system. The x and y coordinates should be cos(45) components of the remaining cos(45) component of the semi-major axis.\n- def test_negative_45_degrees_latitude_and_longitude(self): Test a (-45, -45) coordinate conversion to geocentric cartesian. In this case the x is expected to remain positive, whilst y and z become negative.", "prompted_full_text": "Implement the Python class `Test_geocentric_cartesian` described below.\n\nClass description:\nTest conversion of global coordinates to geocentric cartesians. 
In this coordinate system, x and y are in the equitorial plane, and z is towards the poles.\n\nMethod signatures and docstrings:\n- def test_basic(self): Test a (0, 0) coordinate conversion to geocentric cartesian. This is expected to give an x coordinate which is the semi-major axis of the globe defined in the global coordinate system.\n- def test_north_pole(self): Test a (0, 90) coordinate conversion to geocentric cartesian, this being the north pole. This is expected to give an x coordinate which 0 and a z coordinate equivalent to the semi-major axis of the globe defined in the global coordinate system.\n- def test_45_degrees_latitude(self): Test a (0, 45) coordinate conversion to geocentric cartesian. In this case the components of the semi-major axis of the globe defined in the global coordinate system should be shared between the resulting x and z coordinates.\n- def test_45_degrees_longitude(self): Test a (45, 0) coordinate conversion to geocentric cartesian. In this case the components of the semi-major axis of the globe defined in the global coordinate system should be shared between the resulting x and y coordinates.\n- def test_45_degrees_latitude_and_longitude(self): Test a (45, 45) coordinate conversion to geocentric cartesian. In this case the z component should be a cos(45) component of the semi-major axis of the globe defined in the global coordinate system. The x and y coordinates should be cos(45) components of the remaining cos(45) component of the semi-major axis.\n- def test_negative_45_degrees_latitude_and_longitude(self): Test a (-45, -45) coordinate conversion to geocentric cartesian. In this case the x is expected to remain positive, whilst y and z become negative.\n\n<|skeleton|>\nclass Test_geocentric_cartesian:\n \"\"\"Test conversion of global coordinates to geocentric cartesians. In this coordinate system, x and y are in the equitorial plane, and z is towards the poles.\"\"\"\n\n def test_basic(self):\n \"\"\"Test a (0, 0) coordinate conversion to geocentric cartesian. This is expected to give an x coordinate which is the semi-major axis of the globe defined in the global coordinate system.\"\"\"\n <|body_0|>\n\n def test_north_pole(self):\n \"\"\"Test a (0, 90) coordinate conversion to geocentric cartesian, this being the north pole. This is expected to give an x coordinate which 0 and a z coordinate equivalent to the semi-major axis of the globe defined in the global coordinate system.\"\"\"\n <|body_1|>\n\n def test_45_degrees_latitude(self):\n \"\"\"Test a (0, 45) coordinate conversion to geocentric cartesian. In this case the components of the semi-major axis of the globe defined in the global coordinate system should be shared between the resulting x and z coordinates.\"\"\"\n <|body_2|>\n\n def test_45_degrees_longitude(self):\n \"\"\"Test a (45, 0) coordinate conversion to geocentric cartesian. In this case the components of the semi-major axis of the globe defined in the global coordinate system should be shared between the resulting x and y coordinates.\"\"\"\n <|body_3|>\n\n def test_45_degrees_latitude_and_longitude(self):\n \"\"\"Test a (45, 45) coordinate conversion to geocentric cartesian. In this case the z component should be a cos(45) component of the semi-major axis of the globe defined in the global coordinate system. 
The x and y coordinates should be cos(45) components of the remaining cos(45) component of the semi-major axis.\"\"\"\n <|body_4|>\n\n def test_negative_45_degrees_latitude_and_longitude(self):\n \"\"\"Test a (-45, -45) coordinate conversion to geocentric cartesian. In this case the x is expected to remain positive, whilst y and z become negative.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n plugin = NeighbourSelection()\n x_points = np.array([0])\n y_points = np.array([0])\n result = plugin.geocentric_cartesian(self.global_orography, x_points, y_points)\n radius = self.global_orography.coord_system().semi_major_axis\n expected = [[radius, 0, 0]]\n self.assertArrayAlmostEqual(result, expected)\n<|end_body_0|>\n\n<|body_start_1|>\n plugin = NeighbourSelection()\n x_points = np.array([0])\n y_points = np.array([90])\n result = plugin.geocentric_cartesian(self.global_orography, x_points, y_points)\n radius = self.global_orography.coord_system().semi_major_axis\n expected = [[0, 0, radius]]\n self.assertArrayAlmostEqual(result, expected)\n<|end_body_1|>\n\n<|body_start_2|>\n plugin = NeighbourSelection()\n x_points = np.array([0])\n y_points = np.array([45])\n result = plugin.geocentric_cartesian(self.global_orography, x_points, y_points)\n radius = self.global_orography.coord_system().semi_major_axis\n component = radius / np.sqrt(2.0)\n expected = [[component, 0, component]]\n self.assertArrayAlmostEqual(result, expected)\n<|end_body_2|>\n\n<|body_start_3|>\n plugin = NeighbourSelection()\n x_points = np.array([45])\n y_points = np.array([0])\n result = plugin.geocentric_cartesian(self.global_orography, x_points, y_points)\n radius = self.global_orography.coord_system().semi_major_axis\n component = radius / np.sqrt(2.0)\n expected = [[component, component, 0]]\n self.assertArrayAlmostEqual(result, expected)\n<|end_body_3|>\n\n<|body_start_4|>\n plugin = NeighbourSelection()\n x_points = np.array([45])\n y_points = np.array([45])\n result = plugin.geocentric_cartesian(self.global_orography, x_points, y_points)\n radius = self.global_orography.coord_system().semi_major_axis\n component = radius / np.sqrt(2.0)\n sub_component = component / np.sqrt(2.0)\n expected = [[sub_component, sub_component, component]]\n self.assertArrayAlmostEqual(result, expected)\n<|end_body_4|>\n\n<|body_start_5|>\n plugin = NeighbourSelection()\n x_points = np.array([-45])\n y_points = np.array([-45])\n result = plugin.geocentric_cartesian(self.global_orography, x_points, y_points)\n radius = self.global_orography.coord_system().semi_major_axis\n component = radius / np.sqrt(2.0)\n sub_component = component / np.sqrt(2.0)\n expected = [[sub_component, -sub_component, -component]]\n self.assertArrayAlmostEqual(result, expected)\n<|end_body_5|>\n", "revision_id": "cd2c9019944345df1e703bf8f625db537ad9f559", "skeleton": "<|skeleton|>\nclass Test_geocentric_cartesian:\n \"\"\"Test conversion of global coordinates to geocentric cartesians. In this coordinate system, x and y are in the equitorial plane, and z is towards the poles.\"\"\"\n\n def test_basic(self):\n \"\"\"Test a (0, 0) coordinate conversion to geocentric cartesian. This is expected to give an x coordinate which is the semi-major axis of the globe defined in the global coordinate system.\"\"\"\n <|body_0|>\n\n def test_north_pole(self):\n \"\"\"Test a (0, 90) coordinate conversion to geocentric cartesian, this being the north pole. 
This is expected to give an x coordinate which 0 and a z coordinate equivalent to the semi-major axis of the globe defined in the global coordinate system.\"\"\"\n <|body_1|>\n\n def test_45_degrees_latitude(self):\n \"\"\"Test a (0, 45) coordinate conversion to geocentric cartesian. In this case the components of the semi-major axis of the globe defined in the global coordinate system should be shared between the resulting x and z coordinates.\"\"\"\n <|body_2|>\n\n def test_45_degrees_longitude(self):\n \"\"\"Test a (45, 0) coordinate conversion to geocentric cartesian. In this case the components of the semi-major axis of the globe defined in the global coordinate system should be shared between the resulting x and y coordinates.\"\"\"\n <|body_3|>\n\n def test_45_degrees_latitude_and_longitude(self):\n \"\"\"Test a (45, 45) coordinate conversion to geocentric cartesian. In this case the z component should be a cos(45) component of the semi-major axis of the globe defined in the global coordinate system. The x and y coordinates should be cos(45) components of the remaining cos(45) component of the semi-major axis.\"\"\"\n <|body_4|>\n\n def test_negative_45_degrees_latitude_and_longitude(self):\n \"\"\"Test a (-45, -45) coordinate conversion to geocentric cartesian. In this case the x is expected to remain positive, whilst y and z become negative.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Test_geocentric_cartesian:\n \"\"\"Test conversion of global coordinates to geocentric cartesians. In this coordinate system, x and y are in the equitorial plane, and z is towards the poles.\"\"\"\n\n def test_basic(self):\n \"\"\"Test a (0, 0) coordinate conversion to geocentric cartesian. This is expected to give an x coordinate which is the semi-major axis of the globe defined in the global coordinate system.\"\"\"\n plugin = NeighbourSelection()\n x_points = np.array([0])\n y_points = np.array([0])\n result = plugin.geocentric_cartesian(self.global_orography, x_points, y_points)\n radius = self.global_orography.coord_system().semi_major_axis\n expected = [[radius, 0, 0]]\n self.assertArrayAlmostEqual(result, expected)\n\n def test_north_pole(self):\n \"\"\"Test a (0, 90) coordinate conversion to geocentric cartesian, this being the north pole. This is expected to give an x coordinate which 0 and a z coordinate equivalent to the semi-major axis of the globe defined in the global coordinate system.\"\"\"\n plugin = NeighbourSelection()\n x_points = np.array([0])\n y_points = np.array([90])\n result = plugin.geocentric_cartesian(self.global_orography, x_points, y_points)\n radius = self.global_orography.coord_system().semi_major_axis\n expected = [[0, 0, radius]]\n self.assertArrayAlmostEqual(result, expected)\n\n def test_45_degrees_latitude(self):\n \"\"\"Test a (0, 45) coordinate conversion to geocentric cartesian. 
In this case the components of the semi-major axis of the globe defined in the global coordinate system should be shared between the resulting x and z coordinates.\"\"\"\n plugin = NeighbourSelection()\n x_points = np.array([0])\n y_points = np.array([45])\n result = plugin.geocentric_cartesian(self.global_orography, x_points, y_points)\n radius = self.global_orography.coord_system().semi_major_axis\n component = radius / np.sqrt(2.0)\n expected = [[component, 0, component]]\n self.assertArrayAlmostEqual(result, expected)\n\n def test_45_degrees_longitude(self):\n \"\"\"Test a (45, 0) coordinate conversion to geocentric cartesian. In this case the components of the semi-major axis of the globe defined in the global coordinate system should be shared between the resulting x and y coordinates.\"\"\"\n plugin = NeighbourSelection()\n x_points = np.array([45])\n y_points = np.array([0])\n result = plugin.geocentric_cartesian(self.global_orography, x_points, y_points)\n radius = self.global_orography.coord_system().semi_major_axis\n component = radius / np.sqrt(2.0)\n expected = [[component, component, 0]]\n self.assertArrayAlmostEqual(result, expected)\n\n def test_45_degrees_latitude_and_longitude(self):\n \"\"\"Test a (45, 45) coordinate conversion to geocentric cartesian. In this case the z component should be a cos(45) component of the semi-major axis of the globe defined in the global coordinate system. The x and y coordinates should be cos(45) components of the remaining cos(45) component of the semi-major axis.\"\"\"\n plugin = NeighbourSelection()\n x_points = np.array([45])\n y_points = np.array([45])\n result = plugin.geocentric_cartesian(self.global_orography, x_points, y_points)\n radius = self.global_orography.coord_system().semi_major_axis\n component = radius / np.sqrt(2.0)\n sub_component = component / np.sqrt(2.0)\n expected = [[sub_component, sub_component, component]]\n self.assertArrayAlmostEqual(result, expected)\n\n def test_negative_45_degrees_latitude_and_longitude(self):\n \"\"\"Test a (-45, -45) coordinate conversion to geocentric cartesian. In this case the x is expected to remain positive, whilst y and z become negative.\"\"\"\n plugin = NeighbourSelection()\n x_points = np.array([-45])\n y_points = np.array([-45])\n result = plugin.geocentric_cartesian(self.global_orography, x_points, y_points)\n radius = self.global_orography.coord_system().semi_major_axis\n component = radius / np.sqrt(2.0)\n sub_component = component / np.sqrt(2.0)\n expected = [[sub_component, -sub_component, -component]]\n self.assertArrayAlmostEqual(result, expected)\n", "source": "the_stack_v2_python_sparse", "source_path": "improver_tests/spotdata/test_NeighbourSelection.py", "source_repo": "metoppv/improver", "split": "val", "star_events_count": 101}
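Aside: all six tests in the record above reduce to the same spherical trigonometry. With longitude theta and latitude phi, the expected coordinates are x = r*cos(phi)*cos(theta), y = r*cos(phi)*sin(theta), z = r*sin(phi). The standalone sketch below reproduces the expected values under the assumption of a spherical globe of radius semi_major_axis; the real NeighbourSelection.geocentric_cartesian works from the orography cube's coordinate system, so the function and variable names here are illustrative only.

import numpy as np

def geocentric_cartesian(longitudes_deg, latitudes_deg, radius):
    # Convert degrees to radians, then apply the spherical formulas;
    # z points towards the poles, x and y lie in the equatorial plane.
    lon = np.radians(np.asarray(longitudes_deg, dtype=float))
    lat = np.radians(np.asarray(latitudes_deg, dtype=float))
    x = radius * np.cos(lat) * np.cos(lon)
    y = radius * np.cos(lat) * np.sin(lon)
    z = radius * np.sin(lat)
    return np.stack([x, y, z], axis=-1)

# The (45, 45) case: z carries r/sqrt(2); the remaining r/sqrt(2) is split
# equally between x and y, giving r/2 each -- matching body 4 above.
r = 6378137.0  # illustrative radius; the tests read it from the orography cube
result = geocentric_cartesian(np.array([45.0]), np.array([45.0]), r)
assert np.allclose(result, [[r / 2.0, r / 2.0, r / np.sqrt(2.0)]])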
{"blob_id": "464981c5fb9ad2168bf1c6cd20cee6b33580630e", "bodies": ["N, channel, height, width = X.shape\nPheight, Pwidth = self.pooling\nassert (height - Pheight) % self.stride[0] == 0\nassert (width - Pwidth) % self.stride[1] == 0\nout_height = np.uint32(1 + (height - Pheight) / self.stride[0])\nout_width = np.uint32(1 + (width - Pwidth) / self.stride[1])\nA = np.zeros((N, channel, out_height, out_width))\nindices = np.zeros((N, channel, out_height, out_width))\nfor index in range(N):\n for c in range(channel):\n for i in range(0, height - Pheight + 1, self.stride[0]):\n out_i = i // self.stride[0]\n for j in range(0, width - Pwidth + 1, self.stride[1]):\n max = -np.Inf\n out_j = j // self.stride[1]\n for ii in range(0, Pheight):\n for jj in range(0, Pwidth):\n if X[index, c, i + ii, j + jj] > max:\n max = X[index, c, i + ii, j + jj]\n indices[index, c, out_i, out_j] = ii * Pwidth + jj\n A[index, c, out_i, out_j] = max\nself.cache = (X.shape, indices)\nreturn A", "X_shape, indices = self.cache\nN, channel, height, width = X_shape\nPheight, Pwidth = self.pooling\ndX = np.zeros(X_shape)\nfor index in range(N):\n for c in range(channel):\n for i in range(0, height - Pheight + 1, self.stride[0]):\n out_i = i // self.stride[0]\n for j in range(0, width - Pwidth + 1, self.stride[1]):\n out_j = j // self.stride[1]\n for ii in range(0, Pheight):\n for jj in range(0, Pwidth):\n if ii * Pwidth + jj == indices[index, c, out_i, out_j]:\n dX[index, c, i + ii, j + jj] += dA[index, c, out_i, out_j]\nreturn dX"], "bodies_text": "<|body_start_0|>\n N, channel, height, width = X.shape\n Pheight, Pwidth = self.pooling\n assert (height - Pheight) % self.stride[0] == 0\n assert (width - Pwidth) % self.stride[1] == 0\n out_height = np.uint32(1 + (height - Pheight) / self.stride[0])\n out_width = np.uint32(1 + (width - Pwidth) / self.stride[1])\n A = np.zeros((N, channel, out_height, out_width))\n indices = np.zeros((N, channel, out_height, out_width))\n for index in range(N):\n for c in range(channel):\n for i in range(0, height - Pheight + 1, self.stride[0]):\n out_i = i // self.stride[0]\n for j in range(0, width - Pwidth + 1, self.stride[1]):\n max = -np.Inf\n out_j = j // self.stride[1]\n for ii in range(0, Pheight):\n for jj in range(0, Pwidth):\n if X[index, c, i + ii, j + jj] > max:\n max = X[index, c, i + ii, j + jj]\n indices[index, c, out_i, out_j] = ii * Pwidth + jj\n A[index, c, out_i, out_j] = max\n self.cache = (X.shape, indices)\n return A\n<|end_body_0|>\n\n<|body_start_1|>\n X_shape, indices = self.cache\n N, channel, height, width = X_shape\n Pheight, Pwidth = self.pooling\n dX = np.zeros(X_shape)\n for index in range(N):\n for c in range(channel):\n for i in range(0, height - Pheight + 1, self.stride[0]):\n out_i = i // self.stride[0]\n for j in range(0, width - Pwidth + 1, self.stride[1]):\n out_j = j // self.stride[1]\n for ii in range(0, Pheight):\n for jj in range(0, Pwidth):\n if ii * Pwidth + jj == indices[index, c, out_i, out_j]:\n dX[index, c, i + ii, j + jj] += dA[index, c, out_i, out_j]\n return dX\n<|end_body_1|>\n", "class_docstring": "", "class_name": "MaxPool2DNaive", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MaxPool2DNaive:\n\n def forward_npdl(self, X, **kwargs):\n \"\"\"Effectue la propagation avant d'une couche MaxPool2D Arguments: X {ndarray} -- Sortie de la couche précédente. 
Returns: ndarray -- Scores de la couche\"\"\"\n <|body_0|>\n\n def backward_npdl(self, dA, **kwargs):\n \"\"\"Effectue la rétro-propagation. Arguments: dA {ndarray} -- Gradients de la loss par rapport aux sorties. Returns: ndarray -- Dérivée de la loss par rapport au input de la couche.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n N, channel, height, width = X.shape\n Pheight, Pwidth = self.pooling\n assert (height - Pheight) % self.stride[0] == 0\n assert (width - Pwidth) % self.stride[1] == 0\n out_height = np.uint32(1 + (height - Pheight) / self.stride[0])\n out_width = np.uint32(1 + (width - Pwidth) / self.stride[1])\n A = np.zeros((N, channel, out_height, out_width))\n indices = np.zeros((N, channel, out_height, out_width))\n for index in range(N):\n for c in range(channel):\n for i in range(0, height - Pheight + 1, self.stride[0]):\n out_i = i // self.stride[0]\n for j in range(0, width - Pwidth + 1, self.stride[1]):\n max = -np.Inf\n out_j = j // self.stride[1]\n for ii in range(0, Pheight):\n for jj in range(0, Pwidth):\n if X[index, c, i + ii, j + jj] > max:\n max = X[index, c, i + ii, j + jj]\n indices[index, c, out_i, out_j] = ii * Pwidth + jj\n A[index, c, out_i, out_j] = max\n self.cache = (X.shape, indices)\n return A\n<|end_body_0|>\n\n<|body_start_1|>\n X_shape, indices = self.cache\n N, channel, height, width = X_shape\n Pheight, Pwidth = self.pooling\n dX = np.zeros(X_shape)\n for index in range(N):\n for c in range(channel):\n for i in range(0, height - Pheight + 1, self.stride[0]):\n out_i = i // self.stride[0]\n for j in range(0, width - Pwidth + 1, self.stride[1]):\n out_j = j // self.stride[1]\n for ii in range(0, Pheight):\n for jj in range(0, Pwidth):\n if ii * Pwidth + jj == indices[index, c, out_i, out_j]:\n dX[index, c, i + ii, j + jj] += dA[index, c, out_i, out_j]\n return dX\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000118", "length_bytes": 6145, "license_type": "no_license", "methods": [{"docstring": "Effectue la propagation avant d'une couche MaxPool2D Arguments: X {ndarray} -- Sortie de la couche précédente. Returns: ndarray -- Scores de la couche", "name": "forward_npdl", "signature": "def forward_npdl(self, X, **kwargs)"}, {"docstring": "Effectue la rétro-propagation. Arguments: dA {ndarray} -- Gradients de la loss par rapport aux sorties. Returns: ndarray -- Dérivée de la loss par rapport au input de la couche.", "name": "backward_npdl", "signature": "def backward_npdl(self, dA, **kwargs)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_037752", "prompt": "Implement the Python class `MaxPool2DNaive` described below.\n\nClass description:\nImplement the MaxPool2DNaive class.\n\nMethod signatures and docstrings:\n- def forward_npdl(self, X, **kwargs): Effectue la propagation avant d'une couche MaxPool2D Arguments: X {ndarray} -- Sortie de la couche précédente. Returns: ndarray -- Scores de la couche\n- def backward_npdl(self, dA, **kwargs): Effectue la rétro-propagation. Arguments: dA {ndarray} -- Gradients de la loss par rapport aux sorties. Returns: ndarray -- Dérivée de la loss par rapport au input de la couche.", "prompted_full_text": "Implement the Python class `MaxPool2DNaive` described below.\n\nClass description:\nImplement the MaxPool2DNaive class.\n\nMethod signatures and docstrings:\n- def forward_npdl(self, X, **kwargs): Effectue la propagation avant d'une couche MaxPool2D Arguments: X {ndarray} -- Sortie de la couche précédente. 
Returns: ndarray -- Scores de la couche\n- def backward_npdl(self, dA, **kwargs): Effectue la rétro-propagation. Arguments: dA {ndarray} -- Gradients de la loss par rapport aux sorties. Returns: ndarray -- Dérivée de la loss par rapport au input de la couche.\n\n<|skeleton|>\nclass MaxPool2DNaive:\n\n def forward_npdl(self, X, **kwargs):\n \"\"\"Effectue la propagation avant d'une couche MaxPool2D Arguments: X {ndarray} -- Sortie de la couche précédente. Returns: ndarray -- Scores de la couche\"\"\"\n <|body_0|>\n\n def backward_npdl(self, dA, **kwargs):\n \"\"\"Effectue la rétro-propagation. Arguments: dA {ndarray} -- Gradients de la loss par rapport aux sorties. Returns: ndarray -- Dérivée de la loss par rapport au input de la couche.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n N, channel, height, width = X.shape\n Pheight, Pwidth = self.pooling\n assert (height - Pheight) % self.stride[0] == 0\n assert (width - Pwidth) % self.stride[1] == 0\n out_height = np.uint32(1 + (height - Pheight) / self.stride[0])\n out_width = np.uint32(1 + (width - Pwidth) / self.stride[1])\n A = np.zeros((N, channel, out_height, out_width))\n indices = np.zeros((N, channel, out_height, out_width))\n for index in range(N):\n for c in range(channel):\n for i in range(0, height - Pheight + 1, self.stride[0]):\n out_i = i // self.stride[0]\n for j in range(0, width - Pwidth + 1, self.stride[1]):\n max = -np.Inf\n out_j = j // self.stride[1]\n for ii in range(0, Pheight):\n for jj in range(0, Pwidth):\n if X[index, c, i + ii, j + jj] > max:\n max = X[index, c, i + ii, j + jj]\n indices[index, c, out_i, out_j] = ii * Pwidth + jj\n A[index, c, out_i, out_j] = max\n self.cache = (X.shape, indices)\n return A\n<|end_body_0|>\n\n<|body_start_1|>\n X_shape, indices = self.cache\n N, channel, height, width = X_shape\n Pheight, Pwidth = self.pooling\n dX = np.zeros(X_shape)\n for index in range(N):\n for c in range(channel):\n for i in range(0, height - Pheight + 1, self.stride[0]):\n out_i = i // self.stride[0]\n for j in range(0, width - Pwidth + 1, self.stride[1]):\n out_j = j // self.stride[1]\n for ii in range(0, Pheight):\n for jj in range(0, Pwidth):\n if ii * Pwidth + jj == indices[index, c, out_i, out_j]:\n dX[index, c, i + ii, j + jj] += dA[index, c, out_i, out_j]\n return dX\n<|end_body_1|>\n", "revision_id": "4d1fdfa2e5f9f9e09a812a42029ceaa27d42e734", "skeleton": "<|skeleton|>\nclass MaxPool2DNaive:\n\n def forward_npdl(self, X, **kwargs):\n \"\"\"Effectue la propagation avant d'une couche MaxPool2D Arguments: X {ndarray} -- Sortie de la couche précédente. Returns: ndarray -- Scores de la couche\"\"\"\n <|body_0|>\n\n def backward_npdl(self, dA, **kwargs):\n \"\"\"Effectue la rétro-propagation. Arguments: dA {ndarray} -- Gradients de la loss par rapport aux sorties. Returns: ndarray -- Dérivée de la loss par rapport au input de la couche.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class MaxPool2DNaive:\n def forward_npdl(self, X, **kwargs):\n \"\"\"Effectue la propagation avant d'une couche MaxPool2D Arguments: X {ndarray} -- Sortie de la couche précédente. 
Returns: ndarray -- Scores de la couche\"\"\"\n N, channel, height, width = X.shape\n Pheight, Pwidth = self.pooling\n assert (height - Pheight) % self.stride[0] == 0\n assert (width - Pwidth) % self.stride[1] == 0\n out_height = np.uint32(1 + (height - Pheight) / self.stride[0])\n out_width = np.uint32(1 + (width - Pwidth) / self.stride[1])\n A = np.zeros((N, channel, out_height, out_width))\n indices = np.zeros((N, channel, out_height, out_width))\n for index in range(N):\n for c in range(channel):\n for i in range(0, height - Pheight + 1, self.stride[0]):\n out_i = i // self.stride[0]\n for j in range(0, width - Pwidth + 1, self.stride[1]):\n max = -np.Inf\n out_j = j // self.stride[1]\n for ii in range(0, Pheight):\n for jj in range(0, Pwidth):\n if X[index, c, i + ii, j + jj] > max:\n max = X[index, c, i + ii, j + jj]\n indices[index, c, out_i, out_j] = ii * Pwidth + jj\n A[index, c, out_i, out_j] = max\n self.cache = (X.shape, indices)\n return A\n\n def backward_npdl(self, dA, **kwargs):\n \"\"\"Effectue la rétro-propagation. Arguments: dA {ndarray} -- Gradients de la loss par rapport aux sorties. Returns: ndarray -- Dérivée de la loss par rapport au input de la couche.\"\"\"\n X_shape, indices = self.cache\n N, channel, height, width = X_shape\n Pheight, Pwidth = self.pooling\n dX = np.zeros(X_shape)\n for index in range(N):\n for c in range(channel):\n for i in range(0, height - Pheight + 1, self.stride[0]):\n out_i = i // self.stride[0]\n for j in range(0, width - Pwidth + 1, self.stride[1]):\n out_j = j // self.stride[1]\n for ii in range(0, Pheight):\n for jj in range(0, Pwidth):\n if ii * Pwidth + jj == indices[index, c, out_i, out_j]:\n dX[index, c, i + ii, j + jj] += dA[index, c, out_i, out_j]\n return dX\n", "source": "the_stack_v2_python_sparse", "source_path": "layers/MaxPool.py", "source_repo": "plparent/NPDL", "split": "val", "star_events_count": 0}
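Aside: the MaxPool2DNaive record above pools with explicit Python loops, scanning every window element per output cell; it is easy to read but slow. (One portability note: newer NumPy releases drop the `np.Inf` alias used in the loop version; `np.inf` is the durable spelling.) When the stride equals the pooling window and the spatial dimensions divide evenly -- the common 2x2, stride-2 case -- the same forward pass collapses to a reshape plus a max reduction. A sketch under those assumptions, not part of the original layer:

import numpy as np

def maxpool2d_forward_fast(X, pooling=(2, 2)):
    # Fold each pooling window into its own pair of axes, then reduce.
    # Assumes stride == pooling and that H, W divide evenly by the window.
    N, C, H, W = X.shape
    ph, pw = pooling
    assert H % ph == 0 and W % pw == 0, 'spatial dims must divide evenly'
    windows = X.reshape(N, C, H // ph, ph, W // pw, pw)
    return windows.max(axis=(3, 5))

X = np.random.randn(2, 3, 8, 8)
out = maxpool2d_forward_fast(X)
assert out.shape == (2, 3, 4, 4)

The naive quadruple loop remains useful as a reference implementation for gradient-checking a vectorised version like this one.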
{"blob_id": "636363603426bbb7b2293651810a0b96e7fe62a1", "bodies": ["if asyncEstimate:\n task = self._coreEstimator.asyncEstimate([imageWithFaceDetection.image.coreImage], [imageWithFaceDetection.boundingBox.coreEstimation])\n return AsyncTask(task, postProcessing)\nerror, dynamicRangeEstimation = self._coreEstimator.estimate([imageWithFaceDetection.image.coreImage], [imageWithFaceDetection.boundingBox.coreEstimation])\nreturn postProcessing(error, dynamicRangeEstimation)", "coreImages = [row.image.coreImage for row in batch]\ndetections = [row.boundingBox.coreEstimation for row in batch]\nvalidateInputByBatchEstimator(self._coreEstimator, coreImages, detections)\nif asyncEstimate:\n task = self._coreEstimator.asyncEstimate(coreImages, detections)\n return AsyncTask(task, postProcessingBatch)\nerror, coreDynamicRangeList = self._coreEstimator.estimate(coreImages, detections)\nreturn postProcessingBatch(error, coreDynamicRangeList)"], "bodies_text": "<|body_start_0|>\n if asyncEstimate:\n task = self._coreEstimator.asyncEstimate([imageWithFaceDetection.image.coreImage], [imageWithFaceDetection.boundingBox.coreEstimation])\n return AsyncTask(task, postProcessing)\n error, dynamicRangeEstimation = self._coreEstimator.estimate([imageWithFaceDetection.image.coreImage], [imageWithFaceDetection.boundingBox.coreEstimation])\n return postProcessing(error, dynamicRangeEstimation)\n<|end_body_0|>\n\n<|body_start_1|>\n coreImages = [row.image.coreImage for row in batch]\n detections = [row.boundingBox.coreEstimation for row in batch]\n validateInputByBatchEstimator(self._coreEstimator, coreImages, detections)\n if asyncEstimate:\n task = self._coreEstimator.asyncEstimate(coreImages, detections)\n return AsyncTask(task, postProcessingBatch)\n error, coreDynamicRangeList = self._coreEstimator.estimate(coreImages, detections)\n return postProcessingBatch(error, coreDynamicRangeList)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "DynamicRangeEstimator", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DynamicRangeEstimator:\n\n def estimate(self, imageWithFaceDetection: ImageWithFaceDetection, asyncEstimate: bool=False):\n \"\"\"Estimate dynamic range from single image Args: imageWithFaceDetection: image with face detection asyncEstimate: estimate or run estimation in background Returns: estimated dynamic range or async task if asyncEstimate is true Raises: LunaSDKException: if estimation is failed\"\"\"\n <|body_0|>\n\n def estimateBatch(self, batch: ImageWithFaceDetection, asyncEstimate: bool=False):\n \"\"\"Estimate dynamic range from single image Args: batch: list of image with face detection or face detections asyncEstimate: estimate or run estimation in background Returns: list of estimated dynamic range or async task if asyncEstimate is true Raises: LunaSDKException: if estimation is failed\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if asyncEstimate:\n task = self._coreEstimator.asyncEstimate([imageWithFaceDetection.image.coreImage], [imageWithFaceDetection.boundingBox.coreEstimation])\n return AsyncTask(task, postProcessing)\n error, dynamicRangeEstimation = self._coreEstimator.estimate([imageWithFaceDetection.image.coreImage], [imageWithFaceDetection.boundingBox.coreEstimation])\n return postProcessing(error, dynamicRangeEstimation)\n<|end_body_0|>\n\n<|body_start_1|>\n coreImages = [row.image.coreImage for row in batch]\n detections = [row.boundingBox.coreEstimation for row in batch]\n 
validateInputByBatchEstimator(self._coreEstimator, coreImages, detections)\n if asyncEstimate:\n task = self._coreEstimator.asyncEstimate(coreImages, detections)\n return AsyncTask(task, postProcessingBatch)\n error, coreDynamicRangeList = self._coreEstimator.estimate(coreImages, detections)\n return postProcessingBatch(error, coreDynamicRangeList)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000119", "length_bytes": 3350, "license_type": "permissive", "methods": [{"docstring": "Estimate dynamic range from single image Args: imageWithFaceDetection: image with face detection asyncEstimate: estimate or run estimation in background Returns: estimated dynamic range or async task if asyncEstimate is true Raises: LunaSDKException: if estimation is failed", "name": "estimate", "signature": "def estimate(self, imageWithFaceDetection: ImageWithFaceDetection, asyncEstimate: bool=False)"}, {"docstring": "Estimate dynamic range from single image Args: batch: list of image with face detection or face detections asyncEstimate: estimate or run estimation in background Returns: list of estimated dynamic range or async task if asyncEstimate is true Raises: LunaSDKException: if estimation is failed", "name": "estimateBatch", "signature": "def estimateBatch(self, batch: ImageWithFaceDetection, asyncEstimate: bool=False)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_017172", "prompt": "Implement the Python class `DynamicRangeEstimator` described below.\n\nClass description:\nImplement the DynamicRangeEstimator class.\n\nMethod signatures and docstrings:\n- def estimate(self, imageWithFaceDetection: ImageWithFaceDetection, asyncEstimate: bool=False): Estimate dynamic range from single image Args: imageWithFaceDetection: image with face detection asyncEstimate: estimate or run estimation in background Returns: estimated dynamic range or async task if asyncEstimate is true Raises: LunaSDKException: if estimation is failed\n- def estimateBatch(self, batch: ImageWithFaceDetection, asyncEstimate: bool=False): Estimate dynamic range from single image Args: batch: list of image with face detection or face detections asyncEstimate: estimate or run estimation in background Returns: list of estimated dynamic range or async task if asyncEstimate is true Raises: LunaSDKException: if estimation is failed", "prompted_full_text": "Implement the Python class `DynamicRangeEstimator` described below.\n\nClass description:\nImplement the DynamicRangeEstimator class.\n\nMethod signatures and docstrings:\n- def estimate(self, imageWithFaceDetection: ImageWithFaceDetection, asyncEstimate: bool=False): Estimate dynamic range from single image Args: imageWithFaceDetection: image with face detection asyncEstimate: estimate or run estimation in background Returns: estimated dynamic range or async task if asyncEstimate is true Raises: LunaSDKException: if estimation is failed\n- def estimateBatch(self, batch: ImageWithFaceDetection, asyncEstimate: bool=False): Estimate dynamic range from single image Args: batch: list of image with face detection or face detections asyncEstimate: estimate or run estimation in background Returns: list of estimated dynamic range or async task if asyncEstimate is true Raises: LunaSDKException: if estimation is failed\n\n<|skeleton|>\nclass DynamicRangeEstimator:\n\n def estimate(self, imageWithFaceDetection: ImageWithFaceDetection, asyncEstimate: bool=False):\n \"\"\"Estimate dynamic range from single image Args: imageWithFaceDetection: image with face detection 
asyncEstimate: estimate or run estimation in background Returns: estimated dynamic range or async task if asyncEstimate is true Raises: LunaSDKException: if estimation is failed\"\"\"\n <|body_0|>\n\n def estimateBatch(self, batch: ImageWithFaceDetection, asyncEstimate: bool=False):\n \"\"\"Estimate dynamic range from single image Args: batch: list of image with face detection or face detections asyncEstimate: estimate or run estimation in background Returns: list of estimated dynamic range or async task if asyncEstimate is true Raises: LunaSDKException: if estimation is failed\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if asyncEstimate:\n task = self._coreEstimator.asyncEstimate([imageWithFaceDetection.image.coreImage], [imageWithFaceDetection.boundingBox.coreEstimation])\n return AsyncTask(task, postProcessing)\n error, dynamicRangeEstimation = self._coreEstimator.estimate([imageWithFaceDetection.image.coreImage], [imageWithFaceDetection.boundingBox.coreEstimation])\n return postProcessing(error, dynamicRangeEstimation)\n<|end_body_0|>\n\n<|body_start_1|>\n coreImages = [row.image.coreImage for row in batch]\n detections = [row.boundingBox.coreEstimation for row in batch]\n validateInputByBatchEstimator(self._coreEstimator, coreImages, detections)\n if asyncEstimate:\n task = self._coreEstimator.asyncEstimate(coreImages, detections)\n return AsyncTask(task, postProcessingBatch)\n error, coreDynamicRangeList = self._coreEstimator.estimate(coreImages, detections)\n return postProcessingBatch(error, coreDynamicRangeList)\n<|end_body_1|>\n", "revision_id": "7a4bebc92ae7a96d8d9c18a024208308942f90cd", "skeleton": "<|skeleton|>\nclass DynamicRangeEstimator:\n\n def estimate(self, imageWithFaceDetection: ImageWithFaceDetection, asyncEstimate: bool=False):\n \"\"\"Estimate dynamic range from single image Args: imageWithFaceDetection: image with face detection asyncEstimate: estimate or run estimation in background Returns: estimated dynamic range or async task if asyncEstimate is true Raises: LunaSDKException: if estimation is failed\"\"\"\n <|body_0|>\n\n def estimateBatch(self, batch: ImageWithFaceDetection, asyncEstimate: bool=False):\n \"\"\"Estimate dynamic range from single image Args: batch: list of image with face detection or face detections asyncEstimate: estimate or run estimation in background Returns: list of estimated dynamic range or async task if asyncEstimate is true Raises: LunaSDKException: if estimation is failed\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DynamicRangeEstimator:\n def estimate(self, imageWithFaceDetection: ImageWithFaceDetection, asyncEstimate: bool=False):\n \"\"\"Estimate dynamic range from single image Args: imageWithFaceDetection: image with face detection asyncEstimate: estimate or run estimation in background Returns: estimated dynamic range or async task if asyncEstimate is true Raises: LunaSDKException: if estimation is failed\"\"\"\n if asyncEstimate:\n task = self._coreEstimator.asyncEstimate([imageWithFaceDetection.image.coreImage], [imageWithFaceDetection.boundingBox.coreEstimation])\n return AsyncTask(task, postProcessing)\n error, dynamicRangeEstimation = self._coreEstimator.estimate([imageWithFaceDetection.image.coreImage], [imageWithFaceDetection.boundingBox.coreEstimation])\n return postProcessing(error, dynamicRangeEstimation)\n\n def estimateBatch(self, batch: 
ImageWithFaceDetection, asyncEstimate: bool=False):\n \"\"\"Estimate dynamic range from single image Args: batch: list of image with face detection or face detections asyncEstimate: estimate or run estimation in background Returns: list of estimated dynamic range or async task if asyncEstimate is true Raises: LunaSDKException: if estimation is failed\"\"\"\n coreImages = [row.image.coreImage for row in batch]\n detections = [row.boundingBox.coreEstimation for row in batch]\n validateInputByBatchEstimator(self._coreEstimator, coreImages, detections)\n if asyncEstimate:\n task = self._coreEstimator.asyncEstimate(coreImages, detections)\n return AsyncTask(task, postProcessingBatch)\n error, coreDynamicRangeList = self._coreEstimator.estimate(coreImages, detections)\n return postProcessingBatch(error, coreDynamicRangeList)\n", "source": "the_stack_v2_python_sparse", "source_path": "lunavl/sdk/estimators/face_estimators/dynamic_range.py", "source_repo": "matemax/lunasdk", "split": "val", "star_events_count": 16}
{"blob_id": "bf7705923713dd5348732ff520f97f8a79919311", "bodies": ["if graph.is_directed():\n raise ValueError('the graph is directed')\nself.graph = graph\nfor edge in self.graph.iteredges():\n if edge.source == edge.target:\n raise ValueError('a loop detected')\nself.independent_set = dict(((node, False) for node in self.graph.iternodes()))\nself.cardinality = 0\nself.source = None", "used = dict(((node, False) for node in self.graph.iternodes()))\nif source is not None:\n self.source = source\n self.independent_set[source] = True\n used[source] = True\n self.cardinality += 1\n for target in self.graph.iteradjacent(source):\n used[target] = True\nfor source in sorted(self.graph.iternodes(), key=self.graph.degree):\n if used[source]:\n continue\n self.independent_set[source] = True\n used[source] = True\n self.cardinality += 1\n for target in self.graph.iteradjacent(source):\n used[target] = True"], "bodies_text": "<|body_start_0|>\n if graph.is_directed():\n raise ValueError('the graph is directed')\n self.graph = graph\n for edge in self.graph.iteredges():\n if edge.source == edge.target:\n raise ValueError('a loop detected')\n self.independent_set = dict(((node, False) for node in self.graph.iternodes()))\n self.cardinality = 0\n self.source = None\n<|end_body_0|>\n\n<|body_start_1|>\n used = dict(((node, False) for node in self.graph.iternodes()))\n if source is not None:\n self.source = source\n self.independent_set[source] = True\n used[source] = True\n self.cardinality += 1\n for target in self.graph.iteradjacent(source):\n used[target] = True\n for source in sorted(self.graph.iternodes(), key=self.graph.degree):\n if used[source]:\n continue\n self.independent_set[source] = True\n used[source] = True\n self.cardinality += 1\n for target in self.graph.iteradjacent(source):\n used[target] = True\n<|end_body_1|>\n", "class_docstring": "Find a maximal independent set.", "class_name": "SmallestFirstIndependentSet3", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SmallestFirstIndependentSet3:\n \"\"\"Find a maximal independent set.\"\"\"\n\n def __init__(self, graph):\n \"\"\"The algorithm initialization.\"\"\"\n <|body_0|>\n\n def run(self, source=None):\n \"\"\"Executable pseudocode.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if graph.is_directed():\n raise ValueError('the graph is directed')\n self.graph = graph\n for edge in self.graph.iteredges():\n if edge.source == edge.target:\n raise ValueError('a loop detected')\n self.independent_set = dict(((node, False) for node in self.graph.iternodes()))\n self.cardinality = 0\n self.source = None\n<|end_body_0|>\n\n<|body_start_1|>\n used = dict(((node, False) for node in self.graph.iternodes()))\n if source is not None:\n self.source = source\n self.independent_set[source] = True\n used[source] = True\n self.cardinality += 1\n for target in self.graph.iteradjacent(source):\n used[target] = True\n for source in sorted(self.graph.iternodes(), key=self.graph.degree):\n if used[source]:\n continue\n self.independent_set[source] = True\n used[source] = True\n self.cardinality += 1\n for target in self.graph.iteradjacent(source):\n used[target] = True\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000120", "length_bytes": 13747, "license_type": "permissive", "methods": [{"docstring": "The algorithm initialization.", "name": "__init__", "signature": "def __init__(self, graph)"}, {"docstring": "Executable pseudocode.", "name": 
"run", "signature": "def run(self, source=None)"}], "n_methods": 2, "prompt": "Implement the Python class `SmallestFirstIndependentSet3` described below.\n\nClass description:\nFind a maximal independent set.\n\nMethod signatures and docstrings:\n- def __init__(self, graph): The algorithm initialization.\n- def run(self, source=None): Executable pseudocode.", "prompted_full_text": "Implement the Python class `SmallestFirstIndependentSet3` described below.\n\nClass description:\nFind a maximal independent set.\n\nMethod signatures and docstrings:\n- def __init__(self, graph): The algorithm initialization.\n- def run(self, source=None): Executable pseudocode.\n\n<|skeleton|>\nclass SmallestFirstIndependentSet3:\n \"\"\"Find a maximal independent set.\"\"\"\n\n def __init__(self, graph):\n \"\"\"The algorithm initialization.\"\"\"\n <|body_0|>\n\n def run(self, source=None):\n \"\"\"Executable pseudocode.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if graph.is_directed():\n raise ValueError('the graph is directed')\n self.graph = graph\n for edge in self.graph.iteredges():\n if edge.source == edge.target:\n raise ValueError('a loop detected')\n self.independent_set = dict(((node, False) for node in self.graph.iternodes()))\n self.cardinality = 0\n self.source = None\n<|end_body_0|>\n\n<|body_start_1|>\n used = dict(((node, False) for node in self.graph.iternodes()))\n if source is not None:\n self.source = source\n self.independent_set[source] = True\n used[source] = True\n self.cardinality += 1\n for target in self.graph.iteradjacent(source):\n used[target] = True\n for source in sorted(self.graph.iternodes(), key=self.graph.degree):\n if used[source]:\n continue\n self.independent_set[source] = True\n used[source] = True\n self.cardinality += 1\n for target in self.graph.iteradjacent(source):\n used[target] = True\n<|end_body_1|>\n", "revision_id": "0ff4ae303e8824e6bb8474d23b29a7b3e5ed8e60", "skeleton": "<|skeleton|>\nclass SmallestFirstIndependentSet3:\n \"\"\"Find a maximal independent set.\"\"\"\n\n def __init__(self, graph):\n \"\"\"The algorithm initialization.\"\"\"\n <|body_0|>\n\n def run(self, source=None):\n \"\"\"Executable pseudocode.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SmallestFirstIndependentSet3:\n \"\"\"Find a maximal independent set.\"\"\"\n\n def __init__(self, graph):\n \"\"\"The algorithm initialization.\"\"\"\n if graph.is_directed():\n raise ValueError('the graph is directed')\n self.graph = graph\n for edge in self.graph.iteredges():\n if edge.source == edge.target:\n raise ValueError('a loop detected')\n self.independent_set = dict(((node, False) for node in self.graph.iternodes()))\n self.cardinality = 0\n self.source = None\n\n def run(self, source=None):\n \"\"\"Executable pseudocode.\"\"\"\n used = dict(((node, False) for node in self.graph.iternodes()))\n if source is not None:\n self.source = source\n self.independent_set[source] = True\n used[source] = True\n self.cardinality += 1\n for target in self.graph.iteradjacent(source):\n used[target] = True\n for source in sorted(self.graph.iternodes(), key=self.graph.degree):\n if used[source]:\n continue\n self.independent_set[source] = True\n used[source] = True\n self.cardinality += 1\n for target in self.graph.iteradjacent(source):\n used[target] = True\n", "source": "the_stack_v2_python_sparse", "source_path": 
"graphtheory/independentsets/isetsf.py", "source_repo": "kgashok/graphs-dict", "split": "val", "star_events_count": 0}
{"blob_id": "b36181fb5c8c4145f63760858127ba9ddcd60fe9", "bodies": ["super(ProtocolWrapper, self).__init__(env)\nself.protocol = protocol\nself.env.add_wrapper_info({'evaluation_environment': self.protocol.get_name()})\nself._elapsed_episodes = 0\nself._elapsed_timesteps = 0\nreturn", "observation, reward, done, info = self.env.step(action)\nself._elapsed_timesteps += 1\ninvalid_interventions = 0\ninterventions_dict = self.protocol.get_intervention(episode=self._elapsed_episodes, timestep=self._elapsed_episodes)\nif interventions_dict is not None:\n success_signal, observation = self.env.do_intervention(interventions_dict=interventions_dict)\n while not success_signal and invalid_interventions < 5:\n invalid_interventions += 1\n interventions_dict = self.protocol.get_intervention(episode=self._elapsed_episodes, timestep=self._elapsed_episodes)\n if interventions_dict is not None:\n success_signal, observation = self.env.do_intervention(interventions_dict=interventions_dict)\n else:\n break\nreturn (observation, reward, done, info)", "self._elapsed_episodes += 1\nself._elapsed_timesteps = 0\ninvalid_interventions = 0\nobservation = self.env.reset()\ninterventions_dict = self.protocol.get_intervention(episode=self._elapsed_episodes, timestep=0)\nif interventions_dict is not None:\n success_signal, observation = self.env.do_intervention(interventions_dict)\n while not success_signal and invalid_interventions < 5:\n invalid_interventions += 1\n interventions_dict = self.protocol.get_intervention(episode=self._elapsed_episodes, timestep=0)\n if interventions_dict is not None:\n success_signal, observation = self.env.do_intervention(interventions_dict)\n else:\n break\nreturn observation"], "bodies_text": "<|body_start_0|>\n super(ProtocolWrapper, self).__init__(env)\n self.protocol = protocol\n self.env.add_wrapper_info({'evaluation_environment': self.protocol.get_name()})\n self._elapsed_episodes = 0\n self._elapsed_timesteps = 0\n return\n<|end_body_0|>\n\n<|body_start_1|>\n observation, reward, done, info = self.env.step(action)\n self._elapsed_timesteps += 1\n invalid_interventions = 0\n interventions_dict = self.protocol.get_intervention(episode=self._elapsed_episodes, timestep=self._elapsed_episodes)\n if interventions_dict is not None:\n success_signal, observation = self.env.do_intervention(interventions_dict=interventions_dict)\n while not success_signal and invalid_interventions < 5:\n invalid_interventions += 1\n interventions_dict = self.protocol.get_intervention(episode=self._elapsed_episodes, timestep=self._elapsed_episodes)\n if interventions_dict is not None:\n success_signal, observation = self.env.do_intervention(interventions_dict=interventions_dict)\n else:\n break\n return (observation, reward, done, info)\n<|end_body_1|>\n\n<|body_start_2|>\n self._elapsed_episodes += 1\n self._elapsed_timesteps = 0\n invalid_interventions = 0\n observation = self.env.reset()\n interventions_dict = self.protocol.get_intervention(episode=self._elapsed_episodes, timestep=0)\n if interventions_dict is not None:\n success_signal, observation = self.env.do_intervention(interventions_dict)\n while not success_signal and invalid_interventions < 5:\n invalid_interventions += 1\n interventions_dict = self.protocol.get_intervention(episode=self._elapsed_episodes, timestep=0)\n if interventions_dict is not None:\n success_signal, observation = self.env.do_intervention(interventions_dict)\n else:\n break\n return observation\n<|end_body_2|>\n", "class_docstring": "", "class_name": "ProtocolWrapper", 
"detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ProtocolWrapper:\n\n def __init__(self, env, protocol):\n \"\"\":param env: :param protocol:\"\"\"\n <|body_0|>\n\n def step(self, action):\n \"\"\":param action: :return:\"\"\"\n <|body_1|>\n\n def reset(self):\n \"\"\":return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(ProtocolWrapper, self).__init__(env)\n self.protocol = protocol\n self.env.add_wrapper_info({'evaluation_environment': self.protocol.get_name()})\n self._elapsed_episodes = 0\n self._elapsed_timesteps = 0\n return\n<|end_body_0|>\n\n<|body_start_1|>\n observation, reward, done, info = self.env.step(action)\n self._elapsed_timesteps += 1\n invalid_interventions = 0\n interventions_dict = self.protocol.get_intervention(episode=self._elapsed_episodes, timestep=self._elapsed_episodes)\n if interventions_dict is not None:\n success_signal, observation = self.env.do_intervention(interventions_dict=interventions_dict)\n while not success_signal and invalid_interventions < 5:\n invalid_interventions += 1\n interventions_dict = self.protocol.get_intervention(episode=self._elapsed_episodes, timestep=self._elapsed_episodes)\n if interventions_dict is not None:\n success_signal, observation = self.env.do_intervention(interventions_dict=interventions_dict)\n else:\n break\n return (observation, reward, done, info)\n<|end_body_1|>\n\n<|body_start_2|>\n self._elapsed_episodes += 1\n self._elapsed_timesteps = 0\n invalid_interventions = 0\n observation = self.env.reset()\n interventions_dict = self.protocol.get_intervention(episode=self._elapsed_episodes, timestep=0)\n if interventions_dict is not None:\n success_signal, observation = self.env.do_intervention(interventions_dict)\n while not success_signal and invalid_interventions < 5:\n invalid_interventions += 1\n interventions_dict = self.protocol.get_intervention(episode=self._elapsed_episodes, timestep=0)\n if interventions_dict is not None:\n success_signal, observation = self.env.do_intervention(interventions_dict)\n else:\n break\n return observation\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000121", "length_bytes": 2531, "license_type": "permissive", "methods": [{"docstring": ":param env: :param protocol:", "name": "__init__", "signature": "def __init__(self, env, protocol)"}, {"docstring": ":param action: :return:", "name": "step", "signature": "def step(self, action)"}, {"docstring": ":return:", "name": "reset", "signature": "def reset(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_035003", "prompt": "Implement the Python class `ProtocolWrapper` described below.\n\nClass description:\nImplement the ProtocolWrapper class.\n\nMethod signatures and docstrings:\n- def __init__(self, env, protocol): :param env: :param protocol:\n- def step(self, action): :param action: :return:\n- def reset(self): :return:", "prompted_full_text": "Implement the Python class `ProtocolWrapper` described below.\n\nClass description:\nImplement the ProtocolWrapper class.\n\nMethod signatures and docstrings:\n- def __init__(self, env, protocol): :param env: :param protocol:\n- def step(self, action): :param action: :return:\n- def reset(self): :return:\n\n<|skeleton|>\nclass ProtocolWrapper:\n\n def __init__(self, env, protocol):\n \"\"\":param env: :param protocol:\"\"\"\n <|body_0|>\n\n def step(self, action):\n \"\"\":param action: :return:\"\"\"\n <|body_1|>\n\n def reset(self):\n \"\"\":return:\"\"\"\n 
<|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(ProtocolWrapper, self).__init__(env)\n self.protocol = protocol\n self.env.add_wrapper_info({'evaluation_environment': self.protocol.get_name()})\n self._elapsed_episodes = 0\n self._elapsed_timesteps = 0\n return\n<|end_body_0|>\n\n<|body_start_1|>\n observation, reward, done, info = self.env.step(action)\n self._elapsed_timesteps += 1\n invalid_interventions = 0\n interventions_dict = self.protocol.get_intervention(episode=self._elapsed_episodes, timestep=self._elapsed_episodes)\n if interventions_dict is not None:\n success_signal, observation = self.env.do_intervention(interventions_dict=interventions_dict)\n while not success_signal and invalid_interventions < 5:\n invalid_interventions += 1\n interventions_dict = self.protocol.get_intervention(episode=self._elapsed_episodes, timestep=self._elapsed_episodes)\n if interventions_dict is not None:\n success_signal, observation = self.env.do_intervention(interventions_dict=interventions_dict)\n else:\n break\n return (observation, reward, done, info)\n<|end_body_1|>\n\n<|body_start_2|>\n self._elapsed_episodes += 1\n self._elapsed_timesteps = 0\n invalid_interventions = 0\n observation = self.env.reset()\n interventions_dict = self.protocol.get_intervention(episode=self._elapsed_episodes, timestep=0)\n if interventions_dict is not None:\n success_signal, observation = self.env.do_intervention(interventions_dict)\n while not success_signal and invalid_interventions < 5:\n invalid_interventions += 1\n interventions_dict = self.protocol.get_intervention(episode=self._elapsed_episodes, timestep=0)\n if interventions_dict is not None:\n success_signal, observation = self.env.do_intervention(interventions_dict)\n else:\n break\n return observation\n<|end_body_2|>\n", "revision_id": "4c0ac37e559daa0dd89668e5bff5eec82a4158c5", "skeleton": "<|skeleton|>\nclass ProtocolWrapper:\n\n def __init__(self, env, protocol):\n \"\"\":param env: :param protocol:\"\"\"\n <|body_0|>\n\n def step(self, action):\n \"\"\":param action: :return:\"\"\"\n <|body_1|>\n\n def reset(self):\n \"\"\":return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ProtocolWrapper:\n def __init__(self, env, protocol):\n \"\"\":param env: :param protocol:\"\"\"\n super(ProtocolWrapper, self).__init__(env)\n self.protocol = protocol\n self.env.add_wrapper_info({'evaluation_environment': self.protocol.get_name()})\n self._elapsed_episodes = 0\n self._elapsed_timesteps = 0\n return\n\n def step(self, action):\n \"\"\":param action: :return:\"\"\"\n observation, reward, done, info = self.env.step(action)\n self._elapsed_timesteps += 1\n invalid_interventions = 0\n interventions_dict = self.protocol.get_intervention(episode=self._elapsed_episodes, timestep=self._elapsed_episodes)\n if interventions_dict is not None:\n success_signal, observation = self.env.do_intervention(interventions_dict=interventions_dict)\n while not success_signal and invalid_interventions < 5:\n invalid_interventions += 1\n interventions_dict = self.protocol.get_intervention(episode=self._elapsed_episodes, timestep=self._elapsed_episodes)\n if interventions_dict is not None:\n success_signal, observation = self.env.do_intervention(interventions_dict=interventions_dict)\n else:\n break\n return (observation, reward, done, info)\n\n def reset(self):\n \"\"\":return:\"\"\"\n self._elapsed_episodes += 1\n 
self._elapsed_timesteps = 0\n invalid_interventions = 0\n observation = self.env.reset()\n interventions_dict = self.protocol.get_intervention(episode=self._elapsed_episodes, timestep=0)\n if interventions_dict is not None:\n success_signal, observation = self.env.do_intervention(interventions_dict)\n while not success_signal and invalid_interventions < 5:\n invalid_interventions += 1\n interventions_dict = self.protocol.get_intervention(episode=self._elapsed_episodes, timestep=0)\n if interventions_dict is not None:\n success_signal, observation = self.env.do_intervention(interventions_dict)\n else:\n break\n return observation\n", "source": "the_stack_v2_python_sparse", "source_path": "Trifinger/causal_world/wrappers/protocol_wrapper.py", "source_repo": "emigmo/BenTDM", "split": "val", "star_events_count": 0}
{"blob_id": "e25f719090282c4a900de742fb95a022b5c97df2", "bodies": ["context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')"], "bodies_text": "<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n", "class_docstring": "", "class_name": "VisionServicer", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass VisionServicer:\n\n def Recognize(self, request, context):\n \"\"\"sync call for result, use by service-app usually\"\"\"\n <|body_0|>\n\n def StreamingRecognize(self, request_iterator, context):\n \"\"\"sync call , use by rcu or other end point receive multi-face or other\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000122", "length_bytes": 2332, "license_type": "no_license", "methods": [{"docstring": "sync call for result, use by service-app usually", "name": "Recognize", "signature": "def Recognize(self, request, context)"}, {"docstring": "sync call , use by rcu or other end point receive multi-face or other", "name": "StreamingRecognize", "signature": "def StreamingRecognize(self, request_iterator, context)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_026680", "prompt": "Implement the Python class `VisionServicer` described below.\n\nClass description:\nImplement the VisionServicer class.\n\nMethod signatures and docstrings:\n- def Recognize(self, request, context): sync call for result, use by service-app usually\n- def StreamingRecognize(self, request_iterator, context): sync call , use by rcu or other end point receive multi-face or other", "prompted_full_text": "Implement the Python class `VisionServicer` described below.\n\nClass description:\nImplement the VisionServicer class.\n\nMethod signatures and docstrings:\n- def Recognize(self, request, context): sync call for result, use by service-app usually\n- def StreamingRecognize(self, request_iterator, context): sync call , use by rcu or other end point receive multi-face or other\n\n<|skeleton|>\nclass VisionServicer:\n\n def Recognize(self, request, context):\n \"\"\"sync call for result, use by service-app usually\"\"\"\n <|body_0|>\n\n def StreamingRecognize(self, request_iterator, context):\n \"\"\"sync call , use by rcu or other end point receive multi-face or other\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n 
context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n", "revision_id": "be042a0ac5a44ca4148b4b3608a388519b268f6e", "skeleton": "<|skeleton|>\nclass VisionServicer:\n\n def Recognize(self, request, context):\n \"\"\"sync call for result, use by service-app usually\"\"\"\n <|body_0|>\n\n def StreamingRecognize(self, request_iterator, context):\n \"\"\"sync call , use by rcu or other end point receive multi-face or other\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class VisionServicer:\n def Recognize(self, request, context):\n \"\"\"sync call for result, use by service-app usually\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def StreamingRecognize(self, request_iterator, context):\n \"\"\"sync call , use by rcu or other end point receive multi-face or other\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n", "source": "the_stack_v2_python_sparse", "source_path": "protopb/vision/vision_pb2_grpc.py", "source_repo": "zlfdtc1983/lfzhaotest", "split": "val", "star_events_count": 0}
{"blob_id": "cfc7eeaf6f923fe08688040f050fa68afbda3650", "bodies": ["self.array = [None] * n\nself.top1 = -1\nself.top2 = len(self.array)", "if self.top2 - 1 == self.top1:\n print('Memory out of bound')\nelse:\n self.top1 += 1\n self.array[self.top1] = x", "if self.top2 - 1 == self.top1:\n print('Memory out of bound')\nelse:\n self.top2 -= 1\n self.array[self.top2] = x", "if self.top1 >= 0:\n x = self.array[self.top1]\n self.array[self.top1] = None\n self.top1 -= 1\n return x\nelse:\n print('Stack underflow')", "if self.top2 <= len(self.array) - 1:\n x = self.array[self.top2]\n self.array[self.top2] = None\n self.top2 += 1\n return x\nelse:\n print('Stack underflow')"], "bodies_text": "<|body_start_0|>\n self.array = [None] * n\n self.top1 = -1\n self.top2 = len(self.array)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.top2 - 1 == self.top1:\n print('Memory out of bound')\n else:\n self.top1 += 1\n self.array[self.top1] = x\n<|end_body_1|>\n\n<|body_start_2|>\n if self.top2 - 1 == self.top1:\n print('Memory out of bound')\n else:\n self.top2 -= 1\n self.array[self.top2] = x\n<|end_body_2|>\n\n<|body_start_3|>\n if self.top1 >= 0:\n x = self.array[self.top1]\n self.array[self.top1] = None\n self.top1 -= 1\n return x\n else:\n print('Stack underflow')\n<|end_body_3|>\n\n<|body_start_4|>\n if self.top2 <= len(self.array) - 1:\n x = self.array[self.top2]\n self.array[self.top2] = None\n self.top2 += 1\n return x\n else:\n print('Stack underflow')\n<|end_body_4|>\n", "class_docstring": "Function to implement two stacks in a array. Two stacks will start from index 0 and index n-1 and keep pushing the new elements in the middle of the array until their is not space left between the top of stack1 and stack2. Index of top of two stacks will be saved using two variables top1 and top2.", "class_name": "TwoStacks", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TwoStacks:\n \"\"\"Function to implement two stacks in a array. Two stacks will start from index 0 and index n-1 and keep pushing the new elements in the middle of the array until their is not space left between the top of stack1 and stack2. Index of top of two stacks will be saved using two variables top1 and top2.\"\"\"\n\n def __init__(self, n):\n \"\"\"array of size n to store elements of the stack. top1 and top2 to store indexes of the top of the stack.\"\"\"\n <|body_0|>\n\n def push1(self, x):\n \"\"\"Function to push element in the stack. It checks the difference between indexes of the two tops to check for stack full. If not, it increments the stack top index and pushed the element in the array. :param x: Int :return: None\"\"\"\n <|body_1|>\n\n def push2(self, x):\n \"\"\"Function to push element in the stack. It checks the difference between indexes of the two tops to check for stack full. If not, it increments the stack top index and pushed the element in the array. :param x: int :return: None\"\"\"\n <|body_2|>\n\n def pop1(self):\n \"\"\"Function to pop the element from the stack. It check the stack index to check for empty stack. It not, it removes the element at the stack top and decrement the index of stack top :return: Int\"\"\"\n <|body_3|>\n\n def pop2(self):\n \"\"\"Function to pop the element from the stack. It check the stack index to check for empty stack. 
It not, it removes the element at the stack top and decrement the index of stack top :return: Int\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.array = [None] * n\n self.top1 = -1\n self.top2 = len(self.array)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.top2 - 1 == self.top1:\n print('Memory out of bound')\n else:\n self.top1 += 1\n self.array[self.top1] = x\n<|end_body_1|>\n\n<|body_start_2|>\n if self.top2 - 1 == self.top1:\n print('Memory out of bound')\n else:\n self.top2 -= 1\n self.array[self.top2] = x\n<|end_body_2|>\n\n<|body_start_3|>\n if self.top1 >= 0:\n x = self.array[self.top1]\n self.array[self.top1] = None\n self.top1 -= 1\n return x\n else:\n print('Stack underflow')\n<|end_body_3|>\n\n<|body_start_4|>\n if self.top2 <= len(self.array) - 1:\n x = self.array[self.top2]\n self.array[self.top2] = None\n self.top2 += 1\n return x\n else:\n print('Stack underflow')\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000123", "length_bytes": 2761, "license_type": "no_license", "methods": [{"docstring": "array of size n to store elements of the stack. top1 and top2 to store indexes of the top of the stack.", "name": "__init__", "signature": "def __init__(self, n)"}, {"docstring": "Function to push element in the stack. It checks the difference between indexes of the two tops to check for stack full. If not, it increments the stack top index and pushed the element in the array. :param x: Int :return: None", "name": "push1", "signature": "def push1(self, x)"}, {"docstring": "Function to push element in the stack. It checks the difference between indexes of the two tops to check for stack full. If not, it increments the stack top index and pushed the element in the array. :param x: int :return: None", "name": "push2", "signature": "def push2(self, x)"}, {"docstring": "Function to pop the element from the stack. It check the stack index to check for empty stack. It not, it removes the element at the stack top and decrement the index of stack top :return: Int", "name": "pop1", "signature": "def pop1(self)"}, {"docstring": "Function to pop the element from the stack. It check the stack index to check for empty stack. It not, it removes the element at the stack top and decrement the index of stack top :return: Int", "name": "pop2", "signature": "def pop2(self)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_006764", "prompt": "Implement the Python class `TwoStacks` described below.\n\nClass description:\nFunction to implement two stacks in a array. Two stacks will start from index 0 and index n-1 and keep pushing the new elements in the middle of the array until their is not space left between the top of stack1 and stack2. Index of top of two stacks will be saved using two variables top1 and top2.\n\nMethod signatures and docstrings:\n- def __init__(self, n): array of size n to store elements of the stack. top1 and top2 to store indexes of the top of the stack.\n- def push1(self, x): Function to push element in the stack. It checks the difference between indexes of the two tops to check for stack full. If not, it increments the stack top index and pushed the element in the array. :param x: Int :return: None\n- def push2(self, x): Function to push element in the stack. It checks the difference between indexes of the two tops to check for stack full. If not, it increments the stack top index and pushed the element in the array. :param x: int :return: None\n- def pop1(self): Function to pop the element from the stack. 
It check the stack index to check for empty stack. It not, it removes the element at the stack top and decrement the index of stack top :return: Int\n- def pop2(self): Function to pop the element from the stack. It check the stack index to check for empty stack. It not, it removes the element at the stack top and decrement the index of stack top :return: Int", "prompted_full_text": "Implement the Python class `TwoStacks` described below.\n\nClass description:\nFunction to implement two stacks in a array. Two stacks will start from index 0 and index n-1 and keep pushing the new elements in the middle of the array until their is not space left between the top of stack1 and stack2. Index of top of two stacks will be saved using two variables top1 and top2.\n\nMethod signatures and docstrings:\n- def __init__(self, n): array of size n to store elements of the stack. top1 and top2 to store indexes of the top of the stack.\n- def push1(self, x): Function to push element in the stack. It checks the difference between indexes of the two tops to check for stack full. If not, it increments the stack top index and pushed the element in the array. :param x: Int :return: None\n- def push2(self, x): Function to push element in the stack. It checks the difference between indexes of the two tops to check for stack full. If not, it increments the stack top index and pushed the element in the array. :param x: int :return: None\n- def pop1(self): Function to pop the element from the stack. It check the stack index to check for empty stack. It not, it removes the element at the stack top and decrement the index of stack top :return: Int\n- def pop2(self): Function to pop the element from the stack. It check the stack index to check for empty stack. It not, it removes the element at the stack top and decrement the index of stack top :return: Int\n\n<|skeleton|>\nclass TwoStacks:\n \"\"\"Function to implement two stacks in a array. Two stacks will start from index 0 and index n-1 and keep pushing the new elements in the middle of the array until their is not space left between the top of stack1 and stack2. Index of top of two stacks will be saved using two variables top1 and top2.\"\"\"\n\n def __init__(self, n):\n \"\"\"array of size n to store elements of the stack. top1 and top2 to store indexes of the top of the stack.\"\"\"\n <|body_0|>\n\n def push1(self, x):\n \"\"\"Function to push element in the stack. It checks the difference between indexes of the two tops to check for stack full. If not, it increments the stack top index and pushed the element in the array. :param x: Int :return: None\"\"\"\n <|body_1|>\n\n def push2(self, x):\n \"\"\"Function to push element in the stack. It checks the difference between indexes of the two tops to check for stack full. If not, it increments the stack top index and pushed the element in the array. :param x: int :return: None\"\"\"\n <|body_2|>\n\n def pop1(self):\n \"\"\"Function to pop the element from the stack. It check the stack index to check for empty stack. It not, it removes the element at the stack top and decrement the index of stack top :return: Int\"\"\"\n <|body_3|>\n\n def pop2(self):\n \"\"\"Function to pop the element from the stack. It check the stack index to check for empty stack. 
It not, it removes the element at the stack top and decrement the index of stack top :return: Int\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.array = [None] * n\n self.top1 = -1\n self.top2 = len(self.array)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.top2 - 1 == self.top1:\n print('Memory out of bound')\n else:\n self.top1 += 1\n self.array[self.top1] = x\n<|end_body_1|>\n\n<|body_start_2|>\n if self.top2 - 1 == self.top1:\n print('Memory out of bound')\n else:\n self.top2 -= 1\n self.array[self.top2] = x\n<|end_body_2|>\n\n<|body_start_3|>\n if self.top1 >= 0:\n x = self.array[self.top1]\n self.array[self.top1] = None\n self.top1 -= 1\n return x\n else:\n print('Stack underflow')\n<|end_body_3|>\n\n<|body_start_4|>\n if self.top2 <= len(self.array) - 1:\n x = self.array[self.top2]\n self.array[self.top2] = None\n self.top2 += 1\n return x\n else:\n print('Stack underflow')\n<|end_body_4|>\n", "revision_id": "7e484faa5c75e690f2cb33ee95eedf4472c0089b", "skeleton": "<|skeleton|>\nclass TwoStacks:\n \"\"\"Function to implement two stacks in a array. Two stacks will start from index 0 and index n-1 and keep pushing the new elements in the middle of the array until their is not space left between the top of stack1 and stack2. Index of top of two stacks will be saved using two variables top1 and top2.\"\"\"\n\n def __init__(self, n):\n \"\"\"array of size n to store elements of the stack. top1 and top2 to store indexes of the top of the stack.\"\"\"\n <|body_0|>\n\n def push1(self, x):\n \"\"\"Function to push element in the stack. It checks the difference between indexes of the two tops to check for stack full. If not, it increments the stack top index and pushed the element in the array. :param x: Int :return: None\"\"\"\n <|body_1|>\n\n def push2(self, x):\n \"\"\"Function to push element in the stack. It checks the difference between indexes of the two tops to check for stack full. If not, it increments the stack top index and pushed the element in the array. :param x: int :return: None\"\"\"\n <|body_2|>\n\n def pop1(self):\n \"\"\"Function to pop the element from the stack. It check the stack index to check for empty stack. It not, it removes the element at the stack top and decrement the index of stack top :return: Int\"\"\"\n <|body_3|>\n\n def pop2(self):\n \"\"\"Function to pop the element from the stack. It check the stack index to check for empty stack. It not, it removes the element at the stack top and decrement the index of stack top :return: Int\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TwoStacks:\n \"\"\"Function to implement two stacks in a array. Two stacks will start from index 0 and index n-1 and keep pushing the new elements in the middle of the array until their is not space left between the top of stack1 and stack2. Index of top of two stacks will be saved using two variables top1 and top2.\"\"\"\n\n def __init__(self, n):\n \"\"\"array of size n to store elements of the stack. top1 and top2 to store indexes of the top of the stack.\"\"\"\n self.array = [None] * n\n self.top1 = -1\n self.top2 = len(self.array)\n\n def push1(self, x):\n \"\"\"Function to push element in the stack. It checks the difference between indexes of the two tops to check for stack full. If not, it increments the stack top index and pushed the element in the array. 
:param x: Int :return: None\"\"\"\n if self.top2 - 1 == self.top1:\n print('Memory out of bound')\n else:\n self.top1 += 1\n self.array[self.top1] = x\n\n def push2(self, x):\n \"\"\"Function to push element in the stack. It checks the difference between indexes of the two tops to check for stack full. If not, it increments the stack top index and pushed the element in the array. :param x: int :return: None\"\"\"\n if self.top2 - 1 == self.top1:\n print('Memory out of bound')\n else:\n self.top2 -= 1\n self.array[self.top2] = x\n\n def pop1(self):\n \"\"\"Function to pop the element from the stack. It check the stack index to check for empty stack. It not, it removes the element at the stack top and decrement the index of stack top :return: Int\"\"\"\n if self.top1 >= 0:\n x = self.array[self.top1]\n self.array[self.top1] = None\n self.top1 -= 1\n return x\n else:\n print('Stack underflow')\n\n def pop2(self):\n \"\"\"Function to pop the element from the stack. It check the stack index to check for empty stack. It not, it removes the element at the stack top and decrement the index of stack top :return: Int\"\"\"\n if self.top2 <= len(self.array) - 1:\n x = self.array[self.top2]\n self.array[self.top2] = None\n self.top2 += 1\n return x\n else:\n print('Stack underflow')\n", "source": "the_stack_v2_python_sparse", "source_path": "stacks & queue/two_stacks_in_array.py", "source_repo": "sunny0910/Data-Structures-Algorithms", "split": "val", "star_events_count": 5}
{"blob_id": "5da67df2885ce81c1d8ee28dc5d514490828e547", "bodies": ["from apysc import EventType\nfrom apysc import MouseEvent\nfrom apysc.event.handler import append_handler_expression\nfrom apysc.event.handler import get_handler_name\nfrom apysc.type.variable_name_interface import VariableNameInterface\nself_instance: VariableNameInterface = self._validate_self_is_variable_name_interface()\nself._initialize_click_handlers_if_not_initialized()\nname: str = get_handler_name(handler=handler, instance=self)\nself._set_handler_data(handler=handler, handlers_dict=self._click_handlers, options=options)\nself._append_event_binding_expression(name=name, event_type=EventType.CLICK)\ne: MouseEvent = MouseEvent(this=self_instance)\nappend_handler_expression(handler_data=self._click_handlers[name], handler_name=name, e=e)\nreturn name", "if hasattr(self, '_click_handlers'):\n return\nself._click_handlers = {}", "from apysc import EventType\nself._initialize_click_handlers_if_not_initialized()\nself._unbind_event(handler=handler, event_type=EventType.CLICK, handlers_dict=self._click_handlers)", "from apysc import EventType\nself._initialize_click_handlers_if_not_initialized()\nself._unbind_all_events(event_type=EventType.CLICK, handlers_dict=self._click_handlers)"], "bodies_text": "<|body_start_0|>\n from apysc import EventType\n from apysc import MouseEvent\n from apysc.event.handler import append_handler_expression\n from apysc.event.handler import get_handler_name\n from apysc.type.variable_name_interface import VariableNameInterface\n self_instance: VariableNameInterface = self._validate_self_is_variable_name_interface()\n self._initialize_click_handlers_if_not_initialized()\n name: str = get_handler_name(handler=handler, instance=self)\n self._set_handler_data(handler=handler, handlers_dict=self._click_handlers, options=options)\n self._append_event_binding_expression(name=name, event_type=EventType.CLICK)\n e: MouseEvent = MouseEvent(this=self_instance)\n append_handler_expression(handler_data=self._click_handlers[name], handler_name=name, e=e)\n return name\n<|end_body_0|>\n\n<|body_start_1|>\n if hasattr(self, '_click_handlers'):\n return\n self._click_handlers = {}\n<|end_body_1|>\n\n<|body_start_2|>\n from apysc import EventType\n self._initialize_click_handlers_if_not_initialized()\n self._unbind_event(handler=handler, event_type=EventType.CLICK, handlers_dict=self._click_handlers)\n<|end_body_2|>\n\n<|body_start_3|>\n from apysc import EventType\n self._initialize_click_handlers_if_not_initialized()\n self._unbind_all_events(event_type=EventType.CLICK, handlers_dict=self._click_handlers)\n<|end_body_3|>\n", "class_docstring": "", "class_name": "ClickInterface", "detected_licenses": ["MIT", "CC-BY-4.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ClickInterface:\n\n def click(self, handler: Handler, options: Optional[Dict[str, Any]]=None) -> str:\n \"\"\"Add click event listener setting. Parameters ---------- handler : Handler Callable that called when this instance is clicked. options : dict or None, default None Optional arguments dictionary to be passed to handler. Returns ------- name : str Handler's name.\"\"\"\n <|body_0|>\n\n def _initialize_click_handlers_if_not_initialized(self) -> None:\n \"\"\"Initialize _click_handlers attribute if it is not initialized yet.\"\"\"\n <|body_1|>\n\n def unbind_click(self, handler: Handler) -> None:\n \"\"\"Unbind specified handler's click event. 
Parameters ---------- handler : Handler Callable to be unbinded.\"\"\"\n <|body_2|>\n\n def unbind_click_all(self) -> None:\n \"\"\"Unbind all click events.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n from apysc import EventType\n from apysc import MouseEvent\n from apysc.event.handler import append_handler_expression\n from apysc.event.handler import get_handler_name\n from apysc.type.variable_name_interface import VariableNameInterface\n self_instance: VariableNameInterface = self._validate_self_is_variable_name_interface()\n self._initialize_click_handlers_if_not_initialized()\n name: str = get_handler_name(handler=handler, instance=self)\n self._set_handler_data(handler=handler, handlers_dict=self._click_handlers, options=options)\n self._append_event_binding_expression(name=name, event_type=EventType.CLICK)\n e: MouseEvent = MouseEvent(this=self_instance)\n append_handler_expression(handler_data=self._click_handlers[name], handler_name=name, e=e)\n return name\n<|end_body_0|>\n\n<|body_start_1|>\n if hasattr(self, '_click_handlers'):\n return\n self._click_handlers = {}\n<|end_body_1|>\n\n<|body_start_2|>\n from apysc import EventType\n self._initialize_click_handlers_if_not_initialized()\n self._unbind_event(handler=handler, event_type=EventType.CLICK, handlers_dict=self._click_handlers)\n<|end_body_2|>\n\n<|body_start_3|>\n from apysc import EventType\n self._initialize_click_handlers_if_not_initialized()\n self._unbind_all_events(event_type=EventType.CLICK, handlers_dict=self._click_handlers)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000124", "length_bytes": 2924, "license_type": "permissive", "methods": [{"docstring": "Add click event listener setting. Parameters ---------- handler : Handler Callable that called when this instance is clicked. options : dict or None, default None Optional arguments dictionary to be passed to handler. Returns ------- name : str Handler's name.", "name": "click", "signature": "def click(self, handler: Handler, options: Optional[Dict[str, Any]]=None) -> str"}, {"docstring": "Initialize _click_handlers attribute if it is not initialized yet.", "name": "_initialize_click_handlers_if_not_initialized", "signature": "def _initialize_click_handlers_if_not_initialized(self) -> None"}, {"docstring": "Unbind specified handler's click event. Parameters ---------- handler : Handler Callable to be unbinded.", "name": "unbind_click", "signature": "def unbind_click(self, handler: Handler) -> None"}, {"docstring": "Unbind all click events.", "name": "unbind_click_all", "signature": "def unbind_click_all(self) -> None"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_test_000133", "prompt": "Implement the Python class `ClickInterface` described below.\n\nClass description:\nImplement the ClickInterface class.\n\nMethod signatures and docstrings:\n- def click(self, handler: Handler, options: Optional[Dict[str, Any]]=None) -> str: Add click event listener setting. Parameters ---------- handler : Handler Callable that called when this instance is clicked. options : dict or None, default None Optional arguments dictionary to be passed to handler. Returns ------- name : str Handler's name.\n- def _initialize_click_handlers_if_not_initialized(self) -> None: Initialize _click_handlers attribute if it is not initialized yet.\n- def unbind_click(self, handler: Handler) -> None: Unbind specified handler's click event. 
Parameters ---------- handler : Handler Callable to be unbinded.\n- def unbind_click_all(self) -> None: Unbind all click events.", "prompted_full_text": "Implement the Python class `ClickInterface` described below.\n\nClass description:\nImplement the ClickInterface class.\n\nMethod signatures and docstrings:\n- def click(self, handler: Handler, options: Optional[Dict[str, Any]]=None) -> str: Add click event listener setting. Parameters ---------- handler : Handler Callable that called when this instance is clicked. options : dict or None, default None Optional arguments dictionary to be passed to handler. Returns ------- name : str Handler's name.\n- def _initialize_click_handlers_if_not_initialized(self) -> None: Initialize _click_handlers attribute if it is not initialized yet.\n- def unbind_click(self, handler: Handler) -> None: Unbind specified handler's click event. Parameters ---------- handler : Handler Callable to be unbinded.\n- def unbind_click_all(self) -> None: Unbind all click events.\n\n<|skeleton|>\nclass ClickInterface:\n\n def click(self, handler: Handler, options: Optional[Dict[str, Any]]=None) -> str:\n \"\"\"Add click event listener setting. Parameters ---------- handler : Handler Callable that called when this instance is clicked. options : dict or None, default None Optional arguments dictionary to be passed to handler. Returns ------- name : str Handler's name.\"\"\"\n <|body_0|>\n\n def _initialize_click_handlers_if_not_initialized(self) -> None:\n \"\"\"Initialize _click_handlers attribute if it is not initialized yet.\"\"\"\n <|body_1|>\n\n def unbind_click(self, handler: Handler) -> None:\n \"\"\"Unbind specified handler's click event. Parameters ---------- handler : Handler Callable to be unbinded.\"\"\"\n <|body_2|>\n\n def unbind_click_all(self) -> None:\n \"\"\"Unbind all click events.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n from apysc import EventType\n from apysc import MouseEvent\n from apysc.event.handler import append_handler_expression\n from apysc.event.handler import get_handler_name\n from apysc.type.variable_name_interface import VariableNameInterface\n self_instance: VariableNameInterface = self._validate_self_is_variable_name_interface()\n self._initialize_click_handlers_if_not_initialized()\n name: str = get_handler_name(handler=handler, instance=self)\n self._set_handler_data(handler=handler, handlers_dict=self._click_handlers, options=options)\n self._append_event_binding_expression(name=name, event_type=EventType.CLICK)\n e: MouseEvent = MouseEvent(this=self_instance)\n append_handler_expression(handler_data=self._click_handlers[name], handler_name=name, e=e)\n return name\n<|end_body_0|>\n\n<|body_start_1|>\n if hasattr(self, '_click_handlers'):\n return\n self._click_handlers = {}\n<|end_body_1|>\n\n<|body_start_2|>\n from apysc import EventType\n self._initialize_click_handlers_if_not_initialized()\n self._unbind_event(handler=handler, event_type=EventType.CLICK, handlers_dict=self._click_handlers)\n<|end_body_2|>\n\n<|body_start_3|>\n from apysc import EventType\n self._initialize_click_handlers_if_not_initialized()\n self._unbind_all_events(event_type=EventType.CLICK, handlers_dict=self._click_handlers)\n<|end_body_3|>\n", "revision_id": "5c6a4674e2e9684cb2cb1325dc9b070879d4d355", "skeleton": "<|skeleton|>\nclass ClickInterface:\n\n def click(self, handler: Handler, options: Optional[Dict[str, Any]]=None) -> str:\n \"\"\"Add click event listener setting. 
Parameters ---------- handler : Handler Callable that called when this instance is clicked. options : dict or None, default None Optional arguments dictionary to be passed to handler. Returns ------- name : str Handler's name.\"\"\"\n <|body_0|>\n\n def _initialize_click_handlers_if_not_initialized(self) -> None:\n \"\"\"Initialize _click_handlers attribute if it is not initialized yet.\"\"\"\n <|body_1|>\n\n def unbind_click(self, handler: Handler) -> None:\n \"\"\"Unbind specified handler's click event. Parameters ---------- handler : Handler Callable to be unbinded.\"\"\"\n <|body_2|>\n\n def unbind_click_all(self) -> None:\n \"\"\"Unbind all click events.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ClickInterface:\n def click(self, handler: Handler, options: Optional[Dict[str, Any]]=None) -> str:\n \"\"\"Add click event listener setting. Parameters ---------- handler : Handler Callable that called when this instance is clicked. options : dict or None, default None Optional arguments dictionary to be passed to handler. Returns ------- name : str Handler's name.\"\"\"\n from apysc import EventType\n from apysc import MouseEvent\n from apysc.event.handler import append_handler_expression\n from apysc.event.handler import get_handler_name\n from apysc.type.variable_name_interface import VariableNameInterface\n self_instance: VariableNameInterface = self._validate_self_is_variable_name_interface()\n self._initialize_click_handlers_if_not_initialized()\n name: str = get_handler_name(handler=handler, instance=self)\n self._set_handler_data(handler=handler, handlers_dict=self._click_handlers, options=options)\n self._append_event_binding_expression(name=name, event_type=EventType.CLICK)\n e: MouseEvent = MouseEvent(this=self_instance)\n append_handler_expression(handler_data=self._click_handlers[name], handler_name=name, e=e)\n return name\n\n def _initialize_click_handlers_if_not_initialized(self) -> None:\n \"\"\"Initialize _click_handlers attribute if it is not initialized yet.\"\"\"\n if hasattr(self, '_click_handlers'):\n return\n self._click_handlers = {}\n\n def unbind_click(self, handler: Handler) -> None:\n \"\"\"Unbind specified handler's click event. Parameters ---------- handler : Handler Callable to be unbinded.\"\"\"\n from apysc import EventType\n self._initialize_click_handlers_if_not_initialized()\n self._unbind_event(handler=handler, event_type=EventType.CLICK, handlers_dict=self._click_handlers)\n\n def unbind_click_all(self) -> None:\n \"\"\"Unbind all click events.\"\"\"\n from apysc import EventType\n self._initialize_click_handlers_if_not_initialized()\n self._unbind_all_events(event_type=EventType.CLICK, handlers_dict=self._click_handlers)\n", "source": "the_stack_v2_python_sparse", "source_path": "apysc/event/click_interface.py", "source_repo": "TrendingTechnology/apysc", "split": "val", "star_events_count": 0}
{"blob_id": "21fae193ed6bc59fa080065cf0c38327ca21606a", "bodies": ["self.callback = callback\nself._match_all = address_filters is None and group_addresses is None\nself._match_outgoing = match_for_outgoing_telegrams\nself.address_filters = [] if address_filters is None else address_filters\nself.group_addresses = [] if group_addresses is None else group_addresses", "if not self._match_outgoing and telegram.direction == TelegramDirection.OUTGOING:\n return False\nif self._match_all:\n return True\nif isinstance(telegram.destination_address, (GroupAddress, InternalGroupAddress)):\n for address_filter in self.address_filters:\n if address_filter.match(telegram.destination_address):\n return True\n for group_address in self.group_addresses:\n if telegram.destination_address == group_address:\n return True\nreturn False"], "bodies_text": "<|body_start_0|>\n self.callback = callback\n self._match_all = address_filters is None and group_addresses is None\n self._match_outgoing = match_for_outgoing_telegrams\n self.address_filters = [] if address_filters is None else address_filters\n self.group_addresses = [] if group_addresses is None else group_addresses\n<|end_body_0|>\n\n<|body_start_1|>\n if not self._match_outgoing and telegram.direction == TelegramDirection.OUTGOING:\n return False\n if self._match_all:\n return True\n if isinstance(telegram.destination_address, (GroupAddress, InternalGroupAddress)):\n for address_filter in self.address_filters:\n if address_filter.match(telegram.destination_address):\n return True\n for group_address in self.group_addresses:\n if telegram.destination_address == group_address:\n return True\n return False\n<|end_body_1|>\n", "class_docstring": "Callback class for handling telegram received callbacks.", "class_name": "Callback", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Callback:\n \"\"\"Callback class for handling telegram received callbacks.\"\"\"\n\n def __init__(self, callback: AsyncTelegramCallback, address_filters: list[AddressFilter] | None=None, group_addresses: list[GroupAddress | InternalGroupAddress] | None=None, match_for_outgoing_telegrams: bool=False):\n \"\"\"Initialize Callback class.\"\"\"\n <|body_0|>\n\n def is_within_filter(self, telegram: Telegram) -> bool:\n \"\"\"Test if callback is filtering for group address.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.callback = callback\n self._match_all = address_filters is None and group_addresses is None\n self._match_outgoing = match_for_outgoing_telegrams\n self.address_filters = [] if address_filters is None else address_filters\n self.group_addresses = [] if group_addresses is None else group_addresses\n<|end_body_0|>\n\n<|body_start_1|>\n if not self._match_outgoing and telegram.direction == TelegramDirection.OUTGOING:\n return False\n if self._match_all:\n return True\n if isinstance(telegram.destination_address, (GroupAddress, InternalGroupAddress)):\n for address_filter in self.address_filters:\n if address_filter.match(telegram.destination_address):\n return True\n for group_address in self.group_addresses:\n if telegram.destination_address == group_address:\n return True\n return False\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000125", "length_bytes": 9724, "license_type": "permissive", "methods": [{"docstring": "Initialize Callback class.", "name": "__init__", "signature": "def __init__(self, callback: AsyncTelegramCallback, address_filters: list[AddressFilter] | 
None=None, group_addresses: list[GroupAddress | InternalGroupAddress] | None=None, match_for_outgoing_telegrams: bool=False)"}, {"docstring": "Test if callback is filtering for group address.", "name": "is_within_filter", "signature": "def is_within_filter(self, telegram: Telegram) -> bool"}], "n_methods": 2, "prompt": "Implement the Python class `Callback` described below.\n\nClass description:\nCallback class for handling telegram received callbacks.\n\nMethod signatures and docstrings:\n- def __init__(self, callback: AsyncTelegramCallback, address_filters: list[AddressFilter] | None=None, group_addresses: list[GroupAddress | InternalGroupAddress] | None=None, match_for_outgoing_telegrams: bool=False): Initialize Callback class.\n- def is_within_filter(self, telegram: Telegram) -> bool: Test if callback is filtering for group address.", "prompted_full_text": "Implement the Python class `Callback` described below.\n\nClass description:\nCallback class for handling telegram received callbacks.\n\nMethod signatures and docstrings:\n- def __init__(self, callback: AsyncTelegramCallback, address_filters: list[AddressFilter] | None=None, group_addresses: list[GroupAddress | InternalGroupAddress] | None=None, match_for_outgoing_telegrams: bool=False): Initialize Callback class.\n- def is_within_filter(self, telegram: Telegram) -> bool: Test if callback is filtering for group address.\n\n<|skeleton|>\nclass Callback:\n \"\"\"Callback class for handling telegram received callbacks.\"\"\"\n\n def __init__(self, callback: AsyncTelegramCallback, address_filters: list[AddressFilter] | None=None, group_addresses: list[GroupAddress | InternalGroupAddress] | None=None, match_for_outgoing_telegrams: bool=False):\n \"\"\"Initialize Callback class.\"\"\"\n <|body_0|>\n\n def is_within_filter(self, telegram: Telegram) -> bool:\n \"\"\"Test if callback is filtering for group address.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.callback = callback\n self._match_all = address_filters is None and group_addresses is None\n self._match_outgoing = match_for_outgoing_telegrams\n self.address_filters = [] if address_filters is None else address_filters\n self.group_addresses = [] if group_addresses is None else group_addresses\n<|end_body_0|>\n\n<|body_start_1|>\n if not self._match_outgoing and telegram.direction == TelegramDirection.OUTGOING:\n return False\n if self._match_all:\n return True\n if isinstance(telegram.destination_address, (GroupAddress, InternalGroupAddress)):\n for address_filter in self.address_filters:\n if address_filter.match(telegram.destination_address):\n return True\n for group_address in self.group_addresses:\n if telegram.destination_address == group_address:\n return True\n return False\n<|end_body_1|>\n", "revision_id": "48d4e31365c15e632b275f0d129cd9f2b2b5717d", "skeleton": "<|skeleton|>\nclass Callback:\n \"\"\"Callback class for handling telegram received callbacks.\"\"\"\n\n def __init__(self, callback: AsyncTelegramCallback, address_filters: list[AddressFilter] | None=None, group_addresses: list[GroupAddress | InternalGroupAddress] | None=None, match_for_outgoing_telegrams: bool=False):\n \"\"\"Initialize Callback class.\"\"\"\n <|body_0|>\n\n def is_within_filter(self, telegram: Telegram) -> bool:\n \"\"\"Test if callback is filtering for group address.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class 
Callback:\n \"\"\"Callback class for handling telegram received callbacks.\"\"\"\n\n def __init__(self, callback: AsyncTelegramCallback, address_filters: list[AddressFilter] | None=None, group_addresses: list[GroupAddress | InternalGroupAddress] | None=None, match_for_outgoing_telegrams: bool=False):\n \"\"\"Initialize Callback class.\"\"\"\n self.callback = callback\n self._match_all = address_filters is None and group_addresses is None\n self._match_outgoing = match_for_outgoing_telegrams\n self.address_filters = [] if address_filters is None else address_filters\n self.group_addresses = [] if group_addresses is None else group_addresses\n\n def is_within_filter(self, telegram: Telegram) -> bool:\n \"\"\"Test if callback is filtering for group address.\"\"\"\n if not self._match_outgoing and telegram.direction == TelegramDirection.OUTGOING:\n return False\n if self._match_all:\n return True\n if isinstance(telegram.destination_address, (GroupAddress, InternalGroupAddress)):\n for address_filter in self.address_filters:\n if address_filter.match(telegram.destination_address):\n return True\n for group_address in self.group_addresses:\n if telegram.destination_address == group_address:\n return True\n return False\n", "source": "the_stack_v2_python_sparse", "source_path": "xknx/core/telegram_queue.py", "source_repo": "XKNX/xknx", "split": "val", "star_events_count": 248}
{"blob_id": "d53b630b93502f2da5afbaad1d72ae347ddf1ccb", "bodies": ["try:\n ArgsMetaschemaProperty.instance2args(obj)\n KwargsMetaschemaProperty.instance2kwargs(obj)\n return True\nexcept MetaschemaTypeError:\n if raise_errors:\n raise ValueError(\"Class dosn't have an input_args attribute.\")\n return False", "args = ArgsMetaschemaProperty.instance2args(obj)\nkwargs = KwargsMetaschemaProperty.instance2kwargs(obj)\ntypedef_args = None\ntypedef_kwargs = None\nif isinstance(typedef, dict):\n if 'args' in typedef:\n typedef_args = {'items': typedef['args']}\n if 'kwargs' in typedef:\n typedef_kwargs = {'properties': typedef['kwargs']}\nout = [JSONArrayMetaschemaType.encode_data(args, typedef_args), JSONObjectMetaschemaType.encode_data(kwargs, typedef_kwargs)]\nreturn out", "typedef = cls.normalize_definition(typedef)\nassert isinstance(obj, list)\nassert len(obj) == 2\nargs = JSONArrayMetaschemaType.decode_data(obj[0], {'items': typedef.get('args', [])})\nkwargs = JSONObjectMetaschemaType.decode_data(obj[1], {'properties': typedef.get('kwargs', {})})\nreturn typedef['class'](*args, **kwargs)", "typedef = cls.normalize_definition(typedef)\nargs = JSONArrayMetaschemaType.generate_data({'type': 'array', 'items': typedef.get('args', [])})\nkwargs = JSONObjectMetaschemaType.generate_data({'type': 'object', 'properties': typedef.get('kwargs', {})})\nreturn typedef['class'](*args, **kwargs)"], "bodies_text": "<|body_start_0|>\n try:\n ArgsMetaschemaProperty.instance2args(obj)\n KwargsMetaschemaProperty.instance2kwargs(obj)\n return True\n except MetaschemaTypeError:\n if raise_errors:\n raise ValueError(\"Class dosn't have an input_args attribute.\")\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n args = ArgsMetaschemaProperty.instance2args(obj)\n kwargs = KwargsMetaschemaProperty.instance2kwargs(obj)\n typedef_args = None\n typedef_kwargs = None\n if isinstance(typedef, dict):\n if 'args' in typedef:\n typedef_args = {'items': typedef['args']}\n if 'kwargs' in typedef:\n typedef_kwargs = {'properties': typedef['kwargs']}\n out = [JSONArrayMetaschemaType.encode_data(args, typedef_args), JSONObjectMetaschemaType.encode_data(kwargs, typedef_kwargs)]\n return out\n<|end_body_1|>\n\n<|body_start_2|>\n typedef = cls.normalize_definition(typedef)\n assert isinstance(obj, list)\n assert len(obj) == 2\n args = JSONArrayMetaschemaType.decode_data(obj[0], {'items': typedef.get('args', [])})\n kwargs = JSONObjectMetaschemaType.decode_data(obj[1], {'properties': typedef.get('kwargs', {})})\n return typedef['class'](*args, **kwargs)\n<|end_body_2|>\n\n<|body_start_3|>\n typedef = cls.normalize_definition(typedef)\n args = JSONArrayMetaschemaType.generate_data({'type': 'array', 'items': typedef.get('args', [])})\n kwargs = JSONObjectMetaschemaType.generate_data({'type': 'object', 'properties': typedef.get('kwargs', {})})\n return typedef['class'](*args, **kwargs)\n<|end_body_3|>\n", "class_docstring": "Type for evaluating instances of Python classes.", "class_name": "InstanceMetaschemaType", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass InstanceMetaschemaType:\n \"\"\"Type for evaluating instances of Python classes.\"\"\"\n\n def validate(cls, obj, raise_errors=False):\n \"\"\"Validate an object to check if it could be of this type. Args: obj (object): Object to validate. raise_errors (bool, optional): If True, errors will be raised when the object fails to be validated. Defaults to False. 
Returns: bool: True if the object could be of this type, False otherwise.\"\"\"\n <|body_0|>\n\n def encode_data(cls, obj, typedef):\n \"\"\"Encode an object's data. Args: obj (object): Object to encode. typedef (dict): Type definition that should be used to encode the object. Returns: string: Encoded object.\"\"\"\n <|body_1|>\n\n def decode_data(cls, obj, typedef):\n \"\"\"Decode an object. Args: obj (string): Encoded object to decode. typedef (dict): Type definition that should be used to decode the object. Returns: object: Decoded object.\"\"\"\n <|body_2|>\n\n def _generate_data(cls, typedef):\n \"\"\"Generate mock data for the specified type. Args: typedef (dict): Type definition. Returns: object: Python object of the specified type.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n ArgsMetaschemaProperty.instance2args(obj)\n KwargsMetaschemaProperty.instance2kwargs(obj)\n return True\n except MetaschemaTypeError:\n if raise_errors:\n raise ValueError(\"Class dosn't have an input_args attribute.\")\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n args = ArgsMetaschemaProperty.instance2args(obj)\n kwargs = KwargsMetaschemaProperty.instance2kwargs(obj)\n typedef_args = None\n typedef_kwargs = None\n if isinstance(typedef, dict):\n if 'args' in typedef:\n typedef_args = {'items': typedef['args']}\n if 'kwargs' in typedef:\n typedef_kwargs = {'properties': typedef['kwargs']}\n out = [JSONArrayMetaschemaType.encode_data(args, typedef_args), JSONObjectMetaschemaType.encode_data(kwargs, typedef_kwargs)]\n return out\n<|end_body_1|>\n\n<|body_start_2|>\n typedef = cls.normalize_definition(typedef)\n assert isinstance(obj, list)\n assert len(obj) == 2\n args = JSONArrayMetaschemaType.decode_data(obj[0], {'items': typedef.get('args', [])})\n kwargs = JSONObjectMetaschemaType.decode_data(obj[1], {'properties': typedef.get('kwargs', {})})\n return typedef['class'](*args, **kwargs)\n<|end_body_2|>\n\n<|body_start_3|>\n typedef = cls.normalize_definition(typedef)\n args = JSONArrayMetaschemaType.generate_data({'type': 'array', 'items': typedef.get('args', [])})\n kwargs = JSONObjectMetaschemaType.generate_data({'type': 'object', 'properties': typedef.get('kwargs', {})})\n return typedef['class'](*args, **kwargs)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000126", "length_bytes": 4172, "license_type": "permissive", "methods": [{"docstring": "Validate an object to check if it could be of this type. Args: obj (object): Object to validate. raise_errors (bool, optional): If True, errors will be raised when the object fails to be validated. Defaults to False. Returns: bool: True if the object could be of this type, False otherwise.", "name": "validate", "signature": "def validate(cls, obj, raise_errors=False)"}, {"docstring": "Encode an object's data. Args: obj (object): Object to encode. typedef (dict): Type definition that should be used to encode the object. Returns: string: Encoded object.", "name": "encode_data", "signature": "def encode_data(cls, obj, typedef)"}, {"docstring": "Decode an object. Args: obj (string): Encoded object to decode. typedef (dict): Type definition that should be used to decode the object. Returns: object: Decoded object.", "name": "decode_data", "signature": "def decode_data(cls, obj, typedef)"}, {"docstring": "Generate mock data for the specified type. Args: typedef (dict): Type definition. 
Returns: object: Python object of the specified type.", "name": "_generate_data", "signature": "def _generate_data(cls, typedef)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_047419", "prompt": "Implement the Python class `InstanceMetaschemaType` described below.\n\nClass description:\nType for evaluating instances of Python classes.\n\nMethod signatures and docstrings:\n- def validate(cls, obj, raise_errors=False): Validate an object to check if it could be of this type. Args: obj (object): Object to validate. raise_errors (bool, optional): If True, errors will be raised when the object fails to be validated. Defaults to False. Returns: bool: True if the object could be of this type, False otherwise.\n- def encode_data(cls, obj, typedef): Encode an object's data. Args: obj (object): Object to encode. typedef (dict): Type definition that should be used to encode the object. Returns: string: Encoded object.\n- def decode_data(cls, obj, typedef): Decode an object. Args: obj (string): Encoded object to decode. typedef (dict): Type definition that should be used to decode the object. Returns: object: Decoded object.\n- def _generate_data(cls, typedef): Generate mock data for the specified type. Args: typedef (dict): Type definition. Returns: object: Python object of the specified type.", "prompted_full_text": "Implement the Python class `InstanceMetaschemaType` described below.\n\nClass description:\nType for evaluating instances of Python classes.\n\nMethod signatures and docstrings:\n- def validate(cls, obj, raise_errors=False): Validate an object to check if it could be of this type. Args: obj (object): Object to validate. raise_errors (bool, optional): If True, errors will be raised when the object fails to be validated. Defaults to False. Returns: bool: True if the object could be of this type, False otherwise.\n- def encode_data(cls, obj, typedef): Encode an object's data. Args: obj (object): Object to encode. typedef (dict): Type definition that should be used to encode the object. Returns: string: Encoded object.\n- def decode_data(cls, obj, typedef): Decode an object. Args: obj (string): Encoded object to decode. typedef (dict): Type definition that should be used to decode the object. Returns: object: Decoded object.\n- def _generate_data(cls, typedef): Generate mock data for the specified type. Args: typedef (dict): Type definition. Returns: object: Python object of the specified type.\n\n<|skeleton|>\nclass InstanceMetaschemaType:\n \"\"\"Type for evaluating instances of Python classes.\"\"\"\n\n def validate(cls, obj, raise_errors=False):\n \"\"\"Validate an object to check if it could be of this type. Args: obj (object): Object to validate. raise_errors (bool, optional): If True, errors will be raised when the object fails to be validated. Defaults to False. Returns: bool: True if the object could be of this type, False otherwise.\"\"\"\n <|body_0|>\n\n def encode_data(cls, obj, typedef):\n \"\"\"Encode an object's data. Args: obj (object): Object to encode. typedef (dict): Type definition that should be used to encode the object. Returns: string: Encoded object.\"\"\"\n <|body_1|>\n\n def decode_data(cls, obj, typedef):\n \"\"\"Decode an object. Args: obj (string): Encoded object to decode. typedef (dict): Type definition that should be used to decode the object. Returns: object: Decoded object.\"\"\"\n <|body_2|>\n\n def _generate_data(cls, typedef):\n \"\"\"Generate mock data for the specified type. Args: typedef (dict): Type definition. 
Returns: object: Python object of the specified type.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n ArgsMetaschemaProperty.instance2args(obj)\n KwargsMetaschemaProperty.instance2kwargs(obj)\n return True\n except MetaschemaTypeError:\n if raise_errors:\n raise ValueError(\"Class dosn't have an input_args attribute.\")\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n args = ArgsMetaschemaProperty.instance2args(obj)\n kwargs = KwargsMetaschemaProperty.instance2kwargs(obj)\n typedef_args = None\n typedef_kwargs = None\n if isinstance(typedef, dict):\n if 'args' in typedef:\n typedef_args = {'items': typedef['args']}\n if 'kwargs' in typedef:\n typedef_kwargs = {'properties': typedef['kwargs']}\n out = [JSONArrayMetaschemaType.encode_data(args, typedef_args), JSONObjectMetaschemaType.encode_data(kwargs, typedef_kwargs)]\n return out\n<|end_body_1|>\n\n<|body_start_2|>\n typedef = cls.normalize_definition(typedef)\n assert isinstance(obj, list)\n assert len(obj) == 2\n args = JSONArrayMetaschemaType.decode_data(obj[0], {'items': typedef.get('args', [])})\n kwargs = JSONObjectMetaschemaType.decode_data(obj[1], {'properties': typedef.get('kwargs', {})})\n return typedef['class'](*args, **kwargs)\n<|end_body_2|>\n\n<|body_start_3|>\n typedef = cls.normalize_definition(typedef)\n args = JSONArrayMetaschemaType.generate_data({'type': 'array', 'items': typedef.get('args', [])})\n kwargs = JSONObjectMetaschemaType.generate_data({'type': 'object', 'properties': typedef.get('kwargs', {})})\n return typedef['class'](*args, **kwargs)\n<|end_body_3|>\n", "revision_id": "dcc4d75a4d2c6aaa7e50e75095a16df1df6b2b0a", "skeleton": "<|skeleton|>\nclass InstanceMetaschemaType:\n \"\"\"Type for evaluating instances of Python classes.\"\"\"\n\n def validate(cls, obj, raise_errors=False):\n \"\"\"Validate an object to check if it could be of this type. Args: obj (object): Object to validate. raise_errors (bool, optional): If True, errors will be raised when the object fails to be validated. Defaults to False. Returns: bool: True if the object could be of this type, False otherwise.\"\"\"\n <|body_0|>\n\n def encode_data(cls, obj, typedef):\n \"\"\"Encode an object's data. Args: obj (object): Object to encode. typedef (dict): Type definition that should be used to encode the object. Returns: string: Encoded object.\"\"\"\n <|body_1|>\n\n def decode_data(cls, obj, typedef):\n \"\"\"Decode an object. Args: obj (string): Encoded object to decode. typedef (dict): Type definition that should be used to decode the object. Returns: object: Decoded object.\"\"\"\n <|body_2|>\n\n def _generate_data(cls, typedef):\n \"\"\"Generate mock data for the specified type. Args: typedef (dict): Type definition. Returns: object: Python object of the specified type.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class InstanceMetaschemaType:\n \"\"\"Type for evaluating instances of Python classes.\"\"\"\n\n def validate(cls, obj, raise_errors=False):\n \"\"\"Validate an object to check if it could be of this type. Args: obj (object): Object to validate. raise_errors (bool, optional): If True, errors will be raised when the object fails to be validated. Defaults to False. 
Returns: bool: True if the object could be of this type, False otherwise.\"\"\"\n try:\n ArgsMetaschemaProperty.instance2args(obj)\n KwargsMetaschemaProperty.instance2kwargs(obj)\n return True\n except MetaschemaTypeError:\n if raise_errors:\n raise ValueError(\"Class dosn't have an input_args attribute.\")\n return False\n\n def encode_data(cls, obj, typedef):\n \"\"\"Encode an object's data. Args: obj (object): Object to encode. typedef (dict): Type definition that should be used to encode the object. Returns: string: Encoded object.\"\"\"\n args = ArgsMetaschemaProperty.instance2args(obj)\n kwargs = KwargsMetaschemaProperty.instance2kwargs(obj)\n typedef_args = None\n typedef_kwargs = None\n if isinstance(typedef, dict):\n if 'args' in typedef:\n typedef_args = {'items': typedef['args']}\n if 'kwargs' in typedef:\n typedef_kwargs = {'properties': typedef['kwargs']}\n out = [JSONArrayMetaschemaType.encode_data(args, typedef_args), JSONObjectMetaschemaType.encode_data(kwargs, typedef_kwargs)]\n return out\n\n def decode_data(cls, obj, typedef):\n \"\"\"Decode an object. Args: obj (string): Encoded object to decode. typedef (dict): Type definition that should be used to decode the object. Returns: object: Decoded object.\"\"\"\n typedef = cls.normalize_definition(typedef)\n assert isinstance(obj, list)\n assert len(obj) == 2\n args = JSONArrayMetaschemaType.decode_data(obj[0], {'items': typedef.get('args', [])})\n kwargs = JSONObjectMetaschemaType.decode_data(obj[1], {'properties': typedef.get('kwargs', {})})\n return typedef['class'](*args, **kwargs)\n\n def _generate_data(cls, typedef):\n \"\"\"Generate mock data for the specified type. Args: typedef (dict): Type definition. Returns: object: Python object of the specified type.\"\"\"\n typedef = cls.normalize_definition(typedef)\n args = JSONArrayMetaschemaType.generate_data({'type': 'array', 'items': typedef.get('args', [])})\n kwargs = JSONObjectMetaschemaType.generate_data({'type': 'object', 'properties': typedef.get('kwargs', {})})\n return typedef['class'](*args, **kwargs)\n", "source": "the_stack_v2_python_sparse", "source_path": "yggdrasil/metaschema/datatypes/InstanceMetaschemaType.py", "source_repo": "leighmatth/yggdrasil", "split": "val", "star_events_count": 0}
{"blob_id": "0db1a313c88d0fd109a3f5b468fd4d0704819c29", "bodies": ["out = self._get_component('netcdf').get('unlimited_dimensions')\nif out is None:\n return set()\nreturn set(out)", "out = self._get_component('netcdf').get('unlimited_dimensions')\nif out is None:\n out = set()\nelse:\n out = set(out)\nout.update(axes)\nself._get_component('netcdf')['unlimited_dimensions'] = tuple(out)", "out = self._get_component('netcdf').get('unlimited_dimensions')\nif out is None:\n out = set()\nelse:\n out = set(out)\nself._get_component('netcdf')['unlimited_dimensions'] = ()\nreturn out"], "bodies_text": "<|body_start_0|>\n out = self._get_component('netcdf').get('unlimited_dimensions')\n if out is None:\n return set()\n return set(out)\n<|end_body_0|>\n\n<|body_start_1|>\n out = self._get_component('netcdf').get('unlimited_dimensions')\n if out is None:\n out = set()\n else:\n out = set(out)\n out.update(axes)\n self._get_component('netcdf')['unlimited_dimensions'] = tuple(out)\n<|end_body_1|>\n\n<|body_start_2|>\n out = self._get_component('netcdf').get('unlimited_dimensions')\n if out is None:\n out = set()\n else:\n out = set(out)\n self._get_component('netcdf')['unlimited_dimensions'] = ()\n return out\n<|end_body_2|>\n", "class_docstring": "Mixin class for accessing netCDF unlimited dimensions. .. versionadded:: 1.7.0", "class_name": "NetCDFUnlimitedDimensions", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass NetCDFUnlimitedDimensions:\n \"\"\"Mixin class for accessing netCDF unlimited dimensions. .. versionadded:: 1.7.0\"\"\"\n\n def nc_unlimited_dimensions(self):\n \"\"\"Return the selection of domain axis constructs to be written as netCDF unlimited dimensions. By default output netCDF dimensions are not unlimited. .. versionadded:: 1.7.0 .. seealso:: `cfdm.write`, `nc_clear_unlimited_dimensions`, `nc_set_unlimited_dimensions` :Returns: `set` The selection of domain axis constructs to be written as netCDF unlimited dimensions. **Examples:** >>> f.nc_set_unlimited_dimensions(['domainaxis0']) >>> f.nc_unlimited_dimensions() {'domainaxis0'} >>> f.nc_set_unlimited_dimensions(['domainaxis1']) >>> f.nc_unlimited_dimensions() {'domainaxis0', 'domainaxis1'} >>> f.nc_clear_unlimited_dimensions() {'domainaxis0', 'domainaxis1'} >>> f.nc_unlimited_dimensions() set()\"\"\"\n <|body_0|>\n\n def nc_set_unlimited_dimensions(self, axes):\n \"\"\"Select domain axis constructs to be written as netCDF unlimited dimensions. By default output netCDF dimensions are not unlimited. .. versionadded:: 1.7.0 .. seealso:: `cfdm.write`, `nc_unlimited_dimensions`, `nc_clear_unlimited_dimensions` :Parameters: axes: sequence of `str`, optional Select the domain axis constructs from the sequence provided. Domain axis constructs are identified by their construct identifiers. *Parameter example:* ``axes=['domainaxis0', 'domainaxis1']`` *Parameter example:* ``axes=()`` :Returns: `None` **Examples:** >>> f.nc_set_unlimited_dimensions(['domainaxis0']) >>> f.nc_unlimited_dimensions() {'domainaxis0'} >>> f.nc_set_unlimited_dimensions(['domainaxis1']) >>> f\"\"\"\n <|body_1|>\n\n def nc_clear_unlimited_dimensions(self):\n \"\"\"Remove the selection of domain axis constructs to be written as netCDF unlimited dimensions. By default output netCDF dimensions are not unlimited. .. versionadded:: 1.7.0 .. seealso:: `cfdm.write`, `nc_unlimited_dimensions`, `nc_set_unlimited_dimensions` :Returns: `set` The selection of domain axis constructs that has been removed. 
**Examples:** >>> f.nc_set_unlimited_dimensions(['domainaxis0']) >>> f.nc_unlimited_dimensions() {'domainaxis0'} >>> f.nc_set_unlimited_dimensions(['domainaxis1']) >>> f.nc_unlimited_dimensions() {'domainaxis0', 'domainaxis1'} >>> f.nc_clear_unlimited_dimensions() {'domainaxis0', 'domainaxis1'} >>> f.nc_unlimited_dimensions() set()\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n out = self._get_component('netcdf').get('unlimited_dimensions')\n if out is None:\n return set()\n return set(out)\n<|end_body_0|>\n\n<|body_start_1|>\n out = self._get_component('netcdf').get('unlimited_dimensions')\n if out is None:\n out = set()\n else:\n out = set(out)\n out.update(axes)\n self._get_component('netcdf')['unlimited_dimensions'] = tuple(out)\n<|end_body_1|>\n\n<|body_start_2|>\n out = self._get_component('netcdf').get('unlimited_dimensions')\n if out is None:\n out = set()\n else:\n out = set(out)\n self._get_component('netcdf')['unlimited_dimensions'] = ()\n return out\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000127", "length_bytes": 32736, "license_type": "permissive", "methods": [{"docstring": "Return the selection of domain axis constructs to be written as netCDF unlimited dimensions. By default output netCDF dimensions are not unlimited. .. versionadded:: 1.7.0 .. seealso:: `cfdm.write`, `nc_clear_unlimited_dimensions`, `nc_set_unlimited_dimensions` :Returns: `set` The selection of domain axis constructs to be written as netCDF unlimited dimensions. **Examples:** >>> f.nc_set_unlimited_dimensions(['domainaxis0']) >>> f.nc_unlimited_dimensions() {'domainaxis0'} >>> f.nc_set_unlimited_dimensions(['domainaxis1']) >>> f.nc_unlimited_dimensions() {'domainaxis0', 'domainaxis1'} >>> f.nc_clear_unlimited_dimensions() {'domainaxis0', 'domainaxis1'} >>> f.nc_unlimited_dimensions() set()", "name": "nc_unlimited_dimensions", "signature": "def nc_unlimited_dimensions(self)"}, {"docstring": "Select domain axis constructs to be written as netCDF unlimited dimensions. By default output netCDF dimensions are not unlimited. .. versionadded:: 1.7.0 .. seealso:: `cfdm.write`, `nc_unlimited_dimensions`, `nc_clear_unlimited_dimensions` :Parameters: axes: sequence of `str`, optional Select the domain axis constructs from the sequence provided. Domain axis constructs are identified by their construct identifiers. *Parameter example:* ``axes=['domainaxis0', 'domainaxis1']`` *Parameter example:* ``axes=()`` :Returns: `None` **Examples:** >>> f.nc_set_unlimited_dimensions(['domainaxis0']) >>> f.nc_unlimited_dimensions() {'domainaxis0'} >>> f.nc_set_unlimited_dimensions(['domainaxis1']) >>> f", "name": "nc_set_unlimited_dimensions", "signature": "def nc_set_unlimited_dimensions(self, axes)"}, {"docstring": "Remove the selection of domain axis constructs to be written as netCDF unlimited dimensions. By default output netCDF dimensions are not unlimited. .. versionadded:: 1.7.0 .. seealso:: `cfdm.write`, `nc_unlimited_dimensions`, `nc_set_unlimited_dimensions` :Returns: `set` The selection of domain axis constructs that has been removed. 
**Examples:** >>> f.nc_set_unlimited_dimensions(['domainaxis0']) >>> f.nc_unlimited_dimensions() {'domainaxis0'} >>> f.nc_set_unlimited_dimensions(['domainaxis1']) >>> f.nc_unlimited_dimensions() {'domainaxis0', 'domainaxis1'} >>> f.nc_clear_unlimited_dimensions() {'domainaxis0', 'domainaxis1'} >>> f.nc_unlimited_dimensions() set()", "name": "nc_clear_unlimited_dimensions", "signature": "def nc_clear_unlimited_dimensions(self)"}], "n_methods": 3, "prompt": "Implement the Python class `NetCDFUnlimitedDimensions` described below.\n\nClass description:\nMixin class for accessing netCDF unlimited dimensions. .. versionadded:: 1.7.0\n\nMethod signatures and docstrings:\n- def nc_unlimited_dimensions(self): Return the selection of domain axis constructs to be written as netCDF unlimited dimensions. By default output netCDF dimensions are not unlimited. .. versionadded:: 1.7.0 .. seealso:: `cfdm.write`, `nc_clear_unlimited_dimensions`, `nc_set_unlimited_dimensions` :Returns: `set` The selection of domain axis constructs to be written as netCDF unlimited dimensions. **Examples:** >>> f.nc_set_unlimited_dimensions(['domainaxis0']) >>> f.nc_unlimited_dimensions() {'domainaxis0'} >>> f.nc_set_unlimited_dimensions(['domainaxis1']) >>> f.nc_unlimited_dimensions() {'domainaxis0', 'domainaxis1'} >>> f.nc_clear_unlimited_dimensions() {'domainaxis0', 'domainaxis1'} >>> f.nc_unlimited_dimensions() set()\n- def nc_set_unlimited_dimensions(self, axes): Select domain axis constructs to be written as netCDF unlimited dimensions. By default output netCDF dimensions are not unlimited. .. versionadded:: 1.7.0 .. seealso:: `cfdm.write`, `nc_unlimited_dimensions`, `nc_clear_unlimited_dimensions` :Parameters: axes: sequence of `str`, optional Select the domain axis constructs from the sequence provided. Domain axis constructs are identified by their construct identifiers. *Parameter example:* ``axes=['domainaxis0', 'domainaxis1']`` *Parameter example:* ``axes=()`` :Returns: `None` **Examples:** >>> f.nc_set_unlimited_dimensions(['domainaxis0']) >>> f.nc_unlimited_dimensions() {'domainaxis0'} >>> f.nc_set_unlimited_dimensions(['domainaxis1']) >>> f\n- def nc_clear_unlimited_dimensions(self): Remove the selection of domain axis constructs to be written as netCDF unlimited dimensions. By default output netCDF dimensions are not unlimited. .. versionadded:: 1.7.0 .. seealso:: `cfdm.write`, `nc_unlimited_dimensions`, `nc_set_unlimited_dimensions` :Returns: `set` The selection of domain axis constructs that has been removed. **Examples:** >>> f.nc_set_unlimited_dimensions(['domainaxis0']) >>> f.nc_unlimited_dimensions() {'domainaxis0'} >>> f.nc_set_unlimited_dimensions(['domainaxis1']) >>> f.nc_unlimited_dimensions() {'domainaxis0', 'domainaxis1'} >>> f.nc_clear_unlimited_dimensions() {'domainaxis0', 'domainaxis1'} >>> f.nc_unlimited_dimensions() set()", "prompted_full_text": "Implement the Python class `NetCDFUnlimitedDimensions` described below.\n\nClass description:\nMixin class for accessing netCDF unlimited dimensions. .. versionadded:: 1.7.0\n\nMethod signatures and docstrings:\n- def nc_unlimited_dimensions(self): Return the selection of domain axis constructs to be written as netCDF unlimited dimensions. By default output netCDF dimensions are not unlimited. .. versionadded:: 1.7.0 .. seealso:: `cfdm.write`, `nc_clear_unlimited_dimensions`, `nc_set_unlimited_dimensions` :Returns: `set` The selection of domain axis constructs to be written as netCDF unlimited dimensions. 
**Examples:** >>> f.nc_set_unlimited_dimensions(['domainaxis0']) >>> f.nc_unlimited_dimensions() {'domainaxis0'} >>> f.nc_set_unlimited_dimensions(['domainaxis1']) >>> f.nc_unlimited_dimensions() {'domainaxis0', 'domainaxis1'} >>> f.nc_clear_unlimited_dimensions() {'domainaxis0', 'domainaxis1'} >>> f.nc_unlimited_dimensions() set()\n- def nc_set_unlimited_dimensions(self, axes): Select domain axis constructs to be written as netCDF unlimited dimensions. By default output netCDF dimensions are not unlimited. .. versionadded:: 1.7.0 .. seealso:: `cfdm.write`, `nc_unlimited_dimensions`, `nc_clear_unlimited_dimensions` :Parameters: axes: sequence of `str`, optional Select the domain axis constructs from the sequence provided. Domain axis constructs are identified by their construct identifiers. *Parameter example:* ``axes=['domainaxis0', 'domainaxis1']`` *Parameter example:* ``axes=()`` :Returns: `None` **Examples:** >>> f.nc_set_unlimited_dimensions(['domainaxis0']) >>> f.nc_unlimited_dimensions() {'domainaxis0'} >>> f.nc_set_unlimited_dimensions(['domainaxis1']) >>> f\n- def nc_clear_unlimited_dimensions(self): Remove the selection of domain axis constructs to be written as netCDF unlimited dimensions. By default output netCDF dimensions are not unlimited. .. versionadded:: 1.7.0 .. seealso:: `cfdm.write`, `nc_unlimited_dimensions`, `nc_set_unlimited_dimensions` :Returns: `set` The selection of domain axis constructs that has been removed. **Examples:** >>> f.nc_set_unlimited_dimensions(['domainaxis0']) >>> f.nc_unlimited_dimensions() {'domainaxis0'} >>> f.nc_set_unlimited_dimensions(['domainaxis1']) >>> f.nc_unlimited_dimensions() {'domainaxis0', 'domainaxis1'} >>> f.nc_clear_unlimited_dimensions() {'domainaxis0', 'domainaxis1'} >>> f.nc_unlimited_dimensions() set()\n\n<|skeleton|>\nclass NetCDFUnlimitedDimensions:\n \"\"\"Mixin class for accessing netCDF unlimited dimensions. .. versionadded:: 1.7.0\"\"\"\n\n def nc_unlimited_dimensions(self):\n \"\"\"Return the selection of domain axis constructs to be written as netCDF unlimited dimensions. By default output netCDF dimensions are not unlimited. .. versionadded:: 1.7.0 .. seealso:: `cfdm.write`, `nc_clear_unlimited_dimensions`, `nc_set_unlimited_dimensions` :Returns: `set` The selection of domain axis constructs to be written as netCDF unlimited dimensions. **Examples:** >>> f.nc_set_unlimited_dimensions(['domainaxis0']) >>> f.nc_unlimited_dimensions() {'domainaxis0'} >>> f.nc_set_unlimited_dimensions(['domainaxis1']) >>> f.nc_unlimited_dimensions() {'domainaxis0', 'domainaxis1'} >>> f.nc_clear_unlimited_dimensions() {'domainaxis0', 'domainaxis1'} >>> f.nc_unlimited_dimensions() set()\"\"\"\n <|body_0|>\n\n def nc_set_unlimited_dimensions(self, axes):\n \"\"\"Select domain axis constructs to be written as netCDF unlimited dimensions. By default output netCDF dimensions are not unlimited. .. versionadded:: 1.7.0 .. seealso:: `cfdm.write`, `nc_unlimited_dimensions`, `nc_clear_unlimited_dimensions` :Parameters: axes: sequence of `str`, optional Select the domain axis constructs from the sequence provided. Domain axis constructs are identified by their construct identifiers. 
*Parameter example:* ``axes=['domainaxis0', 'domainaxis1']`` *Parameter example:* ``axes=()`` :Returns: `None` **Examples:** >>> f.nc_set_unlimited_dimensions(['domainaxis0']) >>> f.nc_unlimited_dimensions() {'domainaxis0'} >>> f.nc_set_unlimited_dimensions(['domainaxis1']) >>> f\"\"\"\n <|body_1|>\n\n def nc_clear_unlimited_dimensions(self):\n \"\"\"Remove the selection of domain axis constructs to be written as netCDF unlimited dimensions. By default output netCDF dimensions are not unlimited. .. versionadded:: 1.7.0 .. seealso:: `cfdm.write`, `nc_unlimited_dimensions`, `nc_set_unlimited_dimensions` :Returns: `set` The selection of domain axis constructs that has been removed. **Examples:** >>> f.nc_set_unlimited_dimensions(['domainaxis0']) >>> f.nc_unlimited_dimensions() {'domainaxis0'} >>> f.nc_set_unlimited_dimensions(['domainaxis1']) >>> f.nc_unlimited_dimensions() {'domainaxis0', 'domainaxis1'} >>> f.nc_clear_unlimited_dimensions() {'domainaxis0', 'domainaxis1'} >>> f.nc_unlimited_dimensions() set()\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n out = self._get_component('netcdf').get('unlimited_dimensions')\n if out is None:\n return set()\n return set(out)\n<|end_body_0|>\n\n<|body_start_1|>\n out = self._get_component('netcdf').get('unlimited_dimensions')\n if out is None:\n out = set()\n else:\n out = set(out)\n out.update(axes)\n self._get_component('netcdf')['unlimited_dimensions'] = tuple(out)\n<|end_body_1|>\n\n<|body_start_2|>\n out = self._get_component('netcdf').get('unlimited_dimensions')\n if out is None:\n out = set()\n else:\n out = set(out)\n self._get_component('netcdf')['unlimited_dimensions'] = ()\n return out\n<|end_body_2|>\n", "revision_id": "1e074dbc28054780a9ec667d61b9098b94956ea6", "skeleton": "<|skeleton|>\nclass NetCDFUnlimitedDimensions:\n \"\"\"Mixin class for accessing netCDF unlimited dimensions. .. versionadded:: 1.7.0\"\"\"\n\n def nc_unlimited_dimensions(self):\n \"\"\"Return the selection of domain axis constructs to be written as netCDF unlimited dimensions. By default output netCDF dimensions are not unlimited. .. versionadded:: 1.7.0 .. seealso:: `cfdm.write`, `nc_clear_unlimited_dimensions`, `nc_set_unlimited_dimensions` :Returns: `set` The selection of domain axis constructs to be written as netCDF unlimited dimensions. **Examples:** >>> f.nc_set_unlimited_dimensions(['domainaxis0']) >>> f.nc_unlimited_dimensions() {'domainaxis0'} >>> f.nc_set_unlimited_dimensions(['domainaxis1']) >>> f.nc_unlimited_dimensions() {'domainaxis0', 'domainaxis1'} >>> f.nc_clear_unlimited_dimensions() {'domainaxis0', 'domainaxis1'} >>> f.nc_unlimited_dimensions() set()\"\"\"\n <|body_0|>\n\n def nc_set_unlimited_dimensions(self, axes):\n \"\"\"Select domain axis constructs to be written as netCDF unlimited dimensions. By default output netCDF dimensions are not unlimited. .. versionadded:: 1.7.0 .. seealso:: `cfdm.write`, `nc_unlimited_dimensions`, `nc_clear_unlimited_dimensions` :Parameters: axes: sequence of `str`, optional Select the domain axis constructs from the sequence provided. Domain axis constructs are identified by their construct identifiers. 
*Parameter example:* ``axes=['domainaxis0', 'domainaxis1']`` *Parameter example:* ``axes=()`` :Returns: `None` **Examples:** >>> f.nc_set_unlimited_dimensions(['domainaxis0']) >>> f.nc_unlimited_dimensions() {'domainaxis0'} >>> f.nc_set_unlimited_dimensions(['domainaxis1']) >>> f\"\"\"\n <|body_1|>\n\n def nc_clear_unlimited_dimensions(self):\n \"\"\"Remove the selection of domain axis constructs to be written as netCDF unlimited dimensions. By default output netCDF dimensions are not unlimited. .. versionadded:: 1.7.0 .. seealso:: `cfdm.write`, `nc_unlimited_dimensions`, `nc_set_unlimited_dimensions` :Returns: `set` The selection of domain axis constructs that has been removed. **Examples:** >>> f.nc_set_unlimited_dimensions(['domainaxis0']) >>> f.nc_unlimited_dimensions() {'domainaxis0'} >>> f.nc_set_unlimited_dimensions(['domainaxis1']) >>> f.nc_unlimited_dimensions() {'domainaxis0', 'domainaxis1'} >>> f.nc_clear_unlimited_dimensions() {'domainaxis0', 'domainaxis1'} >>> f.nc_unlimited_dimensions() set()\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class NetCDFUnlimitedDimensions:\n \"\"\"Mixin class for accessing netCDF unlimited dimensions. .. versionadded:: 1.7.0\"\"\"\n\n def nc_unlimited_dimensions(self):\n \"\"\"Return the selection of domain axis constructs to be written as netCDF unlimited dimensions. By default output netCDF dimensions are not unlimited. .. versionadded:: 1.7.0 .. seealso:: `cfdm.write`, `nc_clear_unlimited_dimensions`, `nc_set_unlimited_dimensions` :Returns: `set` The selection of domain axis constructs to be written as netCDF unlimited dimensions. **Examples:** >>> f.nc_set_unlimited_dimensions(['domainaxis0']) >>> f.nc_unlimited_dimensions() {'domainaxis0'} >>> f.nc_set_unlimited_dimensions(['domainaxis1']) >>> f.nc_unlimited_dimensions() {'domainaxis0', 'domainaxis1'} >>> f.nc_clear_unlimited_dimensions() {'domainaxis0', 'domainaxis1'} >>> f.nc_unlimited_dimensions() set()\"\"\"\n out = self._get_component('netcdf').get('unlimited_dimensions')\n if out is None:\n return set()\n return set(out)\n\n def nc_set_unlimited_dimensions(self, axes):\n \"\"\"Select domain axis constructs to be written as netCDF unlimited dimensions. By default output netCDF dimensions are not unlimited. .. versionadded:: 1.7.0 .. seealso:: `cfdm.write`, `nc_unlimited_dimensions`, `nc_clear_unlimited_dimensions` :Parameters: axes: sequence of `str`, optional Select the domain axis constructs from the sequence provided. Domain axis constructs are identified by their construct identifiers. *Parameter example:* ``axes=['domainaxis0', 'domainaxis1']`` *Parameter example:* ``axes=()`` :Returns: `None` **Examples:** >>> f.nc_set_unlimited_dimensions(['domainaxis0']) >>> f.nc_unlimited_dimensions() {'domainaxis0'} >>> f.nc_set_unlimited_dimensions(['domainaxis1']) >>> f\"\"\"\n out = self._get_component('netcdf').get('unlimited_dimensions')\n if out is None:\n out = set()\n else:\n out = set(out)\n out.update(axes)\n self._get_component('netcdf')['unlimited_dimensions'] = tuple(out)\n\n def nc_clear_unlimited_dimensions(self):\n \"\"\"Remove the selection of domain axis constructs to be written as netCDF unlimited dimensions. By default output netCDF dimensions are not unlimited. .. versionadded:: 1.7.0 .. 
seealso:: `cfdm.write`, `nc_unlimited_dimensions`, `nc_set_unlimited_dimensions` :Returns: `set` The selection of domain axis constructs that has been removed. **Examples:** >>> f.nc_set_unlimited_dimensions(['domainaxis0']) >>> f.nc_unlimited_dimensions() {'domainaxis0'} >>> f.nc_set_unlimited_dimensions(['domainaxis1']) >>> f.nc_unlimited_dimensions() {'domainaxis0', 'domainaxis1'} >>> f.nc_clear_unlimited_dimensions() {'domainaxis0', 'domainaxis1'} >>> f.nc_unlimited_dimensions() set()\"\"\"\n out = self._get_component('netcdf').get('unlimited_dimensions')\n if out is None:\n out = set()\n else:\n out = set(out)\n self._get_component('netcdf')['unlimited_dimensions'] = ()\n return out\n", "source": "the_stack_v2_python_sparse", "source_path": "cfdm/mixin/netcdf.py", "source_repo": "cofinoa/cfdm", "split": "val", "star_events_count": 0}
{"blob_id": "ddae3fd6e8b5175403ab2811807c9e9f0f221d58", "bodies": ["self.s = s\nself.t = t\nself.left = self.right = None", "if self.left is None and self.right is None:\n length = abs(self.s - self.t)\n midpoint = (self.s + self.t) / 2\n p = complex(random.gauss(midpoint.real, length * FACTOR), random.gauss(midpoint.imag, length * FACTOR))\n self.left = Line(self.s, p)\n self.right = Line(p, self.t)\nreturn (self.left, self.right)"], "bodies_text": "<|body_start_0|>\n self.s = s\n self.t = t\n self.left = self.right = None\n<|end_body_0|>\n\n<|body_start_1|>\n if self.left is None and self.right is None:\n length = abs(self.s - self.t)\n midpoint = (self.s + self.t) / 2\n p = complex(random.gauss(midpoint.real, length * FACTOR), random.gauss(midpoint.imag, length * FACTOR))\n self.left = Line(self.s, p)\n self.right = Line(p, self.t)\n return (self.left, self.right)\n<|end_body_1|>\n", "class_docstring": "Each triangle is made up of three lines. When the triangle is split, the lines have to split, too, and they have to keep references to their respective segments, or otherwise gaps will form between adjacent triangles.", "class_name": "Line", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Line:\n \"\"\"Each triangle is made up of three lines. When the triangle is split, the lines have to split, too, and they have to keep references to their respective segments, or otherwise gaps will form between adjacent triangles.\"\"\"\n\n def __init__(self, s, t):\n \"\"\"Create Line instance. s and t are expected as complex.\"\"\"\n <|body_0|>\n\n def split(self):\n \"\"\"Split line into two segments. The split happens roughly in the middle, but with some noise applied determined by FACTOR. The segments are lazily created and then reused.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.s = s\n self.t = t\n self.left = self.right = None\n<|end_body_0|>\n\n<|body_start_1|>\n if self.left is None and self.right is None:\n length = abs(self.s - self.t)\n midpoint = (self.s + self.t) / 2\n p = complex(random.gauss(midpoint.real, length * FACTOR), random.gauss(midpoint.imag, length * FACTOR))\n self.left = Line(self.s, p)\n self.right = Line(p, self.t)\n return (self.left, self.right)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000128", "length_bytes": 5003, "license_type": "no_license", "methods": [{"docstring": "Create Line instance. s and t are expected as complex.", "name": "__init__", "signature": "def __init__(self, s, t)"}, {"docstring": "Split line into two segments. The split happens roughly in the middle, but with some noise applied determined by FACTOR. The segments are lazily created and then reused.", "name": "split", "signature": "def split(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_024586", "prompt": "Implement the Python class `Line` described below.\n\nClass description:\nEach triangle is made up of three lines. When the triangle is split, the lines have to split, too, and they have to keep references to their respective segments, or otherwise gaps will form between adjacent triangles.\n\nMethod signatures and docstrings:\n- def __init__(self, s, t): Create Line instance. s and t are expected as complex.\n- def split(self): Split line into two segments. The split happens roughly in the middle, but with some noise applied determined by FACTOR. 
The segments are lazily created and then reused.", "prompted_full_text": "Implement the Python class `Line` described below.\n\nClass description:\nEach triangle is made up of three lines. When the triangle is split, the lines have to split, too, and they have to keep references to their respective segments, or otherwise gaps will form between adjacent triangles.\n\nMethod signatures and docstrings:\n- def __init__(self, s, t): Create Line instance. s and t are expected as complex.\n- def split(self): Split line into two segments. The split happens roughly in the middle, but with some noise applied determined by FACTOR. The segments are lazily created and then reused.\n\n<|skeleton|>\nclass Line:\n \"\"\"Each triangle is made up of three lines. When the triangle is split, the lines have to split, too, and they have to keep references to their respective segments, or otherwise gaps will form between adjacent triangles.\"\"\"\n\n def __init__(self, s, t):\n \"\"\"Create Line instance. s and t are expected as complex.\"\"\"\n <|body_0|>\n\n def split(self):\n \"\"\"Split line into two segments. The split happens roughly in the middle, but with some noise applied determined by FACTOR. The segments are lazily created and then reused.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.s = s\n self.t = t\n self.left = self.right = None\n<|end_body_0|>\n\n<|body_start_1|>\n if self.left is None and self.right is None:\n length = abs(self.s - self.t)\n midpoint = (self.s + self.t) / 2\n p = complex(random.gauss(midpoint.real, length * FACTOR), random.gauss(midpoint.imag, length * FACTOR))\n self.left = Line(self.s, p)\n self.right = Line(p, self.t)\n return (self.left, self.right)\n<|end_body_1|>\n", "revision_id": "b20b38653f54307ed83f2b7ed51c2507349d8975", "skeleton": "<|skeleton|>\nclass Line:\n \"\"\"Each triangle is made up of three lines. When the triangle is split, the lines have to split, too, and they have to keep references to their respective segments, or otherwise gaps will form between adjacent triangles.\"\"\"\n\n def __init__(self, s, t):\n \"\"\"Create Line instance. s and t are expected as complex.\"\"\"\n <|body_0|>\n\n def split(self):\n \"\"\"Split line into two segments. The split happens roughly in the middle, but with some noise applied determined by FACTOR. The segments are lazily created and then reused.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Line:\n \"\"\"Each triangle is made up of three lines. When the triangle is split, the lines have to split, too, and they have to keep references to their respective segments, or otherwise gaps will form between adjacent triangles.\"\"\"\n\n def __init__(self, s, t):\n \"\"\"Create Line instance. s and t are expected as complex.\"\"\"\n self.s = s\n self.t = t\n self.left = self.right = None\n\n def split(self):\n \"\"\"Split line into two segments. The split happens roughly in the middle, but with some noise applied determined by FACTOR. 
The segments are lazily created and then reused.\"\"\"\n if self.left is None and self.right is None:\n length = abs(self.s - self.t)\n midpoint = (self.s + self.t) / 2\n p = complex(random.gauss(midpoint.real, length * FACTOR), random.gauss(midpoint.imag, length * FACTOR))\n self.left = Line(self.s, p)\n self.right = Line(p, self.t)\n return (self.left, self.right)\n", "source": "the_stack_v2_python_sparse", "source_path": "automata/fractal_mountain.py", "source_repo": "t-kuester/misc", "split": "val", "star_events_count": 2}
{"blob_id": "d04d9d7e3deeed6869c46c8e696383c994eecd10", "bodies": ["committee_slug = self.kwargs.get('committee_slug')\nproject_slug = self.kwargs.get('project_slug')\nself.project = Project.objects.get(slug=project_slug)\nself.committee = Committee.objects.filter(project=self.project).get(slug=committee_slug)\nself.is_member = False\nif request.user in self.committee.users.all():\n self.is_member = True\nreturn super(BallotListView, self).get(request, *args, **kwargs)", "context = super(BallotListView, self).get_context_data(**kwargs)\ncontext['committee'] = self.committee\ncontext['is_member'] = self.is_member\nreturn context", "if self.request.user.is_authenticated() and self.is_member:\n qs = Ballot.objects.filter(committee=self.committee)\nelse:\n qs = Ballot.objects.filter(committee=self.committee).filter(private=False)\nreturn qs"], "bodies_text": "<|body_start_0|>\n committee_slug = self.kwargs.get('committee_slug')\n project_slug = self.kwargs.get('project_slug')\n self.project = Project.objects.get(slug=project_slug)\n self.committee = Committee.objects.filter(project=self.project).get(slug=committee_slug)\n self.is_member = False\n if request.user in self.committee.users.all():\n self.is_member = True\n return super(BallotListView, self).get(request, *args, **kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n context = super(BallotListView, self).get_context_data(**kwargs)\n context['committee'] = self.committee\n context['is_member'] = self.is_member\n return context\n<|end_body_1|>\n\n<|body_start_2|>\n if self.request.user.is_authenticated() and self.is_member:\n qs = Ballot.objects.filter(committee=self.committee)\n else:\n qs = Ballot.objects.filter(committee=self.committee).filter(private=False)\n return qs\n<|end_body_2|>\n", "class_docstring": "Show all Ballots for a Committee This view returns a list of all Ballots within a Committee. The queryset returned is defined by the requesting user's status: is_authenticated and or is a member of the Committee", "class_name": "BallotListView", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BallotListView:\n \"\"\"Show all Ballots for a Committee This view returns a list of all Ballots within a Committee. The queryset returned is defined by the requesting user's status: is_authenticated and or is a member of the Committee\"\"\"\n\n def get(self, request, *args, **kwargs):\n \"\"\"Access URL parameters We need to define self.committee in order to return the correct set of Ballot objects :param request: Request object :type request: HttpRequestObject :param args: None :param kwargs: (django dict) :type kwargs: dict\"\"\"\n <|body_0|>\n\n def get_context_data(self, **kwargs):\n \"\"\"Add to the context We need to add the Committee object to the context :param kwargs: (django dict) :type kwargs: dict\"\"\"\n <|body_1|>\n\n def get_queryset(self):\n \"\"\"Specify the queryset Return a specific queryset based on the requesting user's status :return: If user.is_authenticated and a member of the Committee: All public Ballots, both open and closed. 
If not user.is_authenticated: All public ballots, both open and closed :rtype: QuerySet\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n committee_slug = self.kwargs.get('committee_slug')\n project_slug = self.kwargs.get('project_slug')\n self.project = Project.objects.get(slug=project_slug)\n self.committee = Committee.objects.filter(project=self.project).get(slug=committee_slug)\n self.is_member = False\n if request.user in self.committee.users.all():\n self.is_member = True\n return super(BallotListView, self).get(request, *args, **kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n context = super(BallotListView, self).get_context_data(**kwargs)\n context['committee'] = self.committee\n context['is_member'] = self.is_member\n return context\n<|end_body_1|>\n\n<|body_start_2|>\n if self.request.user.is_authenticated() and self.is_member:\n qs = Ballot.objects.filter(committee=self.committee)\n else:\n qs = Ballot.objects.filter(committee=self.committee).filter(private=False)\n return qs\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000129", "length_bytes": 11212, "license_type": "no_license", "methods": [{"docstring": "Access URL parameters We need to define self.committee in order to return the correct set of Ballot objects :param request: Request object :type request: HttpRequestObject :param args: None :param kwargs: (django dict) :type kwargs: dict", "name": "get", "signature": "def get(self, request, *args, **kwargs)"}, {"docstring": "Add to the context We need to add the Committee object to the context :param kwargs: (django dict) :type kwargs: dict", "name": "get_context_data", "signature": "def get_context_data(self, **kwargs)"}, {"docstring": "Specify the queryset Return a specific queryset based on the requesting user's status :return: If user.is_authenticated and a member of the Committee: All public Ballots, both open and closed. If not user.is_authenticated: All public ballots, both open and closed :rtype: QuerySet", "name": "get_queryset", "signature": "def get_queryset(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_007104", "prompt": "Implement the Python class `BallotListView` described below.\n\nClass description:\nShow all Ballots for a Committee This view returns a list of all Ballots within a Committee. The queryset returned is defined by the requesting user's status: is_authenticated and or is a member of the Committee\n\nMethod signatures and docstrings:\n- def get(self, request, *args, **kwargs): Access URL parameters We need to define self.committee in order to return the correct set of Ballot objects :param request: Request object :type request: HttpRequestObject :param args: None :param kwargs: (django dict) :type kwargs: dict\n- def get_context_data(self, **kwargs): Add to the context We need to add the Committee object to the context :param kwargs: (django dict) :type kwargs: dict\n- def get_queryset(self): Specify the queryset Return a specific queryset based on the requesting user's status :return: If user.is_authenticated and a member of the Committee: All public Ballots, both open and closed. If not user.is_authenticated: All public ballots, both open and closed :rtype: QuerySet", "prompted_full_text": "Implement the Python class `BallotListView` described below.\n\nClass description:\nShow all Ballots for a Committee This view returns a list of all Ballots within a Committee. 
The queryset returned is defined by the requesting user's status: is_authenticated and or is a member of the Committee\n\nMethod signatures and docstrings:\n- def get(self, request, *args, **kwargs): Access URL parameters We need to define self.committee in order to return the correct set of Ballot objects :param request: Request object :type request: HttpRequestObject :param args: None :param kwargs: (django dict) :type kwargs: dict\n- def get_context_data(self, **kwargs): Add to the context We need to add the Committee object to the context :param kwargs: (django dict) :type kwargs: dict\n- def get_queryset(self): Specify the queryset Return a specific queryset based on the requesting user's status :return: If user.is_authenticated and a member of the Committee: All public Ballots, both open and closed. If not user.is_authenticated: All public ballots, both open and closed :rtype: QuerySet\n\n<|skeleton|>\nclass BallotListView:\n \"\"\"Show all Ballots for a Committee This view returns a list of all Ballots within a Committee. The queryset returned is defined by the requesting user's status: is_authenticated and or is a member of the Committee\"\"\"\n\n def get(self, request, *args, **kwargs):\n \"\"\"Access URL parameters We need to define self.committee in order to return the correct set of Ballot objects :param request: Request object :type request: HttpRequestObject :param args: None :param kwargs: (django dict) :type kwargs: dict\"\"\"\n <|body_0|>\n\n def get_context_data(self, **kwargs):\n \"\"\"Add to the context We need to add the Committee object to the context :param kwargs: (django dict) :type kwargs: dict\"\"\"\n <|body_1|>\n\n def get_queryset(self):\n \"\"\"Specify the queryset Return a specific queryset based on the requesting user's status :return: If user.is_authenticated and a member of the Committee: All public Ballots, both open and closed. If not user.is_authenticated: All public ballots, both open and closed :rtype: QuerySet\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n committee_slug = self.kwargs.get('committee_slug')\n project_slug = self.kwargs.get('project_slug')\n self.project = Project.objects.get(slug=project_slug)\n self.committee = Committee.objects.filter(project=self.project).get(slug=committee_slug)\n self.is_member = False\n if request.user in self.committee.users.all():\n self.is_member = True\n return super(BallotListView, self).get(request, *args, **kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n context = super(BallotListView, self).get_context_data(**kwargs)\n context['committee'] = self.committee\n context['is_member'] = self.is_member\n return context\n<|end_body_1|>\n\n<|body_start_2|>\n if self.request.user.is_authenticated() and self.is_member:\n qs = Ballot.objects.filter(committee=self.committee)\n else:\n qs = Ballot.objects.filter(committee=self.committee).filter(private=False)\n return qs\n<|end_body_2|>\n", "revision_id": "9cf5417946b811f4b236008c7cd3c9c84fd8c1bb", "skeleton": "<|skeleton|>\nclass BallotListView:\n \"\"\"Show all Ballots for a Committee This view returns a list of all Ballots within a Committee. 
The queryset returned is defined by the requesting user's status: is_authenticated and or is a member of the Committee\"\"\"\n\n def get(self, request, *args, **kwargs):\n \"\"\"Access URL parameters We need to define self.committee in order to return the correct set of Ballot objects :param request: Request object :type request: HttpRequestObject :param args: None :param kwargs: (django dict) :type kwargs: dict\"\"\"\n <|body_0|>\n\n def get_context_data(self, **kwargs):\n \"\"\"Add to the context We need to add the Committee object to the context :param kwargs: (django dict) :type kwargs: dict\"\"\"\n <|body_1|>\n\n def get_queryset(self):\n \"\"\"Specify the queryset Return a specific queryset based on the requesting user's status :return: If user.is_authenticated and a member of the Committee: All public Ballots, both open and closed. If not user.is_authenticated: All public ballots, both open and closed :rtype: QuerySet\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class BallotListView:\n \"\"\"Show all Ballots for a Committee This view returns a list of all Ballots within a Committee. The queryset returned is defined by the requesting user's status: is_authenticated and or is a member of the Committee\"\"\"\n\n def get(self, request, *args, **kwargs):\n \"\"\"Access URL parameters We need to define self.committee in order to return the correct set of Ballot objects :param request: Request object :type request: HttpRequestObject :param args: None :param kwargs: (django dict) :type kwargs: dict\"\"\"\n committee_slug = self.kwargs.get('committee_slug')\n project_slug = self.kwargs.get('project_slug')\n self.project = Project.objects.get(slug=project_slug)\n self.committee = Committee.objects.filter(project=self.project).get(slug=committee_slug)\n self.is_member = False\n if request.user in self.committee.users.all():\n self.is_member = True\n return super(BallotListView, self).get(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n \"\"\"Add to the context We need to add the Committee object to the context :param kwargs: (django dict) :type kwargs: dict\"\"\"\n context = super(BallotListView, self).get_context_data(**kwargs)\n context['committee'] = self.committee\n context['is_member'] = self.is_member\n return context\n\n def get_queryset(self):\n \"\"\"Specify the queryset Return a specific queryset based on the requesting user's status :return: If user.is_authenticated and a member of the Committee: All public Ballots, both open and closed. If not user.is_authenticated: All public ballots, both open and closed :rtype: QuerySet\"\"\"\n if self.request.user.is_authenticated() and self.is_member:\n qs = Ballot.objects.filter(committee=self.committee)\n else:\n qs = Ballot.objects.filter(committee=self.committee).filter(private=False)\n return qs\n", "source": "the_stack_v2_python_sparse", "source_path": "django_project/vota/views/ballot.py", "source_repo": "ismailsunni/projecta", "split": "val", "star_events_count": 0}
{"blob_id": "4333062a6cf3982e3d7f743d897822d7cbd9a762", "bodies": ["def wrapped(*args, **kwargs):\n return self.wrangler.fit_transform(*args, **kwargs).count()\nreturn wrapped", "for df in dfs:\n df.persist()\n df.count()", "for df in dfs:\n df.unpersist()\n if df.is_cached:\n warnings.warn('Spark dataframe could not be unpersisted.', ResourceWarning)"], "bodies_text": "<|body_start_0|>\n def wrapped(*args, **kwargs):\n return self.wrangler.fit_transform(*args, **kwargs).count()\n return wrapped\n<|end_body_0|>\n\n<|body_start_1|>\n for df in dfs:\n df.persist()\n df.count()\n<|end_body_1|>\n\n<|body_start_2|>\n for df in dfs:\n df.unpersist()\n if df.is_cached:\n warnings.warn('Spark dataframe could not be unpersisted.', ResourceWarning)\n<|end_body_2|>\n", "class_docstring": "Define common methods for pyspark profiler.", "class_name": "PySparkBaseProfiler", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PySparkBaseProfiler:\n \"\"\"Define common methods for pyspark profiler.\"\"\"\n\n def _wrap_fit_transform(self) -> Callable:\n \"\"\"Wrapper function to call `count()` on wrangler's `fit_transform` to enforce computation on lazily evaluated pyspark dataframes. Returns ------- wrapped: callable Wrapped `fit_transform` method as a function.\"\"\"\n <|body_0|>\n\n def _cache_input(dfs: Iterable[DataFrame]):\n \"\"\"Persist lazily evaluated pyspark dataframes before profiling to capture only relevant `fit_transform`. Apply `count` to enforce computation to create cached representation. Parameters ---------- dfs: iterable Spark dataframes to be persisted. Returns ------- persisted: iterable List of computed dask collections.\"\"\"\n <|body_1|>\n\n def _clear_cached_input(dfs: Iterable[DataFrame]):\n \"\"\"Unpersist previously persisted pyspark dataframes after profiling. Parameters ---------- dfs: iterable Persisted pyspark dataframes.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def wrapped(*args, **kwargs):\n return self.wrangler.fit_transform(*args, **kwargs).count()\n return wrapped\n<|end_body_0|>\n\n<|body_start_1|>\n for df in dfs:\n df.persist()\n df.count()\n<|end_body_1|>\n\n<|body_start_2|>\n for df in dfs:\n df.unpersist()\n if df.is_cached:\n warnings.warn('Spark dataframe could not be unpersisted.', ResourceWarning)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000130", "length_bytes": 4189, "license_type": "permissive", "methods": [{"docstring": "Wrapper function to call `count()` on wrangler's `fit_transform` to enforce computation on lazily evaluated pyspark dataframes. Returns ------- wrapped: callable Wrapped `fit_transform` method as a function.", "name": "_wrap_fit_transform", "signature": "def _wrap_fit_transform(self) -> Callable"}, {"docstring": "Persist lazily evaluated pyspark dataframes before profiling to capture only relevant `fit_transform`. Apply `count` to enforce computation to create cached representation. Parameters ---------- dfs: iterable Spark dataframes to be persisted. Returns ------- persisted: iterable List of computed dask collections.", "name": "_cache_input", "signature": "def _cache_input(dfs: Iterable[DataFrame])"}, {"docstring": "Unpersist previously persisted pyspark dataframes after profiling. 
Parameters ---------- dfs: iterable Persisted pyspark dataframes.", "name": "_clear_cached_input", "signature": "def _clear_cached_input(dfs: Iterable[DataFrame])"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_054165", "prompt": "Implement the Python class `PySparkBaseProfiler` described below.\n\nClass description:\nDefine common methods for pyspark profiler.\n\nMethod signatures and docstrings:\n- def _wrap_fit_transform(self) -> Callable: Wrapper function to call `count()` on wrangler's `fit_transform` to enforce computation on lazily evaluated pyspark dataframes. Returns ------- wrapped: callable Wrapped `fit_transform` method as a function.\n- def _cache_input(dfs: Iterable[DataFrame]): Persist lazily evaluated pyspark dataframes before profiling to capture only relevant `fit_transform`. Apply `count` to enforce computation to create cached representation. Parameters ---------- dfs: iterable Spark dataframes to be persisted. Returns ------- persisted: iterable List of computed dask collections.\n- def _clear_cached_input(dfs: Iterable[DataFrame]): Unpersist previously persisted pyspark dataframes after profiling. Parameters ---------- dfs: iterable Persisted pyspark dataframes.", "prompted_full_text": "Implement the Python class `PySparkBaseProfiler` described below.\n\nClass description:\nDefine common methods for pyspark profiler.\n\nMethod signatures and docstrings:\n- def _wrap_fit_transform(self) -> Callable: Wrapper function to call `count()` on wrangler's `fit_transform` to enforce computation on lazily evaluated pyspark dataframes. Returns ------- wrapped: callable Wrapped `fit_transform` method as a function.\n- def _cache_input(dfs: Iterable[DataFrame]): Persist lazily evaluated pyspark dataframes before profiling to capture only relevant `fit_transform`. Apply `count` to enforce computation to create cached representation. Parameters ---------- dfs: iterable Spark dataframes to be persisted. Returns ------- persisted: iterable List of computed dask collections.\n- def _clear_cached_input(dfs: Iterable[DataFrame]): Unpersist previously persisted pyspark dataframes after profiling. Parameters ---------- dfs: iterable Persisted pyspark dataframes.\n\n<|skeleton|>\nclass PySparkBaseProfiler:\n \"\"\"Define common methods for pyspark profiler.\"\"\"\n\n def _wrap_fit_transform(self) -> Callable:\n \"\"\"Wrapper function to call `count()` on wrangler's `fit_transform` to enforce computation on lazily evaluated pyspark dataframes. Returns ------- wrapped: callable Wrapped `fit_transform` method as a function.\"\"\"\n <|body_0|>\n\n def _cache_input(dfs: Iterable[DataFrame]):\n \"\"\"Persist lazily evaluated pyspark dataframes before profiling to capture only relevant `fit_transform`. Apply `count` to enforce computation to create cached representation. Parameters ---------- dfs: iterable Spark dataframes to be persisted. Returns ------- persisted: iterable List of computed dask collections.\"\"\"\n <|body_1|>\n\n def _clear_cached_input(dfs: Iterable[DataFrame]):\n \"\"\"Unpersist previously persisted pyspark dataframes after profiling. 
Parameters ---------- dfs: iterable Persisted pyspark dataframes.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def wrapped(*args, **kwargs):\n return self.wrangler.fit_transform(*args, **kwargs).count()\n return wrapped\n<|end_body_0|>\n\n<|body_start_1|>\n for df in dfs:\n df.persist()\n df.count()\n<|end_body_1|>\n\n<|body_start_2|>\n for df in dfs:\n df.unpersist()\n if df.is_cached:\n warnings.warn('Spark dataframe could not be unpersisted.', ResourceWarning)\n<|end_body_2|>\n", "revision_id": "8561f5f267303e664487ae67095085fcea4308c9", "skeleton": "<|skeleton|>\nclass PySparkBaseProfiler:\n \"\"\"Define common methods for pyspark profiler.\"\"\"\n\n def _wrap_fit_transform(self) -> Callable:\n \"\"\"Wrapper function to call `count()` on wrangler's `fit_transform` to enforce computation on lazily evaluated pyspark dataframes. Returns ------- wrapped: callable Wrapped `fit_transform` method as a function.\"\"\"\n <|body_0|>\n\n def _cache_input(dfs: Iterable[DataFrame]):\n \"\"\"Persist lazily evaluated pyspark dataframes before profiling to capture only relevant `fit_transform`. Apply `count` to enforce computation to create cached representation. Parameters ---------- dfs: iterable Spark dataframes to be persisted. Returns ------- persisted: iterable List of computed dask collections.\"\"\"\n <|body_1|>\n\n def _clear_cached_input(dfs: Iterable[DataFrame]):\n \"\"\"Unpersist previously persisted pyspark dataframes after profiling. Parameters ---------- dfs: iterable Persisted pyspark dataframes.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class PySparkBaseProfiler:\n \"\"\"Define common methods for pyspark profiler.\"\"\"\n\n def _wrap_fit_transform(self) -> Callable:\n \"\"\"Wrapper function to call `count()` on wrangler's `fit_transform` to enforce computation on lazily evaluated pyspark dataframes. Returns ------- wrapped: callable Wrapped `fit_transform` method as a function.\"\"\"\n def wrapped(*args, **kwargs):\n return self.wrangler.fit_transform(*args, **kwargs).count()\n return wrapped\n\n def _cache_input(dfs: Iterable[DataFrame]):\n \"\"\"Persist lazily evaluated pyspark dataframes before profiling to capture only relevant `fit_transform`. Apply `count` to enforce computation to create cached representation. Parameters ---------- dfs: iterable Spark dataframes to be persisted. Returns ------- persisted: iterable List of computed dask collections.\"\"\"\n for df in dfs:\n df.persist()\n df.count()\n\n def _clear_cached_input(dfs: Iterable[DataFrame]):\n \"\"\"Unpersist previously persisted pyspark dataframes after profiling. Parameters ---------- dfs: iterable Persisted pyspark dataframes.\"\"\"\n for df in dfs:\n df.unpersist()\n if df.is_cached:\n warnings.warn('Spark dataframe could not be unpersisted.', ResourceWarning)\n", "source": "the_stack_v2_python_sparse", "source_path": "src/pywrangler/pyspark/benchmark.py", "source_repo": "mansenfranzen/pywrangler", "split": "val", "star_events_count": 15}
{"blob_id": "520f7af5b9edd656b45ceec1f489f717575ae2a3", "bodies": ["super(VideoSessionManager, self).__init__()\nself.getters.update({'assignment': 'get_foreign_key', 'date_completed': 'get_time', 'date_started': 'get_time', 'user': 'get_foreign_key', 'video': 'get_foreign_key'})\nself.setters.update({'date_completed': 'set_time', 'date_started': 'set_time'})\nself.my_django_model = facade.models.VideoSession", "assignment_object = self._find_by_id(assignment, facade.models.Assignment)\nnew_video_session = self.my_django_model(assignment=assignment_object)\nnew_video_session.save()\nself.authorizer.check_create_permissions(auth_token, new_video_session)\nret = {}\nret['id'] = new_video_session.id\nret['urls'] = list()\nfor encoded_video in new_video_session.video.encoded_videos.all():\n ret['urls'].append(encoded_video.url)\nreturn ret", "video = facade.models.Video.objects.get(id=video_id)\nassignments = facade.models.Assignment.objects.filter(task__id=video_id, user__id=auth_token.user.id).order_by('-id')\nif len(assignments):\n assignment = assignments[0]\nelse:\n assignment = facade.managers.AssignmentManager().create(auth_token, video_id)\nstart_cutoff = datetime.utcnow() - timedelta(hours=12)\nattempts = facade.models.AssignmentAttempt.objects.filter(assignment__id=assignment.id, date_started__gt=start_cutoff).order_by('-date_started')\nif len(attempts):\n attempt = attempts[0]\nelse:\n attempt = self.my_django_model.objects.create(assignment=assignment)\n self.authorizer.check_create_permissions(auth_token, attempt)\nreturn {'id': attempt.id}", "filters = {'member': {'assignment__task__id': videos}}\nif start_date or end_date:\n filters = [filters]\n if start_date:\n if pr_time.is_iso8601(start_date):\n start_date = pr_time.iso8601_to_datetime(start_date)\n filters.append({'greater_than_or_equal': {'date_started': start_date}})\n if end_date:\n if pr_time.is_iso8601(end_date):\n end_date = pr_time.iso8601_to_datetime(end_date)\n filters.append({'less_than_or_equal': {'date_started': end_date}})\n filters = {'and': filters}\nviews = self.get_filtered(auth_token, filters, ['video', 'date_started', 'user'])\nviews = Utils.merge_queries(views, facade.managers.VideoManager(), auth_token, ['name'], 'video')\nviews = Utils.merge_queries(views, facade.managers.UserManager(), auth_token, ['first_name', 'last_name', 'email', 'default_username_and_domain'], 'user')\nreturn views", "filters = {'member': {'assignment__user__id': users}}\nif start_date or end_date:\n filters = [filters]\n if start_date:\n if pr_time.is_iso8601(start_date):\n start_date = pr_time.iso8601_to_datetime(start_date)\n filters.append({'greater_than_or_equal': {'date_started': start_date}})\n if end_date:\n if pr_time.is_iso8601(end_date):\n end_date = pr_time.iso8601_to_datetime(end_date)\n filters.append({'less_than_or_equal': {'date_started': end_date}})\n filters = {'and': filters}\nviews = self.get_filtered(auth_token, filters, ['video', 'date_started', 'user'])\nviews = Utils.merge_queries(views, facade.managers.VideoManager(), auth_token, ['name', 'author', 'description'], 'video')\nviews = Utils.merge_queries(views, facade.managers.UserManager(), auth_token, ['first_name', 'last_name', 'email', 'username'], 'user')\nreturn views"], "bodies_text": "<|body_start_0|>\n super(VideoSessionManager, self).__init__()\n self.getters.update({'assignment': 'get_foreign_key', 'date_completed': 'get_time', 'date_started': 'get_time', 'user': 'get_foreign_key', 'video': 'get_foreign_key'})\n 
self.setters.update({'date_completed': 'set_time', 'date_started': 'set_time'})\n self.my_django_model = facade.models.VideoSession\n<|end_body_0|>\n\n<|body_start_1|>\n assignment_object = self._find_by_id(assignment, facade.models.Assignment)\n new_video_session = self.my_django_model(assignment=assignment_object)\n new_video_session.save()\n self.authorizer.check_create_permissions(auth_token, new_video_session)\n ret = {}\n ret['id'] = new_video_session.id\n ret['urls'] = list()\n for encoded_video in new_video_session.video.encoded_videos.all():\n ret['urls'].append(encoded_video.url)\n return ret\n<|end_body_1|>\n\n<|body_start_2|>\n video = facade.models.Video.objects.get(id=video_id)\n assignments = facade.models.Assignment.objects.filter(task__id=video_id, user__id=auth_token.user.id).order_by('-id')\n if len(assignments):\n assignment = assignments[0]\n else:\n assignment = facade.managers.AssignmentManager().create(auth_token, video_id)\n start_cutoff = datetime.utcnow() - timedelta(hours=12)\n attempts = facade.models.AssignmentAttempt.objects.filter(assignment__id=assignment.id, date_started__gt=start_cutoff).order_by('-date_started')\n if len(attempts):\n attempt = attempts[0]\n else:\n attempt = self.my_django_model.objects.create(assignment=assignment)\n self.authorizer.check_create_permissions(auth_token, attempt)\n return {'id': attempt.id}\n<|end_body_2|>\n\n<|body_start_3|>\n filters = {'member': {'assignment__task__id': videos}}\n if start_date or end_date:\n filters = [filters]\n if start_date:\n if pr_time.is_iso8601(start_date):\n start_date = pr_time.iso8601_to_datetime(start_date)\n filters.append({'greater_than_or_equal': {'date_started': start_date}})\n if end_date:\n if pr_time.is_iso8601(end_date):\n end_date = pr_time.iso8601_to_datetime(end_date)\n filters.append({'less_than_or_equal': {'date_started': end_date}})\n filters = {'and': filters}\n views = self.get_filtered(auth_token, filters, ['video', 'date_started', 'user'])\n views = Utils.merge_queries(views, facade.managers.VideoManager(), auth_token, ['name'], 'video')\n views = Utils.merge_queries(views, facade.managers.UserManager(), auth_token, ['first_name', 'last_name', 'email', 'default_username_and_domain'], 'user')\n return views\n<|end_body_3|>\n\n<|body_start_4|>\n filters = {'member': {'assignment__user__id': users}}\n if start_date or end_date:\n filters = [filters]\n if start_date:\n if pr_time.is_iso8601(start_date):\n start_date = pr_time.iso8601_to_datetime(start_date)\n filters.append({'greater_than_or_equal': {'date_started': start_date}})\n if end_date:\n if pr_time.is_iso8601(end_date):\n end_date = pr_time.iso8601_to_datetime(end_date)\n filters.append({'less_than_or_equal': {'date_started': end_date}})\n filters = {'and': filters}\n views = self.get_filtered(auth_token, filters, ['video', 'date_started', 'user'])\n views = Utils.merge_queries(views, facade.managers.VideoManager(), auth_token, ['name', 'author', 'description'], 'video')\n views = Utils.merge_queries(views, facade.managers.UserManager(), auth_token, ['first_name', 'last_name', 'email', 'username'], 'user')\n return views\n<|end_body_4|>\n", "class_docstring": "Manage VideoSessions in the Power Reg system", "class_name": "VideoSessionManager", "detected_licenses": ["BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass VideoSessionManager:\n \"\"\"Manage VideoSessions in the Power Reg system\"\"\"\n\n def __init__(self):\n \"\"\"constructor\"\"\"\n <|body_0|>\n\n def 
create(self, auth_token, assignment):\n \"\"\"Create a new VideoSession object. @param auth_token The authentication token of the acting user @type auth_token facade.models.AuthToken @param assignment FK for an assignment @type assignment int @return A dictionary with two indices. One is 'id' and is the primary key of the VideoSession object. The other is 'urls', and is a list of URLs that the user is authorized to view under this VideoSession.\"\"\"\n <|body_1|>\n\n def register_video_view(self, auth_token, video_id):\n \"\"\"Create an assignment to watch the video, if needed; then use it to create a VideoSession object, if needed. If a VideoSession for this (user, video) combination was created in the last 12 hours, does nothing. @param auth_token The authentication token of the acting user @type auth_token facade.models.AuthToken @param assignment FK for a video @type assignment int\"\"\"\n <|body_2|>\n\n def watcher_report(self, auth_token, videos, start_date=None, end_date=None):\n \"\"\"Returns a list of views of the given videos (optinally filtered by date) along with some information about the viewer.\"\"\"\n <|body_3|>\n\n def viewing_activity_report(self, auth_token, users, start_date=None, end_date=None):\n \"\"\"Returns a list of video views by the given users (optinally filtered by date) along with some information about the video.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(VideoSessionManager, self).__init__()\n self.getters.update({'assignment': 'get_foreign_key', 'date_completed': 'get_time', 'date_started': 'get_time', 'user': 'get_foreign_key', 'video': 'get_foreign_key'})\n self.setters.update({'date_completed': 'set_time', 'date_started': 'set_time'})\n self.my_django_model = facade.models.VideoSession\n<|end_body_0|>\n\n<|body_start_1|>\n assignment_object = self._find_by_id(assignment, facade.models.Assignment)\n new_video_session = self.my_django_model(assignment=assignment_object)\n new_video_session.save()\n self.authorizer.check_create_permissions(auth_token, new_video_session)\n ret = {}\n ret['id'] = new_video_session.id\n ret['urls'] = list()\n for encoded_video in new_video_session.video.encoded_videos.all():\n ret['urls'].append(encoded_video.url)\n return ret\n<|end_body_1|>\n\n<|body_start_2|>\n video = facade.models.Video.objects.get(id=video_id)\n assignments = facade.models.Assignment.objects.filter(task__id=video_id, user__id=auth_token.user.id).order_by('-id')\n if len(assignments):\n assignment = assignments[0]\n else:\n assignment = facade.managers.AssignmentManager().create(auth_token, video_id)\n start_cutoff = datetime.utcnow() - timedelta(hours=12)\n attempts = facade.models.AssignmentAttempt.objects.filter(assignment__id=assignment.id, date_started__gt=start_cutoff).order_by('-date_started')\n if len(attempts):\n attempt = attempts[0]\n else:\n attempt = self.my_django_model.objects.create(assignment=assignment)\n self.authorizer.check_create_permissions(auth_token, attempt)\n return {'id': attempt.id}\n<|end_body_2|>\n\n<|body_start_3|>\n filters = {'member': {'assignment__task__id': videos}}\n if start_date or end_date:\n filters = [filters]\n if start_date:\n if pr_time.is_iso8601(start_date):\n start_date = pr_time.iso8601_to_datetime(start_date)\n filters.append({'greater_than_or_equal': {'date_started': start_date}})\n if end_date:\n if pr_time.is_iso8601(end_date):\n end_date = pr_time.iso8601_to_datetime(end_date)\n filters.append({'less_than_or_equal': {'date_started': end_date}})\n filters = {'and': filters}\n 
views = self.get_filtered(auth_token, filters, ['video', 'date_started', 'user'])\n views = Utils.merge_queries(views, facade.managers.VideoManager(), auth_token, ['name'], 'video')\n views = Utils.merge_queries(views, facade.managers.UserManager(), auth_token, ['first_name', 'last_name', 'email', 'default_username_and_domain'], 'user')\n return views\n<|end_body_3|>\n\n<|body_start_4|>\n filters = {'member': {'assignment__user__id': users}}\n if start_date or end_date:\n filters = [filters]\n if start_date:\n if pr_time.is_iso8601(start_date):\n start_date = pr_time.iso8601_to_datetime(start_date)\n filters.append({'greater_than_or_equal': {'date_started': start_date}})\n if end_date:\n if pr_time.is_iso8601(end_date):\n end_date = pr_time.iso8601_to_datetime(end_date)\n filters.append({'less_than_or_equal': {'date_started': end_date}})\n filters = {'and': filters}\n views = self.get_filtered(auth_token, filters, ['video', 'date_started', 'user'])\n views = Utils.merge_queries(views, facade.managers.VideoManager(), auth_token, ['name', 'author', 'description'], 'video')\n views = Utils.merge_queries(views, facade.managers.UserManager(), auth_token, ['first_name', 'last_name', 'email', 'username'], 'user')\n return views\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000131", "length_bytes": 6633, "license_type": "permissive", "methods": [{"docstring": "constructor", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Create a new VideoSession object. @param auth_token The authentication token of the acting user @type auth_token facade.models.AuthToken @param assignment FK for an assignment @type assignment int @return A dictionary with two indices. One is 'id' and is the primary key of the VideoSession object. The other is 'urls', and is a list of URLs that the user is authorized to view under this VideoSession.", "name": "create", "signature": "def create(self, auth_token, assignment)"}, {"docstring": "Create an assignment to watch the video, if needed; then use it to create a VideoSession object, if needed. If a VideoSession for this (user, video) combination was created in the last 12 hours, does nothing. @param auth_token The authentication token of the acting user @type auth_token facade.models.AuthToken @param assignment FK for a video @type assignment int", "name": "register_video_view", "signature": "def register_video_view(self, auth_token, video_id)"}, {"docstring": "Returns a list of views of the given videos (optinally filtered by date) along with some information about the viewer.", "name": "watcher_report", "signature": "def watcher_report(self, auth_token, videos, start_date=None, end_date=None)"}, {"docstring": "Returns a list of video views by the given users (optinally filtered by date) along with some information about the video.", "name": "viewing_activity_report", "signature": "def viewing_activity_report(self, auth_token, users, start_date=None, end_date=None)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_test_002746", "prompt": "Implement the Python class `VideoSessionManager` described below.\n\nClass description:\nManage VideoSessions in the Power Reg system\n\nMethod signatures and docstrings:\n- def __init__(self): constructor\n- def create(self, auth_token, assignment): Create a new VideoSession object. @param auth_token The authentication token of the acting user @type auth_token facade.models.AuthToken @param assignment FK for an assignment @type assignment int @return A dictionary with two indices. 
One is 'id' and is the primary key of the VideoSession object. The other is 'urls', and is a list of URLs that the user is authorized to view under this VideoSession.\n- def register_video_view(self, auth_token, video_id): Create an assignment to watch the video, if needed; then use it to create a VideoSession object, if needed. If a VideoSession for this (user, video) combination was created in the last 12 hours, does nothing. @param auth_token The authentication token of the acting user @type auth_token facade.models.AuthToken @param assignment FK for a video @type assignment int\n- def watcher_report(self, auth_token, videos, start_date=None, end_date=None): Returns a list of views of the given videos (optinally filtered by date) along with some information about the viewer.\n- def viewing_activity_report(self, auth_token, users, start_date=None, end_date=None): Returns a list of video views by the given users (optinally filtered by date) along with some information about the video.", "prompted_full_text": "Implement the Python class `VideoSessionManager` described below.\n\nClass description:\nManage VideoSessions in the Power Reg system\n\nMethod signatures and docstrings:\n- def __init__(self): constructor\n- def create(self, auth_token, assignment): Create a new VideoSession object. @param auth_token The authentication token of the acting user @type auth_token facade.models.AuthToken @param assignment FK for an assignment @type assignment int @return A dictionary with two indices. One is 'id' and is the primary key of the VideoSession object. The other is 'urls', and is a list of URLs that the user is authorized to view under this VideoSession.\n- def register_video_view(self, auth_token, video_id): Create an assignment to watch the video, if needed; then use it to create a VideoSession object, if needed. If a VideoSession for this (user, video) combination was created in the last 12 hours, does nothing. @param auth_token The authentication token of the acting user @type auth_token facade.models.AuthToken @param assignment FK for a video @type assignment int\n- def watcher_report(self, auth_token, videos, start_date=None, end_date=None): Returns a list of views of the given videos (optinally filtered by date) along with some information about the viewer.\n- def viewing_activity_report(self, auth_token, users, start_date=None, end_date=None): Returns a list of video views by the given users (optinally filtered by date) along with some information about the video.\n\n<|skeleton|>\nclass VideoSessionManager:\n \"\"\"Manage VideoSessions in the Power Reg system\"\"\"\n\n def __init__(self):\n \"\"\"constructor\"\"\"\n <|body_0|>\n\n def create(self, auth_token, assignment):\n \"\"\"Create a new VideoSession object. @param auth_token The authentication token of the acting user @type auth_token facade.models.AuthToken @param assignment FK for an assignment @type assignment int @return A dictionary with two indices. One is 'id' and is the primary key of the VideoSession object. The other is 'urls', and is a list of URLs that the user is authorized to view under this VideoSession.\"\"\"\n <|body_1|>\n\n def register_video_view(self, auth_token, video_id):\n \"\"\"Create an assignment to watch the video, if needed; then use it to create a VideoSession object, if needed. If a VideoSession for this (user, video) combination was created in the last 12 hours, does nothing. 
@param auth_token The authentication token of the acting user @type auth_token facade.models.AuthToken @param assignment FK for a video @type assignment int\"\"\"\n <|body_2|>\n\n def watcher_report(self, auth_token, videos, start_date=None, end_date=None):\n \"\"\"Returns a list of views of the given videos (optinally filtered by date) along with some information about the viewer.\"\"\"\n <|body_3|>\n\n def viewing_activity_report(self, auth_token, users, start_date=None, end_date=None):\n \"\"\"Returns a list of video views by the given users (optinally filtered by date) along with some information about the video.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(VideoSessionManager, self).__init__()\n self.getters.update({'assignment': 'get_foreign_key', 'date_completed': 'get_time', 'date_started': 'get_time', 'user': 'get_foreign_key', 'video': 'get_foreign_key'})\n self.setters.update({'date_completed': 'set_time', 'date_started': 'set_time'})\n self.my_django_model = facade.models.VideoSession\n<|end_body_0|>\n\n<|body_start_1|>\n assignment_object = self._find_by_id(assignment, facade.models.Assignment)\n new_video_session = self.my_django_model(assignment=assignment_object)\n new_video_session.save()\n self.authorizer.check_create_permissions(auth_token, new_video_session)\n ret = {}\n ret['id'] = new_video_session.id\n ret['urls'] = list()\n for encoded_video in new_video_session.video.encoded_videos.all():\n ret['urls'].append(encoded_video.url)\n return ret\n<|end_body_1|>\n\n<|body_start_2|>\n video = facade.models.Video.objects.get(id=video_id)\n assignments = facade.models.Assignment.objects.filter(task__id=video_id, user__id=auth_token.user.id).order_by('-id')\n if len(assignments):\n assignment = assignments[0]\n else:\n assignment = facade.managers.AssignmentManager().create(auth_token, video_id)\n start_cutoff = datetime.utcnow() - timedelta(hours=12)\n attempts = facade.models.AssignmentAttempt.objects.filter(assignment__id=assignment.id, date_started__gt=start_cutoff).order_by('-date_started')\n if len(attempts):\n attempt = attempts[0]\n else:\n attempt = self.my_django_model.objects.create(assignment=assignment)\n self.authorizer.check_create_permissions(auth_token, attempt)\n return {'id': attempt.id}\n<|end_body_2|>\n\n<|body_start_3|>\n filters = {'member': {'assignment__task__id': videos}}\n if start_date or end_date:\n filters = [filters]\n if start_date:\n if pr_time.is_iso8601(start_date):\n start_date = pr_time.iso8601_to_datetime(start_date)\n filters.append({'greater_than_or_equal': {'date_started': start_date}})\n if end_date:\n if pr_time.is_iso8601(end_date):\n end_date = pr_time.iso8601_to_datetime(end_date)\n filters.append({'less_than_or_equal': {'date_started': end_date}})\n filters = {'and': filters}\n views = self.get_filtered(auth_token, filters, ['video', 'date_started', 'user'])\n views = Utils.merge_queries(views, facade.managers.VideoManager(), auth_token, ['name'], 'video')\n views = Utils.merge_queries(views, facade.managers.UserManager(), auth_token, ['first_name', 'last_name', 'email', 'default_username_and_domain'], 'user')\n return views\n<|end_body_3|>\n\n<|body_start_4|>\n filters = {'member': {'assignment__user__id': users}}\n if start_date or end_date:\n filters = [filters]\n if start_date:\n if pr_time.is_iso8601(start_date):\n start_date = pr_time.iso8601_to_datetime(start_date)\n filters.append({'greater_than_or_equal': {'date_started': start_date}})\n if end_date:\n if pr_time.is_iso8601(end_date):\n end_date = 
pr_time.iso8601_to_datetime(end_date)\n filters.append({'less_than_or_equal': {'date_started': end_date}})\n filters = {'and': filters}\n views = self.get_filtered(auth_token, filters, ['video', 'date_started', 'user'])\n views = Utils.merge_queries(views, facade.managers.VideoManager(), auth_token, ['name', 'author', 'description'], 'video')\n views = Utils.merge_queries(views, facade.managers.UserManager(), auth_token, ['first_name', 'last_name', 'email', 'username'], 'user')\n return views\n<|end_body_4|>\n", "revision_id": "a59457bc37f0501aea1f54d006a6de94ff80511c", "skeleton": "<|skeleton|>\nclass VideoSessionManager:\n \"\"\"Manage VideoSessions in the Power Reg system\"\"\"\n\n def __init__(self):\n \"\"\"constructor\"\"\"\n <|body_0|>\n\n def create(self, auth_token, assignment):\n \"\"\"Create a new VideoSession object. @param auth_token The authentication token of the acting user @type auth_token facade.models.AuthToken @param assignment FK for an assignment @type assignment int @return A dictionary with two indices. One is 'id' and is the primary key of the VideoSession object. The other is 'urls', and is a list of URLs that the user is authorized to view under this VideoSession.\"\"\"\n <|body_1|>\n\n def register_video_view(self, auth_token, video_id):\n \"\"\"Create an assignment to watch the video, if needed; then use it to create a VideoSession object, if needed. If a VideoSession for this (user, video) combination was created in the last 12 hours, does nothing. @param auth_token The authentication token of the acting user @type auth_token facade.models.AuthToken @param assignment FK for a video @type assignment int\"\"\"\n <|body_2|>\n\n def watcher_report(self, auth_token, videos, start_date=None, end_date=None):\n \"\"\"Returns a list of views of the given videos (optinally filtered by date) along with some information about the viewer.\"\"\"\n <|body_3|>\n\n def viewing_activity_report(self, auth_token, users, start_date=None, end_date=None):\n \"\"\"Returns a list of video views by the given users (optinally filtered by date) along with some information about the video.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class VideoSessionManager:\n \"\"\"Manage VideoSessions in the Power Reg system\"\"\"\n\n def __init__(self):\n \"\"\"constructor\"\"\"\n super(VideoSessionManager, self).__init__()\n self.getters.update({'assignment': 'get_foreign_key', 'date_completed': 'get_time', 'date_started': 'get_time', 'user': 'get_foreign_key', 'video': 'get_foreign_key'})\n self.setters.update({'date_completed': 'set_time', 'date_started': 'set_time'})\n self.my_django_model = facade.models.VideoSession\n\n def create(self, auth_token, assignment):\n \"\"\"Create a new VideoSession object. @param auth_token The authentication token of the acting user @type auth_token facade.models.AuthToken @param assignment FK for an assignment @type assignment int @return A dictionary with two indices. One is 'id' and is the primary key of the VideoSession object. 
The other is 'urls', and is a list of URLs that the user is authorized to view under this VideoSession.\"\"\"\n assignment_object = self._find_by_id(assignment, facade.models.Assignment)\n new_video_session = self.my_django_model(assignment=assignment_object)\n new_video_session.save()\n self.authorizer.check_create_permissions(auth_token, new_video_session)\n ret = {}\n ret['id'] = new_video_session.id\n ret['urls'] = list()\n for encoded_video in new_video_session.video.encoded_videos.all():\n ret['urls'].append(encoded_video.url)\n return ret\n\n def register_video_view(self, auth_token, video_id):\n \"\"\"Create an assignment to watch the video, if needed; then use it to create a VideoSession object, if needed. If a VideoSession for this (user, video) combination was created in the last 12 hours, does nothing. @param auth_token The authentication token of the acting user @type auth_token facade.models.AuthToken @param assignment FK for a video @type assignment int\"\"\"\n video = facade.models.Video.objects.get(id=video_id)\n assignments = facade.models.Assignment.objects.filter(task__id=video_id, user__id=auth_token.user.id).order_by('-id')\n if len(assignments):\n assignment = assignments[0]\n else:\n assignment = facade.managers.AssignmentManager().create(auth_token, video_id)\n start_cutoff = datetime.utcnow() - timedelta(hours=12)\n attempts = facade.models.AssignmentAttempt.objects.filter(assignment__id=assignment.id, date_started__gt=start_cutoff).order_by('-date_started')\n if len(attempts):\n attempt = attempts[0]\n else:\n attempt = self.my_django_model.objects.create(assignment=assignment)\n self.authorizer.check_create_permissions(auth_token, attempt)\n return {'id': attempt.id}\n\n def watcher_report(self, auth_token, videos, start_date=None, end_date=None):\n \"\"\"Returns a list of views of the given videos (optinally filtered by date) along with some information about the viewer.\"\"\"\n filters = {'member': {'assignment__task__id': videos}}\n if start_date or end_date:\n filters = [filters]\n if start_date:\n if pr_time.is_iso8601(start_date):\n start_date = pr_time.iso8601_to_datetime(start_date)\n filters.append({'greater_than_or_equal': {'date_started': start_date}})\n if end_date:\n if pr_time.is_iso8601(end_date):\n end_date = pr_time.iso8601_to_datetime(end_date)\n filters.append({'less_than_or_equal': {'date_started': end_date}})\n filters = {'and': filters}\n views = self.get_filtered(auth_token, filters, ['video', 'date_started', 'user'])\n views = Utils.merge_queries(views, facade.managers.VideoManager(), auth_token, ['name'], 'video')\n views = Utils.merge_queries(views, facade.managers.UserManager(), auth_token, ['first_name', 'last_name', 'email', 'default_username_and_domain'], 'user')\n return views\n\n def viewing_activity_report(self, auth_token, users, start_date=None, end_date=None):\n \"\"\"Returns a list of video views by the given users (optinally filtered by date) along with some information about the video.\"\"\"\n filters = {'member': {'assignment__user__id': users}}\n if start_date or end_date:\n filters = [filters]\n if start_date:\n if pr_time.is_iso8601(start_date):\n start_date = pr_time.iso8601_to_datetime(start_date)\n filters.append({'greater_than_or_equal': {'date_started': start_date}})\n if end_date:\n if pr_time.is_iso8601(end_date):\n end_date = pr_time.iso8601_to_datetime(end_date)\n filters.append({'less_than_or_equal': {'date_started': end_date}})\n filters = {'and': filters}\n views = self.get_filtered(auth_token, filters, 
['video', 'date_started', 'user'])\n views = Utils.merge_queries(views, facade.managers.VideoManager(), auth_token, ['name', 'author', 'description'], 'video')\n views = Utils.merge_queries(views, facade.managers.UserManager(), auth_token, ['first_name', 'last_name', 'email', 'username'], 'user')\n return views\n", "source": "the_stack_v2_python_sparse", "source_path": "vod_aws/managers/video_session_manager.py", "source_repo": "ninemoreminutes/openassign-server", "split": "val", "star_events_count": 0}
{"blob_id": "8bf15629eb64f728f3447db48dadf5f5f072b26c", "bodies": ["persistent_query_example = PersistentQueryExample(user_id=str(self.context['request'].user.id), content=validated_data['content'] if 'content' in validated_data else None, name=validated_data['name'] if 'name' in validated_data else None)\npersistent_query_example_api.upsert(persistent_query_example, self.context['request'].user)\nif 'templates' in validated_data:\n persistent_query_example.templates.set(validated_data['templates'])\nreturn persistent_query_example", "persistent_query_example.content = validated_data.get('content', persistent_query_example.content)\npersistent_query_example.name = validated_data.get('name', persistent_query_example.name)\npersistent_query_example_api.upsert(persistent_query_example, self.context['request'].user)\nif 'templates' in validated_data:\n persistent_query_example.templates.set(validated_data['templates'])\nreturn persistent_query_example"], "bodies_text": "<|body_start_0|>\n persistent_query_example = PersistentQueryExample(user_id=str(self.context['request'].user.id), content=validated_data['content'] if 'content' in validated_data else None, name=validated_data['name'] if 'name' in validated_data else None)\n persistent_query_example_api.upsert(persistent_query_example, self.context['request'].user)\n if 'templates' in validated_data:\n persistent_query_example.templates.set(validated_data['templates'])\n return persistent_query_example\n<|end_body_0|>\n\n<|body_start_1|>\n persistent_query_example.content = validated_data.get('content', persistent_query_example.content)\n persistent_query_example.name = validated_data.get('name', persistent_query_example.name)\n persistent_query_example_api.upsert(persistent_query_example, self.context['request'].user)\n if 'templates' in validated_data:\n persistent_query_example.templates.set(validated_data['templates'])\n return persistent_query_example\n<|end_body_1|>\n", "class_docstring": "Persistent query example", "class_name": "PersistentQueryExampleSerializer", "detected_licenses": ["NIST-Software", "BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PersistentQueryExampleSerializer:\n \"\"\"Persistent query example\"\"\"\n\n def create(self, validated_data):\n \"\"\"Create and return a new `PersistentQueryExample` instance, given the validated data.\"\"\"\n <|body_0|>\n\n def update(self, persistent_query_example, validated_data):\n \"\"\"Update and return an existing `PersistentQueryExample` instance, given the validated data.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n persistent_query_example = PersistentQueryExample(user_id=str(self.context['request'].user.id), content=validated_data['content'] if 'content' in validated_data else None, name=validated_data['name'] if 'name' in validated_data else None)\n persistent_query_example_api.upsert(persistent_query_example, self.context['request'].user)\n if 'templates' in validated_data:\n persistent_query_example.templates.set(validated_data['templates'])\n return persistent_query_example\n<|end_body_0|>\n\n<|body_start_1|>\n persistent_query_example.content = validated_data.get('content', persistent_query_example.content)\n persistent_query_example.name = validated_data.get('name', persistent_query_example.name)\n persistent_query_example_api.upsert(persistent_query_example, self.context['request'].user)\n if 'templates' in validated_data:\n persistent_query_example.templates.set(validated_data['templates'])\n 
return persistent_query_example\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000132", "length_bytes": 3259, "license_type": "permissive", "methods": [{"docstring": "Create and return a new `PersistentQueryExample` instance, given the validated data.", "name": "create", "signature": "def create(self, validated_data)"}, {"docstring": "Update and return an existing `PersistentQueryExample` instance, given the validated data.", "name": "update", "signature": "def update(self, persistent_query_example, validated_data)"}], "n_methods": 2, "prompt": "Implement the Python class `PersistentQueryExampleSerializer` described below.\n\nClass description:\nPersistent query example\n\nMethod signatures and docstrings:\n- def create(self, validated_data): Create and return a new `PersistentQueryExample` instance, given the validated data.\n- def update(self, persistent_query_example, validated_data): Update and return an existing `PersistentQueryExample` instance, given the validated data.", "prompted_full_text": "Implement the Python class `PersistentQueryExampleSerializer` described below.\n\nClass description:\nPersistent query example\n\nMethod signatures and docstrings:\n- def create(self, validated_data): Create and return a new `PersistentQueryExample` instance, given the validated data.\n- def update(self, persistent_query_example, validated_data): Update and return an existing `PersistentQueryExample` instance, given the validated data.\n\n<|skeleton|>\nclass PersistentQueryExampleSerializer:\n \"\"\"Persistent query example\"\"\"\n\n def create(self, validated_data):\n \"\"\"Create and return a new `PersistentQueryExample` instance, given the validated data.\"\"\"\n <|body_0|>\n\n def update(self, persistent_query_example, validated_data):\n \"\"\"Update and return an existing `PersistentQueryExample` instance, given the validated data.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n persistent_query_example = PersistentQueryExample(user_id=str(self.context['request'].user.id), content=validated_data['content'] if 'content' in validated_data else None, name=validated_data['name'] if 'name' in validated_data else None)\n persistent_query_example_api.upsert(persistent_query_example, self.context['request'].user)\n if 'templates' in validated_data:\n persistent_query_example.templates.set(validated_data['templates'])\n return persistent_query_example\n<|end_body_0|>\n\n<|body_start_1|>\n persistent_query_example.content = validated_data.get('content', persistent_query_example.content)\n persistent_query_example.name = validated_data.get('name', persistent_query_example.name)\n persistent_query_example_api.upsert(persistent_query_example, self.context['request'].user)\n if 'templates' in validated_data:\n persistent_query_example.templates.set(validated_data['templates'])\n return persistent_query_example\n<|end_body_1|>\n", "revision_id": "2abebfd1c2319899d907ad0b650fedb955be7492", "skeleton": "<|skeleton|>\nclass PersistentQueryExampleSerializer:\n \"\"\"Persistent query example\"\"\"\n\n def create(self, validated_data):\n \"\"\"Create and return a new `PersistentQueryExample` instance, given the validated data.\"\"\"\n <|body_0|>\n\n def update(self, persistent_query_example, validated_data):\n \"\"\"Update and return an existing `PersistentQueryExample` instance, given the validated data.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 
75829, "solution": "class PersistentQueryExampleSerializer:\n \"\"\"Persistent query example\"\"\"\n\n def create(self, validated_data):\n \"\"\"Create and return a new `PersistentQueryExample` instance, given the validated data.\"\"\"\n persistent_query_example = PersistentQueryExample(user_id=str(self.context['request'].user.id), content=validated_data['content'] if 'content' in validated_data else None, name=validated_data['name'] if 'name' in validated_data else None)\n persistent_query_example_api.upsert(persistent_query_example, self.context['request'].user)\n if 'templates' in validated_data:\n persistent_query_example.templates.set(validated_data['templates'])\n return persistent_query_example\n\n def update(self, persistent_query_example, validated_data):\n \"\"\"Update and return an existing `PersistentQueryExample` instance, given the validated data.\"\"\"\n persistent_query_example.content = validated_data.get('content', persistent_query_example.content)\n persistent_query_example.name = validated_data.get('name', persistent_query_example.name)\n persistent_query_example_api.upsert(persistent_query_example, self.context['request'].user)\n if 'templates' in validated_data:\n persistent_query_example.templates.set(validated_data['templates'])\n return persistent_query_example\n", "source": "the_stack_v2_python_sparse", "source_path": "core_explore_example_app/rest/persistent_query_example/serializers.py", "source_repo": "usnistgov/core_explore_example_app", "split": "val", "star_events_count": 0}
{"blob_id": "831041fa838f692f863f1f1f448cba013f1649c6", "bodies": ["mol = Molecule()\nself.assertEqual(mol.to_smiles(), '')\nself.assertEqual(mol.to_inchi(), '')", "mol = Molecule(smiles='[CH2-][N+]#N')\nwith self.assertRaisesRegex(ValueError, 'Unable to generate identifier type'):\n to_inchi(mol, backend='rdkit')\nmock_logging.error.assert_called_with('Unable to generate identifier for this molecule:\\n{0}'.format(mol.to_adjacency_list()))"], "bodies_text": "<|body_start_0|>\n mol = Molecule()\n self.assertEqual(mol.to_smiles(), '')\n self.assertEqual(mol.to_inchi(), '')\n<|end_body_0|>\n\n<|body_start_1|>\n mol = Molecule(smiles='[CH2-][N+]#N')\n with self.assertRaisesRegex(ValueError, 'Unable to generate identifier type'):\n to_inchi(mol, backend='rdkit')\n mock_logging.error.assert_called_with('Unable to generate identifier for this molecule:\\n{0}'.format(mol.to_adjacency_list()))\n<|end_body_1|>\n", "class_docstring": "", "class_name": "TranslatorTest", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TranslatorTest:\n\n def test_empty_molecule(self):\n \"\"\"Test that we can safely return a blank identifier for an empty molecule.\"\"\"\n <|body_0|>\n\n def test_failure_message(self, mock_logging):\n \"\"\"Test that we log the molecule adjlist upon failure.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n mol = Molecule()\n self.assertEqual(mol.to_smiles(), '')\n self.assertEqual(mol.to_inchi(), '')\n<|end_body_0|>\n\n<|body_start_1|>\n mol = Molecule(smiles='[CH2-][N+]#N')\n with self.assertRaisesRegex(ValueError, 'Unable to generate identifier type'):\n to_inchi(mol, backend='rdkit')\n mock_logging.error.assert_called_with('Unable to generate identifier for this molecule:\\n{0}'.format(mol.to_adjacency_list()))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000133", "length_bytes": 40756, "license_type": "permissive", "methods": [{"docstring": "Test that we can safely return a blank identifier for an empty molecule.", "name": "test_empty_molecule", "signature": "def test_empty_molecule(self)"}, {"docstring": "Test that we log the molecule adjlist upon failure.", "name": "test_failure_message", "signature": "def test_failure_message(self, mock_logging)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000150", "prompt": "Implement the Python class `TranslatorTest` described below.\n\nClass description:\nImplement the TranslatorTest class.\n\nMethod signatures and docstrings:\n- def test_empty_molecule(self): Test that we can safely return a blank identifier for an empty molecule.\n- def test_failure_message(self, mock_logging): Test that we log the molecule adjlist upon failure.", "prompted_full_text": "Implement the Python class `TranslatorTest` described below.\n\nClass description:\nImplement the TranslatorTest class.\n\nMethod signatures and docstrings:\n- def test_empty_molecule(self): Test that we can safely return a blank identifier for an empty molecule.\n- def test_failure_message(self, mock_logging): Test that we log the molecule adjlist upon failure.\n\n<|skeleton|>\nclass TranslatorTest:\n\n def test_empty_molecule(self):\n \"\"\"Test that we can safely return a blank identifier for an empty molecule.\"\"\"\n <|body_0|>\n\n def test_failure_message(self, mock_logging):\n \"\"\"Test that we log the molecule adjlist upon failure.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n mol = Molecule()\n self.assertEqual(mol.to_smiles(), '')\n 
self.assertEqual(mol.to_inchi(), '')\n<|end_body_0|>\n\n<|body_start_1|>\n mol = Molecule(smiles='[CH2-][N+]#N')\n with self.assertRaisesRegex(ValueError, 'Unable to generate identifier type'):\n to_inchi(mol, backend='rdkit')\n mock_logging.error.assert_called_with('Unable to generate identifier for this molecule:\\n{0}'.format(mol.to_adjacency_list()))\n<|end_body_1|>\n", "revision_id": "349a4af759cf8877197772cd7eaca1e51d46eff5", "skeleton": "<|skeleton|>\nclass TranslatorTest:\n\n def test_empty_molecule(self):\n \"\"\"Test that we can safely return a blank identifier for an empty molecule.\"\"\"\n <|body_0|>\n\n def test_failure_message(self, mock_logging):\n \"\"\"Test that we log the molecule adjlist upon failure.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TranslatorTest:\n def test_empty_molecule(self):\n \"\"\"Test that we can safely return a blank identifier for an empty molecule.\"\"\"\n mol = Molecule()\n self.assertEqual(mol.to_smiles(), '')\n self.assertEqual(mol.to_inchi(), '')\n\n def test_failure_message(self, mock_logging):\n \"\"\"Test that we log the molecule adjlist upon failure.\"\"\"\n mol = Molecule(smiles='[CH2-][N+]#N')\n with self.assertRaisesRegex(ValueError, 'Unable to generate identifier type'):\n to_inchi(mol, backend='rdkit')\n mock_logging.error.assert_called_with('Unable to generate identifier for this molecule:\\n{0}'.format(mol.to_adjacency_list()))\n", "source": "the_stack_v2_python_sparse", "source_path": "rmgpy/molecule/translatorTest.py", "source_repo": "CanePan-cc/CanePanWorkshop", "split": "val", "star_events_count": 2}
{"blob_id": "20b1394078e435635a7fdcdbbd3bf175da981e10", "bodies": ["super().setupUI(Form)\nself.label_4 = QtWidgets.QLabel(self.verticalLayoutWidget)\nself.label_4.setToolTip('')\nself.label_4.setAlignment(QtCore.Qt.AlignCenter)\nself.label_4.setObjectName('label_4')\nself.verticalLayout_2.addWidget(self.label_4)\nself.label_8 = QtWidgets.QLabel(self.verticalLayoutWidget)\nself.label_8.setObjectName('label_8')\nself.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_8)\nself.sbDenomImg = QtWidgets.QSpinBox(self.verticalLayoutWidget)\nself.sbDenomImg.setObjectName('sbDenomImg')\nself.sbDenomImg.setRange(0, 36)\nself.sbDenomImg.setSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Fixed)\nself.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.sbDenomImg)\nself.label_9 = QtWidgets.QLabel(self.verticalLayoutWidget)\nself.label_9.setObjectName('label_9')\nself.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_9)\nself.sbDenomImgT = QtWidgets.QSpinBox(self.verticalLayoutWidget)\nself.sbDenomImgT.setObjectName('sbDenomImgT')\nself.sbDenomImgT.setRange(0, 12)\nself.sbDenomImgT.setSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Fixed)\nself.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.sbDenomImgT)\nself.retranslateUi(Form)\nQtCore.QMetaObject.connectSlotsByName(Form)", "_translate = QtCore.QCoreApplication.translate\nForm.setWindowTitle(_translate('Form', 'Prueba Memoria Visoespacial'))\nself.label_4.setText(_translate('Form', 'Ingrese los datos de la prueba de Memoria Visoespacia'))\nself.label_8.setText(_translate('Form', 'Total recall: '))\nself.label_9.setText(_translate('Form', 'Delayed recall:'))\nself.pbStart.setText(_translate('Form', 'Registrar Prueba'))\nself.backButton.setText(_translate('Form', 'Regresar al Menu'))"], "bodies_text": "<|body_start_0|>\n super().setupUI(Form)\n self.label_4 = QtWidgets.QLabel(self.verticalLayoutWidget)\n self.label_4.setToolTip('')\n self.label_4.setAlignment(QtCore.Qt.AlignCenter)\n self.label_4.setObjectName('label_4')\n self.verticalLayout_2.addWidget(self.label_4)\n self.label_8 = QtWidgets.QLabel(self.verticalLayoutWidget)\n self.label_8.setObjectName('label_8')\n self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_8)\n self.sbDenomImg = QtWidgets.QSpinBox(self.verticalLayoutWidget)\n self.sbDenomImg.setObjectName('sbDenomImg')\n self.sbDenomImg.setRange(0, 36)\n self.sbDenomImg.setSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Fixed)\n self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.sbDenomImg)\n self.label_9 = QtWidgets.QLabel(self.verticalLayoutWidget)\n self.label_9.setObjectName('label_9')\n self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_9)\n self.sbDenomImgT = QtWidgets.QSpinBox(self.verticalLayoutWidget)\n self.sbDenomImgT.setObjectName('sbDenomImgT')\n self.sbDenomImgT.setRange(0, 12)\n self.sbDenomImgT.setSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Fixed)\n self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.sbDenomImgT)\n self.retranslateUi(Form)\n QtCore.QMetaObject.connectSlotsByName(Form)\n<|end_body_0|>\n\n<|body_start_1|>\n _translate = QtCore.QCoreApplication.translate\n Form.setWindowTitle(_translate('Form', 'Prueba Memoria Visoespacial'))\n self.label_4.setText(_translate('Form', 'Ingrese los datos de la prueba de Memoria Visoespacia'))\n self.label_8.setText(_translate('Form', 'Total 
recall: '))\n self.label_9.setText(_translate('Form', 'Delayed recall:'))\n self.pbStart.setText(_translate('Form', 'Registrar Prueba'))\n self.backButton.setText(_translate('Form', 'Regresar al Menu'))\n<|end_body_1|>\n", "class_docstring": "", "class_name": "MemoriaVisoespaciaWidget", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MemoriaVisoespaciaWidget:\n\n def setupUi(self, Form):\n \"\"\"Método empleado para especificar el contenido de la Interfáz gráfica, es generado por pyuic5. Args: Form: Ventana en la que se deplegará la interfáz gráfica (es un tipo de dato QtWidget.QWidget)\"\"\"\n <|body_0|>\n\n def retranslateUi(self, Form):\n \"\"\"Método empleado paraasignar el contenido de la Interfáz gráfica, es generado por pyuic5. Args: Form: Ventana en la que se deplegará la interfáz gráfica (es un tipo de dato QtWidget.QWidget)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().setupUI(Form)\n self.label_4 = QtWidgets.QLabel(self.verticalLayoutWidget)\n self.label_4.setToolTip('')\n self.label_4.setAlignment(QtCore.Qt.AlignCenter)\n self.label_4.setObjectName('label_4')\n self.verticalLayout_2.addWidget(self.label_4)\n self.label_8 = QtWidgets.QLabel(self.verticalLayoutWidget)\n self.label_8.setObjectName('label_8')\n self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_8)\n self.sbDenomImg = QtWidgets.QSpinBox(self.verticalLayoutWidget)\n self.sbDenomImg.setObjectName('sbDenomImg')\n self.sbDenomImg.setRange(0, 36)\n self.sbDenomImg.setSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Fixed)\n self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.sbDenomImg)\n self.label_9 = QtWidgets.QLabel(self.verticalLayoutWidget)\n self.label_9.setObjectName('label_9')\n self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_9)\n self.sbDenomImgT = QtWidgets.QSpinBox(self.verticalLayoutWidget)\n self.sbDenomImgT.setObjectName('sbDenomImgT')\n self.sbDenomImgT.setRange(0, 12)\n self.sbDenomImgT.setSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Fixed)\n self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.sbDenomImgT)\n self.retranslateUi(Form)\n QtCore.QMetaObject.connectSlotsByName(Form)\n<|end_body_0|>\n\n<|body_start_1|>\n _translate = QtCore.QCoreApplication.translate\n Form.setWindowTitle(_translate('Form', 'Prueba Memoria Visoespacial'))\n self.label_4.setText(_translate('Form', 'Ingrese los datos de la prueba de Memoria Visoespacia'))\n self.label_8.setText(_translate('Form', 'Total recall: '))\n self.label_9.setText(_translate('Form', 'Delayed recall:'))\n self.pbStart.setText(_translate('Form', 'Registrar Prueba'))\n self.backButton.setText(_translate('Form', 'Regresar al Menu'))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000134", "length_bytes": 2783, "license_type": "no_license", "methods": [{"docstring": "Método empleado para especificar el contenido de la Interfáz gráfica, es generado por pyuic5. Args: Form: Ventana en la que se deplegará la interfáz gráfica (es un tipo de dato QtWidget.QWidget)", "name": "setupUi", "signature": "def setupUi(self, Form)"}, {"docstring": "Método empleado paraasignar el contenido de la Interfáz gráfica, es generado por pyuic5. 
Args: Form: Ventana en la que se deplegará la interfáz gráfica (es un tipo de dato QtWidget.QWidget)", "name": "retranslateUi", "signature": "def retranslateUi(self, Form)"}], "n_methods": 2, "prompt": "Implement the Python class `MemoriaVisoespaciaWidget` described below.\n\nClass description:\nImplement the MemoriaVisoespaciaWidget class.\n\nMethod signatures and docstrings:\n- def setupUi(self, Form): Método empleado para especificar el contenido de la Interfáz gráfica, es generado por pyuic5. Args: Form: Ventana en la que se deplegará la interfáz gráfica (es un tipo de dato QtWidget.QWidget)\n- def retranslateUi(self, Form): Método empleado paraasignar el contenido de la Interfáz gráfica, es generado por pyuic5. Args: Form: Ventana en la que se deplegará la interfáz gráfica (es un tipo de dato QtWidget.QWidget)", "prompted_full_text": "Implement the Python class `MemoriaVisoespaciaWidget` described below.\n\nClass description:\nImplement the MemoriaVisoespaciaWidget class.\n\nMethod signatures and docstrings:\n- def setupUi(self, Form): Método empleado para especificar el contenido de la Interfáz gráfica, es generado por pyuic5. Args: Form: Ventana en la que se deplegará la interfáz gráfica (es un tipo de dato QtWidget.QWidget)\n- def retranslateUi(self, Form): Método empleado paraasignar el contenido de la Interfáz gráfica, es generado por pyuic5. Args: Form: Ventana en la que se deplegará la interfáz gráfica (es un tipo de dato QtWidget.QWidget)\n\n<|skeleton|>\nclass MemoriaVisoespaciaWidget:\n\n def setupUi(self, Form):\n \"\"\"Método empleado para especificar el contenido de la Interfáz gráfica, es generado por pyuic5. Args: Form: Ventana en la que se deplegará la interfáz gráfica (es un tipo de dato QtWidget.QWidget)\"\"\"\n <|body_0|>\n\n def retranslateUi(self, Form):\n \"\"\"Método empleado paraasignar el contenido de la Interfáz gráfica, es generado por pyuic5. 
Args: Form: Ventana en la que se deplegará la interfáz gráfica (es un tipo de dato QtWidget.QWidget)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().setupUI(Form)\n self.label_4 = QtWidgets.QLabel(self.verticalLayoutWidget)\n self.label_4.setToolTip('')\n self.label_4.setAlignment(QtCore.Qt.AlignCenter)\n self.label_4.setObjectName('label_4')\n self.verticalLayout_2.addWidget(self.label_4)\n self.label_8 = QtWidgets.QLabel(self.verticalLayoutWidget)\n self.label_8.setObjectName('label_8')\n self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_8)\n self.sbDenomImg = QtWidgets.QSpinBox(self.verticalLayoutWidget)\n self.sbDenomImg.setObjectName('sbDenomImg')\n self.sbDenomImg.setRange(0, 36)\n self.sbDenomImg.setSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Fixed)\n self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.sbDenomImg)\n self.label_9 = QtWidgets.QLabel(self.verticalLayoutWidget)\n self.label_9.setObjectName('label_9')\n self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_9)\n self.sbDenomImgT = QtWidgets.QSpinBox(self.verticalLayoutWidget)\n self.sbDenomImgT.setObjectName('sbDenomImgT')\n self.sbDenomImgT.setRange(0, 12)\n self.sbDenomImgT.setSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Fixed)\n self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.sbDenomImgT)\n self.retranslateUi(Form)\n QtCore.QMetaObject.connectSlotsByName(Form)\n<|end_body_0|>\n\n<|body_start_1|>\n _translate = QtCore.QCoreApplication.translate\n Form.setWindowTitle(_translate('Form', 'Prueba Memoria Visoespacial'))\n self.label_4.setText(_translate('Form', 'Ingrese los datos de la prueba de Memoria Visoespacia'))\n self.label_8.setText(_translate('Form', 'Total recall: '))\n self.label_9.setText(_translate('Form', 'Delayed recall:'))\n self.pbStart.setText(_translate('Form', 'Registrar Prueba'))\n self.backButton.setText(_translate('Form', 'Regresar al Menu'))\n<|end_body_1|>\n", "revision_id": "5d1d68fc4476ed866ecfc305112854d9a49c3876", "skeleton": "<|skeleton|>\nclass MemoriaVisoespaciaWidget:\n\n def setupUi(self, Form):\n \"\"\"Método empleado para especificar el contenido de la Interfáz gráfica, es generado por pyuic5. Args: Form: Ventana en la que se deplegará la interfáz gráfica (es un tipo de dato QtWidget.QWidget)\"\"\"\n <|body_0|>\n\n def retranslateUi(self, Form):\n \"\"\"Método empleado paraasignar el contenido de la Interfáz gráfica, es generado por pyuic5. Args: Form: Ventana en la que se deplegará la interfáz gráfica (es un tipo de dato QtWidget.QWidget)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class MemoriaVisoespaciaWidget:\n def setupUi(self, Form):\n \"\"\"Método empleado para especificar el contenido de la Interfáz gráfica, es generado por pyuic5. 
Args: Form: Ventana en la que se deplegará la interfáz gráfica (es un tipo de dato QtWidget.QWidget)\"\"\"\n super().setupUI(Form)\n self.label_4 = QtWidgets.QLabel(self.verticalLayoutWidget)\n self.label_4.setToolTip('')\n self.label_4.setAlignment(QtCore.Qt.AlignCenter)\n self.label_4.setObjectName('label_4')\n self.verticalLayout_2.addWidget(self.label_4)\n self.label_8 = QtWidgets.QLabel(self.verticalLayoutWidget)\n self.label_8.setObjectName('label_8')\n self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_8)\n self.sbDenomImg = QtWidgets.QSpinBox(self.verticalLayoutWidget)\n self.sbDenomImg.setObjectName('sbDenomImg')\n self.sbDenomImg.setRange(0, 36)\n self.sbDenomImg.setSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Fixed)\n self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.sbDenomImg)\n self.label_9 = QtWidgets.QLabel(self.verticalLayoutWidget)\n self.label_9.setObjectName('label_9')\n self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_9)\n self.sbDenomImgT = QtWidgets.QSpinBox(self.verticalLayoutWidget)\n self.sbDenomImgT.setObjectName('sbDenomImgT')\n self.sbDenomImgT.setRange(0, 12)\n self.sbDenomImgT.setSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Fixed)\n self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.sbDenomImgT)\n self.retranslateUi(Form)\n QtCore.QMetaObject.connectSlotsByName(Form)\n\n def retranslateUi(self, Form):\n \"\"\"Método empleado paraasignar el contenido de la Interfáz gráfica, es generado por pyuic5. Args: Form: Ventana en la que se deplegará la interfáz gráfica (es un tipo de dato QtWidget.QWidget)\"\"\"\n _translate = QtCore.QCoreApplication.translate\n Form.setWindowTitle(_translate('Form', 'Prueba Memoria Visoespacial'))\n self.label_4.setText(_translate('Form', 'Ingrese los datos de la prueba de Memoria Visoespacia'))\n self.label_8.setText(_translate('Form', 'Total recall: '))\n self.label_9.setText(_translate('Form', 'Delayed recall:'))\n self.pbStart.setText(_translate('Form', 'Registrar Prueba'))\n self.backButton.setText(_translate('Form', 'Regresar al Menu'))\n", "source": "the_stack_v2_python_sparse", "source_path": "src/main/python/vistas/MemoriaVisoespaciaWidget.py", "source_repo": "ProyectoIntegrador2018/reportes-neurociencias", "split": "val", "star_events_count": 1}
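The MemoriaVisoespaciaWidget record above leans on state its skeleton never shows: setupUi reads verticalLayoutWidget, verticalLayout_2, formLayout, pbStart and backButton, all created by an unseen parent class, and the super().setupUI(Form) call only resolves if that parent really spells the method with a capital "UI" (stock pyuic5 output spells it setupUi). Below is a minimal self-contained sketch of the same two-field form with the parent dependency removed; it assumes PyQt5 is installed, and the Demo class name plus the direct layout construction are illustrative additions, not the repository's code.

from PyQt5 import QtCore, QtWidgets

class MemoriaVisoespaciaWidgetDemo(object):
    """Standalone variant of the record's form: a caption plus two spin boxes."""

    def setupUi(self, Form):
        Form.setObjectName('Form')
        outer = QtWidgets.QVBoxLayout(Form)          # stands in for the parent's verticalLayout_2
        self.label_4 = QtWidgets.QLabel(Form)
        self.label_4.setAlignment(QtCore.Qt.AlignCenter)
        outer.addWidget(self.label_4)
        form = QtWidgets.QFormLayout()               # stands in for the parent's formLayout
        self.label_8 = QtWidgets.QLabel(Form)
        self.sbDenomImg = QtWidgets.QSpinBox(Form)
        self.sbDenomImg.setRange(0, 36)              # total recall score: 0..36, as in the record
        form.addRow(self.label_8, self.sbDenomImg)
        self.label_9 = QtWidgets.QLabel(Form)
        self.sbDenomImgT = QtWidgets.QSpinBox(Form)
        self.sbDenomImgT.setRange(0, 12)             # delayed recall score: 0..12, as in the record
        form.addRow(self.label_9, self.sbDenomImgT)
        outer.addLayout(form)
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate('Form', 'Prueba Memoria Visoespacial'))
        self.label_4.setText(_translate('Form', 'Ingrese los datos de la prueba'))
        self.label_8.setText(_translate('Form', 'Total recall: '))
        self.label_9.setText(_translate('Form', 'Delayed recall:'))

if __name__ == '__main__':
    import sys
    app = QtWidgets.QApplication(sys.argv)
    window = QtWidgets.QWidget()
    MemoriaVisoespaciaWidgetDemo().setupUi(window)
    window.show()
    sys.exit(app.exec_())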
{"blob_id": "94185403ea5a9d86b133aae81608774fb2ffd013", "bodies": ["self.pay_date = pay_date\nself.start_date = start_date\nself.end_date = end_date\nself.pay_period_hours = pay_period_hours\nself.gross_pay_amount = gross_pay_amount\nself.gross_pay_ytd = gross_pay_ytd\nself.net_pay_amount = net_pay_amount\nself.net_pay_ytd = net_pay_ytd\nself.payroll_provider = payroll_provider\nself.pay_frequency = pay_frequency\nself.pay_type = pay_type\nself.additional_properties = additional_properties", "if dictionary is None:\n return None\npay_date = dictionary.get('payDate')\nstart_date = dictionary.get('startDate')\nend_date = dictionary.get('endDate')\npay_period_hours = dictionary.get('payPeriodHours')\ngross_pay_amount = dictionary.get('grossPayAmount')\ngross_pay_ytd = dictionary.get('grossPayYTD')\nnet_pay_amount = dictionary.get('netPayAmount')\nnet_pay_ytd = dictionary.get('netPayYTD')\npayroll_provider = dictionary.get('payrollProvider')\npay_frequency = dictionary.get('payFrequency')\npay_type = dictionary.get('payType')\nfor key in cls._names.values():\n if key in dictionary:\n del dictionary[key]\nreturn cls(pay_date, start_date, end_date, pay_period_hours, gross_pay_amount, gross_pay_ytd, net_pay_amount, net_pay_ytd, payroll_provider, pay_frequency, pay_type, dictionary)"], "bodies_text": "<|body_start_0|>\n self.pay_date = pay_date\n self.start_date = start_date\n self.end_date = end_date\n self.pay_period_hours = pay_period_hours\n self.gross_pay_amount = gross_pay_amount\n self.gross_pay_ytd = gross_pay_ytd\n self.net_pay_amount = net_pay_amount\n self.net_pay_ytd = net_pay_ytd\n self.payroll_provider = payroll_provider\n self.pay_frequency = pay_frequency\n self.pay_type = pay_type\n self.additional_properties = additional_properties\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n pay_date = dictionary.get('payDate')\n start_date = dictionary.get('startDate')\n end_date = dictionary.get('endDate')\n pay_period_hours = dictionary.get('payPeriodHours')\n gross_pay_amount = dictionary.get('grossPayAmount')\n gross_pay_ytd = dictionary.get('grossPayYTD')\n net_pay_amount = dictionary.get('netPayAmount')\n net_pay_ytd = dictionary.get('netPayYTD')\n payroll_provider = dictionary.get('payrollProvider')\n pay_frequency = dictionary.get('payFrequency')\n pay_type = dictionary.get('payType')\n for key in cls._names.values():\n if key in dictionary:\n del dictionary[key]\n return cls(pay_date, start_date, end_date, pay_period_hours, gross_pay_amount, gross_pay_ytd, net_pay_amount, net_pay_ytd, payroll_provider, pay_frequency, pay_type, dictionary)\n<|end_body_1|>\n", "class_docstring": "Implementation of the 'Payroll Main Paystatement Fields' model. TODO: type model description here. Attributes: pay_date (long|int): Pay date for a pay period start_date (long|int): Start date for a pay period end_date (long|int): End date for a pay period pay_period_hours (int): The sum total of the number of hours worked each week for a pay period. gross_pay_amount (float): Gross pay amount for a pay period gross_pay_ytd (float): The year-to-date (YTD) gross pay amount for an employer.
**Note**: This field is only included in the pay histories from the last period of the year where `lastPayPeriodIndicator` = true. net_pay_amount (float): Net pay amount for a pay period net_pay_ytd", "class_name": "PayrollMainPaystatementFields", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PayrollMainPaystatementFields:\n \"\"\"Implementation of the 'Payroll Main Paystatement Fields' model. TODO: type model description here. Attributes: pay_date (long|int): Pay date for a pay period start_date (long|int): Start date for a pay period end_date (long|int): End date for a pay period pay_period_hours (int): The sum total of the number of hours worked each week for a pay period. gross_pay_amount (float): Gross pay amount for a pay period gross_pay_ytd (float): The year-to-date (YTD) gross pay amount for an employer.
**Note**: This field is only included in the pay histories from the last period of the year where `lastPayPeriodIndicator` = true. net_pay_amount (float): Net pay amount for a pay period net_pay_ytd\"\"\"\n\n def __init__(self, pay_date=None, start_date=None, end_date=None, pay_period_hours=None, gross_pay_amount=None, gross_pay_ytd=None, net_pay_amount=None, net_pay_ytd=None, payroll_provider=None, pay_frequency=None, pay_type=None, additional_properties={}):\n \"\"\"Constructor for the PayrollMainPaystatementFields class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.pay_date = pay_date\n self.start_date = start_date\n self.end_date = end_date\n self.pay_period_hours = pay_period_hours\n self.gross_pay_amount = gross_pay_amount\n self.gross_pay_ytd = gross_pay_ytd\n self.net_pay_amount = net_pay_amount\n self.net_pay_ytd = net_pay_ytd\n self.payroll_provider = payroll_provider\n self.pay_frequency = pay_frequency\n self.pay_type = pay_type\n self.additional_properties = additional_properties\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n pay_date = dictionary.get('payDate')\n start_date = dictionary.get('startDate')\n end_date = dictionary.get('endDate')\n pay_period_hours = dictionary.get('payPeriodHours')\n gross_pay_amount = dictionary.get('grossPayAmount')\n gross_pay_ytd = dictionary.get('grossPayYTD')\n net_pay_amount = dictionary.get('netPayAmount')\n net_pay_ytd = dictionary.get('netPayYTD')\n payroll_provider = dictionary.get('payrollProvider')\n pay_frequency = dictionary.get('payFrequency')\n pay_type = dictionary.get('payType')\n for key in cls._names.values():\n if key in dictionary:\n del dictionary[key]\n return cls(pay_date, start_date, end_date, pay_period_hours, gross_pay_amount, gross_pay_ytd, net_pay_amount, net_pay_ytd, payroll_provider, pay_frequency, pay_type, dictionary)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000135", "length_bytes": 5216, "license_type": "permissive", "methods": [{"docstring": "Constructor for the PayrollMainPaystatementFields class", "name": "__init__", "signature": "def __init__(self, pay_date=None, start_date=None, end_date=None, pay_period_hours=None, gross_pay_amount=None, gross_pay_ytd=None, net_pay_amount=None, net_pay_ytd=None, payroll_provider=None, pay_frequency=None, pay_type=None, additional_properties={})"}, {"docstring": "Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "name": "from_dictionary", "signature": "def from_dictionary(cls, dictionary)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_052976", "prompt": "Implement the Python class `PayrollMainPaystatementFields` described below.\n\nClass description:\nImplementation of the 'Payroll Main Paystatement Fields' model. TODO: type model description here. 
Attributes: pay_date (long|int): Pay date for a pay period start_date (long|int): Start date for a pay period end_date (long|int): End date for a pay period pay_period_hours (int): The sum total of the number of hours worked each week for a pay period. gross_pay_amount (float): Gross pay amount for a pay period gross_pay_ytd (float): The year-to-date (YTD) gross pay amount for an employer.
**Note**: This field is only included in the pay histories from the last period of the year where `lastPayPeriodIndicator` = true. net_pay_amount (float): Net pay amount for a pay period net_pay_ytd\n\nMethod signatures and docstrings:\n- def __init__(self, pay_date=None, start_date=None, end_date=None, pay_period_hours=None, gross_pay_amount=None, gross_pay_ytd=None, net_pay_amount=None, net_pay_ytd=None, payroll_provider=None, pay_frequency=None, pay_type=None, additional_properties={}): Constructor for the PayrollMainPaystatementFields class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "prompted_full_text": "Implement the Python class `PayrollMainPaystatementFields` described below.\n\nClass description:\nImplementation of the 'Payroll Main Paystatement Fields' model. TODO: type model description here. Attributes: pay_date (long|int): Pay date for a pay period start_date (long|int): Start date for a pay period end_date (long|int): End date for a pay period pay_period_hours (int): The sum total of the number of hours worked each week for a pay period. gross_pay_amount (float): Gross pay amount for a pay period gross_pay_ytd (float): The year-to-date (YTD) gross pay amount for an employer.
**Note**: This field is only included in the pay histories from the last period of the year where `lastPayPeriodIndicator` = true. net_pay_amount (float): Net pay amount for a pay period net_pay_ytd\n\nMethod signatures and docstrings:\n- def __init__(self, pay_date=None, start_date=None, end_date=None, pay_period_hours=None, gross_pay_amount=None, gross_pay_ytd=None, net_pay_amount=None, net_pay_ytd=None, payroll_provider=None, pay_frequency=None, pay_type=None, additional_properties={}): Constructor for the PayrollMainPaystatementFields class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\n\n<|skeleton|>\nclass PayrollMainPaystatementFields:\n \"\"\"Implementation of the 'Payroll Main Paystatement Fields' model. TODO: type model description here. Attributes: pay_date (long|int): Pay date for a pay period start_date (long|int): Start date for a pay period end_date (long|int): End date for a pay period pay_period_hours (int): The sum total of the number of hours worked each week for a pay period. gross_pay_amount (float): Gross pay amount for a pay period gross_pay_ytd (float): The year-to-date (YTD) gross pay amount for an employer.
**Note**: This field is only included in the pay histories from the last period of the year where `lastPayPeriodIndicator` = true. net_pay_amount (float): Net pay amount for a pay period net_pay_ytd\"\"\"\n\n def __init__(self, pay_date=None, start_date=None, end_date=None, pay_period_hours=None, gross_pay_amount=None, gross_pay_ytd=None, net_pay_amount=None, net_pay_ytd=None, payroll_provider=None, pay_frequency=None, pay_type=None, additional_properties={}):\n \"\"\"Constructor for the PayrollMainPaystatementFields class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.pay_date = pay_date\n self.start_date = start_date\n self.end_date = end_date\n self.pay_period_hours = pay_period_hours\n self.gross_pay_amount = gross_pay_amount\n self.gross_pay_ytd = gross_pay_ytd\n self.net_pay_amount = net_pay_amount\n self.net_pay_ytd = net_pay_ytd\n self.payroll_provider = payroll_provider\n self.pay_frequency = pay_frequency\n self.pay_type = pay_type\n self.additional_properties = additional_properties\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n pay_date = dictionary.get('payDate')\n start_date = dictionary.get('startDate')\n end_date = dictionary.get('endDate')\n pay_period_hours = dictionary.get('payPeriodHours')\n gross_pay_amount = dictionary.get('grossPayAmount')\n gross_pay_ytd = dictionary.get('grossPayYTD')\n net_pay_amount = dictionary.get('netPayAmount')\n net_pay_ytd = dictionary.get('netPayYTD')\n payroll_provider = dictionary.get('payrollProvider')\n pay_frequency = dictionary.get('payFrequency')\n pay_type = dictionary.get('payType')\n for key in cls._names.values():\n if key in dictionary:\n del dictionary[key]\n return cls(pay_date, start_date, end_date, pay_period_hours, gross_pay_amount, gross_pay_ytd, net_pay_amount, net_pay_ytd, payroll_provider, pay_frequency, pay_type, dictionary)\n<|end_body_1|>\n", "revision_id": "b2ab1ded435db75c78d42261f5e4acd2a3061487", "skeleton": "<|skeleton|>\nclass PayrollMainPaystatementFields:\n \"\"\"Implementation of the 'Payroll Main Paystatement Fields' model. TODO: type model description here. Attributes: pay_date (long|int): Pay date for a pay period start_date (long|int): Start date for a pay period end_date (long|int): End date for a pay period pay_period_hours (int): The sum total of the number of hours worked each week for a pay period. gross_pay_amount (float): Gross pay amount for a pay period gross_pay_ytd (float): The year-to-date (YTD) gross pay amount for an employer.
**Note**: This field is only included in the pay histories from the last period of the year where `lastPayPeriodIndicator` = true. net_pay_amount (float): Net pay amount for a pay period net_pay_ytd\"\"\"\n\n def __init__(self, pay_date=None, start_date=None, end_date=None, pay_period_hours=None, gross_pay_amount=None, gross_pay_ytd=None, net_pay_amount=None, net_pay_ytd=None, payroll_provider=None, pay_frequency=None, pay_type=None, additional_properties={}):\n \"\"\"Constructor for the PayrollMainPaystatementFields class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class PayrollMainPaystatementFields:\n \"\"\"Implementation of the 'Payroll Main Paystatement Fields' model. TODO: type model description here. Attributes: pay_date (long|int): Pay date for a pay period start_date (long|int): Start date for a pay period end_date (long|int): End date for a pay period pay_period_hours (int): The sum total of the number of hours worked each week for a pay period. gross_pay_amount (float): Gross pay amount for a pay period gross_pay_ytd (float): The year-to-date (YTD) gross pay amount for an employer.
**Note**: This field is only included in the pay histories from the last period of the year where `lastPayPeriodIndicator` = true. net_pay_amount (float): Net pay amount for a pay period net_pay_ytd\"\"\"\n\n def __init__(self, pay_date=None, start_date=None, end_date=None, pay_period_hours=None, gross_pay_amount=None, gross_pay_ytd=None, net_pay_amount=None, net_pay_ytd=None, payroll_provider=None, pay_frequency=None, pay_type=None, additional_properties={}):\n \"\"\"Constructor for the PayrollMainPaystatementFields class\"\"\"\n self.pay_date = pay_date\n self.start_date = start_date\n self.end_date = end_date\n self.pay_period_hours = pay_period_hours\n self.gross_pay_amount = gross_pay_amount\n self.gross_pay_ytd = gross_pay_ytd\n self.net_pay_amount = net_pay_amount\n self.net_pay_ytd = net_pay_ytd\n self.payroll_provider = payroll_provider\n self.pay_frequency = pay_frequency\n self.pay_type = pay_type\n self.additional_properties = additional_properties\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n if dictionary is None:\n return None\n pay_date = dictionary.get('payDate')\n start_date = dictionary.get('startDate')\n end_date = dictionary.get('endDate')\n pay_period_hours = dictionary.get('payPeriodHours')\n gross_pay_amount = dictionary.get('grossPayAmount')\n gross_pay_ytd = dictionary.get('grossPayYTD')\n net_pay_amount = dictionary.get('netPayAmount')\n net_pay_ytd = dictionary.get('netPayYTD')\n payroll_provider = dictionary.get('payrollProvider')\n pay_frequency = dictionary.get('payFrequency')\n pay_type = dictionary.get('payType')\n for key in cls._names.values():\n if key in dictionary:\n del dictionary[key]\n return cls(pay_date, start_date, end_date, pay_period_hours, gross_pay_amount, gross_pay_ytd, net_pay_amount, net_pay_ytd, payroll_provider, pay_frequency, pay_type, dictionary)\n", "source": "the_stack_v2_python_sparse", "source_path": "finicityapi/models/payroll_main_paystatement_fields.py", "source_repo": "monarchmoney/finicity-python", "split": "val", "star_events_count": 0}
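Two details in the PayrollMainPaystatementFields record are easy to trip over. from_dictionary receives cls and reads cls._names, so in the generating SDK it is presumably decorated with @classmethod and backed by a class-level mapping from attribute names to camelCase API keys; neither the decorator nor the mapping appears in the record, so both are reconstructed below from the .get() calls. The additional_properties={} default is also the classic mutable-default pitfall, and the original body deletes keys from the caller's dictionary while collecting extras. A hedged sketch that keeps the observable behavior while avoiding both, with a compact **fields constructor standing in for the twelve explicit keyword parameters:

class PayrollMainPaystatementFieldsDemo:
    # Assumed attribute -> API-key mapping; the record references cls._names
    # without defining it, so these pairs are inferred from the .get() calls.
    _names = {
        'pay_date': 'payDate',
        'start_date': 'startDate',
        'end_date': 'endDate',
        'pay_period_hours': 'payPeriodHours',
        'gross_pay_amount': 'grossPayAmount',
        'gross_pay_ytd': 'grossPayYTD',
        'net_pay_amount': 'netPayAmount',
        'net_pay_ytd': 'netPayYTD',
        'payroll_provider': 'payrollProvider',
        'pay_frequency': 'payFrequency',
        'pay_type': 'payType',
    }

    def __init__(self, additional_properties=None, **fields):
        # One attribute per known field, defaulting to None; building a fresh
        # dict per instance sidesteps the shared mutable-default problem.
        for attr in self._names:
            setattr(self, attr, fields.get(attr))
        self.additional_properties = additional_properties if additional_properties is not None else {}

    @classmethod
    def from_dictionary(cls, dictionary):
        if dictionary is None:
            return None
        known = {attr: dictionary.get(key) for attr, key in cls._names.items()}
        # Collect unrecognized keys without deleting them from the caller's dict.
        extras = {k: v for k, v in dictionary.items() if k not in cls._names.values()}
        return cls(additional_properties=extras, **known)

p = PayrollMainPaystatementFieldsDemo.from_dictionary(
    {'payDate': 1735689600, 'grossPayAmount': 2500.0, 'customTag': 'x'})
print(p.pay_date, p.gross_pay_amount, p.additional_properties)
# -> 1735689600 2500.0 {'customTag': 'x'}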
{"blob_id": "0871e41750c0397cfd7d1c9d3b069d66cda43c8c", "bodies": ["super().__init__(algo_class=algo_class or ApexDDPG)\nself.optimizer = {'max_weight_sync_delay': 400, 'num_replay_buffer_shards': 4, 'debug': False}\nself.max_requests_in_flight_per_sampler_worker = 2\nself.max_requests_in_flight_per_replay_worker = float('inf')\nself.timeout_s_sampler_manager = 0.0\nself.timeout_s_replay_manager = 0.0\nself.n_step = 3\nself.exploration_config = {'type': 'PerWorkerOrnsteinUhlenbeckNoise'}\nself.num_gpus = 0\nself.num_workers = 32\nself.min_sample_timesteps_per_iteration = 25000\nself.min_time_s_per_iteration = 30\nself.train_batch_size = 512\nself.rollout_fragment_length = 50\nself.replay_buffer_config = {'type': 'MultiAgentPrioritizedReplayBuffer', 'capacity': 2000000, 'no_local_replay_buffer': True, 'prioritized_replay_alpha': 0.6, 'prioritized_replay_beta': 0.4, 'prioritized_replay_eps': 1e-06, 'replay_buffer_shards_colocated_with_driver': True, 'worker_side_prioritization': True, 'prioritized_replay': DEPRECATED_VALUE}\nself.num_steps_sampled_before_learning_starts = 50000\nself.target_network_update_freq = 500000\nself.training_intensity = 1", "super().training(**kwargs)\nif optimizer is not None:\n self.optimizer = optimizer\nif max_requests_in_flight_per_sampler_worker is not None:\n self.max_requests_in_flight_per_sampler_worker = max_requests_in_flight_per_sampler_worker\nif max_requests_in_flight_per_replay_worker is not None:\n self.max_requests_in_flight_per_replay_worker = max_requests_in_flight_per_replay_worker\nif timeout_s_sampler_manager is not None:\n self.timeout_s_sampler_manager = timeout_s_sampler_manager\nif timeout_s_replay_manager is not None:\n self.timeout_s_replay_manager = timeout_s_replay_manager\nreturn self"], "bodies_text": "<|body_start_0|>\n super().__init__(algo_class=algo_class or ApexDDPG)\n self.optimizer = {'max_weight_sync_delay': 400, 'num_replay_buffer_shards': 4, 'debug': False}\n self.max_requests_in_flight_per_sampler_worker = 2\n self.max_requests_in_flight_per_replay_worker = float('inf')\n self.timeout_s_sampler_manager = 0.0\n self.timeout_s_replay_manager = 0.0\n self.n_step = 3\n self.exploration_config = {'type': 'PerWorkerOrnsteinUhlenbeckNoise'}\n self.num_gpus = 0\n self.num_workers = 32\n self.min_sample_timesteps_per_iteration = 25000\n self.min_time_s_per_iteration = 30\n self.train_batch_size = 512\n self.rollout_fragment_length = 50\n self.replay_buffer_config = {'type': 'MultiAgentPrioritizedReplayBuffer', 'capacity': 2000000, 'no_local_replay_buffer': True, 'prioritized_replay_alpha': 0.6, 'prioritized_replay_beta': 0.4, 'prioritized_replay_eps': 1e-06, 'replay_buffer_shards_colocated_with_driver': True, 'worker_side_prioritization': True, 'prioritized_replay': DEPRECATED_VALUE}\n self.num_steps_sampled_before_learning_starts = 50000\n self.target_network_update_freq = 500000\n self.training_intensity = 1\n<|end_body_0|>\n\n<|body_start_1|>\n super().training(**kwargs)\n if optimizer is not None:\n self.optimizer = optimizer\n if max_requests_in_flight_per_sampler_worker is not None:\n self.max_requests_in_flight_per_sampler_worker = max_requests_in_flight_per_sampler_worker\n if max_requests_in_flight_per_replay_worker is not None:\n self.max_requests_in_flight_per_replay_worker = max_requests_in_flight_per_replay_worker\n if timeout_s_sampler_manager is not None:\n self.timeout_s_sampler_manager = timeout_s_sampler_manager\n if timeout_s_replay_manager is not None:\n self.timeout_s_replay_manager = timeout_s_replay_manager\n 
return self\n<|end_body_1|>\n", "class_docstring": "Defines a configuration class from which an ApexDDPG Trainer can be built. Example: >>> from ray.rllib.algorithms.apex_ddpg.apex_ddpg import ApexDDPGConfig >>> config = ApexDDPGConfig().training(lr=0.01).resources(num_gpus=1) >>> print(config.to_dict()) >>> # Build a Trainer object from the config and run one training iteration. >>> trainer = config.build(env=\"Pendulum-v1\") >>> trainer.train() Example: >>> from ray.rllib.algorithms.apex_ddpg.apex_ddpg import ApexDDPGConfig >>> from ray import tune >>> config = ApexDDPGConfig() >>> # Print out some default values. >>> print(config.lr) # doctest: +SKIP 0.0004 >>> # Update the config object. >>> config.training(lr=tune.grid_search([0.001, 0.000", "class_name": "ApexDDPGConfig", "detected_licenses": ["MIT", "BSD-3-Clause", "Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ApexDDPGConfig:\n \"\"\"Defines a configuration class from which an ApexDDPG Trainer can be built. Example: >>> from ray.rllib.algorithms.apex_ddpg.apex_ddpg import ApexDDPGConfig >>> config = ApexDDPGConfig().training(lr=0.01).resources(num_gpus=1) >>> print(config.to_dict()) >>> # Build a Trainer object from the config and run one training iteration. >>> trainer = config.build(env=\"Pendulum-v1\") >>> trainer.train() Example: >>> from ray.rllib.algorithms.apex_ddpg.apex_ddpg import ApexDDPGConfig >>> from ray import tune >>> config = ApexDDPGConfig() >>> # Print out some default values. >>> print(config.lr) # doctest: +SKIP 0.0004 >>> # Update the config object. >>> config.training(lr=tune.grid_search([0.001, 0.000\"\"\"\n\n def __init__(self, algo_class=None):\n \"\"\"Initializes an ApexDDPGConfig instance.\"\"\"\n <|body_0|>\n\n def training(self, *, optimizer: Optional[dict]=None, max_requests_in_flight_per_sampler_worker: Optional[int]=None, max_requests_in_flight_per_replay_worker: Optional[int]=None, timeout_s_sampler_manager: Optional[float]=None, timeout_s_replay_manager: Optional[float]=None, **kwargs) -> 'ApexDDPGConfig':\n \"\"\"Sets the training related configuration. Args: optimizer: Apex-DDPG optimizer settings (dict). Set the number of reply buffer shards in here via the `num_replay_buffer_shards` key (default=4). max_requests_in_flight_per_sampler_worker: Max number of inflight requests to each sampling worker. See the AsyncRequestsManager class for more details. Tuning these values is important when running experimens with large sample batches, where there is the risk that the object store may fill up, causing spilling of objects to disk. This can cause any asynchronous requests to become very slow, making your experiment run slow as well. 
You can inspect the object store during your experiment via a call to r\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(algo_class=algo_class or ApexDDPG)\n self.optimizer = {'max_weight_sync_delay': 400, 'num_replay_buffer_shards': 4, 'debug': False}\n self.max_requests_in_flight_per_sampler_worker = 2\n self.max_requests_in_flight_per_replay_worker = float('inf')\n self.timeout_s_sampler_manager = 0.0\n self.timeout_s_replay_manager = 0.0\n self.n_step = 3\n self.exploration_config = {'type': 'PerWorkerOrnsteinUhlenbeckNoise'}\n self.num_gpus = 0\n self.num_workers = 32\n self.min_sample_timesteps_per_iteration = 25000\n self.min_time_s_per_iteration = 30\n self.train_batch_size = 512\n self.rollout_fragment_length = 50\n self.replay_buffer_config = {'type': 'MultiAgentPrioritizedReplayBuffer', 'capacity': 2000000, 'no_local_replay_buffer': True, 'prioritized_replay_alpha': 0.6, 'prioritized_replay_beta': 0.4, 'prioritized_replay_eps': 1e-06, 'replay_buffer_shards_colocated_with_driver': True, 'worker_side_prioritization': True, 'prioritized_replay': DEPRECATED_VALUE}\n self.num_steps_sampled_before_learning_starts = 50000\n self.target_network_update_freq = 500000\n self.training_intensity = 1\n<|end_body_0|>\n\n<|body_start_1|>\n super().training(**kwargs)\n if optimizer is not None:\n self.optimizer = optimizer\n if max_requests_in_flight_per_sampler_worker is not None:\n self.max_requests_in_flight_per_sampler_worker = max_requests_in_flight_per_sampler_worker\n if max_requests_in_flight_per_replay_worker is not None:\n self.max_requests_in_flight_per_replay_worker = max_requests_in_flight_per_replay_worker\n if timeout_s_sampler_manager is not None:\n self.timeout_s_sampler_manager = timeout_s_sampler_manager\n if timeout_s_replay_manager is not None:\n self.timeout_s_replay_manager = timeout_s_replay_manager\n return self\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000136", "length_bytes": 10309, "license_type": "permissive", "methods": [{"docstring": "Initializes an ApexDDPGConfig instance.", "name": "__init__", "signature": "def __init__(self, algo_class=None)"}, {"docstring": "Sets the training related configuration. Args: optimizer: Apex-DDPG optimizer settings (dict). Set the number of reply buffer shards in here via the `num_replay_buffer_shards` key (default=4). max_requests_in_flight_per_sampler_worker: Max number of inflight requests to each sampling worker. See the AsyncRequestsManager class for more details. Tuning these values is important when running experimens with large sample batches, where there is the risk that the object store may fill up, causing spilling of objects to disk. This can cause any asynchronous requests to become very slow, making your experiment run slow as well. You can inspect the object store during your experiment via a call to r", "name": "training", "signature": "def training(self, *, optimizer: Optional[dict]=None, max_requests_in_flight_per_sampler_worker: Optional[int]=None, max_requests_in_flight_per_replay_worker: Optional[int]=None, timeout_s_sampler_manager: Optional[float]=None, timeout_s_replay_manager: Optional[float]=None, **kwargs) -> 'ApexDDPGConfig'"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_015194", "prompt": "Implement the Python class `ApexDDPGConfig` described below.\n\nClass description:\nDefines a configuration class from which an ApexDDPG Trainer can be built. 
Example: >>> from ray.rllib.algorithms.apex_ddpg.apex_ddpg import ApexDDPGConfig >>> config = ApexDDPGConfig().training(lr=0.01).resources(num_gpus=1) >>> print(config.to_dict()) >>> # Build a Trainer object from the config and run one training iteration. >>> trainer = config.build(env=\"Pendulum-v1\") >>> trainer.train() Example: >>> from ray.rllib.algorithms.apex_ddpg.apex_ddpg import ApexDDPGConfig >>> from ray import tune >>> config = ApexDDPGConfig() >>> # Print out some default values. >>> print(config.lr) # doctest: +SKIP 0.0004 >>> # Update the config object. >>> config.training(lr=tune.grid_search([0.001, 0.000\n\nMethod signatures and docstrings:\n- def __init__(self, algo_class=None): Initializes an ApexDDPGConfig instance.\n- def training(self, *, optimizer: Optional[dict]=None, max_requests_in_flight_per_sampler_worker: Optional[int]=None, max_requests_in_flight_per_replay_worker: Optional[int]=None, timeout_s_sampler_manager: Optional[float]=None, timeout_s_replay_manager: Optional[float]=None, **kwargs) -> 'ApexDDPGConfig': Sets the training related configuration. Args: optimizer: Apex-DDPG optimizer settings (dict). Set the number of reply buffer shards in here via the `num_replay_buffer_shards` key (default=4). max_requests_in_flight_per_sampler_worker: Max number of inflight requests to each sampling worker. See the AsyncRequestsManager class for more details. Tuning these values is important when running experimens with large sample batches, where there is the risk that the object store may fill up, causing spilling of objects to disk. This can cause any asynchronous requests to become very slow, making your experiment run slow as well. You can inspect the object store during your experiment via a call to r", "prompted_full_text": "Implement the Python class `ApexDDPGConfig` described below.\n\nClass description:\nDefines a configuration class from which an ApexDDPG Trainer can be built. Example: >>> from ray.rllib.algorithms.apex_ddpg.apex_ddpg import ApexDDPGConfig >>> config = ApexDDPGConfig().training(lr=0.01).resources(num_gpus=1) >>> print(config.to_dict()) >>> # Build a Trainer object from the config and run one training iteration. >>> trainer = config.build(env=\"Pendulum-v1\") >>> trainer.train() Example: >>> from ray.rllib.algorithms.apex_ddpg.apex_ddpg import ApexDDPGConfig >>> from ray import tune >>> config = ApexDDPGConfig() >>> # Print out some default values. >>> print(config.lr) # doctest: +SKIP 0.0004 >>> # Update the config object. >>> config.training(lr=tune.grid_search([0.001, 0.000\n\nMethod signatures and docstrings:\n- def __init__(self, algo_class=None): Initializes an ApexDDPGConfig instance.\n- def training(self, *, optimizer: Optional[dict]=None, max_requests_in_flight_per_sampler_worker: Optional[int]=None, max_requests_in_flight_per_replay_worker: Optional[int]=None, timeout_s_sampler_manager: Optional[float]=None, timeout_s_replay_manager: Optional[float]=None, **kwargs) -> 'ApexDDPGConfig': Sets the training related configuration. Args: optimizer: Apex-DDPG optimizer settings (dict). Set the number of reply buffer shards in here via the `num_replay_buffer_shards` key (default=4). max_requests_in_flight_per_sampler_worker: Max number of inflight requests to each sampling worker. See the AsyncRequestsManager class for more details. Tuning these values is important when running experimens with large sample batches, where there is the risk that the object store may fill up, causing spilling of objects to disk. 
This can cause any asynchronous requests to become very slow, making your experiment run slow as well. You can inspect the object store during your experiment via a call to r\n\n<|skeleton|>\nclass ApexDDPGConfig:\n \"\"\"Defines a configuration class from which an ApexDDPG Trainer can be built. Example: >>> from ray.rllib.algorithms.apex_ddpg.apex_ddpg import ApexDDPGConfig >>> config = ApexDDPGConfig().training(lr=0.01).resources(num_gpus=1) >>> print(config.to_dict()) >>> # Build a Trainer object from the config and run one training iteration. >>> trainer = config.build(env=\"Pendulum-v1\") >>> trainer.train() Example: >>> from ray.rllib.algorithms.apex_ddpg.apex_ddpg import ApexDDPGConfig >>> from ray import tune >>> config = ApexDDPGConfig() >>> # Print out some default values. >>> print(config.lr) # doctest: +SKIP 0.0004 >>> # Update the config object. >>> config.training(lr=tune.grid_search([0.001, 0.000\"\"\"\n\n def __init__(self, algo_class=None):\n \"\"\"Initializes an ApexDDPGConfig instance.\"\"\"\n <|body_0|>\n\n def training(self, *, optimizer: Optional[dict]=None, max_requests_in_flight_per_sampler_worker: Optional[int]=None, max_requests_in_flight_per_replay_worker: Optional[int]=None, timeout_s_sampler_manager: Optional[float]=None, timeout_s_replay_manager: Optional[float]=None, **kwargs) -> 'ApexDDPGConfig':\n \"\"\"Sets the training related configuration. Args: optimizer: Apex-DDPG optimizer settings (dict). Set the number of reply buffer shards in here via the `num_replay_buffer_shards` key (default=4). max_requests_in_flight_per_sampler_worker: Max number of inflight requests to each sampling worker. See the AsyncRequestsManager class for more details. Tuning these values is important when running experimens with large sample batches, where there is the risk that the object store may fill up, causing spilling of objects to disk. This can cause any asynchronous requests to become very slow, making your experiment run slow as well. 
You can inspect the object store during your experiment via a call to r\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(algo_class=algo_class or ApexDDPG)\n self.optimizer = {'max_weight_sync_delay': 400, 'num_replay_buffer_shards': 4, 'debug': False}\n self.max_requests_in_flight_per_sampler_worker = 2\n self.max_requests_in_flight_per_replay_worker = float('inf')\n self.timeout_s_sampler_manager = 0.0\n self.timeout_s_replay_manager = 0.0\n self.n_step = 3\n self.exploration_config = {'type': 'PerWorkerOrnsteinUhlenbeckNoise'}\n self.num_gpus = 0\n self.num_workers = 32\n self.min_sample_timesteps_per_iteration = 25000\n self.min_time_s_per_iteration = 30\n self.train_batch_size = 512\n self.rollout_fragment_length = 50\n self.replay_buffer_config = {'type': 'MultiAgentPrioritizedReplayBuffer', 'capacity': 2000000, 'no_local_replay_buffer': True, 'prioritized_replay_alpha': 0.6, 'prioritized_replay_beta': 0.4, 'prioritized_replay_eps': 1e-06, 'replay_buffer_shards_colocated_with_driver': True, 'worker_side_prioritization': True, 'prioritized_replay': DEPRECATED_VALUE}\n self.num_steps_sampled_before_learning_starts = 50000\n self.target_network_update_freq = 500000\n self.training_intensity = 1\n<|end_body_0|>\n\n<|body_start_1|>\n super().training(**kwargs)\n if optimizer is not None:\n self.optimizer = optimizer\n if max_requests_in_flight_per_sampler_worker is not None:\n self.max_requests_in_flight_per_sampler_worker = max_requests_in_flight_per_sampler_worker\n if max_requests_in_flight_per_replay_worker is not None:\n self.max_requests_in_flight_per_replay_worker = max_requests_in_flight_per_replay_worker\n if timeout_s_sampler_manager is not None:\n self.timeout_s_sampler_manager = timeout_s_sampler_manager\n if timeout_s_replay_manager is not None:\n self.timeout_s_replay_manager = timeout_s_replay_manager\n return self\n<|end_body_1|>\n", "revision_id": "227aef381a605cb1ebccbba4e84b840634196a35", "skeleton": "<|skeleton|>\nclass ApexDDPGConfig:\n \"\"\"Defines a configuration class from which an ApexDDPG Trainer can be built. Example: >>> from ray.rllib.algorithms.apex_ddpg.apex_ddpg import ApexDDPGConfig >>> config = ApexDDPGConfig().training(lr=0.01).resources(num_gpus=1) >>> print(config.to_dict()) >>> # Build a Trainer object from the config and run one training iteration. >>> trainer = config.build(env=\"Pendulum-v1\") >>> trainer.train() Example: >>> from ray.rllib.algorithms.apex_ddpg.apex_ddpg import ApexDDPGConfig >>> from ray import tune >>> config = ApexDDPGConfig() >>> # Print out some default values. >>> print(config.lr) # doctest: +SKIP 0.0004 >>> # Update the config object. >>> config.training(lr=tune.grid_search([0.001, 0.000\"\"\"\n\n def __init__(self, algo_class=None):\n \"\"\"Initializes an ApexDDPGConfig instance.\"\"\"\n <|body_0|>\n\n def training(self, *, optimizer: Optional[dict]=None, max_requests_in_flight_per_sampler_worker: Optional[int]=None, max_requests_in_flight_per_replay_worker: Optional[int]=None, timeout_s_sampler_manager: Optional[float]=None, timeout_s_replay_manager: Optional[float]=None, **kwargs) -> 'ApexDDPGConfig':\n \"\"\"Sets the training related configuration. Args: optimizer: Apex-DDPG optimizer settings (dict). Set the number of reply buffer shards in here via the `num_replay_buffer_shards` key (default=4). max_requests_in_flight_per_sampler_worker: Max number of inflight requests to each sampling worker. See the AsyncRequestsManager class for more details. 
Tuning these values is important when running experimens with large sample batches, where there is the risk that the object store may fill up, causing spilling of objects to disk. This can cause any asynchronous requests to become very slow, making your experiment run slow as well. You can inspect the object store during your experiment via a call to r\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ApexDDPGConfig:\n \"\"\"Defines a configuration class from which an ApexDDPG Trainer can be built. Example: >>> from ray.rllib.algorithms.apex_ddpg.apex_ddpg import ApexDDPGConfig >>> config = ApexDDPGConfig().training(lr=0.01).resources(num_gpus=1) >>> print(config.to_dict()) >>> # Build a Trainer object from the config and run one training iteration. >>> trainer = config.build(env=\"Pendulum-v1\") >>> trainer.train() Example: >>> from ray.rllib.algorithms.apex_ddpg.apex_ddpg import ApexDDPGConfig >>> from ray import tune >>> config = ApexDDPGConfig() >>> # Print out some default values. >>> print(config.lr) # doctest: +SKIP 0.0004 >>> # Update the config object. >>> config.training(lr=tune.grid_search([0.001, 0.000\"\"\"\n\n def __init__(self, algo_class=None):\n \"\"\"Initializes an ApexDDPGConfig instance.\"\"\"\n super().__init__(algo_class=algo_class or ApexDDPG)\n self.optimizer = {'max_weight_sync_delay': 400, 'num_replay_buffer_shards': 4, 'debug': False}\n self.max_requests_in_flight_per_sampler_worker = 2\n self.max_requests_in_flight_per_replay_worker = float('inf')\n self.timeout_s_sampler_manager = 0.0\n self.timeout_s_replay_manager = 0.0\n self.n_step = 3\n self.exploration_config = {'type': 'PerWorkerOrnsteinUhlenbeckNoise'}\n self.num_gpus = 0\n self.num_workers = 32\n self.min_sample_timesteps_per_iteration = 25000\n self.min_time_s_per_iteration = 30\n self.train_batch_size = 512\n self.rollout_fragment_length = 50\n self.replay_buffer_config = {'type': 'MultiAgentPrioritizedReplayBuffer', 'capacity': 2000000, 'no_local_replay_buffer': True, 'prioritized_replay_alpha': 0.6, 'prioritized_replay_beta': 0.4, 'prioritized_replay_eps': 1e-06, 'replay_buffer_shards_colocated_with_driver': True, 'worker_side_prioritization': True, 'prioritized_replay': DEPRECATED_VALUE}\n self.num_steps_sampled_before_learning_starts = 50000\n self.target_network_update_freq = 500000\n self.training_intensity = 1\n\n def training(self, *, optimizer: Optional[dict]=None, max_requests_in_flight_per_sampler_worker: Optional[int]=None, max_requests_in_flight_per_replay_worker: Optional[int]=None, timeout_s_sampler_manager: Optional[float]=None, timeout_s_replay_manager: Optional[float]=None, **kwargs) -> 'ApexDDPGConfig':\n \"\"\"Sets the training related configuration. Args: optimizer: Apex-DDPG optimizer settings (dict). Set the number of reply buffer shards in here via the `num_replay_buffer_shards` key (default=4). max_requests_in_flight_per_sampler_worker: Max number of inflight requests to each sampling worker. See the AsyncRequestsManager class for more details. Tuning these values is important when running experimens with large sample batches, where there is the risk that the object store may fill up, causing spilling of objects to disk. This can cause any asynchronous requests to become very slow, making your experiment run slow as well. 
You can inspect the object store during your experiment via a call to r\"\"\"\n super().training(**kwargs)\n if optimizer is not None:\n self.optimizer = optimizer\n if max_requests_in_flight_per_sampler_worker is not None:\n self.max_requests_in_flight_per_sampler_worker = max_requests_in_flight_per_sampler_worker\n if max_requests_in_flight_per_replay_worker is not None:\n self.max_requests_in_flight_per_replay_worker = max_requests_in_flight_per_replay_worker\n if timeout_s_sampler_manager is not None:\n self.timeout_s_sampler_manager = timeout_s_sampler_manager\n if timeout_s_replay_manager is not None:\n self.timeout_s_replay_manager = timeout_s_replay_manager\n return self\n", "source": "the_stack_v2_python_sparse", "source_path": "rllib/algorithms/apex_ddpg/apex_ddpg.py", "source_repo": "jovany-wang/ray", "split": "val", "star_events_count": 1}
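The ApexDDPGConfig record is a textbook fluent builder: training() takes keyword-only Optional parameters, overwrites an attribute only when the argument is not None, and returns self so calls chain, as in the class docstring's ApexDDPGConfig().training(lr=0.01).resources(num_gpus=1) example (the docstring's "reply buffer" is presumably a typo for "replay buffer", and the docstring itself is truncated mid-example in the source, so it is left as-is above). A dependency-free sketch of the pattern follows; the attribute names are invented for illustration rather than Ray's actual API, and one known limitation of the None sentinel is that a field can never be deliberately reset to None through such a setter:

class FluentConfigDemo:
    def __init__(self):
        # Defaults loosely mirroring a few values from the record.
        self.optimizer = {'max_weight_sync_delay': 400, 'num_replay_buffer_shards': 4}
        self.timeout_s_sampler_manager = 0.0
        self.num_gpus = 0

    def training(self, *, optimizer=None, timeout_s_sampler_manager=None):
        # Only touch fields the caller actually supplied.
        if optimizer is not None:
            self.optimizer = optimizer
        if timeout_s_sampler_manager is not None:
            self.timeout_s_sampler_manager = timeout_s_sampler_manager
        return self  # returning self is what makes the calls chain

    def resources(self, *, num_gpus=None):
        if num_gpus is not None:
            self.num_gpus = num_gpus
        return self

config = FluentConfigDemo().training(timeout_s_sampler_manager=0.5).resources(num_gpus=1)
print(config.timeout_s_sampler_manager, config.num_gpus)  # -> 0.5 1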
{"blob_id": "975e7d30fbbff016e8f9ee9a199432a8d28d05fa", "bodies": ["if not parse_node:\n raise TypeError('parse_node cannot be null.')\nreturn DeviceConfigurationDeviceStatus()", "from .compliance_status import ComplianceStatus\nfrom .entity import Entity\nfrom .compliance_status import ComplianceStatus\nfrom .entity import Entity\nfields: Dict[str, Callable[[Any], None]] = {'complianceGracePeriodExpirationDateTime': lambda n: setattr(self, 'compliance_grace_period_expiration_date_time', n.get_datetime_value()), 'deviceDisplayName': lambda n: setattr(self, 'device_display_name', n.get_str_value()), 'deviceModel': lambda n: setattr(self, 'device_model', n.get_str_value()), 'lastReportedDateTime': lambda n: setattr(self, 'last_reported_date_time', n.get_datetime_value()), 'status': lambda n: setattr(self, 'status', n.get_enum_value(ComplianceStatus)), 'userName': lambda n: setattr(self, 'user_name', n.get_str_value()), 'userPrincipalName': lambda n: setattr(self, 'user_principal_name', n.get_str_value())}\nsuper_fields = super().get_field_deserializers()\nfields.update(super_fields)\nreturn fields", "if not writer:\n raise TypeError('writer cannot be null.')\nsuper().serialize(writer)\nwriter.write_datetime_value('complianceGracePeriodExpirationDateTime', self.compliance_grace_period_expiration_date_time)\nwriter.write_str_value('deviceDisplayName', self.device_display_name)\nwriter.write_str_value('deviceModel', self.device_model)\nwriter.write_datetime_value('lastReportedDateTime', self.last_reported_date_time)\nwriter.write_enum_value('status', self.status)\nwriter.write_str_value('userName', self.user_name)\nwriter.write_str_value('userPrincipalName', self.user_principal_name)"], "bodies_text": "<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return DeviceConfigurationDeviceStatus()\n<|end_body_0|>\n\n<|body_start_1|>\n from .compliance_status import ComplianceStatus\n from .entity import Entity\n from .compliance_status import ComplianceStatus\n from .entity import Entity\n fields: Dict[str, Callable[[Any], None]] = {'complianceGracePeriodExpirationDateTime': lambda n: setattr(self, 'compliance_grace_period_expiration_date_time', n.get_datetime_value()), 'deviceDisplayName': lambda n: setattr(self, 'device_display_name', n.get_str_value()), 'deviceModel': lambda n: setattr(self, 'device_model', n.get_str_value()), 'lastReportedDateTime': lambda n: setattr(self, 'last_reported_date_time', n.get_datetime_value()), 'status': lambda n: setattr(self, 'status', n.get_enum_value(ComplianceStatus)), 'userName': lambda n: setattr(self, 'user_name', n.get_str_value()), 'userPrincipalName': lambda n: setattr(self, 'user_principal_name', n.get_str_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_datetime_value('complianceGracePeriodExpirationDateTime', self.compliance_grace_period_expiration_date_time)\n writer.write_str_value('deviceDisplayName', self.device_display_name)\n writer.write_str_value('deviceModel', self.device_model)\n writer.write_datetime_value('lastReportedDateTime', self.last_reported_date_time)\n writer.write_enum_value('status', self.status)\n writer.write_str_value('userName', self.user_name)\n writer.write_str_value('userPrincipalName', self.user_principal_name)\n<|end_body_2|>\n", "class_docstring": "", "class_name": 
"DeviceConfigurationDeviceStatus", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DeviceConfigurationDeviceStatus:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> DeviceConfigurationDeviceStatus:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: DeviceConfigurationDeviceStatus\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return DeviceConfigurationDeviceStatus()\n<|end_body_0|>\n\n<|body_start_1|>\n from .compliance_status import ComplianceStatus\n from .entity import Entity\n from .compliance_status import ComplianceStatus\n from .entity import Entity\n fields: Dict[str, Callable[[Any], None]] = {'complianceGracePeriodExpirationDateTime': lambda n: setattr(self, 'compliance_grace_period_expiration_date_time', n.get_datetime_value()), 'deviceDisplayName': lambda n: setattr(self, 'device_display_name', n.get_str_value()), 'deviceModel': lambda n: setattr(self, 'device_model', n.get_str_value()), 'lastReportedDateTime': lambda n: setattr(self, 'last_reported_date_time', n.get_datetime_value()), 'status': lambda n: setattr(self, 'status', n.get_enum_value(ComplianceStatus)), 'userName': lambda n: setattr(self, 'user_name', n.get_str_value()), 'userPrincipalName': lambda n: setattr(self, 'user_principal_name', n.get_str_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_datetime_value('complianceGracePeriodExpirationDateTime', self.compliance_grace_period_expiration_date_time)\n writer.write_str_value('deviceDisplayName', self.device_display_name)\n writer.write_str_value('deviceModel', self.device_model)\n writer.write_datetime_value('lastReportedDateTime', self.last_reported_date_time)\n writer.write_enum_value('status', self.status)\n writer.write_str_value('userName', self.user_name)\n writer.write_str_value('userPrincipalName', self.user_principal_name)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000137", "length_bytes": 3938, "license_type": "permissive", "methods": [{"docstring": "Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: DeviceConfigurationDeviceStatus", "name": "create_from_discriminator_value", "signature": "def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> DeviceConfigurationDeviceStatus"}, {"docstring": "The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]", "name": "get_field_deserializers", "signature": "def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]"}, {"docstring": "Serializes information the current object 
Args: writer: Serialization writer to use to serialize this model", "name": "serialize", "signature": "def serialize(self, writer: SerializationWriter) -> None"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_042372", "prompt": "Implement the Python class `DeviceConfigurationDeviceStatus` described below.\n\nClass description:\nImplement the DeviceConfigurationDeviceStatus class.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> DeviceConfigurationDeviceStatus: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: DeviceConfigurationDeviceStatus\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "prompted_full_text": "Implement the Python class `DeviceConfigurationDeviceStatus` described below.\n\nClass description:\nImplement the DeviceConfigurationDeviceStatus class.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> DeviceConfigurationDeviceStatus: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: DeviceConfigurationDeviceStatus\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model\n\n<|skeleton|>\nclass DeviceConfigurationDeviceStatus:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> DeviceConfigurationDeviceStatus:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: DeviceConfigurationDeviceStatus\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return DeviceConfigurationDeviceStatus()\n<|end_body_0|>\n\n<|body_start_1|>\n from .compliance_status import ComplianceStatus\n from .entity import Entity\n from .compliance_status import ComplianceStatus\n from .entity import Entity\n fields: Dict[str, Callable[[Any], None]] = {'complianceGracePeriodExpirationDateTime': lambda n: setattr(self, 'compliance_grace_period_expiration_date_time', n.get_datetime_value()), 'deviceDisplayName': lambda n: setattr(self, 'device_display_name', n.get_str_value()), 'deviceModel': lambda n: setattr(self, 'device_model', n.get_str_value()), 'lastReportedDateTime': lambda n: 
setattr(self, 'last_reported_date_time', n.get_datetime_value()), 'status': lambda n: setattr(self, 'status', n.get_enum_value(ComplianceStatus)), 'userName': lambda n: setattr(self, 'user_name', n.get_str_value()), 'userPrincipalName': lambda n: setattr(self, 'user_principal_name', n.get_str_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_datetime_value('complianceGracePeriodExpirationDateTime', self.compliance_grace_period_expiration_date_time)\n writer.write_str_value('deviceDisplayName', self.device_display_name)\n writer.write_str_value('deviceModel', self.device_model)\n writer.write_datetime_value('lastReportedDateTime', self.last_reported_date_time)\n writer.write_enum_value('status', self.status)\n writer.write_str_value('userName', self.user_name)\n writer.write_str_value('userPrincipalName', self.user_principal_name)\n<|end_body_2|>\n", "revision_id": "27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949", "skeleton": "<|skeleton|>\nclass DeviceConfigurationDeviceStatus:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> DeviceConfigurationDeviceStatus:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: DeviceConfigurationDeviceStatus\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DeviceConfigurationDeviceStatus:\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> DeviceConfigurationDeviceStatus:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: DeviceConfigurationDeviceStatus\"\"\"\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return DeviceConfigurationDeviceStatus()\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n from .compliance_status import ComplianceStatus\n from .entity import Entity\n from .compliance_status import ComplianceStatus\n from .entity import Entity\n fields: Dict[str, Callable[[Any], None]] = {'complianceGracePeriodExpirationDateTime': lambda n: setattr(self, 'compliance_grace_period_expiration_date_time', n.get_datetime_value()), 'deviceDisplayName': lambda n: setattr(self, 'device_display_name', n.get_str_value()), 'deviceModel': lambda n: setattr(self, 'device_model', n.get_str_value()), 'lastReportedDateTime': lambda n: setattr(self, 'last_reported_date_time', n.get_datetime_value()), 'status': lambda n: setattr(self, 'status', n.get_enum_value(ComplianceStatus)), 'userName': lambda n: setattr(self, 'user_name', 
n.get_str_value()), 'userPrincipalName': lambda n: setattr(self, 'user_principal_name', n.get_str_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_datetime_value('complianceGracePeriodExpirationDateTime', self.compliance_grace_period_expiration_date_time)\n writer.write_str_value('deviceDisplayName', self.device_display_name)\n writer.write_str_value('deviceModel', self.device_model)\n writer.write_datetime_value('lastReportedDateTime', self.last_reported_date_time)\n writer.write_enum_value('status', self.status)\n writer.write_str_value('userName', self.user_name)\n writer.write_str_value('userPrincipalName', self.user_principal_name)\n", "source": "the_stack_v2_python_sparse", "source_path": "msgraph/generated/models/device_configuration_device_status.py", "source_repo": "microsoftgraph/msgraph-sdk-python", "split": "val", "star_events_count": 135}
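The record above is generated Microsoft Graph SDK model code, which is why `get_field_deserializers` contains duplicated `ComplianceStatus`/`Entity` imports (redundant but harmless in generated output). For readers unfamiliar with the pattern, here is a minimal, framework-free sketch of the same idea: the model exposes a dict mapping wire-format field names to setter callbacks so a parser can drive deserialization generically. `MiniParseNode` and `MiniDeviceStatus` are stand-ins invented for illustration, not the real kiota types.

from typing import Any, Callable, Dict

class MiniParseNode:
    """Illustrative stand-in: wraps a single raw field value."""
    def __init__(self, value: Any) -> None:
        self._value = value

    def get_str_value(self) -> str:
        return str(self._value)

class MiniDeviceStatus:
    def __init__(self) -> None:
        self.device_display_name = ''
        self.device_model = ''

    def get_field_deserializers(self) -> Dict[str, Callable[[MiniParseNode], None]]:
        # Wire name -> callback that reads the node and assigns the snake_case attribute.
        return {
            'deviceDisplayName': lambda n: setattr(self, 'device_display_name', n.get_str_value()),
            'deviceModel': lambda n: setattr(self, 'device_model', n.get_str_value()),
        }

raw = {'deviceDisplayName': 'lab-phone-01', 'deviceModel': 'Pixel 7'}
status = MiniDeviceStatus()
for name, value in raw.items():
    status.get_field_deserializers()[name](MiniParseNode(value))
print(status.device_display_name, status.device_model)  # lab-phone-01 Pixel 7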
{"blob_id": "5dc9fdf5b7156f915f03771ce9dca1b36c9413d6", "bodies": ["data = request.query_params\nnext_month_only = data.get('next_month_only', True)\nnext_month_days_off = get_ua_days_off(next_month_only)\nreturn json_response_success(data=next_month_days_off)", "email = request.data.get('email', None)\nif not email:\n return json_response_error(\"Should provide customer's email\")\nnext_month_days_off = get_ua_days_off()\nif not next_month_days_off:\n return json_response_success('No holidays in next 30 days')\nif next_month_days_off:\n html = str(next_month_days_off)\n return json_response_success('Email to %s is succesfully sent' % email)"], "bodies_text": "<|body_start_0|>\n data = request.query_params\n next_month_only = data.get('next_month_only', True)\n next_month_days_off = get_ua_days_off(next_month_only)\n return json_response_success(data=next_month_days_off)\n<|end_body_0|>\n\n<|body_start_1|>\n email = request.data.get('email', None)\n if not email:\n return json_response_error(\"Should provide customer's email\")\n next_month_days_off = get_ua_days_off()\n if not next_month_days_off:\n return json_response_success('No holidays in next 30 days')\n if next_month_days_off:\n html = str(next_month_days_off)\n return json_response_success('Email to %s is succesfully sent' % email)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "DaysOff", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DaysOff:\n\n def get(self, request):\n \"\"\"parameters: - name: next_month_only description: show only next month days off required: false type: bool\"\"\"\n <|body_0|>\n\n def post(self, request):\n \"\"\"parameters: - name: email description: client's email to whom we should send an email required: true type: string\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n data = request.query_params\n next_month_only = data.get('next_month_only', True)\n next_month_days_off = get_ua_days_off(next_month_only)\n return json_response_success(data=next_month_days_off)\n<|end_body_0|>\n\n<|body_start_1|>\n email = request.data.get('email', None)\n if not email:\n return json_response_error(\"Should provide customer's email\")\n next_month_days_off = get_ua_days_off()\n if not next_month_days_off:\n return json_response_success('No holidays in next 30 days')\n if next_month_days_off:\n html = str(next_month_days_off)\n return json_response_success('Email to %s is succesfully sent' % email)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000138", "length_bytes": 43187, "license_type": "no_license", "methods": [{"docstring": "parameters: - name: next_month_only description: show only next month days off required: false type: bool", "name": "get", "signature": "def get(self, request)"}, {"docstring": "parameters: - name: email description: client's email to whom we should send an email required: true type: string", "name": "post", "signature": "def post(self, request)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_033082", "prompt": "Implement the Python class `DaysOff` described below.\n\nClass description:\nImplement the DaysOff class.\n\nMethod signatures and docstrings:\n- def get(self, request): parameters: - name: next_month_only description: show only next month days off required: false type: bool\n- def post(self, request): parameters: - name: email description: client's email to whom we should send an email required: true type: string", "prompted_full_text": "Implement the Python 
class `DaysOff` described below.\n\nClass description:\nImplement the DaysOff class.\n\nMethod signatures and docstrings:\n- def get(self, request): parameters: - name: next_month_only description: show only next month days off required: false type: bool\n- def post(self, request): parameters: - name: email description: client's email to whom we should send an email required: true type: string\n\n<|skeleton|>\nclass DaysOff:\n\n def get(self, request):\n \"\"\"parameters: - name: next_month_only description: show only next month days off required: false type: bool\"\"\"\n <|body_0|>\n\n def post(self, request):\n \"\"\"parameters: - name: email description: client's email to whom we should send an email required: true type: string\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n data = request.query_params\n next_month_only = data.get('next_month_only', True)\n next_month_days_off = get_ua_days_off(next_month_only)\n return json_response_success(data=next_month_days_off)\n<|end_body_0|>\n\n<|body_start_1|>\n email = request.data.get('email', None)\n if not email:\n return json_response_error(\"Should provide customer's email\")\n next_month_days_off = get_ua_days_off()\n if not next_month_days_off:\n return json_response_success('No holidays in next 30 days')\n if next_month_days_off:\n html = str(next_month_days_off)\n return json_response_success('Email to %s is succesfully sent' % email)\n<|end_body_1|>\n", "revision_id": "ef392f0ec6f5a4eac2ecb48606ccfe753ffacd2e", "skeleton": "<|skeleton|>\nclass DaysOff:\n\n def get(self, request):\n \"\"\"parameters: - name: next_month_only description: show only next month days off required: false type: bool\"\"\"\n <|body_0|>\n\n def post(self, request):\n \"\"\"parameters: - name: email description: client's email to whom we should send an email required: true type: string\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DaysOff:\n def get(self, request):\n \"\"\"parameters: - name: next_month_only description: show only next month days off required: false type: bool\"\"\"\n data = request.query_params\n next_month_only = data.get('next_month_only', True)\n next_month_days_off = get_ua_days_off(next_month_only)\n return json_response_success(data=next_month_days_off)\n\n def post(self, request):\n \"\"\"parameters: - name: email description: client's email to whom we should send an email required: true type: string\"\"\"\n email = request.data.get('email', None)\n if not email:\n return json_response_error(\"Should provide customer's email\")\n next_month_days_off = get_ua_days_off()\n if not next_month_days_off:\n return json_response_success('No holidays in next 30 days')\n if next_month_days_off:\n html = str(next_month_days_off)\n return json_response_success('Email to %s is succesfully sent' % email)\n", "source": "the_stack_v2_python_sparse", "source_path": "erp_django/apps/core/views.py", "source_repo": "Uvik-Software/erp-django", "split": "val", "star_events_count": 0}
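The `post` body stored in the record above never actually sends anything: `html` is assigned but unused, and the success message carries the original "succesfully" typo. A hedged corrected sketch, assuming the record's `get_ua_days_off`, `json_response_success`, and `json_response_error` helpers are in scope, and inventing a `send_email(to, html)` helper for the missing step:

def post(self, request):
    """parameters: - name: email description: client's email to whom we should send an email required: true type: string"""
    email = request.data.get('email')
    if not email:
        return json_response_error("Should provide customer's email")
    next_month_days_off = get_ua_days_off()
    if not next_month_days_off:
        return json_response_success('No holidays in next 30 days')
    html = str(next_month_days_off)
    send_email(to=email, html=html)  # hypothetical helper; the stored body drops the email on the floor
    return json_response_success('Email to %s is successfully sent' % email)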
{"blob_id": "19ef9eafcaee282c20d5097a625eec1dc607db6a", "bodies": ["super(InverseGamma, self).__init__(transform)\nself.covariance_prior = False\nself.alpha = alpha\nself.beta = beta", "if self.transform is not None:\n x = self.transform(x)\nreturn (-self.alpha - 1) * np.log(x) - self.beta / float(x)", "if self.transform is not None:\n x = self.transform(x)\nreturn x ** (-self.alpha - 1) * np.exp(-(self.beta / float(x)))"], "bodies_text": "<|body_start_0|>\n super(InverseGamma, self).__init__(transform)\n self.covariance_prior = False\n self.alpha = alpha\n self.beta = beta\n<|end_body_0|>\n\n<|body_start_1|>\n if self.transform is not None:\n x = self.transform(x)\n return (-self.alpha - 1) * np.log(x) - self.beta / float(x)\n<|end_body_1|>\n\n<|body_start_2|>\n if self.transform is not None:\n x = self.transform(x)\n return x ** (-self.alpha - 1) * np.exp(-(self.beta / float(x)))\n<|end_body_2|>\n", "class_docstring": "Inverse Gamma Distribution ---- This class contains methods relating to the inverse gamma distribution for time series.", "class_name": "InverseGamma", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass InverseGamma:\n \"\"\"Inverse Gamma Distribution ---- This class contains methods relating to the inverse gamma distribution for time series.\"\"\"\n\n def __init__(self, alpha, beta, transform=None, **kwargs):\n \"\"\"Parameters ---------- alpha : float Alpha parameter for the Inverse Gamma distribution beta : float Beta parameter for the Inverse Gamma distribution transform : str Whether to apply a transformation - e.g. 'exp' or 'logit'\"\"\"\n <|body_0|>\n\n def logpdf(self, x):\n \"\"\"Log PDF for Inverse Gamma prior Parameters ---------- x : float Latent variable for which the prior is being formed over Returns ---------- - log(p(x))\"\"\"\n <|body_1|>\n\n def pdf(self, x):\n \"\"\"PDF for Inverse Gamma prior Parameters ---------- x : float Latent variable for which the prior is being formed over Returns ---------- - p(x)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(InverseGamma, self).__init__(transform)\n self.covariance_prior = False\n self.alpha = alpha\n self.beta = beta\n<|end_body_0|>\n\n<|body_start_1|>\n if self.transform is not None:\n x = self.transform(x)\n return (-self.alpha - 1) * np.log(x) - self.beta / float(x)\n<|end_body_1|>\n\n<|body_start_2|>\n if self.transform is not None:\n x = self.transform(x)\n return x ** (-self.alpha - 1) * np.exp(-(self.beta / float(x)))\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000139", "length_bytes": 1641, "license_type": "permissive", "methods": [{"docstring": "Parameters ---------- alpha : float Alpha parameter for the Inverse Gamma distribution beta : float Beta parameter for the Inverse Gamma distribution transform : str Whether to apply a transformation - e.g. 
'exp' or 'logit'", "name": "__init__", "signature": "def __init__(self, alpha, beta, transform=None, **kwargs)"}, {"docstring": "Log PDF for Inverse Gamma prior Parameters ---------- x : float Latent variable for which the prior is being formed over Returns ---------- - log(p(x))", "name": "logpdf", "signature": "def logpdf(self, x)"}, {"docstring": "PDF for Inverse Gamma prior Parameters ---------- x : float Latent variable for which the prior is being formed over Returns ---------- - p(x)", "name": "pdf", "signature": "def pdf(self, x)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_041224", "prompt": "Implement the Python class `InverseGamma` described below.\n\nClass description:\nInverse Gamma Distribution ---- This class contains methods relating to the inverse gamma distribution for time series.\n\nMethod signatures and docstrings:\n- def __init__(self, alpha, beta, transform=None, **kwargs): Parameters ---------- alpha : float Alpha parameter for the Inverse Gamma distribution beta : float Beta parameter for the Inverse Gamma distribution transform : str Whether to apply a transformation - e.g. 'exp' or 'logit'\n- def logpdf(self, x): Log PDF for Inverse Gamma prior Parameters ---------- x : float Latent variable for which the prior is being formed over Returns ---------- - log(p(x))\n- def pdf(self, x): PDF for Inverse Gamma prior Parameters ---------- x : float Latent variable for which the prior is being formed over Returns ---------- - p(x)", "prompted_full_text": "Implement the Python class `InverseGamma` described below.\n\nClass description:\nInverse Gamma Distribution ---- This class contains methods relating to the inverse gamma distribution for time series.\n\nMethod signatures and docstrings:\n- def __init__(self, alpha, beta, transform=None, **kwargs): Parameters ---------- alpha : float Alpha parameter for the Inverse Gamma distribution beta : float Beta parameter for the Inverse Gamma distribution transform : str Whether to apply a transformation - e.g. 'exp' or 'logit'\n- def logpdf(self, x): Log PDF for Inverse Gamma prior Parameters ---------- x : float Latent variable for which the prior is being formed over Returns ---------- - log(p(x))\n- def pdf(self, x): PDF for Inverse Gamma prior Parameters ---------- x : float Latent variable for which the prior is being formed over Returns ---------- - p(x)\n\n<|skeleton|>\nclass InverseGamma:\n \"\"\"Inverse Gamma Distribution ---- This class contains methods relating to the inverse gamma distribution for time series.\"\"\"\n\n def __init__(self, alpha, beta, transform=None, **kwargs):\n \"\"\"Parameters ---------- alpha : float Alpha parameter for the Inverse Gamma distribution beta : float Beta parameter for the Inverse Gamma distribution transform : str Whether to apply a transformation - e.g. 
'exp' or 'logit'\"\"\"\n <|body_0|>\n\n def logpdf(self, x):\n \"\"\"Log PDF for Inverse Gamma prior Parameters ---------- x : float Latent variable for which the prior is being formed over Returns ---------- - log(p(x))\"\"\"\n <|body_1|>\n\n def pdf(self, x):\n \"\"\"PDF for Inverse Gamma prior Parameters ---------- x : float Latent variable for which the prior is being formed over Returns ---------- - p(x)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(InverseGamma, self).__init__(transform)\n self.covariance_prior = False\n self.alpha = alpha\n self.beta = beta\n<|end_body_0|>\n\n<|body_start_1|>\n if self.transform is not None:\n x = self.transform(x)\n return (-self.alpha - 1) * np.log(x) - self.beta / float(x)\n<|end_body_1|>\n\n<|body_start_2|>\n if self.transform is not None:\n x = self.transform(x)\n return x ** (-self.alpha - 1) * np.exp(-(self.beta / float(x)))\n<|end_body_2|>\n", "revision_id": "f5166854bb4a24c997fc2b9b4e3e37325a740d34", "skeleton": "<|skeleton|>\nclass InverseGamma:\n \"\"\"Inverse Gamma Distribution ---- This class contains methods relating to the inverse gamma distribution for time series.\"\"\"\n\n def __init__(self, alpha, beta, transform=None, **kwargs):\n \"\"\"Parameters ---------- alpha : float Alpha parameter for the Inverse Gamma distribution beta : float Beta parameter for the Inverse Gamma distribution transform : str Whether to apply a transformation - e.g. 'exp' or 'logit'\"\"\"\n <|body_0|>\n\n def logpdf(self, x):\n \"\"\"Log PDF for Inverse Gamma prior Parameters ---------- x : float Latent variable for which the prior is being formed over Returns ---------- - log(p(x))\"\"\"\n <|body_1|>\n\n def pdf(self, x):\n \"\"\"PDF for Inverse Gamma prior Parameters ---------- x : float Latent variable for which the prior is being formed over Returns ---------- - p(x)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class InverseGamma:\n \"\"\"Inverse Gamma Distribution ---- This class contains methods relating to the inverse gamma distribution for time series.\"\"\"\n\n def __init__(self, alpha, beta, transform=None, **kwargs):\n \"\"\"Parameters ---------- alpha : float Alpha parameter for the Inverse Gamma distribution beta : float Beta parameter for the Inverse Gamma distribution transform : str Whether to apply a transformation - e.g. 'exp' or 'logit'\"\"\"\n super(InverseGamma, self).__init__(transform)\n self.covariance_prior = False\n self.alpha = alpha\n self.beta = beta\n\n def logpdf(self, x):\n \"\"\"Log PDF for Inverse Gamma prior Parameters ---------- x : float Latent variable for which the prior is being formed over Returns ---------- - log(p(x))\"\"\"\n if self.transform is not None:\n x = self.transform(x)\n return (-self.alpha - 1) * np.log(x) - self.beta / float(x)\n\n def pdf(self, x):\n \"\"\"PDF for Inverse Gamma prior Parameters ---------- x : float Latent variable for which the prior is being formed over Returns ---------- - p(x)\"\"\"\n if self.transform is not None:\n x = self.transform(x)\n return x ** (-self.alpha - 1) * np.exp(-(self.beta / float(x)))\n", "source": "the_stack_v2_python_sparse", "source_path": "pyflux/families/inverse_gamma.py", "source_repo": "ecastrow/pyflux", "split": "val", "star_events_count": 0}
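Note that `pdf` and `logpdf` in the record above return only the kernel x^(-alpha-1) * exp(-beta/x); the full inverse-gamma density is (beta^alpha / Gamma(alpha)) * x^(-alpha-1) * exp(-beta/x). For prior evaluation inside MAP or MCMC the normalizing constant cancels, so the omission is deliberate and harmless. A quick consistency check against scipy, assuming scipy is installed:

import math
from scipy.stats import invgamma

alpha, beta, x = 3.0, 2.0, 1.5
kernel = x ** (-alpha - 1) * math.exp(-beta / x)  # what the class's pdf() returns
norm = beta ** alpha / math.gamma(alpha)          # the omitted normalizing constant
assert math.isclose(norm * kernel, invgamma.pdf(x, a=alpha, scale=beta))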
{"blob_id": "fdffe727ec98390576308ecc1b90c1232c771e6e", "bodies": ["icetray.I3ConditionalModule.__init__(self, context)\nself.AddParameter('pulsemap', 'Define the name of the pulsemap', 'InIceDSTPulses')\nself.AddParameter('save_as', 'Define the Output key', 'Deep_Learning_Classification')\nself.AddParameter('batch_size', 'Size of the batches', 40)\nself.AddParameter('cpu_cores', 'number of cores to be used', 1)\nself.AddParameter('gpu_cores', 'number of gpu to be used', 1)\nself.AddParameter('remove_daq', 'whether or not to remove Q-Frames', False)\nself.AddParameter('model', 'which model to use', 'classification')", "print('Initializing')\nself.__pulsemap = self.GetParameter('pulsemap')\nself.__save_as = self.GetParameter('save_as')\nself.__batch_size = self.GetParameter('batch_size')\nself.__cpu_cores = self.GetParameter('cpu_cores')\nself.__gpu_cores = self.GetParameter('gpu_cores')\nself.__remove_daq = self.GetParameter('remove_daq')\nself.__frame_buffer = []\nself.__buffer_length = 0\nself.__num_pframes = 0\nconfig = tf.ConfigProto(intra_op_parallelism_threads=self.__cpu_cores, inter_op_parallelism_threads=self.__cpu_cores, device_count={'GPU': self.__gpu_cores, 'CPU': self.__cpu_cores}, log_device_placement=False)\nsess = tf.Session(config=config)\nset_session(sess)\nreturn", "if self.__num_pframes == 0:\n return\nfor frame in frames:\n if frame.Stop != icetray.I3Frame.Physics:\n continue\npredictions = self.__model.predict(you_processed_frames, *args, **kwargs)\ni = 0\nfor frame in frames:\n if frame.Stop != icetray.I3Frame.Physics:\n continue\n frame.Put(self.__save_as, output)\n i += 1\nreturn", "self.__frame_buffer.append(frame)\nself.__buffer_length += 1\nself.__num_pframes += 1\nif self.__buffer_length == self.__batch_size:\n self.BatchProcessBuffer(self.__frame_buffer)\n for frame in self.__frame_buffer:\n self.PushFrame(frame)\n self.__frame_buffer[:] = []\n self.__buffer_length = 0\n self.__num_pframes = 0\nreturn", "if not self.__remove_daq:\n self.__frame_buffer.append(frame)\nreturn", "self.BatchProcessBuffer(self.__frame_buffer)\nfor frame in self.__frame_buffer:\n self.PushFrame(frame)\nself.__frame_buffer[:] = []\nreturn"], "bodies_text": "<|body_start_0|>\n icetray.I3ConditionalModule.__init__(self, context)\n self.AddParameter('pulsemap', 'Define the name of the pulsemap', 'InIceDSTPulses')\n self.AddParameter('save_as', 'Define the Output key', 'Deep_Learning_Classification')\n self.AddParameter('batch_size', 'Size of the batches', 40)\n self.AddParameter('cpu_cores', 'number of cores to be used', 1)\n self.AddParameter('gpu_cores', 'number of gpu to be used', 1)\n self.AddParameter('remove_daq', 'whether or not to remove Q-Frames', False)\n self.AddParameter('model', 'which model to use', 'classification')\n<|end_body_0|>\n\n<|body_start_1|>\n print('Initializing')\n self.__pulsemap = self.GetParameter('pulsemap')\n self.__save_as = self.GetParameter('save_as')\n self.__batch_size = self.GetParameter('batch_size')\n self.__cpu_cores = self.GetParameter('cpu_cores')\n self.__gpu_cores = self.GetParameter('gpu_cores')\n self.__remove_daq = self.GetParameter('remove_daq')\n self.__frame_buffer = []\n self.__buffer_length = 0\n self.__num_pframes = 0\n config = tf.ConfigProto(intra_op_parallelism_threads=self.__cpu_cores, inter_op_parallelism_threads=self.__cpu_cores, device_count={'GPU': self.__gpu_cores, 'CPU': self.__cpu_cores}, log_device_placement=False)\n sess = tf.Session(config=config)\n set_session(sess)\n return\n<|end_body_1|>\n\n<|body_start_2|>\n if 
self.__num_pframes == 0:\n return\n for frame in frames:\n if frame.Stop != icetray.I3Frame.Physics:\n continue\n predictions = self.__model.predict(you_processed_frames, *args, **kwargs)\n i = 0\n for frame in frames:\n if frame.Stop != icetray.I3Frame.Physics:\n continue\n frame.Put(self.__save_as, output)\n i += 1\n return\n<|end_body_2|>\n\n<|body_start_3|>\n self.__frame_buffer.append(frame)\n self.__buffer_length += 1\n self.__num_pframes += 1\n if self.__buffer_length == self.__batch_size:\n self.BatchProcessBuffer(self.__frame_buffer)\n for frame in self.__frame_buffer:\n self.PushFrame(frame)\n self.__frame_buffer[:] = []\n self.__buffer_length = 0\n self.__num_pframes = 0\n return\n<|end_body_3|>\n\n<|body_start_4|>\n if not self.__remove_daq:\n self.__frame_buffer.append(frame)\n return\n<|end_body_4|>\n\n<|body_start_5|>\n self.BatchProcessBuffer(self.__frame_buffer)\n for frame in self.__frame_buffer:\n self.PushFrame(frame)\n self.__frame_buffer[:] = []\n return\n<|end_body_5|>\n", "class_docstring": "IceTray compatible class of the Deep Learning Classifier", "class_name": "DeepLearningClassifier", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DeepLearningClassifier:\n \"\"\"IceTray compatible class of the Deep Learning Classifier\"\"\"\n\n def __init__(self, context):\n \"\"\"Initialize the Class\"\"\"\n <|body_0|>\n\n def Configure(self):\n \"\"\"Read the network architecture and input, output information from config files\"\"\"\n <|body_1|>\n\n def BatchProcessBuffer(self, frames):\n \"\"\"Batch Process a list of frames. This includes pre-processing, prediction and storage of the results\"\"\"\n <|body_2|>\n\n def Physics(self, frame):\n \"\"\"Buffer physics frames until batch size is reached, then start processing\"\"\"\n <|body_3|>\n\n def DAQ(self, frame):\n \"\"\"Handel Q-Frames. 
Append to buffer if they should be kept\"\"\"\n <|body_4|>\n\n def Finish(self):\n \"\"\"Process the remaining (incomplete) batch of frames\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n icetray.I3ConditionalModule.__init__(self, context)\n self.AddParameter('pulsemap', 'Define the name of the pulsemap', 'InIceDSTPulses')\n self.AddParameter('save_as', 'Define the Output key', 'Deep_Learning_Classification')\n self.AddParameter('batch_size', 'Size of the batches', 40)\n self.AddParameter('cpu_cores', 'number of cores to be used', 1)\n self.AddParameter('gpu_cores', 'number of gpu to be used', 1)\n self.AddParameter('remove_daq', 'whether or not to remove Q-Frames', False)\n self.AddParameter('model', 'which model to use', 'classification')\n<|end_body_0|>\n\n<|body_start_1|>\n print('Initializing')\n self.__pulsemap = self.GetParameter('pulsemap')\n self.__save_as = self.GetParameter('save_as')\n self.__batch_size = self.GetParameter('batch_size')\n self.__cpu_cores = self.GetParameter('cpu_cores')\n self.__gpu_cores = self.GetParameter('gpu_cores')\n self.__remove_daq = self.GetParameter('remove_daq')\n self.__frame_buffer = []\n self.__buffer_length = 0\n self.__num_pframes = 0\n config = tf.ConfigProto(intra_op_parallelism_threads=self.__cpu_cores, inter_op_parallelism_threads=self.__cpu_cores, device_count={'GPU': self.__gpu_cores, 'CPU': self.__cpu_cores}, log_device_placement=False)\n sess = tf.Session(config=config)\n set_session(sess)\n return\n<|end_body_1|>\n\n<|body_start_2|>\n if self.__num_pframes == 0:\n return\n for frame in frames:\n if frame.Stop != icetray.I3Frame.Physics:\n continue\n predictions = self.__model.predict(you_processed_frames, *args, **kwargs)\n i = 0\n for frame in frames:\n if frame.Stop != icetray.I3Frame.Physics:\n continue\n frame.Put(self.__save_as, output)\n i += 1\n return\n<|end_body_2|>\n\n<|body_start_3|>\n self.__frame_buffer.append(frame)\n self.__buffer_length += 1\n self.__num_pframes += 1\n if self.__buffer_length == self.__batch_size:\n self.BatchProcessBuffer(self.__frame_buffer)\n for frame in self.__frame_buffer:\n self.PushFrame(frame)\n self.__frame_buffer[:] = []\n self.__buffer_length = 0\n self.__num_pframes = 0\n return\n<|end_body_3|>\n\n<|body_start_4|>\n if not self.__remove_daq:\n self.__frame_buffer.append(frame)\n return\n<|end_body_4|>\n\n<|body_start_5|>\n self.BatchProcessBuffer(self.__frame_buffer)\n for frame in self.__frame_buffer:\n self.PushFrame(frame)\n self.__frame_buffer[:] = []\n return\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000140", "length_bytes": 6262, "license_type": "permissive", "methods": [{"docstring": "Initialize the Class", "name": "__init__", "signature": "def __init__(self, context)"}, {"docstring": "Read the network architecture and input, output information from config files", "name": "Configure", "signature": "def Configure(self)"}, {"docstring": "Batch Process a list of frames. This includes pre-processing, prediction and storage of the results", "name": "BatchProcessBuffer", "signature": "def BatchProcessBuffer(self, frames)"}, {"docstring": "Buffer physics frames until batch size is reached, then start processing", "name": "Physics", "signature": "def Physics(self, frame)"}, {"docstring": "Handel Q-Frames. 
Append to buffer if they should be kept", "name": "DAQ", "signature": "def DAQ(self, frame)"}, {"docstring": "Process the remaining (incomplete) batch of frames", "name": "Finish", "signature": "def Finish(self)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_train_011542", "prompt": "Implement the Python class `DeepLearningClassifier` described below.\n\nClass description:\nIceTray compatible class of the Deep Learning Classifier\n\nMethod signatures and docstrings:\n- def __init__(self, context): Initialize the Class\n- def Configure(self): Read the network architecture and input, output information from config files\n- def BatchProcessBuffer(self, frames): Batch Process a list of frames. This includes pre-processing, prediction and storage of the results\n- def Physics(self, frame): Buffer physics frames until batch size is reached, then start processing\n- def DAQ(self, frame): Handel Q-Frames. Append to buffer if they should be kept\n- def Finish(self): Process the remaining (incomplete) batch of frames", "prompted_full_text": "Implement the Python class `DeepLearningClassifier` described below.\n\nClass description:\nIceTray compatible class of the Deep Learning Classifier\n\nMethod signatures and docstrings:\n- def __init__(self, context): Initialize the Class\n- def Configure(self): Read the network architecture and input, output information from config files\n- def BatchProcessBuffer(self, frames): Batch Process a list of frames. This includes pre-processing, prediction and storage of the results\n- def Physics(self, frame): Buffer physics frames until batch size is reached, then start processing\n- def DAQ(self, frame): Handel Q-Frames. Append to buffer if they should be kept\n- def Finish(self): Process the remaining (incomplete) batch of frames\n\n<|skeleton|>\nclass DeepLearningClassifier:\n \"\"\"IceTray compatible class of the Deep Learning Classifier\"\"\"\n\n def __init__(self, context):\n \"\"\"Initialize the Class\"\"\"\n <|body_0|>\n\n def Configure(self):\n \"\"\"Read the network architecture and input, output information from config files\"\"\"\n <|body_1|>\n\n def BatchProcessBuffer(self, frames):\n \"\"\"Batch Process a list of frames. This includes pre-processing, prediction and storage of the results\"\"\"\n <|body_2|>\n\n def Physics(self, frame):\n \"\"\"Buffer physics frames until batch size is reached, then start processing\"\"\"\n <|body_3|>\n\n def DAQ(self, frame):\n \"\"\"Handel Q-Frames. 
Append to buffer if they should be kept\"\"\"\n <|body_4|>\n\n def Finish(self):\n \"\"\"Process the remaining (incomplete) batch of frames\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n icetray.I3ConditionalModule.__init__(self, context)\n self.AddParameter('pulsemap', 'Define the name of the pulsemap', 'InIceDSTPulses')\n self.AddParameter('save_as', 'Define the Output key', 'Deep_Learning_Classification')\n self.AddParameter('batch_size', 'Size of the batches', 40)\n self.AddParameter('cpu_cores', 'number of cores to be used', 1)\n self.AddParameter('gpu_cores', 'number of gpu to be used', 1)\n self.AddParameter('remove_daq', 'whether or not to remove Q-Frames', False)\n self.AddParameter('model', 'which model to use', 'classification')\n<|end_body_0|>\n\n<|body_start_1|>\n print('Initializing')\n self.__pulsemap = self.GetParameter('pulsemap')\n self.__save_as = self.GetParameter('save_as')\n self.__batch_size = self.GetParameter('batch_size')\n self.__cpu_cores = self.GetParameter('cpu_cores')\n self.__gpu_cores = self.GetParameter('gpu_cores')\n self.__remove_daq = self.GetParameter('remove_daq')\n self.__frame_buffer = []\n self.__buffer_length = 0\n self.__num_pframes = 0\n config = tf.ConfigProto(intra_op_parallelism_threads=self.__cpu_cores, inter_op_parallelism_threads=self.__cpu_cores, device_count={'GPU': self.__gpu_cores, 'CPU': self.__cpu_cores}, log_device_placement=False)\n sess = tf.Session(config=config)\n set_session(sess)\n return\n<|end_body_1|>\n\n<|body_start_2|>\n if self.__num_pframes == 0:\n return\n for frame in frames:\n if frame.Stop != icetray.I3Frame.Physics:\n continue\n predictions = self.__model.predict(you_processed_frames, *args, **kwargs)\n i = 0\n for frame in frames:\n if frame.Stop != icetray.I3Frame.Physics:\n continue\n frame.Put(self.__save_as, output)\n i += 1\n return\n<|end_body_2|>\n\n<|body_start_3|>\n self.__frame_buffer.append(frame)\n self.__buffer_length += 1\n self.__num_pframes += 1\n if self.__buffer_length == self.__batch_size:\n self.BatchProcessBuffer(self.__frame_buffer)\n for frame in self.__frame_buffer:\n self.PushFrame(frame)\n self.__frame_buffer[:] = []\n self.__buffer_length = 0\n self.__num_pframes = 0\n return\n<|end_body_3|>\n\n<|body_start_4|>\n if not self.__remove_daq:\n self.__frame_buffer.append(frame)\n return\n<|end_body_4|>\n\n<|body_start_5|>\n self.BatchProcessBuffer(self.__frame_buffer)\n for frame in self.__frame_buffer:\n self.PushFrame(frame)\n self.__frame_buffer[:] = []\n return\n<|end_body_5|>\n", "revision_id": "f50d97811232c31a7257d4785f631d70ce779639", "skeleton": "<|skeleton|>\nclass DeepLearningClassifier:\n \"\"\"IceTray compatible class of the Deep Learning Classifier\"\"\"\n\n def __init__(self, context):\n \"\"\"Initialize the Class\"\"\"\n <|body_0|>\n\n def Configure(self):\n \"\"\"Read the network architecture and input, output information from config files\"\"\"\n <|body_1|>\n\n def BatchProcessBuffer(self, frames):\n \"\"\"Batch Process a list of frames. This includes pre-processing, prediction and storage of the results\"\"\"\n <|body_2|>\n\n def Physics(self, frame):\n \"\"\"Buffer physics frames until batch size is reached, then start processing\"\"\"\n <|body_3|>\n\n def DAQ(self, frame):\n \"\"\"Handel Q-Frames. 
Append to buffer if they should be kept\"\"\"\n <|body_4|>\n\n def Finish(self):\n \"\"\"Process the remaining (incomplete) batch of frames\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DeepLearningClassifier:\n \"\"\"IceTray compatible class of the Deep Learning Classifier\"\"\"\n\n def __init__(self, context):\n \"\"\"Initialize the Class\"\"\"\n icetray.I3ConditionalModule.__init__(self, context)\n self.AddParameter('pulsemap', 'Define the name of the pulsemap', 'InIceDSTPulses')\n self.AddParameter('save_as', 'Define the Output key', 'Deep_Learning_Classification')\n self.AddParameter('batch_size', 'Size of the batches', 40)\n self.AddParameter('cpu_cores', 'number of cores to be used', 1)\n self.AddParameter('gpu_cores', 'number of gpu to be used', 1)\n self.AddParameter('remove_daq', 'whether or not to remove Q-Frames', False)\n self.AddParameter('model', 'which model to use', 'classification')\n\n def Configure(self):\n \"\"\"Read the network architecture and input, output information from config files\"\"\"\n print('Initializing')\n self.__pulsemap = self.GetParameter('pulsemap')\n self.__save_as = self.GetParameter('save_as')\n self.__batch_size = self.GetParameter('batch_size')\n self.__cpu_cores = self.GetParameter('cpu_cores')\n self.__gpu_cores = self.GetParameter('gpu_cores')\n self.__remove_daq = self.GetParameter('remove_daq')\n self.__frame_buffer = []\n self.__buffer_length = 0\n self.__num_pframes = 0\n config = tf.ConfigProto(intra_op_parallelism_threads=self.__cpu_cores, inter_op_parallelism_threads=self.__cpu_cores, device_count={'GPU': self.__gpu_cores, 'CPU': self.__cpu_cores}, log_device_placement=False)\n sess = tf.Session(config=config)\n set_session(sess)\n return\n\n def BatchProcessBuffer(self, frames):\n \"\"\"Batch Process a list of frames. This includes pre-processing, prediction and storage of the results\"\"\"\n if self.__num_pframes == 0:\n return\n for frame in frames:\n if frame.Stop != icetray.I3Frame.Physics:\n continue\n predictions = self.__model.predict(you_processed_frames, *args, **kwargs)\n i = 0\n for frame in frames:\n if frame.Stop != icetray.I3Frame.Physics:\n continue\n frame.Put(self.__save_as, output)\n i += 1\n return\n\n def Physics(self, frame):\n \"\"\"Buffer physics frames until batch size is reached, then start processing\"\"\"\n self.__frame_buffer.append(frame)\n self.__buffer_length += 1\n self.__num_pframes += 1\n if self.__buffer_length == self.__batch_size:\n self.BatchProcessBuffer(self.__frame_buffer)\n for frame in self.__frame_buffer:\n self.PushFrame(frame)\n self.__frame_buffer[:] = []\n self.__buffer_length = 0\n self.__num_pframes = 0\n return\n\n def DAQ(self, frame):\n \"\"\"Handel Q-Frames. Append to buffer if they should be kept\"\"\"\n if not self.__remove_daq:\n self.__frame_buffer.append(frame)\n return\n\n def Finish(self):\n \"\"\"Process the remaining (incomplete) batch of frames\"\"\"\n self.BatchProcessBuffer(self.__frame_buffer)\n for frame in self.__frame_buffer:\n self.PushFrame(frame)\n self.__frame_buffer[:] = []\n return\n", "source": "the_stack_v2_python_sparse", "source_path": "i3deepice/template.py", "source_repo": "tglauch/i3deepice", "split": "val", "star_events_count": 1}
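`BatchProcessBuffer` in the record above is not runnable as stored: `you_processed_frames`, `args`, `kwargs`, and `output` are all undefined, which fits the source path `i3deepice/template.py`, evidently a template meant to be filled in. One hedged completion of the method (a drop-in for the class body, assuming icetray and numpy are available, and inventing `preprocess(frame, pulsemap)` and `to_i3_output(prediction)` for the two project-specific steps):

import numpy as np

def BatchProcessBuffer(self, frames):
    """Batch Process a list of frames: pre-process, predict, store the results"""
    if self.__num_pframes == 0:
        return
    # Collect one network input per P-frame in the buffer (preprocess is hypothetical).
    batch = [preprocess(f, self.__pulsemap) for f in frames
             if f.Stop == icetray.I3Frame.Physics]
    predictions = self.__model.predict(np.asarray(batch), batch_size=self.__batch_size)
    i = 0
    for frame in frames:
        if frame.Stop != icetray.I3Frame.Physics:
            continue
        frame.Put(self.__save_as, to_i3_output(predictions[i]))  # to_i3_output is hypothetical
        i += 1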
{"blob_id": "2f5976adc2d2cd587e2cf47c3b4837da73d64745", "bodies": ["self.number = 0\nself.columns = columns\nself.total = [0 for i in range(self.columns)]", "self.number += 1\nfor i in range(self.columns):\n self.total[i] += values[i]", "if self.number == 0:\n return [default for i in range(self.columns)]\nelse:\n return [self.total[i] / self.number for i in range(self.columns)]"], "bodies_text": "<|body_start_0|>\n self.number = 0\n self.columns = columns\n self.total = [0 for i in range(self.columns)]\n<|end_body_0|>\n\n<|body_start_1|>\n self.number += 1\n for i in range(self.columns):\n self.total[i] += values[i]\n<|end_body_1|>\n\n<|body_start_2|>\n if self.number == 0:\n return [default for i in range(self.columns)]\n else:\n return [self.total[i] / self.number for i in range(self.columns)]\n<|end_body_2|>\n", "class_docstring": "For each couple (receiver, sender) we gather a number of measurement points that must be averaged this is done through an instance of this Averager class all measurement points will contain one, two or three values depending on the number of antennas each measurement point is recorded, and at the end averages returns the right value(s)", "class_name": "Averager", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Averager:\n \"\"\"For each couple (receiver, sender) we gather a number of measurement points that must be averaged this is done through an instance of this Averager class all measurement points will contain one, two or three values depending on the number of antennas each measurement point is recorded, and at the end averages returns the right value(s)\"\"\"\n\n def __init__(self, columns):\n \"\"\"number of expected columns\"\"\"\n <|body_0|>\n\n def record_point(self, values):\n \"\"\"values should have as many measurements as columns\"\"\"\n <|body_1|>\n\n def averages(self, default):\n \"\"\"once all measurement points have been recorded, they can be averaged\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.number = 0\n self.columns = columns\n self.total = [0 for i in range(self.columns)]\n<|end_body_0|>\n\n<|body_start_1|>\n self.number += 1\n for i in range(self.columns):\n self.total[i] += values[i]\n<|end_body_1|>\n\n<|body_start_2|>\n if self.number == 0:\n return [default for i in range(self.columns)]\n else:\n return [self.total[i] / self.number for i in range(self.columns)]\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000141", "length_bytes": 3309, "license_type": "no_license", "methods": [{"docstring": "number of expected columns", "name": "__init__", "signature": "def __init__(self, columns)"}, {"docstring": "values should have as many measurements as columns", "name": "record_point", "signature": "def record_point(self, values)"}, {"docstring": "once all measurement points have been recorded, they can be averaged", "name": "averages", "signature": "def averages(self, default)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_test_001808", "prompt": "Implement the Python class `Averager` described below.\n\nClass description:\nFor each couple (receiver, sender) we gather a number of measurement points that must be averaged this is done through an instance of this Averager class all measurement points will contain one, two or three values depending on the number of antennas each measurement point is recorded, and at the end averages returns the right value(s)\n\nMethod signatures and docstrings:\n- def __init__(self, 
columns): number of expected columns\n- def record_point(self, values): values should have as many measurements as columns\n- def averages(self, default): once all measurement points have been recorded, they can be averaged", "prompted_full_text": "Implement the Python class `Averager` described below.\n\nClass description:\nFor each couple (receiver, sender) we gather a number of measurement points that must be averaged this is done through an instance of this Averager class all measurement points will contain one, two or three values depending on the number of antennas each measurement point is recorded, and at the end averages returns the right value(s)\n\nMethod signatures and docstrings:\n- def __init__(self, columns): number of expected columns\n- def record_point(self, values): values should have as many measurements as columns\n- def averages(self, default): once all measurement points have been recorded, they can be averaged\n\n<|skeleton|>\nclass Averager:\n \"\"\"For each couple (receiver, sender) we gather a number of measurement points that must be averaged this is done through an instance of this Averager class all measurement points will contain one, two or three values depending on the number of antennas each measurement point is recorded, and at the end averages returns the right value(s)\"\"\"\n\n def __init__(self, columns):\n \"\"\"number of expected columns\"\"\"\n <|body_0|>\n\n def record_point(self, values):\n \"\"\"values should have as many measurements as columns\"\"\"\n <|body_1|>\n\n def averages(self, default):\n \"\"\"once all measurement points have been recorded, they can be averaged\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.number = 0\n self.columns = columns\n self.total = [0 for i in range(self.columns)]\n<|end_body_0|>\n\n<|body_start_1|>\n self.number += 1\n for i in range(self.columns):\n self.total[i] += values[i]\n<|end_body_1|>\n\n<|body_start_2|>\n if self.number == 0:\n return [default for i in range(self.columns)]\n else:\n return [self.total[i] / self.number for i in range(self.columns)]\n<|end_body_2|>\n", "revision_id": "7f2854678c52b2ef26bcd33596d2f1ca53a9a1a4", "skeleton": "<|skeleton|>\nclass Averager:\n \"\"\"For each couple (receiver, sender) we gather a number of measurement points that must be averaged this is done through an instance of this Averager class all measurement points will contain one, two or three values depending on the number of antennas each measurement point is recorded, and at the end averages returns the right value(s)\"\"\"\n\n def __init__(self, columns):\n \"\"\"number of expected columns\"\"\"\n <|body_0|>\n\n def record_point(self, values):\n \"\"\"values should have as many measurements as columns\"\"\"\n <|body_1|>\n\n def averages(self, default):\n \"\"\"once all measurement points have been recorded, they can be averaged\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Averager:\n \"\"\"For each couple (receiver, sender) we gather a number of measurement points that must be averaged this is done through an instance of this Averager class all measurement points will contain one, two or three values depending on the number of antennas each measurement point is recorded, and at the end averages returns the right value(s)\"\"\"\n\n def __init__(self, columns):\n \"\"\"number of expected columns\"\"\"\n self.number = 0\n self.columns = 
columns\n self.total = [0 for i in range(self.columns)]\n\n def record_point(self, values):\n \"\"\"values should have as many measurements as columns\"\"\"\n self.number += 1\n for i in range(self.columns):\n self.total[i] += values[i]\n\n def averages(self, default):\n \"\"\"once all measurement points have been recorded, they can be averaged\"\"\"\n if self.number == 0:\n return [default for i in range(self.columns)]\n else:\n return [self.total[i] / self.number for i in range(self.columns)]\n", "source": "the_stack_v2_python_sparse", "source_path": "batman-vs-olsr/processmap.py", "source_repo": "fit-r2lab/r2lab-demos", "split": "val", "star_events_count": 4}
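The `Averager` above is a plain column-wise running mean with a fallback value when no points were recorded. A short usage example, assuming the class from the record is in scope (the sample values and the -100 default are made up):

avg = Averager(columns=3)            # e.g. one column per antenna
avg.record_point([1.0, 2.0, 3.0])
avg.record_point([3.0, 4.0, 5.0])
print(avg.averages(default=-100))    # [2.0, 3.0, 4.0]

empty = Averager(columns=2)
print(empty.averages(default=-100))  # [-100, -100]: the default fills every column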
{"blob_id": "85a5f8f31550c6dd74e332dc72d1a97cd0658b2c", "bodies": ["self.state_space = state_space\nself.action_space = action_space\nself.epsilon = epsilon\nself.gamma = gamma\nself.q_network = q_network\nself.optimizer = optimizer", "predicted_q_value = self.q_network(torch.FloatTensor(state))[action]\nwith torch.no_grad():\n target_q_value = torch.FloatTensor([reward])\n if not done:\n target_q_value += self.gamma * self.q_network(torch.FloatTensor(next_state)).max().item()\nloss = F.smooth_l1_loss(predicted_q_value, target_q_value)\nself.optimizer.zero_grad()\nloss.backward()\nself.optimizer.step()"], "bodies_text": "<|body_start_0|>\n self.state_space = state_space\n self.action_space = action_space\n self.epsilon = epsilon\n self.gamma = gamma\n self.q_network = q_network\n self.optimizer = optimizer\n<|end_body_0|>\n\n<|body_start_1|>\n predicted_q_value = self.q_network(torch.FloatTensor(state))[action]\n with torch.no_grad():\n target_q_value = torch.FloatTensor([reward])\n if not done:\n target_q_value += self.gamma * self.q_network(torch.FloatTensor(next_state)).max().item()\n loss = F.smooth_l1_loss(predicted_q_value, target_q_value)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n<|end_body_1|>\n", "class_docstring": "", "class_name": "NNOnlineQAgent", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass NNOnlineQAgent:\n\n def __init__(self, q_network, optimizer, state_space, action_space, epsilon=0.1, gamma=0.999):\n \"\"\"Agent that learns action values (Q) through Q-learning method. Assumes Box state space and Discrete action space.\"\"\"\n <|body_0|>\n\n def learn(self, state, action, next_state, reward, done):\n \"\"\"Train the agent with a given transition.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.state_space = state_space\n self.action_space = action_space\n self.epsilon = epsilon\n self.gamma = gamma\n self.q_network = q_network\n self.optimizer = optimizer\n<|end_body_0|>\n\n<|body_start_1|>\n predicted_q_value = self.q_network(torch.FloatTensor(state))[action]\n with torch.no_grad():\n target_q_value = torch.FloatTensor([reward])\n if not done:\n target_q_value += self.gamma * self.q_network(torch.FloatTensor(next_state)).max().item()\n loss = F.smooth_l1_loss(predicted_q_value, target_q_value)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000142", "length_bytes": 4297, "license_type": "permissive", "methods": [{"docstring": "Agent that learns action values (Q) through Q-learning method. Assumes Box state space and Discrete action space.", "name": "__init__", "signature": "def __init__(self, q_network, optimizer, state_space, action_space, epsilon=0.1, gamma=0.999)"}, {"docstring": "Train the agent with a given transition.", "name": "learn", "signature": "def learn(self, state, action, next_state, reward, done)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004809", "prompt": "Implement the Python class `NNOnlineQAgent` described below.\n\nClass description:\nImplement the NNOnlineQAgent class.\n\nMethod signatures and docstrings:\n- def __init__(self, q_network, optimizer, state_space, action_space, epsilon=0.1, gamma=0.999): Agent that learns action values (Q) through Q-learning method. 
Assumes Box state space and Discrete action space.\n- def learn(self, state, action, next_state, reward, done): Train the agent with a given transition.", "prompted_full_text": "Implement the Python class `NNOnlineQAgent` described below.\n\nClass description:\nImplement the NNOnlineQAgent class.\n\nMethod signatures and docstrings:\n- def __init__(self, q_network, optimizer, state_space, action_space, epsilon=0.1, gamma=0.999): Agent that learns action values (Q) through Q-learning method. Assumes Box state space and Discrete action space.\n- def learn(self, state, action, next_state, reward, done): Train the agent with a given transition.\n\n<|skeleton|>\nclass NNOnlineQAgent:\n\n def __init__(self, q_network, optimizer, state_space, action_space, epsilon=0.1, gamma=0.999):\n \"\"\"Agent that learns action values (Q) through Q-learning method. Assumes Box state space and Discrete action space.\"\"\"\n <|body_0|>\n\n def learn(self, state, action, next_state, reward, done):\n \"\"\"Train the agent with a given transition.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.state_space = state_space\n self.action_space = action_space\n self.epsilon = epsilon\n self.gamma = gamma\n self.q_network = q_network\n self.optimizer = optimizer\n<|end_body_0|>\n\n<|body_start_1|>\n predicted_q_value = self.q_network(torch.FloatTensor(state))[action]\n with torch.no_grad():\n target_q_value = torch.FloatTensor([reward])\n if not done:\n target_q_value += self.gamma * self.q_network(torch.FloatTensor(next_state)).max().item()\n loss = F.smooth_l1_loss(predicted_q_value, target_q_value)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n<|end_body_1|>\n", "revision_id": "63482846cfcc358f68a0f056657c60bc21c3f2bb", "skeleton": "<|skeleton|>\nclass NNOnlineQAgent:\n\n def __init__(self, q_network, optimizer, state_space, action_space, epsilon=0.1, gamma=0.999):\n \"\"\"Agent that learns action values (Q) through Q-learning method. Assumes Box state space and Discrete action space.\"\"\"\n <|body_0|>\n\n def learn(self, state, action, next_state, reward, done):\n \"\"\"Train the agent with a given transition.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class NNOnlineQAgent:\n def __init__(self, q_network, optimizer, state_space, action_space, epsilon=0.1, gamma=0.999):\n \"\"\"Agent that learns action values (Q) through Q-learning method. Assumes Box state space and Discrete action space.\"\"\"\n self.state_space = state_space\n self.action_space = action_space\n self.epsilon = epsilon\n self.gamma = gamma\n self.q_network = q_network\n self.optimizer = optimizer\n\n def learn(self, state, action, next_state, reward, done):\n \"\"\"Train the agent with a given transition.\"\"\"\n predicted_q_value = self.q_network(torch.FloatTensor(state))[action]\n with torch.no_grad():\n target_q_value = torch.FloatTensor([reward])\n if not done:\n target_q_value += self.gamma * self.q_network(torch.FloatTensor(next_state)).max().item()\n loss = F.smooth_l1_loss(predicted_q_value, target_q_value)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n", "source": "the_stack_v2_python_sparse", "source_path": "agents.py", "source_repo": "seungjaeryanlee/combined-experience-replay", "split": "val", "star_events_count": 3}
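`learn` in the record above is a standard one-step Q-learning (TD(0)) update: it regresses Q(s, a) toward the bootstrapped target y = r + gamma * max_a' Q(s', a'), holding the target constant via `torch.no_grad()` and using the Huber loss (`F.smooth_l1_loss`, so the defining module also needs `import torch.nn.functional as F`). A minimal smoke test, assuming the class from the record plus PyTorch are importable:

import torch
import torch.nn as nn

q_net = nn.Sequential(nn.Linear(4, 16), nn.ReLU(), nn.Linear(16, 2))
agent = NNOnlineQAgent(q_net, torch.optim.Adam(q_net.parameters(), lr=1e-3),
                       state_space=4, action_space=2)
# One transition in a toy 4-dim Box state space with 2 discrete actions.
agent.learn(state=[0.1, 0.2, 0.3, 0.4], action=1,
            next_state=[0.2, 0.1, 0.0, 0.3], reward=1.0, done=False)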
{"blob_id": "ece9d28d540e8ba953c274a2b1bb586bee19e3b5", "bodies": ["for i in range(1, 11):\n dataList = self.GGListSpider(i)\n BmobUtils.insertListBmob('GGBean', dataList)\n print('经过不懈的努力,开哥爬下了品玩科技第 %d 页' % i)", "url = 'http://www.svinsight.com/api?callback=jQuery111306490858093306913_%d' % (time.time() * 1000)\ndata = {'id': '208', 'query': '{\"global\": {\"Loc\": 2, \"OS\": 2, \"Port\": 300, \"Sign\": \"\", \"Token\": \"\"},\\n \"data\": {\"Index\":%d, \"OrderBy\": 0, \"QueryString\": [], \"Size\": 1}}' % index}\nhtml = HtmlPostUtils.postHtml(url, data=data, type='PW')\ndatalist = GGHtmlDealUtils.dealHtml(html)\nreturn datalist"], "bodies_text": "<|body_start_0|>\n for i in range(1, 11):\n dataList = self.GGListSpider(i)\n BmobUtils.insertListBmob('GGBean', dataList)\n print('经过不懈的努力,开哥爬下了品玩科技第 %d 页' % i)\n<|end_body_0|>\n\n<|body_start_1|>\n url = 'http://www.svinsight.com/api?callback=jQuery111306490858093306913_%d' % (time.time() * 1000)\n data = {'id': '208', 'query': '{\"global\": {\"Loc\": 2, \"OS\": 2, \"Port\": 300, \"Sign\": \"\", \"Token\": \"\"},\\n \"data\": {\"Index\":%d, \"OrderBy\": 0, \"QueryString\": [], \"Size\": 1}}' % index}\n html = HtmlPostUtils.postHtml(url, data=data, type='PW')\n datalist = GGHtmlDealUtils.dealHtml(html)\n return datalist\n<|end_body_1|>\n", "class_docstring": "", "class_name": "GG", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GG:\n\n def startSpider(self):\n \"\"\"针对硅谷密探的爬虫 :return:\"\"\"\n <|body_0|>\n\n def GGListSpider(self, index):\n \"\"\"硅谷密探网爬虫 :param url: :return: List\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for i in range(1, 11):\n dataList = self.GGListSpider(i)\n BmobUtils.insertListBmob('GGBean', dataList)\n print('经过不懈的努力,开哥爬下了品玩科技第 %d 页' % i)\n<|end_body_0|>\n\n<|body_start_1|>\n url = 'http://www.svinsight.com/api?callback=jQuery111306490858093306913_%d' % (time.time() * 1000)\n data = {'id': '208', 'query': '{\"global\": {\"Loc\": 2, \"OS\": 2, \"Port\": 300, \"Sign\": \"\", \"Token\": \"\"},\\n \"data\": {\"Index\":%d, \"OrderBy\": 0, \"QueryString\": [], \"Size\": 1}}' % index}\n html = HtmlPostUtils.postHtml(url, data=data, type='PW')\n datalist = GGHtmlDealUtils.dealHtml(html)\n return datalist\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000143", "length_bytes": 1209, "license_type": "no_license", "methods": [{"docstring": "针对硅谷密探的爬虫 :return:", "name": "startSpider", "signature": "def startSpider(self)"}, {"docstring": "硅谷密探网爬虫 :param url: :return: List", "name": "GGListSpider", "signature": "def GGListSpider(self, index)"}], "n_methods": 2, "prompt": "Implement the Python class `GG` described below.\n\nClass description:\nImplement the GG class.\n\nMethod signatures and docstrings:\n- def startSpider(self): 针对硅谷密探的爬虫 :return:\n- def GGListSpider(self, index): 硅谷密探网爬虫 :param url: :return: List", "prompted_full_text": "Implement the Python class `GG` described below.\n\nClass description:\nImplement the GG class.\n\nMethod signatures and docstrings:\n- def startSpider(self): 针对硅谷密探的爬虫 :return:\n- def GGListSpider(self, index): 硅谷密探网爬虫 :param url: :return: List\n\n<|skeleton|>\nclass GG:\n\n def startSpider(self):\n \"\"\"针对硅谷密探的爬虫 :return:\"\"\"\n <|body_0|>\n\n def GGListSpider(self, index):\n \"\"\"硅谷密探网爬虫 :param url: :return: List\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for i in range(1, 11):\n dataList = self.GGListSpider(i)\n BmobUtils.insertListBmob('GGBean', dataList)\n 
print('经过不懈的努力,开哥爬下了品玩科技第 %d 页' % i)\n<|end_body_0|>\n\n<|body_start_1|>\n url = 'http://www.svinsight.com/api?callback=jQuery111306490858093306913_%d' % (time.time() * 1000)\n data = {'id': '208', 'query': '{\"global\": {\"Loc\": 2, \"OS\": 2, \"Port\": 300, \"Sign\": \"\", \"Token\": \"\"},\\n \"data\": {\"Index\":%d, \"OrderBy\": 0, \"QueryString\": [], \"Size\": 1}}' % index}\n html = HtmlPostUtils.postHtml(url, data=data, type='PW')\n datalist = GGHtmlDealUtils.dealHtml(html)\n return datalist\n<|end_body_1|>\n", "revision_id": "f278c568708c6138160e3c8112ed1c9152e1d662", "skeleton": "<|skeleton|>\nclass GG:\n\n def startSpider(self):\n \"\"\"针对硅谷密探的爬虫 :return:\"\"\"\n <|body_0|>\n\n def GGListSpider(self, index):\n \"\"\"硅谷密探网爬虫 :param url: :return: List\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class GG:\n def startSpider(self):\n \"\"\"针对硅谷密探的爬虫 :return:\"\"\"\n for i in range(1, 11):\n dataList = self.GGListSpider(i)\n BmobUtils.insertListBmob('GGBean', dataList)\n print('经过不懈的努力,开哥爬下了品玩科技第 %d 页' % i)\n\n def GGListSpider(self, index):\n \"\"\"硅谷密探网爬虫 :param url: :return: List\"\"\"\n url = 'http://www.svinsight.com/api?callback=jQuery111306490858093306913_%d' % (time.time() * 1000)\n data = {'id': '208', 'query': '{\"global\": {\"Loc\": 2, \"OS\": 2, \"Port\": 300, \"Sign\": \"\", \"Token\": \"\"},\\n \"data\": {\"Index\":%d, \"OrderBy\": 0, \"QueryString\": [], \"Size\": 1}}' % index}\n html = HtmlPostUtils.postHtml(url, data=data, type='PW')\n datalist = GGHtmlDealUtils.dealHtml(html)\n return datalist\n", "source": "the_stack_v2_python_sparse", "source_path": "Spiders/GGSpider.py", "source_repo": "mhgd3250905/CleanWaterSpiderOnPython", "split": "val", "star_events_count": 0}
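For non-Chinese readers: the docstrings above say "crawler for 硅谷密探 (SVInsight)", while the progress line '经过不懈的努力,开哥爬下了品玩科技第 %d 页' reads roughly "after persistent effort, page %d of PingWest Tech has been scraped", so the log message and the docstring name different sites even though the URL points at svinsight.com. A plain-requests sketch of the same POST, with the endpoint and payload copied verbatim from the record (whether the API is still live is unknown):

import time
import requests

def fetch_page(index: int) -> str:
    url = ('http://www.svinsight.com/api?callback=jQuery111306490858093306913_%d'
           % (time.time() * 1000))
    data = {'id': '208',
            'query': ('{"global": {"Loc": 2, "OS": 2, "Port": 300, "Sign": "", "Token": ""},\n'
                      ' "data": {"Index":%d, "OrderBy": 0, "QueryString": [], "Size": 1}}' % index)}
    return requests.post(url, data=data, timeout=10).text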
{"blob_id": "0b55e62496924826c9dd2de6550858f4ae3db4da", "bodies": ["dict_with_level = {}\nfor key in all_parameters.keys():\n split_number = key.rfind('_')\n keyword = key[split_number + 1:]\n if keyword in levels:\n code_name = key[:split_number]\n else:\n code_name = key\n keyword = 'common'\n if keyword not in dict_with_level.keys():\n dict_with_level[keyword] = {}\n dict_with_level[keyword][code_name] = all_parameters[key]\nreturn dict_with_level", "dict_with_sources = {}\nfor key in all_parameters.keys():\n code_source = key.replace('_table', '')\n code_source = code_source.replace('_columns_names', '')\n if code_source in sources:\n keyword = code_source\n else:\n keyword = 'common'\n if keyword not in dict_with_sources.keys():\n dict_with_sources[keyword] = {}\n dict_with_sources[keyword][key] = all_parameters[key]\nreturn dict_with_sources"], "bodies_text": "<|body_start_0|>\n dict_with_level = {}\n for key in all_parameters.keys():\n split_number = key.rfind('_')\n keyword = key[split_number + 1:]\n if keyword in levels:\n code_name = key[:split_number]\n else:\n code_name = key\n keyword = 'common'\n if keyword not in dict_with_level.keys():\n dict_with_level[keyword] = {}\n dict_with_level[keyword][code_name] = all_parameters[key]\n return dict_with_level\n<|end_body_0|>\n\n<|body_start_1|>\n dict_with_sources = {}\n for key in all_parameters.keys():\n code_source = key.replace('_table', '')\n code_source = code_source.replace('_columns_names', '')\n if code_source in sources:\n keyword = code_source\n else:\n keyword = 'common'\n if keyword not in dict_with_sources.keys():\n dict_with_sources[keyword] = {}\n dict_with_sources[keyword][key] = all_parameters[key]\n return dict_with_sources\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Parameters", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Parameters:\n\n def split(all_parameters: dict, levels: list) -> dict:\n \"\"\"Split parameter in different levels :param all_parameters: dict with all parameters with keys like key_levels :param levels: list with all the levels presents :return: dict with separated dicts\"\"\"\n <|body_0|>\n\n def split_by_sources(all_parameters: dict, sources: list) -> dict:\n \"\"\"Split parameter by different sources All keys should be source_table or source_column_names :param all_parameters: dict with all parameters with keys like source_keys :param sources: list with all levels present :return: dict with separated dicts\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n dict_with_level = {}\n for key in all_parameters.keys():\n split_number = key.rfind('_')\n keyword = key[split_number + 1:]\n if keyword in levels:\n code_name = key[:split_number]\n else:\n code_name = key\n keyword = 'common'\n if keyword not in dict_with_level.keys():\n dict_with_level[keyword] = {}\n dict_with_level[keyword][code_name] = all_parameters[key]\n return dict_with_level\n<|end_body_0|>\n\n<|body_start_1|>\n dict_with_sources = {}\n for key in all_parameters.keys():\n code_source = key.replace('_table', '')\n code_source = code_source.replace('_columns_names', '')\n if code_source in sources:\n keyword = code_source\n else:\n keyword = 'common'\n if keyword not in dict_with_sources.keys():\n dict_with_sources[keyword] = {}\n dict_with_sources[keyword][key] = all_parameters[key]\n return dict_with_sources\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000144", "length_bytes": 1954, "license_type": 
"permissive", "methods": [{"docstring": "Split parameter in different levels :param all_parameters: dict with all parameters with keys like key_levels :param levels: list with all the levels presents :return: dict with separated dicts", "name": "split", "signature": "def split(all_parameters: dict, levels: list) -> dict"}, {"docstring": "Split parameter by different sources All keys should be source_table or source_column_names :param all_parameters: dict with all parameters with keys like source_keys :param sources: list with all levels present :return: dict with separated dicts", "name": "split_by_sources", "signature": "def split_by_sources(all_parameters: dict, sources: list) -> dict"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_052488", "prompt": "Implement the Python class `Parameters` described below.\n\nClass description:\nImplement the Parameters class.\n\nMethod signatures and docstrings:\n- def split(all_parameters: dict, levels: list) -> dict: Split parameter in different levels :param all_parameters: dict with all parameters with keys like key_levels :param levels: list with all the levels presents :return: dict with separated dicts\n- def split_by_sources(all_parameters: dict, sources: list) -> dict: Split parameter by different sources All keys should be source_table or source_column_names :param all_parameters: dict with all parameters with keys like source_keys :param sources: list with all levels present :return: dict with separated dicts", "prompted_full_text": "Implement the Python class `Parameters` described below.\n\nClass description:\nImplement the Parameters class.\n\nMethod signatures and docstrings:\n- def split(all_parameters: dict, levels: list) -> dict: Split parameter in different levels :param all_parameters: dict with all parameters with keys like key_levels :param levels: list with all the levels presents :return: dict with separated dicts\n- def split_by_sources(all_parameters: dict, sources: list) -> dict: Split parameter by different sources All keys should be source_table or source_column_names :param all_parameters: dict with all parameters with keys like source_keys :param sources: list with all levels present :return: dict with separated dicts\n\n<|skeleton|>\nclass Parameters:\n\n def split(all_parameters: dict, levels: list) -> dict:\n \"\"\"Split parameter in different levels :param all_parameters: dict with all parameters with keys like key_levels :param levels: list with all the levels presents :return: dict with separated dicts\"\"\"\n <|body_0|>\n\n def split_by_sources(all_parameters: dict, sources: list) -> dict:\n \"\"\"Split parameter by different sources All keys should be source_table or source_column_names :param all_parameters: dict with all parameters with keys like source_keys :param sources: list with all levels present :return: dict with separated dicts\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n dict_with_level = {}\n for key in all_parameters.keys():\n split_number = key.rfind('_')\n keyword = key[split_number + 1:]\n if keyword in levels:\n code_name = key[:split_number]\n else:\n code_name = key\n keyword = 'common'\n if keyword not in dict_with_level.keys():\n dict_with_level[keyword] = {}\n dict_with_level[keyword][code_name] = all_parameters[key]\n return dict_with_level\n<|end_body_0|>\n\n<|body_start_1|>\n dict_with_sources = {}\n for key in all_parameters.keys():\n code_source = key.replace('_table', '')\n code_source = code_source.replace('_columns_names', '')\n if code_source 
in sources:\n keyword = code_source\n else:\n keyword = 'common'\n if keyword not in dict_with_sources.keys():\n dict_with_sources[keyword] = {}\n dict_with_sources[keyword][key] = all_parameters[key]\n return dict_with_sources\n<|end_body_1|>\n", "revision_id": "c98eb8c483a05af938a2f6f49d8ea803f5711572", "skeleton": "<|skeleton|>\nclass Parameters:\n\n def split(all_parameters: dict, levels: list) -> dict:\n \"\"\"Split parameter in different levels :param all_parameters: dict with all parameters with keys like key_levels :param levels: list with all the levels presents :return: dict with separated dicts\"\"\"\n <|body_0|>\n\n def split_by_sources(all_parameters: dict, sources: list) -> dict:\n \"\"\"Split parameter by different sources All keys should be source_table or source_column_names :param all_parameters: dict with all parameters with keys like source_keys :param sources: list with all levels present :return: dict with separated dicts\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Parameters:\n def split(all_parameters: dict, levels: list) -> dict:\n \"\"\"Split parameter in different levels :param all_parameters: dict with all parameters with keys like key_levels :param levels: list with all the levels presents :return: dict with separated dicts\"\"\"\n dict_with_level = {}\n for key in all_parameters.keys():\n split_number = key.rfind('_')\n keyword = key[split_number + 1:]\n if keyword in levels:\n code_name = key[:split_number]\n else:\n code_name = key\n keyword = 'common'\n if keyword not in dict_with_level.keys():\n dict_with_level[keyword] = {}\n dict_with_level[keyword][code_name] = all_parameters[key]\n return dict_with_level\n\n def split_by_sources(all_parameters: dict, sources: list) -> dict:\n \"\"\"Split parameter by different sources All keys should be source_table or source_column_names :param all_parameters: dict with all parameters with keys like source_keys :param sources: list with all levels present :return: dict with separated dicts\"\"\"\n dict_with_sources = {}\n for key in all_parameters.keys():\n code_source = key.replace('_table', '')\n code_source = code_source.replace('_columns_names', '')\n if code_source in sources:\n keyword = code_source\n else:\n keyword = 'common'\n if keyword not in dict_with_sources.keys():\n dict_with_sources[keyword] = {}\n dict_with_sources[keyword][key] = all_parameters[key]\n return dict_with_sources\n", "source": "the_stack_v2_python_sparse", "source_path": "engage-analytics/active_companies/src/workflows/configurations.py", "source_repo": "oliveriopt/mood-analytics", "split": "val", "star_events_count": 0}
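
Both methods in the Parameters record are written without a self parameter, so they behave as plain functions reached through the class object. A worked example of each with made-up inputs, assuming the class is defined exactly as in the record:

# split(): keys are grouped by their trailing _level token; keys whose suffix is
# not a known level fall through to 'common' with their full name left intact.
params = {'threshold_team': 0.5, 'threshold_user': 0.7, 'max_rows': 100}  # made-up inputs
by_level = Parameters.split(params, ['team', 'user'])
assert by_level == {
    'team': {'threshold': 0.5},
    'user': {'threshold': 0.7},
    'common': {'max_rows': 100},
}

# split_by_sources(): keys are matched to a source after stripping the _table /
# _columns_names suffixes; anything unmatched is grouped under 'common'.
cfg = {'users_table': 'users', 'users_columns_names': ['id'], 'api_key': 'k'}  # made-up inputs
by_source = Parameters.split_by_sources(cfg, ['users'])
assert by_source == {
    'users': {'users_table': 'users', 'users_columns_names': ['id']},
    'common': {'api_key': 'k'},
}
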
{"blob_id": "9302b01b38104213c8e092873a450fd19cf9d2c3", "bodies": ["super().__init__(**kwargs)\nself.alpha = alpha\nself.gamma = gamma\nself.label_smoothing = label_smoothing", "class_targets = y_true\nclass_outputs, mask = y_pred\nalpha = tf.convert_to_tensor(self.alpha, dtype=y_pred[0][0].dtype)\ngamma = tf.convert_to_tensor(self.gamma, dtype=y_pred[0][0].dtype)\ntotal_loss = 0\nfor i in range(len(class_targets)):\n mask_one = tf.cast(mask[i], dtype=tf.float32)\n class_targets_one = class_targets[i]\n class_outputs_one = class_outputs[i]\n normalizer = tf.reduce_sum(mask_one) / tf.cast(tf.shape(mask_one)[0], tf.float32)\n pred_prob = tf.sigmoid(class_outputs_one)\n p_t = class_targets_one * pred_prob + (1 - class_targets_one) * (1 - pred_prob)\n alpha_factor = class_targets_one * alpha + (1 - class_targets_one) * (1 - alpha)\n modulating_factor = (1.0 - p_t) ** gamma\n class_targets_one = class_targets_one * (1.0 - self.label_smoothing) + 0.5 * self.label_smoothing\n ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=class_targets_one, logits=class_outputs_one)\n total_loss += tf.reduce_sum(tf.math.divide_no_nan(alpha_factor * modulating_factor * ce, normalizer))\nreturn total_loss"], "bodies_text": "<|body_start_0|>\n super().__init__(**kwargs)\n self.alpha = alpha\n self.gamma = gamma\n self.label_smoothing = label_smoothing\n<|end_body_0|>\n\n<|body_start_1|>\n class_targets = y_true\n class_outputs, mask = y_pred\n alpha = tf.convert_to_tensor(self.alpha, dtype=y_pred[0][0].dtype)\n gamma = tf.convert_to_tensor(self.gamma, dtype=y_pred[0][0].dtype)\n total_loss = 0\n for i in range(len(class_targets)):\n mask_one = tf.cast(mask[i], dtype=tf.float32)\n class_targets_one = class_targets[i]\n class_outputs_one = class_outputs[i]\n normalizer = tf.reduce_sum(mask_one) / tf.cast(tf.shape(mask_one)[0], tf.float32)\n pred_prob = tf.sigmoid(class_outputs_one)\n p_t = class_targets_one * pred_prob + (1 - class_targets_one) * (1 - pred_prob)\n alpha_factor = class_targets_one * alpha + (1 - class_targets_one) * (1 - alpha)\n modulating_factor = (1.0 - p_t) ** gamma\n class_targets_one = class_targets_one * (1.0 - self.label_smoothing) + 0.5 * self.label_smoothing\n ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=class_targets_one, logits=class_outputs_one)\n total_loss += tf.reduce_sum(tf.math.divide_no_nan(alpha_factor * modulating_factor * ce, normalizer))\n return total_loss\n<|end_body_1|>\n", "class_docstring": "Compute the focal loss between `logits` and the golden `target` values. Focal loss = -(1-pt)^gamma * log(pt) where pt is the probability of being classified to the true class.", "class_name": "ClassFocalLoss", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ClassFocalLoss:\n \"\"\"Compute the focal loss between `logits` and the golden `target` values. Focal loss = -(1-pt)^gamma * log(pt) where pt is the probability of being classified to the true class.\"\"\"\n\n def __init__(self, alpha, gamma, label_smoothing=0.0, **kwargs):\n \"\"\"Initialize focal loss. Args: alpha: A float32 scalar multiplying alpha to the loss from positive examples and (1-alpha) to the loss from negative examples. gamma: A float32 scalar modulating loss from hard and easy examples. label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. **kwargs: other params.\"\"\"\n <|body_0|>\n\n def call(self, y_true, y_pred):\n \"\"\"Compute focal loss for y and y_pred. 
Args: y: A tuple of (normalizer, y_true), where y_true is the target class. y_pred: A float32 tensor [batch, height_in, width_in, num_predictions]. Returns: the focal loss.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(**kwargs)\n self.alpha = alpha\n self.gamma = gamma\n self.label_smoothing = label_smoothing\n<|end_body_0|>\n\n<|body_start_1|>\n class_targets = y_true\n class_outputs, mask = y_pred\n alpha = tf.convert_to_tensor(self.alpha, dtype=y_pred[0][0].dtype)\n gamma = tf.convert_to_tensor(self.gamma, dtype=y_pred[0][0].dtype)\n total_loss = 0\n for i in range(len(class_targets)):\n mask_one = tf.cast(mask[i], dtype=tf.float32)\n class_targets_one = class_targets[i]\n class_outputs_one = class_outputs[i]\n normalizer = tf.reduce_sum(mask_one) / tf.cast(tf.shape(mask_one)[0], tf.float32)\n pred_prob = tf.sigmoid(class_outputs_one)\n p_t = class_targets_one * pred_prob + (1 - class_targets_one) * (1 - pred_prob)\n alpha_factor = class_targets_one * alpha + (1 - class_targets_one) * (1 - alpha)\n modulating_factor = (1.0 - p_t) ** gamma\n class_targets_one = class_targets_one * (1.0 - self.label_smoothing) + 0.5 * self.label_smoothing\n ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=class_targets_one, logits=class_outputs_one)\n total_loss += tf.reduce_sum(tf.math.divide_no_nan(alpha_factor * modulating_factor * ce, normalizer))\n return total_loss\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000145", "length_bytes": 2607, "license_type": "no_license", "methods": [{"docstring": "Initialize focal loss. Args: alpha: A float32 scalar multiplying alpha to the loss from positive examples and (1-alpha) to the loss from negative examples. gamma: A float32 scalar modulating loss from hard and easy examples. label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. **kwargs: other params.", "name": "__init__", "signature": "def __init__(self, alpha, gamma, label_smoothing=0.0, **kwargs)"}, {"docstring": "Compute focal loss for y and y_pred. Args: y: A tuple of (normalizer, y_true), where y_true is the target class. y_pred: A float32 tensor [batch, height_in, width_in, num_predictions]. Returns: the focal loss.", "name": "call", "signature": "def call(self, y_true, y_pred)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_037777", "prompt": "Implement the Python class `ClassFocalLoss` described below.\n\nClass description:\nCompute the focal loss between `logits` and the golden `target` values. Focal loss = -(1-pt)^gamma * log(pt) where pt is the probability of being classified to the true class.\n\nMethod signatures and docstrings:\n- def __init__(self, alpha, gamma, label_smoothing=0.0, **kwargs): Initialize focal loss. Args: alpha: A float32 scalar multiplying alpha to the loss from positive examples and (1-alpha) to the loss from negative examples. gamma: A float32 scalar modulating loss from hard and easy examples. label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. **kwargs: other params.\n- def call(self, y_true, y_pred): Compute focal loss for y and y_pred. Args: y: A tuple of (normalizer, y_true), where y_true is the target class. y_pred: A float32 tensor [batch, height_in, width_in, num_predictions]. Returns: the focal loss.", "prompted_full_text": "Implement the Python class `ClassFocalLoss` described below.\n\nClass description:\nCompute the focal loss between `logits` and the golden `target` values. 
Focal loss = -(1-pt)^gamma * log(pt) where pt is the probability of being classified to the true class.\n\nMethod signatures and docstrings:\n- def __init__(self, alpha, gamma, label_smoothing=0.0, **kwargs): Initialize focal loss. Args: alpha: A float32 scalar multiplying alpha to the loss from positive examples and (1-alpha) to the loss from negative examples. gamma: A float32 scalar modulating loss from hard and easy examples. label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. **kwargs: other params.\n- def call(self, y_true, y_pred): Compute focal loss for y and y_pred. Args: y: A tuple of (normalizer, y_true), where y_true is the target class. y_pred: A float32 tensor [batch, height_in, width_in, num_predictions]. Returns: the focal loss.\n\n<|skeleton|>\nclass ClassFocalLoss:\n \"\"\"Compute the focal loss between `logits` and the golden `target` values. Focal loss = -(1-pt)^gamma * log(pt) where pt is the probability of being classified to the true class.\"\"\"\n\n def __init__(self, alpha, gamma, label_smoothing=0.0, **kwargs):\n \"\"\"Initialize focal loss. Args: alpha: A float32 scalar multiplying alpha to the loss from positive examples and (1-alpha) to the loss from negative examples. gamma: A float32 scalar modulating loss from hard and easy examples. label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. **kwargs: other params.\"\"\"\n <|body_0|>\n\n def call(self, y_true, y_pred):\n \"\"\"Compute focal loss for y and y_pred. Args: y: A tuple of (normalizer, y_true), where y_true is the target class. y_pred: A float32 tensor [batch, height_in, width_in, num_predictions]. Returns: the focal loss.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(**kwargs)\n self.alpha = alpha\n self.gamma = gamma\n self.label_smoothing = label_smoothing\n<|end_body_0|>\n\n<|body_start_1|>\n class_targets = y_true\n class_outputs, mask = y_pred\n alpha = tf.convert_to_tensor(self.alpha, dtype=y_pred[0][0].dtype)\n gamma = tf.convert_to_tensor(self.gamma, dtype=y_pred[0][0].dtype)\n total_loss = 0\n for i in range(len(class_targets)):\n mask_one = tf.cast(mask[i], dtype=tf.float32)\n class_targets_one = class_targets[i]\n class_outputs_one = class_outputs[i]\n normalizer = tf.reduce_sum(mask_one) / tf.cast(tf.shape(mask_one)[0], tf.float32)\n pred_prob = tf.sigmoid(class_outputs_one)\n p_t = class_targets_one * pred_prob + (1 - class_targets_one) * (1 - pred_prob)\n alpha_factor = class_targets_one * alpha + (1 - class_targets_one) * (1 - alpha)\n modulating_factor = (1.0 - p_t) ** gamma\n class_targets_one = class_targets_one * (1.0 - self.label_smoothing) + 0.5 * self.label_smoothing\n ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=class_targets_one, logits=class_outputs_one)\n total_loss += tf.reduce_sum(tf.math.divide_no_nan(alpha_factor * modulating_factor * ce, normalizer))\n return total_loss\n<|end_body_1|>\n", "revision_id": "b7549701b0b1a7e4cc2c8275df2bc6c7a3253d24", "skeleton": "<|skeleton|>\nclass ClassFocalLoss:\n \"\"\"Compute the focal loss between `logits` and the golden `target` values. Focal loss = -(1-pt)^gamma * log(pt) where pt is the probability of being classified to the true class.\"\"\"\n\n def __init__(self, alpha, gamma, label_smoothing=0.0, **kwargs):\n \"\"\"Initialize focal loss. Args: alpha: A float32 scalar multiplying alpha to the loss from positive examples and (1-alpha) to the loss from negative examples. gamma: A float32 scalar modulating loss from hard and easy examples. 
label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. **kwargs: other params.\"\"\"\n <|body_0|>\n\n def call(self, y_true, y_pred):\n \"\"\"Compute focal loss for y and y_pred. Args: y: A tuple of (normalizer, y_true), where y_true is the target class. y_pred: A float32 tensor [batch, height_in, width_in, num_predictions]. Returns: the focal loss.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ClassFocalLoss:\n \"\"\"Compute the focal loss between `logits` and the golden `target` values. Focal loss = -(1-pt)^gamma * log(pt) where pt is the probability of being classified to the true class.\"\"\"\n\n def __init__(self, alpha, gamma, label_smoothing=0.0, **kwargs):\n \"\"\"Initialize focal loss. Args: alpha: A float32 scalar multiplying alpha to the loss from positive examples and (1-alpha) to the loss from negative examples. gamma: A float32 scalar modulating loss from hard and easy examples. label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. **kwargs: other params.\"\"\"\n super().__init__(**kwargs)\n self.alpha = alpha\n self.gamma = gamma\n self.label_smoothing = label_smoothing\n\n def call(self, y_true, y_pred):\n \"\"\"Compute focal loss for y and y_pred. Args: y: A tuple of (normalizer, y_true), where y_true is the target class. y_pred: A float32 tensor [batch, height_in, width_in, num_predictions]. Returns: the focal loss.\"\"\"\n class_targets = y_true\n class_outputs, mask = y_pred\n alpha = tf.convert_to_tensor(self.alpha, dtype=y_pred[0][0].dtype)\n gamma = tf.convert_to_tensor(self.gamma, dtype=y_pred[0][0].dtype)\n total_loss = 0\n for i in range(len(class_targets)):\n mask_one = tf.cast(mask[i], dtype=tf.float32)\n class_targets_one = class_targets[i]\n class_outputs_one = class_outputs[i]\n normalizer = tf.reduce_sum(mask_one) / tf.cast(tf.shape(mask_one)[0], tf.float32)\n pred_prob = tf.sigmoid(class_outputs_one)\n p_t = class_targets_one * pred_prob + (1 - class_targets_one) * (1 - pred_prob)\n alpha_factor = class_targets_one * alpha + (1 - class_targets_one) * (1 - alpha)\n modulating_factor = (1.0 - p_t) ** gamma\n class_targets_one = class_targets_one * (1.0 - self.label_smoothing) + 0.5 * self.label_smoothing\n ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=class_targets_one, logits=class_outputs_one)\n total_loss += tf.reduce_sum(tf.math.divide_no_nan(alpha_factor * modulating_factor * ce, normalizer))\n return total_loss\n", "source": "the_stack_v2_python_sparse", "source_path": "AIServer/ai_api/ai_models/losses/class_loss.py", "source_repo": "tfwcn/tensorflow2-machine-vision", "split": "val", "star_events_count": 1}
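
To make the arithmetic in ClassFocalLoss.call concrete, here is the same per-logit formula in plain Python, so it runs without TensorFlow; label smoothing and the mask/normalizer bookkeeping are omitted, and the input values are arbitrary. For either hard 0/1 target the ce term reduces to -log(p_t), which is what tf.nn.sigmoid_cross_entropy_with_logits computes.

import math

def sigmoid(x):
    return 1.0 / (1.0 + math.exp(-x))

def focal_loss_single(logit, target, alpha=0.25, gamma=2.0):
    p = sigmoid(logit)
    p_t = target * p + (1 - target) * (1 - p)              # probability of the true class
    alpha_t = target * alpha + (1 - target) * (1 - alpha)
    ce = -math.log(p_t)                                    # binary cross-entropy
    return alpha_t * (1.0 - p_t) ** gamma * ce             # (1-p_t)^gamma down-weights easy cases

# An easy positive (large logit) contributes almost nothing; a hard one dominates:
print(focal_loss_single(3.0, 1))   # ~3e-05
print(focal_loss_single(-1.0, 1))  # ~0.18
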
{"blob_id": "297b10098f7991d86bdcf33a1d8d50962b6e152c", "bodies": ["self.Wf = np.random.normal(size=(h + i, h))\nself.Wu = np.random.normal(size=(h + i, h))\nself.Wc = np.random.normal(size=(h + i, h))\nself.Wo = np.random.normal(size=(h + i, h))\nself.Wy = np.random.normal(size=(h, o))\nself.bf = np.zeros((1, h))\nself.bu = np.zeros((1, h))\nself.bc = np.zeros((1, h))\nself.bo = np.zeros((1, h))\nself.by = np.zeros((1, o))", "x_max = np.max(x, axis=1, keepdims=True)\ne_x = np.exp(x - x_max)\nreturn e_x / np.sum(e_x, axis=1, keepdims=True)", "matrix = np.concatenate((h_prev, x_t), axis=1)\nu_t = sigmoid(np.matmul(matrix, self.Wu) + self.bu)\nf_t = sigmoid(np.matmul(matrix, self.Wf) + self.bf)\no_t = sigmoid(np.matmul(matrix, self.Wo) + self.bo)\nprime_c = np.tanh(np.matmul(matrix, self.Wc) + self.bc)\nc_next = f_t * c_prev + u_t * prime_c\nh_next = o_t * np.tanh(c_next)\ny = self.softmax(np.matmul(h_next, self.Wy) + self.by)\nreturn (h_next, c_next, y)"], "bodies_text": "<|body_start_0|>\n self.Wf = np.random.normal(size=(h + i, h))\n self.Wu = np.random.normal(size=(h + i, h))\n self.Wc = np.random.normal(size=(h + i, h))\n self.Wo = np.random.normal(size=(h + i, h))\n self.Wy = np.random.normal(size=(h, o))\n self.bf = np.zeros((1, h))\n self.bu = np.zeros((1, h))\n self.bc = np.zeros((1, h))\n self.bo = np.zeros((1, h))\n self.by = np.zeros((1, o))\n<|end_body_0|>\n\n<|body_start_1|>\n x_max = np.max(x, axis=1, keepdims=True)\n e_x = np.exp(x - x_max)\n return e_x / np.sum(e_x, axis=1, keepdims=True)\n<|end_body_1|>\n\n<|body_start_2|>\n matrix = np.concatenate((h_prev, x_t), axis=1)\n u_t = sigmoid(np.matmul(matrix, self.Wu) + self.bu)\n f_t = sigmoid(np.matmul(matrix, self.Wf) + self.bf)\n o_t = sigmoid(np.matmul(matrix, self.Wo) + self.bo)\n prime_c = np.tanh(np.matmul(matrix, self.Wc) + self.bc)\n c_next = f_t * c_prev + u_t * prime_c\n h_next = o_t * np.tanh(c_next)\n y = self.softmax(np.matmul(h_next, self.Wy) + self.by)\n return (h_next, c_next, y)\n<|end_body_2|>\n", "class_docstring": "Class LSTMCell that represents an LSTM unit", "class_name": "LSTMCell", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LSTMCell:\n \"\"\"Class LSTMCell that represents an LSTM unit\"\"\"\n\n def __init__(self, i, h, o):\n \"\"\"class constructor Argumetns: - i is the dimensionality of the data - h is the dimensionality of the hidden state - o is the dimensionality of the outputs Public instance attributes Wf, Wu, Wc, Wo, Wy, bf, bu, bc, bo, by that represent the weights and biases of the cell - Wf and bf are for the forget gate - Wu and bu are for the update gate - Wc and bc are for the intermediate cell state - Wo and bo are for the output gate - Wy and by are for the outputs\"\"\"\n <|body_0|>\n\n def softmax(self, x):\n \"\"\"Function to compute softmax values for each sets of scores in x\"\"\"\n <|body_1|>\n\n def forward(self, h_prev, c_prev, x_t):\n \"\"\"Public instance method that performs forward propagation for one time step Arguments: - x_t is a numpy.ndarray of shape (m, i) that contains the data input for the cell * m is the batche size for the data - h_prev is a numpy.ndarray of shape (m, h) containing the previous hidden state - c_prev is a numpy.ndarray of shape (m, h) containing the previous cell state Returns: h_next, c_next, y - h_next is the next hidden state - c_next is the next cell state - y is the output of the cell\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.Wf = 
np.random.normal(size=(h + i, h))\n self.Wu = np.random.normal(size=(h + i, h))\n self.Wc = np.random.normal(size=(h + i, h))\n self.Wo = np.random.normal(size=(h + i, h))\n self.Wy = np.random.normal(size=(h, o))\n self.bf = np.zeros((1, h))\n self.bu = np.zeros((1, h))\n self.bc = np.zeros((1, h))\n self.bo = np.zeros((1, h))\n self.by = np.zeros((1, o))\n<|end_body_0|>\n\n<|body_start_1|>\n x_max = np.max(x, axis=1, keepdims=True)\n e_x = np.exp(x - x_max)\n return e_x / np.sum(e_x, axis=1, keepdims=True)\n<|end_body_1|>\n\n<|body_start_2|>\n matrix = np.concatenate((h_prev, x_t), axis=1)\n u_t = sigmoid(np.matmul(matrix, self.Wu) + self.bu)\n f_t = sigmoid(np.matmul(matrix, self.Wf) + self.bf)\n o_t = sigmoid(np.matmul(matrix, self.Wo) + self.bo)\n prime_c = np.tanh(np.matmul(matrix, self.Wc) + self.bc)\n c_next = f_t * c_prev + u_t * prime_c\n h_next = o_t * np.tanh(c_next)\n y = self.softmax(np.matmul(h_next, self.Wy) + self.by)\n return (h_next, c_next, y)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000146", "length_bytes": 2868, "license_type": "no_license", "methods": [{"docstring": "class constructor Argumetns: - i is the dimensionality of the data - h is the dimensionality of the hidden state - o is the dimensionality of the outputs Public instance attributes Wf, Wu, Wc, Wo, Wy, bf, bu, bc, bo, by that represent the weights and biases of the cell - Wf and bf are for the forget gate - Wu and bu are for the update gate - Wc and bc are for the intermediate cell state - Wo and bo are for the output gate - Wy and by are for the outputs", "name": "__init__", "signature": "def __init__(self, i, h, o)"}, {"docstring": "Function to compute softmax values for each sets of scores in x", "name": "softmax", "signature": "def softmax(self, x)"}, {"docstring": "Public instance method that performs forward propagation for one time step Arguments: - x_t is a numpy.ndarray of shape (m, i) that contains the data input for the cell * m is the batche size for the data - h_prev is a numpy.ndarray of shape (m, h) containing the previous hidden state - c_prev is a numpy.ndarray of shape (m, h) containing the previous cell state Returns: h_next, c_next, y - h_next is the next hidden state - c_next is the next cell state - y is the output of the cell", "name": "forward", "signature": "def forward(self, h_prev, c_prev, x_t)"}], "n_methods": 3, "prompt": "Implement the Python class `LSTMCell` described below.\n\nClass description:\nClass LSTMCell that represents an LSTM unit\n\nMethod signatures and docstrings:\n- def __init__(self, i, h, o): class constructor Argumetns: - i is the dimensionality of the data - h is the dimensionality of the hidden state - o is the dimensionality of the outputs Public instance attributes Wf, Wu, Wc, Wo, Wy, bf, bu, bc, bo, by that represent the weights and biases of the cell - Wf and bf are for the forget gate - Wu and bu are for the update gate - Wc and bc are for the intermediate cell state - Wo and bo are for the output gate - Wy and by are for the outputs\n- def softmax(self, x): Function to compute softmax values for each sets of scores in x\n- def forward(self, h_prev, c_prev, x_t): Public instance method that performs forward propagation for one time step Arguments: - x_t is a numpy.ndarray of shape (m, i) that contains the data input for the cell * m is the batche size for the data - h_prev is a numpy.ndarray of shape (m, h) containing the previous hidden state - c_prev is a numpy.ndarray of shape (m, h) containing the previous cell state Returns: 
h_next, c_next, y - h_next is the next hidden state - c_next is the next cell state - y is the output of the cell", "prompted_full_text": "Implement the Python class `LSTMCell` described below.\n\nClass description:\nClass LSTMCell that represents an LSTM unit\n\nMethod signatures and docstrings:\n- def __init__(self, i, h, o): class constructor Argumetns: - i is the dimensionality of the data - h is the dimensionality of the hidden state - o is the dimensionality of the outputs Public instance attributes Wf, Wu, Wc, Wo, Wy, bf, bu, bc, bo, by that represent the weights and biases of the cell - Wf and bf are for the forget gate - Wu and bu are for the update gate - Wc and bc are for the intermediate cell state - Wo and bo are for the output gate - Wy and by are for the outputs\n- def softmax(self, x): Function to compute softmax values for each sets of scores in x\n- def forward(self, h_prev, c_prev, x_t): Public instance method that performs forward propagation for one time step Arguments: - x_t is a numpy.ndarray of shape (m, i) that contains the data input for the cell * m is the batche size for the data - h_prev is a numpy.ndarray of shape (m, h) containing the previous hidden state - c_prev is a numpy.ndarray of shape (m, h) containing the previous cell state Returns: h_next, c_next, y - h_next is the next hidden state - c_next is the next cell state - y is the output of the cell\n\n<|skeleton|>\nclass LSTMCell:\n \"\"\"Class LSTMCell that represents an LSTM unit\"\"\"\n\n def __init__(self, i, h, o):\n \"\"\"class constructor Argumetns: - i is the dimensionality of the data - h is the dimensionality of the hidden state - o is the dimensionality of the outputs Public instance attributes Wf, Wu, Wc, Wo, Wy, bf, bu, bc, bo, by that represent the weights and biases of the cell - Wf and bf are for the forget gate - Wu and bu are for the update gate - Wc and bc are for the intermediate cell state - Wo and bo are for the output gate - Wy and by are for the outputs\"\"\"\n <|body_0|>\n\n def softmax(self, x):\n \"\"\"Function to compute softmax values for each sets of scores in x\"\"\"\n <|body_1|>\n\n def forward(self, h_prev, c_prev, x_t):\n \"\"\"Public instance method that performs forward propagation for one time step Arguments: - x_t is a numpy.ndarray of shape (m, i) that contains the data input for the cell * m is the batche size for the data - h_prev is a numpy.ndarray of shape (m, h) containing the previous hidden state - c_prev is a numpy.ndarray of shape (m, h) containing the previous cell state Returns: h_next, c_next, y - h_next is the next hidden state - c_next is the next cell state - y is the output of the cell\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.Wf = np.random.normal(size=(h + i, h))\n self.Wu = np.random.normal(size=(h + i, h))\n self.Wc = np.random.normal(size=(h + i, h))\n self.Wo = np.random.normal(size=(h + i, h))\n self.Wy = np.random.normal(size=(h, o))\n self.bf = np.zeros((1, h))\n self.bu = np.zeros((1, h))\n self.bc = np.zeros((1, h))\n self.bo = np.zeros((1, h))\n self.by = np.zeros((1, o))\n<|end_body_0|>\n\n<|body_start_1|>\n x_max = np.max(x, axis=1, keepdims=True)\n e_x = np.exp(x - x_max)\n return e_x / np.sum(e_x, axis=1, keepdims=True)\n<|end_body_1|>\n\n<|body_start_2|>\n matrix = np.concatenate((h_prev, x_t), axis=1)\n u_t = sigmoid(np.matmul(matrix, self.Wu) + self.bu)\n f_t = sigmoid(np.matmul(matrix, self.Wf) + self.bf)\n o_t = sigmoid(np.matmul(matrix, self.Wo) + self.bo)\n prime_c = np.tanh(np.matmul(matrix, self.Wc) + 
self.bc)\n c_next = f_t * c_prev + u_t * prime_c\n h_next = o_t * np.tanh(c_next)\n y = self.softmax(np.matmul(h_next, self.Wy) + self.by)\n return (h_next, c_next, y)\n<|end_body_2|>\n", "revision_id": "fc2cec306961f7ca2448965ddd3a2f656bbe10c7", "skeleton": "<|skeleton|>\nclass LSTMCell:\n \"\"\"Class LSTMCell that represents an LSTM unit\"\"\"\n\n def __init__(self, i, h, o):\n \"\"\"class constructor Argumetns: - i is the dimensionality of the data - h is the dimensionality of the hidden state - o is the dimensionality of the outputs Public instance attributes Wf, Wu, Wc, Wo, Wy, bf, bu, bc, bo, by that represent the weights and biases of the cell - Wf and bf are for the forget gate - Wu and bu are for the update gate - Wc and bc are for the intermediate cell state - Wo and bo are for the output gate - Wy and by are for the outputs\"\"\"\n <|body_0|>\n\n def softmax(self, x):\n \"\"\"Function to compute softmax values for each sets of scores in x\"\"\"\n <|body_1|>\n\n def forward(self, h_prev, c_prev, x_t):\n \"\"\"Public instance method that performs forward propagation for one time step Arguments: - x_t is a numpy.ndarray of shape (m, i) that contains the data input for the cell * m is the batche size for the data - h_prev is a numpy.ndarray of shape (m, h) containing the previous hidden state - c_prev is a numpy.ndarray of shape (m, h) containing the previous cell state Returns: h_next, c_next, y - h_next is the next hidden state - c_next is the next cell state - y is the output of the cell\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class LSTMCell:\n \"\"\"Class LSTMCell that represents an LSTM unit\"\"\"\n\n def __init__(self, i, h, o):\n \"\"\"class constructor Argumetns: - i is the dimensionality of the data - h is the dimensionality of the hidden state - o is the dimensionality of the outputs Public instance attributes Wf, Wu, Wc, Wo, Wy, bf, bu, bc, bo, by that represent the weights and biases of the cell - Wf and bf are for the forget gate - Wu and bu are for the update gate - Wc and bc are for the intermediate cell state - Wo and bo are for the output gate - Wy and by are for the outputs\"\"\"\n self.Wf = np.random.normal(size=(h + i, h))\n self.Wu = np.random.normal(size=(h + i, h))\n self.Wc = np.random.normal(size=(h + i, h))\n self.Wo = np.random.normal(size=(h + i, h))\n self.Wy = np.random.normal(size=(h, o))\n self.bf = np.zeros((1, h))\n self.bu = np.zeros((1, h))\n self.bc = np.zeros((1, h))\n self.bo = np.zeros((1, h))\n self.by = np.zeros((1, o))\n\n def softmax(self, x):\n \"\"\"Function to compute softmax values for each sets of scores in x\"\"\"\n x_max = np.max(x, axis=1, keepdims=True)\n e_x = np.exp(x - x_max)\n return e_x / np.sum(e_x, axis=1, keepdims=True)\n\n def forward(self, h_prev, c_prev, x_t):\n \"\"\"Public instance method that performs forward propagation for one time step Arguments: - x_t is a numpy.ndarray of shape (m, i) that contains the data input for the cell * m is the batche size for the data - h_prev is a numpy.ndarray of shape (m, h) containing the previous hidden state - c_prev is a numpy.ndarray of shape (m, h) containing the previous cell state Returns: h_next, c_next, y - h_next is the next hidden state - c_next is the next cell state - y is the output of the cell\"\"\"\n matrix = np.concatenate((h_prev, x_t), axis=1)\n u_t = sigmoid(np.matmul(matrix, self.Wu) + self.bu)\n f_t = 
sigmoid(np.matmul(matrix, self.Wf) + self.bf)\n o_t = sigmoid(np.matmul(matrix, self.Wo) + self.bo)\n prime_c = np.tanh(np.matmul(matrix, self.Wc) + self.bc)\n c_next = f_t * c_prev + u_t * prime_c\n h_next = o_t * np.tanh(c_next)\n y = self.softmax(np.matmul(h_next, self.Wy) + self.by)\n return (h_next, c_next, y)\n", "source": "the_stack_v2_python_sparse", "source_path": "supervised_learning/0x0D-RNNs/3-lstm_cell.py", "source_repo": "dalexach/holbertonschool-machine_learning", "split": "val", "star_events_count": 2}
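
The LSTMCell bodies call a module-level sigmoid that the record never defines; one plausible definition is supplied below, followed by a shape-checking usage sketch (batch of 8 samples, 10 input features, 15 hidden units, 5 output classes), assuming the class itself is defined exactly as in the record.

import numpy as np

def sigmoid(x):
    # Assumed helper; the record's forward() relies on it but does not define it.
    return 1.0 / (1.0 + np.exp(-x))

cell = LSTMCell(i=10, h=15, o=5)
h_prev = np.zeros((8, 15))
c_prev = np.zeros((8, 15))
x_t = np.random.normal(size=(8, 10))

h_next, c_next, y = cell.forward(h_prev, c_prev, x_t)
print(h_next.shape, c_next.shape, y.shape)  # (8, 15) (8, 15) (8, 5)
assert np.allclose(y.sum(axis=1), 1.0)      # softmax output: each row sums to 1
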
{"blob_id": "12a0fdae3314c0cf443252f3556a8781433a6af4", "bodies": ["self.lock: threading.RLock = threading.RLock()\nself.queue: list[Event] = []\nself.eventnum: int = 0\nself.timer: Optional[Timer] = None\nself.running: bool = False\nself.start: Optional[float] = None", "schedule = False\nwhile True:\n with self.lock:\n if not self.running or not self.queue:\n break\n now = time.monotonic()\n if self.queue[0].time > now:\n schedule = True\n break\n event = heapq.heappop(self.queue)\n if event.time > now:\n raise ValueError('invalid event time: %s > %s', event.time, now)\n event.run()\nwith self.lock:\n self.timer = None\n if schedule:\n self._schedule_event()", "with self.lock:\n if not self.running:\n raise ValueError('scheduling event while not running')\n if not self.queue:\n return\n delay = self.queue[0].time - time.monotonic()\n if self.timer:\n raise ValueError('timer was already set')\n self.timer = Timer(delay, self._run_events)\n self.timer.daemon = True\n self.timer.start()", "with self.lock:\n if self.running:\n return\n self.running = True\n self.start = time.monotonic()\n for event in self.queue:\n event.time += self.start\n self._schedule_event()", "with self.lock:\n if not self.running:\n return\n self.queue = []\n self.eventnum = 0\n if self.timer is not None:\n self.timer.cancel()\n self.timer = None\n self.running = False\n self.start = None", "with self.lock:\n eventnum = self.eventnum\n self.eventnum += 1\n evtime = float(delaysec)\n if self.running:\n evtime += time.monotonic()\n event = Event(eventnum, evtime, func, *args, **kwds)\n if self.queue:\n prevhead = self.queue[0]\n else:\n prevhead = None\n heapq.heappush(self.queue, event)\n head = self.queue[0]\n if prevhead is not None and prevhead != head:\n if self.timer is not None and self.timer.cancel():\n self.timer = None\n if self.running and self.timer is None:\n self._schedule_event()\nreturn event"], "bodies_text": "<|body_start_0|>\n self.lock: threading.RLock = threading.RLock()\n self.queue: list[Event] = []\n self.eventnum: int = 0\n self.timer: Optional[Timer] = None\n self.running: bool = False\n self.start: Optional[float] = None\n<|end_body_0|>\n\n<|body_start_1|>\n schedule = False\n while True:\n with self.lock:\n if not self.running or not self.queue:\n break\n now = time.monotonic()\n if self.queue[0].time > now:\n schedule = True\n break\n event = heapq.heappop(self.queue)\n if event.time > now:\n raise ValueError('invalid event time: %s > %s', event.time, now)\n event.run()\n with self.lock:\n self.timer = None\n if schedule:\n self._schedule_event()\n<|end_body_1|>\n\n<|body_start_2|>\n with self.lock:\n if not self.running:\n raise ValueError('scheduling event while not running')\n if not self.queue:\n return\n delay = self.queue[0].time - time.monotonic()\n if self.timer:\n raise ValueError('timer was already set')\n self.timer = Timer(delay, self._run_events)\n self.timer.daemon = True\n self.timer.start()\n<|end_body_2|>\n\n<|body_start_3|>\n with self.lock:\n if self.running:\n return\n self.running = True\n self.start = time.monotonic()\n for event in self.queue:\n event.time += self.start\n self._schedule_event()\n<|end_body_3|>\n\n<|body_start_4|>\n with self.lock:\n if not self.running:\n return\n self.queue = []\n self.eventnum = 0\n if self.timer is not None:\n self.timer.cancel()\n self.timer = None\n self.running = False\n self.start = None\n<|end_body_4|>\n\n<|body_start_5|>\n with self.lock:\n eventnum = self.eventnum\n self.eventnum += 1\n evtime = float(delaysec)\n if 
self.running:\n evtime += time.monotonic()\n event = Event(eventnum, evtime, func, *args, **kwds)\n if self.queue:\n prevhead = self.queue[0]\n else:\n prevhead = None\n heapq.heappush(self.queue, event)\n head = self.queue[0]\n if prevhead is not None and prevhead != head:\n if self.timer is not None and self.timer.cancel():\n self.timer = None\n if self.running and self.timer is None:\n self._schedule_event()\n return event\n<|end_body_5|>\n", "class_docstring": "Provides an event loop for running events.", "class_name": "EventLoop", "detected_licenses": ["BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass EventLoop:\n \"\"\"Provides an event loop for running events.\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Creates a EventLoop instance.\"\"\"\n <|body_0|>\n\n def _run_events(self) -> None:\n \"\"\"Run events. :return: nothing\"\"\"\n <|body_1|>\n\n def _schedule_event(self) -> None:\n \"\"\"Schedule event. :return: nothing\"\"\"\n <|body_2|>\n\n def run(self) -> None:\n \"\"\"Start event loop. :return: nothing\"\"\"\n <|body_3|>\n\n def stop(self) -> None:\n \"\"\"Stop event loop. :return: nothing\"\"\"\n <|body_4|>\n\n def add_event(self, delaysec: float, func: Callable, *args: Any, **kwds: Any):\n \"\"\"Add an event to the event loop. :param delaysec: delay in seconds for event :param func: event function :param args: event arguments :param kwds: event keyword arguments :return: created event\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.lock: threading.RLock = threading.RLock()\n self.queue: list[Event] = []\n self.eventnum: int = 0\n self.timer: Optional[Timer] = None\n self.running: bool = False\n self.start: Optional[float] = None\n<|end_body_0|>\n\n<|body_start_1|>\n schedule = False\n while True:\n with self.lock:\n if not self.running or not self.queue:\n break\n now = time.monotonic()\n if self.queue[0].time > now:\n schedule = True\n break\n event = heapq.heappop(self.queue)\n if event.time > now:\n raise ValueError('invalid event time: %s > %s', event.time, now)\n event.run()\n with self.lock:\n self.timer = None\n if schedule:\n self._schedule_event()\n<|end_body_1|>\n\n<|body_start_2|>\n with self.lock:\n if not self.running:\n raise ValueError('scheduling event while not running')\n if not self.queue:\n return\n delay = self.queue[0].time - time.monotonic()\n if self.timer:\n raise ValueError('timer was already set')\n self.timer = Timer(delay, self._run_events)\n self.timer.daemon = True\n self.timer.start()\n<|end_body_2|>\n\n<|body_start_3|>\n with self.lock:\n if self.running:\n return\n self.running = True\n self.start = time.monotonic()\n for event in self.queue:\n event.time += self.start\n self._schedule_event()\n<|end_body_3|>\n\n<|body_start_4|>\n with self.lock:\n if not self.running:\n return\n self.queue = []\n self.eventnum = 0\n if self.timer is not None:\n self.timer.cancel()\n self.timer = None\n self.running = False\n self.start = None\n<|end_body_4|>\n\n<|body_start_5|>\n with self.lock:\n eventnum = self.eventnum\n self.eventnum += 1\n evtime = float(delaysec)\n if self.running:\n evtime += time.monotonic()\n event = Event(eventnum, evtime, func, *args, **kwds)\n if self.queue:\n prevhead = self.queue[0]\n else:\n prevhead = None\n heapq.heappush(self.queue, event)\n head = self.queue[0]\n if prevhead is not None and prevhead != head:\n if self.timer is not None and self.timer.cancel():\n self.timer = None\n if self.running and self.timer is None:\n self._schedule_event()\n 
return event\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000147", "length_bytes": 6786, "license_type": "permissive", "methods": [{"docstring": "Creates a EventLoop instance.", "name": "__init__", "signature": "def __init__(self) -> None"}, {"docstring": "Run events. :return: nothing", "name": "_run_events", "signature": "def _run_events(self) -> None"}, {"docstring": "Schedule event. :return: nothing", "name": "_schedule_event", "signature": "def _schedule_event(self) -> None"}, {"docstring": "Start event loop. :return: nothing", "name": "run", "signature": "def run(self) -> None"}, {"docstring": "Stop event loop. :return: nothing", "name": "stop", "signature": "def stop(self) -> None"}, {"docstring": "Add an event to the event loop. :param delaysec: delay in seconds for event :param func: event function :param args: event arguments :param kwds: event keyword arguments :return: created event", "name": "add_event", "signature": "def add_event(self, delaysec: float, func: Callable, *args: Any, **kwds: Any)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_train_053955", "prompt": "Implement the Python class `EventLoop` described below.\n\nClass description:\nProvides an event loop for running events.\n\nMethod signatures and docstrings:\n- def __init__(self) -> None: Creates a EventLoop instance.\n- def _run_events(self) -> None: Run events. :return: nothing\n- def _schedule_event(self) -> None: Schedule event. :return: nothing\n- def run(self) -> None: Start event loop. :return: nothing\n- def stop(self) -> None: Stop event loop. :return: nothing\n- def add_event(self, delaysec: float, func: Callable, *args: Any, **kwds: Any): Add an event to the event loop. :param delaysec: delay in seconds for event :param func: event function :param args: event arguments :param kwds: event keyword arguments :return: created event", "prompted_full_text": "Implement the Python class `EventLoop` described below.\n\nClass description:\nProvides an event loop for running events.\n\nMethod signatures and docstrings:\n- def __init__(self) -> None: Creates a EventLoop instance.\n- def _run_events(self) -> None: Run events. :return: nothing\n- def _schedule_event(self) -> None: Schedule event. :return: nothing\n- def run(self) -> None: Start event loop. :return: nothing\n- def stop(self) -> None: Stop event loop. :return: nothing\n- def add_event(self, delaysec: float, func: Callable, *args: Any, **kwds: Any): Add an event to the event loop. :param delaysec: delay in seconds for event :param func: event function :param args: event arguments :param kwds: event keyword arguments :return: created event\n\n<|skeleton|>\nclass EventLoop:\n \"\"\"Provides an event loop for running events.\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Creates a EventLoop instance.\"\"\"\n <|body_0|>\n\n def _run_events(self) -> None:\n \"\"\"Run events. :return: nothing\"\"\"\n <|body_1|>\n\n def _schedule_event(self) -> None:\n \"\"\"Schedule event. :return: nothing\"\"\"\n <|body_2|>\n\n def run(self) -> None:\n \"\"\"Start event loop. :return: nothing\"\"\"\n <|body_3|>\n\n def stop(self) -> None:\n \"\"\"Stop event loop. :return: nothing\"\"\"\n <|body_4|>\n\n def add_event(self, delaysec: float, func: Callable, *args: Any, **kwds: Any):\n \"\"\"Add an event to the event loop. 
:param delaysec: delay in seconds for event :param func: event function :param args: event arguments :param kwds: event keyword arguments :return: created event\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.lock: threading.RLock = threading.RLock()\n self.queue: list[Event] = []\n self.eventnum: int = 0\n self.timer: Optional[Timer] = None\n self.running: bool = False\n self.start: Optional[float] = None\n<|end_body_0|>\n\n<|body_start_1|>\n schedule = False\n while True:\n with self.lock:\n if not self.running or not self.queue:\n break\n now = time.monotonic()\n if self.queue[0].time > now:\n schedule = True\n break\n event = heapq.heappop(self.queue)\n if event.time > now:\n raise ValueError('invalid event time: %s > %s', event.time, now)\n event.run()\n with self.lock:\n self.timer = None\n if schedule:\n self._schedule_event()\n<|end_body_1|>\n\n<|body_start_2|>\n with self.lock:\n if not self.running:\n raise ValueError('scheduling event while not running')\n if not self.queue:\n return\n delay = self.queue[0].time - time.monotonic()\n if self.timer:\n raise ValueError('timer was already set')\n self.timer = Timer(delay, self._run_events)\n self.timer.daemon = True\n self.timer.start()\n<|end_body_2|>\n\n<|body_start_3|>\n with self.lock:\n if self.running:\n return\n self.running = True\n self.start = time.monotonic()\n for event in self.queue:\n event.time += self.start\n self._schedule_event()\n<|end_body_3|>\n\n<|body_start_4|>\n with self.lock:\n if not self.running:\n return\n self.queue = []\n self.eventnum = 0\n if self.timer is not None:\n self.timer.cancel()\n self.timer = None\n self.running = False\n self.start = None\n<|end_body_4|>\n\n<|body_start_5|>\n with self.lock:\n eventnum = self.eventnum\n self.eventnum += 1\n evtime = float(delaysec)\n if self.running:\n evtime += time.monotonic()\n event = Event(eventnum, evtime, func, *args, **kwds)\n if self.queue:\n prevhead = self.queue[0]\n else:\n prevhead = None\n heapq.heappush(self.queue, event)\n head = self.queue[0]\n if prevhead is not None and prevhead != head:\n if self.timer is not None and self.timer.cancel():\n self.timer = None\n if self.running and self.timer is None:\n self._schedule_event()\n return event\n<|end_body_5|>\n", "revision_id": "20071eed2e73a2287aa385698dd604f4933ae7ff", "skeleton": "<|skeleton|>\nclass EventLoop:\n \"\"\"Provides an event loop for running events.\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Creates a EventLoop instance.\"\"\"\n <|body_0|>\n\n def _run_events(self) -> None:\n \"\"\"Run events. :return: nothing\"\"\"\n <|body_1|>\n\n def _schedule_event(self) -> None:\n \"\"\"Schedule event. :return: nothing\"\"\"\n <|body_2|>\n\n def run(self) -> None:\n \"\"\"Start event loop. :return: nothing\"\"\"\n <|body_3|>\n\n def stop(self) -> None:\n \"\"\"Stop event loop. :return: nothing\"\"\"\n <|body_4|>\n\n def add_event(self, delaysec: float, func: Callable, *args: Any, **kwds: Any):\n \"\"\"Add an event to the event loop. 
:param delaysec: delay in seconds for event :param func: event function :param args: event arguments :param kwds: event keyword arguments :return: created event\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class EventLoop:\n \"\"\"Provides an event loop for running events.\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Creates a EventLoop instance.\"\"\"\n self.lock: threading.RLock = threading.RLock()\n self.queue: list[Event] = []\n self.eventnum: int = 0\n self.timer: Optional[Timer] = None\n self.running: bool = False\n self.start: Optional[float] = None\n\n def _run_events(self) -> None:\n \"\"\"Run events. :return: nothing\"\"\"\n schedule = False\n while True:\n with self.lock:\n if not self.running or not self.queue:\n break\n now = time.monotonic()\n if self.queue[0].time > now:\n schedule = True\n break\n event = heapq.heappop(self.queue)\n if event.time > now:\n raise ValueError('invalid event time: %s > %s', event.time, now)\n event.run()\n with self.lock:\n self.timer = None\n if schedule:\n self._schedule_event()\n\n def _schedule_event(self) -> None:\n \"\"\"Schedule event. :return: nothing\"\"\"\n with self.lock:\n if not self.running:\n raise ValueError('scheduling event while not running')\n if not self.queue:\n return\n delay = self.queue[0].time - time.monotonic()\n if self.timer:\n raise ValueError('timer was already set')\n self.timer = Timer(delay, self._run_events)\n self.timer.daemon = True\n self.timer.start()\n\n def run(self) -> None:\n \"\"\"Start event loop. :return: nothing\"\"\"\n with self.lock:\n if self.running:\n return\n self.running = True\n self.start = time.monotonic()\n for event in self.queue:\n event.time += self.start\n self._schedule_event()\n\n def stop(self) -> None:\n \"\"\"Stop event loop. :return: nothing\"\"\"\n with self.lock:\n if not self.running:\n return\n self.queue = []\n self.eventnum = 0\n if self.timer is not None:\n self.timer.cancel()\n self.timer = None\n self.running = False\n self.start = None\n\n def add_event(self, delaysec: float, func: Callable, *args: Any, **kwds: Any):\n \"\"\"Add an event to the event loop. :param delaysec: delay in seconds for event :param func: event function :param args: event arguments :param kwds: event keyword arguments :return: created event\"\"\"\n with self.lock:\n eventnum = self.eventnum\n self.eventnum += 1\n evtime = float(delaysec)\n if self.running:\n evtime += time.monotonic()\n event = Event(eventnum, evtime, func, *args, **kwds)\n if self.queue:\n prevhead = self.queue[0]\n else:\n prevhead = None\n heapq.heappush(self.queue, event)\n head = self.queue[0]\n if prevhead is not None and prevhead != head:\n if self.timer is not None and self.timer.cancel():\n self.timer = None\n if self.running and self.timer is None:\n self._schedule_event()\n return event\n", "source": "the_stack_v2_python_sparse", "source_path": "daemon/core/location/event.py", "source_repo": "coreemu/core", "split": "val", "star_events_count": 606}
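
The EventLoop record depends on two names defined elsewhere in its module: an Event type that heapq orders (presumably by time, then eventnum) and whose run() invokes func(*args, **kwds), plus a Timer wrapper. Since add_event branches on the truth value of timer.cancel(), this is evidently the repo's own Timer rather than threading.Timer, whose cancel() returns None. A usage sketch under those assumptions:

import time

loop = EventLoop()
loop.run()                             # start; arms a Timer once events exist
loop.add_event(0.1, print, 'first')    # Event assumed to call func(*args) in run()
loop.add_event(0.3, print, 'second')
time.sleep(0.5)                        # let the daemon timers drive _run_events in order
loop.stop()                            # drains the queue and cancels any pending timer
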
{"blob_id": "1da156c7473e8185e08199fe843c908cdcc3459a", "bodies": ["queue = deque()\nqueue.append(root)\nseq = ''\nwhile queue:\n for _ in range(len(queue)):\n node = queue.popleft()\n if not node:\n seq += ',#'\n elif not seq:\n seq += f'{node.val}'\n else:\n seq += f',{node.val}'\n if node:\n queue.append(node.left)\n queue.append(node.right)\nreturn seq", "serial = data.split(',')\nprint(data)\nif serial[0] == '':\n return None\nroot = TreeNode(serial[0])\nqueue = deque()\nqueue.append(root)\ni = 0\nwhile i + 1 < len(serial):\n node = queue.popleft()\n left = serial[i + 1]\n i += 1\n if left != '#':\n node.left = TreeNode(left)\n queue.append(node.left)\n right = serial[i + 1]\n i += 1\n if right != '#':\n node.right = TreeNode(right)\n queue.append(node.right)\nreturn root"], "bodies_text": "<|body_start_0|>\n queue = deque()\n queue.append(root)\n seq = ''\n while queue:\n for _ in range(len(queue)):\n node = queue.popleft()\n if not node:\n seq += ',#'\n elif not seq:\n seq += f'{node.val}'\n else:\n seq += f',{node.val}'\n if node:\n queue.append(node.left)\n queue.append(node.right)\n return seq\n<|end_body_0|>\n\n<|body_start_1|>\n serial = data.split(',')\n print(data)\n if serial[0] == '':\n return None\n root = TreeNode(serial[0])\n queue = deque()\n queue.append(root)\n i = 0\n while i + 1 < len(serial):\n node = queue.popleft()\n left = serial[i + 1]\n i += 1\n if left != '#':\n node.left = TreeNode(left)\n queue.append(node.left)\n right = serial[i + 1]\n i += 1\n if right != '#':\n node.right = TreeNode(right)\n queue.append(node.right)\n return root\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Codec", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n queue = deque()\n queue.append(root)\n seq = ''\n while queue:\n for _ in range(len(queue)):\n node = queue.popleft()\n if not node:\n seq += ',#'\n elif not seq:\n seq += f'{node.val}'\n else:\n seq += f',{node.val}'\n if node:\n queue.append(node.left)\n queue.append(node.right)\n return seq\n<|end_body_0|>\n\n<|body_start_1|>\n serial = data.split(',')\n print(data)\n if serial[0] == '':\n return None\n root = TreeNode(serial[0])\n queue = deque()\n queue.append(root)\n i = 0\n while i + 1 < len(serial):\n node = queue.popleft()\n left = serial[i + 1]\n i += 1\n if left != '#':\n node.left = TreeNode(left)\n queue.append(node.left)\n right = serial[i + 1]\n i += 1\n if right != '#':\n node.right = TreeNode(right)\n queue.append(node.right)\n return root\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000148", "length_bytes": 3302, "license_type": "no_license", "methods": [{"docstring": "Encodes a tree to a single string. :type root: TreeNode :rtype: str", "name": "serialize", "signature": "def serialize(self, root)"}, {"docstring": "Decodes your encoded data to tree. 
:type data: str :rtype: TreeNode", "name": "deserialize", "signature": "def deserialize(self, data)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_043228", "prompt": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: TreeNode :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: TreeNode", "prompted_full_text": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: TreeNode :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: TreeNode\n\n<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n queue = deque()\n queue.append(root)\n seq = ''\n while queue:\n for _ in range(len(queue)):\n node = queue.popleft()\n if not node:\n seq += ',#'\n elif not seq:\n seq += f'{node.val}'\n else:\n seq += f',{node.val}'\n if node:\n queue.append(node.left)\n queue.append(node.right)\n return seq\n<|end_body_0|>\n\n<|body_start_1|>\n serial = data.split(',')\n print(data)\n if serial[0] == '':\n return None\n root = TreeNode(serial[0])\n queue = deque()\n queue.append(root)\n i = 0\n while i + 1 < len(serial):\n node = queue.popleft()\n left = serial[i + 1]\n i += 1\n if left != '#':\n node.left = TreeNode(left)\n queue.append(node.left)\n right = serial[i + 1]\n i += 1\n if right != '#':\n node.right = TreeNode(right)\n queue.append(node.right)\n return root\n<|end_body_1|>\n", "revision_id": "04c2b38fb387c0b25cba01773d3b126cc916eb03", "skeleton": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Codec:\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n queue = deque()\n queue.append(root)\n seq = ''\n while queue:\n for _ in range(len(queue)):\n node = queue.popleft()\n if not node:\n seq += ',#'\n elif not seq:\n seq += f'{node.val}'\n else:\n seq += f',{node.val}'\n if node:\n queue.append(node.left)\n queue.append(node.right)\n return seq\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. 
:type data: str :rtype: TreeNode\"\"\"\n serial = data.split(',')\n print(data)\n if serial[0] == '':\n return None\n root = TreeNode(serial[0])\n queue = deque()\n queue.append(root)\n i = 0\n while i + 1 < len(serial):\n node = queue.popleft()\n left = serial[i + 1]\n i += 1\n if left != '#':\n node.left = TreeNode(left)\n queue.append(node.left)\n right = serial[i + 1]\n i += 1\n if right != '#':\n node.right = TreeNode(right)\n queue.append(node.right)\n return root\n", "source": "the_stack_v2_python_sparse", "source_path": "BinaryTree/297.二叉树的序列化与反序列化.py", "source_repo": "snow-tyan/LeetCode", "split": "val", "star_events_count": 0}
{"blob_id": "951e9294f4c0d4c9c9346e4886fe1ecf234a15d0", "bodies": ["if data is not None:\n if not isinstance(data, list):\n raise TypeError('data must be a list')\n if len(data) <= 2:\n raise ValueError('data must contain multiple values')\n mean = sum(data) / len(data)\n variance = 0\n for x in data:\n variance += (x - mean) ** 2\n variance = variance / len(data)\n p = 1 - variance / mean\n self.n = int(round(mean / p))\n self.p = float(mean / self.n)\nelse:\n if n <= 0:\n raise ValueError('n must be a positive value')\n self.n = int(n)\n if p <= 0 or p >= 1:\n raise ValueError('p must be greater than 0 and less than 1')\n self.p = float(p)", "factorial_num = 1\nfor m in range(1, n + 1):\n factorial_num *= m\nreturn factorial_num", "k = int(k)\nif k < 0 or k > self.n:\n return 0\nn_factorial = self.factorial(self.n)\nk_factorial = self.factorial(k)\nnk_factorial = self.factorial(self.n - k)\nbinomial_coefficient = n_factorial / (nk_factorial * k_factorial)\nq = 1 - self.p\nreturn binomial_coefficient * (self.p ** k * q ** (self.n - k))", "k = int(k)\nif k < 0:\n return 0\ncdf = 0\nfor x in range(k + 1):\n cdf += self.pmf(x)\nreturn cdf"], "bodies_text": "<|body_start_0|>\n if data is not None:\n if not isinstance(data, list):\n raise TypeError('data must be a list')\n if len(data) <= 2:\n raise ValueError('data must contain multiple values')\n mean = sum(data) / len(data)\n variance = 0\n for x in data:\n variance += (x - mean) ** 2\n variance = variance / len(data)\n p = 1 - variance / mean\n self.n = int(round(mean / p))\n self.p = float(mean / self.n)\n else:\n if n <= 0:\n raise ValueError('n must be a positive value')\n self.n = int(n)\n if p <= 0 or p >= 1:\n raise ValueError('p must be greater than 0 and less than 1')\n self.p = float(p)\n<|end_body_0|>\n\n<|body_start_1|>\n factorial_num = 1\n for m in range(1, n + 1):\n factorial_num *= m\n return factorial_num\n<|end_body_1|>\n\n<|body_start_2|>\n k = int(k)\n if k < 0 or k > self.n:\n return 0\n n_factorial = self.factorial(self.n)\n k_factorial = self.factorial(k)\n nk_factorial = self.factorial(self.n - k)\n binomial_coefficient = n_factorial / (nk_factorial * k_factorial)\n q = 1 - self.p\n return binomial_coefficient * (self.p ** k * q ** (self.n - k))\n<|end_body_2|>\n\n<|body_start_3|>\n k = int(k)\n if k < 0:\n return 0\n cdf = 0\n for x in range(k + 1):\n cdf += self.pmf(x)\n return cdf\n<|end_body_3|>\n", "class_docstring": "Binomial class", "class_name": "Binomial", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Binomial:\n \"\"\"Binomial class\"\"\"\n\n def __init__(self, data=None, n=1, p=0.5):\n \"\"\"Class constructor\"\"\"\n <|body_0|>\n\n def factorial(n):\n \"\"\"Calculates the factorial of a given number\"\"\"\n <|body_1|>\n\n def pmf(self, k):\n \"\"\"Calculates the value of the PMF for a given number of “successes”\"\"\"\n <|body_2|>\n\n def cdf(self, k):\n \"\"\"Calculates the value of the CDF for a given number of “successes”\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if data is not None:\n if not isinstance(data, list):\n raise TypeError('data must be a list')\n if len(data) <= 2:\n raise ValueError('data must contain multiple values')\n mean = sum(data) / len(data)\n variance = 0\n for x in data:\n variance += (x - mean) ** 2\n variance = variance / len(data)\n p = 1 - variance / mean\n self.n = int(round(mean / p))\n self.p = float(mean / self.n)\n else:\n if n <= 0:\n raise ValueError('n must be a positive value')\n 
self.n = int(n)\n if p <= 0 or p >= 1:\n raise ValueError('p must be greater than 0 and less than 1')\n self.p = float(p)\n<|end_body_0|>\n\n<|body_start_1|>\n factorial_num = 1\n for m in range(1, n + 1):\n factorial_num *= m\n return factorial_num\n<|end_body_1|>\n\n<|body_start_2|>\n k = int(k)\n if k < 0 or k > self.n:\n return 0\n n_factorial = self.factorial(self.n)\n k_factorial = self.factorial(k)\n nk_factorial = self.factorial(self.n - k)\n binomial_coefficient = n_factorial / (nk_factorial * k_factorial)\n q = 1 - self.p\n return binomial_coefficient * (self.p ** k * q ** (self.n - k))\n<|end_body_2|>\n\n<|body_start_3|>\n k = int(k)\n if k < 0:\n return 0\n cdf = 0\n for x in range(k + 1):\n cdf += self.pmf(x)\n return cdf\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000149", "length_bytes": 2028, "license_type": "no_license", "methods": [{"docstring": "Class constructor", "name": "__init__", "signature": "def __init__(self, data=None, n=1, p=0.5)"}, {"docstring": "Calculates the factorial of a given number", "name": "factorial", "signature": "def factorial(n)"}, {"docstring": "Calculates the value of the PMF for a given number of “successes”", "name": "pmf", "signature": "def pmf(self, k)"}, {"docstring": "Calculates the value of the CDF for a given number of “successes”", "name": "cdf", "signature": "def cdf(self, k)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_021079", "prompt": "Implement the Python class `Binomial` described below.\n\nClass description:\nBinomial class\n\nMethod signatures and docstrings:\n- def __init__(self, data=None, n=1, p=0.5): Class constructor\n- def factorial(n): Calculates the factorial of a given number\n- def pmf(self, k): Calculates the value of the PMF for a given number of “successes”\n- def cdf(self, k): Calculates the value of the CDF for a given number of “successes”", "prompted_full_text": "Implement the Python class `Binomial` described below.\n\nClass description:\nBinomial class\n\nMethod signatures and docstrings:\n- def __init__(self, data=None, n=1, p=0.5): Class constructor\n- def factorial(n): Calculates the factorial of a given number\n- def pmf(self, k): Calculates the value of the PMF for a given number of “successes”\n- def cdf(self, k): Calculates the value of the CDF for a given number of “successes”\n\n<|skeleton|>\nclass Binomial:\n \"\"\"Binomial class\"\"\"\n\n def __init__(self, data=None, n=1, p=0.5):\n \"\"\"Class constructor\"\"\"\n <|body_0|>\n\n def factorial(n):\n \"\"\"Calculates the factorial of a given number\"\"\"\n <|body_1|>\n\n def pmf(self, k):\n \"\"\"Calculates the value of the PMF for a given number of “successes”\"\"\"\n <|body_2|>\n\n def cdf(self, k):\n \"\"\"Calculates the value of the CDF for a given number of “successes”\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if data is not None:\n if not isinstance(data, list):\n raise TypeError('data must be a list')\n if len(data) <= 2:\n raise ValueError('data must contain multiple values')\n mean = sum(data) / len(data)\n variance = 0\n for x in data:\n variance += (x - mean) ** 2\n variance = variance / len(data)\n p = 1 - variance / mean\n self.n = int(round(mean / p))\n self.p = float(mean / self.n)\n else:\n if n <= 0:\n raise ValueError('n must be a positive value')\n self.n = int(n)\n if p <= 0 or p >= 1:\n raise ValueError('p must be greater than 0 and less than 1')\n self.p = float(p)\n<|end_body_0|>\n\n<|body_start_1|>\n factorial_num = 1\n for m in range(1, n + 1):\n factorial_num *= 
m\n return factorial_num\n<|end_body_1|>\n\n<|body_start_2|>\n k = int(k)\n if k < 0 or k > self.n:\n return 0\n n_factorial = self.factorial(self.n)\n k_factorial = self.factorial(k)\n nk_factorial = self.factorial(self.n - k)\n binomial_coefficient = n_factorial / (nk_factorial * k_factorial)\n q = 1 - self.p\n return binomial_coefficient * (self.p ** k * q ** (self.n - k))\n<|end_body_2|>\n\n<|body_start_3|>\n k = int(k)\n if k < 0:\n return 0\n cdf = 0\n for x in range(k + 1):\n cdf += self.pmf(x)\n return cdf\n<|end_body_3|>\n", "revision_id": "23162e01761cfa56158a1ebc88ac7709ff1c2af2", "skeleton": "<|skeleton|>\nclass Binomial:\n \"\"\"Binomial class\"\"\"\n\n def __init__(self, data=None, n=1, p=0.5):\n \"\"\"Class constructor\"\"\"\n <|body_0|>\n\n def factorial(n):\n \"\"\"Calculates the factorial of a given number\"\"\"\n <|body_1|>\n\n def pmf(self, k):\n \"\"\"Calculates the value of the PMF for a given number of “successes”\"\"\"\n <|body_2|>\n\n def cdf(self, k):\n \"\"\"Calculates the value of the CDF for a given number of “successes”\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Binomial:\n \"\"\"Binomial class\"\"\"\n\n def __init__(self, data=None, n=1, p=0.5):\n \"\"\"Class constructor\"\"\"\n if data is not None:\n if not isinstance(data, list):\n raise TypeError('data must be a list')\n if len(data) <= 2:\n raise ValueError('data must contain multiple values')\n mean = sum(data) / len(data)\n variance = 0\n for x in data:\n variance += (x - mean) ** 2\n variance = variance / len(data)\n p = 1 - variance / mean\n self.n = int(round(mean / p))\n self.p = float(mean / self.n)\n else:\n if n <= 0:\n raise ValueError('n must be a positive value')\n self.n = int(n)\n if p <= 0 or p >= 1:\n raise ValueError('p must be greater than 0 and less than 1')\n self.p = float(p)\n\n def factorial(n):\n \"\"\"Calculates the factorial of a given number\"\"\"\n factorial_num = 1\n for m in range(1, n + 1):\n factorial_num *= m\n return factorial_num\n\n def pmf(self, k):\n \"\"\"Calculates the value of the PMF for a given number of “successes”\"\"\"\n k = int(k)\n if k < 0 or k > self.n:\n return 0\n n_factorial = self.factorial(self.n)\n k_factorial = self.factorial(k)\n nk_factorial = self.factorial(self.n - k)\n binomial_coefficient = n_factorial / (nk_factorial * k_factorial)\n q = 1 - self.p\n return binomial_coefficient * (self.p ** k * q ** (self.n - k))\n\n def cdf(self, k):\n \"\"\"Calculates the value of the CDF for a given number of “successes”\"\"\"\n k = int(k)\n if k < 0:\n return 0\n cdf = 0\n for x in range(k + 1):\n cdf += self.pmf(x)\n return cdf\n", "source": "the_stack_v2_python_sparse", "source_path": "math/0x03-probability/binomial.py", "source_repo": "emmanavarro/holbertonschool-machine_learning", "split": "val", "star_events_count": 0}
{"blob_id": "81f097286be9eea88f5aea7c7086230b683ff299", "bodies": ["res_lst = []\nfor x in range(1 << len(nums)):\n subset = []\n pow_2 = 1\n for i in range(len(nums)):\n if x & pow_2 > 0:\n subset.append(nums[i])\n pow_2 <<= 1\n res_lst.append(subset)\nreturn res_lst", "res_lst = []\n\ndef dfs(index, subset):\n if index >= len(nums):\n res_lst.append(subset)\n return\n dfs(index + 1, subset)\n dfs(index + 1, subset + [nums[index]])\ndfs(0, [])\nreturn res_lst"], "bodies_text": "<|body_start_0|>\n res_lst = []\n for x in range(1 << len(nums)):\n subset = []\n pow_2 = 1\n for i in range(len(nums)):\n if x & pow_2 > 0:\n subset.append(nums[i])\n pow_2 <<= 1\n res_lst.append(subset)\n return res_lst\n<|end_body_0|>\n\n<|body_start_1|>\n res_lst = []\n\n def dfs(index, subset):\n if index >= len(nums):\n res_lst.append(subset)\n return\n dfs(index + 1, subset)\n dfs(index + 1, subset + [nums[index]])\n dfs(0, [])\n return res_lst\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def subsets(self, nums):\n \"\"\"Use binary to implement. :type nums: List[int] :rtype: List[List[int]]\"\"\"\n <|body_0|>\n\n def subsets(self, nums):\n \"\"\"DFS :type nums: List[int] :rtype: List[List[int]]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n res_lst = []\n for x in range(1 << len(nums)):\n subset = []\n pow_2 = 1\n for i in range(len(nums)):\n if x & pow_2 > 0:\n subset.append(nums[i])\n pow_2 <<= 1\n res_lst.append(subset)\n return res_lst\n<|end_body_0|>\n\n<|body_start_1|>\n res_lst = []\n\n def dfs(index, subset):\n if index >= len(nums):\n res_lst.append(subset)\n return\n dfs(index + 1, subset)\n dfs(index + 1, subset + [nums[index]])\n dfs(0, [])\n return res_lst\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000150", "length_bytes": 1012, "license_type": "no_license", "methods": [{"docstring": "Use binary to implement. :type nums: List[int] :rtype: List[List[int]]", "name": "subsets", "signature": "def subsets(self, nums)"}, {"docstring": "DFS :type nums: List[int] :rtype: List[List[int]]", "name": "subsets", "signature": "def subsets(self, nums)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_013710", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def subsets(self, nums): Use binary to implement. :type nums: List[int] :rtype: List[List[int]]\n- def subsets(self, nums): DFS :type nums: List[int] :rtype: List[List[int]]", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def subsets(self, nums): Use binary to implement. :type nums: List[int] :rtype: List[List[int]]\n- def subsets(self, nums): DFS :type nums: List[int] :rtype: List[List[int]]\n\n<|skeleton|>\nclass Solution:\n\n def subsets(self, nums):\n \"\"\"Use binary to implement. 
:type nums: List[int] :rtype: List[List[int]]\"\"\"\n <|body_0|>\n\n def subsets(self, nums):\n \"\"\"DFS :type nums: List[int] :rtype: List[List[int]]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n res_lst = []\n for x in range(1 << len(nums)):\n subset = []\n pow_2 = 1\n for i in range(len(nums)):\n if x & pow_2 > 0:\n subset.append(nums[i])\n pow_2 <<= 1\n res_lst.append(subset)\n return res_lst\n<|end_body_0|>\n\n<|body_start_1|>\n res_lst = []\n\n def dfs(index, subset):\n if index >= len(nums):\n res_lst.append(subset)\n return\n dfs(index + 1, subset)\n dfs(index + 1, subset + [nums[index]])\n dfs(0, [])\n return res_lst\n<|end_body_1|>\n", "revision_id": "052bd7915257679877dbe55b60ed1abb7528eaa2", "skeleton": "<|skeleton|>\nclass Solution:\n\n def subsets(self, nums):\n \"\"\"Use binary to implement. :type nums: List[int] :rtype: List[List[int]]\"\"\"\n <|body_0|>\n\n def subsets(self, nums):\n \"\"\"DFS :type nums: List[int] :rtype: List[List[int]]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def subsets(self, nums):\n \"\"\"Use binary to implement. :type nums: List[int] :rtype: List[List[int]]\"\"\"\n res_lst = []\n for x in range(1 << len(nums)):\n subset = []\n pow_2 = 1\n for i in range(len(nums)):\n if x & pow_2 > 0:\n subset.append(nums[i])\n pow_2 <<= 1\n res_lst.append(subset)\n return res_lst\n\n def subsets(self, nums):\n \"\"\"DFS :type nums: List[int] :rtype: List[List[int]]\"\"\"\n res_lst = []\n\n def dfs(index, subset):\n if index >= len(nums):\n res_lst.append(subset)\n return\n dfs(index + 1, subset)\n dfs(index + 1, subset + [nums[index]])\n dfs(0, [])\n return res_lst\n", "source": "the_stack_v2_python_sparse", "source_path": "python_solution/BitManipulation/78_Subsets.py", "source_repo": "Dimen61/leetcode", "split": "val", "star_events_count": 4}
{"blob_id": "2472e5588098cf0681b6fd472b0a34a6055abd65", "bodies": ["super(PullBaseImagePlugin, self).__init__(tasker, workflow)\nself.parent_registry = parent_registry\nself.parent_registry_insecure = parent_registry_insecure", "base_image = self.workflow.builder.base_image\nself.log.info(\"pulling base image '%s' from registry '%s'\", base_image, self.parent_registry)\nbase_image_with_registry = base_image.copy()\nif self.parent_registry:\n if base_image.registry and base_image.registry != self.parent_registry:\n self.log.error(\"registry in dockerfile doesn't match provided source registry, dockerfile = '%s', provided = '%s'\", base_image.registry, self.parent_registry)\n raise RuntimeError(\"Registry specified in dockerfile doesn't match provided one. Dockerfile: '%s', Provided: '%s'\" % (base_image.registry, self.parent_registry))\n base_image_with_registry.registry = self.parent_registry\npulled_base = self.tasker.pull_image(base_image_with_registry, insecure=self.parent_registry_insecure)\nself.workflow.pulled_base_images.add(pulled_base)\nif not base_image.registry:\n response = self.tasker.tag_image(base_image_with_registry, base_image, force=True)\n self.workflow.pulled_base_images.add(response)\n pulled_base = response\nself.log.debug(\"image '%s' is available\", pulled_base)"], "bodies_text": "<|body_start_0|>\n super(PullBaseImagePlugin, self).__init__(tasker, workflow)\n self.parent_registry = parent_registry\n self.parent_registry_insecure = parent_registry_insecure\n<|end_body_0|>\n\n<|body_start_1|>\n base_image = self.workflow.builder.base_image\n self.log.info(\"pulling base image '%s' from registry '%s'\", base_image, self.parent_registry)\n base_image_with_registry = base_image.copy()\n if self.parent_registry:\n if base_image.registry and base_image.registry != self.parent_registry:\n self.log.error(\"registry in dockerfile doesn't match provided source registry, dockerfile = '%s', provided = '%s'\", base_image.registry, self.parent_registry)\n raise RuntimeError(\"Registry specified in dockerfile doesn't match provided one. 
Dockerfile: '%s', Provided: '%s'\" % (base_image.registry, self.parent_registry))\n base_image_with_registry.registry = self.parent_registry\n pulled_base = self.tasker.pull_image(base_image_with_registry, insecure=self.parent_registry_insecure)\n self.workflow.pulled_base_images.add(pulled_base)\n if not base_image.registry:\n response = self.tasker.tag_image(base_image_with_registry, base_image, force=True)\n self.workflow.pulled_base_images.add(response)\n pulled_base = response\n self.log.debug(\"image '%s' is available\", pulled_base)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "PullBaseImagePlugin", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PullBaseImagePlugin:\n\n def __init__(self, tasker, workflow, parent_registry=None, parent_registry_insecure=False):\n \"\"\"constructor :param tasker: DockerTasker instance :param workflow: DockerBuildWorkflow instance :param parent_registry: registry to enforce pulling from :param parent_registry_insecure: allow connecting to the registry over plain http\"\"\"\n <|body_0|>\n\n def run(self):\n \"\"\"pull base image\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(PullBaseImagePlugin, self).__init__(tasker, workflow)\n self.parent_registry = parent_registry\n self.parent_registry_insecure = parent_registry_insecure\n<|end_body_0|>\n\n<|body_start_1|>\n base_image = self.workflow.builder.base_image\n self.log.info(\"pulling base image '%s' from registry '%s'\", base_image, self.parent_registry)\n base_image_with_registry = base_image.copy()\n if self.parent_registry:\n if base_image.registry and base_image.registry != self.parent_registry:\n self.log.error(\"registry in dockerfile doesn't match provided source registry, dockerfile = '%s', provided = '%s'\", base_image.registry, self.parent_registry)\n raise RuntimeError(\"Registry specified in dockerfile doesn't match provided one. 
Dockerfile: '%s', Provided: '%s'\" % (base_image.registry, self.parent_registry))\n base_image_with_registry.registry = self.parent_registry\n pulled_base = self.tasker.pull_image(base_image_with_registry, insecure=self.parent_registry_insecure)\n self.workflow.pulled_base_images.add(pulled_base)\n if not base_image.registry:\n response = self.tasker.tag_image(base_image_with_registry, base_image, force=True)\n self.workflow.pulled_base_images.add(response)\n pulled_base = response\n self.log.debug(\"image '%s' is available\", pulled_base)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000151", "length_bytes": 2565, "license_type": "permissive", "methods": [{"docstring": "constructor :param tasker: DockerTasker instance :param workflow: DockerBuildWorkflow instance :param parent_registry: registry to enforce pulling from :param parent_registry_insecure: allow connecting to the registry over plain http", "name": "__init__", "signature": "def __init__(self, tasker, workflow, parent_registry=None, parent_registry_insecure=False)"}, {"docstring": "pull base image", "name": "run", "signature": "def run(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_042081", "prompt": "Implement the Python class `PullBaseImagePlugin` described below.\n\nClass description:\nImplement the PullBaseImagePlugin class.\n\nMethod signatures and docstrings:\n- def __init__(self, tasker, workflow, parent_registry=None, parent_registry_insecure=False): constructor :param tasker: DockerTasker instance :param workflow: DockerBuildWorkflow instance :param parent_registry: registry to enforce pulling from :param parent_registry_insecure: allow connecting to the registry over plain http\n- def run(self): pull base image", "prompted_full_text": "Implement the Python class `PullBaseImagePlugin` described below.\n\nClass description:\nImplement the PullBaseImagePlugin class.\n\nMethod signatures and docstrings:\n- def __init__(self, tasker, workflow, parent_registry=None, parent_registry_insecure=False): constructor :param tasker: DockerTasker instance :param workflow: DockerBuildWorkflow instance :param parent_registry: registry to enforce pulling from :param parent_registry_insecure: allow connecting to the registry over plain http\n- def run(self): pull base image\n\n<|skeleton|>\nclass PullBaseImagePlugin:\n\n def __init__(self, tasker, workflow, parent_registry=None, parent_registry_insecure=False):\n \"\"\"constructor :param tasker: DockerTasker instance :param workflow: DockerBuildWorkflow instance :param parent_registry: registry to enforce pulling from :param parent_registry_insecure: allow connecting to the registry over plain http\"\"\"\n <|body_0|>\n\n def run(self):\n \"\"\"pull base image\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(PullBaseImagePlugin, self).__init__(tasker, workflow)\n self.parent_registry = parent_registry\n self.parent_registry_insecure = parent_registry_insecure\n<|end_body_0|>\n\n<|body_start_1|>\n base_image = self.workflow.builder.base_image\n self.log.info(\"pulling base image '%s' from registry '%s'\", base_image, self.parent_registry)\n base_image_with_registry = base_image.copy()\n if self.parent_registry:\n if base_image.registry and base_image.registry != self.parent_registry:\n self.log.error(\"registry in dockerfile doesn't match provided source registry, dockerfile = '%s', provided = '%s'\", base_image.registry, self.parent_registry)\n raise RuntimeError(\"Registry specified in dockerfile doesn't match provided one. 
Dockerfile: '%s', Provided: '%s'\" % (base_image.registry, self.parent_registry))\n base_image_with_registry.registry = self.parent_registry\n pulled_base = self.tasker.pull_image(base_image_with_registry, insecure=self.parent_registry_insecure)\n self.workflow.pulled_base_images.add(pulled_base)\n if not base_image.registry:\n response = self.tasker.tag_image(base_image_with_registry, base_image, force=True)\n self.workflow.pulled_base_images.add(response)\n pulled_base = response\n self.log.debug(\"image '%s' is available\", pulled_base)\n<|end_body_1|>\n", "revision_id": "85cd4f161e7b020575b0e7b20d5867d2b16286bc", "skeleton": "<|skeleton|>\nclass PullBaseImagePlugin:\n\n def __init__(self, tasker, workflow, parent_registry=None, parent_registry_insecure=False):\n \"\"\"constructor :param tasker: DockerTasker instance :param workflow: DockerBuildWorkflow instance :param parent_registry: registry to enforce pulling from :param parent_registry_insecure: allow connecting to the registry over plain http\"\"\"\n <|body_0|>\n\n def run(self):\n \"\"\"pull base image\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class PullBaseImagePlugin:\n def __init__(self, tasker, workflow, parent_registry=None, parent_registry_insecure=False):\n \"\"\"constructor :param tasker: DockerTasker instance :param workflow: DockerBuildWorkflow instance :param parent_registry: registry to enforce pulling from :param parent_registry_insecure: allow connecting to the registry over plain http\"\"\"\n super(PullBaseImagePlugin, self).__init__(tasker, workflow)\n self.parent_registry = parent_registry\n self.parent_registry_insecure = parent_registry_insecure\n\n def run(self):\n \"\"\"pull base image\"\"\"\n base_image = self.workflow.builder.base_image\n self.log.info(\"pulling base image '%s' from registry '%s'\", base_image, self.parent_registry)\n base_image_with_registry = base_image.copy()\n if self.parent_registry:\n if base_image.registry and base_image.registry != self.parent_registry:\n self.log.error(\"registry in dockerfile doesn't match provided source registry, dockerfile = '%s', provided = '%s'\", base_image.registry, self.parent_registry)\n raise RuntimeError(\"Registry specified in dockerfile doesn't match provided one. Dockerfile: '%s', Provided: '%s'\" % (base_image.registry, self.parent_registry))\n base_image_with_registry.registry = self.parent_registry\n pulled_base = self.tasker.pull_image(base_image_with_registry, insecure=self.parent_registry_insecure)\n self.workflow.pulled_base_images.add(pulled_base)\n if not base_image.registry:\n response = self.tasker.tag_image(base_image_with_registry, base_image, force=True)\n self.workflow.pulled_base_images.add(response)\n pulled_base = response\n self.log.debug(\"image '%s' is available\", pulled_base)\n", "source": "the_stack_v2_python_sparse", "source_path": "atomic_reactor/plugins/pre_pull_base_image.py", "source_repo": "fatherlinux/atomic-reactor", "split": "val", "star_events_count": 1}
{"blob_id": "0bda9f6572a872c51895eb44ebea3c386176da9a", "bodies": ["self._pt_1 = pt_3d_1\nself._pt_2 = pt_3d_2\nself._pt_3 = pt_3d_3", "def versor3d(pt_1, pt_2):\n \"\"\"\n\n :param pt_1:\n :param pt_2:\n :return:\n \"\"\"\n return Segment(pt_1, pt_2).vector().versor_full()\n\ndef is_pt_in_fascio(pt_1, pt_2, pt_3):\n \"\"\"\n\n :param pt_1:\n :param pt_2:\n :param pt_3:\n :return:\n \"\"\"\n apex = pt_1\n versor_1 = versor3d(pt_1, pt_2)\n versor_2 = versor3d(pt_1, pt_3)\n fascio = TriangBeam(apex, versor_1, versor_2)\n if not fascio.is_within_fascio(pt_3d):\n return False\n else:\n return True\nif not (is_pt_in_fascio(self._pt_1, self._pt_2, self._pt_3) and is_pt_in_fascio(self._pt_2, self._pt_1, self._pt_3)):\n return False\nelse:\n return True"], "bodies_text": "<|body_start_0|>\n self._pt_1 = pt_3d_1\n self._pt_2 = pt_3d_2\n self._pt_3 = pt_3d_3\n<|end_body_0|>\n\n<|body_start_1|>\n def versor3d(pt_1, pt_2):\n \"\"\"\n\n :param pt_1:\n :param pt_2:\n :return:\n \"\"\"\n return Segment(pt_1, pt_2).vector().versor_full()\n\n def is_pt_in_fascio(pt_1, pt_2, pt_3):\n \"\"\"\n\n :param pt_1:\n :param pt_2:\n :param pt_3:\n :return:\n \"\"\"\n apex = pt_1\n versor_1 = versor3d(pt_1, pt_2)\n versor_2 = versor3d(pt_1, pt_3)\n fascio = TriangBeam(apex, versor_1, versor_2)\n if not fascio.is_within_fascio(pt_3d):\n return False\n else:\n return True\n if not (is_pt_in_fascio(self._pt_1, self._pt_2, self._pt_3) and is_pt_in_fascio(self._pt_2, self._pt_1, self._pt_3)):\n return False\n else:\n return True\n<|end_body_1|>\n", "class_docstring": "", "class_name": "CartesianTriangle", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CartesianTriangle:\n\n def __init__(self, pt_3d_1, pt_3d_2, pt_3d_3):\n \"\"\":param pt_3d_1: :param pt_3d_2: :param pt_3d_3:\"\"\"\n <|body_0|>\n\n def is_pt_within(self, pt_3d):\n \"\"\":param pt_3d: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._pt_1 = pt_3d_1\n self._pt_2 = pt_3d_2\n self._pt_3 = pt_3d_3\n<|end_body_0|>\n\n<|body_start_1|>\n def versor3d(pt_1, pt_2):\n \"\"\"\n\n :param pt_1:\n :param pt_2:\n :return:\n \"\"\"\n return Segment(pt_1, pt_2).vector().versor_full()\n\n def is_pt_in_fascio(pt_1, pt_2, pt_3):\n \"\"\"\n\n :param pt_1:\n :param pt_2:\n :param pt_3:\n :return:\n \"\"\"\n apex = pt_1\n versor_1 = versor3d(pt_1, pt_2)\n versor_2 = versor3d(pt_1, pt_3)\n fascio = TriangBeam(apex, versor_1, versor_2)\n if not fascio.is_within_fascio(pt_3d):\n return False\n else:\n return True\n if not (is_pt_in_fascio(self._pt_1, self._pt_2, self._pt_3) and is_pt_in_fascio(self._pt_2, self._pt_1, self._pt_3)):\n return False\n else:\n return True\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000152", "length_bytes": 18232, "license_type": "no_license", "methods": [{"docstring": ":param pt_3d_1: :param pt_3d_2: :param pt_3d_3:", "name": "__init__", "signature": "def __init__(self, pt_3d_1, pt_3d_2, pt_3d_3)"}, {"docstring": ":param pt_3d: :return:", "name": "is_pt_within", "signature": "def is_pt_within(self, pt_3d)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_043687", "prompt": "Implement the Python class `CartesianTriangle` described below.\n\nClass description:\nImplement the CartesianTriangle class.\n\nMethod signatures and docstrings:\n- def __init__(self, pt_3d_1, pt_3d_2, pt_3d_3): :param pt_3d_1: :param pt_3d_2: :param pt_3d_3:\n- def is_pt_within(self, pt_3d): :param pt_3d: :return:", "prompted_full_text": "Implement the 
Python class `CartesianTriangle` described below.\n\nClass description:\nImplement the CartesianTriangle class.\n\nMethod signatures and docstrings:\n- def __init__(self, pt_3d_1, pt_3d_2, pt_3d_3): :param pt_3d_1: :param pt_3d_2: :param pt_3d_3:\n- def is_pt_within(self, pt_3d): :param pt_3d: :return:\n\n<|skeleton|>\nclass CartesianTriangle:\n\n def __init__(self, pt_3d_1, pt_3d_2, pt_3d_3):\n \"\"\":param pt_3d_1: :param pt_3d_2: :param pt_3d_3:\"\"\"\n <|body_0|>\n\n def is_pt_within(self, pt_3d):\n \"\"\":param pt_3d: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._pt_1 = pt_3d_1\n self._pt_2 = pt_3d_2\n self._pt_3 = pt_3d_3\n<|end_body_0|>\n\n<|body_start_1|>\n def versor3d(pt_1, pt_2):\n \"\"\"\n\n :param pt_1:\n :param pt_2:\n :return:\n \"\"\"\n return Segment(pt_1, pt_2).vector().versor_full()\n\n def is_pt_in_fascio(pt_1, pt_2, pt_3):\n \"\"\"\n\n :param pt_1:\n :param pt_2:\n :param pt_3:\n :return:\n \"\"\"\n apex = pt_1\n versor_1 = versor3d(pt_1, pt_2)\n versor_2 = versor3d(pt_1, pt_3)\n fascio = TriangBeam(apex, versor_1, versor_2)\n if not fascio.is_within_fascio(pt_3d):\n return False\n else:\n return True\n if not (is_pt_in_fascio(self._pt_1, self._pt_2, self._pt_3) and is_pt_in_fascio(self._pt_2, self._pt_1, self._pt_3)):\n return False\n else:\n return True\n<|end_body_1|>\n", "revision_id": "b07ab23400b4ff4151555c2e81392a7adf99fc33", "skeleton": "<|skeleton|>\nclass CartesianTriangle:\n\n def __init__(self, pt_3d_1, pt_3d_2, pt_3d_3):\n \"\"\":param pt_3d_1: :param pt_3d_2: :param pt_3d_3:\"\"\"\n <|body_0|>\n\n def is_pt_within(self, pt_3d):\n \"\"\":param pt_3d: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CartesianTriangle:\n def __init__(self, pt_3d_1, pt_3d_2, pt_3d_3):\n \"\"\":param pt_3d_1: :param pt_3d_2: :param pt_3d_3:\"\"\"\n self._pt_1 = pt_3d_1\n self._pt_2 = pt_3d_2\n self._pt_3 = pt_3d_3\n\n def is_pt_within(self, pt_3d):\n \"\"\":param pt_3d: :return:\"\"\"\n def versor3d(pt_1, pt_2):\n \"\"\"\n\n :param pt_1:\n :param pt_2:\n :return:\n \"\"\"\n return Segment(pt_1, pt_2).vector().versor_full()\n\n def is_pt_in_fascio(pt_1, pt_2, pt_3):\n \"\"\"\n\n :param pt_1:\n :param pt_2:\n :param pt_3:\n :return:\n \"\"\"\n apex = pt_1\n versor_1 = versor3d(pt_1, pt_2)\n versor_2 = versor3d(pt_1, pt_3)\n fascio = TriangBeam(apex, versor_1, versor_2)\n if not fascio.is_within_fascio(pt_3d):\n return False\n else:\n return True\n if not (is_pt_in_fascio(self._pt_1, self._pt_2, self._pt_3) and is_pt_in_fascio(self._pt_2, self._pt_1, self._pt_3)):\n return False\n else:\n return True\n", "source": "the_stack_v2_python_sparse", "source_path": "pygsf/spatial/vectorial/meshes.py", "source_repo": "mauroalberti/qgSurf", "split": "val", "star_events_count": 5}
{"blob_id": "02774c1261595edc12f3a21ea68dcdbe2bf3c05f", "bodies": ["identities = {'identity-uuid': {'uuid': 'identity-uuid'}}\ntimestamp = datetime.datetime(2020, 1, 1)\nprocess_subscription(identities, 'identity-uuid', 'momconnect', timestamp)\nself.assertEqual(identities, {'identity-uuid': {'uuid': 'identity-uuid', 'channel': 'SMS'}})\nprocess_subscription(identities, 'identity-uuid', 'whatsapp_momconnect', timestamp)\nself.assertEqual(identities, {'identity-uuid': {'uuid': 'identity-uuid', 'channel': 'WhatsApp'}})\nprocess_subscription(identities, 'identity-uuid', 'momconnect', timestamp)\nself.assertEqual(identities, {'identity-uuid': {'uuid': 'identity-uuid', 'channel': 'WhatsApp'}})", "identities = {'identity-uuid': {'uuid': 'identity-uuid'}}\ntimestamp = datetime.datetime(2020, 1, 1)\nprocess_subscription(identities, 'identity-uuid', 'pmtct_prebirth.hw_full.1', timestamp)\nself.assertEqual(identities['identity-uuid']['pmtct_messaging'], 'TRUE')\nprocess_subscription(identities, 'identity-uuid', 'loss_miscarriage.patient.1', timestamp)\nself.assertEqual(identities['identity-uuid']['loss_messaging'], 'TRUE')\nself.assertEqual(identities['identity-uuid']['optout_reason'], 'miscarriage')\nself.assertEqual(identities['identity-uuid']['optout_timestamp'], '2020-01-01T00:00:00')\nprocess_subscription(identities, 'identity-uuid', 'momconnect_prebirth.hw_partial.1', timestamp)\nself.assertEqual(identities['identity-uuid']['public_messaging'], 'TRUE')\nself.assertEqual(identities['identity-uuid']['public_registration_date'], '2020-01-01T00:00:00')\nprocess_subscription(identities, 'identity-uuid', 'momconnect_prebirth.hw_full.3', timestamp)\nself.assertEqual(identities['identity-uuid']['prebirth_messaging'], '3')\nprocess_subscription(identities, 'identity-uuid', 'momconnect_postbirth.hw_full.2', timestamp)\nself.assertEqual(identities['identity-uuid']['postbirth_messaging'], 'TRUE')"], "bodies_text": "<|body_start_0|>\n identities = {'identity-uuid': {'uuid': 'identity-uuid'}}\n timestamp = datetime.datetime(2020, 1, 1)\n process_subscription(identities, 'identity-uuid', 'momconnect', timestamp)\n self.assertEqual(identities, {'identity-uuid': {'uuid': 'identity-uuid', 'channel': 'SMS'}})\n process_subscription(identities, 'identity-uuid', 'whatsapp_momconnect', timestamp)\n self.assertEqual(identities, {'identity-uuid': {'uuid': 'identity-uuid', 'channel': 'WhatsApp'}})\n process_subscription(identities, 'identity-uuid', 'momconnect', timestamp)\n self.assertEqual(identities, {'identity-uuid': {'uuid': 'identity-uuid', 'channel': 'WhatsApp'}})\n<|end_body_0|>\n\n<|body_start_1|>\n identities = {'identity-uuid': {'uuid': 'identity-uuid'}}\n timestamp = datetime.datetime(2020, 1, 1)\n process_subscription(identities, 'identity-uuid', 'pmtct_prebirth.hw_full.1', timestamp)\n self.assertEqual(identities['identity-uuid']['pmtct_messaging'], 'TRUE')\n process_subscription(identities, 'identity-uuid', 'loss_miscarriage.patient.1', timestamp)\n self.assertEqual(identities['identity-uuid']['loss_messaging'], 'TRUE')\n self.assertEqual(identities['identity-uuid']['optout_reason'], 'miscarriage')\n self.assertEqual(identities['identity-uuid']['optout_timestamp'], '2020-01-01T00:00:00')\n process_subscription(identities, 'identity-uuid', 'momconnect_prebirth.hw_partial.1', timestamp)\n self.assertEqual(identities['identity-uuid']['public_messaging'], 'TRUE')\n self.assertEqual(identities['identity-uuid']['public_registration_date'], '2020-01-01T00:00:00')\n process_subscription(identities, 
'identity-uuid', 'momconnect_prebirth.hw_full.3', timestamp)\n self.assertEqual(identities['identity-uuid']['prebirth_messaging'], '3')\n process_subscription(identities, 'identity-uuid', 'momconnect_postbirth.hw_full.2', timestamp)\n self.assertEqual(identities['identity-uuid']['postbirth_messaging'], 'TRUE')\n<|end_body_1|>\n", "class_docstring": "", "class_name": "ProcessSubscriptionTests", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ProcessSubscriptionTests:\n\n def test_channel_prefer_whatsapp(self):\n \"\"\"Should set the channel, but never overwrite WhatsApp with SMS\"\"\"\n <|body_0|>\n\n def test_subscription_types(self):\n \"\"\"Should add to the subscription list depending on the name\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n identities = {'identity-uuid': {'uuid': 'identity-uuid'}}\n timestamp = datetime.datetime(2020, 1, 1)\n process_subscription(identities, 'identity-uuid', 'momconnect', timestamp)\n self.assertEqual(identities, {'identity-uuid': {'uuid': 'identity-uuid', 'channel': 'SMS'}})\n process_subscription(identities, 'identity-uuid', 'whatsapp_momconnect', timestamp)\n self.assertEqual(identities, {'identity-uuid': {'uuid': 'identity-uuid', 'channel': 'WhatsApp'}})\n process_subscription(identities, 'identity-uuid', 'momconnect', timestamp)\n self.assertEqual(identities, {'identity-uuid': {'uuid': 'identity-uuid', 'channel': 'WhatsApp'}})\n<|end_body_0|>\n\n<|body_start_1|>\n identities = {'identity-uuid': {'uuid': 'identity-uuid'}}\n timestamp = datetime.datetime(2020, 1, 1)\n process_subscription(identities, 'identity-uuid', 'pmtct_prebirth.hw_full.1', timestamp)\n self.assertEqual(identities['identity-uuid']['pmtct_messaging'], 'TRUE')\n process_subscription(identities, 'identity-uuid', 'loss_miscarriage.patient.1', timestamp)\n self.assertEqual(identities['identity-uuid']['loss_messaging'], 'TRUE')\n self.assertEqual(identities['identity-uuid']['optout_reason'], 'miscarriage')\n self.assertEqual(identities['identity-uuid']['optout_timestamp'], '2020-01-01T00:00:00')\n process_subscription(identities, 'identity-uuid', 'momconnect_prebirth.hw_partial.1', timestamp)\n self.assertEqual(identities['identity-uuid']['public_messaging'], 'TRUE')\n self.assertEqual(identities['identity-uuid']['public_registration_date'], '2020-01-01T00:00:00')\n process_subscription(identities, 'identity-uuid', 'momconnect_prebirth.hw_full.3', timestamp)\n self.assertEqual(identities['identity-uuid']['prebirth_messaging'], '3')\n process_subscription(identities, 'identity-uuid', 'momconnect_postbirth.hw_full.2', timestamp)\n self.assertEqual(identities['identity-uuid']['postbirth_messaging'], 'TRUE')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000153", "length_bytes": 17808, "license_type": "permissive", "methods": [{"docstring": "Should set the channel, but never overwrite WhatsApp with SMS", "name": "test_channel_prefer_whatsapp", "signature": "def test_channel_prefer_whatsapp(self)"}, {"docstring": "Should add to the subscription list depending on the name", "name": "test_subscription_types", "signature": "def test_subscription_types(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_053523", "prompt": "Implement the Python class `ProcessSubscriptionTests` described below.\n\nClass description:\nImplement the ProcessSubscriptionTests class.\n\nMethod signatures and docstrings:\n- def test_channel_prefer_whatsapp(self): Should set the 
channel, but never overwrite WhatsApp with SMS\n- def test_subscription_types(self): Should add to the subscription list depending on the name", "prompted_full_text": "Implement the Python class `ProcessSubscriptionTests` described below.\n\nClass description:\nImplement the ProcessSubscriptionTests class.\n\nMethod signatures and docstrings:\n- def test_channel_prefer_whatsapp(self): Should set the channel, but never overwrite WhatsApp with SMS\n- def test_subscription_types(self): Should add to the subscription list depending on the name\n\n<|skeleton|>\nclass ProcessSubscriptionTests:\n\n def test_channel_prefer_whatsapp(self):\n \"\"\"Should set the channel, but never overwrite WhatsApp with SMS\"\"\"\n <|body_0|>\n\n def test_subscription_types(self):\n \"\"\"Should add to the subscription list depending on the name\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n identities = {'identity-uuid': {'uuid': 'identity-uuid'}}\n timestamp = datetime.datetime(2020, 1, 1)\n process_subscription(identities, 'identity-uuid', 'momconnect', timestamp)\n self.assertEqual(identities, {'identity-uuid': {'uuid': 'identity-uuid', 'channel': 'SMS'}})\n process_subscription(identities, 'identity-uuid', 'whatsapp_momconnect', timestamp)\n self.assertEqual(identities, {'identity-uuid': {'uuid': 'identity-uuid', 'channel': 'WhatsApp'}})\n process_subscription(identities, 'identity-uuid', 'momconnect', timestamp)\n self.assertEqual(identities, {'identity-uuid': {'uuid': 'identity-uuid', 'channel': 'WhatsApp'}})\n<|end_body_0|>\n\n<|body_start_1|>\n identities = {'identity-uuid': {'uuid': 'identity-uuid'}}\n timestamp = datetime.datetime(2020, 1, 1)\n process_subscription(identities, 'identity-uuid', 'pmtct_prebirth.hw_full.1', timestamp)\n self.assertEqual(identities['identity-uuid']['pmtct_messaging'], 'TRUE')\n process_subscription(identities, 'identity-uuid', 'loss_miscarriage.patient.1', timestamp)\n self.assertEqual(identities['identity-uuid']['loss_messaging'], 'TRUE')\n self.assertEqual(identities['identity-uuid']['optout_reason'], 'miscarriage')\n self.assertEqual(identities['identity-uuid']['optout_timestamp'], '2020-01-01T00:00:00')\n process_subscription(identities, 'identity-uuid', 'momconnect_prebirth.hw_partial.1', timestamp)\n self.assertEqual(identities['identity-uuid']['public_messaging'], 'TRUE')\n self.assertEqual(identities['identity-uuid']['public_registration_date'], '2020-01-01T00:00:00')\n process_subscription(identities, 'identity-uuid', 'momconnect_prebirth.hw_full.3', timestamp)\n self.assertEqual(identities['identity-uuid']['prebirth_messaging'], '3')\n process_subscription(identities, 'identity-uuid', 'momconnect_postbirth.hw_full.2', timestamp)\n self.assertEqual(identities['identity-uuid']['postbirth_messaging'], 'TRUE')\n<|end_body_1|>\n", "revision_id": "e1ea0beaf079f4f4d5f9562fb9d9a4f0670f459f", "skeleton": "<|skeleton|>\nclass ProcessSubscriptionTests:\n\n def test_channel_prefer_whatsapp(self):\n \"\"\"Should set the channel, but never overwrite WhatsApp with SMS\"\"\"\n <|body_0|>\n\n def test_subscription_types(self):\n \"\"\"Should add to the subscription list depending on the name\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ProcessSubscriptionTests:\n def test_channel_prefer_whatsapp(self):\n \"\"\"Should set the channel, but never overwrite WhatsApp with SMS\"\"\"\n identities = {'identity-uuid': 
{'uuid': 'identity-uuid'}}\n timestamp = datetime.datetime(2020, 1, 1)\n process_subscription(identities, 'identity-uuid', 'momconnect', timestamp)\n self.assertEqual(identities, {'identity-uuid': {'uuid': 'identity-uuid', 'channel': 'SMS'}})\n process_subscription(identities, 'identity-uuid', 'whatsapp_momconnect', timestamp)\n self.assertEqual(identities, {'identity-uuid': {'uuid': 'identity-uuid', 'channel': 'WhatsApp'}})\n process_subscription(identities, 'identity-uuid', 'momconnect', timestamp)\n self.assertEqual(identities, {'identity-uuid': {'uuid': 'identity-uuid', 'channel': 'WhatsApp'}})\n\n def test_subscription_types(self):\n \"\"\"Should add to the subscription list depending on the name\"\"\"\n identities = {'identity-uuid': {'uuid': 'identity-uuid'}}\n timestamp = datetime.datetime(2020, 1, 1)\n process_subscription(identities, 'identity-uuid', 'pmtct_prebirth.hw_full.1', timestamp)\n self.assertEqual(identities['identity-uuid']['pmtct_messaging'], 'TRUE')\n process_subscription(identities, 'identity-uuid', 'loss_miscarriage.patient.1', timestamp)\n self.assertEqual(identities['identity-uuid']['loss_messaging'], 'TRUE')\n self.assertEqual(identities['identity-uuid']['optout_reason'], 'miscarriage')\n self.assertEqual(identities['identity-uuid']['optout_timestamp'], '2020-01-01T00:00:00')\n process_subscription(identities, 'identity-uuid', 'momconnect_prebirth.hw_partial.1', timestamp)\n self.assertEqual(identities['identity-uuid']['public_messaging'], 'TRUE')\n self.assertEqual(identities['identity-uuid']['public_registration_date'], '2020-01-01T00:00:00')\n process_subscription(identities, 'identity-uuid', 'momconnect_prebirth.hw_full.3', timestamp)\n self.assertEqual(identities['identity-uuid']['prebirth_messaging'], '3')\n process_subscription(identities, 'identity-uuid', 'momconnect_postbirth.hw_full.2', timestamp)\n self.assertEqual(identities['identity-uuid']['postbirth_messaging'], 'TRUE')\n", "source": "the_stack_v2_python_sparse", "source_path": "scripts/migrate_to_rapidpro/test_collect_information.py", "source_repo": "praekeltfoundation/ndoh-hub", "split": "val", "star_events_count": 0}
{"blob_id": "bd1732e004337edca9b21cc9aea9d159b65b5a46", "bodies": ["try:\n existing_reaction = UserReactionOnComment.objects.get(comment=comment, user=profile)\n existing_reaction.delete()\nexcept UserReactionOnComment.DoesNotExist:\n pass\ndata = {'comment': comment.id, 'user': profile, 'reaction': reaction}\nserializer = self.serializer_class(data=data)\nserializer.is_valid(raise_exception=True)\nserializer.save()\nresponse = {'message': 'Reaction successfully set.', 'reaction': serializer.data}\nstatus_code = status.HTTP_201_CREATED\nreturn (response, status_code)", "comment_id = request.data.get('comment_id')\ncomment = find_comment_helper(comment_id)\nuser = request.user\nprofile = Profile.objects.get(user=user)\nreaction = request.data.get('reaction')\ntry:\n existing_same_reaction = UserReactionOnComment.objects.get(comment=comment, user=profile, reaction=reaction)\n existing_same_reaction.delete()\n response = {'message': 'You nolonger `{}` this comment'.format('LIKE' if reaction in [1, '1'] else 'DISLIKE')}\n status_code = status.HTTP_200_OK\nexcept UserReactionOnComment.DoesNotExist:\n response, status_code = self.set_reaction(comment, profile, reaction)\nreturn Response(response, status_code)", "reactions = UserReactionOnComment.objects.all()\nlist_reactions = [format_response(reaction) for reaction in reactions]\nresponse = {'reactions': list_reactions}\nreturn Response(response, status.HTTP_200_OK)"], "bodies_text": "<|body_start_0|>\n try:\n existing_reaction = UserReactionOnComment.objects.get(comment=comment, user=profile)\n existing_reaction.delete()\n except UserReactionOnComment.DoesNotExist:\n pass\n data = {'comment': comment.id, 'user': profile, 'reaction': reaction}\n serializer = self.serializer_class(data=data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n response = {'message': 'Reaction successfully set.', 'reaction': serializer.data}\n status_code = status.HTTP_201_CREATED\n return (response, status_code)\n<|end_body_0|>\n\n<|body_start_1|>\n comment_id = request.data.get('comment_id')\n comment = find_comment_helper(comment_id)\n user = request.user\n profile = Profile.objects.get(user=user)\n reaction = request.data.get('reaction')\n try:\n existing_same_reaction = UserReactionOnComment.objects.get(comment=comment, user=profile, reaction=reaction)\n existing_same_reaction.delete()\n response = {'message': 'You nolonger `{}` this comment'.format('LIKE' if reaction in [1, '1'] else 'DISLIKE')}\n status_code = status.HTTP_200_OK\n except UserReactionOnComment.DoesNotExist:\n response, status_code = self.set_reaction(comment, profile, reaction)\n return Response(response, status_code)\n<|end_body_1|>\n\n<|body_start_2|>\n reactions = UserReactionOnComment.objects.all()\n list_reactions = [format_response(reaction) for reaction in reactions]\n response = {'reactions': list_reactions}\n return Response(response, status.HTTP_200_OK)\n<|end_body_2|>\n", "class_docstring": "Define the view for UserReactionOnComment", "class_name": "UserReactionOnCommentView", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass UserReactionOnCommentView:\n \"\"\"Define the view for UserReactionOnComment\"\"\"\n\n def set_reaction(self, comment, profile, reaction):\n \"\"\"Helper method to set the reaction\"\"\"\n <|body_0|>\n\n def post(self, request):\n \"\"\"Post a new comment reaction\"\"\"\n <|body_1|>\n\n def get(self, request):\n \"\"\"View the reactions on all comments\"\"\"\n 
<|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n existing_reaction = UserReactionOnComment.objects.get(comment=comment, user=profile)\n existing_reaction.delete()\n except UserReactionOnComment.DoesNotExist:\n pass\n data = {'comment': comment.id, 'user': profile, 'reaction': reaction}\n serializer = self.serializer_class(data=data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n response = {'message': 'Reaction successfully set.', 'reaction': serializer.data}\n status_code = status.HTTP_201_CREATED\n return (response, status_code)\n<|end_body_0|>\n\n<|body_start_1|>\n comment_id = request.data.get('comment_id')\n comment = find_comment_helper(comment_id)\n user = request.user\n profile = Profile.objects.get(user=user)\n reaction = request.data.get('reaction')\n try:\n existing_same_reaction = UserReactionOnComment.objects.get(comment=comment, user=profile, reaction=reaction)\n existing_same_reaction.delete()\n response = {'message': 'You no longer `{}` this comment'.format('LIKE' if reaction in [1, '1'] else 'DISLIKE')}\n status_code = status.HTTP_200_OK\n except UserReactionOnComment.DoesNotExist:\n response, status_code = self.set_reaction(comment, profile, reaction)\n return Response(response, status_code)\n<|end_body_1|>\n\n<|body_start_2|>\n reactions = UserReactionOnComment.objects.all()\n list_reactions = [format_response(reaction) for reaction in reactions]\n response = {'reactions': list_reactions}\n return Response(response, status.HTTP_200_OK)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000154", "length_bytes": 6152, "license_type": "permissive", "methods": [{"docstring": "Helper method to set the reaction", "name": "set_reaction", "signature": "def set_reaction(self, comment, profile, reaction)"}, {"docstring": "Post a new comment reaction", "name": "post", "signature": "def post(self, request)"}, {"docstring": "View the reactions on all comments", "name": "get", "signature": "def get(self, request)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_045607", "prompt": "Implement the Python class `UserReactionOnCommentView` described below.\n\nClass description:\nDefine the view for UserReactionOnComment\n\nMethod signatures and docstrings:\n- def set_reaction(self, comment, profile, reaction): Helper method to set the reaction\n- def post(self, request): Post a new comment reaction\n- def get(self, request): View the reactions on all comments", "prompted_full_text": "Implement the Python class `UserReactionOnCommentView` described below.\n\nClass description:\nDefine the view for UserReactionOnComment\n\nMethod signatures and docstrings:\n- def set_reaction(self, comment, profile, reaction): Helper method to set the reaction\n- def post(self, request): Post a new comment reaction\n- def get(self, request): View the reactions on all comments\n\n<|skeleton|>\nclass UserReactionOnCommentView:\n \"\"\"Define the view for UserReactionOnComment\"\"\"\n\n def set_reaction(self, comment, profile, reaction):\n \"\"\"Helper method to set the reaction\"\"\"\n <|body_0|>\n\n def post(self, request):\n \"\"\"Post a new comment reaction\"\"\"\n <|body_1|>\n\n def get(self, request):\n \"\"\"View the reactions on all comments\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n existing_reaction = UserReactionOnComment.objects.get(comment=comment, user=profile)\n existing_reaction.delete()\n except UserReactionOnComment.DoesNotExist:\n pass\n data = {'comment': comment.id, 'user': profile, 'reaction':
reaction}\n serializer = self.serializer_class(data=data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n response = {'message': 'Reaction successfully set.', 'reaction': serializer.data}\n status_code = status.HTTP_201_CREATED\n return (response, status_code)\n<|end_body_0|>\n\n<|body_start_1|>\n comment_id = request.data.get('comment_id')\n comment = find_comment_helper(comment_id)\n user = request.user\n profile = Profile.objects.get(user=user)\n reaction = request.data.get('reaction')\n try:\n existing_same_reaction = UserReactionOnComment.objects.get(comment=comment, user=profile, reaction=reaction)\n existing_same_reaction.delete()\n response = {'message': 'You no longer `{}` this comment'.format('LIKE' if reaction in [1, '1'] else 'DISLIKE')}\n status_code = status.HTTP_200_OK\n except UserReactionOnComment.DoesNotExist:\n response, status_code = self.set_reaction(comment, profile, reaction)\n return Response(response, status_code)\n<|end_body_1|>\n\n<|body_start_2|>\n reactions = UserReactionOnComment.objects.all()\n list_reactions = [format_response(reaction) for reaction in reactions]\n response = {'reactions': list_reactions}\n return Response(response, status.HTTP_200_OK)\n<|end_body_2|>\n", "revision_id": "a304718929936dd4a759d737fb3570d6cc25fb76", "skeleton": "<|skeleton|>\nclass UserReactionOnCommentView:\n \"\"\"Define the view for UserReactionOnComment\"\"\"\n\n def set_reaction(self, comment, profile, reaction):\n \"\"\"Helper method to set the reaction\"\"\"\n <|body_0|>\n\n def post(self, request):\n \"\"\"Post a new comment reaction\"\"\"\n <|body_1|>\n\n def get(self, request):\n \"\"\"View the reactions on all comments\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829,
"solution": "class UserReactionOnCommentView:\n \"\"\"Define the view for UserReactionOnComment\"\"\"\n\n def set_reaction(self, comment, profile, reaction):\n \"\"\"Helper method to set the reaction\"\"\"\n try:\n existing_reaction = UserReactionOnComment.objects.get(comment=comment, user=profile)\n existing_reaction.delete()\n except UserReactionOnComment.DoesNotExist:\n pass\n data = {'comment': comment.id, 'user': profile, 'reaction': reaction}\n serializer = self.serializer_class(data=data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n response = {'message': 'Reaction successfully set.', 'reaction': serializer.data}\n status_code = status.HTTP_201_CREATED\n return (response, status_code)\n\n def post(self, request):\n \"\"\"Post a new comment reaction\"\"\"\n comment_id = request.data.get('comment_id')\n comment = find_comment_helper(comment_id)\n user = request.user\n profile = Profile.objects.get(user=user)\n reaction = request.data.get('reaction')\n try:\n existing_same_reaction = UserReactionOnComment.objects.get(comment=comment, user=profile, reaction=reaction)\n existing_same_reaction.delete()\n response = {'message': 'You no longer `{}` this comment'.format('LIKE' if reaction in [1, '1'] else 'DISLIKE')}\n status_code = status.HTTP_200_OK\n except UserReactionOnComment.DoesNotExist:\n response, status_code = self.set_reaction(comment, profile, reaction)\n return Response(response, status_code)\n\n def get(self, request):\n \"\"\"View the reactions on all comments\"\"\"\n reactions = UserReactionOnComment.objects.all()\n list_reactions = [format_response(reaction) for reaction in reactions]\n response = {'reactions': list_reactions}\n return Response(response, status.HTTP_200_OK)\n", "source": "the_stack_v2_python_sparse", "source_path": "authors/apps/user_comment_reaction/views.py", "source_repo": "andela/ah-jumanji", "split": "val", "star_events_count": 1}
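The record above implements a like/dislike toggle: posting the same reaction twice deletes it (the HTTP 200 branch), while set_reaction clears any previous reaction before saving the new one (the HTTP 201 branch). Below is a minimal, framework-free sketch of those toggle semantics; ReactionStore, the constants, and the dict-backed storage are illustrative stand-ins, not part of the original Django/DRF code.

LIKE, DISLIKE = 1, -1

class ReactionStore:
    def __init__(self):
        # (comment_id, user_id) -> reaction; mirrors the one-reaction-per-user
        # pair that UserReactionOnComment.objects.get(comment=..., user=...) assumes.
        self._reactions = {}

    def toggle(self, comment_id, user_id, reaction):
        key = (comment_id, user_id)
        if self._reactions.get(key) == reaction:
            # Same reaction posted again: undo it (the HTTP 200 branch).
            del self._reactions[key]
            verb = 'LIKE' if reaction == LIKE else 'DISLIKE'
            return {'message': 'You no longer {} this comment'.format(verb)}
        # Drop any previous (different) reaction, then store the new one,
        # matching the delete-then-create flow in set_reaction (the 201 branch).
        self._reactions[key] = reaction
        return {'message': 'Reaction successfully set.', 'reaction': reaction}

store = ReactionStore()
print(store.toggle(10, 'alice', LIKE))     # sets LIKE
print(store.toggle(10, 'alice', DISLIKE))  # replaces LIKE with DISLIKE
print(store.toggle(10, 'alice', DISLIKE))  # toggles DISLIKE off again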
{"blob_id": "67a1e582ee8623f6e7498a73b310bd51c3d5fe1b", "bodies": ["if not parse_node:\n raise TypeError('parse_node cannot be null.')\ntry:\n mapping_value = parse_node.get_child_node('@odata.type').get_str_value()\nexcept AttributeError:\n mapping_value = None\nif mapping_value and mapping_value.casefold() == '#microsoft.graph.security.hostname'.casefold():\n from .hostname import Hostname\n return Hostname()\nif mapping_value and mapping_value.casefold() == '#microsoft.graph.security.ipAddress'.casefold():\n from .ip_address import IpAddress\n return IpAddress()\nreturn Host()", "from .artifact import Artifact\nfrom .host_component import HostComponent\nfrom .host_cookie import HostCookie\nfrom .hostname import Hostname\nfrom .host_reputation import HostReputation\nfrom .host_tracker import HostTracker\nfrom .ip_address import IpAddress\nfrom .passive_dns_record import PassiveDnsRecord\nfrom .artifact import Artifact\nfrom .host_component import HostComponent\nfrom .host_cookie import HostCookie\nfrom .hostname import Hostname\nfrom .host_reputation import HostReputation\nfrom .host_tracker import HostTracker\nfrom .ip_address import IpAddress\nfrom .passive_dns_record import PassiveDnsRecord\nfields: Dict[str, Callable[[Any], None]] = {'components': lambda n: setattr(self, 'components', n.get_collection_of_object_values(HostComponent)), 'cookies': lambda n: setattr(self, 'cookies', n.get_collection_of_object_values(HostCookie)), 'firstSeenDateTime': lambda n: setattr(self, 'first_seen_date_time', n.get_datetime_value()), 'lastSeenDateTime': lambda n: setattr(self, 'last_seen_date_time', n.get_datetime_value()), 'passiveDns': lambda n: setattr(self, 'passive_dns', n.get_collection_of_object_values(PassiveDnsRecord)), 'passiveDnsReverse': lambda n: setattr(self, 'passive_dns_reverse', n.get_collection_of_object_values(PassiveDnsRecord)), 'reputation': lambda n: setattr(self, 'reputation', n.get_object_value(HostReputation)), 'trackers': lambda n: setattr(self, 'trackers', n.get_collection_of_object_values(HostTracker))}\nsuper_fields = super().get_field_deserializers()\nfields.update(super_fields)\nreturn fields", "if not writer:\n raise TypeError('writer cannot be null.')\nsuper().serialize(writer)\nwriter.write_collection_of_object_values('components', self.components)\nwriter.write_collection_of_object_values('cookies', self.cookies)\nwriter.write_datetime_value('firstSeenDateTime', self.first_seen_date_time)\nwriter.write_datetime_value('lastSeenDateTime', self.last_seen_date_time)\nwriter.write_collection_of_object_values('passiveDns', self.passive_dns)\nwriter.write_collection_of_object_values('passiveDnsReverse', self.passive_dns_reverse)\nwriter.write_object_value('reputation', self.reputation)\nwriter.write_collection_of_object_values('trackers', self.trackers)"], "bodies_text": "<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n try:\n mapping_value = parse_node.get_child_node('@odata.type').get_str_value()\n except AttributeError:\n mapping_value = None\n if mapping_value and mapping_value.casefold() == '#microsoft.graph.security.hostname'.casefold():\n from .hostname import Hostname\n return Hostname()\n if mapping_value and mapping_value.casefold() == '#microsoft.graph.security.ipAddress'.casefold():\n from .ip_address import IpAddress\n return IpAddress()\n return Host()\n<|end_body_0|>\n\n<|body_start_1|>\n from .artifact import Artifact\n from .host_component import HostComponent\n from .host_cookie import HostCookie\n from .hostname 
import Hostname\n from .host_reputation import HostReputation\n from .host_tracker import HostTracker\n from .ip_address import IpAddress\n from .passive_dns_record import PassiveDnsRecord\n from .artifact import Artifact\n from .host_component import HostComponent\n from .host_cookie import HostCookie\n from .hostname import Hostname\n from .host_reputation import HostReputation\n from .host_tracker import HostTracker\n from .ip_address import IpAddress\n from .passive_dns_record import PassiveDnsRecord\n fields: Dict[str, Callable[[Any], None]] = {'components': lambda n: setattr(self, 'components', n.get_collection_of_object_values(HostComponent)), 'cookies': lambda n: setattr(self, 'cookies', n.get_collection_of_object_values(HostCookie)), 'firstSeenDateTime': lambda n: setattr(self, 'first_seen_date_time', n.get_datetime_value()), 'lastSeenDateTime': lambda n: setattr(self, 'last_seen_date_time', n.get_datetime_value()), 'passiveDns': lambda n: setattr(self, 'passive_dns', n.get_collection_of_object_values(PassiveDnsRecord)), 'passiveDnsReverse': lambda n: setattr(self, 'passive_dns_reverse', n.get_collection_of_object_values(PassiveDnsRecord)), 'reputation': lambda n: setattr(self, 'reputation', n.get_object_value(HostReputation)), 'trackers': lambda n: setattr(self, 'trackers', n.get_collection_of_object_values(HostTracker))}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_collection_of_object_values('components', self.components)\n writer.write_collection_of_object_values('cookies', self.cookies)\n writer.write_datetime_value('firstSeenDateTime', self.first_seen_date_time)\n writer.write_datetime_value('lastSeenDateTime', self.last_seen_date_time)\n writer.write_collection_of_object_values('passiveDns', self.passive_dns)\n writer.write_collection_of_object_values('passiveDnsReverse', self.passive_dns_reverse)\n writer.write_object_value('reputation', self.reputation)\n writer.write_collection_of_object_values('trackers', self.trackers)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Host", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Host:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> Host:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: Host\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n try:\n mapping_value = parse_node.get_child_node('@odata.type').get_str_value()\n except AttributeError:\n mapping_value = None\n if mapping_value and mapping_value.casefold() == '#microsoft.graph.security.hostname'.casefold():\n from .hostname import Hostname\n return Hostname()\n if mapping_value and mapping_value.casefold() == 
'#microsoft.graph.security.ipAddress'.casefold():\n from .ip_address import IpAddress\n return IpAddress()\n return Host()\n<|end_body_0|>\n\n<|body_start_1|>\n from .artifact import Artifact\n from .host_component import HostComponent\n from .host_cookie import HostCookie\n from .hostname import Hostname\n from .host_reputation import HostReputation\n from .host_tracker import HostTracker\n from .ip_address import IpAddress\n from .passive_dns_record import PassiveDnsRecord\n from .artifact import Artifact\n from .host_component import HostComponent\n from .host_cookie import HostCookie\n from .hostname import Hostname\n from .host_reputation import HostReputation\n from .host_tracker import HostTracker\n from .ip_address import IpAddress\n from .passive_dns_record import PassiveDnsRecord\n fields: Dict[str, Callable[[Any], None]] = {'components': lambda n: setattr(self, 'components', n.get_collection_of_object_values(HostComponent)), 'cookies': lambda n: setattr(self, 'cookies', n.get_collection_of_object_values(HostCookie)), 'firstSeenDateTime': lambda n: setattr(self, 'first_seen_date_time', n.get_datetime_value()), 'lastSeenDateTime': lambda n: setattr(self, 'last_seen_date_time', n.get_datetime_value()), 'passiveDns': lambda n: setattr(self, 'passive_dns', n.get_collection_of_object_values(PassiveDnsRecord)), 'passiveDnsReverse': lambda n: setattr(self, 'passive_dns_reverse', n.get_collection_of_object_values(PassiveDnsRecord)), 'reputation': lambda n: setattr(self, 'reputation', n.get_object_value(HostReputation)), 'trackers': lambda n: setattr(self, 'trackers', n.get_collection_of_object_values(HostTracker))}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_collection_of_object_values('components', self.components)\n writer.write_collection_of_object_values('cookies', self.cookies)\n writer.write_datetime_value('firstSeenDateTime', self.first_seen_date_time)\n writer.write_datetime_value('lastSeenDateTime', self.last_seen_date_time)\n writer.write_collection_of_object_values('passiveDns', self.passive_dns)\n writer.write_collection_of_object_values('passiveDnsReverse', self.passive_dns_reverse)\n writer.write_object_value('reputation', self.reputation)\n writer.write_collection_of_object_values('trackers', self.trackers)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000155", "length_bytes": 6016, "license_type": "permissive", "methods": [{"docstring": "Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: Host", "name": "create_from_discriminator_value", "signature": "def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> Host"}, {"docstring": "The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]", "name": "get_field_deserializers", "signature": "def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]"}, {"docstring": "Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "name": "serialize", "signature": "def serialize(self, writer: SerializationWriter) -> None"}], "n_methods": 3, "prompt": "Implement the Python class `Host` described below.\n\nClass description:\nImplement the Host class.\n\nMethod signatures 
and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> Host: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: Host\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "prompted_full_text": "Implement the Python class `Host` described below.\n\nClass description:\nImplement the Host class.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> Host: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: Host\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model\n\n<|skeleton|>\nclass Host:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> Host:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: Host\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n try:\n mapping_value = parse_node.get_child_node('@odata.type').get_str_value()\n except AttributeError:\n mapping_value = None\n if mapping_value and mapping_value.casefold() == '#microsoft.graph.security.hostname'.casefold():\n from .hostname import Hostname\n return Hostname()\n if mapping_value and mapping_value.casefold() == '#microsoft.graph.security.ipAddress'.casefold():\n from .ip_address import IpAddress\n return IpAddress()\n return Host()\n<|end_body_0|>\n\n<|body_start_1|>\n from .artifact import Artifact\n from .host_component import HostComponent\n from .host_cookie import HostCookie\n from .hostname import Hostname\n from .host_reputation import HostReputation\n from .host_tracker import HostTracker\n from .ip_address import IpAddress\n from .passive_dns_record import PassiveDnsRecord\n from .artifact import Artifact\n from .host_component import HostComponent\n from .host_cookie import HostCookie\n from .hostname import Hostname\n from .host_reputation import HostReputation\n from .host_tracker import HostTracker\n from .ip_address import IpAddress\n from .passive_dns_record import PassiveDnsRecord\n fields: Dict[str, Callable[[Any], None]] = {'components': lambda n: setattr(self, 'components', n.get_collection_of_object_values(HostComponent)), 
'cookies': lambda n: setattr(self, 'cookies', n.get_collection_of_object_values(HostCookie)), 'firstSeenDateTime': lambda n: setattr(self, 'first_seen_date_time', n.get_datetime_value()), 'lastSeenDateTime': lambda n: setattr(self, 'last_seen_date_time', n.get_datetime_value()), 'passiveDns': lambda n: setattr(self, 'passive_dns', n.get_collection_of_object_values(PassiveDnsRecord)), 'passiveDnsReverse': lambda n: setattr(self, 'passive_dns_reverse', n.get_collection_of_object_values(PassiveDnsRecord)), 'reputation': lambda n: setattr(self, 'reputation', n.get_object_value(HostReputation)), 'trackers': lambda n: setattr(self, 'trackers', n.get_collection_of_object_values(HostTracker))}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_collection_of_object_values('components', self.components)\n writer.write_collection_of_object_values('cookies', self.cookies)\n writer.write_datetime_value('firstSeenDateTime', self.first_seen_date_time)\n writer.write_datetime_value('lastSeenDateTime', self.last_seen_date_time)\n writer.write_collection_of_object_values('passiveDns', self.passive_dns)\n writer.write_collection_of_object_values('passiveDnsReverse', self.passive_dns_reverse)\n writer.write_object_value('reputation', self.reputation)\n writer.write_collection_of_object_values('trackers', self.trackers)\n<|end_body_2|>\n", "revision_id": "27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949", "skeleton": "<|skeleton|>\nclass Host:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> Host:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: Host\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Host:\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> Host:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: Host\"\"\"\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n try:\n mapping_value = parse_node.get_child_node('@odata.type').get_str_value()\n except AttributeError:\n mapping_value = None\n if mapping_value and mapping_value.casefold() == '#microsoft.graph.security.hostname'.casefold():\n from .hostname import Hostname\n return Hostname()\n if mapping_value and mapping_value.casefold() == '#microsoft.graph.security.ipAddress'.casefold():\n from .ip_address import IpAddress\n return IpAddress()\n return Host()\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n from .artifact import 
Artifact\n from .host_component import HostComponent\n from .host_cookie import HostCookie\n from .hostname import Hostname\n from .host_reputation import HostReputation\n from .host_tracker import HostTracker\n from .ip_address import IpAddress\n from .passive_dns_record import PassiveDnsRecord\n from .artifact import Artifact\n from .host_component import HostComponent\n from .host_cookie import HostCookie\n from .hostname import Hostname\n from .host_reputation import HostReputation\n from .host_tracker import HostTracker\n from .ip_address import IpAddress\n from .passive_dns_record import PassiveDnsRecord\n fields: Dict[str, Callable[[Any], None]] = {'components': lambda n: setattr(self, 'components', n.get_collection_of_object_values(HostComponent)), 'cookies': lambda n: setattr(self, 'cookies', n.get_collection_of_object_values(HostCookie)), 'firstSeenDateTime': lambda n: setattr(self, 'first_seen_date_time', n.get_datetime_value()), 'lastSeenDateTime': lambda n: setattr(self, 'last_seen_date_time', n.get_datetime_value()), 'passiveDns': lambda n: setattr(self, 'passive_dns', n.get_collection_of_object_values(PassiveDnsRecord)), 'passiveDnsReverse': lambda n: setattr(self, 'passive_dns_reverse', n.get_collection_of_object_values(PassiveDnsRecord)), 'reputation': lambda n: setattr(self, 'reputation', n.get_object_value(HostReputation)), 'trackers': lambda n: setattr(self, 'trackers', n.get_collection_of_object_values(HostTracker))}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_collection_of_object_values('components', self.components)\n writer.write_collection_of_object_values('cookies', self.cookies)\n writer.write_datetime_value('firstSeenDateTime', self.first_seen_date_time)\n writer.write_datetime_value('lastSeenDateTime', self.last_seen_date_time)\n writer.write_collection_of_object_values('passiveDns', self.passive_dns)\n writer.write_collection_of_object_values('passiveDnsReverse', self.passive_dns_reverse)\n writer.write_object_value('reputation', self.reputation)\n writer.write_collection_of_object_values('trackers', self.trackers)\n", "source": "the_stack_v2_python_sparse", "source_path": "msgraph/generated/models/security/host.py", "source_repo": "microsoftgraph/msgraph-sdk-python", "split": "val", "star_events_count": 135}
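Host.create_from_discriminator_value in the record above dispatches on the @odata.type discriminator using case-insensitive casefold() comparisons, falling back to the base class. The standalone sketch below shows that dispatch pattern only; the dict-based parse node and the registry are assumptions for illustration, not the Kiota ParseNode API, and the classes are re-implemented stand-ins.

class Host: pass
class Hostname(Host): pass
class IpAddress(Host): pass

# Keys are stored pre-casefolded so lookups stay case-insensitive.
_DISCRIMINATORS = {
    '#microsoft.graph.security.hostname': Hostname,
    '#microsoft.graph.security.ipaddress': IpAddress,
}

def create_from_discriminator_value(node):
    if not node:
        raise TypeError('parse_node cannot be null.')
    # The original reads parse_node.get_child_node('@odata.type') and catches
    # AttributeError when the child is missing; dict.get() has the same effect here.
    mapping_value = node.get('@odata.type')
    if mapping_value:
        return _DISCRIMINATORS.get(mapping_value.casefold(), Host)()
    return Host()

print(type(create_from_discriminator_value(
    {'@odata.type': '#microsoft.graph.security.ipAddress'})).__name__)  # IpAddress
print(type(create_from_discriminator_value({'kind': 'x'})).__name__)    # Host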
{"blob_id": "c8d2eaf019620e142bdf6922820d2d2048ab44c8", "bodies": ["with shelve.open(Constants.SONAR_DB_PATH) as db:\n tools = db['tool']\n try:\n _dict = tools[tool_name]\n except KeyError:\n _dict = {'versions': [], 'executable': {}, 'script': {}}\n if version in _dict['versions']:\n logger.error(\"%s already exists. Use 'edit' to modify existing tools\", version)\n raise SonarInvalidOpError\n _dict['versions'].append(version)\n _dict['executable']['cad'] = cad_exe\n _dict['executable']['hls'] = hls_exe\n _dict['executable']['sim'] = sim_exe\n _dict['script'][version] = script\n tools[tool_name] = _dict\n db['tool'] = tools", "with shelve.open(Constants.SONAR_DB_PATH) as db:\n tools = db['tool']\nif tool is None:\n return tools\nif tool not in tools:\n raise SonarInvalidArgError\nreturn tools[tool]", "with shelve.open(Constants.SONAR_DB_PATH) as db:\n active = db['active']\n return {k: active[k] for k in ('cad', 'hls', 'sim')}", "with shelve.open(Constants.SONAR_DB_PATH) as db:\n db['tool'] = {}\n active = db['active']\n for key in ('cad', 'hls', 'sim'):\n active[key] = None\n db['active'] = active", "with shelve.open(Constants.SONAR_DB_PATH) as db:\n active = db['active']\n active['cad'] = cad_tool\n active['hls'] = hls_tool\n active['sim'] = sim_tool\n tools = db['tool']\n with open(Constants.SONAR_SHELL_TOOL_SOURCE, 'w') as f:\n script = []\n for key in ['cad', 'sim', 'hls']:\n tool = active[key]\n if tool:\n tool_id, version = tool\n tool_script = tools[tool_id]['script'][version]\n if tool_script not in script:\n script.append(tool_script)\n f.write('\\n'.join(script))\n db['active'] = active", "if os.path.exists(Constants.SONAR_SHELL_TOOL_SOURCE):\n os.remove(Constants.SONAR_SHELL_TOOL_SOURCE)\nwith shelve.open(Constants.SONAR_DB_PATH) as db:\n active = db['active']\n active['cad'] = None\n active['hls'] = None\n active['sim'] = None\n db['active'] = active"], "bodies_text": "<|body_start_0|>\n with shelve.open(Constants.SONAR_DB_PATH) as db:\n tools = db['tool']\n try:\n _dict = tools[tool_name]\n except KeyError:\n _dict = {'versions': [], 'executable': {}, 'script': {}}\n if version in _dict['versions']:\n logger.error(\"%s already exists. 
Use 'edit' to modify existing tools\", version)\n raise SonarInvalidOpError\n _dict['versions'].append(version)\n _dict['executable']['cad'] = cad_exe\n _dict['executable']['hls'] = hls_exe\n _dict['executable']['sim'] = sim_exe\n _dict['script'][version] = script\n tools[tool_name] = _dict\n db['tool'] = tools\n<|end_body_0|>\n\n<|body_start_1|>\n with shelve.open(Constants.SONAR_DB_PATH) as db:\n tools = db['tool']\n if tool is None:\n return tools\n if tool not in tools:\n raise SonarInvalidArgError\n return tools[tool]\n<|end_body_1|>\n\n<|body_start_2|>\n with shelve.open(Constants.SONAR_DB_PATH) as db:\n active = db['active']\n return {k: active[k] for k in ('cad', 'hls', 'sim')}\n<|end_body_2|>\n\n<|body_start_3|>\n with shelve.open(Constants.SONAR_DB_PATH) as db:\n db['tool'] = {}\n active = db['active']\n for key in ('cad', 'hls', 'sim'):\n active[key] = None\n db['active'] = active\n<|end_body_3|>\n\n<|body_start_4|>\n with shelve.open(Constants.SONAR_DB_PATH) as db:\n active = db['active']\n active['cad'] = cad_tool\n active['hls'] = hls_tool\n active['sim'] = sim_tool\n tools = db['tool']\n with open(Constants.SONAR_SHELL_TOOL_SOURCE, 'w') as f:\n script = []\n for key in ['cad', 'sim', 'hls']:\n tool = active[key]\n if tool:\n tool_id, version = tool\n tool_script = tools[tool_id]['script'][version]\n if tool_script not in script:\n script.append(tool_script)\n f.write('\\n'.join(script))\n db['active'] = active\n<|end_body_4|>\n\n<|body_start_5|>\n if os.path.exists(Constants.SONAR_SHELL_TOOL_SOURCE):\n os.remove(Constants.SONAR_SHELL_TOOL_SOURCE)\n with shelve.open(Constants.SONAR_DB_PATH) as db:\n active = db['active']\n active['cad'] = None\n active['hls'] = None\n active['sim'] = None\n db['active'] = active\n<|end_body_5|>\n", "class_docstring": "Manage the tools in the database", "class_name": "Tool", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Tool:\n \"\"\"Manage the tools in the database\"\"\"\n\n def add(tool_name, version, cad_exe, hls_exe, sim_exe, script):\n \"\"\"Add a new tool to the database Args: tool_name (str): Name of the tool version (str): Tool version cad_exe (str): Name of CAD tool executable. None if not applicable hls_exe (str): Name of HLS tool executable. None if not applicable sim_exe (str): Name of simulation tool executable. None if not applicable script (str): Shell script to initialize the tool Raises: SonarInvalidOpError: Raised if attempting to add an already existing tool.\"\"\"\n <|body_0|>\n\n def get(tool=None):\n \"\"\"Get a tool from the database Args: tool (str, optional): Get a particular tool only. Defaults to None. 
Raises: SonarInvalidArgError: Raised if named tool not found in the database Returns: dict: Tool entry from the database\"\"\"\n <|body_1|>\n\n def get_active():\n \"\"\"Get the active tool Returns: dict: Tool entry from the database\"\"\"\n <|body_2|>\n\n def clear():\n \"\"\"Clear all tools from the database\"\"\"\n <|body_3|>\n\n def activate(cad_tool, hls_tool, sim_tool):\n \"\"\"Activate a set of tools Args: cad_tool (str): Name of CAD tool hls_tool (str): Name of HLS tool sim_tool (str): Name of SIM tool\"\"\"\n <|body_4|>\n\n def deactivate():\n \"\"\"Deactivate all active tools\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n with shelve.open(Constants.SONAR_DB_PATH) as db:\n tools = db['tool']\n try:\n _dict = tools[tool_name]\n except KeyError:\n _dict = {'versions': [], 'executable': {}, 'script': {}}\n if version in _dict['versions']:\n logger.error(\"%s already exists. Use 'edit' to modify existing tools\", version)\n raise SonarInvalidOpError\n _dict['versions'].append(version)\n _dict['executable']['cad'] = cad_exe\n _dict['executable']['hls'] = hls_exe\n _dict['executable']['sim'] = sim_exe\n _dict['script'][version] = script\n tools[tool_name] = _dict\n db['tool'] = tools\n<|end_body_0|>\n\n<|body_start_1|>\n with shelve.open(Constants.SONAR_DB_PATH) as db:\n tools = db['tool']\n if tool is None:\n return tools\n if tool not in tools:\n raise SonarInvalidArgError\n return tools[tool]\n<|end_body_1|>\n\n<|body_start_2|>\n with shelve.open(Constants.SONAR_DB_PATH) as db:\n active = db['active']\n return {k: active[k] for k in ('cad', 'hls', 'sim')}\n<|end_body_2|>\n\n<|body_start_3|>\n with shelve.open(Constants.SONAR_DB_PATH) as db:\n db['tool'] = {}\n active = db['active']\n for key in ('cad', 'hls', 'sim'):\n active[key] = None\n db['active'] = active\n<|end_body_3|>\n\n<|body_start_4|>\n with shelve.open(Constants.SONAR_DB_PATH) as db:\n active = db['active']\n active['cad'] = cad_tool\n active['hls'] = hls_tool\n active['sim'] = sim_tool\n tools = db['tool']\n with open(Constants.SONAR_SHELL_TOOL_SOURCE, 'w') as f:\n script = []\n for key in ['cad', 'sim', 'hls']:\n tool = active[key]\n if tool:\n tool_id, version = tool\n tool_script = tools[tool_id]['script'][version]\n if tool_script not in script:\n script.append(tool_script)\n f.write('\\n'.join(script))\n db['active'] = active\n<|end_body_4|>\n\n<|body_start_5|>\n if os.path.exists(Constants.SONAR_SHELL_TOOL_SOURCE):\n os.remove(Constants.SONAR_SHELL_TOOL_SOURCE)\n with shelve.open(Constants.SONAR_DB_PATH) as db:\n active = db['active']\n active['cad'] = None\n active['hls'] = None\n active['sim'] = None\n db['active'] = active\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000156", "length_bytes": 22434, "license_type": "permissive", "methods": [{"docstring": "Add a new tool to the database Args: tool_name (str): Name of the tool version (str): Tool version cad_exe (str): Name of CAD tool executable. None if not applicable hls_exe (str): Name of HLS tool executable. None if not applicable sim_exe (str): Name of simulation tool executable. None if not applicable script (str): Shell script to initialize the tool Raises: SonarInvalidOpError: Raised if attempting to add an already existing tool.", "name": "add", "signature": "def add(tool_name, version, cad_exe, hls_exe, sim_exe, script)"}, {"docstring": "Get a tool from the database Args: tool (str, optional): Get a particular tool only. Defaults to None. 
Raises: SonarInvalidArgError: Raised if named tool not found in the database Returns: dict: Tool entry from the database", "name": "get", "signature": "def get(tool=None)"}, {"docstring": "Get the active tool Returns: dict: Tool entry from the database", "name": "get_active", "signature": "def get_active()"}, {"docstring": "Clear all tools from the database", "name": "clear", "signature": "def clear()"}, {"docstring": "Activate a set of tools Args: cad_tool (str): Name of CAD tool hls_tool (str): Name of HLS tool sim_tool (str): Name of SIM tool", "name": "activate", "signature": "def activate(cad_tool, hls_tool, sim_tool)"}, {"docstring": "Deactivate all active tools", "name": "deactivate", "signature": "def deactivate()"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_train_011959", "prompt": "Implement the Python class `Tool` described below.\n\nClass description:\nManage the tools in the database\n\nMethod signatures and docstrings:\n- def add(tool_name, version, cad_exe, hls_exe, sim_exe, script): Add a new tool to the database Args: tool_name (str): Name of the tool version (str): Tool version cad_exe (str): Name of CAD tool executable. None if not applicable hls_exe (str): Name of HLS tool executable. None if not applicable sim_exe (str): Name of simulation tool executable. None if not applicable script (str): Shell script to initialize the tool Raises: SonarInvalidOpError: Raised if attempting to add an already existing tool.\n- def get(tool=None): Get a tool from the database Args: tool (str, optional): Get a particular tool only. Defaults to None. Raises: SonarInvalidArgError: Raised if named tool not found in the database Returns: dict: Tool entry from the database\n- def get_active(): Get the active tool Returns: dict: Tool entry from the database\n- def clear(): Clear all tools from the database\n- def activate(cad_tool, hls_tool, sim_tool): Activate a set of tools Args: cad_tool (str): Name of CAD tool hls_tool (str): Name of HLS tool sim_tool (str): Name of SIM tool\n- def deactivate(): Deactivate all active tools", "prompted_full_text": "Implement the Python class `Tool` described below.\n\nClass description:\nManage the tools in the database\n\nMethod signatures and docstrings:\n- def add(tool_name, version, cad_exe, hls_exe, sim_exe, script): Add a new tool to the database Args: tool_name (str): Name of the tool version (str): Tool version cad_exe (str): Name of CAD tool executable. None if not applicable hls_exe (str): Name of HLS tool executable. None if not applicable sim_exe (str): Name of simulation tool executable. None if not applicable script (str): Shell script to initialize the tool Raises: SonarInvalidOpError: Raised if attempting to add an already existing tool.\n- def get(tool=None): Get a tool from the database Args: tool (str, optional): Get a particular tool only. Defaults to None. 
Raises: SonarInvalidArgError: Raised if named tool not found in the database Returns: dict: Tool entry from the database\n- def get_active(): Get the active tool Returns: dict: Tool entry from the database\n- def clear(): Clear all tools from the database\n- def activate(cad_tool, hls_tool, sim_tool): Activate a set of tools Args: cad_tool (str): Name of CAD tool hls_tool (str): Name of HLS tool sim_tool (str): Name of SIM tool\n- def deactivate(): Deactivate all active tools\n\n<|skeleton|>\nclass Tool:\n \"\"\"Manage the tools in the database\"\"\"\n\n def add(tool_name, version, cad_exe, hls_exe, sim_exe, script):\n \"\"\"Add a new tool to the database Args: tool_name (str): Name of the tool version (str): Tool version cad_exe (str): Name of CAD tool executable. None if not applicable hls_exe (str): Name of HLS tool executable. None if not applicable sim_exe (str): Name of simulation tool executable. None if not applicable script (str): Shell script to initialize the tool Raises: SonarInvalidOpError: Raised if attempting to add an already existing tool.\"\"\"\n <|body_0|>\n\n def get(tool=None):\n \"\"\"Get a tool from the database Args: tool (str, optional): Get a particular tool only. Defaults to None. Raises: SonarInvalidArgError: Raised if named tool not found in the database Returns: dict: Tool entry from the database\"\"\"\n <|body_1|>\n\n def get_active():\n \"\"\"Get the active tool Returns: dict: Tool entry from the database\"\"\"\n <|body_2|>\n\n def clear():\n \"\"\"Clear all tools from the database\"\"\"\n <|body_3|>\n\n def activate(cad_tool, hls_tool, sim_tool):\n \"\"\"Activate a set of tools Args: cad_tool (str): Name of CAD tool hls_tool (str): Name of HLS tool sim_tool (str): Name of SIM tool\"\"\"\n <|body_4|>\n\n def deactivate():\n \"\"\"Deactivate all active tools\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n with shelve.open(Constants.SONAR_DB_PATH) as db:\n tools = db['tool']\n try:\n _dict = tools[tool_name]\n except KeyError:\n _dict = {'versions': [], 'executable': {}, 'script': {}}\n if version in _dict['versions']:\n logger.error(\"%s already exists. 
Use 'edit' to modify existing tools\", version)\n raise SonarInvalidOpError\n _dict['versions'].append(version)\n _dict['executable']['cad'] = cad_exe\n _dict['executable']['hls'] = hls_exe\n _dict['executable']['sim'] = sim_exe\n _dict['script'][version] = script\n tools[tool_name] = _dict\n db['tool'] = tools\n<|end_body_0|>\n\n<|body_start_1|>\n with shelve.open(Constants.SONAR_DB_PATH) as db:\n tools = db['tool']\n if tool is None:\n return tools\n if tool not in tools:\n raise SonarInvalidArgError\n return tools[tool]\n<|end_body_1|>\n\n<|body_start_2|>\n with shelve.open(Constants.SONAR_DB_PATH) as db:\n active = db['active']\n return {k: active[k] for k in ('cad', 'hls', 'sim')}\n<|end_body_2|>\n\n<|body_start_3|>\n with shelve.open(Constants.SONAR_DB_PATH) as db:\n db['tool'] = {}\n active = db['active']\n for key in ('cad', 'hls', 'sim'):\n active[key] = None\n db['active'] = active\n<|end_body_3|>\n\n<|body_start_4|>\n with shelve.open(Constants.SONAR_DB_PATH) as db:\n active = db['active']\n active['cad'] = cad_tool\n active['hls'] = hls_tool\n active['sim'] = sim_tool\n tools = db['tool']\n with open(Constants.SONAR_SHELL_TOOL_SOURCE, 'w') as f:\n script = []\n for key in ['cad', 'sim', 'hls']:\n tool = active[key]\n if tool:\n tool_id, version = tool\n tool_script = tools[tool_id]['script'][version]\n if tool_script not in script:\n script.append(tool_script)\n f.write('\\n'.join(script))\n db['active'] = active\n<|end_body_4|>\n\n<|body_start_5|>\n if os.path.exists(Constants.SONAR_SHELL_TOOL_SOURCE):\n os.remove(Constants.SONAR_SHELL_TOOL_SOURCE)\n with shelve.open(Constants.SONAR_DB_PATH) as db:\n active = db['active']\n active['cad'] = None\n active['hls'] = None\n active['sim'] = None\n db['active'] = active\n<|end_body_5|>\n", "revision_id": "99de16dd16d0aa77734584e67263c78a37abef86", "skeleton": "<|skeleton|>\nclass Tool:\n \"\"\"Manage the tools in the database\"\"\"\n\n def add(tool_name, version, cad_exe, hls_exe, sim_exe, script):\n \"\"\"Add a new tool to the database Args: tool_name (str): Name of the tool version (str): Tool version cad_exe (str): Name of CAD tool executable. None if not applicable hls_exe (str): Name of HLS tool executable. None if not applicable sim_exe (str): Name of simulation tool executable. None if not applicable script (str): Shell script to initialize the tool Raises: SonarInvalidOpError: Raised if attempting to add an already existing tool.\"\"\"\n <|body_0|>\n\n def get(tool=None):\n \"\"\"Get a tool from the database Args: tool (str, optional): Get a particular tool only. Defaults to None. 
Raises: SonarInvalidArgError: Raised if named tool not found in the database Returns: dict: Tool entry from the database\"\"\"\n <|body_1|>\n\n def get_active():\n \"\"\"Get the active tool Returns: dict: Tool entry from the database\"\"\"\n <|body_2|>\n\n def clear():\n \"\"\"Clear all tools from the database\"\"\"\n <|body_3|>\n\n def activate(cad_tool, hls_tool, sim_tool):\n \"\"\"Activate a set of tools Args: cad_tool (str): Name of CAD tool hls_tool (str): Name of HLS tool sim_tool (str): Name of SIM tool\"\"\"\n <|body_4|>\n\n def deactivate():\n \"\"\"Deactivate all active tools\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Tool:\n \"\"\"Manage the tools in the database\"\"\"\n\n def add(tool_name, version, cad_exe, hls_exe, sim_exe, script):\n \"\"\"Add a new tool to the database Args: tool_name (str): Name of the tool version (str): Tool version cad_exe (str): Name of CAD tool executable. None if not applicable hls_exe (str): Name of HLS tool executable. None if not applicable sim_exe (str): Name of simulation tool executable. None if not applicable script (str): Shell script to initialize the tool Raises: SonarInvalidOpError: Raised if attempting to add an already existing tool.\"\"\"\n with shelve.open(Constants.SONAR_DB_PATH) as db:\n tools = db['tool']\n try:\n _dict = tools[tool_name]\n except KeyError:\n _dict = {'versions': [], 'executable': {}, 'script': {}}\n if version in _dict['versions']:\n logger.error(\"%s already exists. Use 'edit' to modify existing tools\", version)\n raise SonarInvalidOpError\n _dict['versions'].append(version)\n _dict['executable']['cad'] = cad_exe\n _dict['executable']['hls'] = hls_exe\n _dict['executable']['sim'] = sim_exe\n _dict['script'][version] = script\n tools[tool_name] = _dict\n db['tool'] = tools\n\n def get(tool=None):\n \"\"\"Get a tool from the database Args: tool (str, optional): Get a particular tool only. Defaults to None. 
Raises: SonarInvalidArgError: Raised if named tool not found in the database Returns: dict: Tool entry from the database\"\"\"\n with shelve.open(Constants.SONAR_DB_PATH) as db:\n tools = db['tool']\n if tool is None:\n return tools\n if tool not in tools:\n raise SonarInvalidArgError\n return tools[tool]\n\n def get_active():\n \"\"\"Get the active tool Returns: dict: Tool entry from the database\"\"\"\n with shelve.open(Constants.SONAR_DB_PATH) as db:\n active = db['active']\n return {k: active[k] for k in ('cad', 'hls', 'sim')}\n\n def clear():\n \"\"\"Clear all tools from the database\"\"\"\n with shelve.open(Constants.SONAR_DB_PATH) as db:\n db['tool'] = {}\n active = db['active']\n for key in ('cad', 'hls', 'sim'):\n active[key] = None\n db['active'] = active\n\n def activate(cad_tool, hls_tool, sim_tool):\n \"\"\"Activate a set of tools Args: cad_tool (str): Name of CAD tool hls_tool (str): Name of HLS tool sim_tool (str): Name of SIM tool\"\"\"\n with shelve.open(Constants.SONAR_DB_PATH) as db:\n active = db['active']\n active['cad'] = cad_tool\n active['hls'] = hls_tool\n active['sim'] = sim_tool\n tools = db['tool']\n with open(Constants.SONAR_SHELL_TOOL_SOURCE, 'w') as f:\n script = []\n for key in ['cad', 'sim', 'hls']:\n tool = active[key]\n if tool:\n tool_id, version = tool\n tool_script = tools[tool_id]['script'][version]\n if tool_script not in script:\n script.append(tool_script)\n f.write('\\n'.join(script))\n db['active'] = active\n\n def deactivate():\n \"\"\"Deactivate all active tools\"\"\"\n if os.path.exists(Constants.SONAR_SHELL_TOOL_SOURCE):\n os.remove(Constants.SONAR_SHELL_TOOL_SOURCE)\n with shelve.open(Constants.SONAR_DB_PATH) as db:\n active = db['active']\n active['cad'] = None\n active['hls'] = None\n active['sim'] = None\n db['active'] = active\n", "source": "the_stack_v2_python_sparse", "source_path": "sonar/database.py", "source_repo": "Zyk-Hyphen/sonar", "split": "val", "star_events_count": 0}
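A detail worth noting in the Tool record above: every method ends by assigning the whole object back (db['tool'] = tools, db['active'] = active) because shelve opened without writeback=True only persists values on key assignment, not on in-place mutation of a fetched object. A small self-contained demonstration of that behavior follows; the temporary path and the 'vivado' entry are illustrative.

import os, shelve, tempfile

path = os.path.join(tempfile.mkdtemp(), 'demo_db')

with shelve.open(path) as db:
    db['tool'] = {}

# In-place mutation of a fetched value is silently lost without writeback=True:
with shelve.open(path) as db:
    db['tool']['vivado'] = {'versions': ['2023.1']}
with shelve.open(path) as db:
    print(db['tool'])  # {} -- the mutation was not persisted

# The record's pattern: fetch, mutate, then assign the whole object back.
with shelve.open(path) as db:
    tools = db['tool']
    tools['vivado'] = {'versions': ['2023.1']}
    db['tool'] = tools
with shelve.open(path) as db:
    print(db['tool'])  # {'vivado': {'versions': ['2023.1']}}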
{"blob_id": "87a368408756c0dfec2f5f4fd0813045dbd19d0b", "bodies": ["self.num_units = num_units\nself.layer_norm = layer_norm\nself.recurrent_dropout = recurrent_dropout\nself.leak_factor = leak_factor", "with tf.variable_scope(scope or type(self).__name__):\n lstm_cell_fw = rnn_cell.LayerNormBasicLeakLSTMCell(num_units=self.num_units, leak_factor=self.leak_factor, layer_norm=self.layer_norm, dropout_keep_prob=self.recurrent_dropout, reuse=tf.get_variable_scope().reuse)\n lstm_cell_bw = rnn_cell.LayerNormBasicLeakLSTMCell(self.num_units, leak_factor=self.leak_factor, layer_norm=self.layer_norm, dropout_keep_prob=self.recurrent_dropout, reuse=tf.get_variable_scope().reuse)\n outputs_tupple, _ = bidirectional_dynamic_rnn(lstm_cell_fw, lstm_cell_bw, inputs, dtype=tf.float32, sequence_length=sequence_length)\n outputs = tf.concat(outputs_tupple, 2)\n return outputs"], "bodies_text": "<|body_start_0|>\n self.num_units = num_units\n self.layer_norm = layer_norm\n self.recurrent_dropout = recurrent_dropout\n self.leak_factor = leak_factor\n<|end_body_0|>\n\n<|body_start_1|>\n with tf.variable_scope(scope or type(self).__name__):\n lstm_cell_fw = rnn_cell.LayerNormBasicLeakLSTMCell(num_units=self.num_units, leak_factor=self.leak_factor, layer_norm=self.layer_norm, dropout_keep_prob=self.recurrent_dropout, reuse=tf.get_variable_scope().reuse)\n lstm_cell_bw = rnn_cell.LayerNormBasicLeakLSTMCell(self.num_units, leak_factor=self.leak_factor, layer_norm=self.layer_norm, dropout_keep_prob=self.recurrent_dropout, reuse=tf.get_variable_scope().reuse)\n outputs_tupple, _ = bidirectional_dynamic_rnn(lstm_cell_fw, lstm_cell_bw, inputs, dtype=tf.float32, sequence_length=sequence_length)\n outputs = tf.concat(outputs_tupple, 2)\n return outputs\n<|end_body_1|>\n", "class_docstring": "a leaky BLSTM layer", "class_name": "LeakyBLSTMLayer", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LeakyBLSTMLayer:\n \"\"\"a leaky BLSTM layer\"\"\"\n\n def __init__(self, num_units, layer_norm=False, recurrent_dropout=1.0, leak_factor=1.0):\n \"\"\"LeakyBLSTMLayer constructor Args: num_units: The number of units in the one directon layer_norm: whether layer normalization should be applied recurrent_dropout: the recurrent dropout keep probability leak_factor: the leak factor (if 1, there is no leakage)\"\"\"\n <|body_0|>\n\n def __call__(self, inputs, sequence_length, scope=None):\n \"\"\"Create the variables and do the forward computation Args: inputs: the input to the layer as a [batch_size, max_length, dim] tensor sequence_length: the length of the input sequences as a [batch_size] tensor scope: The variable scope sets the namespace under which the variables created during this call will be stored. 
Returns: the output of the layer\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.num_units = num_units\n self.layer_norm = layer_norm\n self.recurrent_dropout = recurrent_dropout\n self.leak_factor = leak_factor\n<|end_body_0|>\n\n<|body_start_1|>\n with tf.variable_scope(scope or type(self).__name__):\n lstm_cell_fw = rnn_cell.LayerNormBasicLeakLSTMCell(num_units=self.num_units, leak_factor=self.leak_factor, layer_norm=self.layer_norm, dropout_keep_prob=self.recurrent_dropout, reuse=tf.get_variable_scope().reuse)\n lstm_cell_bw = rnn_cell.LayerNormBasicLeakLSTMCell(self.num_units, leak_factor=self.leak_factor, layer_norm=self.layer_norm, dropout_keep_prob=self.recurrent_dropout, reuse=tf.get_variable_scope().reuse)\n outputs_tupple, _ = bidirectional_dynamic_rnn(lstm_cell_fw, lstm_cell_bw, inputs, dtype=tf.float32, sequence_length=sequence_length)\n outputs = tf.concat(outputs_tupple, 2)\n return outputs\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000157", "length_bytes": 49091, "license_type": "permissive", "methods": [{"docstring": "LeakyBLSTMLayer constructor Args: num_units: The number of units in the one directon layer_norm: whether layer normalization should be applied recurrent_dropout: the recurrent dropout keep probability leak_factor: the leak factor (if 1, there is no leakage)", "name": "__init__", "signature": "def __init__(self, num_units, layer_norm=False, recurrent_dropout=1.0, leak_factor=1.0)"}, {"docstring": "Create the variables and do the forward computation Args: inputs: the input to the layer as a [batch_size, max_length, dim] tensor sequence_length: the length of the input sequences as a [batch_size] tensor scope: The variable scope sets the namespace under which the variables created during this call will be stored. Returns: the output of the layer", "name": "__call__", "signature": "def __call__(self, inputs, sequence_length, scope=None)"}], "n_methods": 2, "prompt": "Implement the Python class `LeakyBLSTMLayer` described below.\n\nClass description:\na leaky BLSTM layer\n\nMethod signatures and docstrings:\n- def __init__(self, num_units, layer_norm=False, recurrent_dropout=1.0, leak_factor=1.0): LeakyBLSTMLayer constructor Args: num_units: The number of units in the one directon layer_norm: whether layer normalization should be applied recurrent_dropout: the recurrent dropout keep probability leak_factor: the leak factor (if 1, there is no leakage)\n- def __call__(self, inputs, sequence_length, scope=None): Create the variables and do the forward computation Args: inputs: the input to the layer as a [batch_size, max_length, dim] tensor sequence_length: the length of the input sequences as a [batch_size] tensor scope: The variable scope sets the namespace under which the variables created during this call will be stored. 
Returns: the output of the layer", "prompted_full_text": "Implement the Python class `LeakyBLSTMLayer` described below.\n\nClass description:\na leaky BLSTM layer\n\nMethod signatures and docstrings:\n- def __init__(self, num_units, layer_norm=False, recurrent_dropout=1.0, leak_factor=1.0): LeakyBLSTMLayer constructor Args: num_units: The number of units in the one directon layer_norm: whether layer normalization should be applied recurrent_dropout: the recurrent dropout keep probability leak_factor: the leak factor (if 1, there is no leakage)\n- def __call__(self, inputs, sequence_length, scope=None): Create the variables and do the forward computation Args: inputs: the input to the layer as a [batch_size, max_length, dim] tensor sequence_length: the length of the input sequences as a [batch_size] tensor scope: The variable scope sets the namespace under which the variables created during this call will be stored. Returns: the output of the layer\n\n<|skeleton|>\nclass LeakyBLSTMLayer:\n \"\"\"a leaky BLSTM layer\"\"\"\n\n def __init__(self, num_units, layer_norm=False, recurrent_dropout=1.0, leak_factor=1.0):\n \"\"\"LeakyBLSTMLayer constructor Args: num_units: The number of units in the one directon layer_norm: whether layer normalization should be applied recurrent_dropout: the recurrent dropout keep probability leak_factor: the leak factor (if 1, there is no leakage)\"\"\"\n <|body_0|>\n\n def __call__(self, inputs, sequence_length, scope=None):\n \"\"\"Create the variables and do the forward computation Args: inputs: the input to the layer as a [batch_size, max_length, dim] tensor sequence_length: the length of the input sequences as a [batch_size] tensor scope: The variable scope sets the namespace under which the variables created during this call will be stored. 
Returns: the output of the layer\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.num_units = num_units\n self.layer_norm = layer_norm\n self.recurrent_dropout = recurrent_dropout\n self.leak_factor = leak_factor\n<|end_body_0|>\n\n<|body_start_1|>\n with tf.variable_scope(scope or type(self).__name__):\n lstm_cell_fw = rnn_cell.LayerNormBasicLeakLSTMCell(num_units=self.num_units, leak_factor=self.leak_factor, layer_norm=self.layer_norm, dropout_keep_prob=self.recurrent_dropout, reuse=tf.get_variable_scope().reuse)\n lstm_cell_bw = rnn_cell.LayerNormBasicLeakLSTMCell(self.num_units, leak_factor=self.leak_factor, layer_norm=self.layer_norm, dropout_keep_prob=self.recurrent_dropout, reuse=tf.get_variable_scope().reuse)\n outputs_tupple, _ = bidirectional_dynamic_rnn(lstm_cell_fw, lstm_cell_bw, inputs, dtype=tf.float32, sequence_length=sequence_length)\n outputs = tf.concat(outputs_tupple, 2)\n return outputs\n<|end_body_1|>\n", "revision_id": "5e862cbf846d45b8a317f87588533f3fde9f0726", "skeleton": "<|skeleton|>\nclass LeakyBLSTMLayer:\n \"\"\"a leaky BLSTM layer\"\"\"\n\n def __init__(self, num_units, layer_norm=False, recurrent_dropout=1.0, leak_factor=1.0):\n \"\"\"LeakyBLSTMLayer constructor Args: num_units: The number of units in the one directon layer_norm: whether layer normalization should be applied recurrent_dropout: the recurrent dropout keep probability leak_factor: the leak factor (if 1, there is no leakage)\"\"\"\n <|body_0|>\n\n def __call__(self, inputs, sequence_length, scope=None):\n \"\"\"Create the variables and do the forward computation Args: inputs: the input to the layer as a [batch_size, max_length, dim] tensor sequence_length: the length of the input sequences as a [batch_size] tensor scope: The variable scope sets the namespace under which the variables created during this call will be stored. Returns: the output of the layer\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class LeakyBLSTMLayer:\n \"\"\"a leaky BLSTM layer\"\"\"\n\n def __init__(self, num_units, layer_norm=False, recurrent_dropout=1.0, leak_factor=1.0):\n \"\"\"LeakyBLSTMLayer constructor Args: num_units: The number of units in the one directon layer_norm: whether layer normalization should be applied recurrent_dropout: the recurrent dropout keep probability leak_factor: the leak factor (if 1, there is no leakage)\"\"\"\n self.num_units = num_units\n self.layer_norm = layer_norm\n self.recurrent_dropout = recurrent_dropout\n self.leak_factor = leak_factor\n\n def __call__(self, inputs, sequence_length, scope=None):\n \"\"\"Create the variables and do the forward computation Args: inputs: the input to the layer as a [batch_size, max_length, dim] tensor sequence_length: the length of the input sequences as a [batch_size] tensor scope: The variable scope sets the namespace under which the variables created during this call will be stored. 
Returns: the output of the layer\"\"\"\n with tf.variable_scope(scope or type(self).__name__):\n lstm_cell_fw = rnn_cell.LayerNormBasicLeakLSTMCell(num_units=self.num_units, leak_factor=self.leak_factor, layer_norm=self.layer_norm, dropout_keep_prob=self.recurrent_dropout, reuse=tf.get_variable_scope().reuse)\n lstm_cell_bw = rnn_cell.LayerNormBasicLeakLSTMCell(self.num_units, leak_factor=self.leak_factor, layer_norm=self.layer_norm, dropout_keep_prob=self.recurrent_dropout, reuse=tf.get_variable_scope().reuse)\n outputs_tupple, _ = bidirectional_dynamic_rnn(lstm_cell_fw, lstm_cell_bw, inputs, dtype=tf.float32, sequence_length=sequence_length)\n outputs = tf.concat(outputs_tupple, 2)\n return outputs\n", "source": "the_stack_v2_python_sparse", "source_path": "nabu/neuralnetworks/components/layer.py", "source_repo": "JeroenZegers/Nabu-MSSS", "split": "val", "star_events_count": 19}
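The LeakyBLSTMLayer record above is written against the TF1 graph API (tf.variable_scope, bidirectional_dynamic_rnn) and a custom LayerNormBasicLeakLSTMCell. As a rough orientation only, the bidirectional-plus-concat part maps onto the TF2 Keras sketch below; it uses a stock LSTM cell, so the layer-norm and leak-factor behavior of the custom cell is not reproduced, and the shapes are illustrative.

import tensorflow as tf

num_units = 32
blstm = tf.keras.layers.Bidirectional(
    tf.keras.layers.LSTM(num_units, return_sequences=True),
    merge_mode='concat')  # concatenates fw/bw features, like tf.concat(outputs_tupple, 2)

inputs = tf.random.normal([4, 10, 8])        # [batch_size, max_length, dim]
mask = tf.sequence_mask([10, 7, 5, 3], 10)   # stands in for sequence_length
outputs = blstm(inputs, mask=mask)
print(outputs.shape)                          # (4, 10, 64), i.e. 2 * num_units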
{"blob_id": "8f2b6c49682e34d47da3c6f6a708a4e3ec605787", "bodies": ["if not hasattr(cobj, 'f_int_eucken'):\n cobj.f_int_eucken = Var(doc='Dimensionless factor in Eucken formula associated with internal degrees of freedom', units=pyunits.dimensionless)\n set_param_from_config(cobj, param='f_int_eucken')", "units = b.params.get_metadata().derived_units\nM = pyunits.convert(cobj.mw, pyunits.kg / pyunits.mol)\nR = pyunits.convert(Constants.gas_constant, units['heat_capacity_mole'])\nf_int = cobj.f_int_eucken\ntry:\n cp_mol_ig_comp = cobj.config.cp_mol_ig_comp\nexcept AttributeError:\n raise ConfigurationError(f'Cannot find method to calculate cp_mol_ig_comp for component {cobj.local_name}.')\nif not hasattr(b, '_visc_d_phase_comp'):\n b._make_visc_d_phase_comp()\nif hasattr(cp_mol_ig_comp, 'return_expression'):\n cp_func = cp_mol_ig_comp.return_expression\nelse:\n cp_func = cp_mol_ig_comp.cp_mol_ig_comp.return_expression\ntherm_cond = b._visc_d_phase_comp[p, cobj.local_name] / M * (f_int * cp_func(b, cobj, T) + (15 / 4 - 5 * f_int / 2) * R)\nreturn pyunits.convert(therm_cond, units['thermal_conductivity'])"], "bodies_text": "<|body_start_0|>\n if not hasattr(cobj, 'f_int_eucken'):\n cobj.f_int_eucken = Var(doc='Dimensionless factor in Eucken formula associated with internal degrees of freedom', units=pyunits.dimensionless)\n set_param_from_config(cobj, param='f_int_eucken')\n<|end_body_0|>\n\n<|body_start_1|>\n units = b.params.get_metadata().derived_units\n M = pyunits.convert(cobj.mw, pyunits.kg / pyunits.mol)\n R = pyunits.convert(Constants.gas_constant, units['heat_capacity_mole'])\n f_int = cobj.f_int_eucken\n try:\n cp_mol_ig_comp = cobj.config.cp_mol_ig_comp\n except AttributeError:\n raise ConfigurationError(f'Cannot find method to calculate cp_mol_ig_comp for component {cobj.local_name}.')\n if not hasattr(b, '_visc_d_phase_comp'):\n b._make_visc_d_phase_comp()\n if hasattr(cp_mol_ig_comp, 'return_expression'):\n cp_func = cp_mol_ig_comp.return_expression\n else:\n cp_func = cp_mol_ig_comp.cp_mol_ig_comp.return_expression\n therm_cond = b._visc_d_phase_comp[p, cobj.local_name] / M * (f_int * cp_func(b, cobj, T) + (15 / 4 - 5 * f_int / 2) * R)\n return pyunits.convert(therm_cond, units['thermal_conductivity'])\n<|end_body_1|>\n", "class_docstring": "Eucken correlation for thermal conductivity", "class_name": "therm_cond_phase_comp", "detected_licenses": ["BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass therm_cond_phase_comp:\n \"\"\"Eucken correlation for thermal conductivity\"\"\"\n\n def build_parameters(cobj, p):\n \"\"\"Builds dimensionless parameter f_int\"\"\"\n <|body_0|>\n\n def return_expression(b, cobj, p, T):\n \"\"\"Returns expression for therm_cond_phase_comp\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not hasattr(cobj, 'f_int_eucken'):\n cobj.f_int_eucken = Var(doc='Dimensionless factor in Eucken formula associated with internal degrees of freedom', units=pyunits.dimensionless)\n set_param_from_config(cobj, param='f_int_eucken')\n<|end_body_0|>\n\n<|body_start_1|>\n units = b.params.get_metadata().derived_units\n M = pyunits.convert(cobj.mw, pyunits.kg / pyunits.mol)\n R = pyunits.convert(Constants.gas_constant, units['heat_capacity_mole'])\n f_int = cobj.f_int_eucken\n try:\n cp_mol_ig_comp = cobj.config.cp_mol_ig_comp\n except AttributeError:\n raise ConfigurationError(f'Cannot find method to calculate cp_mol_ig_comp for component 
{cobj.local_name}.')\n if not hasattr(b, '_visc_d_phase_comp'):\n b._make_visc_d_phase_comp()\n if hasattr(cp_mol_ig_comp, 'return_expression'):\n cp_func = cp_mol_ig_comp.return_expression\n else:\n cp_func = cp_mol_ig_comp.cp_mol_ig_comp.return_expression\n therm_cond = b._visc_d_phase_comp[p, cobj.local_name] / M * (f_int * cp_func(b, cobj, T) + (15 / 4 - 5 * f_int / 2) * R)\n return pyunits.convert(therm_cond, units['thermal_conductivity'])\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000158", "length_bytes": 3776, "license_type": "permissive", "methods": [{"docstring": "Builds dimensionless parameter f_int", "name": "build_parameters", "signature": "def build_parameters(cobj, p)"}, {"docstring": "Returns expression for therm_cond_phase_comp", "name": "return_expression", "signature": "def return_expression(b, cobj, p, T)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_002368", "prompt": "Implement the Python class `therm_cond_phase_comp` described below.\n\nClass description:\nEucken correlation for thermal conductivity\n\nMethod signatures and docstrings:\n- def build_parameters(cobj, p): Builds dimensionless parameter f_int\n- def return_expression(b, cobj, p, T): Returns expression for therm_cond_phase_comp", "prompted_full_text": "Implement the Python class `therm_cond_phase_comp` described below.\n\nClass description:\nEucken correlation for thermal conductivity\n\nMethod signatures and docstrings:\n- def build_parameters(cobj, p): Builds dimensionless parameter f_int\n- def return_expression(b, cobj, p, T): Returns expression for therm_cond_phase_comp\n\n<|skeleton|>\nclass therm_cond_phase_comp:\n \"\"\"Eucken correlation for thermal conductivity\"\"\"\n\n def build_parameters(cobj, p):\n \"\"\"Builds dimensionless parameter f_int\"\"\"\n <|body_0|>\n\n def return_expression(b, cobj, p, T):\n \"\"\"Returns expression for therm_cond_phase_comp\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not hasattr(cobj, 'f_int_eucken'):\n cobj.f_int_eucken = Var(doc='Dimensionless factor in Eucken formula associated with internal degrees of freedom', units=pyunits.dimensionless)\n set_param_from_config(cobj, param='f_int_eucken')\n<|end_body_0|>\n\n<|body_start_1|>\n units = b.params.get_metadata().derived_units\n M = pyunits.convert(cobj.mw, pyunits.kg / pyunits.mol)\n R = pyunits.convert(Constants.gas_constant, units['heat_capacity_mole'])\n f_int = cobj.f_int_eucken\n try:\n cp_mol_ig_comp = cobj.config.cp_mol_ig_comp\n except AttributeError:\n raise ConfigurationError(f'Cannot find method to calculate cp_mol_ig_comp for component {cobj.local_name}.')\n if not hasattr(b, '_visc_d_phase_comp'):\n b._make_visc_d_phase_comp()\n if hasattr(cp_mol_ig_comp, 'return_expression'):\n cp_func = cp_mol_ig_comp.return_expression\n else:\n cp_func = cp_mol_ig_comp.cp_mol_ig_comp.return_expression\n therm_cond = b._visc_d_phase_comp[p, cobj.local_name] / M * (f_int * cp_func(b, cobj, T) + (15 / 4 - 5 * f_int / 2) * R)\n return pyunits.convert(therm_cond, units['thermal_conductivity'])\n<|end_body_1|>\n", "revision_id": "deacf4c422bc9e50cb347e11a8cbfa0195bd4274", "skeleton": "<|skeleton|>\nclass therm_cond_phase_comp:\n \"\"\"Eucken correlation for thermal conductivity\"\"\"\n\n def build_parameters(cobj, p):\n \"\"\"Builds dimensionless parameter f_int\"\"\"\n <|body_0|>\n\n def return_expression(b, cobj, p, T):\n \"\"\"Returns expression for therm_cond_phase_comp\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": 
"stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class therm_cond_phase_comp:\n \"\"\"Eucken correlation for thermal conductivity\"\"\"\n\n def build_parameters(cobj, p):\n \"\"\"Builds dimensionless parameter f_int\"\"\"\n if not hasattr(cobj, 'f_int_eucken'):\n cobj.f_int_eucken = Var(doc='Dimensionless factor in Eucken formula associated with internal degrees of freedom', units=pyunits.dimensionless)\n set_param_from_config(cobj, param='f_int_eucken')\n\n def return_expression(b, cobj, p, T):\n \"\"\"Returns expression for therm_cond_phase_comp\"\"\"\n units = b.params.get_metadata().derived_units\n M = pyunits.convert(cobj.mw, pyunits.kg / pyunits.mol)\n R = pyunits.convert(Constants.gas_constant, units['heat_capacity_mole'])\n f_int = cobj.f_int_eucken\n try:\n cp_mol_ig_comp = cobj.config.cp_mol_ig_comp\n except AttributeError:\n raise ConfigurationError(f'Cannot find method to calculate cp_mol_ig_comp for component {cobj.local_name}.')\n if not hasattr(b, '_visc_d_phase_comp'):\n b._make_visc_d_phase_comp()\n if hasattr(cp_mol_ig_comp, 'return_expression'):\n cp_func = cp_mol_ig_comp.return_expression\n else:\n cp_func = cp_mol_ig_comp.cp_mol_ig_comp.return_expression\n therm_cond = b._visc_d_phase_comp[p, cobj.local_name] / M * (f_int * cp_func(b, cobj, T) + (15 / 4 - 5 * f_int / 2) * R)\n return pyunits.convert(therm_cond, units['thermal_conductivity'])\n", "source": "the_stack_v2_python_sparse", "source_path": "idaes/models/properties/modular_properties/pure/Eucken.py", "source_repo": "IDAES/idaes-pse", "split": "val", "star_events_count": 173}
{"blob_id": "334ebdcc048920fdc4f728db6b23674c861216ac", "bodies": ["if self.request.method == 'POST':\n self.event['qsize'] = len(options.ids)\n options.esqb.q = options.ids\nelif self.request.method == 'GET':\n options.esqb.q = options.id\noptions.esqb.regexs = self.web_settings.ANNOTATION_ID_REGEX_LIST\noptions.esqb.scopes = self.web_settings.ANNOTATION_DEFAULT_SCOPES\noptions.esqb.version = True\noptions = super().pre_query_builder_hook(options)\nreturn options", "if isinstance(res, dict):\n if not res.get('hits'):\n template = self.web_settings.ID_NOT_FOUND_TEMPLATE\n reason = template.format(bid=options.esqb.q)\n raise EndRequest(404, reason=reason)\n if len(res['hits']) > 1:\n raise EndRequest(404, reason='not a unique id.')\n res = res['hits'][0]\n res.pop('_score', None)\nelif isinstance(res, list):\n for hit in res:\n hit.pop('_score', None)\nreturn res"], "bodies_text": "<|body_start_0|>\n if self.request.method == 'POST':\n self.event['qsize'] = len(options.ids)\n options.esqb.q = options.ids\n elif self.request.method == 'GET':\n options.esqb.q = options.id\n options.esqb.regexs = self.web_settings.ANNOTATION_ID_REGEX_LIST\n options.esqb.scopes = self.web_settings.ANNOTATION_DEFAULT_SCOPES\n options.esqb.version = True\n options = super().pre_query_builder_hook(options)\n return options\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(res, dict):\n if not res.get('hits'):\n template = self.web_settings.ID_NOT_FOUND_TEMPLATE\n reason = template.format(bid=options.esqb.q)\n raise EndRequest(404, reason=reason)\n if len(res['hits']) > 1:\n raise EndRequest(404, reason='not a unique id.')\n res = res['hits'][0]\n res.pop('_score', None)\n elif isinstance(res, list):\n for hit in res:\n hit.pop('_score', None)\n return res\n<|end_body_1|>\n", "class_docstring": "Biothings Annotation Endpoint URL pattern examples: /{pre}/{ver}/{typ}/? /{pre}/{ver}/{typ}/([^\\\\/]+)/? queries a term against a pre-determined field that represents the id of a document, like _id and dbsnp.rsid GET -> {...} POST -> [{...}, ...]", "class_name": "BiothingHandler", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BiothingHandler:\n \"\"\"Biothings Annotation Endpoint URL pattern examples: /{pre}/{ver}/{typ}/? /{pre}/{ver}/{typ}/([^\\\\/]+)/? queries a term against a pre-determined field that represents the id of a document, like _id and dbsnp.rsid GET -> {...} POST -> [{...}, ...]\"\"\"\n\n def pre_query_builder_hook(self, options):\n \"\"\"Annotation query has default scopes. Annotation query include _version field.\"\"\"\n <|body_0|>\n\n def pre_finish_hook(self, options, res):\n \"\"\"Return single result for GET. Empty results trigger 404 error. 
Keep _version field, Discard _score field.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.request.method == 'POST':\n self.event['qsize'] = len(options.ids)\n options.esqb.q = options.ids\n elif self.request.method == 'GET':\n options.esqb.q = options.id\n options.esqb.regexs = self.web_settings.ANNOTATION_ID_REGEX_LIST\n options.esqb.scopes = self.web_settings.ANNOTATION_DEFAULT_SCOPES\n options.esqb.version = True\n options = super().pre_query_builder_hook(options)\n return options\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(res, dict):\n if not res.get('hits'):\n template = self.web_settings.ID_NOT_FOUND_TEMPLATE\n reason = template.format(bid=options.esqb.q)\n raise EndRequest(404, reason=reason)\n if len(res['hits']) > 1:\n raise EndRequest(404, reason='not a unique id.')\n res = res['hits'][0]\n res.pop('_score', None)\n elif isinstance(res, list):\n for hit in res:\n hit.pop('_score', None)\n return res\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000159", "length_bytes": 10816, "license_type": "permissive", "methods": [{"docstring": "Annotation query has default scopes. Annotation query include _version field.", "name": "pre_query_builder_hook", "signature": "def pre_query_builder_hook(self, options)"}, {"docstring": "Return single result for GET. Empty results trigger 404 error. Keep _version field, Discard _score field.", "name": "pre_finish_hook", "signature": "def pre_finish_hook(self, options, res)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_023788", "prompt": "Implement the Python class `BiothingHandler` described below.\n\nClass description:\nBiothings Annotation Endpoint URL pattern examples: /{pre}/{ver}/{typ}/? /{pre}/{ver}/{typ}/([^\\\\/]+)/? queries a term against a pre-determined field that represents the id of a document, like _id and dbsnp.rsid GET -> {...} POST -> [{...}, ...]\n\nMethod signatures and docstrings:\n- def pre_query_builder_hook(self, options): Annotation query has default scopes. Annotation query include _version field.\n- def pre_finish_hook(self, options, res): Return single result for GET. Empty results trigger 404 error. Keep _version field, Discard _score field.", "prompted_full_text": "Implement the Python class `BiothingHandler` described below.\n\nClass description:\nBiothings Annotation Endpoint URL pattern examples: /{pre}/{ver}/{typ}/? /{pre}/{ver}/{typ}/([^\\\\/]+)/? queries a term against a pre-determined field that represents the id of a document, like _id and dbsnp.rsid GET -> {...} POST -> [{...}, ...]\n\nMethod signatures and docstrings:\n- def pre_query_builder_hook(self, options): Annotation query has default scopes. Annotation query include _version field.\n- def pre_finish_hook(self, options, res): Return single result for GET. Empty results trigger 404 error. Keep _version field, Discard _score field.\n\n<|skeleton|>\nclass BiothingHandler:\n \"\"\"Biothings Annotation Endpoint URL pattern examples: /{pre}/{ver}/{typ}/? /{pre}/{ver}/{typ}/([^\\\\/]+)/? queries a term against a pre-determined field that represents the id of a document, like _id and dbsnp.rsid GET -> {...} POST -> [{...}, ...]\"\"\"\n\n def pre_query_builder_hook(self, options):\n \"\"\"Annotation query has default scopes. Annotation query include _version field.\"\"\"\n <|body_0|>\n\n def pre_finish_hook(self, options, res):\n \"\"\"Return single result for GET. Empty results trigger 404 error. 
Keep _version field, Discard _score field.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.request.method == 'POST':\n self.event['qsize'] = len(options.ids)\n options.esqb.q = options.ids\n elif self.request.method == 'GET':\n options.esqb.q = options.id\n options.esqb.regexs = self.web_settings.ANNOTATION_ID_REGEX_LIST\n options.esqb.scopes = self.web_settings.ANNOTATION_DEFAULT_SCOPES\n options.esqb.version = True\n options = super().pre_query_builder_hook(options)\n return options\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(res, dict):\n if not res.get('hits'):\n template = self.web_settings.ID_NOT_FOUND_TEMPLATE\n reason = template.format(bid=options.esqb.q)\n raise EndRequest(404, reason=reason)\n if len(res['hits']) > 1:\n raise EndRequest(404, reason='not a unique id.')\n res = res['hits'][0]\n res.pop('_score', None)\n elif isinstance(res, list):\n for hit in res:\n hit.pop('_score', None)\n return res\n<|end_body_1|>\n", "revision_id": "8e090d26e559bfadbdbf8d2cebbd228adf7fb2be", "skeleton": "<|skeleton|>\nclass BiothingHandler:\n \"\"\"Biothings Annotation Endpoint URL pattern examples: /{pre}/{ver}/{typ}/? /{pre}/{ver}/{typ}/([^\\\\/]+)/? queries a term against a pre-determined field that represents the id of a document, like _id and dbsnp.rsid GET -> {...} POST -> [{...}, ...]\"\"\"\n\n def pre_query_builder_hook(self, options):\n \"\"\"Annotation query has default scopes. Annotation query include _version field.\"\"\"\n <|body_0|>\n\n def pre_finish_hook(self, options, res):\n \"\"\"Return single result for GET. Empty results trigger 404 error. Keep _version field, Discard _score field.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class BiothingHandler:\n \"\"\"Biothings Annotation Endpoint URL pattern examples: /{pre}/{ver}/{typ}/? /{pre}/{ver}/{typ}/([^\\\\/]+)/? queries a term against a pre-determined field that represents the id of a document, like _id and dbsnp.rsid GET -> {...} POST -> [{...}, ...]\"\"\"\n\n def pre_query_builder_hook(self, options):\n \"\"\"Annotation query has default scopes. Annotation query include _version field.\"\"\"\n if self.request.method == 'POST':\n self.event['qsize'] = len(options.ids)\n options.esqb.q = options.ids\n elif self.request.method == 'GET':\n options.esqb.q = options.id\n options.esqb.regexs = self.web_settings.ANNOTATION_ID_REGEX_LIST\n options.esqb.scopes = self.web_settings.ANNOTATION_DEFAULT_SCOPES\n options.esqb.version = True\n options = super().pre_query_builder_hook(options)\n return options\n\n def pre_finish_hook(self, options, res):\n \"\"\"Return single result for GET. Empty results trigger 404 error. Keep _version field, Discard _score field.\"\"\"\n if isinstance(res, dict):\n if not res.get('hits'):\n template = self.web_settings.ID_NOT_FOUND_TEMPLATE\n reason = template.format(bid=options.esqb.q)\n raise EndRequest(404, reason=reason)\n if len(res['hits']) > 1:\n raise EndRequest(404, reason='not a unique id.')\n res = res['hits'][0]\n res.pop('_score', None)\n elif isinstance(res, list):\n for hit in res:\n hit.pop('_score', None)\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "biothings/web/handlers/es.py", "source_repo": "NCATSTranslator/biothings.api", "split": "val", "star_events_count": 1}
{"blob_id": "7b9c906d6cd3f83f63e95ab467f7d7f9b6f76781", "bodies": ["global dev_plan_list_page, admin_page\ndev_plan_list_page = DevPlanListPage(self.driver)\nadmin_page = AdminPage(self.driver)\nadmin_page.into_subsystem('业务管理')\nadmin_page.select_menu('首页/渠道业务管理/年度发展计划')", "admin_page.select_menu('计划列表')\ndev_plan_list_page.query_by_year(_year='2020')\nassert '2020' in dev_plan_list_page.read_table_cell_value(1, 3), '年度计划查询失败'", "admin_page.select_menu('计划列表')\ndev_plan_list_page.reset_query()\nassert dev_plan_list_page.read_query_year() == '', '重置年度计划查询条件失败'", "admin_page.select_menu('计划列表')\ndev_plan_list_page.click_create_dev_plan()\nassert dev_plan_list_page.check_current_menu('新建年度发展计划'), '点击新建年度发展计划失败'", "admin_page.select_menu('计划列表')\ndev_plan_list_page.click_shi_plan_btn()\nassert dev_plan_list_page.check_current_menu('市级计划列表'), '点击查看渠道管市级计划列表详情失败'", "admin_page.select_menu('计划列表')\ndev_plan_list_page.click_view_plan_btn()\nassert dev_plan_list_page.check_current_menu('年度发展计划详情'), '点击查看年度发展计划详情失败'"], "bodies_text": "<|body_start_0|>\n global dev_plan_list_page, admin_page\n dev_plan_list_page = DevPlanListPage(self.driver)\n admin_page = AdminPage(self.driver)\n admin_page.into_subsystem('业务管理')\n admin_page.select_menu('首页/渠道业务管理/年度发展计划')\n<|end_body_0|>\n\n<|body_start_1|>\n admin_page.select_menu('计划列表')\n dev_plan_list_page.query_by_year(_year='2020')\n assert '2020' in dev_plan_list_page.read_table_cell_value(1, 3), '年度计划查询失败'\n<|end_body_1|>\n\n<|body_start_2|>\n admin_page.select_menu('计划列表')\n dev_plan_list_page.reset_query()\n assert dev_plan_list_page.read_query_year() == '', '重置年度计划查询条件失败'\n<|end_body_2|>\n\n<|body_start_3|>\n admin_page.select_menu('计划列表')\n dev_plan_list_page.click_create_dev_plan()\n assert dev_plan_list_page.check_current_menu('新建年度发展计划'), '点击新建年度发展计划失败'\n<|end_body_3|>\n\n<|body_start_4|>\n admin_page.select_menu('计划列表')\n dev_plan_list_page.click_shi_plan_btn()\n assert dev_plan_list_page.check_current_menu('市级计划列表'), '点击查看渠道管市级计划列表详情失败'\n<|end_body_4|>\n\n<|body_start_5|>\n admin_page.select_menu('计划列表')\n dev_plan_list_page.click_view_plan_btn()\n assert dev_plan_list_page.check_current_menu('年度发展计划详情'), '点击查看年度发展计划详情失败'\n<|end_body_5|>\n", "class_docstring": "", "class_name": "TestDevPlanList", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestDevPlanList:\n\n def set_up(self):\n \"\"\"前置操作 :return:\"\"\"\n <|body_0|>\n\n def test_query_dev_plan(self, set_up):\n \"\"\"年度计划查询 :return:\"\"\"\n <|body_1|>\n\n def test_reset_dev_plan_query(self):\n \"\"\"重置年度计划查询 :return:\"\"\"\n <|body_2|>\n\n def test_click_create_dev_plan(self):\n \"\"\"查看市级计划 :return:\"\"\"\n <|body_3|>\n\n def test_view_shi_plan_list(self):\n \"\"\"查看市级计划 :return:\"\"\"\n <|body_4|>\n\n def test_view_shi_plan_detail(self):\n \"\"\"查看市级计划 :return:\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n global dev_plan_list_page, admin_page\n dev_plan_list_page = DevPlanListPage(self.driver)\n admin_page = AdminPage(self.driver)\n admin_page.into_subsystem('业务管理')\n admin_page.select_menu('首页/渠道业务管理/年度发展计划')\n<|end_body_0|>\n\n<|body_start_1|>\n admin_page.select_menu('计划列表')\n dev_plan_list_page.query_by_year(_year='2020')\n assert '2020' in dev_plan_list_page.read_table_cell_value(1, 3), '年度计划查询失败'\n<|end_body_1|>\n\n<|body_start_2|>\n admin_page.select_menu('计划列表')\n dev_plan_list_page.reset_query()\n assert dev_plan_list_page.read_query_year() == '', '重置年度计划查询条件失败'\n<|end_body_2|>\n\n<|body_start_3|>\n 
admin_page.select_menu('计划列表')\n dev_plan_list_page.click_create_dev_plan()\n assert dev_plan_list_page.check_current_menu('新建年度发展计划'), '点击新建年度发展计划失败'\n<|end_body_3|>\n\n<|body_start_4|>\n admin_page.select_menu('计划列表')\n dev_plan_list_page.click_shi_plan_btn()\n assert dev_plan_list_page.check_current_menu('市级计划列表'), '点击查看渠道管市级计划列表详情失败'\n<|end_body_4|>\n\n<|body_start_5|>\n admin_page.select_menu('计划列表')\n dev_plan_list_page.click_view_plan_btn()\n assert dev_plan_list_page.check_current_menu('年度发展计划详情'), '点击查看年度发展计划详情失败'\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000160", "length_bytes": 2658, "license_type": "no_license", "methods": [{"docstring": "前置操作 :return:", "name": "set_up", "signature": "def set_up(self)"}, {"docstring": "年度计划查询 :return:", "name": "test_query_dev_plan", "signature": "def test_query_dev_plan(self, set_up)"}, {"docstring": "重置年度计划查询 :return:", "name": "test_reset_dev_plan_query", "signature": "def test_reset_dev_plan_query(self)"}, {"docstring": "查看市级计划 :return:", "name": "test_click_create_dev_plan", "signature": "def test_click_create_dev_plan(self)"}, {"docstring": "查看市级计划 :return:", "name": "test_view_shi_plan_list", "signature": "def test_view_shi_plan_list(self)"}, {"docstring": "查看市级计划 :return:", "name": "test_view_shi_plan_detail", "signature": "def test_view_shi_plan_detail(self)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_train_036458", "prompt": "Implement the Python class `TestDevPlanList` described below.\n\nClass description:\nImplement the TestDevPlanList class.\n\nMethod signatures and docstrings:\n- def set_up(self): 前置操作 :return:\n- def test_query_dev_plan(self, set_up): 年度计划查询 :return:\n- def test_reset_dev_plan_query(self): 重置年度计划查询 :return:\n- def test_click_create_dev_plan(self): 查看市级计划 :return:\n- def test_view_shi_plan_list(self): 查看市级计划 :return:\n- def test_view_shi_plan_detail(self): 查看市级计划 :return:", "prompted_full_text": "Implement the Python class `TestDevPlanList` described below.\n\nClass description:\nImplement the TestDevPlanList class.\n\nMethod signatures and docstrings:\n- def set_up(self): 前置操作 :return:\n- def test_query_dev_plan(self, set_up): 年度计划查询 :return:\n- def test_reset_dev_plan_query(self): 重置年度计划查询 :return:\n- def test_click_create_dev_plan(self): 查看市级计划 :return:\n- def test_view_shi_plan_list(self): 查看市级计划 :return:\n- def test_view_shi_plan_detail(self): 查看市级计划 :return:\n\n<|skeleton|>\nclass TestDevPlanList:\n\n def set_up(self):\n \"\"\"前置操作 :return:\"\"\"\n <|body_0|>\n\n def test_query_dev_plan(self, set_up):\n \"\"\"年度计划查询 :return:\"\"\"\n <|body_1|>\n\n def test_reset_dev_plan_query(self):\n \"\"\"重置年度计划查询 :return:\"\"\"\n <|body_2|>\n\n def test_click_create_dev_plan(self):\n \"\"\"查看市级计划 :return:\"\"\"\n <|body_3|>\n\n def test_view_shi_plan_list(self):\n \"\"\"查看市级计划 :return:\"\"\"\n <|body_4|>\n\n def test_view_shi_plan_detail(self):\n \"\"\"查看市级计划 :return:\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n global dev_plan_list_page, admin_page\n dev_plan_list_page = DevPlanListPage(self.driver)\n admin_page = AdminPage(self.driver)\n admin_page.into_subsystem('业务管理')\n admin_page.select_menu('首页/渠道业务管理/年度发展计划')\n<|end_body_0|>\n\n<|body_start_1|>\n admin_page.select_menu('计划列表')\n dev_plan_list_page.query_by_year(_year='2020')\n assert '2020' in dev_plan_list_page.read_table_cell_value(1, 3), '年度计划查询失败'\n<|end_body_1|>\n\n<|body_start_2|>\n admin_page.select_menu('计划列表')\n dev_plan_list_page.reset_query()\n assert dev_plan_list_page.read_query_year() == '', 
'重置年度计划查询条件失败'\n<|end_body_2|>\n\n<|body_start_3|>\n admin_page.select_menu('计划列表')\n dev_plan_list_page.click_create_dev_plan()\n assert dev_plan_list_page.check_current_menu('新建年度发展计划'), '点击新建年度发展计划失败'\n<|end_body_3|>\n\n<|body_start_4|>\n admin_page.select_menu('计划列表')\n dev_plan_list_page.click_shi_plan_btn()\n assert dev_plan_list_page.check_current_menu('市级计划列表'), '点击查看渠道管市级计划列表详情失败'\n<|end_body_4|>\n\n<|body_start_5|>\n admin_page.select_menu('计划列表')\n dev_plan_list_page.click_view_plan_btn()\n assert dev_plan_list_page.check_current_menu('年度发展计划详情'), '点击查看年度发展计划详情失败'\n<|end_body_5|>\n", "revision_id": "86d1b085af2d3808ac8472d541f4bf26d26591e0", "skeleton": "<|skeleton|>\nclass TestDevPlanList:\n\n def set_up(self):\n \"\"\"前置操作 :return:\"\"\"\n <|body_0|>\n\n def test_query_dev_plan(self, set_up):\n \"\"\"年度计划查询 :return:\"\"\"\n <|body_1|>\n\n def test_reset_dev_plan_query(self):\n \"\"\"重置年度计划查询 :return:\"\"\"\n <|body_2|>\n\n def test_click_create_dev_plan(self):\n \"\"\"查看市级计划 :return:\"\"\"\n <|body_3|>\n\n def test_view_shi_plan_list(self):\n \"\"\"查看市级计划 :return:\"\"\"\n <|body_4|>\n\n def test_view_shi_plan_detail(self):\n \"\"\"查看市级计划 :return:\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TestDevPlanList:\n def set_up(self):\n \"\"\"前置操作 :return:\"\"\"\n global dev_plan_list_page, admin_page\n dev_plan_list_page = DevPlanListPage(self.driver)\n admin_page = AdminPage(self.driver)\n admin_page.into_subsystem('业务管理')\n admin_page.select_menu('首页/渠道业务管理/年度发展计划')\n\n def test_query_dev_plan(self, set_up):\n \"\"\"年度计划查询 :return:\"\"\"\n admin_page.select_menu('计划列表')\n dev_plan_list_page.query_by_year(_year='2020')\n assert '2020' in dev_plan_list_page.read_table_cell_value(1, 3), '年度计划查询失败'\n\n def test_reset_dev_plan_query(self):\n \"\"\"重置年度计划查询 :return:\"\"\"\n admin_page.select_menu('计划列表')\n dev_plan_list_page.reset_query()\n assert dev_plan_list_page.read_query_year() == '', '重置年度计划查询条件失败'\n\n def test_click_create_dev_plan(self):\n \"\"\"查看市级计划 :return:\"\"\"\n admin_page.select_menu('计划列表')\n dev_plan_list_page.click_create_dev_plan()\n assert dev_plan_list_page.check_current_menu('新建年度发展计划'), '点击新建年度发展计划失败'\n\n def test_view_shi_plan_list(self):\n \"\"\"查看市级计划 :return:\"\"\"\n admin_page.select_menu('计划列表')\n dev_plan_list_page.click_shi_plan_btn()\n assert dev_plan_list_page.check_current_menu('市级计划列表'), '点击查看渠道管市级计划列表详情失败'\n\n def test_view_shi_plan_detail(self):\n \"\"\"查看市级计划 :return:\"\"\"\n admin_page.select_menu('计划列表')\n dev_plan_list_page.click_view_plan_btn()\n assert dev_plan_list_page.check_current_menu('年度发展计划详情'), '点击查看年度发展计划详情失败'\n", "source": "the_stack_v2_python_sparse", "source_path": "src/cases/business_manage/channel_business_manage/developmentPlan/test_dev_plan_list_page_170.py", "source_repo": "102244653/SeleniumByPython", "split": "val", "star_events_count": 2}
{"blob_id": "b63470498b08e5fc34c44a51c6915ce8591e8cb1", "bodies": ["super(Selector_Form, self).__init__()\nself.logger = logging.getLogger(__name__ + '.Selector_Form')\nself.logger.debug('logger is %s', self.logger.name)\nself.ID = key\nself.parent = parent\nself.state = state\nself.button_text = button_text\nself.signal = SignalMaker()\nself.logger.debug('__init__: Nx1 selector %s form instantiated', self)", "self.logger.debug('setupUI: setting up Nx1 selector form')\nrows = len(labels)\nself.rows = rows / cols + rows % cols\nself.cols = cols\nself.logger.debug('setupUI: %d rows x %d cols', self.rows, self.cols)\nself.gridLayout = QtGui.QGridLayout()\nself.radioButton = {}\nrb_action = {}\nfor row in range(self.rows):\n for col in range(self.cols):\n index = col * self.rows + row\n self.radioButton[index] = QtGui.QRadioButton()\n self.label_radiobutton(labels, index, label_default)\n self.gridLayout.addWidget(self.radioButton[index], row, col, 1, 1)\n QtCore.QObject.connect(self.radioButton[index], QtCore.SIGNAL('clicked()'), slotgen((self.ID, index), self.send_signal))\nself.setLayout(self.gridLayout)\nself.logger.debug('setupUI: Nx1 selector %s setup completed', self.ID)", "try:\n if labels[index]:\n self.radioButton[index].setText(QtGui.QApplication.translate('gridLayout', labels[index], None, QtGui.QApplication.UnicodeUTF8))\n else:\n self.radioButton[index].setText(QtGui.QApplication.translate('gridLayout', label_default + ' ' + str(index), None, QtGui.QApplication.UnicodeUTF8))\nexcept IndexError:\n self.radioButton[index].setText('None')\n self.radioButton[index].setDisabled(True)", "self.last_switch_ID, self.state = args\nself.logger.debug(' send_signal: selected input %d for switch %s', self.state, self.last_switch_ID)\nself.signal.stateChanged.emit()", "self.logger.debug('update_selector: %s invoked with switch %s', self, self.switch)\nif new_state > -1:\n self.state = new_state\nelse:\n try:\n self.parent._set_switch_button_text(self.switch, self.state)\n except AttributeError:\n self.state = new_state\n self.parent._set_switch_button_text(self.switch, -1, 'Input ', text='Unknown')\nself.logger.debug('update_selector: new state for switch %s is %d', key, self.state)\nself.logger.debug('update_selector: switch %s in row %s was changed', self.ID, rowname)\nself.parent.parent.switch_changed(self, rowname, self.ID)\nself.close()"], "bodies_text": "<|body_start_0|>\n super(Selector_Form, self).__init__()\n self.logger = logging.getLogger(__name__ + '.Selector_Form')\n self.logger.debug('logger is %s', self.logger.name)\n self.ID = key\n self.parent = parent\n self.state = state\n self.button_text = button_text\n self.signal = SignalMaker()\n self.logger.debug('__init__: Nx1 selector %s form instantiated', self)\n<|end_body_0|>\n\n<|body_start_1|>\n self.logger.debug('setupUI: setting up Nx1 selector form')\n rows = len(labels)\n self.rows = rows / cols + rows % cols\n self.cols = cols\n self.logger.debug('setupUI: %d rows x %d cols', self.rows, self.cols)\n self.gridLayout = QtGui.QGridLayout()\n self.radioButton = {}\n rb_action = {}\n for row in range(self.rows):\n for col in range(self.cols):\n index = col * self.rows + row\n self.radioButton[index] = QtGui.QRadioButton()\n self.label_radiobutton(labels, index, label_default)\n self.gridLayout.addWidget(self.radioButton[index], row, col, 1, 1)\n QtCore.QObject.connect(self.radioButton[index], QtCore.SIGNAL('clicked()'), slotgen((self.ID, index), self.send_signal))\n self.setLayout(self.gridLayout)\n self.logger.debug('setupUI: 
Nx1 selector %s setup completed', self.ID)\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n if labels[index]:\n self.radioButton[index].setText(QtGui.QApplication.translate('gridLayout', labels[index], None, QtGui.QApplication.UnicodeUTF8))\n else:\n self.radioButton[index].setText(QtGui.QApplication.translate('gridLayout', label_default + ' ' + str(index), None, QtGui.QApplication.UnicodeUTF8))\n except IndexError:\n self.radioButton[index].setText('None')\n self.radioButton[index].setDisabled(True)\n<|end_body_2|>\n\n<|body_start_3|>\n self.last_switch_ID, self.state = args\n self.logger.debug(' send_signal: selected input %d for switch %s', self.state, self.last_switch_ID)\n self.signal.stateChanged.emit()\n<|end_body_3|>\n\n<|body_start_4|>\n self.logger.debug('update_selector: %s invoked with switch %s', self, self.switch)\n if new_state > -1:\n self.state = new_state\n else:\n try:\n self.parent._set_switch_button_text(self.switch, self.state)\n except AttributeError:\n self.state = new_state\n self.parent._set_switch_button_text(self.switch, -1, 'Input ', text='Unknown')\n self.logger.debug('update_selector: new state for switch %s is %d', key, self.state)\n self.logger.debug('update_selector: switch %s in row %s was changed', self.ID, rowname)\n self.parent.parent.switch_changed(self, rowname, self.ID)\n self.close()\n<|end_body_4|>\n", "class_docstring": "Nx1 or 1xN selector widget The form is a dialog window with radio buttons which allows a selection of one out of N. When the selection is made, it changes the text on the button on the parent which invoked pop-up. It also passes a signal to the top-level window so it can implement the selection.", "class_name": "Selector_Form", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Selector_Form:\n \"\"\"Nx1 or 1xN selector widget The form is a dialog window with radio buttons which allows a selection of one out of N. When the selection is made, it changes the text on the button on the parent which invoked pop-up. It also passes a signal to the top-level window so it can implement the selection.\"\"\"\n\n def __init__(self, key, parent=None, state=-1, button_text=None):\n \"\"\"Create instance of selector widget @param key : selector ID @type key : int\"\"\"\n <|body_0|>\n\n def setupUi(self, labels, label_default='Port', cols=1):\n \"\"\"Generate the radiobutton form. When the state of a button is changed, it sends a signal to that effect. The parent form, the MultiSelectorForm() instance, is connected to that signal and takes appropriate action. @param labels : labels for the selector radiobuttons @type labels : list of str or empty list @param label_default : Prefix for selection number if no label is given @type label_default : str @param cols : number of radiobutton column @type cols : int\"\"\"\n <|body_1|>\n\n def label_radiobutton(self, labels, index, label_default):\n \"\"\"Put text on the button, either specified in labels, or a default @param labels : text next to each radio button @type labels : list of str @param index : ID for the MultiSelectorForm column @type index : int @param label_default : text to use with button number if not in labels @type label_default : str\"\"\"\n <|body_2|>\n\n def send_signal(self, *args):\n \"\"\"Register the selected value\"\"\"\n <|body_3|>\n\n def update_selector(self, selector, key, rowname, new_state=-1):\n \"\"\"Update the state of a selector in a group This will update the selector button text if a new state is provided. 
Else it will open a window to allow the user to select a new state. If a state is provided either way, the button text will be set to that of the new state. Otherwise, the state is -1 and the text \"Unknown\". Additionally it invokes a method to actually set the switch. @param new_state : optional new state if known @type new_state : int\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Selector_Form, self).__init__()\n self.logger = logging.getLogger(__name__ + '.Selector_Form')\n self.logger.debug('logger is %s', self.logger.name)\n self.ID = key\n self.parent = parent\n self.state = state\n self.button_text = button_text\n self.signal = SignalMaker()\n self.logger.debug('__init__: Nx1 selector %s form instantiated', self)\n<|end_body_0|>\n\n<|body_start_1|>\n self.logger.debug('setupUI: setting up Nx1 selector form')\n rows = len(labels)\n self.rows = rows / cols + rows % cols\n self.cols = cols\n self.logger.debug('setupUI: %d rows x %d cols', self.rows, self.cols)\n self.gridLayout = QtGui.QGridLayout()\n self.radioButton = {}\n rb_action = {}\n for row in range(self.rows):\n for col in range(self.cols):\n index = col * self.rows + row\n self.radioButton[index] = QtGui.QRadioButton()\n self.label_radiobutton(labels, index, label_default)\n self.gridLayout.addWidget(self.radioButton[index], row, col, 1, 1)\n QtCore.QObject.connect(self.radioButton[index], QtCore.SIGNAL('clicked()'), slotgen((self.ID, index), self.send_signal))\n self.setLayout(self.gridLayout)\n self.logger.debug('setupUI: Nx1 selector %s setup completed', self.ID)\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n if labels[index]:\n self.radioButton[index].setText(QtGui.QApplication.translate('gridLayout', labels[index], None, QtGui.QApplication.UnicodeUTF8))\n else:\n self.radioButton[index].setText(QtGui.QApplication.translate('gridLayout', label_default + ' ' + str(index), None, QtGui.QApplication.UnicodeUTF8))\n except IndexError:\n self.radioButton[index].setText('None')\n self.radioButton[index].setDisabled(True)\n<|end_body_2|>\n\n<|body_start_3|>\n self.last_switch_ID, self.state = args\n self.logger.debug(' send_signal: selected input %d for switch %s', self.state, self.last_switch_ID)\n self.signal.stateChanged.emit()\n<|end_body_3|>\n\n<|body_start_4|>\n self.logger.debug('update_selector: %s invoked with switch %s', self, self.switch)\n if new_state > -1:\n self.state = new_state\n else:\n try:\n self.parent._set_switch_button_text(self.switch, self.state)\n except AttributeError:\n self.state = new_state\n self.parent._set_switch_button_text(self.switch, -1, 'Input ', text='Unknown')\n self.logger.debug('update_selector: new state for switch %s is %d', key, self.state)\n self.logger.debug('update_selector: switch %s in row %s was changed', self.ID, rowname)\n self.parent.parent.switch_changed(self, rowname, self.ID)\n self.close()\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000161", "length_bytes": 5438, "license_type": "no_license", "methods": [{"docstring": "Create instance of selector widget @param key : selector ID @type key : int", "name": "__init__", "signature": "def __init__(self, key, parent=None, state=-1, button_text=None)"}, {"docstring": "Generate the radiobutton form. When the state of a button is changed, it sends a signal to that effect. The parent form, the MultiSelectorForm() instance, is connected to that signal and takes appropriate action. 
@param labels : labels for the selector radiobuttons @type labels : list of str or empty list @param label_default : Prefix for selection number if no label is given @type label_default : str @param cols : number of radiobutton column @type cols : int", "name": "setupUi", "signature": "def setupUi(self, labels, label_default='Port', cols=1)"}, {"docstring": "Put text on the button, either specified in labels, or a default @param labels : text next to each radio button @type labels : list of str @param index : ID for the MultiSelectorForm column @type index : int @param label_default : text to use with button number if not in labels @type label_default : str", "name": "label_radiobutton", "signature": "def label_radiobutton(self, labels, index, label_default)"}, {"docstring": "Register the selected value", "name": "send_signal", "signature": "def send_signal(self, *args)"}, {"docstring": "Update the state of a selector in a group This will update the selector button text if a new state is provided. Else it will open a window to allow the user to select a new state. If a state is provided either way, the button text will be set to that of the new state. Otherwise, the state is -1 and the text \"Unknown\". Additionally it invokes a method to actually set the switch. @param new_state : optional new state if known @type new_state : int", "name": "update_selector", "signature": "def update_selector(self, selector, key, rowname, new_state=-1)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_test_000157", "prompt": "Implement the Python class `Selector_Form` described below.\n\nClass description:\nNx1 or 1xN selector widget The form is a dialog window with radio buttons which allows a selection of one out of N. When the selection is made, it changes the text on the button on the parent which invoked pop-up. It also passes a signal to the top-level window so it can implement the selection.\n\nMethod signatures and docstrings:\n- def __init__(self, key, parent=None, state=-1, button_text=None): Create instance of selector widget @param key : selector ID @type key : int\n- def setupUi(self, labels, label_default='Port', cols=1): Generate the radiobutton form. When the state of a button is changed, it sends a signal to that effect. The parent form, the MultiSelectorForm() instance, is connected to that signal and takes appropriate action. @param labels : labels for the selector radiobuttons @type labels : list of str or empty list @param label_default : Prefix for selection number if no label is given @type label_default : str @param cols : number of radiobutton column @type cols : int\n- def label_radiobutton(self, labels, index, label_default): Put text on the button, either specified in labels, or a default @param labels : text next to each radio button @type labels : list of str @param index : ID for the MultiSelectorForm column @type index : int @param label_default : text to use with button number if not in labels @type label_default : str\n- def send_signal(self, *args): Register the selected value\n- def update_selector(self, selector, key, rowname, new_state=-1): Update the state of a selector in a group This will update the selector button text if a new state is provided. Else it will open a window to allow the user to select a new state. If a state is provided either way, the button text will be set to that of the new state. Otherwise, the state is -1 and the text \"Unknown\". Additionally it invokes a method to actually set the switch. 
@param new_state : optional new state if known @type new_state : int", "prompted_full_text": "Implement the Python class `Selector_Form` described below.\n\nClass description:\nNx1 or 1xN selector widget The form is a dialog window with radio buttons which allows a selection of one out of N. When the selection is made, it changes the text on the button on the parent which invoked pop-up. It also passes a signal to the top-level window so it can implement the selection.\n\nMethod signatures and docstrings:\n- def __init__(self, key, parent=None, state=-1, button_text=None): Create instance of selector widget @param key : selector ID @type key : int\n- def setupUi(self, labels, label_default='Port', cols=1): Generate the radiobutton form. When the state of a button is changed, it sends a signal to that effect. The parent form, the MultiSelectorForm() instance, is connected to that signal and takes appropriate action. @param labels : labels for the selector radiobuttons @type labels : list of str or empty list @param label_default : Prefix for selection number if no label is given @type label_default : str @param cols : number of radiobutton column @type cols : int\n- def label_radiobutton(self, labels, index, label_default): Put text on the button, either specified in labels, or a default @param labels : text next to each radio button @type labels : list of str @param index : ID for the MultiSelectorForm column @type index : int @param label_default : text to use with button number if not in labels @type label_default : str\n- def send_signal(self, *args): Register the selected value\n- def update_selector(self, selector, key, rowname, new_state=-1): Update the state of a selector in a group This will update the selector button text if a new state is provided. Else it will open a window to allow the user to select a new state. If a state is provided either way, the button text will be set to that of the new state. Otherwise, the state is -1 and the text \"Unknown\". Additionally it invokes a method to actually set the switch. @param new_state : optional new state if known @type new_state : int\n\n<|skeleton|>\nclass Selector_Form:\n \"\"\"Nx1 or 1xN selector widget The form is a dialog window with radio buttons which allows a selection of one out of N. When the selection is made, it changes the text on the button on the parent which invoked pop-up. It also passes a signal to the top-level window so it can implement the selection.\"\"\"\n\n def __init__(self, key, parent=None, state=-1, button_text=None):\n \"\"\"Create instance of selector widget @param key : selector ID @type key : int\"\"\"\n <|body_0|>\n\n def setupUi(self, labels, label_default='Port', cols=1):\n \"\"\"Generate the radiobutton form. When the state of a button is changed, it sends a signal to that effect. The parent form, the MultiSelectorForm() instance, is connected to that signal and takes appropriate action. 
@param labels : labels for the selector radiobuttons @type labels : list of str or empty list @param label_default : Prefix for selection number if no label is given @type label_default : str @param cols : number of radiobutton column @type cols : int\"\"\"\n <|body_1|>\n\n def label_radiobutton(self, labels, index, label_default):\n \"\"\"Put text on the button, either specified in labels, or a default @param labels : text next to each radio button @type labels : list of str @param index : ID for the MultiSelectorForm column @type index : int @param label_default : text to use with button number if not in labels @type label_default : str\"\"\"\n <|body_2|>\n\n def send_signal(self, *args):\n \"\"\"Register the selected value\"\"\"\n <|body_3|>\n\n def update_selector(self, selector, key, rowname, new_state=-1):\n \"\"\"Update the state of a selector in a group This will update the selector button text if a new state is provided. Else it will open a window to allow the user to select a new state. If a state is provided either way, the button text will be set to that of the new state. Otherwise, the state is -1 and the text \"Unknown\". Additionally it invokes a method to actually set the switch. @param new_state : optional new state if known @type new_state : int\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Selector_Form, self).__init__()\n self.logger = logging.getLogger(__name__ + '.Selector_Form')\n self.logger.debug('logger is %s', self.logger.name)\n self.ID = key\n self.parent = parent\n self.state = state\n self.button_text = button_text\n self.signal = SignalMaker()\n self.logger.debug('__init__: Nx1 selector %s form instantiated', self)\n<|end_body_0|>\n\n<|body_start_1|>\n self.logger.debug('setupUI: setting up Nx1 selector form')\n rows = len(labels)\n self.rows = rows / cols + rows % cols\n self.cols = cols\n self.logger.debug('setupUI: %d rows x %d cols', self.rows, self.cols)\n self.gridLayout = QtGui.QGridLayout()\n self.radioButton = {}\n rb_action = {}\n for row in range(self.rows):\n for col in range(self.cols):\n index = col * self.rows + row\n self.radioButton[index] = QtGui.QRadioButton()\n self.label_radiobutton(labels, index, label_default)\n self.gridLayout.addWidget(self.radioButton[index], row, col, 1, 1)\n QtCore.QObject.connect(self.radioButton[index], QtCore.SIGNAL('clicked()'), slotgen((self.ID, index), self.send_signal))\n self.setLayout(self.gridLayout)\n self.logger.debug('setupUI: Nx1 selector %s setup completed', self.ID)\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n if labels[index]:\n self.radioButton[index].setText(QtGui.QApplication.translate('gridLayout', labels[index], None, QtGui.QApplication.UnicodeUTF8))\n else:\n self.radioButton[index].setText(QtGui.QApplication.translate('gridLayout', label_default + ' ' + str(index), None, QtGui.QApplication.UnicodeUTF8))\n except IndexError:\n self.radioButton[index].setText('None')\n self.radioButton[index].setDisabled(True)\n<|end_body_2|>\n\n<|body_start_3|>\n self.last_switch_ID, self.state = args\n self.logger.debug(' send_signal: selected input %d for switch %s', self.state, self.last_switch_ID)\n self.signal.stateChanged.emit()\n<|end_body_3|>\n\n<|body_start_4|>\n self.logger.debug('update_selector: %s invoked with switch %s', self, self.switch)\n if new_state > -1:\n self.state = new_state\n else:\n try:\n self.parent._set_switch_button_text(self.switch, self.state)\n except AttributeError:\n self.state = new_state\n self.parent._set_switch_button_text(self.switch, -1, 'Input ', 
text='Unknown')\n self.logger.debug('update_selector: new state for switch %s is %d', key, self.state)\n self.logger.debug('update_selector: switch %s in row %s was changed', self.ID, rowname)\n self.parent.parent.switch_changed(self, rowname, self.ID)\n self.close()\n<|end_body_4|>\n", "revision_id": "2a5ab02cefa9883cf1912849429c788319c32105", "skeleton": "<|skeleton|>\nclass Selector_Form:\n \"\"\"Nx1 or 1xN selector widget The form is a dialog window with radio buttons which allows a selection of one out of N. When the selection is made, it changes the text on the button on the parent which invoked pop-up. It also passes a signal to the top-level window so it can implement the selection.\"\"\"\n\n def __init__(self, key, parent=None, state=-1, button_text=None):\n \"\"\"Create instance of selector widget @param key : selector ID @type key : int\"\"\"\n <|body_0|>\n\n def setupUi(self, labels, label_default='Port', cols=1):\n \"\"\"Generate the radiobutton form. When the state of a button is changed, it sends a signal to that effect. The parent form, the MultiSelectorForm() instance, is connected to that signal and takes appropriate action. @param labels : labels for the selector radiobuttons @type labels : list of str or empty list @param label_default : Prefix for selection number if no label is given @type label_default : str @param cols : number of radiobutton column @type cols : int\"\"\"\n <|body_1|>\n\n def label_radiobutton(self, labels, index, label_default):\n \"\"\"Put text on the button, either specified in labels, or a default @param labels : text next to each radio button @type labels : list of str @param index : ID for the MultiSelectorForm column @type index : int @param label_default : text to use with button number if not in labels @type label_default : str\"\"\"\n <|body_2|>\n\n def send_signal(self, *args):\n \"\"\"Register the selected value\"\"\"\n <|body_3|>\n\n def update_selector(self, selector, key, rowname, new_state=-1):\n \"\"\"Update the state of a selector in a group This will update the selector button text if a new state is provided. Else it will open a window to allow the user to select a new state. If a state is provided either way, the button text will be set to that of the new state. Otherwise, the state is -1 and the text \"Unknown\". Additionally it invokes a method to actually set the switch. @param new_state : optional new state if known @type new_state : int\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Selector_Form:\n \"\"\"Nx1 or 1xN selector widget The form is a dialog window with radio buttons which allows a selection of one out of N. When the selection is made, it changes the text on the button on the parent which invoked pop-up. 
It also passes a signal to the top-level window so it can implement the selection.\"\"\"\n\n def __init__(self, key, parent=None, state=-1, button_text=None):\n \"\"\"Create instance of selector widget @param key : selector ID @type key : int\"\"\"\n super(Selector_Form, self).__init__()\n self.logger = logging.getLogger(__name__ + '.Selector_Form')\n self.logger.debug('logger is %s', self.logger.name)\n self.ID = key\n self.parent = parent\n self.state = state\n self.button_text = button_text\n self.signal = SignalMaker()\n self.logger.debug('__init__: Nx1 selector %s form instantiated', self)\n\n def setupUi(self, labels, label_default='Port', cols=1):\n \"\"\"Generate the radiobutton form. When the state of a button is changed, it sends a signal to that effect. The parent form, the MultiSelectorForm() instance, is connected to that signal and takes appropriate action. @param labels : labels for the selector radiobuttons @type labels : list of str or empty list @param label_default : Prefix for selection number if no label is given @type label_default : str @param cols : number of radiobutton column @type cols : int\"\"\"\n self.logger.debug('setupUI: setting up Nx1 selector form')\n rows = len(labels)\n self.rows = rows / cols + rows % cols\n self.cols = cols\n self.logger.debug('setupUI: %d rows x %d cols', self.rows, self.cols)\n self.gridLayout = QtGui.QGridLayout()\n self.radioButton = {}\n rb_action = {}\n for row in range(self.rows):\n for col in range(self.cols):\n index = col * self.rows + row\n self.radioButton[index] = QtGui.QRadioButton()\n self.label_radiobutton(labels, index, label_default)\n self.gridLayout.addWidget(self.radioButton[index], row, col, 1, 1)\n QtCore.QObject.connect(self.radioButton[index], QtCore.SIGNAL('clicked()'), slotgen((self.ID, index), self.send_signal))\n self.setLayout(self.gridLayout)\n self.logger.debug('setupUI: Nx1 selector %s setup completed', self.ID)\n\n def label_radiobutton(self, labels, index, label_default):\n \"\"\"Put text on the button, either specified in labels, or a default @param labels : text next to each radio button @type labels : list of str @param index : ID for the MultiSelectorForm column @type index : int @param label_default : text to use with button number if not in labels @type label_default : str\"\"\"\n try:\n if labels[index]:\n self.radioButton[index].setText(QtGui.QApplication.translate('gridLayout', labels[index], None, QtGui.QApplication.UnicodeUTF8))\n else:\n self.radioButton[index].setText(QtGui.QApplication.translate('gridLayout', label_default + ' ' + str(index), None, QtGui.QApplication.UnicodeUTF8))\n except IndexError:\n self.radioButton[index].setText('None')\n self.radioButton[index].setDisabled(True)\n\n def send_signal(self, *args):\n \"\"\"Register the selected value\"\"\"\n self.last_switch_ID, self.state = args\n self.logger.debug(' send_signal: selected input %d for switch %s', self.state, self.last_switch_ID)\n self.signal.stateChanged.emit()\n\n def update_selector(self, selector, key, rowname, new_state=-1):\n \"\"\"Update the state of a selector in a group This will update the selector button text if a new state is provided. Else it will open a window to allow the user to select a new state. If a state is provided either way, the button text will be set to that of the new state. Otherwise, the state is -1 and the text \"Unknown\". Additionally it invokes a method to actually set the switch. 
@param new_state : optional new state if known @type new_state : int\"\"\"\n self.logger.debug('update_selector: %s invoked with switch %s', self, self.switch)\n if new_state > -1:\n self.state = new_state\n else:\n try:\n self.parent._set_switch_button_text(self.switch, self.state)\n except AttributeError:\n self.state = new_state\n self.parent._set_switch_button_text(self.switch, -1, 'Input ', text='Unknown')\n self.logger.debug('update_selector: new state for switch %s is %d', key, self.state)\n self.logger.debug('update_selector: switch %s in row %s was changed', self.ID, rowname)\n self.parent.parent.switch_changed(self, rowname, self.ID)\n self.close()\n", "source": "the_stack_v2_python_sparse", "source_path": "GUI/Qt_widgets/selector_popup.py", "source_repo": "SDRAST/MCClient", "split": "val", "star_events_count": 0}
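One detail worth flagging in the Selector_Form record above: setupUi computes self.rows = rows / cols + rows % cols, which assumes Python 2 floor division (under Python 3 it produces a float) and over-allocates rows whenever rows % cols > 1. A small Python 3 sketch of the intended grid sizing follows, using true ceiling division; the names grid_rows, n_buttons and n_cols are illustrative, not part of the record.

# Hedged sketch: rows needed to place n_buttons radio buttons into n_cols columns.
# -(-a // b) is integer ceiling division, i.e. math.ceil(a / b) without floats.
def grid_rows(n_buttons: int, n_cols: int) -> int:
    return -(-n_buttons // n_cols)

# Column-major indexing as in the record's setupUi: index = col * rows + row.
for n in (5, 7):
    rows = grid_rows(n, n_cols=2)
    order = [col * rows + row for row in range(rows) for col in range(2)]
    print(n, rows, order)  # indices >= n become the disabled 'None' buttons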
{"blob_id": "eb7e9b8df7ea5d556f816ab36678c13cd04e1049", "bodies": ["dv_max = max(np.abs(im_r.energy - self.energy), np.abs(im_l.energy - self.energy))\ndv_min = min(np.abs(im_r.energy - self.energy), np.abs(im_l.energy - self.energy))\nx_l, x, x_r = [image.species.coordinates.flatten() for image in (im_l, self, im_r)]\ntau_plus = x_r - x\ntau_minus = x - x_l\nif im_l.energy < self.energy < im_r.energy:\n tau = tau_plus\nelif im_r.energy < self.energy < im_l.energy:\n tau = tau_minus\nelif im_l.energy < im_r.energy:\n tau = tau_plus * dv_max + tau_minus * dv_min\nelif im_r.energy < im_l.energy:\n tau = tau_plus * dv_min + tau_minus * dv_max\nelse:\n raise RuntimeError('Something went very wrong in the NEB!')\nreturn (tau / np.linalg.norm(tau), x_l, x, x_r)", "hat_tau, x_l, x, x_r = self._tau_xl_x_xr(im_l, im_r)\nf_parallel = (np.linalg.norm(x_r - x) * im_r.k - np.linalg.norm(x - x_l) * im_l.k) * hat_tau\ngrad_perp = self.grad - np.dot(self.grad, hat_tau) * hat_tau\nreturn f_parallel - grad_perp", "self.name = name\nself.iteration = 0\nself.k = k\nself.species = None\nself.energy = None\nself.grad = None"], "bodies_text": "<|body_start_0|>\n dv_max = max(np.abs(im_r.energy - self.energy), np.abs(im_l.energy - self.energy))\n dv_min = min(np.abs(im_r.energy - self.energy), np.abs(im_l.energy - self.energy))\n x_l, x, x_r = [image.species.coordinates.flatten() for image in (im_l, self, im_r)]\n tau_plus = x_r - x\n tau_minus = x - x_l\n if im_l.energy < self.energy < im_r.energy:\n tau = tau_plus\n elif im_r.energy < self.energy < im_l.energy:\n tau = tau_minus\n elif im_l.energy < im_r.energy:\n tau = tau_plus * dv_max + tau_minus * dv_min\n elif im_r.energy < im_l.energy:\n tau = tau_plus * dv_min + tau_minus * dv_max\n else:\n raise RuntimeError('Something went very wrong in the NEB!')\n return (tau / np.linalg.norm(tau), x_l, x, x_r)\n<|end_body_0|>\n\n<|body_start_1|>\n hat_tau, x_l, x, x_r = self._tau_xl_x_xr(im_l, im_r)\n f_parallel = (np.linalg.norm(x_r - x) * im_r.k - np.linalg.norm(x - x_l) * im_l.k) * hat_tau\n grad_perp = self.grad - np.dot(self.grad, hat_tau) * hat_tau\n return f_parallel - grad_perp\n<|end_body_1|>\n\n<|body_start_2|>\n self.name = name\n self.iteration = 0\n self.k = k\n self.species = None\n self.energy = None\n self.grad = None\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Image", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Image:\n\n def _tau_xl_x_xr(self, im_l, im_r):\n \"\"\"Calculate the normalised τ vector, along with the coordinates of the left, this and right images :param im_l: (autode.neb.Image) :param im_r: (autode.neb.Image) :return: (np.ndarray)\"\"\"\n <|body_0|>\n\n def get_force(self, im_l, im_r):\n \"\"\"Compute F_i. Notation from: Henkelman and H. J ́onsson, J. Chem. Phys. 
113, 9978 (2000) also a copy in autode/common Arguments: im_l (autode.neb.Image): Left image (i-1) im_r (autode.neb.Image): Right image (i+1)\"\"\"\n <|body_1|>\n\n def __init__(self, name, k):\n \"\"\"Image in a NEB Arguments: name (str):\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n dv_max = max(np.abs(im_r.energy - self.energy), np.abs(im_l.energy - self.energy))\n dv_min = min(np.abs(im_r.energy - self.energy), np.abs(im_l.energy - self.energy))\n x_l, x, x_r = [image.species.coordinates.flatten() for image in (im_l, self, im_r)]\n tau_plus = x_r - x\n tau_minus = x - x_l\n if im_l.energy < self.energy < im_r.energy:\n tau = tau_plus\n elif im_r.energy < self.energy < im_l.energy:\n tau = tau_minus\n elif im_l.energy < im_r.energy:\n tau = tau_plus * dv_max + tau_minus * dv_min\n elif im_r.energy < im_l.energy:\n tau = tau_plus * dv_min + tau_minus * dv_max\n else:\n raise RuntimeError('Something went very wrong in the NEB!')\n return (tau / np.linalg.norm(tau), x_l, x, x_r)\n<|end_body_0|>\n\n<|body_start_1|>\n hat_tau, x_l, x, x_r = self._tau_xl_x_xr(im_l, im_r)\n f_parallel = (np.linalg.norm(x_r - x) * im_r.k - np.linalg.norm(x - x_l) * im_l.k) * hat_tau\n grad_perp = self.grad - np.dot(self.grad, hat_tau) * hat_tau\n return f_parallel - grad_perp\n<|end_body_1|>\n\n<|body_start_2|>\n self.name = name\n self.iteration = 0\n self.k = k\n self.species = None\n self.energy = None\n self.grad = None\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000162", "length_bytes": 14072, "license_type": "permissive", "methods": [{"docstring": "Calculate the normalised τ vector, along with the coordinates of the left, this and right images :param im_l: (autode.neb.Image) :param im_r: (autode.neb.Image) :return: (np.ndarray)", "name": "_tau_xl_x_xr", "signature": "def _tau_xl_x_xr(self, im_l, im_r)"}, {"docstring": "Compute F_i. Notation from: Henkelman and H. Jónsson, J. Chem. Phys. 113, 9978 (2000) also a copy in autode/common Arguments: im_l (autode.neb.Image): Left image (i-1) im_r (autode.neb.Image): Right image (i+1)", "name": "get_force", "signature": "def get_force(self, im_l, im_r)"}, {"docstring": "Image in a NEB Arguments: name (str):", "name": "__init__", "signature": "def __init__(self, name, k)"}], "n_methods": 3, "prompt": "Implement the Python class `Image` described below.\n\nClass description:\nImplement the Image class.\n\nMethod signatures and docstrings:\n- def _tau_xl_x_xr(self, im_l, im_r): Calculate the normalised τ vector, along with the coordinates of the left, this and right images :param im_l: (autode.neb.Image) :param im_r: (autode.neb.Image) :return: (np.ndarray)\n- def get_force(self, im_l, im_r): Compute F_i. Notation from: Henkelman and H. Jónsson, J. Chem. Phys. 113, 9978 (2000) also a copy in autode/common Arguments: im_l (autode.neb.Image): Left image (i-1) im_r (autode.neb.Image): Right image (i+1)\n- def __init__(self, name, k): Image in a NEB Arguments: name (str):", "prompted_full_text": "Implement the Python class `Image` described below.\n\nClass description:\nImplement the Image class.\n\nMethod signatures and docstrings:\n- def _tau_xl_x_xr(self, im_l, im_r): Calculate the normalised τ vector, along with the coordinates of the left, this and right images :param im_l: (autode.neb.Image) :param im_r: (autode.neb.Image) :return: (np.ndarray)\n- def get_force(self, im_l, im_r): Compute F_i. Notation from: Henkelman and H. Jónsson, J. Chem. Phys. 
113, 9978 (2000) also a copy in autode/common Arguments: im_l (autode.neb.Image): Left image (i-1) im_r (autode.neb.Image): Right image (i+1)\n- def __init__(self, name, k): Image in a NEB Arguments: name (str):\n\n<|skeleton|>\nclass Image:\n\n def _tau_xl_x_xr(self, im_l, im_r):\n \"\"\"Calculate the normalised τ vector, along with the coordinates of the left, this and right images :param im_l: (autode.neb.Image) :param im_r: (autode.neb.Image) :return: (np.ndarray)\"\"\"\n <|body_0|>\n\n def get_force(self, im_l, im_r):\n \"\"\"Compute F_i. Notation from: Henkelman and H. Jónsson, J. Chem. Phys. 113, 9978 (2000) also a copy in autode/common Arguments: im_l (autode.neb.Image): Left image (i-1) im_r (autode.neb.Image): Right image (i+1)\"\"\"\n <|body_1|>\n\n def __init__(self, name, k):\n \"\"\"Image in a NEB Arguments: name (str):\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n dv_max = max(np.abs(im_r.energy - self.energy), np.abs(im_l.energy - self.energy))\n dv_min = min(np.abs(im_r.energy - self.energy), np.abs(im_l.energy - self.energy))\n x_l, x, x_r = [image.species.coordinates.flatten() for image in (im_l, self, im_r)]\n tau_plus = x_r - x\n tau_minus = x - x_l\n if im_l.energy < self.energy < im_r.energy:\n tau = tau_plus\n elif im_r.energy < self.energy < im_l.energy:\n tau = tau_minus\n elif im_l.energy < im_r.energy:\n tau = tau_plus * dv_max + tau_minus * dv_min\n elif im_r.energy < im_l.energy:\n tau = tau_plus * dv_min + tau_minus * dv_max\n else:\n raise RuntimeError('Something went very wrong in the NEB!')\n return (tau / np.linalg.norm(tau), x_l, x, x_r)\n<|end_body_0|>\n\n<|body_start_1|>\n hat_tau, x_l, x, x_r = self._tau_xl_x_xr(im_l, im_r)\n f_parallel = (np.linalg.norm(x_r - x) * im_r.k - np.linalg.norm(x - x_l) * im_l.k) * hat_tau\n grad_perp = self.grad - np.dot(self.grad, hat_tau) * hat_tau\n return f_parallel - grad_perp\n<|end_body_1|>\n\n<|body_start_2|>\n self.name = name\n self.iteration = 0\n self.k = k\n self.species = None\n self.energy = None\n self.grad = None\n<|end_body_2|>\n", "revision_id": "6505d5bbbd1906f57e4102e13f177510f166bbed", "skeleton": "<|skeleton|>\nclass Image:\n\n def _tau_xl_x_xr(self, im_l, im_r):\n \"\"\"Calculate the normalised τ vector, along with the coordinates of the left, this and right images :param im_l: (autode.neb.Image) :param im_r: (autode.neb.Image) :return: (np.ndarray)\"\"\"\n <|body_0|>\n\n def get_force(self, im_l, im_r):\n \"\"\"Compute F_i. Notation from: Henkelman and H. Jónsson, J. Chem. Phys. 
113, 9978 (2000) also a copy in autode/common Arguments: im_l (autode.neb.Image): Left image (i-1) im_r (autode.neb.Image): Right image (i+1)\"\"\"\n <|body_1|>\n\n def __init__(self, name, k):\n \"\"\"Image in a NEB Arguments: name (str):\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Image:\n def _tau_xl_x_xr(self, im_l, im_r):\n \"\"\"Calculate the normalised τ vector, along with the coordinates of the left, this and right images :param im_l: (autode.neb.Image) :param im_r: (autode.neb.Image) :return: (np.ndarray)\"\"\"\n dv_max = max(np.abs(im_r.energy - self.energy), np.abs(im_l.energy - self.energy))\n dv_min = min(np.abs(im_r.energy - self.energy), np.abs(im_l.energy - self.energy))\n x_l, x, x_r = [image.species.coordinates.flatten() for image in (im_l, self, im_r)]\n tau_plus = x_r - x\n tau_minus = x - x_l\n if im_l.energy < self.energy < im_r.energy:\n tau = tau_plus\n elif im_r.energy < self.energy < im_l.energy:\n tau = tau_minus\n elif im_l.energy < im_r.energy:\n tau = tau_plus * dv_max + tau_minus * dv_min\n elif im_r.energy < im_l.energy:\n tau = tau_plus * dv_min + tau_minus * dv_max\n else:\n raise RuntimeError('Something went very wrong in the NEB!')\n return (tau / np.linalg.norm(tau), x_l, x, x_r)\n\n def get_force(self, im_l, im_r):\n \"\"\"Compute F_i. Notation from: Henkelman and H. Jónsson, J. Chem. Phys. 113, 9978 (2000) also a copy in autode/common Arguments: im_l (autode.neb.Image): Left image (i-1) im_r (autode.neb.Image): Right image (i+1)\"\"\"\n hat_tau, x_l, x, x_r = self._tau_xl_x_xr(im_l, im_r)\n f_parallel = (np.linalg.norm(x_r - x) * im_r.k - np.linalg.norm(x - x_l) * im_l.k) * hat_tau\n grad_perp = self.grad - np.dot(self.grad, hat_tau) * hat_tau\n return f_parallel - grad_perp\n\n def __init__(self, name, k):\n \"\"\"Image in a NEB Arguments: name (str):\"\"\"\n self.name = name\n self.iteration = 0\n self.k = k\n self.species = None\n self.energy = None\n self.grad = None\n", "source": "the_stack_v2_python_sparse", "source_path": "autode/neb/original.py", "source_repo": "jdelev/autodE", "split": "val", "star_events_count": 0}
{"blob_id": "64557ed05773abfcb8c0a610bebbb08b88896c33", "bodies": ["python_hack_urn = signed_binary_utils.GetAFF4PythonHackRoot().Add(self.args.hack_name)\ntry:\n blob_iterator, _ = signed_binary_utils.FetchBlobsForSignedBinaryByURN(python_hack_urn)\nexcept signed_binary_utils.SignedBinaryNotFoundError as ex:\n raise flow_base.FlowError('Python hack %s not found.' % self.args.hack_name) from ex\nfor python_blob in blob_iterator:\n self.CallClient(server_stubs.ExecutePython, python_code=python_blob, py_args=self.args.py_args, next_state=self.Done.__name__)", "response = responses.First()\nif not responses.success:\n raise flow_base.FlowError('Execute Python hack failed: %s' % responses.status)\nif response:\n result = response.return_val\n str_result = result[0:200]\n if len(result) >= 200:\n str_result += '...[truncated]'\n self.Log('Result: %s', str_result)\n result = ExecutePythonHackResult()\n result.result_string = response.return_val\n self.SendReply(result)"], "bodies_text": "<|body_start_0|>\n python_hack_urn = signed_binary_utils.GetAFF4PythonHackRoot().Add(self.args.hack_name)\n try:\n blob_iterator, _ = signed_binary_utils.FetchBlobsForSignedBinaryByURN(python_hack_urn)\n except signed_binary_utils.SignedBinaryNotFoundError as ex:\n raise flow_base.FlowError('Python hack %s not found.' % self.args.hack_name) from ex\n for python_blob in blob_iterator:\n self.CallClient(server_stubs.ExecutePython, python_code=python_blob, py_args=self.args.py_args, next_state=self.Done.__name__)\n<|end_body_0|>\n\n<|body_start_1|>\n response = responses.First()\n if not responses.success:\n raise flow_base.FlowError('Execute Python hack failed: %s' % responses.status)\n if response:\n result = response.return_val\n str_result = result[0:200]\n if len(result) >= 200:\n str_result += '...[truncated]'\n self.Log('Result: %s', str_result)\n result = ExecutePythonHackResult()\n result.result_string = response.return_val\n self.SendReply(result)\n<|end_body_1|>\n", "class_docstring": "Execute a signed python hack on a client.", "class_name": "ExecutePythonHack", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ExecutePythonHack:\n \"\"\"Execute a signed python hack on a client.\"\"\"\n\n def Start(self):\n \"\"\"The start method.\"\"\"\n <|body_0|>\n\n def Done(self, responses):\n \"\"\"Retrieves the output for the hack.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n python_hack_urn = signed_binary_utils.GetAFF4PythonHackRoot().Add(self.args.hack_name)\n try:\n blob_iterator, _ = signed_binary_utils.FetchBlobsForSignedBinaryByURN(python_hack_urn)\n except signed_binary_utils.SignedBinaryNotFoundError as ex:\n raise flow_base.FlowError('Python hack %s not found.' 
% self.args.hack_name) from ex\n for python_blob in blob_iterator:\n self.CallClient(server_stubs.ExecutePython, python_code=python_blob, py_args=self.args.py_args, next_state=self.Done.__name__)\n<|end_body_0|>\n\n<|body_start_1|>\n response = responses.First()\n if not responses.success:\n raise flow_base.FlowError('Execute Python hack failed: %s' % responses.status)\n if response:\n result = response.return_val\n str_result = result[0:200]\n if len(result) >= 200:\n str_result += '...[truncated]'\n self.Log('Result: %s', str_result)\n result = ExecutePythonHackResult()\n result.result_string = response.return_val\n self.SendReply(result)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000163", "length_bytes": 28815, "license_type": "permissive", "methods": [{"docstring": "The start method.", "name": "Start", "signature": "def Start(self)"}, {"docstring": "Retrieves the output for the hack.", "name": "Done", "signature": "def Done(self, responses)"}], "n_methods": 2, "prompt": "Implement the Python class `ExecutePythonHack` described below.\n\nClass description:\nExecute a signed python hack on a client.\n\nMethod signatures and docstrings:\n- def Start(self): The start method.\n- def Done(self, responses): Retrieves the output for the hack.", "prompted_full_text": "Implement the Python class `ExecutePythonHack` described below.\n\nClass description:\nExecute a signed python hack on a client.\n\nMethod signatures and docstrings:\n- def Start(self): The start method.\n- def Done(self, responses): Retrieves the output for the hack.\n\n<|skeleton|>\nclass ExecutePythonHack:\n \"\"\"Execute a signed python hack on a client.\"\"\"\n\n def Start(self):\n \"\"\"The start method.\"\"\"\n <|body_0|>\n\n def Done(self, responses):\n \"\"\"Retrieves the output for the hack.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n python_hack_urn = signed_binary_utils.GetAFF4PythonHackRoot().Add(self.args.hack_name)\n try:\n blob_iterator, _ = signed_binary_utils.FetchBlobsForSignedBinaryByURN(python_hack_urn)\n except signed_binary_utils.SignedBinaryNotFoundError as ex:\n raise flow_base.FlowError('Python hack %s not found.' 
% self.args.hack_name) from ex\n for python_blob in blob_iterator:\n self.CallClient(server_stubs.ExecutePython, python_code=python_blob, py_args=self.args.py_args, next_state=self.Done.__name__)\n<|end_body_0|>\n\n<|body_start_1|>\n response = responses.First()\n if not responses.success:\n raise flow_base.FlowError('Execute Python hack failed: %s' % responses.status)\n if response:\n result = response.return_val\n str_result = result[0:200]\n if len(result) >= 200:\n str_result += '...[truncated]'\n self.Log('Result: %s', str_result)\n result = ExecutePythonHackResult()\n result.result_string = response.return_val\n self.SendReply(result)\n<|end_body_1|>\n", "revision_id": "44c0eb8c938302098ef7efae8cfd6b90bcfbb2d6", "skeleton": "<|skeleton|>\nclass ExecutePythonHack:\n \"\"\"Execute a signed python hack on a client.\"\"\"\n\n def Start(self):\n \"\"\"The start method.\"\"\"\n <|body_0|>\n\n def Done(self, responses):\n \"\"\"Retrieves the output for the hack.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ExecutePythonHack:\n \"\"\"Execute a signed python hack on a client.\"\"\"\n\n def Start(self):\n \"\"\"The start method.\"\"\"\n python_hack_urn = signed_binary_utils.GetAFF4PythonHackRoot().Add(self.args.hack_name)\n try:\n blob_iterator, _ = signed_binary_utils.FetchBlobsForSignedBinaryByURN(python_hack_urn)\n except signed_binary_utils.SignedBinaryNotFoundError as ex:\n raise flow_base.FlowError('Python hack %s not found.' % self.args.hack_name) from ex\n for python_blob in blob_iterator:\n self.CallClient(server_stubs.ExecutePython, python_code=python_blob, py_args=self.args.py_args, next_state=self.Done.__name__)\n\n def Done(self, responses):\n \"\"\"Retrieves the output for the hack.\"\"\"\n response = responses.First()\n if not responses.success:\n raise flow_base.FlowError('Execute Python hack failed: %s' % responses.status)\n if response:\n result = response.return_val\n str_result = result[0:200]\n if len(result) >= 200:\n str_result += '...[truncated]'\n self.Log('Result: %s', str_result)\n result = ExecutePythonHackResult()\n result.result_string = response.return_val\n self.SendReply(result)\n", "source": "the_stack_v2_python_sparse", "source_path": "grr/server/grr_response_server/flows/general/administrative.py", "source_repo": "google/grr", "split": "val", "star_events_count": 4683}
{"blob_id": "518d200dd4b365b580f2bcede4de1b2e8d0f185b", "bodies": ["if not head or n < 1:\n return head\ncurr = head\nwhile curr:\n curr = curr.next\n n -= 1\nif n == 0:\n head = head.next\nif n < 0:\n curr = head\n n += 1\n while n < 0:\n curr = curr.next\n n += 1\n curr.next = curr.next.next\nreturn head", "if not head or n < 1:\n return head\ncurr = head\nwhile curr:\n curr = curr.next\n n -= 1\nif n == 0:\n head = head.next\n head.last = None\nif n < 0:\n curr = head\n n += 1\n while n < 0:\n curr = curr.next\n n += 1\n new = curr.next.next\n curr.next = new\n if new:\n new.last = curr\nreturn head"], "bodies_text": "<|body_start_0|>\n if not head or n < 1:\n return head\n curr = head\n while curr:\n curr = curr.next\n n -= 1\n if n == 0:\n head = head.next\n if n < 0:\n curr = head\n n += 1\n while n < 0:\n curr = curr.next\n n += 1\n curr.next = curr.next.next\n return head\n<|end_body_0|>\n\n<|body_start_1|>\n if not head or n < 1:\n return head\n curr = head\n while curr:\n curr = curr.next\n n -= 1\n if n == 0:\n head = head.next\n head.last = None\n if n < 0:\n curr = head\n n += 1\n while n < 0:\n curr = curr.next\n n += 1\n new = curr.next.next\n curr.next = new\n if new:\n new.last = curr\n return head\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def removeNthFromEnd(self, head, n):\n \"\"\":type head: ListNode :type n: int :rtype: ListNode\"\"\"\n <|body_0|>\n\n def removeNthFromEnd2(self, head, n):\n \"\"\":type head: ListNode :type n: int :rtype: ListNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not head or n < 1:\n return head\n curr = head\n while curr:\n curr = curr.next\n n -= 1\n if n == 0:\n head = head.next\n if n < 0:\n curr = head\n n += 1\n while n < 0:\n curr = curr.next\n n += 1\n curr.next = curr.next.next\n return head\n<|end_body_0|>\n\n<|body_start_1|>\n if not head or n < 1:\n return head\n curr = head\n while curr:\n curr = curr.next\n n -= 1\n if n == 0:\n head = head.next\n head.last = None\n if n < 0:\n curr = head\n n += 1\n while n < 0:\n curr = curr.next\n n += 1\n new = curr.next.next\n curr.next = new\n if new:\n new.last = curr\n return head\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000164", "length_bytes": 1526, "license_type": "no_license", "methods": [{"docstring": ":type head: ListNode :type n: int :rtype: ListNode", "name": "removeNthFromEnd", "signature": "def removeNthFromEnd(self, head, n)"}, {"docstring": ":type head: ListNode :type n: int :rtype: ListNode", "name": "removeNthFromEnd2", "signature": "def removeNthFromEnd2(self, head, n)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_022962", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def removeNthFromEnd(self, head, n): :type head: ListNode :type n: int :rtype: ListNode\n- def removeNthFromEnd2(self, head, n): :type head: ListNode :type n: int :rtype: ListNode", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def removeNthFromEnd(self, head, n): :type head: ListNode :type n: int :rtype: ListNode\n- def removeNthFromEnd2(self, head, n): :type head: ListNode :type n: int :rtype: ListNode\n\n<|skeleton|>\nclass Solution:\n\n def 
removeNthFromEnd(self, head, n):\n \"\"\":type head: ListNode :type n: int :rtype: ListNode\"\"\"\n <|body_0|>\n\n def removeNthFromEnd2(self, head, n):\n \"\"\":type head: ListNode :type n: int :rtype: ListNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not head or n < 1:\n return head\n curr = head\n while curr:\n curr = curr.next\n n -= 1\n if n == 0:\n head = head.next\n if n < 0:\n curr = head\n n += 1\n while n < 0:\n curr = curr.next\n n += 1\n curr.next = curr.next.next\n return head\n<|end_body_0|>\n\n<|body_start_1|>\n if not head or n < 1:\n return head\n curr = head\n while curr:\n curr = curr.next\n n -= 1\n if n == 0:\n head = head.next\n head.last = None\n if n < 0:\n curr = head\n n += 1\n while n < 0:\n curr = curr.next\n n += 1\n new = curr.next.next\n curr.next = new\n if new:\n new.last = curr\n return head\n<|end_body_1|>\n", "revision_id": "604efd2c53c369fb262f42f7f7f31997ea4d029b", "skeleton": "<|skeleton|>\nclass Solution:\n\n def removeNthFromEnd(self, head, n):\n \"\"\":type head: ListNode :type n: int :rtype: ListNode\"\"\"\n <|body_0|>\n\n def removeNthFromEnd2(self, head, n):\n \"\"\":type head: ListNode :type n: int :rtype: ListNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def removeNthFromEnd(self, head, n):\n \"\"\":type head: ListNode :type n: int :rtype: ListNode\"\"\"\n if not head or n < 1:\n return head\n curr = head\n while curr:\n curr = curr.next\n n -= 1\n if n == 0:\n head = head.next\n if n < 0:\n curr = head\n n += 1\n while n < 0:\n curr = curr.next\n n += 1\n curr.next = curr.next.next\n return head\n\n def removeNthFromEnd2(self, head, n):\n \"\"\":type head: ListNode :type n: int :rtype: ListNode\"\"\"\n if not head or n < 1:\n return head\n curr = head\n while curr:\n curr = curr.next\n n -= 1\n if n == 0:\n head = head.next\n head.last = None\n if n < 0:\n curr = head\n n += 1\n while n < 0:\n curr = curr.next\n n += 1\n new = curr.next.next\n curr.next = new\n if new:\n new.last = curr\n return head\n", "source": "the_stack_v2_python_sparse", "source_path": "DeleteNode.py", "source_repo": "fxy1018/Leetcode", "split": "val", "star_events_count": 1}
{"blob_id": "bc3efcc12694a0627391f0ae2e84833480631da2", "bodies": ["session = Session()\ntry:\n organization = session.query(Organization).get(organization_code)\n if organization is None:\n raise falcon.HTTPNotFound()\n query = session.query(OrganizationITService).filter(OrganizationITService.organization_id == organization_code).order_by(OrganizationITService.created_on)\n process_instance_id = req.get_param_as_int('processInstanceId')\n if process_instance_id:\n query = query.filter(OrganizationITService.process_instance_id == process_instance_id)\n data, paging = get_collection_page(req, query, custom_asdict)\n resp.media = {'data': data, 'paging': paging}\nfinally:\n session.close()", "session = Session()\ntry:\n organization = session.query(Organization).get(organization_code)\n if organization is None:\n raise falcon.HTTPNotFound()\n errors = validate_post(req.media, organization_code, session)\n if errors:\n raise HTTPUnprocessableEntity(errors)\n accepted_fields = ['process_instance_id', 'it_service_id', 'relevance_level_id']\n item = OrganizationITService().fromdict(req.media, only=accepted_fields)\n item.organization_id = organization_code\n session.add(item)\n session.commit()\n resp.status = falcon.HTTP_CREATED\n resp.location = req.relative_uri + f'/{item.instance_id}'\n resp.media = {'data': custom_asdict(item)}\nfinally:\n session.close()"], "bodies_text": "<|body_start_0|>\n session = Session()\n try:\n organization = session.query(Organization).get(organization_code)\n if organization is None:\n raise falcon.HTTPNotFound()\n query = session.query(OrganizationITService).filter(OrganizationITService.organization_id == organization_code).order_by(OrganizationITService.created_on)\n process_instance_id = req.get_param_as_int('processInstanceId')\n if process_instance_id:\n query = query.filter(OrganizationITService.process_instance_id == process_instance_id)\n data, paging = get_collection_page(req, query, custom_asdict)\n resp.media = {'data': data, 'paging': paging}\n finally:\n session.close()\n<|end_body_0|>\n\n<|body_start_1|>\n session = Session()\n try:\n organization = session.query(Organization).get(organization_code)\n if organization is None:\n raise falcon.HTTPNotFound()\n errors = validate_post(req.media, organization_code, session)\n if errors:\n raise HTTPUnprocessableEntity(errors)\n accepted_fields = ['process_instance_id', 'it_service_id', 'relevance_level_id']\n item = OrganizationITService().fromdict(req.media, only=accepted_fields)\n item.organization_id = organization_code\n session.add(item)\n session.commit()\n resp.status = falcon.HTTP_CREATED\n resp.location = req.relative_uri + f'/{item.instance_id}'\n resp.media = {'data': custom_asdict(item)}\n finally:\n session.close()\n<|end_body_1|>\n", "class_docstring": "GET and POST IT services of an organization.", "class_name": "Collection", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Collection:\n \"\"\"GET and POST IT services of an organization.\"\"\"\n\n def on_get(self, req, resp, organization_code):\n \"\"\"GETs a paged collection of IT services of an organization. :param req: See Falcon Request documentation. :param resp: See Falcon Response documentation. :param organization_code: The code of the organization.\"\"\"\n <|body_0|>\n\n def on_post(self, req, resp, organization_code):\n \"\"\"Adds a IT service to an organization's process. :param req: See Falcon Request documentation. :param resp: See Falcon Response documentation. 
:param organization_code: The code of the organization.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n session = Session()\n try:\n organization = session.query(Organization).get(organization_code)\n if organization is None:\n raise falcon.HTTPNotFound()\n query = session.query(OrganizationITService).filter(OrganizationITService.organization_id == organization_code).order_by(OrganizationITService.created_on)\n process_instance_id = req.get_param_as_int('processInstanceId')\n if process_instance_id:\n query = query.filter(OrganizationITService.process_instance_id == process_instance_id)\n data, paging = get_collection_page(req, query, custom_asdict)\n resp.media = {'data': data, 'paging': paging}\n finally:\n session.close()\n<|end_body_0|>\n\n<|body_start_1|>\n session = Session()\n try:\n organization = session.query(Organization).get(organization_code)\n if organization is None:\n raise falcon.HTTPNotFound()\n errors = validate_post(req.media, organization_code, session)\n if errors:\n raise HTTPUnprocessableEntity(errors)\n accepted_fields = ['process_instance_id', 'it_service_id', 'relevance_level_id']\n item = OrganizationITService().fromdict(req.media, only=accepted_fields)\n item.organization_id = organization_code\n session.add(item)\n session.commit()\n resp.status = falcon.HTTP_CREATED\n resp.location = req.relative_uri + f'/{item.instance_id}'\n resp.media = {'data': custom_asdict(item)}\n finally:\n session.close()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000165", "length_bytes": 9409, "license_type": "no_license", "methods": [{"docstring": "GETs a paged collection of IT services of an organization. :param req: See Falcon Request documentation. :param resp: See Falcon Response documentation. :param organization_code: The code of the organization.", "name": "on_get", "signature": "def on_get(self, req, resp, organization_code)"}, {"docstring": "Adds a IT service to an organization's process. :param req: See Falcon Request documentation. :param resp: See Falcon Response documentation. :param organization_code: The code of the organization.", "name": "on_post", "signature": "def on_post(self, req, resp, organization_code)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005273", "prompt": "Implement the Python class `Collection` described below.\n\nClass description:\nGET and POST IT services of an organization.\n\nMethod signatures and docstrings:\n- def on_get(self, req, resp, organization_code): GETs a paged collection of IT services of an organization. :param req: See Falcon Request documentation. :param resp: See Falcon Response documentation. :param organization_code: The code of the organization.\n- def on_post(self, req, resp, organization_code): Adds a IT service to an organization's process. :param req: See Falcon Request documentation. :param resp: See Falcon Response documentation. :param organization_code: The code of the organization.", "prompted_full_text": "Implement the Python class `Collection` described below.\n\nClass description:\nGET and POST IT services of an organization.\n\nMethod signatures and docstrings:\n- def on_get(self, req, resp, organization_code): GETs a paged collection of IT services of an organization. :param req: See Falcon Request documentation. :param resp: See Falcon Response documentation. :param organization_code: The code of the organization.\n- def on_post(self, req, resp, organization_code): Adds a IT service to an organization's process. 
:param req: See Falcon Request documentation. :param resp: See Falcon Response documentation. :param organization_code: The code of the organization.\n\n<|skeleton|>\nclass Collection:\n \"\"\"GET and POST IT services of an organization.\"\"\"\n\n def on_get(self, req, resp, organization_code):\n \"\"\"GETs a paged collection of IT services of an organization. :param req: See Falcon Request documentation. :param resp: See Falcon Response documentation. :param organization_code: The code of the organization.\"\"\"\n <|body_0|>\n\n def on_post(self, req, resp, organization_code):\n \"\"\"Adds a IT service to an organization's process. :param req: See Falcon Request documentation. :param resp: See Falcon Response documentation. :param organization_code: The code of the organization.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n session = Session()\n try:\n organization = session.query(Organization).get(organization_code)\n if organization is None:\n raise falcon.HTTPNotFound()\n query = session.query(OrganizationITService).filter(OrganizationITService.organization_id == organization_code).order_by(OrganizationITService.created_on)\n process_instance_id = req.get_param_as_int('processInstanceId')\n if process_instance_id:\n query = query.filter(OrganizationITService.process_instance_id == process_instance_id)\n data, paging = get_collection_page(req, query, custom_asdict)\n resp.media = {'data': data, 'paging': paging}\n finally:\n session.close()\n<|end_body_0|>\n\n<|body_start_1|>\n session = Session()\n try:\n organization = session.query(Organization).get(organization_code)\n if organization is None:\n raise falcon.HTTPNotFound()\n errors = validate_post(req.media, organization_code, session)\n if errors:\n raise HTTPUnprocessableEntity(errors)\n accepted_fields = ['process_instance_id', 'it_service_id', 'relevance_level_id']\n item = OrganizationITService().fromdict(req.media, only=accepted_fields)\n item.organization_id = organization_code\n session.add(item)\n session.commit()\n resp.status = falcon.HTTP_CREATED\n resp.location = req.relative_uri + f'/{item.instance_id}'\n resp.media = {'data': custom_asdict(item)}\n finally:\n session.close()\n<|end_body_1|>\n", "revision_id": "62723133595829230e5b589431a32cda3b092460", "skeleton": "<|skeleton|>\nclass Collection:\n \"\"\"GET and POST IT services of an organization.\"\"\"\n\n def on_get(self, req, resp, organization_code):\n \"\"\"GETs a paged collection of IT services of an organization. :param req: See Falcon Request documentation. :param resp: See Falcon Response documentation. :param organization_code: The code of the organization.\"\"\"\n <|body_0|>\n\n def on_post(self, req, resp, organization_code):\n \"\"\"Adds a IT service to an organization's process. :param req: See Falcon Request documentation. :param resp: See Falcon Response documentation. :param organization_code: The code of the organization.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Collection:\n \"\"\"GET and POST IT services of an organization.\"\"\"\n\n def on_get(self, req, resp, organization_code):\n \"\"\"GETs a paged collection of IT services of an organization. :param req: See Falcon Request documentation. :param resp: See Falcon Response documentation. 
:param organization_code: The code of the organization.\"\"\"\n session = Session()\n try:\n organization = session.query(Organization).get(organization_code)\n if organization is None:\n raise falcon.HTTPNotFound()\n query = session.query(OrganizationITService).filter(OrganizationITService.organization_id == organization_code).order_by(OrganizationITService.created_on)\n process_instance_id = req.get_param_as_int('processInstanceId')\n if process_instance_id:\n query = query.filter(OrganizationITService.process_instance_id == process_instance_id)\n data, paging = get_collection_page(req, query, custom_asdict)\n resp.media = {'data': data, 'paging': paging}\n finally:\n session.close()\n\n def on_post(self, req, resp, organization_code):\n \"\"\"Adds a IT service to an organization's process. :param req: See Falcon Request documentation. :param resp: See Falcon Response documentation. :param organization_code: The code of the organization.\"\"\"\n session = Session()\n try:\n organization = session.query(Organization).get(organization_code)\n if organization is None:\n raise falcon.HTTPNotFound()\n errors = validate_post(req.media, organization_code, session)\n if errors:\n raise HTTPUnprocessableEntity(errors)\n accepted_fields = ['process_instance_id', 'it_service_id', 'relevance_level_id']\n item = OrganizationITService().fromdict(req.media, only=accepted_fields)\n item.organization_id = organization_code\n session.add(item)\n session.commit()\n resp.status = falcon.HTTP_CREATED\n resp.location = req.relative_uri + f'/{item.instance_id}'\n resp.media = {'data': custom_asdict(item)}\n finally:\n session.close()\n", "source": "the_stack_v2_python_sparse", "source_path": "knoweak/api/resources/organization_it_service.py", "source_repo": "psvaiter/knoweak-api", "split": "val", "star_events_count": 0}
{"blob_id": "407cefc3f44c7a0bab87186edcf12ca23bca9d64", "bodies": ["url = get_table_url(self.URL)\ntry:\n password = Tables.objects.filter(url=url).first()\nexcept:\n raise NoSuchTable\nif not password and password.password == '':\n return False\nreturn True", "is_added = Particip.objects.select_related().filter(user_id__username=self.USERNAME, table_id__url=self.table_ID)\nis_added = is_added.exists()\nreturn is_added", "self.URL = request.path\nself.table_ID = get_table_url(request.path)\nself.USERNAME = request.user.username\nif request.user.is_authenticated:\n if self.already_added():\n queryset = Particip.objects.select_related().filter(table_id__url=self.table_ID)\n return queryset\n elif not self.need_password():\n queryset = Particip.objects.select_related().filter(table_id__url=self.table_ID)\n else:\n raise NotAdded\n return True\nelse:\n if self.need_password():\n raise Unauthorized\n return True"], "bodies_text": "<|body_start_0|>\n url = get_table_url(self.URL)\n try:\n password = Tables.objects.filter(url=url).first()\n except:\n raise NoSuchTable\n if not password and password.password == '':\n return False\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n is_added = Particip.objects.select_related().filter(user_id__username=self.USERNAME, table_id__url=self.table_ID)\n is_added = is_added.exists()\n return is_added\n<|end_body_1|>\n\n<|body_start_2|>\n self.URL = request.path\n self.table_ID = get_table_url(request.path)\n self.USERNAME = request.user.username\n if request.user.is_authenticated:\n if self.already_added():\n queryset = Particip.objects.select_related().filter(table_id__url=self.table_ID)\n return queryset\n elif not self.need_password():\n queryset = Particip.objects.select_related().filter(table_id__url=self.table_ID)\n else:\n raise NotAdded\n return True\n else:\n if self.need_password():\n raise Unauthorized\n return True\n<|end_body_2|>\n", "class_docstring": "permissions for allowing user read table content", "class_name": "CanReadTableContent", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CanReadTableContent:\n \"\"\"permissions for allowing user read table content\"\"\"\n\n def need_password(self) -> bool:\n \"\"\"does table need password?\"\"\"\n <|body_0|>\n\n def already_added(self) -> bool:\n \"\"\"check if user is added to table\"\"\"\n <|body_1|>\n\n def has_permission(self, request, view) -> bool:\n \"\"\"main function\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n url = get_table_url(self.URL)\n try:\n password = Tables.objects.filter(url=url).first()\n except:\n raise NoSuchTable\n if not password and password.password == '':\n return False\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n is_added = Particip.objects.select_related().filter(user_id__username=self.USERNAME, table_id__url=self.table_ID)\n is_added = is_added.exists()\n return is_added\n<|end_body_1|>\n\n<|body_start_2|>\n self.URL = request.path\n self.table_ID = get_table_url(request.path)\n self.USERNAME = request.user.username\n if request.user.is_authenticated:\n if self.already_added():\n queryset = Particip.objects.select_related().filter(table_id__url=self.table_ID)\n return queryset\n elif not self.need_password():\n queryset = Particip.objects.select_related().filter(table_id__url=self.table_ID)\n else:\n raise NotAdded\n return True\n else:\n if self.need_password():\n raise Unauthorized\n return True\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000166", 
"length_bytes": 2369, "license_type": "no_license", "methods": [{"docstring": "does table need password?", "name": "need_password", "signature": "def need_password(self) -> bool"}, {"docstring": "check if user is added to table", "name": "already_added", "signature": "def already_added(self) -> bool"}, {"docstring": "main function", "name": "has_permission", "signature": "def has_permission(self, request, view) -> bool"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_035325", "prompt": "Implement the Python class `CanReadTableContent` described below.\n\nClass description:\npermissions for allowing user read table content\n\nMethod signatures and docstrings:\n- def need_password(self) -> bool: does table need password?\n- def already_added(self) -> bool: check if user is added to table\n- def has_permission(self, request, view) -> bool: main function", "prompted_full_text": "Implement the Python class `CanReadTableContent` described below.\n\nClass description:\npermissions for allowing user read table content\n\nMethod signatures and docstrings:\n- def need_password(self) -> bool: does table need password?\n- def already_added(self) -> bool: check if user is added to table\n- def has_permission(self, request, view) -> bool: main function\n\n<|skeleton|>\nclass CanReadTableContent:\n \"\"\"permissions for allowing user read table content\"\"\"\n\n def need_password(self) -> bool:\n \"\"\"does table need password?\"\"\"\n <|body_0|>\n\n def already_added(self) -> bool:\n \"\"\"check if user is added to table\"\"\"\n <|body_1|>\n\n def has_permission(self, request, view) -> bool:\n \"\"\"main function\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n url = get_table_url(self.URL)\n try:\n password = Tables.objects.filter(url=url).first()\n except:\n raise NoSuchTable\n if not password and password.password == '':\n return False\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n is_added = Particip.objects.select_related().filter(user_id__username=self.USERNAME, table_id__url=self.table_ID)\n is_added = is_added.exists()\n return is_added\n<|end_body_1|>\n\n<|body_start_2|>\n self.URL = request.path\n self.table_ID = get_table_url(request.path)\n self.USERNAME = request.user.username\n if request.user.is_authenticated:\n if self.already_added():\n queryset = Particip.objects.select_related().filter(table_id__url=self.table_ID)\n return queryset\n elif not self.need_password():\n queryset = Particip.objects.select_related().filter(table_id__url=self.table_ID)\n else:\n raise NotAdded\n return True\n else:\n if self.need_password():\n raise Unauthorized\n return True\n<|end_body_2|>\n", "revision_id": "25e1bf7a3ae4a75c02f576582778bb259d7d8d4a", "skeleton": "<|skeleton|>\nclass CanReadTableContent:\n \"\"\"permissions for allowing user read table content\"\"\"\n\n def need_password(self) -> bool:\n \"\"\"does table need password?\"\"\"\n <|body_0|>\n\n def already_added(self) -> bool:\n \"\"\"check if user is added to table\"\"\"\n <|body_1|>\n\n def has_permission(self, request, view) -> bool:\n \"\"\"main function\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CanReadTableContent:\n \"\"\"permissions for allowing user read table content\"\"\"\n\n def need_password(self) -> bool:\n \"\"\"does table need password?\"\"\"\n url = get_table_url(self.URL)\n try:\n password = Tables.objects.filter(url=url).first()\n 
except:\n raise NoSuchTable\n if not password and password.password == '':\n return False\n return True\n\n def already_added(self) -> bool:\n \"\"\"check if user is added to table\"\"\"\n is_added = Particip.objects.select_related().filter(user_id__username=self.USERNAME, table_id__url=self.table_ID)\n is_added = is_added.exists()\n return is_added\n\n def has_permission(self, request, view) -> bool:\n \"\"\"main function\"\"\"\n self.URL = request.path\n self.table_ID = get_table_url(request.path)\n self.USERNAME = request.user.username\n if request.user.is_authenticated:\n if self.already_added():\n queryset = Particip.objects.select_related().filter(table_id__url=self.table_ID)\n return queryset\n elif not self.need_password():\n queryset = Particip.objects.select_related().filter(table_id__url=self.table_ID)\n else:\n raise NotAdded\n return True\n else:\n if self.need_password():\n raise Unauthorized\n return True\n", "source": "the_stack_v2_python_sparse", "source_path": "api/permissions.py", "source_repo": "RandomGuy090/taskmanager", "split": "val", "star_events_count": 0}
{"blob_id": "af57943a53ae7caef5d6a6938ae7fdb38c209392", "bodies": ["self.res = []\nif n == 0:\n return self.res\nelse:\n self.generateParenthesisRecur(n, n, '')\n return self.res", "if open == 0 and close == 0:\n self.res.append(curr)\nelif open != 0 and open <= close:\n self.generateParenthesisRecur(open - 1, close, curr + '(')\nelif close <= 0:\n return\nself.generateParenthesisRecur(open, close - 1, curr + ')')"], "bodies_text": "<|body_start_0|>\n self.res = []\n if n == 0:\n return self.res\n else:\n self.generateParenthesisRecur(n, n, '')\n return self.res\n<|end_body_0|>\n\n<|body_start_1|>\n if open == 0 and close == 0:\n self.res.append(curr)\n elif open != 0 and open <= close:\n self.generateParenthesisRecur(open - 1, close, curr + '(')\n elif close <= 0:\n return\n self.generateParenthesisRecur(open, close - 1, curr + ')')\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def generateParenthesis(self, n):\n \"\"\":type n: int :rtype: List[str]\"\"\"\n <|body_0|>\n\n def generateParenthesisRecur(self, open, close, curr):\n \"\"\":type open: int :type close: int :type curr: str :rtype: void\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.res = []\n if n == 0:\n return self.res\n else:\n self.generateParenthesisRecur(n, n, '')\n return self.res\n<|end_body_0|>\n\n<|body_start_1|>\n if open == 0 and close == 0:\n self.res.append(curr)\n elif open != 0 and open <= close:\n self.generateParenthesisRecur(open - 1, close, curr + '(')\n elif close <= 0:\n return\n self.generateParenthesisRecur(open, close - 1, curr + ')')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000167", "length_bytes": 1119, "license_type": "no_license", "methods": [{"docstring": ":type n: int :rtype: List[str]", "name": "generateParenthesis", "signature": "def generateParenthesis(self, n)"}, {"docstring": ":type open: int :type close: int :type curr: str :rtype: void", "name": "generateParenthesisRecur", "signature": "def generateParenthesisRecur(self, open, close, curr)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002719", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def generateParenthesis(self, n): :type n: int :rtype: List[str]\n- def generateParenthesisRecur(self, open, close, curr): :type open: int :type close: int :type curr: str :rtype: void", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def generateParenthesis(self, n): :type n: int :rtype: List[str]\n- def generateParenthesisRecur(self, open, close, curr): :type open: int :type close: int :type curr: str :rtype: void\n\n<|skeleton|>\nclass Solution:\n\n def generateParenthesis(self, n):\n \"\"\":type n: int :rtype: List[str]\"\"\"\n <|body_0|>\n\n def generateParenthesisRecur(self, open, close, curr):\n \"\"\":type open: int :type close: int :type curr: str :rtype: void\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.res = []\n if n == 0:\n return self.res\n else:\n self.generateParenthesisRecur(n, n, '')\n return self.res\n<|end_body_0|>\n\n<|body_start_1|>\n if open == 0 and close == 0:\n self.res.append(curr)\n elif open != 0 and open <= close:\n self.generateParenthesisRecur(open - 1, close, 
curr + '(')\n elif close <= 0:\n return\n self.generateParenthesisRecur(open, close - 1, curr + ')')\n<|end_body_1|>\n", "revision_id": "8cda0518440488992d7e2c70cb8555ec7b34083f", "skeleton": "<|skeleton|>\nclass Solution:\n\n def generateParenthesis(self, n):\n \"\"\":type n: int :rtype: List[str]\"\"\"\n <|body_0|>\n\n def generateParenthesisRecur(self, open, close, curr):\n \"\"\":type open: int :type close: int :type curr: str :rtype: void\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def generateParenthesis(self, n):\n \"\"\":type n: int :rtype: List[str]\"\"\"\n self.res = []\n if n == 0:\n return self.res\n else:\n self.generateParenthesisRecur(n, n, '')\n return self.res\n\n def generateParenthesisRecur(self, open, close, curr):\n \"\"\":type open: int :type close: int :type curr: str :rtype: void\"\"\"\n if open == 0 and close == 0:\n self.res.append(curr)\n elif open != 0 and open <= close:\n self.generateParenthesisRecur(open - 1, close, curr + '(')\n elif close <= 0:\n return\n self.generateParenthesisRecur(open, close - 1, curr + ')')\n", "source": "the_stack_v2_python_sparse", "source_path": "22/main.py", "source_repo": "szhongren/leetcode", "split": "val", "star_events_count": 0}
{"blob_id": "32aeff0b0b2c4dd6bf271769146a97a0e8cf5e40", "bodies": ["trie = Trie()\ntrie.build_from_words(words)\nrslt = []\nfor w in words:\n if trie.is_concatenated_word(w, 0, 0):\n rslt.append(w)\nreturn rslt", "memo, wordSet = ({}, set(words))\n\ndef is_concatenated_word(w: str) -> bool:\n if w in memo:\n return memo[w]\n for i in range(1, len(w)):\n prefix, suffix = (w[:i], w[i:])\n if prefix in wordSet and suffix in wordSet or (prefix in wordSet and is_concatenated_word(suffix)):\n memo[w] = True\n return True\n memo[w] = False\n return False\nreturn [w for w in words if is_concatenated_word(w)]"], "bodies_text": "<|body_start_0|>\n trie = Trie()\n trie.build_from_words(words)\n rslt = []\n for w in words:\n if trie.is_concatenated_word(w, 0, 0):\n rslt.append(w)\n return rslt\n<|end_body_0|>\n\n<|body_start_1|>\n memo, wordSet = ({}, set(words))\n\n def is_concatenated_word(w: str) -> bool:\n if w in memo:\n return memo[w]\n for i in range(1, len(w)):\n prefix, suffix = (w[:i], w[i:])\n if prefix in wordSet and suffix in wordSet or (prefix in wordSet and is_concatenated_word(suffix)):\n memo[w] = True\n return True\n memo[w] = False\n return False\n return [w for w in words if is_concatenated_word(w)]\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def findAllConcatenatedWordsInADict(self, words: List[str]) -> List[str]:\n \"\"\"1. First build a prefix tree based on the input words. 2. Then use prefix tree to determine if a long word is concatenated by the previous small words by counting the total word end in the trie for the current word.\"\"\"\n <|body_0|>\n\n def findAllConcatenatedWordsInADict2(self, words: List[str]) -> List[str]:\n \"\"\"Simply brutal force with cache.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n trie = Trie()\n trie.build_from_words(words)\n rslt = []\n for w in words:\n if trie.is_concatenated_word(w, 0, 0):\n rslt.append(w)\n return rslt\n<|end_body_0|>\n\n<|body_start_1|>\n memo, wordSet = ({}, set(words))\n\n def is_concatenated_word(w: str) -> bool:\n if w in memo:\n return memo[w]\n for i in range(1, len(w)):\n prefix, suffix = (w[:i], w[i:])\n if prefix in wordSet and suffix in wordSet or (prefix in wordSet and is_concatenated_word(suffix)):\n memo[w] = True\n return True\n memo[w] = False\n return False\n return [w for w in words if is_concatenated_word(w)]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000168", "length_bytes": 2730, "license_type": "no_license", "methods": [{"docstring": "1. First build a prefix tree based on the input words. 2. Then use prefix tree to determine if a long word is concatenated by the previous small words by counting the total word end in the trie for the current word.", "name": "findAllConcatenatedWordsInADict", "signature": "def findAllConcatenatedWordsInADict(self, words: List[str]) -> List[str]"}, {"docstring": "Simply brutal force with cache.", "name": "findAllConcatenatedWordsInADict2", "signature": "def findAllConcatenatedWordsInADict2(self, words: List[str]) -> List[str]"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_030005", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def findAllConcatenatedWordsInADict(self, words: List[str]) -> List[str]: 1. First build a prefix tree based on the input words. 2. 
Then use prefix tree to determine if a long word is concatenated by the previous small words by counting the total word end in the trie for the current word.\n- def findAllConcatenatedWordsInADict2(self, words: List[str]) -> List[str]: Simply brutal force with cache.", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def findAllConcatenatedWordsInADict(self, words: List[str]) -> List[str]: 1. First build a prefix tree based on the input words. 2. Then use prefix tree to determine if a long word is concatenated by the previous small words by counting the total word end in the trie for the current word.\n- def findAllConcatenatedWordsInADict2(self, words: List[str]) -> List[str]: Simply brutal force with cache.\n\n<|skeleton|>\nclass Solution:\n\n def findAllConcatenatedWordsInADict(self, words: List[str]) -> List[str]:\n \"\"\"1. First build a prefix tree based on the input words. 2. Then use prefix tree to determine if a long word is concatenated by the previous small words by counting the total word end in the trie for the current word.\"\"\"\n <|body_0|>\n\n def findAllConcatenatedWordsInADict2(self, words: List[str]) -> List[str]:\n \"\"\"Simply brutal force with cache.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n trie = Trie()\n trie.build_from_words(words)\n rslt = []\n for w in words:\n if trie.is_concatenated_word(w, 0, 0):\n rslt.append(w)\n return rslt\n<|end_body_0|>\n\n<|body_start_1|>\n memo, wordSet = ({}, set(words))\n\n def is_concatenated_word(w: str) -> bool:\n if w in memo:\n return memo[w]\n for i in range(1, len(w)):\n prefix, suffix = (w[:i], w[i:])\n if prefix in wordSet and suffix in wordSet or (prefix in wordSet and is_concatenated_word(suffix)):\n memo[w] = True\n return True\n memo[w] = False\n return False\n return [w for w in words if is_concatenated_word(w)]\n<|end_body_1|>\n", "revision_id": "edb870f83f0c4568cce0cacec04ee70cf6b545bf", "skeleton": "<|skeleton|>\nclass Solution:\n\n def findAllConcatenatedWordsInADict(self, words: List[str]) -> List[str]:\n \"\"\"1. First build a prefix tree based on the input words. 2. Then use prefix tree to determine if a long word is concatenated by the previous small words by counting the total word end in the trie for the current word.\"\"\"\n <|body_0|>\n\n def findAllConcatenatedWordsInADict2(self, words: List[str]) -> List[str]:\n \"\"\"Simply brutal force with cache.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def findAllConcatenatedWordsInADict(self, words: List[str]) -> List[str]:\n \"\"\"1. First build a prefix tree based on the input words. 2. 
Then use prefix tree to determine if a long word is concatenated by the previous small words by counting the total word end in the trie for the current word.\"\"\"\n trie = Trie()\n trie.build_from_words(words)\n rslt = []\n for w in words:\n if trie.is_concatenated_word(w, 0, 0):\n rslt.append(w)\n return rslt\n\n def findAllConcatenatedWordsInADict2(self, words: List[str]) -> List[str]:\n \"\"\"Simply brutal force with cache.\"\"\"\n memo, wordSet = ({}, set(words))\n\n def is_concatenated_word(w: str) -> bool:\n if w in memo:\n return memo[w]\n for i in range(1, len(w)):\n prefix, suffix = (w[:i], w[i:])\n if prefix in wordSet and suffix in wordSet or (prefix in wordSet and is_concatenated_word(suffix)):\n memo[w] = True\n return True\n memo[w] = False\n return False\n return [w for w in words if is_concatenated_word(w)]\n", "source": "the_stack_v2_python_sparse", "source_path": "2020/concatenated_words.py", "source_repo": "eronekogin/leetcode", "split": "val", "star_events_count": 0}
{"blob_id": "6ca7afc0b45203b1b5fd53ad4a0b91c1e7cf6886", "bodies": ["EasyFrame.__init__(self, title='Canvas Demo 2')\nself.colors = ('blue', 'green', 'red', 'yellow')\nself.shapes = list()\nself.canvas = self.addCanvas(row=0, column=0, columnspan=2, width=300, height=150, background='gray')\nself.addButton(text='Draw oval', row=1, column=0, command=self.drawOval)\nself.addButton(text='Erase all', row=1, column=1, command=self.eraseAll)", "x = random.randint(0, 300)\ny = random.randint(0, 150)\ncolor = random.choice(self.colors)\nshape = self.canvas.drawOval(x, y, x + 25, y + 25, fill=color)\nself.shapes.append(shape)", "for shape in self.shapes:\n self.canvas.delete(shape)\nself.shapes = list()"], "bodies_text": "<|body_start_0|>\n EasyFrame.__init__(self, title='Canvas Demo 2')\n self.colors = ('blue', 'green', 'red', 'yellow')\n self.shapes = list()\n self.canvas = self.addCanvas(row=0, column=0, columnspan=2, width=300, height=150, background='gray')\n self.addButton(text='Draw oval', row=1, column=0, command=self.drawOval)\n self.addButton(text='Erase all', row=1, column=1, command=self.eraseAll)\n<|end_body_0|>\n\n<|body_start_1|>\n x = random.randint(0, 300)\n y = random.randint(0, 150)\n color = random.choice(self.colors)\n shape = self.canvas.drawOval(x, y, x + 25, y + 25, fill=color)\n self.shapes.append(shape)\n<|end_body_1|>\n\n<|body_start_2|>\n for shape in self.shapes:\n self.canvas.delete(shape)\n self.shapes = list()\n<|end_body_2|>\n", "class_docstring": "Draws filled ovals on a canvas, and allows the user to erase them all.", "class_name": "CanvasDemo", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CanvasDemo:\n \"\"\"Draws filled ovals on a canvas, and allows the user to erase them all.\"\"\"\n\n def __init__(self):\n \"\"\"Sets up the window and widgets.\"\"\"\n <|body_0|>\n\n def drawOval(self):\n \"\"\"Draws a filled oval at a random position.\"\"\"\n <|body_1|>\n\n def eraseAll(self):\n \"\"\"Deletes all ovals from the canvas.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n EasyFrame.__init__(self, title='Canvas Demo 2')\n self.colors = ('blue', 'green', 'red', 'yellow')\n self.shapes = list()\n self.canvas = self.addCanvas(row=0, column=0, columnspan=2, width=300, height=150, background='gray')\n self.addButton(text='Draw oval', row=1, column=0, command=self.drawOval)\n self.addButton(text='Erase all', row=1, column=1, command=self.eraseAll)\n<|end_body_0|>\n\n<|body_start_1|>\n x = random.randint(0, 300)\n y = random.randint(0, 150)\n color = random.choice(self.colors)\n shape = self.canvas.drawOval(x, y, x + 25, y + 25, fill=color)\n self.shapes.append(shape)\n<|end_body_1|>\n\n<|body_start_2|>\n for shape in self.shapes:\n self.canvas.delete(shape)\n self.shapes = list()\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000169", "length_bytes": 1550, "license_type": "no_license", "methods": [{"docstring": "Sets up the window and widgets.", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Draws a filled oval at a random position.", "name": "drawOval", "signature": "def drawOval(self)"}, {"docstring": "Deletes all ovals from the canvas.", "name": "eraseAll", "signature": "def eraseAll(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_011970", "prompt": "Implement the Python class `CanvasDemo` described below.\n\nClass description:\nDraws filled ovals on a canvas, and allows the user to erase them all.\n\nMethod signatures and 
docstrings:\n- def __init__(self): Sets up the window and widgets.\n- def drawOval(self): Draws a filled oval at a random position.\n- def eraseAll(self): Deletes all ovals from the canvas.", "prompted_full_text": "Implement the Python class `CanvasDemo` described below.\n\nClass description:\nDraws filled ovals on a canvas, and allows the user to erase them all.\n\nMethod signatures and docstrings:\n- def __init__(self): Sets up the window and widgets.\n- def drawOval(self): Draws a filled oval at a random position.\n- def eraseAll(self): Deletes all ovals from the canvas.\n\n<|skeleton|>\nclass CanvasDemo:\n \"\"\"Draws filled ovals on a canvas, and allows the user to erase them all.\"\"\"\n\n def __init__(self):\n \"\"\"Sets up the window and widgets.\"\"\"\n <|body_0|>\n\n def drawOval(self):\n \"\"\"Draws a filled oval at a random position.\"\"\"\n <|body_1|>\n\n def eraseAll(self):\n \"\"\"Deletes all ovals from the canvas.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n EasyFrame.__init__(self, title='Canvas Demo 2')\n self.colors = ('blue', 'green', 'red', 'yellow')\n self.shapes = list()\n self.canvas = self.addCanvas(row=0, column=0, columnspan=2, width=300, height=150, background='gray')\n self.addButton(text='Draw oval', row=1, column=0, command=self.drawOval)\n self.addButton(text='Erase all', row=1, column=1, command=self.eraseAll)\n<|end_body_0|>\n\n<|body_start_1|>\n x = random.randint(0, 300)\n y = random.randint(0, 150)\n color = random.choice(self.colors)\n shape = self.canvas.drawOval(x, y, x + 25, y + 25, fill=color)\n self.shapes.append(shape)\n<|end_body_1|>\n\n<|body_start_2|>\n for shape in self.shapes:\n self.canvas.delete(shape)\n self.shapes = list()\n<|end_body_2|>\n", "revision_id": "eca69d000dc77681a30734b073b2383c97ccc02e", "skeleton": "<|skeleton|>\nclass CanvasDemo:\n \"\"\"Draws filled ovals on a canvas, and allows the user to erase them all.\"\"\"\n\n def __init__(self):\n \"\"\"Sets up the window and widgets.\"\"\"\n <|body_0|>\n\n def drawOval(self):\n \"\"\"Draws a filled oval at a random position.\"\"\"\n <|body_1|>\n\n def eraseAll(self):\n \"\"\"Deletes all ovals from the canvas.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CanvasDemo:\n \"\"\"Draws filled ovals on a canvas, and allows the user to erase them all.\"\"\"\n\n def __init__(self):\n \"\"\"Sets up the window and widgets.\"\"\"\n EasyFrame.__init__(self, title='Canvas Demo 2')\n self.colors = ('blue', 'green', 'red', 'yellow')\n self.shapes = list()\n self.canvas = self.addCanvas(row=0, column=0, columnspan=2, width=300, height=150, background='gray')\n self.addButton(text='Draw oval', row=1, column=0, command=self.drawOval)\n self.addButton(text='Erase all', row=1, column=1, command=self.eraseAll)\n\n def drawOval(self):\n \"\"\"Draws a filled oval at a random position.\"\"\"\n x = random.randint(0, 300)\n y = random.randint(0, 150)\n color = random.choice(self.colors)\n shape = self.canvas.drawOval(x, y, x + 25, y + 25, fill=color)\n self.shapes.append(shape)\n\n def eraseAll(self):\n \"\"\"Deletes all ovals from the canvas.\"\"\"\n for shape in self.shapes:\n self.canvas.delete(shape)\n self.shapes = list()\n", "source": "the_stack_v2_python_sparse", "source_path": "gui/breezy/canvasdemo2.py", "source_repo": "lforet/robomow", "split": "val", "star_events_count": 11}
{"blob_id": "4566c329e13a252d9f7624fe3e7d45cdd4a30bec", "bodies": ["i = n\ncnt = 0\nwhile i > 0:\n print('i is {0},n is {1}'.format(i, n))\n sq = math.sqrt(i)\n if sq - sq // 1 == 0:\n n -= i\n i = n\n cnt += 1\n else:\n i -= 1\nreturn cnt", "dq = deque()\ndq.append(n)\nstep = 0\nwhile dq:\n for i in range(len(dq)):\n pop = dq.popleft()\n print('pop:', pop)\n if pop == 0:\n return step\n for k in range(1, pop + 1):\n if math.sqrt(k) - math.sqrt(k) // 1 == 0:\n dq.append(pop - k)\n step += 1", "q = list()\nq.append([n, 0])\nvisited = [False for _ in range(n + 1)]\nvisited[n] = True\nwhile any(q):\n num, step = q.pop(0)\n i = 1\n tNum = num - i ** 2\n while tNum >= 0:\n if tNum == 0:\n return step + 1\n if not visited[tNum]:\n q.append((tNum, step + 1))\n visited[tNum] = True\n i += 1\n tNum = num - i ** 2"], "bodies_text": "<|body_start_0|>\n i = n\n cnt = 0\n while i > 0:\n print('i is {0},n is {1}'.format(i, n))\n sq = math.sqrt(i)\n if sq - sq // 1 == 0:\n n -= i\n i = n\n cnt += 1\n else:\n i -= 1\n return cnt\n<|end_body_0|>\n\n<|body_start_1|>\n dq = deque()\n dq.append(n)\n step = 0\n while dq:\n for i in range(len(dq)):\n pop = dq.popleft()\n print('pop:', pop)\n if pop == 0:\n return step\n for k in range(1, pop + 1):\n if math.sqrt(k) - math.sqrt(k) // 1 == 0:\n dq.append(pop - k)\n step += 1\n<|end_body_1|>\n\n<|body_start_2|>\n q = list()\n q.append([n, 0])\n visited = [False for _ in range(n + 1)]\n visited[n] = True\n while any(q):\n num, step = q.pop(0)\n i = 1\n tNum = num - i ** 2\n while tNum >= 0:\n if tNum == 0:\n return step + 1\n if not visited[tNum]:\n q.append((tNum, step + 1))\n visited[tNum] = True\n i += 1\n tNum = num - i ** 2\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def numSquares_failed(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_0|>\n\n def numSquares_slow(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_1|>\n\n def numSquares(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n i = n\n cnt = 0\n while i > 0:\n print('i is {0},n is {1}'.format(i, n))\n sq = math.sqrt(i)\n if sq - sq // 1 == 0:\n n -= i\n i = n\n cnt += 1\n else:\n i -= 1\n return cnt\n<|end_body_0|>\n\n<|body_start_1|>\n dq = deque()\n dq.append(n)\n step = 0\n while dq:\n for i in range(len(dq)):\n pop = dq.popleft()\n print('pop:', pop)\n if pop == 0:\n return step\n for k in range(1, pop + 1):\n if math.sqrt(k) - math.sqrt(k) // 1 == 0:\n dq.append(pop - k)\n step += 1\n<|end_body_1|>\n\n<|body_start_2|>\n q = list()\n q.append([n, 0])\n visited = [False for _ in range(n + 1)]\n visited[n] = True\n while any(q):\n num, step = q.pop(0)\n i = 1\n tNum = num - i ** 2\n while tNum >= 0:\n if tNum == 0:\n return step + 1\n if not visited[tNum]:\n q.append((tNum, step + 1))\n visited[tNum] = True\n i += 1\n tNum = num - i ** 2\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000170", "length_bytes": 2586, "license_type": "no_license", "methods": [{"docstring": ":type n: int :rtype: int", "name": "numSquares_failed", "signature": "def numSquares_failed(self, n)"}, {"docstring": ":type n: int :rtype: int", "name": "numSquares_slow", "signature": "def numSquares_slow(self, n)"}, {"docstring": ":type n: int :rtype: int", "name": "numSquares", "signature": "def numSquares(self, n)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_003390", 
"prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def numSquares_failed(self, n): :type n: int :rtype: int\n- def numSquares_slow(self, n): :type n: int :rtype: int\n- def numSquares(self, n): :type n: int :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def numSquares_failed(self, n): :type n: int :rtype: int\n- def numSquares_slow(self, n): :type n: int :rtype: int\n- def numSquares(self, n): :type n: int :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def numSquares_failed(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_0|>\n\n def numSquares_slow(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_1|>\n\n def numSquares(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n i = n\n cnt = 0\n while i > 0:\n print('i is {0},n is {1}'.format(i, n))\n sq = math.sqrt(i)\n if sq - sq // 1 == 0:\n n -= i\n i = n\n cnt += 1\n else:\n i -= 1\n return cnt\n<|end_body_0|>\n\n<|body_start_1|>\n dq = deque()\n dq.append(n)\n step = 0\n while dq:\n for i in range(len(dq)):\n pop = dq.popleft()\n print('pop:', pop)\n if pop == 0:\n return step\n for k in range(1, pop + 1):\n if math.sqrt(k) - math.sqrt(k) // 1 == 0:\n dq.append(pop - k)\n step += 1\n<|end_body_1|>\n\n<|body_start_2|>\n q = list()\n q.append([n, 0])\n visited = [False for _ in range(n + 1)]\n visited[n] = True\n while any(q):\n num, step = q.pop(0)\n i = 1\n tNum = num - i ** 2\n while tNum >= 0:\n if tNum == 0:\n return step + 1\n if not visited[tNum]:\n q.append((tNum, step + 1))\n visited[tNum] = True\n i += 1\n tNum = num - i ** 2\n<|end_body_2|>\n", "revision_id": "93266095329e2e8e949a72371b88b07382a60e0d", "skeleton": "<|skeleton|>\nclass Solution:\n\n def numSquares_failed(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_0|>\n\n def numSquares_slow(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_1|>\n\n def numSquares(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def numSquares_failed(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n i = n\n cnt = 0\n while i > 0:\n print('i is {0},n is {1}'.format(i, n))\n sq = math.sqrt(i)\n if sq - sq // 1 == 0:\n n -= i\n i = n\n cnt += 1\n else:\n i -= 1\n return cnt\n\n def numSquares_slow(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n dq = deque()\n dq.append(n)\n step = 0\n while dq:\n for i in range(len(dq)):\n pop = dq.popleft()\n print('pop:', pop)\n if pop == 0:\n return step\n for k in range(1, pop + 1):\n if math.sqrt(k) - math.sqrt(k) // 1 == 0:\n dq.append(pop - k)\n step += 1\n\n def numSquares(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n q = list()\n q.append([n, 0])\n visited = [False for _ in range(n + 1)]\n visited[n] = True\n while any(q):\n num, step = q.pop(0)\n i = 1\n tNum = num - i ** 2\n while tNum >= 0:\n if tNum == 0:\n return step + 1\n if not visited[tNum]:\n q.append((tNum, step + 1))\n visited[tNum] = True\n i += 1\n tNum = num - i ** 2\n", "source": "the_stack_v2_python_sparse", "source_path": "numSquares.py", "source_repo": "shivangi-prog/leetcode", "split": "val", "star_events_count": 0}
{"blob_id": "8b3f61222332b276c900f83c0287b3a7eea38922", "bodies": ["if datacenter is None:\n pylogger.error('Datacenter name is required')\nroot_folder = client_object.get_root_folder()\nfor folder in root_folder.childEntity:\n if isinstance(folder, vim.Datacenter):\n if folder.name == datacenter:\n for component in folder.networkFolder.childEntity:\n if isinstance(component, vim.DistributedVirtualSwitch) or isinstance(component, vim.dvs.VmwareDistributedVirtualSwitch):\n if component.name == name:\n return constants.Result.SUCCESS\n elif isinstance(folder, vim.Folder):\n child = cls._recurse(folder)\n if isinstance(child, vim.Datacenter):\n if child.name == datacenter:\n for component in child.networkFolder.childEntity:\n if isinstance(component, vim.DistributedVirtualSwitch) or isinstance(component, vim.dvs.VmwareDistributedVirtualSwitch):\n if component.name == name:\n return constants.Result.SUCCESS\nreturn constants.Result.FAILURE", "for child in entity.childEntity:\n if isinstance(child, vim.Folder):\n child = cls._recurse(child)\n return child\n elif isinstance(child, vim.Datacenter):\n return child"], "bodies_text": "<|body_start_0|>\n if datacenter is None:\n pylogger.error('Datacenter name is required')\n root_folder = client_object.get_root_folder()\n for folder in root_folder.childEntity:\n if isinstance(folder, vim.Datacenter):\n if folder.name == datacenter:\n for component in folder.networkFolder.childEntity:\n if isinstance(component, vim.DistributedVirtualSwitch) or isinstance(component, vim.dvs.VmwareDistributedVirtualSwitch):\n if component.name == name:\n return constants.Result.SUCCESS\n elif isinstance(folder, vim.Folder):\n child = cls._recurse(folder)\n if isinstance(child, vim.Datacenter):\n if child.name == datacenter:\n for component in child.networkFolder.childEntity:\n if isinstance(component, vim.DistributedVirtualSwitch) or isinstance(component, vim.dvs.VmwareDistributedVirtualSwitch):\n if component.name == name:\n return constants.Result.SUCCESS\n return constants.Result.FAILURE\n<|end_body_0|>\n\n<|body_start_1|>\n for child in entity.childEntity:\n if isinstance(child, vim.Folder):\n child = cls._recurse(child)\n return child\n elif isinstance(child, vim.Datacenter):\n return child\n<|end_body_1|>\n", "class_docstring": "", "class_name": "VC55SwitchImpl", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass VC55SwitchImpl:\n\n def check_DVS_exists(cls, client_object, name=None, datacenter=None):\n \"\"\"Checks if a distributed virtual switch exists. 
@type client_object: VCAPIClient instance @param client_object: VCAPIClient instance @type name: str @param name: DVS name @type datacenter: str @param datacenter: Datacenter name @rtype: str @return: Success or Failure\"\"\"\n <|body_0|>\n\n def _recurse(cls, entity):\n \"\"\"Helper to recurse through nested folders\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if datacenter is None:\n pylogger.error('Datacenter name is required')\n root_folder = client_object.get_root_folder()\n for folder in root_folder.childEntity:\n if isinstance(folder, vim.Datacenter):\n if folder.name == datacenter:\n for component in folder.networkFolder.childEntity:\n if isinstance(component, vim.DistributedVirtualSwitch) or isinstance(component, vim.dvs.VmwareDistributedVirtualSwitch):\n if component.name == name:\n return constants.Result.SUCCESS\n elif isinstance(folder, vim.Folder):\n child = cls._recurse(folder)\n if isinstance(child, vim.Datacenter):\n if child.name == datacenter:\n for component in child.networkFolder.childEntity:\n if isinstance(component, vim.DistributedVirtualSwitch) or isinstance(component, vim.dvs.VmwareDistributedVirtualSwitch):\n if component.name == name:\n return constants.Result.SUCCESS\n return constants.Result.FAILURE\n<|end_body_0|>\n\n<|body_start_1|>\n for child in entity.childEntity:\n if isinstance(child, vim.Folder):\n child = cls._recurse(child)\n return child\n elif isinstance(child, vim.Datacenter):\n return child\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000171", "length_bytes": 2762, "license_type": "no_license", "methods": [{"docstring": "Checks if a distributed virtual switch exists. @type client_object: VCAPIClient instance @param client_object: VCAPIClient instance @type name: str @param name: DVS name @type datacenter: str @param datacenter: Datacenter name @rtype: str @return: Success or Failure", "name": "check_DVS_exists", "signature": "def check_DVS_exists(cls, client_object, name=None, datacenter=None)"}, {"docstring": "Helper to recurse through nested folders", "name": "_recurse", "signature": "def _recurse(cls, entity)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_032816", "prompt": "Implement the Python class `VC55SwitchImpl` described below.\n\nClass description:\nImplement the VC55SwitchImpl class.\n\nMethod signatures and docstrings:\n- def check_DVS_exists(cls, client_object, name=None, datacenter=None): Checks if a distributed virtual switch exists. @type client_object: VCAPIClient instance @param client_object: VCAPIClient instance @type name: str @param name: DVS name @type datacenter: str @param datacenter: Datacenter name @rtype: str @return: Success or Failure\n- def _recurse(cls, entity): Helper to recurse through nested folders", "prompted_full_text": "Implement the Python class `VC55SwitchImpl` described below.\n\nClass description:\nImplement the VC55SwitchImpl class.\n\nMethod signatures and docstrings:\n- def check_DVS_exists(cls, client_object, name=None, datacenter=None): Checks if a distributed virtual switch exists. @type client_object: VCAPIClient instance @param client_object: VCAPIClient instance @type name: str @param name: DVS name @type datacenter: str @param datacenter: Datacenter name @rtype: str @return: Success or Failure\n- def _recurse(cls, entity): Helper to recurse through nested folders\n\n<|skeleton|>\nclass VC55SwitchImpl:\n\n def check_DVS_exists(cls, client_object, name=None, datacenter=None):\n \"\"\"Checks if a distributed virtual switch exists. 
@type client_object: VCAPIClient instance @param client_object: VCAPIClient instance @type name: str @param name: DVS name @type datacenter: str @param datacenter: Datacenter name @rtype: str @return: Success or Failure\"\"\"\n <|body_0|>\n\n def _recurse(cls, entity):\n \"\"\"Helper to recurse through nested folders\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if datacenter is None:\n pylogger.error('Datacenter name is required')\n root_folder = client_object.get_root_folder()\n for folder in root_folder.childEntity:\n if isinstance(folder, vim.Datacenter):\n if folder.name == datacenter:\n for component in folder.networkFolder.childEntity:\n if isinstance(component, vim.DistributedVirtualSwitch) or isinstance(component, vim.dvs.VmwareDistributedVirtualSwitch):\n if component.name == name:\n return constants.Result.SUCCESS\n elif isinstance(folder, vim.Folder):\n child = cls._recurse(folder)\n if isinstance(child, vim.Datacenter):\n if child.name == datacenter:\n for component in child.networkFolder.childEntity:\n if isinstance(component, vim.DistributedVirtualSwitch) or isinstance(component, vim.dvs.VmwareDistributedVirtualSwitch):\n if component.name == name:\n return constants.Result.SUCCESS\n return constants.Result.FAILURE\n<|end_body_0|>\n\n<|body_start_1|>\n for child in entity.childEntity:\n if isinstance(child, vim.Folder):\n child = cls._recurse(child)\n return child\n elif isinstance(child, vim.Datacenter):\n return child\n<|end_body_1|>\n", "revision_id": "5b55817c050b637e2747084290f6206d2e622938", "skeleton": "<|skeleton|>\nclass VC55SwitchImpl:\n\n def check_DVS_exists(cls, client_object, name=None, datacenter=None):\n \"\"\"Checks if a distributed virtual switch exists. @type client_object: VCAPIClient instance @param client_object: VCAPIClient instance @type name: str @param name: DVS name @type datacenter: str @param datacenter: Datacenter name @rtype: str @return: Success or Failure\"\"\"\n <|body_0|>\n\n def _recurse(cls, entity):\n \"\"\"Helper to recurse through nested folders\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class VC55SwitchImpl:\n def check_DVS_exists(cls, client_object, name=None, datacenter=None):\n \"\"\"Checks if a distributed virtual switch exists. 
@type client_object: VCAPIClient instance @param client_object: VCAPIClient instance @type name: str @param name: DVS name @type datacenter: str @param datacenter: Datacenter name @rtype: str @return: Success or Failure\"\"\"\n if datacenter is None:\n pylogger.error('Datacenter name is required')\n root_folder = client_object.get_root_folder()\n for folder in root_folder.childEntity:\n if isinstance(folder, vim.Datacenter):\n if folder.name == datacenter:\n for component in folder.networkFolder.childEntity:\n if isinstance(component, vim.DistributedVirtualSwitch) or isinstance(component, vim.dvs.VmwareDistributedVirtualSwitch):\n if component.name == name:\n return constants.Result.SUCCESS\n elif isinstance(folder, vim.Folder):\n child = cls._recurse(folder)\n if isinstance(child, vim.Datacenter):\n if child.name == datacenter:\n for component in child.networkFolder.childEntity:\n if isinstance(component, vim.DistributedVirtualSwitch) or isinstance(component, vim.dvs.VmwareDistributedVirtualSwitch):\n if component.name == name:\n return constants.Result.SUCCESS\n return constants.Result.FAILURE\n\n def _recurse(cls, entity):\n \"\"\"Helper to recurse through nested folders\"\"\"\n for child in entity.childEntity:\n if isinstance(child, vim.Folder):\n child = cls._recurse(child)\n return child\n elif isinstance(child, vim.Datacenter):\n return child\n", "source": "the_stack_v2_python_sparse", "source_path": "SystemTesting/pylib/vmware/vsphere/vc/api/vc55_switch_impl.py", "source_repo": "Cloudxtreme/MyProject", "split": "val", "star_events_count": 0}
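A note on the VC55SwitchImpl record above: its _recurse body returns from inside the first vim.Folder child it meets, whether or not the recursion found a datacenter, so datacenters under later sibling folders are never visited. A hedged rewrite of that traversal, assuming the pyVmomi vim namespace the record relies on:

from pyVmomi import vim  # assumption: pyVmomi is installed, as the record implies

def find_datacenter(entity, name):
    # Depth-first search that keeps scanning siblings instead of returning
    # after the first folder, unlike the record's _recurse.
    for child in entity.childEntity:
        if isinstance(child, vim.Datacenter) and child.name == name:
            return child
        if isinstance(child, vim.Folder):
            found = find_datacenter(child, name)
            if found is not None:
                return found
    return None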
{"blob_id": "cc387584584a8c1ce95a51112eb8a00c61e13b5c", "bodies": ["super(BasicTcpClient, self).__init__(client_id, is_sharing)\ntry:\n self.in_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.in_socket.connect((ip, in_socket_port))\n self.out_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.out_socket.connect((ip, out_socket_port))\n self.send_packet(self.id)\nexcept socket.error as e:\n print(type(self).__name__, e)\n raise e", "try:\n self.out_socket.sendall(create_packet(data))\nexcept socket.error as e:\n if self.running:\n print(type(self).__name__, e)\n self.network_error.emit(str(e))", "super(BasicTcpClient, self).close()\nself.send_packet(EXIT_SIGN)\nself.in_socket.close()\nself.out_socket.close()"], "bodies_text": "<|body_start_0|>\n super(BasicTcpClient, self).__init__(client_id, is_sharing)\n try:\n self.in_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.in_socket.connect((ip, in_socket_port))\n self.out_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.out_socket.connect((ip, out_socket_port))\n self.send_packet(self.id)\n except socket.error as e:\n print(type(self).__name__, e)\n raise e\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n self.out_socket.sendall(create_packet(data))\n except socket.error as e:\n if self.running:\n print(type(self).__name__, e)\n self.network_error.emit(str(e))\n<|end_body_1|>\n\n<|body_start_2|>\n super(BasicTcpClient, self).close()\n self.send_packet(EXIT_SIGN)\n self.in_socket.close()\n self.out_socket.close()\n<|end_body_2|>\n", "class_docstring": "Definition of the abstract class BasicTCPClient.", "class_name": "BasicTcpClient", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BasicTcpClient:\n \"\"\"Definition of the abstract class BasicTCPClient.\"\"\"\n\n def __init__(self, ip: str, in_socket_port: int, out_socket_port: int, client_id: bytes, is_sharing=True):\n \"\"\"Initializes input and output sockets.\"\"\"\n <|body_0|>\n\n def send_packet(self, data: bytes):\n \"\"\"Creates a packet of data and sends it.\"\"\"\n <|body_1|>\n\n def close(self):\n \"\"\"Sends the server EXIT_SIGN (if the output socket is open) and closes the sockets.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(BasicTcpClient, self).__init__(client_id, is_sharing)\n try:\n self.in_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.in_socket.connect((ip, in_socket_port))\n self.out_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.out_socket.connect((ip, out_socket_port))\n self.send_packet(self.id)\n except socket.error as e:\n print(type(self).__name__, e)\n raise e\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n self.out_socket.sendall(create_packet(data))\n except socket.error as e:\n if self.running:\n print(type(self).__name__, e)\n self.network_error.emit(str(e))\n<|end_body_1|>\n\n<|body_start_2|>\n super(BasicTcpClient, self).close()\n self.send_packet(EXIT_SIGN)\n self.in_socket.close()\n self.out_socket.close()\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000172", "length_bytes": 1953, "license_type": "no_license", "methods": [{"docstring": "Initializes input and output sockets.", "name": "__init__", "signature": "def __init__(self, ip: str, in_socket_port: int, out_socket_port: int, client_id: bytes, is_sharing=True)"}, {"docstring": "Creates a packet of data and sends it.", "name": "send_packet", "signature": "def send_packet(self, data: bytes)"}, {"docstring": 
"Sends the server EXIT_SIGN (if the output socket is open) and closes the sockets.", "name": "close", "signature": "def close(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_042247", "prompt": "Implement the Python class `BasicTcpClient` described below.\n\nClass description:\nDefinition of the abstract class BasicTCPClient.\n\nMethod signatures and docstrings:\n- def __init__(self, ip: str, in_socket_port: int, out_socket_port: int, client_id: bytes, is_sharing=True): Initializes input and output sockets.\n- def send_packet(self, data: bytes): Creates a packet of data and sends it.\n- def close(self): Sends the server EXIT_SIGN (if the output socket is open) and closes the sockets.", "prompted_full_text": "Implement the Python class `BasicTcpClient` described below.\n\nClass description:\nDefinition of the abstract class BasicTCPClient.\n\nMethod signatures and docstrings:\n- def __init__(self, ip: str, in_socket_port: int, out_socket_port: int, client_id: bytes, is_sharing=True): Initializes input and output sockets.\n- def send_packet(self, data: bytes): Creates a packet of data and sends it.\n- def close(self): Sends the server EXIT_SIGN (if the output socket is open) and closes the sockets.\n\n<|skeleton|>\nclass BasicTcpClient:\n \"\"\"Definition of the abstract class BasicTCPClient.\"\"\"\n\n def __init__(self, ip: str, in_socket_port: int, out_socket_port: int, client_id: bytes, is_sharing=True):\n \"\"\"Initializes input and output sockets.\"\"\"\n <|body_0|>\n\n def send_packet(self, data: bytes):\n \"\"\"Creates a packet of data and sends it.\"\"\"\n <|body_1|>\n\n def close(self):\n \"\"\"Sends the server EXIT_SIGN (if the output socket is open) and closes the sockets.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(BasicTcpClient, self).__init__(client_id, is_sharing)\n try:\n self.in_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.in_socket.connect((ip, in_socket_port))\n self.out_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.out_socket.connect((ip, out_socket_port))\n self.send_packet(self.id)\n except socket.error as e:\n print(type(self).__name__, e)\n raise e\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n self.out_socket.sendall(create_packet(data))\n except socket.error as e:\n if self.running:\n print(type(self).__name__, e)\n self.network_error.emit(str(e))\n<|end_body_1|>\n\n<|body_start_2|>\n super(BasicTcpClient, self).close()\n self.send_packet(EXIT_SIGN)\n self.in_socket.close()\n self.out_socket.close()\n<|end_body_2|>\n", "revision_id": "d88933620286e655c39776e0a4e99de9d9067172", "skeleton": "<|skeleton|>\nclass BasicTcpClient:\n \"\"\"Definition of the abstract class BasicTCPClient.\"\"\"\n\n def __init__(self, ip: str, in_socket_port: int, out_socket_port: int, client_id: bytes, is_sharing=True):\n \"\"\"Initializes input and output sockets.\"\"\"\n <|body_0|>\n\n def send_packet(self, data: bytes):\n \"\"\"Creates a packet of data and sends it.\"\"\"\n <|body_1|>\n\n def close(self):\n \"\"\"Sends the server EXIT_SIGN (if the output socket is open) and closes the sockets.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class BasicTcpClient:\n \"\"\"Definition of the abstract class BasicTCPClient.\"\"\"\n\n def __init__(self, ip: str, in_socket_port: int, out_socket_port: int, client_id: bytes, is_sharing=True):\n \"\"\"Initializes 
input and output sockets.\"\"\"\n super(BasicTcpClient, self).__init__(client_id, is_sharing)\n try:\n self.in_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.in_socket.connect((ip, in_socket_port))\n self.out_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.out_socket.connect((ip, out_socket_port))\n self.send_packet(self.id)\n except socket.error as e:\n print(type(self).__name__, e)\n raise e\n\n def send_packet(self, data: bytes):\n \"\"\"Creates a packet of data and sends it.\"\"\"\n try:\n self.out_socket.sendall(create_packet(data))\n except socket.error as e:\n if self.running:\n print(type(self).__name__, e)\n self.network_error.emit(str(e))\n\n def close(self):\n \"\"\"Sends the server EXIT_SIGN (if the output socket is open) and closes the sockets.\"\"\"\n super(BasicTcpClient, self).close()\n self.send_packet(EXIT_SIGN)\n self.in_socket.close()\n self.out_socket.close()\n", "source": "the_stack_v2_python_sparse", "source_path": "client/basic_tcp_client.py", "source_repo": "HadarShahar/zoom", "split": "val", "star_events_count": 0}
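The BasicTcpClient record above calls create_packet and uses EXIT_SIGN without defining either. One plausible shape, length-prefixed framing, is sketched below; both definitions are assumptions, since the record never shows the real helpers:

import socket
import struct

EXIT_SIGN = b'EXIT'  # hypothetical value; the record never defines it

def create_packet(data: bytes) -> bytes:
    # Hypothetical framing: 4-byte big-endian length prefix, then the payload.
    return struct.pack('>I', len(data)) + data

def recv_exact(sock: socket.socket, n: int) -> bytes:
    # TCP recv may return short reads, so loop until n bytes arrive.
    buf = b''
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:
            raise ConnectionError('socket closed before packet completed')
        buf += chunk
    return buf

def read_packet(sock: socket.socket) -> bytes:
    (length,) = struct.unpack('>I', recv_exact(sock, 4))
    return recv_exact(sock, length)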
{"blob_id": "654b20e76d60557a7104d47149aaff209de3d9d1", "bodies": ["self.big = big\nself.medium = medium\nself.small = small", "if carType == 1:\n if self.big == 0:\n return False\n else:\n self.big -= 1\n return True\nelif carType == 2:\n if self.medium == 0:\n return False\n else:\n self.medium -= 1\n return True\nelif carType == 3:\n if self.small == 0:\n return False\n else:\n self.small -= 1\n return True"], "bodies_text": "<|body_start_0|>\n self.big = big\n self.medium = medium\n self.small = small\n<|end_body_0|>\n\n<|body_start_1|>\n if carType == 1:\n if self.big == 0:\n return False\n else:\n self.big -= 1\n return True\n elif carType == 2:\n if self.medium == 0:\n return False\n else:\n self.medium -= 1\n return True\n elif carType == 3:\n if self.small == 0:\n return False\n else:\n self.small -= 1\n return True\n<|end_body_1|>\n", "class_docstring": "", "class_name": "ParkingSystem", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ParkingSystem:\n\n def __init__(self, big, medium, small):\n \"\"\":type big: int :type medium: int :type small: int\"\"\"\n <|body_0|>\n\n def addCar(self, carType):\n \"\"\":type carType: int :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.big = big\n self.medium = medium\n self.small = small\n<|end_body_0|>\n\n<|body_start_1|>\n if carType == 1:\n if self.big == 0:\n return False\n else:\n self.big -= 1\n return True\n elif carType == 2:\n if self.medium == 0:\n return False\n else:\n self.medium -= 1\n return True\n elif carType == 3:\n if self.small == 0:\n return False\n else:\n self.small -= 1\n return True\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000173", "length_bytes": 1674, "license_type": "permissive", "methods": [{"docstring": ":type big: int :type medium: int :type small: int", "name": "__init__", "signature": "def __init__(self, big, medium, small)"}, {"docstring": ":type carType: int :rtype: bool", "name": "addCar", "signature": "def addCar(self, carType)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_001862", "prompt": "Implement the Python class `ParkingSystem` described below.\n\nClass description:\nImplement the ParkingSystem class.\n\nMethod signatures and docstrings:\n- def __init__(self, big, medium, small): :type big: int :type medium: int :type small: int\n- def addCar(self, carType): :type carType: int :rtype: bool", "prompted_full_text": "Implement the Python class `ParkingSystem` described below.\n\nClass description:\nImplement the ParkingSystem class.\n\nMethod signatures and docstrings:\n- def __init__(self, big, medium, small): :type big: int :type medium: int :type small: int\n- def addCar(self, carType): :type carType: int :rtype: bool\n\n<|skeleton|>\nclass ParkingSystem:\n\n def __init__(self, big, medium, small):\n \"\"\":type big: int :type medium: int :type small: int\"\"\"\n <|body_0|>\n\n def addCar(self, carType):\n \"\"\":type carType: int :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.big = big\n self.medium = medium\n self.small = small\n<|end_body_0|>\n\n<|body_start_1|>\n if carType == 1:\n if self.big == 0:\n return False\n else:\n self.big -= 1\n return True\n elif carType == 2:\n if self.medium == 0:\n return False\n else:\n self.medium -= 1\n return True\n elif carType == 3:\n if self.small == 0:\n return False\n else:\n self.small -= 1\n return True\n<|end_body_1|>\n", "revision_id": 
"b19ae99715f4b7a0d95e32b128a3679e6a298a6f", "skeleton": "<|skeleton|>\nclass ParkingSystem:\n\n def __init__(self, big, medium, small):\n \"\"\":type big: int :type medium: int :type small: int\"\"\"\n <|body_0|>\n\n def addCar(self, carType):\n \"\"\":type carType: int :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ParkingSystem:\n def __init__(self, big, medium, small):\n \"\"\":type big: int :type medium: int :type small: int\"\"\"\n self.big = big\n self.medium = medium\n self.small = small\n\n def addCar(self, carType):\n \"\"\":type carType: int :rtype: bool\"\"\"\n if carType == 1:\n if self.big == 0:\n return False\n else:\n self.big -= 1\n return True\n elif carType == 2:\n if self.medium == 0:\n return False\n else:\n self.medium -= 1\n return True\n elif carType == 3:\n if self.small == 0:\n return False\n else:\n self.small -= 1\n return True\n", "source": "the_stack_v2_python_sparse", "source_path": "1603_design_parking_system.py", "source_repo": "mjtsai/leetcode", "split": "val", "star_events_count": 0}
{"blob_id": "bc961275221ea148a4f8f2926037c4afa3bc0ae4", "bodies": ["self.driftwood = driftwood\nself.__registry = []\nself.__latest_tick = SDL_GetTicks()\nself.paused = False\nself.paused_at = None", "for reg in self.__registry:\n if reg['callback'] == callback:\n self.unregister(callback)\nself.__registry.append({'ticks': self.__latest_tick, 'delay': delay, 'callback': callback, 'once': once})\nself.driftwood.log.info('Tick', 'registered', callback.__qualname__)", "for n, reg in enumerate(self.__registry):\n if reg['callback'] == callback:\n del self.__registry[n]\n self.driftwood.log.info('Tick', 'unregistered', callback.__qualname__)", "current_tick = SDL_GetTicks()\nlast_tick = self.__latest_tick\nself.__latest_tick = current_tick\nif not self.paused:\n for reg in self.__registry:\n millis_past = current_tick - reg['ticks']\n if reg['delay']:\n if millis_past >= reg['delay']:\n reg['ticks'] = current_tick\n reg['callback'](millis_past)\n if reg['once']:\n self.unregister(reg['callback'])\n else:\n reg['ticks'] = current_tick\n reg['callback'](millis_past)\n if reg['once']:\n self.unregister(reg['callback'])\nelse:\n self.driftwood.input.tick(None)\n self.driftwood.window.tick(None)\ntick_delta = current_tick - last_tick\nif tick_delta < 1000 // self.driftwood.config['tick']['tps']:\n SDL_Delay(1000 // self.driftwood.config['tick']['tps'] - tick_delta)", "if self.paused:\n self.paused = False\n paused_for = SDL_GetTicks() - self.paused_at\n for reg in self.__registry:\n reg['ticks'] += paused_for\n self.paused_at = None\nelse:\n self.paused = True\n self.paused_at = SDL_GetTicks()"], "bodies_text": "<|body_start_0|>\n self.driftwood = driftwood\n self.__registry = []\n self.__latest_tick = SDL_GetTicks()\n self.paused = False\n self.paused_at = None\n<|end_body_0|>\n\n<|body_start_1|>\n for reg in self.__registry:\n if reg['callback'] == callback:\n self.unregister(callback)\n self.__registry.append({'ticks': self.__latest_tick, 'delay': delay, 'callback': callback, 'once': once})\n self.driftwood.log.info('Tick', 'registered', callback.__qualname__)\n<|end_body_1|>\n\n<|body_start_2|>\n for n, reg in enumerate(self.__registry):\n if reg['callback'] == callback:\n del self.__registry[n]\n self.driftwood.log.info('Tick', 'unregistered', callback.__qualname__)\n<|end_body_2|>\n\n<|body_start_3|>\n current_tick = SDL_GetTicks()\n last_tick = self.__latest_tick\n self.__latest_tick = current_tick\n if not self.paused:\n for reg in self.__registry:\n millis_past = current_tick - reg['ticks']\n if reg['delay']:\n if millis_past >= reg['delay']:\n reg['ticks'] = current_tick\n reg['callback'](millis_past)\n if reg['once']:\n self.unregister(reg['callback'])\n else:\n reg['ticks'] = current_tick\n reg['callback'](millis_past)\n if reg['once']:\n self.unregister(reg['callback'])\n else:\n self.driftwood.input.tick(None)\n self.driftwood.window.tick(None)\n tick_delta = current_tick - last_tick\n if tick_delta < 1000 // self.driftwood.config['tick']['tps']:\n SDL_Delay(1000 // self.driftwood.config['tick']['tps'] - tick_delta)\n<|end_body_3|>\n\n<|body_start_4|>\n if self.paused:\n self.paused = False\n paused_for = SDL_GetTicks() - self.paused_at\n for reg in self.__registry:\n reg['ticks'] += paused_for\n self.paused_at = None\n else:\n self.paused = True\n self.paused_at = SDL_GetTicks()\n<|end_body_4|>\n", "class_docstring": "The Tick Manager This class manages tick callbacks. 
Attributes: driftwood: Base class instance.", "class_name": "TickManager", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TickManager:\n \"\"\"The Tick Manager This class manages tick callbacks. Attributes: driftwood: Base class instance.\"\"\"\n\n def __init__(self, driftwood):\n \"\"\"TickManager class initializer. Args: driftwood: Base class instance.\"\"\"\n <|body_0|>\n\n def register(self, callback, delay=0, once=False):\n \"\"\"Register a tick callback, with an optional delay between calls. Args: callback: The function to be called. delay: (optional) Delay in milliseconds between calls. once: Whether to only call once.\"\"\"\n <|body_1|>\n\n def unregister(self, callback):\n \"\"\"Unregister a tick callback. Args: callback: The function to unregister.\"\"\"\n <|body_2|>\n\n def tick(self):\n \"\"\"Call all registered tick callbacks not currently delayed, and regulate tps.\"\"\"\n <|body_3|>\n\n def toggle_pause(self):\n \"\"\"Toggle a pause in all registered ticks. During this time, no ticks will get called, and all timing related information is kept track of and is restored upon unpause. Contrary to this, this, InputManager and WindowManager still receieve ticks during a pause, but they are told that the number of milliseconds that have passed is None (not 0).\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.driftwood = driftwood\n self.__registry = []\n self.__latest_tick = SDL_GetTicks()\n self.paused = False\n self.paused_at = None\n<|end_body_0|>\n\n<|body_start_1|>\n for reg in self.__registry:\n if reg['callback'] == callback:\n self.unregister(callback)\n self.__registry.append({'ticks': self.__latest_tick, 'delay': delay, 'callback': callback, 'once': once})\n self.driftwood.log.info('Tick', 'registered', callback.__qualname__)\n<|end_body_1|>\n\n<|body_start_2|>\n for n, reg in enumerate(self.__registry):\n if reg['callback'] == callback:\n del self.__registry[n]\n self.driftwood.log.info('Tick', 'unregistered', callback.__qualname__)\n<|end_body_2|>\n\n<|body_start_3|>\n current_tick = SDL_GetTicks()\n last_tick = self.__latest_tick\n self.__latest_tick = current_tick\n if not self.paused:\n for reg in self.__registry:\n millis_past = current_tick - reg['ticks']\n if reg['delay']:\n if millis_past >= reg['delay']:\n reg['ticks'] = current_tick\n reg['callback'](millis_past)\n if reg['once']:\n self.unregister(reg['callback'])\n else:\n reg['ticks'] = current_tick\n reg['callback'](millis_past)\n if reg['once']:\n self.unregister(reg['callback'])\n else:\n self.driftwood.input.tick(None)\n self.driftwood.window.tick(None)\n tick_delta = current_tick - last_tick\n if tick_delta < 1000 // self.driftwood.config['tick']['tps']:\n SDL_Delay(1000 // self.driftwood.config['tick']['tps'] - tick_delta)\n<|end_body_3|>\n\n<|body_start_4|>\n if self.paused:\n self.paused = False\n paused_for = SDL_GetTicks() - self.paused_at\n for reg in self.__registry:\n reg['ticks'] += paused_for\n self.paused_at = None\n else:\n self.paused = True\n self.paused_at = SDL_GetTicks()\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000174", "length_bytes": 5529, "license_type": "permissive", "methods": [{"docstring": "TickManager class initializer. Args: driftwood: Base class instance.", "name": "__init__", "signature": "def __init__(self, driftwood)"}, {"docstring": "Register a tick callback, with an optional delay between calls. Args: callback: The function to be called. 
delay: (optional) Delay in milliseconds between calls. once: Whether to only call once.", "name": "register", "signature": "def register(self, callback, delay=0, once=False)"}, {"docstring": "Unregister a tick callback. Args: callback: The function to unregister.", "name": "unregister", "signature": "def unregister(self, callback)"}, {"docstring": "Call all registered tick callbacks not currently delayed, and regulate tps.", "name": "tick", "signature": "def tick(self)"}, {"docstring": "Toggle a pause in all registered ticks. During this time, no ticks will get called, and all timing related information is kept track of and is restored upon unpause. Contrary to this, this, InputManager and WindowManager still receieve ticks during a pause, but they are told that the number of milliseconds that have passed is None (not 0).", "name": "toggle_pause", "signature": "def toggle_pause(self)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_005224", "prompt": "Implement the Python class `TickManager` described below.\n\nClass description:\nThe Tick Manager This class manages tick callbacks. Attributes: driftwood: Base class instance.\n\nMethod signatures and docstrings:\n- def __init__(self, driftwood): TickManager class initializer. Args: driftwood: Base class instance.\n- def register(self, callback, delay=0, once=False): Register a tick callback, with an optional delay between calls. Args: callback: The function to be called. delay: (optional) Delay in milliseconds between calls. once: Whether to only call once.\n- def unregister(self, callback): Unregister a tick callback. Args: callback: The function to unregister.\n- def tick(self): Call all registered tick callbacks not currently delayed, and regulate tps.\n- def toggle_pause(self): Toggle a pause in all registered ticks. During this time, no ticks will get called, and all timing related information is kept track of and is restored upon unpause. Contrary to this, this, InputManager and WindowManager still receieve ticks during a pause, but they are told that the number of milliseconds that have passed is None (not 0).", "prompted_full_text": "Implement the Python class `TickManager` described below.\n\nClass description:\nThe Tick Manager This class manages tick callbacks. Attributes: driftwood: Base class instance.\n\nMethod signatures and docstrings:\n- def __init__(self, driftwood): TickManager class initializer. Args: driftwood: Base class instance.\n- def register(self, callback, delay=0, once=False): Register a tick callback, with an optional delay between calls. Args: callback: The function to be called. delay: (optional) Delay in milliseconds between calls. once: Whether to only call once.\n- def unregister(self, callback): Unregister a tick callback. Args: callback: The function to unregister.\n- def tick(self): Call all registered tick callbacks not currently delayed, and regulate tps.\n- def toggle_pause(self): Toggle a pause in all registered ticks. During this time, no ticks will get called, and all timing related information is kept track of and is restored upon unpause. Contrary to this, this, InputManager and WindowManager still receieve ticks during a pause, but they are told that the number of milliseconds that have passed is None (not 0).\n\n<|skeleton|>\nclass TickManager:\n \"\"\"The Tick Manager This class manages tick callbacks. Attributes: driftwood: Base class instance.\"\"\"\n\n def __init__(self, driftwood):\n \"\"\"TickManager class initializer. 
Args: driftwood: Base class instance.\"\"\"\n <|body_0|>\n\n def register(self, callback, delay=0, once=False):\n \"\"\"Register a tick callback, with an optional delay between calls. Args: callback: The function to be called. delay: (optional) Delay in milliseconds between calls. once: Whether to only call once.\"\"\"\n <|body_1|>\n\n def unregister(self, callback):\n \"\"\"Unregister a tick callback. Args: callback: The function to unregister.\"\"\"\n <|body_2|>\n\n def tick(self):\n \"\"\"Call all registered tick callbacks not currently delayed, and regulate tps.\"\"\"\n <|body_3|>\n\n def toggle_pause(self):\n \"\"\"Toggle a pause in all registered ticks. During this time, no ticks will get called, and all timing related information is kept track of and is restored upon unpause. Contrary to this, this, InputManager and WindowManager still receieve ticks during a pause, but they are told that the number of milliseconds that have passed is None (not 0).\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.driftwood = driftwood\n self.__registry = []\n self.__latest_tick = SDL_GetTicks()\n self.paused = False\n self.paused_at = None\n<|end_body_0|>\n\n<|body_start_1|>\n for reg in self.__registry:\n if reg['callback'] == callback:\n self.unregister(callback)\n self.__registry.append({'ticks': self.__latest_tick, 'delay': delay, 'callback': callback, 'once': once})\n self.driftwood.log.info('Tick', 'registered', callback.__qualname__)\n<|end_body_1|>\n\n<|body_start_2|>\n for n, reg in enumerate(self.__registry):\n if reg['callback'] == callback:\n del self.__registry[n]\n self.driftwood.log.info('Tick', 'unregistered', callback.__qualname__)\n<|end_body_2|>\n\n<|body_start_3|>\n current_tick = SDL_GetTicks()\n last_tick = self.__latest_tick\n self.__latest_tick = current_tick\n if not self.paused:\n for reg in self.__registry:\n millis_past = current_tick - reg['ticks']\n if reg['delay']:\n if millis_past >= reg['delay']:\n reg['ticks'] = current_tick\n reg['callback'](millis_past)\n if reg['once']:\n self.unregister(reg['callback'])\n else:\n reg['ticks'] = current_tick\n reg['callback'](millis_past)\n if reg['once']:\n self.unregister(reg['callback'])\n else:\n self.driftwood.input.tick(None)\n self.driftwood.window.tick(None)\n tick_delta = current_tick - last_tick\n if tick_delta < 1000 // self.driftwood.config['tick']['tps']:\n SDL_Delay(1000 // self.driftwood.config['tick']['tps'] - tick_delta)\n<|end_body_3|>\n\n<|body_start_4|>\n if self.paused:\n self.paused = False\n paused_for = SDL_GetTicks() - self.paused_at\n for reg in self.__registry:\n reg['ticks'] += paused_for\n self.paused_at = None\n else:\n self.paused = True\n self.paused_at = SDL_GetTicks()\n<|end_body_4|>\n", "revision_id": "95fd4497c268ef10fa950a91ca9cc26f6dff557d", "skeleton": "<|skeleton|>\nclass TickManager:\n \"\"\"The Tick Manager This class manages tick callbacks. Attributes: driftwood: Base class instance.\"\"\"\n\n def __init__(self, driftwood):\n \"\"\"TickManager class initializer. Args: driftwood: Base class instance.\"\"\"\n <|body_0|>\n\n def register(self, callback, delay=0, once=False):\n \"\"\"Register a tick callback, with an optional delay between calls. Args: callback: The function to be called. delay: (optional) Delay in milliseconds between calls. once: Whether to only call once.\"\"\"\n <|body_1|>\n\n def unregister(self, callback):\n \"\"\"Unregister a tick callback. 
Args: callback: The function to unregister.\"\"\"\n <|body_2|>\n\n def tick(self):\n \"\"\"Call all registered tick callbacks not currently delayed, and regulate tps.\"\"\"\n <|body_3|>\n\n def toggle_pause(self):\n \"\"\"Toggle a pause in all registered ticks. During this time, no ticks will get called, and all timing related information is kept track of and is restored upon unpause. Contrary to this, this, InputManager and WindowManager still receieve ticks during a pause, but they are told that the number of milliseconds that have passed is None (not 0).\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TickManager:\n \"\"\"The Tick Manager This class manages tick callbacks. Attributes: driftwood: Base class instance.\"\"\"\n\n def __init__(self, driftwood):\n \"\"\"TickManager class initializer. Args: driftwood: Base class instance.\"\"\"\n self.driftwood = driftwood\n self.__registry = []\n self.__latest_tick = SDL_GetTicks()\n self.paused = False\n self.paused_at = None\n\n def register(self, callback, delay=0, once=False):\n \"\"\"Register a tick callback, with an optional delay between calls. Args: callback: The function to be called. delay: (optional) Delay in milliseconds between calls. once: Whether to only call once.\"\"\"\n for reg in self.__registry:\n if reg['callback'] == callback:\n self.unregister(callback)\n self.__registry.append({'ticks': self.__latest_tick, 'delay': delay, 'callback': callback, 'once': once})\n self.driftwood.log.info('Tick', 'registered', callback.__qualname__)\n\n def unregister(self, callback):\n \"\"\"Unregister a tick callback. Args: callback: The function to unregister.\"\"\"\n for n, reg in enumerate(self.__registry):\n if reg['callback'] == callback:\n del self.__registry[n]\n self.driftwood.log.info('Tick', 'unregistered', callback.__qualname__)\n\n def tick(self):\n \"\"\"Call all registered tick callbacks not currently delayed, and regulate tps.\"\"\"\n current_tick = SDL_GetTicks()\n last_tick = self.__latest_tick\n self.__latest_tick = current_tick\n if not self.paused:\n for reg in self.__registry:\n millis_past = current_tick - reg['ticks']\n if reg['delay']:\n if millis_past >= reg['delay']:\n reg['ticks'] = current_tick\n reg['callback'](millis_past)\n if reg['once']:\n self.unregister(reg['callback'])\n else:\n reg['ticks'] = current_tick\n reg['callback'](millis_past)\n if reg['once']:\n self.unregister(reg['callback'])\n else:\n self.driftwood.input.tick(None)\n self.driftwood.window.tick(None)\n tick_delta = current_tick - last_tick\n if tick_delta < 1000 // self.driftwood.config['tick']['tps']:\n SDL_Delay(1000 // self.driftwood.config['tick']['tps'] - tick_delta)\n\n def toggle_pause(self):\n \"\"\"Toggle a pause in all registered ticks. During this time, no ticks will get called, and all timing related information is kept track of and is restored upon unpause. 
Contrary to this, this, InputManager and WindowManager still receieve ticks during a pause, but they are told that the number of milliseconds that have passed is None (not 0).\"\"\"\n if self.paused:\n self.paused = False\n paused_for = SDL_GetTicks() - self.paused_at\n for reg in self.__registry:\n reg['ticks'] += paused_for\n self.paused_at = None\n else:\n self.paused = True\n self.paused_at = SDL_GetTicks()\n", "source": "the_stack_v2_python_sparse", "source_path": "src/tickmanager.py", "source_repo": "pmer/Driftwood", "split": "val", "star_events_count": 0}
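A note on the TickManager record above: both register and tick call unregister while a for-loop is iterating over __registry, mutating the list mid-iteration; with a single match this mostly works, but it can skip entries. A sketch of the once-handling without in-loop mutation, keeping the record's reg dict layout:

def tick_once_pass(registry, current_tick):
    # Fire due callbacks and return the registrations that survive, so the
    # caller can rebind the list instead of deleting entries mid-loop.
    survivors = []
    for reg in registry:
        elapsed = current_tick - reg['ticks']
        if reg['delay'] and elapsed < reg['delay']:
            survivors.append(reg)
            continue
        reg['ticks'] = current_tick
        reg['callback'](elapsed)
        if not reg['once']:
            survivors.append(reg)
    return survivors

A caller in the record's setting would use registry[:] = tick_once_pass(registry, SDL_GetTicks()).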
{"blob_id": "c85c742669ea61d18d713bdb5d6c35633719c43a", "bodies": ["super(MultiHeadedAttention, self).__init__()\nassert d_model % h == 0\nself.d_k = d_model // h\nself.h = h\nself.linears = clones(nn.Linear(d_model, d_model), 5)\nself.attn = None\nself.dropout = nn.Dropout(p=dropout)", "if mask is not None:\n mask = mask.unsqueeze(1)\nnbatches = query.size(0)\nquery, key, value = [l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2) for l, x in zip(self.linears, (query, key, value))]\nx, self.attn = attention(query, key, value, mask=mask, dropout=self.dropout)\nx = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k)\nif with_mem:\n mem = value\n return (self.linears[3](x), mem, self.attn)\nreturn self.linears[3](x)"], "bodies_text": "<|body_start_0|>\n super(MultiHeadedAttention, self).__init__()\n assert d_model % h == 0\n self.d_k = d_model // h\n self.h = h\n self.linears = clones(nn.Linear(d_model, d_model), 5)\n self.attn = None\n self.dropout = nn.Dropout(p=dropout)\n<|end_body_0|>\n\n<|body_start_1|>\n if mask is not None:\n mask = mask.unsqueeze(1)\n nbatches = query.size(0)\n query, key, value = [l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2) for l, x in zip(self.linears, (query, key, value))]\n x, self.attn = attention(query, key, value, mask=mask, dropout=self.dropout)\n x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k)\n if with_mem:\n mem = value\n return (self.linears[3](x), mem, self.attn)\n return self.linears[3](x)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "MultiHeadedAttention", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MultiHeadedAttention:\n\n def __init__(self, h, d_model, dropout=0.1):\n \"\"\"Take in model size and number of heads.\"\"\"\n <|body_0|>\n\n def forward(self, query, key, value, mask=None, with_mem=False):\n \"\"\"Implements Figure 2\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(MultiHeadedAttention, self).__init__()\n assert d_model % h == 0\n self.d_k = d_model // h\n self.h = h\n self.linears = clones(nn.Linear(d_model, d_model), 5)\n self.attn = None\n self.dropout = nn.Dropout(p=dropout)\n<|end_body_0|>\n\n<|body_start_1|>\n if mask is not None:\n mask = mask.unsqueeze(1)\n nbatches = query.size(0)\n query, key, value = [l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2) for l, x in zip(self.linears, (query, key, value))]\n x, self.attn = attention(query, key, value, mask=mask, dropout=self.dropout)\n x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k)\n if with_mem:\n mem = value\n return (self.linears[3](x), mem, self.attn)\n return self.linears[3](x)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000175", "length_bytes": 6749, "license_type": "no_license", "methods": [{"docstring": "Take in model size and number of heads.", "name": "__init__", "signature": "def __init__(self, h, d_model, dropout=0.1)"}, {"docstring": "Implements Figure 2", "name": "forward", "signature": "def forward(self, query, key, value, mask=None, with_mem=False)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_010543", "prompt": "Implement the Python class `MultiHeadedAttention` described below.\n\nClass description:\nImplement the MultiHeadedAttention class.\n\nMethod signatures and docstrings:\n- def __init__(self, h, d_model, dropout=0.1): Take in model size and number of heads.\n- def forward(self, query, key, value, mask=None, with_mem=False): 
Implements Figure 2", "prompted_full_text": "Implement the Python class `MultiHeadedAttention` described below.\n\nClass description:\nImplement the MultiHeadedAttention class.\n\nMethod signatures and docstrings:\n- def __init__(self, h, d_model, dropout=0.1): Take in model size and number of heads.\n- def forward(self, query, key, value, mask=None, with_mem=False): Implements Figure 2\n\n<|skeleton|>\nclass MultiHeadedAttention:\n\n def __init__(self, h, d_model, dropout=0.1):\n \"\"\"Take in model size and number of heads.\"\"\"\n <|body_0|>\n\n def forward(self, query, key, value, mask=None, with_mem=False):\n \"\"\"Implements Figure 2\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(MultiHeadedAttention, self).__init__()\n assert d_model % h == 0\n self.d_k = d_model // h\n self.h = h\n self.linears = clones(nn.Linear(d_model, d_model), 5)\n self.attn = None\n self.dropout = nn.Dropout(p=dropout)\n<|end_body_0|>\n\n<|body_start_1|>\n if mask is not None:\n mask = mask.unsqueeze(1)\n nbatches = query.size(0)\n query, key, value = [l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2) for l, x in zip(self.linears, (query, key, value))]\n x, self.attn = attention(query, key, value, mask=mask, dropout=self.dropout)\n x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k)\n if with_mem:\n mem = value\n return (self.linears[3](x), mem, self.attn)\n return self.linears[3](x)\n<|end_body_1|>\n", "revision_id": "21e04d307733f0cd08271752a417b35be2adf543", "skeleton": "<|skeleton|>\nclass MultiHeadedAttention:\n\n def __init__(self, h, d_model, dropout=0.1):\n \"\"\"Take in model size and number of heads.\"\"\"\n <|body_0|>\n\n def forward(self, query, key, value, mask=None, with_mem=False):\n \"\"\"Implements Figure 2\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class MultiHeadedAttention:\n def __init__(self, h, d_model, dropout=0.1):\n \"\"\"Take in model size and number of heads.\"\"\"\n super(MultiHeadedAttention, self).__init__()\n assert d_model % h == 0\n self.d_k = d_model // h\n self.h = h\n self.linears = clones(nn.Linear(d_model, d_model), 5)\n self.attn = None\n self.dropout = nn.Dropout(p=dropout)\n\n def forward(self, query, key, value, mask=None, with_mem=False):\n \"\"\"Implements Figure 2\"\"\"\n if mask is not None:\n mask = mask.unsqueeze(1)\n nbatches = query.size(0)\n query, key, value = [l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2) for l, x in zip(self.linears, (query, key, value))]\n x, self.attn = attention(query, key, value, mask=mask, dropout=self.dropout)\n x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k)\n if with_mem:\n mem = value\n return (self.linears[3](x), mem, self.attn)\n return self.linears[3](x)\n", "source": "the_stack_v2_python_sparse", "source_path": "CV/CancerSeg/models_/modules_/tf_submod.py", "source_repo": "qwjaskzxl/AI-practice", "split": "val", "star_events_count": 0}
{"blob_id": "53de3535157d706e25333065aa5615166e2bd7b9", "bodies": ["if len(nums) <= 1:\n return len(nums)\nvalues = [1] * len(nums)\nmax_value = 1\nfor i in range(0, len(nums)):\n for j in range(0, i):\n if nums[j] < nums[i]:\n values[i] = max(values[i], values[j] + 1)\n max_value = max(max_value, values[i])\nreturn max_value", "length_tail_min_values = [0] * len(nums)\nsize = 0\nfor num in nums:\n start = 0\n end = size\n while start != end:\n middle = int((start + end) / 2)\n if length_tail_min_values[middle] < num:\n start = middle + 1\n else:\n end = middle\n length_tail_min_values[start] = num\n size = max(start + 1, size)\nreturn size"], "bodies_text": "<|body_start_0|>\n if len(nums) <= 1:\n return len(nums)\n values = [1] * len(nums)\n max_value = 1\n for i in range(0, len(nums)):\n for j in range(0, i):\n if nums[j] < nums[i]:\n values[i] = max(values[i], values[j] + 1)\n max_value = max(max_value, values[i])\n return max_value\n<|end_body_0|>\n\n<|body_start_1|>\n length_tail_min_values = [0] * len(nums)\n size = 0\n for num in nums:\n start = 0\n end = size\n while start != end:\n middle = int((start + end) / 2)\n if length_tail_min_values[middle] < num:\n start = middle + 1\n else:\n end = middle\n length_tail_min_values[start] = num\n size = max(start + 1, size)\n return size\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def lengthOfLISDP(self, nums):\n \"\"\"This O(n^2) approach uses DP. Each index contains the longest increasing subsequence at that index. When evaluating each index, iterate through previous values. If a previous number is less than current number, increment the LIS value at the current index, if LIS value is greater than existing LIS value. At the end of iterating through each current index value, if the LIS value at the current index is greater than the max LIS, use it. :type nums: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def lengthOfLIS(self, nums):\n \"\"\"Keep track of the smallest value in an array, where the index of the array corresponds to the size of the subsequence. So index 0 of the array represents the smallest value of all subsequences with a size of 1. Index 1 of the array would correspond to the smallest value of all subsequences with a size of 2. As one iterates over the numbers in the inputted array, if the number is greater than any of the smallest values in any of the subsequences, then add it to the array for the next greatest subsequence size. 
This basically means that an increasing subsequence exists with the updated size since a smaller increasing subsequence existed with a lesser value, and we're iterating through the numb\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if len(nums) <= 1:\n return len(nums)\n values = [1] * len(nums)\n max_value = 1\n for i in range(0, len(nums)):\n for j in range(0, i):\n if nums[j] < nums[i]:\n values[i] = max(values[i], values[j] + 1)\n max_value = max(max_value, values[i])\n return max_value\n<|end_body_0|>\n\n<|body_start_1|>\n length_tail_min_values = [0] * len(nums)\n size = 0\n for num in nums:\n start = 0\n end = size\n while start != end:\n middle = int((start + end) / 2)\n if length_tail_min_values[middle] < num:\n start = middle + 1\n else:\n end = middle\n length_tail_min_values[start] = num\n size = max(start + 1, size)\n return size\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000176", "length_bytes": 3174, "license_type": "permissive", "methods": [{"docstring": "This O(n^2) approach uses DP. Each index contains the longest increasing subsequence at that index. When evaluating each index, iterate through previous values. If a previous number is less than current number, increment the LIS value at the current index, if LIS value is greater than existing LIS value. At the end of iterating through each current index value, if the LIS value at the current index is greater than the max LIS, use it. :type nums: List[int] :rtype: int", "name": "lengthOfLISDP", "signature": "def lengthOfLISDP(self, nums)"}, {"docstring": "Keep track of the smallest value in an array, where the index of the array corresponds to the size of the subsequence. So index 0 of the array represents the smallest value of all subsequences with a size of 1. Index 1 of the array would correspond to the smallest value of all subsequences with a size of 2. As one iterates over the numbers in the inputted array, if the number is greater than any of the smallest values in any of the subsequences, then add it to the array for the next greatest subsequence size. This basically means that an increasing subsequence exists with the updated size since a smaller increasing subsequence existed with a lesser value, and we're iterating through the numb", "name": "lengthOfLIS", "signature": "def lengthOfLIS(self, nums)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_002613", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def lengthOfLISDP(self, nums): This O(n^2) approach uses DP. Each index contains the longest increasing subsequence at that index. When evaluating each index, iterate through previous values. If a previous number is less than current number, increment the LIS value at the current index, if LIS value is greater than existing LIS value. At the end of iterating through each current index value, if the LIS value at the current index is greater than the max LIS, use it. :type nums: List[int] :rtype: int\n- def lengthOfLIS(self, nums): Keep track of the smallest value in an array, where the index of the array corresponds to the size of the subsequence. So index 0 of the array represents the smallest value of all subsequences with a size of 1. Index 1 of the array would correspond to the smallest value of all subsequences with a size of 2. 
As one iterates over the numbers in the inputted array, if the number is greater than any of the smallest values in any of the subsequences, then add it to the array for the next greatest subsequence size. This basically means that an increasing subsequence exists with the updated size since a smaller increasing subsequence existed with a lesser value, and we're iterating through the numb", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def lengthOfLISDP(self, nums): This O(n^2) approach uses DP. Each index contains the longest increasing subsequence at that index. When evaluating each index, iterate through previous values. If a previous number is less than current number, increment the LIS value at the current index, if LIS value is greater than existing LIS value. At the end of iterating through each current index value, if the LIS value at the current index is greater than the max LIS, use it. :type nums: List[int] :rtype: int\n- def lengthOfLIS(self, nums): Keep track of the smallest value in an array, where the index of the array corresponds to the size of the subsequence. So index 0 of the array represents the smallest value of all subsequences with a size of 1. Index 1 of the array would correspond to the smallest value of all subsequences with a size of 2. As one iterates over the numbers in the inputted array, if the number is greater than any of the smallest values in any of the subsequences, then add it to the array for the next greatest subsequence size. This basically means that an increasing subsequence exists with the updated size since a smaller increasing subsequence existed with a lesser value, and we're iterating through the numb\n\n<|skeleton|>\nclass Solution:\n\n def lengthOfLISDP(self, nums):\n \"\"\"This O(n^2) approach uses DP. Each index contains the longest increasing subsequence at that index. When evaluating each index, iterate through previous values. If a previous number is less than current number, increment the LIS value at the current index, if LIS value is greater than existing LIS value. At the end of iterating through each current index value, if the LIS value at the current index is greater than the max LIS, use it. :type nums: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def lengthOfLIS(self, nums):\n \"\"\"Keep track of the smallest value in an array, where the index of the array corresponds to the size of the subsequence. So index 0 of the array represents the smallest value of all subsequences with a size of 1. Index 1 of the array would correspond to the smallest value of all subsequences with a size of 2. As one iterates over the numbers in the inputted array, if the number is greater than any of the smallest values in any of the subsequences, then add it to the array for the next greatest subsequence size. 
This basically means that an increasing subsequence exists with the updated size since a smaller increasing subsequence existed with a lesser value, and we're iterating through the numb\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if len(nums) <= 1:\n return len(nums)\n values = [1] * len(nums)\n max_value = 1\n for i in range(0, len(nums)):\n for j in range(0, i):\n if nums[j] < nums[i]:\n values[i] = max(values[i], values[j] + 1)\n max_value = max(max_value, values[i])\n return max_value\n<|end_body_0|>\n\n<|body_start_1|>\n length_tail_min_values = [0] * len(nums)\n size = 0\n for num in nums:\n start = 0\n end = size\n while start != end:\n middle = int((start + end) / 2)\n if length_tail_min_values[middle] < num:\n start = middle + 1\n else:\n end = middle\n length_tail_min_values[start] = num\n size = max(start + 1, size)\n return size\n<|end_body_1|>\n", "revision_id": "b37b14f49b4b6ee9304a3956b3b52f30d22fac29", "skeleton": "<|skeleton|>\nclass Solution:\n\n def lengthOfLISDP(self, nums):\n \"\"\"This O(n^2) approach uses DP. Each index contains the longest increasing subsequence at that index. When evaluating each index, iterate through previous values. If a previous number is less than current number, increment the LIS value at the current index, if LIS value is greater than existing LIS value. At the end of iterating through each current index value, if the LIS value at the current index is greater than the max LIS, use it. :type nums: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def lengthOfLIS(self, nums):\n \"\"\"Keep track of the smallest value in an array, where the index of the array corresponds to the size of the subsequence. So index 0 of the array represents the smallest value of all subsequences with a size of 1. Index 1 of the array would correspond to the smallest value of all subsequences with a size of 2. As one iterates over the numbers in the inputted array, if the number is greater than any of the smallest values in any of the subsequences, then add it to the array for the next greatest subsequence size. This basically means that an increasing subsequence exists with the updated size since a smaller increasing subsequence existed with a lesser value, and we're iterating through the numb\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def lengthOfLISDP(self, nums):\n \"\"\"This O(n^2) approach uses DP. Each index contains the longest increasing subsequence at that index. When evaluating each index, iterate through previous values. If a previous number is less than current number, increment the LIS value at the current index, if LIS value is greater than existing LIS value. At the end of iterating through each current index value, if the LIS value at the current index is greater than the max LIS, use it. :type nums: List[int] :rtype: int\"\"\"\n if len(nums) <= 1:\n return len(nums)\n values = [1] * len(nums)\n max_value = 1\n for i in range(0, len(nums)):\n for j in range(0, i):\n if nums[j] < nums[i]:\n values[i] = max(values[i], values[j] + 1)\n max_value = max(max_value, values[i])\n return max_value\n\n def lengthOfLIS(self, nums):\n \"\"\"Keep track of the smallest value in an array, where the index of the array corresponds to the size of the subsequence. So index 0 of the array represents the smallest value of all subsequences with a size of 1. 
Index 1 of the array would correspond to the smallest value of all subsequences with a size of 2. As one iterates over the numbers in the inputted array, if the number is greater than any of the smallest values in any of the subsequences, then add it to the array for the next greatest subsequence size. This basically means that an increasing subsequence exists with the updated size since a smaller increasing subsequence existed with a lesser value, and we're iterating through the numb\"\"\"\n length_tail_min_values = [0] * len(nums)\n size = 0\n for num in nums:\n start = 0\n end = size\n while start != end:\n middle = int((start + end) / 2)\n if length_tail_min_values[middle] < num:\n start = middle + 1\n else:\n end = middle\n length_tail_min_values[start] = num\n size = max(start + 1, size)\n return size\n", "source": "the_stack_v2_python_sparse", "source_path": "longest_increasing_subsequence.py", "source_repo": "jaebradley/leetcode.py", "split": "val", "star_events_count": 1}
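The lengthOfLIS body in the record above performs a hand-rolled lower-bound binary search over a tails array. As a hedged aside (the function name and the use of the standard library are mine, not the record's), the same O(n log n) algorithm collapses to a few lines with bisect.bisect_left, which computes exactly the same index as the record's while loop; the O(n^2) DP variant remains useful when the subsequence itself, not just its length, must be reconstructed.

import bisect

def length_of_lis(nums):
    # tails[k] holds the smallest possible tail value of any strictly
    # increasing subsequence of length k + 1 seen so far.
    tails = []
    for num in nums:
        i = bisect.bisect_left(tails, num)  # same index as the record's while loop
        if i == len(tails):
            tails.append(num)  # num extends the longest subsequence found so far
        else:
            tails[i] = num     # num is a smaller tail for subsequences of length i + 1
    return len(tails)

assert length_of_lis([10, 9, 2, 5, 3, 7, 101, 18]) == 4  # e.g. [2, 3, 7, 18]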
{"blob_id": "a97262a42d27c1261cf8e00e737556789a467134", "bodies": ["target_count = 0\nfor num in array:\n if num == target:\n target_count += 1\n elif num > target:\n break\nreturn target_count", "start, end = (0, len(array) - 1)\nwhile start < end:\n mid = (start + end) // 2\n if array[mid] < target:\n start = mid + 1\n else:\n end = mid\nleft = start\nif array[left] != target:\n return 0\nstart, end = (left, len(array) - 1)\nwhile start < end:\n mid = (start + end + 1) // 2\n if array[mid] > target:\n end = mid - 1\n else:\n start = mid\nright = start\nreturn right - left + 1"], "bodies_text": "<|body_start_0|>\n target_count = 0\n for num in array:\n if num == target:\n target_count += 1\n elif num > target:\n break\n return target_count\n<|end_body_0|>\n\n<|body_start_1|>\n start, end = (0, len(array) - 1)\n while start < end:\n mid = (start + end) // 2\n if array[mid] < target:\n start = mid + 1\n else:\n end = mid\n left = start\n if array[left] != target:\n return 0\n start, end = (left, len(array) - 1)\n while start < end:\n mid = (start + end + 1) // 2\n if array[mid] > target:\n end = mid - 1\n else:\n start = mid\n right = start\n return right - left + 1\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def find_count_linear(self, array, target):\n \"\"\"Time complexity: O(n). Space complexity: O(1), n is len(array).\"\"\"\n <|body_0|>\n\n def find_count_binary(self, array, target):\n \"\"\"Time complexity: O(lg(n)). Space complexity: O(1), n is len(array).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n target_count = 0\n for num in array:\n if num == target:\n target_count += 1\n elif num > target:\n break\n return target_count\n<|end_body_0|>\n\n<|body_start_1|>\n start, end = (0, len(array) - 1)\n while start < end:\n mid = (start + end) // 2\n if array[mid] < target:\n start = mid + 1\n else:\n end = mid\n left = start\n if array[left] != target:\n return 0\n start, end = (left, len(array) - 1)\n while start < end:\n mid = (start + end + 1) // 2\n if array[mid] > target:\n end = mid - 1\n else:\n start = mid\n right = start\n return right - left + 1\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000177", "length_bytes": 1541, "license_type": "no_license", "methods": [{"docstring": "Time complexity: O(n). Space complexity: O(1), n is len(array).", "name": "find_count_linear", "signature": "def find_count_linear(self, array, target)"}, {"docstring": "Time complexity: O(lg(n)). Space complexity: O(1), n is len(array).", "name": "find_count_binary", "signature": "def find_count_binary(self, array, target)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def find_count_linear(self, array, target): Time complexity: O(n). Space complexity: O(1), n is len(array).\n- def find_count_binary(self, array, target): Time complexity: O(lg(n)). Space complexity: O(1), n is len(array).", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def find_count_linear(self, array, target): Time complexity: O(n). Space complexity: O(1), n is len(array).\n- def find_count_binary(self, array, target): Time complexity: O(lg(n)). 
Space complexity: O(1), n is len(array).\n\n<|skeleton|>\nclass Solution:\n\n def find_count_linear(self, array, target):\n \"\"\"Time complexity: O(n). Space complexity: O(1), n is len(array).\"\"\"\n <|body_0|>\n\n def find_count_binary(self, array, target):\n \"\"\"Time complexity: O(lg(n)). Space complexity: O(1), n is len(array).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n target_count = 0\n for num in array:\n if num == target:\n target_count += 1\n elif num > target:\n break\n return target_count\n<|end_body_0|>\n\n<|body_start_1|>\n start, end = (0, len(array) - 1)\n while start < end:\n mid = (start + end) // 2\n if array[mid] < target:\n start = mid + 1\n else:\n end = mid\n left = start\n if array[left] != target:\n return 0\n start, end = (left, len(array) - 1)\n while start < end:\n mid = (start + end + 1) // 2\n if array[mid] > target:\n end = mid - 1\n else:\n start = mid\n right = start\n return right - left + 1\n<|end_body_1|>\n", "revision_id": "71b722ddfe8da04572e527b055cf8723d5c87bbf", "skeleton": "<|skeleton|>\nclass Solution:\n\n def find_count_linear(self, array, target):\n \"\"\"Time complexity: O(n). Space complexity: O(1), n is len(array).\"\"\"\n <|body_0|>\n\n def find_count_binary(self, array, target):\n \"\"\"Time complexity: O(lg(n)). Space complexity: O(1), n is len(array).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def find_count_linear(self, array, target):\n \"\"\"Time complexity: O(n). Space complexity: O(1), n is len(array).\"\"\"\n target_count = 0\n for num in array:\n if num == target:\n target_count += 1\n elif num > target:\n break\n return target_count\n\n def find_count_binary(self, array, target):\n \"\"\"Time complexity: O(lg(n)). Space complexity: O(1), n is len(array).\"\"\"\n start, end = (0, len(array) - 1)\n while start < end:\n mid = (start + end) // 2\n if array[mid] < target:\n start = mid + 1\n else:\n end = mid\n left = start\n if array[left] != target:\n return 0\n start, end = (left, len(array) - 1)\n while start < end:\n mid = (start + end + 1) // 2\n if array[mid] > target:\n end = mid - 1\n else:\n start = mid\n right = start\n return right - left + 1\n", "source": "the_stack_v2_python_sparse", "source_path": "Binary_Search/count_element_occurence.py", "source_repo": "vladn90/Algorithms", "split": "val", "star_events_count": 0}
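The find_count_binary body above locates the leftmost occurrence with a lower-bound search and the rightmost with an upper-bound search (note the (start + end + 1) // 2 midpoint, which biases the split upward so `start = mid` cannot loop forever). A sketch of the same idea on top of the bisect module, under the assumption that plain stdlib calls are acceptable here; unlike the stored version, this one also survives an empty input, where array[left] would raise an IndexError:

from bisect import bisect_left, bisect_right

def count_occurrences(array, target):
    # First index at which target could be inserted while keeping order;
    # equal to the record's `left` whenever target is present.
    left = bisect_left(array, target)
    if left == len(array) or array[left] != target:
        return 0  # covers both "target absent" and "empty array"
    # bisect_right is one past the last occurrence, so no +1 is needed.
    return bisect_right(array, target) - left

assert count_occurrences([1, 2, 2, 2, 3], 2) == 3
assert count_occurrences([], 2) == 0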
{"blob_id": "d2eb0ab386d6262dcb4729f4d7715ae768cb35eb", "bodies": ["self.filters = filters()\nself.encoder = encoder\nself.weak_learner = weak_learner()", "with torch.no_grad():\n if self.encoder is not None:\n Y, W = self.encoder.encode_labels(Y)\n X = self.format_data(X)\n random_feat = self.filters.apply(X)\nwith warnings.catch_warnings():\n warnings.simplefilter('ignore')\n fit_sig = inspect.signature(self.weak_learner.fit)\n if 'W' in fit_sig.parameters:\n self.weak_learner.fit(random_feat, Y=Y, W=W, **weak_learner_kwargs)\n else:\n self.weak_learner.fit(random_feat, Y, **weak_learner_kwargs)\nreturn self", "if type(data) is np.ndarray:\n data = torch.from_numpy(data).float()\nif len(data.shape) == 3:\n data = torch.unsqueeze(data, dim=1)\nreturn data", "with torch.no_grad():\n X = self.format_data(X)\n random_feat = self.filters.apply(X)\nreturn self.weak_learner.predict(random_feat)"], "bodies_text": "<|body_start_0|>\n self.filters = filters()\n self.encoder = encoder\n self.weak_learner = weak_learner()\n<|end_body_0|>\n\n<|body_start_1|>\n with torch.no_grad():\n if self.encoder is not None:\n Y, W = self.encoder.encode_labels(Y)\n X = self.format_data(X)\n random_feat = self.filters.apply(X)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n fit_sig = inspect.signature(self.weak_learner.fit)\n if 'W' in fit_sig.parameters:\n self.weak_learner.fit(random_feat, Y=Y, W=W, **weak_learner_kwargs)\n else:\n self.weak_learner.fit(random_feat, Y, **weak_learner_kwargs)\n return self\n<|end_body_1|>\n\n<|body_start_2|>\n if type(data) is np.ndarray:\n data = torch.from_numpy(data).float()\n if len(data.shape) == 3:\n data = torch.unsqueeze(data, dim=1)\n return data\n<|end_body_2|>\n\n<|body_start_3|>\n with torch.no_grad():\n X = self.format_data(X)\n random_feat = self.filters.apply(X)\n return self.weak_learner.predict(random_feat)\n<|end_body_3|>\n", "class_docstring": "This weak learner is takes random filters and convolutes the dataset with them. It then applies a non-linearity on the resulting random features and uses an other weak regressor as final classifier.", "class_name": "RandomConvolution", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RandomConvolution:\n \"\"\"This weak learner is takes random filters and convolutes the dataset with them. It then applies a non-linearity on the resulting random features and uses an other weak regressor as final classifier.\"\"\"\n\n def __init__(self, filters, encoder=None, weak_learner=Ridge):\n \"\"\"Args: filters (callable): Callable that creates and returns a Filters object. A Filters object should define an 'apply' method that receives the examples (torch array of shape (n_examples, n_channels, width, height)) and outputs extracted features (torch array of shape (n_examples, n_features)). encoder (LabelEncoder object, optional): Encoder to encode labels. If None, no encoding will be made before fitting. weak_learner (Callable that returns a new object that defines the 'fit' and 'predict' methods, such as object inheriting from _WeakLearnerBase, optional): Regressor that will fit the data. Default is a Ridge regressor from scikit-learn.\"\"\"\n <|body_0|>\n\n def fit(self, X, Y, W=None, **weak_learner_kwargs):\n \"\"\"Args: X (Array of shape (n_examples, ...)): Examples to fit. Y (Array of shape (n_examples) or (n_examples, encoding_dim)): Labels of the examples. 
If an encoder is provided, Y should have shape (n_examples), otherwise it should have a shape (n_examples, encoding_dim). W (Array of shape (n_examples, encoding_dim), optional): Weights of the examples for each labels. weak_learner_kwargs: Keyword arguments needed to fit the weak learner. Returns self.\"\"\"\n <|body_1|>\n\n def format_data(data):\n \"\"\"Formats a data array to the right format accepted by this class, which is a torch.Tensor of shape (n_examples, n_channels, height, width).\"\"\"\n <|body_2|>\n\n def predict(self, X):\n \"\"\"Predicts the label of the sample X. Args: X (Array of shape (n_examples, n_channels, height, width)): Examples to predict.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.filters = filters()\n self.encoder = encoder\n self.weak_learner = weak_learner()\n<|end_body_0|>\n\n<|body_start_1|>\n with torch.no_grad():\n if self.encoder is not None:\n Y, W = self.encoder.encode_labels(Y)\n X = self.format_data(X)\n random_feat = self.filters.apply(X)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n fit_sig = inspect.signature(self.weak_learner.fit)\n if 'W' in fit_sig.parameters:\n self.weak_learner.fit(random_feat, Y=Y, W=W, **weak_learner_kwargs)\n else:\n self.weak_learner.fit(random_feat, Y, **weak_learner_kwargs)\n return self\n<|end_body_1|>\n\n<|body_start_2|>\n if type(data) is np.ndarray:\n data = torch.from_numpy(data).float()\n if len(data.shape) == 3:\n data = torch.unsqueeze(data, dim=1)\n return data\n<|end_body_2|>\n\n<|body_start_3|>\n with torch.no_grad():\n X = self.format_data(X)\n random_feat = self.filters.apply(X)\n return self.weak_learner.predict(random_feat)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000178", "length_bytes": 18484, "license_type": "permissive", "methods": [{"docstring": "Args: filters (callable): Callable that creates and returns a Filters object. A Filters object should define an 'apply' method that receives the examples (torch array of shape (n_examples, n_channels, width, height)) and outputs extracted features (torch array of shape (n_examples, n_features)). encoder (LabelEncoder object, optional): Encoder to encode labels. If None, no encoding will be made before fitting. weak_learner (Callable that returns a new object that defines the 'fit' and 'predict' methods, such as object inheriting from _WeakLearnerBase, optional): Regressor that will fit the data. Default is a Ridge regressor from scikit-learn.", "name": "__init__", "signature": "def __init__(self, filters, encoder=None, weak_learner=Ridge)"}, {"docstring": "Args: X (Array of shape (n_examples, ...)): Examples to fit. Y (Array of shape (n_examples) or (n_examples, encoding_dim)): Labels of the examples. If an encoder is provided, Y should have shape (n_examples), otherwise it should have a shape (n_examples, encoding_dim). W (Array of shape (n_examples, encoding_dim), optional): Weights of the examples for each labels. weak_learner_kwargs: Keyword arguments needed to fit the weak learner. Returns self.", "name": "fit", "signature": "def fit(self, X, Y, W=None, **weak_learner_kwargs)"}, {"docstring": "Formats a data array to the right format accepted by this class, which is a torch.Tensor of shape (n_examples, n_channels, height, width).", "name": "format_data", "signature": "def format_data(data)"}, {"docstring": "Predicts the label of the sample X. 
Args: X (Array of shape (n_examples, n_channels, height, width)): Examples to predict.", "name": "predict", "signature": "def predict(self, X)"}], "n_methods": 4, "prompt": "Implement the Python class `RandomConvolution` described below.\n\nClass description:\nThis weak learner is takes random filters and convolutes the dataset with them. It then applies a non-linearity on the resulting random features and uses an other weak regressor as final classifier.\n\nMethod signatures and docstrings:\n- def __init__(self, filters, encoder=None, weak_learner=Ridge): Args: filters (callable): Callable that creates and returns a Filters object. A Filters object should define an 'apply' method that receives the examples (torch array of shape (n_examples, n_channels, width, height)) and outputs extracted features (torch array of shape (n_examples, n_features)). encoder (LabelEncoder object, optional): Encoder to encode labels. If None, no encoding will be made before fitting. weak_learner (Callable that returns a new object that defines the 'fit' and 'predict' methods, such as object inheriting from _WeakLearnerBase, optional): Regressor that will fit the data. Default is a Ridge regressor from scikit-learn.\n- def fit(self, X, Y, W=None, **weak_learner_kwargs): Args: X (Array of shape (n_examples, ...)): Examples to fit. Y (Array of shape (n_examples) or (n_examples, encoding_dim)): Labels of the examples. If an encoder is provided, Y should have shape (n_examples), otherwise it should have a shape (n_examples, encoding_dim). W (Array of shape (n_examples, encoding_dim), optional): Weights of the examples for each labels. weak_learner_kwargs: Keyword arguments needed to fit the weak learner. Returns self.\n- def format_data(data): Formats a data array to the right format accepted by this class, which is a torch.Tensor of shape (n_examples, n_channels, height, width).\n- def predict(self, X): Predicts the label of the sample X. Args: X (Array of shape (n_examples, n_channels, height, width)): Examples to predict.", "prompted_full_text": "Implement the Python class `RandomConvolution` described below.\n\nClass description:\nThis weak learner is takes random filters and convolutes the dataset with them. It then applies a non-linearity on the resulting random features and uses an other weak regressor as final classifier.\n\nMethod signatures and docstrings:\n- def __init__(self, filters, encoder=None, weak_learner=Ridge): Args: filters (callable): Callable that creates and returns a Filters object. A Filters object should define an 'apply' method that receives the examples (torch array of shape (n_examples, n_channels, width, height)) and outputs extracted features (torch array of shape (n_examples, n_features)). encoder (LabelEncoder object, optional): Encoder to encode labels. If None, no encoding will be made before fitting. weak_learner (Callable that returns a new object that defines the 'fit' and 'predict' methods, such as object inheriting from _WeakLearnerBase, optional): Regressor that will fit the data. Default is a Ridge regressor from scikit-learn.\n- def fit(self, X, Y, W=None, **weak_learner_kwargs): Args: X (Array of shape (n_examples, ...)): Examples to fit. Y (Array of shape (n_examples) or (n_examples, encoding_dim)): Labels of the examples. If an encoder is provided, Y should have shape (n_examples), otherwise it should have a shape (n_examples, encoding_dim). W (Array of shape (n_examples, encoding_dim), optional): Weights of the examples for each labels. 
weak_learner_kwargs: Keyword arguments needed to fit the weak learner. Returns self.\n- def format_data(data): Formats a data array to the right format accepted by this class, which is a torch.Tensor of shape (n_examples, n_channels, height, width).\n- def predict(self, X): Predicts the label of the sample X. Args: X (Array of shape (n_examples, n_channels, height, width)): Examples to predict.\n\n<|skeleton|>\nclass RandomConvolution:\n \"\"\"This weak learner is takes random filters and convolutes the dataset with them. It then applies a non-linearity on the resulting random features and uses an other weak regressor as final classifier.\"\"\"\n\n def __init__(self, filters, encoder=None, weak_learner=Ridge):\n \"\"\"Args: filters (callable): Callable that creates and returns a Filters object. A Filters object should define an 'apply' method that receives the examples (torch array of shape (n_examples, n_channels, width, height)) and outputs extracted features (torch array of shape (n_examples, n_features)). encoder (LabelEncoder object, optional): Encoder to encode labels. If None, no encoding will be made before fitting. weak_learner (Callable that returns a new object that defines the 'fit' and 'predict' methods, such as object inheriting from _WeakLearnerBase, optional): Regressor that will fit the data. Default is a Ridge regressor from scikit-learn.\"\"\"\n <|body_0|>\n\n def fit(self, X, Y, W=None, **weak_learner_kwargs):\n \"\"\"Args: X (Array of shape (n_examples, ...)): Examples to fit. Y (Array of shape (n_examples) or (n_examples, encoding_dim)): Labels of the examples. If an encoder is provided, Y should have shape (n_examples), otherwise it should have a shape (n_examples, encoding_dim). W (Array of shape (n_examples, encoding_dim), optional): Weights of the examples for each labels. weak_learner_kwargs: Keyword arguments needed to fit the weak learner. Returns self.\"\"\"\n <|body_1|>\n\n def format_data(data):\n \"\"\"Formats a data array to the right format accepted by this class, which is a torch.Tensor of shape (n_examples, n_channels, height, width).\"\"\"\n <|body_2|>\n\n def predict(self, X):\n \"\"\"Predicts the label of the sample X. Args: X (Array of shape (n_examples, n_channels, height, width)): Examples to predict.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.filters = filters()\n self.encoder = encoder\n self.weak_learner = weak_learner()\n<|end_body_0|>\n\n<|body_start_1|>\n with torch.no_grad():\n if self.encoder is not None:\n Y, W = self.encoder.encode_labels(Y)\n X = self.format_data(X)\n random_feat = self.filters.apply(X)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n fit_sig = inspect.signature(self.weak_learner.fit)\n if 'W' in fit_sig.parameters:\n self.weak_learner.fit(random_feat, Y=Y, W=W, **weak_learner_kwargs)\n else:\n self.weak_learner.fit(random_feat, Y, **weak_learner_kwargs)\n return self\n<|end_body_1|>\n\n<|body_start_2|>\n if type(data) is np.ndarray:\n data = torch.from_numpy(data).float()\n if len(data.shape) == 3:\n data = torch.unsqueeze(data, dim=1)\n return data\n<|end_body_2|>\n\n<|body_start_3|>\n with torch.no_grad():\n X = self.format_data(X)\n random_feat = self.filters.apply(X)\n return self.weak_learner.predict(random_feat)\n<|end_body_3|>\n", "revision_id": "b4b980ff4af727d5cec0348484a34f34e82168cd", "skeleton": "<|skeleton|>\nclass RandomConvolution:\n \"\"\"This weak learner is takes random filters and convolutes the dataset with them. 
It then applies a non-linearity on the resulting random features and uses an other weak regressor as final classifier.\"\"\"\n\n def __init__(self, filters, encoder=None, weak_learner=Ridge):\n \"\"\"Args: filters (callable): Callable that creates and returns a Filters object. A Filters object should define an 'apply' method that receives the examples (torch array of shape (n_examples, n_channels, width, height)) and outputs extracted features (torch array of shape (n_examples, n_features)). encoder (LabelEncoder object, optional): Encoder to encode labels. If None, no encoding will be made before fitting. weak_learner (Callable that returns a new object that defines the 'fit' and 'predict' methods, such as object inheriting from _WeakLearnerBase, optional): Regressor that will fit the data. Default is a Ridge regressor from scikit-learn.\"\"\"\n <|body_0|>\n\n def fit(self, X, Y, W=None, **weak_learner_kwargs):\n \"\"\"Args: X (Array of shape (n_examples, ...)): Examples to fit. Y (Array of shape (n_examples) or (n_examples, encoding_dim)): Labels of the examples. If an encoder is provided, Y should have shape (n_examples), otherwise it should have a shape (n_examples, encoding_dim). W (Array of shape (n_examples, encoding_dim), optional): Weights of the examples for each labels. weak_learner_kwargs: Keyword arguments needed to fit the weak learner. Returns self.\"\"\"\n <|body_1|>\n\n def format_data(data):\n \"\"\"Formats a data array to the right format accepted by this class, which is a torch.Tensor of shape (n_examples, n_channels, height, width).\"\"\"\n <|body_2|>\n\n def predict(self, X):\n \"\"\"Predicts the label of the sample X. Args: X (Array of shape (n_examples, n_channels, height, width)): Examples to predict.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class RandomConvolution:\n \"\"\"This weak learner is takes random filters and convolutes the dataset with them. It then applies a non-linearity on the resulting random features and uses an other weak regressor as final classifier.\"\"\"\n\n def __init__(self, filters, encoder=None, weak_learner=Ridge):\n \"\"\"Args: filters (callable): Callable that creates and returns a Filters object. A Filters object should define an 'apply' method that receives the examples (torch array of shape (n_examples, n_channels, width, height)) and outputs extracted features (torch array of shape (n_examples, n_features)). encoder (LabelEncoder object, optional): Encoder to encode labels. If None, no encoding will be made before fitting. weak_learner (Callable that returns a new object that defines the 'fit' and 'predict' methods, such as object inheriting from _WeakLearnerBase, optional): Regressor that will fit the data. Default is a Ridge regressor from scikit-learn.\"\"\"\n self.filters = filters()\n self.encoder = encoder\n self.weak_learner = weak_learner()\n\n def fit(self, X, Y, W=None, **weak_learner_kwargs):\n \"\"\"Args: X (Array of shape (n_examples, ...)): Examples to fit. Y (Array of shape (n_examples) or (n_examples, encoding_dim)): Labels of the examples. If an encoder is provided, Y should have shape (n_examples), otherwise it should have a shape (n_examples, encoding_dim). W (Array of shape (n_examples, encoding_dim), optional): Weights of the examples for each labels. weak_learner_kwargs: Keyword arguments needed to fit the weak learner. 
Returns self.\"\"\"\n with torch.no_grad():\n if self.encoder is not None:\n Y, W = self.encoder.encode_labels(Y)\n X = self.format_data(X)\n random_feat = self.filters.apply(X)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n fit_sig = inspect.signature(self.weak_learner.fit)\n if 'W' in fit_sig.parameters:\n self.weak_learner.fit(random_feat, Y=Y, W=W, **weak_learner_kwargs)\n else:\n self.weak_learner.fit(random_feat, Y, **weak_learner_kwargs)\n return self\n\n def format_data(data):\n \"\"\"Formats a data array to the right format accepted by this class, which is a torch.Tensor of shape (n_examples, n_channels, height, width).\"\"\"\n if type(data) is np.ndarray:\n data = torch.from_numpy(data).float()\n if len(data.shape) == 3:\n data = torch.unsqueeze(data, dim=1)\n return data\n\n def predict(self, X):\n \"\"\"Predicts the label of the sample X. Args: X (Array of shape (n_examples, n_channels, height, width)): Examples to predict.\"\"\"\n with torch.no_grad():\n X = self.format_data(X)\n random_feat = self.filters.apply(X)\n return self.weak_learner.predict(random_feat)\n", "source": "the_stack_v2_python_sparse", "source_path": "quadboost/weak_learner/random_convolution.py", "source_repo": "jsleb333/quadboost", "split": "val", "star_events_count": 1}
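One wrinkle in the record above: format_data is declared with a bare data parameter yet invoked as self.format_data(X), so as stored the call would raise a TypeError (the instance is passed as data and X becomes an unexpected second argument). A hedged guess at the intent, with @staticmethod restoring the one-argument signature; the class name and the isinstance check are my additions, not the repo's code:

import numpy as np
import torch

class RandomConvolutionSketch:
    @staticmethod
    def format_data(data):
        # Accept numpy input by converting it to a float tensor.
        if isinstance(data, np.ndarray):
            data = torch.from_numpy(data).float()
        # Insert a channel axis: (n, height, width) -> (n, 1, height, width).
        if data.dim() == 3:
            data = torch.unsqueeze(data, dim=1)
        return data

x = RandomConvolutionSketch.format_data(np.zeros((4, 28, 28)))
assert tuple(x.shape) == (4, 1, 28, 28)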
{"blob_id": "7c2be0276418aab5b5f364cd08c10b6a8e2e4453", "bodies": ["results = list()\nchar_limit = CHARACTER_LIMIT - len(continuation)\ntweets = self.split_tweet_by_lines(tweet=status, character_limit=char_limit)\nif len(tweets) == 1:\n results.append(self.PostUpdate(status=tweets[0], **kwargs))\n return results\nlast_reply_to_id = None\nfor tweet in tweets[0:-1]:\n if threaded and last_reply_to_id is not None:\n kwargs['in_reply_to_status_id'] = last_reply_to_id\n latest_tweet = self.PostUpdate(status=tweet + continuation, **kwargs)\n last_reply_to_id = latest_tweet.id\n results.append(latest_tweet)\nif threaded:\n kwargs['in_reply_to_status_id'] = last_reply_to_id\nresults.append(self.PostUpdate(status=tweets[-1], **kwargs))\nreturn results", "lines = tweet.splitlines()\ntweets = []\ncurrent_tweet = ''\nfor line in lines:\n if len(line) > character_limit:\n if current_tweet:\n tweets.append(current_tweet)\n current_tweet = ''\n split = self._TweetTextWrap(line, char_lim=character_limit)\n tweets += split[:-1]\n current_tweet = split[-1]\n continue\n potential_next_msg = f'{current_tweet}\\r\\n{line}' if current_tweet else line\n if len(potential_next_msg) >= character_limit:\n if current_tweet:\n tweets.append(current_tweet)\n current_tweet = line\n continue\n current_tweet = potential_next_msg\nif current_tweet:\n tweets.append(current_tweet)\nreturn tweets"], "bodies_text": "<|body_start_0|>\n results = list()\n char_limit = CHARACTER_LIMIT - len(continuation)\n tweets = self.split_tweet_by_lines(tweet=status, character_limit=char_limit)\n if len(tweets) == 1:\n results.append(self.PostUpdate(status=tweets[0], **kwargs))\n return results\n last_reply_to_id = None\n for tweet in tweets[0:-1]:\n if threaded and last_reply_to_id is not None:\n kwargs['in_reply_to_status_id'] = last_reply_to_id\n latest_tweet = self.PostUpdate(status=tweet + continuation, **kwargs)\n last_reply_to_id = latest_tweet.id\n results.append(latest_tweet)\n if threaded:\n kwargs['in_reply_to_status_id'] = last_reply_to_id\n results.append(self.PostUpdate(status=tweets[-1], **kwargs))\n return results\n<|end_body_0|>\n\n<|body_start_1|>\n lines = tweet.splitlines()\n tweets = []\n current_tweet = ''\n for line in lines:\n if len(line) > character_limit:\n if current_tweet:\n tweets.append(current_tweet)\n current_tweet = ''\n split = self._TweetTextWrap(line, char_lim=character_limit)\n tweets += split[:-1]\n current_tweet = split[-1]\n continue\n potential_next_msg = f'{current_tweet}\\r\\n{line}' if current_tweet else line\n if len(potential_next_msg) >= character_limit:\n if current_tweet:\n tweets.append(current_tweet)\n current_tweet = line\n continue\n current_tweet = potential_next_msg\n if current_tweet:\n tweets.append(current_tweet)\n return tweets\n<|end_body_1|>\n", "class_docstring": "", "class_name": "ThreadedApi", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ThreadedApi:\n\n def PostUpdates(self, status: str, continuation: str='', threaded: bool=False, **kwargs) -> list[Status]:\n \"\"\"Post one or more twitter status messages from the authenticated user. Unlike api.PostUpdate, this method will post multiple status updates if the message is longer than CHARACTER_LIMIT characters. Args: status: The message text to be posted. May be longer than CHARACTER_LIMIT characters. continuation: The character string, if any, to be appended to all but the last message. Note that Twitter strips trailing '...' strings from messages. 
Consider using the unicode … character (horizontal ellipsis) instead. [Defaults to None] threaded: If True, makes each additional status message a reply to the previous one. [Defaults to False] **kwargs: See api.PostUpdate for a list of accepted parameters. Re\"\"\"\n <|body_0|>\n\n def split_tweet_by_lines(self, tweet: str, character_limit: int) -> list[str]:\n \"\"\"Break the thread up by lines if possible\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n results = list()\n char_limit = CHARACTER_LIMIT - len(continuation)\n tweets = self.split_tweet_by_lines(tweet=status, character_limit=char_limit)\n if len(tweets) == 1:\n results.append(self.PostUpdate(status=tweets[0], **kwargs))\n return results\n last_reply_to_id = None\n for tweet in tweets[0:-1]:\n if threaded and last_reply_to_id is not None:\n kwargs['in_reply_to_status_id'] = last_reply_to_id\n latest_tweet = self.PostUpdate(status=tweet + continuation, **kwargs)\n last_reply_to_id = latest_tweet.id\n results.append(latest_tweet)\n if threaded:\n kwargs['in_reply_to_status_id'] = last_reply_to_id\n results.append(self.PostUpdate(status=tweets[-1], **kwargs))\n return results\n<|end_body_0|>\n\n<|body_start_1|>\n lines = tweet.splitlines()\n tweets = []\n current_tweet = ''\n for line in lines:\n if len(line) > character_limit:\n if current_tweet:\n tweets.append(current_tweet)\n current_tweet = ''\n split = self._TweetTextWrap(line, char_lim=character_limit)\n tweets += split[:-1]\n current_tweet = split[-1]\n continue\n potential_next_msg = f'{current_tweet}\\r\\n{line}' if current_tweet else line\n if len(potential_next_msg) >= character_limit:\n if current_tweet:\n tweets.append(current_tweet)\n current_tweet = line\n continue\n current_tweet = potential_next_msg\n if current_tweet:\n tweets.append(current_tweet)\n return tweets\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000179", "length_bytes": 3796, "license_type": "permissive", "methods": [{"docstring": "Post one or more twitter status messages from the authenticated user. Unlike api.PostUpdate, this method will post multiple status updates if the message is longer than CHARACTER_LIMIT characters. Args: status: The message text to be posted. May be longer than CHARACTER_LIMIT characters. continuation: The character string, if any, to be appended to all but the last message. Note that Twitter strips trailing '...' strings from messages. Consider using the unicode … character (horizontal ellipsis) instead. [Defaults to None] threaded: If True, makes each additional status message a reply to the previous one. [Defaults to False] **kwargs: See api.PostUpdate for a list of accepted parameters. Re", "name": "PostUpdates", "signature": "def PostUpdates(self, status: str, continuation: str='', threaded: bool=False, **kwargs) -> list[Status]"}, {"docstring": "Break the thread up by lines if possible", "name": "split_tweet_by_lines", "signature": "def split_tweet_by_lines(self, tweet: str, character_limit: int) -> list[str]"}], "n_methods": 2, "prompt": "Implement the Python class `ThreadedApi` described below.\n\nClass description:\nImplement the ThreadedApi class.\n\nMethod signatures and docstrings:\n- def PostUpdates(self, status: str, continuation: str='', threaded: bool=False, **kwargs) -> list[Status]: Post one or more twitter status messages from the authenticated user. Unlike api.PostUpdate, this method will post multiple status updates if the message is longer than CHARACTER_LIMIT characters. Args: status: The message text to be posted. 
May be longer than CHARACTER_LIMIT characters. continuation: The character string, if any, to be appended to all but the last message. Note that Twitter strips trailing '...' strings from messages. Consider using the unicode … character (horizontal ellipsis) instead. [Defaults to None] threaded: If True, makes each additional status message a reply to the previous one. [Defaults to False] **kwargs: See api.PostUpdate for a list of accepted parameters. Re\n- def split_tweet_by_lines(self, tweet: str, character_limit: int) -> list[str]: Break the thread up by lines if possible", "prompted_full_text": "Implement the Python class `ThreadedApi` described below.\n\nClass description:\nImplement the ThreadedApi class.\n\nMethod signatures and docstrings:\n- def PostUpdates(self, status: str, continuation: str='', threaded: bool=False, **kwargs) -> list[Status]: Post one or more twitter status messages from the authenticated user. Unlike api.PostUpdate, this method will post multiple status updates if the message is longer than CHARACTER_LIMIT characters. Args: status: The message text to be posted. May be longer than CHARACTER_LIMIT characters. continuation: The character string, if any, to be appended to all but the last message. Note that Twitter strips trailing '...' strings from messages. Consider using the unicode … character (horizontal ellipsis) instead. [Defaults to None] threaded: If True, makes each additional status message a reply to the previous one. [Defaults to False] **kwargs: See api.PostUpdate for a list of accepted parameters. Re\n- def split_tweet_by_lines(self, tweet: str, character_limit: int) -> list[str]: Break the thread up by lines if possible\n\n<|skeleton|>\nclass ThreadedApi:\n\n def PostUpdates(self, status: str, continuation: str='', threaded: bool=False, **kwargs) -> list[Status]:\n \"\"\"Post one or more twitter status messages from the authenticated user. Unlike api.PostUpdate, this method will post multiple status updates if the message is longer than CHARACTER_LIMIT characters. Args: status: The message text to be posted. May be longer than CHARACTER_LIMIT characters. continuation: The character string, if any, to be appended to all but the last message. Note that Twitter strips trailing '...' strings from messages. Consider using the unicode … character (horizontal ellipsis) instead. [Defaults to None] threaded: If True, makes each additional status message a reply to the previous one. [Defaults to False] **kwargs: See api.PostUpdate for a list of accepted parameters. 
Re\"\"\"\n <|body_0|>\n\n def split_tweet_by_lines(self, tweet: str, character_limit: int) -> list[str]:\n \"\"\"Break the thread up by lines if possible\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n results = list()\n char_limit = CHARACTER_LIMIT - len(continuation)\n tweets = self.split_tweet_by_lines(tweet=status, character_limit=char_limit)\n if len(tweets) == 1:\n results.append(self.PostUpdate(status=tweets[0], **kwargs))\n return results\n last_reply_to_id = None\n for tweet in tweets[0:-1]:\n if threaded and last_reply_to_id is not None:\n kwargs['in_reply_to_status_id'] = last_reply_to_id\n latest_tweet = self.PostUpdate(status=tweet + continuation, **kwargs)\n last_reply_to_id = latest_tweet.id\n results.append(latest_tweet)\n if threaded:\n kwargs['in_reply_to_status_id'] = last_reply_to_id\n results.append(self.PostUpdate(status=tweets[-1], **kwargs))\n return results\n<|end_body_0|>\n\n<|body_start_1|>\n lines = tweet.splitlines()\n tweets = []\n current_tweet = ''\n for line in lines:\n if len(line) > character_limit:\n if current_tweet:\n tweets.append(current_tweet)\n current_tweet = ''\n split = self._TweetTextWrap(line, char_lim=character_limit)\n tweets += split[:-1]\n current_tweet = split[-1]\n continue\n potential_next_msg = f'{current_tweet}\\r\\n{line}' if current_tweet else line\n if len(potential_next_msg) >= character_limit:\n if current_tweet:\n tweets.append(current_tweet)\n current_tweet = line\n continue\n current_tweet = potential_next_msg\n if current_tweet:\n tweets.append(current_tweet)\n return tweets\n<|end_body_1|>\n", "revision_id": "08d80ac5adea599eccba1d5bea08a5a27368e7c1", "skeleton": "<|skeleton|>\nclass ThreadedApi:\n\n def PostUpdates(self, status: str, continuation: str='', threaded: bool=False, **kwargs) -> list[Status]:\n \"\"\"Post one or more twitter status messages from the authenticated user. Unlike api.PostUpdate, this method will post multiple status updates if the message is longer than CHARACTER_LIMIT characters. Args: status: The message text to be posted. May be longer than CHARACTER_LIMIT characters. continuation: The character string, if any, to be appended to all but the last message. Note that Twitter strips trailing '...' strings from messages. Consider using the unicode … character (horizontal ellipsis) instead. [Defaults to None] threaded: If True, makes each additional status message a reply to the previous one. [Defaults to False] **kwargs: See api.PostUpdate for a list of accepted parameters. Re\"\"\"\n <|body_0|>\n\n def split_tweet_by_lines(self, tweet: str, character_limit: int) -> list[str]:\n \"\"\"Break the thread up by lines if possible\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ThreadedApi:\n def PostUpdates(self, status: str, continuation: str='', threaded: bool=False, **kwargs) -> list[Status]:\n \"\"\"Post one or more twitter status messages from the authenticated user. Unlike api.PostUpdate, this method will post multiple status updates if the message is longer than CHARACTER_LIMIT characters. Args: status: The message text to be posted. May be longer than CHARACTER_LIMIT characters. continuation: The character string, if any, to be appended to all but the last message. Note that Twitter strips trailing '...' strings from messages. Consider using the unicode … character (horizontal ellipsis) instead. 
[Defaults to None] threaded: If True, makes each additional status message a reply to the previous one. [Defaults to False] **kwargs: See api.PostUpdate for a list of accepted parameters. Re\"\"\"\n results = list()\n char_limit = CHARACTER_LIMIT - len(continuation)\n tweets = self.split_tweet_by_lines(tweet=status, character_limit=char_limit)\n if len(tweets) == 1:\n results.append(self.PostUpdate(status=tweets[0], **kwargs))\n return results\n last_reply_to_id = None\n for tweet in tweets[0:-1]:\n if threaded and last_reply_to_id is not None:\n kwargs['in_reply_to_status_id'] = last_reply_to_id\n latest_tweet = self.PostUpdate(status=tweet + continuation, **kwargs)\n last_reply_to_id = latest_tweet.id\n results.append(latest_tweet)\n if threaded:\n kwargs['in_reply_to_status_id'] = last_reply_to_id\n results.append(self.PostUpdate(status=tweets[-1], **kwargs))\n return results\n\n def split_tweet_by_lines(self, tweet: str, character_limit: int) -> list[str]:\n \"\"\"Break the thread up by lines if possible\"\"\"\n lines = tweet.splitlines()\n tweets = []\n current_tweet = ''\n for line in lines:\n if len(line) > character_limit:\n if current_tweet:\n tweets.append(current_tweet)\n current_tweet = ''\n split = self._TweetTextWrap(line, char_lim=character_limit)\n tweets += split[:-1]\n current_tweet = split[-1]\n continue\n potential_next_msg = f'{current_tweet}\\r\\n{line}' if current_tweet else line\n if len(potential_next_msg) >= character_limit:\n if current_tweet:\n tweets.append(current_tweet)\n current_tweet = line\n continue\n current_tweet = potential_next_msg\n if current_tweet:\n tweets.append(current_tweet)\n return tweets\n", "source": "the_stack_v2_python_sparse", "source_path": "tap_list_providers/twitter_api.py", "source_repo": "hsv-dot-beer/hsvdotbeer", "split": "val", "star_events_count": 25}
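split_tweet_by_lines above packs whole lines greedily into tweets, joining with \r\n and falling back to word wrapping (via the private _TweetTextWrap helper) only for a single line that exceeds the limit. A standalone sketch of the same packing, where textwrap.wrap stands in for _TweetTextWrap; that substitution is an assumption about the helper's behavior, not the real implementation:

import textwrap

def split_by_lines(text, character_limit):
    tweets, current = [], ''
    for line in text.splitlines():
        if len(line) > character_limit:
            # Flush whatever is pending, then word-wrap the oversized line.
            if current:
                tweets.append(current)
                current = ''
            pieces = textwrap.wrap(line, width=character_limit)
            tweets += pieces[:-1]
            current = pieces[-1]
            continue
        candidate = f'{current}\r\n{line}' if current else line
        if len(candidate) >= character_limit:
            # The line does not fit onto the pending tweet; start a new one.
            if current:
                tweets.append(current)
            current = line
        else:
            current = candidate
    if current:
        tweets.append(current)
    return tweets

assert split_by_lines('one\ntwo\nthree', 10) == ['one\r\ntwo', 'three']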
{"blob_id": "5c8a3842f80279a0f15ea37094bbabc6e5942845", "bodies": ["Bill.__init__(self, user)\nself.amounts['submit_sm'] = 0.0\nself.amounts['submit_sm_resp'] = 0.0\nself.actions['decrement_submit_sm_count'] = 0", "bill = SubmitSmRespBill(self.user)\nbill.setAmount('submit_sm_resp', self.getAmount('submit_sm_resp'))\nreturn bill"], "bodies_text": "<|body_start_0|>\n Bill.__init__(self, user)\n self.amounts['submit_sm'] = 0.0\n self.amounts['submit_sm_resp'] = 0.0\n self.actions['decrement_submit_sm_count'] = 0\n<|end_body_0|>\n\n<|body_start_1|>\n bill = SubmitSmRespBill(self.user)\n bill.setAmount('submit_sm_resp', self.getAmount('submit_sm_resp'))\n return bill\n<|end_body_1|>\n", "class_docstring": "This is the bill for user to pay when sending a MT SMS", "class_name": "SubmitSmBill", "detected_licenses": ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SubmitSmBill:\n \"\"\"This is the bill for user to pay when sending a MT SMS\"\"\"\n\n def __init__(self, user):\n \"\"\"Defining billables\"\"\"\n <|body_0|>\n\n def getSubmitSmRespBill(self):\n \"\"\"Will return a separate Bill for submit_sm_resp\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Bill.__init__(self, user)\n self.amounts['submit_sm'] = 0.0\n self.amounts['submit_sm_resp'] = 0.0\n self.actions['decrement_submit_sm_count'] = 0\n<|end_body_0|>\n\n<|body_start_1|>\n bill = SubmitSmRespBill(self.user)\n bill.setAmount('submit_sm_resp', self.getAmount('submit_sm_resp'))\n return bill\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000180", "length_bytes": 3091, "license_type": "permissive", "methods": [{"docstring": "Defining billables", "name": "__init__", "signature": "def __init__(self, user)"}, {"docstring": "Will return a separate Bill for submit_sm_resp", "name": "getSubmitSmRespBill", "signature": "def getSubmitSmRespBill(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_035683", "prompt": "Implement the Python class `SubmitSmBill` described below.\n\nClass description:\nThis is the bill for user to pay when sending a MT SMS\n\nMethod signatures and docstrings:\n- def __init__(self, user): Defining billables\n- def getSubmitSmRespBill(self): Will return a separate Bill for submit_sm_resp", "prompted_full_text": "Implement the Python class `SubmitSmBill` described below.\n\nClass description:\nThis is the bill for user to pay when sending a MT SMS\n\nMethod signatures and docstrings:\n- def __init__(self, user): Defining billables\n- def getSubmitSmRespBill(self): Will return a separate Bill for submit_sm_resp\n\n<|skeleton|>\nclass SubmitSmBill:\n \"\"\"This is the bill for user to pay when sending a MT SMS\"\"\"\n\n def __init__(self, user):\n \"\"\"Defining billables\"\"\"\n <|body_0|>\n\n def getSubmitSmRespBill(self):\n \"\"\"Will return a separate Bill for submit_sm_resp\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Bill.__init__(self, user)\n self.amounts['submit_sm'] = 0.0\n self.amounts['submit_sm_resp'] = 0.0\n self.actions['decrement_submit_sm_count'] = 0\n<|end_body_0|>\n\n<|body_start_1|>\n bill = SubmitSmRespBill(self.user)\n bill.setAmount('submit_sm_resp', self.getAmount('submit_sm_resp'))\n return bill\n<|end_body_1|>\n", "revision_id": "e352208f22677b0a0769d1246892cff9558503cf", "skeleton": "<|skeleton|>\nclass SubmitSmBill:\n \"\"\"This is the bill for user to pay when sending a MT SMS\"\"\"\n\n def __init__(self, user):\n 
\"\"\"Defining billables\"\"\"\n <|body_0|>\n\n def getSubmitSmRespBill(self):\n \"\"\"Will return a separate Bill for submit_sm_resp\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SubmitSmBill:\n \"\"\"This is the bill for user to pay when sending a MT SMS\"\"\"\n\n def __init__(self, user):\n \"\"\"Defining billables\"\"\"\n Bill.__init__(self, user)\n self.amounts['submit_sm'] = 0.0\n self.amounts['submit_sm_resp'] = 0.0\n self.actions['decrement_submit_sm_count'] = 0\n\n def getSubmitSmRespBill(self):\n \"\"\"Will return a separate Bill for submit_sm_resp\"\"\"\n bill = SubmitSmRespBill(self.user)\n bill.setAmount('submit_sm_resp', self.getAmount('submit_sm_resp'))\n return bill\n", "source": "the_stack_v2_python_sparse", "source_path": "jasmin/routing/Bills.py", "source_repo": "jookies/jasmin", "split": "val", "star_events_count": 943}
{"blob_id": "19254201cbd0e978e06f94ee7994df89238e69f3", "bodies": ["index_m = m - 1\nindex_n = n - 1\nwhile index_m >= 0 and index_n >= 0:\n if nums1[index_m] >= nums2[index_n]:\n nums1[index_m + index_n + 1] = nums1[index_m]\n index_m -= 1\n else:\n nums1[index_m + index_n + 1] = nums2[index_n]\n index_n -= 1\nwhile index_n >= 0:\n nums1[index_m + index_n + 1] = nums2[index_n]\n index_n -= 1", "i1 = m - 1\ni2 = n - 1\nfor k in range(m + n - 1, -1, -1):\n if i1 < 0:\n nums1[k] = nums2[i2]\n i2 -= 1\n elif i2 < 0:\n i1 -= 1\n continue\n elif nums2[i2] >= nums1[i1]:\n nums1[k] = nums2[i2]\n i2 -= 1\n else:\n nums1[k] = nums1[i1]\n i1 -= 1\n k -= 1"], "bodies_text": "<|body_start_0|>\n index_m = m - 1\n index_n = n - 1\n while index_m >= 0 and index_n >= 0:\n if nums1[index_m] >= nums2[index_n]:\n nums1[index_m + index_n + 1] = nums1[index_m]\n index_m -= 1\n else:\n nums1[index_m + index_n + 1] = nums2[index_n]\n index_n -= 1\n while index_n >= 0:\n nums1[index_m + index_n + 1] = nums2[index_n]\n index_n -= 1\n<|end_body_0|>\n\n<|body_start_1|>\n i1 = m - 1\n i2 = n - 1\n for k in range(m + n - 1, -1, -1):\n if i1 < 0:\n nums1[k] = nums2[i2]\n i2 -= 1\n elif i2 < 0:\n i1 -= 1\n continue\n elif nums2[i2] >= nums1[i1]:\n nums1[k] = nums2[i2]\n i2 -= 1\n else:\n nums1[k] = nums1[i1]\n i1 -= 1\n k -= 1\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def merge(self, nums1, m, nums2, n):\n \"\"\":type nums1: List[int] :type m: int :type nums2: List[int] :type n: int :rtype: void Do not return anything, modify nums1 in-place instead.\"\"\"\n <|body_0|>\n\n def merge_v2(self, nums1, m, nums2, n):\n \"\"\":type nums1: List[int] :type m: int :type nums2: List[int] :type n: int :rtype: void Do not return anything, modify nums1 in-place instead.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n index_m = m - 1\n index_n = n - 1\n while index_m >= 0 and index_n >= 0:\n if nums1[index_m] >= nums2[index_n]:\n nums1[index_m + index_n + 1] = nums1[index_m]\n index_m -= 1\n else:\n nums1[index_m + index_n + 1] = nums2[index_n]\n index_n -= 1\n while index_n >= 0:\n nums1[index_m + index_n + 1] = nums2[index_n]\n index_n -= 1\n<|end_body_0|>\n\n<|body_start_1|>\n i1 = m - 1\n i2 = n - 1\n for k in range(m + n - 1, -1, -1):\n if i1 < 0:\n nums1[k] = nums2[i2]\n i2 -= 1\n elif i2 < 0:\n i1 -= 1\n continue\n elif nums2[i2] >= nums1[i1]:\n nums1[k] = nums2[i2]\n i2 -= 1\n else:\n nums1[k] = nums1[i1]\n i1 -= 1\n k -= 1\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000181", "length_bytes": 2568, "license_type": "no_license", "methods": [{"docstring": ":type nums1: List[int] :type m: int :type nums2: List[int] :type n: int :rtype: void Do not return anything, modify nums1 in-place instead.", "name": "merge", "signature": "def merge(self, nums1, m, nums2, n)"}, {"docstring": ":type nums1: List[int] :type m: int :type nums2: List[int] :type n: int :rtype: void Do not return anything, modify nums1 in-place instead.", "name": "merge_v2", "signature": "def merge_v2(self, nums1, m, nums2, n)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_052757", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def merge(self, nums1, m, nums2, n): :type nums1: List[int] :type m: int :type nums2: List[int] :type n: int :rtype: void Do 
not return anything, modify nums1 in-place instead.\n- def merge_v2(self, nums1, m, nums2, n): :type nums1: List[int] :type m: int :type nums2: List[int] :type n: int :rtype: void Do not return anything, modify nums1 in-place instead.", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def merge(self, nums1, m, nums2, n): :type nums1: List[int] :type m: int :type nums2: List[int] :type n: int :rtype: void Do not return anything, modify nums1 in-place instead.\n- def merge_v2(self, nums1, m, nums2, n): :type nums1: List[int] :type m: int :type nums2: List[int] :type n: int :rtype: void Do not return anything, modify nums1 in-place instead.\n\n<|skeleton|>\nclass Solution:\n\n def merge(self, nums1, m, nums2, n):\n \"\"\":type nums1: List[int] :type m: int :type nums2: List[int] :type n: int :rtype: void Do not return anything, modify nums1 in-place instead.\"\"\"\n <|body_0|>\n\n def merge_v2(self, nums1, m, nums2, n):\n \"\"\":type nums1: List[int] :type m: int :type nums2: List[int] :type n: int :rtype: void Do not return anything, modify nums1 in-place instead.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n index_m = m - 1\n index_n = n - 1\n while index_m >= 0 and index_n >= 0:\n if nums1[index_m] >= nums2[index_n]:\n nums1[index_m + index_n + 1] = nums1[index_m]\n index_m -= 1\n else:\n nums1[index_m + index_n + 1] = nums2[index_n]\n index_n -= 1\n while index_n >= 0:\n nums1[index_m + index_n + 1] = nums2[index_n]\n index_n -= 1\n<|end_body_0|>\n\n<|body_start_1|>\n i1 = m - 1\n i2 = n - 1\n for k in range(m + n - 1, -1, -1):\n if i1 < 0:\n nums1[k] = nums2[i2]\n i2 -= 1\n elif i2 < 0:\n i1 -= 1\n continue\n elif nums2[i2] >= nums1[i1]:\n nums1[k] = nums2[i2]\n i2 -= 1\n else:\n nums1[k] = nums1[i1]\n i1 -= 1\n k -= 1\n<|end_body_1|>\n", "revision_id": "e60ba45fe2f2e5e3b3abfecec3db76f5ce1fde59", "skeleton": "<|skeleton|>\nclass Solution:\n\n def merge(self, nums1, m, nums2, n):\n \"\"\":type nums1: List[int] :type m: int :type nums2: List[int] :type n: int :rtype: void Do not return anything, modify nums1 in-place instead.\"\"\"\n <|body_0|>\n\n def merge_v2(self, nums1, m, nums2, n):\n \"\"\":type nums1: List[int] :type m: int :type nums2: List[int] :type n: int :rtype: void Do not return anything, modify nums1 in-place instead.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def merge(self, nums1, m, nums2, n):\n \"\"\":type nums1: List[int] :type m: int :type nums2: List[int] :type n: int :rtype: void Do not return anything, modify nums1 in-place instead.\"\"\"\n index_m = m - 1\n index_n = n - 1\n while index_m >= 0 and index_n >= 0:\n if nums1[index_m] >= nums2[index_n]:\n nums1[index_m + index_n + 1] = nums1[index_m]\n index_m -= 1\n else:\n nums1[index_m + index_n + 1] = nums2[index_n]\n index_n -= 1\n while index_n >= 0:\n nums1[index_m + index_n + 1] = nums2[index_n]\n index_n -= 1\n\n def merge_v2(self, nums1, m, nums2, n):\n \"\"\":type nums1: List[int] :type m: int :type nums2: List[int] :type n: int :rtype: void Do not return anything, modify nums1 in-place instead.\"\"\"\n i1 = m - 1\n i2 = n - 1\n for k in range(m + n - 1, -1, -1):\n if i1 < 0:\n nums1[k] = nums2[i2]\n i2 -= 1\n elif i2 < 0:\n i1 -= 1\n continue\n elif nums2[i2] >= nums1[i1]:\n nums1[k] = nums2[i2]\n i2 -= 
1\n else:\n nums1[k] = nums1[i1]\n i1 -= 1\n k -= 1\n", "source": "the_stack_v2_python_sparse", "source_path": "src/lt_88.py", "source_repo": "oxhead/CodingYourWay", "split": "val", "star_events_count": 0}
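Both merge bodies above fill nums1 from the back so that no valid element is overwritten before it is consumed; the trailing k -= 1 in merge_v2 is redundant, since the for statement already advances k on each iteration. A quick usage check, assuming the record's Solution class is in scope (nums1 must carry n padding slots after its m real values):

nums1 = [1, 3, 5, 0, 0, 0]  # three real values, three padding slots
nums2 = [2, 4, 6]
Solution().merge(nums1, 3, nums2, 3)
assert nums1 == [1, 2, 3, 4, 5, 6]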
{"blob_id": "dec9507e7e84e2f8bb0fc62b8105f46d6cdf9855", "bodies": ["pe.set_default_val(config, 'layer_norm', True)\npe.set_default_val(config, 'activation_fn', tfe.leakyrelu(0.1))\npe.set_default_val(config, 'mask_type', None)\nself.structure = structure\nsuper().__init__(config)", "with tf.variable_scope(scope, reuse=scope in self.log):\n Q = self._eval_fc_network(x, self.structure, regression_layer=True)\nself._collect_scope_vars(scope)\nreturn Q"], "bodies_text": "<|body_start_0|>\n pe.set_default_val(config, 'layer_norm', True)\n pe.set_default_val(config, 'activation_fn', tfe.leakyrelu(0.1))\n pe.set_default_val(config, 'mask_type', None)\n self.structure = structure\n super().__init__(config)\n<|end_body_0|>\n\n<|body_start_1|>\n with tf.variable_scope(scope, reuse=scope in self.log):\n Q = self._eval_fc_network(x, self.structure, regression_layer=True)\n self._collect_scope_vars(scope)\n return Q\n<|end_body_1|>\n", "class_docstring": "This represents a general FeedForwardNetwork. It can be used in different types of contexts. In general it operates on batches of 1-dimensional data.", "class_name": "FeedForwardNetwork", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FeedForwardNetwork:\n \"\"\"This represents a general FeedForwardNetwork. It can be used in different types of contexts. In general it operates on batches of 1-dimensional data.\"\"\"\n\n def __init__(self, structure, config):\n \"\"\"This constructs a new FeedForwardNetwork. It takes the structure of the network and also the configuration. Args: structure: The structure for the hidden layers, a list of integers. config: layer_norm: Activate Layer Normalization activation_fn: Define the activation_fn as lambda tf.Tensor: -> tf.Tensor mask_type: The mask type to use for regularization.\"\"\"\n <|body_0|>\n\n def eval_graph(self, x, scope, **kwargs):\n \"\"\"This creates the graph it therefore receives the input tensor and of course the weights for each l. Args: x: The input to the graph. Usually a placeholder scope: The scope of this graph. It is used for creating multiple graph copies Returns: A fully constructed graph using the weights supplied.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n pe.set_default_val(config, 'layer_norm', True)\n pe.set_default_val(config, 'activation_fn', tfe.leakyrelu(0.1))\n pe.set_default_val(config, 'mask_type', None)\n self.structure = structure\n super().__init__(config)\n<|end_body_0|>\n\n<|body_start_1|>\n with tf.variable_scope(scope, reuse=scope in self.log):\n Q = self._eval_fc_network(x, self.structure, regression_layer=True)\n self._collect_scope_vars(scope)\n return Q\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000182", "length_bytes": 2992, "license_type": "permissive", "methods": [{"docstring": "This constructs a new FeedForwardNetwork. It takes the structure of the network and also the configuration. Args: structure: The structure for the hidden layers, a list of integers. config: layer_norm: Activate Layer Normalization activation_fn: Define the activation_fn as lambda tf.Tensor: -> tf.Tensor mask_type: The mask type to use for regularization.", "name": "__init__", "signature": "def __init__(self, structure, config)"}, {"docstring": "This creates the graph it therefore receives the input tensor and of course the weights for each l. Args: x: The input to the graph. Usually a placeholder scope: The scope of this graph. 
It is used for creating multiple graph copies Returns: A fully constructed graph using the weights supplied.", "name": "eval_graph", "signature": "def eval_graph(self, x, scope, **kwargs)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_039857", "prompt": "Implement the Python class `FeedForwardNetwork` described below.\n\nClass description:\nThis represents a general FeedForwardNetwork. It can be used in different types of contexts. In general it operates on batches of 1-dimensional data.\n\nMethod signatures and docstrings:\n- def __init__(self, structure, config): This constructs a new FeedForwardNetwork. It takes the structure of the network and also the configuration. Args: structure: The structure for the hidden layers, a list of integers. config: layer_norm: Activate Layer Normalization activation_fn: Define the activation_fn as lambda tf.Tensor: -> tf.Tensor mask_type: The mask type to use for regularization.\n- def eval_graph(self, x, scope, **kwargs): This creates the graph it therefore receives the input tensor and of course the weights for each l. Args: x: The input to the graph. Usually a placeholder scope: The scope of this graph. It is used for creating multiple graph copies Returns: A fully constructed graph using the weights supplied.", "prompted_full_text": "Implement the Python class `FeedForwardNetwork` described below.\n\nClass description:\nThis represents a general FeedForwardNetwork. It can be used in different types of contexts. In general it operates on batches of 1-dimensional data.\n\nMethod signatures and docstrings:\n- def __init__(self, structure, config): This constructs a new FeedForwardNetwork. It takes the structure of the network and also the configuration. Args: structure: The structure for the hidden layers, a list of integers. config: layer_norm: Activate Layer Normalization activation_fn: Define the activation_fn as lambda tf.Tensor: -> tf.Tensor mask_type: The mask type to use for regularization.\n- def eval_graph(self, x, scope, **kwargs): This creates the graph it therefore receives the input tensor and of course the weights for each l. Args: x: The input to the graph. Usually a placeholder scope: The scope of this graph. It is used for creating multiple graph copies Returns: A fully constructed graph using the weights supplied.\n\n<|skeleton|>\nclass FeedForwardNetwork:\n \"\"\"This represents a general FeedForwardNetwork. It can be used in different types of contexts. In general it operates on batches of 1-dimensional data.\"\"\"\n\n def __init__(self, structure, config):\n \"\"\"This constructs a new FeedForwardNetwork. It takes the structure of the network and also the configuration. Args: structure: The structure for the hidden layers, a list of integers. config: layer_norm: Activate Layer Normalization activation_fn: Define the activation_fn as lambda tf.Tensor: -> tf.Tensor mask_type: The mask type to use for regularization.\"\"\"\n <|body_0|>\n\n def eval_graph(self, x, scope, **kwargs):\n \"\"\"This creates the graph it therefore receives the input tensor and of course the weights for each l. Args: x: The input to the graph. Usually a placeholder scope: The scope of this graph. 
It is used for creating multiple graph copies Returns: A fully constructed graph using the weights supplied.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n pe.set_default_val(config, 'layer_norm', True)\n pe.set_default_val(config, 'activation_fn', tfe.leakyrelu(0.1))\n pe.set_default_val(config, 'mask_type', None)\n self.structure = structure\n super().__init__(config)\n<|end_body_0|>\n\n<|body_start_1|>\n with tf.variable_scope(scope, reuse=scope in self.log):\n Q = self._eval_fc_network(x, self.structure, regression_layer=True)\n self._collect_scope_vars(scope)\n return Q\n<|end_body_1|>\n", "revision_id": "0d72e0a3e6f39c9a4e797a17911e2beec352b14a", "skeleton": "<|skeleton|>\nclass FeedForwardNetwork:\n \"\"\"This represents a general FeedForwardNetwork. It can be used in different types of contexts. In general it operates on batches of 1-dimensional data.\"\"\"\n\n def __init__(self, structure, config):\n \"\"\"This constructs a new FeedForwardNetwork. It takes the structure of the network and also the configuration. Args: structure: The structure for the hidden layers, a list of integers. config: layer_norm: Activate Layer Normalization activation_fn: Define the activation_fn as lambda tf.Tensor: -> tf.Tensor mask_type: The mask type to use for regularization.\"\"\"\n <|body_0|>\n\n def eval_graph(self, x, scope, **kwargs):\n \"\"\"This creates the graph it therefore receives the input tensor and of course the weights for each l. Args: x: The input to the graph. Usually a placeholder scope: The scope of this graph. It is used for creating multiple graph copies Returns: A fully constructed graph using the weights supplied.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class FeedForwardNetwork:\n \"\"\"This represents a general FeedForwardNetwork. It can be used in different types of contexts. In general it operates on batches of 1-dimensional data.\"\"\"\n\n def __init__(self, structure, config):\n \"\"\"This constructs a new FeedForwardNetwork. It takes the structure of the network and also the configuration. Args: structure: The structure for the hidden layers, a list of integers. config: layer_norm: Activate Layer Normalization activation_fn: Define the activation_fn as lambda tf.Tensor: -> tf.Tensor mask_type: The mask type to use for regularization.\"\"\"\n pe.set_default_val(config, 'layer_norm', True)\n pe.set_default_val(config, 'activation_fn', tfe.leakyrelu(0.1))\n pe.set_default_val(config, 'mask_type', None)\n self.structure = structure\n super().__init__(config)\n\n def eval_graph(self, x, scope, **kwargs):\n \"\"\"This creates the graph it therefore receives the input tensor and of course the weights for each l. Args: x: The input to the graph. Usually a placeholder scope: The scope of this graph. It is used for creating multiple graph copies Returns: A fully constructed graph using the weights supplied.\"\"\"\n with tf.variable_scope(scope, reuse=scope in self.log):\n Q = self._eval_fc_network(x, self.structure, regression_layer=True)\n self._collect_scope_vars(scope)\n return Q\n", "source": "the_stack_v2_python_sparse", "source_path": "nn/FeedForwardNetwork.py", "source_repo": "kosmitive/bootstrapped-dqn", "split": "val", "star_events_count": 2}
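The eval_graph body in the FeedForwardNetwork record above leans on tf.variable_scope(scope, reuse=scope in self.log): the first evaluation under a scope creates variables, and every later evaluation under the same scope reuses them. A framework-free sketch of that bookkeeping, assuming self.log is the set of scopes already built (stand-in weights, hypothetical names):

class ScopedParams:
    def __init__(self):
        self.log = set()     # scopes already constructed, as in the record
        self.params = {}     # scope -> parameter store

    def eval_graph(self, x, scope):
        if scope not in self.log:             # reuse=False: create weights
            self.params[scope] = {'w': 0.1, 'b': 0.0}
            self.log.add(scope)
        p = self.params[scope]                # reuse=True: same weights again
        return p['w'] * x + p['b']

net = ScopedParams()
assert net.eval_graph(2.0, 'online') == net.eval_graph(2.0, 'online')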
{"blob_id": "2e289a5c1734aeedd19bf02c50ce4d6f3e6cd6f7", "bodies": ["super(InceptionV3, self).__init__()\nself.resize_input = resize_input\nself.normalize_input = normalize_input\nself.output_blocks = sorted(output_blocks)\nself.last_needed_block = max(output_blocks)\nassert self.last_needed_block <= 3, 'Last possible output block index is 3'\nself.blocks = nn.ModuleList()\ninception = models.inception_v3(pretrained=True)\nblock0 = [inception.Conv2d_1a_3x3, inception.Conv2d_2a_3x3, inception.Conv2d_2b_3x3, nn.MaxPool2d(kernel_size=3, stride=2)]\nself.blocks.append(nn.Sequential(*block0))\nif self.last_needed_block >= 1:\n block1 = [inception.Conv2d_3b_1x1, inception.Conv2d_4a_3x3, nn.MaxPool2d(kernel_size=3, stride=2)]\n self.blocks.append(nn.Sequential(*block1))\nif self.last_needed_block >= 2:\n block2 = [inception.Mixed_5b, inception.Mixed_5c, inception.Mixed_5d, inception.Mixed_6a, inception.Mixed_6b, inception.Mixed_6c, inception.Mixed_6d, inception.Mixed_6e]\n self.blocks.append(nn.Sequential(*block2))\nif self.last_needed_block >= 3:\n block3 = [inception.Mixed_7a, inception.Mixed_7b, inception.Mixed_7c, nn.AdaptiveAvgPool2d(output_size=(1, 1))]\n self.blocks.append(nn.Sequential(*block3))\nfor param in self.parameters():\n param.requires_grad = requires_grad", "outp = []\nx = inp\nif self.resize_input:\n x = F.upsample(x, size=(299, 299), mode='bilinear')\nif self.normalize_input:\n x = x.clone()\n x[:, 0] = x[:, 0] * (0.229 / 0.5) + (0.485 - 0.5) / 0.5\n x[:, 1] = x[:, 1] * (0.224 / 0.5) + (0.456 - 0.5) / 0.5\n x[:, 2] = x[:, 2] * (0.225 / 0.5) + (0.406 - 0.5) / 0.5\nfor idx, block in enumerate(self.blocks):\n x = block(x)\n if idx in self.output_blocks:\n outp.append(x)\n if idx == self.last_needed_block:\n break\nreturn outp"], "bodies_text": "<|body_start_0|>\n super(InceptionV3, self).__init__()\n self.resize_input = resize_input\n self.normalize_input = normalize_input\n self.output_blocks = sorted(output_blocks)\n self.last_needed_block = max(output_blocks)\n assert self.last_needed_block <= 3, 'Last possible output block index is 3'\n self.blocks = nn.ModuleList()\n inception = models.inception_v3(pretrained=True)\n block0 = [inception.Conv2d_1a_3x3, inception.Conv2d_2a_3x3, inception.Conv2d_2b_3x3, nn.MaxPool2d(kernel_size=3, stride=2)]\n self.blocks.append(nn.Sequential(*block0))\n if self.last_needed_block >= 1:\n block1 = [inception.Conv2d_3b_1x1, inception.Conv2d_4a_3x3, nn.MaxPool2d(kernel_size=3, stride=2)]\n self.blocks.append(nn.Sequential(*block1))\n if self.last_needed_block >= 2:\n block2 = [inception.Mixed_5b, inception.Mixed_5c, inception.Mixed_5d, inception.Mixed_6a, inception.Mixed_6b, inception.Mixed_6c, inception.Mixed_6d, inception.Mixed_6e]\n self.blocks.append(nn.Sequential(*block2))\n if self.last_needed_block >= 3:\n block3 = [inception.Mixed_7a, inception.Mixed_7b, inception.Mixed_7c, nn.AdaptiveAvgPool2d(output_size=(1, 1))]\n self.blocks.append(nn.Sequential(*block3))\n for param in self.parameters():\n param.requires_grad = requires_grad\n<|end_body_0|>\n\n<|body_start_1|>\n outp = []\n x = inp\n if self.resize_input:\n x = F.upsample(x, size=(299, 299), mode='bilinear')\n if self.normalize_input:\n x = x.clone()\n x[:, 0] = x[:, 0] * (0.229 / 0.5) + (0.485 - 0.5) / 0.5\n x[:, 1] = x[:, 1] * (0.224 / 0.5) + (0.456 - 0.5) / 0.5\n x[:, 2] = x[:, 2] * (0.225 / 0.5) + (0.406 - 0.5) / 0.5\n for idx, block in enumerate(self.blocks):\n x = block(x)\n if idx in self.output_blocks:\n outp.append(x)\n if idx == self.last_needed_block:\n break\n return 
outp\n<|end_body_1|>\n", "class_docstring": "Pretrained InceptionV3 network returning feature maps", "class_name": "InceptionV3", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass InceptionV3:\n \"\"\"Pretrained InceptionV3 network returning feature maps\"\"\"\n\n def __init__(self, output_blocks=[DEFAULT_BLOCK_INDEX], resize_input=True, normalize_input=True, requires_grad=False):\n \"\"\"Build pretrained InceptionV3 Parameters ---------- output_blocks : list of int Indices of blocks to return features of. Possible values are: - 0: corresponds to output of first max pooling - 1: corresponds to output of second max pooling - 2: corresponds to output which is fed to aux classifier - 3: corresponds to output of final average pooling resize_input : bool If true, bilinearly resizes input to width and height 299 before feeding input to model. As the network without fully connected layers is fully convolutional, it should be able to handle inputs of arbitrary size, so resizing might not be strictly needed normalize_input : bool If true, normalizes the input to the statistics the pre\"\"\"\n <|body_0|>\n\n def forward(self, inp):\n \"\"\"Get Inception feature maps Parameters ---------- inp : torch.autograd.Variable Input tensor of shape Bx3xHxW. Values are expected to be in range (0, 1) Returns ------- List of torch.autograd.Variable, corresponding to the selected output block, sorted ascending by index\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(InceptionV3, self).__init__()\n self.resize_input = resize_input\n self.normalize_input = normalize_input\n self.output_blocks = sorted(output_blocks)\n self.last_needed_block = max(output_blocks)\n assert self.last_needed_block <= 3, 'Last possible output block index is 3'\n self.blocks = nn.ModuleList()\n inception = models.inception_v3(pretrained=True)\n block0 = [inception.Conv2d_1a_3x3, inception.Conv2d_2a_3x3, inception.Conv2d_2b_3x3, nn.MaxPool2d(kernel_size=3, stride=2)]\n self.blocks.append(nn.Sequential(*block0))\n if self.last_needed_block >= 1:\n block1 = [inception.Conv2d_3b_1x1, inception.Conv2d_4a_3x3, nn.MaxPool2d(kernel_size=3, stride=2)]\n self.blocks.append(nn.Sequential(*block1))\n if self.last_needed_block >= 2:\n block2 = [inception.Mixed_5b, inception.Mixed_5c, inception.Mixed_5d, inception.Mixed_6a, inception.Mixed_6b, inception.Mixed_6c, inception.Mixed_6d, inception.Mixed_6e]\n self.blocks.append(nn.Sequential(*block2))\n if self.last_needed_block >= 3:\n block3 = [inception.Mixed_7a, inception.Mixed_7b, inception.Mixed_7c, nn.AdaptiveAvgPool2d(output_size=(1, 1))]\n self.blocks.append(nn.Sequential(*block3))\n for param in self.parameters():\n param.requires_grad = requires_grad\n<|end_body_0|>\n\n<|body_start_1|>\n outp = []\n x = inp\n if self.resize_input:\n x = F.upsample(x, size=(299, 299), mode='bilinear')\n if self.normalize_input:\n x = x.clone()\n x[:, 0] = x[:, 0] * (0.229 / 0.5) + (0.485 - 0.5) / 0.5\n x[:, 1] = x[:, 1] * (0.224 / 0.5) + (0.456 - 0.5) / 0.5\n x[:, 2] = x[:, 2] * (0.225 / 0.5) + (0.406 - 0.5) / 0.5\n for idx, block in enumerate(self.blocks):\n x = block(x)\n if idx in self.output_blocks:\n outp.append(x)\n if idx == self.last_needed_block:\n break\n return outp\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000183", "length_bytes": 29410, "license_type": "no_license", "methods": [{"docstring": "Build pretrained InceptionV3 Parameters ---------- output_blocks : list of int Indices of blocks to return 
features of. Possible values are: - 0: corresponds to output of first max pooling - 1: corresponds to output of second max pooling - 2: corresponds to output which is fed to aux classifier - 3: corresponds to output of final average pooling resize_input : bool If true, bilinearly resizes input to width and height 299 before feeding input to model. As the network without fully connected layers is fully convolutional, it should be able to handle inputs of arbitrary size, so resizing might not be strictly needed normalize_input : bool If true, normalizes the input to the statistics the pre", "name": "__init__", "signature": "def __init__(self, output_blocks=[DEFAULT_BLOCK_INDEX], resize_input=True, normalize_input=True, requires_grad=False)"}, {"docstring": "Get Inception feature maps Parameters ---------- inp : torch.autograd.Variable Input tensor of shape Bx3xHxW. Values are expected to be in range (0, 1) Returns ------- List of torch.autograd.Variable, corresponding to the selected output block, sorted ascending by index", "name": "forward", "signature": "def forward(self, inp)"}], "n_methods": 2, "prompt": "Implement the Python class `InceptionV3` described below.\n\nClass description:\nPretrained InceptionV3 network returning feature maps\n\nMethod signatures and docstrings:\n- def __init__(self, output_blocks=[DEFAULT_BLOCK_INDEX], resize_input=True, normalize_input=True, requires_grad=False): Build pretrained InceptionV3 Parameters ---------- output_blocks : list of int Indices of blocks to return features of. Possible values are: - 0: corresponds to output of first max pooling - 1: corresponds to output of second max pooling - 2: corresponds to output which is fed to aux classifier - 3: corresponds to output of final average pooling resize_input : bool If true, bilinearly resizes input to width and height 299 before feeding input to model. As the network without fully connected layers is fully convolutional, it should be able to handle inputs of arbitrary size, so resizing might not be strictly needed normalize_input : bool If true, normalizes the input to the statistics the pre\n- def forward(self, inp): Get Inception feature maps Parameters ---------- inp : torch.autograd.Variable Input tensor of shape Bx3xHxW. Values are expected to be in range (0, 1) Returns ------- List of torch.autograd.Variable, corresponding to the selected output block, sorted ascending by index", "prompted_full_text": "Implement the Python class `InceptionV3` described below.\n\nClass description:\nPretrained InceptionV3 network returning feature maps\n\nMethod signatures and docstrings:\n- def __init__(self, output_blocks=[DEFAULT_BLOCK_INDEX], resize_input=True, normalize_input=True, requires_grad=False): Build pretrained InceptionV3 Parameters ---------- output_blocks : list of int Indices of blocks to return features of. Possible values are: - 0: corresponds to output of first max pooling - 1: corresponds to output of second max pooling - 2: corresponds to output which is fed to aux classifier - 3: corresponds to output of final average pooling resize_input : bool If true, bilinearly resizes input to width and height 299 before feeding input to model. 
As the network without fully connected layers is fully convolutional, it should be able to handle inputs of arbitrary size, so resizing might not be strictly needed normalize_input : bool If true, normalizes the input to the statistics the pre\n- def forward(self, inp): Get Inception feature maps Parameters ---------- inp : torch.autograd.Variable Input tensor of shape Bx3xHxW. Values are expected to be in range (0, 1) Returns ------- List of torch.autograd.Variable, corresponding to the selected output block, sorted ascending by index\n\n<|skeleton|>\nclass InceptionV3:\n \"\"\"Pretrained InceptionV3 network returning feature maps\"\"\"\n\n def __init__(self, output_blocks=[DEFAULT_BLOCK_INDEX], resize_input=True, normalize_input=True, requires_grad=False):\n \"\"\"Build pretrained InceptionV3 Parameters ---------- output_blocks : list of int Indices of blocks to return features of. Possible values are: - 0: corresponds to output of first max pooling - 1: corresponds to output of second max pooling - 2: corresponds to output which is fed to aux classifier - 3: corresponds to output of final average pooling resize_input : bool If true, bilinearly resizes input to width and height 299 before feeding input to model. As the network without fully connected layers is fully convolutional, it should be able to handle inputs of arbitrary size, so resizing might not be strictly needed normalize_input : bool If true, normalizes the input to the statistics the pre\"\"\"\n <|body_0|>\n\n def forward(self, inp):\n \"\"\"Get Inception feature maps Parameters ---------- inp : torch.autograd.Variable Input tensor of shape Bx3xHxW. Values are expected to be in range (0, 1) Returns ------- List of torch.autograd.Variable, corresponding to the selected output block, sorted ascending by index\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(InceptionV3, self).__init__()\n self.resize_input = resize_input\n self.normalize_input = normalize_input\n self.output_blocks = sorted(output_blocks)\n self.last_needed_block = max(output_blocks)\n assert self.last_needed_block <= 3, 'Last possible output block index is 3'\n self.blocks = nn.ModuleList()\n inception = models.inception_v3(pretrained=True)\n block0 = [inception.Conv2d_1a_3x3, inception.Conv2d_2a_3x3, inception.Conv2d_2b_3x3, nn.MaxPool2d(kernel_size=3, stride=2)]\n self.blocks.append(nn.Sequential(*block0))\n if self.last_needed_block >= 1:\n block1 = [inception.Conv2d_3b_1x1, inception.Conv2d_4a_3x3, nn.MaxPool2d(kernel_size=3, stride=2)]\n self.blocks.append(nn.Sequential(*block1))\n if self.last_needed_block >= 2:\n block2 = [inception.Mixed_5b, inception.Mixed_5c, inception.Mixed_5d, inception.Mixed_6a, inception.Mixed_6b, inception.Mixed_6c, inception.Mixed_6d, inception.Mixed_6e]\n self.blocks.append(nn.Sequential(*block2))\n if self.last_needed_block >= 3:\n block3 = [inception.Mixed_7a, inception.Mixed_7b, inception.Mixed_7c, nn.AdaptiveAvgPool2d(output_size=(1, 1))]\n self.blocks.append(nn.Sequential(*block3))\n for param in self.parameters():\n param.requires_grad = requires_grad\n<|end_body_0|>\n\n<|body_start_1|>\n outp = []\n x = inp\n if self.resize_input:\n x = F.upsample(x, size=(299, 299), mode='bilinear')\n if self.normalize_input:\n x = x.clone()\n x[:, 0] = x[:, 0] * (0.229 / 0.5) + (0.485 - 0.5) / 0.5\n x[:, 1] = x[:, 1] * (0.224 / 0.5) + (0.456 - 0.5) / 0.5\n x[:, 2] = x[:, 2] * (0.225 / 0.5) + (0.406 - 0.5) / 0.5\n for idx, block in enumerate(self.blocks):\n x = block(x)\n if idx in self.output_blocks:\n 
outp.append(x)\n if idx == self.last_needed_block:\n break\n return outp\n<|end_body_1|>\n", "revision_id": "7e55a422588c1d1e00f35a3d3a3ff896cce59e18", "skeleton": "<|skeleton|>\nclass InceptionV3:\n \"\"\"Pretrained InceptionV3 network returning feature maps\"\"\"\n\n def __init__(self, output_blocks=[DEFAULT_BLOCK_INDEX], resize_input=True, normalize_input=True, requires_grad=False):\n \"\"\"Build pretrained InceptionV3 Parameters ---------- output_blocks : list of int Indices of blocks to return features of. Possible values are: - 0: corresponds to output of first max pooling - 1: corresponds to output of second max pooling - 2: corresponds to output which is fed to aux classifier - 3: corresponds to output of final average pooling resize_input : bool If true, bilinearly resizes input to width and height 299 before feeding input to model. As the network without fully connected layers is fully convolutional, it should be able to handle inputs of arbitrary size, so resizing might not be strictly needed normalize_input : bool If true, normalizes the input to the statistics the pre\"\"\"\n <|body_0|>\n\n def forward(self, inp):\n \"\"\"Get Inception feature maps Parameters ---------- inp : torch.autograd.Variable Input tensor of shape Bx3xHxW. Values are expected to be in range (0, 1) Returns ------- List of torch.autograd.Variable, corresponding to the selected output block, sorted ascending by index\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class InceptionV3:\n \"\"\"Pretrained InceptionV3 network returning feature maps\"\"\"\n\n def __init__(self, output_blocks=[DEFAULT_BLOCK_INDEX], resize_input=True, normalize_input=True, requires_grad=False):\n \"\"\"Build pretrained InceptionV3 Parameters ---------- output_blocks : list of int Indices of blocks to return features of. Possible values are: - 0: corresponds to output of first max pooling - 1: corresponds to output of second max pooling - 2: corresponds to output which is fed to aux classifier - 3: corresponds to output of final average pooling resize_input : bool If true, bilinearly resizes input to width and height 299 before feeding input to model. 
As the network without fully connected layers is fully convolutional, it should be able to handle inputs of arbitrary size, so resizing might not be strictly needed normalize_input : bool If true, normalizes the input to the statistics the pre\"\"\"\n super(InceptionV3, self).__init__()\n self.resize_input = resize_input\n self.normalize_input = normalize_input\n self.output_blocks = sorted(output_blocks)\n self.last_needed_block = max(output_blocks)\n assert self.last_needed_block <= 3, 'Last possible output block index is 3'\n self.blocks = nn.ModuleList()\n inception = models.inception_v3(pretrained=True)\n block0 = [inception.Conv2d_1a_3x3, inception.Conv2d_2a_3x3, inception.Conv2d_2b_3x3, nn.MaxPool2d(kernel_size=3, stride=2)]\n self.blocks.append(nn.Sequential(*block0))\n if self.last_needed_block >= 1:\n block1 = [inception.Conv2d_3b_1x1, inception.Conv2d_4a_3x3, nn.MaxPool2d(kernel_size=3, stride=2)]\n self.blocks.append(nn.Sequential(*block1))\n if self.last_needed_block >= 2:\n block2 = [inception.Mixed_5b, inception.Mixed_5c, inception.Mixed_5d, inception.Mixed_6a, inception.Mixed_6b, inception.Mixed_6c, inception.Mixed_6d, inception.Mixed_6e]\n self.blocks.append(nn.Sequential(*block2))\n if self.last_needed_block >= 3:\n block3 = [inception.Mixed_7a, inception.Mixed_7b, inception.Mixed_7c, nn.AdaptiveAvgPool2d(output_size=(1, 1))]\n self.blocks.append(nn.Sequential(*block3))\n for param in self.parameters():\n param.requires_grad = requires_grad\n\n def forward(self, inp):\n \"\"\"Get Inception feature maps Parameters ---------- inp : torch.autograd.Variable Input tensor of shape Bx3xHxW. Values are expected to be in range (0, 1) Returns ------- List of torch.autograd.Variable, corresponding to the selected output block, sorted ascending by index\"\"\"\n outp = []\n x = inp\n if self.resize_input:\n x = F.upsample(x, size=(299, 299), mode='bilinear')\n if self.normalize_input:\n x = x.clone()\n x[:, 0] = x[:, 0] * (0.229 / 0.5) + (0.485 - 0.5) / 0.5\n x[:, 1] = x[:, 1] * (0.224 / 0.5) + (0.456 - 0.5) / 0.5\n x[:, 2] = x[:, 2] * (0.225 / 0.5) + (0.406 - 0.5) / 0.5\n for idx, block in enumerate(self.blocks):\n x = block(x)\n if idx in self.output_blocks:\n outp.append(x)\n if idx == self.last_needed_block:\n break\n return outp\n", "source": "the_stack_v2_python_sparse", "source_path": "generated/test_youyuge34_Anime_InPainting.py", "source_repo": "jansel/pytorch-jit-paritybench", "split": "val", "star_events_count": 35}
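The channel-wise affine in the InceptionV3 forward body above looks opaque, but composed with standard ImageNet normalization it reduces to a plain rescale from (0, 1) to (-1, 1): for any channel, ((x - mu) / sigma) * (sigma / 0.5) + (mu - 0.5) / 0.5 = (x - 0.5) / 0.5 = 2x - 1. A quick numeric check of that identity for channel 0:

mu, sigma = 0.485, 0.229           # channel-0 constants from the record

def renorm(x):
    # The per-channel map applied in forward()
    return x * (sigma / 0.5) + (mu - 0.5) / 0.5

orig = 0.73                        # a raw pixel value in (0, 1)
imagenet = (orig - mu) / sigma     # ImageNet-style normalization
assert abs(renorm(imagenet) - (2 * orig - 1)) < 1e-12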
{"blob_id": "1fd11d927fa29686727bdf6229280b7422c59679", "bodies": ["if not self.get_accepted_answer(answer.question):\n answer.accepted = True\n answer.save()\n answer_accepted.send(sender=Answer, instance=answer)\nelse:\n answer = None\nreturn answer", "try:\n answer = Answer.objects.get(question=question, accepted=True)\nexcept ObjectDoesNotExist:\n answer = None\nreturn answer", "if option == 'most_recent':\n answers = Answer.objects.get_sorted_objects(option).filter(question=question)\nelif option == 'most_discussed':\n answers = Answer.objects.get_sorted_objects(option).filter(question=question)\nelif option == 'highest_rated':\n answers = Answer.objects.get_sorted_objects(option).filter(question=question)\nreturn answers", "if self.can_add(user):\n name = data.get('title', None)\n body = data.get('question', None)\n question = self.model(name=name, question=body, user=user)\n question.save()\n signals.edit.send(self.model, original=None, current=question, editor=user)\nelse:\n question = None\nreturn question", "name = data.get('title', None)\nbody = data.get('question', None)\noriginal = self.model.objects.get(id=question.id)\nquestion.name = name\nquestion.question = body\nquestion.save()\nsignals.edit.send(sender=self.model, original=original, current=question, editor=user)\nreturn question"], "bodies_text": "<|body_start_0|>\n if not self.get_accepted_answer(answer.question):\n answer.accepted = True\n answer.save()\n answer_accepted.send(sender=Answer, instance=answer)\n else:\n answer = None\n return answer\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n answer = Answer.objects.get(question=question, accepted=True)\n except ObjectDoesNotExist:\n answer = None\n return answer\n<|end_body_1|>\n\n<|body_start_2|>\n if option == 'most_recent':\n answers = Answer.objects.get_sorted_objects(option).filter(question=question)\n elif option == 'most_discussed':\n answers = Answer.objects.get_sorted_objects(option).filter(question=question)\n elif option == 'highest_rated':\n answers = Answer.objects.get_sorted_objects(option).filter(question=question)\n return answers\n<|end_body_2|>\n\n<|body_start_3|>\n if self.can_add(user):\n name = data.get('title', None)\n body = data.get('question', None)\n question = self.model(name=name, question=body, user=user)\n question.save()\n signals.edit.send(self.model, original=None, current=question, editor=user)\n else:\n question = None\n return question\n<|end_body_3|>\n\n<|body_start_4|>\n name = data.get('title', None)\n body = data.get('question', None)\n original = self.model.objects.get(id=question.id)\n question.name = name\n question.question = body\n question.save()\n signals.edit.send(sender=self.model, original=original, current=question, editor=user)\n return question\n<|end_body_4|>\n", "class_docstring": "", "class_name": "QuestionManager", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass QuestionManager:\n\n def accept_answer(self, answer):\n \"\"\"Accepts an @answer as the correct answer to a question. 
Each question can only have max 1 accepted answer.\"\"\"\n <|body_0|>\n\n def get_accepted_answer(self, question):\n \"\"\"Returns the accepted answer for target @question if it has one, otherwise return None.\"\"\"\n <|body_1|>\n\n def get_answers(self, question, option='highest_rated'):\n \"\"\"Returns a QuerySet of answers associated with target @question sorted by @option.\"\"\"\n <|body_2|>\n\n def add(self, user, data):\n \"\"\"Create a new question owned by @user based on @data.\"\"\"\n <|body_3|>\n\n def edit(self, question, user, data):\n \"\"\"Modifies target @question with new @data. Sends edit signal.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not self.get_accepted_answer(answer.question):\n answer.accepted = True\n answer.save()\n answer_accepted.send(sender=Answer, instance=answer)\n else:\n answer = None\n return answer\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n answer = Answer.objects.get(question=question, accepted=True)\n except ObjectDoesNotExist:\n answer = None\n return answer\n<|end_body_1|>\n\n<|body_start_2|>\n if option == 'most_recent':\n answers = Answer.objects.get_sorted_objects(option).filter(question=question)\n elif option == 'most_discussed':\n answers = Answer.objects.get_sorted_objects(option).filter(question=question)\n elif option == 'highest_rated':\n answers = Answer.objects.get_sorted_objects(option).filter(question=question)\n return answers\n<|end_body_2|>\n\n<|body_start_3|>\n if self.can_add(user):\n name = data.get('title', None)\n body = data.get('question', None)\n question = self.model(name=name, question=body, user=user)\n question.save()\n signals.edit.send(self.model, original=None, current=question, editor=user)\n else:\n question = None\n return question\n<|end_body_3|>\n\n<|body_start_4|>\n name = data.get('title', None)\n body = data.get('question', None)\n original = self.model.objects.get(id=question.id)\n question.name = name\n question.question = body\n question.save()\n signals.edit.send(sender=self.model, original=original, current=question, editor=user)\n return question\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000184", "length_bytes": 4799, "license_type": "permissive", "methods": [{"docstring": "Accepts an @answer as the correct answer to a question. Each question can only have max 1 accepted answer.", "name": "accept_answer", "signature": "def accept_answer(self, answer)"}, {"docstring": "Returns the accepted answer for target @question if it has one, otherwise return None.", "name": "get_accepted_answer", "signature": "def get_accepted_answer(self, question)"}, {"docstring": "Returns a QuerySet of answers associated with target @question sorted by @option.", "name": "get_answers", "signature": "def get_answers(self, question, option='highest_rated')"}, {"docstring": "Create a new question owned by @user based on @data.", "name": "add", "signature": "def add(self, user, data)"}, {"docstring": "Modifies target @question with new @data. Sends edit signal.", "name": "edit", "signature": "def edit(self, question, user, data)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_016299", "prompt": "Implement the Python class `QuestionManager` described below.\n\nClass description:\nImplement the QuestionManager class.\n\nMethod signatures and docstrings:\n- def accept_answer(self, answer): Accepts an @answer as the correct answer to a question. 
Each question can only have max 1 accepted answer.\n- def get_accepted_answer(self, question): Returns the accepted answer for target @question if it has one, otherwise return None.\n- def get_answers(self, question, option='highest_rated'): Returns a QuerySet of answers associated with target @question sorted by @option.\n- def add(self, user, data): Create a new question owned by @user based on @data.\n- def edit(self, question, user, data): Modifies target @question with new @data. Sends edit signal.", "prompted_full_text": "Implement the Python class `QuestionManager` described below.\n\nClass description:\nImplement the QuestionManager class.\n\nMethod signatures and docstrings:\n- def accept_answer(self, answer): Accepts an @answer as the correct answer to a question. Each question can only have max 1 accepted answer.\n- def get_accepted_answer(self, question): Returns the accepted answer for target @question if it has one, otherwise return None.\n- def get_answers(self, question, option='highest_rated'): Returns a QuerySet of answers associated with target @question sorted by @option.\n- def add(self, user, data): Create a new question owned by @user based on @data.\n- def edit(self, question, user, data): Modifies target @question with new @data. Sends edit signal.\n\n<|skeleton|>\nclass QuestionManager:\n\n def accept_answer(self, answer):\n \"\"\"Accepts an @answer as the correct answer to a question. Each question can only have max 1 accepted answer.\"\"\"\n <|body_0|>\n\n def get_accepted_answer(self, question):\n \"\"\"Returns the accepted answer for target @question if it has one, otherwise return None.\"\"\"\n <|body_1|>\n\n def get_answers(self, question, option='highest_rated'):\n \"\"\"Returns a QuerySet of answers associated with target @question sorted by @option.\"\"\"\n <|body_2|>\n\n def add(self, user, data):\n \"\"\"Create a new question owned by @user based on @data.\"\"\"\n <|body_3|>\n\n def edit(self, question, user, data):\n \"\"\"Modifies target @question with new @data. 
Sends edit signal.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not self.get_accepted_answer(answer.question):\n answer.accepted = True\n answer.save()\n answer_accepted.send(sender=Answer, instance=answer)\n else:\n answer = None\n return answer\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n answer = Answer.objects.get(question=question, accepted=True)\n except ObjectDoesNotExist:\n answer = None\n return answer\n<|end_body_1|>\n\n<|body_start_2|>\n if option == 'most_recent':\n answers = Answer.objects.get_sorted_objects(option).filter(question=question)\n elif option == 'most_discussed':\n answers = Answer.objects.get_sorted_objects(option).filter(question=question)\n elif option == 'highest_rated':\n answers = Answer.objects.get_sorted_objects(option).filter(question=question)\n return answers\n<|end_body_2|>\n\n<|body_start_3|>\n if self.can_add(user):\n name = data.get('title', None)\n body = data.get('question', None)\n question = self.model(name=name, question=body, user=user)\n question.save()\n signals.edit.send(self.model, original=None, current=question, editor=user)\n else:\n question = None\n return question\n<|end_body_3|>\n\n<|body_start_4|>\n name = data.get('title', None)\n body = data.get('question', None)\n original = self.model.objects.get(id=question.id)\n question.name = name\n question.question = body\n question.save()\n signals.edit.send(sender=self.model, original=original, current=question, editor=user)\n return question\n<|end_body_4|>\n", "revision_id": "5f8f3b682ac28fd3f464e7a993c3988c1a49eb02", "skeleton": "<|skeleton|>\nclass QuestionManager:\n\n def accept_answer(self, answer):\n \"\"\"Accepts an @answer as the correct answer to a question. Each question can only have max 1 accepted answer.\"\"\"\n <|body_0|>\n\n def get_accepted_answer(self, question):\n \"\"\"Returns the accepted answer for target @question if it has one, otherwise return None.\"\"\"\n <|body_1|>\n\n def get_answers(self, question, option='highest_rated'):\n \"\"\"Returns a QuerySet of answers associated with target @question sorted by @option.\"\"\"\n <|body_2|>\n\n def add(self, user, data):\n \"\"\"Create a new question owned by @user based on @data.\"\"\"\n <|body_3|>\n\n def edit(self, question, user, data):\n \"\"\"Modifies target @question with new @data. Sends edit signal.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class QuestionManager:\n def accept_answer(self, answer):\n \"\"\"Accepts an @answer as the correct answer to a question. 
Each question can only have max 1 accepted answer.\"\"\"\n if not self.get_accepted_answer(answer.question):\n answer.accepted = True\n answer.save()\n answer_accepted.send(sender=Answer, instance=answer)\n else:\n answer = None\n return answer\n\n def get_accepted_answer(self, question):\n \"\"\"Returns the accepted answer for target @question if it has one, otherwise return None.\"\"\"\n try:\n answer = Answer.objects.get(question=question, accepted=True)\n except ObjectDoesNotExist:\n answer = None\n return answer\n\n def get_answers(self, question, option='highest_rated'):\n \"\"\"Returns a QuerySet of answers associated with target @question sorted by @option.\"\"\"\n if option == 'most_recent':\n answers = Answer.objects.get_sorted_objects(option).filter(question=question)\n elif option == 'most_discussed':\n answers = Answer.objects.get_sorted_objects(option).filter(question=question)\n elif option == 'highest_rated':\n answers = Answer.objects.get_sorted_objects(option).filter(question=question)\n return answers\n\n def add(self, user, data):\n \"\"\"Create a new question owned by @user based on @data.\"\"\"\n if self.can_add(user):\n name = data.get('title', None)\n body = data.get('question', None)\n question = self.model(name=name, question=body, user=user)\n question.save()\n signals.edit.send(self.model, original=None, current=question, editor=user)\n else:\n question = None\n return question\n\n def edit(self, question, user, data):\n \"\"\"Modifies target @question with new @data. Sends edit signal.\"\"\"\n name = data.get('title', None)\n body = data.get('question', None)\n original = self.model.objects.get(id=question.id)\n question.name = name\n question.question = body\n question.save()\n signals.edit.send(sender=self.model, original=original, current=question, editor=user)\n return question\n", "source": "the_stack_v2_python_sparse", "source_path": "eruditio/shared_apps/django_qa/models.py", "source_repo": "genghisu/eruditio", "split": "val", "star_events_count": 0}
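Two small observations about the QuestionManager record above: the three branches of get_answers are textually identical, and for any unrecognized option the method raises UnboundLocalError at return answers, since answers is never assigned. An equivalent collapsed form with an explicit guard (the fake manager below exists only to make the sketch runnable; the real Answer is a Django model):

class _FakeManager:
    def get_sorted_objects(self, option):
        return self
    def filter(self, question=None):
        return ['sorted answers for %s' % question]

class Answer:
    objects = _FakeManager()

VALID_OPTIONS = {'most_recent', 'most_discussed', 'highest_rated'}

def get_answers(question, option='highest_rated'):
    # One call replaces the three identical branches in the record.
    if option not in VALID_OPTIONS:
        raise ValueError('unknown sort option: %s' % option)
    return Answer.objects.get_sorted_objects(option).filter(question=question)

assert get_answers('q1', 'most_recent') == ['sorted answers for q1']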
{"blob_id": "d8bbe49eaa15dda5785235839a66a1ab96be2b34", "bodies": ["def search(left, right):\n while left >= 0 and right < len(s) and (s[left] == s[right]):\n if left == 0:\n cuts[right] = 0\n else:\n cuts[right] = min(cuts[right], cuts[left - 1] + 1)\n left -= 1\n right += 1\ncuts = [x for x in range(len(s))]\nfor i in range(len(s)):\n search(i, i)\n search(i, i + 1)\nreturn cuts[-1]", "cut = [x for x in range(-1, len(s))]\nfor i in range(1, len(s)):\n for left, right in [(i, i), (i - 1, i)]:\n while left >= 0 and right < len(s) and (s[left] == s[right]):\n cut[right + 1] = min(cut[right + 1], cut[left] + 1)\n left -= 1\n right += 1\nreturn cut[-1]", "dp = [[0] * len(s) for _ in range(len(s))]\nfor size in range(len(s)):\n for i in range(len(s)):\n j = i + size\n if j >= len(s):\n break\n elif i == j:\n continue\n elif j - i == 1:\n if s[i] == s[j]:\n dp[i][j] = 0\n else:\n dp[i][j] = 1\n elif s[i] == s[j] and dp[i + 1][j - 1] == 0:\n dp[i][j] = 0\n else:\n min_cut = float('inf')\n for k in range(i, j):\n min_cut = min(min_cut, 1 + dp[i][k] + dp[k + 1][j])\n dp[i][j] = min_cut\nreturn dp[0][-1]"], "bodies_text": "<|body_start_0|>\n def search(left, right):\n while left >= 0 and right < len(s) and (s[left] == s[right]):\n if left == 0:\n cuts[right] = 0\n else:\n cuts[right] = min(cuts[right], cuts[left - 1] + 1)\n left -= 1\n right += 1\n cuts = [x for x in range(len(s))]\n for i in range(len(s)):\n search(i, i)\n search(i, i + 1)\n return cuts[-1]\n<|end_body_0|>\n\n<|body_start_1|>\n cut = [x for x in range(-1, len(s))]\n for i in range(1, len(s)):\n for left, right in [(i, i), (i - 1, i)]:\n while left >= 0 and right < len(s) and (s[left] == s[right]):\n cut[right + 1] = min(cut[right + 1], cut[left] + 1)\n left -= 1\n right += 1\n return cut[-1]\n<|end_body_1|>\n\n<|body_start_2|>\n dp = [[0] * len(s) for _ in range(len(s))]\n for size in range(len(s)):\n for i in range(len(s)):\n j = i + size\n if j >= len(s):\n break\n elif i == j:\n continue\n elif j - i == 1:\n if s[i] == s[j]:\n dp[i][j] = 0\n else:\n dp[i][j] = 1\n elif s[i] == s[j] and dp[i + 1][j - 1] == 0:\n dp[i][j] = 0\n else:\n min_cut = float('inf')\n for k in range(i, j):\n min_cut = min(min_cut, 1 + dp[i][k] + dp[k + 1][j])\n dp[i][j] = min_cut\n return dp[0][-1]\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def minCut(self, s):\n \"\"\":type s: str :rtype: int\"\"\"\n <|body_0|>\n\n def minCut_v2(self, s):\n \"\"\":type s: str :rtype: int\"\"\"\n <|body_1|>\n\n def minCut_TLE(self, s):\n \"\"\":type s: str :rtype: int\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def search(left, right):\n while left >= 0 and right < len(s) and (s[left] == s[right]):\n if left == 0:\n cuts[right] = 0\n else:\n cuts[right] = min(cuts[right], cuts[left - 1] + 1)\n left -= 1\n right += 1\n cuts = [x for x in range(len(s))]\n for i in range(len(s)):\n search(i, i)\n search(i, i + 1)\n return cuts[-1]\n<|end_body_0|>\n\n<|body_start_1|>\n cut = [x for x in range(-1, len(s))]\n for i in range(1, len(s)):\n for left, right in [(i, i), (i - 1, i)]:\n while left >= 0 and right < len(s) and (s[left] == s[right]):\n cut[right + 1] = min(cut[right + 1], cut[left] + 1)\n left -= 1\n right += 1\n return cut[-1]\n<|end_body_1|>\n\n<|body_start_2|>\n dp = [[0] * len(s) for _ in range(len(s))]\n for size in range(len(s)):\n for i in range(len(s)):\n j = i + size\n if j >= len(s):\n break\n 
elif i == j:\n continue\n elif j - i == 1:\n if s[i] == s[j]:\n dp[i][j] = 0\n else:\n dp[i][j] = 1\n elif s[i] == s[j] and dp[i + 1][j - 1] == 0:\n dp[i][j] = 0\n else:\n min_cut = float('inf')\n for k in range(i, j):\n min_cut = min(min_cut, 1 + dp[i][k] + dp[k + 1][j])\n dp[i][j] = min_cut\n return dp[0][-1]\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000185", "length_bytes": 3592, "license_type": "no_license", "methods": [{"docstring": ":type s: str :rtype: int", "name": "minCut", "signature": "def minCut(self, s)"}, {"docstring": ":type s: str :rtype: int", "name": "minCut_v2", "signature": "def minCut_v2(self, s)"}, {"docstring": ":type s: str :rtype: int", "name": "minCut_TLE", "signature": "def minCut_TLE(self, s)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_val_002796", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def minCut(self, s): :type s: str :rtype: int\n- def minCut_v2(self, s): :type s: str :rtype: int\n- def minCut_TLE(self, s): :type s: str :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def minCut(self, s): :type s: str :rtype: int\n- def minCut_v2(self, s): :type s: str :rtype: int\n- def minCut_TLE(self, s): :type s: str :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def minCut(self, s):\n \"\"\":type s: str :rtype: int\"\"\"\n <|body_0|>\n\n def minCut_v2(self, s):\n \"\"\":type s: str :rtype: int\"\"\"\n <|body_1|>\n\n def minCut_TLE(self, s):\n \"\"\":type s: str :rtype: int\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def search(left, right):\n while left >= 0 and right < len(s) and (s[left] == s[right]):\n if left == 0:\n cuts[right] = 0\n else:\n cuts[right] = min(cuts[right], cuts[left - 1] + 1)\n left -= 1\n right += 1\n cuts = [x for x in range(len(s))]\n for i in range(len(s)):\n search(i, i)\n search(i, i + 1)\n return cuts[-1]\n<|end_body_0|>\n\n<|body_start_1|>\n cut = [x for x in range(-1, len(s))]\n for i in range(1, len(s)):\n for left, right in [(i, i), (i - 1, i)]:\n while left >= 0 and right < len(s) and (s[left] == s[right]):\n cut[right + 1] = min(cut[right + 1], cut[left] + 1)\n left -= 1\n right += 1\n return cut[-1]\n<|end_body_1|>\n\n<|body_start_2|>\n dp = [[0] * len(s) for _ in range(len(s))]\n for size in range(len(s)):\n for i in range(len(s)):\n j = i + size\n if j >= len(s):\n break\n elif i == j:\n continue\n elif j - i == 1:\n if s[i] == s[j]:\n dp[i][j] = 0\n else:\n dp[i][j] = 1\n elif s[i] == s[j] and dp[i + 1][j - 1] == 0:\n dp[i][j] = 0\n else:\n min_cut = float('inf')\n for k in range(i, j):\n min_cut = min(min_cut, 1 + dp[i][k] + dp[k + 1][j])\n dp[i][j] = min_cut\n return dp[0][-1]\n<|end_body_2|>\n", "revision_id": "e60ba45fe2f2e5e3b3abfecec3db76f5ce1fde59", "skeleton": "<|skeleton|>\nclass Solution:\n\n def minCut(self, s):\n \"\"\":type s: str :rtype: int\"\"\"\n <|body_0|>\n\n def minCut_v2(self, s):\n \"\"\":type s: str :rtype: int\"\"\"\n <|body_1|>\n\n def minCut_TLE(self, s):\n \"\"\":type s: str :rtype: int\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def minCut(self, s):\n \"\"\":type s: str :rtype: int\"\"\"\n def search(left, right):\n while left >= 
0 and right < len(s) and (s[left] == s[right]):\n if left == 0:\n cuts[right] = 0\n else:\n cuts[right] = min(cuts[right], cuts[left - 1] + 1)\n left -= 1\n right += 1\n cuts = [x for x in range(len(s))]\n for i in range(len(s)):\n search(i, i)\n search(i, i + 1)\n return cuts[-1]\n\n def minCut_v2(self, s):\n \"\"\":type s: str :rtype: int\"\"\"\n cut = [x for x in range(-1, len(s))]\n for i in range(1, len(s)):\n for left, right in [(i, i), (i - 1, i)]:\n while left >= 0 and right < len(s) and (s[left] == s[right]):\n cut[right + 1] = min(cut[right + 1], cut[left] + 1)\n left -= 1\n right += 1\n return cut[-1]\n\n def minCut_TLE(self, s):\n \"\"\":type s: str :rtype: int\"\"\"\n dp = [[0] * len(s) for _ in range(len(s))]\n for size in range(len(s)):\n for i in range(len(s)):\n j = i + size\n if j >= len(s):\n break\n elif i == j:\n continue\n elif j - i == 1:\n if s[i] == s[j]:\n dp[i][j] = 0\n else:\n dp[i][j] = 1\n elif s[i] == s[j] and dp[i + 1][j - 1] == 0:\n dp[i][j] = 0\n else:\n min_cut = float('inf')\n for k in range(i, j):\n min_cut = min(min_cut, 1 + dp[i][k] + dp[k + 1][j])\n dp[i][j] = min_cut\n return dp[0][-1]\n", "source": "the_stack_v2_python_sparse", "source_path": "src/lt_132.py", "source_repo": "oxhead/CodingYourWay", "split": "val", "star_events_count": 0}
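minCut_v2 in the record above is the tight O(n^2)-time, O(n)-space version of palindrome partitioning: cut[i] holds the minimal cuts for the prefix s[:i], cut[0] = -1 acts as a sentinel so a palindrome closing the prefix costs cut[start] + 1, and every palindrome is discovered by expanding around odd and even centers. The same body as a standalone function with quick checks:

def min_cut(s):
    cut = list(range(-1, len(s)))                  # cut[0] = -1 sentinel
    for i in range(1, len(s)):
        for left, right in [(i, i), (i - 1, i)]:   # odd / even centers
            while left >= 0 and right < len(s) and s[left] == s[right]:
                cut[right + 1] = min(cut[right + 1], cut[left] + 1)
                left -= 1
                right += 1
    return cut[-1]

assert min_cut('aab') == 1     # 'aa' | 'b'
assert min_cut('abba') == 0    # already a palindrome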
{"blob_id": "28a63b9dc97030f2966f44ec0da9dbb007e5749a", "bodies": ["empty_reply = create_question()\nempty_reply.reply('')\nself.assertIs(empty_reply.seen, False)", "not_empty_reply = create_question()\nnot_empty_reply.reply('Wong')\nself.assertIs(not_empty_reply.seen, True)", "unreply_question = create_question()\nunreply_question.submit()\nself.assertIs(unreply_question.seen, False)"], "bodies_text": "<|body_start_0|>\n empty_reply = create_question()\n empty_reply.reply('')\n self.assertIs(empty_reply.seen, False)\n<|end_body_0|>\n\n<|body_start_1|>\n not_empty_reply = create_question()\n not_empty_reply.reply('Wong')\n self.assertIs(not_empty_reply.seen, True)\n<|end_body_1|>\n\n<|body_start_2|>\n unreply_question = create_question()\n unreply_question.submit()\n self.assertIs(unreply_question.seen, False)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "QuestionModelTests", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass QuestionModelTests:\n\n def test_reply_empty_string(self):\n \"\"\"seen be False when response is empty\"\"\"\n <|body_0|>\n\n def test_reply_not_empty_string(self):\n \"\"\"seen be True when response is not empty\"\"\"\n <|body_1|>\n\n def test_unreply(self):\n \"\"\"seen be True when response is not empty\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n empty_reply = create_question()\n empty_reply.reply('')\n self.assertIs(empty_reply.seen, False)\n<|end_body_0|>\n\n<|body_start_1|>\n not_empty_reply = create_question()\n not_empty_reply.reply('Wong')\n self.assertIs(not_empty_reply.seen, True)\n<|end_body_1|>\n\n<|body_start_2|>\n unreply_question = create_question()\n unreply_question.submit()\n self.assertIs(unreply_question.seen, False)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000186", "length_bytes": 2710, "license_type": "no_license", "methods": [{"docstring": "seen be False when response is empty", "name": "test_reply_empty_string", "signature": "def test_reply_empty_string(self)"}, {"docstring": "seen be True when response is not empty", "name": "test_reply_not_empty_string", "signature": "def test_reply_not_empty_string(self)"}, {"docstring": "seen be True when response is not empty", "name": "test_unreply", "signature": "def test_unreply(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_037828", "prompt": "Implement the Python class `QuestionModelTests` described below.\n\nClass description:\nImplement the QuestionModelTests class.\n\nMethod signatures and docstrings:\n- def test_reply_empty_string(self): seen be False when response is empty\n- def test_reply_not_empty_string(self): seen be True when response is not empty\n- def test_unreply(self): seen be True when response is not empty", "prompted_full_text": "Implement the Python class `QuestionModelTests` described below.\n\nClass description:\nImplement the QuestionModelTests class.\n\nMethod signatures and docstrings:\n- def test_reply_empty_string(self): seen be False when response is empty\n- def test_reply_not_empty_string(self): seen be True when response is not empty\n- def test_unreply(self): seen be True when response is not empty\n\n<|skeleton|>\nclass QuestionModelTests:\n\n def test_reply_empty_string(self):\n \"\"\"seen be False when response is empty\"\"\"\n <|body_0|>\n\n def test_reply_not_empty_string(self):\n \"\"\"seen be True when response is not empty\"\"\"\n <|body_1|>\n\n def test_unreply(self):\n \"\"\"seen be True when response is not 
empty\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n empty_reply = create_question()\n empty_reply.reply('')\n self.assertIs(empty_reply.seen, False)\n<|end_body_0|>\n\n<|body_start_1|>\n not_empty_reply = create_question()\n not_empty_reply.reply('Wong')\n self.assertIs(not_empty_reply.seen, True)\n<|end_body_1|>\n\n<|body_start_2|>\n unreply_question = create_question()\n unreply_question.submit()\n self.assertIs(unreply_question.seen, False)\n<|end_body_2|>\n", "revision_id": "4fd6ac227a6a123cd2153a266a34c90a7309f18f", "skeleton": "<|skeleton|>\nclass QuestionModelTests:\n\n def test_reply_empty_string(self):\n \"\"\"seen be False when response is empty\"\"\"\n <|body_0|>\n\n def test_reply_not_empty_string(self):\n \"\"\"seen be True when response is not empty\"\"\"\n <|body_1|>\n\n def test_unreply(self):\n \"\"\"seen be True when response is not empty\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class QuestionModelTests:\n def test_reply_empty_string(self):\n \"\"\"seen be False when response is empty\"\"\"\n empty_reply = create_question()\n empty_reply.reply('')\n self.assertIs(empty_reply.seen, False)\n\n def test_reply_not_empty_string(self):\n \"\"\"seen be True when response is not empty\"\"\"\n not_empty_reply = create_question()\n not_empty_reply.reply('Wong')\n self.assertIs(not_empty_reply.seen, True)\n\n def test_unreply(self):\n \"\"\"seen be True when response is not empty\"\"\"\n unreply_question = create_question()\n unreply_question.submit()\n self.assertIs(unreply_question.seen, False)\n", "source": "the_stack_v2_python_sparse", "source_path": "main_site/tests.py", "source_repo": "dl8sd11/pi_official", "split": "val", "star_events_count": 0}
{"blob_id": "58e0281a554257872ab993c3657df5caa246d981", "bodies": ["self.k = k\nself.nu = nu\nself.plane_origin = plane_origin.reshape(3, 1)\nself.plane_normal = plane_normal.reshape(3)\nself.surface_tol = 0.0001", "nodal_total_forces = system.internal_forces + system.external_forces\nelement_total_forces = nodes_to_elements(nodal_total_forces)\nforce_component_along_normal_direction = np.einsum('i, ij->j', self.plane_normal, element_total_forces)\nforces_along_normal_direction = np.einsum('i, j->ij', self.plane_normal, force_component_along_normal_direction)\nforces_along_normal_direction[..., np.where(force_component_along_normal_direction > 0)] = 0.0\nplane_response_force = -forces_along_normal_direction\nelement_position = 0.5 * (system.position_collection[..., :-1] + system.position_collection[..., 1:])\ndistance_from_plane = np.einsum('i, ij->j', self.plane_normal, element_position - self.plane_origin)\nplane_penetration = np.minimum(distance_from_plane - system.radius, 0.0)\nelastic_force = -self.k * np.einsum('i, j->ij', self.plane_normal, plane_penetration)\nelement_velocity = 0.5 * (system.velocity_collection[..., :-1] + system.velocity_collection[..., 1:])\nnormal_component_of_element_velocity = np.einsum('i, ij->j', self.plane_normal, element_velocity)\ndamping_force = -self.nu * np.einsum('i, j->ij', self.plane_normal, normal_component_of_element_velocity)\nplane_response_force_total = plane_response_force + elastic_force + damping_force\nno_contact_point_idx = np.where(distance_from_plane - system.radius > self.surface_tol)\nplane_response_force[..., no_contact_point_idx] = 0.0\nplane_response_force_total[..., no_contact_point_idx] = 0.0\nsystem.external_forces[..., :-1] += 0.5 * plane_response_force_total\nsystem.external_forces[..., 1:] += 0.5 * plane_response_force_total\nreturn (np.sqrt(np.einsum('ij, ij->j', plane_response_force, plane_response_force)), no_contact_point_idx)"], "bodies_text": "<|body_start_0|>\n self.k = k\n self.nu = nu\n self.plane_origin = plane_origin.reshape(3, 1)\n self.plane_normal = plane_normal.reshape(3)\n self.surface_tol = 0.0001\n<|end_body_0|>\n\n<|body_start_1|>\n nodal_total_forces = system.internal_forces + system.external_forces\n element_total_forces = nodes_to_elements(nodal_total_forces)\n force_component_along_normal_direction = np.einsum('i, ij->j', self.plane_normal, element_total_forces)\n forces_along_normal_direction = np.einsum('i, j->ij', self.plane_normal, force_component_along_normal_direction)\n forces_along_normal_direction[..., np.where(force_component_along_normal_direction > 0)] = 0.0\n plane_response_force = -forces_along_normal_direction\n element_position = 0.5 * (system.position_collection[..., :-1] + system.position_collection[..., 1:])\n distance_from_plane = np.einsum('i, ij->j', self.plane_normal, element_position - self.plane_origin)\n plane_penetration = np.minimum(distance_from_plane - system.radius, 0.0)\n elastic_force = -self.k * np.einsum('i, j->ij', self.plane_normal, plane_penetration)\n element_velocity = 0.5 * (system.velocity_collection[..., :-1] + system.velocity_collection[..., 1:])\n normal_component_of_element_velocity = np.einsum('i, ij->j', self.plane_normal, element_velocity)\n damping_force = -self.nu * np.einsum('i, j->ij', self.plane_normal, normal_component_of_element_velocity)\n plane_response_force_total = plane_response_force + elastic_force + damping_force\n no_contact_point_idx = np.where(distance_from_plane - system.radius > self.surface_tol)\n plane_response_force[..., 
no_contact_point_idx] = 0.0\n plane_response_force_total[..., no_contact_point_idx] = 0.0\n system.external_forces[..., :-1] += 0.5 * plane_response_force_total\n system.external_forces[..., 1:] += 0.5 * plane_response_force_total\n return (np.sqrt(np.einsum('ij, ij->j', plane_response_force, plane_response_force)), no_contact_point_idx)\n<|end_body_1|>\n", "class_docstring": "The interaction plane class computes the plane reaction force on a rod-like object. For more details regarding the contact module refer to Eqn 4.8 of Gazzola et al. RSoS (2018). Attributes ---------- k: float Stiffness coefficient between the plane and the rod-like object. nu: float Dissipation coefficient between the plane and the rod-like object. plane_origin: numpy.ndarray 2D (dim, 1) array containing data with 'float' type. Origin of the plane. plane_normal: numpy.ndarray 2D (dim, 1) array containing data with 'float' type. The normal vector of the plane. surface_tol: float Penetration tolerance between the plane and the rod-like object.", "class_name": "InteractionPlane", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass InteractionPlane:\n \"\"\"The interaction plane class computes the plane reaction force on a rod-like object. For more details regarding the contact module refer to Eqn 4.8 of Gazzola et al. RSoS (2018). Attributes ---------- k: float Stiffness coefficient between the plane and the rod-like object. nu: float Dissipation coefficient between the plane and the rod-like object. plane_origin: numpy.ndarray 2D (dim, 1) array containing data with 'float' type. Origin of the plane. plane_normal: numpy.ndarray 2D (dim, 1) array containing data with 'float' type. The normal vector of the plane. surface_tol: float Penetration tolerance between the plane and the rod-like object.\"\"\"\n\n def __init__(self, k, nu, plane_origin, plane_normal):\n \"\"\"Parameters ---------- k: float Stiffness coefficient between the plane and the rod-like object. nu: float Dissipation coefficient between the plane and the rod-like object. plane_origin: numpy.ndarray 2D (dim, 1) array containing data with 'float' type. Origin of the plane. plane_normal: numpy.ndarray 2D (dim, 1) array containing data with 'float' type. The normal vector of the plane.\"\"\"\n <|body_0|>\n\n def apply_normal_force(self, system):\n \"\"\"In the case of contact with the plane, this function computes the plane reaction force on the element. Parameters ---------- system: object Rod-like object. Returns ------- plane_response_force_mag : numpy.ndarray 1D (blocksize) array containing data with 'float' type. Magnitude of plane response force acting on rod-like object. no_contact_point_idx : numpy.ndarray 1D (blocksize) array containing data with 'int' type. 
Index of rod-like object elements that are not in contact with the plane.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.k = k\n self.nu = nu\n self.plane_origin = plane_origin.reshape(3, 1)\n self.plane_normal = plane_normal.reshape(3)\n self.surface_tol = 0.0001\n<|end_body_0|>\n\n<|body_start_1|>\n nodal_total_forces = system.internal_forces + system.external_forces\n element_total_forces = nodes_to_elements(nodal_total_forces)\n force_component_along_normal_direction = np.einsum('i, ij->j', self.plane_normal, element_total_forces)\n forces_along_normal_direction = np.einsum('i, j->ij', self.plane_normal, force_component_along_normal_direction)\n forces_along_normal_direction[..., np.where(force_component_along_normal_direction > 0)] = 0.0\n plane_response_force = -forces_along_normal_direction\n element_position = 0.5 * (system.position_collection[..., :-1] + system.position_collection[..., 1:])\n distance_from_plane = np.einsum('i, ij->j', self.plane_normal, element_position - self.plane_origin)\n plane_penetration = np.minimum(distance_from_plane - system.radius, 0.0)\n elastic_force = -self.k * np.einsum('i, j->ij', self.plane_normal, plane_penetration)\n element_velocity = 0.5 * (system.velocity_collection[..., :-1] + system.velocity_collection[..., 1:])\n normal_component_of_element_velocity = np.einsum('i, ij->j', self.plane_normal, element_velocity)\n damping_force = -self.nu * np.einsum('i, j->ij', self.plane_normal, normal_component_of_element_velocity)\n plane_response_force_total = plane_response_force + elastic_force + damping_force\n no_contact_point_idx = np.where(distance_from_plane - system.radius > self.surface_tol)\n plane_response_force[..., no_contact_point_idx] = 0.0\n plane_response_force_total[..., no_contact_point_idx] = 0.0\n system.external_forces[..., :-1] += 0.5 * plane_response_force_total\n system.external_forces[..., 1:] += 0.5 * plane_response_force_total\n return (np.sqrt(np.einsum('ij, ij->j', plane_response_force, plane_response_force)), no_contact_point_idx)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000187", "length_bytes": 37229, "license_type": "permissive", "methods": [{"docstring": "Parameters ---------- k: float Stiffness coefficient between the plane and the rod-like object. nu: float Dissipation coefficient between the plane and the rod-like object. plane_origin: numpy.ndarray 2D (dim, 1) array containing data with 'float' type. Origin of the plane. plane_normal: numpy.ndarray 2D (dim, 1) array containing data with 'float' type. The normal vector of the plane.", "name": "__init__", "signature": "def __init__(self, k, nu, plane_origin, plane_normal)"}, {"docstring": "In the case of contact with the plane, this function computes the plane reaction force on the element. Parameters ---------- system: object Rod-like object. Returns ------- plane_response_force_mag : numpy.ndarray 1D (blocksize) array containing data with 'float' type. Magnitude of plane response force acting on rod-like object. no_contact_point_idx : numpy.ndarray 1D (blocksize) array containing data with 'int' type. Index of rod-like object elements that are not in contact with the plane.", "name": "apply_normal_force", "signature": "def apply_normal_force(self, system)"}], "n_methods": 2, "prompt": "Implement the Python class `InteractionPlane` described below.\n\nClass description:\nThe interaction plane class computes the plane reaction force on a rod-like object. 
For more details regarding the contact module refer to Eqn 4.8 of Gazzola et al. RSoS (2018). Attributes ---------- k: float Stiffness coefficient between the plane and the rod-like object. nu: float Dissipation coefficient between the plane and the rod-like object. plane_origin: numpy.ndarray 2D (dim, 1) array containing data with 'float' type. Origin of the plane. plane_normal: numpy.ndarray 2D (dim, 1) array containing data with 'float' type. The normal vector of the plane. surface_tol: float Penetration tolerance between the plane and the rod-like object.\n\nMethod signatures and docstrings:\n- def __init__(self, k, nu, plane_origin, plane_normal): Parameters ---------- k: float Stiffness coefficient between the plane and the rod-like object. nu: float Dissipation coefficient between the plane and the rod-like object. plane_origin: numpy.ndarray 2D (dim, 1) array containing data with 'float' type. Origin of the plane. plane_normal: numpy.ndarray 2D (dim, 1) array containing data with 'float' type. The normal vector of the plane.\n- def apply_normal_force(self, system): In the case of contact with the plane, this function computes the plane reaction force on the element. Parameters ---------- system: object Rod-like object. Returns ------- plane_response_force_mag : numpy.ndarray 1D (blocksize) array containing data with 'float' type. Magnitude of plane response force acting on rod-like object. no_contact_point_idx : numpy.ndarray 1D (blocksize) array containing data with 'int' type. Index of rod-like object elements that are not in contact with the plane.", "prompted_full_text": "Implement the Python class `InteractionPlane` described below.\n\nClass description:\nThe interaction plane class computes the plane reaction force on a rod-like object. For more details regarding the contact module refer to Eqn 4.8 of Gazzola et al. RSoS (2018). Attributes ---------- k: float Stiffness coefficient between the plane and the rod-like object. nu: float Dissipation coefficient between the plane and the rod-like object. plane_origin: numpy.ndarray 2D (dim, 1) array containing data with 'float' type. Origin of the plane. plane_normal: numpy.ndarray 2D (dim, 1) array containing data with 'float' type. The normal vector of the plane. surface_tol: float Penetration tolerance between the plane and the rod-like object.\n\nMethod signatures and docstrings:\n- def __init__(self, k, nu, plane_origin, plane_normal): Parameters ---------- k: float Stiffness coefficient between the plane and the rod-like object. nu: float Dissipation coefficient between the plane and the rod-like object. plane_origin: numpy.ndarray 2D (dim, 1) array containing data with 'float' type. Origin of the plane. plane_normal: numpy.ndarray 2D (dim, 1) array containing data with 'float' type. The normal vector of the plane.\n- def apply_normal_force(self, system): In the case of contact with the plane, this function computes the plane reaction force on the element. Parameters ---------- system: object Rod-like object. Returns ------- plane_response_force_mag : numpy.ndarray 1D (blocksize) array containing data with 'float' type. Magnitude of plane response force acting on rod-like object. no_contact_point_idx : numpy.ndarray 1D (blocksize) array containing data with 'int' type. Index of rod-like object elements that are not in contact with the plane.\n\n<|skeleton|>\nclass InteractionPlane:\n \"\"\"The interaction plane class computes the plane reaction force on a rod-like object. 
For more details regarding the contact module refer to Eqn 4.8 of Gazzola et al. RSoS (2018). Attributes ---------- k: float Stiffness coefficient between the plane and the rod-like object. nu: float Dissipation coefficient between the plane and the rod-like object. plane_origin: numpy.ndarray 2D (dim, 1) array containing data with 'float' type. Origin of the plane. plane_normal: numpy.ndarray 2D (dim, 1) array containing data with 'float' type. The normal vector of the plane. surface_tol: float Penetration tolerance between the plane and the rod-like object.\"\"\"\n\n def __init__(self, k, nu, plane_origin, plane_normal):\n \"\"\"Parameters ---------- k: float Stiffness coefficient between the plane and the rod-like object. nu: float Dissipation coefficient between the plane and the rod-like object. plane_origin: numpy.ndarray 2D (dim, 1) array containing data with 'float' type. Origin of the plane. plane_normal: numpy.ndarray 2D (dim, 1) array containing data with 'float' type. The normal vector of the plane.\"\"\"\n <|body_0|>\n\n def apply_normal_force(self, system):\n \"\"\"In the case of contact with the plane, this function computes the plane reaction force on the element. Parameters ---------- system: object Rod-like object. Returns ------- plane_response_force_mag : numpy.ndarray 1D (blocksize) array containing data with 'float' type. Magnitude of plane response force acting on rod-like object. no_contact_point_idx : numpy.ndarray 1D (blocksize) array containing data with 'int' type. Index of rod-like object elements that are not in contact with the plane.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.k = k\n self.nu = nu\n self.plane_origin = plane_origin.reshape(3, 1)\n self.plane_normal = plane_normal.reshape(3)\n self.surface_tol = 0.0001\n<|end_body_0|>\n\n<|body_start_1|>\n nodal_total_forces = system.internal_forces + system.external_forces\n element_total_forces = nodes_to_elements(nodal_total_forces)\n force_component_along_normal_direction = np.einsum('i, ij->j', self.plane_normal, element_total_forces)\n forces_along_normal_direction = np.einsum('i, j->ij', self.plane_normal, force_component_along_normal_direction)\n forces_along_normal_direction[..., np.where(force_component_along_normal_direction > 0)] = 0.0\n plane_response_force = -forces_along_normal_direction\n element_position = 0.5 * (system.position_collection[..., :-1] + system.position_collection[..., 1:])\n distance_from_plane = np.einsum('i, ij->j', self.plane_normal, element_position - self.plane_origin)\n plane_penetration = np.minimum(distance_from_plane - system.radius, 0.0)\n elastic_force = -self.k * np.einsum('i, j->ij', self.plane_normal, plane_penetration)\n element_velocity = 0.5 * (system.velocity_collection[..., :-1] + system.velocity_collection[..., 1:])\n normal_component_of_element_velocity = np.einsum('i, ij->j', self.plane_normal, element_velocity)\n damping_force = -self.nu * np.einsum('i, j->ij', self.plane_normal, normal_component_of_element_velocity)\n plane_response_force_total = plane_response_force + elastic_force + damping_force\n no_contact_point_idx = np.where(distance_from_plane - system.radius > self.surface_tol)\n plane_response_force[..., no_contact_point_idx] = 0.0\n plane_response_force_total[..., no_contact_point_idx] = 0.0\n system.external_forces[..., :-1] += 0.5 * plane_response_force_total\n system.external_forces[..., 1:] += 0.5 * plane_response_force_total\n return (np.sqrt(np.einsum('ij, ij->j', plane_response_force, plane_response_force)), 
no_contact_point_idx)\n<|end_body_1|>\n", "revision_id": "b63edc489a7dd36f353a8ec2a003a7202ebc7e95", "skeleton": "<|skeleton|>\nclass InteractionPlane:\n \"\"\"The interaction plane class computes the plane reaction force on a rod-like object. For more details regarding the contact module refer to Eqn 4.8 of Gazzola et al. RSoS (2018). Attributes ---------- k: float Stiffness coefficient between the plane and the rod-like object. nu: float Dissipation coefficient between the plane and the rod-like object. plane_origin: numpy.ndarray 2D (dim, 1) array containing data with 'float' type. Origin of the plane. plane_normal: numpy.ndarray 2D (dim, 1) array containing data with 'float' type. The normal vector of the plane. surface_tol: float Penetration tolerance between the plane and the rod-like object.\"\"\"\n\n def __init__(self, k, nu, plane_origin, plane_normal):\n \"\"\"Parameters ---------- k: float Stiffness coefficient between the plane and the rod-like object. nu: float Dissipation coefficient between the plane and the rod-like object. plane_origin: numpy.ndarray 2D (dim, 1) array containing data with 'float' type. Origin of the plane. plane_normal: numpy.ndarray 2D (dim, 1) array containing data with 'float' type. The normal vector of the plane.\"\"\"\n <|body_0|>\n\n def apply_normal_force(self, system):\n \"\"\"In the case of contact with the plane, this function computes the plane reaction force on the element. Parameters ---------- system: object Rod-like object. Returns ------- plane_response_force_mag : numpy.ndarray 1D (blocksize) array containing data with 'float' type. Magnitude of plane response force acting on rod-like object. no_contact_point_idx : numpy.ndarray 1D (blocksize) array containing data with 'int' type. Index of rod-like object elements that are not in contact with the plane.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class InteractionPlane:\n \"\"\"The interaction plane class computes the plane reaction force on a rod-like object. For more details regarding the contact module refer to Eqn 4.8 of Gazzola et al. RSoS (2018). Attributes ---------- k: float Stiffness coefficient between the plane and the rod-like object. nu: float Dissipation coefficient between the plane and the rod-like object. plane_origin: numpy.ndarray 2D (dim, 1) array containing data with 'float' type. Origin of the plane. plane_normal: numpy.ndarray 2D (dim, 1) array containing data with 'float' type. The normal vector of the plane. surface_tol: float Penetration tolerance between the plane and the rod-like object.\"\"\"\n\n def __init__(self, k, nu, plane_origin, plane_normal):\n \"\"\"Parameters ---------- k: float Stiffness coefficient between the plane and the rod-like object. nu: float Dissipation coefficient between the plane and the rod-like object. plane_origin: numpy.ndarray 2D (dim, 1) array containing data with 'float' type. Origin of the plane. plane_normal: numpy.ndarray 2D (dim, 1) array containing data with 'float' type. The normal vector of the plane.\"\"\"\n self.k = k\n self.nu = nu\n self.plane_origin = plane_origin.reshape(3, 1)\n self.plane_normal = plane_normal.reshape(3)\n self.surface_tol = 0.0001\n\n def apply_normal_force(self, system):\n \"\"\"In the case of contact with the plane, this function computes the plane reaction force on the element. Parameters ---------- system: object Rod-like object. 
Returns ------- plane_response_force_mag : numpy.ndarray 1D (blocksize) array containing data with 'float' type. Magnitude of plane response force acting on rod-like object. no_contact_point_idx : numpy.ndarray 1D (blocksize) array containing data with 'int' type. Index of rod-like object elements that are not in contact with the plane.\"\"\"\n nodal_total_forces = system.internal_forces + system.external_forces\n element_total_forces = nodes_to_elements(nodal_total_forces)\n force_component_along_normal_direction = np.einsum('i, ij->j', self.plane_normal, element_total_forces)\n forces_along_normal_direction = np.einsum('i, j->ij', self.plane_normal, force_component_along_normal_direction)\n forces_along_normal_direction[..., np.where(force_component_along_normal_direction > 0)] = 0.0\n plane_response_force = -forces_along_normal_direction\n element_position = 0.5 * (system.position_collection[..., :-1] + system.position_collection[..., 1:])\n distance_from_plane = np.einsum('i, ij->j', self.plane_normal, element_position - self.plane_origin)\n plane_penetration = np.minimum(distance_from_plane - system.radius, 0.0)\n elastic_force = -self.k * np.einsum('i, j->ij', self.plane_normal, plane_penetration)\n element_velocity = 0.5 * (system.velocity_collection[..., :-1] + system.velocity_collection[..., 1:])\n normal_component_of_element_velocity = np.einsum('i, ij->j', self.plane_normal, element_velocity)\n damping_force = -self.nu * np.einsum('i, j->ij', self.plane_normal, normal_component_of_element_velocity)\n plane_response_force_total = plane_response_force + elastic_force + damping_force\n no_contact_point_idx = np.where(distance_from_plane - system.radius > self.surface_tol)\n plane_response_force[..., no_contact_point_idx] = 0.0\n plane_response_force_total[..., no_contact_point_idx] = 0.0\n system.external_forces[..., :-1] += 0.5 * plane_response_force_total\n system.external_forces[..., 1:] += 0.5 * plane_response_force_total\n return (np.sqrt(np.einsum('ij, ij->j', plane_response_force, plane_response_force)), no_contact_point_idx)\n", "source": "the_stack_v2_python_sparse", "source_path": "elastica/_elastica_numpy/_interaction.py", "source_repo": "J-Woj/PyElastica", "split": "val", "star_events_count": 0}
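
The apply_normal_force body in the record above is dominated by einsum contractions. The following standalone sketch isolates its core penalty-force step (signed distance to the plane, penetration clamp, linear spring) on toy data; the plane constants and element positions are invented for illustration, and only the einsum patterns follow the record.

    import numpy as np

    # Everything here (normal, origin, stiffness, positions) is made up;
    # only the contraction patterns mirror the record's apply_normal_force.
    plane_normal = np.array([0.0, 1.0, 0.0])  # unit normal of a ground plane
    plane_origin = np.zeros((3, 1))           # plane through the origin
    k = 100.0                                  # hypothetical stiffness
    radius = 0.1                               # element radius

    # Shape (3, n_elements): rows are x/y/z, columns are element centroids.
    element_position = np.array([[0.0, 0.3, 0.0],
                                 [0.3, 0.05, -0.2],
                                 [0.0, 0.1, 0.0]])

    # 'i, ij->j': per-element dot product with the normal (signed distance).
    distance_from_plane = np.einsum('i, ij->j', plane_normal,
                                    element_position - plane_origin)
    plane_penetration = np.minimum(distance_from_plane - radius, 0.0)
    # 'i, j->ij': outer product turning scalar magnitudes back into vectors.
    elastic_force = -k * np.einsum('i, j->ij', plane_normal, plane_penetration)
    print(elastic_force)  # only the penetrating columns get an upward push

The 'i, ij->j' contraction is a per-column dot product with the normal, and 'i, j->ij' is the outer product that distributes each scalar force magnitude along the normal direction.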
{"blob_id": "04e9db91e9f13986f864faf786b51ba8177adee7", "bodies": ["try:\n isbn = request.data['isbn'].replace('-', '')\n if Book.objects.filter(isbn=isbn).count() == 0:\n data = OpenBD().get_json(isbn)\n logger.debug('Success BookManagementView.list')\n logger.debug(data)\n return Response(data=data, status=HTTP_200_OK)\n else:\n book = Book.objects.filter(isbn=request.data['isbn']).get()\n serializer = BookManagementSerializer(data=book)\n if not serializer.is_valid():\n raise ValueError(serializer.errors)\n logger.debug('Success BookManagementView.list')\n logger.debug(serializer.data)\n return Response(data=serializer.data, status=HTTP_200_OK)\nexcept Exception as e:\n logger.error('Exception BookManagementView.list')\n logger.error(e)\n return Response(data={'error': e.args[0]}, status=HTTP_400_BAD_REQUEST)", "try:\n serializer = BookManagementSerializer(data=request.data)\n if not serializer.is_valid():\n raise ValueError(serializer.errors)\n publisher_serializer = PublisherSerializer(data={''})\n publisher_serializer.save()\n author_serializer = AuthorSerializer(data=serializer.validated_data)\n author_serializer.save()\n book_manage_serializer = BookManagementSerializer(data=serializer.validated_data)\n book_manage_serializer.save()\n if serializer.validated_data['series'] != '':\n series = Series.objects.filter(name=serializer.validated_data['series']).get()\n if len(series) == 0:\n series_serializer = SeriesSerializer(data=serializer.validated_data)\n series_serializer.save()\n series_book_serializer = SeriesBookSerializer(data=serializer.validated_data)\n series_book_serializer.save()\n serializer = BookManagementSerializer(data=serializer.validated_data)\n return Response(data=serializer.data, status=HTTP_201_CREATED)\nexcept RegisterError as e:\n logger.error('Exception BookManageViewSet.create')\n logger.error(e)\n return Response(data={'error': e.args[0]}, status=HTTP_400_BAD_REQUEST)"], "bodies_text": "<|body_start_0|>\n try:\n isbn = request.data['isbn'].replace('-', '')\n if Book.objects.filter(isbn=isbn).count() == 0:\n data = OpenBD().get_json(isbn)\n logger.debug('Success BookManagementView.list')\n logger.debug(data)\n return Response(data=data, status=HTTP_200_OK)\n else:\n book = Book.objects.filter(isbn=request.data['isbn']).get()\n serializer = BookManagementSerializer(data=book)\n if not serializer.is_valid():\n raise ValueError(serializer.errors)\n logger.debug('Success BookManagementView.list')\n logger.debug(serializer.data)\n return Response(data=serializer.data, status=HTTP_200_OK)\n except Exception as e:\n logger.error('Exception BookManagementView.list')\n logger.error(e)\n return Response(data={'error': e.args[0]}, status=HTTP_400_BAD_REQUEST)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n serializer = BookManagementSerializer(data=request.data)\n if not serializer.is_valid():\n raise ValueError(serializer.errors)\n publisher_serializer = PublisherSerializer(data={''})\n publisher_serializer.save()\n author_serializer = AuthorSerializer(data=serializer.validated_data)\n author_serializer.save()\n book_manage_serializer = BookManagementSerializer(data=serializer.validated_data)\n book_manage_serializer.save()\n if serializer.validated_data['series'] != '':\n series = Series.objects.filter(name=serializer.validated_data['series']).get()\n if len(series) == 0:\n series_serializer = SeriesSerializer(data=serializer.validated_data)\n series_serializer.save()\n series_book_serializer = SeriesBookSerializer(data=serializer.validated_data)\n 
series_book_serializer.save()\n serializer = BookManagementSerializer(data=serializer.validated_data)\n return Response(data=serializer.data, status=HTTP_201_CREATED)\n except RegisterError as e:\n logger.error('Exception BookManageViewSet.create')\n logger.error(e)\n return Response(data={'error': e.args[0]}, status=HTTP_400_BAD_REQUEST)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "BookManagementView", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BookManagementView:\n\n def list(self, request, *args, **kwargs):\n \"\"\"Find book info from OpenBD API by ISBN Code. :param request: ISBN Code :param args: :param kwargs: :return: book info(format:json)\"\"\"\n <|body_0|>\n\n def create(self, request, *args, **kwargs):\n \"\"\"create book info :param request: book info :param args: :param kwargs: :return: The registered book info if successful. Otherwise an error message\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n isbn = request.data['isbn'].replace('-', '')\n if Book.objects.filter(isbn=isbn).count() == 0:\n data = OpenBD().get_json(isbn)\n logger.debug('Success BookManagementView.list')\n logger.debug(data)\n return Response(data=data, status=HTTP_200_OK)\n else:\n book = Book.objects.filter(isbn=request.data['isbn']).get()\n serializer = BookManagementSerializer(data=book)\n if not serializer.is_valid():\n raise ValueError(serializer.errors)\n logger.debug('Success BookManagementView.list')\n logger.debug(serializer.data)\n return Response(data=serializer.data, status=HTTP_200_OK)\n except Exception as e:\n logger.error('Exception BookManagementView.list')\n logger.error(e)\n return Response(data={'error': e.args[0]}, status=HTTP_400_BAD_REQUEST)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n serializer = BookManagementSerializer(data=request.data)\n if not serializer.is_valid():\n raise ValueError(serializer.errors)\n publisher_serializer = PublisherSerializer(data={''})\n publisher_serializer.save()\n author_serializer = AuthorSerializer(data=serializer.validated_data)\n author_serializer.save()\n book_manage_serializer = BookManagementSerializer(data=serializer.validated_data)\n book_manage_serializer.save()\n if serializer.validated_data['series'] != '':\n series = Series.objects.filter(name=serializer.validated_data['series']).get()\n if len(series) == 0:\n series_serializer = SeriesSerializer(data=serializer.validated_data)\n series_serializer.save()\n series_book_serializer = SeriesBookSerializer(data=serializer.validated_data)\n series_book_serializer.save()\n serializer = BookManagementSerializer(data=serializer.validated_data)\n return Response(data=serializer.data, status=HTTP_201_CREATED)\n except RegisterError as e:\n logger.error('Exception BookManageViewSet.create')\n logger.error(e)\n return Response(data={'error': e.args[0]}, status=HTTP_400_BAD_REQUEST)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000188", "length_bytes": 5086, "license_type": "no_license", "methods": [{"docstring": "Find book info from OpenBD API by ISBN Code. :param request: ISBN Code :param args: :param kwargs: :return: book info(format:json)", "name": "list", "signature": "def list(self, request, *args, **kwargs)"}, {"docstring": "create book info :param request: book info :param args: :param kwargs: :return: The registered book info if successful. 
Otherwise an error message", "name": "create", "signature": "def create(self, request, *args, **kwargs)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_040319", "prompt": "Implement the Python class `BookManagementView` described below.\n\nClass description:\nImplement the BookManagementView class.\n\nMethod signatures and docstrings:\n- def list(self, request, *args, **kwargs): Find book info from OpenBD API by ISBN Code. :param request: ISBN Code :param args: :param kwargs: :return: book info(format:json)\n- def create(self, request, *args, **kwargs): create book info :param request: book info :param args: :param kwargs: :return: The registered book info if successful. Otherwise an error message", "prompted_full_text": "Implement the Python class `BookManagementView` described below.\n\nClass description:\nImplement the BookManagementView class.\n\nMethod signatures and docstrings:\n- def list(self, request, *args, **kwargs): Find book info from OpenBD API by ISBN Code. :param request: ISBN Code :param args: :param kwargs: :return: book info(format:json)\n- def create(self, request, *args, **kwargs): create book info :param request: book info :param args: :param kwargs: :return: The registered book info if successful. Otherwise an error message\n\n<|skeleton|>\nclass BookManagementView:\n\n def list(self, request, *args, **kwargs):\n \"\"\"Find book info from OpenBD API by ISBN Code. :param request: ISBN Code :param args: :param kwargs: :return: book info(format:json)\"\"\"\n <|body_0|>\n\n def create(self, request, *args, **kwargs):\n \"\"\"create book info :param request: book info :param args: :param kwargs: :return: The registered book info if successful. Otherwise an error message\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n isbn = request.data['isbn'].replace('-', '')\n if Book.objects.filter(isbn=isbn).count() == 0:\n data = OpenBD().get_json(isbn)\n logger.debug('Success BookManagementView.list')\n logger.debug(data)\n return Response(data=data, status=HTTP_200_OK)\n else:\n book = Book.objects.filter(isbn=request.data['isbn']).get()\n serializer = BookManagementSerializer(data=book)\n if not serializer.is_valid():\n raise ValueError(serializer.errors)\n logger.debug('Success BookManagementView.list')\n logger.debug(serializer.data)\n return Response(data=serializer.data, status=HTTP_200_OK)\n except Exception as e:\n logger.error('Exception BookManagementView.list')\n logger.error(e)\n return Response(data={'error': e.args[0]}, status=HTTP_400_BAD_REQUEST)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n serializer = BookManagementSerializer(data=request.data)\n if not serializer.is_valid():\n raise ValueError(serializer.errors)\n publisher_serializer = PublisherSerializer(data={''})\n publisher_serializer.save()\n author_serializer = AuthorSerializer(data=serializer.validated_data)\n author_serializer.save()\n book_manage_serializer = BookManagementSerializer(data=serializer.validated_data)\n book_manage_serializer.save()\n if serializer.validated_data['series'] != '':\n series = Series.objects.filter(name=serializer.validated_data['series']).get()\n if len(series) == 0:\n series_serializer = SeriesSerializer(data=serializer.validated_data)\n series_serializer.save()\n series_book_serializer = SeriesBookSerializer(data=serializer.validated_data)\n series_book_serializer.save()\n serializer = BookManagementSerializer(data=serializer.validated_data)\n return Response(data=serializer.data, status=HTTP_201_CREATED)\n except RegisterError as 
e:\n logger.error('Exception BookManageViewSet.create')\n logger.error(e)\n return Response(data={'error': e.args[0]}, status=HTTP_400_BAD_REQUEST)\n<|end_body_1|>\n", "revision_id": "20dee6f3d5f928b9184dc7856292d8025a09fd47", "skeleton": "<|skeleton|>\nclass BookManagementView:\n\n def list(self, request, *args, **kwargs):\n \"\"\"Find book info from OpenBD API by ISBN Code. :param request: ISBN Code :param args: :param kwargs: :return: book info(format:json)\"\"\"\n <|body_0|>\n\n def create(self, request, *args, **kwargs):\n \"\"\"create book info :param request: book info :param args: :param kwargs: :return: The registered book info if successful. Otherwise an error message\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class BookManagementView:\n def list(self, request, *args, **kwargs):\n \"\"\"Find book info from OpenBD API by ISBN Code. :param request: ISBN Code :param args: :param kwargs: :return: book info(format:json)\"\"\"\n try:\n isbn = request.data['isbn'].replace('-', '')\n if Book.objects.filter(isbn=isbn).count() == 0:\n data = OpenBD().get_json(isbn)\n logger.debug('Success BookManagementView.list')\n logger.debug(data)\n return Response(data=data, status=HTTP_200_OK)\n else:\n book = Book.objects.filter(isbn=request.data['isbn']).get()\n serializer = BookManagementSerializer(data=book)\n if not serializer.is_valid():\n raise ValueError(serializer.errors)\n logger.debug('Success BookManagementView.list')\n logger.debug(serializer.data)\n return Response(data=serializer.data, status=HTTP_200_OK)\n except Exception as e:\n logger.error('Exception BookManagementView.list')\n logger.error(e)\n return Response(data={'error': e.args[0]}, status=HTTP_400_BAD_REQUEST)\n\n def create(self, request, *args, **kwargs):\n \"\"\"create book info :param request: book info :param args: :param kwargs: :return: The registered book info if successful. Otherwise an error message\"\"\"\n try:\n serializer = BookManagementSerializer(data=request.data)\n if not serializer.is_valid():\n raise ValueError(serializer.errors)\n publisher_serializer = PublisherSerializer(data={''})\n publisher_serializer.save()\n author_serializer = AuthorSerializer(data=serializer.validated_data)\n author_serializer.save()\n book_manage_serializer = BookManagementSerializer(data=serializer.validated_data)\n book_manage_serializer.save()\n if serializer.validated_data['series'] != '':\n series = Series.objects.filter(name=serializer.validated_data['series']).get()\n if len(series) == 0:\n series_serializer = SeriesSerializer(data=serializer.validated_data)\n series_serializer.save()\n series_book_serializer = SeriesBookSerializer(data=serializer.validated_data)\n series_book_serializer.save()\n serializer = BookManagementSerializer(data=serializer.validated_data)\n return Response(data=serializer.data, status=HTTP_201_CREATED)\n except RegisterError as e:\n logger.error('Exception BookManageViewSet.create')\n logger.error(e)\n return Response(data={'error': e.args[0]}, status=HTTP_400_BAD_REQUEST)\n", "source": "the_stack_v2_python_sparse", "source_path": "api/book/views.py", "source_repo": "kanade0404/nanary-api", "split": "val", "star_events_count": 0}
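
The list body in the record above follows a common Django REST framework try/validate/respond shape. Below is a minimal sketch of that pattern with hypothetical model and serializer names; one hedged correction relative to the record: a DRF serializer wraps an existing model instance positionally (Serializer(instance)), while data=... is for incoming payloads, so the sketch uses the instance form where the record passed data=book.

    from rest_framework.response import Response
    from rest_framework.status import HTTP_200_OK, HTTP_400_BAD_REQUEST

    def lookup_book(request, model, serializer_cls):
        # Sketch of the record's list() flow: normalize the ISBN, look the
        # book up, serialize it; model and serializer_cls are hypothetical.
        try:
            isbn = request.data['isbn'].replace('-', '')  # strip hyphens first
            book = model.objects.filter(isbn=isbn).first()
            if book is None:
                return Response(data={'error': 'unknown ISBN'},
                                status=HTTP_400_BAD_REQUEST)
            serializer = serializer_cls(book)  # instance form, not data=...
            return Response(data=serializer.data, status=HTTP_200_OK)
        except Exception as e:
            return Response(data={'error': str(e)}, status=HTTP_400_BAD_REQUEST)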
{"blob_id": "a8a687d14207be3b5f06580122c15ac89aa273dc", "bodies": ["res = []\nif not root:\n return res\nres.append(root.val)\nfor node in root.children:\n res.append(self.serialize(node))\nreturn res", "if not data:\n return None\nv = data[0]\nchildren = []\nfor c in data[1:]:\n children.append(self.deserialize(c))\nreturn Node(v, children)"], "bodies_text": "<|body_start_0|>\n res = []\n if not root:\n return res\n res.append(root.val)\n for node in root.children:\n res.append(self.serialize(node))\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n if not data:\n return None\n v = data[0]\n children = []\n for c in data[1:]:\n children.append(self.deserialize(c))\n return Node(v, children)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Codec", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root: Node):\n \"\"\"Encodes a tree to a single string. :type root: Node :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: Node\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n res = []\n if not root:\n return res\n res.append(root.val)\n for node in root.children:\n res.append(self.serialize(node))\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n if not data:\n return None\n v = data[0]\n children = []\n for c in data[1:]:\n children.append(self.deserialize(c))\n return Node(v, children)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000189", "length_bytes": 2699, "license_type": "no_license", "methods": [{"docstring": "Encodes a tree to a single string. :type root: Node :rtype: str", "name": "serialize", "signature": "def serialize(self, root: Node)"}, {"docstring": "Decodes your encoded data to tree. :type data: str :rtype: Node", "name": "deserialize", "signature": "def deserialize(self, data)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_021747", "prompt": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root: Node): Encodes a tree to a single string. :type root: Node :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: Node", "prompted_full_text": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root: Node): Encodes a tree to a single string. :type root: Node :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: Node\n\n<|skeleton|>\nclass Codec:\n\n def serialize(self, root: Node):\n \"\"\"Encodes a tree to a single string. :type root: Node :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. 
:type data: str :rtype: Node\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n res = []\n if not root:\n return res\n res.append(root.val)\n for node in root.children:\n res.append(self.serialize(node))\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n if not data:\n return None\n v = data[0]\n children = []\n for c in data[1:]:\n children.append(self.deserialize(c))\n return Node(v, children)\n<|end_body_1|>\n", "revision_id": "f96a2273c6831a8035e1adacfa452f73c599ae16", "skeleton": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root: Node):\n \"\"\"Encodes a tree to a single string. :type root: Node :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: Node\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Codec:\n def serialize(self, root: Node):\n \"\"\"Encodes a tree to a single string. :type root: Node :rtype: str\"\"\"\n res = []\n if not root:\n return res\n res.append(root.val)\n for node in root.children:\n res.append(self.serialize(node))\n return res\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: Node\"\"\"\n if not data:\n return None\n v = data[0]\n children = []\n for c in data[1:]:\n children.append(self.deserialize(c))\n return Node(v, children)\n", "source": "the_stack_v2_python_sparse", "source_path": "Python/428_SerializeandDeserializeNaryTree.py", "source_repo": "here0009/LeetCode", "split": "val", "star_events_count": 1}
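
Despite the ":rtype: str" docstrings, the Codec in the record above encodes an N-ary tree as nested Python lists of the form [val, child1, child2, ...]. A quick round-trip check, assuming the record's Codec is in scope along with the usual LeetCode-style Node(val, children):

    class Node:
        def __init__(self, val=None, children=None):
            self.val = val
            self.children = children if children is not None else []

    codec = Codec()  # the Codec class from the record above
    tree = Node(1, [Node(3, [Node(5), Node(6)]), Node(2), Node(4)])
    encoded = codec.serialize(tree)             # [1, [3, [5], [6]], [2], [4]]
    rebuilt = codec.deserialize(encoded)
    assert codec.serialize(rebuilt) == encoded  # structure survives round trip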
{"blob_id": "499d62fae37169d4f615d35183e14be80e9bdb85", "bodies": ["if not settings.BSYNCR_SERVER_HOST:\n message = 'SEED instance is not configured to run bsyncr analysis. Please contact the server administrator.'\n self.fail(message, logger)\n raise AnalysisPipelineException(message)\nvalidation_errors = _validate_bsyncr_config(Analysis.objects.get(id=self._analysis_id))\nif validation_errors:\n raise AnalysisPipelineException(f\"Unexpected error(s) while validating analysis configuration: {'; '.join(validation_errors)}\")\nprogress_data = self.get_progress_data()\nprogress_data.total = 3\nprogress_data.save()\nchain(task_create_analysis_property_views.si(self._analysis_id, property_view_ids), _prepare_all_properties.s(self._analysis_id), _finish_preparation.si(self._analysis_id, start_analysis)).apply_async()", "progress_data = self.get_progress_data()\nprogress_data.total = 3\nprogress_data.save()\nchain(_start_analysis.si(self._analysis_id), _process_results.s(self._analysis_id), _finish_analysis.si(self._analysis_id)).apply_async()"], "bodies_text": "<|body_start_0|>\n if not settings.BSYNCR_SERVER_HOST:\n message = 'SEED instance is not configured to run bsyncr analysis. Please contact the server administrator.'\n self.fail(message, logger)\n raise AnalysisPipelineException(message)\n validation_errors = _validate_bsyncr_config(Analysis.objects.get(id=self._analysis_id))\n if validation_errors:\n raise AnalysisPipelineException(f\"Unexpected error(s) while validating analysis configuration: {'; '.join(validation_errors)}\")\n progress_data = self.get_progress_data()\n progress_data.total = 3\n progress_data.save()\n chain(task_create_analysis_property_views.si(self._analysis_id, property_view_ids), _prepare_all_properties.s(self._analysis_id), _finish_preparation.si(self._analysis_id, start_analysis)).apply_async()\n<|end_body_0|>\n\n<|body_start_1|>\n progress_data = self.get_progress_data()\n progress_data.total = 3\n progress_data.save()\n chain(_start_analysis.si(self._analysis_id), _process_results.s(self._analysis_id), _finish_analysis.si(self._analysis_id)).apply_async()\n<|end_body_1|>\n", "class_docstring": "BsyncrPipeline is a class for preparing, running, and post processing the bsyncr analysis by implementing the AnalysisPipeline's abstract methods.", "class_name": "BsyncrPipeline", "detected_licenses": ["BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BsyncrPipeline:\n \"\"\"BsyncrPipeline is a class for preparing, running, and post processing the bsyncr analysis by implementing the AnalysisPipeline's abstract methods.\"\"\"\n\n def _prepare_analysis(self, property_view_ids, start_analysis=False):\n \"\"\"Internal implementation for preparing bsyncr analysis\"\"\"\n <|body_0|>\n\n def _start_analysis(self):\n \"\"\"Internal implementation for starting the bsyncr analysis\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not settings.BSYNCR_SERVER_HOST:\n message = 'SEED instance is not configured to run bsyncr analysis. 
Please contact the server administrator.'\n self.fail(message, logger)\n raise AnalysisPipelineException(message)\n validation_errors = _validate_bsyncr_config(Analysis.objects.get(id=self._analysis_id))\n if validation_errors:\n raise AnalysisPipelineException(f\"Unexpected error(s) while validating analysis configuration: {'; '.join(validation_errors)}\")\n progress_data = self.get_progress_data()\n progress_data.total = 3\n progress_data.save()\n chain(task_create_analysis_property_views.si(self._analysis_id, property_view_ids), _prepare_all_properties.s(self._analysis_id), _finish_preparation.si(self._analysis_id, start_analysis)).apply_async()\n<|end_body_0|>\n\n<|body_start_1|>\n progress_data = self.get_progress_data()\n progress_data.total = 3\n progress_data.save()\n chain(_start_analysis.si(self._analysis_id), _process_results.s(self._analysis_id), _finish_analysis.si(self._analysis_id)).apply_async()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000190", "length_bytes": 19975, "license_type": "permissive", "methods": [{"docstring": "Internal implementation for preparing bsyncr analysis", "name": "_prepare_analysis", "signature": "def _prepare_analysis(self, property_view_ids, start_analysis=False)"}, {"docstring": "Internal implementation for starting the bsyncr analysis", "name": "_start_analysis", "signature": "def _start_analysis(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_007816", "prompt": "Implement the Python class `BsyncrPipeline` described below.\n\nClass description:\nBsyncrPipeline is a class for preparing, running, and post processing the bsyncr analysis by implementing the AnalysisPipeline's abstract methods.\n\nMethod signatures and docstrings:\n- def _prepare_analysis(self, property_view_ids, start_analysis=False): Internal implementation for preparing bsyncr analysis\n- def _start_analysis(self): Internal implementation for starting the bsyncr analysis", "prompted_full_text": "Implement the Python class `BsyncrPipeline` described below.\n\nClass description:\nBsyncrPipeline is a class for preparing, running, and post processing the bsyncr analysis by implementing the AnalysisPipeline's abstract methods.\n\nMethod signatures and docstrings:\n- def _prepare_analysis(self, property_view_ids, start_analysis=False): Internal implementation for preparing bsyncr analysis\n- def _start_analysis(self): Internal implementation for starting the bsyncr analysis\n\n<|skeleton|>\nclass BsyncrPipeline:\n \"\"\"BsyncrPipeline is a class for preparing, running, and post processing the bsyncr analysis by implementing the AnalysisPipeline's abstract methods.\"\"\"\n\n def _prepare_analysis(self, property_view_ids, start_analysis=False):\n \"\"\"Internal implementation for preparing bsyncr analysis\"\"\"\n <|body_0|>\n\n def _start_analysis(self):\n \"\"\"Internal implementation for starting the bsyncr analysis\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not settings.BSYNCR_SERVER_HOST:\n message = 'SEED instance is not configured to run bsyncr analysis. 
Please contact the server administrator.'\n self.fail(message, logger)\n raise AnalysisPipelineException(message)\n validation_errors = _validate_bsyncr_config(Analysis.objects.get(id=self._analysis_id))\n if validation_errors:\n raise AnalysisPipelineException(f\"Unexpected error(s) while validating analysis configuration: {'; '.join(validation_errors)}\")\n progress_data = self.get_progress_data()\n progress_data.total = 3\n progress_data.save()\n chain(task_create_analysis_property_views.si(self._analysis_id, property_view_ids), _prepare_all_properties.s(self._analysis_id), _finish_preparation.si(self._analysis_id, start_analysis)).apply_async()\n<|end_body_0|>\n\n<|body_start_1|>\n progress_data = self.get_progress_data()\n progress_data.total = 3\n progress_data.save()\n chain(_start_analysis.si(self._analysis_id), _process_results.s(self._analysis_id), _finish_analysis.si(self._analysis_id)).apply_async()\n<|end_body_1|>\n", "revision_id": "680b6a2b45f3c568d779d8ac86553a0b08c384c8", "skeleton": "<|skeleton|>\nclass BsyncrPipeline:\n \"\"\"BsyncrPipeline is a class for preparing, running, and post processing the bsyncr analysis by implementing the AnalysisPipeline's abstract methods.\"\"\"\n\n def _prepare_analysis(self, property_view_ids, start_analysis=False):\n \"\"\"Internal implementation for preparing bsyncr analysis\"\"\"\n <|body_0|>\n\n def _start_analysis(self):\n \"\"\"Internal implementation for starting the bsyncr analysis\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class BsyncrPipeline:\n \"\"\"BsyncrPipeline is a class for preparing, running, and post processing the bsyncr analysis by implementing the AnalysisPipeline's abstract methods.\"\"\"\n\n def _prepare_analysis(self, property_view_ids, start_analysis=False):\n \"\"\"Internal implementation for preparing bsyncr analysis\"\"\"\n if not settings.BSYNCR_SERVER_HOST:\n message = 'SEED instance is not configured to run bsyncr analysis. Please contact the server administrator.'\n self.fail(message, logger)\n raise AnalysisPipelineException(message)\n validation_errors = _validate_bsyncr_config(Analysis.objects.get(id=self._analysis_id))\n if validation_errors:\n raise AnalysisPipelineException(f\"Unexpected error(s) while validating analysis configuration: {'; '.join(validation_errors)}\")\n progress_data = self.get_progress_data()\n progress_data.total = 3\n progress_data.save()\n chain(task_create_analysis_property_views.si(self._analysis_id, property_view_ids), _prepare_all_properties.s(self._analysis_id), _finish_preparation.si(self._analysis_id, start_analysis)).apply_async()\n\n def _start_analysis(self):\n \"\"\"Internal implementation for starting the bsyncr analysis\"\"\"\n progress_data = self.get_progress_data()\n progress_data.total = 3\n progress_data.save()\n chain(_start_analysis.si(self._analysis_id), _process_results.s(self._analysis_id), _finish_analysis.si(self._analysis_id)).apply_async()\n", "source": "the_stack_v2_python_sparse", "source_path": "seed/analysis_pipelines/bsyncr.py", "source_repo": "SEED-platform/seed", "split": "val", "star_events_count": 108}
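
Both bodies in the record above hinge on the difference between Celery's .si() and .s() signatures inside a chain: .si() builds an immutable signature that ignores the previous task's result, while .s() receives that result as its first argument. A minimal sketch of the idiom with invented task names and an in-memory broker; only the .si/.s semantics are the point.

    from celery import Celery, chain

    app = Celery('demo', broker='memory://')  # hypothetical in-memory broker

    @app.task
    def start(analysis_id):
        return {'analysis_id': analysis_id, 'raw': [1, 2, 3]}

    @app.task
    def process(results, analysis_id):
        # `results` is the return value of `start`, injected by the chain
        return {'analysis_id': analysis_id, 'total': sum(results['raw'])}

    @app.task
    def finish(analysis_id):
        print(f'analysis {analysis_id} done')

    workflow = chain(start.si(42), process.s(42), finish.si(42))
    # workflow.apply_async()  # would enqueue the three tasks in order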
{"blob_id": "8dc399961b337d8324ae2ef537fed33ca75f82cd", "bodies": ["super(WikiParser, self).__init__(base_url)\nself.inclusions = [doc_id] if doc_id else []\nself.registerInternalLinkHook('Include', self._hook_include)\nself.registerInternalLinkHook('I', self._hook_include)\nself.registerInternalLinkHook('Template', self._hook_template)\nself.registerInternalLinkHook('T', self._hook_template)", "text, data = ForParser.strip_fors(text)\ntext = parse_simple_syntax(text)\nhtml = super(WikiParser, self).parse(text, youtube_embeds=False, ui_component_embeds=False, **kwargs)\nhtml = ForParser.unstrip_fors(html, data)\nfor_parser = ForParser(html)\nfor_parser.expand_fors()\nhtml = for_parser.serialize(**kwargs)\nhtml = self.add_youtube_embeds(html)\nhtml = self.add_ui_component_embeds(html)\nreturn html", "message = _('The document \"%s\" does not exist.') % title\ninclude = get_object_fallback(Document, title, locale=self.locale)\nif not include or not include.current_revision:\n return message\nif include.id in parser.inclusions:\n return RECURSION_MESSAGE % title\nelse:\n parser.inclusions.append(include.id)\nret = parser.parse(include.current_revision.content, show_toc=False, locale=self.locale)\nparser.inclusions.pop()\nreturn ret", "params = title.split('|')\nshort_title = params.pop(0)\ntemplate_title = 'Template:' + short_title\nmessage = _('The template \"%s\" does not exist or has no approved revision.') % short_title\ntemplate = get_object_fallback(Document, template_title, locale=self.locale, is_template=True)\nif not template or not template.current_revision:\n return message\nif template.id in parser.inclusions:\n return RECURSION_MESSAGE % template_title\nelse:\n parser.inclusions.append(template.id)\nc = template.current_revision.content.rstrip()\nparsed = parser.parse(c, show_toc=False, attributes=ALLOWED_ATTRIBUTES, locale=self.locale)\nparser.inclusions.pop()\nif '\\n' not in c:\n parsed = parsed.replace('
<p>', '')\n parsed = parsed.replace('</p>
', '')\nreturn _format_template_content(parsed, _build_template_params(params))"], "bodies_text": "<|body_start_0|>\n super(WikiParser, self).__init__(base_url)\n self.inclusions = [doc_id] if doc_id else []\n self.registerInternalLinkHook('Include', self._hook_include)\n self.registerInternalLinkHook('I', self._hook_include)\n self.registerInternalLinkHook('Template', self._hook_template)\n self.registerInternalLinkHook('T', self._hook_template)\n<|end_body_0|>\n\n<|body_start_1|>\n text, data = ForParser.strip_fors(text)\n text = parse_simple_syntax(text)\n html = super(WikiParser, self).parse(text, youtube_embeds=False, ui_component_embeds=False, **kwargs)\n html = ForParser.unstrip_fors(html, data)\n for_parser = ForParser(html)\n for_parser.expand_fors()\n html = for_parser.serialize(**kwargs)\n html = self.add_youtube_embeds(html)\n html = self.add_ui_component_embeds(html)\n return html\n<|end_body_1|>\n\n<|body_start_2|>\n message = _('The document \"%s\" does not exist.') % title\n include = get_object_fallback(Document, title, locale=self.locale)\n if not include or not include.current_revision:\n return message\n if include.id in parser.inclusions:\n return RECURSION_MESSAGE % title\n else:\n parser.inclusions.append(include.id)\n ret = parser.parse(include.current_revision.content, show_toc=False, locale=self.locale)\n parser.inclusions.pop()\n return ret\n<|end_body_2|>\n\n<|body_start_3|>\n params = title.split('|')\n short_title = params.pop(0)\n template_title = 'Template:' + short_title\n message = _('The template \"%s\" does not exist or has no approved revision.') % short_title\n template = get_object_fallback(Document, template_title, locale=self.locale, is_template=True)\n if not template or not template.current_revision:\n return message\n if template.id in parser.inclusions:\n return RECURSION_MESSAGE % template_title\n else:\n parser.inclusions.append(template.id)\n c = template.current_revision.content.rstrip()\n parsed = parser.parse(c, show_toc=False, attributes=ALLOWED_ATTRIBUTES, locale=self.locale)\n parser.inclusions.pop()\n if '\\n' not in c:\n parsed = parsed.replace('
<p>', '')\n parsed = parsed.replace('</p>
', '')\n return _format_template_content(parsed, _build_template_params(params))\n<|end_body_3|>\n", "class_docstring": "An extension of the parser from the forums adding more crazy features {for} tags, inclusions, and templates--oh my!", "class_name": "WikiParser", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass WikiParser:\n \"\"\"An extension of the parser from the forums adding more crazy features {for} tags, inclusions, and templates--oh my!\"\"\"\n\n def __init__(self, base_url=None, doc_id=None):\n \"\"\"doc_id -- If you want to be nice, pass the ID of the Document you are rendering. This will make recursive inclusions fail immediately rather than after the first round of recursion.\"\"\"\n <|body_0|>\n\n def parse(self, text, **kwargs):\n \"\"\"Wrap SUMO's parse() to support additional wiki-only features.\"\"\"\n <|body_1|>\n\n def _hook_include(self, parser, space, title):\n \"\"\"Returns the document's parsed content.\"\"\"\n <|body_2|>\n\n def _hook_template(self, parser, space, title):\n \"\"\"Handles Template:Template name, formatting the content using given args\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(WikiParser, self).__init__(base_url)\n self.inclusions = [doc_id] if doc_id else []\n self.registerInternalLinkHook('Include', self._hook_include)\n self.registerInternalLinkHook('I', self._hook_include)\n self.registerInternalLinkHook('Template', self._hook_template)\n self.registerInternalLinkHook('T', self._hook_template)\n<|end_body_0|>\n\n<|body_start_1|>\n text, data = ForParser.strip_fors(text)\n text = parse_simple_syntax(text)\n html = super(WikiParser, self).parse(text, youtube_embeds=False, ui_component_embeds=False, **kwargs)\n html = ForParser.unstrip_fors(html, data)\n for_parser = ForParser(html)\n for_parser.expand_fors()\n html = for_parser.serialize(**kwargs)\n html = self.add_youtube_embeds(html)\n html = self.add_ui_component_embeds(html)\n return html\n<|end_body_1|>\n\n<|body_start_2|>\n message = _('The document \"%s\" does not exist.') % title\n include = get_object_fallback(Document, title, locale=self.locale)\n if not include or not include.current_revision:\n return message\n if include.id in parser.inclusions:\n return RECURSION_MESSAGE % title\n else:\n parser.inclusions.append(include.id)\n ret = parser.parse(include.current_revision.content, show_toc=False, locale=self.locale)\n parser.inclusions.pop()\n return ret\n<|end_body_2|>\n\n<|body_start_3|>\n params = title.split('|')\n short_title = params.pop(0)\n template_title = 'Template:' + short_title\n message = _('The template \"%s\" does not exist or has no approved revision.') % short_title\n template = get_object_fallback(Document, template_title, locale=self.locale, is_template=True)\n if not template or not template.current_revision:\n return message\n if template.id in parser.inclusions:\n return RECURSION_MESSAGE % template_title\n else:\n parser.inclusions.append(template.id)\n c = template.current_revision.content.rstrip()\n parsed = parser.parse(c, show_toc=False, attributes=ALLOWED_ATTRIBUTES, locale=self.locale)\n parser.inclusions.pop()\n if '\\n' not in c:\n parsed = parsed.replace('
<p>', '')\n parsed = parsed.replace('</p>
', '')\n return _format_template_content(parsed, _build_template_params(params))\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000191", "length_bytes": 19586, "license_type": "permissive", "methods": [{"docstring": "doc_id -- If you want to be nice, pass the ID of the Document you are rendering. This will make recursive inclusions fail immediately rather than after the first round of recursion.", "name": "__init__", "signature": "def __init__(self, base_url=None, doc_id=None)"}, {"docstring": "Wrap SUMO's parse() to support additional wiki-only features.", "name": "parse", "signature": "def parse(self, text, **kwargs)"}, {"docstring": "Returns the document's parsed content.", "name": "_hook_include", "signature": "def _hook_include(self, parser, space, title)"}, {"docstring": "Handles Template:Template name, formatting the content using given args", "name": "_hook_template", "signature": "def _hook_template(self, parser, space, title)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_009359", "prompt": "Implement the Python class `WikiParser` described below.\n\nClass description:\nAn extension of the parser from the forums adding more crazy features {for} tags, inclusions, and templates--oh my!\n\nMethod signatures and docstrings:\n- def __init__(self, base_url=None, doc_id=None): doc_id -- If you want to be nice, pass the ID of the Document you are rendering. This will make recursive inclusions fail immediately rather than after the first round of recursion.\n- def parse(self, text, **kwargs): Wrap SUMO's parse() to support additional wiki-only features.\n- def _hook_include(self, parser, space, title): Returns the document's parsed content.\n- def _hook_template(self, parser, space, title): Handles Template:Template name, formatting the content using given args", "prompted_full_text": "Implement the Python class `WikiParser` described below.\n\nClass description:\nAn extension of the parser from the forums adding more crazy features {for} tags, inclusions, and templates--oh my!\n\nMethod signatures and docstrings:\n- def __init__(self, base_url=None, doc_id=None): doc_id -- If you want to be nice, pass the ID of the Document you are rendering. This will make recursive inclusions fail immediately rather than after the first round of recursion.\n- def parse(self, text, **kwargs): Wrap SUMO's parse() to support additional wiki-only features.\n- def _hook_include(self, parser, space, title): Returns the document's parsed content.\n- def _hook_template(self, parser, space, title): Handles Template:Template name, formatting the content using given args\n\n<|skeleton|>\nclass WikiParser:\n \"\"\"An extension of the parser from the forums adding more crazy features {for} tags, inclusions, and templates--oh my!\"\"\"\n\n def __init__(self, base_url=None, doc_id=None):\n \"\"\"doc_id -- If you want to be nice, pass the ID of the Document you are rendering. 
This will make recursive inclusions fail immediately rather than after the first round of recursion.\"\"\"\n <|body_0|>\n\n def parse(self, text, **kwargs):\n \"\"\"Wrap SUMO's parse() to support additional wiki-only features.\"\"\"\n <|body_1|>\n\n def _hook_include(self, parser, space, title):\n \"\"\"Returns the document's parsed content.\"\"\"\n <|body_2|>\n\n def _hook_template(self, parser, space, title):\n \"\"\"Handles Template:Template name, formatting the content using given args\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(WikiParser, self).__init__(base_url)\n self.inclusions = [doc_id] if doc_id else []\n self.registerInternalLinkHook('Include', self._hook_include)\n self.registerInternalLinkHook('I', self._hook_include)\n self.registerInternalLinkHook('Template', self._hook_template)\n self.registerInternalLinkHook('T', self._hook_template)\n<|end_body_0|>\n\n<|body_start_1|>\n text, data = ForParser.strip_fors(text)\n text = parse_simple_syntax(text)\n html = super(WikiParser, self).parse(text, youtube_embeds=False, ui_component_embeds=False, **kwargs)\n html = ForParser.unstrip_fors(html, data)\n for_parser = ForParser(html)\n for_parser.expand_fors()\n html = for_parser.serialize(**kwargs)\n html = self.add_youtube_embeds(html)\n html = self.add_ui_component_embeds(html)\n return html\n<|end_body_1|>\n\n<|body_start_2|>\n message = _('The document \"%s\" does not exist.') % title\n include = get_object_fallback(Document, title, locale=self.locale)\n if not include or not include.current_revision:\n return message\n if include.id in parser.inclusions:\n return RECURSION_MESSAGE % title\n else:\n parser.inclusions.append(include.id)\n ret = parser.parse(include.current_revision.content, show_toc=False, locale=self.locale)\n parser.inclusions.pop()\n return ret\n<|end_body_2|>\n\n<|body_start_3|>\n params = title.split('|')\n short_title = params.pop(0)\n template_title = 'Template:' + short_title\n message = _('The template \"%s\" does not exist or has no approved revision.') % short_title\n template = get_object_fallback(Document, template_title, locale=self.locale, is_template=True)\n if not template or not template.current_revision:\n return message\n if template.id in parser.inclusions:\n return RECURSION_MESSAGE % template_title\n else:\n parser.inclusions.append(template.id)\n c = template.current_revision.content.rstrip()\n parsed = parser.parse(c, show_toc=False, attributes=ALLOWED_ATTRIBUTES, locale=self.locale)\n parser.inclusions.pop()\n if '\\n' not in c:\n parsed = parsed.replace('
<p>', '')\n parsed = parsed.replace('</p>
', '')\n return _format_template_content(parsed, _build_template_params(params))\n<|end_body_3|>\n", "revision_id": "67ec527bfc32c715bf9f29d5e01362c4903aebd2", "skeleton": "<|skeleton|>\nclass WikiParser:\n \"\"\"An extension of the parser from the forums adding more crazy features {for} tags, inclusions, and templates--oh my!\"\"\"\n\n def __init__(self, base_url=None, doc_id=None):\n \"\"\"doc_id -- If you want to be nice, pass the ID of the Document you are rendering. This will make recursive inclusions fail immediately rather than after the first round of recursion.\"\"\"\n <|body_0|>\n\n def parse(self, text, **kwargs):\n \"\"\"Wrap SUMO's parse() to support additional wiki-only features.\"\"\"\n <|body_1|>\n\n def _hook_include(self, parser, space, title):\n \"\"\"Returns the document's parsed content.\"\"\"\n <|body_2|>\n\n def _hook_template(self, parser, space, title):\n \"\"\"Handles Template:Template name, formatting the content using given args\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class WikiParser:\n \"\"\"An extension of the parser from the forums adding more crazy features {for} tags, inclusions, and templates--oh my!\"\"\"\n\n def __init__(self, base_url=None, doc_id=None):\n \"\"\"doc_id -- If you want to be nice, pass the ID of the Document you are rendering. This will make recursive inclusions fail immediately rather than after the first round of recursion.\"\"\"\n super(WikiParser, self).__init__(base_url)\n self.inclusions = [doc_id] if doc_id else []\n self.registerInternalLinkHook('Include', self._hook_include)\n self.registerInternalLinkHook('I', self._hook_include)\n self.registerInternalLinkHook('Template', self._hook_template)\n self.registerInternalLinkHook('T', self._hook_template)\n\n def parse(self, text, **kwargs):\n \"\"\"Wrap SUMO's parse() to support additional wiki-only features.\"\"\"\n text, data = ForParser.strip_fors(text)\n text = parse_simple_syntax(text)\n html = super(WikiParser, self).parse(text, youtube_embeds=False, ui_component_embeds=False, **kwargs)\n html = ForParser.unstrip_fors(html, data)\n for_parser = ForParser(html)\n for_parser.expand_fors()\n html = for_parser.serialize(**kwargs)\n html = self.add_youtube_embeds(html)\n html = self.add_ui_component_embeds(html)\n return html\n\n def _hook_include(self, parser, space, title):\n \"\"\"Returns the document's parsed content.\"\"\"\n message = _('The document \"%s\" does not exist.') % title\n include = get_object_fallback(Document, title, locale=self.locale)\n if not include or not include.current_revision:\n return message\n if include.id in parser.inclusions:\n return RECURSION_MESSAGE % title\n else:\n parser.inclusions.append(include.id)\n ret = parser.parse(include.current_revision.content, show_toc=False, locale=self.locale)\n parser.inclusions.pop()\n return ret\n\n def _hook_template(self, parser, space, title):\n \"\"\"Handles Template:Template name, formatting the content using given args\"\"\"\n params = title.split('|')\n short_title = params.pop(0)\n template_title = 'Template:' + short_title\n message = _('The template \"%s\" does not exist or has no approved revision.') % short_title\n template = get_object_fallback(Document, template_title, locale=self.locale, is_template=True)\n if not template or not template.current_revision:\n return message\n if template.id in parser.inclusions:\n return RECURSION_MESSAGE % 
template_title\n else:\n parser.inclusions.append(template.id)\n c = template.current_revision.content.rstrip()\n parsed = parser.parse(c, show_toc=False, attributes=ALLOWED_ATTRIBUTES, locale=self.locale)\n parser.inclusions.pop()\n if '\\n' not in c:\n parsed = parsed.replace('
<p>', '')\n parsed = parsed.replace('</p>
', '')\n return _format_template_content(parsed, _build_template_params(params))\n", "source": "the_stack_v2_python_sparse", "source_path": "kitsune/wiki/parser.py", "source_repo": "mozilla/kitsune", "split": "val", "star_events_count": 1218}
{"blob_id": "52199d5344bb74983cb53ee0493b9ae79490b3d4", "bodies": ["username = request.user.get_username()\nserializer = ViewSerializer(username=username, repo_base=repo_base, request=request)\nviews = serializer.list_views(repo_name)\nreturn Response(views, status=status.HTTP_200_OK)", "username = request.user.get_username()\nserializer = ViewSerializer(username=username, repo_base=repo_base)\nview_name = request.data['view_name']\nquery = request.data['query']\nserializer.create_view(repo_name, view_name, query)\nview = serializer.describe_view(repo_name, view_name)\nreturn Response(view, status=status.HTTP_201_CREATED)"], "bodies_text": "<|body_start_0|>\n username = request.user.get_username()\n serializer = ViewSerializer(username=username, repo_base=repo_base, request=request)\n views = serializer.list_views(repo_name)\n return Response(views, status=status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n username = request.user.get_username()\n serializer = ViewSerializer(username=username, repo_base=repo_base)\n view_name = request.data['view_name']\n query = request.data['query']\n serializer.create_view(repo_name, view_name, query)\n view = serializer.describe_view(repo_name, view_name)\n return Response(view, status=status.HTTP_201_CREATED)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Views", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Views:\n\n def get(self, request, repo_base, repo_name, format=None):\n \"\"\"Views in a repo\"\"\"\n <|body_0|>\n\n def post(self, request, repo_base, repo_name, format=None):\n \"\"\"Create a view in a repo --- omit_serializer: true parameters: - name: view_name in: body type: string description: name of the the view to be created required: true - name: query in: body type: string description: select query to create the view from required: true\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n username = request.user.get_username()\n serializer = ViewSerializer(username=username, repo_base=repo_base, request=request)\n views = serializer.list_views(repo_name)\n return Response(views, status=status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n username = request.user.get_username()\n serializer = ViewSerializer(username=username, repo_base=repo_base)\n view_name = request.data['view_name']\n query = request.data['query']\n serializer.create_view(repo_name, view_name, query)\n view = serializer.describe_view(repo_name, view_name)\n return Response(view, status=status.HTTP_201_CREATED)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000192", "length_bytes": 31465, "license_type": "permissive", "methods": [{"docstring": "Views in a repo", "name": "get", "signature": "def get(self, request, repo_base, repo_name, format=None)"}, {"docstring": "Create a view in a repo --- omit_serializer: true parameters: - name: view_name in: body type: string description: name of the the view to be created required: true - name: query in: body type: string description: select query to create the view from required: true", "name": "post", "signature": "def post(self, request, repo_base, repo_name, format=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_018643", "prompt": "Implement the Python class `Views` described below.\n\nClass description:\nImplement the Views class.\n\nMethod signatures and docstrings:\n- def get(self, request, repo_base, repo_name, format=None): Views in a repo\n- def post(self, request, repo_base, 
repo_name, format=None): Create a view in a repo --- omit_serializer: true parameters: - name: view_name in: body type: string description: name of the the view to be created required: true - name: query in: body type: string description: select query to create the view from required: true", "prompted_full_text": "Implement the Python class `Views` described below.\n\nClass description:\nImplement the Views class.\n\nMethod signatures and docstrings:\n- def get(self, request, repo_base, repo_name, format=None): Views in a repo\n- def post(self, request, repo_base, repo_name, format=None): Create a view in a repo --- omit_serializer: true parameters: - name: view_name in: body type: string description: name of the the view to be created required: true - name: query in: body type: string description: select query to create the view from required: true\n\n<|skeleton|>\nclass Views:\n\n def get(self, request, repo_base, repo_name, format=None):\n \"\"\"Views in a repo\"\"\"\n <|body_0|>\n\n def post(self, request, repo_base, repo_name, format=None):\n \"\"\"Create a view in a repo --- omit_serializer: true parameters: - name: view_name in: body type: string description: name of the the view to be created required: true - name: query in: body type: string description: select query to create the view from required: true\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n username = request.user.get_username()\n serializer = ViewSerializer(username=username, repo_base=repo_base, request=request)\n views = serializer.list_views(repo_name)\n return Response(views, status=status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n username = request.user.get_username()\n serializer = ViewSerializer(username=username, repo_base=repo_base)\n view_name = request.data['view_name']\n query = request.data['query']\n serializer.create_view(repo_name, view_name, query)\n view = serializer.describe_view(repo_name, view_name)\n return Response(view, status=status.HTTP_201_CREATED)\n<|end_body_1|>\n", "revision_id": "f066b472c2b66cc3b868bbe433aed2d4557aea32", "skeleton": "<|skeleton|>\nclass Views:\n\n def get(self, request, repo_base, repo_name, format=None):\n \"\"\"Views in a repo\"\"\"\n <|body_0|>\n\n def post(self, request, repo_base, repo_name, format=None):\n \"\"\"Create a view in a repo --- omit_serializer: true parameters: - name: view_name in: body type: string description: name of the the view to be created required: true - name: query in: body type: string description: select query to create the view from required: true\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Views:\n def get(self, request, repo_base, repo_name, format=None):\n \"\"\"Views in a repo\"\"\"\n username = request.user.get_username()\n serializer = ViewSerializer(username=username, repo_base=repo_base, request=request)\n views = serializer.list_views(repo_name)\n return Response(views, status=status.HTTP_200_OK)\n\n def post(self, request, repo_base, repo_name, format=None):\n \"\"\"Create a view in a repo --- omit_serializer: true parameters: - name: view_name in: body type: string description: name of the the view to be created required: true - name: query in: body type: string description: select query to create the view from required: true\"\"\"\n username = request.user.get_username()\n serializer = ViewSerializer(username=username, repo_base=repo_base)\n view_name 
= request.data['view_name']\n query = request.data['query']\n serializer.create_view(repo_name, view_name, query)\n view = serializer.describe_view(repo_name, view_name)\n return Response(view, status=status.HTTP_201_CREATED)\n", "source": "the_stack_v2_python_sparse", "source_path": "src/api/views.py", "source_repo": "datahuborg/datahub", "split": "val", "star_events_count": 199}
{"blob_id": "0c750e7f49c004aa1289ccb48a5e217f86841afa", "bodies": ["dict_nums = set()\nfor i in range(k):\n if nums[i] in dict_nums:\n return True\n dict_nums.add(nums[i])\n print(i, k, dict_nums)\nfor i in range(k, len(nums)):\n if nums[i] in dict_nums:\n return True\n dict_nums.add(nums[i])\n print(i, k, dict_nums)\n dict_nums.discard(nums[i - k])\n print(i, k, dict_nums)\nreturn False", "memo = set()\nfor i, n in enumerate(nums):\n if i - k > 0:\n memo.remove(nums[i - k - 1])\n if n in memo:\n return True\n memo.add(n)\nreturn False"], "bodies_text": "<|body_start_0|>\n dict_nums = set()\n for i in range(k):\n if nums[i] in dict_nums:\n return True\n dict_nums.add(nums[i])\n print(i, k, dict_nums)\n for i in range(k, len(nums)):\n if nums[i] in dict_nums:\n return True\n dict_nums.add(nums[i])\n print(i, k, dict_nums)\n dict_nums.discard(nums[i - k])\n print(i, k, dict_nums)\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n memo = set()\n for i, n in enumerate(nums):\n if i - k > 0:\n memo.remove(nums[i - k - 1])\n if n in memo:\n return True\n memo.add(n)\n return False\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def containsNearbyDuplicate(self, nums, k):\n \"\"\":type nums: List[int] :type k: int :rtype: bool\"\"\"\n <|body_0|>\n\n def containsNearbyDuplicate(self, nums, k):\n \"\"\":type nums: List[int] :type k: int :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n dict_nums = set()\n for i in range(k):\n if nums[i] in dict_nums:\n return True\n dict_nums.add(nums[i])\n print(i, k, dict_nums)\n for i in range(k, len(nums)):\n if nums[i] in dict_nums:\n return True\n dict_nums.add(nums[i])\n print(i, k, dict_nums)\n dict_nums.discard(nums[i - k])\n print(i, k, dict_nums)\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n memo = set()\n for i, n in enumerate(nums):\n if i - k > 0:\n memo.remove(nums[i - k - 1])\n if n in memo:\n return True\n memo.add(n)\n return False\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000193", "length_bytes": 1476, "license_type": "no_license", "methods": [{"docstring": ":type nums: List[int] :type k: int :rtype: bool", "name": "containsNearbyDuplicate", "signature": "def containsNearbyDuplicate(self, nums, k)"}, {"docstring": ":type nums: List[int] :type k: int :rtype: bool", "name": "containsNearbyDuplicate", "signature": "def containsNearbyDuplicate(self, nums, k)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_034816", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def containsNearbyDuplicate(self, nums, k): :type nums: List[int] :type k: int :rtype: bool\n- def containsNearbyDuplicate(self, nums, k): :type nums: List[int] :type k: int :rtype: bool", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def containsNearbyDuplicate(self, nums, k): :type nums: List[int] :type k: int :rtype: bool\n- def containsNearbyDuplicate(self, nums, k): :type nums: List[int] :type k: int :rtype: bool\n\n<|skeleton|>\nclass Solution:\n\n def containsNearbyDuplicate(self, nums, k):\n \"\"\":type nums: List[int] :type k: int :rtype: bool\"\"\"\n <|body_0|>\n\n def containsNearbyDuplicate(self, nums, k):\n \"\"\":type nums: 
List[int] :type k: int :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n dict_nums = set()\n for i in range(k):\n if nums[i] in dict_nums:\n return True\n dict_nums.add(nums[i])\n print(i, k, dict_nums)\n for i in range(k, len(nums)):\n if nums[i] in dict_nums:\n return True\n dict_nums.add(nums[i])\n print(i, k, dict_nums)\n dict_nums.discard(nums[i - k])\n print(i, k, dict_nums)\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n memo = set()\n for i, n in enumerate(nums):\n if i - k > 0:\n memo.remove(nums[i - k - 1])\n if n in memo:\n return True\n memo.add(n)\n return False\n<|end_body_1|>\n", "revision_id": "f3fc71f344cd758cfce77f16ab72992c99ab288e", "skeleton": "<|skeleton|>\nclass Solution:\n\n def containsNearbyDuplicate(self, nums, k):\n \"\"\":type nums: List[int] :type k: int :rtype: bool\"\"\"\n <|body_0|>\n\n def containsNearbyDuplicate(self, nums, k):\n \"\"\":type nums: List[int] :type k: int :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def containsNearbyDuplicate(self, nums, k):\n \"\"\":type nums: List[int] :type k: int :rtype: bool\"\"\"\n dict_nums = set()\n for i in range(k):\n if nums[i] in dict_nums:\n return True\n dict_nums.add(nums[i])\n print(i, k, dict_nums)\n for i in range(k, len(nums)):\n if nums[i] in dict_nums:\n return True\n dict_nums.add(nums[i])\n print(i, k, dict_nums)\n dict_nums.discard(nums[i - k])\n print(i, k, dict_nums)\n return False\n\n def containsNearbyDuplicate(self, nums, k):\n \"\"\":type nums: List[int] :type k: int :rtype: bool\"\"\"\n memo = set()\n for i, n in enumerate(nums):\n if i - k > 0:\n memo.remove(nums[i - k - 1])\n if n in memo:\n return True\n memo.add(n)\n return False\n", "source": "the_stack_v2_python_sparse", "source_path": "219_contain_duplicate.py", "source_repo": "jennyChing/leetCode", "split": "val", "star_events_count": 2}
{"blob_id": "3b0401d4e87768b1f6e20eb4f30eefb933cefd9e", "bodies": ["self.function = function\nself.var = var\nself.finite_support = np.isfinite(support)\nself.support = support / np.sqrt(self.var)", "if self.finite_support:\n return self.support * bw\nelse:\n\n def f(x):\n return self.evaluate(x, bw=bw) - atol\n try:\n xtol = 0.001\n ans = brentq(f, a=0, b=8 * bw, xtol=xtol, full_output=False)\n return ans + xtol\n except ValueError:\n msg = 'Unable to solve for support numerically. Use a ' + 'kernel with finite support or scale data to smaller bw.'\n raise ValueError(msg)", "if isinstance(x, numbers.Number):\n x = np.asarray_chkfinite([x])\nelse:\n x = np.asarray_chkfinite(x)\nif len(x.shape) == 1:\n x = x.reshape(-1, 1)\nreal_bw = bw / np.sqrt(self.var)\nobs, dims = x.shape\nvolume_func = functools.partial(volume_unit_ball, p=norm)\nif dims > 1:\n distances = p_norm(x, norm).ravel()\nelse:\n distances = np.abs(x).ravel()\nreturn self.function(distances / real_bw, dims) / (real_bw ** dims * volume_func(dims))"], "bodies_text": "<|body_start_0|>\n self.function = function\n self.var = var\n self.finite_support = np.isfinite(support)\n self.support = support / np.sqrt(self.var)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.finite_support:\n return self.support * bw\n else:\n\n def f(x):\n return self.evaluate(x, bw=bw) - atol\n try:\n xtol = 0.001\n ans = brentq(f, a=0, b=8 * bw, xtol=xtol, full_output=False)\n return ans + xtol\n except ValueError:\n msg = 'Unable to solve for support numerically. Use a ' + 'kernel with finite support or scale data to smaller bw.'\n raise ValueError(msg)\n<|end_body_1|>\n\n<|body_start_2|>\n if isinstance(x, numbers.Number):\n x = np.asarray_chkfinite([x])\n else:\n x = np.asarray_chkfinite(x)\n if len(x.shape) == 1:\n x = x.reshape(-1, 1)\n real_bw = bw / np.sqrt(self.var)\n obs, dims = x.shape\n volume_func = functools.partial(volume_unit_ball, p=norm)\n if dims > 1:\n distances = p_norm(x, norm).ravel()\n else:\n distances = np.abs(x).ravel()\n return self.function(distances / real_bw, dims) / (real_bw ** dims * volume_func(dims))\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Kernel", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Kernel:\n\n def __init__(self, function, var=1, support=3):\n \"\"\"Initialize a new kernel function. function: callable, numpy.arr -> numpy.arr, should integrate to 1 expected_value : peak, typically 0 support: support of the function. Example ------- >>> from scipy.special import gamma >>> # Normalized function of x >>> def exp(x, dims=1): ... normalization = gamma(dims) * dims ... return np.exp(-x) / normalization >>> kernel = Kernel(exp, var=4, support=np.inf) >>> # The function is scaled so that the standard deviation (bw) = 1 >>> kernel(0, bw=1, norm=2)[0] > kernel(1, bw=1, norm=2)[0] True >>> np.allclose(kernel(np.array([0, 1, 2])), kernel([0, 1, 2])) True >>> np.allclose(kernel(0), kernel([0])) True >>> np.allclose(kernel(0), kernel.evaluate(0)) True\"\"\"\n <|body_0|>\n\n def practical_support(self, bw, atol=0.0001):\n \"\"\"Return the support for practical purposes. Used to find a support value for computations for kernel functions without finite (bounded) support. Examples -------- >>> kernel = Kernel(gaussian, var=1, support=np.inf) >>> kernel.practical_support(bw=1) 3.8994... >>> kernel.practical_support(bw=2) 7.4331...\"\"\"\n <|body_1|>\n\n def evaluate(self, x, bw=1, norm=2):\n \"\"\"Evaluate the kernel. 
Parameters ---------- x : array-like Should have shape (obs, dims). bw : array-like Must have shape (obs, ), or float.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.function = function\n self.var = var\n self.finite_support = np.isfinite(support)\n self.support = support / np.sqrt(self.var)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.finite_support:\n return self.support * bw\n else:\n\n def f(x):\n return self.evaluate(x, bw=bw) - atol\n try:\n xtol = 0.001\n ans = brentq(f, a=0, b=8 * bw, xtol=xtol, full_output=False)\n return ans + xtol\n except ValueError:\n msg = 'Unable to solve for support numerically. Use a ' + 'kernel with finite support or scale data to smaller bw.'\n raise ValueError(msg)\n<|end_body_1|>\n\n<|body_start_2|>\n if isinstance(x, numbers.Number):\n x = np.asarray_chkfinite([x])\n else:\n x = np.asarray_chkfinite(x)\n if len(x.shape) == 1:\n x = x.reshape(-1, 1)\n real_bw = bw / np.sqrt(self.var)\n obs, dims = x.shape\n volume_func = functools.partial(volume_unit_ball, p=norm)\n if dims > 1:\n distances = p_norm(x, norm).ravel()\n else:\n distances = np.abs(x).ravel()\n return self.function(distances / real_bw, dims) / (real_bw ** dims * volume_func(dims))\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000194", "length_bytes": 10297, "license_type": "permissive", "methods": [{"docstring": "Initialize a new kernel function. function: callable, numpy.arr -> numpy.arr, should integrate to 1 expected_value : peak, typically 0 support: support of the function. Example ------- >>> from scipy.special import gamma >>> # Normalized function of x >>> def exp(x, dims=1): ... normalization = gamma(dims) * dims ... return np.exp(-x) / normalization >>> kernel = Kernel(exp, var=4, support=np.inf) >>> # The function is scaled so that the standard deviation (bw) = 1 >>> kernel(0, bw=1, norm=2)[0] > kernel(1, bw=1, norm=2)[0] True >>> np.allclose(kernel(np.array([0, 1, 2])), kernel([0, 1, 2])) True >>> np.allclose(kernel(0), kernel([0])) True >>> np.allclose(kernel(0), kernel.evaluate(0)) True", "name": "__init__", "signature": "def __init__(self, function, var=1, support=3)"}, {"docstring": "Return the support for practical purposes. Used to find a support value for computations for kernel functions without finite (bounded) support. Examples -------- >>> kernel = Kernel(gaussian, var=1, support=np.inf) >>> kernel.practical_support(bw=1) 3.8994... >>> kernel.practical_support(bw=2) 7.4331...", "name": "practical_support", "signature": "def practical_support(self, bw, atol=0.0001)"}, {"docstring": "Evaluate the kernel. Parameters ---------- x : array-like Should have shape (obs, dims). bw : array-like Must have shape (obs, ), or float.", "name": "evaluate", "signature": "def evaluate(self, x, bw=1, norm=2)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_019528", "prompt": "Implement the Python class `Kernel` described below.\n\nClass description:\nImplement the Kernel class.\n\nMethod signatures and docstrings:\n- def __init__(self, function, var=1, support=3): Initialize a new kernel function. function: callable, numpy.arr -> numpy.arr, should integrate to 1 expected_value : peak, typically 0 support: support of the function. Example ------- >>> from scipy.special import gamma >>> # Normalized function of x >>> def exp(x, dims=1): ... normalization = gamma(dims) * dims ... 
return np.exp(-x) / normalization >>> kernel = Kernel(exp, var=4, support=np.inf) >>> # The function is scaled so that the standard deviation (bw) = 1 >>> kernel(0, bw=1, norm=2)[0] > kernel(1, bw=1, norm=2)[0] True >>> np.allclose(kernel(np.array([0, 1, 2])), kernel([0, 1, 2])) True >>> np.allclose(kernel(0), kernel([0])) True >>> np.allclose(kernel(0), kernel.evaluate(0)) True\n- def practical_support(self, bw, atol=0.0001): Return the support for practical purposes. Used to find a support value for computations for kernel functions without finite (bounded) support. Examples -------- >>> kernel = Kernel(gaussian, var=1, support=np.inf) >>> kernel.practical_support(bw=1) 3.8994... >>> kernel.practical_support(bw=2) 7.4331...\n- def evaluate(self, x, bw=1, norm=2): Evaluate the kernel. Parameters ---------- x : array-like Should have shape (obs, dims). bw : array-like Must have shape (obs, ), or float.", "prompted_full_text": "Implement the Python class `Kernel` described below.\n\nClass description:\nImplement the Kernel class.\n\nMethod signatures and docstrings:\n- def __init__(self, function, var=1, support=3): Initialize a new kernel function. function: callable, numpy.arr -> numpy.arr, should integrate to 1 expected_value : peak, typically 0 support: support of the function. Example ------- >>> from scipy.special import gamma >>> # Normalized function of x >>> def exp(x, dims=1): ... normalization = gamma(dims) * dims ... return np.exp(-x) / normalization >>> kernel = Kernel(exp, var=4, support=np.inf) >>> # The function is scaled so that the standard deviation (bw) = 1 >>> kernel(0, bw=1, norm=2)[0] > kernel(1, bw=1, norm=2)[0] True >>> np.allclose(kernel(np.array([0, 1, 2])), kernel([0, 1, 2])) True >>> np.allclose(kernel(0), kernel([0])) True >>> np.allclose(kernel(0), kernel.evaluate(0)) True\n- def practical_support(self, bw, atol=0.0001): Return the support for practical purposes. Used to find a support value for computations for kernel functions without finite (bounded) support. Examples -------- >>> kernel = Kernel(gaussian, var=1, support=np.inf) >>> kernel.practical_support(bw=1) 3.8994... >>> kernel.practical_support(bw=2) 7.4331...\n- def evaluate(self, x, bw=1, norm=2): Evaluate the kernel. Parameters ---------- x : array-like Should have shape (obs, dims). bw : array-like Must have shape (obs, ), or float.\n\n<|skeleton|>\nclass Kernel:\n\n def __init__(self, function, var=1, support=3):\n \"\"\"Initialize a new kernel function. function: callable, numpy.arr -> numpy.arr, should integrate to 1 expected_value : peak, typically 0 support: support of the function. Example ------- >>> from scipy.special import gamma >>> # Normalized function of x >>> def exp(x, dims=1): ... normalization = gamma(dims) * dims ... return np.exp(-x) / normalization >>> kernel = Kernel(exp, var=4, support=np.inf) >>> # The function is scaled so that the standard deviation (bw) = 1 >>> kernel(0, bw=1, norm=2)[0] > kernel(1, bw=1, norm=2)[0] True >>> np.allclose(kernel(np.array([0, 1, 2])), kernel([0, 1, 2])) True >>> np.allclose(kernel(0), kernel([0])) True >>> np.allclose(kernel(0), kernel.evaluate(0)) True\"\"\"\n <|body_0|>\n\n def practical_support(self, bw, atol=0.0001):\n \"\"\"Return the support for practical purposes. Used to find a support value for computations for kernel functions without finite (bounded) support. Examples -------- >>> kernel = Kernel(gaussian, var=1, support=np.inf) >>> kernel.practical_support(bw=1) 3.8994... 
>>> kernel.practical_support(bw=2) 7.4331...\"\"\"\n <|body_1|>\n\n def evaluate(self, x, bw=1, norm=2):\n \"\"\"Evaluate the kernel. Parameters ---------- x : array-like Should have shape (obs, dims). bw : array-like Must have shape (obs, ), or float.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.function = function\n self.var = var\n self.finite_support = np.isfinite(support)\n self.support = support / np.sqrt(self.var)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.finite_support:\n return self.support * bw\n else:\n\n def f(x):\n return self.evaluate(x, bw=bw) - atol\n try:\n xtol = 0.001\n ans = brentq(f, a=0, b=8 * bw, xtol=xtol, full_output=False)\n return ans + xtol\n except ValueError:\n msg = 'Unable to solve for support numerically. Use a ' + 'kernel with finite support or scale data to smaller bw.'\n raise ValueError(msg)\n<|end_body_1|>\n\n<|body_start_2|>\n if isinstance(x, numbers.Number):\n x = np.asarray_chkfinite([x])\n else:\n x = np.asarray_chkfinite(x)\n if len(x.shape) == 1:\n x = x.reshape(-1, 1)\n real_bw = bw / np.sqrt(self.var)\n obs, dims = x.shape\n volume_func = functools.partial(volume_unit_ball, p=norm)\n if dims > 1:\n distances = p_norm(x, norm).ravel()\n else:\n distances = np.abs(x).ravel()\n return self.function(distances / real_bw, dims) / (real_bw ** dims * volume_func(dims))\n<|end_body_2|>\n", "revision_id": "0f7611ee2f7d534b68dd36c8c34900f100e9a8c7", "skeleton": "<|skeleton|>\nclass Kernel:\n\n def __init__(self, function, var=1, support=3):\n \"\"\"Initialize a new kernel function. function: callable, numpy.arr -> numpy.arr, should integrate to 1 expected_value : peak, typically 0 support: support of the function. Example ------- >>> from scipy.special import gamma >>> # Normalized function of x >>> def exp(x, dims=1): ... normalization = gamma(dims) * dims ... return np.exp(-x) / normalization >>> kernel = Kernel(exp, var=4, support=np.inf) >>> # The function is scaled so that the standard deviation (bw) = 1 >>> kernel(0, bw=1, norm=2)[0] > kernel(1, bw=1, norm=2)[0] True >>> np.allclose(kernel(np.array([0, 1, 2])), kernel([0, 1, 2])) True >>> np.allclose(kernel(0), kernel([0])) True >>> np.allclose(kernel(0), kernel.evaluate(0)) True\"\"\"\n <|body_0|>\n\n def practical_support(self, bw, atol=0.0001):\n \"\"\"Return the support for practical purposes. Used to find a support value for computations for kernel functions without finite (bounded) support. Examples -------- >>> kernel = Kernel(gaussian, var=1, support=np.inf) >>> kernel.practical_support(bw=1) 3.8994... >>> kernel.practical_support(bw=2) 7.4331...\"\"\"\n <|body_1|>\n\n def evaluate(self, x, bw=1, norm=2):\n \"\"\"Evaluate the kernel. Parameters ---------- x : array-like Should have shape (obs, dims). bw : array-like Must have shape (obs, ), or float.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Kernel:\n def __init__(self, function, var=1, support=3):\n \"\"\"Initialize a new kernel function. function: callable, numpy.arr -> numpy.arr, should integrate to 1 expected_value : peak, typically 0 support: support of the function. Example ------- >>> from scipy.special import gamma >>> # Normalized function of x >>> def exp(x, dims=1): ... normalization = gamma(dims) * dims ... 
return np.exp(-x) / normalization >>> kernel = Kernel(exp, var=4, support=np.inf) >>> # The function is scaled so that the standard deviation (bw) = 1 >>> kernel(0, bw=1, norm=2)[0] > kernel(1, bw=1, norm=2)[0] True >>> np.allclose(kernel(np.array([0, 1, 2])), kernel([0, 1, 2])) True >>> np.allclose(kernel(0), kernel([0])) True >>> np.allclose(kernel(0), kernel.evaluate(0)) True\"\"\"\n self.function = function\n self.var = var\n self.finite_support = np.isfinite(support)\n self.support = support / np.sqrt(self.var)\n\n def practical_support(self, bw, atol=0.0001):\n \"\"\"Return the support for practical purposes. Used to find a support value for computations for kernel functions without finite (bounded) support. Examples -------- >>> kernel = Kernel(gaussian, var=1, support=np.inf) >>> kernel.practical_support(bw=1) 3.8994... >>> kernel.practical_support(bw=2) 7.4331...\"\"\"\n if self.finite_support:\n return self.support * bw\n else:\n\n def f(x):\n return self.evaluate(x, bw=bw) - atol\n try:\n xtol = 0.001\n ans = brentq(f, a=0, b=8 * bw, xtol=xtol, full_output=False)\n return ans + xtol\n except ValueError:\n msg = 'Unable to solve for support numerically. Use a ' + 'kernel with finite support or scale data to smaller bw.'\n raise ValueError(msg)\n\n def evaluate(self, x, bw=1, norm=2):\n \"\"\"Evaluate the kernel. Parameters ---------- x : array-like Should have shape (obs, dims). bw : array-like Must have shape (obs, ), or float.\"\"\"\n if isinstance(x, numbers.Number):\n x = np.asarray_chkfinite([x])\n else:\n x = np.asarray_chkfinite(x)\n if len(x.shape) == 1:\n x = x.reshape(-1, 1)\n real_bw = bw / np.sqrt(self.var)\n obs, dims = x.shape\n volume_func = functools.partial(volume_unit_ball, p=norm)\n if dims > 1:\n distances = p_norm(x, norm).ravel()\n else:\n distances = np.abs(x).ravel()\n return self.function(distances / real_bw, dims) / (real_bw ** dims * volume_func(dims))\n", "source": "the_stack_v2_python_sparse", "source_path": "KDEpy/kernel_funcs.py", "source_repo": "tommyod/KDEpy", "split": "val", "star_events_count": 502}
{"blob_id": "93deaaefc92950bb0a26747f82bc7a4da22fea15", "bodies": ["m = context.accessor.get_metric(name)\nif not m:\n rp.abort(404)\nreturn m.as_string_dict()", "if not context.accessor.has_metric(name):\n return (\"Unknown metric: '%s'\" % name, 404)\npayload = request.json\nmetadata = bg_metric.MetricMetadata.create(aggregator=bg_metric.Aggregator.from_config_name(payload['aggregator']), retention=bg_metric.Retention.from_string(payload['retention']), carbon_xfilesfactor=payload['carbon_xfilesfactor'])\ncontext.accessor.update_metric(name, metadata)\nreturn ('', 204)"], "bodies_text": "<|body_start_0|>\n m = context.accessor.get_metric(name)\n if not m:\n rp.abort(404)\n return m.as_string_dict()\n<|end_body_0|>\n\n<|body_start_1|>\n if not context.accessor.has_metric(name):\n return (\"Unknown metric: '%s'\" % name, 404)\n payload = request.json\n metadata = bg_metric.MetricMetadata.create(aggregator=bg_metric.Aggregator.from_config_name(payload['aggregator']), retention=bg_metric.Retention.from_string(payload['retention']), carbon_xfilesfactor=payload['carbon_xfilesfactor'])\n context.accessor.update_metric(name, metadata)\n return ('', 204)\n<|end_body_1|>\n", "class_docstring": "A Metric.", "class_name": "MetricResource", "detected_licenses": ["LicenseRef-scancode-warranty-disclaimer", "Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MetricResource:\n \"\"\"A Metric.\"\"\"\n\n def get(self, name):\n \"\"\"Get a metric.\"\"\"\n <|body_0|>\n\n def post(self, name):\n \"\"\"Update a metric.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n m = context.accessor.get_metric(name)\n if not m:\n rp.abort(404)\n return m.as_string_dict()\n<|end_body_0|>\n\n<|body_start_1|>\n if not context.accessor.has_metric(name):\n return (\"Unknown metric: '%s'\" % name, 404)\n payload = request.json\n metadata = bg_metric.MetricMetadata.create(aggregator=bg_metric.Aggregator.from_config_name(payload['aggregator']), retention=bg_metric.Retention.from_string(payload['retention']), carbon_xfilesfactor=payload['carbon_xfilesfactor'])\n context.accessor.update_metric(name, metadata)\n return ('', 204)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000195", "length_bytes": 2775, "license_type": "permissive", "methods": [{"docstring": "Get a metric.", "name": "get", "signature": "def get(self, name)"}, {"docstring": "Update a metric.", "name": "post", "signature": "def post(self, name)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_029466", "prompt": "Implement the Python class `MetricResource` described below.\n\nClass description:\nA Metric.\n\nMethod signatures and docstrings:\n- def get(self, name): Get a metric.\n- def post(self, name): Update a metric.", "prompted_full_text": "Implement the Python class `MetricResource` described below.\n\nClass description:\nA Metric.\n\nMethod signatures and docstrings:\n- def get(self, name): Get a metric.\n- def post(self, name): Update a metric.\n\n<|skeleton|>\nclass MetricResource:\n \"\"\"A Metric.\"\"\"\n\n def get(self, name):\n \"\"\"Get a metric.\"\"\"\n <|body_0|>\n\n def post(self, name):\n \"\"\"Update a metric.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n m = context.accessor.get_metric(name)\n if not m:\n rp.abort(404)\n return m.as_string_dict()\n<|end_body_0|>\n\n<|body_start_1|>\n if not context.accessor.has_metric(name):\n return (\"Unknown metric: '%s'\" % name, 404)\n payload = request.json\n metadata = 
bg_metric.MetricMetadata.create(aggregator=bg_metric.Aggregator.from_config_name(payload['aggregator']), retention=bg_metric.Retention.from_string(payload['retention']), carbon_xfilesfactor=payload['carbon_xfilesfactor'])\n context.accessor.update_metric(name, metadata)\n return ('', 204)\n<|end_body_1|>\n", "revision_id": "1f647ada6b3f2b2f3fb4e59d326f73a2c891fc30", "skeleton": "<|skeleton|>\nclass MetricResource:\n \"\"\"A Metric.\"\"\"\n\n def get(self, name):\n \"\"\"Get a metric.\"\"\"\n <|body_0|>\n\n def post(self, name):\n \"\"\"Update a metric.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class MetricResource:\n \"\"\"A Metric.\"\"\"\n\n def get(self, name):\n \"\"\"Get a metric.\"\"\"\n m = context.accessor.get_metric(name)\n if not m:\n rp.abort(404)\n return m.as_string_dict()\n\n def post(self, name):\n \"\"\"Update a metric.\"\"\"\n if not context.accessor.has_metric(name):\n return (\"Unknown metric: '%s'\" % name, 404)\n payload = request.json\n metadata = bg_metric.MetricMetadata.create(aggregator=bg_metric.Aggregator.from_config_name(payload['aggregator']), retention=bg_metric.Retention.from_string(payload['retention']), carbon_xfilesfactor=payload['carbon_xfilesfactor'])\n context.accessor.update_metric(name, metadata)\n return ('', 204)\n", "source": "the_stack_v2_python_sparse", "source_path": "biggraphite/cli/web/namespaces/biggraphite.py", "source_repo": "criteo/biggraphite", "split": "val", "star_events_count": 129}
{"blob_id": "84da6fbcc9fdeb475b9899851c5c7d7e1ca4d78f", "bodies": ["self.mValue = value\nself.mType = type\nself.mSpecialChar = MapReader.FLOOR_CODE\nself.mIsSpecial = True\nif self.mType == ENTRANCE_CODE:\n self.mSpecialChar = 'e'\nelif self.mType == EXIT_CODE:\n self.mSpecialChar = 'x'\nelse:\n self.mIsSpecial = False\nself.mTextList = self.getText()", "filepath = 'hermite files/' + str(self.mValue) + '/' + str(random.randint(0, len(listdir('hermite files/' + str(self.mValue))) - 1)) + '.txt'\nhermiteFile = open(filepath)\nhermiteText = []\nfor line in hermiteFile:\n hermiteText.append(list(line.strip('\\n\\r')))\nhermiteFile.close()\nif self.mIsSpecial:\n hermiteText = self.addSpecialCharacters(hermiteText)\nreturn hermiteText", "lastPos = VectorN((0, 0))\nplaced = False\nfor r in range(len(textList)):\n for c in range(len(textList[r])):\n if textList[r][c] != MapReader.WALL_CODE:\n lastPos = VectorN((r, c))\n if random.random() < 0.1:\n textList[r][c] = self.mSpecialChar\n placed = True\n break\n if placed:\n break\nif not placed:\n textList[lastPos.iTuple()[0]][lastPos.iTuple()[1]] = self.mSpecialChar\nreturn textList"], "bodies_text": "<|body_start_0|>\n self.mValue = value\n self.mType = type\n self.mSpecialChar = MapReader.FLOOR_CODE\n self.mIsSpecial = True\n if self.mType == ENTRANCE_CODE:\n self.mSpecialChar = 'e'\n elif self.mType == EXIT_CODE:\n self.mSpecialChar = 'x'\n else:\n self.mIsSpecial = False\n self.mTextList = self.getText()\n<|end_body_0|>\n\n<|body_start_1|>\n filepath = 'hermite files/' + str(self.mValue) + '/' + str(random.randint(0, len(listdir('hermite files/' + str(self.mValue))) - 1)) + '.txt'\n hermiteFile = open(filepath)\n hermiteText = []\n for line in hermiteFile:\n hermiteText.append(list(line.strip('\\n\\r')))\n hermiteFile.close()\n if self.mIsSpecial:\n hermiteText = self.addSpecialCharacters(hermiteText)\n return hermiteText\n<|end_body_1|>\n\n<|body_start_2|>\n lastPos = VectorN((0, 0))\n placed = False\n for r in range(len(textList)):\n for c in range(len(textList[r])):\n if textList[r][c] != MapReader.WALL_CODE:\n lastPos = VectorN((r, c))\n if random.random() < 0.1:\n textList[r][c] = self.mSpecialChar\n placed = True\n break\n if placed:\n break\n if not placed:\n textList[lastPos.iTuple()[0]][lastPos.iTuple()[1]] = self.mSpecialChar\n return textList\n<|end_body_2|>\n", "class_docstring": "class that contains basic hermite data a hermite is a subsection of the level", "class_name": "Hermite", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Hermite:\n \"\"\"class that contains basic hermite data a hermite is a subsection of the level\"\"\"\n\n def __init__(self, value, type):\n \"\"\":param value: Value of the harmite, represents pathways. similar to neighbor sensitive tiles :param type: Type of hermite. see above for types.\"\"\"\n <|body_0|>\n\n def getText(self):\n \"\"\"just a simple function to retrieve and return a text document containing the layout of the hermite. 
also adds an entrance/exit if the hermite is that kind of hermite\"\"\"\n <|body_1|>\n\n def addSpecialCharacters(self, textList):\n \"\"\"based on the type of hermite that calls this function is, will add a special character that is needed for that hermite\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.mValue = value\n self.mType = type\n self.mSpecialChar = MapReader.FLOOR_CODE\n self.mIsSpecial = True\n if self.mType == ENTRANCE_CODE:\n self.mSpecialChar = 'e'\n elif self.mType == EXIT_CODE:\n self.mSpecialChar = 'x'\n else:\n self.mIsSpecial = False\n self.mTextList = self.getText()\n<|end_body_0|>\n\n<|body_start_1|>\n filepath = 'hermite files/' + str(self.mValue) + '/' + str(random.randint(0, len(listdir('hermite files/' + str(self.mValue))) - 1)) + '.txt'\n hermiteFile = open(filepath)\n hermiteText = []\n for line in hermiteFile:\n hermiteText.append(list(line.strip('\\n\\r')))\n hermiteFile.close()\n if self.mIsSpecial:\n hermiteText = self.addSpecialCharacters(hermiteText)\n return hermiteText\n<|end_body_1|>\n\n<|body_start_2|>\n lastPos = VectorN((0, 0))\n placed = False\n for r in range(len(textList)):\n for c in range(len(textList[r])):\n if textList[r][c] != MapReader.WALL_CODE:\n lastPos = VectorN((r, c))\n if random.random() < 0.1:\n textList[r][c] = self.mSpecialChar\n placed = True\n break\n if placed:\n break\n if not placed:\n textList[lastPos.iTuple()[0]][lastPos.iTuple()[1]] = self.mSpecialChar\n return textList\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000196", "length_bytes": 13245, "license_type": "no_license", "methods": [{"docstring": ":param value: Value of the harmite, represents pathways. similar to neighbor sensitive tiles :param type: Type of hermite. see above for types.", "name": "__init__", "signature": "def __init__(self, value, type)"}, {"docstring": "just a simple function to retrieve and return a text document containing the layout of the hermite. also adds an entrance/exit if the hermite is that kind of hermite", "name": "getText", "signature": "def getText(self)"}, {"docstring": "based on the type of hermite that calls this function is, will add a special character that is needed for that hermite", "name": "addSpecialCharacters", "signature": "def addSpecialCharacters(self, textList)"}], "n_methods": 3, "prompt": "Implement the Python class `Hermite` described below.\n\nClass description:\nclass that contains basic hermite data a hermite is a subsection of the level\n\nMethod signatures and docstrings:\n- def __init__(self, value, type): :param value: Value of the harmite, represents pathways. similar to neighbor sensitive tiles :param type: Type of hermite. see above for types.\n- def getText(self): just a simple function to retrieve and return a text document containing the layout of the hermite. also adds an entrance/exit if the hermite is that kind of hermite\n- def addSpecialCharacters(self, textList): based on the type of hermite that calls this function is, will add a special character that is needed for that hermite", "prompted_full_text": "Implement the Python class `Hermite` described below.\n\nClass description:\nclass that contains basic hermite data a hermite is a subsection of the level\n\nMethod signatures and docstrings:\n- def __init__(self, value, type): :param value: Value of the harmite, represents pathways. similar to neighbor sensitive tiles :param type: Type of hermite. 
see above for types.\n- def getText(self): just a simple function to retrieve and return a text document containing the layout of the hermite. also adds an entrance/exit if the hermite is that kind of hermite\n- def addSpecialCharacters(self, textList): based on the type of hermite that calls this function is, will add a special character that is needed for that hermite\n\n<|skeleton|>\nclass Hermite:\n \"\"\"class that contains basic hermite data a hermite is a subsection of the level\"\"\"\n\n def __init__(self, value, type):\n \"\"\":param value: Value of the harmite, represents pathways. similar to neighbor sensitive tiles :param type: Type of hermite. see above for types.\"\"\"\n <|body_0|>\n\n def getText(self):\n \"\"\"just a simple function to retrieve and return a text document containing the layout of the hermite. also adds an entrance/exit if the hermite is that kind of hermite\"\"\"\n <|body_1|>\n\n def addSpecialCharacters(self, textList):\n \"\"\"based on the type of hermite that calls this function is, will add a special character that is needed for that hermite\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.mValue = value\n self.mType = type\n self.mSpecialChar = MapReader.FLOOR_CODE\n self.mIsSpecial = True\n if self.mType == ENTRANCE_CODE:\n self.mSpecialChar = 'e'\n elif self.mType == EXIT_CODE:\n self.mSpecialChar = 'x'\n else:\n self.mIsSpecial = False\n self.mTextList = self.getText()\n<|end_body_0|>\n\n<|body_start_1|>\n filepath = 'hermite files/' + str(self.mValue) + '/' + str(random.randint(0, len(listdir('hermite files/' + str(self.mValue))) - 1)) + '.txt'\n hermiteFile = open(filepath)\n hermiteText = []\n for line in hermiteFile:\n hermiteText.append(list(line.strip('\\n\\r')))\n hermiteFile.close()\n if self.mIsSpecial:\n hermiteText = self.addSpecialCharacters(hermiteText)\n return hermiteText\n<|end_body_1|>\n\n<|body_start_2|>\n lastPos = VectorN((0, 0))\n placed = False\n for r in range(len(textList)):\n for c in range(len(textList[r])):\n if textList[r][c] != MapReader.WALL_CODE:\n lastPos = VectorN((r, c))\n if random.random() < 0.1:\n textList[r][c] = self.mSpecialChar\n placed = True\n break\n if placed:\n break\n if not placed:\n textList[lastPos.iTuple()[0]][lastPos.iTuple()[1]] = self.mSpecialChar\n return textList\n<|end_body_2|>\n", "revision_id": "34e06a9f7b870873cdbeeaf1ce4063af38c7fa3e", "skeleton": "<|skeleton|>\nclass Hermite:\n \"\"\"class that contains basic hermite data a hermite is a subsection of the level\"\"\"\n\n def __init__(self, value, type):\n \"\"\":param value: Value of the harmite, represents pathways. similar to neighbor sensitive tiles :param type: Type of hermite. see above for types.\"\"\"\n <|body_0|>\n\n def getText(self):\n \"\"\"just a simple function to retrieve and return a text document containing the layout of the hermite. also adds an entrance/exit if the hermite is that kind of hermite\"\"\"\n <|body_1|>\n\n def addSpecialCharacters(self, textList):\n \"\"\"based on the type of hermite that calls this function is, will add a special character that is needed for that hermite\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Hermite:\n \"\"\"class that contains basic hermite data a hermite is a subsection of the level\"\"\"\n\n def __init__(self, value, type):\n \"\"\":param value: Value of the harmite, represents pathways. 
similar to neighbor sensitive tiles :param type: Type of hermite. see above for types.\"\"\"\n self.mValue = value\n self.mType = type\n self.mSpecialChar = MapReader.FLOOR_CODE\n self.mIsSpecial = True\n if self.mType == ENTRANCE_CODE:\n self.mSpecialChar = 'e'\n elif self.mType == EXIT_CODE:\n self.mSpecialChar = 'x'\n else:\n self.mIsSpecial = False\n self.mTextList = self.getText()\n\n def getText(self):\n \"\"\"just a simple function to retrieve and return a text document containing the layout of the hermite. also adds an entrance/exit if the hermite is that kind of hermite\"\"\"\n filepath = 'hermite files/' + str(self.mValue) + '/' + str(random.randint(0, len(listdir('hermite files/' + str(self.mValue))) - 1)) + '.txt'\n hermiteFile = open(filepath)\n hermiteText = []\n for line in hermiteFile:\n hermiteText.append(list(line.strip('\\n\\r')))\n hermiteFile.close()\n if self.mIsSpecial:\n hermiteText = self.addSpecialCharacters(hermiteText)\n return hermiteText\n\n def addSpecialCharacters(self, textList):\n \"\"\"based on the type of hermite that calls this function is, will add a special character that is needed for that hermite\"\"\"\n lastPos = VectorN((0, 0))\n placed = False\n for r in range(len(textList)):\n for c in range(len(textList[r])):\n if textList[r][c] != MapReader.WALL_CODE:\n lastPos = VectorN((r, c))\n if random.random() < 0.1:\n textList[r][c] = self.mSpecialChar\n placed = True\n break\n if placed:\n break\n if not placed:\n textList[lastPos.iTuple()[0]][lastPos.iTuple()[1]] = self.mSpecialChar\n return textList\n", "source": "the_stack_v2_python_sparse", "source_path": "MapGenerator.py", "source_repo": "KoryB/freshman-project", "split": "val", "star_events_count": 0}
{"blob_id": "b7344d986efccd29dea4086d92f2298c174e1360", "bodies": ["_1 = ListNode(3)\n_2 = ListNode(2)\n_3 = ListNode(0)\n_4 = ListNode(-4)\n_1.next = _2\n_2.next = _3\n_3.next = _4\n_4.next = _2\ns = Solution()\nself.assertTrue(s.hasCycle(_1))", "l = [-21, 10, 17, 8, 4, 26, 5, 35, 33, -7, -16, 27, -12, 6, 29, -12, 5, 9, 20, 14, 14, 2, 13, -24, 21, 23, -21, 5]\nn = len(l)\nhead = ListNode(l[0])\nnode = head\nfor i in range(n - 1):\n node.next = ListNode(l[i + 1])\n node = node.next\ns = Solution()\nself.assertFalse(s.hasCycle(head))"], "bodies_text": "<|body_start_0|>\n _1 = ListNode(3)\n _2 = ListNode(2)\n _3 = ListNode(0)\n _4 = ListNode(-4)\n _1.next = _2\n _2.next = _3\n _3.next = _4\n _4.next = _2\n s = Solution()\n self.assertTrue(s.hasCycle(_1))\n<|end_body_0|>\n\n<|body_start_1|>\n l = [-21, 10, 17, 8, 4, 26, 5, 35, 33, -7, -16, 27, -12, 6, 29, -12, 5, 9, 20, 14, 14, 2, 13, -24, 21, 23, -21, 5]\n n = len(l)\n head = ListNode(l[0])\n node = head\n for i in range(n - 1):\n node.next = ListNode(l[i + 1])\n node = node.next\n s = Solution()\n self.assertFalse(s.hasCycle(head))\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Test", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Test:\n\n def test1(self):\n \"\"\"head = [3,2,0,-4], pos = 1 为了表示给定链表中的环,我们使用整数 pos 来表示链表尾连接到链表中的位置(索引从 0 开始) 如果 pos 是 -1,则在该链表中没有环\"\"\"\n <|body_0|>\n\n def test2(self):\n \"\"\"[-21,10,17,8,4,26,5,35,33,-7,-16,27,-12,6,29,-12,5,9,20,14,14,2,13,-24,21,23,-21,5] -1\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n _1 = ListNode(3)\n _2 = ListNode(2)\n _3 = ListNode(0)\n _4 = ListNode(-4)\n _1.next = _2\n _2.next = _3\n _3.next = _4\n _4.next = _2\n s = Solution()\n self.assertTrue(s.hasCycle(_1))\n<|end_body_0|>\n\n<|body_start_1|>\n l = [-21, 10, 17, 8, 4, 26, 5, 35, 33, -7, -16, 27, -12, 6, 29, -12, 5, 9, 20, 14, 14, 2, 13, -24, 21, 23, -21, 5]\n n = len(l)\n head = ListNode(l[0])\n node = head\n for i in range(n - 1):\n node.next = ListNode(l[i + 1])\n node = node.next\n s = Solution()\n self.assertFalse(s.hasCycle(head))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000197", "length_bytes": 1711, "license_type": "no_license", "methods": [{"docstring": "head = [3,2,0,-4], pos = 1 为了表示给定链表中的环,我们使用整数 pos 来表示链表尾连接到链表中的位置(索引从 0 开始) 如果 pos 是 -1,则在该链表中没有环", "name": "test1", "signature": "def test1(self)"}, {"docstring": "[-21,10,17,8,4,26,5,35,33,-7,-16,27,-12,6,29,-12,5,9,20,14,14,2,13,-24,21,23,-21,5] -1", "name": "test2", "signature": "def test2(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_015043", "prompt": "Implement the Python class `Test` described below.\n\nClass description:\nImplement the Test class.\n\nMethod signatures and docstrings:\n- def test1(self): head = [3,2,0,-4], pos = 1 为了表示给定链表中的环,我们使用整数 pos 来表示链表尾连接到链表中的位置(索引从 0 开始) 如果 pos 是 -1,则在该链表中没有环\n- def test2(self): [-21,10,17,8,4,26,5,35,33,-7,-16,27,-12,6,29,-12,5,9,20,14,14,2,13,-24,21,23,-21,5] -1", "prompted_full_text": "Implement the Python class `Test` described below.\n\nClass description:\nImplement the Test class.\n\nMethod signatures and docstrings:\n- def test1(self): head = [3,2,0,-4], pos = 1 为了表示给定链表中的环,我们使用整数 pos 来表示链表尾连接到链表中的位置(索引从 0 开始) 如果 pos 是 -1,则在该链表中没有环\n- def test2(self): [-21,10,17,8,4,26,5,35,33,-7,-16,27,-12,6,29,-12,5,9,20,14,14,2,13,-24,21,23,-21,5] -1\n\n<|skeleton|>\nclass Test:\n\n def test1(self):\n \"\"\"head = [3,2,0,-4], pos = 1 为了表示给定链表中的环,我们使用整数 pos 来表示链表尾连接到链表中的位置(索引从 0 开始) 如果 
pos 是 -1,则在该链表中没有环\"\"\"\n <|body_0|>\n\n def test2(self):\n \"\"\"[-21,10,17,8,4,26,5,35,33,-7,-16,27,-12,6,29,-12,5,9,20,14,14,2,13,-24,21,23,-21,5] -1\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n _1 = ListNode(3)\n _2 = ListNode(2)\n _3 = ListNode(0)\n _4 = ListNode(-4)\n _1.next = _2\n _2.next = _3\n _3.next = _4\n _4.next = _2\n s = Solution()\n self.assertTrue(s.hasCycle(_1))\n<|end_body_0|>\n\n<|body_start_1|>\n l = [-21, 10, 17, 8, 4, 26, 5, 35, 33, -7, -16, 27, -12, 6, 29, -12, 5, 9, 20, 14, 14, 2, 13, -24, 21, 23, -21, 5]\n n = len(l)\n head = ListNode(l[0])\n node = head\n for i in range(n - 1):\n node.next = ListNode(l[i + 1])\n node = node.next\n s = Solution()\n self.assertFalse(s.hasCycle(head))\n<|end_body_1|>\n", "revision_id": "248b620791611001ebb471dcf8284437264b2f20", "skeleton": "<|skeleton|>\nclass Test:\n\n def test1(self):\n \"\"\"head = [3,2,0,-4], pos = 1 为了表示给定链表中的环,我们使用整数 pos 来表示链表尾连接到链表中的位置(索引从 0 开始) 如果 pos 是 -1,则在该链表中没有环\"\"\"\n <|body_0|>\n\n def test2(self):\n \"\"\"[-21,10,17,8,4,26,5,35,33,-7,-16,27,-12,6,29,-12,5,9,20,14,14,2,13,-24,21,23,-21,5] -1\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Test:\n def test1(self):\n \"\"\"head = [3,2,0,-4], pos = 1 为了表示给定链表中的环,我们使用整数 pos 来表示链表尾连接到链表中的位置(索引从 0 开始) 如果 pos 是 -1,则在该链表中没有环\"\"\"\n _1 = ListNode(3)\n _2 = ListNode(2)\n _3 = ListNode(0)\n _4 = ListNode(-4)\n _1.next = _2\n _2.next = _3\n _3.next = _4\n _4.next = _2\n s = Solution()\n self.assertTrue(s.hasCycle(_1))\n\n def test2(self):\n \"\"\"[-21,10,17,8,4,26,5,35,33,-7,-16,27,-12,6,29,-12,5,9,20,14,14,2,13,-24,21,23,-21,5] -1\"\"\"\n l = [-21, 10, 17, 8, 4, 26, 5, 35, 33, -7, -16, 27, -12, 6, 29, -12, 5, 9, 20, 14, 14, 2, 13, -24, 21, 23, -21, 5]\n n = len(l)\n head = ListNode(l[0])\n node = head\n for i in range(n - 1):\n node.next = ListNode(l[i + 1])\n node = node.next\n s = Solution()\n self.assertFalse(s.hasCycle(head))\n", "source": "the_stack_v2_python_sparse", "source_path": "141_linked_list_cycle/_2.py", "source_repo": "chxj1992/leetcode-exercise", "split": "val", "star_events_count": 0}
{"blob_id": "9802fc0d3194e76d9f51a72a2ba3a83099790461", "bodies": ["self.do_lower_case = do_lower_case\nself.never_split = never_split if never_split is not None else []\nself.normalize_text = normalize_text\nself.janome_tokenizer = Tokenizer()", "if self.normalize_text:\n text = unicodedata.normalize('NFKC', text)\nnever_split = self.never_split + (never_split if never_split is not None else [])\ntokens = self.janome_tokenizer.tokenize(text)\n__tokens = []\nlast_index = 0\nfor t in tokens:\n token = t.surface\n token_start = text.index(token, last_index)\n if last_index != token_start:\n __tokens.append(text[last_index:token_start])\n if self.do_lower_case and token not in never_split:\n token = token.lower()\n __tokens.append(token.lower())\n else:\n __tokens.append(token)\n last_index = token_start + len(token)\nif len(text) != last_index:\n __tokens.append(text[last_index:])\nassert text == ''.join(__tokens), f\"[{text}] != [{''.join(__tokens)}]\"\nreturn __tokens"], "bodies_text": "<|body_start_0|>\n self.do_lower_case = do_lower_case\n self.never_split = never_split if never_split is not None else []\n self.normalize_text = normalize_text\n self.janome_tokenizer = Tokenizer()\n<|end_body_0|>\n\n<|body_start_1|>\n if self.normalize_text:\n text = unicodedata.normalize('NFKC', text)\n never_split = self.never_split + (never_split if never_split is not None else [])\n tokens = self.janome_tokenizer.tokenize(text)\n __tokens = []\n last_index = 0\n for t in tokens:\n token = t.surface\n token_start = text.index(token, last_index)\n if last_index != token_start:\n __tokens.append(text[last_index:token_start])\n if self.do_lower_case and token not in never_split:\n token = token.lower()\n __tokens.append(token.lower())\n else:\n __tokens.append(token)\n last_index = token_start + len(token)\n if len(text) != last_index:\n __tokens.append(text[last_index:])\n assert text == ''.join(__tokens), f\"[{text}] != [{''.join(__tokens)}]\"\n return __tokens\n<|end_body_1|>\n", "class_docstring": "Runs basic tokenization with Janome morphological parser.", "class_name": "JanomeTokenizer", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass JanomeTokenizer:\n \"\"\"Runs basic tokenization with Janome morphological parser.\"\"\"\n\n def __init__(self, *, do_lower_case=False, never_split=None, normalize_text=True):\n \"\"\"Construct a JanomeTokenizer. :arg do_lower_case: (`optional`) boolean (default True) Whether to lower case the input. :arg never_split: (`optional`) list of str Kept for backward compatibility purposes. Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`) List of token not to split. 
:arg normalize_text: (`optional`) boolean (default True) Whether to apply unicode normalization to text before tokenization.\"\"\"\n <|body_0|>\n\n def tokenize(self, text: str, *, never_split=None, **kwargs):\n \"\"\"Tokenizes a piece of text.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.do_lower_case = do_lower_case\n self.never_split = never_split if never_split is not None else []\n self.normalize_text = normalize_text\n self.janome_tokenizer = Tokenizer()\n<|end_body_0|>\n\n<|body_start_1|>\n if self.normalize_text:\n text = unicodedata.normalize('NFKC', text)\n never_split = self.never_split + (never_split if never_split is not None else [])\n tokens = self.janome_tokenizer.tokenize(text)\n __tokens = []\n last_index = 0\n for t in tokens:\n token = t.surface\n token_start = text.index(token, last_index)\n if last_index != token_start:\n __tokens.append(text[last_index:token_start])\n if self.do_lower_case and token not in never_split:\n token = token.lower()\n __tokens.append(token.lower())\n else:\n __tokens.append(token)\n last_index = token_start + len(token)\n if len(text) != last_index:\n __tokens.append(text[last_index:])\n assert text == ''.join(__tokens), f\"[{text}] != [{''.join(__tokens)}]\"\n return __tokens\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000198", "length_bytes": 7489, "license_type": "permissive", "methods": [{"docstring": "Construct a JanomeTokenizer. :arg do_lower_case: (`optional`) boolean (default True) Whether to lower case the input. :arg never_split: (`optional`) list of str Kept for backward compatibility purposes. Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`) List of token not to split. :arg normalize_text: (`optional`) boolean (default True) Whether to apply unicode normalization to text before tokenization.", "name": "__init__", "signature": "def __init__(self, *, do_lower_case=False, never_split=None, normalize_text=True)"}, {"docstring": "Tokenizes a piece of text.", "name": "tokenize", "signature": "def tokenize(self, text: str, *, never_split=None, **kwargs)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_000959", "prompt": "Implement the Python class `JanomeTokenizer` described below.\n\nClass description:\nRuns basic tokenization with Janome morphological parser.\n\nMethod signatures and docstrings:\n- def __init__(self, *, do_lower_case=False, never_split=None, normalize_text=True): Construct a JanomeTokenizer. :arg do_lower_case: (`optional`) boolean (default True) Whether to lower case the input. :arg never_split: (`optional`) list of str Kept for backward compatibility purposes. Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`) List of token not to split. :arg normalize_text: (`optional`) boolean (default True) Whether to apply unicode normalization to text before tokenization.\n- def tokenize(self, text: str, *, never_split=None, **kwargs): Tokenizes a piece of text.", "prompted_full_text": "Implement the Python class `JanomeTokenizer` described below.\n\nClass description:\nRuns basic tokenization with Janome morphological parser.\n\nMethod signatures and docstrings:\n- def __init__(self, *, do_lower_case=False, never_split=None, normalize_text=True): Construct a JanomeTokenizer. :arg do_lower_case: (`optional`) boolean (default True) Whether to lower case the input. :arg never_split: (`optional`) list of str Kept for backward compatibility purposes. 
Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`) List of token not to split. :arg normalize_text: (`optional`) boolean (default True) Whether to apply unicode normalization to text before tokenization.\n- def tokenize(self, text: str, *, never_split=None, **kwargs): Tokenizes a piece of text.\n\n<|skeleton|>\nclass JanomeTokenizer:\n \"\"\"Runs basic tokenization with Janome morphological parser.\"\"\"\n\n def __init__(self, *, do_lower_case=False, never_split=None, normalize_text=True):\n \"\"\"Construct a JanomeTokenizer. :arg do_lower_case: (`optional`) boolean (default True) Whether to lower case the input. :arg never_split: (`optional`) list of str Kept for backward compatibility purposes. Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`) List of token not to split. :arg normalize_text: (`optional`) boolean (default True) Whether to apply unicode normalization to text before tokenization.\"\"\"\n <|body_0|>\n\n def tokenize(self, text: str, *, never_split=None, **kwargs):\n \"\"\"Tokenizes a piece of text.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.do_lower_case = do_lower_case\n self.never_split = never_split if never_split is not None else []\n self.normalize_text = normalize_text\n self.janome_tokenizer = Tokenizer()\n<|end_body_0|>\n\n<|body_start_1|>\n if self.normalize_text:\n text = unicodedata.normalize('NFKC', text)\n never_split = self.never_split + (never_split if never_split is not None else [])\n tokens = self.janome_tokenizer.tokenize(text)\n __tokens = []\n last_index = 0\n for t in tokens:\n token = t.surface\n token_start = text.index(token, last_index)\n if last_index != token_start:\n __tokens.append(text[last_index:token_start])\n if self.do_lower_case and token not in never_split:\n token = token.lower()\n __tokens.append(token.lower())\n else:\n __tokens.append(token)\n last_index = token_start + len(token)\n if len(text) != last_index:\n __tokens.append(text[last_index:])\n assert text == ''.join(__tokens), f\"[{text}] != [{''.join(__tokens)}]\"\n return __tokens\n<|end_body_1|>\n", "revision_id": "6a6da28329fbdde2a53176740d403ef96fab4f28", "skeleton": "<|skeleton|>\nclass JanomeTokenizer:\n \"\"\"Runs basic tokenization with Janome morphological parser.\"\"\"\n\n def __init__(self, *, do_lower_case=False, never_split=None, normalize_text=True):\n \"\"\"Construct a JanomeTokenizer. :arg do_lower_case: (`optional`) boolean (default True) Whether to lower case the input. :arg never_split: (`optional`) list of str Kept for backward compatibility purposes. Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`) List of token not to split. :arg normalize_text: (`optional`) boolean (default True) Whether to apply unicode normalization to text before tokenization.\"\"\"\n <|body_0|>\n\n def tokenize(self, text: str, *, never_split=None, **kwargs):\n \"\"\"Tokenizes a piece of text.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class JanomeTokenizer:\n \"\"\"Runs basic tokenization with Janome morphological parser.\"\"\"\n\n def __init__(self, *, do_lower_case=False, never_split=None, normalize_text=True):\n \"\"\"Construct a JanomeTokenizer. :arg do_lower_case: (`optional`) boolean (default True) Whether to lower case the input. 
:arg never_split: (`optional`) list of str Kept for backward compatibility purposes. Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`) List of token not to split. :arg normalize_text: (`optional`) boolean (default True) Whether to apply unicode normalization to text before tokenization.\"\"\"\n self.do_lower_case = do_lower_case\n self.never_split = never_split if never_split is not None else []\n self.normalize_text = normalize_text\n self.janome_tokenizer = Tokenizer()\n\n def tokenize(self, text: str, *, never_split=None, **kwargs):\n \"\"\"Tokenizes a piece of text.\"\"\"\n if self.normalize_text:\n text = unicodedata.normalize('NFKC', text)\n never_split = self.never_split + (never_split if never_split is not None else [])\n tokens = self.janome_tokenizer.tokenize(text)\n __tokens = []\n last_index = 0\n for t in tokens:\n token = t.surface\n token_start = text.index(token, last_index)\n if last_index != token_start:\n __tokens.append(text[last_index:token_start])\n if self.do_lower_case and token not in never_split:\n token = token.lower()\n __tokens.append(token.lower())\n else:\n __tokens.append(token)\n last_index = token_start + len(token)\n if len(text) != last_index:\n __tokens.append(text[last_index:])\n assert text == ''.join(__tokens), f\"[{text}] != [{''.join(__tokens)}]\"\n return __tokens\n", "source": "the_stack_v2_python_sparse", "source_path": "bunkai/algorithm/lbd/custom_tokenizers.py", "source_repo": "t-yamamura/bunkai", "split": "val", "star_events_count": 0}
{"blob_id": "596234b90debe94eee8d904dfb5045685fbe589c", "bodies": ["candidate = obj\npersisted_candidate = copy.deepcopy(candidate)\nempty = {}\nmetrics = candidate.get('metrics', empty)\nmetrics_dict_converter = MetricsDictionaryConverter()\nclean_metrics = metrics_dict_converter.to_dict(metrics)\nold_interpretation = candidate.get('interpretation', empty)\ninterp_dict_converter = InterpretationDictionaryConverter()\npersisted_interpretation = interp_dict_converter.to_dict(old_interpretation)\nif persisted_candidate is None:\n persisted_candidate = {}\npersisted_candidate['interpretation'] = persisted_interpretation\npersisted_candidate['metrics'] = clean_metrics\nreturn persisted_candidate", "if obj_dict is None or not isinstance(obj_dict, dict):\n no_dict = super(CandidateDictionaryConverter, self).from_dict(obj_dict)\n return no_dict\npersisted_candidate = obj_dict\ncandidate = copy.deepcopy(persisted_candidate)\nempty = {}\npersisted_interpretation = persisted_candidate.get('interpretation', empty)\ninterp_dict_converter = InterpretationDictionaryConverter()\ninterpretation = interp_dict_converter.from_dict(persisted_interpretation)\ncandidate['interpretation'] = interpretation\nreturn candidate"], "bodies_text": "<|body_start_0|>\n candidate = obj\n persisted_candidate = copy.deepcopy(candidate)\n empty = {}\n metrics = candidate.get('metrics', empty)\n metrics_dict_converter = MetricsDictionaryConverter()\n clean_metrics = metrics_dict_converter.to_dict(metrics)\n old_interpretation = candidate.get('interpretation', empty)\n interp_dict_converter = InterpretationDictionaryConverter()\n persisted_interpretation = interp_dict_converter.to_dict(old_interpretation)\n if persisted_candidate is None:\n persisted_candidate = {}\n persisted_candidate['interpretation'] = persisted_interpretation\n persisted_candidate['metrics'] = clean_metrics\n return persisted_candidate\n<|end_body_0|>\n\n<|body_start_1|>\n if obj_dict is None or not isinstance(obj_dict, dict):\n no_dict = super(CandidateDictionaryConverter, self).from_dict(obj_dict)\n return no_dict\n persisted_candidate = obj_dict\n candidate = copy.deepcopy(persisted_candidate)\n empty = {}\n persisted_interpretation = persisted_candidate.get('interpretation', empty)\n interp_dict_converter = InterpretationDictionaryConverter()\n interpretation = interp_dict_converter.from_dict(persisted_interpretation)\n candidate['interpretation'] = interpretation\n return candidate\n<|end_body_1|>\n", "class_docstring": "A DictionaryConverter implementation which knows how to clean up a Candidate dictionary for serialization.", "class_name": "CandidateDictionaryConverter", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CandidateDictionaryConverter:\n \"\"\"A DictionaryConverter implementation which knows how to clean up a Candidate dictionary for serialization.\"\"\"\n\n def to_dict(self, obj):\n \"\"\":param obj: The object to be converted into a dictionary :return: A data-only dictionary that represents all the data for the given object, either in primitives (booleans, ints, floats, strings), arrays, or dictionaries. If obj is None, then the returned dictionary should also be None. If obj is not the correct type, it is also reasonable to return None.\"\"\"\n <|body_0|>\n\n def from_dict(self, obj_dict):\n \"\"\":param obj_dict: The data-only dictionary to be converted into an object :return: An object instance created from the given dictionary. 
If dictionary is None, the returned object should also be None. If obj_dict is None, the returned object should also be None. If obj_dict is not the correct type, it is also reasonable to return None.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n candidate = obj\n persisted_candidate = copy.deepcopy(candidate)\n empty = {}\n metrics = candidate.get('metrics', empty)\n metrics_dict_converter = MetricsDictionaryConverter()\n clean_metrics = metrics_dict_converter.to_dict(metrics)\n old_interpretation = candidate.get('interpretation', empty)\n interp_dict_converter = InterpretationDictionaryConverter()\n persisted_interpretation = interp_dict_converter.to_dict(old_interpretation)\n if persisted_candidate is None:\n persisted_candidate = {}\n persisted_candidate['interpretation'] = persisted_interpretation\n persisted_candidate['metrics'] = clean_metrics\n return persisted_candidate\n<|end_body_0|>\n\n<|body_start_1|>\n if obj_dict is None or not isinstance(obj_dict, dict):\n no_dict = super(CandidateDictionaryConverter, self).from_dict(obj_dict)\n return no_dict\n persisted_candidate = obj_dict\n candidate = copy.deepcopy(persisted_candidate)\n empty = {}\n persisted_interpretation = persisted_candidate.get('interpretation', empty)\n interp_dict_converter = InterpretationDictionaryConverter()\n interpretation = interp_dict_converter.from_dict(persisted_interpretation)\n candidate['interpretation'] = interpretation\n return candidate\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000199", "length_bytes": 4034, "license_type": "no_license", "methods": [{"docstring": ":param obj: The object to be converted into a dictionary :return: A data-only dictionary that represents all the data for the given object, either in primitives (booleans, ints, floats, strings), arrays, or dictionaries. If obj is None, then the returned dictionary should also be None. If obj is not the correct type, it is also reasonable to return None.", "name": "to_dict", "signature": "def to_dict(self, obj)"}, {"docstring": ":param obj_dict: The data-only dictionary to be converted into an object :return: An object instance created from the given dictionary. If dictionary is None, the returned object should also be None. If obj_dict is None, the returned object should also be None. If obj_dict is not the correct type, it is also reasonable to return None.", "name": "from_dict", "signature": "def from_dict(self, obj_dict)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_025928", "prompt": "Implement the Python class `CandidateDictionaryConverter` described below.\n\nClass description:\nA DictionaryConverter implementation which knows how to clean up a Candidate dictionary for serialization.\n\nMethod signatures and docstrings:\n- def to_dict(self, obj): :param obj: The object to be converted into a dictionary :return: A data-only dictionary that represents all the data for the given object, either in primitives (booleans, ints, floats, strings), arrays, or dictionaries. If obj is None, then the returned dictionary should also be None. If obj is not the correct type, it is also reasonable to return None.\n- def from_dict(self, obj_dict): :param obj_dict: The data-only dictionary to be converted into an object :return: An object instance created from the given dictionary. If dictionary is None, the returned object should also be None. If obj_dict is None, the returned object should also be None. 
If obj_dict is not the correct type, it is also reasonable to return None.", "prompted_full_text": "Implement the Python class `CandidateDictionaryConverter` described below.\n\nClass description:\nA DictionaryConverter implementation which knows how to clean up a Candidate dictionary for serialization.\n\nMethod signatures and docstrings:\n- def to_dict(self, obj): :param obj: The object to be converted into a dictionary :return: A data-only dictionary that represents all the data for the given object, either in primitives (booleans, ints, floats, strings), arrays, or dictionaries. If obj is None, then the returned dictionary should also be None. If obj is not the correct type, it is also reasonable to return None.\n- def from_dict(self, obj_dict): :param obj_dict: The data-only dictionary to be converted into an object :return: An object instance created from the given dictionary. If dictionary is None, the returned object should also be None. If obj_dict is None, the returned object should also be None. If obj_dict is not the correct type, it is also reasonable to return None.\n\n<|skeleton|>\nclass CandidateDictionaryConverter:\n \"\"\"A DictionaryConverter implementation which knows how to clean up a Candidate dictionary for serialization.\"\"\"\n\n def to_dict(self, obj):\n \"\"\":param obj: The object to be converted into a dictionary :return: A data-only dictionary that represents all the data for the given object, either in primitives (booleans, ints, floats, strings), arrays, or dictionaries. If obj is None, then the returned dictionary should also be None. If obj is not the correct type, it is also reasonable to return None.\"\"\"\n <|body_0|>\n\n def from_dict(self, obj_dict):\n \"\"\":param obj_dict: The data-only dictionary to be converted into an object :return: An object instance created from the given dictionary. If dictionary is None, the returned object should also be None. If obj_dict is None, the returned object should also be None. 
If obj_dict is not the correct type, it is also reasonable to return None.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n candidate = obj\n persisted_candidate = copy.deepcopy(candidate)\n empty = {}\n metrics = candidate.get('metrics', empty)\n metrics_dict_converter = MetricsDictionaryConverter()\n clean_metrics = metrics_dict_converter.to_dict(metrics)\n old_interpretation = candidate.get('interpretation', empty)\n interp_dict_converter = InterpretationDictionaryConverter()\n persisted_interpretation = interp_dict_converter.to_dict(old_interpretation)\n if persisted_candidate is None:\n persisted_candidate = {}\n persisted_candidate['interpretation'] = persisted_interpretation\n persisted_candidate['metrics'] = clean_metrics\n return persisted_candidate\n<|end_body_0|>\n\n<|body_start_1|>\n if obj_dict is None or not isinstance(obj_dict, dict):\n no_dict = super(CandidateDictionaryConverter, self).from_dict(obj_dict)\n return no_dict\n persisted_candidate = obj_dict\n candidate = copy.deepcopy(persisted_candidate)\n empty = {}\n persisted_interpretation = persisted_candidate.get('interpretation', empty)\n interp_dict_converter = InterpretationDictionaryConverter()\n interpretation = interp_dict_converter.from_dict(persisted_interpretation)\n candidate['interpretation'] = interpretation\n return candidate\n<|end_body_1|>\n", "revision_id": "99c2f401d6c4b203ee439ed607985a918d0c3c7e", "skeleton": "<|skeleton|>\nclass CandidateDictionaryConverter:\n \"\"\"A DictionaryConverter implementation which knows how to clean up a Candidate dictionary for serialization.\"\"\"\n\n def to_dict(self, obj):\n \"\"\":param obj: The object to be converted into a dictionary :return: A data-only dictionary that represents all the data for the given object, either in primitives (booleans, ints, floats, strings), arrays, or dictionaries. If obj is None, then the returned dictionary should also be None. If obj is not the correct type, it is also reasonable to return None.\"\"\"\n <|body_0|>\n\n def from_dict(self, obj_dict):\n \"\"\":param obj_dict: The data-only dictionary to be converted into an object :return: An object instance created from the given dictionary. If dictionary is None, the returned object should also be None. If obj_dict is None, the returned object should also be None. If obj_dict is not the correct type, it is also reasonable to return None.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CandidateDictionaryConverter:\n \"\"\"A DictionaryConverter implementation which knows how to clean up a Candidate dictionary for serialization.\"\"\"\n\n def to_dict(self, obj):\n \"\"\":param obj: The object to be converted into a dictionary :return: A data-only dictionary that represents all the data for the given object, either in primitives (booleans, ints, floats, strings), arrays, or dictionaries. If obj is None, then the returned dictionary should also be None. 
If obj is not the correct type, it is also reasonable to return None.\"\"\"\n candidate = obj\n persisted_candidate = copy.deepcopy(candidate)\n empty = {}\n metrics = candidate.get('metrics', empty)\n metrics_dict_converter = MetricsDictionaryConverter()\n clean_metrics = metrics_dict_converter.to_dict(metrics)\n old_interpretation = candidate.get('interpretation', empty)\n interp_dict_converter = InterpretationDictionaryConverter()\n persisted_interpretation = interp_dict_converter.to_dict(old_interpretation)\n if persisted_candidate is None:\n persisted_candidate = {}\n persisted_candidate['interpretation'] = persisted_interpretation\n persisted_candidate['metrics'] = clean_metrics\n return persisted_candidate\n\n def from_dict(self, obj_dict):\n \"\"\":param obj_dict: The data-only dictionary to be converted into an object :return: An object instance created from the given dictionary. If dictionary is None, the returned object should also be None. If obj_dict is None, the returned object should also be None. If obj_dict is not the correct type, it is also reasonable to return None.\"\"\"\n if obj_dict is None or not isinstance(obj_dict, dict):\n no_dict = super(CandidateDictionaryConverter, self).from_dict(obj_dict)\n return no_dict\n persisted_candidate = obj_dict\n candidate = copy.deepcopy(persisted_candidate)\n empty = {}\n persisted_interpretation = persisted_candidate.get('interpretation', empty)\n interp_dict_converter = InterpretationDictionaryConverter()\n interpretation = interp_dict_converter.from_dict(persisted_interpretation)\n candidate['interpretation'] = interpretation\n return candidate\n", "source": "the_stack_v2_python_sparse", "source_path": "framework/serialization/candidate_dictionary_converter.py", "source_repo": "Cognizant-CDB-AIA-BAI-AI-OI/LEAF-ENN-Training-V2", "split": "val", "star_events_count": 0}
{"blob_id": "76984011c1e07ac6cd07026cd4379ca84849785c", "bodies": ["def serializeHelper(root):\n if root:\n vals.append(str(root.val))\n serializeHelper(root.left)\n serializeHelper(root.right)\n else:\n vals.append('#')\nvals = []\nreturn ' '.join(vals)", "def deserializeHelper():\n val = next(vals)\n if val == '#':\n return None\n Node = TreeNode(val)\n Node.left = deserializeHelper()\n Node.right = deserializeHelper()\n return Node\nvals = iter(data.split())\nreturn deserializeHelper()"], "bodies_text": "<|body_start_0|>\n def serializeHelper(root):\n if root:\n vals.append(str(root.val))\n serializeHelper(root.left)\n serializeHelper(root.right)\n else:\n vals.append('#')\n vals = []\n return ' '.join(vals)\n<|end_body_0|>\n\n<|body_start_1|>\n def deserializeHelper():\n val = next(vals)\n if val == '#':\n return None\n Node = TreeNode(val)\n Node.left = deserializeHelper()\n Node.right = deserializeHelper()\n return Node\n vals = iter(data.split())\n return deserializeHelper()\n<|end_body_1|>\n", "class_docstring": "", "class_name": "SerializeDe", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SerializeDe:\n\n def serialize(self, root):\n \"\"\":type root: TreeNode :rtypr: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\":type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def serializeHelper(root):\n if root:\n vals.append(str(root.val))\n serializeHelper(root.left)\n serializeHelper(root.right)\n else:\n vals.append('#')\n vals = []\n return ' '.join(vals)\n<|end_body_0|>\n\n<|body_start_1|>\n def deserializeHelper():\n val = next(vals)\n if val == '#':\n return None\n Node = TreeNode(val)\n Node.left = deserializeHelper()\n Node.right = deserializeHelper()\n return Node\n vals = iter(data.split())\n return deserializeHelper()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000200", "length_bytes": 1612, "license_type": "no_license", "methods": [{"docstring": ":type root: TreeNode :rtypr: str", "name": "serialize", "signature": "def serialize(self, root)"}, {"docstring": ":type data: str :rtype: TreeNode", "name": "deserialize", "signature": "def deserialize(self, data)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_034675", "prompt": "Implement the Python class `SerializeDe` described below.\n\nClass description:\nImplement the SerializeDe class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): :type root: TreeNode :rtypr: str\n- def deserialize(self, data): :type data: str :rtype: TreeNode", "prompted_full_text": "Implement the Python class `SerializeDe` described below.\n\nClass description:\nImplement the SerializeDe class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): :type root: TreeNode :rtypr: str\n- def deserialize(self, data): :type data: str :rtype: TreeNode\n\n<|skeleton|>\nclass SerializeDe:\n\n def serialize(self, root):\n \"\"\":type root: TreeNode :rtypr: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\":type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def serializeHelper(root):\n if root:\n vals.append(str(root.val))\n serializeHelper(root.left)\n serializeHelper(root.right)\n else:\n vals.append('#')\n vals = []\n return ' '.join(vals)\n<|end_body_0|>\n\n<|body_start_1|>\n def deserializeHelper():\n val = next(vals)\n if val == '#':\n return None\n Node = TreeNode(val)\n Node.left = deserializeHelper()\n 
Node.right = deserializeHelper()\n return Node\n vals = iter(data.split())\n return deserializeHelper()\n<|end_body_1|>\n", "revision_id": "db38b684fbc966b59428dd1a18e8e3aa0d0df057", "skeleton": "<|skeleton|>\nclass SerializeDe:\n\n def serialize(self, root):\n \"\"\":type root: TreeNode :rtypr: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\":type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SerializeDe:\n def serialize(self, root):\n \"\"\":type root: TreeNode :rtypr: str\"\"\"\n def serializeHelper(root):\n if root:\n vals.append(str(root.val))\n serializeHelper(root.left)\n serializeHelper(root.right)\n else:\n vals.append('#')\n vals = []\n return ' '.join(vals)\n\n def deserialize(self, data):\n \"\"\":type data: str :rtype: TreeNode\"\"\"\n def deserializeHelper():\n val = next(vals)\n if val == '#':\n return None\n Node = TreeNode(val)\n Node.left = deserializeHelper()\n Node.right = deserializeHelper()\n return Node\n vals = iter(data.split())\n return deserializeHelper()\n", "source": "the_stack_v2_python_sparse", "source_path": "Trees/leetcode/serializeDe/index.py", "source_repo": "rpraval1/codeschool", "split": "val", "star_events_count": 0}
{"blob_id": "604302862d9d71679915f730cb6233360bbd34a0", "bodies": ["super(conv_7x1_1x7, self).__init__()\nself.stride = desc.stride\nself.channel_out = desc.C\nself.affine = desc.affine\nself.channel_out = channel_out\nself.data_format = desc.data_format", "x = tf.nn.relu(x)\nx = tf.layers.conv2d(x, self.channel_out, (1, 7), strides=(1, self.stride), padding='same', use_bias=False, data_format=self.data_format)\nx = tf.layers.conv2d(x, self.channel_out, (7, 1), strides=(self.stride, 1), padding='same', use_bias=False, data_format=self.data_format)\nx = tf.layers.batch_normalization(x, axis=1 if self.data_format == 'channels_first' else 3, trainable=self.affine, training=training)\nreturn x"], "bodies_text": "<|body_start_0|>\n super(conv_7x1_1x7, self).__init__()\n self.stride = desc.stride\n self.channel_out = desc.C\n self.affine = desc.affine\n self.channel_out = channel_out\n self.data_format = desc.data_format\n<|end_body_0|>\n\n<|body_start_1|>\n x = tf.nn.relu(x)\n x = tf.layers.conv2d(x, self.channel_out, (1, 7), strides=(1, self.stride), padding='same', use_bias=False, data_format=self.data_format)\n x = tf.layers.conv2d(x, self.channel_out, (7, 1), strides=(self.stride, 1), padding='same', use_bias=False, data_format=self.data_format)\n x = tf.layers.batch_normalization(x, axis=1 if self.data_format == 'channels_first' else 3, trainable=self.affine, training=training)\n return x\n<|end_body_1|>\n", "class_docstring": "Class of 7x1 and 1x7 convolution. :param desc: description of conv_7x1_1x7 :type desc: Config", "class_name": "conv_7x1_1x7", "detected_licenses": ["Apache-2.0", "MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass conv_7x1_1x7:\n \"\"\"Class of 7x1 and 1x7 convolution. :param desc: description of conv_7x1_1x7 :type desc: Config\"\"\"\n\n def __init__(self, desc):\n \"\"\"Init conv_7x1_1x7.\"\"\"\n <|body_0|>\n\n def __call__(self, x, training):\n \"\"\"Forward function of conv_7x1_1x7.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(conv_7x1_1x7, self).__init__()\n self.stride = desc.stride\n self.channel_out = desc.C\n self.affine = desc.affine\n self.channel_out = channel_out\n self.data_format = desc.data_format\n<|end_body_0|>\n\n<|body_start_1|>\n x = tf.nn.relu(x)\n x = tf.layers.conv2d(x, self.channel_out, (1, 7), strides=(1, self.stride), padding='same', use_bias=False, data_format=self.data_format)\n x = tf.layers.conv2d(x, self.channel_out, (7, 1), strides=(self.stride, 1), padding='same', use_bias=False, data_format=self.data_format)\n x = tf.layers.batch_normalization(x, axis=1 if self.data_format == 'channels_first' else 3, trainable=self.affine, training=training)\n return x\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000201", "length_bytes": 9137, "license_type": "permissive", "methods": [{"docstring": "Init conv_7x1_1x7.", "name": "__init__", "signature": "def __init__(self, desc)"}, {"docstring": "Forward function of conv_7x1_1x7.", "name": "__call__", "signature": "def __call__(self, x, training)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_030595", "prompt": "Implement the Python class `conv_7x1_1x7` described below.\n\nClass description:\nClass of 7x1 and 1x7 convolution. 
:param desc: description of conv_7x1_1x7 :type desc: Config\n\nMethod signatures and docstrings:\n- def __init__(self, desc): Init conv_7x1_1x7.\n- def __call__(self, x, training): Forward function of conv_7x1_1x7.", "prompted_full_text": "Implement the Python class `conv_7x1_1x7` described below.\n\nClass description:\nClass of 7x1 and 1x7 convolution. :param desc: description of conv_7x1_1x7 :type desc: Config\n\nMethod signatures and docstrings:\n- def __init__(self, desc): Init conv_7x1_1x7.\n- def __call__(self, x, training): Forward function of conv_7x1_1x7.\n\n<|skeleton|>\nclass conv_7x1_1x7:\n \"\"\"Class of 7x1 and 1x7 convolution. :param desc: description of conv_7x1_1x7 :type desc: Config\"\"\"\n\n def __init__(self, desc):\n \"\"\"Init conv_7x1_1x7.\"\"\"\n <|body_0|>\n\n def __call__(self, x, training):\n \"\"\"Forward function of conv_7x1_1x7.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(conv_7x1_1x7, self).__init__()\n self.stride = desc.stride\n self.channel_out = desc.C\n self.affine = desc.affine\n self.channel_out = channel_out\n self.data_format = desc.data_format\n<|end_body_0|>\n\n<|body_start_1|>\n x = tf.nn.relu(x)\n x = tf.layers.conv2d(x, self.channel_out, (1, 7), strides=(1, self.stride), padding='same', use_bias=False, data_format=self.data_format)\n x = tf.layers.conv2d(x, self.channel_out, (7, 1), strides=(self.stride, 1), padding='same', use_bias=False, data_format=self.data_format)\n x = tf.layers.batch_normalization(x, axis=1 if self.data_format == 'channels_first' else 3, trainable=self.affine, training=training)\n return x\n<|end_body_1|>\n", "revision_id": "df51ed9c1d6dbde1deef63f2a037a369f8554406", "skeleton": "<|skeleton|>\nclass conv_7x1_1x7:\n \"\"\"Class of 7x1 and 1x7 convolution. :param desc: description of conv_7x1_1x7 :type desc: Config\"\"\"\n\n def __init__(self, desc):\n \"\"\"Init conv_7x1_1x7.\"\"\"\n <|body_0|>\n\n def __call__(self, x, training):\n \"\"\"Forward function of conv_7x1_1x7.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class conv_7x1_1x7:\n \"\"\"Class of 7x1 and 1x7 convolution. :param desc: description of conv_7x1_1x7 :type desc: Config\"\"\"\n\n def __init__(self, desc):\n \"\"\"Init conv_7x1_1x7.\"\"\"\n super(conv_7x1_1x7, self).__init__()\n self.stride = desc.stride\n self.channel_out = desc.C\n self.affine = desc.affine\n self.channel_out = channel_out\n self.data_format = desc.data_format\n\n def __call__(self, x, training):\n \"\"\"Forward function of conv_7x1_1x7.\"\"\"\n x = tf.nn.relu(x)\n x = tf.layers.conv2d(x, self.channel_out, (1, 7), strides=(1, self.stride), padding='same', use_bias=False, data_format=self.data_format)\n x = tf.layers.conv2d(x, self.channel_out, (7, 1), strides=(self.stride, 1), padding='same', use_bias=False, data_format=self.data_format)\n x = tf.layers.batch_normalization(x, axis=1 if self.data_format == 'channels_first' else 3, trainable=self.affine, training=training)\n return x\n", "source": "the_stack_v2_python_sparse", "source_path": "built-in/TensorFlow/Official/cv/image_classification/ResnetVariant_for_TensorFlow/automl/vega/search_space/networks/tensorflow/blocks/darts_ops.py", "source_repo": "Huawei-Ascend/modelzoo", "split": "val", "star_events_count": 1}
{"blob_id": "24fa36f450b8b26e23f2b980de8346347711631f", "bodies": ["if config_path is not None:\n self.config_path = config_path\nif config_environment is not None:\n self.config_environment = config_environment\nif not hasattr(self, 'action_map'):\n self.action_map = {}\n self.load_config()", "action_factory = ActionFactory()\nfor config_filename in glob.glob('%s/actions_*.conf' % self.config_path) + glob.glob('%s/actions.d/actions_*.conf' % self.config_path):\n topic_name = config_filename.split('actions_')[-1].split('.')[0]\n if topic_name not in self.action_map:\n self.action_map[topic_name] = {}\n cnf = configparser.RawConfigParser()\n try:\n cnf.read(config_filename)\n except configparser.Error:\n syslog_error('exception occurred while reading \"%s\": %s' % (config_filename, traceback.format_exc(0)))\n for section in cnf.sections():\n conf = {}\n for act_prop in cnf.items(section):\n conf[act_prop[0]] = act_prop[1]\n action_obj = action_factory.get(environment=self.config_environment, conf=conf)\n target = self.action_map[topic_name]\n sections = section.split('.')\n while sections:\n action_name = sections.pop(0)\n if action_name in target:\n if type(target[action_name]) is not dict or len(sections) == 0:\n syslog_error('unsupported overlay command [%s.%s]' % (topic_name, section))\n break\n elif len(sections) == 0:\n target[action_name] = action_obj\n break\n else:\n target[action_name] = {}\n target = target[action_name]", "if attributes is None:\n attributes = []\nresult = {} if result is None else result\nmap_ptr = self.action_map if map_ptr is None else map_ptr\nfor key in map_ptr:\n this_path = ('%s %s' % (path, key)).strip()\n if type(map_ptr[key]) is dict:\n self.list_actions(attributes, result, map_ptr[key], this_path)\n else:\n result[this_path] = {}\n for actAttr in attributes:\n if hasattr(map_ptr[key], actAttr):\n result[this_path][actAttr] = getattr(map_ptr[key], actAttr)\n else:\n result[this_path][actAttr] = ''\nreturn result", "target = self.action_map\nwhile type(target) is dict and len(action) > 0 and (action[0] in target):\n tmp = action.pop(0)\n target = target[tmp]\nif isinstance(target, BaseAction):\n return (target, action)\nreturn (None, [])", "action_obj, action_params = self.find_action(action)\nif action_obj is not None:\n return '%s\\n' % action_obj.execute(action_params, message_uuid)\nreturn 'Action not found\\n'", "action_obj, parameters = self.find_action(action)\nif action_obj is not None:\n print('---------------------------------------------------------------------')\n print('execute %s ' % ' '.join(action))\n print('action object %s (%s) %s' % (action_obj, action_obj.command, message_uuid))\n print('---------------------------------------------------------------------')"], "bodies_text": "<|body_start_0|>\n if config_path is not None:\n self.config_path = config_path\n if config_environment is not None:\n self.config_environment = config_environment\n if not hasattr(self, 'action_map'):\n self.action_map = {}\n self.load_config()\n<|end_body_0|>\n\n<|body_start_1|>\n action_factory = ActionFactory()\n for config_filename in glob.glob('%s/actions_*.conf' % self.config_path) + glob.glob('%s/actions.d/actions_*.conf' % self.config_path):\n topic_name = config_filename.split('actions_')[-1].split('.')[0]\n if topic_name not in self.action_map:\n self.action_map[topic_name] = {}\n cnf = configparser.RawConfigParser()\n try:\n cnf.read(config_filename)\n except configparser.Error:\n syslog_error('exception occurred while reading \"%s\": %s' % 
(config_filename, traceback.format_exc(0)))\n for section in cnf.sections():\n conf = {}\n for act_prop in cnf.items(section):\n conf[act_prop[0]] = act_prop[1]\n action_obj = action_factory.get(environment=self.config_environment, conf=conf)\n target = self.action_map[topic_name]\n sections = section.split('.')\n while sections:\n action_name = sections.pop(0)\n if action_name in target:\n if type(target[action_name]) is not dict or len(sections) == 0:\n syslog_error('unsupported overlay command [%s.%s]' % (topic_name, section))\n break\n elif len(sections) == 0:\n target[action_name] = action_obj\n break\n else:\n target[action_name] = {}\n target = target[action_name]\n<|end_body_1|>\n\n<|body_start_2|>\n if attributes is None:\n attributes = []\n result = {} if result is None else result\n map_ptr = self.action_map if map_ptr is None else map_ptr\n for key in map_ptr:\n this_path = ('%s %s' % (path, key)).strip()\n if type(map_ptr[key]) is dict:\n self.list_actions(attributes, result, map_ptr[key], this_path)\n else:\n result[this_path] = {}\n for actAttr in attributes:\n if hasattr(map_ptr[key], actAttr):\n result[this_path][actAttr] = getattr(map_ptr[key], actAttr)\n else:\n result[this_path][actAttr] = ''\n return result\n<|end_body_2|>\n\n<|body_start_3|>\n target = self.action_map\n while type(target) is dict and len(action) > 0 and (action[0] in target):\n tmp = action.pop(0)\n target = target[tmp]\n if isinstance(target, BaseAction):\n return (target, action)\n return (None, [])\n<|end_body_3|>\n\n<|body_start_4|>\n action_obj, action_params = self.find_action(action)\n if action_obj is not None:\n return '%s\\n' % action_obj.execute(action_params, message_uuid)\n return 'Action not found\\n'\n<|end_body_4|>\n\n<|body_start_5|>\n action_obj, parameters = self.find_action(action)\n if action_obj is not None:\n print('---------------------------------------------------------------------')\n print('execute %s ' % ' '.join(action))\n print('action object %s (%s) %s' % (action_obj, action_obj.command, message_uuid))\n print('---------------------------------------------------------------------')\n<|end_body_5|>\n", "class_docstring": "Start/stop services and functions using configuration data defined in conf/actions_.conf", "class_name": "ActionHandler", "detected_licenses": ["BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ActionHandler:\n \"\"\"Start/stop services and functions using configuration data defined in conf/actions_.conf\"\"\"\n\n def __init__(self, config_path=None, config_environment=None):\n \"\"\"Initialize action handler to start system functions :param config_path: full path of configuration data :param config_environment: environment to use (if possible) :return:\"\"\"\n <|body_0|>\n\n def load_config(self):\n \"\"\"load action configuration from config files into local dictionary :return: None\"\"\"\n <|body_1|>\n\n def list_actions(self, attributes=None, result=None, map_ptr=None, path=''):\n \"\"\"list all available actions :param attributes: :param result: (recursion) result dictionary to return :param map_ptr: (recursion) point to the leaves in the tree :param path: (recursion) path (items) :return: dict\"\"\"\n <|body_2|>\n\n def find_action(self, action):\n \"\"\"find action object :param action: list of commands and parameters :return: action object or None if not found\"\"\"\n <|body_3|>\n\n def execute(self, action, message_uuid):\n \"\"\"execute 
configuration defined action :param action: list of commands and parameters :param message_uuid: message unique id :return: OK on success, else error code\"\"\"\n <|body_4|>\n\n def show_action(self, action, message_uuid):\n \"\"\"debug/simulation mode: show action information :param action: list of commands and parameters :param message_uuid: message unique id :return: None\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if config_path is not None:\n self.config_path = config_path\n if config_environment is not None:\n self.config_environment = config_environment\n if not hasattr(self, 'action_map'):\n self.action_map = {}\n self.load_config()\n<|end_body_0|>\n\n<|body_start_1|>\n action_factory = ActionFactory()\n for config_filename in glob.glob('%s/actions_*.conf' % self.config_path) + glob.glob('%s/actions.d/actions_*.conf' % self.config_path):\n topic_name = config_filename.split('actions_')[-1].split('.')[0]\n if topic_name not in self.action_map:\n self.action_map[topic_name] = {}\n cnf = configparser.RawConfigParser()\n try:\n cnf.read(config_filename)\n except configparser.Error:\n syslog_error('exception occurred while reading \"%s\": %s' % (config_filename, traceback.format_exc(0)))\n for section in cnf.sections():\n conf = {}\n for act_prop in cnf.items(section):\n conf[act_prop[0]] = act_prop[1]\n action_obj = action_factory.get(environment=self.config_environment, conf=conf)\n target = self.action_map[topic_name]\n sections = section.split('.')\n while sections:\n action_name = sections.pop(0)\n if action_name in target:\n if type(target[action_name]) is not dict or len(sections) == 0:\n syslog_error('unsupported overlay command [%s.%s]' % (topic_name, section))\n break\n elif len(sections) == 0:\n target[action_name] = action_obj\n break\n else:\n target[action_name] = {}\n target = target[action_name]\n<|end_body_1|>\n\n<|body_start_2|>\n if attributes is None:\n attributes = []\n result = {} if result is None else result\n map_ptr = self.action_map if map_ptr is None else map_ptr\n for key in map_ptr:\n this_path = ('%s %s' % (path, key)).strip()\n if type(map_ptr[key]) is dict:\n self.list_actions(attributes, result, map_ptr[key], this_path)\n else:\n result[this_path] = {}\n for actAttr in attributes:\n if hasattr(map_ptr[key], actAttr):\n result[this_path][actAttr] = getattr(map_ptr[key], actAttr)\n else:\n result[this_path][actAttr] = ''\n return result\n<|end_body_2|>\n\n<|body_start_3|>\n target = self.action_map\n while type(target) is dict and len(action) > 0 and (action[0] in target):\n tmp = action.pop(0)\n target = target[tmp]\n if isinstance(target, BaseAction):\n return (target, action)\n return (None, [])\n<|end_body_3|>\n\n<|body_start_4|>\n action_obj, action_params = self.find_action(action)\n if action_obj is not None:\n return '%s\\n' % action_obj.execute(action_params, message_uuid)\n return 'Action not found\\n'\n<|end_body_4|>\n\n<|body_start_5|>\n action_obj, parameters = self.find_action(action)\n if action_obj is not None:\n print('---------------------------------------------------------------------')\n print('execute %s ' % ' '.join(action))\n print('action object %s (%s) %s' % (action_obj, action_obj.command, message_uuid))\n print('---------------------------------------------------------------------')\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000202", "length_bytes": 14249, "license_type": "permissive", "methods": [{"docstring": "Initialize action handler to start system functions :param config_path: full 
path of configuration data :param config_environment: environment to use (if possible) :return:", "name": "__init__", "signature": "def __init__(self, config_path=None, config_environment=None)"}, {"docstring": "load action configuration from config files into local dictionary :return: None", "name": "load_config", "signature": "def load_config(self)"}, {"docstring": "list all available actions :param attributes: :param result: (recursion) result dictionary to return :param map_ptr: (recursion) point to the leaves in the tree :param path: (recursion) path (items) :return: dict", "name": "list_actions", "signature": "def list_actions(self, attributes=None, result=None, map_ptr=None, path='')"}, {"docstring": "find action object :param action: list of commands and parameters :return: action object or None if not found", "name": "find_action", "signature": "def find_action(self, action)"}, {"docstring": "execute configuration defined action :param action: list of commands and parameters :param message_uuid: message unique id :return: OK on success, else error code", "name": "execute", "signature": "def execute(self, action, message_uuid)"}, {"docstring": "debug/simulation mode: show action information :param action: list of commands and parameters :param message_uuid: message unique id :return: None", "name": "show_action", "signature": "def show_action(self, action, message_uuid)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_train_009329", "prompt": "Implement the Python class `ActionHandler` described below.\n\nClass description:\nStart/stop services and functions using configuration data defined in conf/actions_.conf\n\nMethod signatures and docstrings:\n- def __init__(self, config_path=None, config_environment=None): Initialize action handler to start system functions :param config_path: full path of configuration data :param config_environment: environment to use (if possible) :return:\n- def load_config(self): load action configuration from config files into local dictionary :return: None\n- def list_actions(self, attributes=None, result=None, map_ptr=None, path=''): list all available actions :param attributes: :param result: (recursion) result dictionary to return :param map_ptr: (recursion) point to the leaves in the tree :param path: (recursion) path (items) :return: dict\n- def find_action(self, action): find action object :param action: list of commands and parameters :return: action object or None if not found\n- def execute(self, action, message_uuid): execute configuration defined action :param action: list of commands and parameters :param message_uuid: message unique id :return: OK on success, else error code\n- def show_action(self, action, message_uuid): debug/simulation mode: show action information :param action: list of commands and parameters :param message_uuid: message unique id :return: None", "prompted_full_text": "Implement the Python class `ActionHandler` described below.\n\nClass description:\nStart/stop services and functions using configuration data defined in conf/actions_.conf\n\nMethod signatures and docstrings:\n- def __init__(self, config_path=None, config_environment=None): Initialize action handler to start system functions :param config_path: full path of configuration data :param config_environment: environment to use (if possible) :return:\n- def load_config(self): load action configuration from config files into local dictionary :return: None\n- def list_actions(self, attributes=None, result=None, map_ptr=None, path=''): list all 
available actions :param attributes: :param result: (recursion) result dictionary to return :param map_ptr: (recursion) point to the leaves in the tree :param path: (recursion) path (items) :return: dict\n- def find_action(self, action): find action object :param action: list of commands and parameters :return: action object or None if not found\n- def execute(self, action, message_uuid): execute configuration defined action :param action: list of commands and parameters :param message_uuid: message unique id :return: OK on success, else error code\n- def show_action(self, action, message_uuid): debug/simulation mode: show action information :param action: list of commands and parameters :param message_uuid: message unique id :return: None\n\n<|skeleton|>\nclass ActionHandler:\n \"\"\"Start/stop services and functions using configuration data defined in conf/actions_.conf\"\"\"\n\n def __init__(self, config_path=None, config_environment=None):\n \"\"\"Initialize action handler to start system functions :param config_path: full path of configuration data :param config_environment: environment to use (if possible) :return:\"\"\"\n <|body_0|>\n\n def load_config(self):\n \"\"\"load action configuration from config files into local dictionary :return: None\"\"\"\n <|body_1|>\n\n def list_actions(self, attributes=None, result=None, map_ptr=None, path=''):\n \"\"\"list all available actions :param attributes: :param result: (recursion) result dictionary to return :param map_ptr: (recursion) point to the leaves in the tree :param path: (recursion) path (items) :return: dict\"\"\"\n <|body_2|>\n\n def find_action(self, action):\n \"\"\"find action object :param action: list of commands and parameters :return: action object or None if not found\"\"\"\n <|body_3|>\n\n def execute(self, action, message_uuid):\n \"\"\"execute configuration defined action :param action: list of commands and parameters :param message_uuid: message unique id :return: OK on success, else error code\"\"\"\n <|body_4|>\n\n def show_action(self, action, message_uuid):\n \"\"\"debug/simulation mode: show action information :param action: list of commands and parameters :param message_uuid: message unique id :return: None\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if config_path is not None:\n self.config_path = config_path\n if config_environment is not None:\n self.config_environment = config_environment\n if not hasattr(self, 'action_map'):\n self.action_map = {}\n self.load_config()\n<|end_body_0|>\n\n<|body_start_1|>\n action_factory = ActionFactory()\n for config_filename in glob.glob('%s/actions_*.conf' % self.config_path) + glob.glob('%s/actions.d/actions_*.conf' % self.config_path):\n topic_name = config_filename.split('actions_')[-1].split('.')[0]\n if topic_name not in self.action_map:\n self.action_map[topic_name] = {}\n cnf = configparser.RawConfigParser()\n try:\n cnf.read(config_filename)\n except configparser.Error:\n syslog_error('exception occurred while reading \"%s\": %s' % (config_filename, traceback.format_exc(0)))\n for section in cnf.sections():\n conf = {}\n for act_prop in cnf.items(section):\n conf[act_prop[0]] = act_prop[1]\n action_obj = action_factory.get(environment=self.config_environment, conf=conf)\n target = self.action_map[topic_name]\n sections = section.split('.')\n while sections:\n action_name = sections.pop(0)\n if action_name in target:\n if type(target[action_name]) is not dict or len(sections) == 0:\n syslog_error('unsupported overlay command [%s.%s]' % (topic_name, 
section))\n break\n elif len(sections) == 0:\n target[action_name] = action_obj\n break\n else:\n target[action_name] = {}\n target = target[action_name]\n<|end_body_1|>\n\n<|body_start_2|>\n if attributes is None:\n attributes = []\n result = {} if result is None else result\n map_ptr = self.action_map if map_ptr is None else map_ptr\n for key in map_ptr:\n this_path = ('%s %s' % (path, key)).strip()\n if type(map_ptr[key]) is dict:\n self.list_actions(attributes, result, map_ptr[key], this_path)\n else:\n result[this_path] = {}\n for actAttr in attributes:\n if hasattr(map_ptr[key], actAttr):\n result[this_path][actAttr] = getattr(map_ptr[key], actAttr)\n else:\n result[this_path][actAttr] = ''\n return result\n<|end_body_2|>\n\n<|body_start_3|>\n target = self.action_map\n while type(target) is dict and len(action) > 0 and (action[0] in target):\n tmp = action.pop(0)\n target = target[tmp]\n if isinstance(target, BaseAction):\n return (target, action)\n return (None, [])\n<|end_body_3|>\n\n<|body_start_4|>\n action_obj, action_params = self.find_action(action)\n if action_obj is not None:\n return '%s\\n' % action_obj.execute(action_params, message_uuid)\n return 'Action not found\\n'\n<|end_body_4|>\n\n<|body_start_5|>\n action_obj, parameters = self.find_action(action)\n if action_obj is not None:\n print('---------------------------------------------------------------------')\n print('execute %s ' % ' '.join(action))\n print('action object %s (%s) %s' % (action_obj, action_obj.command, message_uuid))\n print('---------------------------------------------------------------------')\n<|end_body_5|>\n", "revision_id": "a702cf9fb3300e125cd7acc8af3813474606e509", "skeleton": "<|skeleton|>\nclass ActionHandler:\n \"\"\"Start/stop services and functions using configuration data defined in conf/actions_.conf\"\"\"\n\n def __init__(self, config_path=None, config_environment=None):\n \"\"\"Initialize action handler to start system functions :param config_path: full path of configuration data :param config_environment: environment to use (if possible) :return:\"\"\"\n <|body_0|>\n\n def load_config(self):\n \"\"\"load action configuration from config files into local dictionary :return: None\"\"\"\n <|body_1|>\n\n def list_actions(self, attributes=None, result=None, map_ptr=None, path=''):\n \"\"\"list all available actions :param attributes: :param result: (recursion) result dictionary to return :param map_ptr: (recursion) point to the leaves in the tree :param path: (recursion) path (items) :return: dict\"\"\"\n <|body_2|>\n\n def find_action(self, action):\n \"\"\"find action object :param action: list of commands and parameters :return: action object or None if not found\"\"\"\n <|body_3|>\n\n def execute(self, action, message_uuid):\n \"\"\"execute configuration defined action :param action: list of commands and parameters :param message_uuid: message unique id :return: OK on success, else error code\"\"\"\n <|body_4|>\n\n def show_action(self, action, message_uuid):\n \"\"\"debug/simulation mode: show action information :param action: list of commands and parameters :param message_uuid: message unique id :return: None\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ActionHandler:\n \"\"\"Start/stop services and functions using configuration data defined in conf/actions_.conf\"\"\"\n\n def __init__(self, config_path=None, 
config_environment=None):\n \"\"\"Initialize action handler to start system functions :param config_path: full path of configuration data :param config_environment: environment to use (if possible) :return:\"\"\"\n if config_path is not None:\n self.config_path = config_path\n if config_environment is not None:\n self.config_environment = config_environment\n if not hasattr(self, 'action_map'):\n self.action_map = {}\n self.load_config()\n\n def load_config(self):\n \"\"\"load action configuration from config files into local dictionary :return: None\"\"\"\n action_factory = ActionFactory()\n for config_filename in glob.glob('%s/actions_*.conf' % self.config_path) + glob.glob('%s/actions.d/actions_*.conf' % self.config_path):\n topic_name = config_filename.split('actions_')[-1].split('.')[0]\n if topic_name not in self.action_map:\n self.action_map[topic_name] = {}\n cnf = configparser.RawConfigParser()\n try:\n cnf.read(config_filename)\n except configparser.Error:\n syslog_error('exception occurred while reading \"%s\": %s' % (config_filename, traceback.format_exc(0)))\n for section in cnf.sections():\n conf = {}\n for act_prop in cnf.items(section):\n conf[act_prop[0]] = act_prop[1]\n action_obj = action_factory.get(environment=self.config_environment, conf=conf)\n target = self.action_map[topic_name]\n sections = section.split('.')\n while sections:\n action_name = sections.pop(0)\n if action_name in target:\n if type(target[action_name]) is not dict or len(sections) == 0:\n syslog_error('unsupported overlay command [%s.%s]' % (topic_name, section))\n break\n elif len(sections) == 0:\n target[action_name] = action_obj\n break\n else:\n target[action_name] = {}\n target = target[action_name]\n\n def list_actions(self, attributes=None, result=None, map_ptr=None, path=''):\n \"\"\"list all available actions :param attributes: :param result: (recursion) result dictionary to return :param map_ptr: (recursion) point to the leaves in the tree :param path: (recursion) path (items) :return: dict\"\"\"\n if attributes is None:\n attributes = []\n result = {} if result is None else result\n map_ptr = self.action_map if map_ptr is None else map_ptr\n for key in map_ptr:\n this_path = ('%s %s' % (path, key)).strip()\n if type(map_ptr[key]) is dict:\n self.list_actions(attributes, result, map_ptr[key], this_path)\n else:\n result[this_path] = {}\n for actAttr in attributes:\n if hasattr(map_ptr[key], actAttr):\n result[this_path][actAttr] = getattr(map_ptr[key], actAttr)\n else:\n result[this_path][actAttr] = ''\n return result\n\n def find_action(self, action):\n \"\"\"find action object :param action: list of commands and parameters :return: action object or None if not found\"\"\"\n target = self.action_map\n while type(target) is dict and len(action) > 0 and (action[0] in target):\n tmp = action.pop(0)\n target = target[tmp]\n if isinstance(target, BaseAction):\n return (target, action)\n return (None, [])\n\n def execute(self, action, message_uuid):\n \"\"\"execute configuration defined action :param action: list of commands and parameters :param message_uuid: message unique id :return: OK on success, else error code\"\"\"\n action_obj, action_params = self.find_action(action)\n if action_obj is not None:\n return '%s\\n' % action_obj.execute(action_params, message_uuid)\n return 'Action not found\\n'\n\n def show_action(self, action, message_uuid):\n \"\"\"debug/simulation mode: show action information :param action: list of commands and parameters :param message_uuid: message unique id 
:return: None\"\"\"\n action_obj, parameters = self.find_action(action)\n if action_obj is not None:\n print('---------------------------------------------------------------------')\n print('execute %s ' % ' '.join(action))\n print('action object %s (%s) %s' % (action_obj, action_obj.command, message_uuid))\n print('---------------------------------------------------------------------')\n", "source": "the_stack_v2_python_sparse", "source_path": "src/opnsense/service/modules/processhandler.py", "source_repo": "opnsense/core", "split": "val", "star_events_count": 2778}
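The ActionHandler record above turns on find_action: a walk down the nested action_map that load_config builds from dotted section names in actions_*.conf. A minimal, self-contained sketch of that walk (BaseAction is stubbed here with an illustrative command string, since the real opnsense class sits outside the record):

class BaseAction:
    def __init__(self, command):
        self.command = command

    def execute(self, parameters, message_uuid):
        return 'OK'

# Shape of the map load_config produces: topic -> command -> action object.
action_map = {'filter': {'reload': BaseAction('some_command')}}

def find_action(action_map, action):
    # As in the record, matched tokens are popped off the caller's list,
    # so whatever remains after the walk becomes the action's parameters.
    target = action_map
    while isinstance(target, dict) and action and action[0] in target:
        target = target[action.pop(0)]
    if isinstance(target, BaseAction):
        return target, action
    return None, []

obj, params = find_action(action_map, ['filter', 'reload', 'extra_arg'])
assert obj is not None and params == ['extra_arg']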
{"blob_id": "0096d34f5486d18c022ed3204d26bb67f956caa1", "bodies": ["if port and nonce and userAgent:\n self.Port = port\n self.Version = 0\n self.Services = NetworkAddressWithTime.NODE_NETWORK\n self.Timestamp = int(datetime.datetime.utcnow().timestamp())\n self.Nonce = nonce\n self.UserAgent = userAgent\n if Blockchain.Default() is not None and Blockchain.Default().Height is not None:\n self.StartHeight = Blockchain.Default().Height\n self.Relay = True", "size1 = ctypes.sizeof(ctypes.c_uint) + ctypes.sizeof(ctypes.c_ulong) + ctypes.sizeof(ctypes.c_uint)\nsize2 = ctypes.sizeof(ctypes.c_ushort) + ctypes.sizeof(ctypes.c_uint)\nsize3 = sys.getsizeof(self.UserAgent) + ctypes.sizeof(ctypes.c_uint) + ctypes.sizeof(ctypes.c_bool)\nreturn size1 + size2 + size3", "self.Version = reader.ReadUInt32()\nself.Services = reader.ReadUInt64()\nself.Timestamp = reader.ReadUInt32()\nself.Port = reader.ReadUInt16()\nself.Nonce = reader.ReadUInt32()\nself.UserAgent = reader.ReadVarString().decode('utf-8')\nself.StartHeight = reader.ReadUInt32()\nlogger.debug('Version start height: T %s ' % self.StartHeight)\nself.Relay = reader.ReadBool()", "writer.WriteUInt32(self.Version)\nwriter.WriteUInt64(self.Services)\nwriter.WriteUInt32(self.Timestamp)\nwriter.WriteUInt16(self.Port)\nwriter.WriteUInt32(self.Nonce)\nwriter.WriteVarString(self.UserAgent)\nwriter.WriteUInt32(self.StartHeight)\nwriter.WriteBool(self.Relay)"], "bodies_text": "<|body_start_0|>\n if port and nonce and userAgent:\n self.Port = port\n self.Version = 0\n self.Services = NetworkAddressWithTime.NODE_NETWORK\n self.Timestamp = int(datetime.datetime.utcnow().timestamp())\n self.Nonce = nonce\n self.UserAgent = userAgent\n if Blockchain.Default() is not None and Blockchain.Default().Height is not None:\n self.StartHeight = Blockchain.Default().Height\n self.Relay = True\n<|end_body_0|>\n\n<|body_start_1|>\n size1 = ctypes.sizeof(ctypes.c_uint) + ctypes.sizeof(ctypes.c_ulong) + ctypes.sizeof(ctypes.c_uint)\n size2 = ctypes.sizeof(ctypes.c_ushort) + ctypes.sizeof(ctypes.c_uint)\n size3 = sys.getsizeof(self.UserAgent) + ctypes.sizeof(ctypes.c_uint) + ctypes.sizeof(ctypes.c_bool)\n return size1 + size2 + size3\n<|end_body_1|>\n\n<|body_start_2|>\n self.Version = reader.ReadUInt32()\n self.Services = reader.ReadUInt64()\n self.Timestamp = reader.ReadUInt32()\n self.Port = reader.ReadUInt16()\n self.Nonce = reader.ReadUInt32()\n self.UserAgent = reader.ReadVarString().decode('utf-8')\n self.StartHeight = reader.ReadUInt32()\n logger.debug('Version start height: T %s ' % self.StartHeight)\n self.Relay = reader.ReadBool()\n<|end_body_2|>\n\n<|body_start_3|>\n writer.WriteUInt32(self.Version)\n writer.WriteUInt64(self.Services)\n writer.WriteUInt32(self.Timestamp)\n writer.WriteUInt16(self.Port)\n writer.WriteUInt32(self.Nonce)\n writer.WriteVarString(self.UserAgent)\n writer.WriteUInt32(self.StartHeight)\n writer.WriteBool(self.Relay)\n<|end_body_3|>\n", "class_docstring": "", "class_name": "VersionPayload", "detected_licenses": ["LicenseRef-scancode-free-unknown", "MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass VersionPayload:\n\n def __init__(self, port=None, nonce=None, userAgent=None):\n \"\"\"Create an instance. Args: port (int): nonce (int): userAgent (str): client user agent string.\"\"\"\n <|body_0|>\n\n def Size(self):\n \"\"\"Get the total size in bytes of the object. Returns: int: size.\"\"\"\n <|body_1|>\n\n def Deserialize(self, reader):\n \"\"\"Deserialize full object. 
Args: reader (neo.IO.BinaryReader):\"\"\"\n <|body_2|>\n\n def Serialize(self, writer):\n \"\"\"Serialize object. Args: writer (neo.IO.BinaryWriter):\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if port and nonce and userAgent:\n self.Port = port\n self.Version = 0\n self.Services = NetworkAddressWithTime.NODE_NETWORK\n self.Timestamp = int(datetime.datetime.utcnow().timestamp())\n self.Nonce = nonce\n self.UserAgent = userAgent\n if Blockchain.Default() is not None and Blockchain.Default().Height is not None:\n self.StartHeight = Blockchain.Default().Height\n self.Relay = True\n<|end_body_0|>\n\n<|body_start_1|>\n size1 = ctypes.sizeof(ctypes.c_uint) + ctypes.sizeof(ctypes.c_ulong) + ctypes.sizeof(ctypes.c_uint)\n size2 = ctypes.sizeof(ctypes.c_ushort) + ctypes.sizeof(ctypes.c_uint)\n size3 = sys.getsizeof(self.UserAgent) + ctypes.sizeof(ctypes.c_uint) + ctypes.sizeof(ctypes.c_bool)\n return size1 + size2 + size3\n<|end_body_1|>\n\n<|body_start_2|>\n self.Version = reader.ReadUInt32()\n self.Services = reader.ReadUInt64()\n self.Timestamp = reader.ReadUInt32()\n self.Port = reader.ReadUInt16()\n self.Nonce = reader.ReadUInt32()\n self.UserAgent = reader.ReadVarString().decode('utf-8')\n self.StartHeight = reader.ReadUInt32()\n logger.debug('Version start height: T %s ' % self.StartHeight)\n self.Relay = reader.ReadBool()\n<|end_body_2|>\n\n<|body_start_3|>\n writer.WriteUInt32(self.Version)\n writer.WriteUInt64(self.Services)\n writer.WriteUInt32(self.Timestamp)\n writer.WriteUInt16(self.Port)\n writer.WriteUInt32(self.Nonce)\n writer.WriteVarString(self.UserAgent)\n writer.WriteUInt32(self.StartHeight)\n writer.WriteBool(self.Relay)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000203", "length_bytes": 2733, "license_type": "permissive", "methods": [{"docstring": "Create an instance. Args: port (int): nonce (int): userAgent (str): client user agent string.", "name": "__init__", "signature": "def __init__(self, port=None, nonce=None, userAgent=None)"}, {"docstring": "Get the total size in bytes of the object. Returns: int: size.", "name": "Size", "signature": "def Size(self)"}, {"docstring": "Deserialize full object. Args: reader (neo.IO.BinaryReader):", "name": "Deserialize", "signature": "def Deserialize(self, reader)"}, {"docstring": "Serialize object. Args: writer (neo.IO.BinaryWriter):", "name": "Serialize", "signature": "def Serialize(self, writer)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_test_001704", "prompt": "Implement the Python class `VersionPayload` described below.\n\nClass description:\nImplement the VersionPayload class.\n\nMethod signatures and docstrings:\n- def __init__(self, port=None, nonce=None, userAgent=None): Create an instance. Args: port (int): nonce (int): userAgent (str): client user agent string.\n- def Size(self): Get the total size in bytes of the object. Returns: int: size.\n- def Deserialize(self, reader): Deserialize full object. Args: reader (neo.IO.BinaryReader):\n- def Serialize(self, writer): Serialize object. Args: writer (neo.IO.BinaryWriter):", "prompted_full_text": "Implement the Python class `VersionPayload` described below.\n\nClass description:\nImplement the VersionPayload class.\n\nMethod signatures and docstrings:\n- def __init__(self, port=None, nonce=None, userAgent=None): Create an instance. Args: port (int): nonce (int): userAgent (str): client user agent string.\n- def Size(self): Get the total size in bytes of the object. 
Returns: int: size.\n- def Deserialize(self, reader): Deserialize full object. Args: reader (neo.IO.BinaryReader):\n- def Serialize(self, writer): Serialize object. Args: writer (neo.IO.BinaryWriter):\n\n<|skeleton|>\nclass VersionPayload:\n\n def __init__(self, port=None, nonce=None, userAgent=None):\n \"\"\"Create an instance. Args: port (int): nonce (int): userAgent (str): client user agent string.\"\"\"\n <|body_0|>\n\n def Size(self):\n \"\"\"Get the total size in bytes of the object. Returns: int: size.\"\"\"\n <|body_1|>\n\n def Deserialize(self, reader):\n \"\"\"Deserialize full object. Args: reader (neo.IO.BinaryReader):\"\"\"\n <|body_2|>\n\n def Serialize(self, writer):\n \"\"\"Serialize object. Args: writer (neo.IO.BinaryWriter):\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if port and nonce and userAgent:\n self.Port = port\n self.Version = 0\n self.Services = NetworkAddressWithTime.NODE_NETWORK\n self.Timestamp = int(datetime.datetime.utcnow().timestamp())\n self.Nonce = nonce\n self.UserAgent = userAgent\n if Blockchain.Default() is not None and Blockchain.Default().Height is not None:\n self.StartHeight = Blockchain.Default().Height\n self.Relay = True\n<|end_body_0|>\n\n<|body_start_1|>\n size1 = ctypes.sizeof(ctypes.c_uint) + ctypes.sizeof(ctypes.c_ulong) + ctypes.sizeof(ctypes.c_uint)\n size2 = ctypes.sizeof(ctypes.c_ushort) + ctypes.sizeof(ctypes.c_uint)\n size3 = sys.getsizeof(self.UserAgent) + ctypes.sizeof(ctypes.c_uint) + ctypes.sizeof(ctypes.c_bool)\n return size1 + size2 + size3\n<|end_body_1|>\n\n<|body_start_2|>\n self.Version = reader.ReadUInt32()\n self.Services = reader.ReadUInt64()\n self.Timestamp = reader.ReadUInt32()\n self.Port = reader.ReadUInt16()\n self.Nonce = reader.ReadUInt32()\n self.UserAgent = reader.ReadVarString().decode('utf-8')\n self.StartHeight = reader.ReadUInt32()\n logger.debug('Version start height: T %s ' % self.StartHeight)\n self.Relay = reader.ReadBool()\n<|end_body_2|>\n\n<|body_start_3|>\n writer.WriteUInt32(self.Version)\n writer.WriteUInt64(self.Services)\n writer.WriteUInt32(self.Timestamp)\n writer.WriteUInt16(self.Port)\n writer.WriteUInt32(self.Nonce)\n writer.WriteVarString(self.UserAgent)\n writer.WriteUInt32(self.StartHeight)\n writer.WriteBool(self.Relay)\n<|end_body_3|>\n", "revision_id": "35d967b005741208a7947b2edface5158d177413", "skeleton": "<|skeleton|>\nclass VersionPayload:\n\n def __init__(self, port=None, nonce=None, userAgent=None):\n \"\"\"Create an instance. Args: port (int): nonce (int): userAgent (str): client user agent string.\"\"\"\n <|body_0|>\n\n def Size(self):\n \"\"\"Get the total size in bytes of the object. Returns: int: size.\"\"\"\n <|body_1|>\n\n def Deserialize(self, reader):\n \"\"\"Deserialize full object. Args: reader (neo.IO.BinaryReader):\"\"\"\n <|body_2|>\n\n def Serialize(self, writer):\n \"\"\"Serialize object. Args: writer (neo.IO.BinaryWriter):\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class VersionPayload:\n def __init__(self, port=None, nonce=None, userAgent=None):\n \"\"\"Create an instance. 
Args: port (int): nonce (int): userAgent (str): client user agent string.\"\"\"\n if port and nonce and userAgent:\n self.Port = port\n self.Version = 0\n self.Services = NetworkAddressWithTime.NODE_NETWORK\n self.Timestamp = int(datetime.datetime.utcnow().timestamp())\n self.Nonce = nonce\n self.UserAgent = userAgent\n if Blockchain.Default() is not None and Blockchain.Default().Height is not None:\n self.StartHeight = Blockchain.Default().Height\n self.Relay = True\n\n def Size(self):\n \"\"\"Get the total size in bytes of the object. Returns: int: size.\"\"\"\n size1 = ctypes.sizeof(ctypes.c_uint) + ctypes.sizeof(ctypes.c_ulong) + ctypes.sizeof(ctypes.c_uint)\n size2 = ctypes.sizeof(ctypes.c_ushort) + ctypes.sizeof(ctypes.c_uint)\n size3 = sys.getsizeof(self.UserAgent) + ctypes.sizeof(ctypes.c_uint) + ctypes.sizeof(ctypes.c_bool)\n return size1 + size2 + size3\n\n def Deserialize(self, reader):\n \"\"\"Deserialize full object. Args: reader (neo.IO.BinaryReader):\"\"\"\n self.Version = reader.ReadUInt32()\n self.Services = reader.ReadUInt64()\n self.Timestamp = reader.ReadUInt32()\n self.Port = reader.ReadUInt16()\n self.Nonce = reader.ReadUInt32()\n self.UserAgent = reader.ReadVarString().decode('utf-8')\n self.StartHeight = reader.ReadUInt32()\n logger.debug('Version start height: T %s ' % self.StartHeight)\n self.Relay = reader.ReadBool()\n\n def Serialize(self, writer):\n \"\"\"Serialize object. Args: writer (neo.IO.BinaryWriter):\"\"\"\n writer.WriteUInt32(self.Version)\n writer.WriteUInt64(self.Services)\n writer.WriteUInt32(self.Timestamp)\n writer.WriteUInt16(self.Port)\n writer.WriteUInt32(self.Nonce)\n writer.WriteVarString(self.UserAgent)\n writer.WriteUInt32(self.StartHeight)\n writer.WriteBool(self.Relay)\n", "source": "the_stack_v2_python_sparse", "source_path": "neo/Network/Payloads/VersionPayload.py", "source_repo": "imusify/crowdfunding-blockchain-middleware", "split": "val", "star_events_count": 3}
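Serialize in the VersionPayload record fixes the field order on the wire: uint32 version, uint64 services, uint32 timestamp, uint16 port, uint32 nonce, a length-prefixed user-agent string, uint32 start height, and a bool relay flag. A sketch of that layout with struct, assuming little-endian integers and a single-byte length prefix (neo.IO.BinaryWriter is not shown in the record, so both are assumptions); note that the record's Size() measures something different, since sys.getsizeof includes Python object overhead and ctypes.c_ulong varies by platform:

import struct

def encode_version_payload(version, services, timestamp, port, nonce,
                           user_agent, start_height, relay):
    ua = user_agent.encode('utf-8')
    # A single-byte prefix only covers short agents; real var-strings use a varint.
    assert len(ua) < 0xFD, 'longer agents need a multi-byte length prefix'
    return (struct.pack('<IQIHI', version, services, timestamp, port, nonce)
            + bytes([len(ua)]) + ua
            + struct.pack('<I?', start_height, relay))

blob = encode_version_payload(0, 1, 1600000000, 10333, 1234,
                              '/illustrative-agent/', 5000000, True)
assert len(blob) == 22 + 1 + len('/illustrative-agent/') + 5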
{"blob_id": "aa1ca8eabd813ad105ec5ffab99897856b43b965", "bodies": ["super(BalancedNet, self).__init__()\nself.f1 = nn.Linear(D_in, H)\nself.f2 = nn.Linear(H, D_out)\nself.cf1 = nn.Linear(D_in, H)\nself.cf2 = nn.Linear(H, D_out)", "h_relu = F.elu(self.f1(x))\nf = self.f2(h_relu)\nh_relu = F.elu(self.cf1(x))\ncf = self.cf2(h_relu)\nout = torch.cat((f, cf), dim=1)\nreturn out"], "bodies_text": "<|body_start_0|>\n super(BalancedNet, self).__init__()\n self.f1 = nn.Linear(D_in, H)\n self.f2 = nn.Linear(H, D_out)\n self.cf1 = nn.Linear(D_in, H)\n self.cf2 = nn.Linear(H, D_out)\n<|end_body_0|>\n\n<|body_start_1|>\n h_relu = F.elu(self.f1(x))\n f = self.f2(h_relu)\n h_relu = F.elu(self.cf1(x))\n cf = self.cf2(h_relu)\n out = torch.cat((f, cf), dim=1)\n return out\n<|end_body_1|>\n", "class_docstring": "A torch.model used as a component of the HEMM module to determine the outcome as a function of confounders. The balanced net consists of two different neural networks for the outcome and counteractual.", "class_name": "BalancedNet", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BalancedNet:\n \"\"\"A torch.model used as a component of the HEMM module to determine the outcome as a function of confounders. The balanced net consists of two different neural networks for the outcome and counteractual.\"\"\"\n\n def __init__(self, D_in, H, D_out):\n \"\"\"Instantiate two nn.Linear modules and assign them as member variables. Args: D_in: input dimension H: dimension of hidden layer D_out: output dimension\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"Accept a Variable of input data and return a Variable of output data. We can use Modules defined in the constructor as well as arbitrary operators on Variables.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(BalancedNet, self).__init__()\n self.f1 = nn.Linear(D_in, H)\n self.f2 = nn.Linear(H, D_out)\n self.cf1 = nn.Linear(D_in, H)\n self.cf2 = nn.Linear(H, D_out)\n<|end_body_0|>\n\n<|body_start_1|>\n h_relu = F.elu(self.f1(x))\n f = self.f2(h_relu)\n h_relu = F.elu(self.cf1(x))\n cf = self.cf2(h_relu)\n out = torch.cat((f, cf), dim=1)\n return out\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000204", "length_bytes": 2467, "license_type": "permissive", "methods": [{"docstring": "Instantiate two nn.Linear modules and assign them as member variables. Args: D_in: input dimension H: dimension of hidden layer D_out: output dimension", "name": "__init__", "signature": "def __init__(self, D_in, H, D_out)"}, {"docstring": "Accept a Variable of input data and return a Variable of output data. We can use Modules defined in the constructor as well as arbitrary operators on Variables.", "name": "forward", "signature": "def forward(self, x)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_037056", "prompt": "Implement the Python class `BalancedNet` described below.\n\nClass description:\nA torch.model used as a component of the HEMM module to determine the outcome as a function of confounders. The balanced net consists of two different neural networks for the outcome and counteractual.\n\nMethod signatures and docstrings:\n- def __init__(self, D_in, H, D_out): Instantiate two nn.Linear modules and assign them as member variables. Args: D_in: input dimension H: dimension of hidden layer D_out: output dimension\n- def forward(self, x): Accept a Variable of input data and return a Variable of output data. 
We can use Modules defined in the constructor as well as arbitrary operators on Variables.", "prompted_full_text": "Implement the Python class `BalancedNet` described below.\n\nClass description:\nA torch.model used as a component of the HEMM module to determine the outcome as a function of confounders. The balanced net consists of two different neural networks for the outcome and counteractual.\n\nMethod signatures and docstrings:\n- def __init__(self, D_in, H, D_out): Instantiate two nn.Linear modules and assign them as member variables. Args: D_in: input dimension H: dimension of hidden layer D_out: output dimension\n- def forward(self, x): Accept a Variable of input data and return a Variable of output data. We can use Modules defined in the constructor as well as arbitrary operators on Variables.\n\n<|skeleton|>\nclass BalancedNet:\n \"\"\"A torch.model used as a component of the HEMM module to determine the outcome as a function of confounders. The balanced net consists of two different neural networks for the outcome and counteractual.\"\"\"\n\n def __init__(self, D_in, H, D_out):\n \"\"\"Instantiate two nn.Linear modules and assign them as member variables. Args: D_in: input dimension H: dimension of hidden layer D_out: output dimension\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"Accept a Variable of input data and return a Variable of output data. We can use Modules defined in the constructor as well as arbitrary operators on Variables.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(BalancedNet, self).__init__()\n self.f1 = nn.Linear(D_in, H)\n self.f2 = nn.Linear(H, D_out)\n self.cf1 = nn.Linear(D_in, H)\n self.cf2 = nn.Linear(H, D_out)\n<|end_body_0|>\n\n<|body_start_1|>\n h_relu = F.elu(self.f1(x))\n f = self.f2(h_relu)\n h_relu = F.elu(self.cf1(x))\n cf = self.cf2(h_relu)\n out = torch.cat((f, cf), dim=1)\n return out\n<|end_body_1|>\n", "revision_id": "9f0ddb4696d580cf0a529a6c6ce98b40b34e3796", "skeleton": "<|skeleton|>\nclass BalancedNet:\n \"\"\"A torch.model used as a component of the HEMM module to determine the outcome as a function of confounders. The balanced net consists of two different neural networks for the outcome and counteractual.\"\"\"\n\n def __init__(self, D_in, H, D_out):\n \"\"\"Instantiate two nn.Linear modules and assign them as member variables. Args: D_in: input dimension H: dimension of hidden layer D_out: output dimension\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"Accept a Variable of input data and return a Variable of output data. We can use Modules defined in the constructor as well as arbitrary operators on Variables.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class BalancedNet:\n \"\"\"A torch.model used as a component of the HEMM module to determine the outcome as a function of confounders. The balanced net consists of two different neural networks for the outcome and counteractual.\"\"\"\n\n def __init__(self, D_in, H, D_out):\n \"\"\"Instantiate two nn.Linear modules and assign them as member variables. Args: D_in: input dimension H: dimension of hidden layer D_out: output dimension\"\"\"\n super(BalancedNet, self).__init__()\n self.f1 = nn.Linear(D_in, H)\n self.f2 = nn.Linear(H, D_out)\n self.cf1 = nn.Linear(D_in, H)\n self.cf2 = nn.Linear(H, D_out)\n\n def forward(self, x):\n \"\"\"Accept a Variable of input data and return a Variable of output data. 
We can use Modules defined in the constructor as well as arbitrary operators on Variables.\"\"\"\n h_relu = F.elu(self.f1(x))\n f = self.f2(h_relu)\n h_relu = F.elu(self.cf1(x))\n cf = self.cf2(h_relu)\n out = torch.cat((f, cf), dim=1)\n return out\n", "source": "the_stack_v2_python_sparse", "source_path": "causallib/contrib/hemm/hemm_outcome_models.py", "source_repo": "vishalbelsare/causallib", "split": "val", "star_events_count": 0}
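The BalancedNet record calls super().__init__() and composes nn.Linear layers, which implies an nn.Module base class even though the stored skeleton declares a plain class. A runnable sketch under that assumption, with illustrative layer sizes:

import torch
import torch.nn as nn
import torch.nn.functional as F

class BalancedNetSketch(nn.Module):  # nn.Module base assumed; see note above
    def __init__(self, D_in, H, D_out):
        super().__init__()
        self.f1, self.f2 = nn.Linear(D_in, H), nn.Linear(H, D_out)    # outcome head
        self.cf1, self.cf2 = nn.Linear(D_in, H), nn.Linear(H, D_out)  # counterfactual head

    def forward(self, x):
        f = self.f2(F.elu(self.f1(x)))
        cf = self.cf2(F.elu(self.cf1(x)))
        return torch.cat((f, cf), dim=1)  # outcome and counterfactual side by side

out = BalancedNetSketch(D_in=10, H=32, D_out=1)(torch.randn(4, 10))
assert out.shape == (4, 2)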
{"blob_id": "04eae7f7fd7208ad044ca841d1793d1205b6483f", "bodies": ["graph_template = fg_utils.make_factor_graph(sequence_length=config.train_sequence_length)\npretrained_virtual_sensor_identifier: str = config.pretrained_virtual_sensor_identifier.format(dataset_fold=config.dataset_fold)\nregress_velocities = networks.make_regress_velocities(pretrained_virtual_sensor_identifier)\nregress_uncertainties, learnable_params = networks.make_regress_uncertainties(config.noise_model, pretrained_virtual_sensor_identifier)\noptimizer = optax.chain(optax.clip_by_global_norm(config.max_gradient_norm), optax.adam(learning_rate=utils.warmup_schedule(learning_rate=config.learning_rate, warmup_steps=config.warmup_steps)))\noptimizer_state = optimizer.init(learnable_params)\nreturn TrainState(config=config, optimizer=optimizer, optimizer_state=optimizer_state, regress_velocities=regress_velocities, regress_uncertainties=regress_uncertainties, learnable_params=learnable_params, graph_template=graph_template, prng_key=jax.random.PRNGKey(config.random_seed), steps=0, train=train)", "if trajectory_raw is None:\n trajectory_raw = trajectory.unnormalize()\nif learnable_params is None:\n learnable_params = self.learnable_params\nprng_key_velocities, prng_key_uncertainties = jax.random.split(prng_key)\nvelocities = self.regress_velocities(trajectory.get_stacked_image()[1:, :, :, :])\nuncertainties = self.regress_uncertainties(learnable_params, stacked_images=trajectory.get_stacked_image()[1:, :, :, :], prng_key=prng_key_uncertainties, train=self.train)\ntrajectory_raw = trajectory.unnormalize()\nreturn (fg_utils.update_factor_graph(graph_template=graph_template, trajectory_raw=trajectory_raw, predicted_velocities=velocities, vision_sqrt_precision_diagonal=uncertainties.vision_sqrt_precision_diagonal, dynamics_sqrt_precision_diagonal=uncertainties.dynamics_sqrt_precision_diagonal), (velocities, uncertainties))", "assert len(batched_trajectory.check_shapes_and_get_batch_axes()) == 2\n\ndef compute_loss_single(learnable_params: LearnableParams, trajectory: data.KittiStructNormalized, prng_key: PRNGKey) -> Tuple[jnp.ndarray, _TrainingPerSampleMetadata]:\n \"\"\"Compute training loss for a single trajectory.\"\"\"\n loss_config = self.config.loss_config\n prng_key0, prng_key1 = jax.random.split(prng_key)\n trajectory_raw = trajectory.unnormalize()\n graph, (velocities, uncertainties) = self.update_factor_graph(graph_template=self.graph_template, trajectory=trajectory, prng_key=prng_key0, trajectory_raw=trajectory_raw, learnable_params=learnable_params)\n loss = fg_losses.compute_loss(graph, trajectory_raw, loss_config, prng_key1)\n metadata = _TrainingPerSampleMetadata(training_loss=loss, regressed_velocities=velocities, regressed_uncertainties=uncertainties)\n return (loss, metadata)\n\ndef compute_loss(learnable_params: LearnableParams, prng_key: PRNGKey) -> Tuple[jnp.ndarray, _TrainingPerSampleMetadata]:\n \"\"\"Compute average loss for all trajectories in the batch.\"\"\"\n batch_size: int = batched_trajectory.x.shape[0]\n losses, metadata = jax.vmap(compute_loss_single, in_axes=(None, 0, 0))(learnable_params, batched_trajectory, jax.random.split(prng_key, num=batch_size))\n assert len(losses.shape) == 1\n return (jnp.mean(losses), metadata)\nprng_key, prng_key_new = jax.random.split(self.prng_key)\nper_sample_metadata: _TrainingPerSampleMetadata\n(loss, per_sample_metadata), grads = jax.value_and_grad(compute_loss, argnums=0, has_aux=True)(self.learnable_params, prng_key)\nupdates, optimizer_state_new = 
self.optimizer.update(grads, self.optimizer_state, self.learnable_params)\nlearnable_params_new = optax.apply_updates(self.learnable_params, updates)\nregressed_velocities = per_sample_metadata.regressed_velocities\nregressed_uncertainties = per_sample_metadata.regressed_uncertainties\nlog_data = experiment_files.TensorboardLogData(scalars={'train/training_loss': loss, 'train/gradient_norm': optax.global_norm(grads)}, histograms={'training_losses': per_sample_metadata.training_loss, 'linear_vel': regressed_velocities.linear_vel, 'angular_vel': regressed_velocities.angular_vel, 'linear_uncertainty': regressed_uncertainties.vision_sqrt_precision_diagonal[..., 0], 'angular_uncertainty': regressed_uncertainties.vision_sqrt_precision_diagonal[..., 1], **{f'dynamics_uncertainty_{field}': regressed_uncertainties.dynamics_sqrt_precision_diagonal[..., i] for i, field in enumerate(('x', 'y', 'omega', 'vx', 'vy'))}})\nwith jax_dataclasses.copy_and_mutate(self) as updated_state:\n updated_state.optimizer_state = optimizer_state_new\n updated_state.learnable_params = learnable_params_new\n updated_state.prng_key = prng_key_new\n updated_state.steps = self.steps + 1\nreturn (updated_state, log_data)"], "bodies_text": "<|body_start_0|>\n graph_template = fg_utils.make_factor_graph(sequence_length=config.train_sequence_length)\n pretrained_virtual_sensor_identifier: str = config.pretrained_virtual_sensor_identifier.format(dataset_fold=config.dataset_fold)\n regress_velocities = networks.make_regress_velocities(pretrained_virtual_sensor_identifier)\n regress_uncertainties, learnable_params = networks.make_regress_uncertainties(config.noise_model, pretrained_virtual_sensor_identifier)\n optimizer = optax.chain(optax.clip_by_global_norm(config.max_gradient_norm), optax.adam(learning_rate=utils.warmup_schedule(learning_rate=config.learning_rate, warmup_steps=config.warmup_steps)))\n optimizer_state = optimizer.init(learnable_params)\n return TrainState(config=config, optimizer=optimizer, optimizer_state=optimizer_state, regress_velocities=regress_velocities, regress_uncertainties=regress_uncertainties, learnable_params=learnable_params, graph_template=graph_template, prng_key=jax.random.PRNGKey(config.random_seed), steps=0, train=train)\n<|end_body_0|>\n\n<|body_start_1|>\n if trajectory_raw is None:\n trajectory_raw = trajectory.unnormalize()\n if learnable_params is None:\n learnable_params = self.learnable_params\n prng_key_velocities, prng_key_uncertainties = jax.random.split(prng_key)\n velocities = self.regress_velocities(trajectory.get_stacked_image()[1:, :, :, :])\n uncertainties = self.regress_uncertainties(learnable_params, stacked_images=trajectory.get_stacked_image()[1:, :, :, :], prng_key=prng_key_uncertainties, train=self.train)\n trajectory_raw = trajectory.unnormalize()\n return (fg_utils.update_factor_graph(graph_template=graph_template, trajectory_raw=trajectory_raw, predicted_velocities=velocities, vision_sqrt_precision_diagonal=uncertainties.vision_sqrt_precision_diagonal, dynamics_sqrt_precision_diagonal=uncertainties.dynamics_sqrt_precision_diagonal), (velocities, uncertainties))\n<|end_body_1|>\n\n<|body_start_2|>\n assert len(batched_trajectory.check_shapes_and_get_batch_axes()) == 2\n\n def compute_loss_single(learnable_params: LearnableParams, trajectory: data.KittiStructNormalized, prng_key: PRNGKey) -> Tuple[jnp.ndarray, _TrainingPerSampleMetadata]:\n \"\"\"Compute training loss for a single trajectory.\"\"\"\n loss_config = self.config.loss_config\n prng_key0, prng_key1 = 
jax.random.split(prng_key)\n trajectory_raw = trajectory.unnormalize()\n graph, (velocities, uncertainties) = self.update_factor_graph(graph_template=self.graph_template, trajectory=trajectory, prng_key=prng_key0, trajectory_raw=trajectory_raw, learnable_params=learnable_params)\n loss = fg_losses.compute_loss(graph, trajectory_raw, loss_config, prng_key1)\n metadata = _TrainingPerSampleMetadata(training_loss=loss, regressed_velocities=velocities, regressed_uncertainties=uncertainties)\n return (loss, metadata)\n\n def compute_loss(learnable_params: LearnableParams, prng_key: PRNGKey) -> Tuple[jnp.ndarray, _TrainingPerSampleMetadata]:\n \"\"\"Compute average loss for all trajectories in the batch.\"\"\"\n batch_size: int = batched_trajectory.x.shape[0]\n losses, metadata = jax.vmap(compute_loss_single, in_axes=(None, 0, 0))(learnable_params, batched_trajectory, jax.random.split(prng_key, num=batch_size))\n assert len(losses.shape) == 1\n return (jnp.mean(losses), metadata)\n prng_key, prng_key_new = jax.random.split(self.prng_key)\n per_sample_metadata: _TrainingPerSampleMetadata\n (loss, per_sample_metadata), grads = jax.value_and_grad(compute_loss, argnums=0, has_aux=True)(self.learnable_params, prng_key)\n updates, optimizer_state_new = self.optimizer.update(grads, self.optimizer_state, self.learnable_params)\n learnable_params_new = optax.apply_updates(self.learnable_params, updates)\n regressed_velocities = per_sample_metadata.regressed_velocities\n regressed_uncertainties = per_sample_metadata.regressed_uncertainties\n log_data = experiment_files.TensorboardLogData(scalars={'train/training_loss': loss, 'train/gradient_norm': optax.global_norm(grads)}, histograms={'training_losses': per_sample_metadata.training_loss, 'linear_vel': regressed_velocities.linear_vel, 'angular_vel': regressed_velocities.angular_vel, 'linear_uncertainty': regressed_uncertainties.vision_sqrt_precision_diagonal[..., 0], 'angular_uncertainty': regressed_uncertainties.vision_sqrt_precision_diagonal[..., 1], **{f'dynamics_uncertainty_{field}': regressed_uncertainties.dynamics_sqrt_precision_diagonal[..., i] for i, field in enumerate(('x', 'y', 'omega', 'vx', 'vy'))}})\n with jax_dataclasses.copy_and_mutate(self) as updated_state:\n updated_state.optimizer_state = optimizer_state_new\n updated_state.learnable_params = learnable_params_new\n updated_state.prng_key = prng_key_new\n updated_state.steps = self.steps + 1\n return (updated_state, log_data)\n<|end_body_2|>\n", "class_docstring": "Everything needed for training.", "class_name": "TrainState", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TrainState:\n \"\"\"Everything needed for training.\"\"\"\n\n def initialize(config: experiment_config.FactorGraphExperimentConfig, train: bool) -> 'TrainState':\n \"\"\"Initialize a training state.\"\"\"\n <|body_0|>\n\n def update_factor_graph(self, graph_template: jaxfg.core.StackedFactorGraph, trajectory: data.KittiStructNormalized, prng_key: PRNGKey, *, trajectory_raw: Optional[data.KittiStructRaw]=None, learnable_params: Optional[networks.LearnableParams]=None) -> Tuple[jaxfg.core.StackedFactorGraph, Tuple[networks.RegressedVelocities, networks.RegressedUncertainties]]:\n \"\"\"Update a factor graph for an input trajectory. Optional arguments are generally not needed. 
Returns new factor graph + a metadata tuple.\"\"\"\n <|body_1|>\n\n def training_step(self, batched_trajectory: data.KittiStructNormalized) -> Tuple['TrainState', experiment_files.TensorboardLogData]:\n \"\"\"Single training step.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n graph_template = fg_utils.make_factor_graph(sequence_length=config.train_sequence_length)\n pretrained_virtual_sensor_identifier: str = config.pretrained_virtual_sensor_identifier.format(dataset_fold=config.dataset_fold)\n regress_velocities = networks.make_regress_velocities(pretrained_virtual_sensor_identifier)\n regress_uncertainties, learnable_params = networks.make_regress_uncertainties(config.noise_model, pretrained_virtual_sensor_identifier)\n optimizer = optax.chain(optax.clip_by_global_norm(config.max_gradient_norm), optax.adam(learning_rate=utils.warmup_schedule(learning_rate=config.learning_rate, warmup_steps=config.warmup_steps)))\n optimizer_state = optimizer.init(learnable_params)\n return TrainState(config=config, optimizer=optimizer, optimizer_state=optimizer_state, regress_velocities=regress_velocities, regress_uncertainties=regress_uncertainties, learnable_params=learnable_params, graph_template=graph_template, prng_key=jax.random.PRNGKey(config.random_seed), steps=0, train=train)\n<|end_body_0|>\n\n<|body_start_1|>\n if trajectory_raw is None:\n trajectory_raw = trajectory.unnormalize()\n if learnable_params is None:\n learnable_params = self.learnable_params\n prng_key_velocities, prng_key_uncertainties = jax.random.split(prng_key)\n velocities = self.regress_velocities(trajectory.get_stacked_image()[1:, :, :, :])\n uncertainties = self.regress_uncertainties(learnable_params, stacked_images=trajectory.get_stacked_image()[1:, :, :, :], prng_key=prng_key_uncertainties, train=self.train)\n trajectory_raw = trajectory.unnormalize()\n return (fg_utils.update_factor_graph(graph_template=graph_template, trajectory_raw=trajectory_raw, predicted_velocities=velocities, vision_sqrt_precision_diagonal=uncertainties.vision_sqrt_precision_diagonal, dynamics_sqrt_precision_diagonal=uncertainties.dynamics_sqrt_precision_diagonal), (velocities, uncertainties))\n<|end_body_1|>\n\n<|body_start_2|>\n assert len(batched_trajectory.check_shapes_and_get_batch_axes()) == 2\n\n def compute_loss_single(learnable_params: LearnableParams, trajectory: data.KittiStructNormalized, prng_key: PRNGKey) -> Tuple[jnp.ndarray, _TrainingPerSampleMetadata]:\n \"\"\"Compute training loss for a single trajectory.\"\"\"\n loss_config = self.config.loss_config\n prng_key0, prng_key1 = jax.random.split(prng_key)\n trajectory_raw = trajectory.unnormalize()\n graph, (velocities, uncertainties) = self.update_factor_graph(graph_template=self.graph_template, trajectory=trajectory, prng_key=prng_key0, trajectory_raw=trajectory_raw, learnable_params=learnable_params)\n loss = fg_losses.compute_loss(graph, trajectory_raw, loss_config, prng_key1)\n metadata = _TrainingPerSampleMetadata(training_loss=loss, regressed_velocities=velocities, regressed_uncertainties=uncertainties)\n return (loss, metadata)\n\n def compute_loss(learnable_params: LearnableParams, prng_key: PRNGKey) -> Tuple[jnp.ndarray, _TrainingPerSampleMetadata]:\n \"\"\"Compute average loss for all trajectories in the batch.\"\"\"\n batch_size: int = batched_trajectory.x.shape[0]\n losses, metadata = jax.vmap(compute_loss_single, in_axes=(None, 0, 0))(learnable_params, batched_trajectory, jax.random.split(prng_key, num=batch_size))\n assert len(losses.shape) == 1\n return 
(jnp.mean(losses), metadata)\n prng_key, prng_key_new = jax.random.split(self.prng_key)\n per_sample_metadata: _TrainingPerSampleMetadata\n (loss, per_sample_metadata), grads = jax.value_and_grad(compute_loss, argnums=0, has_aux=True)(self.learnable_params, prng_key)\n updates, optimizer_state_new = self.optimizer.update(grads, self.optimizer_state, self.learnable_params)\n learnable_params_new = optax.apply_updates(self.learnable_params, updates)\n regressed_velocities = per_sample_metadata.regressed_velocities\n regressed_uncertainties = per_sample_metadata.regressed_uncertainties\n log_data = experiment_files.TensorboardLogData(scalars={'train/training_loss': loss, 'train/gradient_norm': optax.global_norm(grads)}, histograms={'training_losses': per_sample_metadata.training_loss, 'linear_vel': regressed_velocities.linear_vel, 'angular_vel': regressed_velocities.angular_vel, 'linear_uncertainty': regressed_uncertainties.vision_sqrt_precision_diagonal[..., 0], 'angular_uncertainty': regressed_uncertainties.vision_sqrt_precision_diagonal[..., 1], **{f'dynamics_uncertainty_{field}': regressed_uncertainties.dynamics_sqrt_precision_diagonal[..., i] for i, field in enumerate(('x', 'y', 'omega', 'vx', 'vy'))}})\n with jax_dataclasses.copy_and_mutate(self) as updated_state:\n updated_state.optimizer_state = optimizer_state_new\n updated_state.learnable_params = learnable_params_new\n updated_state.prng_key = prng_key_new\n updated_state.steps = self.steps + 1\n return (updated_state, log_data)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000205", "length_bytes": 9385, "license_type": "no_license", "methods": [{"docstring": "Initialize a training state.", "name": "initialize", "signature": "def initialize(config: experiment_config.FactorGraphExperimentConfig, train: bool) -> 'TrainState'"}, {"docstring": "Update a factor graph for an input trajectory. Optional arguments are generally not needed. Returns new factor graph + a metadata tuple.", "name": "update_factor_graph", "signature": "def update_factor_graph(self, graph_template: jaxfg.core.StackedFactorGraph, trajectory: data.KittiStructNormalized, prng_key: PRNGKey, *, trajectory_raw: Optional[data.KittiStructRaw]=None, learnable_params: Optional[networks.LearnableParams]=None) -> Tuple[jaxfg.core.StackedFactorGraph, Tuple[networks.RegressedVelocities, networks.RegressedUncertainties]]"}, {"docstring": "Single training step.", "name": "training_step", "signature": "def training_step(self, batched_trajectory: data.KittiStructNormalized) -> Tuple['TrainState', experiment_files.TensorboardLogData]"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_037762", "prompt": "Implement the Python class `TrainState` described below.\n\nClass description:\nEverything needed for training.\n\nMethod signatures and docstrings:\n- def initialize(config: experiment_config.FactorGraphExperimentConfig, train: bool) -> 'TrainState': Initialize a training state.\n- def update_factor_graph(self, graph_template: jaxfg.core.StackedFactorGraph, trajectory: data.KittiStructNormalized, prng_key: PRNGKey, *, trajectory_raw: Optional[data.KittiStructRaw]=None, learnable_params: Optional[networks.LearnableParams]=None) -> Tuple[jaxfg.core.StackedFactorGraph, Tuple[networks.RegressedVelocities, networks.RegressedUncertainties]]: Update a factor graph for an input trajectory. Optional arguments are generally not needed. 
Returns new factor graph + a metadata tuple.\n- def training_step(self, batched_trajectory: data.KittiStructNormalized) -> Tuple['TrainState', experiment_files.TensorboardLogData]: Single training step.", "prompted_full_text": "Implement the Python class `TrainState` described below.\n\nClass description:\nEverything needed for training.\n\nMethod signatures and docstrings:\n- def initialize(config: experiment_config.FactorGraphExperimentConfig, train: bool) -> 'TrainState': Initialize a training state.\n- def update_factor_graph(self, graph_template: jaxfg.core.StackedFactorGraph, trajectory: data.KittiStructNormalized, prng_key: PRNGKey, *, trajectory_raw: Optional[data.KittiStructRaw]=None, learnable_params: Optional[networks.LearnableParams]=None) -> Tuple[jaxfg.core.StackedFactorGraph, Tuple[networks.RegressedVelocities, networks.RegressedUncertainties]]: Update a factor graph for an input trajectory. Optional arguments are generally not needed. Returns new factor graph + a metadata tuple.\n- def training_step(self, batched_trajectory: data.KittiStructNormalized) -> Tuple['TrainState', experiment_files.TensorboardLogData]: Single training step.\n\n<|skeleton|>\nclass TrainState:\n \"\"\"Everything needed for training.\"\"\"\n\n def initialize(config: experiment_config.FactorGraphExperimentConfig, train: bool) -> 'TrainState':\n \"\"\"Initialize a training state.\"\"\"\n <|body_0|>\n\n def update_factor_graph(self, graph_template: jaxfg.core.StackedFactorGraph, trajectory: data.KittiStructNormalized, prng_key: PRNGKey, *, trajectory_raw: Optional[data.KittiStructRaw]=None, learnable_params: Optional[networks.LearnableParams]=None) -> Tuple[jaxfg.core.StackedFactorGraph, Tuple[networks.RegressedVelocities, networks.RegressedUncertainties]]:\n \"\"\"Update a factor graph for an input trajectory. Optional arguments are generally not needed. 
Returns new factor graph + a metadata tuple.\"\"\"\n <|body_1|>\n\n def training_step(self, batched_trajectory: data.KittiStructNormalized) -> Tuple['TrainState', experiment_files.TensorboardLogData]:\n \"\"\"Single training step.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n graph_template = fg_utils.make_factor_graph(sequence_length=config.train_sequence_length)\n pretrained_virtual_sensor_identifier: str = config.pretrained_virtual_sensor_identifier.format(dataset_fold=config.dataset_fold)\n regress_velocities = networks.make_regress_velocities(pretrained_virtual_sensor_identifier)\n regress_uncertainties, learnable_params = networks.make_regress_uncertainties(config.noise_model, pretrained_virtual_sensor_identifier)\n optimizer = optax.chain(optax.clip_by_global_norm(config.max_gradient_norm), optax.adam(learning_rate=utils.warmup_schedule(learning_rate=config.learning_rate, warmup_steps=config.warmup_steps)))\n optimizer_state = optimizer.init(learnable_params)\n return TrainState(config=config, optimizer=optimizer, optimizer_state=optimizer_state, regress_velocities=regress_velocities, regress_uncertainties=regress_uncertainties, learnable_params=learnable_params, graph_template=graph_template, prng_key=jax.random.PRNGKey(config.random_seed), steps=0, train=train)\n<|end_body_0|>\n\n<|body_start_1|>\n if trajectory_raw is None:\n trajectory_raw = trajectory.unnormalize()\n if learnable_params is None:\n learnable_params = self.learnable_params\n prng_key_velocities, prng_key_uncertainties = jax.random.split(prng_key)\n velocities = self.regress_velocities(trajectory.get_stacked_image()[1:, :, :, :])\n uncertainties = self.regress_uncertainties(learnable_params, stacked_images=trajectory.get_stacked_image()[1:, :, :, :], prng_key=prng_key_uncertainties, train=self.train)\n trajectory_raw = trajectory.unnormalize()\n return (fg_utils.update_factor_graph(graph_template=graph_template, trajectory_raw=trajectory_raw, predicted_velocities=velocities, vision_sqrt_precision_diagonal=uncertainties.vision_sqrt_precision_diagonal, dynamics_sqrt_precision_diagonal=uncertainties.dynamics_sqrt_precision_diagonal), (velocities, uncertainties))\n<|end_body_1|>\n\n<|body_start_2|>\n assert len(batched_trajectory.check_shapes_and_get_batch_axes()) == 2\n\n def compute_loss_single(learnable_params: LearnableParams, trajectory: data.KittiStructNormalized, prng_key: PRNGKey) -> Tuple[jnp.ndarray, _TrainingPerSampleMetadata]:\n \"\"\"Compute training loss for a single trajectory.\"\"\"\n loss_config = self.config.loss_config\n prng_key0, prng_key1 = jax.random.split(prng_key)\n trajectory_raw = trajectory.unnormalize()\n graph, (velocities, uncertainties) = self.update_factor_graph(graph_template=self.graph_template, trajectory=trajectory, prng_key=prng_key0, trajectory_raw=trajectory_raw, learnable_params=learnable_params)\n loss = fg_losses.compute_loss(graph, trajectory_raw, loss_config, prng_key1)\n metadata = _TrainingPerSampleMetadata(training_loss=loss, regressed_velocities=velocities, regressed_uncertainties=uncertainties)\n return (loss, metadata)\n\n def compute_loss(learnable_params: LearnableParams, prng_key: PRNGKey) -> Tuple[jnp.ndarray, _TrainingPerSampleMetadata]:\n \"\"\"Compute average loss for all trajectories in the batch.\"\"\"\n batch_size: int = batched_trajectory.x.shape[0]\n losses, metadata = jax.vmap(compute_loss_single, in_axes=(None, 0, 0))(learnable_params, batched_trajectory, jax.random.split(prng_key, num=batch_size))\n assert len(losses.shape) == 1\n return 
(jnp.mean(losses), metadata)\n prng_key, prng_key_new = jax.random.split(self.prng_key)\n per_sample_metadata: _TrainingPerSampleMetadata\n (loss, per_sample_metadata), grads = jax.value_and_grad(compute_loss, argnums=0, has_aux=True)(self.learnable_params, prng_key)\n updates, optimizer_state_new = self.optimizer.update(grads, self.optimizer_state, self.learnable_params)\n learnable_params_new = optax.apply_updates(self.learnable_params, updates)\n regressed_velocities = per_sample_metadata.regressed_velocities\n regressed_uncertainties = per_sample_metadata.regressed_uncertainties\n log_data = experiment_files.TensorboardLogData(scalars={'train/training_loss': loss, 'train/gradient_norm': optax.global_norm(grads)}, histograms={'training_losses': per_sample_metadata.training_loss, 'linear_vel': regressed_velocities.linear_vel, 'angular_vel': regressed_velocities.angular_vel, 'linear_uncertainty': regressed_uncertainties.vision_sqrt_precision_diagonal[..., 0], 'angular_uncertainty': regressed_uncertainties.vision_sqrt_precision_diagonal[..., 1], **{f'dynamics_uncertainty_{field}': regressed_uncertainties.dynamics_sqrt_precision_diagonal[..., i] for i, field in enumerate(('x', 'y', 'omega', 'vx', 'vy'))}})\n with jax_dataclasses.copy_and_mutate(self) as updated_state:\n updated_state.optimizer_state = optimizer_state_new\n updated_state.learnable_params = learnable_params_new\n updated_state.prng_key = prng_key_new\n updated_state.steps = self.steps + 1\n return (updated_state, log_data)\n<|end_body_2|>\n", "revision_id": "639f8a603aba0b9a357422aa2a0704a023fe564d", "skeleton": "<|skeleton|>\nclass TrainState:\n \"\"\"Everything needed for training.\"\"\"\n\n def initialize(config: experiment_config.FactorGraphExperimentConfig, train: bool) -> 'TrainState':\n \"\"\"Initialize a training state.\"\"\"\n <|body_0|>\n\n def update_factor_graph(self, graph_template: jaxfg.core.StackedFactorGraph, trajectory: data.KittiStructNormalized, prng_key: PRNGKey, *, trajectory_raw: Optional[data.KittiStructRaw]=None, learnable_params: Optional[networks.LearnableParams]=None) -> Tuple[jaxfg.core.StackedFactorGraph, Tuple[networks.RegressedVelocities, networks.RegressedUncertainties]]:\n \"\"\"Update a factor graph for an input trajectory. Optional arguments are generally not needed. 
Returns new factor graph + a metadata tuple.\"\"\"\n <|body_1|>\n\n def training_step(self, batched_trajectory: data.KittiStructNormalized) -> Tuple['TrainState', experiment_files.TensorboardLogData]:\n \"\"\"Single training step.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TrainState:\n \"\"\"Everything needed for training.\"\"\"\n\n def initialize(config: experiment_config.FactorGraphExperimentConfig, train: bool) -> 'TrainState':\n \"\"\"Initialize a training state.\"\"\"\n graph_template = fg_utils.make_factor_graph(sequence_length=config.train_sequence_length)\n pretrained_virtual_sensor_identifier: str = config.pretrained_virtual_sensor_identifier.format(dataset_fold=config.dataset_fold)\n regress_velocities = networks.make_regress_velocities(pretrained_virtual_sensor_identifier)\n regress_uncertainties, learnable_params = networks.make_regress_uncertainties(config.noise_model, pretrained_virtual_sensor_identifier)\n optimizer = optax.chain(optax.clip_by_global_norm(config.max_gradient_norm), optax.adam(learning_rate=utils.warmup_schedule(learning_rate=config.learning_rate, warmup_steps=config.warmup_steps)))\n optimizer_state = optimizer.init(learnable_params)\n return TrainState(config=config, optimizer=optimizer, optimizer_state=optimizer_state, regress_velocities=regress_velocities, regress_uncertainties=regress_uncertainties, learnable_params=learnable_params, graph_template=graph_template, prng_key=jax.random.PRNGKey(config.random_seed), steps=0, train=train)\n\n def update_factor_graph(self, graph_template: jaxfg.core.StackedFactorGraph, trajectory: data.KittiStructNormalized, prng_key: PRNGKey, *, trajectory_raw: Optional[data.KittiStructRaw]=None, learnable_params: Optional[networks.LearnableParams]=None) -> Tuple[jaxfg.core.StackedFactorGraph, Tuple[networks.RegressedVelocities, networks.RegressedUncertainties]]:\n \"\"\"Update a factor graph for an input trajectory. Optional arguments are generally not needed. 
Returns new factor graph + a metadata tuple.\"\"\"\n if trajectory_raw is None:\n trajectory_raw = trajectory.unnormalize()\n if learnable_params is None:\n learnable_params = self.learnable_params\n prng_key_velocities, prng_key_uncertainties = jax.random.split(prng_key)\n velocities = self.regress_velocities(trajectory.get_stacked_image()[1:, :, :, :])\n uncertainties = self.regress_uncertainties(learnable_params, stacked_images=trajectory.get_stacked_image()[1:, :, :, :], prng_key=prng_key_uncertainties, train=self.train)\n trajectory_raw = trajectory.unnormalize()\n return (fg_utils.update_factor_graph(graph_template=graph_template, trajectory_raw=trajectory_raw, predicted_velocities=velocities, vision_sqrt_precision_diagonal=uncertainties.vision_sqrt_precision_diagonal, dynamics_sqrt_precision_diagonal=uncertainties.dynamics_sqrt_precision_diagonal), (velocities, uncertainties))\n\n def training_step(self, batched_trajectory: data.KittiStructNormalized) -> Tuple['TrainState', experiment_files.TensorboardLogData]:\n \"\"\"Single training step.\"\"\"\n assert len(batched_trajectory.check_shapes_and_get_batch_axes()) == 2\n\n def compute_loss_single(learnable_params: LearnableParams, trajectory: data.KittiStructNormalized, prng_key: PRNGKey) -> Tuple[jnp.ndarray, _TrainingPerSampleMetadata]:\n \"\"\"Compute training loss for a single trajectory.\"\"\"\n loss_config = self.config.loss_config\n prng_key0, prng_key1 = jax.random.split(prng_key)\n trajectory_raw = trajectory.unnormalize()\n graph, (velocities, uncertainties) = self.update_factor_graph(graph_template=self.graph_template, trajectory=trajectory, prng_key=prng_key0, trajectory_raw=trajectory_raw, learnable_params=learnable_params)\n loss = fg_losses.compute_loss(graph, trajectory_raw, loss_config, prng_key1)\n metadata = _TrainingPerSampleMetadata(training_loss=loss, regressed_velocities=velocities, regressed_uncertainties=uncertainties)\n return (loss, metadata)\n\n def compute_loss(learnable_params: LearnableParams, prng_key: PRNGKey) -> Tuple[jnp.ndarray, _TrainingPerSampleMetadata]:\n \"\"\"Compute average loss for all trajectories in the batch.\"\"\"\n batch_size: int = batched_trajectory.x.shape[0]\n losses, metadata = jax.vmap(compute_loss_single, in_axes=(None, 0, 0))(learnable_params, batched_trajectory, jax.random.split(prng_key, num=batch_size))\n assert len(losses.shape) == 1\n return (jnp.mean(losses), metadata)\n prng_key, prng_key_new = jax.random.split(self.prng_key)\n per_sample_metadata: _TrainingPerSampleMetadata\n (loss, per_sample_metadata), grads = jax.value_and_grad(compute_loss, argnums=0, has_aux=True)(self.learnable_params, prng_key)\n updates, optimizer_state_new = self.optimizer.update(grads, self.optimizer_state, self.learnable_params)\n learnable_params_new = optax.apply_updates(self.learnable_params, updates)\n regressed_velocities = per_sample_metadata.regressed_velocities\n regressed_uncertainties = per_sample_metadata.regressed_uncertainties\n log_data = experiment_files.TensorboardLogData(scalars={'train/training_loss': loss, 'train/gradient_norm': optax.global_norm(grads)}, histograms={'training_losses': per_sample_metadata.training_loss, 'linear_vel': regressed_velocities.linear_vel, 'angular_vel': regressed_velocities.angular_vel, 'linear_uncertainty': regressed_uncertainties.vision_sqrt_precision_diagonal[..., 0], 'angular_uncertainty': regressed_uncertainties.vision_sqrt_precision_diagonal[..., 1], **{f'dynamics_uncertainty_{field}': 
regressed_uncertainties.dynamics_sqrt_precision_diagonal[..., i] for i, field in enumerate(('x', 'y', 'omega', 'vx', 'vy'))}})\n with jax_dataclasses.copy_and_mutate(self) as updated_state:\n updated_state.optimizer_state = optimizer_state_new\n updated_state.learnable_params = learnable_params_new\n updated_state.prng_key = prng_key_new\n updated_state.steps = self.steps + 1\n return (updated_state, log_data)\n", "source": "the_stack_v2_python_sparse", "source_path": "lib/kitti/training_fg.py", "source_repo": "MegaYEye/dfgo", "split": "val", "star_events_count": 0}
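training_step in the TrainState record follows the standard functional JAX update pattern: jax.value_and_grad with has_aux=True yields the loss, per-sample metadata for logging, and gradients; optax then produces a new parameter pytree and optimizer state rather than mutating in place. A stripped-down version of that loop, with a toy loss standing in for the factor-graph objective:

import jax
import jax.numpy as jnp
import optax

params = {'w': jnp.ones(3)}
optimizer = optax.chain(optax.clip_by_global_norm(10.0), optax.adam(1e-3))
opt_state = optimizer.init(params)

def compute_loss(params, x):
    per_sample = (params['w'] * x) ** 2  # placeholder for the factor-graph loss
    # has_aux=True lets the step return metadata alongside the scalar loss,
    # which the record forwards into TensorBoard histograms.
    return jnp.mean(per_sample), {'per_sample_loss': per_sample}

(loss, aux), grads = jax.value_and_grad(compute_loss, has_aux=True)(params, jnp.arange(3.0))
updates, opt_state = optimizer.update(grads, opt_state, params)
params = optax.apply_updates(params, updates)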
{"blob_id": "6308c615f1eaf57354b82a4af1a81cc9abd796b1", "bodies": ["self.__function = function\nself.__args = args\nself.__kwargs = kwargs\nself.__status = False\nself.__thread = False\nself.__lock = _thread.allocate_lock()", "self.__lock.acquire()\nself.__status = True\nif not self.__thread:\n self.__thread = True\n _thread.start_new_thread(self.__run, ())\nself.__lock.release()", "self.__lock.acquire()\nself.__status = False\nself.__lock.release()", "start, next = (_time.time(), 0)\nwhile True:\n next += 1\n sleep = start + next * 0.0864 - _time.time()\n assert sleep >= 0, 'Function Was Too Slow'\n _time.sleep(sleep)\n self.__lock.acquire()\n if not self.__status:\n self.__thread = False\n break\n self.__lock.release()\n self.__function(*self.__args, **self.__kwargs)\nself.__lock.release()"], "bodies_text": "<|body_start_0|>\n self.__function = function\n self.__args = args\n self.__kwargs = kwargs\n self.__status = False\n self.__thread = False\n self.__lock = _thread.allocate_lock()\n<|end_body_0|>\n\n<|body_start_1|>\n self.__lock.acquire()\n self.__status = True\n if not self.__thread:\n self.__thread = True\n _thread.start_new_thread(self.__run, ())\n self.__lock.release()\n<|end_body_1|>\n\n<|body_start_2|>\n self.__lock.acquire()\n self.__status = False\n self.__lock.release()\n<|end_body_2|>\n\n<|body_start_3|>\n start, next = (_time.time(), 0)\n while True:\n next += 1\n sleep = start + next * 0.0864 - _time.time()\n assert sleep >= 0, 'Function Was Too Slow'\n _time.sleep(sleep)\n self.__lock.acquire()\n if not self.__status:\n self.__thread = False\n break\n self.__lock.release()\n self.__function(*self.__args, **self.__kwargs)\n self.__lock.release()\n<|end_body_3|>\n", "class_docstring": "Mille_Timer(function, *args, **kwargs) -> Mille Timer", "class_name": "Mille_Timer", "detected_licenses": ["MIT", "Python-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Mille_Timer:\n \"\"\"Mille_Timer(function, *args, **kwargs) -> Mille Timer\"\"\"\n\n def __init__(self, function, *args, **kwargs):\n \"\"\"Initialize the Mille_Timer object.\"\"\"\n <|body_0|>\n\n def start(self):\n \"\"\"Start the Mille_Timer object.\"\"\"\n <|body_1|>\n\n def stop(self):\n \"\"\"Stop the Mille_Timer object.\"\"\"\n <|body_2|>\n\n def __run(self):\n \"\"\"Private class method.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.__function = function\n self.__args = args\n self.__kwargs = kwargs\n self.__status = False\n self.__thread = False\n self.__lock = _thread.allocate_lock()\n<|end_body_0|>\n\n<|body_start_1|>\n self.__lock.acquire()\n self.__status = True\n if not self.__thread:\n self.__thread = True\n _thread.start_new_thread(self.__run, ())\n self.__lock.release()\n<|end_body_1|>\n\n<|body_start_2|>\n self.__lock.acquire()\n self.__status = False\n self.__lock.release()\n<|end_body_2|>\n\n<|body_start_3|>\n start, next = (_time.time(), 0)\n while True:\n next += 1\n sleep = start + next * 0.0864 - _time.time()\n assert sleep >= 0, 'Function Was Too Slow'\n _time.sleep(sleep)\n self.__lock.acquire()\n if not self.__status:\n self.__thread = False\n break\n self.__lock.release()\n self.__function(*self.__args, **self.__kwargs)\n self.__lock.release()\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000206", "length_bytes": 3709, "license_type": "permissive", "methods": [{"docstring": "Initialize the Mille_Timer object.", "name": "__init__", "signature": "def __init__(self, function, *args, **kwargs)"}, {"docstring": "Start 
the Mille_Timer object.", "name": "start", "signature": "def start(self)"}, {"docstring": "Stop the Mille_Timer object.", "name": "stop", "signature": "def stop(self)"}, {"docstring": "Private class method.", "name": "__run", "signature": "def __run(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_030340", "prompt": "Implement the Python class `Mille_Timer` described below.\n\nClass description:\nMille_Timer(function, *args, **kwargs) -> Mille Timer\n\nMethod signatures and docstrings:\n- def __init__(self, function, *args, **kwargs): Initialize the Mille_Timer object.\n- def start(self): Start the Mille_Timer object.\n- def stop(self): Stop the Mille_Timer object.\n- def __run(self): Private class method.", "prompted_full_text": "Implement the Python class `Mille_Timer` described below.\n\nClass description:\nMille_Timer(function, *args, **kwargs) -> Mille Timer\n\nMethod signatures and docstrings:\n- def __init__(self, function, *args, **kwargs): Initialize the Mille_Timer object.\n- def start(self): Start the Mille_Timer object.\n- def stop(self): Stop the Mille_Timer object.\n- def __run(self): Private class method.\n\n<|skeleton|>\nclass Mille_Timer:\n \"\"\"Mille_Timer(function, *args, **kwargs) -> Mille Timer\"\"\"\n\n def __init__(self, function, *args, **kwargs):\n \"\"\"Initialize the Mille_Timer object.\"\"\"\n <|body_0|>\n\n def start(self):\n \"\"\"Start the Mille_Timer object.\"\"\"\n <|body_1|>\n\n def stop(self):\n \"\"\"Stop the Mille_Timer object.\"\"\"\n <|body_2|>\n\n def __run(self):\n \"\"\"Private class method.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.__function = function\n self.__args = args\n self.__kwargs = kwargs\n self.__status = False\n self.__thread = False\n self.__lock = _thread.allocate_lock()\n<|end_body_0|>\n\n<|body_start_1|>\n self.__lock.acquire()\n self.__status = True\n if not self.__thread:\n self.__thread = True\n _thread.start_new_thread(self.__run, ())\n self.__lock.release()\n<|end_body_1|>\n\n<|body_start_2|>\n self.__lock.acquire()\n self.__status = False\n self.__lock.release()\n<|end_body_2|>\n\n<|body_start_3|>\n start, next = (_time.time(), 0)\n while True:\n next += 1\n sleep = start + next * 0.0864 - _time.time()\n assert sleep >= 0, 'Function Was Too Slow'\n _time.sleep(sleep)\n self.__lock.acquire()\n if not self.__status:\n self.__thread = False\n break\n self.__lock.release()\n self.__function(*self.__args, **self.__kwargs)\n self.__lock.release()\n<|end_body_3|>\n", "revision_id": "d097ca0ad6a6aee2180d32dce6a3322621f655fd", "skeleton": "<|skeleton|>\nclass Mille_Timer:\n \"\"\"Mille_Timer(function, *args, **kwargs) -> Mille Timer\"\"\"\n\n def __init__(self, function, *args, **kwargs):\n \"\"\"Initialize the Mille_Timer object.\"\"\"\n <|body_0|>\n\n def start(self):\n \"\"\"Start the Mille_Timer object.\"\"\"\n <|body_1|>\n\n def stop(self):\n \"\"\"Stop the Mille_Timer object.\"\"\"\n <|body_2|>\n\n def __run(self):\n \"\"\"Private class method.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Mille_Timer:\n \"\"\"Mille_Timer(function, *args, **kwargs) -> Mille Timer\"\"\"\n\n def __init__(self, function, *args, **kwargs):\n \"\"\"Initialize the Mille_Timer object.\"\"\"\n self.__function = function\n self.__args = args\n self.__kwargs = kwargs\n self.__status = False\n self.__thread = False\n self.__lock = 
_thread.allocate_lock()\n\n def start(self):\n \"\"\"Start the Mille_Timer object.\"\"\"\n self.__lock.acquire()\n self.__status = True\n if not self.__thread:\n self.__thread = True\n _thread.start_new_thread(self.__run, ())\n self.__lock.release()\n\n def stop(self):\n \"\"\"Stop the Mille_Timer object.\"\"\"\n self.__lock.acquire()\n self.__status = False\n self.__lock.release()\n\n def __run(self):\n \"\"\"Private class method.\"\"\"\n start, next = (_time.time(), 0)\n while True:\n next += 1\n sleep = start + next * 0.0864 - _time.time()\n assert sleep >= 0, 'Function Was Too Slow'\n _time.sleep(sleep)\n self.__lock.acquire()\n if not self.__status:\n self.__thread = False\n break\n self.__lock.release()\n self.__function(*self.__args, **self.__kwargs)\n self.__lock.release()\n", "source": "the_stack_v2_python_sparse", "source_path": "recipes/Python/502238_Aens_Time/recipe-502238.py", "source_repo": "betty29/code-1", "split": "val", "star_events_count": 0}
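
Note on the Mille_Timer record above. The stored solution schedules each tick against the start time so drift does not accumulate, but it has two rough edges: a callback that overruns the 0.0864 s interval kills the worker thread with AssertionError, and the loop variable shadows the builtin next. The sketch below shows the same periodic-callback pattern on the high-level threading module; the class name, the keyword-only interval argument, and the catch-up behaviour are assumptions of this sketch, not part of the record.

import threading
import time


class RepeatTimer:
    """Illustrative stand-in for Mille_Timer, built on threading.Event."""

    def __init__(self, function, *args, interval=0.0864, **kwargs):
        self._function = function
        self._args = args
        self._kwargs = kwargs
        self._interval = interval
        self._stop = threading.Event()
        self._worker = None

    def start(self):
        # Idempotent like the record's start(): at most one worker thread.
        if self._worker is None or not self._worker.is_alive():
            self._stop.clear()
            self._worker = threading.Thread(target=self._run, daemon=True)
            self._worker.start()

    def stop(self):
        self._stop.set()

    def _run(self):
        # Deadlines derive from the origin time, as in the record; a slow
        # callback produces a zero-length wait and the loop catches up
        # instead of asserting.
        origin, tick = time.monotonic(), 0
        while True:
            tick += 1
            delay = max(0.0, origin + tick * self._interval - time.monotonic())
            if self._stop.wait(delay):
                break
            self._function(*self._args, **self._kwargs)

Usage mirrors the record: RepeatTimer(print, 'tick').start(), then stop() from any thread.
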
{"blob_id": "fc1bcccfa34db06e8bb0dd22ff9de0e01c72d05a", "bodies": ["airplanes_list = []\nairplanes_file = open('csv_files/Airplane.csv', 'r')\nairplane_type_file = open('csv_files/AirplaneType.csv', 'r')\nplanes_reader = csv.DictReader(airplanes_file)\nplane_type_reader = csv.DictReader(airplane_type_file)\nfor row in planes_reader:\n plane_name = row['name']\n plane_id = row['planeId']\n plane_type = row['planeTypeId']\n for row in plane_type_reader:\n if row['planeTypeId'] == plane_type:\n plane_capacity = row['capacity']\n airplane = Airplane(plane_name, plane_id, plane_type, plane_capacity)\n airplanes_list.append(airplane)\nreturn airplanes_list", "airplane_types_list = []\nplaneType_file = open('csv_files/AirplaneType.csv', 'r')\ntype_reader = csv.DictReader(planeType_file)\nfor row in type_reader:\n type_id = row['planeTypeId']\n type_capacity = ['capacity']\n airplaneType = AirplaneType(type_id, type_capacity)\n airplane_types_list.append(airplaneType)\nreturn airplane_types_list", "airplane_csv_str = new_airplane.instance_to_csv_string() + '\\n'\nairplane_file = open('csv_files/Airplane.csv', 'a+')\nairplane_file.write(airplane_csv_str)\nairplane_file.close()"], "bodies_text": "<|body_start_0|>\n airplanes_list = []\n airplanes_file = open('csv_files/Airplane.csv', 'r')\n airplane_type_file = open('csv_files/AirplaneType.csv', 'r')\n planes_reader = csv.DictReader(airplanes_file)\n plane_type_reader = csv.DictReader(airplane_type_file)\n for row in planes_reader:\n plane_name = row['name']\n plane_id = row['planeId']\n plane_type = row['planeTypeId']\n for row in plane_type_reader:\n if row['planeTypeId'] == plane_type:\n plane_capacity = row['capacity']\n airplane = Airplane(plane_name, plane_id, plane_type, plane_capacity)\n airplanes_list.append(airplane)\n return airplanes_list\n<|end_body_0|>\n\n<|body_start_1|>\n airplane_types_list = []\n planeType_file = open('csv_files/AirplaneType.csv', 'r')\n type_reader = csv.DictReader(planeType_file)\n for row in type_reader:\n type_id = row['planeTypeId']\n type_capacity = ['capacity']\n airplaneType = AirplaneType(type_id, type_capacity)\n airplane_types_list.append(airplaneType)\n return airplane_types_list\n<|end_body_1|>\n\n<|body_start_2|>\n airplane_csv_str = new_airplane.instance_to_csv_string() + '\\n'\n airplane_file = open('csv_files/Airplane.csv', 'a+')\n airplane_file.write(airplane_csv_str)\n airplane_file.close()\n<|end_body_2|>\n", "class_docstring": "", "class_name": "AirplaneIO", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AirplaneIO:\n\n def load_all_airplanes(self):\n \"\"\"Reads into the database. Returns an list of all airplanes as instances\"\"\"\n <|body_0|>\n\n def load_airplane_types(self):\n \"\"\"Reads into the database. 
Returns a list of all instances of airplane types\"\"\"\n <|body_1|>\n\n def store_new_airplane(self, new_airplane):\n \"\"\"Stores new airplane to the existing file\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n airplanes_list = []\n airplanes_file = open('csv_files/Airplane.csv', 'r')\n airplane_type_file = open('csv_files/AirplaneType.csv', 'r')\n planes_reader = csv.DictReader(airplanes_file)\n plane_type_reader = csv.DictReader(airplane_type_file)\n for row in planes_reader:\n plane_name = row['name']\n plane_id = row['planeId']\n plane_type = row['planeTypeId']\n for row in plane_type_reader:\n if row['planeTypeId'] == plane_type:\n plane_capacity = row['capacity']\n airplane = Airplane(plane_name, plane_id, plane_type, plane_capacity)\n airplanes_list.append(airplane)\n return airplanes_list\n<|end_body_0|>\n\n<|body_start_1|>\n airplane_types_list = []\n planeType_file = open('csv_files/AirplaneType.csv', 'r')\n type_reader = csv.DictReader(planeType_file)\n for row in type_reader:\n type_id = row['planeTypeId']\n type_capacity = ['capacity']\n airplaneType = AirplaneType(type_id, type_capacity)\n airplane_types_list.append(airplaneType)\n return airplane_types_list\n<|end_body_1|>\n\n<|body_start_2|>\n airplane_csv_str = new_airplane.instance_to_csv_string() + '\\n'\n airplane_file = open('csv_files/Airplane.csv', 'a+')\n airplane_file.write(airplane_csv_str)\n airplane_file.close()\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000207", "length_bytes": 1913, "license_type": "no_license", "methods": [{"docstring": "Reads into the database. Returns an list of all airplanes as instances", "name": "load_all_airplanes", "signature": "def load_all_airplanes(self)"}, {"docstring": "Reads into the database. Returns a list of all instances of airplane types", "name": "load_airplane_types", "signature": "def load_airplane_types(self)"}, {"docstring": "Stores new airplane to the existing file", "name": "store_new_airplane", "signature": "def store_new_airplane(self, new_airplane)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_004278", "prompt": "Implement the Python class `AirplaneIO` described below.\n\nClass description:\nImplement the AirplaneIO class.\n\nMethod signatures and docstrings:\n- def load_all_airplanes(self): Reads into the database. Returns an list of all airplanes as instances\n- def load_airplane_types(self): Reads into the database. Returns a list of all instances of airplane types\n- def store_new_airplane(self, new_airplane): Stores new airplane to the existing file", "prompted_full_text": "Implement the Python class `AirplaneIO` described below.\n\nClass description:\nImplement the AirplaneIO class.\n\nMethod signatures and docstrings:\n- def load_all_airplanes(self): Reads into the database. Returns an list of all airplanes as instances\n- def load_airplane_types(self): Reads into the database. Returns a list of all instances of airplane types\n- def store_new_airplane(self, new_airplane): Stores new airplane to the existing file\n\n<|skeleton|>\nclass AirplaneIO:\n\n def load_all_airplanes(self):\n \"\"\"Reads into the database. Returns an list of all airplanes as instances\"\"\"\n <|body_0|>\n\n def load_airplane_types(self):\n \"\"\"Reads into the database. 
Returns a list of all instances of airplane types\"\"\"\n <|body_1|>\n\n def store_new_airplane(self, new_airplane):\n \"\"\"Stores new airplane to the existing file\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n airplanes_list = []\n airplanes_file = open('csv_files/Airplane.csv', 'r')\n airplane_type_file = open('csv_files/AirplaneType.csv', 'r')\n planes_reader = csv.DictReader(airplanes_file)\n plane_type_reader = csv.DictReader(airplane_type_file)\n for row in planes_reader:\n plane_name = row['name']\n plane_id = row['planeId']\n plane_type = row['planeTypeId']\n for row in plane_type_reader:\n if row['planeTypeId'] == plane_type:\n plane_capacity = row['capacity']\n airplane = Airplane(plane_name, plane_id, plane_type, plane_capacity)\n airplanes_list.append(airplane)\n return airplanes_list\n<|end_body_0|>\n\n<|body_start_1|>\n airplane_types_list = []\n planeType_file = open('csv_files/AirplaneType.csv', 'r')\n type_reader = csv.DictReader(planeType_file)\n for row in type_reader:\n type_id = row['planeTypeId']\n type_capacity = ['capacity']\n airplaneType = AirplaneType(type_id, type_capacity)\n airplane_types_list.append(airplaneType)\n return airplane_types_list\n<|end_body_1|>\n\n<|body_start_2|>\n airplane_csv_str = new_airplane.instance_to_csv_string() + '\\n'\n airplane_file = open('csv_files/Airplane.csv', 'a+')\n airplane_file.write(airplane_csv_str)\n airplane_file.close()\n<|end_body_2|>\n", "revision_id": "5dbce2a3d1cdc8a0614252fb77685211b395c2df", "skeleton": "<|skeleton|>\nclass AirplaneIO:\n\n def load_all_airplanes(self):\n \"\"\"Reads into the database. Returns an list of all airplanes as instances\"\"\"\n <|body_0|>\n\n def load_airplane_types(self):\n \"\"\"Reads into the database. Returns a list of all instances of airplane types\"\"\"\n <|body_1|>\n\n def store_new_airplane(self, new_airplane):\n \"\"\"Stores new airplane to the existing file\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class AirplaneIO:\n def load_all_airplanes(self):\n \"\"\"Reads into the database. Returns an list of all airplanes as instances\"\"\"\n airplanes_list = []\n airplanes_file = open('csv_files/Airplane.csv', 'r')\n airplane_type_file = open('csv_files/AirplaneType.csv', 'r')\n planes_reader = csv.DictReader(airplanes_file)\n plane_type_reader = csv.DictReader(airplane_type_file)\n for row in planes_reader:\n plane_name = row['name']\n plane_id = row['planeId']\n plane_type = row['planeTypeId']\n for row in plane_type_reader:\n if row['planeTypeId'] == plane_type:\n plane_capacity = row['capacity']\n airplane = Airplane(plane_name, plane_id, plane_type, plane_capacity)\n airplanes_list.append(airplane)\n return airplanes_list\n\n def load_airplane_types(self):\n \"\"\"Reads into the database. 
Returns a list of all instances of airplane types\"\"\"\n airplane_types_list = []\n planeType_file = open('csv_files/AirplaneType.csv', 'r')\n type_reader = csv.DictReader(planeType_file)\n for row in type_reader:\n type_id = row['planeTypeId']\n type_capacity = ['capacity']\n airplaneType = AirplaneType(type_id, type_capacity)\n airplane_types_list.append(airplaneType)\n return airplane_types_list\n\n def store_new_airplane(self, new_airplane):\n \"\"\"Stores new airplane to the existing file\"\"\"\n airplane_csv_str = new_airplane.instance_to_csv_string() + '\\n'\n airplane_file = open('csv_files/Airplane.csv', 'a+')\n airplane_file.write(airplane_csv_str)\n airplane_file.close()\n", "source": "the_stack_v2_python_sparse", "source_path": "DataLayer/AirplaneIO.py", "source_repo": "svana00/VLN1-NaN-Air", "split": "val", "star_events_count": 0}
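
The AirplaneIO record above carries two defects, left in place here because blob_id and length_bytes fingerprint the stored text: in load_airplane_types, type_capacity = ['capacity'] binds a literal one-element list instead of reading row['capacity'], and in load_all_airplanes the inner for row in plane_type_reader both rebinds row and exhausts the DictReader, so every plane after the first gets no capacity. A corrected sketch under the same assumed CSV layout, with the Airplane constructor injected so the example has no hidden imports:

import csv


def load_all_airplanes(make_airplane):
    # make_airplane stands in for the record's Airplane class.
    # Load the type file once into a lookup table so no reader is shared
    # between loops, then join each plane to its capacity.
    with open('csv_files/AirplaneType.csv', newline='') as type_file:
        capacity_by_type = {row['planeTypeId']: row['capacity']
                            for row in csv.DictReader(type_file)}
    airplanes = []
    with open('csv_files/Airplane.csv', newline='') as plane_file:
        for row in csv.DictReader(plane_file):
            airplanes.append(make_airplane(row['name'], row['planeId'],
                                           row['planeTypeId'],
                                           capacity_by_type.get(row['planeTypeId'])))
    return airplanes
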
{"blob_id": "24a98631f87f41fd6d4bfa5878846e671d8f5814", "bodies": ["QtGui.QLabel.__init__(self, parent)\nself.setAutoFillBackground(True)\nself.setScaledContents(True)\nself.setMargin(0)\nself.setFocusPolicy(QtCore.Qt.ClickFocus)\nself.cellWidget = None\nlayout = QtGui.QGridLayout(self)\nlayout.setSpacing(2)\nlayout.setMargin(self.margin())\nlayout.setRowStretch(1, 1)\nself.setLayout(layout)\nself.info = QPipelineInfo()\nlayout.addWidget(self.info, 0, 0, 1, 2)\nself.manipulator = QCellManipulator()\nlayout.addWidget(self.manipulator, 1, 0, 1, 2)", "self.cellWidget = cellWidget\nif cellWidget:\n if hasattr(cellWidget, 'grabWindowPixmap'):\n bgPixmap = cellWidget.grabWindowPixmap()\n else:\n bgPixmap = QtGui.QPixmap.grabWidget(cellWidget)\n self.info.show()\nelse:\n self.info.hide()\n bgPixmap = QtGui.QPixmap.grabWidget(self)\nself.thumbnail = QtGui.QPixmap(bgPixmap)\npainter = QtGui.QPainter(bgPixmap)\npainter.fillRect(bgPixmap.rect(), QtGui.QBrush(QtGui.QColor(175, 198, 229, 196)))\npainter.end()\nself.setPixmap(bgPixmap)", "self.manipulator.assignCell(sheet, row, col)\nself.assignCellWidget(sheet.getCell(row, col))\ninfo = sheet.getCellPipelineInfo(row, col)\nself.info.updateInfo(info)", "cellWidget = self.cellWidget\nself.assignCellWidget(None)\nself.manipulator.assignCell(None, -1, -1)\nif cellWidget:\n cellWidget.setParent(None)\nreturn cellWidget", "if self.cellWidget:\n self.cellWidget.deleteLater()\nQtGui.QLabel.deleteLater(self)"], "bodies_text": "<|body_start_0|>\n QtGui.QLabel.__init__(self, parent)\n self.setAutoFillBackground(True)\n self.setScaledContents(True)\n self.setMargin(0)\n self.setFocusPolicy(QtCore.Qt.ClickFocus)\n self.cellWidget = None\n layout = QtGui.QGridLayout(self)\n layout.setSpacing(2)\n layout.setMargin(self.margin())\n layout.setRowStretch(1, 1)\n self.setLayout(layout)\n self.info = QPipelineInfo()\n layout.addWidget(self.info, 0, 0, 1, 2)\n self.manipulator = QCellManipulator()\n layout.addWidget(self.manipulator, 1, 0, 1, 2)\n<|end_body_0|>\n\n<|body_start_1|>\n self.cellWidget = cellWidget\n if cellWidget:\n if hasattr(cellWidget, 'grabWindowPixmap'):\n bgPixmap = cellWidget.grabWindowPixmap()\n else:\n bgPixmap = QtGui.QPixmap.grabWidget(cellWidget)\n self.info.show()\n else:\n self.info.hide()\n bgPixmap = QtGui.QPixmap.grabWidget(self)\n self.thumbnail = QtGui.QPixmap(bgPixmap)\n painter = QtGui.QPainter(bgPixmap)\n painter.fillRect(bgPixmap.rect(), QtGui.QBrush(QtGui.QColor(175, 198, 229, 196)))\n painter.end()\n self.setPixmap(bgPixmap)\n<|end_body_1|>\n\n<|body_start_2|>\n self.manipulator.assignCell(sheet, row, col)\n self.assignCellWidget(sheet.getCell(row, col))\n info = sheet.getCellPipelineInfo(row, col)\n self.info.updateInfo(info)\n<|end_body_2|>\n\n<|body_start_3|>\n cellWidget = self.cellWidget\n self.assignCellWidget(None)\n self.manipulator.assignCell(None, -1, -1)\n if cellWidget:\n cellWidget.setParent(None)\n return cellWidget\n<|end_body_3|>\n\n<|body_start_4|>\n if self.cellWidget:\n self.cellWidget.deleteLater()\n QtGui.QLabel.deleteLater(self)\n<|end_body_4|>\n", "class_docstring": "QCellPresenter represents a cell in the Editing Mode. It has an info bar on top and control dragable icons on the bottom", "class_name": "QCellPresenter", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass QCellPresenter:\n \"\"\"QCellPresenter represents a cell in the Editing Mode. 
It has an info bar on top and control dragable icons on the bottom\"\"\"\n\n def __init__(self, parent=None):\n \"\"\"QCellPresenter(parent: QWidget) -> QCellPresenter Create the layout of the widget\"\"\"\n <|body_0|>\n\n def assignCellWidget(self, cellWidget):\n \"\"\"updateFromCellWidget(cellWidget: QWidget) -> None Assign a cell widget to this presenter\"\"\"\n <|body_1|>\n\n def assignCell(self, sheet, row, col):\n \"\"\"assignCell(sheet: Sheet, row: int, col: int) -> None Assign a sheet cell to the presenter\"\"\"\n <|body_2|>\n\n def releaseCellWidget(self):\n \"\"\"releaseCellWidget() -> QWidget Return the ownership of self.cellWidget to the caller\"\"\"\n <|body_3|>\n\n def deleteLater(self):\n \"\"\"deleteLater() -> None Make sure to delete the cell widget if it exists\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n QtGui.QLabel.__init__(self, parent)\n self.setAutoFillBackground(True)\n self.setScaledContents(True)\n self.setMargin(0)\n self.setFocusPolicy(QtCore.Qt.ClickFocus)\n self.cellWidget = None\n layout = QtGui.QGridLayout(self)\n layout.setSpacing(2)\n layout.setMargin(self.margin())\n layout.setRowStretch(1, 1)\n self.setLayout(layout)\n self.info = QPipelineInfo()\n layout.addWidget(self.info, 0, 0, 1, 2)\n self.manipulator = QCellManipulator()\n layout.addWidget(self.manipulator, 1, 0, 1, 2)\n<|end_body_0|>\n\n<|body_start_1|>\n self.cellWidget = cellWidget\n if cellWidget:\n if hasattr(cellWidget, 'grabWindowPixmap'):\n bgPixmap = cellWidget.grabWindowPixmap()\n else:\n bgPixmap = QtGui.QPixmap.grabWidget(cellWidget)\n self.info.show()\n else:\n self.info.hide()\n bgPixmap = QtGui.QPixmap.grabWidget(self)\n self.thumbnail = QtGui.QPixmap(bgPixmap)\n painter = QtGui.QPainter(bgPixmap)\n painter.fillRect(bgPixmap.rect(), QtGui.QBrush(QtGui.QColor(175, 198, 229, 196)))\n painter.end()\n self.setPixmap(bgPixmap)\n<|end_body_1|>\n\n<|body_start_2|>\n self.manipulator.assignCell(sheet, row, col)\n self.assignCellWidget(sheet.getCell(row, col))\n info = sheet.getCellPipelineInfo(row, col)\n self.info.updateInfo(info)\n<|end_body_2|>\n\n<|body_start_3|>\n cellWidget = self.cellWidget\n self.assignCellWidget(None)\n self.manipulator.assignCell(None, -1, -1)\n if cellWidget:\n cellWidget.setParent(None)\n return cellWidget\n<|end_body_3|>\n\n<|body_start_4|>\n if self.cellWidget:\n self.cellWidget.deleteLater()\n QtGui.QLabel.deleteLater(self)\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000208", "length_bytes": 43413, "license_type": "permissive", "methods": [{"docstring": "QCellPresenter(parent: QWidget) -> QCellPresenter Create the layout of the widget", "name": "__init__", "signature": "def __init__(self, parent=None)"}, {"docstring": "updateFromCellWidget(cellWidget: QWidget) -> None Assign a cell widget to this presenter", "name": "assignCellWidget", "signature": "def assignCellWidget(self, cellWidget)"}, {"docstring": "assignCell(sheet: Sheet, row: int, col: int) -> None Assign a sheet cell to the presenter", "name": "assignCell", "signature": "def assignCell(self, sheet, row, col)"}, {"docstring": "releaseCellWidget() -> QWidget Return the ownership of self.cellWidget to the caller", "name": "releaseCellWidget", "signature": "def releaseCellWidget(self)"}, {"docstring": "deleteLater() -> None Make sure to delete the cell widget if it exists", "name": "deleteLater", "signature": "def deleteLater(self)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_038175", "prompt": "Implement the Python class `QCellPresenter` 
described below.\n\nClass description:\nQCellPresenter represents a cell in the Editing Mode. It has an info bar on top and control dragable icons on the bottom\n\nMethod signatures and docstrings:\n- def __init__(self, parent=None): QCellPresenter(parent: QWidget) -> QCellPresenter Create the layout of the widget\n- def assignCellWidget(self, cellWidget): updateFromCellWidget(cellWidget: QWidget) -> None Assign a cell widget to this presenter\n- def assignCell(self, sheet, row, col): assignCell(sheet: Sheet, row: int, col: int) -> None Assign a sheet cell to the presenter\n- def releaseCellWidget(self): releaseCellWidget() -> QWidget Return the ownership of self.cellWidget to the caller\n- def deleteLater(self): deleteLater() -> None Make sure to delete the cell widget if it exists", "prompted_full_text": "Implement the Python class `QCellPresenter` described below.\n\nClass description:\nQCellPresenter represents a cell in the Editing Mode. It has an info bar on top and control dragable icons on the bottom\n\nMethod signatures and docstrings:\n- def __init__(self, parent=None): QCellPresenter(parent: QWidget) -> QCellPresenter Create the layout of the widget\n- def assignCellWidget(self, cellWidget): updateFromCellWidget(cellWidget: QWidget) -> None Assign a cell widget to this presenter\n- def assignCell(self, sheet, row, col): assignCell(sheet: Sheet, row: int, col: int) -> None Assign a sheet cell to the presenter\n- def releaseCellWidget(self): releaseCellWidget() -> QWidget Return the ownership of self.cellWidget to the caller\n- def deleteLater(self): deleteLater() -> None Make sure to delete the cell widget if it exists\n\n<|skeleton|>\nclass QCellPresenter:\n \"\"\"QCellPresenter represents a cell in the Editing Mode. It has an info bar on top and control dragable icons on the bottom\"\"\"\n\n def __init__(self, parent=None):\n \"\"\"QCellPresenter(parent: QWidget) -> QCellPresenter Create the layout of the widget\"\"\"\n <|body_0|>\n\n def assignCellWidget(self, cellWidget):\n \"\"\"updateFromCellWidget(cellWidget: QWidget) -> None Assign a cell widget to this presenter\"\"\"\n <|body_1|>\n\n def assignCell(self, sheet, row, col):\n \"\"\"assignCell(sheet: Sheet, row: int, col: int) -> None Assign a sheet cell to the presenter\"\"\"\n <|body_2|>\n\n def releaseCellWidget(self):\n \"\"\"releaseCellWidget() -> QWidget Return the ownership of self.cellWidget to the caller\"\"\"\n <|body_3|>\n\n def deleteLater(self):\n \"\"\"deleteLater() -> None Make sure to delete the cell widget if it exists\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n QtGui.QLabel.__init__(self, parent)\n self.setAutoFillBackground(True)\n self.setScaledContents(True)\n self.setMargin(0)\n self.setFocusPolicy(QtCore.Qt.ClickFocus)\n self.cellWidget = None\n layout = QtGui.QGridLayout(self)\n layout.setSpacing(2)\n layout.setMargin(self.margin())\n layout.setRowStretch(1, 1)\n self.setLayout(layout)\n self.info = QPipelineInfo()\n layout.addWidget(self.info, 0, 0, 1, 2)\n self.manipulator = QCellManipulator()\n layout.addWidget(self.manipulator, 1, 0, 1, 2)\n<|end_body_0|>\n\n<|body_start_1|>\n self.cellWidget = cellWidget\n if cellWidget:\n if hasattr(cellWidget, 'grabWindowPixmap'):\n bgPixmap = cellWidget.grabWindowPixmap()\n else:\n bgPixmap = QtGui.QPixmap.grabWidget(cellWidget)\n self.info.show()\n else:\n self.info.hide()\n bgPixmap = QtGui.QPixmap.grabWidget(self)\n self.thumbnail = QtGui.QPixmap(bgPixmap)\n painter = QtGui.QPainter(bgPixmap)\n painter.fillRect(bgPixmap.rect(), 
QtGui.QBrush(QtGui.QColor(175, 198, 229, 196)))\n painter.end()\n self.setPixmap(bgPixmap)\n<|end_body_1|>\n\n<|body_start_2|>\n self.manipulator.assignCell(sheet, row, col)\n self.assignCellWidget(sheet.getCell(row, col))\n info = sheet.getCellPipelineInfo(row, col)\n self.info.updateInfo(info)\n<|end_body_2|>\n\n<|body_start_3|>\n cellWidget = self.cellWidget\n self.assignCellWidget(None)\n self.manipulator.assignCell(None, -1, -1)\n if cellWidget:\n cellWidget.setParent(None)\n return cellWidget\n<|end_body_3|>\n\n<|body_start_4|>\n if self.cellWidget:\n self.cellWidget.deleteLater()\n QtGui.QLabel.deleteLater(self)\n<|end_body_4|>\n", "revision_id": "23ef56ec24b85c82416e1437a08381635328abe5", "skeleton": "<|skeleton|>\nclass QCellPresenter:\n \"\"\"QCellPresenter represents a cell in the Editing Mode. It has an info bar on top and control dragable icons on the bottom\"\"\"\n\n def __init__(self, parent=None):\n \"\"\"QCellPresenter(parent: QWidget) -> QCellPresenter Create the layout of the widget\"\"\"\n <|body_0|>\n\n def assignCellWidget(self, cellWidget):\n \"\"\"updateFromCellWidget(cellWidget: QWidget) -> None Assign a cell widget to this presenter\"\"\"\n <|body_1|>\n\n def assignCell(self, sheet, row, col):\n \"\"\"assignCell(sheet: Sheet, row: int, col: int) -> None Assign a sheet cell to the presenter\"\"\"\n <|body_2|>\n\n def releaseCellWidget(self):\n \"\"\"releaseCellWidget() -> QWidget Return the ownership of self.cellWidget to the caller\"\"\"\n <|body_3|>\n\n def deleteLater(self):\n \"\"\"deleteLater() -> None Make sure to delete the cell widget if it exists\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class QCellPresenter:\n \"\"\"QCellPresenter represents a cell in the Editing Mode. 
It has an info bar on top and control dragable icons on the bottom\"\"\"\n\n def __init__(self, parent=None):\n \"\"\"QCellPresenter(parent: QWidget) -> QCellPresenter Create the layout of the widget\"\"\"\n QtGui.QLabel.__init__(self, parent)\n self.setAutoFillBackground(True)\n self.setScaledContents(True)\n self.setMargin(0)\n self.setFocusPolicy(QtCore.Qt.ClickFocus)\n self.cellWidget = None\n layout = QtGui.QGridLayout(self)\n layout.setSpacing(2)\n layout.setMargin(self.margin())\n layout.setRowStretch(1, 1)\n self.setLayout(layout)\n self.info = QPipelineInfo()\n layout.addWidget(self.info, 0, 0, 1, 2)\n self.manipulator = QCellManipulator()\n layout.addWidget(self.manipulator, 1, 0, 1, 2)\n\n def assignCellWidget(self, cellWidget):\n \"\"\"updateFromCellWidget(cellWidget: QWidget) -> None Assign a cell widget to this presenter\"\"\"\n self.cellWidget = cellWidget\n if cellWidget:\n if hasattr(cellWidget, 'grabWindowPixmap'):\n bgPixmap = cellWidget.grabWindowPixmap()\n else:\n bgPixmap = QtGui.QPixmap.grabWidget(cellWidget)\n self.info.show()\n else:\n self.info.hide()\n bgPixmap = QtGui.QPixmap.grabWidget(self)\n self.thumbnail = QtGui.QPixmap(bgPixmap)\n painter = QtGui.QPainter(bgPixmap)\n painter.fillRect(bgPixmap.rect(), QtGui.QBrush(QtGui.QColor(175, 198, 229, 196)))\n painter.end()\n self.setPixmap(bgPixmap)\n\n def assignCell(self, sheet, row, col):\n \"\"\"assignCell(sheet: Sheet, row: int, col: int) -> None Assign a sheet cell to the presenter\"\"\"\n self.manipulator.assignCell(sheet, row, col)\n self.assignCellWidget(sheet.getCell(row, col))\n info = sheet.getCellPipelineInfo(row, col)\n self.info.updateInfo(info)\n\n def releaseCellWidget(self):\n \"\"\"releaseCellWidget() -> QWidget Return the ownership of self.cellWidget to the caller\"\"\"\n cellWidget = self.cellWidget\n self.assignCellWidget(None)\n self.manipulator.assignCell(None, -1, -1)\n if cellWidget:\n cellWidget.setParent(None)\n return cellWidget\n\n def deleteLater(self):\n \"\"\"deleteLater() -> None Make sure to delete the cell widget if it exists\"\"\"\n if self.cellWidget:\n self.cellWidget.deleteLater()\n QtGui.QLabel.deleteLater(self)\n", "source": "the_stack_v2_python_sparse", "source_path": "vistrails_current/vistrails/packages/spreadsheet/spreadsheet_cell.py", "source_repo": "lumig242/VisTrailsRecommendation", "split": "val", "star_events_count": 3}
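
The QCellPresenter record targets the PyQt4 API: QtGui.QPixmap.grabWidget() is deprecated from Qt 5 onward and removed in Qt 6, where a widget renders its own snapshot via grab(). Below is a small PyQt5 sketch of the snapshot-and-tint step from assignCellWidget; the RGBA wash (175, 198, 229, 196) is copied from the record, while the function name is illustrative.

from PyQt5 import QtGui


def tinted_snapshot(widget):
    # Qt 5 replacement for the record's QtGui.QPixmap.grabWidget(widget).
    pixmap = widget.grab()
    painter = QtGui.QPainter(pixmap)
    # The same translucent blue overlay the record paints over the live cell.
    painter.fillRect(pixmap.rect(),
                     QtGui.QBrush(QtGui.QColor(175, 198, 229, 196)))
    painter.end()
    return pixmap
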
{"blob_id": "e39b630edda163eba2eaa06134069b7e454b84ff", "bodies": ["if not parse_node:\n raise TypeError('parse_node cannot be null.')\nreturn PlannerTask()", "from .entity import Entity\nfrom .identity_set import IdentitySet\nfrom .planner_applied_categories import PlannerAppliedCategories\nfrom .planner_assigned_to_task_board_task_format import PlannerAssignedToTaskBoardTaskFormat\nfrom .planner_assignments import PlannerAssignments\nfrom .planner_bucket_task_board_task_format import PlannerBucketTaskBoardTaskFormat\nfrom .planner_preview_type import PlannerPreviewType\nfrom .planner_progress_task_board_task_format import PlannerProgressTaskBoardTaskFormat\nfrom .planner_task_details import PlannerTaskDetails\nfrom .entity import Entity\nfrom .identity_set import IdentitySet\nfrom .planner_applied_categories import PlannerAppliedCategories\nfrom .planner_assigned_to_task_board_task_format import PlannerAssignedToTaskBoardTaskFormat\nfrom .planner_assignments import PlannerAssignments\nfrom .planner_bucket_task_board_task_format import PlannerBucketTaskBoardTaskFormat\nfrom .planner_preview_type import PlannerPreviewType\nfrom .planner_progress_task_board_task_format import PlannerProgressTaskBoardTaskFormat\nfrom .planner_task_details import PlannerTaskDetails\nfields: Dict[str, Callable[[Any], None]] = {'activeChecklistItemCount': lambda n: setattr(self, 'active_checklist_item_count', n.get_int_value()), 'appliedCategories': lambda n: setattr(self, 'applied_categories', n.get_object_value(PlannerAppliedCategories)), 'assignedToTaskBoardFormat': lambda n: setattr(self, 'assigned_to_task_board_format', n.get_object_value(PlannerAssignedToTaskBoardTaskFormat)), 'assigneePriority': lambda n: setattr(self, 'assignee_priority', n.get_str_value()), 'assignments': lambda n: setattr(self, 'assignments', n.get_object_value(PlannerAssignments)), 'bucketId': lambda n: setattr(self, 'bucket_id', n.get_str_value()), 'bucketTaskBoardFormat': lambda n: setattr(self, 'bucket_task_board_format', n.get_object_value(PlannerBucketTaskBoardTaskFormat)), 'checklistItemCount': lambda n: setattr(self, 'checklist_item_count', n.get_int_value()), 'completedBy': lambda n: setattr(self, 'completed_by', n.get_object_value(IdentitySet)), 'completedDateTime': lambda n: setattr(self, 'completed_date_time', n.get_datetime_value()), 'conversationThreadId': lambda n: setattr(self, 'conversation_thread_id', n.get_str_value()), 'createdBy': lambda n: setattr(self, 'created_by', n.get_object_value(IdentitySet)), 'createdDateTime': lambda n: setattr(self, 'created_date_time', n.get_datetime_value()), 'details': lambda n: setattr(self, 'details', n.get_object_value(PlannerTaskDetails)), 'dueDateTime': lambda n: setattr(self, 'due_date_time', n.get_datetime_value()), 'hasDescription': lambda n: setattr(self, 'has_description', n.get_bool_value()), 'orderHint': lambda n: setattr(self, 'order_hint', n.get_str_value()), 'percentComplete': lambda n: setattr(self, 'percent_complete', n.get_int_value()), 'planId': lambda n: setattr(self, 'plan_id', n.get_str_value()), 'previewType': lambda n: setattr(self, 'preview_type', n.get_enum_value(PlannerPreviewType)), 'priority': lambda n: setattr(self, 'priority', n.get_int_value()), 'progressTaskBoardFormat': lambda n: setattr(self, 'progress_task_board_format', n.get_object_value(PlannerProgressTaskBoardTaskFormat)), 'referenceCount': lambda n: setattr(self, 'reference_count', n.get_int_value()), 'startDateTime': lambda n: setattr(self, 'start_date_time', n.get_datetime_value()), 'title': 
lambda n: setattr(self, 'title', n.get_str_value())}\nsuper_fields = super().get_field_deserializers()\nfields.update(super_fields)\nreturn fields", "if not writer:\n raise TypeError('writer cannot be null.')\nsuper().serialize(writer)\nwriter.write_int_value('activeChecklistItemCount', self.active_checklist_item_count)\nwriter.write_object_value('appliedCategories', self.applied_categories)\nwriter.write_object_value('assignedToTaskBoardFormat', self.assigned_to_task_board_format)\nwriter.write_str_value('assigneePriority', self.assignee_priority)\nwriter.write_object_value('assignments', self.assignments)\nwriter.write_str_value('bucketId', self.bucket_id)\nwriter.write_object_value('bucketTaskBoardFormat', self.bucket_task_board_format)\nwriter.write_int_value('checklistItemCount', self.checklist_item_count)\nwriter.write_object_value('completedBy', self.completed_by)\nwriter.write_datetime_value('completedDateTime', self.completed_date_time)\nwriter.write_str_value('conversationThreadId', self.conversation_thread_id)\nwriter.write_object_value('createdBy', self.created_by)\nwriter.write_datetime_value('createdDateTime', self.created_date_time)\nwriter.write_object_value('details', self.details)\nwriter.write_datetime_value('dueDateTime', self.due_date_time)\nwriter.write_bool_value('hasDescription', self.has_description)\nwriter.write_str_value('orderHint', self.order_hint)\nwriter.write_int_value('percentComplete', self.percent_complete)\nwriter.write_str_value('planId', self.plan_id)\nwriter.write_enum_value('previewType', self.preview_type)\nwriter.write_int_value('priority', self.priority)\nwriter.write_object_value('progressTaskBoardFormat', self.progress_task_board_format)\nwriter.write_int_value('referenceCount', self.reference_count)\nwriter.write_datetime_value('startDateTime', self.start_date_time)\nwriter.write_str_value('title', self.title)"], "bodies_text": "<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return PlannerTask()\n<|end_body_0|>\n\n<|body_start_1|>\n from .entity import Entity\n from .identity_set import IdentitySet\n from .planner_applied_categories import PlannerAppliedCategories\n from .planner_assigned_to_task_board_task_format import PlannerAssignedToTaskBoardTaskFormat\n from .planner_assignments import PlannerAssignments\n from .planner_bucket_task_board_task_format import PlannerBucketTaskBoardTaskFormat\n from .planner_preview_type import PlannerPreviewType\n from .planner_progress_task_board_task_format import PlannerProgressTaskBoardTaskFormat\n from .planner_task_details import PlannerTaskDetails\n from .entity import Entity\n from .identity_set import IdentitySet\n from .planner_applied_categories import PlannerAppliedCategories\n from .planner_assigned_to_task_board_task_format import PlannerAssignedToTaskBoardTaskFormat\n from .planner_assignments import PlannerAssignments\n from .planner_bucket_task_board_task_format import PlannerBucketTaskBoardTaskFormat\n from .planner_preview_type import PlannerPreviewType\n from .planner_progress_task_board_task_format import PlannerProgressTaskBoardTaskFormat\n from .planner_task_details import PlannerTaskDetails\n fields: Dict[str, Callable[[Any], None]] = {'activeChecklistItemCount': lambda n: setattr(self, 'active_checklist_item_count', n.get_int_value()), 'appliedCategories': lambda n: setattr(self, 'applied_categories', n.get_object_value(PlannerAppliedCategories)), 'assignedToTaskBoardFormat': lambda n: setattr(self, 'assigned_to_task_board_format', 
n.get_object_value(PlannerAssignedToTaskBoardTaskFormat)), 'assigneePriority': lambda n: setattr(self, 'assignee_priority', n.get_str_value()), 'assignments': lambda n: setattr(self, 'assignments', n.get_object_value(PlannerAssignments)), 'bucketId': lambda n: setattr(self, 'bucket_id', n.get_str_value()), 'bucketTaskBoardFormat': lambda n: setattr(self, 'bucket_task_board_format', n.get_object_value(PlannerBucketTaskBoardTaskFormat)), 'checklistItemCount': lambda n: setattr(self, 'checklist_item_count', n.get_int_value()), 'completedBy': lambda n: setattr(self, 'completed_by', n.get_object_value(IdentitySet)), 'completedDateTime': lambda n: setattr(self, 'completed_date_time', n.get_datetime_value()), 'conversationThreadId': lambda n: setattr(self, 'conversation_thread_id', n.get_str_value()), 'createdBy': lambda n: setattr(self, 'created_by', n.get_object_value(IdentitySet)), 'createdDateTime': lambda n: setattr(self, 'created_date_time', n.get_datetime_value()), 'details': lambda n: setattr(self, 'details', n.get_object_value(PlannerTaskDetails)), 'dueDateTime': lambda n: setattr(self, 'due_date_time', n.get_datetime_value()), 'hasDescription': lambda n: setattr(self, 'has_description', n.get_bool_value()), 'orderHint': lambda n: setattr(self, 'order_hint', n.get_str_value()), 'percentComplete': lambda n: setattr(self, 'percent_complete', n.get_int_value()), 'planId': lambda n: setattr(self, 'plan_id', n.get_str_value()), 'previewType': lambda n: setattr(self, 'preview_type', n.get_enum_value(PlannerPreviewType)), 'priority': lambda n: setattr(self, 'priority', n.get_int_value()), 'progressTaskBoardFormat': lambda n: setattr(self, 'progress_task_board_format', n.get_object_value(PlannerProgressTaskBoardTaskFormat)), 'referenceCount': lambda n: setattr(self, 'reference_count', n.get_int_value()), 'startDateTime': lambda n: setattr(self, 'start_date_time', n.get_datetime_value()), 'title': lambda n: setattr(self, 'title', n.get_str_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_int_value('activeChecklistItemCount', self.active_checklist_item_count)\n writer.write_object_value('appliedCategories', self.applied_categories)\n writer.write_object_value('assignedToTaskBoardFormat', self.assigned_to_task_board_format)\n writer.write_str_value('assigneePriority', self.assignee_priority)\n writer.write_object_value('assignments', self.assignments)\n writer.write_str_value('bucketId', self.bucket_id)\n writer.write_object_value('bucketTaskBoardFormat', self.bucket_task_board_format)\n writer.write_int_value('checklistItemCount', self.checklist_item_count)\n writer.write_object_value('completedBy', self.completed_by)\n writer.write_datetime_value('completedDateTime', self.completed_date_time)\n writer.write_str_value('conversationThreadId', self.conversation_thread_id)\n writer.write_object_value('createdBy', self.created_by)\n writer.write_datetime_value('createdDateTime', self.created_date_time)\n writer.write_object_value('details', self.details)\n writer.write_datetime_value('dueDateTime', self.due_date_time)\n writer.write_bool_value('hasDescription', self.has_description)\n writer.write_str_value('orderHint', self.order_hint)\n writer.write_int_value('percentComplete', self.percent_complete)\n writer.write_str_value('planId', self.plan_id)\n writer.write_enum_value('previewType', 
self.preview_type)\n writer.write_int_value('priority', self.priority)\n writer.write_object_value('progressTaskBoardFormat', self.progress_task_board_format)\n writer.write_int_value('referenceCount', self.reference_count)\n writer.write_datetime_value('startDateTime', self.start_date_time)\n writer.write_str_value('title', self.title)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "PlannerTask", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PlannerTask:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> PlannerTask:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: PlannerTask\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return PlannerTask()\n<|end_body_0|>\n\n<|body_start_1|>\n from .entity import Entity\n from .identity_set import IdentitySet\n from .planner_applied_categories import PlannerAppliedCategories\n from .planner_assigned_to_task_board_task_format import PlannerAssignedToTaskBoardTaskFormat\n from .planner_assignments import PlannerAssignments\n from .planner_bucket_task_board_task_format import PlannerBucketTaskBoardTaskFormat\n from .planner_preview_type import PlannerPreviewType\n from .planner_progress_task_board_task_format import PlannerProgressTaskBoardTaskFormat\n from .planner_task_details import PlannerTaskDetails\n from .entity import Entity\n from .identity_set import IdentitySet\n from .planner_applied_categories import PlannerAppliedCategories\n from .planner_assigned_to_task_board_task_format import PlannerAssignedToTaskBoardTaskFormat\n from .planner_assignments import PlannerAssignments\n from .planner_bucket_task_board_task_format import PlannerBucketTaskBoardTaskFormat\n from .planner_preview_type import PlannerPreviewType\n from .planner_progress_task_board_task_format import PlannerProgressTaskBoardTaskFormat\n from .planner_task_details import PlannerTaskDetails\n fields: Dict[str, Callable[[Any], None]] = {'activeChecklistItemCount': lambda n: setattr(self, 'active_checklist_item_count', n.get_int_value()), 'appliedCategories': lambda n: setattr(self, 'applied_categories', n.get_object_value(PlannerAppliedCategories)), 'assignedToTaskBoardFormat': lambda n: setattr(self, 'assigned_to_task_board_format', n.get_object_value(PlannerAssignedToTaskBoardTaskFormat)), 'assigneePriority': lambda n: setattr(self, 'assignee_priority', n.get_str_value()), 'assignments': lambda n: setattr(self, 'assignments', n.get_object_value(PlannerAssignments)), 'bucketId': lambda n: setattr(self, 'bucket_id', n.get_str_value()), 'bucketTaskBoardFormat': lambda n: setattr(self, 'bucket_task_board_format', n.get_object_value(PlannerBucketTaskBoardTaskFormat)), 'checklistItemCount': lambda n: setattr(self, 'checklist_item_count', n.get_int_value()), 'completedBy': lambda n: setattr(self, 'completed_by', n.get_object_value(IdentitySet)), 
'completedDateTime': lambda n: setattr(self, 'completed_date_time', n.get_datetime_value()), 'conversationThreadId': lambda n: setattr(self, 'conversation_thread_id', n.get_str_value()), 'createdBy': lambda n: setattr(self, 'created_by', n.get_object_value(IdentitySet)), 'createdDateTime': lambda n: setattr(self, 'created_date_time', n.get_datetime_value()), 'details': lambda n: setattr(self, 'details', n.get_object_value(PlannerTaskDetails)), 'dueDateTime': lambda n: setattr(self, 'due_date_time', n.get_datetime_value()), 'hasDescription': lambda n: setattr(self, 'has_description', n.get_bool_value()), 'orderHint': lambda n: setattr(self, 'order_hint', n.get_str_value()), 'percentComplete': lambda n: setattr(self, 'percent_complete', n.get_int_value()), 'planId': lambda n: setattr(self, 'plan_id', n.get_str_value()), 'previewType': lambda n: setattr(self, 'preview_type', n.get_enum_value(PlannerPreviewType)), 'priority': lambda n: setattr(self, 'priority', n.get_int_value()), 'progressTaskBoardFormat': lambda n: setattr(self, 'progress_task_board_format', n.get_object_value(PlannerProgressTaskBoardTaskFormat)), 'referenceCount': lambda n: setattr(self, 'reference_count', n.get_int_value()), 'startDateTime': lambda n: setattr(self, 'start_date_time', n.get_datetime_value()), 'title': lambda n: setattr(self, 'title', n.get_str_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_int_value('activeChecklistItemCount', self.active_checklist_item_count)\n writer.write_object_value('appliedCategories', self.applied_categories)\n writer.write_object_value('assignedToTaskBoardFormat', self.assigned_to_task_board_format)\n writer.write_str_value('assigneePriority', self.assignee_priority)\n writer.write_object_value('assignments', self.assignments)\n writer.write_str_value('bucketId', self.bucket_id)\n writer.write_object_value('bucketTaskBoardFormat', self.bucket_task_board_format)\n writer.write_int_value('checklistItemCount', self.checklist_item_count)\n writer.write_object_value('completedBy', self.completed_by)\n writer.write_datetime_value('completedDateTime', self.completed_date_time)\n writer.write_str_value('conversationThreadId', self.conversation_thread_id)\n writer.write_object_value('createdBy', self.created_by)\n writer.write_datetime_value('createdDateTime', self.created_date_time)\n writer.write_object_value('details', self.details)\n writer.write_datetime_value('dueDateTime', self.due_date_time)\n writer.write_bool_value('hasDescription', self.has_description)\n writer.write_str_value('orderHint', self.order_hint)\n writer.write_int_value('percentComplete', self.percent_complete)\n writer.write_str_value('planId', self.plan_id)\n writer.write_enum_value('previewType', self.preview_type)\n writer.write_int_value('priority', self.priority)\n writer.write_object_value('progressTaskBoardFormat', self.progress_task_board_format)\n writer.write_int_value('referenceCount', self.reference_count)\n writer.write_datetime_value('startDateTime', self.start_date_time)\n writer.write_str_value('title', self.title)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000209", "length_bytes": 12334, "license_type": "permissive", "methods": [{"docstring": "Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator 
value and create the object Returns: PlannerTask", "name": "create_from_discriminator_value", "signature": "def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> PlannerTask"}, {"docstring": "The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]", "name": "get_field_deserializers", "signature": "def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]"}, {"docstring": "Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "name": "serialize", "signature": "def serialize(self, writer: SerializationWriter) -> None"}], "n_methods": 3, "prompt": "Implement the Python class `PlannerTask` described below.\n\nClass description:\nImplement the PlannerTask class.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> PlannerTask: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: PlannerTask\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "prompted_full_text": "Implement the Python class `PlannerTask` described below.\n\nClass description:\nImplement the PlannerTask class.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> PlannerTask: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: PlannerTask\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model\n\n<|skeleton|>\nclass PlannerTask:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> PlannerTask:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: PlannerTask\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return PlannerTask()\n<|end_body_0|>\n\n<|body_start_1|>\n from .entity import Entity\n from .identity_set import IdentitySet\n from .planner_applied_categories import PlannerAppliedCategories\n from .planner_assigned_to_task_board_task_format import PlannerAssignedToTaskBoardTaskFormat\n from .planner_assignments import PlannerAssignments\n from .planner_bucket_task_board_task_format 
import PlannerBucketTaskBoardTaskFormat\n from .planner_preview_type import PlannerPreviewType\n from .planner_progress_task_board_task_format import PlannerProgressTaskBoardTaskFormat\n from .planner_task_details import PlannerTaskDetails\n from .entity import Entity\n from .identity_set import IdentitySet\n from .planner_applied_categories import PlannerAppliedCategories\n from .planner_assigned_to_task_board_task_format import PlannerAssignedToTaskBoardTaskFormat\n from .planner_assignments import PlannerAssignments\n from .planner_bucket_task_board_task_format import PlannerBucketTaskBoardTaskFormat\n from .planner_preview_type import PlannerPreviewType\n from .planner_progress_task_board_task_format import PlannerProgressTaskBoardTaskFormat\n from .planner_task_details import PlannerTaskDetails\n fields: Dict[str, Callable[[Any], None]] = {'activeChecklistItemCount': lambda n: setattr(self, 'active_checklist_item_count', n.get_int_value()), 'appliedCategories': lambda n: setattr(self, 'applied_categories', n.get_object_value(PlannerAppliedCategories)), 'assignedToTaskBoardFormat': lambda n: setattr(self, 'assigned_to_task_board_format', n.get_object_value(PlannerAssignedToTaskBoardTaskFormat)), 'assigneePriority': lambda n: setattr(self, 'assignee_priority', n.get_str_value()), 'assignments': lambda n: setattr(self, 'assignments', n.get_object_value(PlannerAssignments)), 'bucketId': lambda n: setattr(self, 'bucket_id', n.get_str_value()), 'bucketTaskBoardFormat': lambda n: setattr(self, 'bucket_task_board_format', n.get_object_value(PlannerBucketTaskBoardTaskFormat)), 'checklistItemCount': lambda n: setattr(self, 'checklist_item_count', n.get_int_value()), 'completedBy': lambda n: setattr(self, 'completed_by', n.get_object_value(IdentitySet)), 'completedDateTime': lambda n: setattr(self, 'completed_date_time', n.get_datetime_value()), 'conversationThreadId': lambda n: setattr(self, 'conversation_thread_id', n.get_str_value()), 'createdBy': lambda n: setattr(self, 'created_by', n.get_object_value(IdentitySet)), 'createdDateTime': lambda n: setattr(self, 'created_date_time', n.get_datetime_value()), 'details': lambda n: setattr(self, 'details', n.get_object_value(PlannerTaskDetails)), 'dueDateTime': lambda n: setattr(self, 'due_date_time', n.get_datetime_value()), 'hasDescription': lambda n: setattr(self, 'has_description', n.get_bool_value()), 'orderHint': lambda n: setattr(self, 'order_hint', n.get_str_value()), 'percentComplete': lambda n: setattr(self, 'percent_complete', n.get_int_value()), 'planId': lambda n: setattr(self, 'plan_id', n.get_str_value()), 'previewType': lambda n: setattr(self, 'preview_type', n.get_enum_value(PlannerPreviewType)), 'priority': lambda n: setattr(self, 'priority', n.get_int_value()), 'progressTaskBoardFormat': lambda n: setattr(self, 'progress_task_board_format', n.get_object_value(PlannerProgressTaskBoardTaskFormat)), 'referenceCount': lambda n: setattr(self, 'reference_count', n.get_int_value()), 'startDateTime': lambda n: setattr(self, 'start_date_time', n.get_datetime_value()), 'title': lambda n: setattr(self, 'title', n.get_str_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_int_value('activeChecklistItemCount', self.active_checklist_item_count)\n writer.write_object_value('appliedCategories', self.applied_categories)\n 
writer.write_object_value('assignedToTaskBoardFormat', self.assigned_to_task_board_format)\n writer.write_str_value('assigneePriority', self.assignee_priority)\n writer.write_object_value('assignments', self.assignments)\n writer.write_str_value('bucketId', self.bucket_id)\n writer.write_object_value('bucketTaskBoardFormat', self.bucket_task_board_format)\n writer.write_int_value('checklistItemCount', self.checklist_item_count)\n writer.write_object_value('completedBy', self.completed_by)\n writer.write_datetime_value('completedDateTime', self.completed_date_time)\n writer.write_str_value('conversationThreadId', self.conversation_thread_id)\n writer.write_object_value('createdBy', self.created_by)\n writer.write_datetime_value('createdDateTime', self.created_date_time)\n writer.write_object_value('details', self.details)\n writer.write_datetime_value('dueDateTime', self.due_date_time)\n writer.write_bool_value('hasDescription', self.has_description)\n writer.write_str_value('orderHint', self.order_hint)\n writer.write_int_value('percentComplete', self.percent_complete)\n writer.write_str_value('planId', self.plan_id)\n writer.write_enum_value('previewType', self.preview_type)\n writer.write_int_value('priority', self.priority)\n writer.write_object_value('progressTaskBoardFormat', self.progress_task_board_format)\n writer.write_int_value('referenceCount', self.reference_count)\n writer.write_datetime_value('startDateTime', self.start_date_time)\n writer.write_str_value('title', self.title)\n<|end_body_2|>\n", "revision_id": "27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949", "skeleton": "<|skeleton|>\nclass PlannerTask:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> PlannerTask:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: PlannerTask\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class PlannerTask:\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> PlannerTask:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: PlannerTask\"\"\"\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return PlannerTask()\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n from .entity import Entity\n from .identity_set import IdentitySet\n from .planner_applied_categories import PlannerAppliedCategories\n from .planner_assigned_to_task_board_task_format import PlannerAssignedToTaskBoardTaskFormat\n from .planner_assignments import PlannerAssignments\n from .planner_bucket_task_board_task_format import PlannerBucketTaskBoardTaskFormat\n from .planner_preview_type import 
PlannerPreviewType\n from .planner_progress_task_board_task_format import PlannerProgressTaskBoardTaskFormat\n from .planner_task_details import PlannerTaskDetails\n from .entity import Entity\n from .identity_set import IdentitySet\n from .planner_applied_categories import PlannerAppliedCategories\n from .planner_assigned_to_task_board_task_format import PlannerAssignedToTaskBoardTaskFormat\n from .planner_assignments import PlannerAssignments\n from .planner_bucket_task_board_task_format import PlannerBucketTaskBoardTaskFormat\n from .planner_preview_type import PlannerPreviewType\n from .planner_progress_task_board_task_format import PlannerProgressTaskBoardTaskFormat\n from .planner_task_details import PlannerTaskDetails\n fields: Dict[str, Callable[[Any], None]] = {'activeChecklistItemCount': lambda n: setattr(self, 'active_checklist_item_count', n.get_int_value()), 'appliedCategories': lambda n: setattr(self, 'applied_categories', n.get_object_value(PlannerAppliedCategories)), 'assignedToTaskBoardFormat': lambda n: setattr(self, 'assigned_to_task_board_format', n.get_object_value(PlannerAssignedToTaskBoardTaskFormat)), 'assigneePriority': lambda n: setattr(self, 'assignee_priority', n.get_str_value()), 'assignments': lambda n: setattr(self, 'assignments', n.get_object_value(PlannerAssignments)), 'bucketId': lambda n: setattr(self, 'bucket_id', n.get_str_value()), 'bucketTaskBoardFormat': lambda n: setattr(self, 'bucket_task_board_format', n.get_object_value(PlannerBucketTaskBoardTaskFormat)), 'checklistItemCount': lambda n: setattr(self, 'checklist_item_count', n.get_int_value()), 'completedBy': lambda n: setattr(self, 'completed_by', n.get_object_value(IdentitySet)), 'completedDateTime': lambda n: setattr(self, 'completed_date_time', n.get_datetime_value()), 'conversationThreadId': lambda n: setattr(self, 'conversation_thread_id', n.get_str_value()), 'createdBy': lambda n: setattr(self, 'created_by', n.get_object_value(IdentitySet)), 'createdDateTime': lambda n: setattr(self, 'created_date_time', n.get_datetime_value()), 'details': lambda n: setattr(self, 'details', n.get_object_value(PlannerTaskDetails)), 'dueDateTime': lambda n: setattr(self, 'due_date_time', n.get_datetime_value()), 'hasDescription': lambda n: setattr(self, 'has_description', n.get_bool_value()), 'orderHint': lambda n: setattr(self, 'order_hint', n.get_str_value()), 'percentComplete': lambda n: setattr(self, 'percent_complete', n.get_int_value()), 'planId': lambda n: setattr(self, 'plan_id', n.get_str_value()), 'previewType': lambda n: setattr(self, 'preview_type', n.get_enum_value(PlannerPreviewType)), 'priority': lambda n: setattr(self, 'priority', n.get_int_value()), 'progressTaskBoardFormat': lambda n: setattr(self, 'progress_task_board_format', n.get_object_value(PlannerProgressTaskBoardTaskFormat)), 'referenceCount': lambda n: setattr(self, 'reference_count', n.get_int_value()), 'startDateTime': lambda n: setattr(self, 'start_date_time', n.get_datetime_value()), 'title': lambda n: setattr(self, 'title', n.get_str_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_int_value('activeChecklistItemCount', self.active_checklist_item_count)\n 
writer.write_object_value('appliedCategories', self.applied_categories)\n writer.write_object_value('assignedToTaskBoardFormat', self.assigned_to_task_board_format)\n writer.write_str_value('assigneePriority', self.assignee_priority)\n writer.write_object_value('assignments', self.assignments)\n writer.write_str_value('bucketId', self.bucket_id)\n writer.write_object_value('bucketTaskBoardFormat', self.bucket_task_board_format)\n writer.write_int_value('checklistItemCount', self.checklist_item_count)\n writer.write_object_value('completedBy', self.completed_by)\n writer.write_datetime_value('completedDateTime', self.completed_date_time)\n writer.write_str_value('conversationThreadId', self.conversation_thread_id)\n writer.write_object_value('createdBy', self.created_by)\n writer.write_datetime_value('createdDateTime', self.created_date_time)\n writer.write_object_value('details', self.details)\n writer.write_datetime_value('dueDateTime', self.due_date_time)\n writer.write_bool_value('hasDescription', self.has_description)\n writer.write_str_value('orderHint', self.order_hint)\n writer.write_int_value('percentComplete', self.percent_complete)\n writer.write_str_value('planId', self.plan_id)\n writer.write_enum_value('previewType', self.preview_type)\n writer.write_int_value('priority', self.priority)\n writer.write_object_value('progressTaskBoardFormat', self.progress_task_board_format)\n writer.write_int_value('referenceCount', self.reference_count)\n writer.write_datetime_value('startDateTime', self.start_date_time)\n writer.write_str_value('title', self.title)\n", "source": "the_stack_v2_python_sparse", "source_path": "msgraph/generated/models/planner_task.py", "source_repo": "microsoftgraph/msgraph-sdk-python", "split": "val", "star_events_count": 135}
{"blob_id": "a70e8c7d6e009e2e4edd8c0a16d64ea8c954f8b7", "bodies": ["UserModel = get_user_model()\ntry:\n user = UserModel._default_manager.get(mobile=username)\n if user.check_password(password):\n return user\nexcept UserModel.DoesNotExist:\n return None", "UserModel = get_user_model()\ntry:\n return UserModel.objects.get(pk=user_id)\nexcept UserModel.DoesNotExist:\n return None"], "bodies_text": "<|body_start_0|>\n UserModel = get_user_model()\n try:\n user = UserModel._default_manager.get(mobile=username)\n if user.check_password(password):\n return user\n except UserModel.DoesNotExist:\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n UserModel = get_user_model()\n try:\n return UserModel.objects.get(pk=user_id)\n except UserModel.DoesNotExist:\n return None\n<|end_body_1|>\n", "class_docstring": "This Authentication Backend Authenticates a User Against the Mobile No. Possible Usage Can Be Facebook Login Where User Can Also Use Mobile No. to Create an Account.", "class_name": "MobileAuthenticationBackend", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MobileAuthenticationBackend:\n \"\"\"This Authentication Backend Authenticates a User Against the Mobile No. Possible Usage Can Be Facebook Login Where User Can Also Use Mobile No. to Create an Account.\"\"\"\n\n def authenticate(self, username=None, password=None):\n \"\"\"Authenticate Using the Mobile/password And Return a User\"\"\"\n <|body_0|>\n\n def get_user(self, user_id):\n \"\"\"Returns a User Against a Given User Id\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n UserModel = get_user_model()\n try:\n user = UserModel._default_manager.get(mobile=username)\n if user.check_password(password):\n return user\n except UserModel.DoesNotExist:\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n UserModel = get_user_model()\n try:\n return UserModel.objects.get(pk=user_id)\n except UserModel.DoesNotExist:\n return None\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000210", "length_bytes": 2708, "license_type": "no_license", "methods": [{"docstring": "Authenticate Using the Mobile/password And Return a User", "name": "authenticate", "signature": "def authenticate(self, username=None, password=None)"}, {"docstring": "Returns a User Against a Given User Id", "name": "get_user", "signature": "def get_user(self, user_id)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_test_001242", "prompt": "Implement the Python class `MobileAuthenticationBackend` described below.\n\nClass description:\nThis Authentication Backend Authenticates a User Against the Mobile No. Possible Usage Can Be Facebook Login Where User Can Also Use Mobile No. to Create an Account.\n\nMethod signatures and docstrings:\n- def authenticate(self, username=None, password=None): Authenticate Using the Mobile/password And Return a User\n- def get_user(self, user_id): Returns a User Against a Given User Id", "prompted_full_text": "Implement the Python class `MobileAuthenticationBackend` described below.\n\nClass description:\nThis Authentication Backend Authenticates a User Against the Mobile No. Possible Usage Can Be Facebook Login Where User Can Also Use Mobile No. 
to Create an Account.\n\nMethod signatures and docstrings:\n- def authenticate(self, username=None, password=None): Authenticate Using the Mobile/password And Return a User\n- def get_user(self, user_id): Returns a User Against a Given User Id\n\n<|skeleton|>\nclass MobileAuthenticationBackend:\n \"\"\"This Authentication Backend Authenticates a User Against the Mobile No. Possible Usage Can Be Facebook Login Where User Can Also Use Mobile No. to Create an Account.\"\"\"\n\n def authenticate(self, username=None, password=None):\n \"\"\"Authenticate Using the Mobile/password And Return a User\"\"\"\n <|body_0|>\n\n def get_user(self, user_id):\n \"\"\"Returns a User Against a Given User Id\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n UserModel = get_user_model()\n try:\n user = UserModel._default_manager.get(mobile=username)\n if user.check_password(password):\n return user\n except UserModel.DoesNotExist:\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n UserModel = get_user_model()\n try:\n return UserModel.objects.get(pk=user_id)\n except UserModel.DoesNotExist:\n return None\n<|end_body_1|>\n", "revision_id": "3bb9fe2e3fe8d876519631233fb29c7e04e2e8c3", "skeleton": "<|skeleton|>\nclass MobileAuthenticationBackend:\n \"\"\"This Authentication Backend Authenticates a User Against the Mobile No. Possible Usage Can Be Facebook Login Where User Can Also Use Mobile No. to Create an Account.\"\"\"\n\n def authenticate(self, username=None, password=None):\n \"\"\"Authenticate Using the Mobile/password And Return a User\"\"\"\n <|body_0|>\n\n def get_user(self, user_id):\n \"\"\"Returns a User Against a Given User Id\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class MobileAuthenticationBackend:\n \"\"\"This Authentication Backend Authenticates a User Against the Mobile No. Possible Usage Can Be Facebook Login Where User Can Also Use Mobile No. to Create an Account.\"\"\"\n\n def authenticate(self, username=None, password=None):\n \"\"\"Authenticate Using the Mobile/password And Return a User\"\"\"\n UserModel = get_user_model()\n try:\n user = UserModel._default_manager.get(mobile=username)\n if user.check_password(password):\n return user\n except UserModel.DoesNotExist:\n return None\n\n def get_user(self, user_id):\n \"\"\"Returns a User Against a Given User Id\"\"\"\n UserModel = get_user_model()\n try:\n return UserModel.objects.get(pk=user_id)\n except UserModel.DoesNotExist:\n return None\n", "source": "the_stack_v2_python_sparse", "source_path": "accounts/backends.py", "source_repo": "Mr4x3/competition_mania", "split": "val", "star_events_count": 0}
{"blob_id": "4072fcdc01f9cbdfec09d1200b0ec7ec86ad7b8f", "bodies": ["if author.has_perm('social.add_score'):\n score = self.get_or_create(author=author, user=user, axis=axis)\n score.score = count\n score.save()", "points = self.filter(axis=axis)\nresult = points.aggregate(average=Avg('score'))\nreturn result['average']", "points = self.filter(axis=axis)\nresult = points.aggregate(total=Sum('score'))\nreturn result['total']"], "bodies_text": "<|body_start_0|>\n if author.has_perm('social.add_score'):\n score = self.get_or_create(author=author, user=user, axis=axis)\n score.score = count\n score.save()\n<|end_body_0|>\n\n<|body_start_1|>\n points = self.filter(axis=axis)\n result = points.aggregate(average=Avg('score'))\n return result['average']\n<|end_body_1|>\n\n<|body_start_2|>\n points = self.filter(axis=axis)\n result = points.aggregate(total=Sum('score'))\n return result['total']\n<|end_body_2|>\n", "class_docstring": "Manager de points", "class_name": "ScoreManager", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ScoreManager:\n \"\"\"Manager de points\"\"\"\n\n def add(self, author, user, count=1, axis=0):\n \"\"\"Ajouter des points à un utilisateur sur un axe\"\"\"\n <|body_0|>\n\n def get_average(self, user, axis=0):\n \"\"\"Renvoyer le score moyen d'un utilisateur sur un axe\"\"\"\n <|body_1|>\n\n def get_total(self, user, axis=0):\n \"\"\"Renvoyer le score total d'un utilisateur sur un axe\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if author.has_perm('social.add_score'):\n score = self.get_or_create(author=author, user=user, axis=axis)\n score.score = count\n score.save()\n<|end_body_0|>\n\n<|body_start_1|>\n points = self.filter(axis=axis)\n result = points.aggregate(average=Avg('score'))\n return result['average']\n<|end_body_1|>\n\n<|body_start_2|>\n points = self.filter(axis=axis)\n result = points.aggregate(total=Sum('score'))\n return result['total']\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000211", "length_bytes": 2106, "license_type": "no_license", "methods": [{"docstring": "Ajouter des points à un utilisateur sur un axe", "name": "add", "signature": "def add(self, author, user, count=1, axis=0)"}, {"docstring": "Renvoyer le score moyen d'un utilisateur sur un axe", "name": "get_average", "signature": "def get_average(self, user, axis=0)"}, {"docstring": "Renvoyer le score total d'un utilisateur sur un axe", "name": "get_total", "signature": "def get_total(self, user, axis=0)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_053545", "prompt": "Implement the Python class `ScoreManager` described below.\n\nClass description:\nManager de points\n\nMethod signatures and docstrings:\n- def add(self, author, user, count=1, axis=0): Ajouter des points à un utilisateur sur un axe\n- def get_average(self, user, axis=0): Renvoyer le score moyen d'un utilisateur sur un axe\n- def get_total(self, user, axis=0): Renvoyer le score total d'un utilisateur sur un axe", "prompted_full_text": "Implement the Python class `ScoreManager` described below.\n\nClass description:\nManager de points\n\nMethod signatures and docstrings:\n- def add(self, author, user, count=1, axis=0): Ajouter des points à un utilisateur sur un axe\n- def get_average(self, user, axis=0): Renvoyer le score moyen d'un utilisateur sur un axe\n- def get_total(self, user, axis=0): Renvoyer le score total d'un utilisateur sur un axe\n\n<|skeleton|>\nclass ScoreManager:\n \"\"\"Manager de 
points\"\"\"\n\n def add(self, author, user, count=1, axis=0):\n \"\"\"Ajouter des points à un utilisateur sur un axe\"\"\"\n <|body_0|>\n\n def get_average(self, user, axis=0):\n \"\"\"Renvoyer le score moyen d'un utilisateur sur un axe\"\"\"\n <|body_1|>\n\n def get_total(self, user, axis=0):\n \"\"\"Renvoyer le score total d'un utilisateur sur un axe\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if author.has_perm('social.add_score'):\n score = self.get_or_create(author=author, user=user, axis=axis)\n score.score = count\n score.save()\n<|end_body_0|>\n\n<|body_start_1|>\n points = self.filter(axis=axis)\n result = points.aggregate(average=Avg('score'))\n return result['average']\n<|end_body_1|>\n\n<|body_start_2|>\n points = self.filter(axis=axis)\n result = points.aggregate(total=Sum('score'))\n return result['total']\n<|end_body_2|>\n", "revision_id": "8cef6f6e89c1990e2b25f83e54e0c3481d83b6d7", "skeleton": "<|skeleton|>\nclass ScoreManager:\n \"\"\"Manager de points\"\"\"\n\n def add(self, author, user, count=1, axis=0):\n \"\"\"Ajouter des points à un utilisateur sur un axe\"\"\"\n <|body_0|>\n\n def get_average(self, user, axis=0):\n \"\"\"Renvoyer le score moyen d'un utilisateur sur un axe\"\"\"\n <|body_1|>\n\n def get_total(self, user, axis=0):\n \"\"\"Renvoyer le score total d'un utilisateur sur un axe\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ScoreManager:\n \"\"\"Manager de points\"\"\"\n\n def add(self, author, user, count=1, axis=0):\n \"\"\"Ajouter des points à un utilisateur sur un axe\"\"\"\n if author.has_perm('social.add_score'):\n score = self.get_or_create(author=author, user=user, axis=axis)\n score.score = count\n score.save()\n\n def get_average(self, user, axis=0):\n \"\"\"Renvoyer le score moyen d'un utilisateur sur un axe\"\"\"\n points = self.filter(axis=axis)\n result = points.aggregate(average=Avg('score'))\n return result['average']\n\n def get_total(self, user, axis=0):\n \"\"\"Renvoyer le score total d'un utilisateur sur un axe\"\"\"\n points = self.filter(axis=axis)\n result = points.aggregate(total=Sum('score'))\n return result['total']\n", "source": "the_stack_v2_python_sparse", "source_path": "scoop/user/social/models/rating/score.py", "source_repo": "artscoop/scoop", "split": "val", "star_events_count": 0}
{"blob_id": "daf7590bcb63b94adba1746befec5aa7d4099be7", "bodies": ["data = data_input('data/aoc_06_data_test_1.txt')\norbits = calc_orbits(data)\nresult = orbit_counter(orbits, 'D', 0)\nself.assertEqual(result, 3)", "data = data_input('data/aoc_06_data_test_1.txt')\nresult = part_1(data)\nself.assertEqual(result, 42)", "data = data_input('data/aoc_06_data_test_2.txt')\norbits = calc_orbits(data)\nresult = orbit_chain(orbits, 'YOU', [])\nself.assertEqual(result, ['K', 'J', 'E', 'D', 'C', 'B', 'COM'])\ndata = data_input('data/aoc_06_data_test_2.txt')\norbits = calc_orbits(data)\nresult = orbit_chain(orbits, 'SAN', [])\nself.assertEqual(result, ['I', 'D', 'C', 'B', 'COM'])", "data = data_input('data/aoc_06_data_test_2.txt')\nresult = part_2(data)\nself.assertEqual(result, 4)"], "bodies_text": "<|body_start_0|>\n data = data_input('data/aoc_06_data_test_1.txt')\n orbits = calc_orbits(data)\n result = orbit_counter(orbits, 'D', 0)\n self.assertEqual(result, 3)\n<|end_body_0|>\n\n<|body_start_1|>\n data = data_input('data/aoc_06_data_test_1.txt')\n result = part_1(data)\n self.assertEqual(result, 42)\n<|end_body_1|>\n\n<|body_start_2|>\n data = data_input('data/aoc_06_data_test_2.txt')\n orbits = calc_orbits(data)\n result = orbit_chain(orbits, 'YOU', [])\n self.assertEqual(result, ['K', 'J', 'E', 'D', 'C', 'B', 'COM'])\n data = data_input('data/aoc_06_data_test_2.txt')\n orbits = calc_orbits(data)\n result = orbit_chain(orbits, 'SAN', [])\n self.assertEqual(result, ['I', 'D', 'C', 'B', 'COM'])\n<|end_body_2|>\n\n<|body_start_3|>\n data = data_input('data/aoc_06_data_test_2.txt')\n result = part_2(data)\n self.assertEqual(result, 4)\n<|end_body_3|>\n", "class_docstring": "()", "class_name": "TestAoC06", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestAoC06:\n \"\"\"()\"\"\"\n\n def test_orbit_counter(self):\n \"\"\"()\"\"\"\n <|body_0|>\n\n def test_part_1(self):\n \"\"\"()\"\"\"\n <|body_1|>\n\n def test_orbit_chain(self):\n \"\"\"()\"\"\"\n <|body_2|>\n\n def test_part_2(self):\n \"\"\"()\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n data = data_input('data/aoc_06_data_test_1.txt')\n orbits = calc_orbits(data)\n result = orbit_counter(orbits, 'D', 0)\n self.assertEqual(result, 3)\n<|end_body_0|>\n\n<|body_start_1|>\n data = data_input('data/aoc_06_data_test_1.txt')\n result = part_1(data)\n self.assertEqual(result, 42)\n<|end_body_1|>\n\n<|body_start_2|>\n data = data_input('data/aoc_06_data_test_2.txt')\n orbits = calc_orbits(data)\n result = orbit_chain(orbits, 'YOU', [])\n self.assertEqual(result, ['K', 'J', 'E', 'D', 'C', 'B', 'COM'])\n data = data_input('data/aoc_06_data_test_2.txt')\n orbits = calc_orbits(data)\n result = orbit_chain(orbits, 'SAN', [])\n self.assertEqual(result, ['I', 'D', 'C', 'B', 'COM'])\n<|end_body_2|>\n\n<|body_start_3|>\n data = data_input('data/aoc_06_data_test_2.txt')\n result = part_2(data)\n self.assertEqual(result, 4)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000212", "length_bytes": 1303, "license_type": "no_license", "methods": [{"docstring": "()", "name": "test_orbit_counter", "signature": "def test_orbit_counter(self)"}, {"docstring": "()", "name": "test_part_1", "signature": "def test_part_1(self)"}, {"docstring": "()", "name": "test_orbit_chain", "signature": "def test_orbit_chain(self)"}, {"docstring": "()", "name": "test_part_2", "signature": "def test_part_2(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_000350", 
"prompt": "Implement the Python class `TestAoC06` described below.\n\nClass description:\n()\n\nMethod signatures and docstrings:\n- def test_orbit_counter(self): ()\n- def test_part_1(self): ()\n- def test_orbit_chain(self): ()\n- def test_part_2(self): ()", "prompted_full_text": "Implement the Python class `TestAoC06` described below.\n\nClass description:\n()\n\nMethod signatures and docstrings:\n- def test_orbit_counter(self): ()\n- def test_part_1(self): ()\n- def test_orbit_chain(self): ()\n- def test_part_2(self): ()\n\n<|skeleton|>\nclass TestAoC06:\n \"\"\"()\"\"\"\n\n def test_orbit_counter(self):\n \"\"\"()\"\"\"\n <|body_0|>\n\n def test_part_1(self):\n \"\"\"()\"\"\"\n <|body_1|>\n\n def test_orbit_chain(self):\n \"\"\"()\"\"\"\n <|body_2|>\n\n def test_part_2(self):\n \"\"\"()\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n data = data_input('data/aoc_06_data_test_1.txt')\n orbits = calc_orbits(data)\n result = orbit_counter(orbits, 'D', 0)\n self.assertEqual(result, 3)\n<|end_body_0|>\n\n<|body_start_1|>\n data = data_input('data/aoc_06_data_test_1.txt')\n result = part_1(data)\n self.assertEqual(result, 42)\n<|end_body_1|>\n\n<|body_start_2|>\n data = data_input('data/aoc_06_data_test_2.txt')\n orbits = calc_orbits(data)\n result = orbit_chain(orbits, 'YOU', [])\n self.assertEqual(result, ['K', 'J', 'E', 'D', 'C', 'B', 'COM'])\n data = data_input('data/aoc_06_data_test_2.txt')\n orbits = calc_orbits(data)\n result = orbit_chain(orbits, 'SAN', [])\n self.assertEqual(result, ['I', 'D', 'C', 'B', 'COM'])\n<|end_body_2|>\n\n<|body_start_3|>\n data = data_input('data/aoc_06_data_test_2.txt')\n result = part_2(data)\n self.assertEqual(result, 4)\n<|end_body_3|>\n", "revision_id": "4c49273b8f9846ccd2df54c2249a63bb4f8a4ddd", "skeleton": "<|skeleton|>\nclass TestAoC06:\n \"\"\"()\"\"\"\n\n def test_orbit_counter(self):\n \"\"\"()\"\"\"\n <|body_0|>\n\n def test_part_1(self):\n \"\"\"()\"\"\"\n <|body_1|>\n\n def test_orbit_chain(self):\n \"\"\"()\"\"\"\n <|body_2|>\n\n def test_part_2(self):\n \"\"\"()\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TestAoC06:\n \"\"\"()\"\"\"\n\n def test_orbit_counter(self):\n \"\"\"()\"\"\"\n data = data_input('data/aoc_06_data_test_1.txt')\n orbits = calc_orbits(data)\n result = orbit_counter(orbits, 'D', 0)\n self.assertEqual(result, 3)\n\n def test_part_1(self):\n \"\"\"()\"\"\"\n data = data_input('data/aoc_06_data_test_1.txt')\n result = part_1(data)\n self.assertEqual(result, 42)\n\n def test_orbit_chain(self):\n \"\"\"()\"\"\"\n data = data_input('data/aoc_06_data_test_2.txt')\n orbits = calc_orbits(data)\n result = orbit_chain(orbits, 'YOU', [])\n self.assertEqual(result, ['K', 'J', 'E', 'D', 'C', 'B', 'COM'])\n data = data_input('data/aoc_06_data_test_2.txt')\n orbits = calc_orbits(data)\n result = orbit_chain(orbits, 'SAN', [])\n self.assertEqual(result, ['I', 'D', 'C', 'B', 'COM'])\n\n def test_part_2(self):\n \"\"\"()\"\"\"\n data = data_input('data/aoc_06_data_test_2.txt')\n result = part_2(data)\n self.assertEqual(result, 4)\n", "source": "the_stack_v2_python_sparse", "source_path": "test_aoc_06.py", "source_repo": "iveL91/Advent-of-Code-2019", "split": "val", "star_events_count": 0}
{"blob_id": "3f371674b637eebd7d577e3502f85956ed77028a", "bodies": ["norms = utils.bnorm(x)\nmask = norms > self.alpha\nprojected = x.clone().detach()\nprojected[mask] = self.alpha * utils.bdiv(projected[mask], norms[mask])\nreturn projected", "update_direction = -iterate.clone().detach()\ngrad_norms = torch.norm(grad.view(grad.size(0), -1), p=2, dim=-1)\nupdate_direction += self.alpha * (grad.view(grad.size(0), -1).T / grad_norms).T.view_as(iterate)\nreturn (update_direction, torch.ones(iterate.size(0), device=iterate.device, dtype=iterate.dtype))"], "bodies_text": "<|body_start_0|>\n norms = utils.bnorm(x)\n mask = norms > self.alpha\n projected = x.clone().detach()\n projected[mask] = self.alpha * utils.bdiv(projected[mask], norms[mask])\n return projected\n<|end_body_0|>\n\n<|body_start_1|>\n update_direction = -iterate.clone().detach()\n grad_norms = torch.norm(grad.view(grad.size(0), -1), p=2, dim=-1)\n update_direction += self.alpha * (grad.view(grad.size(0), -1).T / grad_norms).T.view_as(iterate)\n return (update_direction, torch.ones(iterate.size(0), device=iterate.device, dtype=iterate.dtype))\n<|end_body_1|>\n", "class_docstring": "", "class_name": "L2Ball", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass L2Ball:\n\n def prox(self, x, step_size=None):\n \"\"\"Projection onto the L2 ball. Args: x: torch.Tensor of shape (batchs_size, *) tensor to project step_size: Any Not used here Returns: p: torch.Tensor, same shape as x projection of x onto the L2 ball.\"\"\"\n <|body_0|>\n\n def lmo(self, grad, iterate):\n \"\"\"Linear Maximization Oracle. Return s - iterate with s solving the linear problem ..math:: max_{||s||_2 <= alpha} Args: grad: torch.Tensor of shape (batch_size, *) usually -gradient iterate: torch.Tensor of shape (batch_size, *) usually the iterate of the considered algorithm Returns: update_direction: torch.Tensor, same shape as grad and iterate, s - iterate, where s is the vertex of the constraint most correlated with u max_step_size: torch.Tensor of shape (batch_size,) 1. for a Frank-Wolfe step.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n norms = utils.bnorm(x)\n mask = norms > self.alpha\n projected = x.clone().detach()\n projected[mask] = self.alpha * utils.bdiv(projected[mask], norms[mask])\n return projected\n<|end_body_0|>\n\n<|body_start_1|>\n update_direction = -iterate.clone().detach()\n grad_norms = torch.norm(grad.view(grad.size(0), -1), p=2, dim=-1)\n update_direction += self.alpha * (grad.view(grad.size(0), -1).T / grad_norms).T.view_as(iterate)\n return (update_direction, torch.ones(iterate.size(0), device=iterate.device, dtype=iterate.dtype))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000213", "length_bytes": 26475, "license_type": "permissive", "methods": [{"docstring": "Projection onto the L2 ball. Args: x: torch.Tensor of shape (batchs_size, *) tensor to project step_size: Any Not used here Returns: p: torch.Tensor, same shape as x projection of x onto the L2 ball.", "name": "prox", "signature": "def prox(self, x, step_size=None)"}, {"docstring": "Linear Maximization Oracle. 
Return s - iterate with s solving the linear problem ..math:: max_{||s||_2 <= alpha} Args: grad: torch.Tensor of shape (batch_size, *) usually -gradient iterate: torch.Tensor of shape (batch_size, *) usually the iterate of the considered algorithm Returns: update_direction: torch.Tensor, same shape as grad and iterate, s - iterate, where s is the vertex of the constraint most correlated with u max_step_size: torch.Tensor of shape (batch_size,) 1. for a Frank-Wolfe step.", "name": "lmo", "signature": "def lmo(self, grad, iterate)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_020517", "prompt": "Implement the Python class `L2Ball` described below.\n\nClass description:\nImplement the L2Ball class.\n\nMethod signatures and docstrings:\n- def prox(self, x, step_size=None): Projection onto the L2 ball. Args: x: torch.Tensor of shape (batchs_size, *) tensor to project step_size: Any Not used here Returns: p: torch.Tensor, same shape as x projection of x onto the L2 ball.\n- def lmo(self, grad, iterate): Linear Maximization Oracle. Return s - iterate with s solving the linear problem ..math:: max_{||s||_2 <= alpha} Args: grad: torch.Tensor of shape (batch_size, *) usually -gradient iterate: torch.Tensor of shape (batch_size, *) usually the iterate of the considered algorithm Returns: update_direction: torch.Tensor, same shape as grad and iterate, s - iterate, where s is the vertex of the constraint most correlated with u max_step_size: torch.Tensor of shape (batch_size,) 1. for a Frank-Wolfe step.", "prompted_full_text": "Implement the Python class `L2Ball` described below.\n\nClass description:\nImplement the L2Ball class.\n\nMethod signatures and docstrings:\n- def prox(self, x, step_size=None): Projection onto the L2 ball. Args: x: torch.Tensor of shape (batchs_size, *) tensor to project step_size: Any Not used here Returns: p: torch.Tensor, same shape as x projection of x onto the L2 ball.\n- def lmo(self, grad, iterate): Linear Maximization Oracle. Return s - iterate with s solving the linear problem ..math:: max_{||s||_2 <= alpha} Args: grad: torch.Tensor of shape (batch_size, *) usually -gradient iterate: torch.Tensor of shape (batch_size, *) usually the iterate of the considered algorithm Returns: update_direction: torch.Tensor, same shape as grad and iterate, s - iterate, where s is the vertex of the constraint most correlated with u max_step_size: torch.Tensor of shape (batch_size,) 1. for a Frank-Wolfe step.\n\n<|skeleton|>\nclass L2Ball:\n\n def prox(self, x, step_size=None):\n \"\"\"Projection onto the L2 ball. Args: x: torch.Tensor of shape (batchs_size, *) tensor to project step_size: Any Not used here Returns: p: torch.Tensor, same shape as x projection of x onto the L2 ball.\"\"\"\n <|body_0|>\n\n def lmo(self, grad, iterate):\n \"\"\"Linear Maximization Oracle. Return s - iterate with s solving the linear problem ..math:: max_{||s||_2 <= alpha} Args: grad: torch.Tensor of shape (batch_size, *) usually -gradient iterate: torch.Tensor of shape (batch_size, *) usually the iterate of the considered algorithm Returns: update_direction: torch.Tensor, same shape as grad and iterate, s - iterate, where s is the vertex of the constraint most correlated with u max_step_size: torch.Tensor of shape (batch_size,) 1. 
for a Frank-Wolfe step.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n norms = utils.bnorm(x)\n mask = norms > self.alpha\n projected = x.clone().detach()\n projected[mask] = self.alpha * utils.bdiv(projected[mask], norms[mask])\n return projected\n<|end_body_0|>\n\n<|body_start_1|>\n update_direction = -iterate.clone().detach()\n grad_norms = torch.norm(grad.view(grad.size(0), -1), p=2, dim=-1)\n update_direction += self.alpha * (grad.view(grad.size(0), -1).T / grad_norms).T.view_as(iterate)\n return (update_direction, torch.ones(iterate.size(0), device=iterate.device, dtype=iterate.dtype))\n<|end_body_1|>\n", "revision_id": "4b7a066e3385279673437ea370d56a40594fd1f7", "skeleton": "<|skeleton|>\nclass L2Ball:\n\n def prox(self, x, step_size=None):\n \"\"\"Projection onto the L2 ball. Args: x: torch.Tensor of shape (batchs_size, *) tensor to project step_size: Any Not used here Returns: p: torch.Tensor, same shape as x projection of x onto the L2 ball.\"\"\"\n <|body_0|>\n\n def lmo(self, grad, iterate):\n \"\"\"Linear Maximization Oracle. Return s - iterate with s solving the linear problem ..math:: max_{||s||_2 <= alpha} Args: grad: torch.Tensor of shape (batch_size, *) usually -gradient iterate: torch.Tensor of shape (batch_size, *) usually the iterate of the considered algorithm Returns: update_direction: torch.Tensor, same shape as grad and iterate, s - iterate, where s is the vertex of the constraint most correlated with u max_step_size: torch.Tensor of shape (batch_size,) 1. for a Frank-Wolfe step.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class L2Ball:\n def prox(self, x, step_size=None):\n \"\"\"Projection onto the L2 ball. Args: x: torch.Tensor of shape (batchs_size, *) tensor to project step_size: Any Not used here Returns: p: torch.Tensor, same shape as x projection of x onto the L2 ball.\"\"\"\n norms = utils.bnorm(x)\n mask = norms > self.alpha\n projected = x.clone().detach()\n projected[mask] = self.alpha * utils.bdiv(projected[mask], norms[mask])\n return projected\n\n def lmo(self, grad, iterate):\n \"\"\"Linear Maximization Oracle. Return s - iterate with s solving the linear problem ..math:: max_{||s||_2 <= alpha} Args: grad: torch.Tensor of shape (batch_size, *) usually -gradient iterate: torch.Tensor of shape (batch_size, *) usually the iterate of the considered algorithm Returns: update_direction: torch.Tensor, same shape as grad and iterate, s - iterate, where s is the vertex of the constraint most correlated with u max_step_size: torch.Tensor of shape (batch_size,) 1. for a Frank-Wolfe step.\"\"\"\n update_direction = -iterate.clone().detach()\n grad_norms = torch.norm(grad.view(grad.size(0), -1), p=2, dim=-1)\n update_direction += self.alpha * (grad.view(grad.size(0), -1).T / grad_norms).T.view_as(iterate)\n return (update_direction, torch.ones(iterate.size(0), device=iterate.device, dtype=iterate.dtype))\n", "source": "the_stack_v2_python_sparse", "source_path": "chop/constraints.py", "source_repo": "rotcx/chop", "split": "val", "star_events_count": 0}
{"blob_id": "97a0f651a576c299c69ef1f251446fb5869b7d81", "bodies": ["with self.test_session(graph=tf.Graph()) as sess:\n placeholders = [tf.placeholder_with_default(v, v.shape) for v in inputs]\n tpu_computation = tpu.rewrite(graph_fn, placeholders)\n sess.run(tpu.initialize_system())\n sess.run([tf.global_variables_initializer(), tf.tables_initializer(), tf.local_variables_initializer()])\n materialized_results = sess.run(tpu_computation, feed_dict=dict(zip(placeholders, inputs)))\n sess.run(tpu.shutdown_system())\n if len(materialized_results) == 1 and (isinstance(materialized_results, list) or isinstance(materialized_results, tuple)):\n materialized_results = materialized_results[0]\nreturn materialized_results", "with self.test_session(graph=tf.Graph()) as sess:\n placeholders = [tf.placeholder_with_default(v, v.shape) for v in inputs]\n results = graph_fn(*placeholders)\n sess.run([tf.global_variables_initializer(), tf.tables_initializer(), tf.local_variables_initializer()])\n materialized_results = sess.run(results, feed_dict=dict(zip(placeholders, inputs)))\n if len(materialized_results) == 1 and (isinstance(materialized_results, list) or isinstance(materialized_results, tuple)):\n materialized_results = materialized_results[0]\nreturn materialized_results", "if FLAGS.tpu_test:\n return self.execute_tpu(graph_fn, inputs)\nelse:\n return self.execute_cpu(graph_fn, inputs)"], "bodies_text": "<|body_start_0|>\n with self.test_session(graph=tf.Graph()) as sess:\n placeholders = [tf.placeholder_with_default(v, v.shape) for v in inputs]\n tpu_computation = tpu.rewrite(graph_fn, placeholders)\n sess.run(tpu.initialize_system())\n sess.run([tf.global_variables_initializer(), tf.tables_initializer(), tf.local_variables_initializer()])\n materialized_results = sess.run(tpu_computation, feed_dict=dict(zip(placeholders, inputs)))\n sess.run(tpu.shutdown_system())\n if len(materialized_results) == 1 and (isinstance(materialized_results, list) or isinstance(materialized_results, tuple)):\n materialized_results = materialized_results[0]\n return materialized_results\n<|end_body_0|>\n\n<|body_start_1|>\n with self.test_session(graph=tf.Graph()) as sess:\n placeholders = [tf.placeholder_with_default(v, v.shape) for v in inputs]\n results = graph_fn(*placeholders)\n sess.run([tf.global_variables_initializer(), tf.tables_initializer(), tf.local_variables_initializer()])\n materialized_results = sess.run(results, feed_dict=dict(zip(placeholders, inputs)))\n if len(materialized_results) == 1 and (isinstance(materialized_results, list) or isinstance(materialized_results, tuple)):\n materialized_results = materialized_results[0]\n return materialized_results\n<|end_body_1|>\n\n<|body_start_2|>\n if FLAGS.tpu_test:\n return self.execute_tpu(graph_fn, inputs)\n else:\n return self.execute_cpu(graph_fn, inputs)\n<|end_body_2|>\n", "class_docstring": "Extends tf.test.TestCase to optionally allow running tests on TPU.", "class_name": "TestCase", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestCase:\n \"\"\"Extends tf.test.TestCase to optionally allow running tests on TPU.\"\"\"\n\n def execute_tpu(self, graph_fn, inputs):\n \"\"\"Constructs the graph, executes it on TPU and returns the result. Args: graph_fn: a callable that constructs the tensorflow graph to test. The arguments of this function should correspond to `inputs`. inputs: a list of numpy arrays to feed input to the computation graph. 
Returns: A list of numpy arrays or a scalar returned from executing the tensorflow graph.\"\"\"\n <|body_0|>\n\n def execute_cpu(self, graph_fn, inputs):\n \"\"\"Constructs the graph, executes it on CPU and returns the result. Args: graph_fn: a callable that constructs the tensorflow graph to test. The arguments of this function should correspond to `inputs`. inputs: a list of numpy arrays to feed input to the computation graph. Returns: A list of numpy arrays or a scalar returned from executing the tensorflow graph.\"\"\"\n <|body_1|>\n\n def execute(self, graph_fn, inputs):\n \"\"\"Constructs the graph, creates a test session and returns the results. The graph is executed either on TPU or CPU based on the `tpu_test` flag. Args: graph_fn: a callable that constructs the tensorflow graph to test. The arguments of this function should correspond to `inputs`. inputs: a list of numpy arrays to feed input to the computation graph. Returns: A list of numpy arrays or a scalar returned from executing the tensorflow graph.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n with self.test_session(graph=tf.Graph()) as sess:\n placeholders = [tf.placeholder_with_default(v, v.shape) for v in inputs]\n tpu_computation = tpu.rewrite(graph_fn, placeholders)\n sess.run(tpu.initialize_system())\n sess.run([tf.global_variables_initializer(), tf.tables_initializer(), tf.local_variables_initializer()])\n materialized_results = sess.run(tpu_computation, feed_dict=dict(zip(placeholders, inputs)))\n sess.run(tpu.shutdown_system())\n if len(materialized_results) == 1 and (isinstance(materialized_results, list) or isinstance(materialized_results, tuple)):\n materialized_results = materialized_results[0]\n return materialized_results\n<|end_body_0|>\n\n<|body_start_1|>\n with self.test_session(graph=tf.Graph()) as sess:\n placeholders = [tf.placeholder_with_default(v, v.shape) for v in inputs]\n results = graph_fn(*placeholders)\n sess.run([tf.global_variables_initializer(), tf.tables_initializer(), tf.local_variables_initializer()])\n materialized_results = sess.run(results, feed_dict=dict(zip(placeholders, inputs)))\n if len(materialized_results) == 1 and (isinstance(materialized_results, list) or isinstance(materialized_results, tuple)):\n materialized_results = materialized_results[0]\n return materialized_results\n<|end_body_1|>\n\n<|body_start_2|>\n if FLAGS.tpu_test:\n return self.execute_tpu(graph_fn, inputs)\n else:\n return self.execute_cpu(graph_fn, inputs)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000214", "length_bytes": 4121, "license_type": "permissive", "methods": [{"docstring": "Constructs the graph, executes it on TPU and returns the result. Args: graph_fn: a callable that constructs the tensorflow graph to test. The arguments of this function should correspond to `inputs`. inputs: a list of numpy arrays to feed input to the computation graph. Returns: A list of numpy arrays or a scalar returned from executing the tensorflow graph.", "name": "execute_tpu", "signature": "def execute_tpu(self, graph_fn, inputs)"}, {"docstring": "Constructs the graph, executes it on CPU and returns the result. Args: graph_fn: a callable that constructs the tensorflow graph to test. The arguments of this function should correspond to `inputs`. inputs: a list of numpy arrays to feed input to the computation graph. 
Returns: A list of numpy arrays or a scalar returned from executing the tensorflow graph.", "name": "execute_cpu", "signature": "def execute_cpu(self, graph_fn, inputs)"}, {"docstring": "Constructs the graph, creates a test session and returns the results. The graph is executed either on TPU or CPU based on the `tpu_test` flag. Args: graph_fn: a callable that constructs the tensorflow graph to test. The arguments of this function should correspond to `inputs`. inputs: a list of numpy arrays to feed input to the computation graph. Returns: A list of numpy arrays or a scalar returned from executing the tensorflow graph.", "name": "execute", "signature": "def execute(self, graph_fn, inputs)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_009838", "prompt": "Implement the Python class `TestCase` described below.\n\nClass description:\nExtends tf.test.TestCase to optionally allow running tests on TPU.\n\nMethod signatures and docstrings:\n- def execute_tpu(self, graph_fn, inputs): Constructs the graph, executes it on TPU and returns the result. Args: graph_fn: a callable that constructs the tensorflow graph to test. The arguments of this function should correspond to `inputs`. inputs: a list of numpy arrays to feed input to the computation graph. Returns: A list of numpy arrays or a scalar returned from executing the tensorflow graph.\n- def execute_cpu(self, graph_fn, inputs): Constructs the graph, executes it on CPU and returns the result. Args: graph_fn: a callable that constructs the tensorflow graph to test. The arguments of this function should correspond to `inputs`. inputs: a list of numpy arrays to feed input to the computation graph. Returns: A list of numpy arrays or a scalar returned from executing the tensorflow graph.\n- def execute(self, graph_fn, inputs): Constructs the graph, creates a test session and returns the results. The graph is executed either on TPU or CPU based on the `tpu_test` flag. Args: graph_fn: a callable that constructs the tensorflow graph to test. The arguments of this function should correspond to `inputs`. inputs: a list of numpy arrays to feed input to the computation graph. Returns: A list of numpy arrays or a scalar returned from executing the tensorflow graph.", "prompted_full_text": "Implement the Python class `TestCase` described below.\n\nClass description:\nExtends tf.test.TestCase to optionally allow running tests on TPU.\n\nMethod signatures and docstrings:\n- def execute_tpu(self, graph_fn, inputs): Constructs the graph, executes it on TPU and returns the result. Args: graph_fn: a callable that constructs the tensorflow graph to test. The arguments of this function should correspond to `inputs`. inputs: a list of numpy arrays to feed input to the computation graph. Returns: A list of numpy arrays or a scalar returned from executing the tensorflow graph.\n- def execute_cpu(self, graph_fn, inputs): Constructs the graph, executes it on CPU and returns the result. Args: graph_fn: a callable that constructs the tensorflow graph to test. The arguments of this function should correspond to `inputs`. inputs: a list of numpy arrays to feed input to the computation graph. Returns: A list of numpy arrays or a scalar returned from executing the tensorflow graph.\n- def execute(self, graph_fn, inputs): Constructs the graph, creates a test session and returns the results. The graph is executed either on TPU or CPU based on the `tpu_test` flag. Args: graph_fn: a callable that constructs the tensorflow graph to test. 
The arguments of this function should correspond to `inputs`. inputs: a list of numpy arrays to feed input to the computation graph. Returns: A list of numpy arrays or a scalar returned from executing the tensorflow graph.\n\n<|skeleton|>\nclass TestCase:\n \"\"\"Extends tf.test.TestCase to optionally allow running tests on TPU.\"\"\"\n\n def execute_tpu(self, graph_fn, inputs):\n \"\"\"Constructs the graph, executes it on TPU and returns the result. Args: graph_fn: a callable that constructs the tensorflow graph to test. The arguments of this function should correspond to `inputs`. inputs: a list of numpy arrays to feed input to the computation graph. Returns: A list of numpy arrays or a scalar returned from executing the tensorflow graph.\"\"\"\n <|body_0|>\n\n def execute_cpu(self, graph_fn, inputs):\n \"\"\"Constructs the graph, executes it on CPU and returns the result. Args: graph_fn: a callable that constructs the tensorflow graph to test. The arguments of this function should correspond to `inputs`. inputs: a list of numpy arrays to feed input to the computation graph. Returns: A list of numpy arrays or a scalar returned from executing the tensorflow graph.\"\"\"\n <|body_1|>\n\n def execute(self, graph_fn, inputs):\n \"\"\"Constructs the graph, creates a test session and returns the results. The graph is executed either on TPU or CPU based on the `tpu_test` flag. Args: graph_fn: a callable that constructs the tensorflow graph to test. The arguments of this function should correspond to `inputs`. inputs: a list of numpy arrays to feed input to the computation graph. Returns: A list of numpy arrays or a scalar returned from executing the tensorflow graph.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n with self.test_session(graph=tf.Graph()) as sess:\n placeholders = [tf.placeholder_with_default(v, v.shape) for v in inputs]\n tpu_computation = tpu.rewrite(graph_fn, placeholders)\n sess.run(tpu.initialize_system())\n sess.run([tf.global_variables_initializer(), tf.tables_initializer(), tf.local_variables_initializer()])\n materialized_results = sess.run(tpu_computation, feed_dict=dict(zip(placeholders, inputs)))\n sess.run(tpu.shutdown_system())\n if len(materialized_results) == 1 and (isinstance(materialized_results, list) or isinstance(materialized_results, tuple)):\n materialized_results = materialized_results[0]\n return materialized_results\n<|end_body_0|>\n\n<|body_start_1|>\n with self.test_session(graph=tf.Graph()) as sess:\n placeholders = [tf.placeholder_with_default(v, v.shape) for v in inputs]\n results = graph_fn(*placeholders)\n sess.run([tf.global_variables_initializer(), tf.tables_initializer(), tf.local_variables_initializer()])\n materialized_results = sess.run(results, feed_dict=dict(zip(placeholders, inputs)))\n if len(materialized_results) == 1 and (isinstance(materialized_results, list) or isinstance(materialized_results, tuple)):\n materialized_results = materialized_results[0]\n return materialized_results\n<|end_body_1|>\n\n<|body_start_2|>\n if FLAGS.tpu_test:\n return self.execute_tpu(graph_fn, inputs)\n else:\n return self.execute_cpu(graph_fn, inputs)\n<|end_body_2|>\n", "revision_id": "39272caea30ab01faa3795156af76a08aaf1455f", "skeleton": "<|skeleton|>\nclass TestCase:\n \"\"\"Extends tf.test.TestCase to optionally allow running tests on TPU.\"\"\"\n\n def execute_tpu(self, graph_fn, inputs):\n \"\"\"Constructs the graph, executes it on TPU and returns the result. Args: graph_fn: a callable that constructs the tensorflow graph to test. 
The arguments of this function should correspond to `inputs`. inputs: a list of numpy arrays to feed input to the computation graph. Returns: A list of numpy arrays or a scalar returned from executing the tensorflow graph.\"\"\"\n <|body_0|>\n\n def execute_cpu(self, graph_fn, inputs):\n \"\"\"Constructs the graph, executes it on CPU and returns the result. Args: graph_fn: a callable that constructs the tensorflow graph to test. The arguments of this function should correspond to `inputs`. inputs: a list of numpy arrays to feed input to the computation graph. Returns: A list of numpy arrays or a scalar returned from executing the tensorflow graph.\"\"\"\n <|body_1|>\n\n def execute(self, graph_fn, inputs):\n \"\"\"Constructs the graph, creates a test session and returns the results. The graph is executed either on TPU or CPU based on the `tpu_test` flag. Args: graph_fn: a callable that constructs the tensorflow graph to test. The arguments of this function should correspond to `inputs`. inputs: a list of numpy arrays to feed input to the computation graph. Returns: A list of numpy arrays or a scalar returned from executing the tensorflow graph.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TestCase:\n \"\"\"Extends tf.test.TestCase to optionally allow running tests on TPU.\"\"\"\n\n def execute_tpu(self, graph_fn, inputs):\n \"\"\"Constructs the graph, executes it on TPU and returns the result. Args: graph_fn: a callable that constructs the tensorflow graph to test. The arguments of this function should correspond to `inputs`. inputs: a list of numpy arrays to feed input to the computation graph. Returns: A list of numpy arrays or a scalar returned from executing the tensorflow graph.\"\"\"\n with self.test_session(graph=tf.Graph()) as sess:\n placeholders = [tf.placeholder_with_default(v, v.shape) for v in inputs]\n tpu_computation = tpu.rewrite(graph_fn, placeholders)\n sess.run(tpu.initialize_system())\n sess.run([tf.global_variables_initializer(), tf.tables_initializer(), tf.local_variables_initializer()])\n materialized_results = sess.run(tpu_computation, feed_dict=dict(zip(placeholders, inputs)))\n sess.run(tpu.shutdown_system())\n if len(materialized_results) == 1 and (isinstance(materialized_results, list) or isinstance(materialized_results, tuple)):\n materialized_results = materialized_results[0]\n return materialized_results\n\n def execute_cpu(self, graph_fn, inputs):\n \"\"\"Constructs the graph, executes it on CPU and returns the result. Args: graph_fn: a callable that constructs the tensorflow graph to test. The arguments of this function should correspond to `inputs`. inputs: a list of numpy arrays to feed input to the computation graph. 
Returns: A list of numpy arrays or a scalar returned from executing the tensorflow graph.\"\"\"\n with self.test_session(graph=tf.Graph()) as sess:\n placeholders = [tf.placeholder_with_default(v, v.shape) for v in inputs]\n results = graph_fn(*placeholders)\n sess.run([tf.global_variables_initializer(), tf.tables_initializer(), tf.local_variables_initializer()])\n materialized_results = sess.run(results, feed_dict=dict(zip(placeholders, inputs)))\n if len(materialized_results) == 1 and (isinstance(materialized_results, list) or isinstance(materialized_results, tuple)):\n materialized_results = materialized_results[0]\n return materialized_results\n\n def execute(self, graph_fn, inputs):\n \"\"\"Constructs the graph, creates a test session and returns the results. The graph is executed either on TPU or CPU based on the `tpu_test` flag. Args: graph_fn: a callable that constructs the tensorflow graph to test. The arguments of this function should correspond to `inputs`. inputs: a list of numpy arrays to feed input to the computation graph. Returns: A list of numpy arrays or a scalar returned from executing the tensorflow graph.\"\"\"\n if FLAGS.tpu_test:\n return self.execute_tpu(graph_fn, inputs)\n else:\n return self.execute_cpu(graph_fn, inputs)\n", "source": "the_stack_v2_python_sparse", "source_path": "utils/test_case.py", "source_repo": "ambakick/Person-Detection-and-Tracking", "split": "val", "star_events_count": 262}
{"blob_id": "3f82691b92dfc22af3e214fa0294d79e8f03ad08", "bodies": ["super(CentroidEstimator, self).__init__()\nself.domains_count = config.domains_count\nself.features_size = config.encoder_features_count\nself.num_clusters = num_clusters\nself.alpha = config.centroid_estimation_alpha\nself.global_centroids_estimator = FixedMatrixEstimator(self.features_size, self.num_clusters, self.alpha, initial_value=torch.zeros((self.features_size, self.num_clusters)))\nself.domain_centroids_estimators = nn.ModuleList([FixedMatrixEstimator(self.features_size, self.num_clusters, self.alpha, initial_value=torch.zeros((self.features_size, self.num_clusters))) for i in range(self.domains_count)])", "numerator = torch.sum(numerator, dim=0, keepdim=False)\ncluster_probabilities = torch.sum(cluster_probabilities, dim=0, keepdim=False)\ncluster_probabilities = cluster_probabilities + eps\nreturn numerator / cluster_probabilities", "unsqueezed_features = features.unsqueeze(-1)\nunsqueezed_cluster_probabilities = cluster_probabilities.unsqueeze(1)\nunsummed_numerator = unsqueezed_features * unsqueezed_cluster_probabilities\nglobal_centroids = self.compute_centroids(unsummed_numerator, unsqueezed_cluster_probabilities)\nestimated_global_centroids = self.global_centroids_estimator(global_centroids)\nestimated_domain_centroids = []\nfor domain_idx in range(self.domains_count):\n slice_indexes = torch.nonzero(domains.view(-1) == domain_idx)\n domain_numerator = unsummed_numerator[slice_indexes].squeeze(1)\n domain_cluster_probabilities = unsqueezed_cluster_probabilities[slice_indexes].squeeze(1)\n domain_centroid = self.compute_centroids(domain_numerator, domain_cluster_probabilities)\n current_domain_centroids = self.domain_centroids_estimators[domain_idx](domain_centroid)\n estimated_domain_centroids.append(current_domain_centroids)\nreturn (estimated_global_centroids, estimated_domain_centroids)"], "bodies_text": "<|body_start_0|>\n super(CentroidEstimator, self).__init__()\n self.domains_count = config.domains_count\n self.features_size = config.encoder_features_count\n self.num_clusters = num_clusters\n self.alpha = config.centroid_estimation_alpha\n self.global_centroids_estimator = FixedMatrixEstimator(self.features_size, self.num_clusters, self.alpha, initial_value=torch.zeros((self.features_size, self.num_clusters)))\n self.domain_centroids_estimators = nn.ModuleList([FixedMatrixEstimator(self.features_size, self.num_clusters, self.alpha, initial_value=torch.zeros((self.features_size, self.num_clusters))) for i in range(self.domains_count)])\n<|end_body_0|>\n\n<|body_start_1|>\n numerator = torch.sum(numerator, dim=0, keepdim=False)\n cluster_probabilities = torch.sum(cluster_probabilities, dim=0, keepdim=False)\n cluster_probabilities = cluster_probabilities + eps\n return numerator / cluster_probabilities\n<|end_body_1|>\n\n<|body_start_2|>\n unsqueezed_features = features.unsqueeze(-1)\n unsqueezed_cluster_probabilities = cluster_probabilities.unsqueeze(1)\n unsummed_numerator = unsqueezed_features * unsqueezed_cluster_probabilities\n global_centroids = self.compute_centroids(unsummed_numerator, unsqueezed_cluster_probabilities)\n estimated_global_centroids = self.global_centroids_estimator(global_centroids)\n estimated_domain_centroids = []\n for domain_idx in range(self.domains_count):\n slice_indexes = torch.nonzero(domains.view(-1) == domain_idx)\n domain_numerator = unsummed_numerator[slice_indexes].squeeze(1)\n domain_cluster_probabilities = 
unsqueezed_cluster_probabilities[slice_indexes].squeeze(1)\n domain_centroid = self.compute_centroids(domain_numerator, domain_cluster_probabilities)\n current_domain_centroids = self.domain_centroids_estimators[domain_idx](domain_centroid)\n estimated_domain_centroids.append(current_domain_centroids)\n return (estimated_global_centroids, estimated_domain_centroids)\n<|end_body_2|>\n", "class_docstring": "Estimator for cluster centroids positions across domains", "class_name": "CentroidEstimator", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CentroidEstimator:\n \"\"\"Estimator for cluster centroids positions across domains\"\"\"\n\n def __init__(self, config, num_clusters):\n \"\"\"Initializes the centroid estimator based on the provided configuration and number of clusters that will need to be estimated :param config: configuration file :param num_clusters: number of clusters for which to estimate centroids\"\"\"\n <|body_0|>\n\n def compute_centroids(self, numerator: torch.Tensor, cluster_probabilities: torch.Tensor, eps=0.001):\n \"\"\"Computes the centroids over the whole numerator and cluster probabilities tensors :param numerator: (num_points, features_size, num_clusters) tensor :param cluster_probabilities: (num_points, num_clusters) tensor :return: (features_size, num_clusters) tensor representing the centroids for each cluster\"\"\"\n <|body_1|>\n\n def forward(self, features: torch.Tensor, domains: torch.Tensor, cluster_probabilities: torch.Tensor):\n \"\"\":param features: (batch_size, features_size) tensor of features to which to estimate centroids :param domains: (1, batch_size) tensor representing the domain of each sample :param cluster_probabilities: (batch_size, num_clusters) tensor of cluster assigment probabilities :return: global_centroid, domain_centroids_list where global centroid is a (feaures_size, num_clusters) tensor representing the global centroids and domain_centroids_list is a list of length domains_count with such centroid tensors computed on each domain\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(CentroidEstimator, self).__init__()\n self.domains_count = config.domains_count\n self.features_size = config.encoder_features_count\n self.num_clusters = num_clusters\n self.alpha = config.centroid_estimation_alpha\n self.global_centroids_estimator = FixedMatrixEstimator(self.features_size, self.num_clusters, self.alpha, initial_value=torch.zeros((self.features_size, self.num_clusters)))\n self.domain_centroids_estimators = nn.ModuleList([FixedMatrixEstimator(self.features_size, self.num_clusters, self.alpha, initial_value=torch.zeros((self.features_size, self.num_clusters))) for i in range(self.domains_count)])\n<|end_body_0|>\n\n<|body_start_1|>\n numerator = torch.sum(numerator, dim=0, keepdim=False)\n cluster_probabilities = torch.sum(cluster_probabilities, dim=0, keepdim=False)\n cluster_probabilities = cluster_probabilities + eps\n return numerator / cluster_probabilities\n<|end_body_1|>\n\n<|body_start_2|>\n unsqueezed_features = features.unsqueeze(-1)\n unsqueezed_cluster_probabilities = cluster_probabilities.unsqueeze(1)\n unsummed_numerator = unsqueezed_features * unsqueezed_cluster_probabilities\n global_centroids = self.compute_centroids(unsummed_numerator, unsqueezed_cluster_probabilities)\n estimated_global_centroids = self.global_centroids_estimator(global_centroids)\n estimated_domain_centroids = []\n for domain_idx in range(self.domains_count):\n 
slice_indexes = torch.nonzero(domains.view(-1) == domain_idx)\n domain_numerator = unsummed_numerator[slice_indexes].squeeze(1)\n domain_cluster_probabilities = unsqueezed_cluster_probabilities[slice_indexes].squeeze(1)\n domain_centroid = self.compute_centroids(domain_numerator, domain_cluster_probabilities)\n current_domain_centroids = self.domain_centroids_estimators[domain_idx](domain_centroid)\n estimated_domain_centroids.append(current_domain_centroids)\n return (estimated_global_centroids, estimated_domain_centroids)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000215", "length_bytes": 6285, "license_type": "permissive", "methods": [{"docstring": "Initializes the centroid estimator based on the provided configuration and number of clusters that will need to be estimated :param config: configuration file :param num_clusters: number of clusters for which to estimate centroids", "name": "__init__", "signature": "def __init__(self, config, num_clusters)"}, {"docstring": "Computes the centroids over the whole numerator and cluster probabilities tensors :param numerator: (num_points, features_size, num_clusters) tensor :param cluster_probabilities: (num_points, num_clusters) tensor :return: (features_size, num_clusters) tensor representing the centroids for each cluster", "name": "compute_centroids", "signature": "def compute_centroids(self, numerator: torch.Tensor, cluster_probabilities: torch.Tensor, eps=0.001)"}, {"docstring": ":param features: (batch_size, features_size) tensor of features to which to estimate centroids :param domains: (1, batch_size) tensor representing the domain of each sample :param cluster_probabilities: (batch_size, num_clusters) tensor of cluster assigment probabilities :return: global_centroid, domain_centroids_list where global centroid is a (feaures_size, num_clusters) tensor representing the global centroids and domain_centroids_list is a list of length domains_count with such centroid tensors computed on each domain", "name": "forward", "signature": "def forward(self, features: torch.Tensor, domains: torch.Tensor, cluster_probabilities: torch.Tensor)"}], "n_methods": 3, "prompt": "Implement the Python class `CentroidEstimator` described below.\n\nClass description:\nEstimator for cluster centroids positions across domains\n\nMethod signatures and docstrings:\n- def __init__(self, config, num_clusters): Initializes the centroid estimator based on the provided configuration and number of clusters that will need to be estimated :param config: configuration file :param num_clusters: number of clusters for which to estimate centroids\n- def compute_centroids(self, numerator: torch.Tensor, cluster_probabilities: torch.Tensor, eps=0.001): Computes the centroids over the whole numerator and cluster probabilities tensors :param numerator: (num_points, features_size, num_clusters) tensor :param cluster_probabilities: (num_points, num_clusters) tensor :return: (features_size, num_clusters) tensor representing the centroids for each cluster\n- def forward(self, features: torch.Tensor, domains: torch.Tensor, cluster_probabilities: torch.Tensor): :param features: (batch_size, features_size) tensor of features to which to estimate centroids :param domains: (1, batch_size) tensor representing the domain of each sample :param cluster_probabilities: (batch_size, num_clusters) tensor of cluster assigment probabilities :return: global_centroid, domain_centroids_list where global centroid is a (feaures_size, num_clusters) tensor representing the global 
centroids and domain_centroids_list is a list of length domains_count with such centroid tensors computed on each domain", "prompted_full_text": "Implement the Python class `CentroidEstimator` described below.\n\nClass description:\nEstimator for cluster centroids positions across domains\n\nMethod signatures and docstrings:\n- def __init__(self, config, num_clusters): Initializes the centroid estimator based on the provided configuration and number of clusters that will need to be estimated :param config: configuration file :param num_clusters: number of clusters for which to estimate centroids\n- def compute_centroids(self, numerator: torch.Tensor, cluster_probabilities: torch.Tensor, eps=0.001): Computes the centroids over the whole numerator and cluster probabilities tensors :param numerator: (num_points, features_size, num_clusters) tensor :param cluster_probabilities: (num_points, num_clusters) tensor :return: (features_size, num_clusters) tensor representing the centroids for each cluster\n- def forward(self, features: torch.Tensor, domains: torch.Tensor, cluster_probabilities: torch.Tensor): :param features: (batch_size, features_size) tensor of features to which to estimate centroids :param domains: (1, batch_size) tensor representing the domain of each sample :param cluster_probabilities: (batch_size, num_clusters) tensor of cluster assigment probabilities :return: global_centroid, domain_centroids_list where global centroid is a (feaures_size, num_clusters) tensor representing the global centroids and domain_centroids_list is a list of length domains_count with such centroid tensors computed on each domain\n\n<|skeleton|>\nclass CentroidEstimator:\n \"\"\"Estimator for cluster centroids positions across domains\"\"\"\n\n def __init__(self, config, num_clusters):\n \"\"\"Initializes the centroid estimator based on the provided configuration and number of clusters that will need to be estimated :param config: configuration file :param num_clusters: number of clusters for which to estimate centroids\"\"\"\n <|body_0|>\n\n def compute_centroids(self, numerator: torch.Tensor, cluster_probabilities: torch.Tensor, eps=0.001):\n \"\"\"Computes the centroids over the whole numerator and cluster probabilities tensors :param numerator: (num_points, features_size, num_clusters) tensor :param cluster_probabilities: (num_points, num_clusters) tensor :return: (features_size, num_clusters) tensor representing the centroids for each cluster\"\"\"\n <|body_1|>\n\n def forward(self, features: torch.Tensor, domains: torch.Tensor, cluster_probabilities: torch.Tensor):\n \"\"\":param features: (batch_size, features_size) tensor of features to which to estimate centroids :param domains: (1, batch_size) tensor representing the domain of each sample :param cluster_probabilities: (batch_size, num_clusters) tensor of cluster assigment probabilities :return: global_centroid, domain_centroids_list where global centroid is a (feaures_size, num_clusters) tensor representing the global centroids and domain_centroids_list is a list of length domains_count with such centroid tensors computed on each domain\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(CentroidEstimator, self).__init__()\n self.domains_count = config.domains_count\n self.features_size = config.encoder_features_count\n self.num_clusters = num_clusters\n self.alpha = config.centroid_estimation_alpha\n self.global_centroids_estimator = FixedMatrixEstimator(self.features_size, self.num_clusters, self.alpha, 
initial_value=torch.zeros((self.features_size, self.num_clusters)))\n self.domain_centroids_estimators = nn.ModuleList([FixedMatrixEstimator(self.features_size, self.num_clusters, self.alpha, initial_value=torch.zeros((self.features_size, self.num_clusters))) for i in range(self.domains_count)])\n<|end_body_0|>\n\n<|body_start_1|>\n numerator = torch.sum(numerator, dim=0, keepdim=False)\n cluster_probabilities = torch.sum(cluster_probabilities, dim=0, keepdim=False)\n cluster_probabilities = cluster_probabilities + eps\n return numerator / cluster_probabilities\n<|end_body_1|>\n\n<|body_start_2|>\n unsqueezed_features = features.unsqueeze(-1)\n unsqueezed_cluster_probabilities = cluster_probabilities.unsqueeze(1)\n unsummed_numerator = unsqueezed_features * unsqueezed_cluster_probabilities\n global_centroids = self.compute_centroids(unsummed_numerator, unsqueezed_cluster_probabilities)\n estimated_global_centroids = self.global_centroids_estimator(global_centroids)\n estimated_domain_centroids = []\n for domain_idx in range(self.domains_count):\n slice_indexes = torch.nonzero(domains.view(-1) == domain_idx)\n domain_numerator = unsummed_numerator[slice_indexes].squeeze(1)\n domain_cluster_probabilities = unsqueezed_cluster_probabilities[slice_indexes].squeeze(1)\n domain_centroid = self.compute_centroids(domain_numerator, domain_cluster_probabilities)\n current_domain_centroids = self.domain_centroids_estimators[domain_idx](domain_centroid)\n estimated_domain_centroids.append(current_domain_centroids)\n return (estimated_global_centroids, estimated_domain_centroids)\n<|end_body_2|>\n", "revision_id": "4c5e4d573446cefc26f5307a225e5b8223e9eb47", "skeleton": "<|skeleton|>\nclass CentroidEstimator:\n \"\"\"Estimator for cluster centroids positions across domains\"\"\"\n\n def __init__(self, config, num_clusters):\n \"\"\"Initializes the centroid estimator based on the provided configuration and number of clusters that will need to be estimated :param config: configuration file :param num_clusters: number of clusters for which to estimate centroids\"\"\"\n <|body_0|>\n\n def compute_centroids(self, numerator: torch.Tensor, cluster_probabilities: torch.Tensor, eps=0.001):\n \"\"\"Computes the centroids over the whole numerator and cluster probabilities tensors :param numerator: (num_points, features_size, num_clusters) tensor :param cluster_probabilities: (num_points, num_clusters) tensor :return: (features_size, num_clusters) tensor representing the centroids for each cluster\"\"\"\n <|body_1|>\n\n def forward(self, features: torch.Tensor, domains: torch.Tensor, cluster_probabilities: torch.Tensor):\n \"\"\":param features: (batch_size, features_size) tensor of features to which to estimate centroids :param domains: (1, batch_size) tensor representing the domain of each sample :param cluster_probabilities: (batch_size, num_clusters) tensor of cluster assigment probabilities :return: global_centroid, domain_centroids_list where global centroid is a (feaures_size, num_clusters) tensor representing the global centroids and domain_centroids_list is a list of length domains_count with such centroid tensors computed on each domain\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CentroidEstimator:\n \"\"\"Estimator for cluster centroids positions across domains\"\"\"\n\n def __init__(self, config, num_clusters):\n \"\"\"Initializes the centroid 
estimator based on the provided configuration and number of clusters that will need to be estimated :param config: configuration file :param num_clusters: number of clusters for which to estimate centroids\"\"\"\n super(CentroidEstimator, self).__init__()\n self.domains_count = config.domains_count\n self.features_size = config.encoder_features_count\n self.num_clusters = num_clusters\n self.alpha = config.centroid_estimation_alpha\n self.global_centroids_estimator = FixedMatrixEstimator(self.features_size, self.num_clusters, self.alpha, initial_value=torch.zeros((self.features_size, self.num_clusters)))\n self.domain_centroids_estimators = nn.ModuleList([FixedMatrixEstimator(self.features_size, self.num_clusters, self.alpha, initial_value=torch.zeros((self.features_size, self.num_clusters))) for i in range(self.domains_count)])\n\n def compute_centroids(self, numerator: torch.Tensor, cluster_probabilities: torch.Tensor, eps=0.001):\n \"\"\"Computes the centroids over the whole numerator and cluster probabilities tensors :param numerator: (num_points, features_size, num_clusters) tensor :param cluster_probabilities: (num_points, num_clusters) tensor :return: (features_size, num_clusters) tensor representing the centroids for each cluster\"\"\"\n numerator = torch.sum(numerator, dim=0, keepdim=False)\n cluster_probabilities = torch.sum(cluster_probabilities, dim=0, keepdim=False)\n cluster_probabilities = cluster_probabilities + eps\n return numerator / cluster_probabilities\n\n def forward(self, features: torch.Tensor, domains: torch.Tensor, cluster_probabilities: torch.Tensor):\n \"\"\":param features: (batch_size, features_size) tensor of features to which to estimate centroids :param domains: (1, batch_size) tensor representing the domain of each sample :param cluster_probabilities: (batch_size, num_clusters) tensor of cluster assignment probabilities :return: global_centroid, domain_centroids_list where global centroid is a (features_size, num_clusters) tensor representing the global centroids and domain_centroids_list is a list of length domains_count with such centroid tensors computed on each domain\"\"\"\n unsqueezed_features = features.unsqueeze(-1)\n unsqueezed_cluster_probabilities = cluster_probabilities.unsqueeze(1)\n unsummed_numerator = unsqueezed_features * unsqueezed_cluster_probabilities\n global_centroids = self.compute_centroids(unsummed_numerator, unsqueezed_cluster_probabilities)\n estimated_global_centroids = self.global_centroids_estimator(global_centroids)\n estimated_domain_centroids = []\n for domain_idx in range(self.domains_count):\n slice_indexes = torch.nonzero(domains.view(-1) == domain_idx)\n domain_numerator = unsummed_numerator[slice_indexes].squeeze(1)\n domain_cluster_probabilities = unsqueezed_cluster_probabilities[slice_indexes].squeeze(1)\n domain_centroid = self.compute_centroids(domain_numerator, domain_cluster_probabilities)\n current_domain_centroids = self.domain_centroids_estimators[domain_idx](domain_centroid)\n estimated_domain_centroids.append(current_domain_centroids)\n return (estimated_global_centroids, estimated_domain_centroids)\n", "source": "the_stack_v2_python_sparse", "source_path": "codebase/archs/cluster/estimator.py", "source_repo": "sudipansaha/acids-clustering-domain-shift", "split": "val", "star_events_count": 0}
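Note: the CentroidEstimator record above computes soft-assignment centroids, c_k = sum_i p_ik * x_i / (sum_i p_ik + eps), and then smooths them across batches with FixedMatrixEstimator, whose definition is not part of the record. Below is a minimal standalone sketch of the same computation in plain PyTorch; the smoother is written inline as an assumed exponential-moving-average update, since the record does not show the real one.

import torch

def soft_centroids(features, probs, eps=1e-3):
    # features: (N, F) points, probs: (N, K) soft cluster assignments
    numerator = features.unsqueeze(-1) * probs.unsqueeze(1)  # (N, F, K)
    return numerator.sum(dim=0) / (probs.sum(dim=0) + eps)   # (F, K)

def ema(previous, current, alpha=0.1):
    # assumed update rule; the record's FixedMatrixEstimator is not shown
    return (1.0 - alpha) * previous + alpha * current

features = torch.randn(8, 4)                     # batch of 8 four-dim features
probs = torch.softmax(torch.randn(8, 3), dim=1)  # soft assignment to 3 clusters
state = torch.zeros(4, 3)                        # running centroid estimate
state = ema(state, soft_centroids(features, probs))
print(state.shape)                               # torch.Size([4, 3])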
{"blob_id": "d6ed850c2215bd27f976d8f3ce2cda6bbe8d4a17", "bodies": ["self.d = len(a)\nassert len(b) == self.d\nassert len(orders) == self.d\nself.a = np.array(a, dtype=float)\nself.b = np.array(b, dtype=float)\nself.orders = np.array(orders, dtype=int)\nself.__mcoeffs__ = None\nif values is not None:\n self.set_values(values)", "mvalues = np.array(mvalues, dtype=float)\nn_sp = mvalues.shape[-1]\nmvalues = mvalues.reshape(list(self.orders) + [n_sp])\nif not np.all(np.isfinite(mvalues)):\n raise Exception('Trying to interpolate non-finite values')\nfrom .filter_cubic import filter_mcoeffs\nself.__mcoeffs__ = filter_mcoeffs(self.a, self.b, self.orders, mvalues)\nself.__mvalues__ = mvalues", "import time\nif points.ndim == 1:\n raise Exception('Expected 2d array. Received {}d array'.format(points.ndim))\nif points.shape[1] != self.d:\n raise Exception('Second dimension should be {}. Received : {}.'.format(self.d, points.shape[0]))\nif not np.all(np.isfinite(points)):\n raise Exception('Spline interpolator evaluated at non-finite points.')\nn_sp = self.__mcoeffs__.shape[-1]\nN = points.shape[0]\nd = points.shape[1]\nfrom .eval_splines import eval_cubic\nif not diff:\n grid = tuple(((self.a[i], self.b[i], self.orders[i]) for i in range(len(self.a))))\n from .eval_splines import eval_cubic\n values = np.empty((N, n_sp), dtype=float)\n eval_cubic(grid, self.__mcoeffs__, points, values)\n return values\nelse:\n from .eval_cubic import vec_eval_cubic_splines_G\n values = np.empty((N, n_sp), dtype=float)\n dvalues = np.empty((N, d, n_sp), dtype=float)\n vec_eval_cubic_splines_G(self.a, self.b, self.orders, self.__mcoeffs__, points, values, dvalues)\n return [values, dvalues]", "if self.__grid__ is None:\n self.__grid__ = mlinspace(self.a, self.b, self.orders)\nreturn self.__grid__", "if s.ndim == 1:\n res = self.__call__(numpy.atleast_2d(s))\n return res.ravel()\nreturn self.interpolate(s)"], "bodies_text": "<|body_start_0|>\n self.d = len(a)\n assert len(b) == self.d\n assert len(orders) == self.d\n self.a = np.array(a, dtype=float)\n self.b = np.array(b, dtype=float)\n self.orders = np.array(orders, dtype=int)\n self.__mcoeffs__ = None\n if values is not None:\n self.set_values(values)\n<|end_body_0|>\n\n<|body_start_1|>\n mvalues = np.array(mvalues, dtype=float)\n n_sp = mvalues.shape[-1]\n mvalues = mvalues.reshape(list(self.orders) + [n_sp])\n if not np.all(np.isfinite(mvalues)):\n raise Exception('Trying to interpolate non-finite values')\n from .filter_cubic import filter_mcoeffs\n self.__mcoeffs__ = filter_mcoeffs(self.a, self.b, self.orders, mvalues)\n self.__mvalues__ = mvalues\n<|end_body_1|>\n\n<|body_start_2|>\n import time\n if points.ndim == 1:\n raise Exception('Expected 2d array. Received {}d array'.format(points.ndim))\n if points.shape[1] != self.d:\n raise Exception('Second dimension should be {}. 
Received : {}.'.format(self.d, points.shape[0]))\n if not np.all(np.isfinite(points)):\n raise Exception('Spline interpolator evaluated at non-finite points.')\n n_sp = self.__mcoeffs__.shape[-1]\n N = points.shape[0]\n d = points.shape[1]\n from .eval_splines import eval_cubic\n if not diff:\n grid = tuple(((self.a[i], self.b[i], self.orders[i]) for i in range(len(self.a))))\n from .eval_splines import eval_cubic\n values = np.empty((N, n_sp), dtype=float)\n eval_cubic(grid, self.__mcoeffs__, points, values)\n return values\n else:\n from .eval_cubic import vec_eval_cubic_splines_G\n values = np.empty((N, n_sp), dtype=float)\n dvalues = np.empty((N, d, n_sp), dtype=float)\n vec_eval_cubic_splines_G(self.a, self.b, self.orders, self.__mcoeffs__, points, values, dvalues)\n return [values, dvalues]\n<|end_body_2|>\n\n<|body_start_3|>\n if self.__grid__ is None:\n self.__grid__ = mlinspace(self.a, self.b, self.orders)\n return self.__grid__\n<|end_body_3|>\n\n<|body_start_4|>\n if s.ndim == 1:\n res = self.__call__(numpy.atleast_2d(s))\n return res.ravel()\n return self.interpolate(s)\n<|end_body_4|>\n", "class_docstring": "", "class_name": "CubicSplines", "detected_licenses": ["BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CubicSplines:\n\n def __init__(self, a, b, orders, values=None):\n \"\"\"Creates a cubic multi-spline interpolator for many functions on a regular cartesian grid.\"\"\"\n <|body_0|>\n\n def set_values(self, mvalues):\n \"\"\"Change values on the nodes of the functions to approximate.\"\"\"\n <|body_1|>\n\n def interpolate(self, points, diff=False):\n \"\"\"Interpolate splines at manu points.\"\"\"\n <|body_2|>\n\n def grid(self):\n \"\"\"Cartesian enumeration of all nodes.\"\"\"\n <|body_3|>\n\n def __call__(self, s):\n \"\"\"Interpolate the splines at one or many points. Parameters ---------- s : (array-like with 1 or 2 dimensions) Coordinates of one point, or list of coordinates, at which the splines are interpolated. Returns: -------- res : (array-like with 1 or 2 dimensions) Vector or list of vectors containing the interpolator evaluated at `s`.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.d = len(a)\n assert len(b) == self.d\n assert len(orders) == self.d\n self.a = np.array(a, dtype=float)\n self.b = np.array(b, dtype=float)\n self.orders = np.array(orders, dtype=int)\n self.__mcoeffs__ = None\n if values is not None:\n self.set_values(values)\n<|end_body_0|>\n\n<|body_start_1|>\n mvalues = np.array(mvalues, dtype=float)\n n_sp = mvalues.shape[-1]\n mvalues = mvalues.reshape(list(self.orders) + [n_sp])\n if not np.all(np.isfinite(mvalues)):\n raise Exception('Trying to interpolate non-finite values')\n from .filter_cubic import filter_mcoeffs\n self.__mcoeffs__ = filter_mcoeffs(self.a, self.b, self.orders, mvalues)\n self.__mvalues__ = mvalues\n<|end_body_1|>\n\n<|body_start_2|>\n import time\n if points.ndim == 1:\n raise Exception('Expected 2d array. Received {}d array'.format(points.ndim))\n if points.shape[1] != self.d:\n raise Exception('Second dimension should be {}. 
Received : {}.'.format(self.d, points.shape[0]))\n if not np.all(np.isfinite(points)):\n raise Exception('Spline interpolator evaluated at non-finite points.')\n n_sp = self.__mcoeffs__.shape[-1]\n N = points.shape[0]\n d = points.shape[1]\n from .eval_splines import eval_cubic\n if not diff:\n grid = tuple(((self.a[i], self.b[i], self.orders[i]) for i in range(len(self.a))))\n from .eval_splines import eval_cubic\n values = np.empty((N, n_sp), dtype=float)\n eval_cubic(grid, self.__mcoeffs__, points, values)\n return values\n else:\n from .eval_cubic import vec_eval_cubic_splines_G\n values = np.empty((N, n_sp), dtype=float)\n dvalues = np.empty((N, d, n_sp), dtype=float)\n vec_eval_cubic_splines_G(self.a, self.b, self.orders, self.__mcoeffs__, points, values, dvalues)\n return [values, dvalues]\n<|end_body_2|>\n\n<|body_start_3|>\n if self.__grid__ is None:\n self.__grid__ = mlinspace(self.a, self.b, self.orders)\n return self.__grid__\n<|end_body_3|>\n\n<|body_start_4|>\n if s.ndim == 1:\n res = self.__call__(numpy.atleast_2d(s))\n return res.ravel()\n return self.interpolate(s)\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000216", "length_bytes": 7068, "license_type": "permissive", "methods": [{"docstring": "Creates a cubic multi-spline interpolator for many functions on a regular cartesian grid.", "name": "__init__", "signature": "def __init__(self, a, b, orders, values=None)"}, {"docstring": "Change values on the nodes of the functions to approximate.", "name": "set_values", "signature": "def set_values(self, mvalues)"}, {"docstring": "Interpolate splines at manu points.", "name": "interpolate", "signature": "def interpolate(self, points, diff=False)"}, {"docstring": "Cartesian enumeration of all nodes.", "name": "grid", "signature": "def grid(self)"}, {"docstring": "Interpolate the splines at one or many points. Parameters ---------- s : (array-like with 1 or 2 dimensions) Coordinates of one point, or list of coordinates, at which the splines are interpolated. Returns: -------- res : (array-like with 1 or 2 dimensions) Vector or list of vectors containing the interpolator evaluated at `s`.", "name": "__call__", "signature": "def __call__(self, s)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_022782", "prompt": "Implement the Python class `CubicSplines` described below.\n\nClass description:\nImplement the CubicSplines class.\n\nMethod signatures and docstrings:\n- def __init__(self, a, b, orders, values=None): Creates a cubic multi-spline interpolator for many functions on a regular cartesian grid.\n- def set_values(self, mvalues): Change values on the nodes of the functions to approximate.\n- def interpolate(self, points, diff=False): Interpolate splines at manu points.\n- def grid(self): Cartesian enumeration of all nodes.\n- def __call__(self, s): Interpolate the splines at one or many points. Parameters ---------- s : (array-like with 1 or 2 dimensions) Coordinates of one point, or list of coordinates, at which the splines are interpolated. 
Returns: -------- res : (array-like with 1 or 2 dimensions) Vector or list of vectors containing the interpolator evaluated at `s`.", "prompted_full_text": "Implement the Python class `CubicSplines` described below.\n\nClass description:\nImplement the CubicSplines class.\n\nMethod signatures and docstrings:\n- def __init__(self, a, b, orders, values=None): Creates a cubic multi-spline interpolator for many functions on a regular cartesian grid.\n- def set_values(self, mvalues): Change values on the nodes of the functions to approximate.\n- def interpolate(self, points, diff=False): Interpolate splines at manu points.\n- def grid(self): Cartesian enumeration of all nodes.\n- def __call__(self, s): Interpolate the splines at one or many points. Parameters ---------- s : (array-like with 1 or 2 dimensions) Coordinates of one point, or list of coordinates, at which the splines are interpolated. Returns: -------- res : (array-like with 1 or 2 dimensions) Vector or list of vectors containing the interpolator evaluated at `s`.\n\n<|skeleton|>\nclass CubicSplines:\n\n def __init__(self, a, b, orders, values=None):\n \"\"\"Creates a cubic multi-spline interpolator for many functions on a regular cartesian grid.\"\"\"\n <|body_0|>\n\n def set_values(self, mvalues):\n \"\"\"Change values on the nodes of the functions to approximate.\"\"\"\n <|body_1|>\n\n def interpolate(self, points, diff=False):\n \"\"\"Interpolate splines at manu points.\"\"\"\n <|body_2|>\n\n def grid(self):\n \"\"\"Cartesian enumeration of all nodes.\"\"\"\n <|body_3|>\n\n def __call__(self, s):\n \"\"\"Interpolate the splines at one or many points. Parameters ---------- s : (array-like with 1 or 2 dimensions) Coordinates of one point, or list of coordinates, at which the splines are interpolated. Returns: -------- res : (array-like with 1 or 2 dimensions) Vector or list of vectors containing the interpolator evaluated at `s`.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.d = len(a)\n assert len(b) == self.d\n assert len(orders) == self.d\n self.a = np.array(a, dtype=float)\n self.b = np.array(b, dtype=float)\n self.orders = np.array(orders, dtype=int)\n self.__mcoeffs__ = None\n if values is not None:\n self.set_values(values)\n<|end_body_0|>\n\n<|body_start_1|>\n mvalues = np.array(mvalues, dtype=float)\n n_sp = mvalues.shape[-1]\n mvalues = mvalues.reshape(list(self.orders) + [n_sp])\n if not np.all(np.isfinite(mvalues)):\n raise Exception('Trying to interpolate non-finite values')\n from .filter_cubic import filter_mcoeffs\n self.__mcoeffs__ = filter_mcoeffs(self.a, self.b, self.orders, mvalues)\n self.__mvalues__ = mvalues\n<|end_body_1|>\n\n<|body_start_2|>\n import time\n if points.ndim == 1:\n raise Exception('Expected 2d array. Received {}d array'.format(points.ndim))\n if points.shape[1] != self.d:\n raise Exception('Second dimension should be {}. 
Received : {}.'.format(self.d, points.shape[0]))\n if not np.all(np.isfinite(points)):\n raise Exception('Spline interpolator evaluated at non-finite points.')\n n_sp = self.__mcoeffs__.shape[-1]\n N = points.shape[0]\n d = points.shape[1]\n from .eval_splines import eval_cubic\n if not diff:\n grid = tuple(((self.a[i], self.b[i], self.orders[i]) for i in range(len(self.a))))\n from .eval_splines import eval_cubic\n values = np.empty((N, n_sp), dtype=float)\n eval_cubic(grid, self.__mcoeffs__, points, values)\n return values\n else:\n from .eval_cubic import vec_eval_cubic_splines_G\n values = np.empty((N, n_sp), dtype=float)\n dvalues = np.empty((N, d, n_sp), dtype=float)\n vec_eval_cubic_splines_G(self.a, self.b, self.orders, self.__mcoeffs__, points, values, dvalues)\n return [values, dvalues]\n<|end_body_2|>\n\n<|body_start_3|>\n if self.__grid__ is None:\n self.__grid__ = mlinspace(self.a, self.b, self.orders)\n return self.__grid__\n<|end_body_3|>\n\n<|body_start_4|>\n if s.ndim == 1:\n res = self.__call__(numpy.atleast_2d(s))\n return res.ravel()\n return self.interpolate(s)\n<|end_body_4|>\n", "revision_id": "19b2cd3882003c19b7aeb7c35fca5cdad3fe1d5e", "skeleton": "<|skeleton|>\nclass CubicSplines:\n\n def __init__(self, a, b, orders, values=None):\n \"\"\"Creates a cubic multi-spline interpolator for many functions on a regular cartesian grid.\"\"\"\n <|body_0|>\n\n def set_values(self, mvalues):\n \"\"\"Change values on the nodes of the functions to approximate.\"\"\"\n <|body_1|>\n\n def interpolate(self, points, diff=False):\n \"\"\"Interpolate splines at manu points.\"\"\"\n <|body_2|>\n\n def grid(self):\n \"\"\"Cartesian enumeration of all nodes.\"\"\"\n <|body_3|>\n\n def __call__(self, s):\n \"\"\"Interpolate the splines at one or many points. Parameters ---------- s : (array-like with 1 or 2 dimensions) Coordinates of one point, or list of coordinates, at which the splines are interpolated. Returns: -------- res : (array-like with 1 or 2 dimensions) Vector or list of vectors containing the interpolator evaluated at `s`.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CubicSplines:\n def __init__(self, a, b, orders, values=None):\n \"\"\"Creates a cubic multi-spline interpolator for many functions on a regular cartesian grid.\"\"\"\n self.d = len(a)\n assert len(b) == self.d\n assert len(orders) == self.d\n self.a = np.array(a, dtype=float)\n self.b = np.array(b, dtype=float)\n self.orders = np.array(orders, dtype=int)\n self.__mcoeffs__ = None\n if values is not None:\n self.set_values(values)\n\n def set_values(self, mvalues):\n \"\"\"Change values on the nodes of the functions to approximate.\"\"\"\n mvalues = np.array(mvalues, dtype=float)\n n_sp = mvalues.shape[-1]\n mvalues = mvalues.reshape(list(self.orders) + [n_sp])\n if not np.all(np.isfinite(mvalues)):\n raise Exception('Trying to interpolate non-finite values')\n from .filter_cubic import filter_mcoeffs\n self.__mcoeffs__ = filter_mcoeffs(self.a, self.b, self.orders, mvalues)\n self.__mvalues__ = mvalues\n\n def interpolate(self, points, diff=False):\n \"\"\"Interpolate splines at manu points.\"\"\"\n import time\n if points.ndim == 1:\n raise Exception('Expected 2d array. Received {}d array'.format(points.ndim))\n if points.shape[1] != self.d:\n raise Exception('Second dimension should be {}. 
Received : {}.'.format(self.d, points.shape[1]))\n if not np.all(np.isfinite(points)):\n raise Exception('Spline interpolator evaluated at non-finite points.')\n n_sp = self.__mcoeffs__.shape[-1]\n N = points.shape[0]\n d = points.shape[1]\n from .eval_splines import eval_cubic\n if not diff:\n grid = tuple(((self.a[i], self.b[i], self.orders[i]) for i in range(len(self.a))))\n from .eval_splines import eval_cubic\n values = np.empty((N, n_sp), dtype=float)\n eval_cubic(grid, self.__mcoeffs__, points, values)\n return values\n else:\n from .eval_cubic import vec_eval_cubic_splines_G\n values = np.empty((N, n_sp), dtype=float)\n dvalues = np.empty((N, d, n_sp), dtype=float)\n vec_eval_cubic_splines_G(self.a, self.b, self.orders, self.__mcoeffs__, points, values, dvalues)\n return [values, dvalues]\n\n def grid(self):\n \"\"\"Cartesian enumeration of all nodes.\"\"\"\n if self.__grid__ is None:\n self.__grid__ = mlinspace(self.a, self.b, self.orders)\n return self.__grid__\n\n def __call__(self, s):\n \"\"\"Interpolate the splines at one or many points. Parameters ---------- s : (array-like with 1 or 2 dimensions) Coordinates of one point, or list of coordinates, at which the splines are interpolated. Returns: -------- res : (array-like with 1 or 2 dimensions) Vector or list of vectors containing the interpolator evaluated at `s`.\"\"\"\n if s.ndim == 1:\n res = self.__call__(numpy.atleast_2d(s))\n return res.ravel()\n return self.interpolate(s)\n", "source": "the_stack_v2_python_sparse", "source_path": "interpolation/splines/splines.py", "source_repo": "EconForge/interpolation.py", "split": "val", "star_events_count": 116}
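Note: the CubicSplines record above pulls its filtering and evaluation kernels from sibling modules (filter_cubic, eval_splines, eval_cubic, mlinspace) that are not included in the record, so it is not runnable in isolation; also, as stored, __init__ never initializes self.__grid__, so grid() would raise AttributeError on first use unless the attribute is defined elsewhere in the original module. As a stand-in for the calling convention only (node values on a regular grid of shape `orders`, queries as an (N, d) array), here is a hedged sketch using scipy's RegularGridInterpolator, a different but analogous regular-grid cubic interpolator (method="cubic" requires scipy >= 1.9); it is not the record's implementation.

import numpy as np
from scipy.interpolate import RegularGridInterpolator

a, b, orders = [0.0, 0.0], [1.0, 1.0], [20, 20]
axes = [np.linspace(lo, hi, n) for lo, hi, n in zip(a, b, orders)]
# node values on the regular grid; shape equals `orders`, like set_values expects
values = np.sin(axes[0])[:, None] * np.cos(axes[1])[None, :]

interp = RegularGridInterpolator(axes, values, method="cubic")
points = np.random.rand(5, 2)  # (N, d) query array, as CubicSplines.interpolate expects
print(interp(points).shape)    # (5,)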
{"blob_id": "ed14c6418f01256846b51fcfb941fedaf9355987", "bodies": ["s_length = len(s)\nif s_length < 2:\n return s\nresult_start, result_length = (0, 1)\ncenter1, center2 = (0, 1)\ni = 1\nwhile i < s_length:\n start = center1 - (i - center2)\n if s[i] == s[start]:\n length = i - start + 1\n if length > result_length:\n result_start, result_length = (start, length)\n if start > 0:\n i += 1\n continue\n if center1 < center2:\n center1 += 1\n elif s[center1] == s[center1 + 1]:\n center2 += 1\n else:\n center1 += 1\n center2 += 1\n if s_length * 2 - center1 - center2 - 1 <= result_length:\n break\n i = center2 + 1\nreturn s[result_start:result_start + result_length]", "s_length = len(s)\nif s_length < 2:\n return s\nresult_start, result_length = (0, 1)\nfor i in range(s_length - 1):\n i1, i2 = (i, i)\n while i1 >= 0 and i2 < s_length and (s[i1] == s[i2]):\n i1 -= 1\n i2 += 1\n p_length = i2 - i1 - 1\n if result_length < p_length:\n result_start = i1 + 1\n result_length = p_length\n i1, i2 = (i, i + 1)\n while i1 >= 0 and i2 < s_length and (s[i1] == s[i2]):\n i1 -= 1\n i2 += 1\n p_length = i2 - i1 - 1\n if result_length < p_length:\n result_start = i1 + 1\n result_length = p_length\nreturn s[result_start:result_start + result_length]"], "bodies_text": "<|body_start_0|>\n s_length = len(s)\n if s_length < 2:\n return s\n result_start, result_length = (0, 1)\n center1, center2 = (0, 1)\n i = 1\n while i < s_length:\n start = center1 - (i - center2)\n if s[i] == s[start]:\n length = i - start + 1\n if length > result_length:\n result_start, result_length = (start, length)\n if start > 0:\n i += 1\n continue\n if center1 < center2:\n center1 += 1\n elif s[center1] == s[center1 + 1]:\n center2 += 1\n else:\n center1 += 1\n center2 += 1\n if s_length * 2 - center1 - center2 - 1 <= result_length:\n break\n i = center2 + 1\n return s[result_start:result_start + result_length]\n<|end_body_0|>\n\n<|body_start_1|>\n s_length = len(s)\n if s_length < 2:\n return s\n result_start, result_length = (0, 1)\n for i in range(s_length - 1):\n i1, i2 = (i, i)\n while i1 >= 0 and i2 < s_length and (s[i1] == s[i2]):\n i1 -= 1\n i2 += 1\n p_length = i2 - i1 - 1\n if result_length < p_length:\n result_start = i1 + 1\n result_length = p_length\n i1, i2 = (i, i + 1)\n while i1 >= 0 and i2 < s_length and (s[i1] == s[i2]):\n i1 -= 1\n i2 += 1\n p_length = i2 - i1 - 1\n if result_length < p_length:\n result_start = i1 + 1\n result_length = p_length\n return s[result_start:result_start + result_length]\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def longestPalindrome1(self, s):\n \"\"\":type s: str :rtype: str\"\"\"\n <|body_0|>\n\n def longestPalindrome(self, s):\n \"\"\":type s: str :rtype: str\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n s_length = len(s)\n if s_length < 2:\n return s\n result_start, result_length = (0, 1)\n center1, center2 = (0, 1)\n i = 1\n while i < s_length:\n start = center1 - (i - center2)\n if s[i] == s[start]:\n length = i - start + 1\n if length > result_length:\n result_start, result_length = (start, length)\n if start > 0:\n i += 1\n continue\n if center1 < center2:\n center1 += 1\n elif s[center1] == s[center1 + 1]:\n center2 += 1\n else:\n center1 += 1\n center2 += 1\n if s_length * 2 - center1 - center2 - 1 <= result_length:\n break\n i = center2 + 1\n return s[result_start:result_start + 
result_length]\n<|end_body_0|>\n\n<|body_start_1|>\n s_length = len(s)\n if s_length < 2:\n return s\n result_start, result_length = (0, 1)\n for i in range(s_length - 1):\n i1, i2 = (i, i)\n while i1 >= 0 and i2 < s_length and (s[i1] == s[i2]):\n i1 -= 1\n i2 += 1\n p_length = i2 - i1 - 1\n if result_length < p_length:\n result_start = i1 + 1\n result_length = p_length\n i1, i2 = (i, i + 1)\n while i1 >= 0 and i2 < s_length and (s[i1] == s[i2]):\n i1 -= 1\n i2 += 1\n p_length = i2 - i1 - 1\n if result_length < p_length:\n result_start = i1 + 1\n result_length = p_length\n return s[result_start:result_start + result_length]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000217", "length_bytes": 4400, "license_type": "no_license", "methods": [{"docstring": ":type s: str :rtype: str", "name": "longestPalindrome1", "signature": "def longestPalindrome1(self, s)"}, {"docstring": ":type s: str :rtype: str", "name": "longestPalindrome", "signature": "def longestPalindrome(self, s)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_031439", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def longestPalindrome1(self, s): :type s: str :rtype: str\n- def longestPalindrome(self, s): :type s: str :rtype: str", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def longestPalindrome1(self, s): :type s: str :rtype: str\n- def longestPalindrome(self, s): :type s: str :rtype: str\n\n<|skeleton|>\nclass Solution:\n\n def longestPalindrome1(self, s):\n \"\"\":type s: str :rtype: str\"\"\"\n <|body_0|>\n\n def longestPalindrome(self, s):\n \"\"\":type s: str :rtype: str\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n s_length = len(s)\n if s_length < 2:\n return s\n result_start, result_length = (0, 1)\n center1, center2 = (0, 1)\n i = 1\n while i < s_length:\n start = center1 - (i - center2)\n if s[i] == s[start]:\n length = i - start + 1\n if length > result_length:\n result_start, result_length = (start, length)\n if start > 0:\n i += 1\n continue\n if center1 < center2:\n center1 += 1\n elif s[center1] == s[center1 + 1]:\n center2 += 1\n else:\n center1 += 1\n center2 += 1\n if s_length * 2 - center1 - center2 - 1 <= result_length:\n break\n i = center2 + 1\n return s[result_start:result_start + result_length]\n<|end_body_0|>\n\n<|body_start_1|>\n s_length = len(s)\n if s_length < 2:\n return s\n result_start, result_length = (0, 1)\n for i in range(s_length - 1):\n i1, i2 = (i, i)\n while i1 >= 0 and i2 < s_length and (s[i1] == s[i2]):\n i1 -= 1\n i2 += 1\n p_length = i2 - i1 - 1\n if result_length < p_length:\n result_start = i1 + 1\n result_length = p_length\n i1, i2 = (i, i + 1)\n while i1 >= 0 and i2 < s_length and (s[i1] == s[i2]):\n i1 -= 1\n i2 += 1\n p_length = i2 - i1 - 1\n if result_length < p_length:\n result_start = i1 + 1\n result_length = p_length\n return s[result_start:result_start + result_length]\n<|end_body_1|>\n", "revision_id": "e07b85a4121f2665393f1176befbdbe06f1e1ad0", "skeleton": "<|skeleton|>\nclass Solution:\n\n def longestPalindrome1(self, s):\n \"\"\":type s: str :rtype: str\"\"\"\n <|body_0|>\n\n def longestPalindrome(self, s):\n \"\"\":type s: str :rtype: str\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": 
"data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def longestPalindrome1(self, s):\n \"\"\":type s: str :rtype: str\"\"\"\n s_length = len(s)\n if s_length < 2:\n return s\n result_start, result_length = (0, 1)\n center1, center2 = (0, 1)\n i = 1\n while i < s_length:\n start = center1 - (i - center2)\n if s[i] == s[start]:\n length = i - start + 1\n if length > result_length:\n result_start, result_length = (start, length)\n if start > 0:\n i += 1\n continue\n if center1 < center2:\n center1 += 1\n elif s[center1] == s[center1 + 1]:\n center2 += 1\n else:\n center1 += 1\n center2 += 1\n if s_length * 2 - center1 - center2 - 1 <= result_length:\n break\n i = center2 + 1\n return s[result_start:result_start + result_length]\n\n def longestPalindrome(self, s):\n \"\"\":type s: str :rtype: str\"\"\"\n s_length = len(s)\n if s_length < 2:\n return s\n result_start, result_length = (0, 1)\n for i in range(s_length - 1):\n i1, i2 = (i, i)\n while i1 >= 0 and i2 < s_length and (s[i1] == s[i2]):\n i1 -= 1\n i2 += 1\n p_length = i2 - i1 - 1\n if result_length < p_length:\n result_start = i1 + 1\n result_length = p_length\n i1, i2 = (i, i + 1)\n while i1 >= 0 and i2 < s_length and (s[i1] == s[i2]):\n i1 -= 1\n i2 += 1\n p_length = i2 - i1 - 1\n if result_length < p_length:\n result_start = i1 + 1\n result_length = p_length\n return s[result_start:result_start + result_length]\n", "source": "the_stack_v2_python_sparse", "source_path": "Algorithms/longest-palindromic-substring.py", "source_repo": "feilniu/LeetCode", "split": "val", "star_events_count": 0}
{"blob_id": "c7832914b90dc8456aa4978a9b2241af7038cd2d", "bodies": ["self.webservername = webservername\nself.processToFileMap = {}\nself._backend = backend\nself.registerdProcesses = []\nself.cacheCapacity = 5\nself.cache = defaultdict(list)", "self.cacheCapacity = cacheCapacity\nprint('Initializing the cache and processNames are : {}'.format(processList))\nfor key in processList:\n self.cache[key] = deque([], self.cacheCapacity)\nprint('Items in {} cache {}'.format(self.webservername, self.cache))", "self.registerdProcesses.append(processname)\nfilepath = Constants.FileConstants.FILE_DIR + self.webservername + processname + '.txt'\nself._backend.createFile(filepath)\nself.processToFileMap[processname] = filepath", "def getKey(item):\n return item[0]\nprint('Current Information in cache is {}'.format(self.cache))\nprint()\nif requestedCount <= self.cacheCapacity and len(self.cache[processname]) >= requestedCount:\n t0 = time()\n print('Information found in Cache')\n data = list(self.cache[processname])\n print('Top {} records for {} from cache are {}'.format(requestedCount, processname, data[:requestedCount]))\n print('Time to fetch the information is {}'.format(time() - t0))\n return data[:requestedCount]\nelse:\n print('Information not in Cache fetching from database')\n t0 = time()\n result = self._backend.readFromFile(self.processToFileMap[processname], requestedCount)\n print('Requested information is {}'.format(result))\n print()\n print('Now toring the requested information in cache')\n print()\n self.cache[processname].clear()\n sortedData = sorted(result, key=getKey)\n for index in range(self.cacheCapacity):\n if index < len(sortedData):\n self.cache[processname].append([sortedData[index][0], sortedData[index][1]])\n print('Time to fetch the information is {}'.format(time() - t0))", "print('Contacting Backend to push state for {}'.format(processname))\nstring = str(logtime) + ',' + str(processname) + ',' + str(state) + '\\n'\nif processname not in self.processToFileMap:\n filepath = Constants.FileConstants.FILE_DIR + self.webservername + processname + '.txt'\n self._backend.createFile(filepath)\n self.processToFileMap[processname] = filepath\nfilepath = self.processToFileMap[processname]\nif self._backend.writeToFile(string, filepath):\n print('Writing information to cache for {}'.format(processname))\n self.writeCache(processname, [str(logtime), str(state)])", "if self.cache[processname] and len(self.cache[processname]) == self.cacheCapacity:\n self.cache[processname].pop()\nself.cache[processname].appendleft(data)"], "bodies_text": "<|body_start_0|>\n self.webservername = webservername\n self.processToFileMap = {}\n self._backend = backend\n self.registerdProcesses = []\n self.cacheCapacity = 5\n self.cache = defaultdict(list)\n<|end_body_0|>\n\n<|body_start_1|>\n self.cacheCapacity = cacheCapacity\n print('Initializing the cache and processNames are : {}'.format(processList))\n for key in processList:\n self.cache[key] = deque([], self.cacheCapacity)\n print('Items in {} cache {}'.format(self.webservername, self.cache))\n<|end_body_1|>\n\n<|body_start_2|>\n self.registerdProcesses.append(processname)\n filepath = Constants.FileConstants.FILE_DIR + self.webservername + processname + '.txt'\n self._backend.createFile(filepath)\n self.processToFileMap[processname] = filepath\n<|end_body_2|>\n\n<|body_start_3|>\n def getKey(item):\n return item[0]\n print('Current Information in cache is {}'.format(self.cache))\n print()\n if requestedCount <= self.cacheCapacity and 
len(self.cache[processname]) >= requestedCount:\n t0 = time()\n print('Information found in Cache')\n data = list(self.cache[processname])\n print('Top {} records for {} from cache are {}'.format(requestedCount, processname, data[:requestedCount]))\n print('Time to fetch the information is {}'.format(time() - t0))\n return data[:requestedCount]\n else:\n print('Information not in Cache fetching from database')\n t0 = time()\n result = self._backend.readFromFile(self.processToFileMap[processname], requestedCount)\n print('Requested information is {}'.format(result))\n print()\n print('Now toring the requested information in cache')\n print()\n self.cache[processname].clear()\n sortedData = sorted(result, key=getKey)\n for index in range(self.cacheCapacity):\n if index < len(sortedData):\n self.cache[processname].append([sortedData[index][0], sortedData[index][1]])\n print('Time to fetch the information is {}'.format(time() - t0))\n<|end_body_3|>\n\n<|body_start_4|>\n print('Contacting Backend to push state for {}'.format(processname))\n string = str(logtime) + ',' + str(processname) + ',' + str(state) + '\\n'\n if processname not in self.processToFileMap:\n filepath = Constants.FileConstants.FILE_DIR + self.webservername + processname + '.txt'\n self._backend.createFile(filepath)\n self.processToFileMap[processname] = filepath\n filepath = self.processToFileMap[processname]\n if self._backend.writeToFile(string, filepath):\n print('Writing information to cache for {}'.format(processname))\n self.writeCache(processname, [str(logtime), str(state)])\n<|end_body_4|>\n\n<|body_start_5|>\n if self.cache[processname] and len(self.cache[processname]) == self.cacheCapacity:\n self.cache[processname].pop()\n self.cache[processname].appendleft(data)\n<|end_body_5|>\n", "class_docstring": "", "class_name": "FrontEnd", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FrontEnd:\n\n def __init__(self, backend, webservername):\n \"\"\"Front End class which manages cache operations and accesses backend to write into database. :param backend: :param webservername:\"\"\"\n <|body_0|>\n\n def initializeCache(self, cacheCapacity, processList):\n \"\"\"Initialize the cache. Cache is write-through. :param cacheCapacity: The cache capacity. :return: None\"\"\"\n <|body_1|>\n\n def registerProcesses(self, processname):\n \"\"\"The incoming new process registers itself through this. A new database file is created. :param processname: The process to be reigstered. :return: None\"\"\"\n <|body_2|>\n\n def getData(self, processname, requestedCount):\n \"\"\"Checks cache if the requsted recent entries are found. If not, then database is queried. :param processname: process for which entries are requested :param requestedCount: The latest count of entries. :return: The entries\"\"\"\n <|body_3|>\n\n def pushData(self, processname, state, logtime):\n \"\"\"The new data is written into database first. Then its written into cache. 
:param processname: :param state: :param logtime: :return:\"\"\"\n <|body_4|>\n\n def writeCache(self, processname, data):\n \"\"\"Writes data to cache of the process :param processname: The processname :param data: The data to be written :return:\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.webservername = webservername\n self.processToFileMap = {}\n self._backend = backend\n self.registerdProcesses = []\n self.cacheCapacity = 5\n self.cache = defaultdict(list)\n<|end_body_0|>\n\n<|body_start_1|>\n self.cacheCapacity = cacheCapacity\n print('Initializing the cache and processNames are : {}'.format(processList))\n for key in processList:\n self.cache[key] = deque([], self.cacheCapacity)\n print('Items in {} cache {}'.format(self.webservername, self.cache))\n<|end_body_1|>\n\n<|body_start_2|>\n self.registerdProcesses.append(processname)\n filepath = Constants.FileConstants.FILE_DIR + self.webservername + processname + '.txt'\n self._backend.createFile(filepath)\n self.processToFileMap[processname] = filepath\n<|end_body_2|>\n\n<|body_start_3|>\n def getKey(item):\n return item[0]\n print('Current Information in cache is {}'.format(self.cache))\n print()\n if requestedCount <= self.cacheCapacity and len(self.cache[processname]) >= requestedCount:\n t0 = time()\n print('Information found in Cache')\n data = list(self.cache[processname])\n print('Top {} records for {} from cache are {}'.format(requestedCount, processname, data[:requestedCount]))\n print('Time to fetch the information is {}'.format(time() - t0))\n return data[:requestedCount]\n else:\n print('Information not in Cache fetching from database')\n t0 = time()\n result = self._backend.readFromFile(self.processToFileMap[processname], requestedCount)\n print('Requested information is {}'.format(result))\n print()\n print('Now toring the requested information in cache')\n print()\n self.cache[processname].clear()\n sortedData = sorted(result, key=getKey)\n for index in range(self.cacheCapacity):\n if index < len(sortedData):\n self.cache[processname].append([sortedData[index][0], sortedData[index][1]])\n print('Time to fetch the information is {}'.format(time() - t0))\n<|end_body_3|>\n\n<|body_start_4|>\n print('Contacting Backend to push state for {}'.format(processname))\n string = str(logtime) + ',' + str(processname) + ',' + str(state) + '\\n'\n if processname not in self.processToFileMap:\n filepath = Constants.FileConstants.FILE_DIR + self.webservername + processname + '.txt'\n self._backend.createFile(filepath)\n self.processToFileMap[processname] = filepath\n filepath = self.processToFileMap[processname]\n if self._backend.writeToFile(string, filepath):\n print('Writing information to cache for {}'.format(processname))\n self.writeCache(processname, [str(logtime), str(state)])\n<|end_body_4|>\n\n<|body_start_5|>\n if self.cache[processname] and len(self.cache[processname]) == self.cacheCapacity:\n self.cache[processname].pop()\n self.cache[processname].appendleft(data)\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000218", "length_bytes": 4922, "license_type": "no_license", "methods": [{"docstring": "Front End class which manages cache operations and accesses backend to write into database. :param backend: :param webservername:", "name": "__init__", "signature": "def __init__(self, backend, webservername)"}, {"docstring": "Initialize the cache. Cache is write-through. :param cacheCapacity: The cache capacity. 
:return: None", "name": "initializeCache", "signature": "def initializeCache(self, cacheCapacity, processList)"}, {"docstring": "The incoming new process registers itself through this. A new database file is created. :param processname: The process to be reigstered. :return: None", "name": "registerProcesses", "signature": "def registerProcesses(self, processname)"}, {"docstring": "Checks cache if the requsted recent entries are found. If not, then database is queried. :param processname: process for which entries are requested :param requestedCount: The latest count of entries. :return: The entries", "name": "getData", "signature": "def getData(self, processname, requestedCount)"}, {"docstring": "The new data is written into database first. Then its written into cache. :param processname: :param state: :param logtime: :return:", "name": "pushData", "signature": "def pushData(self, processname, state, logtime)"}, {"docstring": "Writes data to cache of the process :param processname: The processname :param data: The data to be written :return:", "name": "writeCache", "signature": "def writeCache(self, processname, data)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_train_020585", "prompt": "Implement the Python class `FrontEnd` described below.\n\nClass description:\nImplement the FrontEnd class.\n\nMethod signatures and docstrings:\n- def __init__(self, backend, webservername): Front End class which manages cache operations and accesses backend to write into database. :param backend: :param webservername:\n- def initializeCache(self, cacheCapacity, processList): Initialize the cache. Cache is write-through. :param cacheCapacity: The cache capacity. :return: None\n- def registerProcesses(self, processname): The incoming new process registers itself through this. A new database file is created. :param processname: The process to be reigstered. :return: None\n- def getData(self, processname, requestedCount): Checks cache if the requsted recent entries are found. If not, then database is queried. :param processname: process for which entries are requested :param requestedCount: The latest count of entries. :return: The entries\n- def pushData(self, processname, state, logtime): The new data is written into database first. Then its written into cache. :param processname: :param state: :param logtime: :return:\n- def writeCache(self, processname, data): Writes data to cache of the process :param processname: The processname :param data: The data to be written :return:", "prompted_full_text": "Implement the Python class `FrontEnd` described below.\n\nClass description:\nImplement the FrontEnd class.\n\nMethod signatures and docstrings:\n- def __init__(self, backend, webservername): Front End class which manages cache operations and accesses backend to write into database. :param backend: :param webservername:\n- def initializeCache(self, cacheCapacity, processList): Initialize the cache. Cache is write-through. :param cacheCapacity: The cache capacity. :return: None\n- def registerProcesses(self, processname): The incoming new process registers itself through this. A new database file is created. :param processname: The process to be reigstered. :return: None\n- def getData(self, processname, requestedCount): Checks cache if the requsted recent entries are found. If not, then database is queried. :param processname: process for which entries are requested :param requestedCount: The latest count of entries. 
:return: The entries\n- def pushData(self, processname, state, logtime): The new data is written into database first. Then its written into cache. :param processname: :param state: :param logtime: :return:\n- def writeCache(self, processname, data): Writes data to cache of the process :param processname: The processname :param data: The data to be written :return:\n\n<|skeleton|>\nclass FrontEnd:\n\n def __init__(self, backend, webservername):\n \"\"\"Front End class which manages cache operations and accesses backend to write into database. :param backend: :param webservername:\"\"\"\n <|body_0|>\n\n def initializeCache(self, cacheCapacity, processList):\n \"\"\"Initialize the cache. Cache is write-through. :param cacheCapacity: The cache capacity. :return: None\"\"\"\n <|body_1|>\n\n def registerProcesses(self, processname):\n \"\"\"The incoming new process registers itself through this. A new database file is created. :param processname: The process to be reigstered. :return: None\"\"\"\n <|body_2|>\n\n def getData(self, processname, requestedCount):\n \"\"\"Checks cache if the requsted recent entries are found. If not, then database is queried. :param processname: process for which entries are requested :param requestedCount: The latest count of entries. :return: The entries\"\"\"\n <|body_3|>\n\n def pushData(self, processname, state, logtime):\n \"\"\"The new data is written into database first. Then its written into cache. :param processname: :param state: :param logtime: :return:\"\"\"\n <|body_4|>\n\n def writeCache(self, processname, data):\n \"\"\"Writes data to cache of the process :param processname: The processname :param data: The data to be written :return:\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.webservername = webservername\n self.processToFileMap = {}\n self._backend = backend\n self.registerdProcesses = []\n self.cacheCapacity = 5\n self.cache = defaultdict(list)\n<|end_body_0|>\n\n<|body_start_1|>\n self.cacheCapacity = cacheCapacity\n print('Initializing the cache and processNames are : {}'.format(processList))\n for key in processList:\n self.cache[key] = deque([], self.cacheCapacity)\n print('Items in {} cache {}'.format(self.webservername, self.cache))\n<|end_body_1|>\n\n<|body_start_2|>\n self.registerdProcesses.append(processname)\n filepath = Constants.FileConstants.FILE_DIR + self.webservername + processname + '.txt'\n self._backend.createFile(filepath)\n self.processToFileMap[processname] = filepath\n<|end_body_2|>\n\n<|body_start_3|>\n def getKey(item):\n return item[0]\n print('Current Information in cache is {}'.format(self.cache))\n print()\n if requestedCount <= self.cacheCapacity and len(self.cache[processname]) >= requestedCount:\n t0 = time()\n print('Information found in Cache')\n data = list(self.cache[processname])\n print('Top {} records for {} from cache are {}'.format(requestedCount, processname, data[:requestedCount]))\n print('Time to fetch the information is {}'.format(time() - t0))\n return data[:requestedCount]\n else:\n print('Information not in Cache fetching from database')\n t0 = time()\n result = self._backend.readFromFile(self.processToFileMap[processname], requestedCount)\n print('Requested information is {}'.format(result))\n print()\n print('Now toring the requested information in cache')\n print()\n self.cache[processname].clear()\n sortedData = sorted(result, key=getKey)\n for index in range(self.cacheCapacity):\n if index < len(sortedData):\n self.cache[processname].append([sortedData[index][0], 
sortedData[index][1]])\n print('Time to fetch the information is {}'.format(time() - t0))\n<|end_body_3|>\n\n<|body_start_4|>\n print('Contacting Backend to push state for {}'.format(processname))\n string = str(logtime) + ',' + str(processname) + ',' + str(state) + '\\n'\n if processname not in self.processToFileMap:\n filepath = Constants.FileConstants.FILE_DIR + self.webservername + processname + '.txt'\n self._backend.createFile(filepath)\n self.processToFileMap[processname] = filepath\n filepath = self.processToFileMap[processname]\n if self._backend.writeToFile(string, filepath):\n print('Writing information to cache for {}'.format(processname))\n self.writeCache(processname, [str(logtime), str(state)])\n<|end_body_4|>\n\n<|body_start_5|>\n if self.cache[processname] and len(self.cache[processname]) == self.cacheCapacity:\n self.cache[processname].pop()\n self.cache[processname].appendleft(data)\n<|end_body_5|>\n", "revision_id": "f36779ce2f1a1071391ffcd32f695d6d8cd7ff92", "skeleton": "<|skeleton|>\nclass FrontEnd:\n\n def __init__(self, backend, webservername):\n \"\"\"Front End class which manages cache operations and accesses backend to write into database. :param backend: :param webservername:\"\"\"\n <|body_0|>\n\n def initializeCache(self, cacheCapacity, processList):\n \"\"\"Initialize the cache. Cache is write-through. :param cacheCapacity: The cache capacity. :return: None\"\"\"\n <|body_1|>\n\n def registerProcesses(self, processname):\n \"\"\"The incoming new process registers itself through this. A new database file is created. :param processname: The process to be reigstered. :return: None\"\"\"\n <|body_2|>\n\n def getData(self, processname, requestedCount):\n \"\"\"Checks cache if the requsted recent entries are found. If not, then database is queried. :param processname: process for which entries are requested :param requestedCount: The latest count of entries. :return: The entries\"\"\"\n <|body_3|>\n\n def pushData(self, processname, state, logtime):\n \"\"\"The new data is written into database first. Then its written into cache. :param processname: :param state: :param logtime: :return:\"\"\"\n <|body_4|>\n\n def writeCache(self, processname, data):\n \"\"\"Writes data to cache of the process :param processname: The processname :param data: The data to be written :return:\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class FrontEnd:\n def __init__(self, backend, webservername):\n \"\"\"Front End class which manages cache operations and accesses backend to write into database. :param backend: :param webservername:\"\"\"\n self.webservername = webservername\n self.processToFileMap = {}\n self._backend = backend\n self.registerdProcesses = []\n self.cacheCapacity = 5\n self.cache = defaultdict(list)\n\n def initializeCache(self, cacheCapacity, processList):\n \"\"\"Initialize the cache. Cache is write-through. :param cacheCapacity: The cache capacity. :return: None\"\"\"\n self.cacheCapacity = cacheCapacity\n print('Initializing the cache and processNames are : {}'.format(processList))\n for key in processList:\n self.cache[key] = deque([], self.cacheCapacity)\n print('Items in {} cache {}'.format(self.webservername, self.cache))\n\n def registerProcesses(self, processname):\n \"\"\"The incoming new process registers itself through this. A new database file is created. :param processname: The process to be reigstered. 
:return: None\"\"\"\n self.registerdProcesses.append(processname)\n filepath = Constants.FileConstants.FILE_DIR + self.webservername + processname + '.txt'\n self._backend.createFile(filepath)\n self.processToFileMap[processname] = filepath\n\n def getData(self, processname, requestedCount):\n \"\"\"Checks cache if the requested recent entries are found. If not, then database is queried. :param processname: process for which entries are requested :param requestedCount: The latest count of entries. :return: The entries\"\"\"\n def getKey(item):\n return item[0]\n print('Current Information in cache is {}'.format(self.cache))\n print()\n if requestedCount <= self.cacheCapacity and len(self.cache[processname]) >= requestedCount:\n t0 = time()\n print('Information found in Cache')\n data = list(self.cache[processname])\n print('Top {} records for {} from cache are {}'.format(requestedCount, processname, data[:requestedCount]))\n print('Time to fetch the information is {}'.format(time() - t0))\n return data[:requestedCount]\n else:\n print('Information not in Cache fetching from database')\n t0 = time()\n result = self._backend.readFromFile(self.processToFileMap[processname], requestedCount)\n print('Requested information is {}'.format(result))\n print()\n print('Now storing the requested information in cache')\n print()\n self.cache[processname].clear()\n sortedData = sorted(result, key=getKey)\n for index in range(self.cacheCapacity):\n if index < len(sortedData):\n self.cache[processname].append([sortedData[index][0], sortedData[index][1]])\n print('Time to fetch the information is {}'.format(time() - t0))\n\n def pushData(self, processname, state, logtime):\n \"\"\"The new data is written into database first. Then it's written into cache. :param processname: :param state: :param logtime: :return:\"\"\"\n print('Contacting Backend to push state for {}'.format(processname))\n string = str(logtime) + ',' + str(processname) + ',' + str(state) + '\\n'\n if processname not in self.processToFileMap:\n filepath = Constants.FileConstants.FILE_DIR + self.webservername + processname + '.txt'\n self._backend.createFile(filepath)\n self.processToFileMap[processname] = filepath\n filepath = self.processToFileMap[processname]\n if self._backend.writeToFile(string, filepath):\n print('Writing information to cache for {}'.format(processname))\n self.writeCache(processname, [str(logtime), str(state)])\n\n def writeCache(self, processname, data):\n \"\"\"Writes data to cache of the process :param processname: The processname :param data: The data to be written :return:\"\"\"\n if self.cache[processname] and len(self.cache[processname]) == self.cacheCapacity:\n self.cache[processname].pop()\n self.cache[processname].appendleft(data)\n", "source": "the_stack_v2_python_sparse", "source_path": "src/webServer/FrontEnd.py", "source_repo": "nehay06/Internet-of-Things-Part-2", "split": "val", "star_events_count": 0}
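Note: the FrontEnd record above implements a write-through cache with one bounded deque per process: pushData persists to the backend first and caches only on a successful write, and writeCache pops the oldest entry from the right before pushing the newest on the left. Two observations: initializeCache already builds its deques with a maxlen (deque([], cacheCapacity)), which makes writeCache's manual pop redundant for registered processes; and getData's database branch caches the fetched rows but never returns them, so a cache miss falls off the end of the function and returns None. The core write-through pattern can be sketched compactly as below; the backend is stubbed, since the record's backend and Constants classes are not shown.

from collections import defaultdict, deque

CAPACITY = 5
cache = defaultdict(lambda: deque(maxlen=CAPACITY))

def push_state(process, logtime, state, backend_write=lambda line: True):
    # write-through: persist first, cache only on a successful write;
    # maxlen makes the deque evict its oldest (rightmost) entry automatically
    if backend_write('{},{},{}\n'.format(logtime, process, state)):
        cache[process].appendleft([str(logtime), str(state)])

for t in range(8):
    push_state('worker', t, 'RUNNING')
print(list(cache['worker']))  # the 5 newest entries, newest first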
{"blob_id": "d3344bf7fd49b0b04fab159618a5eb00efeb0558", "bodies": ["super().__init__(**kwargs)\nself.width = width\nself.height = height\nself.x = x\nself.y = y\nself.x = max(self.x, 2)\nself.y = max(self.y, 2)\nself.grid: List[List[float]] = []\nself.color_grid: List[List[List[float]]] = []\nself.populate_grid()", "for i, j in product(range(self.x), range(self.y)):\n self._vertices[self.x * i + j][2] = self.grid[i][j]\n self._vertex_colors[self.x * i + j] = self.color_grid[i][j]\nif self.material.display == SOLID:\n self.fix_normals()\nself.refresh()", "self.clear_triangles()\nself.grid = [[0.0] * self.y for _i in range(self.x)]\nself.color_grid = [[WHITE] * self.y for _i in range(self.x)]\nstep_x = self.width / (self.x - 1)\nstep_y = self.height / (self.y - 1)\nstep_u = 1.0 / (self.x - 1)\nstep_v = 1.0 / (self.y - 1)\nc_x = self.width / 2.0\nc_y = self.height / 2.0\nfor i, j in product(range(self.x), range(self.y)):\n self._vertices.append([i * step_x - c_x, j * step_y - c_y, self.grid[i][j]])\n self._texcoords.append([i * step_u, j * step_v])\n self._vertex_colors.append(WHITE)\n if i < self.x - 1 and j < self.y - 1:\n left = self.x * i + j\n right = self.x * i + (j + 1)\n top_left = self.x * (i + 1) + j\n top_right = self.x * (i + 1) + (j + 1)\n self._indices.append([top_right, right, left])\n self._indices.append([left, top_left, top_right])\nself.material._indices = self._indices\nself.fix_normals()\nself.refresh()"], "bodies_text": "<|body_start_0|>\n super().__init__(**kwargs)\n self.width = width\n self.height = height\n self.x = x\n self.y = y\n self.x = max(self.x, 2)\n self.y = max(self.y, 2)\n self.grid: List[List[float]] = []\n self.color_grid: List[List[List[float]]] = []\n self.populate_grid()\n<|end_body_0|>\n\n<|body_start_1|>\n for i, j in product(range(self.x), range(self.y)):\n self._vertices[self.x * i + j][2] = self.grid[i][j]\n self._vertex_colors[self.x * i + j] = self.color_grid[i][j]\n if self.material.display == SOLID:\n self.fix_normals()\n self.refresh()\n<|end_body_1|>\n\n<|body_start_2|>\n self.clear_triangles()\n self.grid = [[0.0] * self.y for _i in range(self.x)]\n self.color_grid = [[WHITE] * self.y for _i in range(self.x)]\n step_x = self.width / (self.x - 1)\n step_y = self.height / (self.y - 1)\n step_u = 1.0 / (self.x - 1)\n step_v = 1.0 / (self.y - 1)\n c_x = self.width / 2.0\n c_y = self.height / 2.0\n for i, j in product(range(self.x), range(self.y)):\n self._vertices.append([i * step_x - c_x, j * step_y - c_y, self.grid[i][j]])\n self._texcoords.append([i * step_u, j * step_v])\n self._vertex_colors.append(WHITE)\n if i < self.x - 1 and j < self.y - 1:\n left = self.x * i + j\n right = self.x * i + (j + 1)\n top_left = self.x * (i + 1) + j\n top_right = self.x * (i + 1) + (j + 1)\n self._indices.append([top_right, right, left])\n self._indices.append([left, top_left, top_right])\n self.material._indices = self._indices\n self.fix_normals()\n self.refresh()\n<|end_body_2|>\n", "class_docstring": "Matrix plane.", "class_name": "MatrixPlane", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MatrixPlane:\n \"\"\"Matrix plane.\"\"\"\n\n def __init__(self, width: float=1.0, height: float=1.0, x: int=2, y: int=2, **kwargs: Any) -> None:\n \"\"\"Define a Matrix Plane. 
Matrix plane is an MxN grid matrix where you can change the height / color of each individual vertex in the matrix by ease Keyword arguments: width -- Width of the plane height -- Height of the plane x -- Number of points in X direction y -- Number of points in Y direction\"\"\"\n <|body_0|>\n\n def update_grid(self) -> None:\n \"\"\"Update the grid for changes.\"\"\"\n <|body_1|>\n\n def populate_grid(self) -> None:\n \"\"\"Turn grid data into plane.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(**kwargs)\n self.width = width\n self.height = height\n self.x = x\n self.y = y\n self.x = max(self.x, 2)\n self.y = max(self.y, 2)\n self.grid: List[List[float]] = []\n self.color_grid: List[List[List[float]]] = []\n self.populate_grid()\n<|end_body_0|>\n\n<|body_start_1|>\n for i, j in product(range(self.x), range(self.y)):\n self._vertices[self.x * i + j][2] = self.grid[i][j]\n self._vertex_colors[self.x * i + j] = self.color_grid[i][j]\n if self.material.display == SOLID:\n self.fix_normals()\n self.refresh()\n<|end_body_1|>\n\n<|body_start_2|>\n self.clear_triangles()\n self.grid = [[0.0] * self.y for _i in range(self.x)]\n self.color_grid = [[WHITE] * self.y for _i in range(self.x)]\n step_x = self.width / (self.x - 1)\n step_y = self.height / (self.y - 1)\n step_u = 1.0 / (self.x - 1)\n step_v = 1.0 / (self.y - 1)\n c_x = self.width / 2.0\n c_y = self.height / 2.0\n for i, j in product(range(self.x), range(self.y)):\n self._vertices.append([i * step_x - c_x, j * step_y - c_y, self.grid[i][j]])\n self._texcoords.append([i * step_u, j * step_v])\n self._vertex_colors.append(WHITE)\n if i < self.x - 1 and j < self.y - 1:\n left = self.x * i + j\n right = self.x * i + (j + 1)\n top_left = self.x * (i + 1) + j\n top_right = self.x * (i + 1) + (j + 1)\n self._indices.append([top_right, right, left])\n self._indices.append([left, top_left, top_right])\n self.material._indices = self._indices\n self.fix_normals()\n self.refresh()\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000219", "length_bytes": 3809, "license_type": "permissive", "methods": [{"docstring": "Define a Matrix Plane. Matrix plane is an MxN grid matrix where you can change the height / color of each individual vertex in the matrix by ease Keyword arguments: width -- Width of the plane height -- Height of the plane x -- Number of points in X direction y -- Number of points in Y direction", "name": "__init__", "signature": "def __init__(self, width: float=1.0, height: float=1.0, x: int=2, y: int=2, **kwargs: Any) -> None"}, {"docstring": "Update the grid for changes.", "name": "update_grid", "signature": "def update_grid(self) -> None"}, {"docstring": "Turn grid data into plane.", "name": "populate_grid", "signature": "def populate_grid(self) -> None"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_010641", "prompt": "Implement the Python class `MatrixPlane` described below.\n\nClass description:\nMatrix plane.\n\nMethod signatures and docstrings:\n- def __init__(self, width: float=1.0, height: float=1.0, x: int=2, y: int=2, **kwargs: Any) -> None: Define a Matrix Plane. 
Matrix plane is an MxN grid matrix where you can change the height / color of each individual vertex in the matrix by ease Keyword arguments: width -- Width of the plane height -- Height of the plane x -- Number of points in X direction y -- Number of points in Y direction\n- def update_grid(self) -> None: Update the grid for changes.\n- def populate_grid(self) -> None: Turn grid data into plane.", "prompted_full_text": "Implement the Python class `MatrixPlane` described below.\n\nClass description:\nMatrix plane.\n\nMethod signatures and docstrings:\n- def __init__(self, width: float=1.0, height: float=1.0, x: int=2, y: int=2, **kwargs: Any) -> None: Define a Matrix Plane. Matrix plane is an MxN grid matrix where you can change the height / color of each individual vertex in the matrix by ease Keyword arguments: width -- Width of the plane height -- Height of the plane x -- Number of points in X direction y -- Number of points in Y direction\n- def update_grid(self) -> None: Update the grid for changes.\n- def populate_grid(self) -> None: Turn grid data into plane.\n\n<|skeleton|>\nclass MatrixPlane:\n \"\"\"Matrix plane.\"\"\"\n\n def __init__(self, width: float=1.0, height: float=1.0, x: int=2, y: int=2, **kwargs: Any) -> None:\n \"\"\"Define a Matrix Plane. Matrix plane is an MxN grid matrix where you can change the height / color of each individual vertex in the matrix by ease Keyword arguments: width -- Width of the plane height -- Height of the plane x -- Number of points in X direction y -- Number of points in Y direction\"\"\"\n <|body_0|>\n\n def update_grid(self) -> None:\n \"\"\"Update the grid for changes.\"\"\"\n <|body_1|>\n\n def populate_grid(self) -> None:\n \"\"\"Turn grid data into plane.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(**kwargs)\n self.width = width\n self.height = height\n self.x = x\n self.y = y\n self.x = max(self.x, 2)\n self.y = max(self.y, 2)\n self.grid: List[List[float]] = []\n self.color_grid: List[List[List[float]]] = []\n self.populate_grid()\n<|end_body_0|>\n\n<|body_start_1|>\n for i, j in product(range(self.x), range(self.y)):\n self._vertices[self.x * i + j][2] = self.grid[i][j]\n self._vertex_colors[self.x * i + j] = self.color_grid[i][j]\n if self.material.display == SOLID:\n self.fix_normals()\n self.refresh()\n<|end_body_1|>\n\n<|body_start_2|>\n self.clear_triangles()\n self.grid = [[0.0] * self.y for _i in range(self.x)]\n self.color_grid = [[WHITE] * self.y for _i in range(self.x)]\n step_x = self.width / (self.x - 1)\n step_y = self.height / (self.y - 1)\n step_u = 1.0 / (self.x - 1)\n step_v = 1.0 / (self.y - 1)\n c_x = self.width / 2.0\n c_y = self.height / 2.0\n for i, j in product(range(self.x), range(self.y)):\n self._vertices.append([i * step_x - c_x, j * step_y - c_y, self.grid[i][j]])\n self._texcoords.append([i * step_u, j * step_v])\n self._vertex_colors.append(WHITE)\n if i < self.x - 1 and j < self.y - 1:\n left = self.x * i + j\n right = self.x * i + (j + 1)\n top_left = self.x * (i + 1) + j\n top_right = self.x * (i + 1) + (j + 1)\n self._indices.append([top_right, right, left])\n self._indices.append([left, top_left, top_right])\n self.material._indices = self._indices\n self.fix_normals()\n self.refresh()\n<|end_body_2|>\n", "revision_id": "906f107050915800209fb884c626a8dff1b06291", "skeleton": "<|skeleton|>\nclass MatrixPlane:\n \"\"\"Matrix plane.\"\"\"\n\n def __init__(self, width: float=1.0, height: float=1.0, x: int=2, y: int=2, **kwargs: Any) -> None:\n \"\"\"Define a Matrix 
Plane. Matrix plane is an MxN grid matrix where you can change the height / color of each individual vertex in the matrix by ease Keyword arguments: width -- Width of the plane height -- Height of the plane x -- Number of points in X direction y -- Number of points in Y direction\"\"\"\n <|body_0|>\n\n def update_grid(self) -> None:\n \"\"\"Update the grid for changes.\"\"\"\n <|body_1|>\n\n def populate_grid(self) -> None:\n \"\"\"Turn grid data into plane.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class MatrixPlane:\n \"\"\"Matrix plane.\"\"\"\n\n def __init__(self, width: float=1.0, height: float=1.0, x: int=2, y: int=2, **kwargs: Any) -> None:\n \"\"\"Define a Matrix Plane. Matrix plane is an MxN grid matrix where you can change the height / color of each individual vertex in the matrix by ease Keyword arguments: width -- Width of the plane height -- Height of the plane x -- Number of points in X direction y -- Number of points in Y direction\"\"\"\n super().__init__(**kwargs)\n self.width = width\n self.height = height\n self.x = x\n self.y = y\n self.x = max(self.x, 2)\n self.y = max(self.y, 2)\n self.grid: List[List[float]] = []\n self.color_grid: List[List[List[float]]] = []\n self.populate_grid()\n\n def update_grid(self) -> None:\n \"\"\"Update the grid for changes.\"\"\"\n for i, j in product(range(self.x), range(self.y)):\n self._vertices[self.x * i + j][2] = self.grid[i][j]\n self._vertex_colors[self.x * i + j] = self.color_grid[i][j]\n if self.material.display == SOLID:\n self.fix_normals()\n self.refresh()\n\n def populate_grid(self) -> None:\n \"\"\"Turn grid data into plane.\"\"\"\n self.clear_triangles()\n self.grid = [[0.0] * self.y for _i in range(self.x)]\n self.color_grid = [[WHITE] * self.y for _i in range(self.x)]\n step_x = self.width / (self.x - 1)\n step_y = self.height / (self.y - 1)\n step_u = 1.0 / (self.x - 1)\n step_v = 1.0 / (self.y - 1)\n c_x = self.width / 2.0\n c_y = self.height / 2.0\n for i, j in product(range(self.x), range(self.y)):\n self._vertices.append([i * step_x - c_x, j * step_y - c_y, self.grid[i][j]])\n self._texcoords.append([i * step_u, j * step_v])\n self._vertex_colors.append(WHITE)\n if i < self.x - 1 and j < self.y - 1:\n left = self.x * i + j\n right = self.x * i + (j + 1)\n top_left = self.x * (i + 1) + j\n top_right = self.x * (i + 1) + (j + 1)\n self._indices.append([top_right, right, left])\n self._indices.append([left, top_left, top_right])\n self.material._indices = self._indices\n self.fix_normals()\n self.refresh()\n", "source": "the_stack_v2_python_sparse", "source_path": "payton/scene/geometry/plane.py", "source_repo": "sinanislekdemir/payton", "split": "val", "star_events_count": 59}
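A short usage sketch for the MatrixPlane record above. The record's source is payton (payton/scene/geometry/plane.py), and the class clearly inherits a mesh base supplying _vertices, material, fix_normals and refresh, which the record omits; the sketch assumes such an instance constructs as declared and only calls methods the record defines. One caution: update_grid indexes vertices with stride self.x * i + j while populate_grid appends them j-fastest over range(self.y), so the two agree only when x == y, hence the square grid here.

from itertools import product    # same helper the record's methods use

plane = MatrixPlane(width=4.0, height=4.0, x=16, y=16)   # square grid; see caution above
for i, j in product(range(plane.x), range(plane.y)):
    plane.grid[i][j] = 0.25 * ((i + j) % 4)                  # per-vertex heights
    plane.color_grid[i][j] = [i / plane.x, j / plane.y, 1.0] # per-vertex colors
plane.update_grid()   # pushes grid/color_grid into the underlying vertex buffers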
{"blob_id": "28f3b94a7a12a1488a97ce531dc05def414e40d4", "bodies": ["self.pad_amounts = pad_amounts\nself.mode = mode\nself.pad_value = pad_value", "if np.count_nonzero(np.asarray(self.pad_amounts)) == 0:\n return img_obj\nimg = img_obj.get_data()\nmsk = img_obj.get_mask()\nkwargs = {}\nif self.mode == 'constant':\n kwargs = {'constant_values': self.pad_value}\nif len(img.shape) == 2:\n img_out = np.pad(img, pad_width=((self.pad_amounts[0], self.pad_amounts[1]), (self.pad_amounts[2], self.pad_amounts[3])), mode=self.mode, **kwargs)\nelif len(img.shape) == 3:\n img_out = np.pad(img, pad_width=((self.pad_amounts[0], self.pad_amounts[1]), (self.pad_amounts[2], self.pad_amounts[3]), (0, 0)), mode=self.mode, **kwargs)\nelse:\n raise RuntimeError('Unexpected image shape: {}'.format(img.shape))\nif len(msk.shape) == 2:\n mask_out = np.pad(msk, pad_width=((self.pad_amounts[0], self.pad_amounts[1]), (self.pad_amounts[2], self.pad_amounts[3])), mode=self.mode, **kwargs)\nelif len(msk.shape) == 3:\n mask_out = np.pad(msk, pad_width=((self.pad_amounts[0], self.pad_amounts[1]), (self.pad_amounts[2], self.pad_amounts[3]), (0, 0)), mode=self.mode, **kwargs)\nelse:\n raise RuntimeError('Unexpected mask shape: {}'.format(msk.shape))\nlogger.info('Padded image of shape=%s to shape=%s' % (str(img_obj.get_data().shape), str(img.shape)))\nreturn GenericImageEntity(img_out, mask_out)"], "bodies_text": "<|body_start_0|>\n self.pad_amounts = pad_amounts\n self.mode = mode\n self.pad_value = pad_value\n<|end_body_0|>\n\n<|body_start_1|>\n if np.count_nonzero(np.asarray(self.pad_amounts)) == 0:\n return img_obj\n img = img_obj.get_data()\n msk = img_obj.get_mask()\n kwargs = {}\n if self.mode == 'constant':\n kwargs = {'constant_values': self.pad_value}\n if len(img.shape) == 2:\n img_out = np.pad(img, pad_width=((self.pad_amounts[0], self.pad_amounts[1]), (self.pad_amounts[2], self.pad_amounts[3])), mode=self.mode, **kwargs)\n elif len(img.shape) == 3:\n img_out = np.pad(img, pad_width=((self.pad_amounts[0], self.pad_amounts[1]), (self.pad_amounts[2], self.pad_amounts[3]), (0, 0)), mode=self.mode, **kwargs)\n else:\n raise RuntimeError('Unexpected image shape: {}'.format(img.shape))\n if len(msk.shape) == 2:\n mask_out = np.pad(msk, pad_width=((self.pad_amounts[0], self.pad_amounts[1]), (self.pad_amounts[2], self.pad_amounts[3])), mode=self.mode, **kwargs)\n elif len(msk.shape) == 3:\n mask_out = np.pad(msk, pad_width=((self.pad_amounts[0], self.pad_amounts[1]), (self.pad_amounts[2], self.pad_amounts[3]), (0, 0)), mode=self.mode, **kwargs)\n else:\n raise RuntimeError('Unexpected mask shape: {}'.format(msk.shape))\n logger.info('Padded image of shape=%s to shape=%s' % (str(img_obj.get_data().shape), str(img.shape)))\n return GenericImageEntity(img_out, mask_out)\n<|end_body_1|>\n", "class_docstring": "Resizes an Entity", "class_name": "Pad", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Pad:\n \"\"\"Resizes an Entity\"\"\"\n\n def __init__(self, pad_amounts: tuple=(0, 0, 0, 0), mode: str='constant', pad_value: int=0) -> None:\n \"\"\"Initialize the resizer object :param pad_amounts: a tuple of the pixel count o add to each side (y_pre, y_post, x_pre, x_post) :param mode: what type of padding to use, supports numpy.pad options :param pad_value: the value to use when padding\"\"\"\n <|body_0|>\n\n def do(self, img_obj: ImageEntity, random_state_obj: RandomState) -> ImageEntity:\n \"\"\"Perform the resizing :param img_obj: The input object 
to be resized according the specified configuration :param random_state_obj: ignored :return: The resized object\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.pad_amounts = pad_amounts\n self.mode = mode\n self.pad_value = pad_value\n<|end_body_0|>\n\n<|body_start_1|>\n if np.count_nonzero(np.asarray(self.pad_amounts)) == 0:\n return img_obj\n img = img_obj.get_data()\n msk = img_obj.get_mask()\n kwargs = {}\n if self.mode == 'constant':\n kwargs = {'constant_values': self.pad_value}\n if len(img.shape) == 2:\n img_out = np.pad(img, pad_width=((self.pad_amounts[0], self.pad_amounts[1]), (self.pad_amounts[2], self.pad_amounts[3])), mode=self.mode, **kwargs)\n elif len(img.shape) == 3:\n img_out = np.pad(img, pad_width=((self.pad_amounts[0], self.pad_amounts[1]), (self.pad_amounts[2], self.pad_amounts[3]), (0, 0)), mode=self.mode, **kwargs)\n else:\n raise RuntimeError('Unexpected image shape: {}'.format(img.shape))\n if len(msk.shape) == 2:\n mask_out = np.pad(msk, pad_width=((self.pad_amounts[0], self.pad_amounts[1]), (self.pad_amounts[2], self.pad_amounts[3])), mode=self.mode, **kwargs)\n elif len(msk.shape) == 3:\n mask_out = np.pad(msk, pad_width=((self.pad_amounts[0], self.pad_amounts[1]), (self.pad_amounts[2], self.pad_amounts[3]), (0, 0)), mode=self.mode, **kwargs)\n else:\n raise RuntimeError('Unexpected mask shape: {}'.format(msk.shape))\n logger.info('Padded image of shape=%s to shape=%s' % (str(img_obj.get_data().shape), str(img.shape)))\n return GenericImageEntity(img_out, mask_out)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000220", "length_bytes": 8873, "license_type": "permissive", "methods": [{"docstring": "Initialize the resizer object :param pad_amounts: a tuple of the pixel count o add to each side (y_pre, y_post, x_pre, x_post) :param mode: what type of padding to use, supports numpy.pad options :param pad_value: the value to use when padding", "name": "__init__", "signature": "def __init__(self, pad_amounts: tuple=(0, 0, 0, 0), mode: str='constant', pad_value: int=0) -> None"}, {"docstring": "Perform the resizing :param img_obj: The input object to be resized according the specified configuration :param random_state_obj: ignored :return: The resized object", "name": "do", "signature": "def do(self, img_obj: ImageEntity, random_state_obj: RandomState) -> ImageEntity"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_034687", "prompt": "Implement the Python class `Pad` described below.\n\nClass description:\nResizes an Entity\n\nMethod signatures and docstrings:\n- def __init__(self, pad_amounts: tuple=(0, 0, 0, 0), mode: str='constant', pad_value: int=0) -> None: Initialize the resizer object :param pad_amounts: a tuple of the pixel count o add to each side (y_pre, y_post, x_pre, x_post) :param mode: what type of padding to use, supports numpy.pad options :param pad_value: the value to use when padding\n- def do(self, img_obj: ImageEntity, random_state_obj: RandomState) -> ImageEntity: Perform the resizing :param img_obj: The input object to be resized according the specified configuration :param random_state_obj: ignored :return: The resized object", "prompted_full_text": "Implement the Python class `Pad` described below.\n\nClass description:\nResizes an Entity\n\nMethod signatures and docstrings:\n- def __init__(self, pad_amounts: tuple=(0, 0, 0, 0), mode: str='constant', pad_value: int=0) -> None: Initialize the resizer object :param pad_amounts: a tuple of the pixel count o add to each side (y_pre, y_post, 
x_pre, x_post) :param mode: what type of padding to use, supports numpy.pad options :param pad_value: the value to use when padding\n- def do(self, img_obj: ImageEntity, random_state_obj: RandomState) -> ImageEntity: Perform the resizing :param img_obj: The input object to be resized according the specified configuration :param random_state_obj: ignored :return: The resized object\n\n<|skeleton|>\nclass Pad:\n \"\"\"Resizes an Entity\"\"\"\n\n def __init__(self, pad_amounts: tuple=(0, 0, 0, 0), mode: str='constant', pad_value: int=0) -> None:\n \"\"\"Initialize the resizer object :param pad_amounts: a tuple of the pixel count o add to each side (y_pre, y_post, x_pre, x_post) :param mode: what type of padding to use, supports numpy.pad options :param pad_value: the value to use when padding\"\"\"\n <|body_0|>\n\n def do(self, img_obj: ImageEntity, random_state_obj: RandomState) -> ImageEntity:\n \"\"\"Perform the resizing :param img_obj: The input object to be resized according the specified configuration :param random_state_obj: ignored :return: The resized object\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.pad_amounts = pad_amounts\n self.mode = mode\n self.pad_value = pad_value\n<|end_body_0|>\n\n<|body_start_1|>\n if np.count_nonzero(np.asarray(self.pad_amounts)) == 0:\n return img_obj\n img = img_obj.get_data()\n msk = img_obj.get_mask()\n kwargs = {}\n if self.mode == 'constant':\n kwargs = {'constant_values': self.pad_value}\n if len(img.shape) == 2:\n img_out = np.pad(img, pad_width=((self.pad_amounts[0], self.pad_amounts[1]), (self.pad_amounts[2], self.pad_amounts[3])), mode=self.mode, **kwargs)\n elif len(img.shape) == 3:\n img_out = np.pad(img, pad_width=((self.pad_amounts[0], self.pad_amounts[1]), (self.pad_amounts[2], self.pad_amounts[3]), (0, 0)), mode=self.mode, **kwargs)\n else:\n raise RuntimeError('Unexpected image shape: {}'.format(img.shape))\n if len(msk.shape) == 2:\n mask_out = np.pad(msk, pad_width=((self.pad_amounts[0], self.pad_amounts[1]), (self.pad_amounts[2], self.pad_amounts[3])), mode=self.mode, **kwargs)\n elif len(msk.shape) == 3:\n mask_out = np.pad(msk, pad_width=((self.pad_amounts[0], self.pad_amounts[1]), (self.pad_amounts[2], self.pad_amounts[3]), (0, 0)), mode=self.mode, **kwargs)\n else:\n raise RuntimeError('Unexpected mask shape: {}'.format(msk.shape))\n logger.info('Padded image of shape=%s to shape=%s' % (str(img_obj.get_data().shape), str(img.shape)))\n return GenericImageEntity(img_out, mask_out)\n<|end_body_1|>\n", "revision_id": "6ee5912f1fa57f49a4dd4feeeaf7862153bb6a9f", "skeleton": "<|skeleton|>\nclass Pad:\n \"\"\"Resizes an Entity\"\"\"\n\n def __init__(self, pad_amounts: tuple=(0, 0, 0, 0), mode: str='constant', pad_value: int=0) -> None:\n \"\"\"Initialize the resizer object :param pad_amounts: a tuple of the pixel count o add to each side (y_pre, y_post, x_pre, x_post) :param mode: what type of padding to use, supports numpy.pad options :param pad_value: the value to use when padding\"\"\"\n <|body_0|>\n\n def do(self, img_obj: ImageEntity, random_state_obj: RandomState) -> ImageEntity:\n \"\"\"Perform the resizing :param img_obj: The input object to be resized according the specified configuration :param random_state_obj: ignored :return: The resized object\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Pad:\n \"\"\"Resizes an Entity\"\"\"\n\n def __init__(self, 
pad_amounts: tuple=(0, 0, 0, 0), mode: str='constant', pad_value: int=0) -> None:\n \"\"\"Initialize the resizer object :param pad_amounts: a tuple of the pixel count o add to each side (y_pre, y_post, x_pre, x_post) :param mode: what type of padding to use, supports numpy.pad options :param pad_value: the value to use when padding\"\"\"\n self.pad_amounts = pad_amounts\n self.mode = mode\n self.pad_value = pad_value\n\n def do(self, img_obj: ImageEntity, random_state_obj: RandomState) -> ImageEntity:\n \"\"\"Perform the resizing :param img_obj: The input object to be resized according the specified configuration :param random_state_obj: ignored :return: The resized object\"\"\"\n if np.count_nonzero(np.asarray(self.pad_amounts)) == 0:\n return img_obj\n img = img_obj.get_data()\n msk = img_obj.get_mask()\n kwargs = {}\n if self.mode == 'constant':\n kwargs = {'constant_values': self.pad_value}\n if len(img.shape) == 2:\n img_out = np.pad(img, pad_width=((self.pad_amounts[0], self.pad_amounts[1]), (self.pad_amounts[2], self.pad_amounts[3])), mode=self.mode, **kwargs)\n elif len(img.shape) == 3:\n img_out = np.pad(img, pad_width=((self.pad_amounts[0], self.pad_amounts[1]), (self.pad_amounts[2], self.pad_amounts[3]), (0, 0)), mode=self.mode, **kwargs)\n else:\n raise RuntimeError('Unexpected image shape: {}'.format(img.shape))\n if len(msk.shape) == 2:\n mask_out = np.pad(msk, pad_width=((self.pad_amounts[0], self.pad_amounts[1]), (self.pad_amounts[2], self.pad_amounts[3])), mode=self.mode, **kwargs)\n elif len(msk.shape) == 3:\n mask_out = np.pad(msk, pad_width=((self.pad_amounts[0], self.pad_amounts[1]), (self.pad_amounts[2], self.pad_amounts[3]), (0, 0)), mode=self.mode, **kwargs)\n else:\n raise RuntimeError('Unexpected mask shape: {}'.format(msk.shape))\n logger.info('Padded image of shape=%s to shape=%s' % (str(img_obj.get_data().shape), str(img.shape)))\n return GenericImageEntity(img_out, mask_out)\n", "source": "the_stack_v2_python_sparse", "source_path": "trojai/trojai/datagen/image_size_xforms.py", "source_repo": "ionutmodo/TrojAI-UMD", "split": "val", "star_events_count": 1}
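A quick exercise of the Pad record above. numpy, logger and GenericImageEntity belong to the record's surrounding trojai module; the StubEntity below is a hypothetical minimal ImageEntity so the transform has something to pad. (Editorial note: the record's logger.info line formats the input shape twice, since img is never rebound to img_out.)

import numpy as np
from numpy.random import RandomState

class StubEntity:                    # hypothetical minimal ImageEntity
    def __init__(self, img, msk):
        self._img, self._msk = img, msk
    def get_data(self):
        return self._img
    def get_mask(self):
        return self._msk

xform = Pad(pad_amounts=(2, 2, 4, 4), mode='constant', pad_value=0)
entity = StubEntity(np.ones((8, 8), dtype=np.uint8), np.zeros((8, 8), dtype=np.uint8))
out = xform.do(entity, RandomState(1234))   # random_state_obj is ignored by do()
print(out.get_data().shape)                 # (12, 16): 2+2 padded rows, 4+4 padded columns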
{"blob_id": "3b872b98e962a7d8064f7c645edea82fa1c72bb5", "bodies": ["InfiniteMultinomial.__init__(self)\nself.strength = strength\nself.alpha = alpha", "j = self.num_partitions()\nphi_j = rBeta(self.strength * self.alpha.x(j), self.strength * (1.0 - self.alpha.partition_start(j + 1)))\nassert 0.0 <= phi_j\nassert phi_j <= 1.0\ntop = self.last()\ntop = top + (1.0 - self.partition_end(j - 1)) * phi_j\nreturn top"], "bodies_text": "<|body_start_0|>\n InfiniteMultinomial.__init__(self)\n self.strength = strength\n self.alpha = alpha\n<|end_body_0|>\n\n<|body_start_1|>\n j = self.num_partitions()\n phi_j = rBeta(self.strength * self.alpha.x(j), self.strength * (1.0 - self.alpha.partition_start(j + 1)))\n assert 0.0 <= phi_j\n assert phi_j <= 1.0\n top = self.last()\n top = top + (1.0 - self.partition_end(j - 1)) * phi_j\n return top\n<|end_body_1|>\n", "class_docstring": "Lazy sampling from an infinite Dirchlet distribution.", "class_name": "InfiniteDirichlet", "detected_licenses": ["BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass InfiniteDirichlet:\n \"\"\"Lazy sampling from an infinite Dirchlet distribution.\"\"\"\n\n def __init__(self, strength, alpha):\n \"\"\"Initialise with a strength parameter and an infinite prior. The infinite prior should be an infinite multinomial parameterisation itself.\"\"\"\n <|body_0|>\n\n def _extend(self):\n \"\"\"Extend the stick breaking construction by one partition.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n InfiniteMultinomial.__init__(self)\n self.strength = strength\n self.alpha = alpha\n<|end_body_0|>\n\n<|body_start_1|>\n j = self.num_partitions()\n phi_j = rBeta(self.strength * self.alpha.x(j), self.strength * (1.0 - self.alpha.partition_start(j + 1)))\n assert 0.0 <= phi_j\n assert phi_j <= 1.0\n top = self.last()\n top = top + (1.0 - self.partition_end(j - 1)) * phi_j\n return top\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000221", "length_bytes": 6764, "license_type": "permissive", "methods": [{"docstring": "Initialise with a strength parameter and an infinite prior. The infinite prior should be an infinite multinomial parameterisation itself.", "name": "__init__", "signature": "def __init__(self, strength, alpha)"}, {"docstring": "Extend the stick breaking construction by one partition.", "name": "_extend", "signature": "def _extend(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_027659", "prompt": "Implement the Python class `InfiniteDirichlet` described below.\n\nClass description:\nLazy sampling from an infinite Dirchlet distribution.\n\nMethod signatures and docstrings:\n- def __init__(self, strength, alpha): Initialise with a strength parameter and an infinite prior. The infinite prior should be an infinite multinomial parameterisation itself.\n- def _extend(self): Extend the stick breaking construction by one partition.", "prompted_full_text": "Implement the Python class `InfiniteDirichlet` described below.\n\nClass description:\nLazy sampling from an infinite Dirchlet distribution.\n\nMethod signatures and docstrings:\n- def __init__(self, strength, alpha): Initialise with a strength parameter and an infinite prior. 
The infinite prior should be an infinite multinomial parameterisation itself.\n- def _extend(self): Extend the stick breaking construction by one partition.\n\n<|skeleton|>\nclass InfiniteDirichlet:\n \"\"\"Lazy sampling from an infinite Dirchlet distribution.\"\"\"\n\n def __init__(self, strength, alpha):\n \"\"\"Initialise with a strength parameter and an infinite prior. The infinite prior should be an infinite multinomial parameterisation itself.\"\"\"\n <|body_0|>\n\n def _extend(self):\n \"\"\"Extend the stick breaking construction by one partition.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n InfiniteMultinomial.__init__(self)\n self.strength = strength\n self.alpha = alpha\n<|end_body_0|>\n\n<|body_start_1|>\n j = self.num_partitions()\n phi_j = rBeta(self.strength * self.alpha.x(j), self.strength * (1.0 - self.alpha.partition_start(j + 1)))\n assert 0.0 <= phi_j\n assert phi_j <= 1.0\n top = self.last()\n top = top + (1.0 - self.partition_end(j - 1)) * phi_j\n return top\n<|end_body_1|>\n", "revision_id": "1b825ba7a60f0a0489df5f41b273374aef628a60", "skeleton": "<|skeleton|>\nclass InfiniteDirichlet:\n \"\"\"Lazy sampling from an infinite Dirchlet distribution.\"\"\"\n\n def __init__(self, strength, alpha):\n \"\"\"Initialise with a strength parameter and an infinite prior. The infinite prior should be an infinite multinomial parameterisation itself.\"\"\"\n <|body_0|>\n\n def _extend(self):\n \"\"\"Extend the stick breaking construction by one partition.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class InfiniteDirichlet:\n \"\"\"Lazy sampling from an infinite Dirchlet distribution.\"\"\"\n\n def __init__(self, strength, alpha):\n \"\"\"Initialise with a strength parameter and an infinite prior. The infinite prior should be an infinite multinomial parameterisation itself.\"\"\"\n InfiniteMultinomial.__init__(self)\n self.strength = strength\n self.alpha = alpha\n\n def _extend(self):\n \"\"\"Extend the stick breaking construction by one partition.\"\"\"\n j = self.num_partitions()\n phi_j = rBeta(self.strength * self.alpha.x(j), self.strength * (1.0 - self.alpha.partition_start(j + 1)))\n assert 0.0 <= phi_j\n assert phi_j <= 1.0\n top = self.last()\n top = top + (1.0 - self.partition_end(j - 1)) * phi_j\n return top\n", "source": "the_stack_v2_python_sparse", "source_path": "python/infpy/dp/infinite_multinomials.py", "source_repo": "JohnReid/infpy", "split": "val", "star_events_count": 5}
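The _extend body above performs one round of stick breaking (rBeta and the InfiniteMultinomial base live in the record's infpy module and are not shown). For readers new to the construction, here is a self-contained one-parameter sketch of the same recurrence; it illustrates the idea, not the record's two-parameter variant tied to the prior alpha.

import random

def stick_breaking(strength, n):
    """First n partition weights of a DP(strength) prior via stick breaking."""
    weights, remaining = [], 1.0
    for _ in range(n):
        phi = random.betavariate(1.0, strength)   # fraction broken off the remaining stick
        weights.append(remaining * phi)
        remaining *= 1.0 - phi                    # the record tracks this as 1 - partition_end
    return weights

print(stick_breaking(2.0, 5))   # partial weights; further partitions are extended lazily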
{"blob_id": "8e2c8ad319edba510d8c4e9988f1164f7bd67784", "bodies": ["categories = []\nfor value in resp['categories']:\n category = self.to_category(value)\n categories.append(category)\nreturn categories", "category = Category()\nif 'id' in resp:\n category.set_id(resp['id'])\nif 'name' in resp:\n category.set_name(resp['name'])\nreturn category", "data = {}\nif category.get_name() != '':\n data['name'] = category.get_name()\nreturn data"], "bodies_text": "<|body_start_0|>\n categories = []\n for value in resp['categories']:\n category = self.to_category(value)\n categories.append(category)\n return categories\n<|end_body_0|>\n\n<|body_start_1|>\n category = Category()\n if 'id' in resp:\n category.set_id(resp['id'])\n if 'name' in resp:\n category.set_name(resp['name'])\n return category\n<|end_body_1|>\n\n<|body_start_2|>\n data = {}\n if category.get_name() != '':\n data['name'] = category.get_name()\n return data\n<|end_body_2|>\n", "class_docstring": "This class is used to parse the json object for category.", "class_name": "CategoryParser", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CategoryParser:\n \"\"\"This class is used to parse the json object for category.\"\"\"\n\n def get_categories(self, resp):\n \"\"\"This method parses the json for categories list. Args: resp(dict):Response containing json for categories list. Returns: list of instance: List of categories object.\"\"\"\n <|body_0|>\n\n def to_category(self, resp):\n \"\"\"This method parses the given response and returns category object. Args: resp(dict): Dictionary containing json object for category. Returns: instance: Category object.\"\"\"\n <|body_1|>\n\n def to_json(self, category):\n \"\"\"This method is used to convert category object to json format. Args: category(instance): Category object. Returns: dict: Dictionary containing json object for category object.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n categories = []\n for value in resp['categories']:\n category = self.to_category(value)\n categories.append(category)\n return categories\n<|end_body_0|>\n\n<|body_start_1|>\n category = Category()\n if 'id' in resp:\n category.set_id(resp['id'])\n if 'name' in resp:\n category.set_name(resp['name'])\n return category\n<|end_body_1|>\n\n<|body_start_2|>\n data = {}\n if category.get_name() != '':\n data['name'] = category.get_name()\n return data\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000222", "length_bytes": 1464, "license_type": "permissive", "methods": [{"docstring": "This method parses the json for categories list. Args: resp(dict):Response containing json for categories list. Returns: list of instance: List of categories object.", "name": "get_categories", "signature": "def get_categories(self, resp)"}, {"docstring": "This method parses the given response and returns category object. Args: resp(dict): Dictionary containing json object for category. Returns: instance: Category object.", "name": "to_category", "signature": "def to_category(self, resp)"}, {"docstring": "This method is used to convert category object to json format. Args: category(instance): Category object. 
Returns: dict: Dictionary containing json object for category object.", "name": "to_json", "signature": "def to_json(self, category)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_027235", "prompt": "Implement the Python class `CategoryParser` described below.\n\nClass description:\nThis class is used to parse the json object for category.\n\nMethod signatures and docstrings:\n- def get_categories(self, resp): This method parses the json for categories list. Args: resp(dict):Response containing json for categories list. Returns: list of instance: List of categories object.\n- def to_category(self, resp): This method parses the given response and returns category object. Args: resp(dict): Dictionary containing json object for category. Returns: instance: Category object.\n- def to_json(self, category): This method is used to convert category object to json format. Args: category(instance): Category object. Returns: dict: Dictionary containing json object for category object.", "prompted_full_text": "Implement the Python class `CategoryParser` described below.\n\nClass description:\nThis class is used to parse the json object for category.\n\nMethod signatures and docstrings:\n- def get_categories(self, resp): This method parses the json for categories list. Args: resp(dict):Response containing json for categories list. Returns: list of instance: List of categories object.\n- def to_category(self, resp): This method parses the given response and returns category object. Args: resp(dict): Dictionary containing json object for category. Returns: instance: Category object.\n- def to_json(self, category): This method is used to convert category object to json format. Args: category(instance): Category object. Returns: dict: Dictionary containing json object for category object.\n\n<|skeleton|>\nclass CategoryParser:\n \"\"\"This class is used to parse the json object for category.\"\"\"\n\n def get_categories(self, resp):\n \"\"\"This method parses the json for categories list. Args: resp(dict):Response containing json for categories list. Returns: list of instance: List of categories object.\"\"\"\n <|body_0|>\n\n def to_category(self, resp):\n \"\"\"This method parses the given response and returns category object. Args: resp(dict): Dictionary containing json object for category. Returns: instance: Category object.\"\"\"\n <|body_1|>\n\n def to_json(self, category):\n \"\"\"This method is used to convert category object to json format. Args: category(instance): Category object. Returns: dict: Dictionary containing json object for category object.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n categories = []\n for value in resp['categories']:\n category = self.to_category(value)\n categories.append(category)\n return categories\n<|end_body_0|>\n\n<|body_start_1|>\n category = Category()\n if 'id' in resp:\n category.set_id(resp['id'])\n if 'name' in resp:\n category.set_name(resp['name'])\n return category\n<|end_body_1|>\n\n<|body_start_2|>\n data = {}\n if category.get_name() != '':\n data['name'] = category.get_name()\n return data\n<|end_body_2|>\n", "revision_id": "33e9f6bccba16a581b115c582033a93d43bb159c", "skeleton": "<|skeleton|>\nclass CategoryParser:\n \"\"\"This class is used to parse the json object for category.\"\"\"\n\n def get_categories(self, resp):\n \"\"\"This method parses the json for categories list. Args: resp(dict):Response containing json for categories list. 
Returns: list of instance: List of categories object.\"\"\"\n <|body_0|>\n\n def to_category(self, resp):\n \"\"\"This method parses the given response and returns category object. Args: resp(dict): Dictionary containing json object for category. Returns: instance: Category object.\"\"\"\n <|body_1|>\n\n def to_json(self, category):\n \"\"\"This method is used to convert category object to json format. Args: category(instance): Category object. Returns: dict: Dictionary containing json object for category object.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CategoryParser:\n \"\"\"This class is used to parse the json object for category.\"\"\"\n\n def get_categories(self, resp):\n \"\"\"This method parses the json for categories list. Args: resp(dict):Response containing json for categories list. Returns: list of instance: List of categories object.\"\"\"\n categories = []\n for value in resp['categories']:\n category = self.to_category(value)\n categories.append(category)\n return categories\n\n def to_category(self, resp):\n \"\"\"This method parses the given response and returns category object. Args: resp(dict): Dictionary containing json object for category. Returns: instance: Category object.\"\"\"\n category = Category()\n if 'id' in resp:\n category.set_id(resp['id'])\n if 'name' in resp:\n category.set_name(resp['name'])\n return category\n\n def to_json(self, category):\n \"\"\"This method is used to convert category object to json format. Args: category(instance): Category object. Returns: dict: Dictionary containing json object for category object.\"\"\"\n data = {}\n if category.get_name() != '':\n data['name'] = category.get_name()\n return data\n", "source": "the_stack_v2_python_sparse", "source_path": "projects/parser/CategoryParser.py", "source_repo": "vhatgithub/projects-python-wrappers", "split": "val", "star_events_count": 0}
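A round-trip sketch for the CategoryParser record above. The parser leans on a Category model exposing set_id, set_name and get_name, which the record does not include, so the stand-in below is an assumption shaped by those calls.

class Category:                      # hypothetical stand-in shaped by the parser's calls
    def __init__(self):
        self._id, self._name = None, ''
    def set_id(self, category_id):
        self._id = category_id
    def set_name(self, name):
        self._name = name
    def get_name(self):
        return self._name

parser = CategoryParser()
categories = parser.get_categories({'categories': [{'id': 7, 'name': 'Travel'}]})
print(parser.to_json(categories[0]))   # {'name': 'Travel'}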
{"blob_id": "2add1c74240e929699c8c39345b8b032f5093026", "bodies": ["\"\"\"This will handel all the GET request.\"\"\"\ntrack_info = ArtistInformation.objects.all()\nserializer = ArtistInformationSerializer(track_info, many=True)\nreturn Response({'artist_info': serializer.data})", "artist_name = request.data.get('name')\nserializer = ArtistInformationSerializer(data={'artistName': artist_name})\nif serializer.is_valid(raise_exception=True):\n artist_save = serializer.save()\nreturn Response({'response': 'SUCCESS', 'artistId': '{}'.format(artist_save.artistId), 'message': \"Artist'{}' created successfully\".format(artist_save.artistName)})", "artistId = request.data.get('artistId')\ntry:\n del_obj = ArtistInformation.objects.get(artistId=artistId)\n del_obj.delete()\n return Response({'response': 'SUCCESS', 'message': \"Artist '{}' Deleted Successfully\".format(artistId)})\nexcept ArtistInformation.DoesNotExist:\n return Response({'response': 'FAIL'})"], "bodies_text": "<|body_start_0|>\n \"\"\"This will handel all the GET request.\"\"\"\n track_info = ArtistInformation.objects.all()\n serializer = ArtistInformationSerializer(track_info, many=True)\n return Response({'artist_info': serializer.data})\n<|end_body_0|>\n\n<|body_start_1|>\n artist_name = request.data.get('name')\n serializer = ArtistInformationSerializer(data={'artistName': artist_name})\n if serializer.is_valid(raise_exception=True):\n artist_save = serializer.save()\n return Response({'response': 'SUCCESS', 'artistId': '{}'.format(artist_save.artistId), 'message': \"Artist'{}' created successfully\".format(artist_save.artistName)})\n<|end_body_1|>\n\n<|body_start_2|>\n artistId = request.data.get('artistId')\n try:\n del_obj = ArtistInformation.objects.get(artistId=artistId)\n del_obj.delete()\n return Response({'response': 'SUCCESS', 'message': \"Artist '{}' Deleted Successfully\".format(artistId)})\n except ArtistInformation.DoesNotExist:\n return Response({'response': 'FAIL'})\n<|end_body_2|>\n", "class_docstring": "This view is for handling operation for Artist Information. It will perform GET, POST, DELETE request for artist information.", "class_name": "ArtistInformationView", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ArtistInformationView:\n \"\"\"This view is for handling operation for Artist Information. It will perform GET, POST, DELETE request for artist information.\"\"\"\n\n def get(self, pk=None):\n \"\"\"Performs GET operation. Returns a Response with all artist information\"\"\"\n <|body_0|>\n\n def post(self, request):\n \"\"\"Performs POST operation. Creates new artist record. Returns a Response with artist information\"\"\"\n <|body_1|>\n\n def delete(self, request):\n \"\"\"Performs DELETE operation. Deleted the artisrt ID record from the DB. 
Returns a Response with all artist information\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n \"\"\"This will handel all the GET request.\"\"\"\n track_info = ArtistInformation.objects.all()\n serializer = ArtistInformationSerializer(track_info, many=True)\n return Response({'artist_info': serializer.data})\n<|end_body_0|>\n\n<|body_start_1|>\n artist_name = request.data.get('name')\n serializer = ArtistInformationSerializer(data={'artistName': artist_name})\n if serializer.is_valid(raise_exception=True):\n artist_save = serializer.save()\n return Response({'response': 'SUCCESS', 'artistId': '{}'.format(artist_save.artistId), 'message': \"Artist'{}' created successfully\".format(artist_save.artistName)})\n<|end_body_1|>\n\n<|body_start_2|>\n artistId = request.data.get('artistId')\n try:\n del_obj = ArtistInformation.objects.get(artistId=artistId)\n del_obj.delete()\n return Response({'response': 'SUCCESS', 'message': \"Artist '{}' Deleted Successfully\".format(artistId)})\n except ArtistInformation.DoesNotExist:\n return Response({'response': 'FAIL'})\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000223", "length_bytes": 5113, "license_type": "no_license", "methods": [{"docstring": "Performs GET operation. Returns a Response with all artist information", "name": "get", "signature": "def get(self, pk=None)"}, {"docstring": "Performs POST operation. Creates new artist record. Returns a Response with artist information", "name": "post", "signature": "def post(self, request)"}, {"docstring": "Performs DELETE operation. Deleted the artisrt ID record from the DB. Returns a Response with all artist information", "name": "delete", "signature": "def delete(self, request)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_013006", "prompt": "Implement the Python class `ArtistInformationView` described below.\n\nClass description:\nThis view is for handling operation for Artist Information. It will perform GET, POST, DELETE request for artist information.\n\nMethod signatures and docstrings:\n- def get(self, pk=None): Performs GET operation. Returns a Response with all artist information\n- def post(self, request): Performs POST operation. Creates new artist record. Returns a Response with artist information\n- def delete(self, request): Performs DELETE operation. Deleted the artisrt ID record from the DB. Returns a Response with all artist information", "prompted_full_text": "Implement the Python class `ArtistInformationView` described below.\n\nClass description:\nThis view is for handling operation for Artist Information. It will perform GET, POST, DELETE request for artist information.\n\nMethod signatures and docstrings:\n- def get(self, pk=None): Performs GET operation. Returns a Response with all artist information\n- def post(self, request): Performs POST operation. Creates new artist record. Returns a Response with artist information\n- def delete(self, request): Performs DELETE operation. Deleted the artisrt ID record from the DB. Returns a Response with all artist information\n\n<|skeleton|>\nclass ArtistInformationView:\n \"\"\"This view is for handling operation for Artist Information. It will perform GET, POST, DELETE request for artist information.\"\"\"\n\n def get(self, pk=None):\n \"\"\"Performs GET operation. Returns a Response with all artist information\"\"\"\n <|body_0|>\n\n def post(self, request):\n \"\"\"Performs POST operation. Creates new artist record. 
Returns a Response with artist information\"\"\"\n <|body_1|>\n\n def delete(self, request):\n \"\"\"Performs DELETE operation. Deleted the artisrt ID record from the DB. Returns a Response with all artist information\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n \"\"\"This will handel all the GET request.\"\"\"\n track_info = ArtistInformation.objects.all()\n serializer = ArtistInformationSerializer(track_info, many=True)\n return Response({'artist_info': serializer.data})\n<|end_body_0|>\n\n<|body_start_1|>\n artist_name = request.data.get('name')\n serializer = ArtistInformationSerializer(data={'artistName': artist_name})\n if serializer.is_valid(raise_exception=True):\n artist_save = serializer.save()\n return Response({'response': 'SUCCESS', 'artistId': '{}'.format(artist_save.artistId), 'message': \"Artist'{}' created successfully\".format(artist_save.artistName)})\n<|end_body_1|>\n\n<|body_start_2|>\n artistId = request.data.get('artistId')\n try:\n del_obj = ArtistInformation.objects.get(artistId=artistId)\n del_obj.delete()\n return Response({'response': 'SUCCESS', 'message': \"Artist '{}' Deleted Successfully\".format(artistId)})\n except ArtistInformation.DoesNotExist:\n return Response({'response': 'FAIL'})\n<|end_body_2|>\n", "revision_id": "c2174592ea5a074579f02509590e4ebef34e9348", "skeleton": "<|skeleton|>\nclass ArtistInformationView:\n \"\"\"This view is for handling operation for Artist Information. It will perform GET, POST, DELETE request for artist information.\"\"\"\n\n def get(self, pk=None):\n \"\"\"Performs GET operation. Returns a Response with all artist information\"\"\"\n <|body_0|>\n\n def post(self, request):\n \"\"\"Performs POST operation. Creates new artist record. Returns a Response with artist information\"\"\"\n <|body_1|>\n\n def delete(self, request):\n \"\"\"Performs DELETE operation. Deleted the artisrt ID record from the DB. Returns a Response with all artist information\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ArtistInformationView:\n \"\"\"This view is for handling operation for Artist Information. It will perform GET, POST, DELETE request for artist information.\"\"\"\n\n def get(self, pk=None):\n \"\"\"Performs GET operation. Returns a Response with all artist information\"\"\"\n \"\"\"This will handel all the GET request.\"\"\"\n track_info = ArtistInformation.objects.all()\n serializer = ArtistInformationSerializer(track_info, many=True)\n return Response({'artist_info': serializer.data})\n\n def post(self, request):\n \"\"\"Performs POST operation. Creates new artist record. Returns a Response with artist information\"\"\"\n artist_name = request.data.get('name')\n serializer = ArtistInformationSerializer(data={'artistName': artist_name})\n if serializer.is_valid(raise_exception=True):\n artist_save = serializer.save()\n return Response({'response': 'SUCCESS', 'artistId': '{}'.format(artist_save.artistId), 'message': \"Artist'{}' created successfully\".format(artist_save.artistName)})\n\n def delete(self, request):\n \"\"\"Performs DELETE operation. Deleted the artisrt ID record from the DB. 
Returns a Response with all artist information\"\"\"\n artistId = request.data.get('artistId')\n try:\n del_obj = ArtistInformation.objects.get(artistId=artistId)\n del_obj.delete()\n return Response({'response': 'SUCCESS', 'message': \"Artist '{}' Deleted Successfully\".format(artistId)})\n except ArtistInformation.DoesNotExist:\n return Response({'response': 'FAIL'})\n", "source": "the_stack_v2_python_sparse", "source_path": "symphony/music_api/views.py", "source_repo": "vdkotian/symphony_api", "split": "val", "star_events_count": 0}
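The record above omits the view's imports and base class; in a standard Django REST Framework project it would subclass rest_framework.views.APIView and be routed roughly as sketched below. The route string is an assumption, while the module path comes from the record's source_path. Note also that get(self, pk=None) omits the request parameter DRF passes to every handler, so as written the request object would bind to pk.

# urls.py sketch; assumes ArtistInformationView subclasses APIView so as_view() exists
from django.urls import path
from music_api.views import ArtistInformationView   # module path per the record

urlpatterns = [
    path('artists/', ArtistInformationView.as_view()),   # hypothetical route
]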
{"blob_id": "fce9fc5746de5011cdde9b9413abc67f84a3dceb", "bodies": ["super(self.__class__, self).__init__(graph, seed)\nself.available_statuses = {'Susceptible': 0, 'Infected': 1}\nself.name = 'Voter'", "self.clean_initial_status(self.available_statuses.values())\nif self.actual_iteration == 0:\n self.actual_iteration += 1\n delta, node_count, status_delta = self.status_delta(self.status)\n if node_status:\n return {'iteration': 0, 'status': self.status.copy(), 'node_count': node_count.copy(), 'status_delta': status_delta.copy()}\n else:\n return {'iteration': 0, 'status': {}, 'node_count': node_count.copy(), 'status_delta': status_delta.copy()}\nlistener = list(self.graph.nodes)[np.random.randint(0, self.graph.number_of_nodes())]\nneighbours = list(self.graph.neighbors(listener))\nif self.graph.directed:\n neighbours = list(self.graph.predecessors(listener))\nspeaker = neighbours[np.random.randint(0, len(neighbours))]\ndelta = {listener: self.status[speaker]}\nself.status[listener] = self.status[speaker]\nnode_count = {st: len([n for n in self.status if self.status[n] == st]) for st in self.available_statuses.values()}\nstatus_delta = {st: 0 for st in self.available_statuses.values()}\nstatus_delta[self.status[speaker]] += 1\nfor x in self.available_statuses.values():\n if x != self.status[speaker]:\n status_delta[x] -= 1\nself.actual_iteration += 1\nif node_status:\n return {'iteration': self.actual_iteration - 1, 'status': delta.copy(), 'node_count': node_count.copy(), 'status_delta': status_delta.copy()}\nelse:\n return {'iteration': self.actual_iteration - 1, 'status': {}, 'node_count': node_count.copy(), 'status_delta': status_delta.copy()}"], "bodies_text": "<|body_start_0|>\n super(self.__class__, self).__init__(graph, seed)\n self.available_statuses = {'Susceptible': 0, 'Infected': 1}\n self.name = 'Voter'\n<|end_body_0|>\n\n<|body_start_1|>\n self.clean_initial_status(self.available_statuses.values())\n if self.actual_iteration == 0:\n self.actual_iteration += 1\n delta, node_count, status_delta = self.status_delta(self.status)\n if node_status:\n return {'iteration': 0, 'status': self.status.copy(), 'node_count': node_count.copy(), 'status_delta': status_delta.copy()}\n else:\n return {'iteration': 0, 'status': {}, 'node_count': node_count.copy(), 'status_delta': status_delta.copy()}\n listener = list(self.graph.nodes)[np.random.randint(0, self.graph.number_of_nodes())]\n neighbours = list(self.graph.neighbors(listener))\n if self.graph.directed:\n neighbours = list(self.graph.predecessors(listener))\n speaker = neighbours[np.random.randint(0, len(neighbours))]\n delta = {listener: self.status[speaker]}\n self.status[listener] = self.status[speaker]\n node_count = {st: len([n for n in self.status if self.status[n] == st]) for st in self.available_statuses.values()}\n status_delta = {st: 0 for st in self.available_statuses.values()}\n status_delta[self.status[speaker]] += 1\n for x in self.available_statuses.values():\n if x != self.status[speaker]:\n status_delta[x] -= 1\n self.actual_iteration += 1\n if node_status:\n return {'iteration': self.actual_iteration - 1, 'status': delta.copy(), 'node_count': node_count.copy(), 'status_delta': status_delta.copy()}\n else:\n return {'iteration': self.actual_iteration - 1, 'status': {}, 'node_count': node_count.copy(), 'status_delta': status_delta.copy()}\n<|end_body_1|>\n", "class_docstring": "", "class_name": "VoterModel", "detected_licenses": ["BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": 
"<|skeleton|>\nclass VoterModel:\n\n def __init__(self, graph, seed=None):\n \"\"\"Model Constructor :param graph: A networkx graph object\"\"\"\n <|body_0|>\n\n def iteration(self, node_status=True):\n \"\"\"Execute a single model iteration :return: Iteration_id, Incremental node status (dictionary node->status)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(self.__class__, self).__init__(graph, seed)\n self.available_statuses = {'Susceptible': 0, 'Infected': 1}\n self.name = 'Voter'\n<|end_body_0|>\n\n<|body_start_1|>\n self.clean_initial_status(self.available_statuses.values())\n if self.actual_iteration == 0:\n self.actual_iteration += 1\n delta, node_count, status_delta = self.status_delta(self.status)\n if node_status:\n return {'iteration': 0, 'status': self.status.copy(), 'node_count': node_count.copy(), 'status_delta': status_delta.copy()}\n else:\n return {'iteration': 0, 'status': {}, 'node_count': node_count.copy(), 'status_delta': status_delta.copy()}\n listener = list(self.graph.nodes)[np.random.randint(0, self.graph.number_of_nodes())]\n neighbours = list(self.graph.neighbors(listener))\n if self.graph.directed:\n neighbours = list(self.graph.predecessors(listener))\n speaker = neighbours[np.random.randint(0, len(neighbours))]\n delta = {listener: self.status[speaker]}\n self.status[listener] = self.status[speaker]\n node_count = {st: len([n for n in self.status if self.status[n] == st]) for st in self.available_statuses.values()}\n status_delta = {st: 0 for st in self.available_statuses.values()}\n status_delta[self.status[speaker]] += 1\n for x in self.available_statuses.values():\n if x != self.status[speaker]:\n status_delta[x] -= 1\n self.actual_iteration += 1\n if node_status:\n return {'iteration': self.actual_iteration - 1, 'status': delta.copy(), 'node_count': node_count.copy(), 'status_delta': status_delta.copy()}\n else:\n return {'iteration': self.actual_iteration - 1, 'status': {}, 'node_count': node_count.copy(), 'status_delta': status_delta.copy()}\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000224", "length_bytes": 3326, "license_type": "permissive", "methods": [{"docstring": "Model Constructor :param graph: A networkx graph object", "name": "__init__", "signature": "def __init__(self, graph, seed=None)"}, {"docstring": "Execute a single model iteration :return: Iteration_id, Incremental node status (dictionary node->status)", "name": "iteration", "signature": "def iteration(self, node_status=True)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_007894", "prompt": "Implement the Python class `VoterModel` described below.\n\nClass description:\nImplement the VoterModel class.\n\nMethod signatures and docstrings:\n- def __init__(self, graph, seed=None): Model Constructor :param graph: A networkx graph object\n- def iteration(self, node_status=True): Execute a single model iteration :return: Iteration_id, Incremental node status (dictionary node->status)", "prompted_full_text": "Implement the Python class `VoterModel` described below.\n\nClass description:\nImplement the VoterModel class.\n\nMethod signatures and docstrings:\n- def __init__(self, graph, seed=None): Model Constructor :param graph: A networkx graph object\n- def iteration(self, node_status=True): Execute a single model iteration :return: Iteration_id, Incremental node status (dictionary node->status)\n\n<|skeleton|>\nclass VoterModel:\n\n def __init__(self, graph, seed=None):\n \"\"\"Model Constructor :param graph: A networkx graph 
object\"\"\"\n <|body_0|>\n\n def iteration(self, node_status=True):\n \"\"\"Execute a single model iteration :return: Iteration_id, Incremental node status (dictionary node->status)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(self.__class__, self).__init__(graph, seed)\n self.available_statuses = {'Susceptible': 0, 'Infected': 1}\n self.name = 'Voter'\n<|end_body_0|>\n\n<|body_start_1|>\n self.clean_initial_status(self.available_statuses.values())\n if self.actual_iteration == 0:\n self.actual_iteration += 1\n delta, node_count, status_delta = self.status_delta(self.status)\n if node_status:\n return {'iteration': 0, 'status': self.status.copy(), 'node_count': node_count.copy(), 'status_delta': status_delta.copy()}\n else:\n return {'iteration': 0, 'status': {}, 'node_count': node_count.copy(), 'status_delta': status_delta.copy()}\n listener = list(self.graph.nodes)[np.random.randint(0, self.graph.number_of_nodes())]\n neighbours = list(self.graph.neighbors(listener))\n if self.graph.directed:\n neighbours = list(self.graph.predecessors(listener))\n speaker = neighbours[np.random.randint(0, len(neighbours))]\n delta = {listener: self.status[speaker]}\n self.status[listener] = self.status[speaker]\n node_count = {st: len([n for n in self.status if self.status[n] == st]) for st in self.available_statuses.values()}\n status_delta = {st: 0 for st in self.available_statuses.values()}\n status_delta[self.status[speaker]] += 1\n for x in self.available_statuses.values():\n if x != self.status[speaker]:\n status_delta[x] -= 1\n self.actual_iteration += 1\n if node_status:\n return {'iteration': self.actual_iteration - 1, 'status': delta.copy(), 'node_count': node_count.copy(), 'status_delta': status_delta.copy()}\n else:\n return {'iteration': self.actual_iteration - 1, 'status': {}, 'node_count': node_count.copy(), 'status_delta': status_delta.copy()}\n<|end_body_1|>\n", "revision_id": "900cb3727795c97a73e59fdb736aa736c4d17157", "skeleton": "<|skeleton|>\nclass VoterModel:\n\n def __init__(self, graph, seed=None):\n \"\"\"Model Constructor :param graph: A networkx graph object\"\"\"\n <|body_0|>\n\n def iteration(self, node_status=True):\n \"\"\"Execute a single model iteration :return: Iteration_id, Incremental node status (dictionary node->status)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class VoterModel:\n def __init__(self, graph, seed=None):\n \"\"\"Model Constructor :param graph: A networkx graph object\"\"\"\n super(self.__class__, self).__init__(graph, seed)\n self.available_statuses = {'Susceptible': 0, 'Infected': 1}\n self.name = 'Voter'\n\n def iteration(self, node_status=True):\n \"\"\"Execute a single model iteration :return: Iteration_id, Incremental node status (dictionary node->status)\"\"\"\n self.clean_initial_status(self.available_statuses.values())\n if self.actual_iteration == 0:\n self.actual_iteration += 1\n delta, node_count, status_delta = self.status_delta(self.status)\n if node_status:\n return {'iteration': 0, 'status': self.status.copy(), 'node_count': node_count.copy(), 'status_delta': status_delta.copy()}\n else:\n return {'iteration': 0, 'status': {}, 'node_count': node_count.copy(), 'status_delta': status_delta.copy()}\n listener = list(self.graph.nodes)[np.random.randint(0, self.graph.number_of_nodes())]\n neighbours = list(self.graph.neighbors(listener))\n if self.graph.directed:\n 
neighbours = list(self.graph.predecessors(listener))\n speaker = neighbours[np.random.randint(0, len(neighbours))]\n delta = {listener: self.status[speaker]}\n self.status[listener] = self.status[speaker]\n node_count = {st: len([n for n in self.status if self.status[n] == st]) for st in self.available_statuses.values()}\n status_delta = {st: 0 for st in self.available_statuses.values()}\n status_delta[self.status[speaker]] += 1\n for x in self.available_statuses.values():\n if x != self.status[speaker]:\n status_delta[x] -= 1\n self.actual_iteration += 1\n if node_status:\n return {'iteration': self.actual_iteration - 1, 'status': delta.copy(), 'node_count': node_count.copy(), 'status_delta': status_delta.copy()}\n else:\n return {'iteration': self.actual_iteration - 1, 'status': {}, 'node_count': node_count.copy(), 'status_delta': status_delta.copy()}\n", "source": "the_stack_v2_python_sparse", "source_path": "ndlib/models/opinions/VoterModel.py", "source_repo": "GiulioRossetti/ndlib", "split": "val", "star_events_count": 265}
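The VoterModel record above stores only method bodies; as written they assume numpy is imported as np and that the class really extends an ndlib DiffusionModel base supplying self.graph, self.status, clean_initial_status and status_delta (the skeleton shows a bare class, so super().__init__(graph, seed) only works against such a base). Note also that body_1 reads self.graph.directed, an attribute of ndlib's graph wrapper; plain networkx exposes is_directed() instead. Below is a minimal self-contained sketch of just the voter update rule, using plain networkx and a hypothetical voter_step helper (names not from the record):

    import numpy as np
    import networkx as nx

    def voter_step(graph, status):
        # Pick a random 'listener' and copy the status of a random
        # neighbour (the 'speaker'), mirroring body_1 of the record above.
        nodes = list(graph.nodes)
        listener = nodes[np.random.randint(0, len(nodes))]
        if graph.is_directed():
            neighbours = list(graph.predecessors(listener))
        else:
            neighbours = list(graph.neighbors(listener))
        if not neighbours:
            return status  # isolated node: nothing to copy
        speaker = neighbours[np.random.randint(0, len(neighbours))]
        status[listener] = status[speaker]
        return status

    g = nx.erdos_renyi_graph(30, 0.2, seed=0)
    status = {n: np.random.randint(0, 2) for n in g.nodes}
    status = voter_step(g, status)

The empty-neighbour guard is an addition of this sketch: the record's body_1 would raise on np.random.randint(0, 0) for an isolated listener, which callers presumably avoid by running on connected graphs.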
{"blob_id": "605fe583dae60407af5edde0ca4d7a4d76925af2", "bodies": ["super().__init__()\nself.device = device\nself.softmax = nn.Softmax(dim=-1)\nself.dropout = None if p is None else nn.Dropout(p=p)", "d_head = q.size(-1)\nQ_uT = q + uT\nAC = Q_uT @ k.transpose(-1, -2)\nQ_vT = q + vT\nBD = Q_vT @ _r.transpose(-1, -2)\nBD = self.left_shift(BD)\nweight = AC + BD\nweight /= math.sqrt(d_head)\nif mask is not None:\n weight.masked_fill(mask == False, 1e-12)\nweight = self.softmax(weight)\nif self.dropout is not None:\n weight = self.dropout(weight)\noutput = weight @ v\nreturn output", "bs, head, q_len, k_len = x.size()\nm_len = k_len - q_len\nrow_pad = torch.zeros(bs, head, q_len, 1).to(self.device)\ncol_pad = torch.zeros(bs, head, m_len, k_len + 1).to(self.device)\nx = torch.cat([row_pad, x], dim=-1)\nx = torch.cat([x, col_pad], dim=-2)\nx = x.view(bs, head, -1, k_len)\nx_1 = x[:, :, 1:q_len + 1, :q_len].tril(diagonal=1)\nx_2 = x[:, :, :q_len, q_len:k_len]\noutput = torch.cat([x_2, x_1], dim=-1)\nreturn output"], "bodies_text": "<|body_start_0|>\n super().__init__()\n self.device = device\n self.softmax = nn.Softmax(dim=-1)\n self.dropout = None if p is None else nn.Dropout(p=p)\n<|end_body_0|>\n\n<|body_start_1|>\n d_head = q.size(-1)\n Q_uT = q + uT\n AC = Q_uT @ k.transpose(-1, -2)\n Q_vT = q + vT\n BD = Q_vT @ _r.transpose(-1, -2)\n BD = self.left_shift(BD)\n weight = AC + BD\n weight /= math.sqrt(d_head)\n if mask is not None:\n weight.masked_fill(mask == False, 1e-12)\n weight = self.softmax(weight)\n if self.dropout is not None:\n weight = self.dropout(weight)\n output = weight @ v\n return output\n<|end_body_1|>\n\n<|body_start_2|>\n bs, head, q_len, k_len = x.size()\n m_len = k_len - q_len\n row_pad = torch.zeros(bs, head, q_len, 1).to(self.device)\n col_pad = torch.zeros(bs, head, m_len, k_len + 1).to(self.device)\n x = torch.cat([row_pad, x], dim=-1)\n x = torch.cat([x, col_pad], dim=-2)\n x = x.view(bs, head, -1, k_len)\n x_1 = x[:, :, 1:q_len + 1, :q_len].tril(diagonal=1)\n x_2 = x[:, :, :q_len, q_len:k_len]\n output = torch.cat([x_2, x_1], dim=-1)\n return output\n<|end_body_2|>\n", "class_docstring": "", "class_name": "ScaledDotProductAttention", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ScaledDotProductAttention:\n\n def __init__(self, device, p: float=None):\n \"\"\"scaled dot product attention 구현 클래스 Args: device: device type p (float): dropout probability\"\"\"\n <|body_0|>\n\n def forward(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, _r: torch.Tensor, uT: torch.Tensor, vT: torch.Tensor, mask: torch.Tensor):\n \"\"\"forward 함수 Args: q (torch.Tensor(bs, head, q_len, d_head)): query k (torch.Tensor(bs, head, k_len, d_head)): key v (torch.Tensor(bs, head, k_len, d_head)): value _r (torch.Tensor(head, m_len + q_len, d_head)): position embedding uT (torch.Tensor(n_head, 1, d_head)): segment bias vT (torch.Tensor(n_head, 1, d_head)): position bias mask (torch.Tenso(bs, 1, q_len, k_len)): masking idx Returns output (torch.Tensor(bs, head, q_len, d_head)): forward 출력값\"\"\"\n <|body_1|>\n\n def left_shift(self, x: torch.Tensor):\n \"\"\"left shift 함수 Args: x (torch.Tensor(bs, head, q_len, k_len)): before left shift BD Returns: x (torch.Tensor(bs, head, q_len, k_len)): left shifted BD ex. 
(0) (2) (3) (4) (5) a00 a01 a02 0 a00 a01 a02 0 a00 a01 a02 0 a10 a02 0 0 a10 a11 a12 => 0 a10 a11 a12 => a02 0 a10 => a11 a12 0 => a11 a12 0 a20 a21 a22 0 a20 a21 a22 a11 a12 0 a20 a21 a22 a20 a21 a22 a19 a21 a22\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.device = device\n self.softmax = nn.Softmax(dim=-1)\n self.dropout = None if p is None else nn.Dropout(p=p)\n<|end_body_0|>\n\n<|body_start_1|>\n d_head = q.size(-1)\n Q_uT = q + uT\n AC = Q_uT @ k.transpose(-1, -2)\n Q_vT = q + vT\n BD = Q_vT @ _r.transpose(-1, -2)\n BD = self.left_shift(BD)\n weight = AC + BD\n weight /= math.sqrt(d_head)\n if mask is not None:\n weight.masked_fill(mask == False, 1e-12)\n weight = self.softmax(weight)\n if self.dropout is not None:\n weight = self.dropout(weight)\n output = weight @ v\n return output\n<|end_body_1|>\n\n<|body_start_2|>\n bs, head, q_len, k_len = x.size()\n m_len = k_len - q_len\n row_pad = torch.zeros(bs, head, q_len, 1).to(self.device)\n col_pad = torch.zeros(bs, head, m_len, k_len + 1).to(self.device)\n x = torch.cat([row_pad, x], dim=-1)\n x = torch.cat([x, col_pad], dim=-2)\n x = x.view(bs, head, -1, k_len)\n x_1 = x[:, :, 1:q_len + 1, :q_len].tril(diagonal=1)\n x_2 = x[:, :, :q_len, q_len:k_len]\n output = torch.cat([x_2, x_1], dim=-1)\n return output\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000225", "length_bytes": 3093, "license_type": "no_license", "methods": [{"docstring": "scaled dot product attention 구현 클래스 Args: device: device type p (float): dropout probability", "name": "__init__", "signature": "def __init__(self, device, p: float=None)"}, {"docstring": "forward 함수 Args: q (torch.Tensor(bs, head, q_len, d_head)): query k (torch.Tensor(bs, head, k_len, d_head)): key v (torch.Tensor(bs, head, k_len, d_head)): value _r (torch.Tensor(head, m_len + q_len, d_head)): position embedding uT (torch.Tensor(n_head, 1, d_head)): segment bias vT (torch.Tensor(n_head, 1, d_head)): position bias mask (torch.Tenso(bs, 1, q_len, k_len)): masking idx Returns output (torch.Tensor(bs, head, q_len, d_head)): forward 출력값", "name": "forward", "signature": "def forward(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, _r: torch.Tensor, uT: torch.Tensor, vT: torch.Tensor, mask: torch.Tensor)"}, {"docstring": "left shift 함수 Args: x (torch.Tensor(bs, head, q_len, k_len)): before left shift BD Returns: x (torch.Tensor(bs, head, q_len, k_len)): left shifted BD ex. 
(0) (2) (3) (4) (5) a00 a01 a02 0 a00 a01 a02 0 a00 a01 a02 0 a10 a02 0 0 a10 a11 a12 => 0 a10 a11 a12 => a02 0 a10 => a11 a12 0 => a11 a12 0 a20 a21 a22 0 a20 a21 a22 a11 a12 0 a20 a21 a22 a20 a21 a22 a19 a21 a22", "name": "left_shift", "signature": "def left_shift(self, x: torch.Tensor)"}], "n_methods": 3, "prompt": "Implement the Python class `ScaledDotProductAttention` described below.\n\nClass description:\nImplement the ScaledDotProductAttention class.\n\nMethod signatures and docstrings:\n- def __init__(self, device, p: float=None): scaled dot product attention 구현 클래스 Args: device: device type p (float): dropout probability\n- def forward(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, _r: torch.Tensor, uT: torch.Tensor, vT: torch.Tensor, mask: torch.Tensor): forward 함수 Args: q (torch.Tensor(bs, head, q_len, d_head)): query k (torch.Tensor(bs, head, k_len, d_head)): key v (torch.Tensor(bs, head, k_len, d_head)): value _r (torch.Tensor(head, m_len + q_len, d_head)): position embedding uT (torch.Tensor(n_head, 1, d_head)): segment bias vT (torch.Tensor(n_head, 1, d_head)): position bias mask (torch.Tenso(bs, 1, q_len, k_len)): masking idx Returns output (torch.Tensor(bs, head, q_len, d_head)): forward 출력값\n- def left_shift(self, x: torch.Tensor): left shift 함수 Args: x (torch.Tensor(bs, head, q_len, k_len)): before left shift BD Returns: x (torch.Tensor(bs, head, q_len, k_len)): left shifted BD ex. (0) (2) (3) (4) (5) a00 a01 a02 0 a00 a01 a02 0 a00 a01 a02 0 a10 a02 0 0 a10 a11 a12 => 0 a10 a11 a12 => a02 0 a10 => a11 a12 0 => a11 a12 0 a20 a21 a22 0 a20 a21 a22 a11 a12 0 a20 a21 a22 a20 a21 a22 a19 a21 a22", "prompted_full_text": "Implement the Python class `ScaledDotProductAttention` described below.\n\nClass description:\nImplement the ScaledDotProductAttention class.\n\nMethod signatures and docstrings:\n- def __init__(self, device, p: float=None): scaled dot product attention 구현 클래스 Args: device: device type p (float): dropout probability\n- def forward(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, _r: torch.Tensor, uT: torch.Tensor, vT: torch.Tensor, mask: torch.Tensor): forward 함수 Args: q (torch.Tensor(bs, head, q_len, d_head)): query k (torch.Tensor(bs, head, k_len, d_head)): key v (torch.Tensor(bs, head, k_len, d_head)): value _r (torch.Tensor(head, m_len + q_len, d_head)): position embedding uT (torch.Tensor(n_head, 1, d_head)): segment bias vT (torch.Tensor(n_head, 1, d_head)): position bias mask (torch.Tenso(bs, 1, q_len, k_len)): masking idx Returns output (torch.Tensor(bs, head, q_len, d_head)): forward 출력값\n- def left_shift(self, x: torch.Tensor): left shift 함수 Args: x (torch.Tensor(bs, head, q_len, k_len)): before left shift BD Returns: x (torch.Tensor(bs, head, q_len, k_len)): left shifted BD ex. 
(0) (2) (3) (4) (5) a00 a01 a02 0 a00 a01 a02 0 a00 a01 a02 0 a10 a02 0 0 a10 a11 a12 => 0 a10 a11 a12 => a02 0 a10 => a11 a12 0 => a11 a12 0 a20 a21 a22 0 a20 a21 a22 a11 a12 0 a20 a21 a22 a20 a21 a22 a19 a21 a22\n\n<|skeleton|>\nclass ScaledDotProductAttention:\n\n def __init__(self, device, p: float=None):\n \"\"\"scaled dot product attention 구현 클래스 Args: device: device type p (float): dropout probability\"\"\"\n <|body_0|>\n\n def forward(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, _r: torch.Tensor, uT: torch.Tensor, vT: torch.Tensor, mask: torch.Tensor):\n \"\"\"forward 함수 Args: q (torch.Tensor(bs, head, q_len, d_head)): query k (torch.Tensor(bs, head, k_len, d_head)): key v (torch.Tensor(bs, head, k_len, d_head)): value _r (torch.Tensor(head, m_len + q_len, d_head)): position embedding uT (torch.Tensor(n_head, 1, d_head)): segment bias vT (torch.Tensor(n_head, 1, d_head)): position bias mask (torch.Tenso(bs, 1, q_len, k_len)): masking idx Returns output (torch.Tensor(bs, head, q_len, d_head)): forward 출력값\"\"\"\n <|body_1|>\n\n def left_shift(self, x: torch.Tensor):\n \"\"\"left shift 함수 Args: x (torch.Tensor(bs, head, q_len, k_len)): before left shift BD Returns: x (torch.Tensor(bs, head, q_len, k_len)): left shifted BD ex. (0) (2) (3) (4) (5) a00 a01 a02 0 a00 a01 a02 0 a00 a01 a02 0 a10 a02 0 0 a10 a11 a12 => 0 a10 a11 a12 => a02 0 a10 => a11 a12 0 => a11 a12 0 a20 a21 a22 0 a20 a21 a22 a11 a12 0 a20 a21 a22 a20 a21 a22 a19 a21 a22\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.device = device\n self.softmax = nn.Softmax(dim=-1)\n self.dropout = None if p is None else nn.Dropout(p=p)\n<|end_body_0|>\n\n<|body_start_1|>\n d_head = q.size(-1)\n Q_uT = q + uT\n AC = Q_uT @ k.transpose(-1, -2)\n Q_vT = q + vT\n BD = Q_vT @ _r.transpose(-1, -2)\n BD = self.left_shift(BD)\n weight = AC + BD\n weight /= math.sqrt(d_head)\n if mask is not None:\n weight.masked_fill(mask == False, 1e-12)\n weight = self.softmax(weight)\n if self.dropout is not None:\n weight = self.dropout(weight)\n output = weight @ v\n return output\n<|end_body_1|>\n\n<|body_start_2|>\n bs, head, q_len, k_len = x.size()\n m_len = k_len - q_len\n row_pad = torch.zeros(bs, head, q_len, 1).to(self.device)\n col_pad = torch.zeros(bs, head, m_len, k_len + 1).to(self.device)\n x = torch.cat([row_pad, x], dim=-1)\n x = torch.cat([x, col_pad], dim=-2)\n x = x.view(bs, head, -1, k_len)\n x_1 = x[:, :, 1:q_len + 1, :q_len].tril(diagonal=1)\n x_2 = x[:, :, :q_len, q_len:k_len]\n output = torch.cat([x_2, x_1], dim=-1)\n return output\n<|end_body_2|>\n", "revision_id": "e53a8f0639d004ddad9d33db4884fc4bd98dfcfd", "skeleton": "<|skeleton|>\nclass ScaledDotProductAttention:\n\n def __init__(self, device, p: float=None):\n \"\"\"scaled dot product attention 구현 클래스 Args: device: device type p (float): dropout probability\"\"\"\n <|body_0|>\n\n def forward(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, _r: torch.Tensor, uT: torch.Tensor, vT: torch.Tensor, mask: torch.Tensor):\n \"\"\"forward 함수 Args: q (torch.Tensor(bs, head, q_len, d_head)): query k (torch.Tensor(bs, head, k_len, d_head)): key v (torch.Tensor(bs, head, k_len, d_head)): value _r (torch.Tensor(head, m_len + q_len, d_head)): position embedding uT (torch.Tensor(n_head, 1, d_head)): segment bias vT (torch.Tensor(n_head, 1, d_head)): position bias mask (torch.Tenso(bs, 1, q_len, k_len)): masking idx Returns output (torch.Tensor(bs, head, q_len, d_head)): forward 출력값\"\"\"\n <|body_1|>\n\n def left_shift(self, x: 
torch.Tensor):\n \"\"\"left shift 함수 Args: x (torch.Tensor(bs, head, q_len, k_len)): before left shift BD Returns: x (torch.Tensor(bs, head, q_len, k_len)): left shifted BD ex. (0) (2) (3) (4) (5) a00 a01 a02 0 a00 a01 a02 0 a00 a01 a02 0 a10 a02 0 0 a10 a11 a12 => 0 a10 a11 a12 => a02 0 a10 => a11 a12 0 => a11 a12 0 a20 a21 a22 0 a20 a21 a22 a11 a12 0 a20 a21 a22 a20 a21 a22 a19 a21 a22\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ScaledDotProductAttention:\n def __init__(self, device, p: float=None):\n \"\"\"scaled dot product attention 구현 클래스 Args: device: device type p (float): dropout probability\"\"\"\n super().__init__()\n self.device = device\n self.softmax = nn.Softmax(dim=-1)\n self.dropout = None if p is None else nn.Dropout(p=p)\n\n def forward(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, _r: torch.Tensor, uT: torch.Tensor, vT: torch.Tensor, mask: torch.Tensor):\n \"\"\"forward 함수 Args: q (torch.Tensor(bs, head, q_len, d_head)): query k (torch.Tensor(bs, head, k_len, d_head)): key v (torch.Tensor(bs, head, k_len, d_head)): value _r (torch.Tensor(head, m_len + q_len, d_head)): position embedding uT (torch.Tensor(n_head, 1, d_head)): segment bias vT (torch.Tensor(n_head, 1, d_head)): position bias mask (torch.Tenso(bs, 1, q_len, k_len)): masking idx Returns output (torch.Tensor(bs, head, q_len, d_head)): forward 출력값\"\"\"\n d_head = q.size(-1)\n Q_uT = q + uT\n AC = Q_uT @ k.transpose(-1, -2)\n Q_vT = q + vT\n BD = Q_vT @ _r.transpose(-1, -2)\n BD = self.left_shift(BD)\n weight = AC + BD\n weight /= math.sqrt(d_head)\n if mask is not None:\n weight.masked_fill(mask == False, 1e-12)\n weight = self.softmax(weight)\n if self.dropout is not None:\n weight = self.dropout(weight)\n output = weight @ v\n return output\n\n def left_shift(self, x: torch.Tensor):\n \"\"\"left shift 함수 Args: x (torch.Tensor(bs, head, q_len, k_len)): before left shift BD Returns: x (torch.Tensor(bs, head, q_len, k_len)): left shifted BD ex. (0) (2) (3) (4) (5) a00 a01 a02 0 a00 a01 a02 0 a00 a01 a02 0 a10 a02 0 0 a10 a11 a12 => 0 a10 a11 a12 => a02 0 a10 => a11 a12 0 => a11 a12 0 a20 a21 a22 0 a20 a21 a22 a11 a12 0 a20 a21 a22 a20 a21 a22 a19 a21 a22\"\"\"\n bs, head, q_len, k_len = x.size()\n m_len = k_len - q_len\n row_pad = torch.zeros(bs, head, q_len, 1).to(self.device)\n col_pad = torch.zeros(bs, head, m_len, k_len + 1).to(self.device)\n x = torch.cat([row_pad, x], dim=-1)\n x = torch.cat([x, col_pad], dim=-2)\n x = x.view(bs, head, -1, k_len)\n x_1 = x[:, :, 1:q_len + 1, :q_len].tril(diagonal=1)\n x_2 = x[:, :, :q_len, q_len:k_len]\n output = torch.cat([x_2, x_1], dim=-1)\n return output\n", "source": "the_stack_v2_python_sparse", "source_path": "model/attentions/scale_dot_product.py", "source_repo": "GJ98/transformer-xl", "split": "val", "star_events_count": 1}
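The ScaledDotProductAttention bodies in the record above assume math, torch, and torch.nn as nn are imported. One point a reader may trip over: in body_1, weight.masked_fill(mask == False, 1e-12) is the out-of-place variant, so its return value is discarded and the mask never affects weight as written; PyTorch's in-place form is masked_fill_, and attention masks are more commonly filled with a large negative value before the softmax. A small demonstration of the difference:

    import torch

    weight = torch.ones(2, 2)
    mask = torch.tensor([[True, False], [True, True]])

    weight.masked_fill(mask == False, 1e-12)           # out-of-place: result discarded, weight unchanged
    weight = weight.masked_fill(~mask, float('-inf'))  # reassign (or use masked_fill_) so the mask takes effect
    probs = torch.softmax(weight, dim=-1)              # masked entry now receives ~0 probability

The same reassignment fix would presumably apply in body_1 right before the softmax; the record is left verbatim here since it is a dataset capture of the upstream source.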
{"blob_id": "0090491565b34f4fa1d292a1c1da76421e1b3b4e", "bodies": ["self.min_cutoff = float(min_cutoff)\nself.beta = float(beta)\nself.d_cutoff = float(d_cutoff)\nself.x_prev = float(x0)\nself.dx_prev = float(dx0)\nself.t_prev = float(t0)", "t_e = t - self.t_prev\na_d = smoothing_factor(t_e, self.d_cutoff)\ndx = (x - self.x_prev) / t_e\ndx_hat = exponential_smoothing(a_d, dx, self.dx_prev)\ncutoff = self.min_cutoff + self.beta * abs(dx_hat)\na = smoothing_factor(t_e, cutoff)\nx_hat = exponential_smoothing(a, x, self.x_prev)\nself.x_prev = x_hat\nself.dx_prev = dx_hat\nself.t_prev = t\nreturn x_hat"], "bodies_text": "<|body_start_0|>\n self.min_cutoff = float(min_cutoff)\n self.beta = float(beta)\n self.d_cutoff = float(d_cutoff)\n self.x_prev = float(x0)\n self.dx_prev = float(dx0)\n self.t_prev = float(t0)\n<|end_body_0|>\n\n<|body_start_1|>\n t_e = t - self.t_prev\n a_d = smoothing_factor(t_e, self.d_cutoff)\n dx = (x - self.x_prev) / t_e\n dx_hat = exponential_smoothing(a_d, dx, self.dx_prev)\n cutoff = self.min_cutoff + self.beta * abs(dx_hat)\n a = smoothing_factor(t_e, cutoff)\n x_hat = exponential_smoothing(a, x, self.x_prev)\n self.x_prev = x_hat\n self.dx_prev = dx_hat\n self.t_prev = t\n return x_hat\n<|end_body_1|>\n", "class_docstring": "This code was taken from: https://github.com/jaantollander/OneEuroFilter and written by Jaan Tollander de Balsch We first need to init the OneEuroFilter with the base parameters and one data point. Then we repeatedly call OneEuroFilter(t,x) on the data points to filter i.e. oneEuro = OneEuroFilter(t0,x0,min_cutoff=50.0,beta=4.0) ... x_filt = oneEuro(ti,xi)", "class_name": "OneEuroFilter", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass OneEuroFilter:\n \"\"\"This code was taken from: https://github.com/jaantollander/OneEuroFilter and written by Jaan Tollander de Balsch We first need to init the OneEuroFilter with the base parameters and one data point. Then we repeatedly call OneEuroFilter(t,x) on the data points to filter i.e. oneEuro = OneEuroFilter(t0,x0,min_cutoff=50.0,beta=4.0) ... 
x_filt = oneEuro(ti,xi)\"\"\"\n\n def __init__(self, t0, x0, dx0=0.0, min_cutoff=1.0, beta=0.0, d_cutoff=1.0):\n \"\"\"Initialize the one euro filter.\"\"\"\n <|body_0|>\n\n def __call__(self, t, x):\n \"\"\"Compute the filtered signal.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.min_cutoff = float(min_cutoff)\n self.beta = float(beta)\n self.d_cutoff = float(d_cutoff)\n self.x_prev = float(x0)\n self.dx_prev = float(dx0)\n self.t_prev = float(t0)\n<|end_body_0|>\n\n<|body_start_1|>\n t_e = t - self.t_prev\n a_d = smoothing_factor(t_e, self.d_cutoff)\n dx = (x - self.x_prev) / t_e\n dx_hat = exponential_smoothing(a_d, dx, self.dx_prev)\n cutoff = self.min_cutoff + self.beta * abs(dx_hat)\n a = smoothing_factor(t_e, cutoff)\n x_hat = exponential_smoothing(a, x, self.x_prev)\n self.x_prev = x_hat\n self.dx_prev = dx_hat\n self.t_prev = t\n return x_hat\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000226", "length_bytes": 5769, "license_type": "no_license", "methods": [{"docstring": "Initialize the one euro filter.", "name": "__init__", "signature": "def __init__(self, t0, x0, dx0=0.0, min_cutoff=1.0, beta=0.0, d_cutoff=1.0)"}, {"docstring": "Compute the filtered signal.", "name": "__call__", "signature": "def __call__(self, t, x)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_040026", "prompt": "Implement the Python class `OneEuroFilter` described below.\n\nClass description:\nThis code was taken from: https://github.com/jaantollander/OneEuroFilter and written by Jaan Tollander de Balsch We first need to init the OneEuroFilter with the base parameters and one data point. Then we repeatedly call OneEuroFilter(t,x) on the data points to filter i.e. oneEuro = OneEuroFilter(t0,x0,min_cutoff=50.0,beta=4.0) ... x_filt = oneEuro(ti,xi)\n\nMethod signatures and docstrings:\n- def __init__(self, t0, x0, dx0=0.0, min_cutoff=1.0, beta=0.0, d_cutoff=1.0): Initialize the one euro filter.\n- def __call__(self, t, x): Compute the filtered signal.", "prompted_full_text": "Implement the Python class `OneEuroFilter` described below.\n\nClass description:\nThis code was taken from: https://github.com/jaantollander/OneEuroFilter and written by Jaan Tollander de Balsch We first need to init the OneEuroFilter with the base parameters and one data point. Then we repeatedly call OneEuroFilter(t,x) on the data points to filter i.e. oneEuro = OneEuroFilter(t0,x0,min_cutoff=50.0,beta=4.0) ... x_filt = oneEuro(ti,xi)\n\nMethod signatures and docstrings:\n- def __init__(self, t0, x0, dx0=0.0, min_cutoff=1.0, beta=0.0, d_cutoff=1.0): Initialize the one euro filter.\n- def __call__(self, t, x): Compute the filtered signal.\n\n<|skeleton|>\nclass OneEuroFilter:\n \"\"\"This code was taken from: https://github.com/jaantollander/OneEuroFilter and written by Jaan Tollander de Balsch We first need to init the OneEuroFilter with the base parameters and one data point. Then we repeatedly call OneEuroFilter(t,x) on the data points to filter i.e. oneEuro = OneEuroFilter(t0,x0,min_cutoff=50.0,beta=4.0) ... 
x_filt = oneEuro(ti,xi)\"\"\"\n\n def __init__(self, t0, x0, dx0=0.0, min_cutoff=1.0, beta=0.0, d_cutoff=1.0):\n \"\"\"Initialize the one euro filter.\"\"\"\n <|body_0|>\n\n def __call__(self, t, x):\n \"\"\"Compute the filtered signal.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.min_cutoff = float(min_cutoff)\n self.beta = float(beta)\n self.d_cutoff = float(d_cutoff)\n self.x_prev = float(x0)\n self.dx_prev = float(dx0)\n self.t_prev = float(t0)\n<|end_body_0|>\n\n<|body_start_1|>\n t_e = t - self.t_prev\n a_d = smoothing_factor(t_e, self.d_cutoff)\n dx = (x - self.x_prev) / t_e\n dx_hat = exponential_smoothing(a_d, dx, self.dx_prev)\n cutoff = self.min_cutoff + self.beta * abs(dx_hat)\n a = smoothing_factor(t_e, cutoff)\n x_hat = exponential_smoothing(a, x, self.x_prev)\n self.x_prev = x_hat\n self.dx_prev = dx_hat\n self.t_prev = t\n return x_hat\n<|end_body_1|>\n", "revision_id": "9ee5016c79b4768dd44492136a3c020516cc43e5", "skeleton": "<|skeleton|>\nclass OneEuroFilter:\n \"\"\"This code was taken from: https://github.com/jaantollander/OneEuroFilter and written by Jaan Tollander de Balsch We first need to init the OneEuroFilter with the base parameters and one data point. Then we repeatedly call OneEuroFilter(t,x) on the data points to filter i.e. oneEuro = OneEuroFilter(t0,x0,min_cutoff=50.0,beta=4.0) ... x_filt = oneEuro(ti,xi)\"\"\"\n\n def __init__(self, t0, x0, dx0=0.0, min_cutoff=1.0, beta=0.0, d_cutoff=1.0):\n \"\"\"Initialize the one euro filter.\"\"\"\n <|body_0|>\n\n def __call__(self, t, x):\n \"\"\"Compute the filtered signal.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class OneEuroFilter:\n \"\"\"This code was taken from: https://github.com/jaantollander/OneEuroFilter and written by Jaan Tollander de Balsch We first need to init the OneEuroFilter with the base parameters and one data point. Then we repeatedly call OneEuroFilter(t,x) on the data points to filter i.e. oneEuro = OneEuroFilter(t0,x0,min_cutoff=50.0,beta=4.0) ... x_filt = oneEuro(ti,xi)\"\"\"\n\n def __init__(self, t0, x0, dx0=0.0, min_cutoff=1.0, beta=0.0, d_cutoff=1.0):\n \"\"\"Initialize the one euro filter.\"\"\"\n self.min_cutoff = float(min_cutoff)\n self.beta = float(beta)\n self.d_cutoff = float(d_cutoff)\n self.x_prev = float(x0)\n self.dx_prev = float(dx0)\n self.t_prev = float(t0)\n\n def __call__(self, t, x):\n \"\"\"Compute the filtered signal.\"\"\"\n t_e = t - self.t_prev\n a_d = smoothing_factor(t_e, self.d_cutoff)\n dx = (x - self.x_prev) / t_e\n dx_hat = exponential_smoothing(a_d, dx, self.dx_prev)\n cutoff = self.min_cutoff + self.beta * abs(dx_hat)\n a = smoothing_factor(t_e, cutoff)\n x_hat = exponential_smoothing(a, x, self.x_prev)\n self.x_prev = x_hat\n self.dx_prev = dx_hat\n self.t_prev = t\n return x_hat\n", "source": "the_stack_v2_python_sparse", "source_path": "SSI/signals.py", "source_repo": "BIAPT/Scripts", "split": "val", "star_events_count": 8}
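The OneEuroFilter bodies above call two free functions, smoothing_factor and exponential_smoothing, that the skeleton never defines. In the upstream jaantollander/OneEuroFilter code cited by the class docstring, they are the standard one-euro-filter helpers; a sketch matching that reference implementation:

    import math

    def smoothing_factor(t_e, cutoff):
        # Map a cutoff frequency and an elapsed time step to an EMA coefficient.
        r = 2 * math.pi * cutoff * t_e
        return r / (r + 1)

    def exponential_smoothing(a, x, x_prev):
        # Plain exponentially weighted moving average.
        return a * x + (1 - a) * x_prev

    # Usage, assuming the OneEuroFilter class from the record above is in scope:
    # f = OneEuroFilter(t0=0.0, x0=0.0, min_cutoff=50.0, beta=4.0)
    # x_filt = f(0.01, 1.0)

The beta term makes the cutoff rise with the estimated derivative, so the filter smooths heavily when the signal is slow and tracks tightly when it moves fast.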
{"blob_id": "e058866aab8b0d075db8236bb6da253417dfcecc", "bodies": ["with self.assertRaisesRegex(TypeError, 'cirq.sim.simulator.SimulatesExpectationValues.'):\n cirq_ops._get_cirq_analytical_expectation('junk')\ncirq_ops._get_cirq_analytical_expectation()\ncirq_ops._get_cirq_analytical_expectation(cirq.Simulator())\ncirq_ops._get_cirq_analytical_expectation(cirq.DensityMatrixSimulator())", "test_op = cirq_ops._get_cirq_analytical_expectation(cirq.Simulator())\nbits = cirq.GridQubit.rect(1, 5)\ntest_circuit = serializer.serialize_circuit(cirq.testing.random_circuit(bits, MOMENT_DEPTH, 0.9)).SerializeToString()\ntest_pauli_sum = serializer.serialize_paulisum(cirq.PauliSum.from_pauli_strings([cirq.Z(bits[0])])).SerializeToString()\nwith self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'symbol_names tensor must be of type string'):\n _ = test_op([test_circuit], [0], [[0]], [[test_pauli_sum]])\nwith self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'programs tensor must be of type string'):\n _ = test_op([0], ['rx'], [[0]], [test_pauli_sum])\nwith self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'real-valued numeric tensor.'):\n _ = test_op([test_circuit], ['rx'], 'junk', [[test_pauli_sum]])\nwith self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'size of symbol_names tensor must match'):\n _ = test_op([test_circuit], ['rx'], [[1, 1]], [[test_pauli_sum]])\nwith self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'size of symbol_names tensor must match'):\n _ = test_op([test_circuit], ['rx', 'ry'], [[1]], [[test_pauli_sum]])\nwith self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'first dimension of symbol_values tensor'):\n _ = test_op([test_circuit, test_circuit], ['rx'], [[1]], [test_pauli_sum])\nwith self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'pauli_sums tensor must be of type string.'):\n _ = test_op([test_circuit], ['rx'], [[1]], 0)\nwith self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'pauli_sums tensor must have the same batch shape'):\n _ = test_op([test_circuit], ['rx'], [[1]], [[test_pauli_sum], [test_pauli_sum]])\n_ = test_op([test_circuit], ['rx'], [[1]], [[test_pauli_sum]])\n_ = test_op([test_circuit], [], [[]], [[test_pauli_sum]])", "test_op = cirq_ops._get_cirq_analytical_expectation(cirq.Simulator())\nbits = cirq.GridQubit.rect(1, 5)\ntest_pauli_sum = serializer.serialize_paulisum(cirq.PauliSum.from_pauli_strings([cirq.Z(bits[0])])).SerializeToString()\ntest_empty_circuit = serializer.serialize_circuit(cirq.Circuit()).SerializeToString()\n_ = test_op([test_empty_circuit], [], [[]], [[test_pauli_sum]])", "test_op = cirq_ops._get_cirq_analytical_expectation(cirq.Simulator())\nempty_programs = tf.raw_ops.Empty(shape=(0,), dtype=tf.string)\nempty_values = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.float32)\nempty_paulis = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.string)\n_ = test_op(empty_programs, [], empty_values, empty_paulis)"], "bodies_text": "<|body_start_0|>\n with self.assertRaisesRegex(TypeError, 'cirq.sim.simulator.SimulatesExpectationValues.'):\n cirq_ops._get_cirq_analytical_expectation('junk')\n cirq_ops._get_cirq_analytical_expectation()\n cirq_ops._get_cirq_analytical_expectation(cirq.Simulator())\n cirq_ops._get_cirq_analytical_expectation(cirq.DensityMatrixSimulator())\n<|end_body_0|>\n\n<|body_start_1|>\n test_op = cirq_ops._get_cirq_analytical_expectation(cirq.Simulator())\n bits = cirq.GridQubit.rect(1, 5)\n test_circuit = serializer.serialize_circuit(cirq.testing.random_circuit(bits, MOMENT_DEPTH, 
0.9)).SerializeToString()\n test_pauli_sum = serializer.serialize_paulisum(cirq.PauliSum.from_pauli_strings([cirq.Z(bits[0])])).SerializeToString()\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'symbol_names tensor must be of type string'):\n _ = test_op([test_circuit], [0], [[0]], [[test_pauli_sum]])\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'programs tensor must be of type string'):\n _ = test_op([0], ['rx'], [[0]], [test_pauli_sum])\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'real-valued numeric tensor.'):\n _ = test_op([test_circuit], ['rx'], 'junk', [[test_pauli_sum]])\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'size of symbol_names tensor must match'):\n _ = test_op([test_circuit], ['rx'], [[1, 1]], [[test_pauli_sum]])\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'size of symbol_names tensor must match'):\n _ = test_op([test_circuit], ['rx', 'ry'], [[1]], [[test_pauli_sum]])\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'first dimension of symbol_values tensor'):\n _ = test_op([test_circuit, test_circuit], ['rx'], [[1]], [test_pauli_sum])\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'pauli_sums tensor must be of type string.'):\n _ = test_op([test_circuit], ['rx'], [[1]], 0)\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'pauli_sums tensor must have the same batch shape'):\n _ = test_op([test_circuit], ['rx'], [[1]], [[test_pauli_sum], [test_pauli_sum]])\n _ = test_op([test_circuit], ['rx'], [[1]], [[test_pauli_sum]])\n _ = test_op([test_circuit], [], [[]], [[test_pauli_sum]])\n<|end_body_1|>\n\n<|body_start_2|>\n test_op = cirq_ops._get_cirq_analytical_expectation(cirq.Simulator())\n bits = cirq.GridQubit.rect(1, 5)\n test_pauli_sum = serializer.serialize_paulisum(cirq.PauliSum.from_pauli_strings([cirq.Z(bits[0])])).SerializeToString()\n test_empty_circuit = serializer.serialize_circuit(cirq.Circuit()).SerializeToString()\n _ = test_op([test_empty_circuit], [], [[]], [[test_pauli_sum]])\n<|end_body_2|>\n\n<|body_start_3|>\n test_op = cirq_ops._get_cirq_analytical_expectation(cirq.Simulator())\n empty_programs = tf.raw_ops.Empty(shape=(0,), dtype=tf.string)\n empty_values = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.float32)\n empty_paulis = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.string)\n _ = test_op(empty_programs, [], empty_values, empty_paulis)\n<|end_body_3|>\n", "class_docstring": "Tests get_cirq_analytical_expectation.", "class_name": "CirqAnalyticalExpectationTest", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CirqAnalyticalExpectationTest:\n \"\"\"Tests get_cirq_analytical_expectation.\"\"\"\n\n def test_get_cirq_analytical_expectation_op(self):\n \"\"\"Input check the wrapper for the cirq analytical expectation op.\"\"\"\n <|body_0|>\n\n def test_cirq_analytical_expectation_op_inputs(self):\n \"\"\"Test input checking in the state sim op.\"\"\"\n <|body_1|>\n\n def test_analytic_expectation_empty_circuit(self):\n \"\"\"Test empty circuits\"\"\"\n <|body_2|>\n\n def test_analytic_expectation_no_circuit(self):\n \"\"\"Test empty tensors with no circuits at all.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n with self.assertRaisesRegex(TypeError, 'cirq.sim.simulator.SimulatesExpectationValues.'):\n cirq_ops._get_cirq_analytical_expectation('junk')\n cirq_ops._get_cirq_analytical_expectation()\n 
cirq_ops._get_cirq_analytical_expectation(cirq.Simulator())\n cirq_ops._get_cirq_analytical_expectation(cirq.DensityMatrixSimulator())\n<|end_body_0|>\n\n<|body_start_1|>\n test_op = cirq_ops._get_cirq_analytical_expectation(cirq.Simulator())\n bits = cirq.GridQubit.rect(1, 5)\n test_circuit = serializer.serialize_circuit(cirq.testing.random_circuit(bits, MOMENT_DEPTH, 0.9)).SerializeToString()\n test_pauli_sum = serializer.serialize_paulisum(cirq.PauliSum.from_pauli_strings([cirq.Z(bits[0])])).SerializeToString()\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'symbol_names tensor must be of type string'):\n _ = test_op([test_circuit], [0], [[0]], [[test_pauli_sum]])\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'programs tensor must be of type string'):\n _ = test_op([0], ['rx'], [[0]], [test_pauli_sum])\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'real-valued numeric tensor.'):\n _ = test_op([test_circuit], ['rx'], 'junk', [[test_pauli_sum]])\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'size of symbol_names tensor must match'):\n _ = test_op([test_circuit], ['rx'], [[1, 1]], [[test_pauli_sum]])\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'size of symbol_names tensor must match'):\n _ = test_op([test_circuit], ['rx', 'ry'], [[1]], [[test_pauli_sum]])\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'first dimension of symbol_values tensor'):\n _ = test_op([test_circuit, test_circuit], ['rx'], [[1]], [test_pauli_sum])\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'pauli_sums tensor must be of type string.'):\n _ = test_op([test_circuit], ['rx'], [[1]], 0)\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'pauli_sums tensor must have the same batch shape'):\n _ = test_op([test_circuit], ['rx'], [[1]], [[test_pauli_sum], [test_pauli_sum]])\n _ = test_op([test_circuit], ['rx'], [[1]], [[test_pauli_sum]])\n _ = test_op([test_circuit], [], [[]], [[test_pauli_sum]])\n<|end_body_1|>\n\n<|body_start_2|>\n test_op = cirq_ops._get_cirq_analytical_expectation(cirq.Simulator())\n bits = cirq.GridQubit.rect(1, 5)\n test_pauli_sum = serializer.serialize_paulisum(cirq.PauliSum.from_pauli_strings([cirq.Z(bits[0])])).SerializeToString()\n test_empty_circuit = serializer.serialize_circuit(cirq.Circuit()).SerializeToString()\n _ = test_op([test_empty_circuit], [], [[]], [[test_pauli_sum]])\n<|end_body_2|>\n\n<|body_start_3|>\n test_op = cirq_ops._get_cirq_analytical_expectation(cirq.Simulator())\n empty_programs = tf.raw_ops.Empty(shape=(0,), dtype=tf.string)\n empty_values = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.float32)\n empty_paulis = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.string)\n _ = test_op(empty_programs, [], empty_values, empty_paulis)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000227", "length_bytes": 23553, "license_type": "permissive", "methods": [{"docstring": "Input check the wrapper for the cirq analytical expectation op.", "name": "test_get_cirq_analytical_expectation_op", "signature": "def test_get_cirq_analytical_expectation_op(self)"}, {"docstring": "Test input checking in the state sim op.", "name": "test_cirq_analytical_expectation_op_inputs", "signature": "def test_cirq_analytical_expectation_op_inputs(self)"}, {"docstring": "Test empty circuits", "name": "test_analytic_expectation_empty_circuit", "signature": "def test_analytic_expectation_empty_circuit(self)"}, {"docstring": "Test empty tensors with no circuits at all.", "name": 
"test_analytic_expectation_no_circuit", "signature": "def test_analytic_expectation_no_circuit(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_val_001443", "prompt": "Implement the Python class `CirqAnalyticalExpectationTest` described below.\n\nClass description:\nTests get_cirq_analytical_expectation.\n\nMethod signatures and docstrings:\n- def test_get_cirq_analytical_expectation_op(self): Input check the wrapper for the cirq analytical expectation op.\n- def test_cirq_analytical_expectation_op_inputs(self): Test input checking in the state sim op.\n- def test_analytic_expectation_empty_circuit(self): Test empty circuits\n- def test_analytic_expectation_no_circuit(self): Test empty tensors with no circuits at all.", "prompted_full_text": "Implement the Python class `CirqAnalyticalExpectationTest` described below.\n\nClass description:\nTests get_cirq_analytical_expectation.\n\nMethod signatures and docstrings:\n- def test_get_cirq_analytical_expectation_op(self): Input check the wrapper for the cirq analytical expectation op.\n- def test_cirq_analytical_expectation_op_inputs(self): Test input checking in the state sim op.\n- def test_analytic_expectation_empty_circuit(self): Test empty circuits\n- def test_analytic_expectation_no_circuit(self): Test empty tensors with no circuits at all.\n\n<|skeleton|>\nclass CirqAnalyticalExpectationTest:\n \"\"\"Tests get_cirq_analytical_expectation.\"\"\"\n\n def test_get_cirq_analytical_expectation_op(self):\n \"\"\"Input check the wrapper for the cirq analytical expectation op.\"\"\"\n <|body_0|>\n\n def test_cirq_analytical_expectation_op_inputs(self):\n \"\"\"Test input checking in the state sim op.\"\"\"\n <|body_1|>\n\n def test_analytic_expectation_empty_circuit(self):\n \"\"\"Test empty circuits\"\"\"\n <|body_2|>\n\n def test_analytic_expectation_no_circuit(self):\n \"\"\"Test empty tensors with no circuits at all.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n with self.assertRaisesRegex(TypeError, 'cirq.sim.simulator.SimulatesExpectationValues.'):\n cirq_ops._get_cirq_analytical_expectation('junk')\n cirq_ops._get_cirq_analytical_expectation()\n cirq_ops._get_cirq_analytical_expectation(cirq.Simulator())\n cirq_ops._get_cirq_analytical_expectation(cirq.DensityMatrixSimulator())\n<|end_body_0|>\n\n<|body_start_1|>\n test_op = cirq_ops._get_cirq_analytical_expectation(cirq.Simulator())\n bits = cirq.GridQubit.rect(1, 5)\n test_circuit = serializer.serialize_circuit(cirq.testing.random_circuit(bits, MOMENT_DEPTH, 0.9)).SerializeToString()\n test_pauli_sum = serializer.serialize_paulisum(cirq.PauliSum.from_pauli_strings([cirq.Z(bits[0])])).SerializeToString()\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'symbol_names tensor must be of type string'):\n _ = test_op([test_circuit], [0], [[0]], [[test_pauli_sum]])\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'programs tensor must be of type string'):\n _ = test_op([0], ['rx'], [[0]], [test_pauli_sum])\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'real-valued numeric tensor.'):\n _ = test_op([test_circuit], ['rx'], 'junk', [[test_pauli_sum]])\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'size of symbol_names tensor must match'):\n _ = test_op([test_circuit], ['rx'], [[1, 1]], [[test_pauli_sum]])\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'size of symbol_names tensor must match'):\n _ = test_op([test_circuit], ['rx', 'ry'], [[1]], [[test_pauli_sum]])\n with 
self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'first dimension of symbol_values tensor'):\n _ = test_op([test_circuit, test_circuit], ['rx'], [[1]], [test_pauli_sum])\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'pauli_sums tensor must be of type string.'):\n _ = test_op([test_circuit], ['rx'], [[1]], 0)\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'pauli_sums tensor must have the same batch shape'):\n _ = test_op([test_circuit], ['rx'], [[1]], [[test_pauli_sum], [test_pauli_sum]])\n _ = test_op([test_circuit], ['rx'], [[1]], [[test_pauli_sum]])\n _ = test_op([test_circuit], [], [[]], [[test_pauli_sum]])\n<|end_body_1|>\n\n<|body_start_2|>\n test_op = cirq_ops._get_cirq_analytical_expectation(cirq.Simulator())\n bits = cirq.GridQubit.rect(1, 5)\n test_pauli_sum = serializer.serialize_paulisum(cirq.PauliSum.from_pauli_strings([cirq.Z(bits[0])])).SerializeToString()\n test_empty_circuit = serializer.serialize_circuit(cirq.Circuit()).SerializeToString()\n _ = test_op([test_empty_circuit], [], [[]], [[test_pauli_sum]])\n<|end_body_2|>\n\n<|body_start_3|>\n test_op = cirq_ops._get_cirq_analytical_expectation(cirq.Simulator())\n empty_programs = tf.raw_ops.Empty(shape=(0,), dtype=tf.string)\n empty_values = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.float32)\n empty_paulis = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.string)\n _ = test_op(empty_programs, [], empty_values, empty_paulis)\n<|end_body_3|>\n", "revision_id": "f56257bceb988b743790e1e480eac76fd036d4ff", "skeleton": "<|skeleton|>\nclass CirqAnalyticalExpectationTest:\n \"\"\"Tests get_cirq_analytical_expectation.\"\"\"\n\n def test_get_cirq_analytical_expectation_op(self):\n \"\"\"Input check the wrapper for the cirq analytical expectation op.\"\"\"\n <|body_0|>\n\n def test_cirq_analytical_expectation_op_inputs(self):\n \"\"\"Test input checking in the state sim op.\"\"\"\n <|body_1|>\n\n def test_analytic_expectation_empty_circuit(self):\n \"\"\"Test empty circuits\"\"\"\n <|body_2|>\n\n def test_analytic_expectation_no_circuit(self):\n \"\"\"Test empty tensors with no circuits at all.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CirqAnalyticalExpectationTest:\n \"\"\"Tests get_cirq_analytical_expectation.\"\"\"\n\n def test_get_cirq_analytical_expectation_op(self):\n \"\"\"Input check the wrapper for the cirq analytical expectation op.\"\"\"\n with self.assertRaisesRegex(TypeError, 'cirq.sim.simulator.SimulatesExpectationValues.'):\n cirq_ops._get_cirq_analytical_expectation('junk')\n cirq_ops._get_cirq_analytical_expectation()\n cirq_ops._get_cirq_analytical_expectation(cirq.Simulator())\n cirq_ops._get_cirq_analytical_expectation(cirq.DensityMatrixSimulator())\n\n def test_cirq_analytical_expectation_op_inputs(self):\n \"\"\"Test input checking in the state sim op.\"\"\"\n test_op = cirq_ops._get_cirq_analytical_expectation(cirq.Simulator())\n bits = cirq.GridQubit.rect(1, 5)\n test_circuit = serializer.serialize_circuit(cirq.testing.random_circuit(bits, MOMENT_DEPTH, 0.9)).SerializeToString()\n test_pauli_sum = serializer.serialize_paulisum(cirq.PauliSum.from_pauli_strings([cirq.Z(bits[0])])).SerializeToString()\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'symbol_names tensor must be of type string'):\n _ = test_op([test_circuit], [0], [[0]], [[test_pauli_sum]])\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError, 
'programs tensor must be of type string'):\n _ = test_op([0], ['rx'], [[0]], [test_pauli_sum])\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'real-valued numeric tensor.'):\n _ = test_op([test_circuit], ['rx'], 'junk', [[test_pauli_sum]])\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'size of symbol_names tensor must match'):\n _ = test_op([test_circuit], ['rx'], [[1, 1]], [[test_pauli_sum]])\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'size of symbol_names tensor must match'):\n _ = test_op([test_circuit], ['rx', 'ry'], [[1]], [[test_pauli_sum]])\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'first dimension of symbol_values tensor'):\n _ = test_op([test_circuit, test_circuit], ['rx'], [[1]], [test_pauli_sum])\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'pauli_sums tensor must be of type string.'):\n _ = test_op([test_circuit], ['rx'], [[1]], 0)\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'pauli_sums tensor must have the same batch shape'):\n _ = test_op([test_circuit], ['rx'], [[1]], [[test_pauli_sum], [test_pauli_sum]])\n _ = test_op([test_circuit], ['rx'], [[1]], [[test_pauli_sum]])\n _ = test_op([test_circuit], [], [[]], [[test_pauli_sum]])\n\n def test_analytic_expectation_empty_circuit(self):\n \"\"\"Test empty circuits\"\"\"\n test_op = cirq_ops._get_cirq_analytical_expectation(cirq.Simulator())\n bits = cirq.GridQubit.rect(1, 5)\n test_pauli_sum = serializer.serialize_paulisum(cirq.PauliSum.from_pauli_strings([cirq.Z(bits[0])])).SerializeToString()\n test_empty_circuit = serializer.serialize_circuit(cirq.Circuit()).SerializeToString()\n _ = test_op([test_empty_circuit], [], [[]], [[test_pauli_sum]])\n\n def test_analytic_expectation_no_circuit(self):\n \"\"\"Test empty tensors with no circuits at all.\"\"\"\n test_op = cirq_ops._get_cirq_analytical_expectation(cirq.Simulator())\n empty_programs = tf.raw_ops.Empty(shape=(0,), dtype=tf.string)\n empty_values = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.float32)\n empty_paulis = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.string)\n _ = test_op(empty_programs, [], empty_values, empty_paulis)\n", "source": "the_stack_v2_python_sparse", "source_path": "tensorflow_quantum/core/ops/cirq_ops_test.py", "source_repo": "tensorflow/quantum", "split": "val", "star_events_count": 1799}
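The CirqAnalyticalExpectationTest record above exercises tensorflow_quantum's wrapped cirq op; MOMENT_DEPTH, cirq_ops and serializer come from the surrounding test module and are not defined in the record. For orientation, here is a hedged plain-cirq sketch of the quantity the wrapped op batches — an analytic expectation value, computed by cirq.Simulator, which implements the SimulatesExpectationValues protocol named in the first test's error message (exact keyword names may vary across cirq versions):

    import cirq
    import sympy

    q = cirq.GridQubit(0, 0)
    circuit = cirq.Circuit(cirq.rx(sympy.Symbol('rx')).on(q))
    pauli_sum = cirq.PauliSum.from_pauli_strings([cirq.Z(q)])

    sim = cirq.Simulator()
    # Analytic <Z> after Rx(1.0) on |0> is cos(1.0) ~= 0.5403
    values = sim.simulate_expectation_values(
        circuit, observables=[pauli_sum], params={'rx': 1.0})

The tests in the record feed the same three ingredients — serialized circuits, symbol name/value tensors, and serialized Pauli sums — through the TensorFlow op boundary and assert on its shape and type checking.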
{"blob_id": "4aa213b2f92d16aef200db1ca853977c7304eb47", "bodies": ["ctx.save_for_backward(input, indices)\nop, ip, o, h, w = input.size()\no, h, w, r = indices.size()\noutput = input.new_zeros((op * r, ip * o, h, w))\next_module.active_rotated_filter_forward(input, indices, output)\nreturn output", "input, indices = ctx.saved_tensors\ngrad_in = torch.zeros_like(input)\next_module.active_rotated_filter_backward(grad_out, indices, grad_in)\nreturn (grad_in, None)"], "bodies_text": "<|body_start_0|>\n ctx.save_for_backward(input, indices)\n op, ip, o, h, w = input.size()\n o, h, w, r = indices.size()\n output = input.new_zeros((op * r, ip * o, h, w))\n ext_module.active_rotated_filter_forward(input, indices, output)\n return output\n<|end_body_0|>\n\n<|body_start_1|>\n input, indices = ctx.saved_tensors\n grad_in = torch.zeros_like(input)\n ext_module.active_rotated_filter_backward(grad_out, indices, grad_in)\n return (grad_in, None)\n<|end_body_1|>\n", "class_docstring": "Encoding the orientation information and generating orientation- sensitive features. The details are described in the paper `Align Deep Features for Oriented Object Detection _`.", "class_name": "ActiveRotatedFilterFunction", "detected_licenses": ["Apache-2.0", "BSD-2-Clause", "MIT", "BSD-3-Clause", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-unknown-license-reference", "GPL-1.0-or-later"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ActiveRotatedFilterFunction:\n \"\"\"Encoding the orientation information and generating orientation- sensitive features. The details are described in the paper `Align Deep Features for Oriented Object Detection _`.\"\"\"\n\n def forward(ctx, input: torch.Tensor, indices: torch.Tensor) -> torch.Tensor:\n \"\"\"Args: input (torch.Tensor): Input features with shape [num_output_planes, num_input_planes, num_orientations, H, W]. indices (torch.Tensor): Indices with shape [num_orientations, H, W, num_rotations]. Returns: torch.Tensor: Refined features with shape [num_output_planes * num_rotations, num_input_planes * num_orientations, H, W].\"\"\"\n <|body_0|>\n\n def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, None]:\n \"\"\"Args: grad_output (torch.Tensor): The gradiant of output features with shape [num_output_planes * num_rotations, num_input_planes * num_orientations, H, W]. Returns: torch.Tensor: The gradiant of input features with shape [num_output_planes, num_input_planes, num_orientations, H, W].\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ctx.save_for_backward(input, indices)\n op, ip, o, h, w = input.size()\n o, h, w, r = indices.size()\n output = input.new_zeros((op * r, ip * o, h, w))\n ext_module.active_rotated_filter_forward(input, indices, output)\n return output\n<|end_body_0|>\n\n<|body_start_1|>\n input, indices = ctx.saved_tensors\n grad_in = torch.zeros_like(input)\n ext_module.active_rotated_filter_backward(grad_out, indices, grad_in)\n return (grad_in, None)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000228", "length_bytes": 5626, "license_type": "permissive", "methods": [{"docstring": "Args: input (torch.Tensor): Input features with shape [num_output_planes, num_input_planes, num_orientations, H, W]. indices (torch.Tensor): Indices with shape [num_orientations, H, W, num_rotations]. 
Returns: torch.Tensor: Refined features with shape [num_output_planes * num_rotations, num_input_planes * num_orientations, H, W].", "name": "forward", "signature": "def forward(ctx, input: torch.Tensor, indices: torch.Tensor) -> torch.Tensor"}, {"docstring": "Args: grad_output (torch.Tensor): The gradient of output features with shape [num_output_planes * num_rotations, num_input_planes * num_orientations, H, W]. Returns: torch.Tensor: The gradient of input features with shape [num_output_planes, num_input_planes, num_orientations, H, W].", "name": "backward", "signature": "def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, None]"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_038974", "prompt": "Implement the Python class `ActiveRotatedFilterFunction` described below.\n\nClass description:\nEncoding the orientation information and generating orientation-sensitive features. The details are described in the paper `Align Deep Features for Oriented Object Detection _`.\n\nMethod signatures and docstrings:\n- def forward(ctx, input: torch.Tensor, indices: torch.Tensor) -> torch.Tensor: Args: input (torch.Tensor): Input features with shape [num_output_planes, num_input_planes, num_orientations, H, W]. indices (torch.Tensor): Indices with shape [num_orientations, H, W, num_rotations]. Returns: torch.Tensor: Refined features with shape [num_output_planes * num_rotations, num_input_planes * num_orientations, H, W].\n- def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, None]: Args: grad_output (torch.Tensor): The gradient of output features with shape [num_output_planes * num_rotations, num_input_planes * num_orientations, H, W]. Returns: torch.Tensor: The gradient of input features with shape [num_output_planes, num_input_planes, num_orientations, H, W].", "prompted_full_text": "Implement the Python class `ActiveRotatedFilterFunction` described below.\n\nClass description:\nEncoding the orientation information and generating orientation-sensitive features. The details are described in the paper `Align Deep Features for Oriented Object Detection _`.\n\nMethod signatures and docstrings:\n- def forward(ctx, input: torch.Tensor, indices: torch.Tensor) -> torch.Tensor: Args: input (torch.Tensor): Input features with shape [num_output_planes, num_input_planes, num_orientations, H, W]. indices (torch.Tensor): Indices with shape [num_orientations, H, W, num_rotations]. Returns: torch.Tensor: Refined features with shape [num_output_planes * num_rotations, num_input_planes * num_orientations, H, W].\n- def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, None]: Args: grad_output (torch.Tensor): The gradient of output features with shape [num_output_planes * num_rotations, num_input_planes * num_orientations, H, W]. Returns: torch.Tensor: The gradient of input features with shape [num_output_planes, num_input_planes, num_orientations, H, W].\n\n<|skeleton|>\nclass ActiveRotatedFilterFunction:\n \"\"\"Encoding the orientation information and generating orientation-sensitive features. The details are described in the paper `Align Deep Features for Oriented Object Detection _`.\"\"\"\n\n def forward(ctx, input: torch.Tensor, indices: torch.Tensor) -> torch.Tensor:\n \"\"\"Args: input (torch.Tensor): Input features with shape [num_output_planes, num_input_planes, num_orientations, H, W]. indices (torch.Tensor): Indices with shape [num_orientations, H, W, num_rotations]. 
Returns: torch.Tensor: Refined features with shape [num_output_planes * num_rotations, num_input_planes * num_orientations, H, W].\"\"\"\n <|body_0|>\n\n def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, None]:\n \"\"\"Args: grad_output (torch.Tensor): The gradient of output features with shape [num_output_planes * num_rotations, num_input_planes * num_orientations, H, W]. Returns: torch.Tensor: The gradient of input features with shape [num_output_planes, num_input_planes, num_orientations, H, W].\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ctx.save_for_backward(input, indices)\n op, ip, o, h, w = input.size()\n o, h, w, r = indices.size()\n output = input.new_zeros((op * r, ip * o, h, w))\n ext_module.active_rotated_filter_forward(input, indices, output)\n return output\n<|end_body_0|>\n\n<|body_start_1|>\n input, indices = ctx.saved_tensors\n grad_in = torch.zeros_like(input)\n ext_module.active_rotated_filter_backward(grad_out, indices, grad_in)\n return (grad_in, None)\n<|end_body_1|>\n", "revision_id": "92acc188d3a0f634de58463b6676e70df83ef808", "skeleton": "<|skeleton|>\nclass ActiveRotatedFilterFunction:\n \"\"\"Encoding the orientation information and generating orientation-sensitive features. The details are described in the paper `Align Deep Features for Oriented Object Detection _`.\"\"\"\n\n def forward(ctx, input: torch.Tensor, indices: torch.Tensor) -> torch.Tensor:\n \"\"\"Args: input (torch.Tensor): Input features with shape [num_output_planes, num_input_planes, num_orientations, H, W]. indices (torch.Tensor): Indices with shape [num_orientations, H, W, num_rotations]. Returns: torch.Tensor: Refined features with shape [num_output_planes * num_rotations, num_input_planes * num_orientations, H, W].\"\"\"\n <|body_0|>\n\n def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, None]:\n \"\"\"Args: grad_output (torch.Tensor): The gradient of output features with shape [num_output_planes * num_rotations, num_input_planes * num_orientations, H, W]. Returns: torch.Tensor: The gradient of input features with shape [num_output_planes, num_input_planes, num_orientations, H, W].\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ActiveRotatedFilterFunction:\n \"\"\"Encoding the orientation information and generating orientation-sensitive features. The details are described in the paper `Align Deep Features for Oriented Object Detection _`.\"\"\"\n\n def forward(ctx, input: torch.Tensor, indices: torch.Tensor) -> torch.Tensor:\n \"\"\"Args: input (torch.Tensor): Input features with shape [num_output_planes, num_input_planes, num_orientations, H, W]. indices (torch.Tensor): Indices with shape [num_orientations, H, W, num_rotations]. Returns: torch.Tensor: Refined features with shape [num_output_planes * num_rotations, num_input_planes * num_orientations, H, W].\"\"\"\n ctx.save_for_backward(input, indices)\n op, ip, o, h, w = input.size()\n o, h, w, r = indices.size()\n output = input.new_zeros((op * r, ip * o, h, w))\n ext_module.active_rotated_filter_forward(input, indices, output)\n return output\n\n def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, None]:\n \"\"\"Args: grad_output (torch.Tensor): The gradient of output features with shape [num_output_planes * num_rotations, num_input_planes * num_orientations, H, W]. 
Returns: torch.Tensor: The gradient of input features with shape [num_output_planes, num_input_planes, num_orientations, H, W].\"\"\"\n input, indices = ctx.saved_tensors\n grad_in = torch.zeros_like(input)\n ext_module.active_rotated_filter_backward(grad_out, indices, grad_in)\n return (grad_in, None)\n", "source": "the_stack_v2_python_sparse", "source_path": "PyTorch/contrib/cv/semantic_segmentation/MMseg-swin/mmcv/mmcv/ops/active_rotated_filter.py", "source_repo": "Ascend/ModelZoo-PyTorch", "split": "val", "star_events_count": 23}
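The record's forward/backward pair follows PyTorch's custom-op contract: the skeleton strips the base class, but the ctx argument, ctx.save_for_backward, and the positional gradient tuple ending in None (for the non-differentiable indices) are the shape of a torch.autograd.Function, which is what the original mmcv source subclasses. A minimal self-contained sketch of that same contract, using a toy mask op in place of the mmcv CUDA extension (the ScaleByMask name and the op itself are illustrative, not part of the record):

import torch
from torch.autograd import Function

class ScaleByMask(Function):
    """Toy custom op mirroring the ctx/save_for_backward/backward shape above."""

    @staticmethod
    def forward(ctx, input: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
        ctx.save_for_backward(mask)  # stash what backward will need
        return input * mask

    @staticmethod
    def backward(ctx, grad_out: torch.Tensor):
        (mask,) = ctx.saved_tensors
        # One gradient slot per forward argument; the mask, like the record's
        # `indices`, is non-differentiable, so its slot is None.
        return grad_out * mask, None

x = torch.randn(4, requires_grad=True)
m = torch.tensor([1.0, 0.0, 1.0, 1.0])
ScaleByMask.apply(x, m).sum().backward()
assert torch.equal(x.grad, m)  # d/dx sum(x * m) == m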
{"blob_id": "01e0f349625578d8179fc04c3624cb23bf02d8de", "bodies": ["selected_counselor_email = request.data.get('counselor')\ncounselor = User.objects.get(email=selected_counselor_email)\ncontent = request.data.get('content')\nreview = Review(client=request.user, counselor=counselor, create_date=datetime.now(), content=content)\nserializer = ReviewSerializer(review)\nif serializer.is_valid:\n review.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\nreturn Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "try:\n selected_review = Review.objects.get(id=request.data.get('review_id'))\nexcept:\n return Response('Review not exist', status=status.HTTP_400_BAD_REQUEST)\ncontent = request.data.get('content')\nselected_review.content = content\ntry:\n selected_review.save()\nexcept:\n return Response('Review updated failed', status=status.HTTP_400_BAD_REQUEST)\nreturn Response('Review updated success', status=status.HTTP_201_CREATED)", "if kwargs.get('id') is None:\n return Response('invalid request', status=status.HTTP_400_BAD_REQUEST)\nelse:\n review_id = kwargs.get('id')\n try:\n review_obj = Review.objects.get(id=review_id)\n review_obj.delete()\n return Response('Review was deleted', status=status.HTTP_200_OK)\n except:\n return Response('Review not Found', status=status.HTTP_400_BAD_REQUEST)"], "bodies_text": "<|body_start_0|>\n selected_counselor_email = request.data.get('counselor')\n counselor = User.objects.get(email=selected_counselor_email)\n content = request.data.get('content')\n review = Review(client=request.user, counselor=counselor, create_date=datetime.now(), content=content)\n serializer = ReviewSerializer(review)\n if serializer.is_valid:\n review.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n selected_review = Review.objects.get(id=request.data.get('review_id'))\n except:\n return Response('Review not exist', status=status.HTTP_400_BAD_REQUEST)\n content = request.data.get('content')\n selected_review.content = content\n try:\n selected_review.save()\n except:\n return Response('Review updated failed', status=status.HTTP_400_BAD_REQUEST)\n return Response('Review updated success', status=status.HTTP_201_CREATED)\n<|end_body_1|>\n\n<|body_start_2|>\n if kwargs.get('id') is None:\n return Response('invalid request', status=status.HTTP_400_BAD_REQUEST)\n else:\n review_id = kwargs.get('id')\n try:\n review_obj = Review.objects.get(id=review_id)\n review_obj.delete()\n return Response('Review was deleted', status=status.HTTP_200_OK)\n except:\n return Response('Review not Found', status=status.HTTP_400_BAD_REQUEST)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Review_upload", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Review_upload:\n\n def post(self, request):\n \"\"\"리뷰 생성 --- # /reviews/ ## headers - Authorization : Token \"key 값\" ## body parameter - counselor : 상담사 user - client : 내담자 user - content : 후기 내용\"\"\"\n <|body_0|>\n\n def put(self, request):\n \"\"\"리뷰 수정 --- # /reviews/ ## headers - Authorization : Token \"key 값\" ## body parameter - review_id : 후기 id 값 - content : 후기 내용\"\"\"\n <|body_1|>\n\n def delete(self, request, **kwargs):\n \"\"\"리뷰 삭제 --- # /reviews// ## headers - Authorization : Token \"key 값\" [ex> Token 822a24a314dfbc387128d82af6b952191dd71651]\"\"\"\n 
<|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n selected_counselor_email = request.data.get('counselor')\n counselor = User.objects.get(email=selected_counselor_email)\n content = request.data.get('content')\n review = Review(client=request.user, counselor=counselor, create_date=datetime.now(), content=content)\n serializer = ReviewSerializer(review)\n if serializer.is_valid:\n review.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n selected_review = Review.objects.get(id=request.data.get('review_id'))\n except:\n return Response('Review not exist', status=status.HTTP_400_BAD_REQUEST)\n content = request.data.get('content')\n selected_review.content = content\n try:\n selected_review.save()\n except:\n return Response('Review updated failed', status=status.HTTP_400_BAD_REQUEST)\n return Response('Review updated success', status=status.HTTP_201_CREATED)\n<|end_body_1|>\n\n<|body_start_2|>\n if kwargs.get('id') is None:\n return Response('invalid request', status=status.HTTP_400_BAD_REQUEST)\n else:\n review_id = kwargs.get('id')\n try:\n review_obj = Review.objects.get(id=review_id)\n review_obj.delete()\n return Response('Review was deleted', status=status.HTTP_200_OK)\n except:\n return Response('Review not Found', status=status.HTTP_400_BAD_REQUEST)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000229", "length_bytes": 4323, "license_type": "no_license", "methods": [{"docstring": "리뷰 생성 --- # /reviews/ ## headers - Authorization : Token \"key 값\" ## body parameter - counselor : 상담사 user - client : 내담자 user - content : 후기 내용", "name": "post", "signature": "def post(self, request)"}, {"docstring": "리뷰 수정 --- # /reviews/ ## headers - Authorization : Token \"key 값\" ## body parameter - review_id : 후기 id 값 - content : 후기 내용", "name": "put", "signature": "def put(self, request)"}, {"docstring": "리뷰 삭제 --- # /reviews// ## headers - Authorization : Token \"key 값\" [ex> Token 822a24a314dfbc387128d82af6b952191dd71651]", "name": "delete", "signature": "def delete(self, request, **kwargs)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_test_000222", "prompt": "Implement the Python class `Review_upload` described below.\n\nClass description:\nImplement the Review_upload class.\n\nMethod signatures and docstrings:\n- def post(self, request): 리뷰 생성 --- # /reviews/ ## headers - Authorization : Token \"key 값\" ## body parameter - counselor : 상담사 user - client : 내담자 user - content : 후기 내용\n- def put(self, request): 리뷰 수정 --- # /reviews/ ## headers - Authorization : Token \"key 값\" ## body parameter - review_id : 후기 id 값 - content : 후기 내용\n- def delete(self, request, **kwargs): 리뷰 삭제 --- # /reviews// ## headers - Authorization : Token \"key 값\" [ex> Token 822a24a314dfbc387128d82af6b952191dd71651]", "prompted_full_text": "Implement the Python class `Review_upload` described below.\n\nClass description:\nImplement the Review_upload class.\n\nMethod signatures and docstrings:\n- def post(self, request): 리뷰 생성 --- # /reviews/ ## headers - Authorization : Token \"key 값\" ## body parameter - counselor : 상담사 user - client : 내담자 user - content : 후기 내용\n- def put(self, request): 리뷰 수정 --- # /reviews/ ## headers - Authorization : Token \"key 값\" ## body parameter - review_id : 후기 id 값 - content : 후기 내용\n- def delete(self, request, **kwargs): 리뷰 삭제 --- # /reviews// ## headers - Authorization : Token \"key 값\" [ex> Token 
822a24a314dfbc387128d82af6b952191dd71651]\n\n<|skeleton|>\nclass Review_upload:\n\n def post(self, request):\n \"\"\"리뷰 생성 --- # /reviews/ ## headers - Authorization : Token \"key 값\" ## body parameter - counselor : 상담사 user - client : 내담자 user - content : 후기 내용\"\"\"\n <|body_0|>\n\n def put(self, request):\n \"\"\"리뷰 수정 --- # /reviews/ ## headers - Authorization : Token \"key 값\" ## body parameter - review_id : 후기 id 값 - content : 후기 내용\"\"\"\n <|body_1|>\n\n def delete(self, request, **kwargs):\n \"\"\"리뷰 삭제 --- # /reviews// ## headers - Authorization : Token \"key 값\" [ex> Token 822a24a314dfbc387128d82af6b952191dd71651]\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n selected_counselor_email = request.data.get('counselor')\n counselor = User.objects.get(email=selected_counselor_email)\n content = request.data.get('content')\n review = Review(client=request.user, counselor=counselor, create_date=datetime.now(), content=content)\n serializer = ReviewSerializer(review)\n if serializer.is_valid:\n review.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n selected_review = Review.objects.get(id=request.data.get('review_id'))\n except:\n return Response('Review not exist', status=status.HTTP_400_BAD_REQUEST)\n content = request.data.get('content')\n selected_review.content = content\n try:\n selected_review.save()\n except:\n return Response('Review updated failed', status=status.HTTP_400_BAD_REQUEST)\n return Response('Review updated success', status=status.HTTP_201_CREATED)\n<|end_body_1|>\n\n<|body_start_2|>\n if kwargs.get('id') is None:\n return Response('invalid request', status=status.HTTP_400_BAD_REQUEST)\n else:\n review_id = kwargs.get('id')\n try:\n review_obj = Review.objects.get(id=review_id)\n review_obj.delete()\n return Response('Review was deleted', status=status.HTTP_200_OK)\n except:\n return Response('Review not Found', status=status.HTTP_400_BAD_REQUEST)\n<|end_body_2|>\n", "revision_id": "751f3a8d7ef139c5d6fa17bcfe59fd05fbe3818c", "skeleton": "<|skeleton|>\nclass Review_upload:\n\n def post(self, request):\n \"\"\"리뷰 생성 --- # /reviews/ ## headers - Authorization : Token \"key 값\" ## body parameter - counselor : 상담사 user - client : 내담자 user - content : 후기 내용\"\"\"\n <|body_0|>\n\n def put(self, request):\n \"\"\"리뷰 수정 --- # /reviews/ ## headers - Authorization : Token \"key 값\" ## body parameter - review_id : 후기 id 값 - content : 후기 내용\"\"\"\n <|body_1|>\n\n def delete(self, request, **kwargs):\n \"\"\"리뷰 삭제 --- # /reviews// ## headers - Authorization : Token \"key 값\" [ex> Token 822a24a314dfbc387128d82af6b952191dd71651]\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Review_upload:\n def post(self, request):\n \"\"\"리뷰 생성 --- # /reviews/ ## headers - Authorization : Token \"key 값\" ## body parameter - counselor : 상담사 user - client : 내담자 user - content : 후기 내용\"\"\"\n selected_counselor_email = request.data.get('counselor')\n counselor = User.objects.get(email=selected_counselor_email)\n content = request.data.get('content')\n review = Review(client=request.user, counselor=counselor, create_date=datetime.now(), content=content)\n serializer = ReviewSerializer(review)\n if serializer.is_valid:\n review.save()\n return Response(serializer.data, 
status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def put(self, request):\n \"\"\"리뷰 수정 --- # /reviews/ ## headers - Authorization : Token \"key 값\" ## body parameter - review_id : 후기 id 값 - content : 후기 내용\"\"\"\n try:\n selected_review = Review.objects.get(id=request.data.get('review_id'))\n except:\n return Response('Review not exist', status=status.HTTP_400_BAD_REQUEST)\n content = request.data.get('content')\n selected_review.content = content\n try:\n selected_review.save()\n except:\n return Response('Review updated failed', status=status.HTTP_400_BAD_REQUEST)\n return Response('Review updated success', status=status.HTTP_201_CREATED)\n\n def delete(self, request, **kwargs):\n \"\"\"리뷰 삭제 --- # /reviews// ## headers - Authorization : Token \"key 값\" [ex> Token 822a24a314dfbc387128d82af6b952191dd71651]\"\"\"\n if kwargs.get('id') is None:\n return Response('invalid request', status=status.HTTP_400_BAD_REQUEST)\n else:\n review_id = kwargs.get('id')\n try:\n review_obj = Review.objects.get(id=review_id)\n review_obj.delete()\n return Response('Review was deleted', status=status.HTTP_200_OK)\n except:\n return Response('Review not Found', status=status.HTTP_400_BAD_REQUEST)\n", "source": "the_stack_v2_python_sparse", "source_path": "findme/review/views.py", "source_repo": "real-kk/findme-backend", "split": "val", "star_events_count": 4}
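The Korean docstrings describe a review CRUD API: post is "create a review" (counselor = counselor user, client = client user, content = review body), put is "update a review" (review_id = review id value, content), and delete is "delete a review"; "key 값" reads "key value". Two details in the recorded solution are worth flagging: `if serializer.is_valid:` tests the bound method object, which is always truthy, so the 400 branch is unreachable (DRF's check is `is_valid()`), and a serializer built from a model instance has no submitted data to validate in the first place; the bare `except:` clauses in put/delete would also be tighter as `except Review.DoesNotExist:`. A hedged sketch of the create handler with those points addressed (Review, ReviewSerializer, and User are the app's own model, serializer, and user model, taken from the record):

from datetime import datetime
from rest_framework import status
from rest_framework.response import Response

def post(self, request):
    counselor = User.objects.get(email=request.data.get('counselor'))
    review = Review(client=request.user, counselor=counselor,
                    create_date=datetime.now(),
                    content=request.data.get('content'))
    review.save()
    # The instance is built server-side, so there is no submitted payload for
    # is_valid() to check; serialize the saved object for the response instead.
    return Response(ReviewSerializer(review).data,
                    status=status.HTTP_201_CREATED)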
{"blob_id": "eb5f7fbe47d6475aa218edec5072073087264c1c", "bodies": ["data_frame = self.result.pd_frame()\ncategories = set(['pedestrian', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle', 'OVERALL'])\nself.assertSetEqual(categories, set(data_frame.index.values))\ndata_arr = data_frame.to_numpy()\nAPs = np.array([100.0, 90.0, 15.81683168, -1.0, -1.0, -1.0, -1.0, -1.0, 68.60561056])\nself.assertTrue(np.isclose(np.nan_to_num(data_arr[:, 0], nan=-1.0), APs).all())\noverall_scores = np.array([68.60561056, 89.68646865, 66.66666667, 68.60561056, 70.92409241, 70.92409241, 65.83333333, 70.83333333, 70.83333333, 70.83333333, 70.83333333, 70.83333333])\nself.assertTrue(np.isclose(np.nan_to_num(data_arr[-1], nan=-1.0), overall_scores).all())", "summary = self.result.summary()\noverall_reference = {'AP/pedestrian': 99.99999999999997, 'AP/rider': 89.99999999999999, 'AP/car': 15.816831683168317, 'AP/truck': -1.0, 'AP/bus': -1.0, 'AP/train': -1.0, 'AP/motorcycle': -1.0, 'AP/bicycle': -1.0, 'AP': 68.60561056105611, 'AP50': 89.68646864686468, 'AP75': 66.66666666666666, 'APs': 68.60561056105611, 'APm': 70.92409240924093, 'APl': 70.92409240924093, 'AR1': 65.83333333333333, 'AR10': 70.83333333333334, 'AR100': 70.83333333333334, 'ARs': 70.83333333333334, 'ARm': 70.83333333333334, 'ARl': 70.83333333333334}\nself.assertSetEqual(set(summary.keys()), set(overall_reference.keys()))\nfor name, score in summary.items():\n if np.isnan(score):\n score = np.nan_to_num(score, nan=-1.0)\n self.assertAlmostEqual(score, overall_reference[name])"], "bodies_text": "<|body_start_0|>\n data_frame = self.result.pd_frame()\n categories = set(['pedestrian', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle', 'OVERALL'])\n self.assertSetEqual(categories, set(data_frame.index.values))\n data_arr = data_frame.to_numpy()\n APs = np.array([100.0, 90.0, 15.81683168, -1.0, -1.0, -1.0, -1.0, -1.0, 68.60561056])\n self.assertTrue(np.isclose(np.nan_to_num(data_arr[:, 0], nan=-1.0), APs).all())\n overall_scores = np.array([68.60561056, 89.68646865, 66.66666667, 68.60561056, 70.92409241, 70.92409241, 65.83333333, 70.83333333, 70.83333333, 70.83333333, 70.83333333, 70.83333333])\n self.assertTrue(np.isclose(np.nan_to_num(data_arr[-1], nan=-1.0), overall_scores).all())\n<|end_body_0|>\n\n<|body_start_1|>\n summary = self.result.summary()\n overall_reference = {'AP/pedestrian': 99.99999999999997, 'AP/rider': 89.99999999999999, 'AP/car': 15.816831683168317, 'AP/truck': -1.0, 'AP/bus': -1.0, 'AP/train': -1.0, 'AP/motorcycle': -1.0, 'AP/bicycle': -1.0, 'AP': 68.60561056105611, 'AP50': 89.68646864686468, 'AP75': 66.66666666666666, 'APs': 68.60561056105611, 'APm': 70.92409240924093, 'APl': 70.92409240924093, 'AR1': 65.83333333333333, 'AR10': 70.83333333333334, 'AR100': 70.83333333333334, 'ARs': 70.83333333333334, 'ARm': 70.83333333333334, 'ARl': 70.83333333333334}\n self.assertSetEqual(set(summary.keys()), set(overall_reference.keys()))\n for name, score in summary.items():\n if np.isnan(score):\n score = np.nan_to_num(score, nan=-1.0)\n self.assertAlmostEqual(score, overall_reference[name])\n<|end_body_1|>\n", "class_docstring": "Test cases for BDD100K detection evaluation.", "class_name": "TestBDD100KInsSegEval", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestBDD100KInsSegEval:\n \"\"\"Test cases for BDD100K detection evaluation.\"\"\"\n\n def test_frame(self) -> None:\n \"\"\"Test case for the function frame().\"\"\"\n <|body_0|>\n\n 
def test_summary(self) -> None:\n \"\"\"Check evaluation scores' correctness.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n data_frame = self.result.pd_frame()\n categories = set(['pedestrian', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle', 'OVERALL'])\n self.assertSetEqual(categories, set(data_frame.index.values))\n data_arr = data_frame.to_numpy()\n APs = np.array([100.0, 90.0, 15.81683168, -1.0, -1.0, -1.0, -1.0, -1.0, 68.60561056])\n self.assertTrue(np.isclose(np.nan_to_num(data_arr[:, 0], nan=-1.0), APs).all())\n overall_scores = np.array([68.60561056, 89.68646865, 66.66666667, 68.60561056, 70.92409241, 70.92409241, 65.83333333, 70.83333333, 70.83333333, 70.83333333, 70.83333333, 70.83333333])\n self.assertTrue(np.isclose(np.nan_to_num(data_arr[-1], nan=-1.0), overall_scores).all())\n<|end_body_0|>\n\n<|body_start_1|>\n summary = self.result.summary()\n overall_reference = {'AP/pedestrian': 99.99999999999997, 'AP/rider': 89.99999999999999, 'AP/car': 15.816831683168317, 'AP/truck': -1.0, 'AP/bus': -1.0, 'AP/train': -1.0, 'AP/motorcycle': -1.0, 'AP/bicycle': -1.0, 'AP': 68.60561056105611, 'AP50': 89.68646864686468, 'AP75': 66.66666666666666, 'APs': 68.60561056105611, 'APm': 70.92409240924093, 'APl': 70.92409240924093, 'AR1': 65.83333333333333, 'AR10': 70.83333333333334, 'AR100': 70.83333333333334, 'ARs': 70.83333333333334, 'ARm': 70.83333333333334, 'ARl': 70.83333333333334}\n self.assertSetEqual(set(summary.keys()), set(overall_reference.keys()))\n for name, score in summary.items():\n if np.isnan(score):\n score = np.nan_to_num(score, nan=-1.0)\n self.assertAlmostEqual(score, overall_reference[name])\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000230", "length_bytes": 5629, "license_type": "permissive", "methods": [{"docstring": "Test case for the function frame().", "name": "test_frame", "signature": "def test_frame(self) -> None"}, {"docstring": "Check evaluation scores' correctness.", "name": "test_summary", "signature": "def test_summary(self) -> None"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_test_001243", "prompt": "Implement the Python class `TestBDD100KInsSegEval` described below.\n\nClass description:\nTest cases for BDD100K detection evaluation.\n\nMethod signatures and docstrings:\n- def test_frame(self) -> None: Test case for the function frame().\n- def test_summary(self) -> None: Check evaluation scores' correctness.", "prompted_full_text": "Implement the Python class `TestBDD100KInsSegEval` described below.\n\nClass description:\nTest cases for BDD100K detection evaluation.\n\nMethod signatures and docstrings:\n- def test_frame(self) -> None: Test case for the function frame().\n- def test_summary(self) -> None: Check evaluation scores' correctness.\n\n<|skeleton|>\nclass TestBDD100KInsSegEval:\n \"\"\"Test cases for BDD100K detection evaluation.\"\"\"\n\n def test_frame(self) -> None:\n \"\"\"Test case for the function frame().\"\"\"\n <|body_0|>\n\n def test_summary(self) -> None:\n \"\"\"Check evaluation scores' correctness.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n data_frame = self.result.pd_frame()\n categories = set(['pedestrian', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle', 'OVERALL'])\n self.assertSetEqual(categories, set(data_frame.index.values))\n data_arr = data_frame.to_numpy()\n APs = np.array([100.0, 90.0, 15.81683168, -1.0, -1.0, -1.0, -1.0, -1.0, 68.60561056])\n self.assertTrue(np.isclose(np.nan_to_num(data_arr[:, 0], nan=-1.0), APs).all())\n 
overall_scores = np.array([68.60561056, 89.68646865, 66.66666667, 68.60561056, 70.92409241, 70.92409241, 65.83333333, 70.83333333, 70.83333333, 70.83333333, 70.83333333, 70.83333333])\n self.assertTrue(np.isclose(np.nan_to_num(data_arr[-1], nan=-1.0), overall_scores).all())\n<|end_body_0|>\n\n<|body_start_1|>\n summary = self.result.summary()\n overall_reference = {'AP/pedestrian': 99.99999999999997, 'AP/rider': 89.99999999999999, 'AP/car': 15.816831683168317, 'AP/truck': -1.0, 'AP/bus': -1.0, 'AP/train': -1.0, 'AP/motorcycle': -1.0, 'AP/bicycle': -1.0, 'AP': 68.60561056105611, 'AP50': 89.68646864686468, 'AP75': 66.66666666666666, 'APs': 68.60561056105611, 'APm': 70.92409240924093, 'APl': 70.92409240924093, 'AR1': 65.83333333333333, 'AR10': 70.83333333333334, 'AR100': 70.83333333333334, 'ARs': 70.83333333333334, 'ARm': 70.83333333333334, 'ARl': 70.83333333333334}\n self.assertSetEqual(set(summary.keys()), set(overall_reference.keys()))\n for name, score in summary.items():\n if np.isnan(score):\n score = np.nan_to_num(score, nan=-1.0)\n self.assertAlmostEqual(score, overall_reference[name])\n<|end_body_1|>\n", "revision_id": "a4bfa9dc0c79abe90b2c06d20e84b79fbd9f2e38", "skeleton": "<|skeleton|>\nclass TestBDD100KInsSegEval:\n \"\"\"Test cases for BDD100K detection evaluation.\"\"\"\n\n def test_frame(self) -> None:\n \"\"\"Test case for the function frame().\"\"\"\n <|body_0|>\n\n def test_summary(self) -> None:\n \"\"\"Check evaluation scores' correctness.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TestBDD100KInsSegEval:\n \"\"\"Test cases for BDD100K detection evaluation.\"\"\"\n\n def test_frame(self) -> None:\n \"\"\"Test case for the function frame().\"\"\"\n data_frame = self.result.pd_frame()\n categories = set(['pedestrian', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle', 'OVERALL'])\n self.assertSetEqual(categories, set(data_frame.index.values))\n data_arr = data_frame.to_numpy()\n APs = np.array([100.0, 90.0, 15.81683168, -1.0, -1.0, -1.0, -1.0, -1.0, 68.60561056])\n self.assertTrue(np.isclose(np.nan_to_num(data_arr[:, 0], nan=-1.0), APs).all())\n overall_scores = np.array([68.60561056, 89.68646865, 66.66666667, 68.60561056, 70.92409241, 70.92409241, 65.83333333, 70.83333333, 70.83333333, 70.83333333, 70.83333333, 70.83333333])\n self.assertTrue(np.isclose(np.nan_to_num(data_arr[-1], nan=-1.0), overall_scores).all())\n\n def test_summary(self) -> None:\n \"\"\"Check evaluation scores' correctness.\"\"\"\n summary = self.result.summary()\n overall_reference = {'AP/pedestrian': 99.99999999999997, 'AP/rider': 89.99999999999999, 'AP/car': 15.816831683168317, 'AP/truck': -1.0, 'AP/bus': -1.0, 'AP/train': -1.0, 'AP/motorcycle': -1.0, 'AP/bicycle': -1.0, 'AP': 68.60561056105611, 'AP50': 89.68646864686468, 'AP75': 66.66666666666666, 'APs': 68.60561056105611, 'APm': 70.92409240924093, 'APl': 70.92409240924093, 'AR1': 65.83333333333333, 'AR10': 70.83333333333334, 'AR100': 70.83333333333334, 'ARs': 70.83333333333334, 'ARm': 70.83333333333334, 'ARl': 70.83333333333334}\n self.assertSetEqual(set(summary.keys()), set(overall_reference.keys()))\n for name, score in summary.items():\n if np.isnan(score):\n score = np.nan_to_num(score, nan=-1.0)\n self.assertAlmostEqual(score, overall_reference[name])\n", "source": "the_stack_v2_python_sparse", "source_path": "bdd100k/eval/ins_seg_test.py", "source_repo": 
"navcul3108/bdd100k", "split": "val", "star_events_count": 0}
{"blob_id": "e87b4df3c0f50e1ea52af2f404ba45c786bbfb51", "bodies": ["s = set()\nfor p in points:\n s.add((p[0], p[1]))\n\ndef ok(p1, p2, p3):\n row, col = (False, False)\n if p1[0] == p2[0] or p1[0] == p3[0] or p2[0] == p3[0]:\n row = True\n if p1[1] == p2[1] or p1[1] == p3[1] or p2[1] == p3[1]:\n col = True\n return row and col\nans = 40000 * 40001\nfor i in range(len(points)):\n for j in range(i + 1, len(points)):\n for k in range(j + 1, len(points)):\n if ok(points[i], points[j], points[k]) and (points[i][0] ^ points[j][0] ^ points[k][0], points[i][1] ^ points[j][1] ^ points[k][1]) in s:\n r1, r2, r3 = (points[i][0], points[j][0], points[k][0])\n c1, c2, c3 = (points[i][1], points[j][1], points[k][1])\n ans = min(ans, (max(r1, r2, r3) - min(r1, r2, r3)) * (max(c1, c2, c3) - min(c1, c2, c3)))\nreturn 0 if ans == 40000 * 40001 else ans", "points = sorted(points, key=lambda x: x[0])\ns = set()\nfor p in points:\n s.add((p[0], p[1]))\nans = 1600000001\nfor i in range(len(points)):\n for j in range(i + 1, len(points)):\n r1, c1, r2, c2 = (points[i][0], points[i][1], points[j][0], points[j][1])\n if points[i][0] == points[j][0] or points[i][1] == points[j][1] or (r2 - r1) * abs(c1 - c2) > ans:\n continue\n if (r1, c2) in s and (r2, c1) in s:\n ans = (r2 - r1) * abs(c1 - c2)\nreturn 0 if ans == 1600000001 else ans"], "bodies_text": "<|body_start_0|>\n s = set()\n for p in points:\n s.add((p[0], p[1]))\n\n def ok(p1, p2, p3):\n row, col = (False, False)\n if p1[0] == p2[0] or p1[0] == p3[0] or p2[0] == p3[0]:\n row = True\n if p1[1] == p2[1] or p1[1] == p3[1] or p2[1] == p3[1]:\n col = True\n return row and col\n ans = 40000 * 40001\n for i in range(len(points)):\n for j in range(i + 1, len(points)):\n for k in range(j + 1, len(points)):\n if ok(points[i], points[j], points[k]) and (points[i][0] ^ points[j][0] ^ points[k][0], points[i][1] ^ points[j][1] ^ points[k][1]) in s:\n r1, r2, r3 = (points[i][0], points[j][0], points[k][0])\n c1, c2, c3 = (points[i][1], points[j][1], points[k][1])\n ans = min(ans, (max(r1, r2, r3) - min(r1, r2, r3)) * (max(c1, c2, c3) - min(c1, c2, c3)))\n return 0 if ans == 40000 * 40001 else ans\n<|end_body_0|>\n\n<|body_start_1|>\n points = sorted(points, key=lambda x: x[0])\n s = set()\n for p in points:\n s.add((p[0], p[1]))\n ans = 1600000001\n for i in range(len(points)):\n for j in range(i + 1, len(points)):\n r1, c1, r2, c2 = (points[i][0], points[i][1], points[j][0], points[j][1])\n if points[i][0] == points[j][0] or points[i][1] == points[j][1] or (r2 - r1) * abs(c1 - c2) > ans:\n continue\n if (r1, c2) in s and (r2, c1) in s:\n ans = (r2 - r1) * abs(c1 - c2)\n return 0 if ans == 1600000001 else ans\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def minAreaRect(self, points):\n \"\"\":type points: List[List[int]] :rtype: int\"\"\"\n <|body_0|>\n\n def minAreaRect2(self, points):\n \"\"\":type points: List[List[int]] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n s = set()\n for p in points:\n s.add((p[0], p[1]))\n\n def ok(p1, p2, p3):\n row, col = (False, False)\n if p1[0] == p2[0] or p1[0] == p3[0] or p2[0] == p3[0]:\n row = True\n if p1[1] == p2[1] or p1[1] == p3[1] or p2[1] == p3[1]:\n col = True\n return row and col\n ans = 40000 * 40001\n for i in range(len(points)):\n for j in range(i + 1, len(points)):\n for k in range(j + 1, len(points)):\n if ok(points[i], points[j], points[k]) 
and (points[i][0] ^ points[j][0] ^ points[k][0], points[i][1] ^ points[j][1] ^ points[k][1]) in s:\n r1, r2, r3 = (points[i][0], points[j][0], points[k][0])\n c1, c2, c3 = (points[i][1], points[j][1], points[k][1])\n ans = min(ans, (max(r1, r2, r3) - min(r1, r2, r3)) * (max(c1, c2, c3) - min(c1, c2, c3)))\n return 0 if ans == 40000 * 40001 else ans\n<|end_body_0|>\n\n<|body_start_1|>\n points = sorted(points, key=lambda x: x[0])\n s = set()\n for p in points:\n s.add((p[0], p[1]))\n ans = 1600000001\n for i in range(len(points)):\n for j in range(i + 1, len(points)):\n r1, c1, r2, c2 = (points[i][0], points[i][1], points[j][0], points[j][1])\n if points[i][0] == points[j][0] or points[i][1] == points[j][1] or (r2 - r1) * abs(c1 - c2) > ans:\n continue\n if (r1, c2) in s and (r2, c1) in s:\n ans = (r2 - r1) * abs(c1 - c2)\n return 0 if ans == 1600000001 else ans\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000231", "length_bytes": 10493, "license_type": "no_license", "methods": [{"docstring": ":type points: List[List[int]] :rtype: int", "name": "minAreaRect", "signature": "def minAreaRect(self, points)"}, {"docstring": ":type points: List[List[int]] :rtype: int", "name": "minAreaRect2", "signature": "def minAreaRect2(self, points)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_049856", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def minAreaRect(self, points): :type points: List[List[int]] :rtype: int\n- def minAreaRect2(self, points): :type points: List[List[int]] :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def minAreaRect(self, points): :type points: List[List[int]] :rtype: int\n- def minAreaRect2(self, points): :type points: List[List[int]] :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def minAreaRect(self, points):\n \"\"\":type points: List[List[int]] :rtype: int\"\"\"\n <|body_0|>\n\n def minAreaRect2(self, points):\n \"\"\":type points: List[List[int]] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n s = set()\n for p in points:\n s.add((p[0], p[1]))\n\n def ok(p1, p2, p3):\n row, col = (False, False)\n if p1[0] == p2[0] or p1[0] == p3[0] or p2[0] == p3[0]:\n row = True\n if p1[1] == p2[1] or p1[1] == p3[1] or p2[1] == p3[1]:\n col = True\n return row and col\n ans = 40000 * 40001\n for i in range(len(points)):\n for j in range(i + 1, len(points)):\n for k in range(j + 1, len(points)):\n if ok(points[i], points[j], points[k]) and (points[i][0] ^ points[j][0] ^ points[k][0], points[i][1] ^ points[j][1] ^ points[k][1]) in s:\n r1, r2, r3 = (points[i][0], points[j][0], points[k][0])\n c1, c2, c3 = (points[i][1], points[j][1], points[k][1])\n ans = min(ans, (max(r1, r2, r3) - min(r1, r2, r3)) * (max(c1, c2, c3) - min(c1, c2, c3)))\n return 0 if ans == 40000 * 40001 else ans\n<|end_body_0|>\n\n<|body_start_1|>\n points = sorted(points, key=lambda x: x[0])\n s = set()\n for p in points:\n s.add((p[0], p[1]))\n ans = 1600000001\n for i in range(len(points)):\n for j in range(i + 1, len(points)):\n r1, c1, r2, c2 = (points[i][0], points[i][1], points[j][0], points[j][1])\n if points[i][0] == points[j][0] or points[i][1] == points[j][1] or (r2 - r1) * abs(c1 - c2) > ans:\n continue\n if (r1, c2) in s and (r2, c1) in s:\n ans = (r2 - r1) * abs(c1 - c2)\n return 0 if ans == 
1600000001 else ans\n<|end_body_1|>\n", "revision_id": "85128e7d26157b3c36eb43868269de42ea2fcb98", "skeleton": "<|skeleton|>\nclass Solution:\n\n def minAreaRect(self, points):\n \"\"\":type points: List[List[int]] :rtype: int\"\"\"\n <|body_0|>\n\n def minAreaRect2(self, points):\n \"\"\":type points: List[List[int]] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def minAreaRect(self, points):\n \"\"\":type points: List[List[int]] :rtype: int\"\"\"\n s = set()\n for p in points:\n s.add((p[0], p[1]))\n\n def ok(p1, p2, p3):\n row, col = (False, False)\n if p1[0] == p2[0] or p1[0] == p3[0] or p2[0] == p3[0]:\n row = True\n if p1[1] == p2[1] or p1[1] == p3[1] or p2[1] == p3[1]:\n col = True\n return row and col\n ans = 40000 * 40001\n for i in range(len(points)):\n for j in range(i + 1, len(points)):\n for k in range(j + 1, len(points)):\n if ok(points[i], points[j], points[k]) and (points[i][0] ^ points[j][0] ^ points[k][0], points[i][1] ^ points[j][1] ^ points[k][1]) in s:\n r1, r2, r3 = (points[i][0], points[j][0], points[k][0])\n c1, c2, c3 = (points[i][1], points[j][1], points[k][1])\n ans = min(ans, (max(r1, r2, r3) - min(r1, r2, r3)) * (max(c1, c2, c3) - min(c1, c2, c3)))\n return 0 if ans == 40000 * 40001 else ans\n\n def minAreaRect2(self, points):\n \"\"\":type points: List[List[int]] :rtype: int\"\"\"\n points = sorted(points, key=lambda x: x[0])\n s = set()\n for p in points:\n s.add((p[0], p[1]))\n ans = 1600000001\n for i in range(len(points)):\n for j in range(i + 1, len(points)):\n r1, c1, r2, c2 = (points[i][0], points[i][1], points[j][0], points[j][1])\n if points[i][0] == points[j][0] or points[i][1] == points[j][1] or (r2 - r1) * abs(c1 - c2) > ans:\n continue\n if (r1, c2) in s and (r2, c1) in s:\n ans = (r2 - r1) * abs(c1 - c2)\n return 0 if ans == 1600000001 else ans\n", "source": "the_stack_v2_python_sparse", "source_path": "src/Minimum Area Rectangle.py", "source_repo": "jsdiuf/leetcode", "split": "val", "star_events_count": 1}
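The first method's triple loop leans on an XOR identity: in an axis-aligned rectangle every x value and every y value occurs at exactly two corners, so XOR-ing the coordinates of any three corners cancels the pairs and leaves the fourth corner, e.g. corners (1,1), (1,3), (4,1) give (1^1^4, 1^3^1) = (4,3). The second method is the standard O(n^2) diagonal scan, with 1600000001 = 40000^2 + 1 as an "impossible area" sentinel. A compact restatement of the diagonal version (the function name is mine; the logic mirrors minAreaRect2):

def min_area_rect(points):
    seen = {(x, y) for x, y in points}
    best = float('inf')
    pts = sorted(map(tuple, points))
    for i, (x1, y1) in enumerate(pts):
        for x2, y2 in pts[i + 1:]:
            if x1 == x2 or y1 == y2:
                continue  # the pair is an edge, not a diagonal
            # (x1, y1)-(x2, y2) is a valid diagonal iff the two remaining
            # corners of the axis-aligned rectangle also exist.
            if (x1, y2) in seen and (x2, y1) in seen:
                best = min(best, (x2 - x1) * abs(y2 - y1))
    return 0 if best == float('inf') else best

assert min_area_rect([[1, 1], [1, 3], [3, 1], [3, 3], [2, 2]]) == 4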
{"blob_id": "8e19d12d47a691c20487c9639ef431bdec8d0945", "bodies": ["try:\n session = ElasticSession(hosts=[config.ES.ES_HOST], zone=zone)\n return session.get(id)\nexcept ValueError as ve:\n raise SearchError(ve)", "try:\n session = ElasticSession(hosts=[config.ES.ES_HOST], zone=zone)\n return session.save(id, dict_of_data, force_refresh=force_refresh)\nexcept ValueError as ve:\n raise IndexError(ve)", "try:\n session = ElasticSession(hosts=[config.ES.ES_HOST], zone=zone)\n return session.delete_date_range(date_field, max_days)\nexcept ValueError as ve:\n raise SearchError(ve)", "try:\n session = ElasticSession(hosts=[config.ES.ES_HOST], zone=zone)\n return session.create_index()\nexcept ValueError as ve:\n raise IndexError(ve)", "try:\n session = ElasticSession(hosts=[config.ES.ES_HOST], zone=zone)\n return session.delete_index()\nexcept ValueError as ve:\n raise IndexError(ve)"], "bodies_text": "<|body_start_0|>\n try:\n session = ElasticSession(hosts=[config.ES.ES_HOST], zone=zone)\n return session.get(id)\n except ValueError as ve:\n raise SearchError(ve)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n session = ElasticSession(hosts=[config.ES.ES_HOST], zone=zone)\n return session.save(id, dict_of_data, force_refresh=force_refresh)\n except ValueError as ve:\n raise IndexError(ve)\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n session = ElasticSession(hosts=[config.ES.ES_HOST], zone=zone)\n return session.delete_date_range(date_field, max_days)\n except ValueError as ve:\n raise SearchError(ve)\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n session = ElasticSession(hosts=[config.ES.ES_HOST], zone=zone)\n return session.create_index()\n except ValueError as ve:\n raise IndexError(ve)\n<|end_body_3|>\n\n<|body_start_4|>\n try:\n session = ElasticSession(hosts=[config.ES.ES_HOST], zone=zone)\n return session.delete_index()\n except ValueError as ve:\n raise IndexError(ve)\n<|end_body_4|>\n", "class_docstring": "static interface to manage elasticsearch indices and data", "class_name": "ElasticCommand", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ElasticCommand:\n \"\"\"static interface to manage elasticsearch indices and data\"\"\"\n\n def get(zone, id):\n \"\"\"raise SearchError :Example: >>> ElasticCommand.get(zone, id) True\"\"\"\n <|body_0|>\n\n def save(zone, id, dict_of_data, force_refresh=False):\n \"\"\"raise IndexError :Example: >>> ElasticCommand.save(zone, id, dict_of_data, force_refresh=False) True\"\"\"\n <|body_1|>\n\n def delete_date_range(zone, date_field, max_days):\n \"\"\"raise SearchError\"\"\"\n <|body_2|>\n\n def create_index(zone):\n \"\"\"raise IndexError :Example: >>> ElasticCommand.create_index(zone) True\"\"\"\n <|body_3|>\n\n def delete_index(zone):\n \"\"\"raise IndexError :Example: >>> ElasticCommand.delete_index(zone) True\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n session = ElasticSession(hosts=[config.ES.ES_HOST], zone=zone)\n return session.get(id)\n except ValueError as ve:\n raise SearchError(ve)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n session = ElasticSession(hosts=[config.ES.ES_HOST], zone=zone)\n return session.save(id, dict_of_data, force_refresh=force_refresh)\n except ValueError as ve:\n raise IndexError(ve)\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n session = ElasticSession(hosts=[config.ES.ES_HOST], zone=zone)\n return session.delete_date_range(date_field, max_days)\n except ValueError as ve:\n raise SearchError(ve)\n<|end_body_2|>\n\n<|body_start_3|>\n 
try:\n session = ElasticSession(hosts=[config.ES.ES_HOST], zone=zone)\n return session.create_index()\n except ValueError as ve:\n raise IndexError(ve)\n<|end_body_3|>\n\n<|body_start_4|>\n try:\n session = ElasticSession(hosts=[config.ES.ES_HOST], zone=zone)\n return session.delete_index()\n except ValueError as ve:\n raise IndexError(ve)\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000232", "length_bytes": 9648, "license_type": "no_license", "methods": [{"docstring": "raise SearchError :Example: >>> ElasticCommand.get(zone, id) True", "name": "get", "signature": "def get(zone, id)"}, {"docstring": "raise IndexError :Example: >>> ElasticCommand.save(zone, id, dict_of_data, force_refresh=False) True", "name": "save", "signature": "def save(zone, id, dict_of_data, force_refresh=False)"}, {"docstring": "raise SearchError", "name": "delete_date_range", "signature": "def delete_date_range(zone, date_field, max_days)"}, {"docstring": "raise IndexError :Example: >>> ElasticCommand.create_index(zone) True", "name": "create_index", "signature": "def create_index(zone)"}, {"docstring": "raise IndexError :Example: >>> ElasticCommand.delete_index(zone) True", "name": "delete_index", "signature": "def delete_index(zone)"}], "n_methods": 5, "prompt": "Implement the Python class `ElasticCommand` described below.\n\nClass description:\nstatic interface to manage elasticsearch indices and data\n\nMethod signatures and docstrings:\n- def get(zone, id): raise SearchError :Example: >>> ElasticCommand.get(zone, id) True\n- def save(zone, id, dict_of_data, force_refresh=False): raise IndexError :Example: >>> ElasticCommand.save(zone, id, dict_of_data, force_refresh=False) True\n- def delete_date_range(zone, date_field, max_days): raise SearchError\n- def create_index(zone): raise IndexError :Example: >>> ElasticCommand.create_index(zone) True\n- def delete_index(zone): raise IndexError :Example: >>> ElasticCommand.delete_index(zone) True", "prompted_full_text": "Implement the Python class `ElasticCommand` described below.\n\nClass description:\nstatic interface to manage elasticsearch indices and data\n\nMethod signatures and docstrings:\n- def get(zone, id): raise SearchError :Example: >>> ElasticCommand.get(zone, id) True\n- def save(zone, id, dict_of_data, force_refresh=False): raise IndexError :Example: >>> ElasticCommand.save(zone, id, dict_of_data, force_refresh=False) True\n- def delete_date_range(zone, date_field, max_days): raise SearchError\n- def create_index(zone): raise IndexError :Example: >>> ElasticCommand.create_index(zone) True\n- def delete_index(zone): raise IndexError :Example: >>> ElasticCommand.delete_index(zone) True\n\n<|skeleton|>\nclass ElasticCommand:\n \"\"\"static interface to manage elasticsearch indices and data\"\"\"\n\n def get(zone, id):\n \"\"\"raise SearchError :Example: >>> ElasticCommand.get(zone, id) True\"\"\"\n <|body_0|>\n\n def save(zone, id, dict_of_data, force_refresh=False):\n \"\"\"raise IndexError :Example: >>> ElasticCommand.save(zone, id, dict_of_data, force_refresh=False) True\"\"\"\n <|body_1|>\n\n def delete_date_range(zone, date_field, max_days):\n \"\"\"raise SearchError\"\"\"\n <|body_2|>\n\n def create_index(zone):\n \"\"\"raise IndexError :Example: >>> ElasticCommand.create_index(zone) True\"\"\"\n <|body_3|>\n\n def delete_index(zone):\n \"\"\"raise IndexError :Example: >>> ElasticCommand.delete_index(zone) True\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n session = ElasticSession(hosts=[config.ES.ES_HOST], 
zone=zone)\n return session.get(id)\n except ValueError as ve:\n raise SearchError(ve)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n session = ElasticSession(hosts=[config.ES.ES_HOST], zone=zone)\n return session.save(id, dict_of_data, force_refresh=force_refresh)\n except ValueError as ve:\n raise IndexError(ve)\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n session = ElasticSession(hosts=[config.ES.ES_HOST], zone=zone)\n return session.delete_date_range(date_field, max_days)\n except ValueError as ve:\n raise SearchError(ve)\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n session = ElasticSession(hosts=[config.ES.ES_HOST], zone=zone)\n return session.create_index()\n except ValueError as ve:\n raise IndexError(ve)\n<|end_body_3|>\n\n<|body_start_4|>\n try:\n session = ElasticSession(hosts=[config.ES.ES_HOST], zone=zone)\n return session.delete_index()\n except ValueError as ve:\n raise IndexError(ve)\n<|end_body_4|>\n", "revision_id": "5b591b3b86725cb1a64d67ce356d852683626fd8", "skeleton": "<|skeleton|>\nclass ElasticCommand:\n \"\"\"static interface to manage elasticsearch indices and data\"\"\"\n\n def get(zone, id):\n \"\"\"raise SearchError :Example: >>> ElasticCommand.get(zone, id) True\"\"\"\n <|body_0|>\n\n def save(zone, id, dict_of_data, force_refresh=False):\n \"\"\"raise IndexError :Example: >>> ElasticCommand.save(zone, id, dict_of_data, force_refresh=False) True\"\"\"\n <|body_1|>\n\n def delete_date_range(zone, date_field, max_days):\n \"\"\"raise SearchError\"\"\"\n <|body_2|>\n\n def create_index(zone):\n \"\"\"raise IndexError :Example: >>> ElasticCommand.create_index(zone) True\"\"\"\n <|body_3|>\n\n def delete_index(zone):\n \"\"\"raise IndexError :Example: >>> ElasticCommand.delete_index(zone) True\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ElasticCommand:\n \"\"\"static interface to manage elasticsearch indices and data\"\"\"\n\n def get(zone, id):\n \"\"\"raise SearchError :Example: >>> ElasticCommand.get(zone, id) True\"\"\"\n try:\n session = ElasticSession(hosts=[config.ES.ES_HOST], zone=zone)\n return session.get(id)\n except ValueError as ve:\n raise SearchError(ve)\n\n def save(zone, id, dict_of_data, force_refresh=False):\n \"\"\"raise IndexError :Example: >>> ElasticCommand.save(zone, id, dict_of_data, force_refresh=False) True\"\"\"\n try:\n session = ElasticSession(hosts=[config.ES.ES_HOST], zone=zone)\n return session.save(id, dict_of_data, force_refresh=force_refresh)\n except ValueError as ve:\n raise IndexError(ve)\n\n def delete_date_range(zone, date_field, max_days):\n \"\"\"raise SearchError\"\"\"\n try:\n session = ElasticSession(hosts=[config.ES.ES_HOST], zone=zone)\n return session.delete_date_range(date_field, max_days)\n except ValueError as ve:\n raise SearchError(ve)\n\n def create_index(zone):\n \"\"\"raise IndexError :Example: >>> ElasticCommand.create_index(zone) True\"\"\"\n try:\n session = ElasticSession(hosts=[config.ES.ES_HOST], zone=zone)\n return session.create_index()\n except ValueError as ve:\n raise IndexError(ve)\n\n def delete_index(zone):\n \"\"\"raise IndexError :Example: >>> ElasticCommand.delete_index(zone) True\"\"\"\n try:\n session = ElasticSession(hosts=[config.ES.ES_HOST], zone=zone)\n return session.delete_index()\n except ValueError as ve:\n raise IndexError(ve)\n", "source": "the_stack_v2_python_sparse", "source_path": "backend/search_index.py", "source_repo": 
"gustelle/rebot", "split": "val", "star_events_count": 0}
{"blob_id": "b0de1ff2cdf8d39f5f7538e3fec9ee9e6b55140b", "bodies": ["temp = ''\ncur_cnt = 0\npre_cnt = 0\nsum_total = 0\nfor str in s:\n if str == temp:\n cur_cnt += 1\n else:\n temp = str\n sum_total += min((cur_cnt, pre_cnt))\n pre_cnt = cur_cnt\n cur_cnt = 1\nsum_total += min((cur_cnt, pre_cnt))\nreturn sum_total", "sum_total = 0\npre = s[0]\npos = 0\npre_len = 0\nfor i in range(1, len(s)):\n if s[i] != pre:\n sum_total += 1\n pre_len = i - pos\n pos = i\n elif i - pos + 1 <= pre_len:\n sum_total += 1\n pre = s[i]\nreturn sum_total"], "bodies_text": "<|body_start_0|>\n temp = ''\n cur_cnt = 0\n pre_cnt = 0\n sum_total = 0\n for str in s:\n if str == temp:\n cur_cnt += 1\n else:\n temp = str\n sum_total += min((cur_cnt, pre_cnt))\n pre_cnt = cur_cnt\n cur_cnt = 1\n sum_total += min((cur_cnt, pre_cnt))\n return sum_total\n<|end_body_0|>\n\n<|body_start_1|>\n sum_total = 0\n pre = s[0]\n pos = 0\n pre_len = 0\n for i in range(1, len(s)):\n if s[i] != pre:\n sum_total += 1\n pre_len = i - pos\n pos = i\n elif i - pos + 1 <= pre_len:\n sum_total += 1\n pre = s[i]\n return sum_total\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def countBinarySubstrings(self, s):\n \"\"\":type s: str :rtype: int\"\"\"\n <|body_0|>\n\n def countBinarySubstrings2(self, s):\n \"\"\":type s: str :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n temp = ''\n cur_cnt = 0\n pre_cnt = 0\n sum_total = 0\n for str in s:\n if str == temp:\n cur_cnt += 1\n else:\n temp = str\n sum_total += min((cur_cnt, pre_cnt))\n pre_cnt = cur_cnt\n cur_cnt = 1\n sum_total += min((cur_cnt, pre_cnt))\n return sum_total\n<|end_body_0|>\n\n<|body_start_1|>\n sum_total = 0\n pre = s[0]\n pos = 0\n pre_len = 0\n for i in range(1, len(s)):\n if s[i] != pre:\n sum_total += 1\n pre_len = i - pos\n pos = i\n elif i - pos + 1 <= pre_len:\n sum_total += 1\n pre = s[i]\n return sum_total\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000233", "length_bytes": 2862, "license_type": "no_license", "methods": [{"docstring": ":type s: str :rtype: int", "name": "countBinarySubstrings", "signature": "def countBinarySubstrings(self, s)"}, {"docstring": ":type s: str :rtype: int", "name": "countBinarySubstrings2", "signature": "def countBinarySubstrings2(self, s)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_029581", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def countBinarySubstrings(self, s): :type s: str :rtype: int\n- def countBinarySubstrings2(self, s): :type s: str :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def countBinarySubstrings(self, s): :type s: str :rtype: int\n- def countBinarySubstrings2(self, s): :type s: str :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def countBinarySubstrings(self, s):\n \"\"\":type s: str :rtype: int\"\"\"\n <|body_0|>\n\n def countBinarySubstrings2(self, s):\n \"\"\":type s: str :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n temp = ''\n cur_cnt = 0\n pre_cnt = 0\n sum_total = 0\n for str in s:\n if str == temp:\n cur_cnt += 1\n else:\n temp = str\n sum_total += min((cur_cnt, pre_cnt))\n pre_cnt = cur_cnt\n cur_cnt = 1\n sum_total 
+= min((cur_cnt, pre_cnt))\n return sum_total\n<|end_body_0|>\n\n<|body_start_1|>\n sum_total = 0\n pre = s[0]\n pos = 0\n pre_len = 0\n for i in range(1, len(s)):\n if s[i] != pre:\n sum_total += 1\n pre_len = i - pos\n pos = i\n elif i - pos + 1 <= pre_len:\n sum_total += 1\n pre = s[i]\n return sum_total\n<|end_body_1|>\n", "revision_id": "b68b6fb256cae18f18517e2ec3060019c1e2cd3f", "skeleton": "<|skeleton|>\nclass Solution:\n\n def countBinarySubstrings(self, s):\n \"\"\":type s: str :rtype: int\"\"\"\n <|body_0|>\n\n def countBinarySubstrings2(self, s):\n \"\"\":type s: str :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def countBinarySubstrings(self, s):\n \"\"\":type s: str :rtype: int\"\"\"\n temp = ''\n cur_cnt = 0\n pre_cnt = 0\n sum_total = 0\n for str in s:\n if str == temp:\n cur_cnt += 1\n else:\n temp = str\n sum_total += min((cur_cnt, pre_cnt))\n pre_cnt = cur_cnt\n cur_cnt = 1\n sum_total += min((cur_cnt, pre_cnt))\n return sum_total\n\n def countBinarySubstrings2(self, s):\n \"\"\":type s: str :rtype: int\"\"\"\n sum_total = 0\n pre = s[0]\n pos = 0\n pre_len = 0\n for i in range(1, len(s)):\n if s[i] != pre:\n sum_total += 1\n pre_len = i - pos\n pos = i\n elif i - pos + 1 <= pre_len:\n sum_total += 1\n pre = s[i]\n return sum_total\n", "source": "the_stack_v2_python_sparse", "source_path": "countBinarySubstrings.py", "source_repo": "vinlinch/leetcode", "split": "val", "star_events_count": 0}
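Both bodies implement the classic run-length counting argument: writing s as maximal runs of equal characters, every pair of neighbouring runs of lengths a and b contributes exactly min(a, b) substrings with equal, grouped 0s and 1s. The first method tracks pre_cnt/cur_cnt on the fly (shadowing the builtin str as its loop variable along the way); the same argument stated directly with itertools.groupby (function name mine):

from itertools import groupby

def count_binary_substrings(s: str) -> int:
    # "00110011" -> run lengths [2, 2, 2, 2]; each neighbouring pair (a, b)
    # contributes min(a, b) substrings such as "01", "0011", ...
    runs = [sum(1 for _ in g) for _, g in groupby(s)]
    return sum(min(a, b) for a, b in zip(runs, runs[1:]))

assert count_binary_substrings("00110011") == 6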
{"blob_id": "4835d640c7f395215cdbfa655c6d3132e9c56a2c", "bodies": ["self.image_version = image_version\nself.cpu = cpu\nself.description = description\nself.id = id\nself.iface_group_name = iface_group_name\nself.memory = memory\nself.mtu = mtu\nself.state = state\nself.subnet = subnet\nself.tenant_id = tenant_id\nself.mtype = mtype\nself.vlan_name = vlan_name", "if dictionary is None:\n return None\nimage_version = dictionary.get('ImageVersion')\ncpu = dictionary.get('cpu')\ndescription = dictionary.get('description')\nid = dictionary.get('id')\niface_group_name = dictionary.get('ifaceGroupName')\nmemory = dictionary.get('memory')\nmtu = dictionary.get('mtu')\nstate = dictionary.get('state')\nsubnet = cohesity_management_sdk.models.bifrost_subnet.BifrostSubnet.from_dictionary(dictionary.get('subnet')) if dictionary.get('subnet') else None\ntenant_id = dictionary.get('tenantId')\nmtype = dictionary.get('type')\nvlan_name = dictionary.get('vlanName')\nreturn cls(image_version, cpu, description, id, iface_group_name, memory, mtu, state, subnet, tenant_id, mtype, vlan_name)"], "bodies_text": "<|body_start_0|>\n self.image_version = image_version\n self.cpu = cpu\n self.description = description\n self.id = id\n self.iface_group_name = iface_group_name\n self.memory = memory\n self.mtu = mtu\n self.state = state\n self.subnet = subnet\n self.tenant_id = tenant_id\n self.mtype = mtype\n self.vlan_name = vlan_name\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n image_version = dictionary.get('ImageVersion')\n cpu = dictionary.get('cpu')\n description = dictionary.get('description')\n id = dictionary.get('id')\n iface_group_name = dictionary.get('ifaceGroupName')\n memory = dictionary.get('memory')\n mtu = dictionary.get('mtu')\n state = dictionary.get('state')\n subnet = cohesity_management_sdk.models.bifrost_subnet.BifrostSubnet.from_dictionary(dictionary.get('subnet')) if dictionary.get('subnet') else None\n tenant_id = dictionary.get('tenantId')\n mtype = dictionary.get('type')\n vlan_name = dictionary.get('vlanName')\n return cls(image_version, cpu, description, id, iface_group_name, memory, mtu, state, subnet, tenant_id, mtype, vlan_name)\n<|end_body_1|>\n", "class_docstring": "Implementation of the 'BifrostConfig' model. Specifies the settings of a BifrostConfig. Its used by both Request and Response structures. Attributes: image_version (string): Specifies the bifrost image version. cpu (int): Specifies the cpu for the bifrost config. description (string): Specifies a description of the VLAN. id (int): Specifies the id of the VLAN tag. iface_group_name (string): Specifies the interface group name of the VLAN. It is in the format of .. memory (int): Specifies the memory for the bifrost config. mtu (int): Specifies the mtu for the bifrost vlan. state (string): 4 types of States UNKNOWN ACTIVE DISABLED DELETING subnet (BifrostSubn", "class_name": "BifrostConfig", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BifrostConfig:\n \"\"\"Implementation of the 'BifrostConfig' model. Specifies the settings of a BifrostConfig. Its used by both Request and Response structures. Attributes: image_version (string): Specifies the bifrost image version. cpu (int): Specifies the cpu for the bifrost config. description (string): Specifies a description of the VLAN. id (int): Specifies the id of the VLAN tag. iface_group_name (string): Specifies the interface group name of the VLAN. 
It is in the format of .. memory (int): Specifies the memory for the bifrost config. mtu (int): Specifies the mtu for the bifrost vlan. state (string): 4 types of States UNKNOWN ACTIVE DISABLED DELETING subnet (BifrostSubn\"\"\"\n\n def __init__(self, image_version=None, cpu=None, description=None, id=None, iface_group_name=None, memory=None, mtu=None, state=None, subnet=None, tenant_id=None, mtype=None, vlan_name=None):\n \"\"\"Constructor for the BifrostConfig class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.image_version = image_version\n self.cpu = cpu\n self.description = description\n self.id = id\n self.iface_group_name = iface_group_name\n self.memory = memory\n self.mtu = mtu\n self.state = state\n self.subnet = subnet\n self.tenant_id = tenant_id\n self.mtype = mtype\n self.vlan_name = vlan_name\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n image_version = dictionary.get('ImageVersion')\n cpu = dictionary.get('cpu')\n description = dictionary.get('description')\n id = dictionary.get('id')\n iface_group_name = dictionary.get('ifaceGroupName')\n memory = dictionary.get('memory')\n mtu = dictionary.get('mtu')\n state = dictionary.get('state')\n subnet = cohesity_management_sdk.models.bifrost_subnet.BifrostSubnet.from_dictionary(dictionary.get('subnet')) if dictionary.get('subnet') else None\n tenant_id = dictionary.get('tenantId')\n mtype = dictionary.get('type')\n vlan_name = dictionary.get('vlanName')\n return cls(image_version, cpu, description, id, iface_group_name, memory, mtu, state, subnet, tenant_id, mtype, vlan_name)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000234", "length_bytes": 4366, "license_type": "permissive", "methods": [{"docstring": "Constructor for the BifrostConfig class", "name": "__init__", "signature": "def __init__(self, image_version=None, cpu=None, description=None, id=None, iface_group_name=None, memory=None, mtu=None, state=None, subnet=None, tenant_id=None, mtype=None, vlan_name=None)"}, {"docstring": "Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "name": "from_dictionary", "signature": "def from_dictionary(cls, dictionary)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_054548", "prompt": "Implement the Python class `BifrostConfig` described below.\n\nClass description:\nImplementation of the 'BifrostConfig' model. Specifies the settings of a BifrostConfig. It's used by both Request and Response structures. Attributes: image_version (string): Specifies the bifrost image version. cpu (int): Specifies the cpu for the bifrost config. description (string): Specifies a description of the VLAN. id (int): Specifies the id of the VLAN tag. iface_group_name (string): Specifies the interface group name of the VLAN. It is in the format of .. memory (int): Specifies the memory for the bifrost config. mtu (int): Specifies the mtu for the bifrost vlan. 
state (string): 4 types of States UNKNOWN ACTIVE DISABLED DELETING subnet (BifrostSubn\n\nMethod signatures and docstrings:\n- def __init__(self, image_version=None, cpu=None, description=None, id=None, iface_group_name=None, memory=None, mtu=None, state=None, subnet=None, tenant_id=None, mtype=None, vlan_name=None): Constructor for the BifrostConfig class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "prompted_full_text": "Implement the Python class `BifrostConfig` described below.\n\nClass description:\nImplementation of the 'BifrostConfig' model. Specifies the settings of a BifrostConfig. It's used by both Request and Response structures. Attributes: image_version (string): Specifies the bifrost image version. cpu (int): Specifies the cpu for the bifrost config. description (string): Specifies a description of the VLAN. id (int): Specifies the id of the VLAN tag. iface_group_name (string): Specifies the interface group name of the VLAN. It is in the format of .. memory (int): Specifies the memory for the bifrost config. mtu (int): Specifies the mtu for the bifrost vlan. state (string): 4 types of States UNKNOWN ACTIVE DISABLED DELETING subnet (BifrostSubn\n\nMethod signatures and docstrings:\n- def __init__(self, image_version=None, cpu=None, description=None, id=None, iface_group_name=None, memory=None, mtu=None, state=None, subnet=None, tenant_id=None, mtype=None, vlan_name=None): Constructor for the BifrostConfig class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\n\n<|skeleton|>\nclass BifrostConfig:\n \"\"\"Implementation of the 'BifrostConfig' model. Specifies the settings of a BifrostConfig. It's used by both Request and Response structures. Attributes: image_version (string): Specifies the bifrost image version. cpu (int): Specifies the cpu for the bifrost config. description (string): Specifies a description of the VLAN. id (int): Specifies the id of the VLAN tag. iface_group_name (string): Specifies the interface group name of the VLAN. It is in the format of .. memory (int): Specifies the memory for the bifrost config. mtu (int): Specifies the mtu for the bifrost vlan. 
Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.image_version = image_version\n self.cpu = cpu\n self.description = description\n self.id = id\n self.iface_group_name = iface_group_name\n self.memory = memory\n self.mtu = mtu\n self.state = state\n self.subnet = subnet\n self.tenant_id = tenant_id\n self.mtype = mtype\n self.vlan_name = vlan_name\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n image_version = dictionary.get('ImageVersion')\n cpu = dictionary.get('cpu')\n description = dictionary.get('description')\n id = dictionary.get('id')\n iface_group_name = dictionary.get('ifaceGroupName')\n memory = dictionary.get('memory')\n mtu = dictionary.get('mtu')\n state = dictionary.get('state')\n subnet = cohesity_management_sdk.models.bifrost_subnet.BifrostSubnet.from_dictionary(dictionary.get('subnet')) if dictionary.get('subnet') else None\n tenant_id = dictionary.get('tenantId')\n mtype = dictionary.get('type')\n vlan_name = dictionary.get('vlanName')\n return cls(image_version, cpu, description, id, iface_group_name, memory, mtu, state, subnet, tenant_id, mtype, vlan_name)\n<|end_body_1|>\n", "revision_id": "e4973dfeb836266904d0369ea845513c7acf261e", "skeleton": "<|skeleton|>\nclass BifrostConfig:\n \"\"\"Implementation of the 'BifrostConfig' model. Specifies the settings of a BifrostConfig. It's used by both Request and Response structures. Attributes: image_version (string): Specifies the bifrost image version. cpu (int): Specifies the cpu for the bifrost config. description (string): Specifies a description of the VLAN. id (int): Specifies the id of the VLAN tag. iface_group_name (string): Specifies the interface group name of the VLAN. It is in the format of .. memory (int): Specifies the memory for the bifrost config. mtu (int): Specifies the mtu for the bifrost vlan. state (string): 4 types of States UNKNOWN ACTIVE DISABLED DELETING subnet (BifrostSubn\"\"\"\n\n def __init__(self, image_version=None, cpu=None, description=None, id=None, iface_group_name=None, memory=None, mtu=None, state=None, subnet=None, tenant_id=None, mtype=None, vlan_name=None):\n \"\"\"Constructor for the BifrostConfig class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class BifrostConfig:\n \"\"\"Implementation of the 'BifrostConfig' model. Specifies the settings of a BifrostConfig. It's used by both Request and Response structures. Attributes: image_version (string): Specifies the bifrost image version. cpu (int): Specifies the cpu for the bifrost config. description (string): Specifies a description of the VLAN. id (int): Specifies the id of the VLAN tag. iface_group_name (string): Specifies the interface group name of the VLAN. It is in the format of .. memory (int): Specifies the memory for the bifrost config. mtu (int): Specifies the mtu for the bifrost vlan. 
state (string): 4 types of States UNKNOWN ACTIVE DISABLED DELETING subnet (BifrostSubn\"\"\"\n\n def __init__(self, image_version=None, cpu=None, description=None, id=None, iface_group_name=None, memory=None, mtu=None, state=None, subnet=None, tenant_id=None, mtype=None, vlan_name=None):\n \"\"\"Constructor for the BifrostConfig class\"\"\"\n self.image_version = image_version\n self.cpu = cpu\n self.description = description\n self.id = id\n self.iface_group_name = iface_group_name\n self.memory = memory\n self.mtu = mtu\n self.state = state\n self.subnet = subnet\n self.tenant_id = tenant_id\n self.mtype = mtype\n self.vlan_name = vlan_name\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n if dictionary is None:\n return None\n image_version = dictionary.get('ImageVersion')\n cpu = dictionary.get('cpu')\n description = dictionary.get('description')\n id = dictionary.get('id')\n iface_group_name = dictionary.get('ifaceGroupName')\n memory = dictionary.get('memory')\n mtu = dictionary.get('mtu')\n state = dictionary.get('state')\n subnet = cohesity_management_sdk.models.bifrost_subnet.BifrostSubnet.from_dictionary(dictionary.get('subnet')) if dictionary.get('subnet') else None\n tenant_id = dictionary.get('tenantId')\n mtype = dictionary.get('type')\n vlan_name = dictionary.get('vlanName')\n return cls(image_version, cpu, description, id, iface_group_name, memory, mtu, state, subnet, tenant_id, mtype, vlan_name)\n", "source": "the_stack_v2_python_sparse", "source_path": "cohesity_management_sdk/models/bifrost_config.py", "source_repo": "cohesity/management-sdk-python", "split": "val", "star_events_count": 24}
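The record above decodes the API's camelCase JSON keys ('ImageVersion', 'ifaceGroupName', 'tenantId') into snake_case attributes via plain dictionary.get calls. A minimal sketch of the same round trip, with a hypothetical two-field stand-in (MiniConfig and the sample payload are illustrative, not part of the Cohesity SDK); the dataset's skeleton format strips decorators, so the @classmethod a real model class needs is restored here as an assumption:

import json

class MiniConfig:
    """Hypothetical two-field stand-in for the model pattern above."""

    def __init__(self, image_version=None, mtu=None):
        self.image_version = image_version
        self.mtu = mtu

    @classmethod  # assumed; the extracted skeletons in this file drop decorators
    def from_dictionary(cls, dictionary):
        if dictionary is None:
            return None
        # Keys follow the API's JSON casing, not the Python attribute names.
        return cls(image_version=dictionary.get('ImageVersion'),
                   mtu=dictionary.get('mtu'))

cfg = MiniConfig.from_dictionary(json.loads('{"ImageVersion": "1.2", "mtu": 1500}'))
assert cfg.image_version == '1.2' and cfg.mtu == 1500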
{"blob_id": "4a7b41217d5256d97d7e3a38790ddda81607c7d4", "bodies": ["super().__init__()\nself.dist_dim = dist_dim\nself.network = network\nself.use_tied_cov = use_tied_cov\nself.use_trainable_cov = use_trainable_cov\nself.max_scale = max_scale\nself.scale_nonlinearity = scale_nonlinearity\nif not self.use_trainable_cov:\n assert sigma is not None, 'sigma cannot be None for non-trainable covariance!'\n self.sigma = sigma\nif self.use_trainable_cov and self.use_tied_cov:\n self.usigma = nn.Parameter(inverse_softplus(torch.full([1, dist_dim], sigma if sigma is not None else 0.1)))", "args_tensor = self.network(tensor)\nmean_tensor = args_tensor[..., :self.dist_dim]\nif self.use_trainable_cov:\n if self.use_tied_cov:\n if self.scale_nonlinearity == 'sigmoid':\n scale_tensor = self.max_scale * torch.sigmoid(self.usigma)\n else:\n scale_tensor = F.softplus(self.usigma)\n out_dist = td.normal.Normal(mean_tensor, scale_tensor + SCALE_OFFSET)\n out_dist = td.independent.Independent(out_dist, 1)\n else:\n if self.scale_nonlinearity == 'sigmoid':\n scale_tensor = self.max_scale * torch.sigmoid(args_tensor[..., self.dist_dim:])\n else:\n scale_tensor = F.softplus(args_tensor[..., self.dist_dim:])\n out_dist = td.normal.Normal(mean_tensor, scale_tensor + SCALE_OFFSET)\n out_dist = td.independent.Independent(out_dist, 1)\nelse:\n out_dist = td.normal.Normal(mean_tensor, self.sigma)\n out_dist = td.independent.Independent(out_dist, 1)\nreturn out_dist"], "bodies_text": "<|body_start_0|>\n super().__init__()\n self.dist_dim = dist_dim\n self.network = network\n self.use_tied_cov = use_tied_cov\n self.use_trainable_cov = use_trainable_cov\n self.max_scale = max_scale\n self.scale_nonlinearity = scale_nonlinearity\n if not self.use_trainable_cov:\n assert sigma is not None, 'sigma cannot be None for non-trainable covariance!'\n self.sigma = sigma\n if self.use_trainable_cov and self.use_tied_cov:\n self.usigma = nn.Parameter(inverse_softplus(torch.full([1, dist_dim], sigma if sigma is not None else 0.1)))\n<|end_body_0|>\n\n<|body_start_1|>\n args_tensor = self.network(tensor)\n mean_tensor = args_tensor[..., :self.dist_dim]\n if self.use_trainable_cov:\n if self.use_tied_cov:\n if self.scale_nonlinearity == 'sigmoid':\n scale_tensor = self.max_scale * torch.sigmoid(self.usigma)\n else:\n scale_tensor = F.softplus(self.usigma)\n out_dist = td.normal.Normal(mean_tensor, scale_tensor + SCALE_OFFSET)\n out_dist = td.independent.Independent(out_dist, 1)\n else:\n if self.scale_nonlinearity == 'sigmoid':\n scale_tensor = self.max_scale * torch.sigmoid(args_tensor[..., self.dist_dim:])\n else:\n scale_tensor = F.softplus(args_tensor[..., self.dist_dim:])\n out_dist = td.normal.Normal(mean_tensor, scale_tensor + SCALE_OFFSET)\n out_dist = td.independent.Independent(out_dist, 1)\n else:\n out_dist = td.normal.Normal(mean_tensor, self.sigma)\n out_dist = td.independent.Independent(out_dist, 1)\n return out_dist\n<|end_body_1|>\n", "class_docstring": "", "class_name": "GaussianDistributionOutput", "detected_licenses": ["MIT", "Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GaussianDistributionOutput:\n\n def __init__(self, network, dist_dim, use_tied_cov=False, use_trainable_cov=True, sigma=None, max_scale=1.0, scale_nonlinearity='softplus'):\n \"\"\"A Gaussian distribution on top of a neural network. Args: network (nn.Module): A torch module that outputs the parameters of the Gaussian distribution. dist_dim ([type]): The dimension of the Gaussian distribution. 
use_tied_cov (bool, optional): Whether to use a tied covariance matrix. Defaults to False. use_trainable_cov (bool, optional): True if the covariance matrix is to be learned. Defaults to True. If False, the covariance matrix is set to I. sigma (float, optional): Initial value of scale. max_scale (float, optional): Maximum scale when using sigmoid non-linearity. scale_nonlinearity (str, optional): Which non-linearity to use for scale -- sigmoid or softplus. Defaults to softplus.\"\"\"\n <|body_0|>\n\n def forward(self, tensor):\n \"\"\"The forward pass. Args: tensor (torch.Tensor): The input tensor. Returns: out_dist: The Gaussian distribution with parameters obtained by passing the input tensor through self.network.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.dist_dim = dist_dim\n self.network = network\n self.use_tied_cov = use_tied_cov\n self.use_trainable_cov = use_trainable_cov\n self.max_scale = max_scale\n self.scale_nonlinearity = scale_nonlinearity\n if not self.use_trainable_cov:\n assert sigma is not None, 'sigma cannot be None for non-trainable covariance!'\n self.sigma = sigma\n if self.use_trainable_cov and self.use_tied_cov:\n self.usigma = nn.Parameter(inverse_softplus(torch.full([1, dist_dim], sigma if sigma is not None else 0.1)))\n<|end_body_0|>\n\n<|body_start_1|>\n args_tensor = self.network(tensor)\n mean_tensor = args_tensor[..., :self.dist_dim]\n if self.use_trainable_cov:\n if self.use_tied_cov:\n if self.scale_nonlinearity == 'sigmoid':\n scale_tensor = self.max_scale * torch.sigmoid(self.usigma)\n else:\n scale_tensor = F.softplus(self.usigma)\n out_dist = td.normal.Normal(mean_tensor, scale_tensor + SCALE_OFFSET)\n out_dist = td.independent.Independent(out_dist, 1)\n else:\n if self.scale_nonlinearity == 'sigmoid':\n scale_tensor = self.max_scale * torch.sigmoid(args_tensor[..., self.dist_dim:])\n else:\n scale_tensor = F.softplus(args_tensor[..., self.dist_dim:])\n out_dist = td.normal.Normal(mean_tensor, scale_tensor + SCALE_OFFSET)\n out_dist = td.independent.Independent(out_dist, 1)\n else:\n out_dist = td.normal.Normal(mean_tensor, self.sigma)\n out_dist = td.independent.Independent(out_dist, 1)\n return out_dist\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000235", "length_bytes": 23795, "license_type": "permissive", "methods": [{"docstring": "A Gaussian distribution on top of a neural network. Args: network (nn.Module): A torch module that outputs the parameters of the Gaussian distribution. dist_dim ([type]): The dimension of the Gaussian distribution. use_tied_cov (bool, optional): Whether to use a tied covariance matrix. Defaults to False. use_trainable_cov (bool, optional): True if the covariance matrix is to be learned. Defaults to True. If False, the covariance matrix is set to I. sigma (float, optional): Initial value of scale. max_scale (float, optional): Maximum scale when using sigmoid non-linearity. scale_nonlinearity (str, optional): Which non-linearity to use for scale -- sigmoid or softplus. Defaults to softplus.", "name": "__init__", "signature": "def __init__(self, network, dist_dim, use_tied_cov=False, use_trainable_cov=True, sigma=None, max_scale=1.0, scale_nonlinearity='softplus')"}, {"docstring": "The forward pass. Args: tensor (torch.Tensor): The input tensor. 
Returns: out_dist: The Gaussian distribution with parameters obtained by passing the input tensor through self.network.", "name": "forward", "signature": "def forward(self, tensor)"}], "n_methods": 2, "prompt": "Implement the Python class `GaussianDistributionOutput` described below.\n\nClass description:\nImplement the GaussianDistributionOutput class.\n\nMethod signatures and docstrings:\n- def __init__(self, network, dist_dim, use_tied_cov=False, use_trainable_cov=True, sigma=None, max_scale=1.0, scale_nonlinearity='softplus'): A Gaussian distribution on top of a neural network. Args: network (nn.Module): A torch module that outputs the parameters of the Gaussian distribution. dist_dim ([type]): The dimension of the Gaussian distribution. use_tied_cov (bool, optional): Whether to use a tied covariance matrix. Defaults to False. use_trainable_cov (bool, optional): True if the covariance matrix is to be learned. Defaults to True. If False, the covariance matrix is set to I. sigma (float, optional): Initial value of scale. max_scale (float, optional): Maximum scale when using sigmoid non-linearity. scale_nonlinearity (str, optional): Which non-linearity to use for scale -- sigmoid or softplus. Defaults to softplus.\n- def forward(self, tensor): The forward pass. Args: tensor (torch.Tensor): The input tensor. Returns: out_dist: The Gaussian distribution with parameters obtained by passing the input tensor through self.network.", "prompted_full_text": "Implement the Python class `GaussianDistributionOutput` described below.\n\nClass description:\nImplement the GaussianDistributionOutput class.\n\nMethod signatures and docstrings:\n- def __init__(self, network, dist_dim, use_tied_cov=False, use_trainable_cov=True, sigma=None, max_scale=1.0, scale_nonlinearity='softplus'): A Gaussian distribution on top of a neural network. Args: network (nn.Module): A torch module that outputs the parameters of the Gaussian distribution. dist_dim ([type]): The dimension of the Gaussian distribution. use_tied_cov (bool, optional): Whether to use a tied covariance matrix. Defaults to False. use_trainable_cov (bool, optional): True if the covariance matrix is to be learned. Defaults to True. If False, the covariance matrix is set to I. sigma (float, optional): Initial value of scale. max_scale (float, optional): Maximum scale when using sigmoid non-linearity. scale_nonlinearity (str, optional): Which non-linearity to use for scale -- sigmoid or softplus. Defaults to softplus.\n- def forward(self, tensor): The forward pass. Args: tensor (torch.Tensor): The input tensor. Returns: out_dist: The Gaussian distribution with parameters obtained by passing the input tensor through self.network.\n\n<|skeleton|>\nclass GaussianDistributionOutput:\n\n def __init__(self, network, dist_dim, use_tied_cov=False, use_trainable_cov=True, sigma=None, max_scale=1.0, scale_nonlinearity='softplus'):\n \"\"\"A Gaussian distribution on top of a neural network. Args: network (nn.Module): A torch module that outputs the parameters of the Gaussian distribution. dist_dim ([type]): The dimension of the Gaussian distribution. use_tied_cov (bool, optional): Whether to use a tied covariance matrix. Defaults to False. use_trainable_cov (bool, optional): True if the covariance matrix is to be learned. Defaults to True. If False, the covariance matrix is set to I. sigma (float, optional): Initial value of scale. max_scale (float, optional): Maximum scale when using sigmoid non-linearity. 
scale_nonlinearity (str, optional): Which non-linearity to use for scale -- sigmoid or softplus. Defaults to softplus.\"\"\"\n <|body_0|>\n\n def forward(self, tensor):\n \"\"\"The forward pass. Args: tensor (torch.Tensor): The input tensor. Returns: out_dist: The Gaussian distribution with parameters obtained by passing the input tensor through self.network.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.dist_dim = dist_dim\n self.network = network\n self.use_tied_cov = use_tied_cov\n self.use_trainable_cov = use_trainable_cov\n self.max_scale = max_scale\n self.scale_nonlinearity = scale_nonlinearity\n if not self.use_trainable_cov:\n assert sigma is not None, 'sigma cannot be None for non-trainable covariance!'\n self.sigma = sigma\n if self.use_trainable_cov and self.use_tied_cov:\n self.usigma = nn.Parameter(inverse_softplus(torch.full([1, dist_dim], sigma if sigma is not None else 0.1)))\n<|end_body_0|>\n\n<|body_start_1|>\n args_tensor = self.network(tensor)\n mean_tensor = args_tensor[..., :self.dist_dim]\n if self.use_trainable_cov:\n if self.use_tied_cov:\n if self.scale_nonlinearity == 'sigmoid':\n scale_tensor = self.max_scale * torch.sigmoid(self.usigma)\n else:\n scale_tensor = F.softplus(self.usigma)\n out_dist = td.normal.Normal(mean_tensor, scale_tensor + SCALE_OFFSET)\n out_dist = td.independent.Independent(out_dist, 1)\n else:\n if self.scale_nonlinearity == 'sigmoid':\n scale_tensor = self.max_scale * torch.sigmoid(args_tensor[..., self.dist_dim:])\n else:\n scale_tensor = F.softplus(args_tensor[..., self.dist_dim:])\n out_dist = td.normal.Normal(mean_tensor, scale_tensor + SCALE_OFFSET)\n out_dist = td.independent.Independent(out_dist, 1)\n else:\n out_dist = td.normal.Normal(mean_tensor, self.sigma)\n out_dist = td.independent.Independent(out_dist, 1)\n return out_dist\n<|end_body_1|>\n", "revision_id": "74885285e825ecf8a3aaf9b68df802b22f5cebdb", "skeleton": "<|skeleton|>\nclass GaussianDistributionOutput:\n\n def __init__(self, network, dist_dim, use_tied_cov=False, use_trainable_cov=True, sigma=None, max_scale=1.0, scale_nonlinearity='softplus'):\n \"\"\"A Gaussian distribution on top of a neural network. Args: network (nn.Module): A torch module that outputs the parameters of the Gaussian distribution. dist_dim ([type]): The dimension of the Gaussian distribution. use_tied_cov (bool, optional): Whether to use a tied covariance matrix. Defaults to False. use_trainable_cov (bool, optional): True if the covariance matrix is to be learned. Defaults to True. If False, the covariance matrix is set to I. sigma (float, optional): Initial value of scale. max_scale (float, optional): Maximum scale when using sigmoid non-linearity. scale_nonlinearity (str, optional): Which non-linearity to use for scale -- sigmoid or softplus. Defaults to softplus.\"\"\"\n <|body_0|>\n\n def forward(self, tensor):\n \"\"\"The forward pass. Args: tensor (torch.Tensor): The input tensor. Returns: out_dist: The Gaussian distribution with parameters obtained by passing the input tensor through self.network.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class GaussianDistributionOutput:\n def __init__(self, network, dist_dim, use_tied_cov=False, use_trainable_cov=True, sigma=None, max_scale=1.0, scale_nonlinearity='softplus'):\n \"\"\"A Gaussian distribution on top of a neural network. 
Args: network (nn.Module): A torch module that outputs the parameters of the Gaussian distribution. dist_dim ([type]): The dimension of the Gaussian distribution. use_tied_cov (bool, optional): Whether to use a tied covariance matrix. Defaults to False. use_trainable_cov (bool, optional): True if the covariance matrix is to be learned. Defaults to True. If False, the covariance matrix is set to I. sigma (float, optional): Initial value of scale. max_scale (float, optional): Maximum scale when using sigmoid non-linearity. scale_nonlinearity (str, optional): Which non-linearity to use for scale -- sigmoid or softplus. Defaults to softplus.\"\"\"\n super().__init__()\n self.dist_dim = dist_dim\n self.network = network\n self.use_tied_cov = use_tied_cov\n self.use_trainable_cov = use_trainable_cov\n self.max_scale = max_scale\n self.scale_nonlinearity = scale_nonlinearity\n if not self.use_trainable_cov:\n assert sigma is not None, 'sigma cannot be None for non-trainable covariance!'\n self.sigma = sigma\n if self.use_trainable_cov and self.use_tied_cov:\n self.usigma = nn.Parameter(inverse_softplus(torch.full([1, dist_dim], sigma if sigma is not None else 0.1)))\n\n def forward(self, tensor):\n \"\"\"The forward pass. Args: tensor (torch.Tensor): The input tensor. Returns: out_dist: The Gaussian distribution with parameters obtained by passing the input tensor through self.network.\"\"\"\n args_tensor = self.network(tensor)\n mean_tensor = args_tensor[..., :self.dist_dim]\n if self.use_trainable_cov:\n if self.use_tied_cov:\n if self.scale_nonlinearity == 'sigmoid':\n scale_tensor = self.max_scale * torch.sigmoid(self.usigma)\n else:\n scale_tensor = F.softplus(self.usigma)\n out_dist = td.normal.Normal(mean_tensor, scale_tensor + SCALE_OFFSET)\n out_dist = td.independent.Independent(out_dist, 1)\n else:\n if self.scale_nonlinearity == 'sigmoid':\n scale_tensor = self.max_scale * torch.sigmoid(args_tensor[..., self.dist_dim:])\n else:\n scale_tensor = F.softplus(args_tensor[..., self.dist_dim:])\n out_dist = td.normal.Normal(mean_tensor, scale_tensor + SCALE_OFFSET)\n out_dist = td.independent.Independent(out_dist, 1)\n else:\n out_dist = td.normal.Normal(mean_tensor, self.sigma)\n out_dist = td.independent.Independent(out_dist, 1)\n return out_dist\n", "source": "the_stack_v2_python_sparse", "source_path": "src/subnetworks.py", "source_repo": "bassemfg/REDSDS", "split": "val", "star_events_count": 0}
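The forward pass above slices a single network output into means and raw scales, then wraps them in an Independent Normal so log_prob reduces over the event dimension. A self-contained sketch of the untied, trainable-scale branch with a toy linear network; SCALE_OFFSET is given an assumed value here, since the source repo defines its own constant (as it does inverse_softplus), and the wrapper class itself is skipped:

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as td

SCALE_OFFSET = 1e-4  # assumed small constant; the repo defines the real value

dist_dim = 3
net = nn.Linear(8, 2 * dist_dim)  # first half -> means, second half -> raw scales

x = torch.randn(5, 8)
args = net(x)
mean = args[..., :dist_dim]
scale = F.softplus(args[..., dist_dim:]) + SCALE_OFFSET  # softplus keeps scales positive
dist = td.independent.Independent(td.normal.Normal(mean, scale), 1)
print(dist.batch_shape, dist.event_shape)             # torch.Size([5]) torch.Size([3])
print(dist.log_prob(torch.zeros(5, dist_dim)).shape)  # torch.Size([5]): summed over the event dim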
{"blob_id": "fa1a0e61b648992aa7975aca11cc8f43a59eafa5", "bodies": ["if len(s1) != len(s2):\n return False\nlist1 = list(s1)\nlist2 = list(s2)\nlist1.sort()\nlist2.sort()\nfor i in range(len(list1)):\n if list1[i] != list2[i]:\n return False\nreturn True", "if len(s1) != len(s2):\n return False\nc1 = [0] * 26\nc2 = [0] * 26\nfor i in range(len(s1)):\n c1[ord(s1[i]) - ord('a')] += 1\n c2[ord(s2[i]) - ord('a')] += 1\nfor i in range(26):\n if c1[i] != c2[i]:\n return False\nreturn True"], "bodies_text": "<|body_start_0|>\n if len(s1) != len(s2):\n return False\n list1 = list(s1)\n list2 = list(s2)\n list1.sort()\n list2.sort()\n for i in range(len(list1)):\n if list1[i] != list2[i]:\n return False\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n if len(s1) != len(s2):\n return False\n c1 = [0] * 26\n c2 = [0] * 26\n for i in range(len(s1)):\n c1[ord(s1[i]) - ord('a')] += 1\n c2[ord(s2[i]) - ord('a')] += 1\n for i in range(26):\n if c1[i] != c2[i]:\n return False\n return True\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def isAnagram(self, s1, s2):\n \"\"\"Sort-and-compare approach. Idea: 1. Convert both strings to lists; 2. Sort the two lists; 3. Then compare the characters at corresponding positions for equality\"\"\"\n <|body_0|>\n\n def isAnagram2(self, s1, s2):\n \"\"\"Character-count comparison approach (Knowledge). Idea: 1. Create two arrays of length 26; 2. Count the occurrences of each character in each of the two strings; 3. Then compare the two arrays. PS: ord => a built-in Python function that takes a character and returns its corresponding ASCII value\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if len(s1) != len(s2):\n return False\n list1 = list(s1)\n list2 = list(s2)\n list1.sort()\n list2.sort()\n for i in range(len(list1)):\n if list1[i] != list2[i]:\n return False\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n if len(s1) != len(s2):\n return False\n c1 = [0] * 26\n c2 = [0] * 26\n for i in range(len(s1)):\n c1[ord(s1[i]) - ord('a')] += 1\n c2[ord(s2[i]) - ord('a')] += 1\n for i in range(26):\n if c1[i] != c2[i]:\n return False\n return True\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000236", "length_bytes": 2669, "license_type": "no_license", "methods": [{"docstring": "Sort-and-compare approach. Idea: 1. Convert both strings to lists; 2. Sort the two lists; 3. Then compare the characters at corresponding positions for equality", "name": "isAnagram", "signature": "def isAnagram(self, s1, s2)"}, {"docstring": "Character-count comparison approach (Knowledge). Idea: 1. Create two arrays of length 26; 2. Count the occurrences of each character in each of the two strings; 3. Then compare the two arrays. PS: ord => a built-in Python function that takes a character and returns its corresponding ASCII value", "name": "isAnagram2", "signature": "def isAnagram2(self, s1, s2)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_015525", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def isAnagram(self, s1, s2): Sort-and-compare approach. Idea: 1. Convert both strings to lists; 2. Sort the two lists; 3. Then compare the characters at corresponding positions for equality\n- def isAnagram2(self, s1, s2): Character-count comparison approach (Knowledge). Idea: 1. Create two arrays of length 26; 2. Count the occurrences of each character in each of the two strings; 3. Then compare the two arrays. PS: ord => a built-in Python function that takes a character and returns its corresponding ASCII value", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def isAnagram(self, s1, s2): Sort-and-compare approach. Idea: 1. Convert both strings to lists; 2. Sort the two lists; 3. Then compare the characters at corresponding positions for equality\n- def isAnagram2(self, s1, s2): Character-count comparison approach (Knowledge). Idea: 1. Create two arrays of length 26; 2. Count the occurrences of each character in each of the two strings; 3. Then compare the two arrays. PS: ord => a built-in Python function that takes a character and returns its corresponding ASCII value\n\n<|skeleton|>\nclass Solution:\n\n def isAnagram(self, s1, s2):\n \"\"\"Sort-and-compare approach. Idea: 1. Convert both strings to lists; 2. Sort the two lists; 3. 
Then compare the characters at corresponding positions for equality\"\"\"\n <|body_0|>\n\n def isAnagram2(self, s1, s2):\n \"\"\"Character-count comparison approach (Knowledge). Idea: 1. Create two arrays of length 26; 2. Count the occurrences of each character in each of the two strings; 3. Then compare the two arrays. PS: ord => a built-in Python function that takes a character and returns its corresponding ASCII value\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if len(s1) != len(s2):\n return False\n list1 = list(s1)\n list2 = list(s2)\n list1.sort()\n list2.sort()\n for i in range(len(list1)):\n if list1[i] != list2[i]:\n return False\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n if len(s1) != len(s2):\n return False\n c1 = [0] * 26\n c2 = [0] * 26\n for i in range(len(s1)):\n c1[ord(s1[i]) - ord('a')] += 1\n c2[ord(s2[i]) - ord('a')] += 1\n for i in range(26):\n if c1[i] != c2[i]:\n return False\n return True\n<|end_body_1|>\n", "revision_id": "19ea28c38762c65318275007932786e648a8b415", "skeleton": "<|skeleton|>\nclass Solution:\n\n def isAnagram(self, s1, s2):\n \"\"\"Sort-and-compare approach. Idea: 1. Convert both strings to lists; 2. Sort the two lists; 3. Then compare the characters at corresponding positions for equality\"\"\"\n <|body_0|>\n\n def isAnagram2(self, s1, s2):\n \"\"\"Character-count comparison approach (Knowledge). Idea: 1. Create two arrays of length 26; 2. Count the occurrences of each character in each of the two strings; 3. Then compare the two arrays. PS: ord => a built-in Python function that takes a character and returns its corresponding ASCII value\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def isAnagram(self, s1, s2):\n \"\"\"Sort-and-compare approach. Idea: 1. Convert both strings to lists; 2. Sort the two lists; 3. Then compare the characters at corresponding positions for equality\"\"\"\n if len(s1) != len(s2):\n return False\n list1 = list(s1)\n list2 = list(s2)\n list1.sort()\n list2.sort()\n for i in range(len(list1)):\n if list1[i] != list2[i]:\n return False\n return True\n\n def isAnagram2(self, s1, s2):\n \"\"\"Character-count comparison approach (Knowledge). Idea: 1. Create two arrays of length 26; 2. Count the occurrences of each character in each of the two strings; 3. Then compare the two arrays. PS: ord => a built-in Python function that takes a character and returns its corresponding ASCII value\"\"\"\n if len(s1) != len(s2):\n return False\n c1 = [0] * 26\n c2 = [0] * 26\n for i in range(len(s1)):\n c1[ord(s1[i]) - ord('a')] += 1\n c2[ord(s2[i]) - ord('a')] += 1\n for i in range(26):\n if c1[i] != c2[i]:\n return False\n return True\n", "source": "the_stack_v2_python_sparse", "source_path": "chapter3/14_valid-anagram.py", "source_repo": "SunnyQjm/algorithm-review", "split": "val", "star_events_count": 2}
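The counting method above is shown with the second counter indexing s2; as stored in the source blob, both counters consumed s1, which made isAnagram2 return True for any two equal-length lowercase strings. A standalone sketch of the corrected count-and-compare approach:

def is_anagram(s1: str, s2: str) -> bool:
    """Count each string's characters separately, then compare the counts."""
    if len(s1) != len(s2):
        return False
    c1 = [0] * 26
    c2 = [0] * 26
    for a, b in zip(s1, s2):
        c1[ord(a) - ord('a')] += 1
        c2[ord(b) - ord('a')] += 1  # indexes s2 -- the bug fixed above
    return c1 == c2

assert is_anagram('anagram', 'nagaram')
assert not is_anagram('rat', 'car')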
{"blob_id": "f04380d5bf05644f99ad2093e3cfd33eed75a80d", "bodies": ["if scenario:\n scenario_text = ' When ' + ' and '.join(['condition \"%s\" is %s' % (k, v) for k, v in scenario.items()])\n return message + scenario_text\nreturn message", "matches = []\nif len(stages) < 2:\n message = 'CodePipeline has {} stages. There must be at least two stages.'.format(len(stages))\n matches.append(RuleMatch(path, self._format_error_message(message, scenario)))\nreturn matches", "matches = []\nif len(stages) < 1:\n self.logger.debug('Stages was empty. Should have been caught by generic linting.')\n return matches\nfirst_stage = set([a.get('ActionTypeId').get('Category') for a in stages[0]['Actions']])\nif first_stage and 'Source' not in first_stage:\n message = 'The first stage of a pipeline must contain at least one source action.'\n matches.append(RuleMatch(path + [0, 'Name'], self._format_error_message(message, scenario)))\nif len(first_stage) != 1:\n message = 'The first stage of a pipeline must contain only source actions.'\n matches.append(RuleMatch(path + [0, 'Name'], self._format_error_message(message, scenario)))\nreturn matches", "matches = []\ncategories = set()\nif len(stages) < 1:\n self.logger.debug('Stages was empty. Should have been caught by generic linting.')\n return matches\nfor sidx, stage in enumerate(stages):\n for aidx, action in enumerate(stage.get('Actions', [])):\n action_type_id = action.get('ActionTypeId')\n categories.add(action_type_id.get('Category'))\n if sidx > 0 and action_type_id.get('Category') == 'Source':\n message = 'Only the first stage of a pipeline may contain source actions.'\n matches.append(RuleMatch(path + [sidx, 'Actions', aidx], self._format_error_message(message, scenario)))\nif not categories - set(['Source']):\n message = 'At least one stage in pipeline must contain an action that is not a source action.'\n matches.append(RuleMatch(path, self._format_error_message(message, scenario)))\nreturn matches", "matches = []\nstage_names = set()\nfor sidx, stage in enumerate(value):\n stage_name = stage.get('Name')\n if isinstance(stage_name, six.string_types):\n if stage_name in stage_names:\n message = 'All stage names within a pipeline must be unique. ({name})'.format(name=stage_name)\n matches.append(RuleMatch(path + [sidx, 'Name'], self._format_error_message(message, scenario)))\n stage_names.add(stage_name)\n else:\n self.logger.debug('Found non string for stage name: %s', stage_name)\nreturn matches", "matches = []\nresources = cfn.get_resource_properties(['AWS::CodePipeline::Pipeline'])\nfor resource in resources:\n path = resource['Path'] + ['Stages']\n properties = resource['Value']\n s_stages = cfn.get_object_without_nested_conditions(properties.get('Stages'), path)\n for s_stage in s_stages:\n s_stage_obj = s_stage.get('Object')\n s_scenario = s_stage.get('Scenario')\n if not isinstance(s_stage_obj, list):\n self.logger.debug('Stages not list. Should have been caught by generic linting.')\n continue\n try:\n matches.extend(self.check_stage_count(s_stage_obj, path, s_scenario))\n matches.extend(self.check_first_stage(s_stage_obj, path, s_scenario))\n matches.extend(self.check_source_actions(s_stage_obj, path, s_scenario))\n matches.extend(self.check_names_unique(s_stage_obj, path, s_scenario))\n except AttributeError as err:\n self.logger.debug('Got AttributeError. Should have been caught by generic linting. 
Ignoring the error here: %s', str(err))\nreturn matches"], "bodies_text": "<|body_start_0|>\n if scenario:\n scenario_text = ' When ' + ' and '.join(['condition \"%s\" is %s' % (k, v) for k, v in scenario.items()])\n return message + scenario_text\n return message\n<|end_body_0|>\n\n<|body_start_1|>\n matches = []\n if len(stages) < 2:\n message = 'CodePipeline has {} stages. There must be at least two stages.'.format(len(stages))\n matches.append(RuleMatch(path, self._format_error_message(message, scenario)))\n return matches\n<|end_body_1|>\n\n<|body_start_2|>\n matches = []\n if len(stages) < 1:\n self.logger.debug('Stages was empty. Should have been caught by generic linting.')\n return matches\n first_stage = set([a.get('ActionTypeId').get('Category') for a in stages[0]['Actions']])\n if first_stage and 'Source' not in first_stage:\n message = 'The first stage of a pipeline must contain at least one source action.'\n matches.append(RuleMatch(path + [0, 'Name'], self._format_error_message(message, scenario)))\n if len(first_stage) != 1:\n message = 'The first stage of a pipeline must contain only source actions.'\n matches.append(RuleMatch(path + [0, 'Name'], self._format_error_message(message, scenario)))\n return matches\n<|end_body_2|>\n\n<|body_start_3|>\n matches = []\n categories = set()\n if len(stages) < 1:\n self.logger.debug('Stages was empty. Should have been caught by generic linting.')\n return matches\n for sidx, stage in enumerate(stages):\n for aidx, action in enumerate(stage.get('Actions', [])):\n action_type_id = action.get('ActionTypeId')\n categories.add(action_type_id.get('Category'))\n if sidx > 0 and action_type_id.get('Category') == 'Source':\n message = 'Only the first stage of a pipeline may contain source actions.'\n matches.append(RuleMatch(path + [sidx, 'Actions', aidx], self._format_error_message(message, scenario)))\n if not categories - set(['Source']):\n message = 'At least one stage in pipeline must contain an action that is not a source action.'\n matches.append(RuleMatch(path, self._format_error_message(message, scenario)))\n return matches\n<|end_body_3|>\n\n<|body_start_4|>\n matches = []\n stage_names = set()\n for sidx, stage in enumerate(value):\n stage_name = stage.get('Name')\n if isinstance(stage_name, six.string_types):\n if stage_name in stage_names:\n message = 'All stage names within a pipeline must be unique. ({name})'.format(name=stage_name)\n matches.append(RuleMatch(path + [sidx, 'Name'], self._format_error_message(message, scenario)))\n stage_names.add(stage_name)\n else:\n self.logger.debug('Found non string for stage name: %s', stage_name)\n return matches\n<|end_body_4|>\n\n<|body_start_5|>\n matches = []\n resources = cfn.get_resource_properties(['AWS::CodePipeline::Pipeline'])\n for resource in resources:\n path = resource['Path'] + ['Stages']\n properties = resource['Value']\n s_stages = cfn.get_object_without_nested_conditions(properties.get('Stages'), path)\n for s_stage in s_stages:\n s_stage_obj = s_stage.get('Object')\n s_scenario = s_stage.get('Scenario')\n if not isinstance(s_stage_obj, list):\n self.logger.debug('Stages not list. 
Should have been caught by generic linting.')\n continue\n try:\n matches.extend(self.check_stage_count(s_stage_obj, path, s_scenario))\n matches.extend(self.check_first_stage(s_stage_obj, path, s_scenario))\n matches.extend(self.check_source_actions(s_stage_obj, path, s_scenario))\n matches.extend(self.check_names_unique(s_stage_obj, path, s_scenario))\n except AttributeError as err:\n self.logger.debug('Got AttributeError. Should have been caught by generic linting. Ignoring the error here: %s', str(err))\n return matches\n<|end_body_5|>\n", "class_docstring": "Check if CodePipeline Stages are set up properly.", "class_name": "CodepipelineStages", "detected_licenses": ["MIT-0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CodepipelineStages:\n \"\"\"Check if CodePipeline Stages are set up properly.\"\"\"\n\n def _format_error_message(self, message, scenario):\n \"\"\"Format error message with scenario text\"\"\"\n <|body_0|>\n\n def check_stage_count(self, stages, path, scenario):\n \"\"\"Check that there is a minimum of 2 stages.\"\"\"\n <|body_1|>\n\n def check_first_stage(self, stages, path, scenario):\n \"\"\"Validate the first stage of a pipeline has source actions.\"\"\"\n <|body_2|>\n\n def check_source_actions(self, stages, path, scenario):\n \"\"\"Validate all of the stages.\"\"\"\n <|body_3|>\n\n def check_names_unique(self, value, path, scenario):\n \"\"\"Check that stage names are unique.\"\"\"\n <|body_4|>\n\n def match(self, cfn):\n \"\"\"Check CodePipeline stages\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if scenario:\n scenario_text = ' When ' + ' and '.join(['condition \"%s\" is %s' % (k, v) for k, v in scenario.items()])\n return message + scenario_text\n return message\n<|end_body_0|>\n\n<|body_start_1|>\n matches = []\n if len(stages) < 2:\n message = 'CodePipeline has {} stages. There must be at least two stages.'.format(len(stages))\n matches.append(RuleMatch(path, self._format_error_message(message, scenario)))\n return matches\n<|end_body_1|>\n\n<|body_start_2|>\n matches = []\n if len(stages) < 1:\n self.logger.debug('Stages was empty. Should have been caught by generic linting.')\n return matches\n first_stage = set([a.get('ActionTypeId').get('Category') for a in stages[0]['Actions']])\n if first_stage and 'Source' not in first_stage:\n message = 'The first stage of a pipeline must contain at least one source action.'\n matches.append(RuleMatch(path + [0, 'Name'], self._format_error_message(message, scenario)))\n if len(first_stage) != 1:\n message = 'The first stage of a pipeline must contain only source actions.'\n matches.append(RuleMatch(path + [0, 'Name'], self._format_error_message(message, scenario)))\n return matches\n<|end_body_2|>\n\n<|body_start_3|>\n matches = []\n categories = set()\n if len(stages) < 1:\n self.logger.debug('Stages was empty. 
Should have been caught by generic linting.')\n return matches\n for sidx, stage in enumerate(stages):\n for aidx, action in enumerate(stage.get('Actions', [])):\n action_type_id = action.get('ActionTypeId')\n categories.add(action_type_id.get('Category'))\n if sidx > 0 and action_type_id.get('Category') == 'Source':\n message = 'Only the first stage of a pipeline may contain source actions.'\n matches.append(RuleMatch(path + [sidx, 'Actions', aidx], self._format_error_message(message, scenario)))\n if not categories - set(['Source']):\n message = 'At least one stage in pipeline must contain an action that is not a source action.'\n matches.append(RuleMatch(path, self._format_error_message(message, scenario)))\n return matches\n<|end_body_3|>\n\n<|body_start_4|>\n matches = []\n stage_names = set()\n for sidx, stage in enumerate(value):\n stage_name = stage.get('Name')\n if isinstance(stage_name, six.string_types):\n if stage_name in stage_names:\n message = 'All stage names within a pipeline must be unique. ({name})'.format(name=stage_name)\n matches.append(RuleMatch(path + [sidx, 'Name'], self._format_error_message(message, scenario)))\n stage_names.add(stage_name)\n else:\n self.logger.debug('Found non string for stage name: %s', stage_name)\n return matches\n<|end_body_4|>\n\n<|body_start_5|>\n matches = []\n resources = cfn.get_resource_properties(['AWS::CodePipeline::Pipeline'])\n for resource in resources:\n path = resource['Path'] + ['Stages']\n properties = resource['Value']\n s_stages = cfn.get_object_without_nested_conditions(properties.get('Stages'), path)\n for s_stage in s_stages:\n s_stage_obj = s_stage.get('Object')\n s_scenario = s_stage.get('Scenario')\n if not isinstance(s_stage_obj, list):\n self.logger.debug('Stages not list. Should have been caught by generic linting.')\n continue\n try:\n matches.extend(self.check_stage_count(s_stage_obj, path, s_scenario))\n matches.extend(self.check_first_stage(s_stage_obj, path, s_scenario))\n matches.extend(self.check_source_actions(s_stage_obj, path, s_scenario))\n matches.extend(self.check_names_unique(s_stage_obj, path, s_scenario))\n except AttributeError as err:\n self.logger.debug('Got AttributeError. Should have been caught by generic linting. 
Ignoring the error here: %s', str(err))\n return matches\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000237", "length_bytes": 6607, "license_type": "permissive", "methods": [{"docstring": "Format error message with scenario text", "name": "_format_error_message", "signature": "def _format_error_message(self, message, scenario)"}, {"docstring": "Check that there is a minimum of 2 stages.", "name": "check_stage_count", "signature": "def check_stage_count(self, stages, path, scenario)"}, {"docstring": "Validate the first stage of a pipeline has source actions.", "name": "check_first_stage", "signature": "def check_first_stage(self, stages, path, scenario)"}, {"docstring": "Validate all of the stages.", "name": "check_source_actions", "signature": "def check_source_actions(self, stages, path, scenario)"}, {"docstring": "Check that stage names are unique.", "name": "check_names_unique", "signature": "def check_names_unique(self, value, path, scenario)"}, {"docstring": "Check CodePipeline stages", "name": "match", "signature": "def match(self, cfn)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_train_032530", "prompt": "Implement the Python class `CodepipelineStages` described below.\n\nClass description:\nCheck if CodePipeline Stages are set up properly.\n\nMethod signatures and docstrings:\n- def _format_error_message(self, message, scenario): Format error message with scenario text\n- def check_stage_count(self, stages, path, scenario): Check that there is a minimum of 2 stages.\n- def check_first_stage(self, stages, path, scenario): Validate the first stage of a pipeline has source actions.\n- def check_source_actions(self, stages, path, scenario): Validate all of the stages.\n- def check_names_unique(self, value, path, scenario): Check that stage names are unique.\n- def match(self, cfn): Check CodePipeline stages", "prompted_full_text": "Implement the Python class `CodepipelineStages` described below.\n\nClass description:\nCheck if CodePipeline Stages are set up properly.\n\nMethod signatures and docstrings:\n- def _format_error_message(self, message, scenario): Format error message with scenario text\n- def check_stage_count(self, stages, path, scenario): Check that there is a minimum of 2 stages.\n- def check_first_stage(self, stages, path, scenario): Validate the first stage of a pipeline has source actions.\n- def check_source_actions(self, stages, path, scenario): Validate all of the stages.\n- def check_names_unique(self, value, path, scenario): Check that stage names are unique.\n- def match(self, cfn): Check CodePipeline stages\n\n<|skeleton|>\nclass CodepipelineStages:\n \"\"\"Check if CodePipeline Stages are set up properly.\"\"\"\n\n def _format_error_message(self, message, scenario):\n \"\"\"Format error message with scenario text\"\"\"\n <|body_0|>\n\n def check_stage_count(self, stages, path, scenario):\n \"\"\"Check that there is a minimum of 2 stages.\"\"\"\n <|body_1|>\n\n def check_first_stage(self, stages, path, scenario):\n \"\"\"Validate the first stage of a pipeline has source actions.\"\"\"\n <|body_2|>\n\n def check_source_actions(self, stages, path, scenario):\n \"\"\"Validate all of the stages.\"\"\"\n <|body_3|>\n\n def check_names_unique(self, value, path, scenario):\n \"\"\"Check that stage names are unique.\"\"\"\n <|body_4|>\n\n def match(self, cfn):\n \"\"\"Check CodePipeline stages\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if scenario:\n scenario_text = ' When ' + ' and '.join(['condition \"%s\" is %s' % 
(k, v) for k, v in scenario.items()])\n return message + scenario_text\n return message\n<|end_body_0|>\n\n<|body_start_1|>\n matches = []\n if len(stages) < 2:\n message = 'CodePipeline has {} stages. There must be at least two stages.'.format(len(stages))\n matches.append(RuleMatch(path, self._format_error_message(message, scenario)))\n return matches\n<|end_body_1|>\n\n<|body_start_2|>\n matches = []\n if len(stages) < 1:\n self.logger.debug('Stages was empty. Should have been caught by generic linting.')\n return matches\n first_stage = set([a.get('ActionTypeId').get('Category') for a in stages[0]['Actions']])\n if first_stage and 'Source' not in first_stage:\n message = 'The first stage of a pipeline must contain at least one source action.'\n matches.append(RuleMatch(path + [0, 'Name'], self._format_error_message(message, scenario)))\n if len(first_stage) != 1:\n message = 'The first stage of a pipeline must contain only source actions.'\n matches.append(RuleMatch(path + [0, 'Name'], self._format_error_message(message, scenario)))\n return matches\n<|end_body_2|>\n\n<|body_start_3|>\n matches = []\n categories = set()\n if len(stages) < 1:\n self.logger.debug('Stages was empty. Should have been caught by generic linting.')\n return matches\n for sidx, stage in enumerate(stages):\n for aidx, action in enumerate(stage.get('Actions', [])):\n action_type_id = action.get('ActionTypeId')\n categories.add(action_type_id.get('Category'))\n if sidx > 0 and action_type_id.get('Category') == 'Source':\n message = 'Only the first stage of a pipeline may contain source actions.'\n matches.append(RuleMatch(path + [sidx, 'Actions', aidx], self._format_error_message(message, scenario)))\n if not categories - set(['Source']):\n message = 'At least one stage in pipeline must contain an action that is not a source action.'\n matches.append(RuleMatch(path, self._format_error_message(message, scenario)))\n return matches\n<|end_body_3|>\n\n<|body_start_4|>\n matches = []\n stage_names = set()\n for sidx, stage in enumerate(value):\n stage_name = stage.get('Name')\n if isinstance(stage_name, six.string_types):\n if stage_name in stage_names:\n message = 'All stage names within a pipeline must be unique. ({name})'.format(name=stage_name)\n matches.append(RuleMatch(path + [sidx, 'Name'], self._format_error_message(message, scenario)))\n stage_names.add(stage_name)\n else:\n self.logger.debug('Found non string for stage name: %s', stage_name)\n return matches\n<|end_body_4|>\n\n<|body_start_5|>\n matches = []\n resources = cfn.get_resource_properties(['AWS::CodePipeline::Pipeline'])\n for resource in resources:\n path = resource['Path'] + ['Stages']\n properties = resource['Value']\n s_stages = cfn.get_object_without_nested_conditions(properties.get('Stages'), path)\n for s_stage in s_stages:\n s_stage_obj = s_stage.get('Object')\n s_scenario = s_stage.get('Scenario')\n if not isinstance(s_stage_obj, list):\n self.logger.debug('Stages not list. Should have been caught by generic linting.')\n continue\n try:\n matches.extend(self.check_stage_count(s_stage_obj, path, s_scenario))\n matches.extend(self.check_first_stage(s_stage_obj, path, s_scenario))\n matches.extend(self.check_source_actions(s_stage_obj, path, s_scenario))\n matches.extend(self.check_names_unique(s_stage_obj, path, s_scenario))\n except AttributeError as err:\n self.logger.debug('Got AttributeError. Should have been caught by generic linting. 
Ignoring the error here: %s', str(err))\n return matches\n<|end_body_5|>\n", "revision_id": "3f5324cfd000e14d9324a242bb7fad528b22a7df", "skeleton": "<|skeleton|>\nclass CodepipelineStages:\n \"\"\"Check if CodePipeline Stages are set up properly.\"\"\"\n\n def _format_error_message(self, message, scenario):\n \"\"\"Format error message with scenario text\"\"\"\n <|body_0|>\n\n def check_stage_count(self, stages, path, scenario):\n \"\"\"Check that there is a minimum of 2 stages.\"\"\"\n <|body_1|>\n\n def check_first_stage(self, stages, path, scenario):\n \"\"\"Validate the first stage of a pipeline has source actions.\"\"\"\n <|body_2|>\n\n def check_source_actions(self, stages, path, scenario):\n \"\"\"Validate all of the stages.\"\"\"\n <|body_3|>\n\n def check_names_unique(self, value, path, scenario):\n \"\"\"Check that stage names are unique.\"\"\"\n <|body_4|>\n\n def match(self, cfn):\n \"\"\"Check CodePipeline stages\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CodepipelineStages:\n \"\"\"Check if CodePipeline Stages are set up properly.\"\"\"\n\n def _format_error_message(self, message, scenario):\n \"\"\"Format error message with scenario text\"\"\"\n if scenario:\n scenario_text = ' When ' + ' and '.join(['condition \"%s\" is %s' % (k, v) for k, v in scenario.items()])\n return message + scenario_text\n return message\n\n def check_stage_count(self, stages, path, scenario):\n \"\"\"Check that there is a minimum of 2 stages.\"\"\"\n matches = []\n if len(stages) < 2:\n message = 'CodePipeline has {} stages. There must be at least two stages.'.format(len(stages))\n matches.append(RuleMatch(path, self._format_error_message(message, scenario)))\n return matches\n\n def check_first_stage(self, stages, path, scenario):\n \"\"\"Validate the first stage of a pipeline has source actions.\"\"\"\n matches = []\n if len(stages) < 1:\n self.logger.debug('Stages was empty. Should have been caught by generic linting.')\n return matches\n first_stage = set([a.get('ActionTypeId').get('Category') for a in stages[0]['Actions']])\n if first_stage and 'Source' not in first_stage:\n message = 'The first stage of a pipeline must contain at least one source action.'\n matches.append(RuleMatch(path + [0, 'Name'], self._format_error_message(message, scenario)))\n if len(first_stage) != 1:\n message = 'The first stage of a pipeline must contain only source actions.'\n matches.append(RuleMatch(path + [0, 'Name'], self._format_error_message(message, scenario)))\n return matches\n\n def check_source_actions(self, stages, path, scenario):\n \"\"\"Validate all of the stages.\"\"\"\n matches = []\n categories = set()\n if len(stages) < 1:\n self.logger.debug('Stages was empty. 
Should have been caught by generic linting.')\n return matches\n for sidx, stage in enumerate(stages):\n for aidx, action in enumerate(stage.get('Actions', [])):\n action_type_id = action.get('ActionTypeId')\n categories.add(action_type_id.get('Category'))\n if sidx > 0 and action_type_id.get('Category') == 'Source':\n message = 'Only the first stage of a pipeline may contain source actions.'\n matches.append(RuleMatch(path + [sidx, 'Actions', aidx], self._format_error_message(message, scenario)))\n if not categories - set(['Source']):\n message = 'At least one stage in pipeline must contain an action that is not a source action.'\n matches.append(RuleMatch(path, self._format_error_message(message, scenario)))\n return matches\n\n def check_names_unique(self, value, path, scenario):\n \"\"\"Check that stage names are unique.\"\"\"\n matches = []\n stage_names = set()\n for sidx, stage in enumerate(value):\n stage_name = stage.get('Name')\n if isinstance(stage_name, six.string_types):\n if stage_name in stage_names:\n message = 'All stage names within a pipeline must be unique. ({name})'.format(name=stage_name)\n matches.append(RuleMatch(path + [sidx, 'Name'], self._format_error_message(message, scenario)))\n stage_names.add(stage_name)\n else:\n self.logger.debug('Found non string for stage name: %s', stage_name)\n return matches\n\n def match(self, cfn):\n \"\"\"Check CodePipeline stages\"\"\"\n matches = []\n resources = cfn.get_resource_properties(['AWS::CodePipeline::Pipeline'])\n for resource in resources:\n path = resource['Path'] + ['Stages']\n properties = resource['Value']\n s_stages = cfn.get_object_without_nested_conditions(properties.get('Stages'), path)\n for s_stage in s_stages:\n s_stage_obj = s_stage.get('Object')\n s_scenario = s_stage.get('Scenario')\n if not isinstance(s_stage_obj, list):\n self.logger.debug('Stages not list. Should have been caught by generic linting.')\n continue\n try:\n matches.extend(self.check_stage_count(s_stage_obj, path, s_scenario))\n matches.extend(self.check_first_stage(s_stage_obj, path, s_scenario))\n matches.extend(self.check_source_actions(s_stage_obj, path, s_scenario))\n matches.extend(self.check_names_unique(s_stage_obj, path, s_scenario))\n except AttributeError as err:\n self.logger.debug('Got AttributeError. Should have been caught by generic linting. Ignoring the error here: %s', str(err))\n return matches\n", "source": "the_stack_v2_python_sparse", "source_path": "src/cfnlint/rules/resources/codepipeline/CodepipelineStages.py", "source_repo": "jlongtine/cfn-python-lint", "split": "val", "star_events_count": 1}
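The ' When condition ...' suffix that _format_error_message appends is easiest to see in isolation. A small sketch of the same formatting logic outside the rule class (the sample message and condition name are made up):

def format_error_message(message, scenario):
    # Append the active condition scenario, if any, in the rule's ' When ...' style.
    if scenario:
        scenario_text = ' When ' + ' and '.join(
            'condition "%s" is %s' % (k, v) for k, v in scenario.items())
        return message + scenario_text
    return message

msg = 'CodePipeline has 1 stages. There must be at least two stages.'
print(format_error_message(msg, {'IsProd': True}))
# -> CodePipeline has 1 stages. There must be at least two stages. When condition "IsProd" is True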
{"blob_id": "b306e1291863ee315d5e3c1aed74e1901cf2a9b0", "bodies": ["if isinstance(value, int):\n buffer = None\n if cls.validate_range(value):\n buffer = value.to_bytes(2, 'little')\n return buffer\n else:\n raise ValueError('value is not in valid cip range')\nelse:\n raise TypeError('value must be int')", "if isinstance(buffer, bytes):\n value = None\n if len(buffer) == 2:\n value = int.from_bytes(buffer, 'little', signed=False)\n return value\n else:\n raise ValueError('buffer length mismatch with WORD encoding')\nelse:\n raise TypeError('buffer must be bytes')", "if isinstance(value, int) and isinstance(offset, int) and isinstance(flag, bool):\n if cls.validate_range(value):\n buffer = bytearray(value.to_bytes(2, 'little'))\n if offset >= 0 and offset <= 7:\n buffer[0] = BYTE.set_flag(buffer[0], offset, flag)\n return int.from_bytes(buffer, 'little', signed=False)\n elif offset >= 8 and offset <= 15:\n buffer[1] = BYTE.set_flag(buffer[1], offset - 8, flag)\n return int.from_bytes(buffer, 'little', signed=False)\n else:\n raise ValueError('offset is not in valid range')\n else:\n raise ValueError('value is not in valid cip range')\nelse:\n raise TypeError('value must be int')", "if isinstance(value, int) and isinstance(offset, int):\n if cls.validate_range(value):\n buffer = value.to_bytes(2, 'little')\n if offset >= 0 and offset <= 7:\n return BYTE.get_flag(buffer[0], offset)\n elif offset >= 8 and offset <= 15:\n return BYTE.get_flag(buffer[1], offset - 8)\n else:\n raise ValueError('offset is not in valid range')\n else:\n raise ValueError('value is not in valid cip range')\nelse:\n raise TypeError('value must be int')"], "bodies_text": "<|body_start_0|>\n if isinstance(value, int):\n buffer = None\n if cls.validate_range(value):\n buffer = value.to_bytes(2, 'little')\n return buffer\n else:\n raise ValueError('value is not in valid cip range')\n else:\n raise TypeError('value must be int')\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(buffer, bytes):\n value = None\n if len(buffer) == 2:\n value = int.from_bytes(buffer, 'little', signed=False)\n return value\n else:\n raise ValueError('buffer length mismatch with WORD encoding')\n else:\n raise TypeError('buffer must be bytes')\n<|end_body_1|>\n\n<|body_start_2|>\n if isinstance(value, int) and isinstance(offset, int) and isinstance(flag, bool):\n if cls.validate_range(value):\n buffer = bytearray(value.to_bytes(2, 'little'))\n if offset >= 0 and offset <= 7:\n buffer[0] = BYTE.set_flag(buffer[0], offset, flag)\n return int.from_bytes(buffer, 'little', signed=False)\n elif offset >= 8 and offset <= 15:\n buffer[1] = BYTE.set_flag(buffer[1], offset - 8, flag)\n return int.from_bytes(buffer, 'little', signed=False)\n else:\n raise ValueError('offset is not in valid range')\n else:\n raise ValueError('value is not in valid cip range')\n else:\n raise TypeError('value must be int')\n<|end_body_2|>\n\n<|body_start_3|>\n if isinstance(value, int) and isinstance(offset, int):\n if cls.validate_range(value):\n buffer = value.to_bytes(2, 'little')\n if offset >= 0 and offset <= 7:\n return BYTE.get_flag(buffer[0], offset)\n elif offset >= 8 and offset <= 15:\n return BYTE.get_flag(buffer[1], offset - 8)\n else:\n raise ValueError('offset is not in valid range')\n else:\n raise ValueError('value is not in valid cip range')\n else:\n raise TypeError('value must be int')\n<|end_body_3|>\n", "class_docstring": "Class to implement WORD datatype of CIP specification. 
Methods ------- class encode class decode classmethod validate_range classmethod GetIDCode staticmethod Identify classmethod set_flag classmethod get_flag", "class_name": "WORD", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass WORD:\n \"\"\"Class to implement WORD datatype of CIP specification. Methods ------- class encode class decode classmethod validate_range classmethod GetIDCode staticmethod Identify classmethod set_flag classmethod get_flag\"\"\"\n\n def encode(cls, value):\n \"\"\"encode a value in a byte array Parameters ----------- value: int range from 0 to 0xFFFF Value to encode Return ------- Byte Array -- encoded value in a byte array to send through a network\"\"\"\n <|body_0|>\n\n def decode(cls, buffer):\n \"\"\"decode a value from a byte array Parameters ----------- buffer: bytes buffer to decode Return ------- value : int encoded value in the byte array received\"\"\"\n <|body_1|>\n\n def set_flag(cls, value, offset=0, flag=True):\n \"\"\"set the boolean flag value in a byte's offset position Parameters ----------- value: int range from 0 to 0xFFFF offset: int range 0 to 15 flag : boolean Return ------- return int\"\"\"\n <|body_2|>\n\n def get_flag(cls, value, offset=0):\n \"\"\"get the boolean value in a byte's offset position Parameters ----------- value: int range from 0 to 0xFFFF offset: int range 0 to 15 Return ------- return int\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if isinstance(value, int):\n buffer = None\n if cls.validate_range(value):\n buffer = value.to_bytes(2, 'little')\n return buffer\n else:\n raise ValueError('value is not in valid cip range')\n else:\n raise TypeError('value must be int')\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(buffer, bytes):\n value = None\n if len(buffer) == 2:\n value = int.from_bytes(buffer, 'little', signed=False)\n return value\n else:\n raise ValueError('buffer length mismatch with WORD encoding')\n else:\n raise TypeError('buffer must be bytes')\n<|end_body_1|>\n\n<|body_start_2|>\n if isinstance(value, int) and isinstance(offset, int) and isinstance(flag, bool):\n if cls.validate_range(value):\n buffer = bytearray(value.to_bytes(2, 'little'))\n if offset >= 0 and offset <= 7:\n buffer[0] = BYTE.set_flag(buffer[0], offset, flag)\n return int.from_bytes(buffer, 'little', signed=False)\n elif offset >= 8 and offset <= 15:\n buffer[1] = BYTE.set_flag(buffer[1], offset - 8, flag)\n return int.from_bytes(buffer, 'little', signed=False)\n else:\n raise ValueError('offset is not in valid range')\n else:\n raise ValueError('value is not in valid cip range')\n else:\n raise TypeError('value must be int')\n<|end_body_2|>\n\n<|body_start_3|>\n if isinstance(value, int) and isinstance(offset, int):\n if cls.validate_range(value):\n buffer = value.to_bytes(2, 'little')\n if offset >= 0 and offset <= 7:\n return BYTE.get_flag(buffer[0], offset)\n elif offset >= 8 and offset <= 15:\n return BYTE.get_flag(buffer[1], offset - 8)\n else:\n raise ValueError('offset is not in valid range')\n else:\n raise ValueError('value is not in valid cip range')\n else:\n raise TypeError('value must be int')\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000238", "length_bytes": 4009, "license_type": "permissive", "methods": [{"docstring": "encode a value in a byte array Parameters ----------- value: int range from 0 to 0xFFFF Value to encode Return ------- Byte Array -- encoded value in a byte array to send through a 
network", "name": "encode", "signature": "def encode(cls, value)"}, {"docstring": "decode a value from a byte array Parameters ----------- buffer: bytes buffer to decode Return ------- value : int encoded value in the byte array received", "name": "decode", "signature": "def decode(cls, buffer)"}, {"docstring": "set the boolean flag value in a byte's offset position Parameters ----------- value: int range from 0 to 0xFFFF offset: int range 0 to 15 flag : boolean Return ------- return int", "name": "set_flag", "signature": "def set_flag(cls, value, offset=0, flag=True)"}, {"docstring": "get the boolean value in a byte's offset position Parameters ----------- value: int range from 0 to 0xFFFF offset: int range 0 to 15 Return ------- return int", "name": "get_flag", "signature": "def get_flag(cls, value, offset=0)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_011164", "prompt": "Implement the Python class `WORD` described below.\n\nClass description:\nClass to implement WORD datatype of CIP specification. Methods ------- class encode class decode classmethod validate_range classmethod GetIDCode staticmethod Identify classmethod set_flag classmethod get_flag\n\nMethod signatures and docstrings:\n- def encode(cls, value): encode a value in a byte array Parameters ----------- value: int range from 0 to 0xFFFF Value to encode Return ------- Byte Array -- encoded value in a byte array to send through a network\n- def decode(cls, buffer): decode a value from a byte array Parameters ----------- buffer: bytes buffer to decode Return ------- value : int encoded value in the byte array received\n- def set_flag(cls, value, offset=0, flag=True): set the boolean flag value in a byte's offset position Parameters ----------- value: int range from 0 to 0xFFFF offset: int range 0 to 15 flag : boolean Return ------- return int\n- def get_flag(cls, value, offset=0): get the boolean value in a byte's offset position Parameters ----------- value: int range from 0 to 0xFFFF offset: int range 0 to 15 Return ------- return int", "prompted_full_text": "Implement the Python class `WORD` described below.\n\nClass description:\nClass to implement WORD datatype of CIP specification. Methods ------- class encode class decode classmethod validate_range classmethod GetIDCode staticmethod Identify classmethod set_flag classmethod get_flag\n\nMethod signatures and docstrings:\n- def encode(cls, value): encode a value in a byte array Parameters ----------- value: int range from 0 to 0xFFFF Value to encode Return ------- Byte Array -- encoded value in a byte array to send through a network\n- def decode(cls, buffer): decode a value from a byte array Parameters ----------- buffer: bytes buffer to decode Return ------- value : int encoded value in the byte array received\n- def set_flag(cls, value, offset=0, flag=True): set the boolean flag value in a byte's offset position Parameters ----------- value: int range from 0 to 0xFFFF offset: int range 0 to 15 flag : boolean Return ------- return int\n- def get_flag(cls, value, offset=0): get the boolean value in a byte's offset position Parameters ----------- value: int range from 0 to 0xFFFF offset: int range 0 to 15 Return ------- return int\n\n<|skeleton|>\nclass WORD:\n \"\"\"Class to implement WORD datatype of CIP specification. 
Methods ------- class encode class decode classmethod validate_range classmethod GetIDCode staticmethod Identify classmethod set_flag classmethod get_flag\"\"\"\n\n def encode(cls, value):\n \"\"\"encode a value in a byte array Parameters ----------- value: int range from 0 to 0xFFFF Value to encode Return ------- Byte Array -- encoded value in a byte array to send through a network\"\"\"\n <|body_0|>\n\n def decode(cls, buffer):\n \"\"\"decode a value from a byte array Parameters ----------- buffer: bytes buffer to decode Return ------- value : int encoded value in the byte array received\"\"\"\n <|body_1|>\n\n def set_flag(cls, value, offset=0, flag=True):\n \"\"\"set the boolean flag value in a byte's offset position Parameters ----------- value: int range from 0 to 0xFFFF offset: int range 0 to 15 flag : boolean Return ------- return int\"\"\"\n <|body_2|>\n\n def get_flag(cls, value, offset=0):\n \"\"\"get the boolean value in a byte's offset position Parameters ----------- value: int range from 0 to 0xFFFF offset: int range 0 to 15 Return ------- return int\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if isinstance(value, int):\n buffer = None\n if cls.validate_range(value):\n buffer = value.to_bytes(2, 'little')\n return buffer\n else:\n raise ValueError('value is not in valid cip range')\n else:\n raise TypeError('value must be int')\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(buffer, bytes):\n value = None\n if len(buffer) == 2:\n value = int.from_bytes(buffer, 'little', signed=False)\n return value\n else:\n raise ValueError('buffer length mismatch with WORD encoding')\n else:\n raise TypeError('buffer must be bytes')\n<|end_body_1|>\n\n<|body_start_2|>\n if isinstance(value, int) and isinstance(offset, int) and isinstance(flag, bool):\n if cls.validate_range(value):\n buffer = bytearray(value.to_bytes(2, 'little'))\n if offset >= 0 and offset <= 7:\n buffer[0] = BYTE.set_flag(buffer[0], offset, flag)\n return int.from_bytes(buffer, 'little', signed=False)\n elif offset >= 8 and offset <= 15:\n buffer[1] = BYTE.set_flag(buffer[1], offset - 8, flag)\n return int.from_bytes(buffer, 'little', signed=False)\n else:\n raise ValueError('offset is not in valid range')\n else:\n raise ValueError('value is not in valid cip range')\n else:\n raise TypeError('value must be int')\n<|end_body_2|>\n\n<|body_start_3|>\n if isinstance(value, int) and isinstance(offset, int):\n if cls.validate_range(value):\n buffer = value.to_bytes(2, 'little')\n if offset >= 0 and offset <= 7:\n return BYTE.get_flag(buffer[0], offset)\n elif offset >= 8 and offset <= 15:\n return BYTE.get_flag(buffer[1], offset - 8)\n else:\n raise ValueError('offset is not in valid range')\n else:\n raise ValueError('value is not in valid cip range')\n else:\n raise TypeError('value must be int')\n<|end_body_3|>\n", "revision_id": "288a741e5cf1e9df366ed62437e0b99f6920ef90", "skeleton": "<|skeleton|>\nclass WORD:\n \"\"\"Class to implement WORD datatype of CIP specification. 
Methods ------- class encode class decode classmethod validate_range classmethod GetIDCode staticmethod Identify classmethod set_flag classmethod get_flag\"\"\"\n\n def encode(cls, value):\n \"\"\"encode a value in a byte array Parameters ----------- value: int range from -2^63 to 2^63-1 Value to encode Return ------- Byte Array -- encoded value in a byte array to send trough a network\"\"\"\n <|body_0|>\n\n def decode(cls, buffer):\n \"\"\"decode a value from a byte array Parameters ----------- buffer: bytes buffer to decode Return ------- value : int encoded value in the byte array received\"\"\"\n <|body_1|>\n\n def set_flag(cls, value, offset=0, flag=True):\n \"\"\"set the boolean flag value in a byte's offset position Parameters ----------- value: int range from 0 to 0xFFFF offset: int range 0 to 15 flag : boolean Return ------- return int\"\"\"\n <|body_2|>\n\n def get_flag(cls, value, offset=0):\n \"\"\"get the boolean value in a byte's offset position Parameters ----------- value: int range from 0 to 0xFFFF offset: int range 0 to 15 Return ------- return int\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class WORD:\n \"\"\"Class to implement WORD datatype of CIP especification. Methods ------- class encode class decode classmethod validate_range classmethod GetIDCode staticmethod Identify classmethod set_flag classmethod get_flag\"\"\"\n\n def encode(cls, value):\n \"\"\"encode a value in a byte array Parameters ----------- value: int range from -2^63 to 2^63-1 Value to encode Return ------- Byte Array -- encoded value in a byte array to send trough a network\"\"\"\n if isinstance(value, int):\n buffer = None\n if cls.validate_range(value):\n buffer = value.to_bytes(2, 'little')\n return buffer\n else:\n raise ValueError('value is not in valid cip range')\n else:\n raise TypeError('value must be int')\n\n def decode(cls, buffer):\n \"\"\"decode a value from a byte array Parameters ----------- buffer: bytes buffer to decode Return ------- value : int encoded value in the byte array received\"\"\"\n if isinstance(buffer, bytes):\n value = None\n if len(buffer) == 2:\n value = int.from_bytes(buffer, 'little', signed=False)\n return value\n else:\n raise ValueError('buffer length mitsmatch with USINT encoding')\n else:\n raise TypeError('buffer must be bytes')\n\n def set_flag(cls, value, offset=0, flag=True):\n \"\"\"set the boolean flag value in a byte's offset position Parameters ----------- value: int range from 0 to 0xFFFF offset: int range 0 to 15 flag : boolean Return ------- return int\"\"\"\n if isinstance(value, int) and isinstance(offset, int) and isinstance(flag, bool):\n if cls.validate_range(value):\n buffer = bytearray(value.to_bytes(2, 'little'))\n if offset >= 0 and offset <= 7:\n buffer[0] = BYTE.set_flag(buffer[0], offset, flag)\n return int.from_bytes(buffer, 'little', signed=False)\n elif offset >= 8 and offset <= 15:\n buffer[1] = BYTE.set_flag(buffer[1], offset - 8, flag)\n return int.from_bytes(buffer, 'little', signed=False)\n else:\n raise ValueError('offset is nat in valid range')\n else:\n raise ValueError('value is not in valid cip range')\n else:\n raise TypeError('value must be int')\n\n def get_flag(cls, value, offset=0):\n \"\"\"get the boolean value in a byte's offset position Parameters ----------- value: int range from 0 to 0xFFFF offset: int range 0 to 15 Return ------- return int\"\"\"\n if 
isinstance(value, int) and isinstance(offset, int):\n if cls.validate_range(value):\n buffer = value.to_bytes(2, 'little')\n if offset >= 0 and offset <= 7:\n return BYTE.get_flag(buffer[0], offset)\n elif offset >= 8 and offset <= 15:\n return BYTE.get_flag(buffer[1], offset - 8)\n else:\n raise ValueError('offset is nat in valid range')\n else:\n raise ValueError('value is not in valid cip range')\n else:\n raise TypeError('value must be int')\n", "source": "the_stack_v2_python_sparse", "source_path": "data_type/word.py", "source_repo": "hsocarras/pycip", "split": "val", "star_events_count": 0}
{"blob_id": "3d9f8ef5d4ede1146129f057d496182c9fd7a155", "bodies": ["if isinstance(value, UUID):\n return value\ntry:\n return UUID(value)\nexcept (TypeError, ValueError) as e:\n return value", "pid_value = self.parse_pid(pid_value)\nif isinstance(pid_value, UUID):\n field_name = self.field._id_field\nelse:\n field_name = self.field._slug_field\n if not pid_value:\n raise PIDDoesNotExistError('comid', '')\nwith db.session.no_autoflush:\n model = self.record_cls.model_cls.query.filter_by(**{field_name: pid_value}).one_or_none()\n if model is None:\n raise PIDDoesNotExistError('comid', str(pid_value))\n record = self.record_cls(model.data, model=model)\n if record.is_deleted:\n raise PIDDeletedError(PersistentIdentifierWrapper(pid_value), record)\n return record"], "bodies_text": "<|body_start_0|>\n if isinstance(value, UUID):\n return value\n try:\n return UUID(value)\n except (TypeError, ValueError) as e:\n return value\n<|end_body_0|>\n\n<|body_start_1|>\n pid_value = self.parse_pid(pid_value)\n if isinstance(pid_value, UUID):\n field_name = self.field._id_field\n else:\n field_name = self.field._slug_field\n if not pid_value:\n raise PIDDoesNotExistError('comid', '')\n with db.session.no_autoflush:\n model = self.record_cls.model_cls.query.filter_by(**{field_name: pid_value}).one_or_none()\n if model is None:\n raise PIDDoesNotExistError('comid', str(pid_value))\n record = self.record_cls(model.data, model=model)\n if record.is_deleted:\n raise PIDDeletedError(PersistentIdentifierWrapper(pid_value), record)\n return record\n<|end_body_1|>\n", "class_docstring": "PID Slug Field Context.", "class_name": "PIDSlugFieldContext", "detected_licenses": ["LicenseRef-scancode-unknown-license-reference", "MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PIDSlugFieldContext:\n \"\"\"PID Slug Field Context.\"\"\"\n\n def parse_pid(self, value):\n \"\"\"Parse pid.\"\"\"\n <|body_0|>\n\n def resolve(self, pid_value, registered_only=True):\n \"\"\"Resolve identifier (either uuid or slug).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if isinstance(value, UUID):\n return value\n try:\n return UUID(value)\n except (TypeError, ValueError) as e:\n return value\n<|end_body_0|>\n\n<|body_start_1|>\n pid_value = self.parse_pid(pid_value)\n if isinstance(pid_value, UUID):\n field_name = self.field._id_field\n else:\n field_name = self.field._slug_field\n if not pid_value:\n raise PIDDoesNotExistError('comid', '')\n with db.session.no_autoflush:\n model = self.record_cls.model_cls.query.filter_by(**{field_name: pid_value}).one_or_none()\n if model is None:\n raise PIDDoesNotExistError('comid', str(pid_value))\n record = self.record_cls(model.data, model=model)\n if record.is_deleted:\n raise PIDDeletedError(PersistentIdentifierWrapper(pid_value), record)\n return record\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000239", "length_bytes": 2887, "license_type": "permissive", "methods": [{"docstring": "Parse pid.", "name": "parse_pid", "signature": "def parse_pid(self, value)"}, {"docstring": "Resolve identifier (either uuid or slug).", "name": "resolve", "signature": "def resolve(self, pid_value, registered_only=True)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_039824", "prompt": "Implement the Python class `PIDSlugFieldContext` described below.\n\nClass description:\nPID Slug Field Context.\n\nMethod signatures and docstrings:\n- def parse_pid(self, value): Parse pid.\n- def resolve(self, pid_value, 
registered_only=True): Resolve identifier (either uuid or slug).", "prompted_full_text": "Implement the Python class `PIDSlugFieldContext` described below.\n\nClass description:\nPID Slug Field Context.\n\nMethod signatures and docstrings:\n- def parse_pid(self, value): Parse pid.\n- def resolve(self, pid_value, registered_only=True): Resolve identifier (either uuid or slug).\n\n<|skeleton|>\nclass PIDSlugFieldContext:\n \"\"\"PID Slug Field Context.\"\"\"\n\n def parse_pid(self, value):\n \"\"\"Parse pid.\"\"\"\n <|body_0|>\n\n def resolve(self, pid_value, registered_only=True):\n \"\"\"Resolve identifier (either uuid or slug).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if isinstance(value, UUID):\n return value\n try:\n return UUID(value)\n except (TypeError, ValueError) as e:\n return value\n<|end_body_0|>\n\n<|body_start_1|>\n pid_value = self.parse_pid(pid_value)\n if isinstance(pid_value, UUID):\n field_name = self.field._id_field\n else:\n field_name = self.field._slug_field\n if not pid_value:\n raise PIDDoesNotExistError('comid', '')\n with db.session.no_autoflush:\n model = self.record_cls.model_cls.query.filter_by(**{field_name: pid_value}).one_or_none()\n if model is None:\n raise PIDDoesNotExistError('comid', str(pid_value))\n record = self.record_cls(model.data, model=model)\n if record.is_deleted:\n raise PIDDeletedError(PersistentIdentifierWrapper(pid_value), record)\n return record\n<|end_body_1|>\n", "revision_id": "9a17455c06bf606c19c6b1367e4e3d36bf017be9", "skeleton": "<|skeleton|>\nclass PIDSlugFieldContext:\n \"\"\"PID Slug Field Context.\"\"\"\n\n def parse_pid(self, value):\n \"\"\"Parse pid.\"\"\"\n <|body_0|>\n\n def resolve(self, pid_value, registered_only=True):\n \"\"\"Resolve identifier (either uuid or slug).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class PIDSlugFieldContext:\n \"\"\"PID Slug Field Context.\"\"\"\n\n def parse_pid(self, value):\n \"\"\"Parse pid.\"\"\"\n if isinstance(value, UUID):\n return value\n try:\n return UUID(value)\n except (TypeError, ValueError) as e:\n return value\n\n def resolve(self, pid_value, registered_only=True):\n \"\"\"Resolve identifier (either uuid or slug).\"\"\"\n pid_value = self.parse_pid(pid_value)\n if isinstance(pid_value, UUID):\n field_name = self.field._id_field\n else:\n field_name = self.field._slug_field\n if not pid_value:\n raise PIDDoesNotExistError('comid', '')\n with db.session.no_autoflush:\n model = self.record_cls.model_cls.query.filter_by(**{field_name: pid_value}).one_or_none()\n if model is None:\n raise PIDDoesNotExistError('comid', str(pid_value))\n record = self.record_cls(model.data, model=model)\n if record.is_deleted:\n raise PIDDeletedError(PersistentIdentifierWrapper(pid_value), record)\n return record\n", "source": "the_stack_v2_python_sparse", "source_path": "invenio_communities/communities/records/systemfields/pidslug.py", "source_repo": "inveniosoftware/invenio-communities", "split": "val", "star_events_count": 5}
{"blob_id": "97c64a6357519af25bd3d45238862776da382e6d", "bodies": ["self.p = initp\nself.np = len(initp)\nim = resid_in.copy()\nim /= np.sum(im)\nmean_psf = psf_in.copy()\nmean_psf /= np.sum(mean_psf)\nif im.shape[0] != im.shape[1]:\n raise ValueError('Model Image must be square')\nif im.shape != mean_psf.shape:\n raise ValueError('PSF must have the same shape as input residuals.')\nself.mod_ft = np.fft.fftshift(np.fft.fft2(np.fft.fftshift(im))) / np.fft.fftshift(np.fft.fft2(np.fft.fftshift(mean_psf)))\nuv_coord = 2 * np.pi * (np.arange(im.shape[0]) - im.shape[0] // 2) / float(im.shape[0])\nself.mod_ft_realfunc = RectBivariateSpline(uv_coord, uv_coord, self.mod_ft.real, kx=1, ky=1)\nself.mod_ft_imagfunc = RectBivariateSpline(uv_coord, uv_coord, self.mod_ft.imag, kx=1, ky=1)", "ret_array = self.mod_ft_realfunc(uv[0], uv[1], grid=False).astype(np.complex)\nret_array += 1j * self.mod_ft_imagfunc(uv[0], uv[1], grid=False)\nreturn p_in[0] * ret_array + (1 - p_in[0])"], "bodies_text": "<|body_start_0|>\n self.p = initp\n self.np = len(initp)\n im = resid_in.copy()\n im /= np.sum(im)\n mean_psf = psf_in.copy()\n mean_psf /= np.sum(mean_psf)\n if im.shape[0] != im.shape[1]:\n raise ValueError('Model Image must be square')\n if im.shape != mean_psf.shape:\n raise ValueError('PSF must have the same shape as input residuals.')\n self.mod_ft = np.fft.fftshift(np.fft.fft2(np.fft.fftshift(im))) / np.fft.fftshift(np.fft.fft2(np.fft.fftshift(mean_psf)))\n uv_coord = 2 * np.pi * (np.arange(im.shape[0]) - im.shape[0] // 2) / float(im.shape[0])\n self.mod_ft_realfunc = RectBivariateSpline(uv_coord, uv_coord, self.mod_ft.real, kx=1, ky=1)\n self.mod_ft_imagfunc = RectBivariateSpline(uv_coord, uv_coord, self.mod_ft.imag, kx=1, ky=1)\n<|end_body_0|>\n\n<|body_start_1|>\n ret_array = self.mod_ft_realfunc(uv[0], uv[1], grid=False).astype(np.complex)\n ret_array += 1j * self.mod_ft_imagfunc(uv[0], uv[1], grid=False)\n return p_in[0] * ret_array + (1 - p_in[0])\n<|end_body_1|>\n", "class_docstring": "", "class_name": "ResidObject", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ResidObject:\n\n def __init__(self, initp=[], resid_in=None, psf_in=None):\n \"\"\"A model for the object on sky, consisting of a point source and a map that has been convolved with the PSF map. The idea is that, iteratively, the fit residuals can be added to to the input residuals to last model residuals, and the final problem is a standard deconvolution problem with a known PSF. Parameters ---------- initp: array-like A single parameter, the relative flux of the resolved part of the image. resid_in: numpy array Residuals from the previous iteration. Same size and format as the input image, but with N down and E left when displayed with imshow. psf_in: numpy array The PSF that should be used for the residuals, weighted in the same way.\"\"\"\n <|body_0|>\n\n def model_uv(self, p_in, uv):\n \"\"\"Return a model of the Fourier transform of the object given a set of points in the uv plane Parameters ---------- p_in: array-like model parameters. Can be None if if the model has no parameters! 
uv: array-like Coordinates in the uv plane\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.p = initp\n self.np = len(initp)\n im = resid_in.copy()\n im /= np.sum(im)\n mean_psf = psf_in.copy()\n mean_psf /= np.sum(mean_psf)\n if im.shape[0] != im.shape[1]:\n raise ValueError('Model Image must be square')\n if im.shape != mean_psf.shape:\n raise ValueError('PSF must have the same shape as input residuals.')\n self.mod_ft = np.fft.fftshift(np.fft.fft2(np.fft.fftshift(im))) / np.fft.fftshift(np.fft.fft2(np.fft.fftshift(mean_psf)))\n uv_coord = 2 * np.pi * (np.arange(im.shape[0]) - im.shape[0] // 2) / float(im.shape[0])\n self.mod_ft_realfunc = RectBivariateSpline(uv_coord, uv_coord, self.mod_ft.real, kx=1, ky=1)\n self.mod_ft_imagfunc = RectBivariateSpline(uv_coord, uv_coord, self.mod_ft.imag, kx=1, ky=1)\n<|end_body_0|>\n\n<|body_start_1|>\n ret_array = self.mod_ft_realfunc(uv[0], uv[1], grid=False).astype(np.complex)\n ret_array += 1j * self.mod_ft_imagfunc(uv[0], uv[1], grid=False)\n return p_in[0] * ret_array + (1 - p_in[0])\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000240", "length_bytes": 37909, "license_type": "permissive", "methods": [{"docstring": "A model for the object on sky, consisting of a point source and a map that has been convolved with the PSF map. The idea is that, iteratively, the fit residuals can be added to to the input residuals to last model residuals, and the final problem is a standard deconvolution problem with a known PSF. Parameters ---------- initp: array-like A single parameter, the relative flux of the resolved part of the image. resid_in: numpy array Residuals from the previous iteration. Same size and format as the input image, but with N down and E left when displayed with imshow. psf_in: numpy array The PSF that should be used for the residuals, weighted in the same way.", "name": "__init__", "signature": "def __init__(self, initp=[], resid_in=None, psf_in=None)"}, {"docstring": "Return a model of the Fourier transform of the object given a set of points in the uv plane Parameters ---------- p_in: array-like model parameters. Can be None if if the model has no parameters! uv: array-like Coordinates in the uv plane", "name": "model_uv", "signature": "def model_uv(self, p_in, uv)"}], "n_methods": 2, "prompt": "Implement the Python class `ResidObject` described below.\n\nClass description:\nImplement the ResidObject class.\n\nMethod signatures and docstrings:\n- def __init__(self, initp=[], resid_in=None, psf_in=None): A model for the object on sky, consisting of a point source and a map that has been convolved with the PSF map. The idea is that, iteratively, the fit residuals can be added to to the input residuals to last model residuals, and the final problem is a standard deconvolution problem with a known PSF. Parameters ---------- initp: array-like A single parameter, the relative flux of the resolved part of the image. resid_in: numpy array Residuals from the previous iteration. Same size and format as the input image, but with N down and E left when displayed with imshow. psf_in: numpy array The PSF that should be used for the residuals, weighted in the same way.\n- def model_uv(self, p_in, uv): Return a model of the Fourier transform of the object given a set of points in the uv plane Parameters ---------- p_in: array-like model parameters. Can be None if if the model has no parameters! 
uv: array-like Coordinates in the uv plane", "prompted_full_text": "Implement the Python class `ResidObject` described below.\n\nClass description:\nImplement the ResidObject class.\n\nMethod signatures and docstrings:\n- def __init__(self, initp=[], resid_in=None, psf_in=None): A model for the object on sky, consisting of a point source and a map that has been convolved with the PSF map. The idea is that, iteratively, the fit residuals can be added to to the input residuals to last model residuals, and the final problem is a standard deconvolution problem with a known PSF. Parameters ---------- initp: array-like A single parameter, the relative flux of the resolved part of the image. resid_in: numpy array Residuals from the previous iteration. Same size and format as the input image, but with N down and E left when displayed with imshow. psf_in: numpy array The PSF that should be used for the residuals, weighted in the same way.\n- def model_uv(self, p_in, uv): Return a model of the Fourier transform of the object given a set of points in the uv plane Parameters ---------- p_in: array-like model parameters. Can be None if if the model has no parameters! uv: array-like Coordinates in the uv plane\n\n<|skeleton|>\nclass ResidObject:\n\n def __init__(self, initp=[], resid_in=None, psf_in=None):\n \"\"\"A model for the object on sky, consisting of a point source and a map that has been convolved with the PSF map. The idea is that, iteratively, the fit residuals can be added to to the input residuals to last model residuals, and the final problem is a standard deconvolution problem with a known PSF. Parameters ---------- initp: array-like A single parameter, the relative flux of the resolved part of the image. resid_in: numpy array Residuals from the previous iteration. Same size and format as the input image, but with N down and E left when displayed with imshow. psf_in: numpy array The PSF that should be used for the residuals, weighted in the same way.\"\"\"\n <|body_0|>\n\n def model_uv(self, p_in, uv):\n \"\"\"Return a model of the Fourier transform of the object given a set of points in the uv plane Parameters ---------- p_in: array-like model parameters. Can be None if if the model has no parameters! 
uv: array-like Coordinates in the uv plane\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.p = initp\n self.np = len(initp)\n im = resid_in.copy()\n im /= np.sum(im)\n mean_psf = psf_in.copy()\n mean_psf /= np.sum(mean_psf)\n if im.shape[0] != im.shape[1]:\n raise ValueError('Model Image must be square')\n if im.shape != mean_psf.shape:\n raise ValueError('PSF must have the same shape as input residuals.')\n self.mod_ft = np.fft.fftshift(np.fft.fft2(np.fft.fftshift(im))) / np.fft.fftshift(np.fft.fft2(np.fft.fftshift(mean_psf)))\n uv_coord = 2 * np.pi * (np.arange(im.shape[0]) - im.shape[0] // 2) / float(im.shape[0])\n self.mod_ft_realfunc = RectBivariateSpline(uv_coord, uv_coord, self.mod_ft.real, kx=1, ky=1)\n self.mod_ft_imagfunc = RectBivariateSpline(uv_coord, uv_coord, self.mod_ft.imag, kx=1, ky=1)\n<|end_body_0|>\n\n<|body_start_1|>\n ret_array = self.mod_ft_realfunc(uv[0], uv[1], grid=False).astype(np.complex)\n ret_array += 1j * self.mod_ft_imagfunc(uv[0], uv[1], grid=False)\n return p_in[0] * ret_array + (1 - p_in[0])\n<|end_body_1|>\n", "revision_id": "ae25b01ba590ee581232dacdb906de6effc7870b", "skeleton": "<|skeleton|>\nclass ResidObject:\n\n def __init__(self, initp=[], resid_in=None, psf_in=None):\n \"\"\"A model for the object on sky, consisting of a point source and a map that has been convolved with the PSF map. The idea is that, iteratively, the fit residuals can be added to to the input residuals to last model residuals, and the final problem is a standard deconvolution problem with a known PSF. Parameters ---------- initp: array-like A single parameter, the relative flux of the resolved part of the image. resid_in: numpy array Residuals from the previous iteration. Same size and format as the input image, but with N down and E left when displayed with imshow. psf_in: numpy array The PSF that should be used for the residuals, weighted in the same way.\"\"\"\n <|body_0|>\n\n def model_uv(self, p_in, uv):\n \"\"\"Return a model of the Fourier transform of the object given a set of points in the uv plane Parameters ---------- p_in: array-like model parameters. Can be None if if the model has no parameters! uv: array-like Coordinates in the uv plane\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ResidObject:\n def __init__(self, initp=[], resid_in=None, psf_in=None):\n \"\"\"A model for the object on sky, consisting of a point source and a map that has been convolved with the PSF map. The idea is that, iteratively, the fit residuals can be added to to the input residuals to last model residuals, and the final problem is a standard deconvolution problem with a known PSF. Parameters ---------- initp: array-like A single parameter, the relative flux of the resolved part of the image. resid_in: numpy array Residuals from the previous iteration. Same size and format as the input image, but with N down and E left when displayed with imshow. 
psf_in: numpy array The PSF that should be used for the residuals, weighted in the same way.\"\"\"\n self.p = initp\n self.np = len(initp)\n im = resid_in.copy()\n im /= np.sum(im)\n mean_psf = psf_in.copy()\n mean_psf /= np.sum(mean_psf)\n if im.shape[0] != im.shape[1]:\n raise ValueError('Model Image must be square')\n if im.shape != mean_psf.shape:\n raise ValueError('PSF must have the same shape as input residuals.')\n self.mod_ft = np.fft.fftshift(np.fft.fft2(np.fft.fftshift(im))) / np.fft.fftshift(np.fft.fft2(np.fft.fftshift(mean_psf)))\n uv_coord = 2 * np.pi * (np.arange(im.shape[0]) - im.shape[0] // 2) / float(im.shape[0])\n self.mod_ft_realfunc = RectBivariateSpline(uv_coord, uv_coord, self.mod_ft.real, kx=1, ky=1)\n self.mod_ft_imagfunc = RectBivariateSpline(uv_coord, uv_coord, self.mod_ft.imag, kx=1, ky=1)\n\n def model_uv(self, p_in, uv):\n \"\"\"Return a model of the Fourier transform of the object given a set of points in the uv plane Parameters ---------- p_in: array-like model parameters. Can be None if if the model has no parameters! uv: array-like Coordinates in the uv plane\"\"\"\n ret_array = self.mod_ft_realfunc(uv[0], uv[1], grid=False).astype(np.complex)\n ret_array += 1j * self.mod_ft_imagfunc(uv[0], uv[1], grid=False)\n return p_in[0] * ret_array + (1 - p_in[0])\n", "source": "the_stack_v2_python_sparse", "source_path": "psf_marginalise.py", "source_repo": "mikeireland/pynrm", "split": "val", "star_events_count": 0}
{"blob_id": "a6baefee293177b5f32bb3be0ade3a0113d477b4", "bodies": ["L = self.codomain().base_ring()\nWR = self.codomain().weil_restriction()\nif L.is_finite():\n d = L.degree()\n if d == 1:\n return self\n newP = []\n for t in self:\n c = t.polynomial().coefficients(sparse=False)\n c = c + (d - len(c)) * [0]\n newP += c\nelse:\n d = L.relative_degree()\n if d == 1:\n return self\n from sage.rings.number_field.number_field_element import CoordinateFunction\n v = L.gen()\n V, from_V, to_V = L.relative_vector_space()\n h = L(1)\n B = [to_V(h)]\n f = v.minpoly()\n for i in range(f.degree() - 1):\n h *= v\n B.append(to_V(h))\n W = V.span_of_basis(B)\n p = CoordinateFunction(v, W, to_V)\n newP = []\n for t in self:\n newP += p(t)\nreturn WR(newP)", "from sage.schemes.affine.affine_space import is_AffineSpace\nif is_AffineSpace(self.codomain()):\n raise TypeError('this point must be a point on an affine subscheme')\nreturn self.codomain().intersection_multiplicity(X, self)", "from sage.schemes.affine.affine_space import is_AffineSpace\nif is_AffineSpace(self.codomain()):\n raise TypeError('this point must be a point on an affine subscheme')\nreturn self.codomain().multiplicity(self)"], "bodies_text": "<|body_start_0|>\n L = self.codomain().base_ring()\n WR = self.codomain().weil_restriction()\n if L.is_finite():\n d = L.degree()\n if d == 1:\n return self\n newP = []\n for t in self:\n c = t.polynomial().coefficients(sparse=False)\n c = c + (d - len(c)) * [0]\n newP += c\n else:\n d = L.relative_degree()\n if d == 1:\n return self\n from sage.rings.number_field.number_field_element import CoordinateFunction\n v = L.gen()\n V, from_V, to_V = L.relative_vector_space()\n h = L(1)\n B = [to_V(h)]\n f = v.minpoly()\n for i in range(f.degree() - 1):\n h *= v\n B.append(to_V(h))\n W = V.span_of_basis(B)\n p = CoordinateFunction(v, W, to_V)\n newP = []\n for t in self:\n newP += p(t)\n return WR(newP)\n<|end_body_0|>\n\n<|body_start_1|>\n from sage.schemes.affine.affine_space import is_AffineSpace\n if is_AffineSpace(self.codomain()):\n raise TypeError('this point must be a point on an affine subscheme')\n return self.codomain().intersection_multiplicity(X, self)\n<|end_body_1|>\n\n<|body_start_2|>\n from sage.schemes.affine.affine_space import is_AffineSpace\n if is_AffineSpace(self.codomain()):\n raise TypeError('this point must be a point on an affine subscheme')\n return self.codomain().multiplicity(self)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "SchemeMorphism_point_affine_field", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SchemeMorphism_point_affine_field:\n\n def weil_restriction(self):\n \"\"\"Compute the Weil restriction of this point over some extension field. If the field is a finite field, then this computes the Weil restriction to the prime subfield. A Weil restriction of scalars - denoted `Res_{L/k}` - is a functor which, for any finite extension of fields `L/k` and any algebraic variety `X` over `L`, produces another corresponding variety `Res_{L/k}(X)`, defined over `k`. It is useful for reducing questions about varieties over large fields to questions about more complicated varieties over smaller fields. This functor applied to a point gives the equivalent point on the Weil restriction of its codomain. 
OUTPUT: Scheme point on the Weil restriction of the codomain of this p\"\"\"\n <|body_0|>\n\n def intersection_multiplicity(self, X):\n \"\"\"Return the intersection multiplicity of the codomain of this point and ``X`` at this point. This uses the intersection_multiplicity implementations for projective/affine subschemes. This point must be a point on an affine subscheme. INPUT: - ``X`` -- a subscheme in the same ambient space as that of the codomain of this point. OUTPUT: Integer. EXAMPLES:: sage: A. = AffineSpace(GF(17), 2) sage: X = A.subscheme([y^2 - x^3 + 2*x^2 - x]) sage: Y = A.subscheme([y - 2*x + 2]) sage: Q1 = Y([1,0]) sage: Q1.intersection_multiplicity(X) 2 sage: Q2 = X([4,6]) sage: Q2.intersection_multiplicity(Y) 1 :: sage: A. = AffineSpace(QQ, 4) sage: X = A.subscheme([x^2 - y*z^2, z - 2*w^2]) sage: Q = A\"\"\"\n <|body_1|>\n\n def multiplicity(self):\n \"\"\"Return the multiplicity of this point on its codomain. Uses the subscheme multiplicity implementation. This point must be a point on an affine subscheme. OUTPUT: an integer. EXAMPLES:: sage: A. = AffineSpace(QQ, 3) sage: X = A.subscheme([y^2 - x^7*z]) sage: Q1 = X([1,1,1]) sage: Q1.multiplicity() 1 sage: Q2 = X([0,0,2]) sage: Q2.multiplicity() 2\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n L = self.codomain().base_ring()\n WR = self.codomain().weil_restriction()\n if L.is_finite():\n d = L.degree()\n if d == 1:\n return self\n newP = []\n for t in self:\n c = t.polynomial().coefficients(sparse=False)\n c = c + (d - len(c)) * [0]\n newP += c\n else:\n d = L.relative_degree()\n if d == 1:\n return self\n from sage.rings.number_field.number_field_element import CoordinateFunction\n v = L.gen()\n V, from_V, to_V = L.relative_vector_space()\n h = L(1)\n B = [to_V(h)]\n f = v.minpoly()\n for i in range(f.degree() - 1):\n h *= v\n B.append(to_V(h))\n W = V.span_of_basis(B)\n p = CoordinateFunction(v, W, to_V)\n newP = []\n for t in self:\n newP += p(t)\n return WR(newP)\n<|end_body_0|>\n\n<|body_start_1|>\n from sage.schemes.affine.affine_space import is_AffineSpace\n if is_AffineSpace(self.codomain()):\n raise TypeError('this point must be a point on an affine subscheme')\n return self.codomain().intersection_multiplicity(X, self)\n<|end_body_1|>\n\n<|body_start_2|>\n from sage.schemes.affine.affine_space import is_AffineSpace\n if is_AffineSpace(self.codomain()):\n raise TypeError('this point must be a point on an affine subscheme')\n return self.codomain().multiplicity(self)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000241", "length_bytes": 15505, "license_type": "no_license", "methods": [{"docstring": "Compute the Weil restriction of this point over some extension field. If the field is a finite field, then this computes the Weil restriction to the prime subfield. A Weil restriction of scalars - denoted `Res_{L/k}` - is a functor which, for any finite extension of fields `L/k` and any algebraic variety `X` over `L`, produces another corresponding variety `Res_{L/k}(X)`, defined over `k`. It is useful for reducing questions about varieties over large fields to questions about more complicated varieties over smaller fields. This functor applied to a point gives the equivalent point on the Weil restriction of its codomain. OUTPUT: Scheme point on the Weil restriction of the codomain of this p", "name": "weil_restriction", "signature": "def weil_restriction(self)"}, {"docstring": "Return the intersection multiplicity of the codomain of this point and ``X`` at this point. 
This uses the intersection_multiplicity implementations for projective/affine subschemes. This point must be a point on an affine subscheme. INPUT: - ``X`` -- a subscheme in the same ambient space as that of the codomain of this point. OUTPUT: Integer. EXAMPLES:: sage: A. = AffineSpace(GF(17), 2) sage: X = A.subscheme([y^2 - x^3 + 2*x^2 - x]) sage: Y = A.subscheme([y - 2*x + 2]) sage: Q1 = Y([1,0]) sage: Q1.intersection_multiplicity(X) 2 sage: Q2 = X([4,6]) sage: Q2.intersection_multiplicity(Y) 1 :: sage: A. = AffineSpace(QQ, 4) sage: X = A.subscheme([x^2 - y*z^2, z - 2*w^2]) sage: Q = A", "name": "intersection_multiplicity", "signature": "def intersection_multiplicity(self, X)"}, {"docstring": "Return the multiplicity of this point on its codomain. Uses the subscheme multiplicity implementation. This point must be a point on an affine subscheme. OUTPUT: an integer. EXAMPLES:: sage: A. = AffineSpace(QQ, 3) sage: X = A.subscheme([y^2 - x^7*z]) sage: Q1 = X([1,1,1]) sage: Q1.multiplicity() 1 sage: Q2 = X([0,0,2]) sage: Q2.multiplicity() 2", "name": "multiplicity", "signature": "def multiplicity(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_019555", "prompt": "Implement the Python class `SchemeMorphism_point_affine_field` described below.\n\nClass description:\nImplement the SchemeMorphism_point_affine_field class.\n\nMethod signatures and docstrings:\n- def weil_restriction(self): Compute the Weil restriction of this point over some extension field. If the field is a finite field, then this computes the Weil restriction to the prime subfield. A Weil restriction of scalars - denoted `Res_{L/k}` - is a functor which, for any finite extension of fields `L/k` and any algebraic variety `X` over `L`, produces another corresponding variety `Res_{L/k}(X)`, defined over `k`. It is useful for reducing questions about varieties over large fields to questions about more complicated varieties over smaller fields. This functor applied to a point gives the equivalent point on the Weil restriction of its codomain. OUTPUT: Scheme point on the Weil restriction of the codomain of this p\n- def intersection_multiplicity(self, X): Return the intersection multiplicity of the codomain of this point and ``X`` at this point. This uses the intersection_multiplicity implementations for projective/affine subschemes. This point must be a point on an affine subscheme. INPUT: - ``X`` -- a subscheme in the same ambient space as that of the codomain of this point. OUTPUT: Integer. EXAMPLES:: sage: A. = AffineSpace(GF(17), 2) sage: X = A.subscheme([y^2 - x^3 + 2*x^2 - x]) sage: Y = A.subscheme([y - 2*x + 2]) sage: Q1 = Y([1,0]) sage: Q1.intersection_multiplicity(X) 2 sage: Q2 = X([4,6]) sage: Q2.intersection_multiplicity(Y) 1 :: sage: A. = AffineSpace(QQ, 4) sage: X = A.subscheme([x^2 - y*z^2, z - 2*w^2]) sage: Q = A\n- def multiplicity(self): Return the multiplicity of this point on its codomain. Uses the subscheme multiplicity implementation. This point must be a point on an affine subscheme. OUTPUT: an integer. EXAMPLES:: sage: A. 
= AffineSpace(QQ, 3) sage: X = A.subscheme([y^2 - x^7*z]) sage: Q1 = X([1,1,1]) sage: Q1.multiplicity() 1 sage: Q2 = X([0,0,2]) sage: Q2.multiplicity() 2", "prompted_full_text": "Implement the Python class `SchemeMorphism_point_affine_field` described below.\n\nClass description:\nImplement the SchemeMorphism_point_affine_field class.\n\nMethod signatures and docstrings:\n- def weil_restriction(self): Compute the Weil restriction of this point over some extension field. If the field is a finite field, then this computes the Weil restriction to the prime subfield. A Weil restriction of scalars - denoted `Res_{L/k}` - is a functor which, for any finite extension of fields `L/k` and any algebraic variety `X` over `L`, produces another corresponding variety `Res_{L/k}(X)`, defined over `k`. It is useful for reducing questions about varieties over large fields to questions about more complicated varieties over smaller fields. This functor applied to a point gives the equivalent point on the Weil restriction of its codomain. OUTPUT: Scheme point on the Weil restriction of the codomain of this p\n- def intersection_multiplicity(self, X): Return the intersection multiplicity of the codomain of this point and ``X`` at this point. This uses the intersection_multiplicity implementations for projective/affine subschemes. This point must be a point on an affine subscheme. INPUT: - ``X`` -- a subscheme in the same ambient space as that of the codomain of this point. OUTPUT: Integer. EXAMPLES:: sage: A. = AffineSpace(GF(17), 2) sage: X = A.subscheme([y^2 - x^3 + 2*x^2 - x]) sage: Y = A.subscheme([y - 2*x + 2]) sage: Q1 = Y([1,0]) sage: Q1.intersection_multiplicity(X) 2 sage: Q2 = X([4,6]) sage: Q2.intersection_multiplicity(Y) 1 :: sage: A. = AffineSpace(QQ, 4) sage: X = A.subscheme([x^2 - y*z^2, z - 2*w^2]) sage: Q = A\n- def multiplicity(self): Return the multiplicity of this point on its codomain. Uses the subscheme multiplicity implementation. This point must be a point on an affine subscheme. OUTPUT: an integer. EXAMPLES:: sage: A. = AffineSpace(QQ, 3) sage: X = A.subscheme([y^2 - x^7*z]) sage: Q1 = X([1,1,1]) sage: Q1.multiplicity() 1 sage: Q2 = X([0,0,2]) sage: Q2.multiplicity() 2\n\n<|skeleton|>\nclass SchemeMorphism_point_affine_field:\n\n def weil_restriction(self):\n \"\"\"Compute the Weil restriction of this point over some extension field. If the field is a finite field, then this computes the Weil restriction to the prime subfield. A Weil restriction of scalars - denoted `Res_{L/k}` - is a functor which, for any finite extension of fields `L/k` and any algebraic variety `X` over `L`, produces another corresponding variety `Res_{L/k}(X)`, defined over `k`. It is useful for reducing questions about varieties over large fields to questions about more complicated varieties over smaller fields. This functor applied to a point gives the equivalent point on the Weil restriction of its codomain. OUTPUT: Scheme point on the Weil restriction of the codomain of this p\"\"\"\n <|body_0|>\n\n def intersection_multiplicity(self, X):\n \"\"\"Return the intersection multiplicity of the codomain of this point and ``X`` at this point. This uses the intersection_multiplicity implementations for projective/affine subschemes. This point must be a point on an affine subscheme. INPUT: - ``X`` -- a subscheme in the same ambient space as that of the codomain of this point. OUTPUT: Integer. EXAMPLES:: sage: A. 
= AffineSpace(GF(17), 2) sage: X = A.subscheme([y^2 - x^3 + 2*x^2 - x]) sage: Y = A.subscheme([y - 2*x + 2]) sage: Q1 = Y([1,0]) sage: Q1.intersection_multiplicity(X) 2 sage: Q2 = X([4,6]) sage: Q2.intersection_multiplicity(Y) 1 :: sage: A. = AffineSpace(QQ, 4) sage: X = A.subscheme([x^2 - y*z^2, z - 2*w^2]) sage: Q = A\"\"\"\n <|body_1|>\n\n def multiplicity(self):\n \"\"\"Return the multiplicity of this point on its codomain. Uses the subscheme multiplicity implementation. This point must be a point on an affine subscheme. OUTPUT: an integer. EXAMPLES:: sage: A. = AffineSpace(QQ, 3) sage: X = A.subscheme([y^2 - x^7*z]) sage: Q1 = X([1,1,1]) sage: Q1.multiplicity() 1 sage: Q2 = X([0,0,2]) sage: Q2.multiplicity() 2\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n L = self.codomain().base_ring()\n WR = self.codomain().weil_restriction()\n if L.is_finite():\n d = L.degree()\n if d == 1:\n return self\n newP = []\n for t in self:\n c = t.polynomial().coefficients(sparse=False)\n c = c + (d - len(c)) * [0]\n newP += c\n else:\n d = L.relative_degree()\n if d == 1:\n return self\n from sage.rings.number_field.number_field_element import CoordinateFunction\n v = L.gen()\n V, from_V, to_V = L.relative_vector_space()\n h = L(1)\n B = [to_V(h)]\n f = v.minpoly()\n for i in range(f.degree() - 1):\n h *= v\n B.append(to_V(h))\n W = V.span_of_basis(B)\n p = CoordinateFunction(v, W, to_V)\n newP = []\n for t in self:\n newP += p(t)\n return WR(newP)\n<|end_body_0|>\n\n<|body_start_1|>\n from sage.schemes.affine.affine_space import is_AffineSpace\n if is_AffineSpace(self.codomain()):\n raise TypeError('this point must be a point on an affine subscheme')\n return self.codomain().intersection_multiplicity(X, self)\n<|end_body_1|>\n\n<|body_start_2|>\n from sage.schemes.affine.affine_space import is_AffineSpace\n if is_AffineSpace(self.codomain()):\n raise TypeError('this point must be a point on an affine subscheme')\n return self.codomain().multiplicity(self)\n<|end_body_2|>\n", "revision_id": "0d9eacbf74e2acffefde93e39f8bcbec745cdaba", "skeleton": "<|skeleton|>\nclass SchemeMorphism_point_affine_field:\n\n def weil_restriction(self):\n \"\"\"Compute the Weil restriction of this point over some extension field. If the field is a finite field, then this computes the Weil restriction to the prime subfield. A Weil restriction of scalars - denoted `Res_{L/k}` - is a functor which, for any finite extension of fields `L/k` and any algebraic variety `X` over `L`, produces another corresponding variety `Res_{L/k}(X)`, defined over `k`. It is useful for reducing questions about varieties over large fields to questions about more complicated varieties over smaller fields. This functor applied to a point gives the equivalent point on the Weil restriction of its codomain. OUTPUT: Scheme point on the Weil restriction of the codomain of this p\"\"\"\n <|body_0|>\n\n def intersection_multiplicity(self, X):\n \"\"\"Return the intersection multiplicity of the codomain of this point and ``X`` at this point. This uses the intersection_multiplicity implementations for projective/affine subschemes. This point must be a point on an affine subscheme. INPUT: - ``X`` -- a subscheme in the same ambient space as that of the codomain of this point. OUTPUT: Integer. EXAMPLES:: sage: A. 
= AffineSpace(GF(17), 2) sage: X = A.subscheme([y^2 - x^3 + 2*x^2 - x]) sage: Y = A.subscheme([y - 2*x + 2]) sage: Q1 = Y([1,0]) sage: Q1.intersection_multiplicity(X) 2 sage: Q2 = X([4,6]) sage: Q2.intersection_multiplicity(Y) 1 :: sage: A. = AffineSpace(QQ, 4) sage: X = A.subscheme([x^2 - y*z^2, z - 2*w^2]) sage: Q = A\"\"\"\n <|body_1|>\n\n def multiplicity(self):\n \"\"\"Return the multiplicity of this point on its codomain. Uses the subscheme multiplicity implementation. This point must be a point on an affine subscheme. OUTPUT: an integer. EXAMPLES:: sage: A. = AffineSpace(QQ, 3) sage: X = A.subscheme([y^2 - x^7*z]) sage: Q1 = X([1,1,1]) sage: Q1.multiplicity() 1 sage: Q2 = X([0,0,2]) sage: Q2.multiplicity() 2\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SchemeMorphism_point_affine_field:\n def weil_restriction(self):\n \"\"\"Compute the Weil restriction of this point over some extension field. If the field is a finite field, then this computes the Weil restriction to the prime subfield. A Weil restriction of scalars - denoted `Res_{L/k}` - is a functor which, for any finite extension of fields `L/k` and any algebraic variety `X` over `L`, produces another corresponding variety `Res_{L/k}(X)`, defined over `k`. It is useful for reducing questions about varieties over large fields to questions about more complicated varieties over smaller fields. This functor applied to a point gives the equivalent point on the Weil restriction of its codomain. OUTPUT: Scheme point on the Weil restriction of the codomain of this p\"\"\"\n L = self.codomain().base_ring()\n WR = self.codomain().weil_restriction()\n if L.is_finite():\n d = L.degree()\n if d == 1:\n return self\n newP = []\n for t in self:\n c = t.polynomial().coefficients(sparse=False)\n c = c + (d - len(c)) * [0]\n newP += c\n else:\n d = L.relative_degree()\n if d == 1:\n return self\n from sage.rings.number_field.number_field_element import CoordinateFunction\n v = L.gen()\n V, from_V, to_V = L.relative_vector_space()\n h = L(1)\n B = [to_V(h)]\n f = v.minpoly()\n for i in range(f.degree() - 1):\n h *= v\n B.append(to_V(h))\n W = V.span_of_basis(B)\n p = CoordinateFunction(v, W, to_V)\n newP = []\n for t in self:\n newP += p(t)\n return WR(newP)\n\n def intersection_multiplicity(self, X):\n \"\"\"Return the intersection multiplicity of the codomain of this point and ``X`` at this point. This uses the intersection_multiplicity implementations for projective/affine subschemes. This point must be a point on an affine subscheme. INPUT: - ``X`` -- a subscheme in the same ambient space as that of the codomain of this point. OUTPUT: Integer. EXAMPLES:: sage: A. = AffineSpace(GF(17), 2) sage: X = A.subscheme([y^2 - x^3 + 2*x^2 - x]) sage: Y = A.subscheme([y - 2*x + 2]) sage: Q1 = Y([1,0]) sage: Q1.intersection_multiplicity(X) 2 sage: Q2 = X([4,6]) sage: Q2.intersection_multiplicity(Y) 1 :: sage: A. = AffineSpace(QQ, 4) sage: X = A.subscheme([x^2 - y*z^2, z - 2*w^2]) sage: Q = A\"\"\"\n from sage.schemes.affine.affine_space import is_AffineSpace\n if is_AffineSpace(self.codomain()):\n raise TypeError('this point must be a point on an affine subscheme')\n return self.codomain().intersection_multiplicity(X, self)\n\n def multiplicity(self):\n \"\"\"Return the multiplicity of this point on its codomain. Uses the subscheme multiplicity implementation. 
This point must be a point on an affine subscheme. OUTPUT: an integer. EXAMPLES:: sage: A. = AffineSpace(QQ, 3) sage: X = A.subscheme([y^2 - x^7*z]) sage: Q1 = X([1,1,1]) sage: Q1.multiplicity() 1 sage: Q2 = X([0,0,2]) sage: Q2.multiplicity() 2\"\"\"\n from sage.schemes.affine.affine_space import is_AffineSpace\n if is_AffineSpace(self.codomain()):\n raise TypeError('this point must be a point on an affine subscheme')\n return self.codomain().multiplicity(self)\n", "source": "the_stack_v2_python_sparse", "source_path": "sage/src/sage/schemes/affine/affine_point.py", "source_repo": "bopopescu/geosci", "split": "val", "star_events_count": 0}
{"blob_id": "86b29c124a6d7d98b63778ff019def3f07a52df4", "bodies": ["super(MLPerceptron, self).__init__()\nself.fc1 = nn.Linear(in_features=input_dim, out_features=hidden_dim)\nself.fc2 = nn.Linear(in_features=hidden_dim, out_features=output_dim)", "intermediate_vector = F.relu(self.fc1(x_in))\nprediciton_vector = self.fc2(F.dropout(intermediate_vector, p=0.2))\nif apply_softmax:\n prediciton_vector = F.softmax(prediciton_vector, dim=1)\nreturn prediciton_vector"], "bodies_text": "<|body_start_0|>\n super(MLPerceptron, self).__init__()\n self.fc1 = nn.Linear(in_features=input_dim, out_features=hidden_dim)\n self.fc2 = nn.Linear(in_features=hidden_dim, out_features=output_dim)\n<|end_body_0|>\n\n<|body_start_1|>\n intermediate_vector = F.relu(self.fc1(x_in))\n prediciton_vector = self.fc2(F.dropout(intermediate_vector, p=0.2))\n if apply_softmax:\n prediciton_vector = F.softmax(prediciton_vector, dim=1)\n return prediciton_vector\n<|end_body_1|>\n", "class_docstring": "a simple perceptron based classifier", "class_name": "MLPerceptron", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MLPerceptron:\n \"\"\"a simple perceptron based classifier\"\"\"\n\n def __init__(self, input_dim, hidden_dim, output_dim):\n \"\"\"Args: input_dim (int): the size of the input feature vector hidden_dim (int): the size of the hidden feature vector output_dim (int): the size of the output feature vector\"\"\"\n <|body_0|>\n\n def forward(self, x_in, apply_softmax=False):\n \"\"\"The forward pass of the classifier Args: x_in (torch.Tensor): an input data tensor. x_in.shape should be (batch, input_dim) apply_sigmoid (bool): a flag for the sigmoid activation should be false if used with the Cross Entropy losses Returns: the resulting tensor. tensor.shape should be (batch,)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(MLPerceptron, self).__init__()\n self.fc1 = nn.Linear(in_features=input_dim, out_features=hidden_dim)\n self.fc2 = nn.Linear(in_features=hidden_dim, out_features=output_dim)\n<|end_body_0|>\n\n<|body_start_1|>\n intermediate_vector = F.relu(self.fc1(x_in))\n prediciton_vector = self.fc2(F.dropout(intermediate_vector, p=0.2))\n if apply_softmax:\n prediciton_vector = F.softmax(prediciton_vector, dim=1)\n return prediciton_vector\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000242", "length_bytes": 1428, "license_type": "no_license", "methods": [{"docstring": "Args: input_dim (int): the size of the input feature vector hidden_dim (int): the size of the hidden feature vector output_dim (int): the size of the output feature vector", "name": "__init__", "signature": "def __init__(self, input_dim, hidden_dim, output_dim)"}, {"docstring": "The forward pass of the classifier Args: x_in (torch.Tensor): an input data tensor. x_in.shape should be (batch, input_dim) apply_sigmoid (bool): a flag for the sigmoid activation should be false if used with the Cross Entropy losses Returns: the resulting tensor. 
tensor.shape should be (batch,)", "name": "forward", "signature": "def forward(self, x_in, apply_softmax=False)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_019887", "prompt": "Implement the Python class `MLPerceptron` described below.\n\nClass description:\na simple perceptron based classifier\n\nMethod signatures and docstrings:\n- def __init__(self, input_dim, hidden_dim, output_dim): Args: input_dim (int): the size of the input feature vector hidden_dim (int): the size of the hidden feature vector output_dim (int): the size of the output feature vector\n- def forward(self, x_in, apply_softmax=False): The forward pass of the classifier Args: x_in (torch.Tensor): an input data tensor. x_in.shape should be (batch, input_dim) apply_sigmoid (bool): a flag for the sigmoid activation should be false if used with the Cross Entropy losses Returns: the resulting tensor. tensor.shape should be (batch,)", "prompted_full_text": "Implement the Python class `MLPerceptron` described below.\n\nClass description:\na simple perceptron based classifier\n\nMethod signatures and docstrings:\n- def __init__(self, input_dim, hidden_dim, output_dim): Args: input_dim (int): the size of the input feature vector hidden_dim (int): the size of the hidden feature vector output_dim (int): the size of the output feature vector\n- def forward(self, x_in, apply_softmax=False): The forward pass of the classifier Args: x_in (torch.Tensor): an input data tensor. x_in.shape should be (batch, input_dim) apply_sigmoid (bool): a flag for the sigmoid activation should be false if used with the Cross Entropy losses Returns: the resulting tensor. tensor.shape should be (batch,)\n\n<|skeleton|>\nclass MLPerceptron:\n \"\"\"a simple perceptron based classifier\"\"\"\n\n def __init__(self, input_dim, hidden_dim, output_dim):\n \"\"\"Args: input_dim (int): the size of the input feature vector hidden_dim (int): the size of the hidden feature vector output_dim (int): the size of the output feature vector\"\"\"\n <|body_0|>\n\n def forward(self, x_in, apply_softmax=False):\n \"\"\"The forward pass of the classifier Args: x_in (torch.Tensor): an input data tensor. x_in.shape should be (batch, input_dim) apply_sigmoid (bool): a flag for the sigmoid activation should be false if used with the Cross Entropy losses Returns: the resulting tensor. tensor.shape should be (batch,)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(MLPerceptron, self).__init__()\n self.fc1 = nn.Linear(in_features=input_dim, out_features=hidden_dim)\n self.fc2 = nn.Linear(in_features=hidden_dim, out_features=output_dim)\n<|end_body_0|>\n\n<|body_start_1|>\n intermediate_vector = F.relu(self.fc1(x_in))\n prediciton_vector = self.fc2(F.dropout(intermediate_vector, p=0.2))\n if apply_softmax:\n prediciton_vector = F.softmax(prediciton_vector, dim=1)\n return prediciton_vector\n<|end_body_1|>\n", "revision_id": "0c0c9ffa68e84b1269083d071108a97761ff509d", "skeleton": "<|skeleton|>\nclass MLPerceptron:\n \"\"\"a simple perceptron based classifier\"\"\"\n\n def __init__(self, input_dim, hidden_dim, output_dim):\n \"\"\"Args: input_dim (int): the size of the input feature vector hidden_dim (int): the size of the hidden feature vector output_dim (int): the size of the output feature vector\"\"\"\n <|body_0|>\n\n def forward(self, x_in, apply_softmax=False):\n \"\"\"The forward pass of the classifier Args: x_in (torch.Tensor): an input data tensor. 
x_in.shape should be (batch, input_dim) apply_sigmoid (bool): a flag for the sigmoid activation should be false if used with the Cross Entropy losses Returns: the resulting tensor. tensor.shape should be (batch,)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class MLPerceptron:\n \"\"\"a simple perceptron based classifier\"\"\"\n\n def __init__(self, input_dim, hidden_dim, output_dim):\n \"\"\"Args: input_dim (int): the size of the input feature vector hidden_dim (int): the size of the hidden feature vector output_dim (int): the size of the output feature vector\"\"\"\n super(MLPerceptron, self).__init__()\n self.fc1 = nn.Linear(in_features=input_dim, out_features=hidden_dim)\n self.fc2 = nn.Linear(in_features=hidden_dim, out_features=output_dim)\n\n def forward(self, x_in, apply_softmax=False):\n \"\"\"The forward pass of the classifier Args: x_in (torch.Tensor): an input data tensor. x_in.shape should be (batch, input_dim) apply_sigmoid (bool): a flag for the sigmoid activation should be false if used with the Cross Entropy losses Returns: the resulting tensor. tensor.shape should be (batch,)\"\"\"\n intermediate_vector = F.relu(self.fc1(x_in))\n prediciton_vector = self.fc2(F.dropout(intermediate_vector, p=0.2))\n if apply_softmax:\n prediciton_vector = F.softmax(prediciton_vector, dim=1)\n return prediciton_vector\n", "source": "the_stack_v2_python_sparse", "source_path": "surname_classification/nn_models.py", "source_repo": "badrex/xNLP", "split": "val", "star_events_count": 0}
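The record above embeds a two-layer perceptron whose skeleton, per the dataset format, strips base classes. Below is a minimal, self-contained sketch of the same design under the assumption (not stated in the record) that the class subclasses torch.nn.Module; the misspelled `prediciton_vector` from the source blob is corrected here, and a comment flags where the blob's docstring disagrees with the actual output shape.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class MLPerceptron(nn.Module):
    """A simple two-layer perceptron classifier (sketch of the record above)."""

    def __init__(self, input_dim, hidden_dim, output_dim):
        super().__init__()
        self.fc1 = nn.Linear(input_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, output_dim)

    def forward(self, x_in, apply_softmax=False):
        # The source applies F.dropout unconditionally (p=0.2); F.dropout
        # defaults to training=True, so an nn.Dropout layer would normally be
        # preferable because it respects model.eval().
        intermediate = F.relu(self.fc1(x_in))
        prediction = self.fc2(F.dropout(intermediate, p=0.2))
        if apply_softmax:
            prediction = F.softmax(prediction, dim=1)
        return prediction

# Usage: a batch of 4 feature vectors of size 10 mapped to 3 classes.
model = MLPerceptron(input_dim=10, hidden_dim=32, output_dim=3)
out = model(torch.randn(4, 10), apply_softmax=True)
print(out.shape)  # torch.Size([4, 3]) -- (batch, output_dim), not (batch,) as the blob's docstring claims
```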
{"blob_id": "458090b2507a2a0c3971b26d26739aa775269574", "bodies": ["favs = get_favs(request)\nfavs = favs.filter(tour_operator__pk=self.kwargs.get('operator_pk'))\nif favs.exists():\n for fav in favs.all():\n fav.date_deleted = datetime.today()\n fav.save()\nreturn Response({'status': 'ok', 'count': get_favs_count(request), 'count_to': get_to_favs_count(request)})", "for fav_id in self.request.POST.getlist('favs[]'):\n favs = get_favs(self.request)\n favs = favs.filter(tour_operator__pk=fav_id)\n if favs.exists():\n for fav in favs.all():\n fav.date_deleted = datetime.today()\n fav.save()\nreturn Response({'status': 'ok', 'count': get_favs_count(request), 'count_to': get_to_favs_count(request)})"], "bodies_text": "<|body_start_0|>\n favs = get_favs(request)\n favs = favs.filter(tour_operator__pk=self.kwargs.get('operator_pk'))\n if favs.exists():\n for fav in favs.all():\n fav.date_deleted = datetime.today()\n fav.save()\n return Response({'status': 'ok', 'count': get_favs_count(request), 'count_to': get_to_favs_count(request)})\n<|end_body_0|>\n\n<|body_start_1|>\n for fav_id in self.request.POST.getlist('favs[]'):\n favs = get_favs(self.request)\n favs = favs.filter(tour_operator__pk=fav_id)\n if favs.exists():\n for fav in favs.all():\n fav.date_deleted = datetime.today()\n fav.save()\n return Response({'status': 'ok', 'count': get_favs_count(request), 'count_to': get_to_favs_count(request)})\n<|end_body_1|>\n", "class_docstring": "", "class_name": "DeleteOperatorFavAPIView", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DeleteOperatorFavAPIView:\n\n def get(self, request, *args, **kwargs):\n \"\"\"Delete a Fav set date_deleted = now\"\"\"\n <|body_0|>\n\n def post(self, request, *args, **kwargs):\n \"\"\"Delete several Favs at a time set date_deleted = now\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n favs = get_favs(request)\n favs = favs.filter(tour_operator__pk=self.kwargs.get('operator_pk'))\n if favs.exists():\n for fav in favs.all():\n fav.date_deleted = datetime.today()\n fav.save()\n return Response({'status': 'ok', 'count': get_favs_count(request), 'count_to': get_to_favs_count(request)})\n<|end_body_0|>\n\n<|body_start_1|>\n for fav_id in self.request.POST.getlist('favs[]'):\n favs = get_favs(self.request)\n favs = favs.filter(tour_operator__pk=fav_id)\n if favs.exists():\n for fav in favs.all():\n fav.date_deleted = datetime.today()\n fav.save()\n return Response({'status': 'ok', 'count': get_favs_count(request), 'count_to': get_to_favs_count(request)})\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000243", "length_bytes": 15319, "license_type": "no_license", "methods": [{"docstring": "Delete a Fav set date_deleted = now", "name": "get", "signature": "def get(self, request, *args, **kwargs)"}, {"docstring": "Delete several Favs at a time set date_deleted = now", "name": "post", "signature": "def post(self, request, *args, **kwargs)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_007026", "prompt": "Implement the Python class `DeleteOperatorFavAPIView` described below.\n\nClass description:\nImplement the DeleteOperatorFavAPIView class.\n\nMethod signatures and docstrings:\n- def get(self, request, *args, **kwargs): Delete a Fav set date_deleted = now\n- def post(self, request, *args, **kwargs): Delete several Favs at a time set date_deleted = now", "prompted_full_text": "Implement the Python class `DeleteOperatorFavAPIView` described below.\n\nClass 
description:\nImplement the DeleteOperatorFavAPIView class.\n\nMethod signatures and docstrings:\n- def get(self, request, *args, **kwargs): Delete a Fav set date_deleted = now\n- def post(self, request, *args, **kwargs): Delete several Favs at a time set date_deleted = now\n\n<|skeleton|>\nclass DeleteOperatorFavAPIView:\n\n def get(self, request, *args, **kwargs):\n \"\"\"Delete a Fav set date_deleted = now\"\"\"\n <|body_0|>\n\n def post(self, request, *args, **kwargs):\n \"\"\"Delete several Favs at a time set date_deleted = now\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n favs = get_favs(request)\n favs = favs.filter(tour_operator__pk=self.kwargs.get('operator_pk'))\n if favs.exists():\n for fav in favs.all():\n fav.date_deleted = datetime.today()\n fav.save()\n return Response({'status': 'ok', 'count': get_favs_count(request), 'count_to': get_to_favs_count(request)})\n<|end_body_0|>\n\n<|body_start_1|>\n for fav_id in self.request.POST.getlist('favs[]'):\n favs = get_favs(self.request)\n favs = favs.filter(tour_operator__pk=fav_id)\n if favs.exists():\n for fav in favs.all():\n fav.date_deleted = datetime.today()\n fav.save()\n return Response({'status': 'ok', 'count': get_favs_count(request), 'count_to': get_to_favs_count(request)})\n<|end_body_1|>\n", "revision_id": "8a15fc387d20b12d16c171c2d8928a9b9d4ba5e1", "skeleton": "<|skeleton|>\nclass DeleteOperatorFavAPIView:\n\n def get(self, request, *args, **kwargs):\n \"\"\"Delete a Fav set date_deleted = now\"\"\"\n <|body_0|>\n\n def post(self, request, *args, **kwargs):\n \"\"\"Delete several Favs at a time set date_deleted = now\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DeleteOperatorFavAPIView:\n def get(self, request, *args, **kwargs):\n \"\"\"Delete a Fav set date_deleted = now\"\"\"\n favs = get_favs(request)\n favs = favs.filter(tour_operator__pk=self.kwargs.get('operator_pk'))\n if favs.exists():\n for fav in favs.all():\n fav.date_deleted = datetime.today()\n fav.save()\n return Response({'status': 'ok', 'count': get_favs_count(request), 'count_to': get_to_favs_count(request)})\n\n def post(self, request, *args, **kwargs):\n \"\"\"Delete several Favs at a time set date_deleted = now\"\"\"\n for fav_id in self.request.POST.getlist('favs[]'):\n favs = get_favs(self.request)\n favs = favs.filter(tour_operator__pk=fav_id)\n if favs.exists():\n for fav in favs.all():\n fav.date_deleted = datetime.today()\n fav.save()\n return Response({'status': 'ok', 'count': get_favs_count(request), 'count_to': get_to_favs_count(request)})\n", "source": "the_stack_v2_python_sparse", "source_path": "users/views.py", "source_repo": "montenegrop/djangotravelportal", "split": "val", "star_events_count": 0}
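The record above revolves around a soft-delete pattern: rows are never removed, only stamped with date_deleted. The Fav model and the get_favs/get_favs_count helpers are project internals the record assumes but never defines, so the stand-ins below are hypothetical; the sketch only demonstrates the stamping loop. Note the source uses datetime.today() — Django projects usually prefer django.utils.timezone.now() so the timestamp is timezone-aware.

```python
from datetime import datetime

class FakeFav:
    """Hypothetical stand-in for the Fav model row in the record above."""
    def __init__(self, operator_pk):
        self.tour_operator_pk = operator_pk
        self.date_deleted = None

    def save(self):
        pass  # a real Django model would persist the row here

def soft_delete(favs):
    """Mark each fav as deleted instead of removing the row."""
    for fav in favs:
        fav.date_deleted = datetime.today()
        fav.save()

favs = [FakeFav(operator_pk=7), FakeFav(operator_pk=7)]
soft_delete(favs)
print(all(f.date_deleted is not None for f in favs))  # True
```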
{"blob_id": "536ba8a99d456debef57b6f369044f506762cfbf", "bodies": ["context = {'updating': False}\ndata = self._parse_data(self._get_schema_instance(kwargs, context=context), flask.request)\ndata.pop('id', None)\ncreated = self._perform_create(data, **kwargs)\ncreated.creator = g.user\ndb.session.commit()\nreturn (self._dump(created, kwargs), 201)", "obj = self.model_class(**data)\ntry:\n db.session.add(obj)\n db.session.commit()\nexcept sqlalchemy.exc.IntegrityError as ex:\n if not is_unique_constraint_violation(ex):\n raise\n db.session.rollback()\n conflict_obj = get_conflict_object(db.session, obj, data)\n if conflict_obj:\n flask.abort(409, ValidationError({'message': 'Existing value', 'object': self._get_schema_class()().dump(conflict_obj).data}))\n else:\n raise\nreturn obj"], "bodies_text": "<|body_start_0|>\n context = {'updating': False}\n data = self._parse_data(self._get_schema_instance(kwargs, context=context), flask.request)\n data.pop('id', None)\n created = self._perform_create(data, **kwargs)\n created.creator = g.user\n db.session.commit()\n return (self._dump(created, kwargs), 201)\n<|end_body_0|>\n\n<|body_start_1|>\n obj = self.model_class(**data)\n try:\n db.session.add(obj)\n db.session.commit()\n except sqlalchemy.exc.IntegrityError as ex:\n if not is_unique_constraint_violation(ex):\n raise\n db.session.rollback()\n conflict_obj = get_conflict_object(db.session, obj, data)\n if conflict_obj:\n flask.abort(409, ValidationError({'message': 'Existing value', 'object': self._get_schema_class()().dump(conflict_obj).data}))\n else:\n raise\n return obj\n<|end_body_1|>\n", "class_docstring": "Add POST / route", "class_name": "CreateMixin", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CreateMixin:\n \"\"\"Add POST / route\"\"\"\n\n def post(self, **kwargs):\n \"\"\"--- tags: [\"{tag_name}\"] summary: Creates {class_model} requestBody: required: true content: application/json: schema: {schema_class} responses: 201: description: Created content: application/json: schema: {schema_class} 409: description: Duplicated key found content: application/json: schema: {schema_class}\"\"\"\n <|body_0|>\n\n def _perform_create(self, data, **kwargs):\n \"\"\"Check for conflicts and create a new object Is is passed the data parsed by the marshmallow schema (it transform from raw post data to a JSON)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context = {'updating': False}\n data = self._parse_data(self._get_schema_instance(kwargs, context=context), flask.request)\n data.pop('id', None)\n created = self._perform_create(data, **kwargs)\n created.creator = g.user\n db.session.commit()\n return (self._dump(created, kwargs), 201)\n<|end_body_0|>\n\n<|body_start_1|>\n obj = self.model_class(**data)\n try:\n db.session.add(obj)\n db.session.commit()\n except sqlalchemy.exc.IntegrityError as ex:\n if not is_unique_constraint_violation(ex):\n raise\n db.session.rollback()\n conflict_obj = get_conflict_object(db.session, obj, data)\n if conflict_obj:\n flask.abort(409, ValidationError({'message': 'Existing value', 'object': self._get_schema_class()().dump(conflict_obj).data}))\n else:\n raise\n return obj\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000244", "length_bytes": 45466, "license_type": "no_license", "methods": [{"docstring": "--- tags: [\"{tag_name}\"] summary: Creates {class_model} requestBody: required: true content: application/json: schema: {schema_class} responses: 201: 
description: Created content: application/json: schema: {schema_class} 409: description: Duplicated key found content: application/json: schema: {schema_class}", "name": "post", "signature": "def post(self, **kwargs)"}, {"docstring": "Check for conflicts and create a new object Is is passed the data parsed by the marshmallow schema (it transform from raw post data to a JSON)", "name": "_perform_create", "signature": "def _perform_create(self, data, **kwargs)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_009710", "prompt": "Implement the Python class `CreateMixin` described below.\n\nClass description:\nAdd POST / route\n\nMethod signatures and docstrings:\n- def post(self, **kwargs): --- tags: [\"{tag_name}\"] summary: Creates {class_model} requestBody: required: true content: application/json: schema: {schema_class} responses: 201: description: Created content: application/json: schema: {schema_class} 409: description: Duplicated key found content: application/json: schema: {schema_class}\n- def _perform_create(self, data, **kwargs): Check for conflicts and create a new object Is is passed the data parsed by the marshmallow schema (it transform from raw post data to a JSON)", "prompted_full_text": "Implement the Python class `CreateMixin` described below.\n\nClass description:\nAdd POST / route\n\nMethod signatures and docstrings:\n- def post(self, **kwargs): --- tags: [\"{tag_name}\"] summary: Creates {class_model} requestBody: required: true content: application/json: schema: {schema_class} responses: 201: description: Created content: application/json: schema: {schema_class} 409: description: Duplicated key found content: application/json: schema: {schema_class}\n- def _perform_create(self, data, **kwargs): Check for conflicts and create a new object Is is passed the data parsed by the marshmallow schema (it transform from raw post data to a JSON)\n\n<|skeleton|>\nclass CreateMixin:\n \"\"\"Add POST / route\"\"\"\n\n def post(self, **kwargs):\n \"\"\"--- tags: [\"{tag_name}\"] summary: Creates {class_model} requestBody: required: true content: application/json: schema: {schema_class} responses: 201: description: Created content: application/json: schema: {schema_class} 409: description: Duplicated key found content: application/json: schema: {schema_class}\"\"\"\n <|body_0|>\n\n def _perform_create(self, data, **kwargs):\n \"\"\"Check for conflicts and create a new object Is is passed the data parsed by the marshmallow schema (it transform from raw post data to a JSON)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context = {'updating': False}\n data = self._parse_data(self._get_schema_instance(kwargs, context=context), flask.request)\n data.pop('id', None)\n created = self._perform_create(data, **kwargs)\n created.creator = g.user\n db.session.commit()\n return (self._dump(created, kwargs), 201)\n<|end_body_0|>\n\n<|body_start_1|>\n obj = self.model_class(**data)\n try:\n db.session.add(obj)\n db.session.commit()\n except sqlalchemy.exc.IntegrityError as ex:\n if not is_unique_constraint_violation(ex):\n raise\n db.session.rollback()\n conflict_obj = get_conflict_object(db.session, obj, data)\n if conflict_obj:\n flask.abort(409, ValidationError({'message': 'Existing value', 'object': self._get_schema_class()().dump(conflict_obj).data}))\n else:\n raise\n return obj\n<|end_body_1|>\n", "revision_id": "1bde0faae4a20c36b0a568e95e00fb517c646f81", "skeleton": "<|skeleton|>\nclass CreateMixin:\n \"\"\"Add POST / route\"\"\"\n\n def post(self, 
**kwargs):\n \"\"\"--- tags: [\"{tag_name}\"] summary: Creates {class_model} requestBody: required: true content: application/json: schema: {schema_class} responses: 201: description: Created content: application/json: schema: {schema_class} 409: description: Duplicated key found content: application/json: schema: {schema_class}\"\"\"\n <|body_0|>\n\n def _perform_create(self, data, **kwargs):\n \"\"\"Check for conflicts and create a new object Is is passed the data parsed by the marshmallow schema (it transform from raw post data to a JSON)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CreateMixin:\n \"\"\"Add POST / route\"\"\"\n\n def post(self, **kwargs):\n \"\"\"--- tags: [\"{tag_name}\"] summary: Creates {class_model} requestBody: required: true content: application/json: schema: {schema_class} responses: 201: description: Created content: application/json: schema: {schema_class} 409: description: Duplicated key found content: application/json: schema: {schema_class}\"\"\"\n context = {'updating': False}\n data = self._parse_data(self._get_schema_instance(kwargs, context=context), flask.request)\n data.pop('id', None)\n created = self._perform_create(data, **kwargs)\n created.creator = g.user\n db.session.commit()\n return (self._dump(created, kwargs), 201)\n\n def _perform_create(self, data, **kwargs):\n \"\"\"Check for conflicts and create a new object Is is passed the data parsed by the marshmallow schema (it transform from raw post data to a JSON)\"\"\"\n obj = self.model_class(**data)\n try:\n db.session.add(obj)\n db.session.commit()\n except sqlalchemy.exc.IntegrityError as ex:\n if not is_unique_constraint_violation(ex):\n raise\n db.session.rollback()\n conflict_obj = get_conflict_object(db.session, obj, data)\n if conflict_obj:\n flask.abort(409, ValidationError({'message': 'Existing value', 'object': self._get_schema_class()().dump(conflict_obj).data}))\n else:\n raise\n return obj\n", "source": "the_stack_v2_python_sparse", "source_path": "faraday/server/api/base.py", "source_repo": "zxc135781/faraday", "split": "val", "star_events_count": 1}
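The interesting part of the CreateMixin record above is its conflict handling: commit, and if a unique-constraint IntegrityError fires, roll back and answer HTTP 409 instead of crashing. The sketch below reduces that control flow to plain Python so it runs standalone; is_unique_constraint_violation and the session are hypothetical placeholders for the faraday project's SQLAlchemy helpers.

```python
class IntegrityError(Exception):
    """Stand-in for sqlalchemy.exc.IntegrityError."""

def is_unique_constraint_violation(ex):
    return "unique" in str(ex)  # placeholder heuristic; the real helper inspects the DB error

class FakeSession:
    """Hypothetical in-memory stand-in for db.session."""
    def __init__(self):
        self.rows = set()
        self.pending = None

    def add(self, obj):
        self.pending = obj

    def commit(self):
        if self.pending in self.rows:
            raise IntegrityError("unique constraint violated")
        self.rows.add(self.pending)
        self.pending = None

    def rollback(self):
        self.pending = None

def perform_create(session, obj):
    try:
        session.add(obj)
        session.commit()
    except IntegrityError as ex:
        if not is_unique_constraint_violation(ex):
            raise                 # unrelated integrity problem: re-raise
        session.rollback()        # leave the session usable for the 409 response
        return ("conflict", 409)  # the source flask.abort()s with the conflicting object
    return (obj, 201)

session = FakeSession()
print(perform_create(session, "alice"))  # ('alice', 201)
print(perform_create(session, "alice"))  # ('conflict', 409)
```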
{"blob_id": "87a368408756c0dfec2f5f4fd0813045dbd19d0b", "bodies": ["self.num_units = num_units\nself.layer_norm = layer_norm\nself.recurrent_dropout = recurrent_dropout\nself.leak_factor = leak_factor", "with tf.variable_scope(scope or type(self).__name__):\n lstm_cell_fw = rnn_cell.LayerNormBasicLeakchLSTMCell(num_units=self.num_units, leak_factor=self.leak_factor, layer_norm=self.layer_norm, dropout_keep_prob=self.recurrent_dropout, reuse=tf.get_variable_scope().reuse)\n lstm_cell_bw = rnn_cell.LayerNormBasicLeakchLSTMCell(self.num_units, leak_factor=self.leak_factor, layer_norm=self.layer_norm, dropout_keep_prob=self.recurrent_dropout, reuse=tf.get_variable_scope().reuse)\n outputs_tupple, _ = bidirectional_dynamic_rnn(lstm_cell_fw, lstm_cell_bw, inputs, dtype=tf.float32, sequence_length=sequence_length)\n outputs = tf.concat(outputs_tupple, 2)\n return outputs"], "bodies_text": "<|body_start_0|>\n self.num_units = num_units\n self.layer_norm = layer_norm\n self.recurrent_dropout = recurrent_dropout\n self.leak_factor = leak_factor\n<|end_body_0|>\n\n<|body_start_1|>\n with tf.variable_scope(scope or type(self).__name__):\n lstm_cell_fw = rnn_cell.LayerNormBasicLeakchLSTMCell(num_units=self.num_units, leak_factor=self.leak_factor, layer_norm=self.layer_norm, dropout_keep_prob=self.recurrent_dropout, reuse=tf.get_variable_scope().reuse)\n lstm_cell_bw = rnn_cell.LayerNormBasicLeakchLSTMCell(self.num_units, leak_factor=self.leak_factor, layer_norm=self.layer_norm, dropout_keep_prob=self.recurrent_dropout, reuse=tf.get_variable_scope().reuse)\n outputs_tupple, _ = bidirectional_dynamic_rnn(lstm_cell_fw, lstm_cell_bw, inputs, dtype=tf.float32, sequence_length=sequence_length)\n outputs = tf.concat(outputs_tupple, 2)\n return outputs\n<|end_body_1|>\n", "class_docstring": "a leaky ch BLSTM layer", "class_name": "LeakychBLSTMLayer", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LeakychBLSTMLayer:\n \"\"\"a leaky ch BLSTM layer\"\"\"\n\n def __init__(self, num_units, layer_norm=False, recurrent_dropout=1.0, leak_factor=1.0):\n \"\"\"LeakyBLSTMLayer constructor Args: num_units: The number of units in the one directon layer_norm: whether layer normalization should be applied recurrent_dropout: the recurrent dropout keep probability leak_factor: the leak factor (if 1, there is no leakage)\"\"\"\n <|body_0|>\n\n def __call__(self, inputs, sequence_length, scope=None):\n \"\"\"Create the variables and do the forward computation Args: inputs: the input to the layer as a [batch_size, max_length, dim] tensor sequence_length: the length of the input sequences as a [batch_size] tensor scope: The variable scope sets the namespace under which the variables created during this call will be stored. 
Returns: the output of the layer\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.num_units = num_units\n self.layer_norm = layer_norm\n self.recurrent_dropout = recurrent_dropout\n self.leak_factor = leak_factor\n<|end_body_0|>\n\n<|body_start_1|>\n with tf.variable_scope(scope or type(self).__name__):\n lstm_cell_fw = rnn_cell.LayerNormBasicLeakchLSTMCell(num_units=self.num_units, leak_factor=self.leak_factor, layer_norm=self.layer_norm, dropout_keep_prob=self.recurrent_dropout, reuse=tf.get_variable_scope().reuse)\n lstm_cell_bw = rnn_cell.LayerNormBasicLeakchLSTMCell(self.num_units, leak_factor=self.leak_factor, layer_norm=self.layer_norm, dropout_keep_prob=self.recurrent_dropout, reuse=tf.get_variable_scope().reuse)\n outputs_tupple, _ = bidirectional_dynamic_rnn(lstm_cell_fw, lstm_cell_bw, inputs, dtype=tf.float32, sequence_length=sequence_length)\n outputs = tf.concat(outputs_tupple, 2)\n return outputs\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000245", "length_bytes": 49091, "license_type": "permissive", "methods": [{"docstring": "LeakyBLSTMLayer constructor Args: num_units: The number of units in the one directon layer_norm: whether layer normalization should be applied recurrent_dropout: the recurrent dropout keep probability leak_factor: the leak factor (if 1, there is no leakage)", "name": "__init__", "signature": "def __init__(self, num_units, layer_norm=False, recurrent_dropout=1.0, leak_factor=1.0)"}, {"docstring": "Create the variables and do the forward computation Args: inputs: the input to the layer as a [batch_size, max_length, dim] tensor sequence_length: the length of the input sequences as a [batch_size] tensor scope: The variable scope sets the namespace under which the variables created during this call will be stored. Returns: the output of the layer", "name": "__call__", "signature": "def __call__(self, inputs, sequence_length, scope=None)"}], "n_methods": 2, "prompt": "Implement the Python class `LeakychBLSTMLayer` described below.\n\nClass description:\na leaky ch BLSTM layer\n\nMethod signatures and docstrings:\n- def __init__(self, num_units, layer_norm=False, recurrent_dropout=1.0, leak_factor=1.0): LeakyBLSTMLayer constructor Args: num_units: The number of units in the one directon layer_norm: whether layer normalization should be applied recurrent_dropout: the recurrent dropout keep probability leak_factor: the leak factor (if 1, there is no leakage)\n- def __call__(self, inputs, sequence_length, scope=None): Create the variables and do the forward computation Args: inputs: the input to the layer as a [batch_size, max_length, dim] tensor sequence_length: the length of the input sequences as a [batch_size] tensor scope: The variable scope sets the namespace under which the variables created during this call will be stored. 
Returns: the output of the layer", "prompted_full_text": "Implement the Python class `LeakychBLSTMLayer` described below.\n\nClass description:\na leaky ch BLSTM layer\n\nMethod signatures and docstrings:\n- def __init__(self, num_units, layer_norm=False, recurrent_dropout=1.0, leak_factor=1.0): LeakyBLSTMLayer constructor Args: num_units: The number of units in the one directon layer_norm: whether layer normalization should be applied recurrent_dropout: the recurrent dropout keep probability leak_factor: the leak factor (if 1, there is no leakage)\n- def __call__(self, inputs, sequence_length, scope=None): Create the variables and do the forward computation Args: inputs: the input to the layer as a [batch_size, max_length, dim] tensor sequence_length: the length of the input sequences as a [batch_size] tensor scope: The variable scope sets the namespace under which the variables created during this call will be stored. Returns: the output of the layer\n\n<|skeleton|>\nclass LeakychBLSTMLayer:\n \"\"\"a leaky ch BLSTM layer\"\"\"\n\n def __init__(self, num_units, layer_norm=False, recurrent_dropout=1.0, leak_factor=1.0):\n \"\"\"LeakyBLSTMLayer constructor Args: num_units: The number of units in the one directon layer_norm: whether layer normalization should be applied recurrent_dropout: the recurrent dropout keep probability leak_factor: the leak factor (if 1, there is no leakage)\"\"\"\n <|body_0|>\n\n def __call__(self, inputs, sequence_length, scope=None):\n \"\"\"Create the variables and do the forward computation Args: inputs: the input to the layer as a [batch_size, max_length, dim] tensor sequence_length: the length of the input sequences as a [batch_size] tensor scope: The variable scope sets the namespace under which the variables created during this call will be stored. 
Returns: the output of the layer\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.num_units = num_units\n self.layer_norm = layer_norm\n self.recurrent_dropout = recurrent_dropout\n self.leak_factor = leak_factor\n<|end_body_0|>\n\n<|body_start_1|>\n with tf.variable_scope(scope or type(self).__name__):\n lstm_cell_fw = rnn_cell.LayerNormBasicLeakchLSTMCell(num_units=self.num_units, leak_factor=self.leak_factor, layer_norm=self.layer_norm, dropout_keep_prob=self.recurrent_dropout, reuse=tf.get_variable_scope().reuse)\n lstm_cell_bw = rnn_cell.LayerNormBasicLeakchLSTMCell(self.num_units, leak_factor=self.leak_factor, layer_norm=self.layer_norm, dropout_keep_prob=self.recurrent_dropout, reuse=tf.get_variable_scope().reuse)\n outputs_tupple, _ = bidirectional_dynamic_rnn(lstm_cell_fw, lstm_cell_bw, inputs, dtype=tf.float32, sequence_length=sequence_length)\n outputs = tf.concat(outputs_tupple, 2)\n return outputs\n<|end_body_1|>\n", "revision_id": "5e862cbf846d45b8a317f87588533f3fde9f0726", "skeleton": "<|skeleton|>\nclass LeakychBLSTMLayer:\n \"\"\"a leaky ch BLSTM layer\"\"\"\n\n def __init__(self, num_units, layer_norm=False, recurrent_dropout=1.0, leak_factor=1.0):\n \"\"\"LeakyBLSTMLayer constructor Args: num_units: The number of units in the one directon layer_norm: whether layer normalization should be applied recurrent_dropout: the recurrent dropout keep probability leak_factor: the leak factor (if 1, there is no leakage)\"\"\"\n <|body_0|>\n\n def __call__(self, inputs, sequence_length, scope=None):\n \"\"\"Create the variables and do the forward computation Args: inputs: the input to the layer as a [batch_size, max_length, dim] tensor sequence_length: the length of the input sequences as a [batch_size] tensor scope: The variable scope sets the namespace under which the variables created during this call will be stored. Returns: the output of the layer\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class LeakychBLSTMLayer:\n \"\"\"a leaky ch BLSTM layer\"\"\"\n\n def __init__(self, num_units, layer_norm=False, recurrent_dropout=1.0, leak_factor=1.0):\n \"\"\"LeakyBLSTMLayer constructor Args: num_units: The number of units in the one directon layer_norm: whether layer normalization should be applied recurrent_dropout: the recurrent dropout keep probability leak_factor: the leak factor (if 1, there is no leakage)\"\"\"\n self.num_units = num_units\n self.layer_norm = layer_norm\n self.recurrent_dropout = recurrent_dropout\n self.leak_factor = leak_factor\n\n def __call__(self, inputs, sequence_length, scope=None):\n \"\"\"Create the variables and do the forward computation Args: inputs: the input to the layer as a [batch_size, max_length, dim] tensor sequence_length: the length of the input sequences as a [batch_size] tensor scope: The variable scope sets the namespace under which the variables created during this call will be stored. 
Returns: the output of the layer\"\"\"\n with tf.variable_scope(scope or type(self).__name__):\n lstm_cell_fw = rnn_cell.LayerNormBasicLeakchLSTMCell(num_units=self.num_units, leak_factor=self.leak_factor, layer_norm=self.layer_norm, dropout_keep_prob=self.recurrent_dropout, reuse=tf.get_variable_scope().reuse)\n lstm_cell_bw = rnn_cell.LayerNormBasicLeakchLSTMCell(self.num_units, leak_factor=self.leak_factor, layer_norm=self.layer_norm, dropout_keep_prob=self.recurrent_dropout, reuse=tf.get_variable_scope().reuse)\n outputs_tupple, _ = bidirectional_dynamic_rnn(lstm_cell_fw, lstm_cell_bw, inputs, dtype=tf.float32, sequence_length=sequence_length)\n outputs = tf.concat(outputs_tupple, 2)\n return outputs\n", "source": "the_stack_v2_python_sparse", "source_path": "nabu/neuralnetworks/components/layer.py", "source_repo": "JeroenZegers/Nabu-MSSS", "split": "val", "star_events_count": 19}
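The record above targets TF1 (tf.variable_scope, bidirectional_dynamic_rnn) and a custom LayerNormBasicLeakchLSTMCell from the Nabu-MSSS project, so it cannot be run verbatim on a current TensorFlow. The sketch below substitutes a stock Keras LSTM purely to show the shape contract the record relies on: forward and backward outputs are concatenated on the feature axis, so the output width is 2 * num_units.

```python
import tensorflow as tf

num_units = 16
layer = tf.keras.layers.Bidirectional(
    tf.keras.layers.LSTM(num_units, return_sequences=True),
    merge_mode="concat",  # mirrors tf.concat(outputs_tupple, 2) in the record
)

x = tf.random.normal([4, 10, 8])  # [batch_size, max_length, dim]
y = layer(x)
print(y.shape)                    # (4, 10, 32) == [batch, time, 2 * num_units]
```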
{"blob_id": "b607cab0befa0d2d17fe31950f60a2bfa01ba1e9", "bodies": ["self.sensor = Sensor('http://127.0.0.1', '8000')\nself.pump = Pump('http://127.0.0.1', '8000')\nself.decider = Decider(100, 0.05)\nself.controller = Controller(self.sensor, self.pump, self.decider)\nself.actions = {'PUMP_IN': self.pump.PUMP_IN, 'PUMP_OUT': self.pump.PUMP_OUT, 'PUMP_OFF': self.pump.PUMP_OFF}", "self.sensor.measure = MagicMock()\nself.pump.get_state = MagicMock()\nself.pump.set_state = MagicMock(return_value=True)\nself.assertTrue(self.controller.tick())", "self.sensor.measure = MagicMock()\nself.pump.get_state = MagicMock()\nself.pump.set_state = MagicMock(return_value=False)\nself.assertFalse(self.controller.tick())"], "bodies_text": "<|body_start_0|>\n self.sensor = Sensor('http://127.0.0.1', '8000')\n self.pump = Pump('http://127.0.0.1', '8000')\n self.decider = Decider(100, 0.05)\n self.controller = Controller(self.sensor, self.pump, self.decider)\n self.actions = {'PUMP_IN': self.pump.PUMP_IN, 'PUMP_OUT': self.pump.PUMP_OUT, 'PUMP_OFF': self.pump.PUMP_OFF}\n<|end_body_0|>\n\n<|body_start_1|>\n self.sensor.measure = MagicMock()\n self.pump.get_state = MagicMock()\n self.pump.set_state = MagicMock(return_value=True)\n self.assertTrue(self.controller.tick())\n<|end_body_1|>\n\n<|body_start_2|>\n self.sensor.measure = MagicMock()\n self.pump.get_state = MagicMock()\n self.pump.set_state = MagicMock(return_value=False)\n self.assertFalse(self.controller.tick())\n<|end_body_2|>\n", "class_docstring": "Module tests for the water-regulation module", "class_name": "ModuleTests", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ModuleTests:\n \"\"\"Module tests for the water-regulation module\"\"\"\n\n def setUp(self):\n \"\"\"Run each time before any test method\"\"\"\n <|body_0|>\n\n def test_module_true(self):\n \"\"\"True state: Write an integration test that combines controller and decider, using a MOCKED sensor and pump.\"\"\"\n <|body_1|>\n\n def test_module_false(self):\n \"\"\"False state: Write an integration test that combines controller and decider, using a MOCKED sensor and pump.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.sensor = Sensor('http://127.0.0.1', '8000')\n self.pump = Pump('http://127.0.0.1', '8000')\n self.decider = Decider(100, 0.05)\n self.controller = Controller(self.sensor, self.pump, self.decider)\n self.actions = {'PUMP_IN': self.pump.PUMP_IN, 'PUMP_OUT': self.pump.PUMP_OUT, 'PUMP_OFF': self.pump.PUMP_OFF}\n<|end_body_0|>\n\n<|body_start_1|>\n self.sensor.measure = MagicMock()\n self.pump.get_state = MagicMock()\n self.pump.set_state = MagicMock(return_value=True)\n self.assertTrue(self.controller.tick())\n<|end_body_1|>\n\n<|body_start_2|>\n self.sensor.measure = MagicMock()\n self.pump.get_state = MagicMock()\n self.pump.set_state = MagicMock(return_value=False)\n self.assertFalse(self.controller.tick())\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000246", "length_bytes": 1573, "license_type": "no_license", "methods": [{"docstring": "Run each time before any test method", "name": "setUp", "signature": "def setUp(self)"}, {"docstring": "True state: Write an integration test that combines controller and decider, using a MOCKED sensor and pump.", "name": "test_module_true", "signature": "def test_module_true(self)"}, {"docstring": "False state: Write an integration test that combines controller and decider, using a MOCKED sensor and pump.", "name": "test_module_false", 
"signature": "def test_module_false(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_052521", "prompt": "Implement the Python class `ModuleTests` described below.\n\nClass description:\nModule tests for the water-regulation module\n\nMethod signatures and docstrings:\n- def setUp(self): Run each time before any test method\n- def test_module_true(self): True state: Write an integration test that combines controller and decider, using a MOCKED sensor and pump.\n- def test_module_false(self): False state: Write an integration test that combines controller and decider, using a MOCKED sensor and pump.", "prompted_full_text": "Implement the Python class `ModuleTests` described below.\n\nClass description:\nModule tests for the water-regulation module\n\nMethod signatures and docstrings:\n- def setUp(self): Run each time before any test method\n- def test_module_true(self): True state: Write an integration test that combines controller and decider, using a MOCKED sensor and pump.\n- def test_module_false(self): False state: Write an integration test that combines controller and decider, using a MOCKED sensor and pump.\n\n<|skeleton|>\nclass ModuleTests:\n \"\"\"Module tests for the water-regulation module\"\"\"\n\n def setUp(self):\n \"\"\"Run each time before any test method\"\"\"\n <|body_0|>\n\n def test_module_true(self):\n \"\"\"True state: Write an integration test that combines controller and decider, using a MOCKED sensor and pump.\"\"\"\n <|body_1|>\n\n def test_module_false(self):\n \"\"\"False state: Write an integration test that combines controller and decider, using a MOCKED sensor and pump.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.sensor = Sensor('http://127.0.0.1', '8000')\n self.pump = Pump('http://127.0.0.1', '8000')\n self.decider = Decider(100, 0.05)\n self.controller = Controller(self.sensor, self.pump, self.decider)\n self.actions = {'PUMP_IN': self.pump.PUMP_IN, 'PUMP_OUT': self.pump.PUMP_OUT, 'PUMP_OFF': self.pump.PUMP_OFF}\n<|end_body_0|>\n\n<|body_start_1|>\n self.sensor.measure = MagicMock()\n self.pump.get_state = MagicMock()\n self.pump.set_state = MagicMock(return_value=True)\n self.assertTrue(self.controller.tick())\n<|end_body_1|>\n\n<|body_start_2|>\n self.sensor.measure = MagicMock()\n self.pump.get_state = MagicMock()\n self.pump.set_state = MagicMock(return_value=False)\n self.assertFalse(self.controller.tick())\n<|end_body_2|>\n", "revision_id": "263685ca90110609bfd05d621516727f8cd0028f", "skeleton": "<|skeleton|>\nclass ModuleTests:\n \"\"\"Module tests for the water-regulation module\"\"\"\n\n def setUp(self):\n \"\"\"Run each time before any test method\"\"\"\n <|body_0|>\n\n def test_module_true(self):\n \"\"\"True state: Write an integration test that combines controller and decider, using a MOCKED sensor and pump.\"\"\"\n <|body_1|>\n\n def test_module_false(self):\n \"\"\"False state: Write an integration test that combines controller and decider, using a MOCKED sensor and pump.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ModuleTests:\n \"\"\"Module tests for the water-regulation module\"\"\"\n\n def setUp(self):\n \"\"\"Run each time before any test method\"\"\"\n self.sensor = Sensor('http://127.0.0.1', '8000')\n self.pump = Pump('http://127.0.0.1', '8000')\n self.decider = Decider(100, 0.05)\n self.controller = Controller(self.sensor, 
self.pump, self.decider)\n self.actions = {'PUMP_IN': self.pump.PUMP_IN, 'PUMP_OUT': self.pump.PUMP_OUT, 'PUMP_OFF': self.pump.PUMP_OFF}\n\n def test_module_true(self):\n \"\"\"True state: Write an integration test that combines controller and decider, using a MOCKED sensor and pump.\"\"\"\n self.sensor.measure = MagicMock()\n self.pump.get_state = MagicMock()\n self.pump.set_state = MagicMock(return_value=True)\n self.assertTrue(self.controller.tick())\n\n def test_module_false(self):\n \"\"\"False state: Write an integration test that combines controller and decider, using a MOCKED sensor and pump.\"\"\"\n self.sensor.measure = MagicMock()\n self.pump.get_state = MagicMock()\n self.pump.set_state = MagicMock(return_value=False)\n self.assertFalse(self.controller.tick())\n", "source": "the_stack_v2_python_sparse", "source_path": "students/ScottL/lesson06/water-regulation-master/waterregulation/integrationtest.py", "source_repo": "aurel1212/Sp2018-Online", "split": "val", "star_events_count": 0}
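The ModuleTests record above exercises a controller against MagicMock-ed hardware so no HTTP calls are made. The runnable sketch below reproduces that pattern end to end; Controller here is a hypothetical stand-in with the same tick() contract as the course's water-regulation exercise, which the record imports but does not show.

```python
import unittest
from unittest.mock import MagicMock

class Controller:
    """Hypothetical stand-in mirroring the water-regulation Controller's tick() contract."""
    def __init__(self, sensor, pump, decider):
        self.sensor, self.pump, self.decider = sensor, pump, decider

    def tick(self):
        level = self.sensor.measure()            # mocked: no HTTP request happens
        state = self.pump.get_state()            # mocked likewise
        return self.pump.set_state(self.decider.decide(level, state))

class ModuleTests(unittest.TestCase):
    def test_tick_reports_pump_result(self):
        sensor, pump, decider = MagicMock(), MagicMock(), MagicMock()
        pump.set_state = MagicMock(return_value=True)   # force the success path
        self.assertTrue(Controller(sensor, pump, decider).tick())

if __name__ == "__main__":
    unittest.main()
```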
{"blob_id": "18b1993b3bdf4a5ec065c9be107b3b7436bf6b33", "bodies": ["super().__init__()\nself._in_channel = in_channel\nself._out_channel = out_channel\nself._spatial_dims = spatial_dims\nif self._spatial_dims not in (2, 3):\n raise ValueError('spatial_dims must be 2 or 3.')\nconv_type = Conv[Conv.CONV, self._spatial_dims]\nself.act = get_act_layer(name=act_name)\nself.conv_1 = conv_type(in_channels=self._in_channel, out_channels=self._out_channel // 2, kernel_size=1, stride=2, padding=0, groups=1, bias=False, dilation=1)\nself.conv_2 = conv_type(in_channels=self._in_channel, out_channels=self._out_channel - self._out_channel // 2, kernel_size=1, stride=2, padding=0, groups=1, bias=False, dilation=1)\nself.norm = get_norm_layer(name=norm_name, spatial_dims=self._spatial_dims, channels=self._out_channel)", "x = self.act(x)\nif self._spatial_dims == 3:\n out = torch.cat([self.conv_1(x), self.conv_2(x[:, :, 1:, 1:, 1:])], dim=1)\nelse:\n out = torch.cat([self.conv_1(x), self.conv_2(x[:, :, 1:, 1:])], dim=1)\nout = self.norm(out)\nreturn out"], "bodies_text": "<|body_start_0|>\n super().__init__()\n self._in_channel = in_channel\n self._out_channel = out_channel\n self._spatial_dims = spatial_dims\n if self._spatial_dims not in (2, 3):\n raise ValueError('spatial_dims must be 2 or 3.')\n conv_type = Conv[Conv.CONV, self._spatial_dims]\n self.act = get_act_layer(name=act_name)\n self.conv_1 = conv_type(in_channels=self._in_channel, out_channels=self._out_channel // 2, kernel_size=1, stride=2, padding=0, groups=1, bias=False, dilation=1)\n self.conv_2 = conv_type(in_channels=self._in_channel, out_channels=self._out_channel - self._out_channel // 2, kernel_size=1, stride=2, padding=0, groups=1, bias=False, dilation=1)\n self.norm = get_norm_layer(name=norm_name, spatial_dims=self._spatial_dims, channels=self._out_channel)\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.act(x)\n if self._spatial_dims == 3:\n out = torch.cat([self.conv_1(x), self.conv_2(x[:, :, 1:, 1:, 1:])], dim=1)\n else:\n out = torch.cat([self.conv_1(x), self.conv_2(x[:, :, 1:, 1:])], dim=1)\n out = self.norm(out)\n return out\n<|end_body_1|>\n", "class_docstring": "Down-sampling the feature by 2 using stride. The length along each spatial dimension must be a multiple of 2.", "class_name": "FactorizedReduceBlock", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FactorizedReduceBlock:\n \"\"\"Down-sampling the feature by 2 using stride. The length along each spatial dimension must be a multiple of 2.\"\"\"\n\n def __init__(self, in_channel: int, out_channel: int, spatial_dims: int=3, act_name: tuple | str='RELU', norm_name: tuple | str=('INSTANCE', {'affine': True})):\n \"\"\"Args: in_channel: number of input channels out_channel: number of output channels. spatial_dims: number of spatial dimensions. act_name: activation layer type and arguments. 
norm_name: feature normalization type and arguments.\"\"\"\n <|body_0|>\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"The length along each spatial dimension must be a multiple of 2.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self._in_channel = in_channel\n self._out_channel = out_channel\n self._spatial_dims = spatial_dims\n if self._spatial_dims not in (2, 3):\n raise ValueError('spatial_dims must be 2 or 3.')\n conv_type = Conv[Conv.CONV, self._spatial_dims]\n self.act = get_act_layer(name=act_name)\n self.conv_1 = conv_type(in_channels=self._in_channel, out_channels=self._out_channel // 2, kernel_size=1, stride=2, padding=0, groups=1, bias=False, dilation=1)\n self.conv_2 = conv_type(in_channels=self._in_channel, out_channels=self._out_channel - self._out_channel // 2, kernel_size=1, stride=2, padding=0, groups=1, bias=False, dilation=1)\n self.norm = get_norm_layer(name=norm_name, spatial_dims=self._spatial_dims, channels=self._out_channel)\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.act(x)\n if self._spatial_dims == 3:\n out = torch.cat([self.conv_1(x), self.conv_2(x[:, :, 1:, 1:, 1:])], dim=1)\n else:\n out = torch.cat([self.conv_1(x), self.conv_2(x[:, :, 1:, 1:])], dim=1)\n out = self.norm(out)\n return out\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000247", "length_bytes": 9255, "license_type": "permissive", "methods": [{"docstring": "Args: in_channel: number of input channels out_channel: number of output channels. spatial_dims: number of spatial dimensions. act_name: activation layer type and arguments. norm_name: feature normalization type and arguments.", "name": "__init__", "signature": "def __init__(self, in_channel: int, out_channel: int, spatial_dims: int=3, act_name: tuple | str='RELU', norm_name: tuple | str=('INSTANCE', {'affine': True}))"}, {"docstring": "The length along each spatial dimension must be a multiple of 2.", "name": "forward", "signature": "def forward(self, x: torch.Tensor) -> torch.Tensor"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_008564", "prompt": "Implement the Python class `FactorizedReduceBlock` described below.\n\nClass description:\nDown-sampling the feature by 2 using stride. The length along each spatial dimension must be a multiple of 2.\n\nMethod signatures and docstrings:\n- def __init__(self, in_channel: int, out_channel: int, spatial_dims: int=3, act_name: tuple | str='RELU', norm_name: tuple | str=('INSTANCE', {'affine': True})): Args: in_channel: number of input channels out_channel: number of output channels. spatial_dims: number of spatial dimensions. act_name: activation layer type and arguments. norm_name: feature normalization type and arguments.\n- def forward(self, x: torch.Tensor) -> torch.Tensor: The length along each spatial dimension must be a multiple of 2.", "prompted_full_text": "Implement the Python class `FactorizedReduceBlock` described below.\n\nClass description:\nDown-sampling the feature by 2 using stride. The length along each spatial dimension must be a multiple of 2.\n\nMethod signatures and docstrings:\n- def __init__(self, in_channel: int, out_channel: int, spatial_dims: int=3, act_name: tuple | str='RELU', norm_name: tuple | str=('INSTANCE', {'affine': True})): Args: in_channel: number of input channels out_channel: number of output channels. spatial_dims: number of spatial dimensions. act_name: activation layer type and arguments. 
norm_name: feature normalization type and arguments.\n- def forward(self, x: torch.Tensor) -> torch.Tensor: The length along each spatial dimension must be a multiple of 2.\n\n<|skeleton|>\nclass FactorizedReduceBlock:\n \"\"\"Down-sampling the feature by 2 using stride. The length along each spatial dimension must be a multiple of 2.\"\"\"\n\n def __init__(self, in_channel: int, out_channel: int, spatial_dims: int=3, act_name: tuple | str='RELU', norm_name: tuple | str=('INSTANCE', {'affine': True})):\n \"\"\"Args: in_channel: number of input channels out_channel: number of output channels. spatial_dims: number of spatial dimensions. act_name: activation layer type and arguments. norm_name: feature normalization type and arguments.\"\"\"\n <|body_0|>\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"The length along each spatial dimension must be a multiple of 2.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self._in_channel = in_channel\n self._out_channel = out_channel\n self._spatial_dims = spatial_dims\n if self._spatial_dims not in (2, 3):\n raise ValueError('spatial_dims must be 2 or 3.')\n conv_type = Conv[Conv.CONV, self._spatial_dims]\n self.act = get_act_layer(name=act_name)\n self.conv_1 = conv_type(in_channels=self._in_channel, out_channels=self._out_channel // 2, kernel_size=1, stride=2, padding=0, groups=1, bias=False, dilation=1)\n self.conv_2 = conv_type(in_channels=self._in_channel, out_channels=self._out_channel - self._out_channel // 2, kernel_size=1, stride=2, padding=0, groups=1, bias=False, dilation=1)\n self.norm = get_norm_layer(name=norm_name, spatial_dims=self._spatial_dims, channels=self._out_channel)\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.act(x)\n if self._spatial_dims == 3:\n out = torch.cat([self.conv_1(x), self.conv_2(x[:, :, 1:, 1:, 1:])], dim=1)\n else:\n out = torch.cat([self.conv_1(x), self.conv_2(x[:, :, 1:, 1:])], dim=1)\n out = self.norm(out)\n return out\n<|end_body_1|>\n", "revision_id": "e48c3e2c741fa3fc705c4425d17ac4a5afac6c47", "skeleton": "<|skeleton|>\nclass FactorizedReduceBlock:\n \"\"\"Down-sampling the feature by 2 using stride. The length along each spatial dimension must be a multiple of 2.\"\"\"\n\n def __init__(self, in_channel: int, out_channel: int, spatial_dims: int=3, act_name: tuple | str='RELU', norm_name: tuple | str=('INSTANCE', {'affine': True})):\n \"\"\"Args: in_channel: number of input channels out_channel: number of output channels. spatial_dims: number of spatial dimensions. act_name: activation layer type and arguments. norm_name: feature normalization type and arguments.\"\"\"\n <|body_0|>\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"The length along each spatial dimension must be a multiple of 2.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class FactorizedReduceBlock:\n \"\"\"Down-sampling the feature by 2 using stride. The length along each spatial dimension must be a multiple of 2.\"\"\"\n\n def __init__(self, in_channel: int, out_channel: int, spatial_dims: int=3, act_name: tuple | str='RELU', norm_name: tuple | str=('INSTANCE', {'affine': True})):\n \"\"\"Args: in_channel: number of input channels out_channel: number of output channels. spatial_dims: number of spatial dimensions. act_name: activation layer type and arguments. 
norm_name: feature normalization type and arguments.\"\"\"\n super().__init__()\n self._in_channel = in_channel\n self._out_channel = out_channel\n self._spatial_dims = spatial_dims\n if self._spatial_dims not in (2, 3):\n raise ValueError('spatial_dims must be 2 or 3.')\n conv_type = Conv[Conv.CONV, self._spatial_dims]\n self.act = get_act_layer(name=act_name)\n self.conv_1 = conv_type(in_channels=self._in_channel, out_channels=self._out_channel // 2, kernel_size=1, stride=2, padding=0, groups=1, bias=False, dilation=1)\n self.conv_2 = conv_type(in_channels=self._in_channel, out_channels=self._out_channel - self._out_channel // 2, kernel_size=1, stride=2, padding=0, groups=1, bias=False, dilation=1)\n self.norm = get_norm_layer(name=norm_name, spatial_dims=self._spatial_dims, channels=self._out_channel)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"The length along each spatial dimension must be a multiple of 2.\"\"\"\n x = self.act(x)\n if self._spatial_dims == 3:\n out = torch.cat([self.conv_1(x), self.conv_2(x[:, :, 1:, 1:, 1:])], dim=1)\n else:\n out = torch.cat([self.conv_1(x), self.conv_2(x[:, :, 1:, 1:])], dim=1)\n out = self.norm(out)\n return out\n", "source": "the_stack_v2_python_sparse", "source_path": "monai/networks/blocks/dints_block.py", "source_repo": "Project-MONAI/MONAI", "split": "val", "star_events_count": 4805}
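The trick in the MONAI record above is worth seeing numerically: two stride-2 1x1 convolutions, the second applied to the input shifted by one pixel, together sample both the even and the odd grid positions, and their outputs are concatenated on the channel axis. The split into out_channel // 2 and out_channel - out_channel // 2 means odd channel counts still add up exactly. A standalone sketch of just that arithmetic, outside the full block:

```python
import torch
import torch.nn as nn

in_ch, out_ch = 4, 7  # odd out_ch splits into 3 + 4 channels
conv_1 = nn.Conv2d(in_ch, out_ch // 2, kernel_size=1, stride=2, bias=False)
conv_2 = nn.Conv2d(in_ch, out_ch - out_ch // 2, kernel_size=1, stride=2, bias=False)

x = torch.randn(2, in_ch, 8, 8)  # spatial size must be a multiple of 2
# conv_1 sees even positions (0, 2, ...); the shifted input gives conv_2 the odd ones.
out = torch.cat([conv_1(x), conv_2(x[:, :, 1:, 1:])], dim=1)
print(out.shape)  # torch.Size([2, 7, 4, 4]) -- both branches land on the same 4x4 grid
```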
{"blob_id": "7a94ad9d127a18976a1a955a87de10af5a8a8653", "bodies": ["item = response.meta['item']\ning_list = []\ning_li = response.xpath('//*[@id=\"__layout\"]//ul[@class=\"recipe-ingredients__list\"]/li')\nfor li in ing_li:\n ing = li.xpath('.//a/text()').extract_first()\n if ing is not None:\n ing_list.append(ing.strip())\nitem['ingredients'] = ', '.join(ing_list)\nstep_list = []\nstep_li = response.xpath('//*[@id=\"__layout\"]//li[@class=\"recipe-directions__step\"]')\nfor li in step_li:\n step = li.xpath('.//text()').extract_first()\n if step is not None:\n step_list.append(step.strip())\nitem['steps'] = '\\n'.join(step_list)\nyield item", "recipes = json.loads(response.text)['response']['results']\nfor recipe in recipes:\n if recipe['record_type'] == 'Recipe':\n item = RecipespidersItem()\n self.recipe_count += 1\n item['id'] = self.recipe_count\n item['name'] = recipe['main_title']\n item['description'] = recipe['main_description']\n item['rating_num'] = int(recipe['main_num_ratings'])\n item['rating_star'] = int(recipe['main_rating_mapping'])\n item['rating_score'] = float(recipe['main_rating'])\n item['total_time'] = int(recipe['recipe_totaltime'])\n if recipe.get('recipe_photo_url') is None:\n continue\n else:\n item['photo_url'] = recipe['recipe_photo_url']\n item['record_url'] = recipe['record_url']\n yield scrapy.Request(url=recipe['record_url'], callback=self.parse_detail, meta={'item': item})\nif self.page_num <= 21000:\n print(self.page_num)\n new_url = format(self.base_url % self.page_num)\n self.page_num += 1\n yield scrapy.Request(url=new_url, callback=self.parse)"], "bodies_text": "<|body_start_0|>\n item = response.meta['item']\n ing_list = []\n ing_li = response.xpath('//*[@id=\"__layout\"]//ul[@class=\"recipe-ingredients__list\"]/li')\n for li in ing_li:\n ing = li.xpath('.//a/text()').extract_first()\n if ing is not None:\n ing_list.append(ing.strip())\n item['ingredients'] = ', '.join(ing_list)\n step_list = []\n step_li = response.xpath('//*[@id=\"__layout\"]//li[@class=\"recipe-directions__step\"]')\n for li in step_li:\n step = li.xpath('.//text()').extract_first()\n if step is not None:\n step_list.append(step.strip())\n item['steps'] = '\\n'.join(step_list)\n yield item\n<|end_body_0|>\n\n<|body_start_1|>\n recipes = json.loads(response.text)['response']['results']\n for recipe in recipes:\n if recipe['record_type'] == 'Recipe':\n item = RecipespidersItem()\n self.recipe_count += 1\n item['id'] = self.recipe_count\n item['name'] = recipe['main_title']\n item['description'] = recipe['main_description']\n item['rating_num'] = int(recipe['main_num_ratings'])\n item['rating_star'] = int(recipe['main_rating_mapping'])\n item['rating_score'] = float(recipe['main_rating'])\n item['total_time'] = int(recipe['recipe_totaltime'])\n if recipe.get('recipe_photo_url') is None:\n continue\n else:\n item['photo_url'] = recipe['recipe_photo_url']\n item['record_url'] = recipe['record_url']\n yield scrapy.Request(url=recipe['record_url'], callback=self.parse_detail, meta={'item': item})\n if self.page_num <= 21000:\n print(self.page_num)\n new_url = format(self.base_url % self.page_num)\n self.page_num += 1\n yield scrapy.Request(url=new_url, callback=self.parse)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "RecipeSpider", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RecipeSpider:\n\n def parse_detail(self, response):\n \"\"\"parse the detail page :param response: :return:\"\"\"\n <|body_0|>\n\n 
def parse(self, response):\n \"\"\"parse the search page :param response: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n item = response.meta['item']\n ing_list = []\n ing_li = response.xpath('//*[@id=\"__layout\"]//ul[@class=\"recipe-ingredients__list\"]/li')\n for li in ing_li:\n ing = li.xpath('.//a/text()').extract_first()\n if ing is not None:\n ing_list.append(ing.strip())\n item['ingredients'] = ', '.join(ing_list)\n step_list = []\n step_li = response.xpath('//*[@id=\"__layout\"]//li[@class=\"recipe-directions__step\"]')\n for li in step_li:\n step = li.xpath('.//text()').extract_first()\n if step is not None:\n step_list.append(step.strip())\n item['steps'] = '\\n'.join(step_list)\n yield item\n<|end_body_0|>\n\n<|body_start_1|>\n recipes = json.loads(response.text)['response']['results']\n for recipe in recipes:\n if recipe['record_type'] == 'Recipe':\n item = RecipespidersItem()\n self.recipe_count += 1\n item['id'] = self.recipe_count\n item['name'] = recipe['main_title']\n item['description'] = recipe['main_description']\n item['rating_num'] = int(recipe['main_num_ratings'])\n item['rating_star'] = int(recipe['main_rating_mapping'])\n item['rating_score'] = float(recipe['main_rating'])\n item['total_time'] = int(recipe['recipe_totaltime'])\n if recipe.get('recipe_photo_url') is None:\n continue\n else:\n item['photo_url'] = recipe['recipe_photo_url']\n item['record_url'] = recipe['record_url']\n yield scrapy.Request(url=recipe['record_url'], callback=self.parse_detail, meta={'item': item})\n if self.page_num <= 21000:\n print(self.page_num)\n new_url = format(self.base_url % self.page_num)\n self.page_num += 1\n yield scrapy.Request(url=new_url, callback=self.parse)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000248", "length_bytes": 2885, "license_type": "no_license", "methods": [{"docstring": "parse the detail page :param response: :return:", "name": "parse_detail", "signature": "def parse_detail(self, response)"}, {"docstring": "parse the search page :param response: :return:", "name": "parse", "signature": "def parse(self, response)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_014953", "prompt": "Implement the Python class `RecipeSpider` described below.\n\nClass description:\nImplement the RecipeSpider class.\n\nMethod signatures and docstrings:\n- def parse_detail(self, response): parse the detail page :param response: :return:\n- def parse(self, response): parse the search page :param response: :return:", "prompted_full_text": "Implement the Python class `RecipeSpider` described below.\n\nClass description:\nImplement the RecipeSpider class.\n\nMethod signatures and docstrings:\n- def parse_detail(self, response): parse the detail page :param response: :return:\n- def parse(self, response): parse the search page :param response: :return:\n\n<|skeleton|>\nclass RecipeSpider:\n\n def parse_detail(self, response):\n \"\"\"parse the detail page :param response: :return:\"\"\"\n <|body_0|>\n\n def parse(self, response):\n \"\"\"parse the search page :param response: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n item = response.meta['item']\n ing_list = []\n ing_li = response.xpath('//*[@id=\"__layout\"]//ul[@class=\"recipe-ingredients__list\"]/li')\n for li in ing_li:\n ing = li.xpath('.//a/text()').extract_first()\n if ing is not None:\n ing_list.append(ing.strip())\n item['ingredients'] = ', '.join(ing_list)\n step_list = []\n step_li = 
response.xpath('//*[@id=\"__layout\"]//li[@class=\"recipe-directions__step\"]')\n for li in step_li:\n step = li.xpath('.//text()').extract_first()\n if step is not None:\n step_list.append(step.strip())\n item['steps'] = '\\n'.join(step_list)\n yield item\n<|end_body_0|>\n\n<|body_start_1|>\n recipes = json.loads(response.text)['response']['results']\n for recipe in recipes:\n if recipe['record_type'] == 'Recipe':\n item = RecipespidersItem()\n self.recipe_count += 1\n item['id'] = self.recipe_count\n item['name'] = recipe['main_title']\n item['description'] = recipe['main_description']\n item['rating_num'] = int(recipe['main_num_ratings'])\n item['rating_star'] = int(recipe['main_rating_mapping'])\n item['rating_score'] = float(recipe['main_rating'])\n item['total_time'] = int(recipe['recipe_totaltime'])\n if recipe.get('recipe_photo_url') is None:\n continue\n else:\n item['photo_url'] = recipe['recipe_photo_url']\n item['record_url'] = recipe['record_url']\n yield scrapy.Request(url=recipe['record_url'], callback=self.parse_detail, meta={'item': item})\n if self.page_num <= 21000:\n print(self.page_num)\n new_url = format(self.base_url % self.page_num)\n self.page_num += 1\n yield scrapy.Request(url=new_url, callback=self.parse)\n<|end_body_1|>\n", "revision_id": "24deb2f2ca7f859a351ecafe6fb03123a1b7685d", "skeleton": "<|skeleton|>\nclass RecipeSpider:\n\n def parse_detail(self, response):\n \"\"\"parse the detail page :param response: :return:\"\"\"\n <|body_0|>\n\n def parse(self, response):\n \"\"\"parse the search page :param response: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class RecipeSpider:\n def parse_detail(self, response):\n \"\"\"parse the detail page :param response: :return:\"\"\"\n item = response.meta['item']\n ing_list = []\n ing_li = response.xpath('//*[@id=\"__layout\"]//ul[@class=\"recipe-ingredients__list\"]/li')\n for li in ing_li:\n ing = li.xpath('.//a/text()').extract_first()\n if ing is not None:\n ing_list.append(ing.strip())\n item['ingredients'] = ', '.join(ing_list)\n step_list = []\n step_li = response.xpath('//*[@id=\"__layout\"]//li[@class=\"recipe-directions__step\"]')\n for li in step_li:\n step = li.xpath('.//text()').extract_first()\n if step is not None:\n step_list.append(step.strip())\n item['steps'] = '\\n'.join(step_list)\n yield item\n\n def parse(self, response):\n \"\"\"parse the search page :param response: :return:\"\"\"\n recipes = json.loads(response.text)['response']['results']\n for recipe in recipes:\n if recipe['record_type'] == 'Recipe':\n item = RecipespidersItem()\n self.recipe_count += 1\n item['id'] = self.recipe_count\n item['name'] = recipe['main_title']\n item['description'] = recipe['main_description']\n item['rating_num'] = int(recipe['main_num_ratings'])\n item['rating_star'] = int(recipe['main_rating_mapping'])\n item['rating_score'] = float(recipe['main_rating'])\n item['total_time'] = int(recipe['recipe_totaltime'])\n if recipe.get('recipe_photo_url') is None:\n continue\n else:\n item['photo_url'] = recipe['recipe_photo_url']\n item['record_url'] = recipe['record_url']\n yield scrapy.Request(url=recipe['record_url'], callback=self.parse_detail, meta={'item': item})\n if self.page_num <= 21000:\n print(self.page_num)\n new_url = format(self.base_url % self.page_num)\n self.page_num += 1\n yield scrapy.Request(url=new_url, callback=self.parse)\n", "source": 
"the_stack_v2_python_sparse", "source_path": "recipespiders/recipespiders/spiders/recipe.py", "source_repo": "yefeichen99/RecipeSE", "split": "val", "star_events_count": 0}
{"blob_id": "635d1e6f89fa02e92ec851ee6c8d4f5f5fe4193a", "bodies": ["if opt.get('multigpu'):\n print('| WARNING: Multi-GPU is not supported for the Perplexity ' + 'Evaluator Agent. Setting this option to False.')\n opt['multigpu'] = False\nsuper().__init__(opt, shared)\nself.prev_enc = None\nself.last_xs = None", "obs = self.observation\nxs = obs['text_vec'].unsqueeze(0)\nys = self._vectorize_text(' '.join(partial_out), False, True, self.truncate).unsqueeze(0)\nif self.prev_enc is not None and self.last_xs is not None and (xs.shape[1] != self.last_xs.shape[1] or (xs == self.last_xs).sum().item() != xs.shape[1]):\n self.prev_enc = None\nself.last_xs = xs\nself.model.eval()\nout = self.model(xs, ys=ys if len(partial_out) > 0 else None, prev_enc=self.prev_enc, maxlen=1)\nscores, self.prev_enc = (out[0], out[2])\nprobs = F.softmax(scores.select(1, -1), dim=1).squeeze()\ndist = mydefaultdict(lambda: 1e-07)\nfor i in range(len(probs)):\n dist[self.dict[i]] = probs[i].item()\nreturn dist"], "bodies_text": "<|body_start_0|>\n if opt.get('multigpu'):\n print('| WARNING: Multi-GPU is not supported for the Perplexity ' + 'Evaluator Agent. Setting this option to False.')\n opt['multigpu'] = False\n super().__init__(opt, shared)\n self.prev_enc = None\n self.last_xs = None\n<|end_body_0|>\n\n<|body_start_1|>\n obs = self.observation\n xs = obs['text_vec'].unsqueeze(0)\n ys = self._vectorize_text(' '.join(partial_out), False, True, self.truncate).unsqueeze(0)\n if self.prev_enc is not None and self.last_xs is not None and (xs.shape[1] != self.last_xs.shape[1] or (xs == self.last_xs).sum().item() != xs.shape[1]):\n self.prev_enc = None\n self.last_xs = xs\n self.model.eval()\n out = self.model(xs, ys=ys if len(partial_out) > 0 else None, prev_enc=self.prev_enc, maxlen=1)\n scores, self.prev_enc = (out[0], out[2])\n probs = F.softmax(scores.select(1, -1), dim=1).squeeze()\n dist = mydefaultdict(lambda: 1e-07)\n for i in range(len(probs)):\n dist[self.dict[i]] = probs[i].item()\n return dist\n<|end_body_1|>\n", "class_docstring": "Subclass for doing standardized perplexity evaluation. This is designed to be used in conjunction with the PerplexityWorld at parlai/scripts/eval_ppl.py. It uses the `next_word_probability` function to calculate the probability of tokens one token at a time.", "class_name": "PerplexityEvaluatorAgent", "detected_licenses": ["MIT", "Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PerplexityEvaluatorAgent:\n \"\"\"Subclass for doing standardized perplexity evaluation. This is designed to be used in conjunction with the PerplexityWorld at parlai/scripts/eval_ppl.py. It uses the `next_word_probability` function to calculate the probability of tokens one token at a time.\"\"\"\n\n def __init__(self, opt, shared=None):\n \"\"\"Initialize evaluator.\"\"\"\n <|body_0|>\n\n def next_word_probability(self, partial_out):\n \"\"\"Return probability distribution over next words. This probability is based on both nn input and partial true output. This is used to calculate the per-word perplexity. Arguments: observation -- input observation dict partial_out -- list of previous \"true\" words Returns a dict, where each key is a word and each value is a probability score for that word. Unset keys will use a probability of 1e-7. e.g. 
{'text': 'Run test program.'}, ['hello'] => {'world': 1.0}\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if opt.get('multigpu'):\n print('| WARNING: Multi-GPU is not supported for the Perplexity ' + 'Evaluator Agent. Setting this option to False.')\n opt['multigpu'] = False\n super().__init__(opt, shared)\n self.prev_enc = None\n self.last_xs = None\n<|end_body_0|>\n\n<|body_start_1|>\n obs = self.observation\n xs = obs['text_vec'].unsqueeze(0)\n ys = self._vectorize_text(' '.join(partial_out), False, True, self.truncate).unsqueeze(0)\n if self.prev_enc is not None and self.last_xs is not None and (xs.shape[1] != self.last_xs.shape[1] or (xs == self.last_xs).sum().item() != xs.shape[1]):\n self.prev_enc = None\n self.last_xs = xs\n self.model.eval()\n out = self.model(xs, ys=ys if len(partial_out) > 0 else None, prev_enc=self.prev_enc, maxlen=1)\n scores, self.prev_enc = (out[0], out[2])\n probs = F.softmax(scores.select(1, -1), dim=1).squeeze()\n dist = mydefaultdict(lambda: 1e-07)\n for i in range(len(probs)):\n dist[self.dict[i]] = probs[i].item()\n return dist\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000249", "length_bytes": 33788, "license_type": "permissive", "methods": [{"docstring": "Initialize evaluator.", "name": "__init__", "signature": "def __init__(self, opt, shared=None)"}, {"docstring": "Return probability distribution over next words. This probability is based on both nn input and partial true output. This is used to calculate the per-word perplexity. Arguments: observation -- input observation dict partial_out -- list of previous \"true\" words Returns a dict, where each key is a word and each value is a probability score for that word. Unset keys will use a probability of 1e-7. e.g. {'text': 'Run test program.'}, ['hello'] => {'world': 1.0}", "name": "next_word_probability", "signature": "def next_word_probability(self, partial_out)"}], "n_methods": 2, "prompt": "Implement the Python class `PerplexityEvaluatorAgent` described below.\n\nClass description:\nSubclass for doing standardized perplexity evaluation. This is designed to be used in conjunction with the PerplexityWorld at parlai/scripts/eval_ppl.py. It uses the `next_word_probability` function to calculate the probability of tokens one token at a time.\n\nMethod signatures and docstrings:\n- def __init__(self, opt, shared=None): Initialize evaluator.\n- def next_word_probability(self, partial_out): Return probability distribution over next words. This probability is based on both nn input and partial true output. This is used to calculate the per-word perplexity. Arguments: observation -- input observation dict partial_out -- list of previous \"true\" words Returns a dict, where each key is a word and each value is a probability score for that word. Unset keys will use a probability of 1e-7. e.g. {'text': 'Run test program.'}, ['hello'] => {'world': 1.0}", "prompted_full_text": "Implement the Python class `PerplexityEvaluatorAgent` described below.\n\nClass description:\nSubclass for doing standardized perplexity evaluation. This is designed to be used in conjunction with the PerplexityWorld at parlai/scripts/eval_ppl.py. It uses the `next_word_probability` function to calculate the probability of tokens one token at a time.\n\nMethod signatures and docstrings:\n- def __init__(self, opt, shared=None): Initialize evaluator.\n- def next_word_probability(self, partial_out): Return probability distribution over next words. 
This probability is based on both nn input and partial true output. This is used to calculate the per-word perplexity. Arguments: observation -- input observation dict partial_out -- list of previous \"true\" words Returns a dict, where each key is a word and each value is a probability score for that word. Unset keys will use a probability of 1e-7. e.g. {'text': 'Run test program.'}, ['hello'] => {'world': 1.0}\n\n<|skeleton|>\nclass PerplexityEvaluatorAgent:\n \"\"\"Subclass for doing standardized perplexity evaluation. This is designed to be used in conjunction with the PerplexityWorld at parlai/scripts/eval_ppl.py. It uses the `next_word_probability` function to calculate the probability of tokens one token at a time.\"\"\"\n\n def __init__(self, opt, shared=None):\n \"\"\"Initialize evaluator.\"\"\"\n <|body_0|>\n\n def next_word_probability(self, partial_out):\n \"\"\"Return probability distribution over next words. This probability is based on both nn input and partial true output. This is used to calculate the per-word perplexity. Arguments: observation -- input observation dict partial_out -- list of previous \"true\" words Returns a dict, where each key is a word and each value is a probability score for that word. Unset keys will use a probability of 1e-7. e.g. {'text': 'Run test program.'}, ['hello'] => {'world': 1.0}\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if opt.get('multigpu'):\n print('| WARNING: Multi-GPU is not supported for the Perplexity ' + 'Evaluator Agent. Setting this option to False.')\n opt['multigpu'] = False\n super().__init__(opt, shared)\n self.prev_enc = None\n self.last_xs = None\n<|end_body_0|>\n\n<|body_start_1|>\n obs = self.observation\n xs = obs['text_vec'].unsqueeze(0)\n ys = self._vectorize_text(' '.join(partial_out), False, True, self.truncate).unsqueeze(0)\n if self.prev_enc is not None and self.last_xs is not None and (xs.shape[1] != self.last_xs.shape[1] or (xs == self.last_xs).sum().item() != xs.shape[1]):\n self.prev_enc = None\n self.last_xs = xs\n self.model.eval()\n out = self.model(xs, ys=ys if len(partial_out) > 0 else None, prev_enc=self.prev_enc, maxlen=1)\n scores, self.prev_enc = (out[0], out[2])\n probs = F.softmax(scores.select(1, -1), dim=1).squeeze()\n dist = mydefaultdict(lambda: 1e-07)\n for i in range(len(probs)):\n dist[self.dict[i]] = probs[i].item()\n return dist\n<|end_body_1|>\n", "revision_id": "ccf60824b28f0ce8ceda44a7ce52a0d117669115", "skeleton": "<|skeleton|>\nclass PerplexityEvaluatorAgent:\n \"\"\"Subclass for doing standardized perplexity evaluation. This is designed to be used in conjunction with the PerplexityWorld at parlai/scripts/eval_ppl.py. It uses the `next_word_probability` function to calculate the probability of tokens one token at a time.\"\"\"\n\n def __init__(self, opt, shared=None):\n \"\"\"Initialize evaluator.\"\"\"\n <|body_0|>\n\n def next_word_probability(self, partial_out):\n \"\"\"Return probability distribution over next words. This probability is based on both nn input and partial true output. This is used to calculate the per-word perplexity. Arguments: observation -- input observation dict partial_out -- list of previous \"true\" words Returns a dict, where each key is a word and each value is a probability score for that word. Unset keys will use a probability of 1e-7. e.g. 
{'text': 'Run test program.'}, ['hello'] => {'world': 1.0}\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class PerplexityEvaluatorAgent:\n \"\"\"Subclass for doing standardized perplexity evaluation. This is designed to be used in conjunction with the PerplexityWorld at parlai/scripts/eval_ppl.py. It uses the `next_word_probability` function to calculate the probability of tokens one token at a time.\"\"\"\n\n def __init__(self, opt, shared=None):\n \"\"\"Initialize evaluator.\"\"\"\n if opt.get('multigpu'):\n print('| WARNING: Multi-GPU is not supported for the Perplexity ' + 'Evaluator Agent. Setting this option to False.')\n opt['multigpu'] = False\n super().__init__(opt, shared)\n self.prev_enc = None\n self.last_xs = None\n\n def next_word_probability(self, partial_out):\n \"\"\"Return probability distribution over next words. This probability is based on both nn input and partial true output. This is used to calculate the per-word perplexity. Arguments: observation -- input observation dict partial_out -- list of previous \"true\" words Returns a dict, where each key is a word and each value is a probability score for that word. Unset keys will use a probability of 1e-7. e.g. {'text': 'Run test program.'}, ['hello'] => {'world': 1.0}\"\"\"\n obs = self.observation\n xs = obs['text_vec'].unsqueeze(0)\n ys = self._vectorize_text(' '.join(partial_out), False, True, self.truncate).unsqueeze(0)\n if self.prev_enc is not None and self.last_xs is not None and (xs.shape[1] != self.last_xs.shape[1] or (xs == self.last_xs).sum().item() != xs.shape[1]):\n self.prev_enc = None\n self.last_xs = xs\n self.model.eval()\n out = self.model(xs, ys=ys if len(partial_out) > 0 else None, prev_enc=self.prev_enc, maxlen=1)\n scores, self.prev_enc = (out[0], out[2])\n probs = F.softmax(scores.select(1, -1), dim=1).squeeze()\n dist = mydefaultdict(lambda: 1e-07)\n for i in range(len(probs)):\n dist[self.dict[i]] = probs[i].item()\n return dist\n", "source": "the_stack_v2_python_sparse", "source_path": "ParlAI/parlai/agents/legacy_agents/seq2seq/seq2seq_v1.py", "source_repo": "ethanjperez/convince", "split": "val", "star_events_count": 27}
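The PerplexityEvaluatorAgent record returns, for each position, a word-to-probability dict with a 1e-7 floor for unseen words; ParlAI's eval_ppl harness consumes those dicts one gold token at a time. A torch-free sketch of that consumption, assuming only the dict-with-floor contract; the unigram toy oracle is invented.

import math

def perplexity(gold_tokens, next_word_probability):
    """Per-word perplexity from a next-word-probability oracle.

    The oracle takes the list of previous gold words and returns a dict
    mapping candidate words to probabilities; words missing from the dict
    fall back to the same 1e-7 floor the agent's mydefaultdict provides.
    """
    total_neg_logprob = 0.0
    for i, word in enumerate(gold_tokens):
        dist = next_word_probability(gold_tokens[:i])
        total_neg_logprob -= math.log(dist.get(word, 1e-07))
    return math.exp(total_neg_logprob / len(gold_tokens))

# Toy oracle: a fixed unigram distribution, ignoring the prefix entirely.
unigram = {'hello': 0.5, 'world': 0.4}
print(perplexity(['hello', 'world'], lambda prefix: unigram))  # ~2.2361

The printed value is exp((-ln 0.5 - ln 0.4) / 2), the geometric mean of the inverse token probabilities, which is what "per-word perplexity" means here.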
{"blob_id": "48e98fd8b552613dd7e0fa4ec1d9ac6299b32494", "bodies": ["if not root:\n return ''\nthe_list = list()\nbfs_queue = deque([root])\nwhile bfs_queue:\n node = bfs_queue.popleft()\n newList = ['-1', '-1', '-1']\n newList[0] = str(node.val)\n if node.left:\n bfs_queue.append(node.left)\n newList[1] = str(node.left.val)\n if node.right:\n bfs_queue.append(node.right)\n newList[2] = str(node.right.val)\n the_list.append(','.join(newList))\nresult = 'z'.join(the_list)\nreturn result", "if len(data) <= 0:\n return None\nnodes_strings = data.split('z')\nnodes_valus = list()\nfor string in nodes_strings:\n values = string.split(',')\n temp_list = list()\n for value in values:\n temp_list.append(int(value))\n nodes_valus.append(temp_list)\nnodeDic = {}\nfor node_value in nodes_valus:\n nodeDic[node_value[0]] = TreeNode(node_value[0])\nfor node_value in nodes_valus:\n node = nodeDic[node_value[0]]\n if node_value[1] >= 0:\n node.left = nodeDic[node_value[1]]\n if node_value[2] >= 0:\n node.right = nodeDic[node_value[2]]\nreturn nodeDic[nodes_valus[0][0]]"], "bodies_text": "<|body_start_0|>\n if not root:\n return ''\n the_list = list()\n bfs_queue = deque([root])\n while bfs_queue:\n node = bfs_queue.popleft()\n newList = ['-1', '-1', '-1']\n newList[0] = str(node.val)\n if node.left:\n bfs_queue.append(node.left)\n newList[1] = str(node.left.val)\n if node.right:\n bfs_queue.append(node.right)\n newList[2] = str(node.right.val)\n the_list.append(','.join(newList))\n result = 'z'.join(the_list)\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n if len(data) <= 0:\n return None\n nodes_strings = data.split('z')\n nodes_valus = list()\n for string in nodes_strings:\n values = string.split(',')\n temp_list = list()\n for value in values:\n temp_list.append(int(value))\n nodes_valus.append(temp_list)\n nodeDic = {}\n for node_value in nodes_valus:\n nodeDic[node_value[0]] = TreeNode(node_value[0])\n for node_value in nodes_valus:\n node = nodeDic[node_value[0]]\n if node_value[1] >= 0:\n node.left = nodeDic[node_value[1]]\n if node_value[2] >= 0:\n node.right = nodeDic[node_value[2]]\n return nodeDic[nodes_valus[0][0]]\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Codec", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root: TreeNode) -> str:\n \"\"\"Encodes a tree to a single string.\"\"\"\n <|body_0|>\n\n def deserialize(self, data: str) -> TreeNode:\n \"\"\"Decodes your encoded data to tree.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not root:\n return ''\n the_list = list()\n bfs_queue = deque([root])\n while bfs_queue:\n node = bfs_queue.popleft()\n newList = ['-1', '-1', '-1']\n newList[0] = str(node.val)\n if node.left:\n bfs_queue.append(node.left)\n newList[1] = str(node.left.val)\n if node.right:\n bfs_queue.append(node.right)\n newList[2] = str(node.right.val)\n the_list.append(','.join(newList))\n result = 'z'.join(the_list)\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n if len(data) <= 0:\n return None\n nodes_strings = data.split('z')\n nodes_valus = list()\n for string in nodes_strings:\n values = string.split(',')\n temp_list = list()\n for value in values:\n temp_list.append(int(value))\n nodes_valus.append(temp_list)\n nodeDic = {}\n for node_value in nodes_valus:\n nodeDic[node_value[0]] = TreeNode(node_value[0])\n for node_value in nodes_valus:\n node = nodeDic[node_value[0]]\n if node_value[1] >= 0:\n node.left = nodeDic[node_value[1]]\n 
if node_value[2] >= 0:\n node.right = nodeDic[node_value[2]]\n return nodeDic[nodes_valus[0][0]]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000250", "length_bytes": 1775, "license_type": "no_license", "methods": [{"docstring": "Encodes a tree to a single string.", "name": "serialize", "signature": "def serialize(self, root: TreeNode) -> str"}, {"docstring": "Decodes your encoded data to tree.", "name": "deserialize", "signature": "def deserialize(self, data: str) -> TreeNode"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_033092", "prompt": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root: TreeNode) -> str: Encodes a tree to a single string.\n- def deserialize(self, data: str) -> TreeNode: Decodes your encoded data to tree.", "prompted_full_text": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root: TreeNode) -> str: Encodes a tree to a single string.\n- def deserialize(self, data: str) -> TreeNode: Decodes your encoded data to tree.\n\n<|skeleton|>\nclass Codec:\n\n def serialize(self, root: TreeNode) -> str:\n \"\"\"Encodes a tree to a single string.\"\"\"\n <|body_0|>\n\n def deserialize(self, data: str) -> TreeNode:\n \"\"\"Decodes your encoded data to tree.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not root:\n return ''\n the_list = list()\n bfs_queue = deque([root])\n while bfs_queue:\n node = bfs_queue.popleft()\n newList = ['-1', '-1', '-1']\n newList[0] = str(node.val)\n if node.left:\n bfs_queue.append(node.left)\n newList[1] = str(node.left.val)\n if node.right:\n bfs_queue.append(node.right)\n newList[2] = str(node.right.val)\n the_list.append(','.join(newList))\n result = 'z'.join(the_list)\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n if len(data) <= 0:\n return None\n nodes_strings = data.split('z')\n nodes_valus = list()\n for string in nodes_strings:\n values = string.split(',')\n temp_list = list()\n for value in values:\n temp_list.append(int(value))\n nodes_valus.append(temp_list)\n nodeDic = {}\n for node_value in nodes_valus:\n nodeDic[node_value[0]] = TreeNode(node_value[0])\n for node_value in nodes_valus:\n node = nodeDic[node_value[0]]\n if node_value[1] >= 0:\n node.left = nodeDic[node_value[1]]\n if node_value[2] >= 0:\n node.right = nodeDic[node_value[2]]\n return nodeDic[nodes_valus[0][0]]\n<|end_body_1|>\n", "revision_id": "7818b55f22afb178dfd250f26019653faadfee87", "skeleton": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root: TreeNode) -> str:\n \"\"\"Encodes a tree to a single string.\"\"\"\n <|body_0|>\n\n def deserialize(self, data: str) -> TreeNode:\n \"\"\"Decodes your encoded data to tree.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Codec:\n def serialize(self, root: TreeNode) -> str:\n \"\"\"Encodes a tree to a single string.\"\"\"\n if not root:\n return ''\n the_list = list()\n bfs_queue = deque([root])\n while bfs_queue:\n node = bfs_queue.popleft()\n newList = ['-1', '-1', '-1']\n newList[0] = str(node.val)\n if node.left:\n bfs_queue.append(node.left)\n newList[1] = str(node.left.val)\n if node.right:\n bfs_queue.append(node.right)\n newList[2] = str(node.right.val)\n 
the_list.append(','.join(newList))\n result = 'z'.join(the_list)\n return result\n\n def deserialize(self, data: str) -> TreeNode:\n \"\"\"Decodes your encoded data to tree.\"\"\"\n if len(data) <= 0:\n return None\n nodes_strings = data.split('z')\n nodes_valus = list()\n for string in nodes_strings:\n values = string.split(',')\n temp_list = list()\n for value in values:\n temp_list.append(int(value))\n nodes_valus.append(temp_list)\n nodeDic = {}\n for node_value in nodes_valus:\n nodeDic[node_value[0]] = TreeNode(node_value[0])\n for node_value in nodes_valus:\n node = nodeDic[node_value[0]]\n if node_value[1] >= 0:\n node.left = nodeDic[node_value[1]]\n if node_value[2] >= 0:\n node.right = nodeDic[node_value[2]]\n return nodeDic[nodes_valus[0][0]]\n", "source": "the_stack_v2_python_sparse", "source_path": "LeetcodePython3/q0449.py", "source_repo": "YujiaY/leetCodePractice", "split": "val", "star_events_count": 0}
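The Codec record (LeetCode 449) encodes each visited node as a val,leftval,rightval triple, joins the triples with 'z', and rebuilds child links through a value-keyed dict on decode. A round-trip check follows; the Codec class itself is assumed to be in scope from the record, and only TreeNode and a small helper are defined here.

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

def inorder(node):
    """Collect values left-to-right, as a quick sanity check on the decode."""
    return inorder(node.left) + [node.val] + inorder(node.right) if node else []

codec = Codec()
root = TreeNode(2, TreeNode(1), TreeNode(3))        # a small BST
data = codec.serialize(root)
print(data)                                          # 2,1,3z1,-1,-1z3,-1,-1
assert inorder(codec.deserialize(data)) == [1, 2, 3]

# Caveat baked into the encoding: node values double as dict keys and -1 is
# the "no child" sentinel, so duplicate or negative values would decode
# wrongly.  LeetCode 449 guarantees a BST with unique non-negative keys,
# which is what makes the shortcut safe there.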
{"blob_id": "61cf8c3ae47f8e9473a8557ab667459fe920baaf", "bodies": ["if self._pg is None:\n self._pg = await create_pg_driver()\nreturn self._pg", "if not isinstance(data, dict):\n try:\n data = ujson.loads(data)\n except Exception as e:\n err = 'Ошибка загрузки данных из JSON: %s' % str(e)\n self.error(err)\nfor self_name, data_name in self.MAPPING_JSON.items():\n if data_name not in data:\n continue\n _item = self\n _item.__setattr__(self_name, data[data_name])\nreturn self"], "bodies_text": "<|body_start_0|>\n if self._pg is None:\n self._pg = await create_pg_driver()\n return self._pg\n<|end_body_0|>\n\n<|body_start_1|>\n if not isinstance(data, dict):\n try:\n data = ujson.loads(data)\n except Exception as e:\n err = 'Ошибка загрузки данных из JSON: %s' % str(e)\n self.error(err)\n for self_name, data_name in self.MAPPING_JSON.items():\n if data_name not in data:\n continue\n _item = self\n _item.__setattr__(self_name, data[data_name])\n return self\n<|end_body_1|>\n", "class_docstring": "Базовый класс для сущностей. :param list[tuple]|dict data: Значения для инициализации", "class_name": "BaseItem", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BaseItem:\n \"\"\"Базовый класс для сущностей. :param list[tuple]|dict data: Значения для инициализации\"\"\"\n\n async def pg(self) -> PgDriver:\n \"\"\"Получение объекта подключения к БД.\"\"\"\n <|body_0|>\n\n def load_from_json(self, data):\n \"\"\"Загрузка полей сущности из данных (json|dict).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self._pg is None:\n self._pg = await create_pg_driver()\n return self._pg\n<|end_body_0|>\n\n<|body_start_1|>\n if not isinstance(data, dict):\n try:\n data = ujson.loads(data)\n except Exception as e:\n err = 'Ошибка загрузки данных из JSON: %s' % str(e)\n self.error(err)\n for self_name, data_name in self.MAPPING_JSON.items():\n if data_name not in data:\n continue\n _item = self\n _item.__setattr__(self_name, data[data_name])\n return self\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000251", "length_bytes": 1144, "license_type": "no_license", "methods": [{"docstring": "Получение объекта подключения к БД.", "name": "pg", "signature": "async def pg(self) -> PgDriver"}, {"docstring": "Загрузка полей сущности из данных (json|dict).", "name": "load_from_json", "signature": "def load_from_json(self, data)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006734", "prompt": "Implement the Python class `BaseItem` described below.\n\nClass description:\nБазовый класс для сущностей. :param list[tuple]|dict data: Значения для инициализации\n\nMethod signatures and docstrings:\n- async def pg(self) -> PgDriver: Получение объекта подключения к БД.\n- def load_from_json(self, data): Загрузка полей сущности из данных (json|dict).", "prompted_full_text": "Implement the Python class `BaseItem` described below.\n\nClass description:\nБазовый класс для сущностей. :param list[tuple]|dict data: Значения для инициализации\n\nMethod signatures and docstrings:\n- async def pg(self) -> PgDriver: Получение объекта подключения к БД.\n- def load_from_json(self, data): Загрузка полей сущности из данных (json|dict).\n\n<|skeleton|>\nclass BaseItem:\n \"\"\"Базовый класс для сущностей. 
:param list[tuple]|dict data: Значения для инициализации\"\"\"\n\n async def pg(self) -> PgDriver:\n \"\"\"Получение объекта подключения к БД.\"\"\"\n <|body_0|>\n\n def load_from_json(self, data):\n \"\"\"Загрузка полей сущности из данных (json|dict).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self._pg is None:\n self._pg = await create_pg_driver()\n return self._pg\n<|end_body_0|>\n\n<|body_start_1|>\n if not isinstance(data, dict):\n try:\n data = ujson.loads(data)\n except Exception as e:\n err = 'Ошибка загрузки данных из JSON: %s' % str(e)\n self.error(err)\n for self_name, data_name in self.MAPPING_JSON.items():\n if data_name not in data:\n continue\n _item = self\n _item.__setattr__(self_name, data[data_name])\n return self\n<|end_body_1|>\n", "revision_id": "18a08f5b872af47c67404100d7004bb0fa6ba06e", "skeleton": "<|skeleton|>\nclass BaseItem:\n \"\"\"Базовый класс для сущностей. :param list[tuple]|dict data: Значения для инициализации\"\"\"\n\n async def pg(self) -> PgDriver:\n \"\"\"Получение объекта подключения к БД.\"\"\"\n <|body_0|>\n\n def load_from_json(self, data):\n \"\"\"Загрузка полей сущности из данных (json|dict).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class BaseItem:\n \"\"\"Базовый класс для сущностей. :param list[tuple]|dict data: Значения для инициализации\"\"\"\n\n async def pg(self) -> PgDriver:\n \"\"\"Получение объекта подключения к БД.\"\"\"\n if self._pg is None:\n self._pg = await create_pg_driver()\n return self._pg\n\n def load_from_json(self, data):\n \"\"\"Загрузка полей сущности из данных (json|dict).\"\"\"\n if not isinstance(data, dict):\n try:\n data = ujson.loads(data)\n except Exception as e:\n err = 'Ошибка загрузки данных из JSON: %s' % str(e)\n self.error(err)\n for self_name, data_name in self.MAPPING_JSON.items():\n if data_name not in data:\n continue\n _item = self\n _item.__setattr__(self_name, data[data_name])\n return self\n", "source": "the_stack_v2_python_sparse", "source_path": "app/core/base/v1/BaseItem.py", "source_repo": "sector84/py-sanic-core", "split": "val", "star_events_count": 0}
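The BaseItem record pairs a lazily created async Postgres driver (built on first await of pg() and cached on self._pg) with a declarative MAPPING_JSON table that maps item attributes to keys in an incoming payload. A synchronous sketch of the mapping half, using stdlib json where the record uses ujson; the UserItem class and its field names are illustrative.

import json

class UserItem:
    """Stand-in for the record's pattern: MAPPING_JSON maps an attribute
    name on the item to the key that carries it in the JSON payload."""
    MAPPING_JSON = {'user_id': 'id', 'full_name': 'name'}

    def load_from_json(self, data):
        if not isinstance(data, dict):
            data = json.loads(data)            # accept raw JSON text too
        for attr, key in self.MAPPING_JSON.items():
            if key in data:                    # absent fields are skipped
                setattr(self, attr, data[key])
        return self                            # allow call chaining

item = UserItem().load_from_json('{"id": 7, "name": "Ada", "extra": true}')
print(item.user_id, item.full_name)            # 7 Ada ("extra" is ignored)

In the record, the `_item = self` alias adds nothing; setattr(self, ...) is the idiomatic spelling, and skipping missing keys is what makes partial payloads safe.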
{"blob_id": "d0f1c9ccecbef357568e8c206c1c94b4617e6855", "bodies": ["self.dataset_path = dataset_path\nself.cls_name = cls_name\nself.val_ratio = val_ratio\nself.dev_ratio = dev_ratio\nself.is_train = is_train\nself.use_augmentation = use_augmentation\nif self.is_train:\n self.data, self.label = load_dataset(cls_name, dataset_path)\n self.num_data = self.data.shape[0]\n self._train_val_split()\n if self.use_augmentation:\n print('training set with data augmentation is %d' % self.train_data.shape[0])\nelse:\n self.data = load_test_data(cls_name, dataset_path)\n self.num_data = self.data.shape[0]\nprint('From DataManager: Loaded dataset size is %d, name is %s' % (self.num_data, cls_name))", "idx = np.arange(self.num_data)\nnp.random.shuffle(idx)\nval_num = int(self.val_ratio * self.num_data)\ndev_num = int(self.dev_ratio * self.num_data)\nself.num_train = self.num_data - val_num\nself.val_data = self.data[idx[:val_num]]\nself.val_label = self.label[idx[:val_num]]\nif self.use_augmentation:\n self.train_data = np.vstack((self.data[idx[val_num:]], self.data[idx[val_num:], :, ::-1]))\n self.train_label = np.hstack((self.label[idx[val_num:]], self.label[idx[val_num:]]))\nelse:\n self.train_data = self.data[idx[val_num:]]\n self.train_label = self.label[idx[val_num:]]\nself.dev_data = self.data[idx[:dev_num]]\nself.dev_label = self.label[idx[:dev_num]]"], "bodies_text": "<|body_start_0|>\n self.dataset_path = dataset_path\n self.cls_name = cls_name\n self.val_ratio = val_ratio\n self.dev_ratio = dev_ratio\n self.is_train = is_train\n self.use_augmentation = use_augmentation\n if self.is_train:\n self.data, self.label = load_dataset(cls_name, dataset_path)\n self.num_data = self.data.shape[0]\n self._train_val_split()\n if self.use_augmentation:\n print('training set with data augmentation is %d' % self.train_data.shape[0])\n else:\n self.data = load_test_data(cls_name, dataset_path)\n self.num_data = self.data.shape[0]\n print('From DataManager: Loaded dataset size is %d, name is %s' % (self.num_data, cls_name))\n<|end_body_0|>\n\n<|body_start_1|>\n idx = np.arange(self.num_data)\n np.random.shuffle(idx)\n val_num = int(self.val_ratio * self.num_data)\n dev_num = int(self.dev_ratio * self.num_data)\n self.num_train = self.num_data - val_num\n self.val_data = self.data[idx[:val_num]]\n self.val_label = self.label[idx[:val_num]]\n if self.use_augmentation:\n self.train_data = np.vstack((self.data[idx[val_num:]], self.data[idx[val_num:], :, ::-1]))\n self.train_label = np.hstack((self.label[idx[val_num:]], self.label[idx[val_num:]]))\n else:\n self.train_data = self.data[idx[val_num:]]\n self.train_label = self.label[idx[val_num:]]\n self.dev_data = self.data[idx[:dev_num]]\n self.dev_label = self.label[idx[:dev_num]]\n<|end_body_1|>\n", "class_docstring": "The class of data manager", "class_name": "DataManager", "detected_licenses": ["WTFPL"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DataManager:\n \"\"\"The class of data manager\"\"\"\n\n def __init__(self, dataset_path, cls_name, val_ratio=0.1, dev_ratio=0.1, is_train=True, use_augmentation=False):\n \"\"\":param dataset_path: The path of the dataset :param ratio: (validation_set_size) / (total_dataset_size)\"\"\"\n <|body_0|>\n\n def _train_val_split(self):\n \"\"\"Split the training set ,validation set and the development set\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.dataset_path = dataset_path\n self.cls_name = cls_name\n self.val_ratio = val_ratio\n self.dev_ratio = 
dev_ratio\n self.is_train = is_train\n self.use_augmentation = use_augmentation\n if self.is_train:\n self.data, self.label = load_dataset(cls_name, dataset_path)\n self.num_data = self.data.shape[0]\n self._train_val_split()\n if self.use_augmentation:\n print('training set with data augmentation is %d' % self.train_data.shape[0])\n else:\n self.data = load_test_data(cls_name, dataset_path)\n self.num_data = self.data.shape[0]\n print('From DataManager: Loaded dataset size is %d, name is %s' % (self.num_data, cls_name))\n<|end_body_0|>\n\n<|body_start_1|>\n idx = np.arange(self.num_data)\n np.random.shuffle(idx)\n val_num = int(self.val_ratio * self.num_data)\n dev_num = int(self.dev_ratio * self.num_data)\n self.num_train = self.num_data - val_num\n self.val_data = self.data[idx[:val_num]]\n self.val_label = self.label[idx[:val_num]]\n if self.use_augmentation:\n self.train_data = np.vstack((self.data[idx[val_num:]], self.data[idx[val_num:], :, ::-1]))\n self.train_label = np.hstack((self.label[idx[val_num:]], self.label[idx[val_num:]]))\n else:\n self.train_data = self.data[idx[val_num:]]\n self.train_label = self.label[idx[val_num:]]\n self.dev_data = self.data[idx[:dev_num]]\n self.dev_label = self.label[idx[:dev_num]]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000252", "length_bytes": 3635, "license_type": "permissive", "methods": [{"docstring": ":param dataset_path: The path of the dataset :param ratio: (validation_set_size) / (total_dataset_size)", "name": "__init__", "signature": "def __init__(self, dataset_path, cls_name, val_ratio=0.1, dev_ratio=0.1, is_train=True, use_augmentation=False)"}, {"docstring": "Split the training set ,validation set and the development set", "name": "_train_val_split", "signature": "def _train_val_split(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_024164", "prompt": "Implement the Python class `DataManager` described below.\n\nClass description:\nThe class of data manager\n\nMethod signatures and docstrings:\n- def __init__(self, dataset_path, cls_name, val_ratio=0.1, dev_ratio=0.1, is_train=True, use_augmentation=False): :param dataset_path: The path of the dataset :param ratio: (validation_set_size) / (total_dataset_size)\n- def _train_val_split(self): Split the training set ,validation set and the development set", "prompted_full_text": "Implement the Python class `DataManager` described below.\n\nClass description:\nThe class of data manager\n\nMethod signatures and docstrings:\n- def __init__(self, dataset_path, cls_name, val_ratio=0.1, dev_ratio=0.1, is_train=True, use_augmentation=False): :param dataset_path: The path of the dataset :param ratio: (validation_set_size) / (total_dataset_size)\n- def _train_val_split(self): Split the training set ,validation set and the development set\n\n<|skeleton|>\nclass DataManager:\n \"\"\"The class of data manager\"\"\"\n\n def __init__(self, dataset_path, cls_name, val_ratio=0.1, dev_ratio=0.1, is_train=True, use_augmentation=False):\n \"\"\":param dataset_path: The path of the dataset :param ratio: (validation_set_size) / (total_dataset_size)\"\"\"\n <|body_0|>\n\n def _train_val_split(self):\n \"\"\"Split the training set ,validation set and the development set\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.dataset_path = dataset_path\n self.cls_name = cls_name\n self.val_ratio = val_ratio\n self.dev_ratio = dev_ratio\n self.is_train = is_train\n self.use_augmentation = use_augmentation\n if self.is_train:\n self.data, self.label = 
load_dataset(cls_name, dataset_path)\n self.num_data = self.data.shape[0]\n self._train_val_split()\n if self.use_augmentation:\n print('training set with data augmentation is %d' % self.train_data.shape[0])\n else:\n self.data = load_test_data(cls_name, dataset_path)\n self.num_data = self.data.shape[0]\n print('From DataManager: Loaded dataset size is %d, name is %s' % (self.num_data, cls_name))\n<|end_body_0|>\n\n<|body_start_1|>\n idx = np.arange(self.num_data)\n np.random.shuffle(idx)\n val_num = int(self.val_ratio * self.num_data)\n dev_num = int(self.dev_ratio * self.num_data)\n self.num_train = self.num_data - val_num\n self.val_data = self.data[idx[:val_num]]\n self.val_label = self.label[idx[:val_num]]\n if self.use_augmentation:\n self.train_data = np.vstack((self.data[idx[val_num:]], self.data[idx[val_num:], :, ::-1]))\n self.train_label = np.hstack((self.label[idx[val_num:]], self.label[idx[val_num:]]))\n else:\n self.train_data = self.data[idx[val_num:]]\n self.train_label = self.label[idx[val_num:]]\n self.dev_data = self.data[idx[:dev_num]]\n self.dev_label = self.label[idx[:dev_num]]\n<|end_body_1|>\n", "revision_id": "e0c61ae58f61fa04d264008f35fceaac5999708b", "skeleton": "<|skeleton|>\nclass DataManager:\n \"\"\"The class of data manager\"\"\"\n\n def __init__(self, dataset_path, cls_name, val_ratio=0.1, dev_ratio=0.1, is_train=True, use_augmentation=False):\n \"\"\":param dataset_path: The path of the dataset :param ratio: (validation_set_size) / (total_dataset_size)\"\"\"\n <|body_0|>\n\n def _train_val_split(self):\n \"\"\"Split the training set ,validation set and the development set\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DataManager:\n \"\"\"The class of data manager\"\"\"\n\n def __init__(self, dataset_path, cls_name, val_ratio=0.1, dev_ratio=0.1, is_train=True, use_augmentation=False):\n \"\"\":param dataset_path: The path of the dataset :param ratio: (validation_set_size) / (total_dataset_size)\"\"\"\n self.dataset_path = dataset_path\n self.cls_name = cls_name\n self.val_ratio = val_ratio\n self.dev_ratio = dev_ratio\n self.is_train = is_train\n self.use_augmentation = use_augmentation\n if self.is_train:\n self.data, self.label = load_dataset(cls_name, dataset_path)\n self.num_data = self.data.shape[0]\n self._train_val_split()\n if self.use_augmentation:\n print('training set with data augmentation is %d' % self.train_data.shape[0])\n else:\n self.data = load_test_data(cls_name, dataset_path)\n self.num_data = self.data.shape[0]\n print('From DataManager: Loaded dataset size is %d, name is %s' % (self.num_data, cls_name))\n\n def _train_val_split(self):\n \"\"\"Split the training set ,validation set and the development set\"\"\"\n idx = np.arange(self.num_data)\n np.random.shuffle(idx)\n val_num = int(self.val_ratio * self.num_data)\n dev_num = int(self.dev_ratio * self.num_data)\n self.num_train = self.num_data - val_num\n self.val_data = self.data[idx[:val_num]]\n self.val_label = self.label[idx[:val_num]]\n if self.use_augmentation:\n self.train_data = np.vstack((self.data[idx[val_num:]], self.data[idx[val_num:], :, ::-1]))\n self.train_label = np.hstack((self.label[idx[val_num:]], self.label[idx[val_num:]]))\n else:\n self.train_data = self.data[idx[val_num:]]\n self.train_label = self.label[idx[val_num:]]\n self.dev_data = self.data[idx[:dev_num]]\n self.dev_label = self.label[idx[:dev_num]]\n", 
"source": "the_stack_v2_python_sparse", "source_path": "data_utils/data_manager.py", "source_repo": "lygztq/RBP-detector-using-RNA-sequence", "split": "val", "star_events_count": 2}
{"blob_id": "89fe96847ecdbbbff36e2b1b3c71ea3142ca86a1", "bodies": ["num = len(adj)\ndist = [float('inf') for i in range(num)]\nprev = [float('inf') for i in range(num)]\ndist[start] = 0\nq = []\nheapq.heappush(q, (0, start))\nwhile len(q) != 0:\n prov_cost, src = heapq.heappop(q)\n if dist[src] < prov_cost:\n continue\n for dest in range(num):\n cost = adj[src][dest]\n if cost != float('inf') and dist[dest] > dist[src] + cost:\n dist[dest] = dist[src] + cost\n heapq.heappush(q, (dist[dest], dest))\n prev[dest] = src\nif goal is not None:\n return self.get_path(goal, prev)\nelse:\n return dist", "path = [goal]\ndest = goal\nwhile prev[dest] != float('inf'):\n path.append(prev[dest])\n dest = prev[dest]\nreturn list(reversed(path))"], "bodies_text": "<|body_start_0|>\n num = len(adj)\n dist = [float('inf') for i in range(num)]\n prev = [float('inf') for i in range(num)]\n dist[start] = 0\n q = []\n heapq.heappush(q, (0, start))\n while len(q) != 0:\n prov_cost, src = heapq.heappop(q)\n if dist[src] < prov_cost:\n continue\n for dest in range(num):\n cost = adj[src][dest]\n if cost != float('inf') and dist[dest] > dist[src] + cost:\n dist[dest] = dist[src] + cost\n heapq.heappush(q, (dist[dest], dest))\n prev[dest] = src\n if goal is not None:\n return self.get_path(goal, prev)\n else:\n return dist\n<|end_body_0|>\n\n<|body_start_1|>\n path = [goal]\n dest = goal\n while prev[dest] != float('inf'):\n path.append(prev[dest])\n dest = prev[dest]\n return list(reversed(path))\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Dijkstra", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Dijkstra:\n\n def dijkstra(self, adj, start, goal=None):\n \"\"\"ダイクストラアルゴリズムによる最短経路を求めるメソッド 入力 adj: adj[i][j]の値が頂点iから頂点jまでの距離(頂点iから頂点jに枝がない場合,値はfloat('inf'))となるような2次元リスト(正方行列) start: 始点のID goal: オプション引数.終点のID 出力 goalを引数に持つ場合,startからgoalまでの最短経路を格納したリストを返す 持たない場合は,startから各頂点までの最短距離を格納したリストを返す >>> d = Dijkstra() >>> d.dijkstra([[float('inf'), 2, 4, float('inf'), float('inf')], [2, float('inf'), 3, 5, float('inf')], [4, 3, float('inf'), 1, 4], [float('inf'), 5, 1, float('inf'), 3], [float('inf'), float('inf'), 4, 3, float('inf')]], 0) [0, 2, 4, 5, 8] # 例えば,始点0から頂点3までの最短距離は5となる >>> d.dijkstra([[float('inf'), 2, 4, float('inf'), float('inf')], [2, float('inf'), 3, 5, float('inf')], [4, 3, float('inf'), 1, 4], [float('inf'), 5, 1, float('inf'), 3], [float('inf'\"\"\"\n <|body_0|>\n\n def get_path(self, goal, prev):\n \"\"\"始点startから終点goalまでの最短経路を求める\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n num = len(adj)\n dist = [float('inf') for i in range(num)]\n prev = [float('inf') for i in range(num)]\n dist[start] = 0\n q = []\n heapq.heappush(q, (0, start))\n while len(q) != 0:\n prov_cost, src = heapq.heappop(q)\n if dist[src] < prov_cost:\n continue\n for dest in range(num):\n cost = adj[src][dest]\n if cost != float('inf') and dist[dest] > dist[src] + cost:\n dist[dest] = dist[src] + cost\n heapq.heappush(q, (dist[dest], dest))\n prev[dest] = src\n if goal is not None:\n return self.get_path(goal, prev)\n else:\n return dist\n<|end_body_0|>\n\n<|body_start_1|>\n path = [goal]\n dest = goal\n while prev[dest] != float('inf'):\n path.append(prev[dest])\n dest = prev[dest]\n return list(reversed(path))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000253", "length_bytes": 3901, "license_type": "no_license", "methods": [{"docstring": "ダイクストラアルゴリズムによる最短経路を求めるメソッド 入力 adj: 
adj[i][j]の値が頂点iから頂点jまでの距離(頂点iから頂点jに枝がない場合,値はfloat('inf'))となるような2次元リスト(正方行列) start: 始点のID goal: オプション引数.終点のID 出力 goalを引数に持つ場合,startからgoalまでの最短経路を格納したリストを返す 持たない場合は,startから各頂点までの最短距離を格納したリストを返す >>> d = Dijkstra() >>> d.dijkstra([[float('inf'), 2, 4, float('inf'), float('inf')], [2, float('inf'), 3, 5, float('inf')], [4, 3, float('inf'), 1, 4], [float('inf'), 5, 1, float('inf'), 3], [float('inf'), float('inf'), 4, 3, float('inf')]], 0) [0, 2, 4, 5, 8] # 例えば,始点0から頂点3までの最短距離は5となる >>> d.dijkstra([[float('inf'), 2, 4, float('inf'), float('inf')], [2, float('inf'), 3, 5, float('inf')], [4, 3, float('inf'), 1, 4], [float('inf'), 5, 1, float('inf'), 3], [float('inf'", "name": "dijkstra", "signature": "def dijkstra(self, adj, start, goal=None)"}, {"docstring": "始点startから終点goalまでの最短経路を求める", "name": "get_path", "signature": "def get_path(self, goal, prev)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_044657", "prompt": "Implement the Python class `Dijkstra` described below.\n\nClass description:\nImplement the Dijkstra class.\n\nMethod signatures and docstrings:\n- def dijkstra(self, adj, start, goal=None): ダイクストラアルゴリズムによる最短経路を求めるメソッド 入力 adj: adj[i][j]の値が頂点iから頂点jまでの距離(頂点iから頂点jに枝がない場合,値はfloat('inf'))となるような2次元リスト(正方行列) start: 始点のID goal: オプション引数.終点のID 出力 goalを引数に持つ場合,startからgoalまでの最短経路を格納したリストを返す 持たない場合は,startから各頂点までの最短距離を格納したリストを返す >>> d = Dijkstra() >>> d.dijkstra([[float('inf'), 2, 4, float('inf'), float('inf')], [2, float('inf'), 3, 5, float('inf')], [4, 3, float('inf'), 1, 4], [float('inf'), 5, 1, float('inf'), 3], [float('inf'), float('inf'), 4, 3, float('inf')]], 0) [0, 2, 4, 5, 8] # 例えば,始点0から頂点3までの最短距離は5となる >>> d.dijkstra([[float('inf'), 2, 4, float('inf'), float('inf')], [2, float('inf'), 3, 5, float('inf')], [4, 3, float('inf'), 1, 4], [float('inf'), 5, 1, float('inf'), 3], [float('inf'\n- def get_path(self, goal, prev): 始点startから終点goalまでの最短経路を求める", "prompted_full_text": "Implement the Python class `Dijkstra` described below.\n\nClass description:\nImplement the Dijkstra class.\n\nMethod signatures and docstrings:\n- def dijkstra(self, adj, start, goal=None): ダイクストラアルゴリズムによる最短経路を求めるメソッド 入力 adj: adj[i][j]の値が頂点iから頂点jまでの距離(頂点iから頂点jに枝がない場合,値はfloat('inf'))となるような2次元リスト(正方行列) start: 始点のID goal: オプション引数.終点のID 出力 goalを引数に持つ場合,startからgoalまでの最短経路を格納したリストを返す 持たない場合は,startから各頂点までの最短距離を格納したリストを返す >>> d = Dijkstra() >>> d.dijkstra([[float('inf'), 2, 4, float('inf'), float('inf')], [2, float('inf'), 3, 5, float('inf')], [4, 3, float('inf'), 1, 4], [float('inf'), 5, 1, float('inf'), 3], [float('inf'), float('inf'), 4, 3, float('inf')]], 0) [0, 2, 4, 5, 8] # 例えば,始点0から頂点3までの最短距離は5となる >>> d.dijkstra([[float('inf'), 2, 4, float('inf'), float('inf')], [2, float('inf'), 3, 5, float('inf')], [4, 3, float('inf'), 1, 4], [float('inf'), 5, 1, float('inf'), 3], [float('inf'\n- def get_path(self, goal, prev): 始点startから終点goalまでの最短経路を求める\n\n<|skeleton|>\nclass Dijkstra:\n\n def dijkstra(self, adj, start, goal=None):\n \"\"\"ダイクストラアルゴリズムによる最短経路を求めるメソッド 入力 adj: adj[i][j]の値が頂点iから頂点jまでの距離(頂点iから頂点jに枝がない場合,値はfloat('inf'))となるような2次元リスト(正方行列) start: 始点のID goal: オプション引数.終点のID 出力 goalを引数に持つ場合,startからgoalまでの最短経路を格納したリストを返す 持たない場合は,startから各頂点までの最短距離を格納したリストを返す >>> d = Dijkstra() >>> d.dijkstra([[float('inf'), 2, 4, float('inf'), float('inf')], [2, float('inf'), 3, 5, float('inf')], [4, 3, float('inf'), 1, 4], [float('inf'), 5, 1, float('inf'), 3], [float('inf'), float('inf'), 4, 3, float('inf')]], 0) [0, 2, 4, 5, 8] # 例えば,始点0から頂点3までの最短距離は5となる >>> d.dijkstra([[float('inf'), 2, 4, float('inf'), float('inf')], [2, 
float('inf'), 3, 5, float('inf')], [4, 3, float('inf'), 1, 4], [float('inf'), 5, 1, float('inf'), 3], [float('inf'\"\"\"\n <|body_0|>\n\n def get_path(self, goal, prev):\n \"\"\"始点startから終点goalまでの最短経路を求める\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n num = len(adj)\n dist = [float('inf') for i in range(num)]\n prev = [float('inf') for i in range(num)]\n dist[start] = 0\n q = []\n heapq.heappush(q, (0, start))\n while len(q) != 0:\n prov_cost, src = heapq.heappop(q)\n if dist[src] < prov_cost:\n continue\n for dest in range(num):\n cost = adj[src][dest]\n if cost != float('inf') and dist[dest] > dist[src] + cost:\n dist[dest] = dist[src] + cost\n heapq.heappush(q, (dist[dest], dest))\n prev[dest] = src\n if goal is not None:\n return self.get_path(goal, prev)\n else:\n return dist\n<|end_body_0|>\n\n<|body_start_1|>\n path = [goal]\n dest = goal\n while prev[dest] != float('inf'):\n path.append(prev[dest])\n dest = prev[dest]\n return list(reversed(path))\n<|end_body_1|>\n", "revision_id": "a12d30e0d1eeb58235b6fc51a558f409a2ee3792", "skeleton": "<|skeleton|>\nclass Dijkstra:\n\n def dijkstra(self, adj, start, goal=None):\n \"\"\"ダイクストラアルゴリズムによる最短経路を求めるメソッド 入力 adj: adj[i][j]の値が頂点iから頂点jまでの距離(頂点iから頂点jに枝がない場合,値はfloat('inf'))となるような2次元リスト(正方行列) start: 始点のID goal: オプション引数.終点のID 出力 goalを引数に持つ場合,startからgoalまでの最短経路を格納したリストを返す 持たない場合は,startから各頂点までの最短距離を格納したリストを返す >>> d = Dijkstra() >>> d.dijkstra([[float('inf'), 2, 4, float('inf'), float('inf')], [2, float('inf'), 3, 5, float('inf')], [4, 3, float('inf'), 1, 4], [float('inf'), 5, 1, float('inf'), 3], [float('inf'), float('inf'), 4, 3, float('inf')]], 0) [0, 2, 4, 5, 8] # 例えば,始点0から頂点3までの最短距離は5となる >>> d.dijkstra([[float('inf'), 2, 4, float('inf'), float('inf')], [2, float('inf'), 3, 5, float('inf')], [4, 3, float('inf'), 1, 4], [float('inf'), 5, 1, float('inf'), 3], [float('inf'\"\"\"\n <|body_0|>\n\n def get_path(self, goal, prev):\n \"\"\"始点startから終点goalまでの最短経路を求める\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Dijkstra:\n def dijkstra(self, adj, start, goal=None):\n \"\"\"ダイクストラアルゴリズムによる最短経路を求めるメソッド 入力 adj: adj[i][j]の値が頂点iから頂点jまでの距離(頂点iから頂点jに枝がない場合,値はfloat('inf'))となるような2次元リスト(正方行列) start: 始点のID goal: オプション引数.終点のID 出力 goalを引数に持つ場合,startからgoalまでの最短経路を格納したリストを返す 持たない場合は,startから各頂点までの最短距離を格納したリストを返す >>> d = Dijkstra() >>> d.dijkstra([[float('inf'), 2, 4, float('inf'), float('inf')], [2, float('inf'), 3, 5, float('inf')], [4, 3, float('inf'), 1, 4], [float('inf'), 5, 1, float('inf'), 3], [float('inf'), float('inf'), 4, 3, float('inf')]], 0) [0, 2, 4, 5, 8] # 例えば,始点0から頂点3までの最短距離は5となる >>> d.dijkstra([[float('inf'), 2, 4, float('inf'), float('inf')], [2, float('inf'), 3, 5, float('inf')], [4, 3, float('inf'), 1, 4], [float('inf'), 5, 1, float('inf'), 3], [float('inf'\"\"\"\n num = len(adj)\n dist = [float('inf') for i in range(num)]\n prev = [float('inf') for i in range(num)]\n dist[start] = 0\n q = []\n heapq.heappush(q, (0, start))\n while len(q) != 0:\n prov_cost, src = heapq.heappop(q)\n if dist[src] < prov_cost:\n continue\n for dest in range(num):\n cost = adj[src][dest]\n if cost != float('inf') and dist[dest] > dist[src] + cost:\n dist[dest] = dist[src] + cost\n heapq.heappush(q, (dist[dest], dest))\n prev[dest] = src\n if goal is not None:\n return self.get_path(goal, prev)\n else:\n return dist\n\n def get_path(self, goal, prev):\n \"\"\"始点startから終点goalまでの最短経路を求める\"\"\"\n path = 
[goal]\n dest = goal\n while prev[dest] != float('inf'):\n path.append(prev[dest])\n dest = prev[dest]\n return list(reversed(path))\n", "source": "the_stack_v2_python_sparse", "source_path": "utils/graph/dijkstra2.py", "source_repo": "masa3141/procon", "split": "val", "star_events_count": 0}
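The Dijkstra record uses lazy deletion: outdated heap entries are never removed, only skipped on pop via the dist[src] < prov_cost check, while prev records each vertex's shortest-path parent. A usage sketch reproducing the docstring's 5-vertex example; the Dijkstra class and its heapq import are assumed to be in scope from the record.

INF = float('inf')
adj = [[INF, 2,   4,   INF, INF],
       [2,   INF, 3,   5,   INF],
       [4,   3,   INF, 1,   4  ],
       [INF, 5,   1,   INF, 3  ],
       [INF, INF, 4,   3,   INF]]

d = Dijkstra()
print(d.dijkstra(adj, 0))           # [0, 2, 4, 5, 8]: distance from 0 to each vertex
print(d.dijkstra(adj, 0, goal=4))   # [0, 2, 4]: vertex IDs along a shortest path

Each non-stale pop scans an entire matrix row, so the dense representation pays O(V^2) in relaxation work alone; adjacency lists would restore the familiar O(E log V).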
{"blob_id": "86dd0a043a02a4ab9dd7ed88263d414085244f4c", "bodies": ["if isinstance(key, int):\n return SeedID(key)\nif key not in SeedID._member_map_:\n return extend_enum(SeedID, key, default)\nreturn SeedID[key]", "if not (isinstance(value, int) and 0 <= value <= 3):\n raise ValueError('%r is not a valid %s' % (value, cls.__name__))\nreturn extend_enum(cls, 'Unassigned_0b%s' % bin(value)[2:].zfill(2), value)"], "bodies_text": "<|body_start_0|>\n if isinstance(key, int):\n return SeedID(key)\n if key not in SeedID._member_map_:\n return extend_enum(SeedID, key, default)\n return SeedID[key]\n<|end_body_0|>\n\n<|body_start_1|>\n if not (isinstance(value, int) and 0 <= value <= 3):\n raise ValueError('%r is not a valid %s' % (value, cls.__name__))\n return extend_enum(cls, 'Unassigned_0b%s' % bin(value)[2:].zfill(2), value)\n<|end_body_1|>\n", "class_docstring": "[SeedID] Seed-ID Types", "class_name": "SeedID", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SeedID:\n \"\"\"[SeedID] Seed-ID Types\"\"\"\n\n def get(key: 'int | str', default: 'int'=-1) -> 'SeedID':\n \"\"\"Backport support for original codes. Args: key: Key to get enum item. default: Default value if not found. :meta private:\"\"\"\n <|body_0|>\n\n def _missing_(cls, value: 'int') -> 'SeedID':\n \"\"\"Lookup function used when value is not found. Args: value: Value to get enum item.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if isinstance(key, int):\n return SeedID(key)\n if key not in SeedID._member_map_:\n return extend_enum(SeedID, key, default)\n return SeedID[key]\n<|end_body_0|>\n\n<|body_start_1|>\n if not (isinstance(value, int) and 0 <= value <= 3):\n raise ValueError('%r is not a valid %s' % (value, cls.__name__))\n return extend_enum(cls, 'Unassigned_0b%s' % bin(value)[2:].zfill(2), value)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000254", "length_bytes": 1529, "license_type": "permissive", "methods": [{"docstring": "Backport support for original codes. Args: key: Key to get enum item. default: Default value if not found. :meta private:", "name": "get", "signature": "def get(key: 'int | str', default: 'int'=-1) -> 'SeedID'"}, {"docstring": "Lookup function used when value is not found. Args: value: Value to get enum item.", "name": "_missing_", "signature": "def _missing_(cls, value: 'int') -> 'SeedID'"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_002969", "prompt": "Implement the Python class `SeedID` described below.\n\nClass description:\n[SeedID] Seed-ID Types\n\nMethod signatures and docstrings:\n- def get(key: 'int | str', default: 'int'=-1) -> 'SeedID': Backport support for original codes. Args: key: Key to get enum item. default: Default value if not found. :meta private:\n- def _missing_(cls, value: 'int') -> 'SeedID': Lookup function used when value is not found. Args: value: Value to get enum item.", "prompted_full_text": "Implement the Python class `SeedID` described below.\n\nClass description:\n[SeedID] Seed-ID Types\n\nMethod signatures and docstrings:\n- def get(key: 'int | str', default: 'int'=-1) -> 'SeedID': Backport support for original codes. Args: key: Key to get enum item. default: Default value if not found. :meta private:\n- def _missing_(cls, value: 'int') -> 'SeedID': Lookup function used when value is not found. 
Args: value: Value to get enum item.\n\n<|skeleton|>\nclass SeedID:\n \"\"\"[SeedID] Seed-ID Types\"\"\"\n\n def get(key: 'int | str', default: 'int'=-1) -> 'SeedID':\n \"\"\"Backport support for original codes. Args: key: Key to get enum item. default: Default value if not found. :meta private:\"\"\"\n <|body_0|>\n\n def _missing_(cls, value: 'int') -> 'SeedID':\n \"\"\"Lookup function used when value is not found. Args: value: Value to get enum item.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if isinstance(key, int):\n return SeedID(key)\n if key not in SeedID._member_map_:\n return extend_enum(SeedID, key, default)\n return SeedID[key]\n<|end_body_0|>\n\n<|body_start_1|>\n if not (isinstance(value, int) and 0 <= value <= 3):\n raise ValueError('%r is not a valid %s' % (value, cls.__name__))\n return extend_enum(cls, 'Unassigned_0b%s' % bin(value)[2:].zfill(2), value)\n<|end_body_1|>\n", "revision_id": "a6fe49ec58f09e105bec5a00fb66d9b3f22730d9", "skeleton": "<|skeleton|>\nclass SeedID:\n \"\"\"[SeedID] Seed-ID Types\"\"\"\n\n def get(key: 'int | str', default: 'int'=-1) -> 'SeedID':\n \"\"\"Backport support for original codes. Args: key: Key to get enum item. default: Default value if not found. :meta private:\"\"\"\n <|body_0|>\n\n def _missing_(cls, value: 'int') -> 'SeedID':\n \"\"\"Lookup function used when value is not found. Args: value: Value to get enum item.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SeedID:\n \"\"\"[SeedID] Seed-ID Types\"\"\"\n\n def get(key: 'int | str', default: 'int'=-1) -> 'SeedID':\n \"\"\"Backport support for original codes. Args: key: Key to get enum item. default: Default value if not found. :meta private:\"\"\"\n if isinstance(key, int):\n return SeedID(key)\n if key not in SeedID._member_map_:\n return extend_enum(SeedID, key, default)\n return SeedID[key]\n\n def _missing_(cls, value: 'int') -> 'SeedID':\n \"\"\"Lookup function used when value is not found. Args: value: Value to get enum item.\"\"\"\n if not (isinstance(value, int) and 0 <= value <= 3):\n raise ValueError('%r is not a valid %s' % (value, cls.__name__))\n return extend_enum(cls, 'Unassigned_0b%s' % bin(value)[2:].zfill(2), value)\n", "source": "the_stack_v2_python_sparse", "source_path": "pcapkit/const/ipv6/seed_id.py", "source_repo": "JarryShaw/PyPCAPKit", "split": "val", "star_events_count": 204}
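The SeedID record, from pcapkit's IPv6 constant tables, shows the aenum pattern for open-ended protocol registries: _missing_ validates the raw value against the field's range (0..3 here, i.e. two bits) and mints an Unassigned_0bXX member on the fly with extend_enum, so unknown wire values still resolve to a member. A minimal sketch assuming the aenum package and that its extend_enum returns the newly created member (the record relies on the same behaviour); the KNOWN member is invented, and the skeleton format has evidently dropped the record's decorators (get reads as a staticmethod, _missing_ as a classmethod).

from aenum import IntEnum, extend_enum

class Registry(IntEnum):
    """Toy 2-bit registry in the style of the SeedID record."""
    KNOWN = 0                       # illustrative member, not from the record

    @classmethod
    def _missing_(cls, value):
        # Reject values outside the 2-bit range, then create and cache a
        # placeholder member so this lookup (and all later ones) succeeds.
        if not (isinstance(value, int) and 0 <= value <= 3):
            raise ValueError('%r is not a valid %s' % (value, cls.__name__))
        return extend_enum(cls, 'Unassigned_0b%s' % bin(value)[2:].zfill(2), value)

print(Registry(2))                  # Registry.Unassigned_0b10, minted on demand
print(Registry(2) is Registry(2))   # True: the member is registered now
# Registry(7) would raise ValueError, since 7 does not fit in two bits.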
{"blob_id": "599e2fc5c6b112eed4b7744efae3c7f0da0e6138", "bodies": ["self.achieveSystem and self.achieveSystem.triggerCatchFishEvent(event)\nself.activitySystem and self.activitySystem.dealCatchFish(event)\ncoinAddition = 0\nif 0 < event.gainChip < self.catchBonus:\n coinAddition = event.gainChip\n self.catchBonus -= coinAddition\nif ftlog.is_debug():\n ftlog.debug('triggerCatchFishEvent', event.userId, self.catchBonus, event.gainChip, coinAddition)\nfor player in self.table.players:\n if player and player.taskSystemUser:\n player.taskSystemUser.dealCatchEvent(event, coinAddition)", "for player in self.table.players:\n if player and player.taskSystemUser:\n player.taskSystemUser.dealComboEvent(event)", "self.activitySystem and self.activitySystem.useSkill(event.skillId)\nfor player in self.table.players:\n if player and player.taskSystemUser:\n player.taskSystemUser.dealUserSkillEvent(event)"], "bodies_text": "<|body_start_0|>\n self.achieveSystem and self.achieveSystem.triggerCatchFishEvent(event)\n self.activitySystem and self.activitySystem.dealCatchFish(event)\n coinAddition = 0\n if 0 < event.gainChip < self.catchBonus:\n coinAddition = event.gainChip\n self.catchBonus -= coinAddition\n if ftlog.is_debug():\n ftlog.debug('triggerCatchFishEvent', event.userId, self.catchBonus, event.gainChip, coinAddition)\n for player in self.table.players:\n if player and player.taskSystemUser:\n player.taskSystemUser.dealCatchEvent(event, coinAddition)\n<|end_body_0|>\n\n<|body_start_1|>\n for player in self.table.players:\n if player and player.taskSystemUser:\n player.taskSystemUser.dealComboEvent(event)\n<|end_body_1|>\n\n<|body_start_2|>\n self.activitySystem and self.activitySystem.useSkill(event.skillId)\n for player in self.table.players:\n if player and player.taskSystemUser:\n player.taskSystemUser.dealUserSkillEvent(event)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "FishFriendPlayer", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FishFriendPlayer:\n\n def triggerCatchFishEvent(self, event):\n \"\"\"覆盖父类的方法\"\"\"\n <|body_0|>\n\n def triggerComboEvent(self, event):\n \"\"\"触发连击事件\"\"\"\n <|body_1|>\n\n def triggerUseSkillEvent(self, event):\n \"\"\"处理使用技能事件\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.achieveSystem and self.achieveSystem.triggerCatchFishEvent(event)\n self.activitySystem and self.activitySystem.dealCatchFish(event)\n coinAddition = 0\n if 0 < event.gainChip < self.catchBonus:\n coinAddition = event.gainChip\n self.catchBonus -= coinAddition\n if ftlog.is_debug():\n ftlog.debug('triggerCatchFishEvent', event.userId, self.catchBonus, event.gainChip, coinAddition)\n for player in self.table.players:\n if player and player.taskSystemUser:\n player.taskSystemUser.dealCatchEvent(event, coinAddition)\n<|end_body_0|>\n\n<|body_start_1|>\n for player in self.table.players:\n if player and player.taskSystemUser:\n player.taskSystemUser.dealComboEvent(event)\n<|end_body_1|>\n\n<|body_start_2|>\n self.activitySystem and self.activitySystem.useSkill(event.skillId)\n for player in self.table.players:\n if player and player.taskSystemUser:\n player.taskSystemUser.dealUserSkillEvent(event)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000255", "length_bytes": 1501, "license_type": "no_license", "methods": [{"docstring": "覆盖父类的方法", "name": "triggerCatchFishEvent", "signature": "def triggerCatchFishEvent(self, event)"}, {"docstring": "触发连击事件", "name": "triggerComboEvent", 
"signature": "def triggerComboEvent(self, event)"}, {"docstring": "处理使用技能事件", "name": "triggerUseSkillEvent", "signature": "def triggerUseSkillEvent(self, event)"}], "n_methods": 3, "prompt": "Implement the Python class `FishFriendPlayer` described below.\n\nClass description:\nImplement the FishFriendPlayer class.\n\nMethod signatures and docstrings:\n- def triggerCatchFishEvent(self, event): 覆盖父类的方法\n- def triggerComboEvent(self, event): 触发连击事件\n- def triggerUseSkillEvent(self, event): 处理使用技能事件", "prompted_full_text": "Implement the Python class `FishFriendPlayer` described below.\n\nClass description:\nImplement the FishFriendPlayer class.\n\nMethod signatures and docstrings:\n- def triggerCatchFishEvent(self, event): 覆盖父类的方法\n- def triggerComboEvent(self, event): 触发连击事件\n- def triggerUseSkillEvent(self, event): 处理使用技能事件\n\n<|skeleton|>\nclass FishFriendPlayer:\n\n def triggerCatchFishEvent(self, event):\n \"\"\"覆盖父类的方法\"\"\"\n <|body_0|>\n\n def triggerComboEvent(self, event):\n \"\"\"触发连击事件\"\"\"\n <|body_1|>\n\n def triggerUseSkillEvent(self, event):\n \"\"\"处理使用技能事件\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.achieveSystem and self.achieveSystem.triggerCatchFishEvent(event)\n self.activitySystem and self.activitySystem.dealCatchFish(event)\n coinAddition = 0\n if 0 < event.gainChip < self.catchBonus:\n coinAddition = event.gainChip\n self.catchBonus -= coinAddition\n if ftlog.is_debug():\n ftlog.debug('triggerCatchFishEvent', event.userId, self.catchBonus, event.gainChip, coinAddition)\n for player in self.table.players:\n if player and player.taskSystemUser:\n player.taskSystemUser.dealCatchEvent(event, coinAddition)\n<|end_body_0|>\n\n<|body_start_1|>\n for player in self.table.players:\n if player and player.taskSystemUser:\n player.taskSystemUser.dealComboEvent(event)\n<|end_body_1|>\n\n<|body_start_2|>\n self.activitySystem and self.activitySystem.useSkill(event.skillId)\n for player in self.table.players:\n if player and player.taskSystemUser:\n player.taskSystemUser.dealUserSkillEvent(event)\n<|end_body_2|>\n", "revision_id": "fa1591863985a418fd361eb6dac36d1301bc1231", "skeleton": "<|skeleton|>\nclass FishFriendPlayer:\n\n def triggerCatchFishEvent(self, event):\n \"\"\"覆盖父类的方法\"\"\"\n <|body_0|>\n\n def triggerComboEvent(self, event):\n \"\"\"触发连击事件\"\"\"\n <|body_1|>\n\n def triggerUseSkillEvent(self, event):\n \"\"\"处理使用技能事件\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class FishFriendPlayer:\n def triggerCatchFishEvent(self, event):\n \"\"\"覆盖父类的方法\"\"\"\n self.achieveSystem and self.achieveSystem.triggerCatchFishEvent(event)\n self.activitySystem and self.activitySystem.dealCatchFish(event)\n coinAddition = 0\n if 0 < event.gainChip < self.catchBonus:\n coinAddition = event.gainChip\n self.catchBonus -= coinAddition\n if ftlog.is_debug():\n ftlog.debug('triggerCatchFishEvent', event.userId, self.catchBonus, event.gainChip, coinAddition)\n for player in self.table.players:\n if player and player.taskSystemUser:\n player.taskSystemUser.dealCatchEvent(event, coinAddition)\n\n def triggerComboEvent(self, event):\n \"\"\"触发连击事件\"\"\"\n for player in self.table.players:\n if player and player.taskSystemUser:\n player.taskSystemUser.dealComboEvent(event)\n\n def triggerUseSkillEvent(self, event):\n \"\"\"处理使用技能事件\"\"\"\n self.activitySystem and self.activitySystem.useSkill(event.skillId)\n for player in 
self.table.players:\n if player and player.taskSystemUser:\n player.taskSystemUser.dealUserSkillEvent(event)\n", "source": "the_stack_v2_python_sparse", "source_path": "learn_tu_you/wx_superboss/trunk/hall37-newfish/src/newfish/player/friend_player.py", "source_repo": "isoundy000/learn_python", "split": "val", "star_events_count": 0}
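
The FishFriendPlayer solution guards each hop with the `obj and obj.method()` idiom before fanning an event out to every seated player. A self-contained toy of that broadcast shape, with every class name below invented for illustration:

    class TaskSystem:
        def __init__(self):
            self.seen = []

        def dealComboEvent(self, event):
            self.seen.append(event)

    class Player:
        def __init__(self, task_system=None):
            self.taskSystemUser = task_system

    def trigger_combo(players, event):
        # Same shape as triggerComboEvent: empty seats (None) and players
        # without a task system are skipped by the two truthiness checks.
        for player in players:
            if player and player.taskSystemUser:
                player.taskSystemUser.dealComboEvent(event)

    players = [Player(TaskSystem()), None, Player()]
    trigger_combo(players, {'combo': 3})
    print(players[0].taskSystemUser.seen)   # [{'combo': 3}]
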
{"blob_id": "d1332e7c017393a3dcf4c97f7f3b02e225346838", "bodies": ["plain = [ord(i) for i in text]\nkey = []\ncipher = []\nfor i in plain:\n k = random.randint(1, 300)\n c = (i + k) * k\n cipher.append(c)\n key.append(k)\nreturn (cipher, key)", "plain = []\nfor i in range(len(key)):\n p = int((cipher[i] - key[i] ** 2) / key[i])\n plain.append(chr(p))\nreturn ''.join(plain)"], "bodies_text": "<|body_start_0|>\n plain = [ord(i) for i in text]\n key = []\n cipher = []\n for i in plain:\n k = random.randint(1, 300)\n c = (i + k) * k\n cipher.append(c)\n key.append(k)\n return (cipher, key)\n<|end_body_0|>\n\n<|body_start_1|>\n plain = []\n for i in range(len(key)):\n p = int((cipher[i] - key[i] ** 2) / key[i])\n plain.append(chr(p))\n return ''.join(plain)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Onepad", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Onepad:\n\n def encrypt(text: str) -> tuple[list[int], list[int]]:\n \"\"\"Function to encrypt text using pseudo-random numbers\"\"\"\n <|body_0|>\n\n def decrypt(cipher: list[int], key: list[int]) -> str:\n \"\"\"Function to decrypt text using pseudo-random numbers.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n plain = [ord(i) for i in text]\n key = []\n cipher = []\n for i in plain:\n k = random.randint(1, 300)\n c = (i + k) * k\n cipher.append(c)\n key.append(k)\n return (cipher, key)\n<|end_body_0|>\n\n<|body_start_1|>\n plain = []\n for i in range(len(key)):\n p = int((cipher[i] - key[i] ** 2) / key[i])\n plain.append(chr(p))\n return ''.join(plain)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000256", "length_bytes": 859, "license_type": "permissive", "methods": [{"docstring": "Function to encrypt text using pseudo-random numbers", "name": "encrypt", "signature": "def encrypt(text: str) -> tuple[list[int], list[int]]"}, {"docstring": "Function to decrypt text using pseudo-random numbers.", "name": "decrypt", "signature": "def decrypt(cipher: list[int], key: list[int]) -> str"}], "n_methods": 2, "prompt": "Implement the Python class `Onepad` described below.\n\nClass description:\nImplement the Onepad class.\n\nMethod signatures and docstrings:\n- def encrypt(text: str) -> tuple[list[int], list[int]]: Function to encrypt text using pseudo-random numbers\n- def decrypt(cipher: list[int], key: list[int]) -> str: Function to decrypt text using pseudo-random numbers.", "prompted_full_text": "Implement the Python class `Onepad` described below.\n\nClass description:\nImplement the Onepad class.\n\nMethod signatures and docstrings:\n- def encrypt(text: str) -> tuple[list[int], list[int]]: Function to encrypt text using pseudo-random numbers\n- def decrypt(cipher: list[int], key: list[int]) -> str: Function to decrypt text using pseudo-random numbers.\n\n<|skeleton|>\nclass Onepad:\n\n def encrypt(text: str) -> tuple[list[int], list[int]]:\n \"\"\"Function to encrypt text using pseudo-random numbers\"\"\"\n <|body_0|>\n\n def decrypt(cipher: list[int], key: list[int]) -> str:\n \"\"\"Function to decrypt text using pseudo-random numbers.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n plain = [ord(i) for i in text]\n key = []\n cipher = []\n for i in plain:\n k = random.randint(1, 300)\n c = (i + k) * k\n cipher.append(c)\n key.append(k)\n return (cipher, key)\n<|end_body_0|>\n\n<|body_start_1|>\n plain = []\n for i in range(len(key)):\n p = int((cipher[i] - key[i] ** 2) / key[i])\n plain.append(chr(p))\n return 
''.join(plain)\n<|end_body_1|>\n", "revision_id": "421ace81edb0d9af3a173f4ca7e66cc900078c1d", "skeleton": "<|skeleton|>\nclass Onepad:\n\n def encrypt(text: str) -> tuple[list[int], list[int]]:\n \"\"\"Function to encrypt text using pseudo-random numbers\"\"\"\n <|body_0|>\n\n def decrypt(cipher: list[int], key: list[int]) -> str:\n \"\"\"Function to decrypt text using pseudo-random numbers.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Onepad:\n def encrypt(text: str) -> tuple[list[int], list[int]]:\n \"\"\"Function to encrypt text using pseudo-random numbers\"\"\"\n plain = [ord(i) for i in text]\n key = []\n cipher = []\n for i in plain:\n k = random.randint(1, 300)\n c = (i + k) * k\n cipher.append(c)\n key.append(k)\n return (cipher, key)\n\n def decrypt(cipher: list[int], key: list[int]) -> str:\n \"\"\"Function to decrypt text using pseudo-random numbers.\"\"\"\n plain = []\n for i in range(len(key)):\n p = int((cipher[i] - key[i] ** 2) / key[i])\n plain.append(chr(p))\n return ''.join(plain)\n", "source": "the_stack_v2_python_sparse", "source_path": "ciphers/onepad_cipher.py", "source_repo": "TheAlgorithms/Python", "split": "val", "star_events_count": 184217}
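
The Onepad solution is easy to sanity-check: decrypt inverts (p + k) * k exactly, so a round trip must reproduce the input. The same logic restated as module-level functions (the record's methods take no self), with the random import the record assumes:

    import random

    def encrypt(text):
        # Pair every code point p with a fresh key k and store (p + k) * k.
        key, cipher = [], []
        for p in (ord(c) for c in text):
            k = random.randint(1, 300)
            cipher.append((p + k) * k)
            key.append(k)
        return cipher, key

    def decrypt(cipher, key):
        # Exact inverse: ((p + k) * k - k * k) // k == p, all in integers.
        return ''.join(chr((c - k * k) // k) for c, k in zip(cipher, key))

    cipher, key = encrypt('attack at dawn')
    assert decrypt(cipher, key) == 'attack at dawn'
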
{"blob_id": "f5b9759b2171b993b8644f063ff8b762c92c6927", "bodies": ["self.heap = nums\nself.k = k\nheapq.heapify(self.heap)\nwhile len(self.heap) > k:\n heapq.heappop(self.heap)", "if len(self.heap) < self.k:\n heapq.heappush(self.heap, val)\nelse:\n heapq.heappushpop(self.heap, val)\nreturn self.heap[0]"], "bodies_text": "<|body_start_0|>\n self.heap = nums\n self.k = k\n heapq.heapify(self.heap)\n while len(self.heap) > k:\n heapq.heappop(self.heap)\n<|end_body_0|>\n\n<|body_start_1|>\n if len(self.heap) < self.k:\n heapq.heappush(self.heap, val)\n else:\n heapq.heappushpop(self.heap, val)\n return self.heap[0]\n<|end_body_1|>\n", "class_docstring": "- time limit exceeded - Overkill, only need a heap for largest elements, rest not care def __init__(self, k, nums): self.heap = [-n for n in nums] self.k = k heapq.heapify(self.heap) def add(self, val): kLargest = [] heapq.heappush(self.heap, -val) for i in range(self.k): kLargest.append(heapq.heappop(self.heap)) res = -kLargest[-1] for i in range(self.k): heapq.heappush(self.heap, kLargest[i]) return res", "class_name": "KthLargest", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass KthLargest:\n \"\"\"- time limit exceeded - Overkill, only need a heap for largest elements, rest not care def __init__(self, k, nums): self.heap = [-n for n in nums] self.k = k heapq.heapify(self.heap) def add(self, val): kLargest = [] heapq.heappush(self.heap, -val) for i in range(self.k): kLargest.append(heapq.heappop(self.heap)) res = -kLargest[-1] for i in range(self.k): heapq.heappush(self.heap, kLargest[i]) return res\"\"\"\n\n def __init__(self, k, nums):\n \"\"\":type k: int :type nums: List[int]\"\"\"\n <|body_0|>\n\n def add(self, val):\n \"\"\":type val: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.heap = nums\n self.k = k\n heapq.heapify(self.heap)\n while len(self.heap) > k:\n heapq.heappop(self.heap)\n<|end_body_0|>\n\n<|body_start_1|>\n if len(self.heap) < self.k:\n heapq.heappush(self.heap, val)\n else:\n heapq.heappushpop(self.heap, val)\n return self.heap[0]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000257", "length_bytes": 1373, "license_type": "no_license", "methods": [{"docstring": ":type k: int :type nums: List[int]", "name": "__init__", "signature": "def __init__(self, k, nums)"}, {"docstring": ":type val: int :rtype: int", "name": "add", "signature": "def add(self, val)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_037268", "prompt": "Implement the Python class `KthLargest` described below.\n\nClass description:\n- time limit exceeded - Overkill, only need a heap for largest elements, rest not care def __init__(self, k, nums): self.heap = [-n for n in nums] self.k = k heapq.heapify(self.heap) def add(self, val): kLargest = [] heapq.heappush(self.heap, -val) for i in range(self.k): kLargest.append(heapq.heappop(self.heap)) res = -kLargest[-1] for i in range(self.k): heapq.heappush(self.heap, kLargest[i]) return res\n\nMethod signatures and docstrings:\n- def __init__(self, k, nums): :type k: int :type nums: List[int]\n- def add(self, val): :type val: int :rtype: int", "prompted_full_text": "Implement the Python class `KthLargest` described below.\n\nClass description:\n- time limit exceeded - Overkill, only need a heap for largest elements, rest not care def __init__(self, k, nums): self.heap = [-n for n in nums] self.k = k heapq.heapify(self.heap) def add(self, val): kLargest = [] 
heapq.heappush(self.heap, -val) for i in range(self.k): kLargest.append(heapq.heappop(self.heap)) res = -kLargest[-1] for i in range(self.k): heapq.heappush(self.heap, kLargest[i]) return res\n\nMethod signatures and docstrings:\n- def __init__(self, k, nums): :type k: int :type nums: List[int]\n- def add(self, val): :type val: int :rtype: int\n\n<|skeleton|>\nclass KthLargest:\n \"\"\"- time limit exceeded - Overkill, only need a heap for largest elements, rest not care def __init__(self, k, nums): self.heap = [-n for n in nums] self.k = k heapq.heapify(self.heap) def add(self, val): kLargest = [] heapq.heappush(self.heap, -val) for i in range(self.k): kLargest.append(heapq.heappop(self.heap)) res = -kLargest[-1] for i in range(self.k): heapq.heappush(self.heap, kLargest[i]) return res\"\"\"\n\n def __init__(self, k, nums):\n \"\"\":type k: int :type nums: List[int]\"\"\"\n <|body_0|>\n\n def add(self, val):\n \"\"\":type val: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.heap = nums\n self.k = k\n heapq.heapify(self.heap)\n while len(self.heap) > k:\n heapq.heappop(self.heap)\n<|end_body_0|>\n\n<|body_start_1|>\n if len(self.heap) < self.k:\n heapq.heappush(self.heap, val)\n else:\n heapq.heappushpop(self.heap, val)\n return self.heap[0]\n<|end_body_1|>\n", "revision_id": "085d868ba0458fc8e6b5549aa00fa151c335fa7f", "skeleton": "<|skeleton|>\nclass KthLargest:\n \"\"\"- time limit exceeded - Overkill, only need a heap for largest elements, rest not care def __init__(self, k, nums): self.heap = [-n for n in nums] self.k = k heapq.heapify(self.heap) def add(self, val): kLargest = [] heapq.heappush(self.heap, -val) for i in range(self.k): kLargest.append(heapq.heappop(self.heap)) res = -kLargest[-1] for i in range(self.k): heapq.heappush(self.heap, kLargest[i]) return res\"\"\"\n\n def __init__(self, k, nums):\n \"\"\":type k: int :type nums: List[int]\"\"\"\n <|body_0|>\n\n def add(self, val):\n \"\"\":type val: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class KthLargest:\n \"\"\"- time limit exceeded - Overkill, only need a heap for largest elements, rest not care def __init__(self, k, nums): self.heap = [-n for n in nums] self.k = k heapq.heapify(self.heap) def add(self, val): kLargest = [] heapq.heappush(self.heap, -val) for i in range(self.k): kLargest.append(heapq.heappop(self.heap)) res = -kLargest[-1] for i in range(self.k): heapq.heappush(self.heap, kLargest[i]) return res\"\"\"\n\n def __init__(self, k, nums):\n \"\"\":type k: int :type nums: List[int]\"\"\"\n self.heap = nums\n self.k = k\n heapq.heapify(self.heap)\n while len(self.heap) > k:\n heapq.heappop(self.heap)\n\n def add(self, val):\n \"\"\":type val: int :rtype: int\"\"\"\n if len(self.heap) < self.k:\n heapq.heappush(self.heap, val)\n else:\n heapq.heappushpop(self.heap, val)\n return self.heap[0]\n", "source": "the_stack_v2_python_sparse", "source_path": "703-Kth_Largest_Element_in_A_Stream.py", "source_repo": "chanyoonzhu/leetcode-python", "split": "val", "star_events_count": 0}
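
The KthLargest solution keeps only a size-k min-heap, so heap[0] is always the k-th largest value seen so far. A standalone copy with the heapq import the record assumes, smoke-tested on the classic LeetCode 703 sequence:

    import heapq

    class KthLargest:
        def __init__(self, k, nums):
            self.heap = nums
            self.k = k
            heapq.heapify(self.heap)
            # Drop everything below the k largest; the root is the answer.
            while len(self.heap) > k:
                heapq.heappop(self.heap)

        def add(self, val):
            if len(self.heap) < self.k:
                heapq.heappush(self.heap, val)
            else:
                # Push and evict the smallest in a single O(log k) step.
                heapq.heappushpop(self.heap, val)
            return self.heap[0]

    kl = KthLargest(3, [4, 5, 8, 2])
    print([kl.add(v) for v in (3, 5, 10, 9, 4)])   # [4, 5, 5, 8, 8]
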
{"blob_id": "63c9e1edd41c6b55f3f94ae8e117486fa67fd681", "bodies": ["password = os.environ.get('CLICKHOUSE_PASSWORD')\ndb = os.environ.get('CLICKHOUSE_DATABASE')\nport = os.environ.get('CLICKHOUSE_PORT')\nself.client = Client('localhost', password=password, database=db, port=port)", "fields = '(UserId, ChannelId, Reaction, MessageId,' + ' EventTime, EventDate, LikeTime, Url)'\nquery = f'INSERT INTO likes{fields} VALUES {str(likes)}'\nself.client.execute(query)", "query = f\"\\n SELECT\\n unique_messages.cid as Channel, unique_messages.mid as Message,\\n unique_messages.url as Url,\\n all_reactions.likes as Likes, all_reactions.dislikes as Dislikes,\\n unique_messages.lt as Placed\\n FROM\\n (\\n SELECT\\n seq1.cid as cid1, seq1.mid as mid1,\\n seq2.cid as cid2, seq2.mid as mid2,\\n seq1.uuid as likes, seq2.uuid as dislikes\\n FROM\\n (\\n SELECT\\n cid, mid, COUNT(uid) as uuid, max(rc)\\n FROM\\n (\\n SELECT\\n cid, mid, uid, any(reac) as rc\\n FROM\\n (\\n SELECT\\n DISTINCT(ChannelId, MessageId, UserId) as msg,\\n ChannelId as cid, MessageId as mid, UserId as uid,\\n Reaction as reac\\n FROM likes\\n ORDER BY (EventTime, MessageId) DESC\\n )\\n GROUP BY (cid, mid, uid)\\n )\\n WHERE rc = 'like' GROUP BY (cid, mid)\\n ) as seq1\\n FULL OUTER JOIN\\n (\\n SELECT\\n cid, mid, COUNT(uid) as uuid, max(rc)\\n FROM\\n (\\n SELECT\\n cid, mid, uid, any(reac) as rc\\n FROM\\n (\\n SELECT\\n DISTINCT(ChannelId, MessageId, UserId) as msg,\\n ChannelId as cid, MessageId as mid, UserId as uid,\\n Reaction as reac\\n FROM likes\\n ORDER BY (EventTime, MessageId) DESC\\n )\\n GROUP BY (cid, mid, uid)\\n )\\n WHERE rc = 'dislike' GROUP BY (cid, mid)\\n ) as seq2\\n ON (seq1.cid = seq2.cid AND seq1.mid = seq2.mid)\\n ) as all_reactions\\n JOIN\\n (\\n SELECT\\n DISTINCT (ChannelId, MessageId, Url),\\n MessageId AS mid, ChannelId AS cid, Url as url,\\n max(EventTime) AS et, max(LikeTime) as lt\\n FROM\\n likes WHERE LikeTime > {tm}\\n GROUP BY (ChannelId, MessageId, Url)\\n ) as unique_messages\\n ON (\\n COALESCE(\\n nullIf(all_reactions.mid1, 0),\\n nullIf(all_reactions.mid2, 0)\\n ) = unique_messages.mid\\n AND\\n COALESCE(\\n nullIf(all_reactions.cid1, ''),\\n nullIf(all_reactions.cid2, '')\\n ) = unique_messages.cid\\n )\\n ORDER BY Placed DESC\"\nreactions = self.client.execute(query)\nreturn reactions"], "bodies_text": "<|body_start_0|>\n password = os.environ.get('CLICKHOUSE_PASSWORD')\n db = os.environ.get('CLICKHOUSE_DATABASE')\n port = os.environ.get('CLICKHOUSE_PORT')\n self.client = Client('localhost', password=password, database=db, port=port)\n<|end_body_0|>\n\n<|body_start_1|>\n fields = '(UserId, ChannelId, Reaction, MessageId,' + ' EventTime, EventDate, LikeTime, Url)'\n query = f'INSERT INTO likes{fields} VALUES {str(likes)}'\n self.client.execute(query)\n<|end_body_1|>\n\n<|body_start_2|>\n query = f\"\\n SELECT\\n unique_messages.cid as Channel, unique_messages.mid as Message,\\n unique_messages.url as Url,\\n all_reactions.likes as Likes, all_reactions.dislikes as Dislikes,\\n unique_messages.lt as Placed\\n FROM\\n (\\n SELECT\\n seq1.cid as cid1, seq1.mid as mid1,\\n seq2.cid as cid2, seq2.mid as mid2,\\n seq1.uuid as likes, seq2.uuid as dislikes\\n FROM\\n (\\n SELECT\\n cid, mid, COUNT(uid) as uuid, max(rc)\\n FROM\\n (\\n SELECT\\n cid, mid, uid, any(reac) as rc\\n FROM\\n (\\n SELECT\\n DISTINCT(ChannelId, MessageId, UserId) as msg,\\n ChannelId as cid, MessageId as mid, UserId as uid,\\n Reaction as reac\\n FROM likes\\n ORDER BY (EventTime, MessageId) DESC\\n )\\n GROUP BY 
(cid, mid, uid)\\n )\\n WHERE rc = 'like' GROUP BY (cid, mid)\\n ) as seq1\\n FULL OUTER JOIN\\n (\\n SELECT\\n cid, mid, COUNT(uid) as uuid, max(rc)\\n FROM\\n (\\n SELECT\\n cid, mid, uid, any(reac) as rc\\n FROM\\n (\\n SELECT\\n DISTINCT(ChannelId, MessageId, UserId) as msg,\\n ChannelId as cid, MessageId as mid, UserId as uid,\\n Reaction as reac\\n FROM likes\\n ORDER BY (EventTime, MessageId) DESC\\n )\\n GROUP BY (cid, mid, uid)\\n )\\n WHERE rc = 'dislike' GROUP BY (cid, mid)\\n ) as seq2\\n ON (seq1.cid = seq2.cid AND seq1.mid = seq2.mid)\\n ) as all_reactions\\n JOIN\\n (\\n SELECT\\n DISTINCT (ChannelId, MessageId, Url),\\n MessageId AS mid, ChannelId AS cid, Url as url,\\n max(EventTime) AS et, max(LikeTime) as lt\\n FROM\\n likes WHERE LikeTime > {tm}\\n GROUP BY (ChannelId, MessageId, Url)\\n ) as unique_messages\\n ON (\\n COALESCE(\\n nullIf(all_reactions.mid1, 0),\\n nullIf(all_reactions.mid2, 0)\\n ) = unique_messages.mid\\n AND\\n COALESCE(\\n nullIf(all_reactions.cid1, ''),\\n nullIf(all_reactions.cid2, '')\\n ) = unique_messages.cid\\n )\\n ORDER BY Placed DESC\"\n reactions = self.client.execute(query)\n return reactions\n<|end_body_2|>\n", "class_docstring": "", "class_name": "ClickHouseSaver", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ClickHouseSaver:\n\n def __init__(self):\n \"\"\"Creating connection to clickhouse database\"\"\"\n <|body_0|>\n\n def add_likes(self, likes):\n \"\"\"Function creating and executing query inserting likes to database\"\"\"\n <|body_1|>\n\n def get_likes(self, tm):\n \"\"\"Function getting likes from database added after last check tm describes the max like time which was received on previous iteration\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n password = os.environ.get('CLICKHOUSE_PASSWORD')\n db = os.environ.get('CLICKHOUSE_DATABASE')\n port = os.environ.get('CLICKHOUSE_PORT')\n self.client = Client('localhost', password=password, database=db, port=port)\n<|end_body_0|>\n\n<|body_start_1|>\n fields = '(UserId, ChannelId, Reaction, MessageId,' + ' EventTime, EventDate, LikeTime, Url)'\n query = f'INSERT INTO likes{fields} VALUES {str(likes)}'\n self.client.execute(query)\n<|end_body_1|>\n\n<|body_start_2|>\n query = f\"\\n SELECT\\n unique_messages.cid as Channel, unique_messages.mid as Message,\\n unique_messages.url as Url,\\n all_reactions.likes as Likes, all_reactions.dislikes as Dislikes,\\n unique_messages.lt as Placed\\n FROM\\n (\\n SELECT\\n seq1.cid as cid1, seq1.mid as mid1,\\n seq2.cid as cid2, seq2.mid as mid2,\\n seq1.uuid as likes, seq2.uuid as dislikes\\n FROM\\n (\\n SELECT\\n cid, mid, COUNT(uid) as uuid, max(rc)\\n FROM\\n (\\n SELECT\\n cid, mid, uid, any(reac) as rc\\n FROM\\n (\\n SELECT\\n DISTINCT(ChannelId, MessageId, UserId) as msg,\\n ChannelId as cid, MessageId as mid, UserId as uid,\\n Reaction as reac\\n FROM likes\\n ORDER BY (EventTime, MessageId) DESC\\n )\\n GROUP BY (cid, mid, uid)\\n )\\n WHERE rc = 'like' GROUP BY (cid, mid)\\n ) as seq1\\n FULL OUTER JOIN\\n (\\n SELECT\\n cid, mid, COUNT(uid) as uuid, max(rc)\\n FROM\\n (\\n SELECT\\n cid, mid, uid, any(reac) as rc\\n FROM\\n (\\n SELECT\\n DISTINCT(ChannelId, MessageId, UserId) as msg,\\n ChannelId as cid, MessageId as mid, UserId as uid,\\n Reaction as reac\\n FROM likes\\n ORDER BY (EventTime, MessageId) DESC\\n )\\n GROUP BY (cid, mid, uid)\\n )\\n WHERE rc = 'dislike' GROUP BY (cid, mid)\\n ) as seq2\\n ON (seq1.cid = seq2.cid AND seq1.mid = 
seq2.mid)\\n ) as all_reactions\\n JOIN\\n (\\n SELECT\\n DISTINCT (ChannelId, MessageId, Url),\\n MessageId AS mid, ChannelId AS cid, Url as url,\\n max(EventTime) AS et, max(LikeTime) as lt\\n FROM\\n likes WHERE LikeTime > {tm}\\n GROUP BY (ChannelId, MessageId, Url)\\n ) as unique_messages\\n ON (\\n COALESCE(\\n nullIf(all_reactions.mid1, 0),\\n nullIf(all_reactions.mid2, 0)\\n ) = unique_messages.mid\\n AND\\n COALESCE(\\n nullIf(all_reactions.cid1, ''),\\n nullIf(all_reactions.cid2, '')\\n ) = unique_messages.cid\\n )\\n ORDER BY Placed DESC\"\n reactions = self.client.execute(query)\n return reactions\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000258", "length_bytes": 3926, "license_type": "no_license", "methods": [{"docstring": "Creating connection to clickhouse database", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Function creating and executing query inserting likes to database", "name": "add_likes", "signature": "def add_likes(self, likes)"}, {"docstring": "Function getting likes from database added after last check tm describes the max like time which was received on previous iteration", "name": "get_likes", "signature": "def get_likes(self, tm)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_001607", "prompt": "Implement the Python class `ClickHouseSaver` described below.\n\nClass description:\nImplement the ClickHouseSaver class.\n\nMethod signatures and docstrings:\n- def __init__(self): Creating connection to clickhouse database\n- def add_likes(self, likes): Function creating and executing query inserting likes to database\n- def get_likes(self, tm): Function getting likes from database added after last check tm describes the max like time which was received on previous iteration", "prompted_full_text": "Implement the Python class `ClickHouseSaver` described below.\n\nClass description:\nImplement the ClickHouseSaver class.\n\nMethod signatures and docstrings:\n- def __init__(self): Creating connection to clickhouse database\n- def add_likes(self, likes): Function creating and executing query inserting likes to database\n- def get_likes(self, tm): Function getting likes from database added after last check tm describes the max like time which was received on previous iteration\n\n<|skeleton|>\nclass ClickHouseSaver:\n\n def __init__(self):\n \"\"\"Creating connection to clickhouse database\"\"\"\n <|body_0|>\n\n def add_likes(self, likes):\n \"\"\"Function creating and executing query inserting likes to database\"\"\"\n <|body_1|>\n\n def get_likes(self, tm):\n \"\"\"Function getting likes from database added after last check tm describes the max like time which was received on previous iteration\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n password = os.environ.get('CLICKHOUSE_PASSWORD')\n db = os.environ.get('CLICKHOUSE_DATABASE')\n port = os.environ.get('CLICKHOUSE_PORT')\n self.client = Client('localhost', password=password, database=db, port=port)\n<|end_body_0|>\n\n<|body_start_1|>\n fields = '(UserId, ChannelId, Reaction, MessageId,' + ' EventTime, EventDate, LikeTime, Url)'\n query = f'INSERT INTO likes{fields} VALUES {str(likes)}'\n self.client.execute(query)\n<|end_body_1|>\n\n<|body_start_2|>\n query = f\"\\n SELECT\\n unique_messages.cid as Channel, unique_messages.mid as Message,\\n unique_messages.url as Url,\\n all_reactions.likes as Likes, all_reactions.dislikes as Dislikes,\\n unique_messages.lt as Placed\\n FROM\\n (\\n SELECT\\n seq1.cid as cid1, seq1.mid as mid1,\\n 
seq2.cid as cid2, seq2.mid as mid2,\\n seq1.uuid as likes, seq2.uuid as dislikes\\n FROM\\n (\\n SELECT\\n cid, mid, COUNT(uid) as uuid, max(rc)\\n FROM\\n (\\n SELECT\\n cid, mid, uid, any(reac) as rc\\n FROM\\n (\\n SELECT\\n DISTINCT(ChannelId, MessageId, UserId) as msg,\\n ChannelId as cid, MessageId as mid, UserId as uid,\\n Reaction as reac\\n FROM likes\\n ORDER BY (EventTime, MessageId) DESC\\n )\\n GROUP BY (cid, mid, uid)\\n )\\n WHERE rc = 'like' GROUP BY (cid, mid)\\n ) as seq1\\n FULL OUTER JOIN\\n (\\n SELECT\\n cid, mid, COUNT(uid) as uuid, max(rc)\\n FROM\\n (\\n SELECT\\n cid, mid, uid, any(reac) as rc\\n FROM\\n (\\n SELECT\\n DISTINCT(ChannelId, MessageId, UserId) as msg,\\n ChannelId as cid, MessageId as mid, UserId as uid,\\n Reaction as reac\\n FROM likes\\n ORDER BY (EventTime, MessageId) DESC\\n )\\n GROUP BY (cid, mid, uid)\\n )\\n WHERE rc = 'dislike' GROUP BY (cid, mid)\\n ) as seq2\\n ON (seq1.cid = seq2.cid AND seq1.mid = seq2.mid)\\n ) as all_reactions\\n JOIN\\n (\\n SELECT\\n DISTINCT (ChannelId, MessageId, Url),\\n MessageId AS mid, ChannelId AS cid, Url as url,\\n max(EventTime) AS et, max(LikeTime) as lt\\n FROM\\n likes WHERE LikeTime > {tm}\\n GROUP BY (ChannelId, MessageId, Url)\\n ) as unique_messages\\n ON (\\n COALESCE(\\n nullIf(all_reactions.mid1, 0),\\n nullIf(all_reactions.mid2, 0)\\n ) = unique_messages.mid\\n AND\\n COALESCE(\\n nullIf(all_reactions.cid1, ''),\\n nullIf(all_reactions.cid2, '')\\n ) = unique_messages.cid\\n )\\n ORDER BY Placed DESC\"\n reactions = self.client.execute(query)\n return reactions\n<|end_body_2|>\n", "revision_id": "c2bd51df8af3c6249400f3dcb1c11783e6346f9d", "skeleton": "<|skeleton|>\nclass ClickHouseSaver:\n\n def __init__(self):\n \"\"\"Creating connection to clickhouse database\"\"\"\n <|body_0|>\n\n def add_likes(self, likes):\n \"\"\"Function creating and executing query inserting likes to database\"\"\"\n <|body_1|>\n\n def get_likes(self, tm):\n \"\"\"Function getting likes from database added after last check tm describes the max like time which was received on previous iteration\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ClickHouseSaver:\n def __init__(self):\n \"\"\"Creating connection to clickhouse database\"\"\"\n password = os.environ.get('CLICKHOUSE_PASSWORD')\n db = os.environ.get('CLICKHOUSE_DATABASE')\n port = os.environ.get('CLICKHOUSE_PORT')\n self.client = Client('localhost', password=password, database=db, port=port)\n\n def add_likes(self, likes):\n \"\"\"Function creating and executing query inserting likes to database\"\"\"\n fields = '(UserId, ChannelId, Reaction, MessageId,' + ' EventTime, EventDate, LikeTime, Url)'\n query = f'INSERT INTO likes{fields} VALUES {str(likes)}'\n self.client.execute(query)\n\n def get_likes(self, tm):\n \"\"\"Function getting likes from database added after last check tm describes the max like time which was received on previous iteration\"\"\"\n query = f\"\\n SELECT\\n unique_messages.cid as Channel, unique_messages.mid as Message,\\n unique_messages.url as Url,\\n all_reactions.likes as Likes, all_reactions.dislikes as Dislikes,\\n unique_messages.lt as Placed\\n FROM\\n (\\n SELECT\\n seq1.cid as cid1, seq1.mid as mid1,\\n seq2.cid as cid2, seq2.mid as mid2,\\n seq1.uuid as likes, seq2.uuid as dislikes\\n FROM\\n (\\n SELECT\\n cid, mid, COUNT(uid) as uuid, max(rc)\\n FROM\\n (\\n SELECT\\n cid, mid, 
uid, any(reac) as rc\\n FROM\\n (\\n SELECT\\n DISTINCT(ChannelId, MessageId, UserId) as msg,\\n ChannelId as cid, MessageId as mid, UserId as uid,\\n Reaction as reac\\n FROM likes\\n ORDER BY (EventTime, MessageId) DESC\\n )\\n GROUP BY (cid, mid, uid)\\n )\\n WHERE rc = 'like' GROUP BY (cid, mid)\\n ) as seq1\\n FULL OUTER JOIN\\n (\\n SELECT\\n cid, mid, COUNT(uid) as uuid, max(rc)\\n FROM\\n (\\n SELECT\\n cid, mid, uid, any(reac) as rc\\n FROM\\n (\\n SELECT\\n DISTINCT(ChannelId, MessageId, UserId) as msg,\\n ChannelId as cid, MessageId as mid, UserId as uid,\\n Reaction as reac\\n FROM likes\\n ORDER BY (EventTime, MessageId) DESC\\n )\\n GROUP BY (cid, mid, uid)\\n )\\n WHERE rc = 'dislike' GROUP BY (cid, mid)\\n ) as seq2\\n ON (seq1.cid = seq2.cid AND seq1.mid = seq2.mid)\\n ) as all_reactions\\n JOIN\\n (\\n SELECT\\n DISTINCT (ChannelId, MessageId, Url),\\n MessageId AS mid, ChannelId AS cid, Url as url,\\n max(EventTime) AS et, max(LikeTime) as lt\\n FROM\\n likes WHERE LikeTime > {tm}\\n GROUP BY (ChannelId, MessageId, Url)\\n ) as unique_messages\\n ON (\\n COALESCE(\\n nullIf(all_reactions.mid1, 0),\\n nullIf(all_reactions.mid2, 0)\\n ) = unique_messages.mid\\n AND\\n COALESCE(\\n nullIf(all_reactions.cid1, ''),\\n nullIf(all_reactions.cid2, '')\\n ) = unique_messages.cid\\n )\\n ORDER BY Placed DESC\"\n reactions = self.client.execute(query)\n return reactions\n", "source": "the_stack_v2_python_sparse", "source_path": "model/clickhouse_saver.py", "source_repo": "MykKos/telegram_likes", "split": "val", "star_events_count": 0}
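
The add_likes body in the ClickHouseSaver solution splices str(likes) straight into the INSERT, which breaks on awkward values and invites injection. clickhouse-driver can take the rows as a separate argument instead; a hedged sketch of that variant, reusing the record's table and column names but with connection details left out:

    from clickhouse_driver import Client  # assumes the clickhouse-driver package

    def add_likes(client, likes):
        # likes: an iterable of tuples in the column order below; the driver
        # serializes the values itself, so no manual quoting is needed.
        client.execute(
            'INSERT INTO likes (UserId, ChannelId, Reaction, MessageId,'
            ' EventTime, EventDate, LikeTime, Url) VALUES',
            list(likes),
        )

    # client = Client('localhost', password=..., database=..., port=...)
    # add_likes(client, rows)   # needs a live ClickHouse server
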
{"blob_id": "7776fca1408bc2170bef0554f52b54f584dd285b", "bodies": ["def encode(word):\n cnt = [0] * 26\n for c in word:\n cnt[ord(c) - ord('a')] += 1\n return tuple(cnt)\n\ndef decode(cnt):\n ret = ''\n for i, c in enumerate(cnt):\n ret += chr(ord('a') + i) * c\n return ret\nchars = set(target)\nencoded = [encode(word) for word in stickers if set(word) & chars]\nN = len(encoded)\ntg = encode(target)\nfor i, c in enumerate(tg):\n if c > 0 and all((cnt[i] == 0 for cnt in encoded)):\n return -1\n\n@lru_cache(None)\ndef dp(i, tg):\n if all((x == 0 for x in tg)):\n return 0\n if i == N:\n return float('inf')\n cur = encoded[i]\n ret = dp(i + 1, tg)\n for k in range(1, max(((b + (a - 1)) // a for a, b in zip(cur, tg) if a > 0)) + 1):\n ret = min(ret, k + dp(i + 1, tuple((max(0, b - a * k) for a, b in zip(cur, tg)))))\n return ret\nret = dp(0, tg)\nreturn ret if ret != float('inf') else -1", "def encode(word):\n cnt = [0] * 26\n for c in word:\n cnt[ord(c) - ord('a')] += 1\n return tuple(cnt)\n\ndef decode(cnt):\n ret = ''\n for i, c in enumerate(cnt):\n ret += chr(ord('a') + i) * c\n return ret\nchars = set(target)\nencoded = [encode(word) for word in stickers if set(word) & chars]\nN = len(encoded)\nfor i, c in enumerate(encode(target)):\n if c > 0 and all((cnt[i] == 0 for cnt in encoded)):\n return -1\n\n@lru_cache(None)\ndef dp(target):\n if not target:\n return 0\n ret = float('inf')\n for cnt in encoded:\n if cnt[ord(target[0]) - ord('a')] == 0:\n continue\n remain = tuple((max(0, b - a) for a, b in zip(cnt, encode(target))))\n ret = min(ret, 1 + dp(decode(remain)))\n return ret\nret = dp(target)\nreturn ret if ret != float('inf') else -1"], "bodies_text": "<|body_start_0|>\n def encode(word):\n cnt = [0] * 26\n for c in word:\n cnt[ord(c) - ord('a')] += 1\n return tuple(cnt)\n\n def decode(cnt):\n ret = ''\n for i, c in enumerate(cnt):\n ret += chr(ord('a') + i) * c\n return ret\n chars = set(target)\n encoded = [encode(word) for word in stickers if set(word) & chars]\n N = len(encoded)\n tg = encode(target)\n for i, c in enumerate(tg):\n if c > 0 and all((cnt[i] == 0 for cnt in encoded)):\n return -1\n\n @lru_cache(None)\n def dp(i, tg):\n if all((x == 0 for x in tg)):\n return 0\n if i == N:\n return float('inf')\n cur = encoded[i]\n ret = dp(i + 1, tg)\n for k in range(1, max(((b + (a - 1)) // a for a, b in zip(cur, tg) if a > 0)) + 1):\n ret = min(ret, k + dp(i + 1, tuple((max(0, b - a * k) for a, b in zip(cur, tg)))))\n return ret\n ret = dp(0, tg)\n return ret if ret != float('inf') else -1\n<|end_body_0|>\n\n<|body_start_1|>\n def encode(word):\n cnt = [0] * 26\n for c in word:\n cnt[ord(c) - ord('a')] += 1\n return tuple(cnt)\n\n def decode(cnt):\n ret = ''\n for i, c in enumerate(cnt):\n ret += chr(ord('a') + i) * c\n return ret\n chars = set(target)\n encoded = [encode(word) for word in stickers if set(word) & chars]\n N = len(encoded)\n for i, c in enumerate(encode(target)):\n if c > 0 and all((cnt[i] == 0 for cnt in encoded)):\n return -1\n\n @lru_cache(None)\n def dp(target):\n if not target:\n return 0\n ret = float('inf')\n for cnt in encoded:\n if cnt[ord(target[0]) - ord('a')] == 0:\n continue\n remain = tuple((max(0, b - a) for a, b in zip(cnt, encode(target))))\n ret = min(ret, 1 + dp(decode(remain)))\n return ret\n ret = dp(target)\n return ret if ret != float('inf') else -1\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n 
def minStickers(self, stickers: List[str], target: str) -> int:\n \"\"\"11/28/2022 15:49 TLE\"\"\"\n <|body_0|>\n\n def minStickers(self, stickers: List[str], target: str) -> int:\n \"\"\"11/28/2022 16:12\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def encode(word):\n cnt = [0] * 26\n for c in word:\n cnt[ord(c) - ord('a')] += 1\n return tuple(cnt)\n\n def decode(cnt):\n ret = ''\n for i, c in enumerate(cnt):\n ret += chr(ord('a') + i) * c\n return ret\n chars = set(target)\n encoded = [encode(word) for word in stickers if set(word) & chars]\n N = len(encoded)\n tg = encode(target)\n for i, c in enumerate(tg):\n if c > 0 and all((cnt[i] == 0 for cnt in encoded)):\n return -1\n\n @lru_cache(None)\n def dp(i, tg):\n if all((x == 0 for x in tg)):\n return 0\n if i == N:\n return float('inf')\n cur = encoded[i]\n ret = dp(i + 1, tg)\n for k in range(1, max(((b + (a - 1)) // a for a, b in zip(cur, tg) if a > 0)) + 1):\n ret = min(ret, k + dp(i + 1, tuple((max(0, b - a * k) for a, b in zip(cur, tg)))))\n return ret\n ret = dp(0, tg)\n return ret if ret != float('inf') else -1\n<|end_body_0|>\n\n<|body_start_1|>\n def encode(word):\n cnt = [0] * 26\n for c in word:\n cnt[ord(c) - ord('a')] += 1\n return tuple(cnt)\n\n def decode(cnt):\n ret = ''\n for i, c in enumerate(cnt):\n ret += chr(ord('a') + i) * c\n return ret\n chars = set(target)\n encoded = [encode(word) for word in stickers if set(word) & chars]\n N = len(encoded)\n for i, c in enumerate(encode(target)):\n if c > 0 and all((cnt[i] == 0 for cnt in encoded)):\n return -1\n\n @lru_cache(None)\n def dp(target):\n if not target:\n return 0\n ret = float('inf')\n for cnt in encoded:\n if cnt[ord(target[0]) - ord('a')] == 0:\n continue\n remain = tuple((max(0, b - a) for a, b in zip(cnt, encode(target))))\n ret = min(ret, 1 + dp(decode(remain)))\n return ret\n ret = dp(target)\n return ret if ret != float('inf') else -1\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000259", "length_bytes": 5124, "license_type": "no_license", "methods": [{"docstring": "11/28/2022 15:49 TLE", "name": "minStickers", "signature": "def minStickers(self, stickers: List[str], target: str) -> int"}, {"docstring": "11/28/2022 16:12", "name": "minStickers", "signature": "def minStickers(self, stickers: List[str], target: str) -> int"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_013446", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def minStickers(self, stickers: List[str], target: str) -> int: 11/28/2022 15:49 TLE\n- def minStickers(self, stickers: List[str], target: str) -> int: 11/28/2022 16:12", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def minStickers(self, stickers: List[str], target: str) -> int: 11/28/2022 15:49 TLE\n- def minStickers(self, stickers: List[str], target: str) -> int: 11/28/2022 16:12\n\n<|skeleton|>\nclass Solution:\n\n def minStickers(self, stickers: List[str], target: str) -> int:\n \"\"\"11/28/2022 15:49 TLE\"\"\"\n <|body_0|>\n\n def minStickers(self, stickers: List[str], target: str) -> int:\n \"\"\"11/28/2022 16:12\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def encode(word):\n cnt = [0] * 26\n for c in word:\n cnt[ord(c) - ord('a')] += 1\n return tuple(cnt)\n\n def decode(cnt):\n ret = ''\n for i, c in enumerate(cnt):\n ret += 
chr(ord('a') + i) * c\n return ret\n chars = set(target)\n encoded = [encode(word) for word in stickers if set(word) & chars]\n N = len(encoded)\n tg = encode(target)\n for i, c in enumerate(tg):\n if c > 0 and all((cnt[i] == 0 for cnt in encoded)):\n return -1\n\n @lru_cache(None)\n def dp(i, tg):\n if all((x == 0 for x in tg)):\n return 0\n if i == N:\n return float('inf')\n cur = encoded[i]\n ret = dp(i + 1, tg)\n for k in range(1, max(((b + (a - 1)) // a for a, b in zip(cur, tg) if a > 0)) + 1):\n ret = min(ret, k + dp(i + 1, tuple((max(0, b - a * k) for a, b in zip(cur, tg)))))\n return ret\n ret = dp(0, tg)\n return ret if ret != float('inf') else -1\n<|end_body_0|>\n\n<|body_start_1|>\n def encode(word):\n cnt = [0] * 26\n for c in word:\n cnt[ord(c) - ord('a')] += 1\n return tuple(cnt)\n\n def decode(cnt):\n ret = ''\n for i, c in enumerate(cnt):\n ret += chr(ord('a') + i) * c\n return ret\n chars = set(target)\n encoded = [encode(word) for word in stickers if set(word) & chars]\n N = len(encoded)\n for i, c in enumerate(encode(target)):\n if c > 0 and all((cnt[i] == 0 for cnt in encoded)):\n return -1\n\n @lru_cache(None)\n def dp(target):\n if not target:\n return 0\n ret = float('inf')\n for cnt in encoded:\n if cnt[ord(target[0]) - ord('a')] == 0:\n continue\n remain = tuple((max(0, b - a) for a, b in zip(cnt, encode(target))))\n ret = min(ret, 1 + dp(decode(remain)))\n return ret\n ret = dp(target)\n return ret if ret != float('inf') else -1\n<|end_body_1|>\n", "revision_id": "1389a009a02e90e8700a7a00e0b7f797c129cdf4", "skeleton": "<|skeleton|>\nclass Solution:\n\n def minStickers(self, stickers: List[str], target: str) -> int:\n \"\"\"11/28/2022 15:49 TLE\"\"\"\n <|body_0|>\n\n def minStickers(self, stickers: List[str], target: str) -> int:\n \"\"\"11/28/2022 16:12\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def minStickers(self, stickers: List[str], target: str) -> int:\n \"\"\"11/28/2022 15:49 TLE\"\"\"\n def encode(word):\n cnt = [0] * 26\n for c in word:\n cnt[ord(c) - ord('a')] += 1\n return tuple(cnt)\n\n def decode(cnt):\n ret = ''\n for i, c in enumerate(cnt):\n ret += chr(ord('a') + i) * c\n return ret\n chars = set(target)\n encoded = [encode(word) for word in stickers if set(word) & chars]\n N = len(encoded)\n tg = encode(target)\n for i, c in enumerate(tg):\n if c > 0 and all((cnt[i] == 0 for cnt in encoded)):\n return -1\n\n @lru_cache(None)\n def dp(i, tg):\n if all((x == 0 for x in tg)):\n return 0\n if i == N:\n return float('inf')\n cur = encoded[i]\n ret = dp(i + 1, tg)\n for k in range(1, max(((b + (a - 1)) // a for a, b in zip(cur, tg) if a > 0)) + 1):\n ret = min(ret, k + dp(i + 1, tuple((max(0, b - a * k) for a, b in zip(cur, tg)))))\n return ret\n ret = dp(0, tg)\n return ret if ret != float('inf') else -1\n\n def minStickers(self, stickers: List[str], target: str) -> int:\n \"\"\"11/28/2022 16:12\"\"\"\n def encode(word):\n cnt = [0] * 26\n for c in word:\n cnt[ord(c) - ord('a')] += 1\n return tuple(cnt)\n\n def decode(cnt):\n ret = ''\n for i, c in enumerate(cnt):\n ret += chr(ord('a') + i) * c\n return ret\n chars = set(target)\n encoded = [encode(word) for word in stickers if set(word) & chars]\n N = len(encoded)\n for i, c in enumerate(encode(target)):\n if c > 0 and all((cnt[i] == 0 for cnt in encoded)):\n return -1\n\n @lru_cache(None)\n def dp(target):\n if not target:\n return 
0\n ret = float('inf')\n for cnt in encoded:\n if cnt[ord(target[0]) - ord('a')] == 0:\n continue\n remain = tuple((max(0, b - a) for a, b in zip(cnt, encode(target))))\n ret = min(ret, 1 + dp(decode(remain)))\n return ret\n ret = dp(target)\n return ret if ret != float('inf') else -1\n", "source": "the_stack_v2_python_sparse", "source_path": "leetcode/solved/691_Stickers_to_Spell_Word/solution.py", "source_repo": "sungminoh/algorithms", "split": "val", "star_events_count": 0}
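
Both minStickers bodies memoize on the remaining demand; a compact standalone variant of the second one, using Counter instead of 26-tuples and keeping the memo key sorted, smoke-tested on the LeetCode 691 sample:

    from collections import Counter
    from functools import lru_cache

    def min_stickers(stickers, target):
        counts = [Counter(s) for s in stickers if set(s) & set(target)]

        @lru_cache(maxsize=None)
        def dp(remain):                   # remain stays sorted -> canonical key
            if not remain:
                return 0
            need = Counter(remain)
            best = float('inf')
            for cnt in counts:
                if cnt[remain[0]] == 0:   # some sticker must cover the first letter
                    continue
                left = ''.join(c * max(0, need[c] - cnt[c]) for c in sorted(need))
                best = min(best, 1 + dp(left))
            return best

        best = dp(''.join(sorted(target)))
        return -1 if best == float('inf') else best

    print(min_stickers(['with', 'example', 'science'], 'thehat'))   # 3
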
{"blob_id": "6304cad70e7277a510bc9bdeaf2f5276d8b41273", "bodies": ["self.ensure_one()\nmove_line = self.mapped('move_line_ids').filtered(lambda move_line: move_line.account_id.user_type_id.type == 'receivable')\nif move_line:\n move_line.ensure_one()\nreturn move_line", "self.ensure_one()\nmove_line = self.mapped('move_line_ids').filtered(lambda move_line: move_line.account_id.user_type_id.type == 'receivable')\nreturn move_line.account_id"], "bodies_text": "<|body_start_0|>\n self.ensure_one()\n move_line = self.mapped('move_line_ids').filtered(lambda move_line: move_line.account_id.user_type_id.type == 'receivable')\n if move_line:\n move_line.ensure_one()\n return move_line\n<|end_body_0|>\n\n<|body_start_1|>\n self.ensure_one()\n move_line = self.mapped('move_line_ids').filtered(lambda move_line: move_line.account_id.user_type_id.type == 'receivable')\n return move_line.account_id\n<|end_body_1|>\n", "class_docstring": "Added some functionalities to payment and add some group", "class_name": "AccountPayment", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AccountPayment:\n \"\"\"Added some functionalities to payment and add some group\"\"\"\n\n def get_receivable_line(self):\n \"\"\"Return the receivable line\"\"\"\n <|body_0|>\n\n def get_receivable_account(self):\n \"\"\"Return the receivable account\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.ensure_one()\n move_line = self.mapped('move_line_ids').filtered(lambda move_line: move_line.account_id.user_type_id.type == 'receivable')\n if move_line:\n move_line.ensure_one()\n return move_line\n<|end_body_0|>\n\n<|body_start_1|>\n self.ensure_one()\n move_line = self.mapped('move_line_ids').filtered(lambda move_line: move_line.account_id.user_type_id.type == 'receivable')\n return move_line.account_id\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000260", "length_bytes": 873, "license_type": "no_license", "methods": [{"docstring": "Return the receivable line", "name": "get_receivable_line", "signature": "def get_receivable_line(self)"}, {"docstring": "Return the receivable account", "name": "get_receivable_account", "signature": "def get_receivable_account(self)"}], "n_methods": 2, "prompt": "Implement the Python class `AccountPayment` described below.\n\nClass description:\nAdded some functionalities to payment and add some group\n\nMethod signatures and docstrings:\n- def get_receivable_line(self): Return the receivable line\n- def get_receivable_account(self): Return the receivable account", "prompted_full_text": "Implement the Python class `AccountPayment` described below.\n\nClass description:\nAdded some functionalities to payment and add some group\n\nMethod signatures and docstrings:\n- def get_receivable_line(self): Return the receivable line\n- def get_receivable_account(self): Return the receivable account\n\n<|skeleton|>\nclass AccountPayment:\n \"\"\"Added some functionalities to payment and add some group\"\"\"\n\n def get_receivable_line(self):\n \"\"\"Return the receivable line\"\"\"\n <|body_0|>\n\n def get_receivable_account(self):\n \"\"\"Return the receivable account\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.ensure_one()\n move_line = self.mapped('move_line_ids').filtered(lambda move_line: move_line.account_id.user_type_id.type == 'receivable')\n if move_line:\n move_line.ensure_one()\n return move_line\n<|end_body_0|>\n\n<|body_start_1|>\n self.ensure_one()\n move_line = 
self.mapped('move_line_ids').filtered(lambda move_line: move_line.account_id.user_type_id.type == 'receivable')\n return move_line.account_id\n<|end_body_1|>\n", "revision_id": "f83efeb54e22313e8b533036ff4a5befa5d3a59b", "skeleton": "<|skeleton|>\nclass AccountPayment:\n \"\"\"Added some functionalities to payment and add some group\"\"\"\n\n def get_receivable_line(self):\n \"\"\"Return the receivable line\"\"\"\n <|body_0|>\n\n def get_receivable_account(self):\n \"\"\"Return the receivable account\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class AccountPayment:\n \"\"\"Added some functionalities to payment and add some group\"\"\"\n\n def get_receivable_line(self):\n \"\"\"Return the receivable line\"\"\"\n self.ensure_one()\n move_line = self.mapped('move_line_ids').filtered(lambda move_line: move_line.account_id.user_type_id.type == 'receivable')\n if move_line:\n move_line.ensure_one()\n return move_line\n\n def get_receivable_account(self):\n \"\"\"Return the receivable account\"\"\"\n self.ensure_one()\n move_line = self.mapped('move_line_ids').filtered(lambda move_line: move_line.account_id.user_type_id.type == 'receivable')\n return move_line.account_id\n", "source": "the_stack_v2_python_sparse", "source_path": "pos_pr/models/account_payment.py", "source_repo": "LuisMalave2001/GarryTesting", "split": "val", "star_events_count": 2}
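
The AccountPayment solution is Odoo ORM code and will not run outside an Odoo registry; a pure-Python stand-in (not the Odoo API) showing what the filtered/ensure_one pair buys:

    class RecordSet(list):
        def filtered(self, pred):
            return RecordSet(r for r in self if pred(r))

        def ensure_one(self):
            # Odoo-style guard: fail loudly unless exactly one record matched.
            if len(self) != 1:
                raise ValueError('expected a single record, got %d' % len(self))
            return self

    lines = RecordSet([{'type': 'receivable'}, {'type': 'payable'}])
    receivable = lines.filtered(lambda line: line['type'] == 'receivable')
    print(receivable.ensure_one()[0])   # {'type': 'receivable'}
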
{"blob_id": "c188c7d5519946c0894cc0465caf51a6f79290c6", "bodies": ["def bivariate(data):\n return holoviews.Bivariate(data, *args, **kwargs)\ndefault_bokeh_opts = {'height': 350, 'width': 400, 'tools': ['hover'], 'shared_axes': False}\ndefault_mpl_opts = {}\nmpl_opts, bokeh_opts = self.update_default_opts(default_mpl_opts, mpl_opts, default_bokeh_opts, bokeh_opts)\nsuper(Bivariate, self).__init__(stream=Pipe, plot=bivariate, data=data, bokeh_opts=bokeh_opts, mpl_opts=mpl_opts)", "kwargs = self.update_kwargs(**kwargs)\nscatter_kwargs = dict(kwargs)\nif Store.current_backend == 'bokeh':\n scatter_kwargs['size'] = scatter_kwargs.get('size', 3.5)\nelif Store.current_backend == 'matplotlib':\n scatter_kwargs['s'] = scatter_kwargs.get('s', 15)\nself.plot = self.plot.opts(holoviews.opts.Bivariate(*args, title=title, xlabel=xlabel, ylabel=ylabel, framewise=framewise, axiswise=axiswise, normalize=normalize, **kwargs), holoviews.opts.Scatter(*args, alpha=0.7, xlabel=xlabel, ylabel=ylabel, framewise=framewise, axiswise=axiswise, normalize=normalize, **scatter_kwargs), holoviews.opts.NdOverlay(normalize=normalize, framewise=framewise, axiswise=axiswise))"], "bodies_text": "<|body_start_0|>\n def bivariate(data):\n return holoviews.Bivariate(data, *args, **kwargs)\n default_bokeh_opts = {'height': 350, 'width': 400, 'tools': ['hover'], 'shared_axes': False}\n default_mpl_opts = {}\n mpl_opts, bokeh_opts = self.update_default_opts(default_mpl_opts, mpl_opts, default_bokeh_opts, bokeh_opts)\n super(Bivariate, self).__init__(stream=Pipe, plot=bivariate, data=data, bokeh_opts=bokeh_opts, mpl_opts=mpl_opts)\n<|end_body_0|>\n\n<|body_start_1|>\n kwargs = self.update_kwargs(**kwargs)\n scatter_kwargs = dict(kwargs)\n if Store.current_backend == 'bokeh':\n scatter_kwargs['size'] = scatter_kwargs.get('size', 3.5)\n elif Store.current_backend == 'matplotlib':\n scatter_kwargs['s'] = scatter_kwargs.get('s', 15)\n self.plot = self.plot.opts(holoviews.opts.Bivariate(*args, title=title, xlabel=xlabel, ylabel=ylabel, framewise=framewise, axiswise=axiswise, normalize=normalize, **kwargs), holoviews.opts.Scatter(*args, alpha=0.7, xlabel=xlabel, ylabel=ylabel, framewise=framewise, axiswise=axiswise, normalize=normalize, **scatter_kwargs), holoviews.opts.NdOverlay(normalize=normalize, framewise=framewise, axiswise=axiswise))\n<|end_body_1|>\n", "class_docstring": "Create a ``holoviews.Bivariate`` plot that plots steaming data. The streaming process is handled using a :class:`Pipe`.", "class_name": "Bivariate", "detected_licenses": ["MIT", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Bivariate:\n \"\"\"Create a ``holoviews.Bivariate`` plot that plots steaming data. The streaming process is handled using a :class:`Pipe`.\"\"\"\n\n def __init__(self, data=None, bokeh_opts=None, mpl_opts=None, *args, **kwargs):\n \"\"\"Initialize a :class:`Bivariate`. Args: data: Passed to ``holoviews.Bivariate``. bokeh_opts: Default options for the plot when rendered using the \"bokeh\" backend. mpl_opts: Default options for the plot when rendered using the \"matplotlib\" backend. *args: Passed to ``holoviews.Bivariate``. **kwargs: Passed to ``holoviews.Bivariate``.\"\"\"\n <|body_0|>\n\n def opts(self, title='', xlabel: str='x', ylabel: str='y', framewise: bool=True, axiswise: bool=True, normalize: bool=True, *args, **kwargs):\n \"\"\"Update the plot parameters. Same as ``holoviews`` ``opts``. 
The default values update the plot axes independently when being displayed in a :class:`Holomap`.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def bivariate(data):\n return holoviews.Bivariate(data, *args, **kwargs)\n default_bokeh_opts = {'height': 350, 'width': 400, 'tools': ['hover'], 'shared_axes': False}\n default_mpl_opts = {}\n mpl_opts, bokeh_opts = self.update_default_opts(default_mpl_opts, mpl_opts, default_bokeh_opts, bokeh_opts)\n super(Bivariate, self).__init__(stream=Pipe, plot=bivariate, data=data, bokeh_opts=bokeh_opts, mpl_opts=mpl_opts)\n<|end_body_0|>\n\n<|body_start_1|>\n kwargs = self.update_kwargs(**kwargs)\n scatter_kwargs = dict(kwargs)\n if Store.current_backend == 'bokeh':\n scatter_kwargs['size'] = scatter_kwargs.get('size', 3.5)\n elif Store.current_backend == 'matplotlib':\n scatter_kwargs['s'] = scatter_kwargs.get('s', 15)\n self.plot = self.plot.opts(holoviews.opts.Bivariate(*args, title=title, xlabel=xlabel, ylabel=ylabel, framewise=framewise, axiswise=axiswise, normalize=normalize, **kwargs), holoviews.opts.Scatter(*args, alpha=0.7, xlabel=xlabel, ylabel=ylabel, framewise=framewise, axiswise=axiswise, normalize=normalize, **scatter_kwargs), holoviews.opts.NdOverlay(normalize=normalize, framewise=framewise, axiswise=axiswise))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000261", "length_bytes": 22669, "license_type": "permissive", "methods": [{"docstring": "Initialize a :class:`Bivariate`. Args: data: Passed to ``holoviews.Bivariate``. bokeh_opts: Default options for the plot when rendered using the \"bokeh\" backend. mpl_opts: Default options for the plot when rendered using the \"matplotlib\" backend. *args: Passed to ``holoviews.Bivariate``. **kwargs: Passed to ``holoviews.Bivariate``.", "name": "__init__", "signature": "def __init__(self, data=None, bokeh_opts=None, mpl_opts=None, *args, **kwargs)"}, {"docstring": "Update the plot parameters. Same as ``holoviews`` ``opts``. The default values update the plot axes independently when being displayed in a :class:`Holomap`.", "name": "opts", "signature": "def opts(self, title='', xlabel: str='x', ylabel: str='y', framewise: bool=True, axiswise: bool=True, normalize: bool=True, *args, **kwargs)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_016551", "prompt": "Implement the Python class `Bivariate` described below.\n\nClass description:\nCreate a ``holoviews.Bivariate`` plot that plots streaming data. The streaming process is handled using a :class:`Pipe`.\n\nMethod signatures and docstrings:\n- def __init__(self, data=None, bokeh_opts=None, mpl_opts=None, *args, **kwargs): Initialize a :class:`Bivariate`. Args: data: Passed to ``holoviews.Bivariate``. bokeh_opts: Default options for the plot when rendered using the \"bokeh\" backend. mpl_opts: Default options for the plot when rendered using the \"matplotlib\" backend. *args: Passed to ``holoviews.Bivariate``. **kwargs: Passed to ``holoviews.Bivariate``.\n- def opts(self, title='', xlabel: str='x', ylabel: str='y', framewise: bool=True, axiswise: bool=True, normalize: bool=True, *args, **kwargs): Update the plot parameters. Same as ``holoviews`` ``opts``. The default values update the plot axes independently when being displayed in a :class:`Holomap`.", "prompted_full_text": "Implement the Python class `Bivariate` described below.\n\nClass description:\nCreate a ``holoviews.Bivariate`` plot that plots streaming data. 
The streaming process is handled using a :class:`Pipe`.\n\nMethod signatures and docstrings:\n- def __init__(self, data=None, bokeh_opts=None, mpl_opts=None, *args, **kwargs): Initialize a :class:`Bivariate`. Args: data: Passed to ``holoviews.Bivariate``. bokeh_opts: Default options for the plot when rendered using the \"bokeh\" backend. mpl_opts: Default options for the plot when rendered using the \"matplotlib\" backend. *args: Passed to ``holoviews.Bivariate``. **kwargs: Passed to ``holoviews.Bivariate``.\n- def opts(self, title='', xlabel: str='x', ylabel: str='y', framewise: bool=True, axiswise: bool=True, normalize: bool=True, *args, **kwargs): Update the plot parameters. Same as ``holoviews`` ``opts``. The default values update the plot axes independently when being displayed in a :class:`Holomap`.\n\n<|skeleton|>\nclass Bivariate:\n \"\"\"Create a ``holoviews.Bivariate`` plot that plots streaming data. The streaming process is handled using a :class:`Pipe`.\"\"\"\n\n def __init__(self, data=None, bokeh_opts=None, mpl_opts=None, *args, **kwargs):\n \"\"\"Initialize a :class:`Bivariate`. Args: data: Passed to ``holoviews.Bivariate``. bokeh_opts: Default options for the plot when rendered using the \"bokeh\" backend. mpl_opts: Default options for the plot when rendered using the \"matplotlib\" backend. *args: Passed to ``holoviews.Bivariate``. **kwargs: Passed to ``holoviews.Bivariate``.\"\"\"\n <|body_0|>\n\n def opts(self, title='', xlabel: str='x', ylabel: str='y', framewise: bool=True, axiswise: bool=True, normalize: bool=True, *args, **kwargs):\n \"\"\"Update the plot parameters. Same as ``holoviews`` ``opts``. The default values update the plot axes independently when being displayed in a :class:`Holomap`.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def bivariate(data):\n return holoviews.Bivariate(data, *args, **kwargs)\n default_bokeh_opts = {'height': 350, 'width': 400, 'tools': ['hover'], 'shared_axes': False}\n default_mpl_opts = {}\n mpl_opts, bokeh_opts = self.update_default_opts(default_mpl_opts, mpl_opts, default_bokeh_opts, bokeh_opts)\n super(Bivariate, self).__init__(stream=Pipe, plot=bivariate, data=data, bokeh_opts=bokeh_opts, mpl_opts=mpl_opts)\n<|end_body_0|>\n\n<|body_start_1|>\n kwargs = self.update_kwargs(**kwargs)\n scatter_kwargs = dict(kwargs)\n if Store.current_backend == 'bokeh':\n scatter_kwargs['size'] = scatter_kwargs.get('size', 3.5)\n elif Store.current_backend == 'matplotlib':\n scatter_kwargs['s'] = scatter_kwargs.get('s', 15)\n self.plot = self.plot.opts(holoviews.opts.Bivariate(*args, title=title, xlabel=xlabel, ylabel=ylabel, framewise=framewise, axiswise=axiswise, normalize=normalize, **kwargs), holoviews.opts.Scatter(*args, alpha=0.7, xlabel=xlabel, ylabel=ylabel, framewise=framewise, axiswise=axiswise, normalize=normalize, **scatter_kwargs), holoviews.opts.NdOverlay(normalize=normalize, framewise=framewise, axiswise=axiswise))\n<|end_body_1|>\n", "revision_id": "5e69c50e5b220859d65406d803086406b50a8e78", "skeleton": "<|skeleton|>\nclass Bivariate:\n \"\"\"Create a ``holoviews.Bivariate`` plot that plots streaming data. The streaming process is handled using a :class:`Pipe`.\"\"\"\n\n def __init__(self, data=None, bokeh_opts=None, mpl_opts=None, *args, **kwargs):\n \"\"\"Initialize a :class:`Bivariate`. Args: data: Passed to ``holoviews.Bivariate``. bokeh_opts: Default options for the plot when rendered using the \"bokeh\" backend. mpl_opts: Default options for the plot when rendered using the \"matplotlib\" backend. 
*args: Passed to ``holoviews.Bivariate``. **kwargs: Passed to ``holoviews.Bivariate``.\"\"\"\n <|body_0|>\n\n def opts(self, title='', xlabel: str='x', ylabel: str='y', framewise: bool=True, axiswise: bool=True, normalize: bool=True, *args, **kwargs):\n \"\"\"Update the plot parameters. Same as ``holoviews`` ``opts``. The default values update the plot axes independently when being displayed in a :class:`Holomap`.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Bivariate:\n \"\"\"Create a ``holoviews.Bivariate`` plot that plots streaming data. The streaming process is handled using a :class:`Pipe`.\"\"\"\n\n def __init__(self, data=None, bokeh_opts=None, mpl_opts=None, *args, **kwargs):\n \"\"\"Initialize a :class:`Bivariate`. Args: data: Passed to ``holoviews.Bivariate``. bokeh_opts: Default options for the plot when rendered using the \"bokeh\" backend. mpl_opts: Default options for the plot when rendered using the \"matplotlib\" backend. *args: Passed to ``holoviews.Bivariate``. **kwargs: Passed to ``holoviews.Bivariate``.\"\"\"\n def bivariate(data):\n return holoviews.Bivariate(data, *args, **kwargs)\n default_bokeh_opts = {'height': 350, 'width': 400, 'tools': ['hover'], 'shared_axes': False}\n default_mpl_opts = {}\n mpl_opts, bokeh_opts = self.update_default_opts(default_mpl_opts, mpl_opts, default_bokeh_opts, bokeh_opts)\n super(Bivariate, self).__init__(stream=Pipe, plot=bivariate, data=data, bokeh_opts=bokeh_opts, mpl_opts=mpl_opts)\n\n def opts(self, title='', xlabel: str='x', ylabel: str='y', framewise: bool=True, axiswise: bool=True, normalize: bool=True, *args, **kwargs):\n \"\"\"Update the plot parameters. Same as ``holoviews`` ``opts``. The default values update the plot axes independently when being displayed in a :class:`Holomap`.\"\"\"\n kwargs = self.update_kwargs(**kwargs)\n scatter_kwargs = dict(kwargs)\n if Store.current_backend == 'bokeh':\n scatter_kwargs['size'] = scatter_kwargs.get('size', 3.5)\n elif Store.current_backend == 'matplotlib':\n scatter_kwargs['s'] = scatter_kwargs.get('s', 15)\n self.plot = self.plot.opts(holoviews.opts.Bivariate(*args, title=title, xlabel=xlabel, ylabel=ylabel, framewise=framewise, axiswise=axiswise, normalize=normalize, **kwargs), holoviews.opts.Scatter(*args, alpha=0.7, xlabel=xlabel, ylabel=ylabel, framewise=framewise, axiswise=axiswise, normalize=normalize, **scatter_kwargs), holoviews.opts.NdOverlay(normalize=normalize, framewise=framewise, axiswise=axiswise))\n", "source": "the_stack_v2_python_sparse", "source_path": "fragile/dataviz/streaming.py", "source_repo": "sergio-hcsoft/fragile-1", "split": "val", "star_events_count": 0}
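The `Bivariate` record above is a thin wrapper over holoviews' streaming machinery. For readers unfamiliar with that machinery, here is a minimal, self-contained sketch of the Pipe-plus-DynamicMap pattern the wrapper appears to build on; it uses only public holoviews API, while the record's unnamed base class (which supplies `update_default_opts` and `update_kwargs`) is not shown in the record and is left out here.

import numpy as np
import holoviews as hv
from holoviews.streams import Pipe

hv.extension('bokeh')

# A Pipe pushes whole replacement datasets into a DynamicMap; every
# pipe.send(...) call re-renders the Bivariate element with the new data.
pipe = Pipe(data=np.random.randn(100, 2))
dmap = hv.DynamicMap(hv.Bivariate, streams=[pipe])
dmap = dmap.opts(hv.opts.Bivariate(width=400, height=350, tools=['hover']))

# Later, stream a fresh batch of points into the live plot:
pipe.send(np.random.randn(100, 2))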
{"blob_id": "5c6e912a21a6d0de0f2f57fb904120a1d42d2f68", "bodies": ["if request.user.is_superuser:\n academic_groups = AcademicGroup.objects.all()\nelse:\n department = Department.objects.filter(head=request.user).first()\n academic_groups = AcademicGroup.objects.filter(department=department)\nreturn ((academic_group.id, academic_group.name) for academic_group in academic_groups)", "if self.value():\n return queryset.filter(report_event__academic_group=self.value())\nelse:\n return queryset"], "bodies_text": "<|body_start_0|>\n if request.user.is_superuser:\n academic_groups = AcademicGroup.objects.all()\n else:\n department = Department.objects.filter(head=request.user).first()\n academic_groups = AcademicGroup.objects.filter(department=department)\n return ((academic_group.id, academic_group.name) for academic_group in academic_groups)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.value():\n return queryset.filter(report_event__academic_group=self.value())\n else:\n return queryset\n<|end_body_1|>\n", "class_docstring": "", "class_name": "EventListFilter", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass EventListFilter:\n\n def lookups(self, request, model_admin):\n \"\"\"Returns a list of tuples. The first element in each tuple is the coded value for the option that will appear in the URL query. The second element is the human-readable name for the option that will appear in the right sidebar.\"\"\"\n <|body_0|>\n\n def queryset(self, request, queryset):\n \"\"\"Returns the filtered queryset based on the value provided in the query string and retrievable via `self.value()`.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if request.user.is_superuser:\n academic_groups = AcademicGroup.objects.all()\n else:\n department = Department.objects.filter(head=request.user).first()\n academic_groups = AcademicGroup.objects.filter(department=department)\n return ((academic_group.id, academic_group.name) for academic_group in academic_groups)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.value():\n return queryset.filter(report_event__academic_group=self.value())\n else:\n return queryset\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000262", "length_bytes": 36821, "license_type": "no_license", "methods": [{"docstring": "Returns a list of tuples. The first element in each tuple is the coded value for the option that will appear in the URL query. The second element is the human-readable name for the option that will appear in the right sidebar.", "name": "lookups", "signature": "def lookups(self, request, model_admin)"}, {"docstring": "Returns the filtered queryset based on the value provided in the query string and retrievable via `self.value()`.", "name": "queryset", "signature": "def queryset(self, request, queryset)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_038290", "prompt": "Implement the Python class `EventListFilter` described below.\n\nClass description:\nImplement the EventListFilter class.\n\nMethod signatures and docstrings:\n- def lookups(self, request, model_admin): Returns a list of tuples. The first element in each tuple is the coded value for the option that will appear in the URL query. 
The second element is the human-readable name for the option that will appear in the right sidebar.\n- def queryset(self, request, queryset): Returns the filtered queryset based on the value provided in the query string and retrievable via `self.value()`.", "prompted_full_text": "Implement the Python class `EventListFilter` described below.\n\nClass description:\nImplement the EventListFilter class.\n\nMethod signatures and docstrings:\n- def lookups(self, request, model_admin): Returns a list of tuples. The first element in each tuple is the coded value for the option that will appear in the URL query. The second element is the human-readable name for the option that will appear in the right sidebar.\n- def queryset(self, request, queryset): Returns the filtered queryset based on the value provided in the query string and retrievable via `self.value()`.\n\n<|skeleton|>\nclass EventListFilter:\n\n def lookups(self, request, model_admin):\n \"\"\"Returns a list of tuples. The first element in each tuple is the coded value for the option that will appear in the URL query. The second element is the human-readable name for the option that will appear in the right sidebar.\"\"\"\n <|body_0|>\n\n def queryset(self, request, queryset):\n \"\"\"Returns the filtered queryset based on the value provided in the query string and retrievable via `self.value()`.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if request.user.is_superuser:\n academic_groups = AcademicGroup.objects.all()\n else:\n department = Department.objects.filter(head=request.user).first()\n academic_groups = AcademicGroup.objects.filter(department=department)\n return ((academic_group.id, academic_group.name) for academic_group in academic_groups)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.value():\n return queryset.filter(report_event__academic_group=self.value())\n else:\n return queryset\n<|end_body_1|>\n", "revision_id": "ae9eacef12fddf9cce48fa74b0f286ca1e10ed80", "skeleton": "<|skeleton|>\nclass EventListFilter:\n\n def lookups(self, request, model_admin):\n \"\"\"Returns a list of tuples. The first element in each tuple is the coded value for the option that will appear in the URL query. The second element is the human-readable name for the option that will appear in the right sidebar.\"\"\"\n <|body_0|>\n\n def queryset(self, request, queryset):\n \"\"\"Returns the filtered queryset based on the value provided in the query string and retrievable via `self.value()`.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class EventListFilter:\n def lookups(self, request, model_admin):\n \"\"\"Returns a list of tuples. The first element in each tuple is the coded value for the option that will appear in the URL query. 
The second element is the human-readable name for the option that will appear in the right sidebar.\"\"\"\n if request.user.is_superuser:\n academic_groups = AcademicGroup.objects.all()\n else:\n department = Department.objects.filter(head=request.user).first()\n academic_groups = AcademicGroup.objects.filter(department=department)\n return ((academic_group.id, academic_group.name) for academic_group in academic_groups)\n\n def queryset(self, request, queryset):\n \"\"\"Returns the filtered queryset based on the value provided in the query string and retrievable via `self.value()`.\"\"\"\n if self.value():\n return queryset.filter(report_event__academic_group=self.value())\n else:\n return queryset\n", "source": "the_stack_v2_python_sparse", "source_path": "src/nubip/admin.py", "source_repo": "dmitruyk/crm_nubip_jurnal", "split": "val", "star_events_count": 0}
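The `EventListFilter` skeleton omits its base class and the two class attributes Django requires; judging by the `lookups`/`queryset` pair it is presumably a `django.contrib.admin.SimpleListFilter`. A hedged sketch of the full shape, with hypothetical `title` and `parameter_name` values:

from django.contrib import admin

class EventListFilter(admin.SimpleListFilter):
    # Both attributes are required by SimpleListFilter; the values here are guesses.
    title = 'academic group'           # label shown in the admin sidebar
    parameter_name = 'academic_group'  # key used in the URL query string

    def lookups(self, request, model_admin):
        # Each tuple is (coded URL value, human-readable sidebar label).
        return [('1', 'Group A'), ('2', 'Group B')]

    def queryset(self, request, queryset):
        if self.value():
            return queryset.filter(report_event__academic_group=self.value())
        return queryset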
{"blob_id": "33b39a811de11d4136c51fc8c096431e27293abd", "bodies": ["super(MyAlexNet, self).__init__()\nself.cnn_layers = nn.Sequential()\nself.fc_layers = nn.Sequential()\nself.loss_criterion = nn.CrossEntropyLoss(reduction='sum')\nmodel = alexnet(pretrained=True)\nself.cnn_layers = list(model.children())[0]\ncount = 0\nno_grad_layers = [0, 3, 6, 8, 10]\nfor layer in self.cnn_layers:\n if count in no_grad_layers:\n layer.weight.requires_grad = False\n count += 1\nself.fc_layers = nn.Sequential(*[model.classifier[num] for num in range(6)], nn.Linear(in_features=4096, out_features=15, bias=True))\nlayer_count_2 = 0\nspec_layers = [1, 4]\nfor layer in self.fc_layers:\n if layer_count_2 in spec_layers:\n layer.weight.requires_grad = False\n layer.bias.requires_grad = False\n layer.weight.grad = None\n layer_count_2 += 1", "model_output = None\nx = x.repeat(1, 3, 1, 1)\ncnn_layers = self.cnn_layers(x)\nfirst_dim = cnn_layers.shape[0]\ncnn_layers = cnn_layers.reshape((first_dim, 9216))\nmodel_output = self.fc_layers(cnn_layers)\nreturn model_output"], "bodies_text": "<|body_start_0|>\n super(MyAlexNet, self).__init__()\n self.cnn_layers = nn.Sequential()\n self.fc_layers = nn.Sequential()\n self.loss_criterion = nn.CrossEntropyLoss(reduction='sum')\n model = alexnet(pretrained=True)\n self.cnn_layers = list(model.children())[0]\n count = 0\n no_grad_layers = [0, 3, 6, 8, 10]\n for layer in self.cnn_layers:\n if count in no_grad_layers:\n layer.weight.requires_grad = False\n count += 1\n self.fc_layers = nn.Sequential(*[model.classifier[num] for num in range(6)], nn.Linear(in_features=4096, out_features=15, bias=True))\n layer_count_2 = 0\n spec_layers = [1, 4]\n for layer in self.fc_layers:\n if layer_count_2 in spec_layers:\n layer.weight.requires_grad = False\n layer.bias.requires_grad = False\n layer.weight.grad = None\n layer_count_2 += 1\n<|end_body_0|>\n\n<|body_start_1|>\n model_output = None\n x = x.repeat(1, 3, 1, 1)\n cnn_layers = self.cnn_layers(x)\n first_dim = cnn_layers.shape[0]\n cnn_layers = cnn_layers.reshape((first_dim, 9216))\n model_output = self.fc_layers(cnn_layers)\n return model_output\n<|end_body_1|>\n", "class_docstring": "", "class_name": "MyAlexNet", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MyAlexNet:\n\n def __init__(self):\n \"\"\"Init function to define the layers and loss function Note: Use 'sum' reduction in the loss_criterion. 
Read the PyTorch documentation to understand what it means Note: Do not forget to freeze the layers of alexnet except the last one Download pretrained alexnet using pytorch's API (Hint: see the import statements)\"\"\"\n <|body_0|>\n\n def forward(self, x: torch.tensor) -> torch.tensor:\n \"\"\"Perform the forward pass with the net Args: - x: the input image [Dim: (N,C,H,W)] Returns: - y: the output (raw scores) of the net [Dim: (N,15)]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(MyAlexNet, self).__init__()\n self.cnn_layers = nn.Sequential()\n self.fc_layers = nn.Sequential()\n self.loss_criterion = nn.CrossEntropyLoss(reduction='sum')\n model = alexnet(pretrained=True)\n self.cnn_layers = list(model.children())[0]\n count = 0\n no_grad_layers = [0, 3, 6, 8, 10]\n for layer in self.cnn_layers:\n if count in no_grad_layers:\n layer.weight.requires_grad = False\n count += 1\n self.fc_layers = nn.Sequential(*[model.classifier[num] for num in range(6)], nn.Linear(in_features=4096, out_features=15, bias=True))\n layer_count_2 = 0\n spec_layers = [1, 4]\n for layer in self.fc_layers:\n if layer_count_2 in spec_layers:\n layer.weight.requires_grad = False\n layer.bias.requires_grad = False\n layer.weight.grad = None\n layer_count_2 += 1\n<|end_body_0|>\n\n<|body_start_1|>\n model_output = None\n x = x.repeat(1, 3, 1, 1)\n cnn_layers = self.cnn_layers(x)\n first_dim = cnn_layers.shape[0]\n cnn_layers = cnn_layers.reshape((first_dim, 9216))\n model_output = self.fc_layers(cnn_layers)\n return model_output\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000263", "length_bytes": 3019, "license_type": "no_license", "methods": [{"docstring": "Init function to define the layers and loss function Note: Use 'sum' reduction in the loss_criterion. Read the PyTorch documentation to understand what it means Note: Do not forget to freeze the layers of alexnet except the last one Download pretrained alexnet using pytorch's API (Hint: see the import statements)", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Perform the forward pass with the net Args: - x: the input image [Dim: (N,C,H,W)] Returns: - y: the output (raw scores) of the net [Dim: (N,15)]", "name": "forward", "signature": "def forward(self, x: torch.tensor) -> torch.tensor"}], "n_methods": 2, "prompt": "Implement the Python class `MyAlexNet` described below.\n\nClass description:\nImplement the MyAlexNet class.\n\nMethod signatures and docstrings:\n- def __init__(self): Init function to define the layers and loss function Note: Use 'sum' reduction in the loss_criterion. Read the PyTorch documentation to understand what it means Note: Do not forget to freeze the layers of alexnet except the last one Download pretrained alexnet using pytorch's API (Hint: see the import statements)\n- def forward(self, x: torch.tensor) -> torch.tensor: Perform the forward pass with the net Args: - x: the input image [Dim: (N,C,H,W)] Returns: - y: the output (raw scores) of the net [Dim: (N,15)]", "prompted_full_text": "Implement the Python class `MyAlexNet` described below.\n\nClass description:\nImplement the MyAlexNet class.\n\nMethod signatures and docstrings:\n- def __init__(self): Init function to define the layers and loss function Note: Use 'sum' reduction in the loss_criterion. 
Read the PyTorch documentation to understand what it means Note: Do not forget to freeze the layers of alexnet except the last one Download pretrained alexnet using pytorch's API (Hint: see the import statements)\n- def forward(self, x: torch.tensor) -> torch.tensor: Perform the forward pass with the net Args: - x: the input image [Dim: (N,C,H,W)] Returns: - y: the output (raw scores) of the net [Dim: (N,15)]\n\n<|skeleton|>\nclass MyAlexNet:\n\n def __init__(self):\n \"\"\"Init function to define the layers and loss function Note: Use 'sum' reduction in the loss_criterion. Read the PyTorch documentation to understand what it means Note: Do not forget to freeze the layers of alexnet except the last one Download pretrained alexnet using pytorch's API (Hint: see the import statements)\"\"\"\n <|body_0|>\n\n def forward(self, x: torch.tensor) -> torch.tensor:\n \"\"\"Perform the forward pass with the net Args: - x: the input image [Dim: (N,C,H,W)] Returns: - y: the output (raw scores) of the net [Dim: (N,15)]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(MyAlexNet, self).__init__()\n self.cnn_layers = nn.Sequential()\n self.fc_layers = nn.Sequential()\n self.loss_criterion = nn.CrossEntropyLoss(reduction='sum')\n model = alexnet(pretrained=True)\n self.cnn_layers = list(model.children())[0]\n count = 0\n no_grad_layers = [0, 3, 6, 8, 10]\n for layer in self.cnn_layers:\n if count in no_grad_layers:\n layer.weight.requires_grad = False\n count += 1\n self.fc_layers = nn.Sequential(*[model.classifier[num] for num in range(6)], nn.Linear(in_features=4096, out_features=15, bias=True))\n layer_count_2 = 0\n spec_layers = [1, 4]\n for layer in self.fc_layers:\n if layer_count_2 in spec_layers:\n layer.weight.requires_grad = False\n layer.bias.requires_grad = False\n layer.weight.grad = None\n layer_count_2 += 1\n<|end_body_0|>\n\n<|body_start_1|>\n model_output = None\n x = x.repeat(1, 3, 1, 1)\n cnn_layers = self.cnn_layers(x)\n first_dim = cnn_layers.shape[0]\n cnn_layers = cnn_layers.reshape((first_dim, 9216))\n model_output = self.fc_layers(cnn_layers)\n return model_output\n<|end_body_1|>\n", "revision_id": "0c4fdb1f978799ed428d8cdc536f25eafb0415f5", "skeleton": "<|skeleton|>\nclass MyAlexNet:\n\n def __init__(self):\n \"\"\"Init function to define the layers and loss function Note: Use 'sum' reduction in the loss_criterion. Read the PyTorch documentation to understand what it means Note: Do not forget to freeze the layers of alexnet except the last one Download pretrained alexnet using pytorch's API (Hint: see the import statements)\"\"\"\n <|body_0|>\n\n def forward(self, x: torch.tensor) -> torch.tensor:\n \"\"\"Perform the forward pass with the net Args: - x: the input image [Dim: (N,C,H,W)] Returns: - y: the output (raw scores) of the net [Dim: (N,15)]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class MyAlexNet:\n def __init__(self):\n \"\"\"Init function to define the layers and loss function Note: Use 'sum' reduction in the loss_criterion. 
Read the PyTorch documentation to understand what it means Note: Do not forget to freeze the layers of alexnet except the last one Download pretrained alexnet using pytorch's API (Hint: see the import statements)\"\"\"\n super(MyAlexNet, self).__init__()\n self.cnn_layers = nn.Sequential()\n self.fc_layers = nn.Sequential()\n self.loss_criterion = nn.CrossEntropyLoss(reduction='sum')\n model = alexnet(pretrained=True)\n self.cnn_layers = list(model.children())[0]\n count = 0\n no_grad_layers = [0, 3, 6, 8, 10]\n for layer in self.cnn_layers:\n if count in no_grad_layers:\n layer.weight.requires_grad = False\n count += 1\n self.fc_layers = nn.Sequential(*[model.classifier[num] for num in range(6)], nn.Linear(in_features=4096, out_features=15, bias=True))\n layer_count_2 = 0\n spec_layers = [1, 4]\n for layer in self.fc_layers:\n if layer_count_2 in spec_layers:\n layer.weight.requires_grad = False\n layer.bias.requires_grad = False\n layer.weight.grad = None\n layer_count_2 += 1\n\n def forward(self, x: torch.tensor) -> torch.tensor:\n \"\"\"Perform the forward pass with the net Args: - x: the input image [Dim: (N,C,H,W)] Returns: - y: the output (raw scores) of the net [Dim: (N,15)]\"\"\"\n model_output = None\n x = x.repeat(1, 3, 1, 1)\n cnn_layers = self.cnn_layers(x)\n first_dim = cnn_layers.shape[0]\n cnn_layers = cnn_layers.reshape((first_dim, 9216))\n model_output = self.fc_layers(cnn_layers)\n return model_output\n", "source": "the_stack_v2_python_sparse", "source_path": "proj6_v1/proj6_code/my_alexnet.py", "source_repo": "ARathie/Computer-Vision", "split": "val", "star_events_count": 0}
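The `MyAlexNet` record freezes pretrained layers by walking indexed lists. A common, simpler variant of the same idea uses torchvision's named module attributes directly; this is a hedged sketch of that variant, not the record's exact code (in particular, it freezes all feature parameters rather than the record's selected weight tensors):

import torch
import torch.nn as nn
from torchvision.models import alexnet

model = alexnet(pretrained=True)  # newer torchvision spells this weights=...
for param in model.features.parameters():
    param.requires_grad = False   # freeze every convolutional layer
model.classifier[6] = nn.Linear(4096, 15)  # swap the 1000-way head for 15 classes

x = torch.randn(4, 1, 224, 224).repeat(1, 3, 1, 1)  # grayscale batch tiled to 3 channels
scores = model(x)  # shape: (4, 15)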
{"blob_id": "99c6980f56ea260a53a3540276d9f69adfe91907", "bodies": ["if isinstance(index, str):\n return self.get(index)\nelse:\n return super().__getitem__(index)", "if isinstance(o, str):\n return o in [p.name for p in self]\nelse:\n return o in self", "for p in self:\n if p.name == name:\n return p", "super().append(p)\nif glob.config.debug:\n log(f'{p} added to mappools list.')", "super().remove(p)\nif glob.config.debug:\n log(f'{p} removed from mappools list.')"], "bodies_text": "<|body_start_0|>\n if isinstance(index, str):\n return self.get(index)\n else:\n return super().__getitem__(index)\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(o, str):\n return o in [p.name for p in self]\n else:\n return o in self\n<|end_body_1|>\n\n<|body_start_2|>\n for p in self:\n if p.name == name:\n return p\n<|end_body_2|>\n\n<|body_start_3|>\n super().append(p)\n if glob.config.debug:\n log(f'{p} added to mappools list.')\n<|end_body_3|>\n\n<|body_start_4|>\n super().remove(p)\n if glob.config.debug:\n log(f'{p} removed from mappools list.')\n<|end_body_4|>\n", "class_docstring": "The currently active mappools on the server.", "class_name": "MapPoolList", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MapPoolList:\n \"\"\"The currently active mappools on the server.\"\"\"\n\n def __getitem__(self, index: Union[int, slice, str]) -> 'MapPool':\n \"\"\"Allow slicing by either a string (for name), or slice.\"\"\"\n <|body_0|>\n\n def __contains__(self, o: Union['MapPool', str]) -> bool:\n \"\"\"Check whether internal list contains `o`.\"\"\"\n <|body_1|>\n\n def get(self, name: str) -> Optional['MapPool']:\n \"\"\"Get a pool from the list by `name`.\"\"\"\n <|body_2|>\n\n def append(self, p: 'MapPool') -> None:\n \"\"\"Attempt to add `p` to the list.\"\"\"\n <|body_3|>\n\n def remove(self, p: 'MapPool') -> None:\n \"\"\"Attempt to remove `p` from the list.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if isinstance(index, str):\n return self.get(index)\n else:\n return super().__getitem__(index)\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(o, str):\n return o in [p.name for p in self]\n else:\n return o in self\n<|end_body_1|>\n\n<|body_start_2|>\n for p in self:\n if p.name == name:\n return p\n<|end_body_2|>\n\n<|body_start_3|>\n super().append(p)\n if glob.config.debug:\n log(f'{p} added to mappools list.')\n<|end_body_3|>\n\n<|body_start_4|>\n super().remove(p)\n if glob.config.debug:\n log(f'{p} removed from mappools list.')\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000264", "length_bytes": 9175, "license_type": "permissive", "methods": [{"docstring": "Allow slicing by either a string (for name), or slice.", "name": "__getitem__", "signature": "def __getitem__(self, index: Union[int, slice, str]) -> 'MapPool'"}, {"docstring": "Check whether internal list contains `o`.", "name": "__contains__", "signature": "def __contains__(self, o: Union['MapPool', str]) -> bool"}, {"docstring": "Get a pool from the list by `name`.", "name": "get", "signature": "def get(self, name: str) -> Optional['MapPool']"}, {"docstring": "Attempt to add `p` to the list.", "name": "append", "signature": "def append(self, p: 'MapPool') -> None"}, {"docstring": "Attempt to remove `p` from the list.", "name": "remove", "signature": "def remove(self, p: 'MapPool') -> None"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_007146", "prompt": "Implement the Python class `MapPoolList` described 
below.\n\nClass description:\nThe currently active mappools on the server.\n\nMethod signatures and docstrings:\n- def __getitem__(self, index: Union[int, slice, str]) -> 'MapPool': Allow slicing by either a string (for name), or slice.\n- def __contains__(self, o: Union['MapPool', str]) -> bool: Check whether internal list contains `o`.\n- def get(self, name: str) -> Optional['MapPool']: Get a pool from the list by `name`.\n- def append(self, p: 'MapPool') -> None: Attempt to add `p` to the list.\n- def remove(self, p: 'MapPool') -> None: Attempt to remove `p` from the list.", "prompted_full_text": "Implement the Python class `MapPoolList` described below.\n\nClass description:\nThe currently active mappools on the server.\n\nMethod signatures and docstrings:\n- def __getitem__(self, index: Union[int, slice, str]) -> 'MapPool': Allow slicing by either a string (for name), or slice.\n- def __contains__(self, o: Union['MapPool', str]) -> bool: Check whether internal list contains `o`.\n- def get(self, name: str) -> Optional['MapPool']: Get a pool from the list by `name`.\n- def append(self, p: 'MapPool') -> None: Attempt to add `p` to the list.\n- def remove(self, p: 'MapPool') -> None: Attempt to remove `p` from the list.\n\n<|skeleton|>\nclass MapPoolList:\n \"\"\"The currently active mappools on the server.\"\"\"\n\n def __getitem__(self, index: Union[int, slice, str]) -> 'MapPool':\n \"\"\"Allow slicing by either a string (for name), or slice.\"\"\"\n <|body_0|>\n\n def __contains__(self, o: Union['MapPool', str]) -> bool:\n \"\"\"Check whether internal list contains `o`.\"\"\"\n <|body_1|>\n\n def get(self, name: str) -> Optional['MapPool']:\n \"\"\"Get a pool from the list by `name`.\"\"\"\n <|body_2|>\n\n def append(self, p: 'MapPool') -> None:\n \"\"\"Attempt to add `p` to the list.\"\"\"\n <|body_3|>\n\n def remove(self, p: 'MapPool') -> None:\n \"\"\"Attempt to remove `p` from the list.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if isinstance(index, str):\n return self.get(index)\n else:\n return super().__getitem__(index)\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(o, str):\n return o in [p.name for p in self]\n else:\n return o in self\n<|end_body_1|>\n\n<|body_start_2|>\n for p in self:\n if p.name == name:\n return p\n<|end_body_2|>\n\n<|body_start_3|>\n super().append(p)\n if glob.config.debug:\n log(f'{p} added to mappools list.')\n<|end_body_3|>\n\n<|body_start_4|>\n super().remove(p)\n if glob.config.debug:\n log(f'{p} removed from mappools list.')\n<|end_body_4|>\n", "revision_id": "d4ad1ebae2909139f2ee49f5f96310379cc3413e", "skeleton": "<|skeleton|>\nclass MapPoolList:\n \"\"\"The currently active mappools on the server.\"\"\"\n\n def __getitem__(self, index: Union[int, slice, str]) -> 'MapPool':\n \"\"\"Allow slicing by either a string (for name), or slice.\"\"\"\n <|body_0|>\n\n def __contains__(self, o: Union['MapPool', str]) -> bool:\n \"\"\"Check whether internal list contains `o`.\"\"\"\n <|body_1|>\n\n def get(self, name: str) -> Optional['MapPool']:\n \"\"\"Get a pool from the list by `name`.\"\"\"\n <|body_2|>\n\n def append(self, p: 'MapPool') -> None:\n \"\"\"Attempt to add `p` to the list.\"\"\"\n <|body_3|>\n\n def remove(self, p: 'MapPool') -> None:\n \"\"\"Attempt to remove `p` from the list.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class MapPoolList:\n \"\"\"The currently active 
mappools on the server.\"\"\"\n\n def __getitem__(self, index: Union[int, slice, str]) -> 'MapPool':\n \"\"\"Allow slicing by either a string (for name), or slice.\"\"\"\n if isinstance(index, str):\n return self.get(index)\n else:\n return super().__getitem__(index)\n\n def __contains__(self, o: Union['MapPool', str]) -> bool:\n \"\"\"Check whether internal list contains `o`.\"\"\"\n if isinstance(o, str):\n return o in [p.name for p in self]\n else:\n return o in self\n\n def get(self, name: str) -> Optional['MapPool']:\n \"\"\"Get a pool from the list by `name`.\"\"\"\n for p in self:\n if p.name == name:\n return p\n\n def append(self, p: 'MapPool') -> None:\n \"\"\"Attempt to add `p` to the list.\"\"\"\n super().append(p)\n if glob.config.debug:\n log(f'{p} added to mappools list.')\n\n def remove(self, p: 'MapPool') -> None:\n \"\"\"Attempt to remove `p` from the list.\"\"\"\n super().remove(p)\n if glob.config.debug:\n log(f'{p} removed from mappools list.')\n", "source": "the_stack_v2_python_sparse", "source_path": "objects/collections.py", "source_repo": "itekiosu/gulag", "split": "val", "star_events_count": 3}
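The `MapPoolList` skeleton never names its base class, but the `super().__getitem__`/`append`/`remove` calls imply it subclasses the built-in `list`; `glob` and `log` come from the surrounding gulag codebase. A trimmed, dependency-free sketch of the name-or-index lookup, with a minimal stand-in for the repo's `MapPool`:

from typing import Optional, Union

class MapPool:
    # Stand-in for the repo's MapPool; only the attribute the list reads.
    def __init__(self, name: str) -> None:
        self.name = name

class MapPoolList(list):
    def get(self, name: str) -> Optional[MapPool]:
        for p in self:
            if p.name == name:
                return p
        return None

    def __getitem__(self, index: Union[int, slice, str]) -> MapPool:
        if isinstance(index, str):
            return self.get(index)
        return super().__getitem__(index)

pools = MapPoolList([MapPool('weekly'), MapPool('tournament')])
assert pools['tournament'] is pools[1]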
{"blob_id": "2424724d56efb683d4a31cd6ebfe9f7dc969d100", "bodies": ["self._x = {0}\nself._y = {1}\nself._u = {3}\nsuper().__init__(dist, [rv_x, rv_y], [], rv_mode=rv_mode)\ntheoretical_bound = self._full_shape[self._proxy_vars[0]] + 1\nbound = min(bound, theoretical_bound) if bound else theoretical_bound\nself._construct_auxvars([({0}, bound)])", "mi_a = self._mutual_information(self._u, self._y)\nmi_b = self._mutual_information(self._u, self._x)\n\ndef objective(self, x):\n \"\"\"\n Compute :math:`I[U:Y] / I[U:X]`\n\n Parameters\n ----------\n x : np.ndarray\n An optimization vector.\n\n Returns\n -------\n obj : float\n The value of the objective.\n \"\"\"\n pmf = self.construct_joint(x)\n a = mi_a(pmf)\n b = mi_b(pmf)\n return -(a / b) if not np.isclose(b, 0.0) else np.inf\nreturn objective"], "bodies_text": "<|body_start_0|>\n self._x = {0}\n self._y = {1}\n self._u = {3}\n super().__init__(dist, [rv_x, rv_y], [], rv_mode=rv_mode)\n theoretical_bound = self._full_shape[self._proxy_vars[0]] + 1\n bound = min(bound, theoretical_bound) if bound else theoretical_bound\n self._construct_auxvars([({0}, bound)])\n<|end_body_0|>\n\n<|body_start_1|>\n mi_a = self._mutual_information(self._u, self._y)\n mi_b = self._mutual_information(self._u, self._x)\n\n def objective(self, x):\n \"\"\"\n Compute :math:`I[U:Y] / I[U:X]`\n\n Parameters\n ----------\n x : np.ndarray\n An optimization vector.\n\n Returns\n -------\n obj : float\n The value of the objective.\n \"\"\"\n pmf = self.construct_joint(x)\n a = mi_a(pmf)\n b = mi_b(pmf)\n return -(a / b) if not np.isclose(b, 0.0) else np.inf\n return objective\n<|end_body_1|>\n", "class_docstring": "Computes the hypercontractivity coefficient: .. math:: max_{U - X - Y} I[U:Y] / I[U:X]", "class_name": "HypercontractivityCoefficient", "detected_licenses": ["LicenseRef-scancode-unknown-license-reference", "BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass HypercontractivityCoefficient:\n \"\"\"Computes the hypercontractivity coefficient: .. math:: max_{U - X - Y} I[U:Y] / I[U:X]\"\"\"\n\n def __init__(self, dist, rv_x=None, rv_y=None, bound=None, rv_mode=None):\n \"\"\"Initialize the optimizer. Parameters ---------- dist : Distribution The distribution to compute the intrinsic mutual information of. rv_x : iterable The variables to consider `X`. rv_y : iterable The variables to consider `Y`. bound : int, None Specifies a bound on the size of the auxiliary random variable. If None, then the theoretical bound is used. rv_mode : str, None Specifies how to interpret `rvs` and `crvs`. Valid options are: {'indices', 'names'}. If equal to 'indices', then the elements of `crvs` and `rvs` are interpreted as random variable indices. If equal to 'names', then the elements are interpreted as random variable names. If `None`, then the value of `dist._rv_mode` is consulted\"\"\"\n <|body_0|>\n\n def _objective(self):\n \"\"\"The hypercontractivity coefficient to minimize. 
Returns ------- obj : func The objective function.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._x = {0}\n self._y = {1}\n self._u = {3}\n super().__init__(dist, [rv_x, rv_y], [], rv_mode=rv_mode)\n theoretical_bound = self._full_shape[self._proxy_vars[0]] + 1\n bound = min(bound, theoretical_bound) if bound else theoretical_bound\n self._construct_auxvars([({0}, bound)])\n<|end_body_0|>\n\n<|body_start_1|>\n mi_a = self._mutual_information(self._u, self._y)\n mi_b = self._mutual_information(self._u, self._x)\n\n def objective(self, x):\n \"\"\"\n Compute :math:`I[U:Y] / I[U:X]`\n\n Parameters\n ----------\n x : np.ndarray\n An optimization vector.\n\n Returns\n -------\n obj : float\n The value of the objective.\n \"\"\"\n pmf = self.construct_joint(x)\n a = mi_a(pmf)\n b = mi_b(pmf)\n return -(a / b) if not np.isclose(b, 0.0) else np.inf\n return objective\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000265", "length_bytes": 4583, "license_type": "permissive", "methods": [{"docstring": "Initialize the optimizer. Parameters ---------- dist : Distribution The distribution to compute the intrinsic mutual information of. rv_x : iterable The variables to consider `X`. rv_y : iterable The variables to consider `Y`. bound : int, None Specifies a bound on the size of the auxiliary random variable. If None, then the theoretical bound is used. rv_mode : str, None Specifies how to interpret `rvs` and `crvs`. Valid options are: {'indices', 'names'}. If equal to 'indices', then the elements of `crvs` and `rvs` are interpreted as random variable indices. If equal to 'names', then the elements are interpreted as random variable names. If `None`, then the value of `dist._rv_mode` is consulted", "name": "__init__", "signature": "def __init__(self, dist, rv_x=None, rv_y=None, bound=None, rv_mode=None)"}, {"docstring": "The hypercontractivity coefficient to minimize. Returns ------- obj : func The objective function.", "name": "_objective", "signature": "def _objective(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005965", "prompt": "Implement the Python class `HypercontractivityCoefficient` described below.\n\nClass description:\nComputes the hypercontractivity coefficient: .. math:: max_{U - X - Y} I[U:Y] / I[U:X]\n\nMethod signatures and docstrings:\n- def __init__(self, dist, rv_x=None, rv_y=None, bound=None, rv_mode=None): Initialize the optimizer. Parameters ---------- dist : Distribution The distribution to compute the intrinsic mutual information of. rv_x : iterable The variables to consider `X`. rv_y : iterable The variables to consider `Y`. bound : int, None Specifies a bound on the size of the auxiliary random variable. If None, then the theoretical bound is used. rv_mode : str, None Specifies how to interpret `rvs` and `crvs`. Valid options are: {'indices', 'names'}. If equal to 'indices', then the elements of `crvs` and `rvs` are interpreted as random variable indices. If equal to 'names', then the elements are interpreted as random variable names. If `None`, then the value of `dist._rv_mode` is consulted\n- def _objective(self): The hypercontractivity coefficient to minimize. Returns ------- obj : func The objective function.", "prompted_full_text": "Implement the Python class `HypercontractivityCoefficient` described below.\n\nClass description:\nComputes the hypercontractivity coefficient: .. 
math:: max_{U - X - Y} I[U:Y] / I[U:X]\n\nMethod signatures and docstrings:\n- def __init__(self, dist, rv_x=None, rv_y=None, bound=None, rv_mode=None): Initialize the optimizer. Parameters ---------- dist : Distribution The distribution to compute the intrinsic mutual information of. rv_x : iterable The variables to consider `X`. rv_y : iterable The variables to consider `Y`. bound : int, None Specifies a bound on the size of the auxiliary random variable. If None, then the theoretical bound is used. rv_mode : str, None Specifies how to interpret `rvs` and `crvs`. Valid options are: {'indices', 'names'}. If equal to 'indices', then the elements of `crvs` and `rvs` are interpreted as random variable indices. If equal to 'names', then the elements are interpreted as random variable names. If `None`, then the value of `dist._rv_mode` is consulted\n- def _objective(self): The hypercontractivity coefficient to minimize. Returns ------- obj : func The objective function.\n\n<|skeleton|>\nclass HypercontractivityCoefficient:\n \"\"\"Computes the hypercontractivity coefficient: .. math:: max_{U - X - Y} I[U:Y] / I[U:X]\"\"\"\n\n def __init__(self, dist, rv_x=None, rv_y=None, bound=None, rv_mode=None):\n \"\"\"Initialize the optimizer. Parameters ---------- dist : Distribution The distribution to compute the intrinsic mutual information of. rv_x : iterable The variables to consider `X`. rv_y : iterable The variables to consider `Y`. bound : int, None Specifies a bound on the size of the auxiliary random variable. If None, then the theoretical bound is used. rv_mode : str, None Specifies how to interpret `rvs` and `crvs`. Valid options are: {'indices', 'names'}. If equal to 'indices', then the elements of `crvs` and `rvs` are interpreted as random variable indices. If equal to 'names', then the elements are interpreted as random variable names. If `None`, then the value of `dist._rv_mode` is consulted\"\"\"\n <|body_0|>\n\n def _objective(self):\n \"\"\"The hypercontractivity coefficient to minimize. Returns ------- obj : func The objective function.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._x = {0}\n self._y = {1}\n self._u = {3}\n super().__init__(dist, [rv_x, rv_y], [], rv_mode=rv_mode)\n theoretical_bound = self._full_shape[self._proxy_vars[0]] + 1\n bound = min(bound, theoretical_bound) if bound else theoretical_bound\n self._construct_auxvars([({0}, bound)])\n<|end_body_0|>\n\n<|body_start_1|>\n mi_a = self._mutual_information(self._u, self._y)\n mi_b = self._mutual_information(self._u, self._x)\n\n def objective(self, x):\n \"\"\"\n Compute :math:`I[U:Y] / I[U:X]`\n\n Parameters\n ----------\n x : np.ndarray\n An optimization vector.\n\n Returns\n -------\n obj : float\n The value of the objective.\n \"\"\"\n pmf = self.construct_joint(x)\n a = mi_a(pmf)\n b = mi_b(pmf)\n return -(a / b) if not np.isclose(b, 0.0) else np.inf\n return objective\n<|end_body_1|>\n", "revision_id": "b13c5020a2b8524527a4a0db5a81d8549142228c", "skeleton": "<|skeleton|>\nclass HypercontractivityCoefficient:\n \"\"\"Computes the hypercontractivity coefficient: .. math:: max_{U - X - Y} I[U:Y] / I[U:X]\"\"\"\n\n def __init__(self, dist, rv_x=None, rv_y=None, bound=None, rv_mode=None):\n \"\"\"Initialize the optimizer. Parameters ---------- dist : Distribution The distribution to compute the intrinsic mutual information of. rv_x : iterable The variables to consider `X`. rv_y : iterable The variables to consider `Y`. bound : int, None Specifies a bound on the size of the auxiliary random variable. 
If None, then the theoretical bound is used. rv_mode : str, None Specifies how to interpret `rvs` and `crvs`. Valid options are: {'indices', 'names'}. If equal to 'indices', then the elements of `crvs` and `rvs` are interpreted as random variable indices. If equal to 'names', then the elements are interpreted as random variable names. If `None`, then the value of `dist._rv_mode` is consulted\"\"\"\n <|body_0|>\n\n def _objective(self):\n \"\"\"The hypercontractivity coefficient to minimize. Returns ------- obj : func The objective function.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class HypercontractivityCoefficient:\n \"\"\"Computes the hypercontractivity coefficient: .. math:: max_{U - X - Y} I[U:Y] / I[U:X]\"\"\"\n\n def __init__(self, dist, rv_x=None, rv_y=None, bound=None, rv_mode=None):\n \"\"\"Initialize the optimizer. Parameters ---------- dist : Distribution The distribution to compute the intrinsic mutual information of. rv_x : iterable The variables to consider `X`. rv_y : iterable The variables to consider `Y`. bound : int, None Specifies a bound on the size of the auxiliary random variable. If None, then the theoretical bound is used. rv_mode : str, None Specifies how to interpret `rvs` and `crvs`. Valid options are: {'indices', 'names'}. If equal to 'indices', then the elements of `crvs` and `rvs` are interpreted as random variable indices. If equal to 'names', then the elements are interpreted as random variable names. If `None`, then the value of `dist._rv_mode` is consulted\"\"\"\n self._x = {0}\n self._y = {1}\n self._u = {3}\n super().__init__(dist, [rv_x, rv_y], [], rv_mode=rv_mode)\n theoretical_bound = self._full_shape[self._proxy_vars[0]] + 1\n bound = min(bound, theoretical_bound) if bound else theoretical_bound\n self._construct_auxvars([({0}, bound)])\n\n def _objective(self):\n \"\"\"The hypercontractivity coefficient to minimize. Returns ------- obj : func The objective function.\"\"\"\n mi_a = self._mutual_information(self._u, self._y)\n mi_b = self._mutual_information(self._u, self._x)\n\n def objective(self, x):\n \"\"\"\n Compute :math:`I[U:Y] / I[U:X]`\n\n Parameters\n ----------\n x : np.ndarray\n An optimization vector.\n\n Returns\n -------\n obj : float\n The value of the objective.\n \"\"\"\n pmf = self.construct_joint(x)\n a = mi_a(pmf)\n b = mi_b(pmf)\n return -(a / b) if not np.isclose(b, 0.0) else np.inf\n return objective\n", "source": "the_stack_v2_python_sparse", "source_path": "dit/divergences/hypercontractivity_coefficient.py", "source_repo": "dit/dit", "split": "val", "star_events_count": 468}
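The record's `_objective` encodes a standard trick: a maximization of I[U:Y]/I[U:X] is handed to a minimizer as the negated ratio, with `np.isclose` guarding the zero-denominator case. The following is an independent numpy sketch of that guard, not dit's internals (dit builds its mutual-information callables through its own optimizer plumbing):

import numpy as np

def mutual_information(pxy: np.ndarray) -> float:
    # I[X:Y] in bits from a joint pmf; zero cells contribute nothing.
    px = pxy.sum(axis=1, keepdims=True)
    py = pxy.sum(axis=0, keepdims=True)
    mask = pxy > 0
    return float((pxy[mask] * np.log2(pxy[mask] / (px @ py)[mask])).sum())

def negated_ratio(p_uy: np.ndarray, p_ux: np.ndarray) -> float:
    # Minimizing -(I[U:Y]/I[U:X]) maximizes the ratio; np.inf rules out
    # candidates where U carries no information about X.
    a, b = mutual_information(p_uy), mutual_information(p_ux)
    return -(a / b) if not np.isclose(b, 0.0) else np.inf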
{"blob_id": "f3c256d047465e7604f3a03fac3c020a85360d74", "bodies": ["self.upstream_hosts = ['tcp://{}:{}'.format(*host_tuple) for host_tuple in connect_host_tuples]\nself.socket_type = zmq.PULL\nself.context = None\nself.socket = None\nself.connected = False", "self.context = zmq.Context()\nself.socket = self.context.socket(self.socket_type)\nfor host in self.upstream_hosts:\n self.socket.connect(host)\nself.connected = True", "if not self.connected:\n raise zmq.error.ZMQError('ZeroMQReceiver is not connected to a socket')\nreturn self.socket.recv()", "if self.connected:\n self.socket.close()\n self.context.destroy()\n self.socket = None\n self.context = None\n self.connected = False"], "bodies_text": "<|body_start_0|>\n self.upstream_hosts = ['tcp://{}:{}'.format(*host_tuple) for host_tuple in connect_host_tuples]\n self.socket_type = zmq.PULL\n self.context = None\n self.socket = None\n self.connected = False\n<|end_body_0|>\n\n<|body_start_1|>\n self.context = zmq.Context()\n self.socket = self.context.socket(self.socket_type)\n for host in self.upstream_hosts:\n self.socket.connect(host)\n self.connected = True\n<|end_body_1|>\n\n<|body_start_2|>\n if not self.connected:\n raise zmq.error.ZMQError('ZeroMQReceiver is not connected to a socket')\n return self.socket.recv()\n<|end_body_2|>\n\n<|body_start_3|>\n if self.connected:\n self.socket.close()\n self.context.destroy()\n self.socket = None\n self.context = None\n self.connected = False\n<|end_body_3|>\n", "class_docstring": "ZeroMQReceiver allows for messages to be received by pulling messages over a zmq socket from an upstream host. This client may connect to multiple upstream hosts.", "class_name": "ZeroMQReceiver", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ZeroMQReceiver:\n \"\"\"ZeroMQReceiver allows for messages to be received by pulling messages over a zmq socket from an upstream host. This client may connect to multiple upstream hosts.\"\"\"\n\n def __init__(self, connect_host_tuples):\n \"\"\"Creates an instance of the ZeroMQReceiver. :param connect_host_tuples: [(host, port), (host, port)], for example [('127.0.0.1', '5000'), ('127.0.0.1', '5001')]\"\"\"\n <|body_0|>\n\n def connect(self):\n \"\"\"Connect the receiver to upstream hosts. 
Create a zmq.Context and a zmq.PULL socket, and connect the socket to all specified host:port tuples.\"\"\"\n <|body_1|>\n\n def get(self):\n \"\"\"Read a message from the zmq socket and return\"\"\"\n <|body_2|>\n\n def close(self):\n \"\"\"Close the zmq socket\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.upstream_hosts = ['tcp://{}:{}'.format(*host_tuple) for host_tuple in connect_host_tuples]\n self.socket_type = zmq.PULL\n self.context = None\n self.socket = None\n self.connected = False\n<|end_body_0|>\n\n<|body_start_1|>\n self.context = zmq.Context()\n self.socket = self.context.socket(self.socket_type)\n for host in self.upstream_hosts:\n self.socket.connect(host)\n self.connected = True\n<|end_body_1|>\n\n<|body_start_2|>\n if not self.connected:\n raise zmq.error.ZMQError('ZeroMQReceiver is not connected to a socket')\n return self.socket.recv()\n<|end_body_2|>\n\n<|body_start_3|>\n if self.connected:\n self.socket.close()\n self.context.destroy()\n self.socket = None\n self.context = None\n self.connected = False\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000266", "length_bytes": 6508, "license_type": "permissive", "methods": [{"docstring": "Creates an instance of the ZeroMQReceiver. :param connect_host_tuples: [(host, port), (host, port)], for example [('127.0.0.1', '5000'), ('127.0.0.1', '5001')]", "name": "__init__", "signature": "def __init__(self, connect_host_tuples)"}, {"docstring": "Connect the receiver to upstream hosts. Create a zmq.Context and a zmq.PULL socket, and connect the socket to all specified host:port tuples.", "name": "connect", "signature": "def connect(self)"}, {"docstring": "Read a message from the zmq socket and return", "name": "get", "signature": "def get(self)"}, {"docstring": "Close the zmq socket", "name": "close", "signature": "def close(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_035954", "prompt": "Implement the Python class `ZeroMQReceiver` described below.\n\nClass description:\nZeroMQReceiver allows for messages to be received by pulling messages over a zmq socket from an upstream host. This client may connect to multiple upstream hosts.\n\nMethod signatures and docstrings:\n- def __init__(self, connect_host_tuples): Creates an instance of the ZeroMQReceiver. :param connect_host_tuples: [(host, port), (host, port)], for example [('127.0.0.1', '5000'), ('127.0.0.1', '5001')]\n- def connect(self): Connect the receiver to upstream hosts. Create a zmq.Context and a zmq.PULL socket, and connect the socket to all specified host:port tuples.\n- def get(self): Read a message from the zmq socket and return\n- def close(self): Close the zmq socket", "prompted_full_text": "Implement the Python class `ZeroMQReceiver` described below.\n\nClass description:\nZeroMQReceiver allows for messages to be received by pulling messages over a zmq socket from an upstream host. This client may connect to multiple upstream hosts.\n\nMethod signatures and docstrings:\n- def __init__(self, connect_host_tuples): Creates an instance of the ZeroMQReceiver. :param connect_host_tuples: [(host, port), (host, port)], for example [('127.0.0.1', '5000'), ('127.0.0.1', '5001')]\n- def connect(self): Connect the receiver to upstream hosts. 
Create a zmq.Context and a zmq.PULL socket, and connect the socket to all specified host:port tuples.\n- def get(self): Read a message from the zmq socket and return\n- def close(self): Close the zmq socket\n\n<|skeleton|>\nclass ZeroMQReceiver:\n \"\"\"ZeroMQReceiver allows for messages to be received by pulling messages over a zmq socket from an upstream host. This client may connect to multiple upstream hosts.\"\"\"\n\n def __init__(self, connect_host_tuples):\n \"\"\"Creates an instance of the ZeroMQReceiver. :param connect_host_tuples: [(host, port), (host, port)], for example [('127.0.0.1', '5000'), ('127.0.0.1', '5001')]\"\"\"\n <|body_0|>\n\n def connect(self):\n \"\"\"Connect the receiver to upstream hosts. Create a zmq.Context and a zmq.PULL socket, and connect the socket to all specified host:port tuples.\"\"\"\n <|body_1|>\n\n def get(self):\n \"\"\"Read a message from the zmq socket and return\"\"\"\n <|body_2|>\n\n def close(self):\n \"\"\"Close the zmq socket\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.upstream_hosts = ['tcp://{}:{}'.format(*host_tuple) for host_tuple in connect_host_tuples]\n self.socket_type = zmq.PULL\n self.context = None\n self.socket = None\n self.connected = False\n<|end_body_0|>\n\n<|body_start_1|>\n self.context = zmq.Context()\n self.socket = self.context.socket(self.socket_type)\n for host in self.upstream_hosts:\n self.socket.connect(host)\n self.connected = True\n<|end_body_1|>\n\n<|body_start_2|>\n if not self.connected:\n raise zmq.error.ZMQError('ZeroMQReceiver is not connected to a socket')\n return self.socket.recv()\n<|end_body_2|>\n\n<|body_start_3|>\n if self.connected:\n self.socket.close()\n self.context.destroy()\n self.socket = None\n self.context = None\n self.connected = False\n<|end_body_3|>\n", "revision_id": "1df9efe33ead702d0f53dfc227b5da385ba9cf23", "skeleton": "<|skeleton|>\nclass ZeroMQReceiver:\n \"\"\"ZeroMQReceiver allows for messages to be received by pulling messages over a zmq socket from an upstream host. This client may connect to multiple upstream hosts.\"\"\"\n\n def __init__(self, connect_host_tuples):\n \"\"\"Creates an instance of the ZeroMQReceiver. :param connect_host_tuples: [(host, port), (host, port)], for example [('127.0.0.1', '5000'), ('127.0.0.1', '5001')]\"\"\"\n <|body_0|>\n\n def connect(self):\n \"\"\"Connect the receiver to upstream hosts. Create a zmq.Context and a zmq.PULL socket, and connect the socket to all specified host:port tuples.\"\"\"\n <|body_1|>\n\n def get(self):\n \"\"\"Read a message from the zmq socket and return\"\"\"\n <|body_2|>\n\n def close(self):\n \"\"\"Close the zmq socket\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ZeroMQReceiver:\n \"\"\"ZeroMQReceiver allows for messages to be received by pulling messages over a zmq socket from an upstream host. This client may connect to multiple upstream hosts.\"\"\"\n\n def __init__(self, connect_host_tuples):\n \"\"\"Creates an instance of the ZeroMQReceiver. :param connect_host_tuples: [(host, port), (host, port)], for example [('127.0.0.1', '5000'), ('127.0.0.1', '5001')]\"\"\"\n self.upstream_hosts = ['tcp://{}:{}'.format(*host_tuple) for host_tuple in connect_host_tuples]\n self.socket_type = zmq.PULL\n self.context = None\n self.socket = None\n self.connected = False\n\n def connect(self):\n \"\"\"Connect the receiver to upstream hosts. 
Create a zmq.Context and a zmq.PULL socket, and connect the socket to all specified host:port tuples.\"\"\"\n self.context = zmq.Context()\n self.socket = self.context.socket(self.socket_type)\n for host in self.upstream_hosts:\n self.socket.connect(host)\n self.connected = True\n\n def get(self):\n \"\"\"Read a message from the zmq socket and return\"\"\"\n if not self.connected:\n raise zmq.error.ZMQError('ZeroMQReceiver is not connected to a socket')\n return self.socket.recv()\n\n def close(self):\n \"\"\"Close the zmq socket\"\"\"\n if self.connected:\n self.socket.close()\n self.context.destroy()\n self.socket = None\n self.context = None\n self.connected = False\n", "source": "the_stack_v2_python_sparse", "source_path": "meniscus/transport.py", "source_repo": "priestd09/meniscus", "split": "val", "star_events_count": 0}
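The `ZeroMQReceiver` is the PULL end of a pyzmq pipeline; since it connects out, the matching PUSH sockets are expected to bind. A small sketch of the sending side plus the receiver usage implied by the record (the port and host layout are illustrative):

import zmq

# PUSH side: binds and fans messages out to connected PULL sockets.
context = zmq.Context()
sender = context.socket(zmq.PUSH)
sender.bind('tcp://127.0.0.1:5000')
sender.send(b'log line 1')

# PULL side, using the record's class:
# receiver = ZeroMQReceiver([('127.0.0.1', '5000')])
# receiver.connect()
# message = receiver.get()   # blocks until a pushed message arrives
# receiver.close()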
{"blob_id": "00daafeb4e8f16d5acde7ff5459533b177935bea", "bodies": ["res = []\nstack = []\nwhile root or stack:\n while root:\n stack.append(root)\n root = root.left\n root = stack.pop()\n res.append(root.val)\n root = root.right\nreturn res", "def getSuccessor(root: TreeNode) -> TreeNode:\n succ = root.left\n while succ.right and succ.right != root:\n succ = succ.right\n return succ\nres = []\nwhile root:\n if root.left:\n succ = getSuccessor(root)\n if succ.right == root:\n succ.right = None\n res.append(root.val)\n root = root.right\n else:\n succ.right = root\n root = root.left\n else:\n res.append(root.val)\n root = root.right\nreturn res"], "bodies_text": "<|body_start_0|>\n res = []\n stack = []\n while root or stack:\n while root:\n stack.append(root)\n root = root.left\n root = stack.pop()\n res.append(root.val)\n root = root.right\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n def getSuccessor(root: TreeNode) -> TreeNode:\n succ = root.left\n while succ.right and succ.right != root:\n succ = succ.right\n return succ\n res = []\n while root:\n if root.left:\n succ = getSuccessor(root)\n if succ.right == root:\n succ.right = None\n res.append(root.val)\n root = root.right\n else:\n succ.right = root\n root = root.left\n else:\n res.append(root.val)\n root = root.right\n return res\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def inorderTraversal_MK1(self, root: Optional[TreeNode]) -> List[int]:\n \"\"\"Iterating method using Stack Time complexity: O(n) Space complexity: O(n)\"\"\"\n <|body_0|>\n\n def inorderTraversal_MK2(self, root: Optional[TreeNode]) -> List[int]:\n \"\"\"Morris Traversal Time complexity: O(n) Space complexity: O(1)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n res = []\n stack = []\n while root or stack:\n while root:\n stack.append(root)\n root = root.left\n root = stack.pop()\n res.append(root.val)\n root = root.right\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n def getSuccessor(root: TreeNode) -> TreeNode:\n succ = root.left\n while succ.right and succ.right != root:\n succ = succ.right\n return succ\n res = []\n while root:\n if root.left:\n succ = getSuccessor(root)\n if succ.right == root:\n succ.right = None\n res.append(root.val)\n root = root.right\n else:\n succ.right = root\n root = root.left\n else:\n res.append(root.val)\n root = root.right\n return res\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000267", "length_bytes": 1513, "license_type": "no_license", "methods": [{"docstring": "Iterating method using Stack Time complexity: O(n) Space complexity: O(n)", "name": "inorderTraversal_MK1", "signature": "def inorderTraversal_MK1(self, root: Optional[TreeNode]) -> List[int]"}, {"docstring": "Morris Traversal Time complexity: O(n) Space complexity: O(1)", "name": "inorderTraversal_MK2", "signature": "def inorderTraversal_MK2(self, root: Optional[TreeNode]) -> List[int]"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_016591", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def inorderTraversal_MK1(self, root: Optional[TreeNode]) -> List[int]: Iterating method using Stack Time complexity: O(n) Space complexity: O(n)\n- def inorderTraversal_MK2(self, root: Optional[TreeNode]) -> List[int]: Morris Traversal Time complexity: O(n) Space 
complexity: O(1)", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def inorderTraversal_MK1(self, root: Optional[TreeNode]) -> List[int]: Iterating method using Stack Time complexity: O(n) Space complexity: O(n)\n- def inorderTraversal_MK2(self, root: Optional[TreeNode]) -> List[int]: Morris Traversal Time complexity: O(n) Space complexity: O(1)\n\n<|skeleton|>\nclass Solution:\n\n def inorderTraversal_MK1(self, root: Optional[TreeNode]) -> List[int]:\n \"\"\"Iterating method using Stack Time complexity: O(n) Space complexity: O(n)\"\"\"\n <|body_0|>\n\n def inorderTraversal_MK2(self, root: Optional[TreeNode]) -> List[int]:\n \"\"\"Morris Traversal Time complexity: O(n) Space complexity: O(1)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n res = []\n stack = []\n while root or stack:\n while root:\n stack.append(root)\n root = root.left\n root = stack.pop()\n res.append(root.val)\n root = root.right\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n def getSuccessor(root: TreeNode) -> TreeNode:\n succ = root.left\n while succ.right and succ.right != root:\n succ = succ.right\n return succ\n res = []\n while root:\n if root.left:\n succ = getSuccessor(root)\n if succ.right == root:\n succ.right = None\n res.append(root.val)\n root = root.right\n else:\n succ.right = root\n root = root.left\n else:\n res.append(root.val)\n root = root.right\n return res\n<|end_body_1|>\n", "revision_id": "d7ba416d22becfa8f2a2ae4eee04c86617cd9332", "skeleton": "<|skeleton|>\nclass Solution:\n\n def inorderTraversal_MK1(self, root: Optional[TreeNode]) -> List[int]:\n \"\"\"Iterating method using Stack Time complexity: O(n) Space complexity: O(n)\"\"\"\n <|body_0|>\n\n def inorderTraversal_MK2(self, root: Optional[TreeNode]) -> List[int]:\n \"\"\"Morris Traversal Time complexity: O(n) Space complexity: O(1)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def inorderTraversal_MK1(self, root: Optional[TreeNode]) -> List[int]:\n \"\"\"Iterating method using Stack Time complexity: O(n) Space complexity: O(n)\"\"\"\n res = []\n stack = []\n while root or stack:\n while root:\n stack.append(root)\n root = root.left\n root = stack.pop()\n res.append(root.val)\n root = root.right\n return res\n\n def inorderTraversal_MK2(self, root: Optional[TreeNode]) -> List[int]:\n \"\"\"Morris Traversal Time complexity: O(n) Space complexity: O(1)\"\"\"\n def getSuccessor(root: TreeNode) -> TreeNode:\n succ = root.left\n while succ.right and succ.right != root:\n succ = succ.right\n return succ\n res = []\n while root:\n if root.left:\n succ = getSuccessor(root)\n if succ.right == root:\n succ.right = None\n res.append(root.val)\n root = root.right\n else:\n succ.right = root\n root = root.left\n else:\n res.append(root.val)\n root = root.right\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "0094. Binary Tree Inorder Traversal/Solution.py", "source_repo": "faterazer/LeetCode", "split": "val", "star_events_count": 4}
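Both traversals in the record return the same sequence; the Morris version threads a temporary right-pointer from each node's inorder predecessor back to the node and later removes it, so the tree ends up unmodified. A quick check, assuming a minimal TreeNode (the record leaves its definition to the surrounding file):

from typing import List, Optional

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

# Tree:  1
#         \
#          2
#         /
#        3
root = TreeNode(1, None, TreeNode(2, TreeNode(3)))
s = Solution()
assert s.inorderTraversal_MK1(root) == [1, 3, 2]
assert s.inorderTraversal_MK2(root) == [1, 3, 2]  # tree left intact afterwards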
{"blob_id": "e95b3e78e7ab3a3cd3f46380e1ac41ced5ed4419", "bodies": ["self.x = x\nself.y = y\nself.next = next", "XList = []\nYList = []\npointer = self\nwhile pointer != None:\n XList.append(pointer.x)\n YList.append(pointer.y)\n pointer = pointer.next\nreturn (XList, YList)"], "bodies_text": "<|body_start_0|>\n self.x = x\n self.y = y\n self.next = next\n<|end_body_0|>\n\n<|body_start_1|>\n XList = []\n YList = []\n pointer = self\n while pointer != None:\n XList.append(pointer.x)\n YList.append(pointer.y)\n pointer = pointer.next\n return (XList, YList)\n<|end_body_1|>\n", "class_docstring": "A class for storing the x and y paird data for the chart related function. This class is a linked list type.", "class_name": "XYNode", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass XYNode:\n \"\"\"A class for storing the x and y paird data for the chart related function. This class is a linked list type.\"\"\"\n\n def __init__(self, x, y, next=None):\n \"\"\"Constructor. Args: (float) x: the data of x axis. (float) y: the data of y axis. (XYNode) next: the next data node.\"\"\"\n <|body_0|>\n\n def toLists(self):\n \"\"\"Convert this linked list to two lists. One of the two list contains all data of x, and the other contains all data of y. Return: (list) XList: a list of all data of x. (list) YList: a list of all data of y.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.x = x\n self.y = y\n self.next = next\n<|end_body_0|>\n\n<|body_start_1|>\n XList = []\n YList = []\n pointer = self\n while pointer != None:\n XList.append(pointer.x)\n YList.append(pointer.y)\n pointer = pointer.next\n return (XList, YList)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000268", "length_bytes": 2764, "license_type": "no_license", "methods": [{"docstring": "Constructor. Args: (float) x: the data of x axis. (float) y: the data of y axis. (XYNode) next: the next data node.", "name": "__init__", "signature": "def __init__(self, x, y, next=None)"}, {"docstring": "Convert this linked list to two lists. One of the two list contains all data of x, and the other contains all data of y. Return: (list) XList: a list of all data of x. (list) YList: a list of all data of y.", "name": "toLists", "signature": "def toLists(self)"}], "n_methods": 2, "prompt": "Implement the Python class `XYNode` described below.\n\nClass description:\nA class for storing the x and y paird data for the chart related function. This class is a linked list type.\n\nMethod signatures and docstrings:\n- def __init__(self, x, y, next=None): Constructor. Args: (float) x: the data of x axis. (float) y: the data of y axis. (XYNode) next: the next data node.\n- def toLists(self): Convert this linked list to two lists. One of the two list contains all data of x, and the other contains all data of y. Return: (list) XList: a list of all data of x. (list) YList: a list of all data of y.", "prompted_full_text": "Implement the Python class `XYNode` described below.\n\nClass description:\nA class for storing the x and y paird data for the chart related function. This class is a linked list type.\n\nMethod signatures and docstrings:\n- def __init__(self, x, y, next=None): Constructor. Args: (float) x: the data of x axis. (float) y: the data of y axis. (XYNode) next: the next data node.\n- def toLists(self): Convert this linked list to two lists. One of the two list contains all data of x, and the other contains all data of y. 
Return: (list) XList: a list of all data of x. (list) YList: a list of all data of y.\n\n<|skeleton|>\nclass XYNode:\n \"\"\"A class for storing the x and y paird data for the chart related function. This class is a linked list type.\"\"\"\n\n def __init__(self, x, y, next=None):\n \"\"\"Constructor. Args: (float) x: the data of x axis. (float) y: the data of y axis. (XYNode) next: the next data node.\"\"\"\n <|body_0|>\n\n def toLists(self):\n \"\"\"Convert this linked list to two lists. One of the two list contains all data of x, and the other contains all data of y. Return: (list) XList: a list of all data of x. (list) YList: a list of all data of y.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.x = x\n self.y = y\n self.next = next\n<|end_body_0|>\n\n<|body_start_1|>\n XList = []\n YList = []\n pointer = self\n while pointer != None:\n XList.append(pointer.x)\n YList.append(pointer.y)\n pointer = pointer.next\n return (XList, YList)\n<|end_body_1|>\n", "revision_id": "b20af54b915daf7635204e3b942b3ae4624887d7", "skeleton": "<|skeleton|>\nclass XYNode:\n \"\"\"A class for storing the x and y paird data for the chart related function. This class is a linked list type.\"\"\"\n\n def __init__(self, x, y, next=None):\n \"\"\"Constructor. Args: (float) x: the data of x axis. (float) y: the data of y axis. (XYNode) next: the next data node.\"\"\"\n <|body_0|>\n\n def toLists(self):\n \"\"\"Convert this linked list to two lists. One of the two list contains all data of x, and the other contains all data of y. Return: (list) XList: a list of all data of x. (list) YList: a list of all data of y.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class XYNode:\n \"\"\"A class for storing the x and y paird data for the chart related function. This class is a linked list type.\"\"\"\n\n def __init__(self, x, y, next=None):\n \"\"\"Constructor. Args: (float) x: the data of x axis. (float) y: the data of y axis. (XYNode) next: the next data node.\"\"\"\n self.x = x\n self.y = y\n self.next = next\n\n def toLists(self):\n \"\"\"Convert this linked list to two lists. One of the two list contains all data of x, and the other contains all data of y. Return: (list) XList: a list of all data of x. (list) YList: a list of all data of y.\"\"\"\n XList = []\n YList = []\n pointer = self\n while pointer != None:\n XList.append(pointer.x)\n YList.append(pointer.y)\n pointer = pointer.next\n return (XList, YList)\n", "source": "the_stack_v2_python_sparse", "source_path": "Database/plot.py", "source_repo": "jasonlingo/RoadSafety", "split": "val", "star_events_count": 0}
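The `XYNode` record is small enough to exercise directly. A minimal usage sketch, with the class body condensed from the record's solution field (`!= None` tightened to the idiomatic `is not None`, behavior unchanged):

```python
class XYNode:
    def __init__(self, x, y, next=None):
        self.x, self.y, self.next = x, y, next

    def toLists(self):
        XList, YList = [], []
        pointer = self
        while pointer is not None:       # walk the chain once, collecting both axes
            XList.append(pointer.x)
            YList.append(pointer.y)
            pointer = pointer.next
        return (XList, YList)

# Three (x, y) samples chained head-to-tail, then unzipped for charting.
head = XYNode(0.0, 1.0, XYNode(1.0, 2.0, XYNode(2.0, 4.0)))
assert head.toLists() == ([0.0, 1.0, 2.0], [1.0, 2.0, 4.0])
```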
{"blob_id": "d3e0927764167ee96e3ee58f984d21f5cf79854d", "bodies": ["self.check_installation()\nself.gbk_resources = gbk_resources\nself.outdir = Path(outdir)\nself.frag_length = frag_length\nself.process_num = self.max_process_num if process_num is None else process_num\nos.makedirs(self.outdir, exist_ok=True)", "genome_fasta_files: list[Path] = []\nfor gbk in self.gbk_resources:\n suffix = '_reverse.fna' if gbk.reverse else '.fna'\n filename = f'{gbk.name}_{gbk.min_range}-{gbk.max_range}{suffix}'\n genome_fasta_file = self.outdir / filename\n if not genome_fasta_file.exists():\n gbk.write_genome_fasta(genome_fasta_file)\n genome_fasta_files.append(genome_fasta_file)\nalign_coords = []\nfor idx in range(0, len(self.gbk_resources) - 1):\n fa_file1, fa_file2 = (genome_fasta_files[idx], genome_fasta_files[idx + 1])\n name1 = self.gbk_resources[idx].name\n name2 = self.gbk_resources[idx + 1].name\n fastani_outfile = self.outdir / f'{idx + 1:02d}_{name1}-{name2}.out'\n visual_outfile = str(fastani_outfile) + '.visual'\n cmd = f'fastANI -q {fa_file1} -r {fa_file2} -o {fastani_outfile} '\n cmd += f'--visualize -t {self.process_num} --fragLen {self.frag_length}'\n sp.run(cmd, shell=True)\n align_coords.extend(self.parse_visual_file(visual_outfile, name1, name2))\ngbk_name2min_range = {gbk.name: gbk.min_range for gbk in self.gbk_resources}\nfor ac in align_coords:\n ac.ref_start = ac.ref_start + gbk_name2min_range[ac.ref_name]\n ac.ref_end = ac.ref_end + gbk_name2min_range[ac.ref_name]\n ac.query_start = ac.query_start + gbk_name2min_range[ac.query_name]\n ac.query_end = ac.query_end + gbk_name2min_range[ac.query_name]\nreturn align_coords", "align_coords = []\nwith open(visual_file) as f:\n reader = csv.reader(f, delimiter='\\t')\n for row in reader:\n if len(row) == 0:\n continue\n start1, end1 = (int(row[6]), int(row[7]))\n start2, end2 = (int(row[8]), int(row[9]))\n identity = float(row[2])\n align_coord = AlignCoord(start1, end1, start2, end2, end1 - start1 + 1, end2 - start2 + 1, identity, name1, name2)\n align_coords.append(align_coord)\nreturn align_coords"], "bodies_text": "<|body_start_0|>\n self.check_installation()\n self.gbk_resources = gbk_resources\n self.outdir = Path(outdir)\n self.frag_length = frag_length\n self.process_num = self.max_process_num if process_num is None else process_num\n os.makedirs(self.outdir, exist_ok=True)\n<|end_body_0|>\n\n<|body_start_1|>\n genome_fasta_files: list[Path] = []\n for gbk in self.gbk_resources:\n suffix = '_reverse.fna' if gbk.reverse else '.fna'\n filename = f'{gbk.name}_{gbk.min_range}-{gbk.max_range}{suffix}'\n genome_fasta_file = self.outdir / filename\n if not genome_fasta_file.exists():\n gbk.write_genome_fasta(genome_fasta_file)\n genome_fasta_files.append(genome_fasta_file)\n align_coords = []\n for idx in range(0, len(self.gbk_resources) - 1):\n fa_file1, fa_file2 = (genome_fasta_files[idx], genome_fasta_files[idx + 1])\n name1 = self.gbk_resources[idx].name\n name2 = self.gbk_resources[idx + 1].name\n fastani_outfile = self.outdir / f'{idx + 1:02d}_{name1}-{name2}.out'\n visual_outfile = str(fastani_outfile) + '.visual'\n cmd = f'fastANI -q {fa_file1} -r {fa_file2} -o {fastani_outfile} '\n cmd += f'--visualize -t {self.process_num} --fragLen {self.frag_length}'\n sp.run(cmd, shell=True)\n align_coords.extend(self.parse_visual_file(visual_outfile, name1, name2))\n gbk_name2min_range = {gbk.name: gbk.min_range for gbk in self.gbk_resources}\n for ac in align_coords:\n ac.ref_start = ac.ref_start + gbk_name2min_range[ac.ref_name]\n 
ac.ref_end = ac.ref_end + gbk_name2min_range[ac.ref_name]\n ac.query_start = ac.query_start + gbk_name2min_range[ac.query_name]\n ac.query_end = ac.query_end + gbk_name2min_range[ac.query_name]\n return align_coords\n<|end_body_1|>\n\n<|body_start_2|>\n align_coords = []\n with open(visual_file) as f:\n reader = csv.reader(f, delimiter='\\t')\n for row in reader:\n if len(row) == 0:\n continue\n start1, end1 = (int(row[6]), int(row[7]))\n start2, end2 = (int(row[8]), int(row[9]))\n identity = float(row[2])\n align_coord = AlignCoord(start1, end1, start2, end2, end1 - start1 + 1, end2 - start2 + 1, identity, name1, name2)\n align_coords.append(align_coord)\n return align_coords\n<|end_body_2|>\n", "class_docstring": "fastANI Alignment Class", "class_name": "FastAni", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FastAni:\n \"\"\"fastANI Alignment Class\"\"\"\n\n def __init__(self, gbk_resources: list[Genbank], outdir: str | Path, frag_length: int=3000, process_num: int | None=None):\n \"\"\"Parameters ---------- gbk_resources : list[Genbank] Genbank objects outdir : str | Path Output directory frag_length: int Fragment length process_num : int | None, optional Use processor number (Default: `'Max Processor' - 1`)\"\"\"\n <|body_0|>\n\n def run(self) -> list[AlignCoord]:\n \"\"\"Run genome alignment Returns ------- align_coords : list[AlignCoord] Genome alignment coord list\"\"\"\n <|body_1|>\n\n def parse_visual_file(self, visual_file: str | Path, name1: str, name2: str) -> list[AlignCoord]:\n \"\"\"Parse fastANI visual file Parameters ---------- visual_file : str | Path fastANI visual file name1 : str Name1 name2 : str Name2 Returns ------- align_coords : list[AlignCoord] Align Coords\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.check_installation()\n self.gbk_resources = gbk_resources\n self.outdir = Path(outdir)\n self.frag_length = frag_length\n self.process_num = self.max_process_num if process_num is None else process_num\n os.makedirs(self.outdir, exist_ok=True)\n<|end_body_0|>\n\n<|body_start_1|>\n genome_fasta_files: list[Path] = []\n for gbk in self.gbk_resources:\n suffix = '_reverse.fna' if gbk.reverse else '.fna'\n filename = f'{gbk.name}_{gbk.min_range}-{gbk.max_range}{suffix}'\n genome_fasta_file = self.outdir / filename\n if not genome_fasta_file.exists():\n gbk.write_genome_fasta(genome_fasta_file)\n genome_fasta_files.append(genome_fasta_file)\n align_coords = []\n for idx in range(0, len(self.gbk_resources) - 1):\n fa_file1, fa_file2 = (genome_fasta_files[idx], genome_fasta_files[idx + 1])\n name1 = self.gbk_resources[idx].name\n name2 = self.gbk_resources[idx + 1].name\n fastani_outfile = self.outdir / f'{idx + 1:02d}_{name1}-{name2}.out'\n visual_outfile = str(fastani_outfile) + '.visual'\n cmd = f'fastANI -q {fa_file1} -r {fa_file2} -o {fastani_outfile} '\n cmd += f'--visualize -t {self.process_num} --fragLen {self.frag_length}'\n sp.run(cmd, shell=True)\n align_coords.extend(self.parse_visual_file(visual_outfile, name1, name2))\n gbk_name2min_range = {gbk.name: gbk.min_range for gbk in self.gbk_resources}\n for ac in align_coords:\n ac.ref_start = ac.ref_start + gbk_name2min_range[ac.ref_name]\n ac.ref_end = ac.ref_end + gbk_name2min_range[ac.ref_name]\n ac.query_start = ac.query_start + gbk_name2min_range[ac.query_name]\n ac.query_end = ac.query_end + gbk_name2min_range[ac.query_name]\n return align_coords\n<|end_body_1|>\n\n<|body_start_2|>\n align_coords = []\n with 
open(visual_file) as f:\n reader = csv.reader(f, delimiter='\\t')\n for row in reader:\n if len(row) == 0:\n continue\n start1, end1 = (int(row[6]), int(row[7]))\n start2, end2 = (int(row[8]), int(row[9]))\n identity = float(row[2])\n align_coord = AlignCoord(start1, end1, start2, end2, end1 - start1 + 1, end2 - start2 + 1, identity, name1, name2)\n align_coords.append(align_coord)\n return align_coords\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000269", "length_bytes": 19881, "license_type": "permissive", "methods": [{"docstring": "Parameters ---------- gbk_resources : list[Genbank] Genbank objects outdir : str | Path Output directory frag_length: int Fragment length process_num : int | None, optional Use processor number (Default: `'Max Processor' - 1`)", "name": "__init__", "signature": "def __init__(self, gbk_resources: list[Genbank], outdir: str | Path, frag_length: int=3000, process_num: int | None=None)"}, {"docstring": "Run genome alignment Returns ------- align_coords : list[AlignCoord] Genome alignment coord list", "name": "run", "signature": "def run(self) -> list[AlignCoord]"}, {"docstring": "Parse fastANI visual file Parameters ---------- visual_file : str | Path fastANI visual file name1 : str Name1 name2 : str Name2 Returns ------- align_coords : list[AlignCoord] Align Coords", "name": "parse_visual_file", "signature": "def parse_visual_file(self, visual_file: str | Path, name1: str, name2: str) -> list[AlignCoord]"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_001854", "prompt": "Implement the Python class `FastAni` described below.\n\nClass description:\nfastANI Alignment Class\n\nMethod signatures and docstrings:\n- def __init__(self, gbk_resources: list[Genbank], outdir: str | Path, frag_length: int=3000, process_num: int | None=None): Parameters ---------- gbk_resources : list[Genbank] Genbank objects outdir : str | Path Output directory frag_length: int Fragment length process_num : int | None, optional Use processor number (Default: `'Max Processor' - 1`)\n- def run(self) -> list[AlignCoord]: Run genome alignment Returns ------- align_coords : list[AlignCoord] Genome alignment coord list\n- def parse_visual_file(self, visual_file: str | Path, name1: str, name2: str) -> list[AlignCoord]: Parse fastANI visual file Parameters ---------- visual_file : str | Path fastANI visual file name1 : str Name1 name2 : str Name2 Returns ------- align_coords : list[AlignCoord] Align Coords", "prompted_full_text": "Implement the Python class `FastAni` described below.\n\nClass description:\nfastANI Alignment Class\n\nMethod signatures and docstrings:\n- def __init__(self, gbk_resources: list[Genbank], outdir: str | Path, frag_length: int=3000, process_num: int | None=None): Parameters ---------- gbk_resources : list[Genbank] Genbank objects outdir : str | Path Output directory frag_length: int Fragment length process_num : int | None, optional Use processor number (Default: `'Max Processor' - 1`)\n- def run(self) -> list[AlignCoord]: Run genome alignment Returns ------- align_coords : list[AlignCoord] Genome alignment coord list\n- def parse_visual_file(self, visual_file: str | Path, name1: str, name2: str) -> list[AlignCoord]: Parse fastANI visual file Parameters ---------- visual_file : str | Path fastANI visual file name1 : str Name1 name2 : str Name2 Returns ------- align_coords : list[AlignCoord] Align Coords\n\n<|skeleton|>\nclass FastAni:\n \"\"\"fastANI Alignment Class\"\"\"\n\n def __init__(self, gbk_resources: list[Genbank], outdir: 
str | Path, frag_length: int=3000, process_num: int | None=None):\n \"\"\"Parameters ---------- gbk_resources : list[Genbank] Genbank objects outdir : str | Path Output directory frag_length: int Fragment length process_num : int | None, optional Use processor number (Default: `'Max Processor' - 1`)\"\"\"\n <|body_0|>\n\n def run(self) -> list[AlignCoord]:\n \"\"\"Run genome alignment Returns ------- align_coords : list[AlignCoord] Genome alignment coord list\"\"\"\n <|body_1|>\n\n def parse_visual_file(self, visual_file: str | Path, name1: str, name2: str) -> list[AlignCoord]:\n \"\"\"Parse fastANI visual file Parameters ---------- visual_file : str | Path fastANI visual file name1 : str Name1 name2 : str Name2 Returns ------- align_coords : list[AlignCoord] Align Coords\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.check_installation()\n self.gbk_resources = gbk_resources\n self.outdir = Path(outdir)\n self.frag_length = frag_length\n self.process_num = self.max_process_num if process_num is None else process_num\n os.makedirs(self.outdir, exist_ok=True)\n<|end_body_0|>\n\n<|body_start_1|>\n genome_fasta_files: list[Path] = []\n for gbk in self.gbk_resources:\n suffix = '_reverse.fna' if gbk.reverse else '.fna'\n filename = f'{gbk.name}_{gbk.min_range}-{gbk.max_range}{suffix}'\n genome_fasta_file = self.outdir / filename\n if not genome_fasta_file.exists():\n gbk.write_genome_fasta(genome_fasta_file)\n genome_fasta_files.append(genome_fasta_file)\n align_coords = []\n for idx in range(0, len(self.gbk_resources) - 1):\n fa_file1, fa_file2 = (genome_fasta_files[idx], genome_fasta_files[idx + 1])\n name1 = self.gbk_resources[idx].name\n name2 = self.gbk_resources[idx + 1].name\n fastani_outfile = self.outdir / f'{idx + 1:02d}_{name1}-{name2}.out'\n visual_outfile = str(fastani_outfile) + '.visual'\n cmd = f'fastANI -q {fa_file1} -r {fa_file2} -o {fastani_outfile} '\n cmd += f'--visualize -t {self.process_num} --fragLen {self.frag_length}'\n sp.run(cmd, shell=True)\n align_coords.extend(self.parse_visual_file(visual_outfile, name1, name2))\n gbk_name2min_range = {gbk.name: gbk.min_range for gbk in self.gbk_resources}\n for ac in align_coords:\n ac.ref_start = ac.ref_start + gbk_name2min_range[ac.ref_name]\n ac.ref_end = ac.ref_end + gbk_name2min_range[ac.ref_name]\n ac.query_start = ac.query_start + gbk_name2min_range[ac.query_name]\n ac.query_end = ac.query_end + gbk_name2min_range[ac.query_name]\n return align_coords\n<|end_body_1|>\n\n<|body_start_2|>\n align_coords = []\n with open(visual_file) as f:\n reader = csv.reader(f, delimiter='\\t')\n for row in reader:\n if len(row) == 0:\n continue\n start1, end1 = (int(row[6]), int(row[7]))\n start2, end2 = (int(row[8]), int(row[9]))\n identity = float(row[2])\n align_coord = AlignCoord(start1, end1, start2, end2, end1 - start1 + 1, end2 - start2 + 1, identity, name1, name2)\n align_coords.append(align_coord)\n return align_coords\n<|end_body_2|>\n", "revision_id": "2f9f96f8468fe1529ddffa73e0aede2302835595", "skeleton": "<|skeleton|>\nclass FastAni:\n \"\"\"fastANI Alignment Class\"\"\"\n\n def __init__(self, gbk_resources: list[Genbank], outdir: str | Path, frag_length: int=3000, process_num: int | None=None):\n \"\"\"Parameters ---------- gbk_resources : list[Genbank] Genbank objects outdir : str | Path Output directory frag_length: int Fragment length process_num : int | None, optional Use processor number (Default: `'Max Processor' - 1`)\"\"\"\n <|body_0|>\n\n def run(self) -> list[AlignCoord]:\n \"\"\"Run genome 
alignment Returns ------- align_coords : list[AlignCoord] Genome alignment coord list\"\"\"\n <|body_1|>\n\n def parse_visual_file(self, visual_file: str | Path, name1: str, name2: str) -> list[AlignCoord]:\n \"\"\"Parse fastANI visual file Parameters ---------- visual_file : str | Path fastANI visual file name1 : str Name1 name2 : str Name2 Returns ------- align_coords : list[AlignCoord] Align Coords\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class FastAni:\n \"\"\"fastANI Alignment Class\"\"\"\n\n def __init__(self, gbk_resources: list[Genbank], outdir: str | Path, frag_length: int=3000, process_num: int | None=None):\n \"\"\"Parameters ---------- gbk_resources : list[Genbank] Genbank objects outdir : str | Path Output directory frag_length: int Fragment length process_num : int | None, optional Use processor number (Default: `'Max Processor' - 1`)\"\"\"\n self.check_installation()\n self.gbk_resources = gbk_resources\n self.outdir = Path(outdir)\n self.frag_length = frag_length\n self.process_num = self.max_process_num if process_num is None else process_num\n os.makedirs(self.outdir, exist_ok=True)\n\n def run(self) -> list[AlignCoord]:\n \"\"\"Run genome alignment Returns ------- align_coords : list[AlignCoord] Genome alignment coord list\"\"\"\n genome_fasta_files: list[Path] = []\n for gbk in self.gbk_resources:\n suffix = '_reverse.fna' if gbk.reverse else '.fna'\n filename = f'{gbk.name}_{gbk.min_range}-{gbk.max_range}{suffix}'\n genome_fasta_file = self.outdir / filename\n if not genome_fasta_file.exists():\n gbk.write_genome_fasta(genome_fasta_file)\n genome_fasta_files.append(genome_fasta_file)\n align_coords = []\n for idx in range(0, len(self.gbk_resources) - 1):\n fa_file1, fa_file2 = (genome_fasta_files[idx], genome_fasta_files[idx + 1])\n name1 = self.gbk_resources[idx].name\n name2 = self.gbk_resources[idx + 1].name\n fastani_outfile = self.outdir / f'{idx + 1:02d}_{name1}-{name2}.out'\n visual_outfile = str(fastani_outfile) + '.visual'\n cmd = f'fastANI -q {fa_file1} -r {fa_file2} -o {fastani_outfile} '\n cmd += f'--visualize -t {self.process_num} --fragLen {self.frag_length}'\n sp.run(cmd, shell=True)\n align_coords.extend(self.parse_visual_file(visual_outfile, name1, name2))\n gbk_name2min_range = {gbk.name: gbk.min_range for gbk in self.gbk_resources}\n for ac in align_coords:\n ac.ref_start = ac.ref_start + gbk_name2min_range[ac.ref_name]\n ac.ref_end = ac.ref_end + gbk_name2min_range[ac.ref_name]\n ac.query_start = ac.query_start + gbk_name2min_range[ac.query_name]\n ac.query_end = ac.query_end + gbk_name2min_range[ac.query_name]\n return align_coords\n\n def parse_visual_file(self, visual_file: str | Path, name1: str, name2: str) -> list[AlignCoord]:\n \"\"\"Parse fastANI visual file Parameters ---------- visual_file : str | Path fastANI visual file name1 : str Name1 name2 : str Name2 Returns ------- align_coords : list[AlignCoord] Align Coords\"\"\"\n align_coords = []\n with open(visual_file) as f:\n reader = csv.reader(f, delimiter='\\t')\n for row in reader:\n if len(row) == 0:\n continue\n start1, end1 = (int(row[6]), int(row[7]))\n start2, end2 = (int(row[8]), int(row[9]))\n identity = float(row[2])\n align_coord = AlignCoord(start1, end1, start2, end2, end1 - start1 + 1, end2 - start2 + 1, identity, name1, name2)\n align_coords.append(align_coord)\n return align_coords\n", "source": 
"the_stack_v2_python_sparse", "source_path": "notebooks/fastANI/pgv-fastani.py", "source_repo": "moshi4/pyGenomeViz", "split": "val", "star_events_count": 158}
{"blob_id": "19a75ae0f8add5b5d0c7dc479bcfa5e5674ace7c", "bodies": ["rec = super(AccountMoveLine, self).default_get(fields)\nif 'line_ids' not in self._context:\n return rec\nif self._context['line_ids']:\n dic = {}\n line = self._context['line_ids'][-1][2]\n if line:\n if 'name' in line:\n dic['name'] = line['name']\n if 'analytic_account_id' in line:\n dic['analytic_account_id'] = line['analytic_account_id']\n if 'partner_id' in line:\n dic['partner_id'] = line['partner_id']\n if dic:\n rec.update(dic)\n elif self._context['line_ids'][-1]:\n line = self.search_read([('id', '=', self._context['line_ids'][-1][1])], limit=1)[0]\n if 'name' in line:\n dic['name'] = line['name']\n if 'analytic_account_id' in line:\n dic['analytic_account_id'] = line['analytic_account_id'][0] if line['analytic_account_id'] else False\n if 'partner_id' in line:\n dic['partner_id'] = line['partner_id'][0] if line['partner_id'] else False\n if dic:\n rec.update(dic)\nreturn rec", "for line in self:\n amount = line.amount_currency\n if line.currency_id and line.currency_id != line.company_currency_id:\n amount = self.currency_id.with_context(date=line.date).compute(amount, line.company_currency_id)\n if amount > 0:\n line.debit = amount\n elif amount < 0:\n line.credit = -amount"], "bodies_text": "<|body_start_0|>\n rec = super(AccountMoveLine, self).default_get(fields)\n if 'line_ids' not in self._context:\n return rec\n if self._context['line_ids']:\n dic = {}\n line = self._context['line_ids'][-1][2]\n if line:\n if 'name' in line:\n dic['name'] = line['name']\n if 'analytic_account_id' in line:\n dic['analytic_account_id'] = line['analytic_account_id']\n if 'partner_id' in line:\n dic['partner_id'] = line['partner_id']\n if dic:\n rec.update(dic)\n elif self._context['line_ids'][-1]:\n line = self.search_read([('id', '=', self._context['line_ids'][-1][1])], limit=1)[0]\n if 'name' in line:\n dic['name'] = line['name']\n if 'analytic_account_id' in line:\n dic['analytic_account_id'] = line['analytic_account_id'][0] if line['analytic_account_id'] else False\n if 'partner_id' in line:\n dic['partner_id'] = line['partner_id'][0] if line['partner_id'] else False\n if dic:\n rec.update(dic)\n return rec\n<|end_body_0|>\n\n<|body_start_1|>\n for line in self:\n amount = line.amount_currency\n if line.currency_id and line.currency_id != line.company_currency_id:\n amount = self.currency_id.with_context(date=line.date).compute(amount, line.company_currency_id)\n if amount > 0:\n line.debit = amount\n elif amount < 0:\n line.credit = -amount\n<|end_body_1|>\n", "class_docstring": "", "class_name": "AccountMoveLine", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AccountMoveLine:\n\n def default_get(self, fields):\n \"\"\"' Set last name of journal line in case of a manual entry\"\"\"\n <|body_0|>\n\n def _onchange_amount_currency(self):\n \"\"\"Overwrite function : to set default credit and debit if exist and currecny amount is zero Recompute the debit/credit based on amount_currency/currency_id and date. However, date is a related field on account.move. Then, this onchange will not be triggered by the form view by changing the date on the account.move. 
To fix this problem, see _onchange_date method on account.move.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n rec = super(AccountMoveLine, self).default_get(fields)\n if 'line_ids' not in self._context:\n return rec\n if self._context['line_ids']:\n dic = {}\n line = self._context['line_ids'][-1][2]\n if line:\n if 'name' in line:\n dic['name'] = line['name']\n if 'analytic_account_id' in line:\n dic['analytic_account_id'] = line['analytic_account_id']\n if 'partner_id' in line:\n dic['partner_id'] = line['partner_id']\n if dic:\n rec.update(dic)\n elif self._context['line_ids'][-1]:\n line = self.search_read([('id', '=', self._context['line_ids'][-1][1])], limit=1)[0]\n if 'name' in line:\n dic['name'] = line['name']\n if 'analytic_account_id' in line:\n dic['analytic_account_id'] = line['analytic_account_id'][0] if line['analytic_account_id'] else False\n if 'partner_id' in line:\n dic['partner_id'] = line['partner_id'][0] if line['partner_id'] else False\n if dic:\n rec.update(dic)\n return rec\n<|end_body_0|>\n\n<|body_start_1|>\n for line in self:\n amount = line.amount_currency\n if line.currency_id and line.currency_id != line.company_currency_id:\n amount = self.currency_id.with_context(date=line.date).compute(amount, line.company_currency_id)\n if amount > 0:\n line.debit = amount\n elif amount < 0:\n line.credit = -amount\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000270", "length_bytes": 2510, "license_type": "no_license", "methods": [{"docstring": "' Set last name of journal line in case of a manual entry", "name": "default_get", "signature": "def default_get(self, fields)"}, {"docstring": "Overwrite function : to set default credit and debit if exist and currecny amount is zero Recompute the debit/credit based on amount_currency/currency_id and date. However, date is a related field on account.move. Then, this onchange will not be triggered by the form view by changing the date on the account.move. To fix this problem, see _onchange_date method on account.move.", "name": "_onchange_amount_currency", "signature": "def _onchange_amount_currency(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_039618", "prompt": "Implement the Python class `AccountMoveLine` described below.\n\nClass description:\nImplement the AccountMoveLine class.\n\nMethod signatures and docstrings:\n- def default_get(self, fields): ' Set last name of journal line in case of a manual entry\n- def _onchange_amount_currency(self): Overwrite function : to set default credit and debit if exist and currecny amount is zero Recompute the debit/credit based on amount_currency/currency_id and date. However, date is a related field on account.move. Then, this onchange will not be triggered by the form view by changing the date on the account.move. To fix this problem, see _onchange_date method on account.move.", "prompted_full_text": "Implement the Python class `AccountMoveLine` described below.\n\nClass description:\nImplement the AccountMoveLine class.\n\nMethod signatures and docstrings:\n- def default_get(self, fields): ' Set last name of journal line in case of a manual entry\n- def _onchange_amount_currency(self): Overwrite function : to set default credit and debit if exist and currecny amount is zero Recompute the debit/credit based on amount_currency/currency_id and date. However, date is a related field on account.move. Then, this onchange will not be triggered by the form view by changing the date on the account.move. 
To fix this problem, see _onchange_date method on account.move.\n\n<|skeleton|>\nclass AccountMoveLine:\n\n def default_get(self, fields):\n \"\"\"' Set last name of journal line in case of a manual entry\"\"\"\n <|body_0|>\n\n def _onchange_amount_currency(self):\n \"\"\"Overwrite function : to set default credit and debit if exist and currecny amount is zero Recompute the debit/credit based on amount_currency/currency_id and date. However, date is a related field on account.move. Then, this onchange will not be triggered by the form view by changing the date on the account.move. To fix this problem, see _onchange_date method on account.move.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n rec = super(AccountMoveLine, self).default_get(fields)\n if 'line_ids' not in self._context:\n return rec\n if self._context['line_ids']:\n dic = {}\n line = self._context['line_ids'][-1][2]\n if line:\n if 'name' in line:\n dic['name'] = line['name']\n if 'analytic_account_id' in line:\n dic['analytic_account_id'] = line['analytic_account_id']\n if 'partner_id' in line:\n dic['partner_id'] = line['partner_id']\n if dic:\n rec.update(dic)\n elif self._context['line_ids'][-1]:\n line = self.search_read([('id', '=', self._context['line_ids'][-1][1])], limit=1)[0]\n if 'name' in line:\n dic['name'] = line['name']\n if 'analytic_account_id' in line:\n dic['analytic_account_id'] = line['analytic_account_id'][0] if line['analytic_account_id'] else False\n if 'partner_id' in line:\n dic['partner_id'] = line['partner_id'][0] if line['partner_id'] else False\n if dic:\n rec.update(dic)\n return rec\n<|end_body_0|>\n\n<|body_start_1|>\n for line in self:\n amount = line.amount_currency\n if line.currency_id and line.currency_id != line.company_currency_id:\n amount = self.currency_id.with_context(date=line.date).compute(amount, line.company_currency_id)\n if amount > 0:\n line.debit = amount\n elif amount < 0:\n line.credit = -amount\n<|end_body_1|>\n", "revision_id": "f392c7f17c9a348b00fc9db2e460a8ba010b7748", "skeleton": "<|skeleton|>\nclass AccountMoveLine:\n\n def default_get(self, fields):\n \"\"\"' Set last name of journal line in case of a manual entry\"\"\"\n <|body_0|>\n\n def _onchange_amount_currency(self):\n \"\"\"Overwrite function : to set default credit and debit if exist and currecny amount is zero Recompute the debit/credit based on amount_currency/currency_id and date. However, date is a related field on account.move. Then, this onchange will not be triggered by the form view by changing the date on the account.move. 
To fix this problem, see _onchange_date method on account.move.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class AccountMoveLine:\n def default_get(self, fields):\n \"\"\"' Set last name of journal line in case of a manual entry\"\"\"\n rec = super(AccountMoveLine, self).default_get(fields)\n if 'line_ids' not in self._context:\n return rec\n if self._context['line_ids']:\n dic = {}\n line = self._context['line_ids'][-1][2]\n if line:\n if 'name' in line:\n dic['name'] = line['name']\n if 'analytic_account_id' in line:\n dic['analytic_account_id'] = line['analytic_account_id']\n if 'partner_id' in line:\n dic['partner_id'] = line['partner_id']\n if dic:\n rec.update(dic)\n elif self._context['line_ids'][-1]:\n line = self.search_read([('id', '=', self._context['line_ids'][-1][1])], limit=1)[0]\n if 'name' in line:\n dic['name'] = line['name']\n if 'analytic_account_id' in line:\n dic['analytic_account_id'] = line['analytic_account_id'][0] if line['analytic_account_id'] else False\n if 'partner_id' in line:\n dic['partner_id'] = line['partner_id'][0] if line['partner_id'] else False\n if dic:\n rec.update(dic)\n return rec\n\n def _onchange_amount_currency(self):\n \"\"\"Overwrite function : to set default credit and debit if exist and currecny amount is zero Recompute the debit/credit based on amount_currency/currency_id and date. However, date is a related field on account.move. Then, this onchange will not be triggered by the form view by changing the date on the account.move. To fix this problem, see _onchange_date method on account.move.\"\"\"\n for line in self:\n amount = line.amount_currency\n if line.currency_id and line.currency_id != line.company_currency_id:\n amount = self.currency_id.with_context(date=line.date).compute(amount, line.company_currency_id)\n if amount > 0:\n line.debit = amount\n elif amount < 0:\n line.credit = -amount\n", "source": "the_stack_v2_python_sparse", "source_path": "hr-new_branch/auto_complete_journal_entry/model/account_move_line.py", "source_repo": "mfhm95/royalLine01052019", "split": "val", "star_events_count": 0}
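`default_get` above keys off Odoo's one2many command tuples in the context: a pending row arrives as `(0, 0, vals)` with `vals` holding the field values, while an already-saved row arrives as a command carrying a database id, which is why the second branch falls back to `search_read`. The first branch needs no ORM at all, so it can be sketched standalone (the function name is hypothetical; the field names follow the record):

```python
def defaults_from_line_ids(line_ids):
    """Mimic the dict branch of default_get: copy selected fields
    from the most recently edited journal line."""
    if not line_ids:
        return {}
    vals = line_ids[-1][2]              # (0, 0, vals) -> dict of field values
    dic = {}
    if vals:
        for field in ("name", "analytic_account_id", "partner_id"):
            if field in vals:
                dic[field] = vals[field]
    return dic

ctx = {"line_ids": [(0, 0, {"name": "Opening entry", "partner_id": 7})]}
assert defaults_from_line_ids(ctx["line_ids"]) == {"name": "Opening entry", "partner_id": 7}
```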
{"blob_id": "0bd1808eecfc0c246b65e26d8ac90132f0da6860", "bodies": ["_property = mall_models.ProductModelProperty.objects.create(owner=request.manager, name='')\nresponse = create_response(200)\nresponse.data = _property.id\nreturn response.get_response()", "id = request.POST['id']\nfield = request.POST['field']\nif 'name' == field:\n name = request.POST['name']\n mall_models.ProductModelProperty.objects.filter(owner=request.manager, id=id).update(name=name)\nelif 'type' == field:\n _type = request.POST.get('type')\n if _type and _type == 'text':\n _type = mall_models.PRODUCT_MODEL_PROPERTY_TYPE_TEXT\n else:\n _type = mall_models.PRODUCT_MODEL_PROPERTY_TYPE_IMAGE\n mall_models.ProductModelProperty.objects.filter(owner=request.manager, id=id).update(type=_type)\nresponse = create_response(200)\nreturn response.get_response()", "property_id = request.POST['id']\nmodel_property = mall_models.ProductModelProperty.objects.get(owner=request.manager, id=property_id)\nsignals.pre_delete_product_model_property.send(sender=mall_models.ProductModelProperty, model_property=model_property, request=request)\nmall_models.ProductModelPropertyValue.objects.filter(property_id=property_id).update(is_deleted=True)\nmall_models.ProductModelProperty.objects.filter(id=property_id).update(is_deleted=True)\nresponse = create_response(200)\nreturn response.get_response()"], "bodies_text": "<|body_start_0|>\n _property = mall_models.ProductModelProperty.objects.create(owner=request.manager, name='')\n response = create_response(200)\n response.data = _property.id\n return response.get_response()\n<|end_body_0|>\n\n<|body_start_1|>\n id = request.POST['id']\n field = request.POST['field']\n if 'name' == field:\n name = request.POST['name']\n mall_models.ProductModelProperty.objects.filter(owner=request.manager, id=id).update(name=name)\n elif 'type' == field:\n _type = request.POST.get('type')\n if _type and _type == 'text':\n _type = mall_models.PRODUCT_MODEL_PROPERTY_TYPE_TEXT\n else:\n _type = mall_models.PRODUCT_MODEL_PROPERTY_TYPE_IMAGE\n mall_models.ProductModelProperty.objects.filter(owner=request.manager, id=id).update(type=_type)\n response = create_response(200)\n return response.get_response()\n<|end_body_1|>\n\n<|body_start_2|>\n property_id = request.POST['id']\n model_property = mall_models.ProductModelProperty.objects.get(owner=request.manager, id=property_id)\n signals.pre_delete_product_model_property.send(sender=mall_models.ProductModelProperty, model_property=model_property, request=request)\n mall_models.ProductModelPropertyValue.objects.filter(property_id=property_id).update(is_deleted=True)\n mall_models.ProductModelProperty.objects.filter(id=property_id).update(is_deleted=True)\n response = create_response(200)\n return response.get_response()\n<|end_body_2|>\n", "class_docstring": "", "class_name": "ModelProperty", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ModelProperty:\n\n def api_put(request):\n \"\"\"创建一个空的规格属性 Return json: data: %d\"\"\"\n <|body_0|>\n\n def api_post(request):\n \"\"\"更新规格属性. Args: id: 规格id filed: 指定更新规格的哪个属性: name or type name -> 新的规格名, type -> 新的规格类型. 
text或image\"\"\"\n <|body_1|>\n\n def api_delete(request):\n \"\"\"删除规格属性 Args: id: 规格id Note: 删除规格属性后,会send pre_delete_product_model_property signal, 处理由于规格变化引起的商品状态的变化.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n _property = mall_models.ProductModelProperty.objects.create(owner=request.manager, name='')\n response = create_response(200)\n response.data = _property.id\n return response.get_response()\n<|end_body_0|>\n\n<|body_start_1|>\n id = request.POST['id']\n field = request.POST['field']\n if 'name' == field:\n name = request.POST['name']\n mall_models.ProductModelProperty.objects.filter(owner=request.manager, id=id).update(name=name)\n elif 'type' == field:\n _type = request.POST.get('type')\n if _type and _type == 'text':\n _type = mall_models.PRODUCT_MODEL_PROPERTY_TYPE_TEXT\n else:\n _type = mall_models.PRODUCT_MODEL_PROPERTY_TYPE_IMAGE\n mall_models.ProductModelProperty.objects.filter(owner=request.manager, id=id).update(type=_type)\n response = create_response(200)\n return response.get_response()\n<|end_body_1|>\n\n<|body_start_2|>\n property_id = request.POST['id']\n model_property = mall_models.ProductModelProperty.objects.get(owner=request.manager, id=property_id)\n signals.pre_delete_product_model_property.send(sender=mall_models.ProductModelProperty, model_property=model_property, request=request)\n mall_models.ProductModelPropertyValue.objects.filter(property_id=property_id).update(is_deleted=True)\n mall_models.ProductModelProperty.objects.filter(id=property_id).update(is_deleted=True)\n response = create_response(200)\n return response.get_response()\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000271", "length_bytes": 9873, "license_type": "no_license", "methods": [{"docstring": "创建一个空的规格属性 Return json: data: %d", "name": "api_put", "signature": "def api_put(request)"}, {"docstring": "更新规格属性. Args: id: 规格id filed: 指定更新规格的哪个属性: name or type name -> 新的规格名, type -> 新的规格类型. text或image", "name": "api_post", "signature": "def api_post(request)"}, {"docstring": "删除规格属性 Args: id: 规格id Note: 删除规格属性后,会send pre_delete_product_model_property signal, 处理由于规格变化引起的商品状态的变化.", "name": "api_delete", "signature": "def api_delete(request)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_023519", "prompt": "Implement the Python class `ModelProperty` described below.\n\nClass description:\nImplement the ModelProperty class.\n\nMethod signatures and docstrings:\n- def api_put(request): 创建一个空的规格属性 Return json: data: %d\n- def api_post(request): 更新规格属性. Args: id: 规格id filed: 指定更新规格的哪个属性: name or type name -> 新的规格名, type -> 新的规格类型. text或image\n- def api_delete(request): 删除规格属性 Args: id: 规格id Note: 删除规格属性后,会send pre_delete_product_model_property signal, 处理由于规格变化引起的商品状态的变化.", "prompted_full_text": "Implement the Python class `ModelProperty` described below.\n\nClass description:\nImplement the ModelProperty class.\n\nMethod signatures and docstrings:\n- def api_put(request): 创建一个空的规格属性 Return json: data: %d\n- def api_post(request): 更新规格属性. Args: id: 规格id filed: 指定更新规格的哪个属性: name or type name -> 新的规格名, type -> 新的规格类型. text或image\n- def api_delete(request): 删除规格属性 Args: id: 规格id Note: 删除规格属性后,会send pre_delete_product_model_property signal, 处理由于规格变化引起的商品状态的变化.\n\n<|skeleton|>\nclass ModelProperty:\n\n def api_put(request):\n \"\"\"创建一个空的规格属性 Return json: data: %d\"\"\"\n <|body_0|>\n\n def api_post(request):\n \"\"\"更新规格属性. Args: id: 规格id filed: 指定更新规格的哪个属性: name or type name -> 新的规格名, type -> 新的规格类型. 
text或image\"\"\"\n <|body_1|>\n\n def api_delete(request):\n \"\"\"删除规格属性 Args: id: 规格id Note: 删除规格属性后,会send pre_delete_product_model_property signal, 处理由于规格变化引起的商品状态的变化.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n _property = mall_models.ProductModelProperty.objects.create(owner=request.manager, name='')\n response = create_response(200)\n response.data = _property.id\n return response.get_response()\n<|end_body_0|>\n\n<|body_start_1|>\n id = request.POST['id']\n field = request.POST['field']\n if 'name' == field:\n name = request.POST['name']\n mall_models.ProductModelProperty.objects.filter(owner=request.manager, id=id).update(name=name)\n elif 'type' == field:\n _type = request.POST.get('type')\n if _type and _type == 'text':\n _type = mall_models.PRODUCT_MODEL_PROPERTY_TYPE_TEXT\n else:\n _type = mall_models.PRODUCT_MODEL_PROPERTY_TYPE_IMAGE\n mall_models.ProductModelProperty.objects.filter(owner=request.manager, id=id).update(type=_type)\n response = create_response(200)\n return response.get_response()\n<|end_body_1|>\n\n<|body_start_2|>\n property_id = request.POST['id']\n model_property = mall_models.ProductModelProperty.objects.get(owner=request.manager, id=property_id)\n signals.pre_delete_product_model_property.send(sender=mall_models.ProductModelProperty, model_property=model_property, request=request)\n mall_models.ProductModelPropertyValue.objects.filter(property_id=property_id).update(is_deleted=True)\n mall_models.ProductModelProperty.objects.filter(id=property_id).update(is_deleted=True)\n response = create_response(200)\n return response.get_response()\n<|end_body_2|>\n", "revision_id": "8b2f7befe92841bcc35e0e60cac5958ef3f3af54", "skeleton": "<|skeleton|>\nclass ModelProperty:\n\n def api_put(request):\n \"\"\"创建一个空的规格属性 Return json: data: %d\"\"\"\n <|body_0|>\n\n def api_post(request):\n \"\"\"更新规格属性. Args: id: 规格id filed: 指定更新规格的哪个属性: name or type name -> 新的规格名, type -> 新的规格类型. text或image\"\"\"\n <|body_1|>\n\n def api_delete(request):\n \"\"\"删除规格属性 Args: id: 规格id Note: 删除规格属性后,会send pre_delete_product_model_property signal, 处理由于规格变化引起的商品状态的变化.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ModelProperty:\n def api_put(request):\n \"\"\"创建一个空的规格属性 Return json: data: %d\"\"\"\n _property = mall_models.ProductModelProperty.objects.create(owner=request.manager, name='')\n response = create_response(200)\n response.data = _property.id\n return response.get_response()\n\n def api_post(request):\n \"\"\"更新规格属性. Args: id: 规格id filed: 指定更新规格的哪个属性: name or type name -> 新的规格名, type -> 新的规格类型. 
text或image\"\"\"\n id = request.POST['id']\n field = request.POST['field']\n if 'name' == field:\n name = request.POST['name']\n mall_models.ProductModelProperty.objects.filter(owner=request.manager, id=id).update(name=name)\n elif 'type' == field:\n _type = request.POST.get('type')\n if _type and _type == 'text':\n _type = mall_models.PRODUCT_MODEL_PROPERTY_TYPE_TEXT\n else:\n _type = mall_models.PRODUCT_MODEL_PROPERTY_TYPE_IMAGE\n mall_models.ProductModelProperty.objects.filter(owner=request.manager, id=id).update(type=_type)\n response = create_response(200)\n return response.get_response()\n\n def api_delete(request):\n \"\"\"删除规格属性 Args: id: 规格id Note: 删除规格属性后,会send pre_delete_product_model_property signal, 处理由于规格变化引起的商品状态的变化.\"\"\"\n property_id = request.POST['id']\n model_property = mall_models.ProductModelProperty.objects.get(owner=request.manager, id=property_id)\n signals.pre_delete_product_model_property.send(sender=mall_models.ProductModelProperty, model_property=model_property, request=request)\n mall_models.ProductModelPropertyValue.objects.filter(property_id=property_id).update(is_deleted=True)\n mall_models.ProductModelProperty.objects.filter(id=property_id).update(is_deleted=True)\n response = create_response(200)\n return response.get_response()\n", "source": "the_stack_v2_python_sparse", "source_path": "weapp/mall/product/model_property.py", "source_repo": "chengdg/weizoom", "split": "val", "star_events_count": 1}
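Per its (Chinese) docstring, `api_post` dispatches on the POST `field` parameter: `'name'` renames the property, `'type'` switches it between text and image, with image as the fallback for anything that is not exactly `'text'`. That branching is easy to lose among the ORM calls, so here it is reduced to a pure function; the constant values are placeholders, since the real ones are defined in `mall_models`:

```python
PRODUCT_MODEL_PROPERTY_TYPE_TEXT = 1    # placeholder value
PRODUCT_MODEL_PROPERTY_TYPE_IMAGE = 2   # placeholder value

def updated_values(post):
    """Return the column/value pair api_post would write for this request."""
    field = post["field"]
    if field == "name":
        return {"name": post["name"]}
    if field == "type":
        if post.get("type") == "text":
            return {"type": PRODUCT_MODEL_PROPERTY_TYPE_TEXT}
        return {"type": PRODUCT_MODEL_PROPERTY_TYPE_IMAGE}   # default: image
    return {}

assert updated_values({"field": "name", "name": "颜色"}) == {"name": "颜色"}
assert updated_values({"field": "type", "type": "text"}) == {"type": 1}
assert updated_values({"field": "type"}) == {"type": 2}
```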
{"blob_id": "f1ec4eeda520fa7c3ef2b4d6ed013a879cce534e", "bodies": ["super(MultiHeadAttnMlpModel, self).__init__()\nnum_dim = 500\nnum_seq = 100\nself.attn = MultiHeadAttention(num_head, num_dim, num_dim_k, num_dim_v, d_rate_attn)\nself.bn = nn.BatchNorm1d(num_dim)\nself.mlp = nn.Sequential()\nself.mlp.add_module('fc1', nn.Linear(num_seq * num_dim, num_dim))\nself.mlp.add_module('bn1', nn.BatchNorm1d(num_dim))\nself.mlp.add_module('act_fun1', nnActi.get_acti(act_func1))\nself.mlp.add_module('fc2', nn.Linear(num_dim, dim2))\nself.mlp.add_module('bn2', nn.BatchNorm1d(dim2))\nself.mlp.add_module('act_fun2', nnActi.get_acti(act_func2))\nself.mlp.add_module('fc3', nn.Linear(dim2, 1))", "data_in_chunks = torch.split(data_in, seq_len, dim=1)\ndata_in_sys = data_in_chunks[0]\ndata_in_ref = data_in_chunks[1]\ndata_attn, _ = self.attn(data_in_ref, data_in_sys, data_in_sys)\nbatch_size, num_q, num_dim = data_attn.size()\ndata_attn = data_attn.view(batch_size, -1)\nout = self.mlp(data_attn)\nreturn out"], "bodies_text": "<|body_start_0|>\n super(MultiHeadAttnMlpModel, self).__init__()\n num_dim = 500\n num_seq = 100\n self.attn = MultiHeadAttention(num_head, num_dim, num_dim_k, num_dim_v, d_rate_attn)\n self.bn = nn.BatchNorm1d(num_dim)\n self.mlp = nn.Sequential()\n self.mlp.add_module('fc1', nn.Linear(num_seq * num_dim, num_dim))\n self.mlp.add_module('bn1', nn.BatchNorm1d(num_dim))\n self.mlp.add_module('act_fun1', nnActi.get_acti(act_func1))\n self.mlp.add_module('fc2', nn.Linear(num_dim, dim2))\n self.mlp.add_module('bn2', nn.BatchNorm1d(dim2))\n self.mlp.add_module('act_fun2', nnActi.get_acti(act_func2))\n self.mlp.add_module('fc3', nn.Linear(dim2, 1))\n<|end_body_0|>\n\n<|body_start_1|>\n data_in_chunks = torch.split(data_in, seq_len, dim=1)\n data_in_sys = data_in_chunks[0]\n data_in_ref = data_in_chunks[1]\n data_attn, _ = self.attn(data_in_ref, data_in_sys, data_in_sys)\n batch_size, num_q, num_dim = data_attn.size()\n data_attn = data_attn.view(batch_size, -1)\n out = self.mlp(data_attn)\n return out\n<|end_body_1|>\n", "class_docstring": "", "class_name": "MultiHeadAttnMlpModel", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MultiHeadAttnMlpModel:\n\n def __init__(self, num_head=8, num_dim_k=64, num_dim_v=64, d_rate_attn=0.1, act_func1='LeakyReLU', dim2=100, act_func2='LeakyReLU'):\n \"\"\"num_head: for Attn, the number of head in MultiHeadAttention num_dim_k: for Attn, the number of dimension query and key will mapping to num_dim_v: for Attn, the number of dimension value will mapping to d_rate_attn: drop out rate for MultiHeadAttention\"\"\"\n <|body_0|>\n\n def forward(self, data_in):\n \"\"\"data_in: (batch, seq_len * 2, num_dim)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(MultiHeadAttnMlpModel, self).__init__()\n num_dim = 500\n num_seq = 100\n self.attn = MultiHeadAttention(num_head, num_dim, num_dim_k, num_dim_v, d_rate_attn)\n self.bn = nn.BatchNorm1d(num_dim)\n self.mlp = nn.Sequential()\n self.mlp.add_module('fc1', nn.Linear(num_seq * num_dim, num_dim))\n self.mlp.add_module('bn1', nn.BatchNorm1d(num_dim))\n self.mlp.add_module('act_fun1', nnActi.get_acti(act_func1))\n self.mlp.add_module('fc2', nn.Linear(num_dim, dim2))\n self.mlp.add_module('bn2', nn.BatchNorm1d(dim2))\n self.mlp.add_module('act_fun2', nnActi.get_acti(act_func2))\n self.mlp.add_module('fc3', nn.Linear(dim2, 1))\n<|end_body_0|>\n\n<|body_start_1|>\n data_in_chunks = torch.split(data_in, seq_len, dim=1)\n data_in_sys = 
data_in_chunks[0]\n data_in_ref = data_in_chunks[1]\n data_attn, _ = self.attn(data_in_ref, data_in_sys, data_in_sys)\n batch_size, num_q, num_dim = data_attn.size()\n data_attn = data_attn.view(batch_size, -1)\n out = self.mlp(data_attn)\n return out\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000272", "length_bytes": 21586, "license_type": "no_license", "methods": [{"docstring": "num_head: for Attn, the number of head in MultiHeadAttention num_dim_k: for Attn, the number of dimension query and key will mapping to num_dim_v: for Attn, the number of dimension value will mapping to d_rate_attn: drop out rate for MultiHeadAttention", "name": "__init__", "signature": "def __init__(self, num_head=8, num_dim_k=64, num_dim_v=64, d_rate_attn=0.1, act_func1='LeakyReLU', dim2=100, act_func2='LeakyReLU')"}, {"docstring": "data_in: (batch, seq_len * 2, num_dim)", "name": "forward", "signature": "def forward(self, data_in)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_047301", "prompt": "Implement the Python class `MultiHeadAttnMlpModel` described below.\n\nClass description:\nImplement the MultiHeadAttnMlpModel class.\n\nMethod signatures and docstrings:\n- def __init__(self, num_head=8, num_dim_k=64, num_dim_v=64, d_rate_attn=0.1, act_func1='LeakyReLU', dim2=100, act_func2='LeakyReLU'): num_head: for Attn, the number of head in MultiHeadAttention num_dim_k: for Attn, the number of dimension query and key will mapping to num_dim_v: for Attn, the number of dimension value will mapping to d_rate_attn: drop out rate for MultiHeadAttention\n- def forward(self, data_in): data_in: (batch, seq_len * 2, num_dim)", "prompted_full_text": "Implement the Python class `MultiHeadAttnMlpModel` described below.\n\nClass description:\nImplement the MultiHeadAttnMlpModel class.\n\nMethod signatures and docstrings:\n- def __init__(self, num_head=8, num_dim_k=64, num_dim_v=64, d_rate_attn=0.1, act_func1='LeakyReLU', dim2=100, act_func2='LeakyReLU'): num_head: for Attn, the number of head in MultiHeadAttention num_dim_k: for Attn, the number of dimension query and key will mapping to num_dim_v: for Attn, the number of dimension value will mapping to d_rate_attn: drop out rate for MultiHeadAttention\n- def forward(self, data_in): data_in: (batch, seq_len * 2, num_dim)\n\n<|skeleton|>\nclass MultiHeadAttnMlpModel:\n\n def __init__(self, num_head=8, num_dim_k=64, num_dim_v=64, d_rate_attn=0.1, act_func1='LeakyReLU', dim2=100, act_func2='LeakyReLU'):\n \"\"\"num_head: for Attn, the number of head in MultiHeadAttention num_dim_k: for Attn, the number of dimension query and key will mapping to num_dim_v: for Attn, the number of dimension value will mapping to d_rate_attn: drop out rate for MultiHeadAttention\"\"\"\n <|body_0|>\n\n def forward(self, data_in):\n \"\"\"data_in: (batch, seq_len * 2, num_dim)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(MultiHeadAttnMlpModel, self).__init__()\n num_dim = 500\n num_seq = 100\n self.attn = MultiHeadAttention(num_head, num_dim, num_dim_k, num_dim_v, d_rate_attn)\n self.bn = nn.BatchNorm1d(num_dim)\n self.mlp = nn.Sequential()\n self.mlp.add_module('fc1', nn.Linear(num_seq * num_dim, num_dim))\n self.mlp.add_module('bn1', nn.BatchNorm1d(num_dim))\n self.mlp.add_module('act_fun1', nnActi.get_acti(act_func1))\n self.mlp.add_module('fc2', nn.Linear(num_dim, dim2))\n self.mlp.add_module('bn2', nn.BatchNorm1d(dim2))\n self.mlp.add_module('act_fun2', nnActi.get_acti(act_func2))\n self.mlp.add_module('fc3', nn.Linear(dim2, 
1))\n<|end_body_0|>\n\n<|body_start_1|>\n data_in_chunks = torch.split(data_in, seq_len, dim=1)\n data_in_sys = data_in_chunks[0]\n data_in_ref = data_in_chunks[1]\n data_attn, _ = self.attn(data_in_ref, data_in_sys, data_in_sys)\n batch_size, num_q, num_dim = data_attn.size()\n data_attn = data_attn.view(batch_size, -1)\n out = self.mlp(data_attn)\n return out\n<|end_body_1|>\n", "revision_id": "be85ee0c1fa915ae08ffb857643f9429a7749c0e", "skeleton": "<|skeleton|>\nclass MultiHeadAttnMlpModel:\n\n def __init__(self, num_head=8, num_dim_k=64, num_dim_v=64, d_rate_attn=0.1, act_func1='LeakyReLU', dim2=100, act_func2='LeakyReLU'):\n \"\"\"num_head: for Attn, the number of head in MultiHeadAttention num_dim_k: for Attn, the number of dimension query and key will mapping to num_dim_v: for Attn, the number of dimension value will mapping to d_rate_attn: drop out rate for MultiHeadAttention\"\"\"\n <|body_0|>\n\n def forward(self, data_in):\n \"\"\"data_in: (batch, seq_len * 2, num_dim)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class MultiHeadAttnMlpModel:\n def __init__(self, num_head=8, num_dim_k=64, num_dim_v=64, d_rate_attn=0.1, act_func1='LeakyReLU', dim2=100, act_func2='LeakyReLU'):\n \"\"\"num_head: for Attn, the number of head in MultiHeadAttention num_dim_k: for Attn, the number of dimension query and key will mapping to num_dim_v: for Attn, the number of dimension value will mapping to d_rate_attn: drop out rate for MultiHeadAttention\"\"\"\n super(MultiHeadAttnMlpModel, self).__init__()\n num_dim = 500\n num_seq = 100\n self.attn = MultiHeadAttention(num_head, num_dim, num_dim_k, num_dim_v, d_rate_attn)\n self.bn = nn.BatchNorm1d(num_dim)\n self.mlp = nn.Sequential()\n self.mlp.add_module('fc1', nn.Linear(num_seq * num_dim, num_dim))\n self.mlp.add_module('bn1', nn.BatchNorm1d(num_dim))\n self.mlp.add_module('act_fun1', nnActi.get_acti(act_func1))\n self.mlp.add_module('fc2', nn.Linear(num_dim, dim2))\n self.mlp.add_module('bn2', nn.BatchNorm1d(dim2))\n self.mlp.add_module('act_fun2', nnActi.get_acti(act_func2))\n self.mlp.add_module('fc3', nn.Linear(dim2, 1))\n\n def forward(self, data_in):\n \"\"\"data_in: (batch, seq_len * 2, num_dim)\"\"\"\n data_in_chunks = torch.split(data_in, seq_len, dim=1)\n data_in_sys = data_in_chunks[0]\n data_in_ref = data_in_chunks[1]\n data_attn, _ = self.attn(data_in_ref, data_in_sys, data_in_sys)\n batch_size, num_q, num_dim = data_attn.size()\n data_attn = data_attn.view(batch_size, -1)\n out = self.mlp(data_attn)\n return out\n", "source": "the_stack_v2_python_sparse", "source_path": "models/FullHiddenModel.py", "source_repo": "HuangYiran/MasterArbeit", "split": "val", "star_events_count": 1}
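One thing to flag in the record above: `forward` reads `seq_len`, which is never defined in the method, the class, or the skeleton — in the source repo it is presumably a module-level constant matching the `num_seq = 100` hard-coded in `__init__`. The shape plumbing can still be reproduced with stock PyTorch stand-ins; in this sketch `nn.MultiheadAttention` replaces the project-local `MultiHeadAttention` (so `num_heads` is adjusted to divide 500) and the BatchNorm layers are dropped for brevity:

```python
import torch
import torch.nn as nn

seq_len, num_dim, batch = 100, 500, 4   # seq_len assumed equal to num_seq

attn = nn.MultiheadAttention(embed_dim=num_dim, num_heads=10, batch_first=True)
mlp = nn.Sequential(
    nn.Linear(seq_len * num_dim, num_dim), nn.LeakyReLU(),
    nn.Linear(num_dim, 100), nn.LeakyReLU(),
    nn.Linear(100, 1),
)

data_in = torch.randn(batch, seq_len * 2, num_dim)      # system ++ reference
data_in_sys, data_in_ref = torch.split(data_in, seq_len, dim=1)
# reference attends over the system sentence (query=ref, key=value=sys),
# matching the record's attn(data_in_ref, data_in_sys, data_in_sys)
data_attn, _ = attn(data_in_ref, data_in_sys, data_in_sys)
out = mlp(data_attn.reshape(batch, -1))                 # flatten seq x dim
assert out.shape == (batch, 1)
```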
{"blob_id": "7761259dab72aad87d471ba08bf6310965a2fbd9", "bodies": ["context = super(HelpDetailView, self).get_context_data(**kwargs)\nobj = self.get_object()\nqueryset = self.get_queryset().filter(db_help_category=obj.db_help_category).order_by(Lower('db_key'))\ncontext['topic_list'] = queryset\nobjs = list(queryset)\nfor i, x in enumerate(objs):\n if obj is x:\n break\ntry:\n assert i + 1 <= len(objs) and objs[i + 1] is not obj\n context['topic_next'] = objs[i + 1]\nexcept:\n context['topic_next'] = None\ntry:\n assert i - 1 >= 0 and objs[i - 1] is not obj\n context['topic_previous'] = objs[i - 1]\nexcept:\n context['topic_previous'] = None\ntext = obj.db_entrytext\ntext = text.replace('\\r\\n\\r\\n', '\\n\\n')\ntext = text.replace('\\r\\n', '\\n')\ntext = text.replace('\\n', ' ')\ncontext['entry_text'] = text\nreturn context", "if not queryset:\n queryset = self.get_queryset()\ncategory = slugify(self.kwargs.get('category', ''))\ntopic = slugify(self.kwargs.get('topic', ''))\nobj = next((x for x in queryset if slugify(x.db_help_category) == category and slugify(x.db_key) == topic), None)\nif not obj:\n raise HttpResponseBadRequest('No %(verbose_name)s found matching the query' % {'verbose_name': queryset.model._meta.verbose_name})\nreturn obj"], "bodies_text": "<|body_start_0|>\n context = super(HelpDetailView, self).get_context_data(**kwargs)\n obj = self.get_object()\n queryset = self.get_queryset().filter(db_help_category=obj.db_help_category).order_by(Lower('db_key'))\n context['topic_list'] = queryset\n objs = list(queryset)\n for i, x in enumerate(objs):\n if obj is x:\n break\n try:\n assert i + 1 <= len(objs) and objs[i + 1] is not obj\n context['topic_next'] = objs[i + 1]\n except:\n context['topic_next'] = None\n try:\n assert i - 1 >= 0 and objs[i - 1] is not obj\n context['topic_previous'] = objs[i - 1]\n except:\n context['topic_previous'] = None\n text = obj.db_entrytext\n text = text.replace('\\r\\n\\r\\n', '\\n\\n')\n text = text.replace('\\r\\n', '\\n')\n text = text.replace('\\n', ' ')\n context['entry_text'] = text\n return context\n<|end_body_0|>\n\n<|body_start_1|>\n if not queryset:\n queryset = self.get_queryset()\n category = slugify(self.kwargs.get('category', ''))\n topic = slugify(self.kwargs.get('topic', ''))\n obj = next((x for x in queryset if slugify(x.db_help_category) == category and slugify(x.db_key) == topic), None)\n if not obj:\n raise HttpResponseBadRequest('No %(verbose_name)s found matching the query' % {'verbose_name': queryset.model._meta.verbose_name})\n return obj\n<|end_body_1|>\n", "class_docstring": "Returns the detail page for a given help entry.", "class_name": "HelpDetailView", "detected_licenses": ["BSD-3-Clause", "MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass HelpDetailView:\n \"\"\"Returns the detail page for a given help entry.\"\"\"\n\n def get_context_data(self, **kwargs):\n \"\"\"Adds navigational data to the template to let browsers go to the next or previous entry in the help list. Returns: context (dict): Django context object\"\"\"\n <|body_0|>\n\n def get_object(self, queryset=None):\n \"\"\"Override of Django hook that retrieves an object by category and topic instead of pk and slug. 
Returns: entry (HelpEntry): HelpEntry requested in the URL.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context = super(HelpDetailView, self).get_context_data(**kwargs)\n obj = self.get_object()\n queryset = self.get_queryset().filter(db_help_category=obj.db_help_category).order_by(Lower('db_key'))\n context['topic_list'] = queryset\n objs = list(queryset)\n for i, x in enumerate(objs):\n if obj is x:\n break\n try:\n assert i + 1 <= len(objs) and objs[i + 1] is not obj\n context['topic_next'] = objs[i + 1]\n except:\n context['topic_next'] = None\n try:\n assert i - 1 >= 0 and objs[i - 1] is not obj\n context['topic_previous'] = objs[i - 1]\n except:\n context['topic_previous'] = None\n text = obj.db_entrytext\n text = text.replace('\\r\\n\\r\\n', '\\n\\n')\n text = text.replace('\\r\\n', '\\n')\n text = text.replace('\\n', ' ')\n context['entry_text'] = text\n return context\n<|end_body_0|>\n\n<|body_start_1|>\n if not queryset:\n queryset = self.get_queryset()\n category = slugify(self.kwargs.get('category', ''))\n topic = slugify(self.kwargs.get('topic', ''))\n obj = next((x for x in queryset if slugify(x.db_help_category) == category and slugify(x.db_key) == topic), None)\n if not obj:\n raise HttpResponseBadRequest('No %(verbose_name)s found matching the query' % {'verbose_name': queryset.model._meta.verbose_name})\n return obj\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000273", "length_bytes": 35922, "license_type": "permissive", "methods": [{"docstring": "Adds navigational data to the template to let browsers go to the next or previous entry in the help list. Returns: context (dict): Django context object", "name": "get_context_data", "signature": "def get_context_data(self, **kwargs)"}, {"docstring": "Override of Django hook that retrieves an object by category and topic instead of pk and slug. Returns: entry (HelpEntry): HelpEntry requested in the URL.", "name": "get_object", "signature": "def get_object(self, queryset=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_029816", "prompt": "Implement the Python class `HelpDetailView` described below.\n\nClass description:\nReturns the detail page for a given help entry.\n\nMethod signatures and docstrings:\n- def get_context_data(self, **kwargs): Adds navigational data to the template to let browsers go to the next or previous entry in the help list. Returns: context (dict): Django context object\n- def get_object(self, queryset=None): Override of Django hook that retrieves an object by category and topic instead of pk and slug. Returns: entry (HelpEntry): HelpEntry requested in the URL.", "prompted_full_text": "Implement the Python class `HelpDetailView` described below.\n\nClass description:\nReturns the detail page for a given help entry.\n\nMethod signatures and docstrings:\n- def get_context_data(self, **kwargs): Adds navigational data to the template to let browsers go to the next or previous entry in the help list. Returns: context (dict): Django context object\n- def get_object(self, queryset=None): Override of Django hook that retrieves an object by category and topic instead of pk and slug. Returns: entry (HelpEntry): HelpEntry requested in the URL.\n\n<|skeleton|>\nclass HelpDetailView:\n \"\"\"Returns the detail page for a given help entry.\"\"\"\n\n def get_context_data(self, **kwargs):\n \"\"\"Adds navigational data to the template to let browsers go to the next or previous entry in the help list. 
Returns: context (dict): Django context object\"\"\"\n <|body_0|>\n\n def get_object(self, queryset=None):\n \"\"\"Override of Django hook that retrieves an object by category and topic instead of pk and slug. Returns: entry (HelpEntry): HelpEntry requested in the URL.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context = super(HelpDetailView, self).get_context_data(**kwargs)\n obj = self.get_object()\n queryset = self.get_queryset().filter(db_help_category=obj.db_help_category).order_by(Lower('db_key'))\n context['topic_list'] = queryset\n objs = list(queryset)\n for i, x in enumerate(objs):\n if obj is x:\n break\n try:\n assert i + 1 <= len(objs) and objs[i + 1] is not obj\n context['topic_next'] = objs[i + 1]\n except:\n context['topic_next'] = None\n try:\n assert i - 1 >= 0 and objs[i - 1] is not obj\n context['topic_previous'] = objs[i - 1]\n except:\n context['topic_previous'] = None\n text = obj.db_entrytext\n text = text.replace('\\r\\n\\r\\n', '\\n\\n')\n text = text.replace('\\r\\n', '\\n')\n text = text.replace('\\n', ' ')\n context['entry_text'] = text\n return context\n<|end_body_0|>\n\n<|body_start_1|>\n if not queryset:\n queryset = self.get_queryset()\n category = slugify(self.kwargs.get('category', ''))\n topic = slugify(self.kwargs.get('topic', ''))\n obj = next((x for x in queryset if slugify(x.db_help_category) == category and slugify(x.db_key) == topic), None)\n if not obj:\n raise HttpResponseBadRequest('No %(verbose_name)s found matching the query' % {'verbose_name': queryset.model._meta.verbose_name})\n return obj\n<|end_body_1|>\n", "revision_id": "5e97df013399e1a401d0a7ec184c4b9eb3100edd", "skeleton": "<|skeleton|>\nclass HelpDetailView:\n \"\"\"Returns the detail page for a given help entry.\"\"\"\n\n def get_context_data(self, **kwargs):\n \"\"\"Adds navigational data to the template to let browsers go to the next or previous entry in the help list. Returns: context (dict): Django context object\"\"\"\n <|body_0|>\n\n def get_object(self, queryset=None):\n \"\"\"Override of Django hook that retrieves an object by category and topic instead of pk and slug. Returns: entry (HelpEntry): HelpEntry requested in the URL.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class HelpDetailView:\n \"\"\"Returns the detail page for a given help entry.\"\"\"\n\n def get_context_data(self, **kwargs):\n \"\"\"Adds navigational data to the template to let browsers go to the next or previous entry in the help list. 
Returns: context (dict): Django context object\"\"\"\n context = super(HelpDetailView, self).get_context_data(**kwargs)\n obj = self.get_object()\n queryset = self.get_queryset().filter(db_help_category=obj.db_help_category).order_by(Lower('db_key'))\n context['topic_list'] = queryset\n objs = list(queryset)\n for i, x in enumerate(objs):\n if obj is x:\n break\n try:\n assert i + 1 <= len(objs) and objs[i + 1] is not obj\n context['topic_next'] = objs[i + 1]\n except:\n context['topic_next'] = None\n try:\n assert i - 1 >= 0 and objs[i - 1] is not obj\n context['topic_previous'] = objs[i - 1]\n except:\n context['topic_previous'] = None\n text = obj.db_entrytext\n text = text.replace('\\r\\n\\r\\n', '\\n\\n')\n text = text.replace('\\r\\n', '\\n')\n text = text.replace('\\n', ' ')\n context['entry_text'] = text\n return context\n\n def get_object(self, queryset=None):\n \"\"\"Override of Django hook that retrieves an object by category and topic instead of pk and slug. Returns: entry (HelpEntry): HelpEntry requested in the URL.\"\"\"\n if not queryset:\n queryset = self.get_queryset()\n category = slugify(self.kwargs.get('category', ''))\n topic = slugify(self.kwargs.get('topic', ''))\n obj = next((x for x in queryset if slugify(x.db_help_category) == category and slugify(x.db_key) == topic), None)\n if not obj:\n raise HttpResponseBadRequest('No %(verbose_name)s found matching the query' % {'verbose_name': queryset.model._meta.verbose_name})\n return obj\n", "source": "the_stack_v2_python_sparse", "source_path": "evennia-engine/evennia/evennia/web/website/views.py", "source_repo": "rajammanabrolu/WorldGeneration", "split": "val", "star_events_count": 69}
{"blob_id": "83224d20b07c4b152e8da0ac797dc4cadfc3404b", "bodies": ["version = Project(projectRoot.child('twisted')).getVersion()\nversionString = version.base()\nsourceURL = 'https://github.com/twisted/twisted/tree/twisted-%s' % (versionString,) + '/src'\napiBuilder = APIBuilder()\napiBuilder.build('Twisted', 'http://twistedmatrix.com/', sourceURL, projectRoot.child('twisted'), output)", "if len(args) != 2:\n sys.exit('Must specify two arguments: Twisted checkout and destination path')\nself.buildAPIDocs(FilePath(args[0]), FilePath(args[1]))"], "bodies_text": "<|body_start_0|>\n version = Project(projectRoot.child('twisted')).getVersion()\n versionString = version.base()\n sourceURL = 'https://github.com/twisted/twisted/tree/twisted-%s' % (versionString,) + '/src'\n apiBuilder = APIBuilder()\n apiBuilder.build('Twisted', 'http://twistedmatrix.com/', sourceURL, projectRoot.child('twisted'), output)\n<|end_body_0|>\n\n<|body_start_1|>\n if len(args) != 2:\n sys.exit('Must specify two arguments: Twisted checkout and destination path')\n self.buildAPIDocs(FilePath(args[0]), FilePath(args[1]))\n<|end_body_1|>\n", "class_docstring": "A thing for building API documentation. See L{main}.", "class_name": "BuildAPIDocsScript", "detected_licenses": ["MIT", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BuildAPIDocsScript:\n \"\"\"A thing for building API documentation. See L{main}.\"\"\"\n\n def buildAPIDocs(self, projectRoot, output):\n \"\"\"Build the API documentation of Twisted, with our project policy. @param projectRoot: A L{FilePath} representing the root of the Twisted checkout. @param output: A L{FilePath} pointing to the desired output directory.\"\"\"\n <|body_0|>\n\n def main(self, args):\n \"\"\"Build API documentation. @type args: list of str @param args: The command line arguments to process. This must contain two strings: the path to the root of the Twisted checkout, and a path to an output directory.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n version = Project(projectRoot.child('twisted')).getVersion()\n versionString = version.base()\n sourceURL = 'https://github.com/twisted/twisted/tree/twisted-%s' % (versionString,) + '/src'\n apiBuilder = APIBuilder()\n apiBuilder.build('Twisted', 'http://twistedmatrix.com/', sourceURL, projectRoot.child('twisted'), output)\n<|end_body_0|>\n\n<|body_start_1|>\n if len(args) != 2:\n sys.exit('Must specify two arguments: Twisted checkout and destination path')\n self.buildAPIDocs(FilePath(args[0]), FilePath(args[1]))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000274", "length_bytes": 19015, "license_type": "permissive", "methods": [{"docstring": "Build the API documentation of Twisted, with our project policy. @param projectRoot: A L{FilePath} representing the root of the Twisted checkout. @param output: A L{FilePath} pointing to the desired output directory.", "name": "buildAPIDocs", "signature": "def buildAPIDocs(self, projectRoot, output)"}, {"docstring": "Build API documentation. @type args: list of str @param args: The command line arguments to process. 
This must contain two strings: the path to the root of the Twisted checkout, and a path to an output directory.", "name": "main", "signature": "def main(self, args)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_038829", "prompt": "Implement the Python class `BuildAPIDocsScript` described below.\n\nClass description:\nA thing for building API documentation. See L{main}.\n\nMethod signatures and docstrings:\n- def buildAPIDocs(self, projectRoot, output): Build the API documentation of Twisted, with our project policy. @param projectRoot: A L{FilePath} representing the root of the Twisted checkout. @param output: A L{FilePath} pointing to the desired output directory.\n- def main(self, args): Build API documentation. @type args: list of str @param args: The command line arguments to process. This must contain two strings: the path to the root of the Twisted checkout, and a path to an output directory.", "prompted_full_text": "Implement the Python class `BuildAPIDocsScript` described below.\n\nClass description:\nA thing for building API documentation. See L{main}.\n\nMethod signatures and docstrings:\n- def buildAPIDocs(self, projectRoot, output): Build the API documentation of Twisted, with our project policy. @param projectRoot: A L{FilePath} representing the root of the Twisted checkout. @param output: A L{FilePath} pointing to the desired output directory.\n- def main(self, args): Build API documentation. @type args: list of str @param args: The command line arguments to process. This must contain two strings: the path to the root of the Twisted checkout, and a path to an output directory.\n\n<|skeleton|>\nclass BuildAPIDocsScript:\n \"\"\"A thing for building API documentation. See L{main}.\"\"\"\n\n def buildAPIDocs(self, projectRoot, output):\n \"\"\"Build the API documentation of Twisted, with our project policy. @param projectRoot: A L{FilePath} representing the root of the Twisted checkout. @param output: A L{FilePath} pointing to the desired output directory.\"\"\"\n <|body_0|>\n\n def main(self, args):\n \"\"\"Build API documentation. @type args: list of str @param args: The command line arguments to process. This must contain two strings: the path to the root of the Twisted checkout, and a path to an output directory.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n version = Project(projectRoot.child('twisted')).getVersion()\n versionString = version.base()\n sourceURL = 'https://github.com/twisted/twisted/tree/twisted-%s' % (versionString,) + '/src'\n apiBuilder = APIBuilder()\n apiBuilder.build('Twisted', 'http://twistedmatrix.com/', sourceURL, projectRoot.child('twisted'), output)\n<|end_body_0|>\n\n<|body_start_1|>\n if len(args) != 2:\n sys.exit('Must specify two arguments: Twisted checkout and destination path')\n self.buildAPIDocs(FilePath(args[0]), FilePath(args[1]))\n<|end_body_1|>\n", "revision_id": "5cee0a8c4180a3108538b4e4ce945a18726595a6", "skeleton": "<|skeleton|>\nclass BuildAPIDocsScript:\n \"\"\"A thing for building API documentation. See L{main}.\"\"\"\n\n def buildAPIDocs(self, projectRoot, output):\n \"\"\"Build the API documentation of Twisted, with our project policy. @param projectRoot: A L{FilePath} representing the root of the Twisted checkout. @param output: A L{FilePath} pointing to the desired output directory.\"\"\"\n <|body_0|>\n\n def main(self, args):\n \"\"\"Build API documentation. @type args: list of str @param args: The command line arguments to process. 
This must contain two strings: the path to the root of the Twisted checkout, and a path to an output directory.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class BuildAPIDocsScript:\n \"\"\"A thing for building API documentation. See L{main}.\"\"\"\n\n def buildAPIDocs(self, projectRoot, output):\n \"\"\"Build the API documentation of Twisted, with our project policy. @param projectRoot: A L{FilePath} representing the root of the Twisted checkout. @param output: A L{FilePath} pointing to the desired output directory.\"\"\"\n version = Project(projectRoot.child('twisted')).getVersion()\n versionString = version.base()\n sourceURL = 'https://github.com/twisted/twisted/tree/twisted-%s' % (versionString,) + '/src'\n apiBuilder = APIBuilder()\n apiBuilder.build('Twisted', 'http://twistedmatrix.com/', sourceURL, projectRoot.child('twisted'), output)\n\n def main(self, args):\n \"\"\"Build API documentation. @type args: list of str @param args: The command line arguments to process. This must contain two strings: the path to the root of the Twisted checkout, and a path to an output directory.\"\"\"\n if len(args) != 2:\n sys.exit('Must specify two arguments: Twisted checkout and destination path')\n self.buildAPIDocs(FilePath(args[0]), FilePath(args[1]))\n", "source": "the_stack_v2_python_sparse", "source_path": "venv/Lib/site-packages/twisted/python/_release.py", "source_repo": "zoelesv/Smathchat", "split": "val", "star_events_count": 9}
{"blob_id": "0f6f3f57c41d38cb9d7ccbe427991bb6b656616b", "bodies": ["super(MapBox, self).__init__(format_string=format_string, scheme=scheme, timeout=timeout, proxies=proxies, user_agent=user_agent, ssl_context=ssl_context)\nself.api_key = api_key\nself.domain = domain.strip('/')\nself.api = '%s://%s%s' % (self.scheme, self.domain, self.api_path)", "features = json['features']\nif features == []:\n return None\n\ndef parse_feature(feature):\n location = feature['place_name']\n place = feature['text']\n longitude = feature['geometry']['coordinates'][0]\n latitude = feature['geometry']['coordinates'][1]\n return Location(location, (latitude, longitude), place)\nif exactly_one:\n return parse_feature(features[0])\nelse:\n return [parse_feature(feature) for feature in features]", "params = {}\nparams['access_token'] = self.api_key\nquery = self.format_string % query\nif bbox:\n params['bbox'] = self._format_bounding_box(bbox, '%(lon1)s,%(lat1)s,%(lon2)s,%(lat2)s')\nif not country:\n country = []\nif isinstance(country, string_compare):\n country = [country]\nif country:\n params['country'] = ','.join(country)\nif proximity:\n p = Point(proximity)\n params['proximity'] = '%s,%s' % (p.longitude, p.latitude)\nquoted_query = quote(query.encode('utf-8'))\nurl = '?'.join((self.api % dict(query=quoted_query), urlencode(params)))\nlogger.debug('%s.geocode: %s', self.__class__.__name__, url)\nreturn self._parse_json(self._call_geocoder(url, timeout=timeout))", "params = {}\nparams['access_token'] = self.api_key\npoint = self._coerce_point_to_string(query, '%(lon)s,%(lat)s')\nquoted_query = quote(point.encode('utf-8'))\nurl = '?'.join((self.api % dict(query=quoted_query), urlencode(params)))\nlogger.debug('%s.reverse: %s', self.__class__.__name__, url)\nreturn self._parse_json(self._call_geocoder(url, timeout=timeout), exactly_one)"], "bodies_text": "<|body_start_0|>\n super(MapBox, self).__init__(format_string=format_string, scheme=scheme, timeout=timeout, proxies=proxies, user_agent=user_agent, ssl_context=ssl_context)\n self.api_key = api_key\n self.domain = domain.strip('/')\n self.api = '%s://%s%s' % (self.scheme, self.domain, self.api_path)\n<|end_body_0|>\n\n<|body_start_1|>\n features = json['features']\n if features == []:\n return None\n\n def parse_feature(feature):\n location = feature['place_name']\n place = feature['text']\n longitude = feature['geometry']['coordinates'][0]\n latitude = feature['geometry']['coordinates'][1]\n return Location(location, (latitude, longitude), place)\n if exactly_one:\n return parse_feature(features[0])\n else:\n return [parse_feature(feature) for feature in features]\n<|end_body_1|>\n\n<|body_start_2|>\n params = {}\n params['access_token'] = self.api_key\n query = self.format_string % query\n if bbox:\n params['bbox'] = self._format_bounding_box(bbox, '%(lon1)s,%(lat1)s,%(lon2)s,%(lat2)s')\n if not country:\n country = []\n if isinstance(country, string_compare):\n country = [country]\n if country:\n params['country'] = ','.join(country)\n if proximity:\n p = Point(proximity)\n params['proximity'] = '%s,%s' % (p.longitude, p.latitude)\n quoted_query = quote(query.encode('utf-8'))\n url = '?'.join((self.api % dict(query=quoted_query), urlencode(params)))\n logger.debug('%s.geocode: %s', self.__class__.__name__, url)\n return self._parse_json(self._call_geocoder(url, timeout=timeout))\n<|end_body_2|>\n\n<|body_start_3|>\n params = {}\n params['access_token'] = self.api_key\n point = self._coerce_point_to_string(query, '%(lon)s,%(lat)s')\n quoted_query = 
quote(point.encode('utf-8'))\n url = '?'.join((self.api % dict(query=quoted_query), urlencode(params)))\n logger.debug('%s.reverse: %s', self.__class__.__name__, url)\n return self._parse_json(self._call_geocoder(url, timeout=timeout), exactly_one)\n<|end_body_3|>\n", "class_docstring": "Geocoder using the Mapbox API. Documentation at: https://www.mapbox.com/api-documentation/ .. versionadded:: 1.17.0", "class_name": "MapBox", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MapBox:\n \"\"\"Geocoder using the Mapbox API. Documentation at: https://www.mapbox.com/api-documentation/ .. versionadded:: 1.17.0\"\"\"\n\n def __init__(self, api_key, format_string=None, scheme=None, timeout=DEFAULT_SENTINEL, proxies=DEFAULT_SENTINEL, user_agent=None, ssl_context=DEFAULT_SENTINEL, domain='api.mapbox.com'):\n \"\"\":param str api_key: The API key required by Mapbox to perform geocoding requests. API keys are managed through Mapox's account page (https://www.mapbox.com/account/access-tokens). :param str format_string: See :attr:`geopy.geocoders.options.default_format_string`. :param str scheme: See :attr:`geopy.geocoders.options.default_scheme`. :param int timeout: See :attr:`geopy.geocoders.options.default_timeout`. :param dict proxies: See :attr:`geopy.geocoders.options.default_proxies`. :param str user_agent: See :attr:`geopy.geocoders.options.default_user_agent`. :type ssl_context: :class:`ssl.SSLContext` :param ssl_context: See :attr:`geopy.geocoders.options.default_ssl_context`. :param str domain:\"\"\"\n <|body_0|>\n\n def _parse_json(self, json, exactly_one=True):\n \"\"\"Returns location, (latitude, longitude) from json feed.\"\"\"\n <|body_1|>\n\n def geocode(self, query, exactly_one=True, timeout=DEFAULT_SENTINEL, proximity=None, country=None, bbox=None):\n \"\"\"Return a location point by address :param str query: The address or query you wish to geocode. :param bool exactly_one: Return one result or a list of results, if available. :param int timeout: Time, in seconds, to wait for the geocoding service to respond before raising a :class:`geopy.exc.GeocoderTimedOut` exception. Set this only if you wish to override, on this call only, the value set during the geocoder's initialization. :param proximity: A coordinate to bias local results based on a provided location. :type proximity: :class:`geopy.point.Point`, list or tuple of ``(latitude, longitude)``, or string as ``\"%(latitude)s, %(longitude)s\"``. :param country: Country to filter result in form\"\"\"\n <|body_2|>\n\n def reverse(self, query, exactly_one=True, timeout=DEFAULT_SENTINEL):\n \"\"\"Return an address by location point. :param query: The coordinates for which you wish to obtain the closest human-readable addresses. :type query: :class:`geopy.point.Point`, list or tuple of ``(latitude, longitude)``, or string as ``\"%(latitude)s, %(longitude)s\"``. :param bool exactly_one: Return one result or a list of results, if available. :param int timeout: Time, in seconds, to wait for the geocoding service to respond before raising a :class:`geopy.exc.GeocoderTimedOut` exception. Set this only if you wish to override, on this call only, the value set during the geocoder's initialization. 
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if ``exactly_one=False``.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(MapBox, self).__init__(format_string=format_string, scheme=scheme, timeout=timeout, proxies=proxies, user_agent=user_agent, ssl_context=ssl_context)\n self.api_key = api_key\n self.domain = domain.strip('/')\n self.api = '%s://%s%s' % (self.scheme, self.domain, self.api_path)\n<|end_body_0|>\n\n<|body_start_1|>\n features = json['features']\n if features == []:\n return None\n\n def parse_feature(feature):\n location = feature['place_name']\n place = feature['text']\n longitude = feature['geometry']['coordinates'][0]\n latitude = feature['geometry']['coordinates'][1]\n return Location(location, (latitude, longitude), place)\n if exactly_one:\n return parse_feature(features[0])\n else:\n return [parse_feature(feature) for feature in features]\n<|end_body_1|>\n\n<|body_start_2|>\n params = {}\n params['access_token'] = self.api_key\n query = self.format_string % query\n if bbox:\n params['bbox'] = self._format_bounding_box(bbox, '%(lon1)s,%(lat1)s,%(lon2)s,%(lat2)s')\n if not country:\n country = []\n if isinstance(country, string_compare):\n country = [country]\n if country:\n params['country'] = ','.join(country)\n if proximity:\n p = Point(proximity)\n params['proximity'] = '%s,%s' % (p.longitude, p.latitude)\n quoted_query = quote(query.encode('utf-8'))\n url = '?'.join((self.api % dict(query=quoted_query), urlencode(params)))\n logger.debug('%s.geocode: %s', self.__class__.__name__, url)\n return self._parse_json(self._call_geocoder(url, timeout=timeout))\n<|end_body_2|>\n\n<|body_start_3|>\n params = {}\n params['access_token'] = self.api_key\n point = self._coerce_point_to_string(query, '%(lon)s,%(lat)s')\n quoted_query = quote(point.encode('utf-8'))\n url = '?'.join((self.api % dict(query=quoted_query), urlencode(params)))\n logger.debug('%s.reverse: %s', self.__class__.__name__, url)\n return self._parse_json(self._call_geocoder(url, timeout=timeout), exactly_one)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000275", "length_bytes": 6972, "license_type": "permissive", "methods": [{"docstring": ":param str api_key: The API key required by Mapbox to perform geocoding requests. API keys are managed through Mapox's account page (https://www.mapbox.com/account/access-tokens). :param str format_string: See :attr:`geopy.geocoders.options.default_format_string`. :param str scheme: See :attr:`geopy.geocoders.options.default_scheme`. :param int timeout: See :attr:`geopy.geocoders.options.default_timeout`. :param dict proxies: See :attr:`geopy.geocoders.options.default_proxies`. :param str user_agent: See :attr:`geopy.geocoders.options.default_user_agent`. :type ssl_context: :class:`ssl.SSLContext` :param ssl_context: See :attr:`geopy.geocoders.options.default_ssl_context`. :param str domain:", "name": "__init__", "signature": "def __init__(self, api_key, format_string=None, scheme=None, timeout=DEFAULT_SENTINEL, proxies=DEFAULT_SENTINEL, user_agent=None, ssl_context=DEFAULT_SENTINEL, domain='api.mapbox.com')"}, {"docstring": "Returns location, (latitude, longitude) from json feed.", "name": "_parse_json", "signature": "def _parse_json(self, json, exactly_one=True)"}, {"docstring": "Return a location point by address :param str query: The address or query you wish to geocode. :param bool exactly_one: Return one result or a list of results, if available. 
:param int timeout: Time, in seconds, to wait for the geocoding service to respond before raising a :class:`geopy.exc.GeocoderTimedOut` exception. Set this only if you wish to override, on this call only, the value set during the geocoder's initialization. :param proximity: A coordinate to bias local results based on a provided location. :type proximity: :class:`geopy.point.Point`, list or tuple of ``(latitude, longitude)``, or string as ``\"%(latitude)s, %(longitude)s\"``. :param country: Country to filter result in form", "name": "geocode", "signature": "def geocode(self, query, exactly_one=True, timeout=DEFAULT_SENTINEL, proximity=None, country=None, bbox=None)"}, {"docstring": "Return an address by location point. :param query: The coordinates for which you wish to obtain the closest human-readable addresses. :type query: :class:`geopy.point.Point`, list or tuple of ``(latitude, longitude)``, or string as ``\"%(latitude)s, %(longitude)s\"``. :param bool exactly_one: Return one result or a list of results, if available. :param int timeout: Time, in seconds, to wait for the geocoding service to respond before raising a :class:`geopy.exc.GeocoderTimedOut` exception. Set this only if you wish to override, on this call only, the value set during the geocoder's initialization. :rtype: ``None``, :class:`geopy.location.Location` or a list of them, if ``exactly_one=False``.", "name": "reverse", "signature": "def reverse(self, query, exactly_one=True, timeout=DEFAULT_SENTINEL)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_025443", "prompt": "Implement the Python class `MapBox` described below.\n\nClass description:\nGeocoder using the Mapbox API. Documentation at: https://www.mapbox.com/api-documentation/ .. versionadded:: 1.17.0\n\nMethod signatures and docstrings:\n- def __init__(self, api_key, format_string=None, scheme=None, timeout=DEFAULT_SENTINEL, proxies=DEFAULT_SENTINEL, user_agent=None, ssl_context=DEFAULT_SENTINEL, domain='api.mapbox.com'): :param str api_key: The API key required by Mapbox to perform geocoding requests. API keys are managed through Mapox's account page (https://www.mapbox.com/account/access-tokens). :param str format_string: See :attr:`geopy.geocoders.options.default_format_string`. :param str scheme: See :attr:`geopy.geocoders.options.default_scheme`. :param int timeout: See :attr:`geopy.geocoders.options.default_timeout`. :param dict proxies: See :attr:`geopy.geocoders.options.default_proxies`. :param str user_agent: See :attr:`geopy.geocoders.options.default_user_agent`. :type ssl_context: :class:`ssl.SSLContext` :param ssl_context: See :attr:`geopy.geocoders.options.default_ssl_context`. :param str domain:\n- def _parse_json(self, json, exactly_one=True): Returns location, (latitude, longitude) from json feed.\n- def geocode(self, query, exactly_one=True, timeout=DEFAULT_SENTINEL, proximity=None, country=None, bbox=None): Return a location point by address :param str query: The address or query you wish to geocode. :param bool exactly_one: Return one result or a list of results, if available. :param int timeout: Time, in seconds, to wait for the geocoding service to respond before raising a :class:`geopy.exc.GeocoderTimedOut` exception. Set this only if you wish to override, on this call only, the value set during the geocoder's initialization. :param proximity: A coordinate to bias local results based on a provided location. 
:type proximity: :class:`geopy.point.Point`, list or tuple of ``(latitude, longitude)``, or string as ``\"%(latitude)s, %(longitude)s\"``. :param country: Country to filter result in form\n- def reverse(self, query, exactly_one=True, timeout=DEFAULT_SENTINEL): Return an address by location point. :param query: The coordinates for which you wish to obtain the closest human-readable addresses. :type query: :class:`geopy.point.Point`, list or tuple of ``(latitude, longitude)``, or string as ``\"%(latitude)s, %(longitude)s\"``. :param bool exactly_one: Return one result or a list of results, if available. :param int timeout: Time, in seconds, to wait for the geocoding service to respond before raising a :class:`geopy.exc.GeocoderTimedOut` exception. Set this only if you wish to override, on this call only, the value set during the geocoder's initialization. :rtype: ``None``, :class:`geopy.location.Location` or a list of them, if ``exactly_one=False``.", "prompted_full_text": "Implement the Python class `MapBox` described below.\n\nClass description:\nGeocoder using the Mapbox API. Documentation at: https://www.mapbox.com/api-documentation/ .. versionadded:: 1.17.0\n\nMethod signatures and docstrings:\n- def __init__(self, api_key, format_string=None, scheme=None, timeout=DEFAULT_SENTINEL, proxies=DEFAULT_SENTINEL, user_agent=None, ssl_context=DEFAULT_SENTINEL, domain='api.mapbox.com'): :param str api_key: The API key required by Mapbox to perform geocoding requests. API keys are managed through Mapox's account page (https://www.mapbox.com/account/access-tokens). :param str format_string: See :attr:`geopy.geocoders.options.default_format_string`. :param str scheme: See :attr:`geopy.geocoders.options.default_scheme`. :param int timeout: See :attr:`geopy.geocoders.options.default_timeout`. :param dict proxies: See :attr:`geopy.geocoders.options.default_proxies`. :param str user_agent: See :attr:`geopy.geocoders.options.default_user_agent`. :type ssl_context: :class:`ssl.SSLContext` :param ssl_context: See :attr:`geopy.geocoders.options.default_ssl_context`. :param str domain:\n- def _parse_json(self, json, exactly_one=True): Returns location, (latitude, longitude) from json feed.\n- def geocode(self, query, exactly_one=True, timeout=DEFAULT_SENTINEL, proximity=None, country=None, bbox=None): Return a location point by address :param str query: The address or query you wish to geocode. :param bool exactly_one: Return one result or a list of results, if available. :param int timeout: Time, in seconds, to wait for the geocoding service to respond before raising a :class:`geopy.exc.GeocoderTimedOut` exception. Set this only if you wish to override, on this call only, the value set during the geocoder's initialization. :param proximity: A coordinate to bias local results based on a provided location. :type proximity: :class:`geopy.point.Point`, list or tuple of ``(latitude, longitude)``, or string as ``\"%(latitude)s, %(longitude)s\"``. :param country: Country to filter result in form\n- def reverse(self, query, exactly_one=True, timeout=DEFAULT_SENTINEL): Return an address by location point. :param query: The coordinates for which you wish to obtain the closest human-readable addresses. :type query: :class:`geopy.point.Point`, list or tuple of ``(latitude, longitude)``, or string as ``\"%(latitude)s, %(longitude)s\"``. :param bool exactly_one: Return one result or a list of results, if available. 
:param int timeout: Time, in seconds, to wait for the geocoding service to respond before raising a :class:`geopy.exc.GeocoderTimedOut` exception. Set this only if you wish to override, on this call only, the value set during the geocoder's initialization. :rtype: ``None``, :class:`geopy.location.Location` or a list of them, if ``exactly_one=False``.\n\n<|skeleton|>\nclass MapBox:\n \"\"\"Geocoder using the Mapbox API. Documentation at: https://www.mapbox.com/api-documentation/ .. versionadded:: 1.17.0\"\"\"\n\n def __init__(self, api_key, format_string=None, scheme=None, timeout=DEFAULT_SENTINEL, proxies=DEFAULT_SENTINEL, user_agent=None, ssl_context=DEFAULT_SENTINEL, domain='api.mapbox.com'):\n \"\"\":param str api_key: The API key required by Mapbox to perform geocoding requests. API keys are managed through Mapox's account page (https://www.mapbox.com/account/access-tokens). :param str format_string: See :attr:`geopy.geocoders.options.default_format_string`. :param str scheme: See :attr:`geopy.geocoders.options.default_scheme`. :param int timeout: See :attr:`geopy.geocoders.options.default_timeout`. :param dict proxies: See :attr:`geopy.geocoders.options.default_proxies`. :param str user_agent: See :attr:`geopy.geocoders.options.default_user_agent`. :type ssl_context: :class:`ssl.SSLContext` :param ssl_context: See :attr:`geopy.geocoders.options.default_ssl_context`. :param str domain:\"\"\"\n <|body_0|>\n\n def _parse_json(self, json, exactly_one=True):\n \"\"\"Returns location, (latitude, longitude) from json feed.\"\"\"\n <|body_1|>\n\n def geocode(self, query, exactly_one=True, timeout=DEFAULT_SENTINEL, proximity=None, country=None, bbox=None):\n \"\"\"Return a location point by address :param str query: The address or query you wish to geocode. :param bool exactly_one: Return one result or a list of results, if available. :param int timeout: Time, in seconds, to wait for the geocoding service to respond before raising a :class:`geopy.exc.GeocoderTimedOut` exception. Set this only if you wish to override, on this call only, the value set during the geocoder's initialization. :param proximity: A coordinate to bias local results based on a provided location. :type proximity: :class:`geopy.point.Point`, list or tuple of ``(latitude, longitude)``, or string as ``\"%(latitude)s, %(longitude)s\"``. :param country: Country to filter result in form\"\"\"\n <|body_2|>\n\n def reverse(self, query, exactly_one=True, timeout=DEFAULT_SENTINEL):\n \"\"\"Return an address by location point. :param query: The coordinates for which you wish to obtain the closest human-readable addresses. :type query: :class:`geopy.point.Point`, list or tuple of ``(latitude, longitude)``, or string as ``\"%(latitude)s, %(longitude)s\"``. :param bool exactly_one: Return one result or a list of results, if available. :param int timeout: Time, in seconds, to wait for the geocoding service to respond before raising a :class:`geopy.exc.GeocoderTimedOut` exception. Set this only if you wish to override, on this call only, the value set during the geocoder's initialization. 
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if ``exactly_one=False``.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(MapBox, self).__init__(format_string=format_string, scheme=scheme, timeout=timeout, proxies=proxies, user_agent=user_agent, ssl_context=ssl_context)\n self.api_key = api_key\n self.domain = domain.strip('/')\n self.api = '%s://%s%s' % (self.scheme, self.domain, self.api_path)\n<|end_body_0|>\n\n<|body_start_1|>\n features = json['features']\n if features == []:\n return None\n\n def parse_feature(feature):\n location = feature['place_name']\n place = feature['text']\n longitude = feature['geometry']['coordinates'][0]\n latitude = feature['geometry']['coordinates'][1]\n return Location(location, (latitude, longitude), place)\n if exactly_one:\n return parse_feature(features[0])\n else:\n return [parse_feature(feature) for feature in features]\n<|end_body_1|>\n\n<|body_start_2|>\n params = {}\n params['access_token'] = self.api_key\n query = self.format_string % query\n if bbox:\n params['bbox'] = self._format_bounding_box(bbox, '%(lon1)s,%(lat1)s,%(lon2)s,%(lat2)s')\n if not country:\n country = []\n if isinstance(country, string_compare):\n country = [country]\n if country:\n params['country'] = ','.join(country)\n if proximity:\n p = Point(proximity)\n params['proximity'] = '%s,%s' % (p.longitude, p.latitude)\n quoted_query = quote(query.encode('utf-8'))\n url = '?'.join((self.api % dict(query=quoted_query), urlencode(params)))\n logger.debug('%s.geocode: %s', self.__class__.__name__, url)\n return self._parse_json(self._call_geocoder(url, timeout=timeout))\n<|end_body_2|>\n\n<|body_start_3|>\n params = {}\n params['access_token'] = self.api_key\n point = self._coerce_point_to_string(query, '%(lon)s,%(lat)s')\n quoted_query = quote(point.encode('utf-8'))\n url = '?'.join((self.api % dict(query=quoted_query), urlencode(params)))\n logger.debug('%s.reverse: %s', self.__class__.__name__, url)\n return self._parse_json(self._call_geocoder(url, timeout=timeout), exactly_one)\n<|end_body_3|>\n", "revision_id": "0c72430da633785fcb14e40d8b007c86081d515d", "skeleton": "<|skeleton|>\nclass MapBox:\n \"\"\"Geocoder using the Mapbox API. Documentation at: https://www.mapbox.com/api-documentation/ .. versionadded:: 1.17.0\"\"\"\n\n def __init__(self, api_key, format_string=None, scheme=None, timeout=DEFAULT_SENTINEL, proxies=DEFAULT_SENTINEL, user_agent=None, ssl_context=DEFAULT_SENTINEL, domain='api.mapbox.com'):\n \"\"\":param str api_key: The API key required by Mapbox to perform geocoding requests. API keys are managed through Mapox's account page (https://www.mapbox.com/account/access-tokens). :param str format_string: See :attr:`geopy.geocoders.options.default_format_string`. :param str scheme: See :attr:`geopy.geocoders.options.default_scheme`. :param int timeout: See :attr:`geopy.geocoders.options.default_timeout`. :param dict proxies: See :attr:`geopy.geocoders.options.default_proxies`. :param str user_agent: See :attr:`geopy.geocoders.options.default_user_agent`. :type ssl_context: :class:`ssl.SSLContext` :param ssl_context: See :attr:`geopy.geocoders.options.default_ssl_context`. 
:param str domain:\"\"\"\n <|body_0|>\n\n def _parse_json(self, json, exactly_one=True):\n \"\"\"Returns location, (latitude, longitude) from json feed.\"\"\"\n <|body_1|>\n\n def geocode(self, query, exactly_one=True, timeout=DEFAULT_SENTINEL, proximity=None, country=None, bbox=None):\n \"\"\"Return a location point by address :param str query: The address or query you wish to geocode. :param bool exactly_one: Return one result or a list of results, if available. :param int timeout: Time, in seconds, to wait for the geocoding service to respond before raising a :class:`geopy.exc.GeocoderTimedOut` exception. Set this only if you wish to override, on this call only, the value set during the geocoder's initialization. :param proximity: A coordinate to bias local results based on a provided location. :type proximity: :class:`geopy.point.Point`, list or tuple of ``(latitude, longitude)``, or string as ``\"%(latitude)s, %(longitude)s\"``. :param country: Country to filter result in form\"\"\"\n <|body_2|>\n\n def reverse(self, query, exactly_one=True, timeout=DEFAULT_SENTINEL):\n \"\"\"Return an address by location point. :param query: The coordinates for which you wish to obtain the closest human-readable addresses. :type query: :class:`geopy.point.Point`, list or tuple of ``(latitude, longitude)``, or string as ``\"%(latitude)s, %(longitude)s\"``. :param bool exactly_one: Return one result or a list of results, if available. :param int timeout: Time, in seconds, to wait for the geocoding service to respond before raising a :class:`geopy.exc.GeocoderTimedOut` exception. Set this only if you wish to override, on this call only, the value set during the geocoder's initialization. :rtype: ``None``, :class:`geopy.location.Location` or a list of them, if ``exactly_one=False``.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class MapBox:\n \"\"\"Geocoder using the Mapbox API. Documentation at: https://www.mapbox.com/api-documentation/ .. versionadded:: 1.17.0\"\"\"\n\n def __init__(self, api_key, format_string=None, scheme=None, timeout=DEFAULT_SENTINEL, proxies=DEFAULT_SENTINEL, user_agent=None, ssl_context=DEFAULT_SENTINEL, domain='api.mapbox.com'):\n \"\"\":param str api_key: The API key required by Mapbox to perform geocoding requests. API keys are managed through Mapox's account page (https://www.mapbox.com/account/access-tokens). :param str format_string: See :attr:`geopy.geocoders.options.default_format_string`. :param str scheme: See :attr:`geopy.geocoders.options.default_scheme`. :param int timeout: See :attr:`geopy.geocoders.options.default_timeout`. :param dict proxies: See :attr:`geopy.geocoders.options.default_proxies`. :param str user_agent: See :attr:`geopy.geocoders.options.default_user_agent`. :type ssl_context: :class:`ssl.SSLContext` :param ssl_context: See :attr:`geopy.geocoders.options.default_ssl_context`. 
:param str domain:\"\"\"\n super(MapBox, self).__init__(format_string=format_string, scheme=scheme, timeout=timeout, proxies=proxies, user_agent=user_agent, ssl_context=ssl_context)\n self.api_key = api_key\n self.domain = domain.strip('/')\n self.api = '%s://%s%s' % (self.scheme, self.domain, self.api_path)\n\n def _parse_json(self, json, exactly_one=True):\n \"\"\"Returns location, (latitude, longitude) from json feed.\"\"\"\n features = json['features']\n if features == []:\n return None\n\n def parse_feature(feature):\n location = feature['place_name']\n place = feature['text']\n longitude = feature['geometry']['coordinates'][0]\n latitude = feature['geometry']['coordinates'][1]\n return Location(location, (latitude, longitude), place)\n if exactly_one:\n return parse_feature(features[0])\n else:\n return [parse_feature(feature) for feature in features]\n\n def geocode(self, query, exactly_one=True, timeout=DEFAULT_SENTINEL, proximity=None, country=None, bbox=None):\n \"\"\"Return a location point by address :param str query: The address or query you wish to geocode. :param bool exactly_one: Return one result or a list of results, if available. :param int timeout: Time, in seconds, to wait for the geocoding service to respond before raising a :class:`geopy.exc.GeocoderTimedOut` exception. Set this only if you wish to override, on this call only, the value set during the geocoder's initialization. :param proximity: A coordinate to bias local results based on a provided location. :type proximity: :class:`geopy.point.Point`, list or tuple of ``(latitude, longitude)``, or string as ``\"%(latitude)s, %(longitude)s\"``. :param country: Country to filter result in form\"\"\"\n params = {}\n params['access_token'] = self.api_key\n query = self.format_string % query\n if bbox:\n params['bbox'] = self._format_bounding_box(bbox, '%(lon1)s,%(lat1)s,%(lon2)s,%(lat2)s')\n if not country:\n country = []\n if isinstance(country, string_compare):\n country = [country]\n if country:\n params['country'] = ','.join(country)\n if proximity:\n p = Point(proximity)\n params['proximity'] = '%s,%s' % (p.longitude, p.latitude)\n quoted_query = quote(query.encode('utf-8'))\n url = '?'.join((self.api % dict(query=quoted_query), urlencode(params)))\n logger.debug('%s.geocode: %s', self.__class__.__name__, url)\n return self._parse_json(self._call_geocoder(url, timeout=timeout))\n\n def reverse(self, query, exactly_one=True, timeout=DEFAULT_SENTINEL):\n \"\"\"Return an address by location point. :param query: The coordinates for which you wish to obtain the closest human-readable addresses. :type query: :class:`geopy.point.Point`, list or tuple of ``(latitude, longitude)``, or string as ``\"%(latitude)s, %(longitude)s\"``. :param bool exactly_one: Return one result or a list of results, if available. :param int timeout: Time, in seconds, to wait for the geocoding service to respond before raising a :class:`geopy.exc.GeocoderTimedOut` exception. Set this only if you wish to override, on this call only, the value set during the geocoder's initialization. 
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if ``exactly_one=False``.\"\"\"\n params = {}\n params['access_token'] = self.api_key\n point = self._coerce_point_to_string(query, '%(lon)s,%(lat)s')\n quoted_query = quote(point.encode('utf-8'))\n url = '?'.join((self.api % dict(query=quoted_query), urlencode(params)))\n logger.debug('%s.reverse: %s', self.__class__.__name__, url)\n return self._parse_json(self._call_geocoder(url, timeout=timeout), exactly_one)\n", "source": "the_stack_v2_python_sparse", "source_path": "WatchDogs_Visualisation/mainVenv/lib/python3.7/site-packages/geopy/geocoders/mapbox.py", "source_repo": "prashanth-thipparthi/WatchDogs_StockMarketAnalysis", "split": "val", "star_events_count": 4}
{"blob_id": "b09225840e95dd663415a0021e3284243d999416", "bodies": ["self.copy_status = copy_status\nself.expiry_time_usecs = expiry_time_usecs\nself.message = message\nself.snapshot_target = snapshot_target", "if dictionary is None:\n return None\ncopy_status = dictionary.get('copyStatus')\nexpiry_time_usecs = dictionary.get('expiryTimeUsecs')\nmessage = dictionary.get('message')\nsnapshot_target = cohesity_management_sdk.models.snapshot_target_settings.SnapshotTargetSettings.from_dictionary(dictionary.get('snapshotTarget')) if dictionary.get('snapshotTarget') else None\nreturn cls(copy_status, expiry_time_usecs, message, snapshot_target)"], "bodies_text": "<|body_start_0|>\n self.copy_status = copy_status\n self.expiry_time_usecs = expiry_time_usecs\n self.message = message\n self.snapshot_target = snapshot_target\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n copy_status = dictionary.get('copyStatus')\n expiry_time_usecs = dictionary.get('expiryTimeUsecs')\n message = dictionary.get('message')\n snapshot_target = cohesity_management_sdk.models.snapshot_target_settings.SnapshotTargetSettings.from_dictionary(dictionary.get('snapshotTarget')) if dictionary.get('snapshotTarget') else None\n return cls(copy_status, expiry_time_usecs, message, snapshot_target)\n<|end_body_1|>\n", "class_docstring": "Implementation of the 'SnapshotCopyTask' model. Specifies information about copy tasks such as replication and archival tasks. Attributes: copy_status (string): Specifies the status of the copy task. expiry_time_usecs (long|int): Specifies when the Snapshot expires on the target. message (string): Specifies warning or error information when the copy task is not successful. snapshot_target (SnapshotTargetSettings): Specifies the target location where the Snapshot was copied to.", "class_name": "SnapshotCopyTask", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SnapshotCopyTask:\n \"\"\"Implementation of the 'SnapshotCopyTask' model. Specifies information about copy tasks such as replication and archival tasks. Attributes: copy_status (string): Specifies the status of the copy task. expiry_time_usecs (long|int): Specifies when the Snapshot expires on the target. message (string): Specifies warning or error information when the copy task is not successful. snapshot_target (SnapshotTargetSettings): Specifies the target location where the Snapshot was copied to.\"\"\"\n\n def __init__(self, copy_status=None, expiry_time_usecs=None, message=None, snapshot_target=None):\n \"\"\"Constructor for the SnapshotCopyTask class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.copy_status = copy_status\n self.expiry_time_usecs = expiry_time_usecs\n self.message = message\n self.snapshot_target = snapshot_target\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n copy_status = dictionary.get('copyStatus')\n expiry_time_usecs = dictionary.get('expiryTimeUsecs')\n message = dictionary.get('message')\n snapshot_target = cohesity_management_sdk.models.snapshot_target_settings.SnapshotTargetSettings.from_dictionary(dictionary.get('snapshotTarget')) if dictionary.get('snapshotTarget') else None\n return cls(copy_status, expiry_time_usecs, message, snapshot_target)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000276", "length_bytes": 2543, "license_type": "permissive", "methods": [{"docstring": "Constructor for the SnapshotCopyTask class", "name": "__init__", "signature": "def __init__(self, copy_status=None, expiry_time_usecs=None, message=None, snapshot_target=None)"}, {"docstring": "Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "name": "from_dictionary", "signature": "def from_dictionary(cls, dictionary)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_013176", "prompt": "Implement the Python class `SnapshotCopyTask` described below.\n\nClass description:\nImplementation of the 'SnapshotCopyTask' model. Specifies information about copy tasks such as replication and archival tasks. Attributes: copy_status (string): Specifies the status of the copy task. expiry_time_usecs (long|int): Specifies when the Snapshot expires on the target. message (string): Specifies warning or error information when the copy task is not successful. snapshot_target (SnapshotTargetSettings): Specifies the target location where the Snapshot was copied to.\n\nMethod signatures and docstrings:\n- def __init__(self, copy_status=None, expiry_time_usecs=None, message=None, snapshot_target=None): Constructor for the SnapshotCopyTask class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "prompted_full_text": "Implement the Python class `SnapshotCopyTask` described below.\n\nClass description:\nImplementation of the 'SnapshotCopyTask' model. Specifies information about copy tasks such as replication and archival tasks. Attributes: copy_status (string): Specifies the status of the copy task. expiry_time_usecs (long|int): Specifies when the Snapshot expires on the target. message (string): Specifies warning or error information when the copy task is not successful. 
snapshot_target (SnapshotTargetSettings): Specifies the target location where the Snapshot was copied to.\n\nMethod signatures and docstrings:\n- def __init__(self, copy_status=None, expiry_time_usecs=None, message=None, snapshot_target=None): Constructor for the SnapshotCopyTask class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\n\n<|skeleton|>\nclass SnapshotCopyTask:\n \"\"\"Implementation of the 'SnapshotCopyTask' model. Specifies information about copy tasks such as replication and archival tasks. Attributes: copy_status (string): Specifies the status of the copy task. expiry_time_usecs (long|int): Specifies when the Snapshot expires on the target. message (string): Specifies warning or error information when the copy task is not successful. snapshot_target (SnapshotTargetSettings): Specifies the target location where the Snapshot was copied to.\"\"\"\n\n def __init__(self, copy_status=None, expiry_time_usecs=None, message=None, snapshot_target=None):\n \"\"\"Constructor for the SnapshotCopyTask class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.copy_status = copy_status\n self.expiry_time_usecs = expiry_time_usecs\n self.message = message\n self.snapshot_target = snapshot_target\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n copy_status = dictionary.get('copyStatus')\n expiry_time_usecs = dictionary.get('expiryTimeUsecs')\n message = dictionary.get('message')\n snapshot_target = cohesity_management_sdk.models.snapshot_target_settings.SnapshotTargetSettings.from_dictionary(dictionary.get('snapshotTarget')) if dictionary.get('snapshotTarget') else None\n return cls(copy_status, expiry_time_usecs, message, snapshot_target)\n<|end_body_1|>\n", "revision_id": "e4973dfeb836266904d0369ea845513c7acf261e", "skeleton": "<|skeleton|>\nclass SnapshotCopyTask:\n \"\"\"Implementation of the 'SnapshotCopyTask' model. Specifies information about copy tasks such as replication and archival tasks. Attributes: copy_status (string): Specifies the status of the copy task. expiry_time_usecs (long|int): Specifies when the Snapshot expires on the target. message (string): Specifies warning or error information when the copy task is not successful. snapshot_target (SnapshotTargetSettings): Specifies the target location where the Snapshot was copied to.\"\"\"\n\n def __init__(self, copy_status=None, expiry_time_usecs=None, message=None, snapshot_target=None):\n \"\"\"Constructor for the SnapshotCopyTask class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SnapshotCopyTask:\n \"\"\"Implementation of the 'SnapshotCopyTask' model. Specifies information about copy tasks such as replication and archival tasks. Attributes: copy_status (string): Specifies the status of the copy task. expiry_time_usecs (long|int): Specifies when the Snapshot expires on the target. message (string): Specifies warning or error information when the copy task is not successful. snapshot_target (SnapshotTargetSettings): Specifies the target location where the Snapshot was copied to.\"\"\"\n\n def __init__(self, copy_status=None, expiry_time_usecs=None, message=None, snapshot_target=None):\n \"\"\"Constructor for the SnapshotCopyTask class\"\"\"\n self.copy_status = copy_status\n self.expiry_time_usecs = expiry_time_usecs\n self.message = message\n self.snapshot_target = snapshot_target\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n if dictionary is None:\n return None\n copy_status = dictionary.get('copyStatus')\n expiry_time_usecs = dictionary.get('expiryTimeUsecs')\n message = dictionary.get('message')\n snapshot_target = cohesity_management_sdk.models.snapshot_target_settings.SnapshotTargetSettings.from_dictionary(dictionary.get('snapshotTarget')) if dictionary.get('snapshotTarget') else None\n return cls(copy_status, expiry_time_usecs, message, snapshot_target)\n", "source": "the_stack_v2_python_sparse", "source_path": "cohesity_management_sdk/models/snapshot_copy_task.py", "source_repo": "cohesity/management-sdk-python", "split": "val", "star_events_count": 24}
{"blob_id": "7a37d7fd23e1ec59636847a95de0a238a699bbb4", "bodies": ["super(RestDataElement, self).__init__()\nself.Database = kwargs['Database']\nself.dbTable = kwargs['Table']\nself.Key = kwargs['Key']\nself.SingleElementTitle = kwargs['SingleElementTitle']\nself.DisplayFormat = kwargs['DisplayFormat']\nself.PutParser = kwargs['PutParser']\nself.has_parent = False\nif kwargs.has_key('ParentKey'):\n self.has_parent = True\n self.ParentKey = kwargs['ParentKey']\nself.has_child = False\nif kwargs.has_key('ChildParentKey'):\n self.has_child = True\n self.ChildParentKey = kwargs['ChildParentKey']\n self.dbChildTable = kwargs['ChildTable']", "Key = kwargs[self.Key]\nOneElement = self.dbTable.query.get_or_404(Key)\nif self.has_parent:\n ParentKeyValue = kwargs.get(self.ParentKey)\n if getattr(OneElement, self.ParentKey) != ParentKeyValue:\n abort(404)\nreturn (marshal(OneElement, self.DisplayFormat, self.SingleElementTitle), 200)", "Key = kwargs[self.Key]\nOneElement = self.dbTable.query.get_or_404(Key)\nif self.has_parent:\n ParentKeyValue = kwargs.get(self.ParentKey)\n if getattr(OneElement, self.ParentKey) != ParentKeyValue:\n abort(404)\nargs = self.PutParser.parse_args()\nargdict = dict(args)\nIfUpdated = lambda x, y: y if x is None else x\nfor attribut in argdict.keys():\n setattr(OneElement, attribut, IfUpdated(getattr(args, attribut), getattr(OneElement, attribut)))\nself.Database.session.commit()\nreturn (marshal(OneElement, self.DisplayFormat, self.SingleElementTitle), 200)", "if self.has_child:\n KeyValue = kwargs.get(self.Key)\n dbQuery = self.dbChildTable.query\n dbQuery = dbQuery.filter(getattr(self.dbChildTable, self.ChildParentKey) == KeyValue)\n AllChildElements = dbQuery.all()\n for Element in AllChildElements:\n db.session.delete(Element)\nKey = kwargs[self.Key]\nOneElement = self.dbTable.query.get_or_404(Key)\nif self.has_parent:\n ParentKeyValue = kwargs.get(self.ParentKey)\n if getattr(OneElement, self.ParentKey) != ParentKeyValue:\n abort(404)\ndb.session.delete(OneElement)\ndb.session.commit()\nreturn jsonify({'result': True})"], "bodies_text": "<|body_start_0|>\n super(RestDataElement, self).__init__()\n self.Database = kwargs['Database']\n self.dbTable = kwargs['Table']\n self.Key = kwargs['Key']\n self.SingleElementTitle = kwargs['SingleElementTitle']\n self.DisplayFormat = kwargs['DisplayFormat']\n self.PutParser = kwargs['PutParser']\n self.has_parent = False\n if kwargs.has_key('ParentKey'):\n self.has_parent = True\n self.ParentKey = kwargs['ParentKey']\n self.has_child = False\n if kwargs.has_key('ChildParentKey'):\n self.has_child = True\n self.ChildParentKey = kwargs['ChildParentKey']\n self.dbChildTable = kwargs['ChildTable']\n<|end_body_0|>\n\n<|body_start_1|>\n Key = kwargs[self.Key]\n OneElement = self.dbTable.query.get_or_404(Key)\n if self.has_parent:\n ParentKeyValue = kwargs.get(self.ParentKey)\n if getattr(OneElement, self.ParentKey) != ParentKeyValue:\n abort(404)\n return (marshal(OneElement, self.DisplayFormat, self.SingleElementTitle), 200)\n<|end_body_1|>\n\n<|body_start_2|>\n Key = kwargs[self.Key]\n OneElement = self.dbTable.query.get_or_404(Key)\n if self.has_parent:\n ParentKeyValue = kwargs.get(self.ParentKey)\n if getattr(OneElement, self.ParentKey) != ParentKeyValue:\n abort(404)\n args = self.PutParser.parse_args()\n argdict = dict(args)\n IfUpdated = lambda x, y: y if x is None else x\n for attribut in argdict.keys():\n setattr(OneElement, attribut, IfUpdated(getattr(args, attribut), getattr(OneElement, attribut)))\n 
self.Database.session.commit()\n return (marshal(OneElement, self.DisplayFormat, self.SingleElementTitle), 200)\n<|end_body_2|>\n\n<|body_start_3|>\n if self.has_child:\n KeyValue = kwargs.get(self.Key)\n dbQuery = self.dbChildTable.query\n dbQuery = dbQuery.filter(getattr(self.dbChildTable, self.ChildParentKey) == KeyValue)\n AllChildElements = dbQuery.all()\n for Element in AllChildElements:\n db.session.delete(Element)\n Key = kwargs[self.Key]\n OneElement = self.dbTable.query.get_or_404(Key)\n if self.has_parent:\n ParentKeyValue = kwargs.get(self.ParentKey)\n if getattr(OneElement, self.ParentKey) != ParentKeyValue:\n abort(404)\n db.session.delete(OneElement)\n db.session.commit()\n return jsonify({'result': True})\n<|end_body_3|>\n", "class_docstring": "Single element from RestDataCollection: Manage with REST a single element of a RestDataCollection : - GET : is allowing to display one given element of the collection - PUT : is allowing to modify one given element of the collection - DELETE : is allowing to delete one given element of the collection", "class_name": "RestDataElement", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RestDataElement:\n \"\"\"Single element from RestDataCollection: Manage with REST a single element of a RestDataCollection : - GET : is allowing to display one given element of the collection - PUT : is allowing to modify one given element of the collection - DELETE : is allowing to delete one given element of the collection\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"RestDataElement collection element constructor: - Database : SQL database which is containing table - Table : data persistence table - SingleElementTitle : JSON title to display for a single element display - DisplayFormat : JSON list of fields to be displayed when a GET is issued - PutParser : JSON list of fields to be parsed when a PUT is received - ParentKey (optional) : ParentKey name in Child object model - ChildTable : child data peristence table\"\"\"\n <|body_0|>\n\n def get(self, **kwargs):\n \"\"\"display a single element. self.Key is the element key provided into the api URL. self.ParentKey (optional) : is the foreign key of parent node provided into the api URL\"\"\"\n <|body_1|>\n\n def put(self, **kwargs):\n \"\"\"update a single element. self.Key is the element key provided into the api URL. self.ParentKey (optional) : is the foreign key of parent node provided into the api URL\"\"\"\n <|body_2|>\n\n def delete(self, **kwargs):\n \"\"\"remove a single element. self.Key is the element key provided into the api URL. 
self.ParentKey (optional) : is the foreign key of parent node provided into the api URL\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(RestDataElement, self).__init__()\n self.Database = kwargs['Database']\n self.dbTable = kwargs['Table']\n self.Key = kwargs['Key']\n self.SingleElementTitle = kwargs['SingleElementTitle']\n self.DisplayFormat = kwargs['DisplayFormat']\n self.PutParser = kwargs['PutParser']\n self.has_parent = False\n if kwargs.has_key('ParentKey'):\n self.has_parent = True\n self.ParentKey = kwargs['ParentKey']\n self.has_child = False\n if kwargs.has_key('ChildParentKey'):\n self.has_child = True\n self.ChildParentKey = kwargs['ChildParentKey']\n self.dbChildTable = kwargs['ChildTable']\n<|end_body_0|>\n\n<|body_start_1|>\n Key = kwargs[self.Key]\n OneElement = self.dbTable.query.get_or_404(Key)\n if self.has_parent:\n ParentKeyValue = kwargs.get(self.ParentKey)\n if getattr(OneElement, self.ParentKey) != ParentKeyValue:\n abort(404)\n return (marshal(OneElement, self.DisplayFormat, self.SingleElementTitle), 200)\n<|end_body_1|>\n\n<|body_start_2|>\n Key = kwargs[self.Key]\n OneElement = self.dbTable.query.get_or_404(Key)\n if self.has_parent:\n ParentKeyValue = kwargs.get(self.ParentKey)\n if getattr(OneElement, self.ParentKey) != ParentKeyValue:\n abort(404)\n args = self.PutParser.parse_args()\n argdict = dict(args)\n IfUpdated = lambda x, y: y if x is None else x\n for attribut in argdict.keys():\n setattr(OneElement, attribut, IfUpdated(getattr(args, attribut), getattr(OneElement, attribut)))\n self.Database.session.commit()\n return (marshal(OneElement, self.DisplayFormat, self.SingleElementTitle), 200)\n<|end_body_2|>\n\n<|body_start_3|>\n if self.has_child:\n KeyValue = kwargs.get(self.Key)\n dbQuery = self.dbChildTable.query\n dbQuery = dbQuery.filter(getattr(self.dbChildTable, self.ChildParentKey) == KeyValue)\n AllChildElements = dbQuery.all()\n for Element in AllChildElements:\n db.session.delete(Element)\n Key = kwargs[self.Key]\n OneElement = self.dbTable.query.get_or_404(Key)\n if self.has_parent:\n ParentKeyValue = kwargs.get(self.ParentKey)\n if getattr(OneElement, self.ParentKey) != ParentKeyValue:\n abort(404)\n db.session.delete(OneElement)\n db.session.commit()\n return jsonify({'result': True})\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000277", "length_bytes": 4414, "license_type": "no_license", "methods": [{"docstring": "RestDataElement collection element constructor: - Database : SQL database which is containing table - Table : data persistence table - SingleElementTitle : JSON title to display for a single element display - DisplayFormat : JSON list of fields to be displayed when a GET is issued - PutParser : JSON list of fields to be parsed when a PUT is received - ParentKey (optional) : ParentKey name in Child object model - ChildTable : child data peristence table", "name": "__init__", "signature": "def __init__(self, **kwargs)"}, {"docstring": "display a single element. self.Key is the element key provided into the api URL. self.ParentKey (optional) : is the foreign key of parent node provided into the api URL", "name": "get", "signature": "def get(self, **kwargs)"}, {"docstring": "update a single element. self.Key is the element key provided into the api URL. self.ParentKey (optional) : is the foreign key of parent node provided into the api URL", "name": "put", "signature": "def put(self, **kwargs)"}, {"docstring": "remove a single element. self.Key is the element key provided into the api URL. 
self.ParentKey (optional) : is the foreign key of parent node provided into the api URL", "name": "delete", "signature": "def delete(self, **kwargs)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_037093", "prompt": "Implement the Python class `RestDataElement` described below.\n\nClass description:\nSingle element from RestDataCollection: Manage with REST a single element of a RestDataCollection : - GET : is allowing to display one given element of the collection - PUT : is allowing to modify one given element of the collection - DELETE : is allowing to delete one given element of the collection\n\nMethod signatures and docstrings:\n- def __init__(self, **kwargs): RestDataElement collection element constructor: - Database : SQL database which is containing table - Table : data persistence table - SingleElementTitle : JSON title to display for a single element display - DisplayFormat : JSON list of fields to be displayed when a GET is issued - PutParser : JSON list of fields to be parsed when a PUT is received - ParentKey (optional) : ParentKey name in Child object model - ChildTable : child data peristence table\n- def get(self, **kwargs): display a single element. self.Key is the element key provided into the api URL. self.ParentKey (optional) : is the foreign key of parent node provided into the api URL\n- def put(self, **kwargs): update a single element. self.Key is the element key provided into the api URL. self.ParentKey (optional) : is the foreign key of parent node provided into the api URL\n- def delete(self, **kwargs): remove a single element. self.Key is the element key provided into the api URL. self.ParentKey (optional) : is the foreign key of parent node provided into the api URL", "prompted_full_text": "Implement the Python class `RestDataElement` described below.\n\nClass description:\nSingle element from RestDataCollection: Manage with REST a single element of a RestDataCollection : - GET : is allowing to display one given element of the collection - PUT : is allowing to modify one given element of the collection - DELETE : is allowing to delete one given element of the collection\n\nMethod signatures and docstrings:\n- def __init__(self, **kwargs): RestDataElement collection element constructor: - Database : SQL database which is containing table - Table : data persistence table - SingleElementTitle : JSON title to display for a single element display - DisplayFormat : JSON list of fields to be displayed when a GET is issued - PutParser : JSON list of fields to be parsed when a PUT is received - ParentKey (optional) : ParentKey name in Child object model - ChildTable : child data peristence table\n- def get(self, **kwargs): display a single element. self.Key is the element key provided into the api URL. self.ParentKey (optional) : is the foreign key of parent node provided into the api URL\n- def put(self, **kwargs): update a single element. self.Key is the element key provided into the api URL. self.ParentKey (optional) : is the foreign key of parent node provided into the api URL\n- def delete(self, **kwargs): remove a single element. self.Key is the element key provided into the api URL. 
self.ParentKey (optional) : is the foreign key of parent node provided into the api URL\n\n<|skeleton|>\nclass RestDataElement:\n \"\"\"Single element from RestDataCollection: Manage with REST a single element of a RestDataCollection : - GET : is allowing to display one given element of the collection - PUT : is allowing to modify one given element of the collection - DELETE : is allowing to delete one given element of the collection\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"RestDataElement collection element constructor: - Database : SQL database which is containing table - Table : data persistence table - SingleElementTitle : JSON title to display for a single element display - DisplayFormat : JSON list of fields to be displayed when a GET is issued - PutParser : JSON list of fields to be parsed when a PUT is received - ParentKey (optional) : ParentKey name in Child object model - ChildTable : child data peristence table\"\"\"\n <|body_0|>\n\n def get(self, **kwargs):\n \"\"\"display a single element. self.Key is the element key provided into the api URL. self.ParentKey (optional) : is the foreign key of parent node provided into the api URL\"\"\"\n <|body_1|>\n\n def put(self, **kwargs):\n \"\"\"update a single element. self.Key is the element key provided into the api URL. self.ParentKey (optional) : is the foreign key of parent node provided into the api URL\"\"\"\n <|body_2|>\n\n def delete(self, **kwargs):\n \"\"\"remove a single element. self.Key is the element key provided into the api URL. self.ParentKey (optional) : is the foreign key of parent node provided into the api URL\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(RestDataElement, self).__init__()\n self.Database = kwargs['Database']\n self.dbTable = kwargs['Table']\n self.Key = kwargs['Key']\n self.SingleElementTitle = kwargs['SingleElementTitle']\n self.DisplayFormat = kwargs['DisplayFormat']\n self.PutParser = kwargs['PutParser']\n self.has_parent = False\n if kwargs.has_key('ParentKey'):\n self.has_parent = True\n self.ParentKey = kwargs['ParentKey']\n self.has_child = False\n if kwargs.has_key('ChildParentKey'):\n self.has_child = True\n self.ChildParentKey = kwargs['ChildParentKey']\n self.dbChildTable = kwargs['ChildTable']\n<|end_body_0|>\n\n<|body_start_1|>\n Key = kwargs[self.Key]\n OneElement = self.dbTable.query.get_or_404(Key)\n if self.has_parent:\n ParentKeyValue = kwargs.get(self.ParentKey)\n if getattr(OneElement, self.ParentKey) != ParentKeyValue:\n abort(404)\n return (marshal(OneElement, self.DisplayFormat, self.SingleElementTitle), 200)\n<|end_body_1|>\n\n<|body_start_2|>\n Key = kwargs[self.Key]\n OneElement = self.dbTable.query.get_or_404(Key)\n if self.has_parent:\n ParentKeyValue = kwargs.get(self.ParentKey)\n if getattr(OneElement, self.ParentKey) != ParentKeyValue:\n abort(404)\n args = self.PutParser.parse_args()\n argdict = dict(args)\n IfUpdated = lambda x, y: y if x is None else x\n for attribut in argdict.keys():\n setattr(OneElement, attribut, IfUpdated(getattr(args, attribut), getattr(OneElement, attribut)))\n self.Database.session.commit()\n return (marshal(OneElement, self.DisplayFormat, self.SingleElementTitle), 200)\n<|end_body_2|>\n\n<|body_start_3|>\n if self.has_child:\n KeyValue = kwargs.get(self.Key)\n dbQuery = self.dbChildTable.query\n dbQuery = dbQuery.filter(getattr(self.dbChildTable, self.ChildParentKey) == KeyValue)\n AllChildElements = dbQuery.all()\n for Element in AllChildElements:\n db.session.delete(Element)\n Key = kwargs[self.Key]\n 
OneElement = self.dbTable.query.get_or_404(Key)\n if self.has_parent:\n ParentKeyValue = kwargs.get(self.ParentKey)\n if getattr(OneElement, self.ParentKey) != ParentKeyValue:\n abort(404)\n db.session.delete(OneElement)\n db.session.commit()\n return jsonify({'result': True})\n<|end_body_3|>\n", "revision_id": "8f107644a74fe46827ec5ed53d0457022bd1608b", "skeleton": "<|skeleton|>\nclass RestDataElement:\n \"\"\"Single element from RestDataCollection: Manage with REST a single element of a RestDataCollection : - GET : is allowing to display one given element of the collection - PUT : is allowing to modify one given element of the collection - DELETE : is allowing to delete one given element of the collection\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"RestDataElement collection element constructor: - Database : SQL database which is containing table - Table : data persistence table - SingleElementTitle : JSON title to display for a single element display - DisplayFormat : JSON list of fields to be displayed when a GET is issued - PutParser : JSON list of fields to be parsed when a PUT is received - ParentKey (optional) : ParentKey name in Child object model - ChildTable : child data peristence table\"\"\"\n <|body_0|>\n\n def get(self, **kwargs):\n \"\"\"display a single element. self.Key is the element key provided into the api URL. self.ParentKey (optional) : is the foreign key of parent node provided into the api URL\"\"\"\n <|body_1|>\n\n def put(self, **kwargs):\n \"\"\"update a single element. self.Key is the element key provided into the api URL. self.ParentKey (optional) : is the foreign key of parent node provided into the api URL\"\"\"\n <|body_2|>\n\n def delete(self, **kwargs):\n \"\"\"remove a single element. self.Key is the element key provided into the api URL. 
self.ParentKey (optional) : is the foreign key of parent node provided into the api URL\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class RestDataElement:\n \"\"\"Single element from RestDataCollection: Manage with REST a single element of a RestDataCollection : - GET : is allowing to display one given element of the collection - PUT : is allowing to modify one given element of the collection - DELETE : is allowing to delete one given element of the collection\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"RestDataElement collection element constructor: - Database : SQL database which is containing table - Table : data persistence table - SingleElementTitle : JSON title to display for a single element display - DisplayFormat : JSON list of fields to be displayed when a GET is issued - PutParser : JSON list of fields to be parsed when a PUT is received - ParentKey (optional) : ParentKey name in Child object model - ChildTable : child data peristence table\"\"\"\n super(RestDataElement, self).__init__()\n self.Database = kwargs['Database']\n self.dbTable = kwargs['Table']\n self.Key = kwargs['Key']\n self.SingleElementTitle = kwargs['SingleElementTitle']\n self.DisplayFormat = kwargs['DisplayFormat']\n self.PutParser = kwargs['PutParser']\n self.has_parent = False\n if kwargs.has_key('ParentKey'):\n self.has_parent = True\n self.ParentKey = kwargs['ParentKey']\n self.has_child = False\n if kwargs.has_key('ChildParentKey'):\n self.has_child = True\n self.ChildParentKey = kwargs['ChildParentKey']\n self.dbChildTable = kwargs['ChildTable']\n\n def get(self, **kwargs):\n \"\"\"display a single element. self.Key is the element key provided into the api URL. self.ParentKey (optional) : is the foreign key of parent node provided into the api URL\"\"\"\n Key = kwargs[self.Key]\n OneElement = self.dbTable.query.get_or_404(Key)\n if self.has_parent:\n ParentKeyValue = kwargs.get(self.ParentKey)\n if getattr(OneElement, self.ParentKey) != ParentKeyValue:\n abort(404)\n return (marshal(OneElement, self.DisplayFormat, self.SingleElementTitle), 200)\n\n def put(self, **kwargs):\n \"\"\"update a single element. self.Key is the element key provided into the api URL. self.ParentKey (optional) : is the foreign key of parent node provided into the api URL\"\"\"\n Key = kwargs[self.Key]\n OneElement = self.dbTable.query.get_or_404(Key)\n if self.has_parent:\n ParentKeyValue = kwargs.get(self.ParentKey)\n if getattr(OneElement, self.ParentKey) != ParentKeyValue:\n abort(404)\n args = self.PutParser.parse_args()\n argdict = dict(args)\n IfUpdated = lambda x, y: y if x is None else x\n for attribut in argdict.keys():\n setattr(OneElement, attribut, IfUpdated(getattr(args, attribut), getattr(OneElement, attribut)))\n self.Database.session.commit()\n return (marshal(OneElement, self.DisplayFormat, self.SingleElementTitle), 200)\n\n def delete(self, **kwargs):\n \"\"\"remove a single element. self.Key is the element key provided into the api URL. 
self.ParentKey (optional) : is the foreign key of parent node provided into the api URL\"\"\"\n if self.has_child:\n KeyValue = kwargs.get(self.Key)\n dbQuery = self.dbChildTable.query\n dbQuery = dbQuery.filter(getattr(self.dbChildTable, self.ChildParentKey) == KeyValue)\n AllChildElements = dbQuery.all()\n for Element in AllChildElements:\n db.session.delete(Element)\n Key = kwargs[self.Key]\n OneElement = self.dbTable.query.get_or_404(Key)\n if self.has_parent:\n ParentKeyValue = kwargs.get(self.ParentKey)\n if getattr(OneElement, self.ParentKey) != ParentKeyValue:\n abort(404)\n db.session.delete(OneElement)\n db.session.commit()\n return jsonify({'result': True})\n", "source": "the_stack_v2_python_sparse", "source_path": "restapp/view_RestDataElement.py", "source_repo": "ldurandadomia/Flask-Restful", "split": "val", "star_events_count": 0}
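Editorial note on the RestDataElement row above: the constructor uses `kwargs.has_key(...)`, a Python 2 idiom removed in Python 3, so the recorded code only runs under Python 2. A sketch of the same optional-keyword handling in Python 3 follows; the field names are copied from the row, everything else is illustrative.

# Python 3 rewrite of only the optional-kwarg handling from
# RestDataElement.__init__; 'key in kwargs' replaces kwargs.has_key('key').
class RestDataElementPy3:
    def __init__(self, **kwargs):
        self.has_parent = 'ParentKey' in kwargs
        if self.has_parent:
            self.ParentKey = kwargs['ParentKey']
        self.has_child = 'ChildParentKey' in kwargs
        if self.has_child:
            self.ChildParentKey = kwargs['ChildParentKey']
            self.dbChildTable = kwargs['ChildTable']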
{"blob_id": "b3b623242d3fa5ccc9332c1d2d6a731ab593bc8a", "bodies": ["length = len(height)\nmax_water = 0\nfor i in range(length - 1):\n for j in range(i + 1, length, 1):\n max_water = max(max_water, min(height[i], height[j]) * (j - i))\nreturn max_water", "a = 0\nb = len(height) - 1\nmax_water = 0\nwhile a != b:\n max_water = max(max_water, (b - a) * min(height[a], height[b]))\n if height[a] < height[b]:\n a += 1\n else:\n b -= 1\nreturn max_water"], "bodies_text": "<|body_start_0|>\n length = len(height)\n max_water = 0\n for i in range(length - 1):\n for j in range(i + 1, length, 1):\n max_water = max(max_water, min(height[i], height[j]) * (j - i))\n return max_water\n<|end_body_0|>\n\n<|body_start_1|>\n a = 0\n b = len(height) - 1\n max_water = 0\n while a != b:\n max_water = max(max_water, (b - a) * min(height[a], height[b]))\n if height[a] < height[b]:\n a += 1\n else:\n b -= 1\n return max_water\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def maxArea(self, height):\n \"\"\":type height: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def maxArea1(self, height):\n \"\"\":type height: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n length = len(height)\n max_water = 0\n for i in range(length - 1):\n for j in range(i + 1, length, 1):\n max_water = max(max_water, min(height[i], height[j]) * (j - i))\n return max_water\n<|end_body_0|>\n\n<|body_start_1|>\n a = 0\n b = len(height) - 1\n max_water = 0\n while a != b:\n max_water = max(max_water, (b - a) * min(height[a], height[b]))\n if height[a] < height[b]:\n a += 1\n else:\n b -= 1\n return max_water\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000278", "length_bytes": 958, "license_type": "no_license", "methods": [{"docstring": ":type height: List[int] :rtype: int", "name": "maxArea", "signature": "def maxArea(self, height)"}, {"docstring": ":type height: List[int] :rtype: int", "name": "maxArea1", "signature": "def maxArea1(self, height)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def maxArea(self, height): :type height: List[int] :rtype: int\n- def maxArea1(self, height): :type height: List[int] :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def maxArea(self, height): :type height: List[int] :rtype: int\n- def maxArea1(self, height): :type height: List[int] :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def maxArea(self, height):\n \"\"\":type height: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def maxArea1(self, height):\n \"\"\":type height: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n length = len(height)\n max_water = 0\n for i in range(length - 1):\n for j in range(i + 1, length, 1):\n max_water = max(max_water, min(height[i], height[j]) * (j - i))\n return max_water\n<|end_body_0|>\n\n<|body_start_1|>\n a = 0\n b = len(height) - 1\n max_water = 0\n while a != b:\n max_water = max(max_water, (b - a) * min(height[a], height[b]))\n if height[a] < height[b]:\n a += 1\n else:\n b -= 1\n return max_water\n<|end_body_1|>\n", "revision_id": "d4a33dc28a6d3f99d5179fdb6a83b2ab8c5a0beb", "skeleton": "<|skeleton|>\nclass Solution:\n\n 
def maxArea(self, height):\n \"\"\":type height: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def maxArea1(self, height):\n \"\"\":type height: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def maxArea(self, height):\n \"\"\":type height: List[int] :rtype: int\"\"\"\n length = len(height)\n max_water = 0\n for i in range(length - 1):\n for j in range(i + 1, length, 1):\n max_water = max(max_water, min(height[i], height[j]) * (j - i))\n return max_water\n\n def maxArea1(self, height):\n \"\"\":type height: List[int] :rtype: int\"\"\"\n a = 0\n b = len(height) - 1\n max_water = 0\n while a != b:\n max_water = max(max_water, (b - a) * min(height[a], height[b]))\n if height[a] < height[b]:\n a += 1\n else:\n b -= 1\n return max_water\n", "source": "the_stack_v2_python_sparse", "source_path": "leetcode/11_most_water.py", "source_repo": "294150302hxq/python_learn", "split": "val", "star_events_count": 0}
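Editorial note on the Solution row above: it pairs an O(n²) brute force with the classic O(n) two-pointer answer to the container-with-most-water problem. A small randomized cross-check that the two agree is sketched below; the function names and harness are illustrative, with the algorithm bodies taken from the row.

import random

# Brute force: try every pair of walls (exact by construction).
def max_area_bruteforce(height):
    n = len(height)
    return max((min(height[i], height[j]) * (j - i)
                for i in range(n - 1) for j in range(i + 1, n)), default=0)

# Two-pointer: moving the shorter side inward is the only move
# that can possibly enlarge the contained area.
def max_area_two_pointer(height):
    a, b, best = 0, len(height) - 1, 0
    while a < b:
        best = max(best, (b - a) * min(height[a], height[b]))
        if height[a] < height[b]:
            a += 1
        else:
            b -= 1
    return best

# Both implementations should return the same value on random inputs.
for _ in range(200):
    h = [random.randint(0, 20) for _ in range(random.randint(2, 30))]
    assert max_area_bruteforce(h) == max_area_two_pointer(h)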
{"blob_id": "5d14637d9c9369c49456768c1889dbf02efc81e7", "bodies": ["db_client = None\ntry:\n db_client = self.mongo_connection_factory.get_connection()\n client_json = self.client_to_json(client)\n db_client['bob']['clients'].insert_one(client_json)\nexcept PyMongoError as error:\n print('Error while inserting a client.', error)\nfinally:\n db_client.close()", "db_client = None\ntry:\n db_client = self.mongo_connection_factory.get_connection()\n query = {'user_id': client.user_id, 'device_id': client.device_id, 'token': client.token}\n result = db_client['bob']['clients'].find(query)\n if result:\n for client_json in result:\n client_returned = self.json_to_client(client_json)\n return client_returned\n return\nexcept PyMongoError as error:\n print('Error while finding a client.', error)\nfinally:\n db_client.close()", "db_client = None\ntry:\n db_client = self.mongo_connection_factory.get_connection()\n query = {'user_id': client.user_id, 'device_id': client.device_id, 'password': client.password}\n result = db_client['bob']['clients'].find(query)\n if result:\n for client_json in result:\n client_returned = self.json_to_client(client_json)\n return client_returned\n return\nexcept PyMongoError as error:\n print('Error while finding a client.', error)\nfinally:\n db_client.close()", "db_client = None\ntry:\n db_client = self.mongo_connection_factory.get_connection()\n query = {'user_id': client.user_id, 'device_id': client.device_id}\n new_values = {'$set': {'token': client.token, 'token_date': client.token_date}}\n db_client['bob']['clients'].update_one(query, new_values)\nexcept PyMongoError as error:\n print('Error while updating a client.', error)\nfinally:\n db_client.close()", "client_json = {}\nif client:\n if client.device_id:\n client_json['device_id'] = client.device_id\n if client.user_id:\n client_json['user_id'] = client.user_id\n if client.token:\n client_json['token'] = client.token\n if client.token_date:\n client_json['token_date'] = client.token_date\n if client.panic_password:\n client_json['panic_password'] = client.panic_password\n if client.account_id:\n client_json['account_id'] = client.account_id\n if client.password:\n client_json['password'] = client.password\nreturn client_json", "client = Client()\nif 'device_id' in client_json:\n client.device_id = client_json['device_id']\nif 'user_id' in client_json:\n client.user_id = client_json['user_id']\nif 'token' in client_json:\n client.token = client_json['token']\nif 'token_date' in client_json:\n client.token_date = client_json['token_date']\nif 'panic_password' in client_json:\n client.panic_password = client_json['panic_password']\nif 'account_id' in client_json:\n client.account_id = client_json['account_id']\nif 'password' in client_json:\n client.password = client_json['password']\nreturn client"], "bodies_text": "<|body_start_0|>\n db_client = None\n try:\n db_client = self.mongo_connection_factory.get_connection()\n client_json = self.client_to_json(client)\n db_client['bob']['clients'].insert_one(client_json)\n except PyMongoError as error:\n print('Error while inserting a client.', error)\n finally:\n db_client.close()\n<|end_body_0|>\n\n<|body_start_1|>\n db_client = None\n try:\n db_client = self.mongo_connection_factory.get_connection()\n query = {'user_id': client.user_id, 'device_id': client.device_id, 'token': client.token}\n result = db_client['bob']['clients'].find(query)\n if result:\n for client_json in result:\n client_returned = self.json_to_client(client_json)\n return client_returned\n 
return\n except PyMongoError as error:\n print('Error while finding a client.', error)\n finally:\n db_client.close()\n<|end_body_1|>\n\n<|body_start_2|>\n db_client = None\n try:\n db_client = self.mongo_connection_factory.get_connection()\n query = {'user_id': client.user_id, 'device_id': client.device_id, 'password': client.password}\n result = db_client['bob']['clients'].find(query)\n if result:\n for client_json in result:\n client_returned = self.json_to_client(client_json)\n return client_returned\n return\n except PyMongoError as error:\n print('Error while finding a client.', error)\n finally:\n db_client.close()\n<|end_body_2|>\n\n<|body_start_3|>\n db_client = None\n try:\n db_client = self.mongo_connection_factory.get_connection()\n query = {'user_id': client.user_id, 'device_id': client.device_id}\n new_values = {'$set': {'token': client.token, 'token_date': client.token_date}}\n db_client['bob']['clients'].update_one(query, new_values)\n except PyMongoError as error:\n print('Error while updating a client.', error)\n finally:\n db_client.close()\n<|end_body_3|>\n\n<|body_start_4|>\n client_json = {}\n if client:\n if client.device_id:\n client_json['device_id'] = client.device_id\n if client.user_id:\n client_json['user_id'] = client.user_id\n if client.token:\n client_json['token'] = client.token\n if client.token_date:\n client_json['token_date'] = client.token_date\n if client.panic_password:\n client_json['panic_password'] = client.panic_password\n if client.account_id:\n client_json['account_id'] = client.account_id\n if client.password:\n client_json['password'] = client.password\n return client_json\n<|end_body_4|>\n\n<|body_start_5|>\n client = Client()\n if 'device_id' in client_json:\n client.device_id = client_json['device_id']\n if 'user_id' in client_json:\n client.user_id = client_json['user_id']\n if 'token' in client_json:\n client.token = client_json['token']\n if 'token_date' in client_json:\n client.token_date = client_json['token_date']\n if 'panic_password' in client_json:\n client.panic_password = client_json['panic_password']\n if 'account_id' in client_json:\n client.account_id = client_json['account_id']\n if 'password' in client_json:\n client.password = client_json['password']\n return client\n<|end_body_5|>\n", "class_docstring": "", "class_name": "Client", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Client:\n\n def insert_client(self, client):\n \"\"\"This method inserts a client on the database. 
:param client: :return: None\"\"\"\n <|body_0|>\n\n def find_client_by_user_device_id(self, client):\n \"\"\"This method has to find a client using user_id, device_id, and token :param client: user_id, device_id, and token must be on the object :return: Client object\"\"\"\n <|body_1|>\n\n def find_client_login(self, client):\n \"\"\"This method has to find a client using user_id and device_id :param client: user_id and device_id must be on the object :return: Client object\"\"\"\n <|body_2|>\n\n def update_client(self, client):\n \"\"\"This method has to find a client using user_id, device_id, and token :param client: user_id, device_id, and token must be on the object :return: Client object\"\"\"\n <|body_3|>\n\n def client_to_json(self, client):\n \"\"\"The method is responsible to convert the class object to JSON :param client: client object :return: client_json\"\"\"\n <|body_4|>\n\n def json_to_client(self, client_json):\n \"\"\"The method is responsible to convert the JSON to client object :param client_json: client into JSON mode :return: client object\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n db_client = None\n try:\n db_client = self.mongo_connection_factory.get_connection()\n client_json = self.client_to_json(client)\n db_client['bob']['clients'].insert_one(client_json)\n except PyMongoError as error:\n print('Error while inserting a client.', error)\n finally:\n db_client.close()\n<|end_body_0|>\n\n<|body_start_1|>\n db_client = None\n try:\n db_client = self.mongo_connection_factory.get_connection()\n query = {'user_id': client.user_id, 'device_id': client.device_id, 'token': client.token}\n result = db_client['bob']['clients'].find(query)\n if result:\n for client_json in result:\n client_returned = self.json_to_client(client_json)\n return client_returned\n return\n except PyMongoError as error:\n print('Error while finding a client.', error)\n finally:\n db_client.close()\n<|end_body_1|>\n\n<|body_start_2|>\n db_client = None\n try:\n db_client = self.mongo_connection_factory.get_connection()\n query = {'user_id': client.user_id, 'device_id': client.device_id, 'password': client.password}\n result = db_client['bob']['clients'].find(query)\n if result:\n for client_json in result:\n client_returned = self.json_to_client(client_json)\n return client_returned\n return\n except PyMongoError as error:\n print('Error while finding a client.', error)\n finally:\n db_client.close()\n<|end_body_2|>\n\n<|body_start_3|>\n db_client = None\n try:\n db_client = self.mongo_connection_factory.get_connection()\n query = {'user_id': client.user_id, 'device_id': client.device_id}\n new_values = {'$set': {'token': client.token, 'token_date': client.token_date}}\n db_client['bob']['clients'].update_one(query, new_values)\n except PyMongoError as error:\n print('Error while updating a client.', error)\n finally:\n db_client.close()\n<|end_body_3|>\n\n<|body_start_4|>\n client_json = {}\n if client:\n if client.device_id:\n client_json['device_id'] = client.device_id\n if client.user_id:\n client_json['user_id'] = client.user_id\n if client.token:\n client_json['token'] = client.token\n if client.token_date:\n client_json['token_date'] = client.token_date\n if client.panic_password:\n client_json['panic_password'] = client.panic_password\n if client.account_id:\n client_json['account_id'] = client.account_id\n if client.password:\n client_json['password'] = client.password\n return client_json\n<|end_body_4|>\n\n<|body_start_5|>\n client = Client()\n if 'device_id' in 
client_json:\n client.device_id = client_json['device_id']\n if 'user_id' in client_json:\n client.user_id = client_json['user_id']\n if 'token' in client_json:\n client.token = client_json['token']\n if 'token_date' in client_json:\n client.token_date = client_json['token_date']\n if 'panic_password' in client_json:\n client.panic_password = client_json['panic_password']\n if 'account_id' in client_json:\n client.account_id = client_json['account_id']\n if 'password' in client_json:\n client.password = client_json['password']\n return client\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000279", "length_bytes": 5669, "license_type": "no_license", "methods": [{"docstring": "This method inserts a client on the database. :param client: :return: None", "name": "insert_client", "signature": "def insert_client(self, client)"}, {"docstring": "This method has to find a client using user_id, device_id, and token :param client: user_id, device_id, and token must be on the object :return: Client object", "name": "find_client_by_user_device_id", "signature": "def find_client_by_user_device_id(self, client)"}, {"docstring": "This method has to find a client using user_id and device_id :param client: user_id and device_id must be on the object :return: Client object", "name": "find_client_login", "signature": "def find_client_login(self, client)"}, {"docstring": "This method has to find a client using user_id, device_id, and token :param client: user_id, device_id, and token must be on the object :return: Client object", "name": "update_client", "signature": "def update_client(self, client)"}, {"docstring": "The method is responsible to convert the class object to JSON :param client: client object :return: client_json", "name": "client_to_json", "signature": "def client_to_json(self, client)"}, {"docstring": "The method is responsible to convert the JSON to client object :param client_json: client into JSON mode :return: client object", "name": "json_to_client", "signature": "def json_to_client(self, client_json)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_train_036241", "prompt": "Implement the Python class `Client` described below.\n\nClass description:\nImplement the Client class.\n\nMethod signatures and docstrings:\n- def insert_client(self, client): This method inserts a client on the database. :param client: :return: None\n- def find_client_by_user_device_id(self, client): This method has to find a client using user_id, device_id, and token :param client: user_id, device_id, and token must be on the object :return: Client object\n- def find_client_login(self, client): This method has to find a client using user_id and device_id :param client: user_id and device_id must be on the object :return: Client object\n- def update_client(self, client): This method has to find a client using user_id, device_id, and token :param client: user_id, device_id, and token must be on the object :return: Client object\n- def client_to_json(self, client): The method is responsible to convert the class object to JSON :param client: client object :return: client_json\n- def json_to_client(self, client_json): The method is responsible to convert the JSON to client object :param client_json: client into JSON mode :return: client object", "prompted_full_text": "Implement the Python class `Client` described below.\n\nClass description:\nImplement the Client class.\n\nMethod signatures and docstrings:\n- def insert_client(self, client): This method inserts a client on the database. 
:param client: :return: None\n- def find_client_by_user_device_id(self, client): This method has to find a client using user_id, device_id, and token :param client: user_id, device_id, and token must be on the object :return: Client object\n- def find_client_login(self, client): This method has to find a client using user_id and device_id :param client: user_id and device_id must be on the object :return: Client object\n- def update_client(self, client): This method has to find a client using user_id, device_id, and token :param client: user_id, device_id, and token must be on the object :return: Client object\n- def client_to_json(self, client): The method is responsible to convert the class object to JSON :param client: client object :return: client_json\n- def json_to_client(self, client_json): The method is responsible to convert the JSON to client object :param client_json: client into JSON mode :return: client object\n\n<|skeleton|>\nclass Client:\n\n def insert_client(self, client):\n \"\"\"This method inserts a client on the database. :param client: :return: None\"\"\"\n <|body_0|>\n\n def find_client_by_user_device_id(self, client):\n \"\"\"This method has to find a client using user_id, device_id, and token :param client: user_id, device_id, and token must be on the object :return: Client object\"\"\"\n <|body_1|>\n\n def find_client_login(self, client):\n \"\"\"This method has to find a client using user_id and device_id :param client: user_id and device_id must be on the object :return: Client object\"\"\"\n <|body_2|>\n\n def update_client(self, client):\n \"\"\"This method has to find a client using user_id, device_id, and token :param client: user_id, device_id, and token must be on the object :return: Client object\"\"\"\n <|body_3|>\n\n def client_to_json(self, client):\n \"\"\"The method is responsible to convert the class object to JSON :param client: client object :return: client_json\"\"\"\n <|body_4|>\n\n def json_to_client(self, client_json):\n \"\"\"The method is responsible to convert the JSON to client object :param client_json: client into JSON mode :return: client object\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n db_client = None\n try:\n db_client = self.mongo_connection_factory.get_connection()\n client_json = self.client_to_json(client)\n db_client['bob']['clients'].insert_one(client_json)\n except PyMongoError as error:\n print('Error while inserting a client.', error)\n finally:\n db_client.close()\n<|end_body_0|>\n\n<|body_start_1|>\n db_client = None\n try:\n db_client = self.mongo_connection_factory.get_connection()\n query = {'user_id': client.user_id, 'device_id': client.device_id, 'token': client.token}\n result = db_client['bob']['clients'].find(query)\n if result:\n for client_json in result:\n client_returned = self.json_to_client(client_json)\n return client_returned\n return\n except PyMongoError as error:\n print('Error while finding a client.', error)\n finally:\n db_client.close()\n<|end_body_1|>\n\n<|body_start_2|>\n db_client = None\n try:\n db_client = self.mongo_connection_factory.get_connection()\n query = {'user_id': client.user_id, 'device_id': client.device_id, 'password': client.password}\n result = db_client['bob']['clients'].find(query)\n if result:\n for client_json in result:\n client_returned = self.json_to_client(client_json)\n return client_returned\n return\n except PyMongoError as error:\n print('Error while finding a client.', error)\n finally:\n db_client.close()\n<|end_body_2|>\n\n<|body_start_3|>\n 
db_client = None\n try:\n db_client = self.mongo_connection_factory.get_connection()\n query = {'user_id': client.user_id, 'device_id': client.device_id}\n new_values = {'$set': {'token': client.token, 'token_date': client.token_date}}\n db_client['bob']['clients'].update_one(query, new_values)\n except PyMongoError as error:\n print('Error while updating a client.', error)\n finally:\n db_client.close()\n<|end_body_3|>\n\n<|body_start_4|>\n client_json = {}\n if client:\n if client.device_id:\n client_json['device_id'] = client.device_id\n if client.user_id:\n client_json['user_id'] = client.user_id\n if client.token:\n client_json['token'] = client.token\n if client.token_date:\n client_json['token_date'] = client.token_date\n if client.panic_password:\n client_json['panic_password'] = client.panic_password\n if client.account_id:\n client_json['account_id'] = client.account_id\n if client.password:\n client_json['password'] = client.password\n return client_json\n<|end_body_4|>\n\n<|body_start_5|>\n client = Client()\n if 'device_id' in client_json:\n client.device_id = client_json['device_id']\n if 'user_id' in client_json:\n client.user_id = client_json['user_id']\n if 'token' in client_json:\n client.token = client_json['token']\n if 'token_date' in client_json:\n client.token_date = client_json['token_date']\n if 'panic_password' in client_json:\n client.panic_password = client_json['panic_password']\n if 'account_id' in client_json:\n client.account_id = client_json['account_id']\n if 'password' in client_json:\n client.password = client_json['password']\n return client\n<|end_body_5|>\n", "revision_id": "b98ddf6443418bad5fc9057b5abe42134943e8f9", "skeleton": "<|skeleton|>\nclass Client:\n\n def insert_client(self, client):\n \"\"\"This method inserts a client on the database. :param client: :return: None\"\"\"\n <|body_0|>\n\n def find_client_by_user_device_id(self, client):\n \"\"\"This method has to find a client using user_id, device_id, and token :param client: user_id, device_id, and token must be on the object :return: Client object\"\"\"\n <|body_1|>\n\n def find_client_login(self, client):\n \"\"\"This method has to find a client using user_id and device_id :param client: user_id and device_id must be on the object :return: Client object\"\"\"\n <|body_2|>\n\n def update_client(self, client):\n \"\"\"This method has to find a client using user_id, device_id, and token :param client: user_id, device_id, and token must be on the object :return: Client object\"\"\"\n <|body_3|>\n\n def client_to_json(self, client):\n \"\"\"The method is responsible to convert the class object to JSON :param client: client object :return: client_json\"\"\"\n <|body_4|>\n\n def json_to_client(self, client_json):\n \"\"\"The method is responsible to convert the JSON to client object :param client_json: client into JSON mode :return: client object\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Client:\n def insert_client(self, client):\n \"\"\"This method inserts a client on the database. 
:param client: :return: None\"\"\"\n db_client = None\n try:\n db_client = self.mongo_connection_factory.get_connection()\n client_json = self.client_to_json(client)\n db_client['bob']['clients'].insert_one(client_json)\n except PyMongoError as error:\n print('Error while inserting a client.', error)\n finally:\n db_client.close()\n\n def find_client_by_user_device_id(self, client):\n \"\"\"This method has to find a client using user_id, device_id, and token :param client: user_id, device_id, and token must be on the object :return: Client object\"\"\"\n db_client = None\n try:\n db_client = self.mongo_connection_factory.get_connection()\n query = {'user_id': client.user_id, 'device_id': client.device_id, 'token': client.token}\n result = db_client['bob']['clients'].find(query)\n if result:\n for client_json in result:\n client_returned = self.json_to_client(client_json)\n return client_returned\n return\n except PyMongoError as error:\n print('Error while finding a client.', error)\n finally:\n db_client.close()\n\n def find_client_login(self, client):\n \"\"\"This method has to find a client using user_id and device_id :param client: user_id and device_id must be on the object :return: Client object\"\"\"\n db_client = None\n try:\n db_client = self.mongo_connection_factory.get_connection()\n query = {'user_id': client.user_id, 'device_id': client.device_id, 'password': client.password}\n result = db_client['bob']['clients'].find(query)\n if result:\n for client_json in result:\n client_returned = self.json_to_client(client_json)\n return client_returned\n return\n except PyMongoError as error:\n print('Error while finding a client.', error)\n finally:\n db_client.close()\n\n def update_client(self, client):\n \"\"\"This method has to find a client using user_id, device_id, and token :param client: user_id, device_id, and token must be on the object :return: Client object\"\"\"\n db_client = None\n try:\n db_client = self.mongo_connection_factory.get_connection()\n query = {'user_id': client.user_id, 'device_id': client.device_id}\n new_values = {'$set': {'token': client.token, 'token_date': client.token_date}}\n db_client['bob']['clients'].update_one(query, new_values)\n except PyMongoError as error:\n print('Error while updating a client.', error)\n finally:\n db_client.close()\n\n def client_to_json(self, client):\n \"\"\"The method is responsible to convert the class object to JSON :param client: client object :return: client_json\"\"\"\n client_json = {}\n if client:\n if client.device_id:\n client_json['device_id'] = client.device_id\n if client.user_id:\n client_json['user_id'] = client.user_id\n if client.token:\n client_json['token'] = client.token\n if client.token_date:\n client_json['token_date'] = client.token_date\n if client.panic_password:\n client_json['panic_password'] = client.panic_password\n if client.account_id:\n client_json['account_id'] = client.account_id\n if client.password:\n client_json['password'] = client.password\n return client_json\n\n def json_to_client(self, client_json):\n \"\"\"The method is responsible to convert the JSON to client object :param client_json: client into JSON mode :return: client object\"\"\"\n client = Client()\n if 'device_id' in client_json:\n client.device_id = client_json['device_id']\n if 'user_id' in client_json:\n client.user_id = client_json['user_id']\n if 'token' in client_json:\n client.token = client_json['token']\n if 'token_date' in client_json:\n client.token_date = client_json['token_date']\n if 'panic_password' in 
client_json:\n client.panic_password = client_json['panic_password']\n if 'account_id' in client_json:\n client.account_id = client_json['account_id']\n if 'password' in client_json:\n client.password = client_json['password']\n return client\n", "source": "the_stack_v2_python_sparse", "source_path": "app/model/client.py", "source_repo": "Gabriel94Dantas/BOB-BlindOpenBank-API", "split": "val", "star_events_count": 0}
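Editorial note on the Client row above: every method's `finally` block calls `db_client.close()` even when `get_connection()` raised and `db_client` is still None, which would throw AttributeError and mask the original failure. A guarded sketch of the insert path follows, assuming pymongo is available; `mongo_connection_factory` and the 'bob' database name are taken from the row, the rest is illustrative.

from pymongo.errors import PyMongoError

# Sketch of Client.insert_client with a None-guard on close().
def insert_client(self, client):
    db_client = None
    try:
        db_client = self.mongo_connection_factory.get_connection()
        db_client['bob']['clients'].insert_one(self.client_to_json(client))
    except PyMongoError as error:
        print('Error while inserting a client.', error)
    finally:
        # close() on None would raise AttributeError and hide the real error.
        if db_client is not None:
            db_client.close()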
{"blob_id": "f34eec7917ae80b41ea65dfa3d9c9660ef75b474", "bodies": ["log.debug('GmosaicETI __init__')\nPyrafETI.__init__(self, inputs, params)\nself.clparam_dict = {}\nself.add_file(InAtList(inputs, params, ad))\nself.add_file(OutAtList(inputs, params, ad))\nself.add_file(LogFile(inputs, params))\nself.add_param(FlPaste(inputs, params))\nself.add_param(FlFixpix(inputs, params))\nself.add_param(Geointer(inputs, params))\nself.add_param(FlVardq(inputs, params, ad))\nself.add_param(FlClean(inputs, params, ad))\nfor param in mosaic_detectors_hardcoded_params:\n self.add_param(GmosaicParam(inputs, params, param, mosaic_detectors_hardcoded_params[param]))", "log.debug('GmosaicETI.execute()')\nxcldict = copy(self.clparam_dict)\nfor fil in self.file_objs:\n xcldict.update(fil.get_parameter())\nfor par in self.param_objs:\n xcldict.update(par.get_parameter())\niraf.unlearn(iraf.gmos.gmosaic)\nfor par in xcldict:\n if par != 'Stderr' and par != 'Stdout':\n gemini.gmos.gmosaic.setParam(par, xcldict[par])\nlog.fullinfo('\\nGMOSAIC PARAMETERS:\\n')\niraf.lpar(iraf.gmos.gmosaic, Stderr=xcldict['Stderr'], Stdout=xcldict['Stdout'])\ntry:\n gemini.gmos.gmosaic(**xcldict)\nexcept:\n raise RuntimeError('The IRAF task gmos.gmosaic failed')\nif gemini.gmos.gmosaic.status:\n raise RuntimeError('The IRAF task gmos.gmosaic failed')\nelse:\n log.fullinfo('The IRAF task gmos.gmosaic completed successfully')", "log.debug('GmosaicETI.run()')\nadlist = []\nself.prepare()\nself.execute()\nadlist = self.recover()\nself.clean()\nreturn adlist", "log.debug('GmosaicETI.recover()')\nadlist = []\nfor par in self.param_objs:\n par.recover()\nfor fil in self.file_objs:\n if isinstance(fil, OutAtList):\n adlist.extend(fil.recover())\n else:\n fil.recover()\nif len(adlist) == 1:\n return adlist[0]\nelse:\n return adlist"], "bodies_text": "<|body_start_0|>\n log.debug('GmosaicETI __init__')\n PyrafETI.__init__(self, inputs, params)\n self.clparam_dict = {}\n self.add_file(InAtList(inputs, params, ad))\n self.add_file(OutAtList(inputs, params, ad))\n self.add_file(LogFile(inputs, params))\n self.add_param(FlPaste(inputs, params))\n self.add_param(FlFixpix(inputs, params))\n self.add_param(Geointer(inputs, params))\n self.add_param(FlVardq(inputs, params, ad))\n self.add_param(FlClean(inputs, params, ad))\n for param in mosaic_detectors_hardcoded_params:\n self.add_param(GmosaicParam(inputs, params, param, mosaic_detectors_hardcoded_params[param]))\n<|end_body_0|>\n\n<|body_start_1|>\n log.debug('GmosaicETI.execute()')\n xcldict = copy(self.clparam_dict)\n for fil in self.file_objs:\n xcldict.update(fil.get_parameter())\n for par in self.param_objs:\n xcldict.update(par.get_parameter())\n iraf.unlearn(iraf.gmos.gmosaic)\n for par in xcldict:\n if par != 'Stderr' and par != 'Stdout':\n gemini.gmos.gmosaic.setParam(par, xcldict[par])\n log.fullinfo('\\nGMOSAIC PARAMETERS:\\n')\n iraf.lpar(iraf.gmos.gmosaic, Stderr=xcldict['Stderr'], Stdout=xcldict['Stdout'])\n try:\n gemini.gmos.gmosaic(**xcldict)\n except:\n raise RuntimeError('The IRAF task gmos.gmosaic failed')\n if gemini.gmos.gmosaic.status:\n raise RuntimeError('The IRAF task gmos.gmosaic failed')\n else:\n log.fullinfo('The IRAF task gmos.gmosaic completed successfully')\n<|end_body_1|>\n\n<|body_start_2|>\n log.debug('GmosaicETI.run()')\n adlist = []\n self.prepare()\n self.execute()\n adlist = self.recover()\n self.clean()\n return adlist\n<|end_body_2|>\n\n<|body_start_3|>\n log.debug('GmosaicETI.recover()')\n adlist = []\n for par in self.param_objs:\n par.recover()\n for 
fil in self.file_objs:\n if isinstance(fil, OutAtList):\n adlist.extend(fil.recover())\n else:\n fil.recover()\n if len(adlist) == 1:\n return adlist[0]\n else:\n return adlist\n<|end_body_3|>\n", "class_docstring": "This class coordinates the external task interface as it relates directly to the IRAF task: gmosaic", "class_name": "GmosaicETI", "detected_licenses": ["BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GmosaicETI:\n \"\"\"This class coordinates the external task interface as it relates directly to the IRAF task: gmosaic\"\"\"\n\n def __init__(self, inputs, params, ad):\n \"\"\"Adds the file and parameter objects to a list :param rc: Used to store reduction information :type rc: ReductionContext\"\"\"\n <|body_0|>\n\n def execute(self):\n \"\"\"Execute pyraf task: gmosaic\"\"\"\n <|body_1|>\n\n def run(self):\n \"\"\"Convenience function that runs all the needed operations.\"\"\"\n <|body_2|>\n\n def recover(self):\n \"\"\"Recovers reduction information into memory\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n log.debug('GmosaicETI __init__')\n PyrafETI.__init__(self, inputs, params)\n self.clparam_dict = {}\n self.add_file(InAtList(inputs, params, ad))\n self.add_file(OutAtList(inputs, params, ad))\n self.add_file(LogFile(inputs, params))\n self.add_param(FlPaste(inputs, params))\n self.add_param(FlFixpix(inputs, params))\n self.add_param(Geointer(inputs, params))\n self.add_param(FlVardq(inputs, params, ad))\n self.add_param(FlClean(inputs, params, ad))\n for param in mosaic_detectors_hardcoded_params:\n self.add_param(GmosaicParam(inputs, params, param, mosaic_detectors_hardcoded_params[param]))\n<|end_body_0|>\n\n<|body_start_1|>\n log.debug('GmosaicETI.execute()')\n xcldict = copy(self.clparam_dict)\n for fil in self.file_objs:\n xcldict.update(fil.get_parameter())\n for par in self.param_objs:\n xcldict.update(par.get_parameter())\n iraf.unlearn(iraf.gmos.gmosaic)\n for par in xcldict:\n if par != 'Stderr' and par != 'Stdout':\n gemini.gmos.gmosaic.setParam(par, xcldict[par])\n log.fullinfo('\\nGMOSAIC PARAMETERS:\\n')\n iraf.lpar(iraf.gmos.gmosaic, Stderr=xcldict['Stderr'], Stdout=xcldict['Stdout'])\n try:\n gemini.gmos.gmosaic(**xcldict)\n except:\n raise RuntimeError('The IRAF task gmos.gmosaic failed')\n if gemini.gmos.gmosaic.status:\n raise RuntimeError('The IRAF task gmos.gmosaic failed')\n else:\n log.fullinfo('The IRAF task gmos.gmosaic completed successfully')\n<|end_body_1|>\n\n<|body_start_2|>\n log.debug('GmosaicETI.run()')\n adlist = []\n self.prepare()\n self.execute()\n adlist = self.recover()\n self.clean()\n return adlist\n<|end_body_2|>\n\n<|body_start_3|>\n log.debug('GmosaicETI.recover()')\n adlist = []\n for par in self.param_objs:\n par.recover()\n for fil in self.file_objs:\n if isinstance(fil, OutAtList):\n adlist.extend(fil.recover())\n else:\n fil.recover()\n if len(adlist) == 1:\n return adlist[0]\n else:\n return adlist\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000280", "length_bytes": 3718, "license_type": "permissive", "methods": [{"docstring": "Adds the file and parameter objects to a list :param rc: Used to store reduction information :type rc: ReductionContext", "name": "__init__", "signature": "def __init__(self, inputs, params, ad)"}, {"docstring": "Execute pyraf task: gmosaic", "name": "execute", "signature": "def execute(self)"}, {"docstring": "Convenience function that runs all the needed operations.", "name": "run", "signature": "def 
run(self)"}, {"docstring": "Recovers reduction information into memory", "name": "recover", "signature": "def recover(self)"}], "n_methods": 4, "prompt": "Implement the Python class `GmosaicETI` described below.\n\nClass description:\nThis class coordinates the external task interface as it relates directly to the IRAF task: gmosaic\n\nMethod signatures and docstrings:\n- def __init__(self, inputs, params, ad): Adds the file and parameter objects to a list :param rc: Used to store reduction information :type rc: ReductionContext\n- def execute(self): Execute pyraf task: gmosaic\n- def run(self): Convenience function that runs all the needed operations.\n- def recover(self): Recovers reduction information into memory", "prompted_full_text": "Implement the Python class `GmosaicETI` described below.\n\nClass description:\nThis class coordinates the external task interface as it relates directly to the IRAF task: gmosaic\n\nMethod signatures and docstrings:\n- def __init__(self, inputs, params, ad): Adds the file and parameter objects to a list :param rc: Used to store reduction information :type rc: ReductionContext\n- def execute(self): Execute pyraf task: gmosaic\n- def run(self): Convenience function that runs all the needed operations.\n- def recover(self): Recovers reduction information into memory\n\n<|skeleton|>\nclass GmosaicETI:\n \"\"\"This class coordinates the external task interface as it relates directly to the IRAF task: gmosaic\"\"\"\n\n def __init__(self, inputs, params, ad):\n \"\"\"Adds the file and parameter objects to a list :param rc: Used to store reduction information :type rc: ReductionContext\"\"\"\n <|body_0|>\n\n def execute(self):\n \"\"\"Execute pyraf task: gmosaic\"\"\"\n <|body_1|>\n\n def run(self):\n \"\"\"Convenience function that runs all the needed operations.\"\"\"\n <|body_2|>\n\n def recover(self):\n \"\"\"Recovers reduction information into memory\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n log.debug('GmosaicETI __init__')\n PyrafETI.__init__(self, inputs, params)\n self.clparam_dict = {}\n self.add_file(InAtList(inputs, params, ad))\n self.add_file(OutAtList(inputs, params, ad))\n self.add_file(LogFile(inputs, params))\n self.add_param(FlPaste(inputs, params))\n self.add_param(FlFixpix(inputs, params))\n self.add_param(Geointer(inputs, params))\n self.add_param(FlVardq(inputs, params, ad))\n self.add_param(FlClean(inputs, params, ad))\n for param in mosaic_detectors_hardcoded_params:\n self.add_param(GmosaicParam(inputs, params, param, mosaic_detectors_hardcoded_params[param]))\n<|end_body_0|>\n\n<|body_start_1|>\n log.debug('GmosaicETI.execute()')\n xcldict = copy(self.clparam_dict)\n for fil in self.file_objs:\n xcldict.update(fil.get_parameter())\n for par in self.param_objs:\n xcldict.update(par.get_parameter())\n iraf.unlearn(iraf.gmos.gmosaic)\n for par in xcldict:\n if par != 'Stderr' and par != 'Stdout':\n gemini.gmos.gmosaic.setParam(par, xcldict[par])\n log.fullinfo('\\nGMOSAIC PARAMETERS:\\n')\n iraf.lpar(iraf.gmos.gmosaic, Stderr=xcldict['Stderr'], Stdout=xcldict['Stdout'])\n try:\n gemini.gmos.gmosaic(**xcldict)\n except:\n raise RuntimeError('The IRAF task gmos.gmosaic failed')\n if gemini.gmos.gmosaic.status:\n raise RuntimeError('The IRAF task gmos.gmosaic failed')\n else:\n log.fullinfo('The IRAF task gmos.gmosaic completed successfully')\n<|end_body_1|>\n\n<|body_start_2|>\n log.debug('GmosaicETI.run()')\n adlist = []\n self.prepare()\n self.execute()\n adlist = self.recover()\n self.clean()\n return 
adlist\n<|end_body_2|>\n\n<|body_start_3|>\n log.debug('GmosaicETI.recover()')\n adlist = []\n for par in self.param_objs:\n par.recover()\n for fil in self.file_objs:\n if isinstance(fil, OutAtList):\n adlist.extend(fil.recover())\n else:\n fil.recover()\n if len(adlist) == 1:\n return adlist[0]\n else:\n return adlist\n<|end_body_3|>\n", "revision_id": "159439b43029d0fd9136e4d30e10fa963d6f9e7f", "skeleton": "<|skeleton|>\nclass GmosaicETI:\n \"\"\"This class coordinates the external task interface as it relates directly to the IRAF task: gmosaic\"\"\"\n\n def __init__(self, inputs, params, ad):\n \"\"\"Adds the file and parameter objects to a list :param rc: Used to store reduction information :type rc: ReductionContext\"\"\"\n <|body_0|>\n\n def execute(self):\n \"\"\"Execute pyraf task: gmosaic\"\"\"\n <|body_1|>\n\n def run(self):\n \"\"\"Convenience function that runs all the needed operations.\"\"\"\n <|body_2|>\n\n def recover(self):\n \"\"\"Recovers reduction information into memory\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class GmosaicETI:\n \"\"\"This class coordinates the external task interface as it relates directly to the IRAF task: gmosaic\"\"\"\n\n def __init__(self, inputs, params, ad):\n \"\"\"Adds the file and parameter objects to a list :param rc: Used to store reduction information :type rc: ReductionContext\"\"\"\n log.debug('GmosaicETI __init__')\n PyrafETI.__init__(self, inputs, params)\n self.clparam_dict = {}\n self.add_file(InAtList(inputs, params, ad))\n self.add_file(OutAtList(inputs, params, ad))\n self.add_file(LogFile(inputs, params))\n self.add_param(FlPaste(inputs, params))\n self.add_param(FlFixpix(inputs, params))\n self.add_param(Geointer(inputs, params))\n self.add_param(FlVardq(inputs, params, ad))\n self.add_param(FlClean(inputs, params, ad))\n for param in mosaic_detectors_hardcoded_params:\n self.add_param(GmosaicParam(inputs, params, param, mosaic_detectors_hardcoded_params[param]))\n\n def execute(self):\n \"\"\"Execute pyraf task: gmosaic\"\"\"\n log.debug('GmosaicETI.execute()')\n xcldict = copy(self.clparam_dict)\n for fil in self.file_objs:\n xcldict.update(fil.get_parameter())\n for par in self.param_objs:\n xcldict.update(par.get_parameter())\n iraf.unlearn(iraf.gmos.gmosaic)\n for par in xcldict:\n if par != 'Stderr' and par != 'Stdout':\n gemini.gmos.gmosaic.setParam(par, xcldict[par])\n log.fullinfo('\\nGMOSAIC PARAMETERS:\\n')\n iraf.lpar(iraf.gmos.gmosaic, Stderr=xcldict['Stderr'], Stdout=xcldict['Stdout'])\n try:\n gemini.gmos.gmosaic(**xcldict)\n except:\n raise RuntimeError('The IRAF task gmos.gmosaic failed')\n if gemini.gmos.gmosaic.status:\n raise RuntimeError('The IRAF task gmos.gmosaic failed')\n else:\n log.fullinfo('The IRAF task gmos.gmosaic completed successfully')\n\n def run(self):\n \"\"\"Convenience function that runs all the needed operations.\"\"\"\n log.debug('GmosaicETI.run()')\n adlist = []\n self.prepare()\n self.execute()\n adlist = self.recover()\n self.clean()\n return adlist\n\n def recover(self):\n \"\"\"Recovers reduction information into memory\"\"\"\n log.debug('GmosaicETI.recover()')\n adlist = []\n for par in self.param_objs:\n par.recover()\n for fil in self.file_objs:\n if isinstance(fil, OutAtList):\n adlist.extend(fil.recover())\n else:\n fil.recover()\n if len(adlist) == 1:\n return adlist[0]\n else:\n return adlist\n", "source": 
"the_stack_v2_python_sparse", "source_path": "gempy/gemini/eti/gmosaiceti.py", "source_repo": "GeminiDRSoftware/DRAGONS", "split": "val", "star_events_count": 28}
{"blob_id": "1bb69a91efb77ee151f70f2ba35860b4a4cbaaea", "bodies": ["os_walk_input_iter = (('a1', ['b1'], ['c1', 'd1']), ('a2', ['b2'], ['c2', 'd2']), ('a3', ['b3'], ['c3', 'd3']))\nos_walk_expected_output = ('a1/c1', 'a1/d1', 'a2/c2', 'a2/d2', 'a3/c3', 'a3/d3')\nos_walk_actual_output = tuple(da.lwc.search._adapt_os_walk_to_filepath(os_walk_input_iter))\nassert os_walk_expected_output == os_walk_actual_output", "os_walk_input_iter = ()\nos_walk_expected_output = ()\nos_walk_actual_output = tuple(da.lwc.search._adapt_os_walk_to_filepath(os_walk_input_iter))\nassert os_walk_expected_output == os_walk_actual_output"], "bodies_text": "<|body_start_0|>\n os_walk_input_iter = (('a1', ['b1'], ['c1', 'd1']), ('a2', ['b2'], ['c2', 'd2']), ('a3', ['b3'], ['c3', 'd3']))\n os_walk_expected_output = ('a1/c1', 'a1/d1', 'a2/c2', 'a2/d2', 'a3/c3', 'a3/d3')\n os_walk_actual_output = tuple(da.lwc.search._adapt_os_walk_to_filepath(os_walk_input_iter))\n assert os_walk_expected_output == os_walk_actual_output\n<|end_body_0|>\n\n<|body_start_1|>\n os_walk_input_iter = ()\n os_walk_expected_output = ()\n os_walk_actual_output = tuple(da.lwc.search._adapt_os_walk_to_filepath(os_walk_input_iter))\n assert os_walk_expected_output == os_walk_actual_output\n<|end_body_1|>\n", "class_docstring": "Tet cases for the _adapt_os_walk_to_filepath function.", "class_name": "Specify_AdaptOsWalkToFilepath", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Specify_AdaptOsWalkToFilepath:\n \"\"\"Tet cases for the _adapt_os_walk_to_filepath function.\"\"\"\n\n def it_serialises_simple_tree(self):\n \"\"\"Test _adapt_os_walk_to_filepath handles a simple use case as expected. The _adapt_os_walk_to_filepath should take output in the form provided by os.walk and should adapt it to produce a sequence of \"flat\" file paths.\"\"\"\n <|body_0|>\n\n def it_serialises_empty_tree(self):\n \"\"\"Test _adapt_os_walk_to_filepath handles an edge case as expected. Empty trees should produce no output.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n os_walk_input_iter = (('a1', ['b1'], ['c1', 'd1']), ('a2', ['b2'], ['c2', 'd2']), ('a3', ['b3'], ['c3', 'd3']))\n os_walk_expected_output = ('a1/c1', 'a1/d1', 'a2/c2', 'a2/d2', 'a3/c3', 'a3/d3')\n os_walk_actual_output = tuple(da.lwc.search._adapt_os_walk_to_filepath(os_walk_input_iter))\n assert os_walk_expected_output == os_walk_actual_output\n<|end_body_0|>\n\n<|body_start_1|>\n os_walk_input_iter = ()\n os_walk_expected_output = ()\n os_walk_actual_output = tuple(da.lwc.search._adapt_os_walk_to_filepath(os_walk_input_iter))\n assert os_walk_expected_output == os_walk_actual_output\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000281", "length_bytes": 29518, "license_type": "permissive", "methods": [{"docstring": "Test _adapt_os_walk_to_filepath handles a simple use case as expected. The _adapt_os_walk_to_filepath should take output in the form provided by os.walk and should adapt it to produce a sequence of \"flat\" file paths.", "name": "it_serialises_simple_tree", "signature": "def it_serialises_simple_tree(self)"}, {"docstring": "Test _adapt_os_walk_to_filepath handles an edge case as expected. 
Empty trees should produce no output.", "name": "it_serialises_empty_tree", "signature": "def it_serialises_empty_tree(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_028361", "prompt": "Implement the Python class `Specify_AdaptOsWalkToFilepath` described below.\n\nClass description:\nTet cases for the _adapt_os_walk_to_filepath function.\n\nMethod signatures and docstrings:\n- def it_serialises_simple_tree(self): Test _adapt_os_walk_to_filepath handles a simple use case as expected. The _adapt_os_walk_to_filepath should take output in the form provided by os.walk and should adapt it to produce a sequence of \"flat\" file paths.\n- def it_serialises_empty_tree(self): Test _adapt_os_walk_to_filepath handles an edge case as expected. Empty trees should produce no output.", "prompted_full_text": "Implement the Python class `Specify_AdaptOsWalkToFilepath` described below.\n\nClass description:\nTet cases for the _adapt_os_walk_to_filepath function.\n\nMethod signatures and docstrings:\n- def it_serialises_simple_tree(self): Test _adapt_os_walk_to_filepath handles a simple use case as expected. The _adapt_os_walk_to_filepath should take output in the form provided by os.walk and should adapt it to produce a sequence of \"flat\" file paths.\n- def it_serialises_empty_tree(self): Test _adapt_os_walk_to_filepath handles an edge case as expected. Empty trees should produce no output.\n\n<|skeleton|>\nclass Specify_AdaptOsWalkToFilepath:\n \"\"\"Tet cases for the _adapt_os_walk_to_filepath function.\"\"\"\n\n def it_serialises_simple_tree(self):\n \"\"\"Test _adapt_os_walk_to_filepath handles a simple use case as expected. The _adapt_os_walk_to_filepath should take output in the form provided by os.walk and should adapt it to produce a sequence of \"flat\" file paths.\"\"\"\n <|body_0|>\n\n def it_serialises_empty_tree(self):\n \"\"\"Test _adapt_os_walk_to_filepath handles an edge case as expected. Empty trees should produce no output.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n os_walk_input_iter = (('a1', ['b1'], ['c1', 'd1']), ('a2', ['b2'], ['c2', 'd2']), ('a3', ['b3'], ['c3', 'd3']))\n os_walk_expected_output = ('a1/c1', 'a1/d1', 'a2/c2', 'a2/d2', 'a3/c3', 'a3/d3')\n os_walk_actual_output = tuple(da.lwc.search._adapt_os_walk_to_filepath(os_walk_input_iter))\n assert os_walk_expected_output == os_walk_actual_output\n<|end_body_0|>\n\n<|body_start_1|>\n os_walk_input_iter = ()\n os_walk_expected_output = ()\n os_walk_actual_output = tuple(da.lwc.search._adapt_os_walk_to_filepath(os_walk_input_iter))\n assert os_walk_expected_output == os_walk_actual_output\n<|end_body_1|>\n", "revision_id": "04a13be2792323e3f9fdb83fd236a8e9cfe6aa2d", "skeleton": "<|skeleton|>\nclass Specify_AdaptOsWalkToFilepath:\n \"\"\"Tet cases for the _adapt_os_walk_to_filepath function.\"\"\"\n\n def it_serialises_simple_tree(self):\n \"\"\"Test _adapt_os_walk_to_filepath handles a simple use case as expected. The _adapt_os_walk_to_filepath should take output in the form provided by os.walk and should adapt it to produce a sequence of \"flat\" file paths.\"\"\"\n <|body_0|>\n\n def it_serialises_empty_tree(self):\n \"\"\"Test _adapt_os_walk_to_filepath handles an edge case as expected. 
Empty trees should produce no output.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Specify_AdaptOsWalkToFilepath:\n \"\"\"Tet cases for the _adapt_os_walk_to_filepath function.\"\"\"\n\n def it_serialises_simple_tree(self):\n \"\"\"Test _adapt_os_walk_to_filepath handles a simple use case as expected. The _adapt_os_walk_to_filepath should take output in the form provided by os.walk and should adapt it to produce a sequence of \"flat\" file paths.\"\"\"\n os_walk_input_iter = (('a1', ['b1'], ['c1', 'd1']), ('a2', ['b2'], ['c2', 'd2']), ('a3', ['b3'], ['c3', 'd3']))\n os_walk_expected_output = ('a1/c1', 'a1/d1', 'a2/c2', 'a2/d2', 'a3/c3', 'a3/d3')\n os_walk_actual_output = tuple(da.lwc.search._adapt_os_walk_to_filepath(os_walk_input_iter))\n assert os_walk_expected_output == os_walk_actual_output\n\n def it_serialises_empty_tree(self):\n \"\"\"Test _adapt_os_walk_to_filepath handles an edge case as expected. Empty trees should produce no output.\"\"\"\n os_walk_input_iter = ()\n os_walk_expected_output = ()\n os_walk_actual_output = tuple(da.lwc.search._adapt_os_walk_to_filepath(os_walk_input_iter))\n assert os_walk_expected_output == os_walk_actual_output\n", "source": "the_stack_v2_python_sparse", "source_path": "a3_src/h70_internal/da/lwc/spec/spec_search.py", "source_repo": "wtpayne/hiai", "split": "val", "star_events_count": 5}
{"blob_id": "ad3e822a848fb09cb289c5f4a5df3359ac7a962e", "bodies": ["if self.request.method == 'GET':\n return (IsInActiveCommunity(), IsInPubliclyVisibleCommunity())\nelif self.request.method == 'POST':\n return (permissions.IsAuthenticated(),)\nelif self.request.method in ('PUT', 'PATCH'):\n return (permissions.IsAuthenticated(), IsInActiveCommunity(), IsAbleToUpdateCustomMembershipLabel())\nelif self.request.method == 'DELETE':\n return (permissions.IsAuthenticated(), IsInActiveCommunity(), IsDeputyLeaderOfCommunity())\nreturn tuple()", "if self.request.method == 'POST':\n return NotExistingCustomMembershipLabelSerializer\nreturn ExistingCustomMembershipLabelSerializer", "queryset = self.get_queryset()\nqueryset = filter_queryset_permission(queryset, request, self.get_permissions())\nqueryset = filter_queryset(queryset, request, target_param='membership', is_foreign_key=False)\nif request.query_params.get('membership') is not None and len(queryset) == 0:\n return Response(status=status.HTTP_404_NOT_FOUND)\nelif request.query_params.get('membership') is not None and len(queryset) == 1:\n serializer = self.get_serializer(queryset[0], many=False)\nelse:\n serializer = self.get_serializer(queryset, many=True)\nreturn Response(serializer.data)"], "bodies_text": "<|body_start_0|>\n if self.request.method == 'GET':\n return (IsInActiveCommunity(), IsInPubliclyVisibleCommunity())\n elif self.request.method == 'POST':\n return (permissions.IsAuthenticated(),)\n elif self.request.method in ('PUT', 'PATCH'):\n return (permissions.IsAuthenticated(), IsInActiveCommunity(), IsAbleToUpdateCustomMembershipLabel())\n elif self.request.method == 'DELETE':\n return (permissions.IsAuthenticated(), IsInActiveCommunity(), IsDeputyLeaderOfCommunity())\n return tuple()\n<|end_body_0|>\n\n<|body_start_1|>\n if self.request.method == 'POST':\n return NotExistingCustomMembershipLabelSerializer\n return ExistingCustomMembershipLabelSerializer\n<|end_body_1|>\n\n<|body_start_2|>\n queryset = self.get_queryset()\n queryset = filter_queryset_permission(queryset, request, self.get_permissions())\n queryset = filter_queryset(queryset, request, target_param='membership', is_foreign_key=False)\n if request.query_params.get('membership') is not None and len(queryset) == 0:\n return Response(status=status.HTTP_404_NOT_FOUND)\n elif request.query_params.get('membership') is not None and len(queryset) == 1:\n serializer = self.get_serializer(queryset[0], many=False)\n else:\n serializer = self.get_serializer(queryset, many=True)\n return Response(serializer.data)\n<|end_body_2|>\n", "class_docstring": "Custom membership label view set", "class_name": "CustomMembershipLabelViewSet", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CustomMembershipLabelViewSet:\n \"\"\"Custom membership label view set\"\"\"\n\n def get_permissions(self):\n \"\"\"Get permissions\"\"\"\n <|body_0|>\n\n def get_serializer_class(self):\n \"\"\"Get serializer class\"\"\"\n <|body_1|>\n\n def list(self, request, *args, **kwargs):\n \"\"\"List custom membership labels\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.request.method == 'GET':\n return (IsInActiveCommunity(), IsInPubliclyVisibleCommunity())\n elif self.request.method == 'POST':\n return (permissions.IsAuthenticated(),)\n elif self.request.method in ('PUT', 'PATCH'):\n return (permissions.IsAuthenticated(), IsInActiveCommunity(), IsAbleToUpdateCustomMembershipLabel())\n elif self.request.method == 
'DELETE':\n return (permissions.IsAuthenticated(), IsInActiveCommunity(), IsDeputyLeaderOfCommunity())\n return tuple()\n<|end_body_0|>\n\n<|body_start_1|>\n if self.request.method == 'POST':\n return NotExistingCustomMembershipLabelSerializer\n return ExistingCustomMembershipLabelSerializer\n<|end_body_1|>\n\n<|body_start_2|>\n queryset = self.get_queryset()\n queryset = filter_queryset_permission(queryset, request, self.get_permissions())\n queryset = filter_queryset(queryset, request, target_param='membership', is_foreign_key=False)\n if request.query_params.get('membership') is not None and len(queryset) == 0:\n return Response(status=status.HTTP_404_NOT_FOUND)\n elif request.query_params.get('membership') is not None and len(queryset) == 1:\n serializer = self.get_serializer(queryset[0], many=False)\n else:\n serializer = self.get_serializer(queryset, many=True)\n return Response(serializer.data)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000282", "length_bytes": 27778, "license_type": "permissive", "methods": [{"docstring": "Get permissions", "name": "get_permissions", "signature": "def get_permissions(self)"}, {"docstring": "Get serializer class", "name": "get_serializer_class", "signature": "def get_serializer_class(self)"}, {"docstring": "List custom membership labels", "name": "list", "signature": "def list(self, request, *args, **kwargs)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_025105", "prompt": "Implement the Python class `CustomMembershipLabelViewSet` described below.\n\nClass description:\nCustom membership label view set\n\nMethod signatures and docstrings:\n- def get_permissions(self): Get permissions\n- def get_serializer_class(self): Get serializer class\n- def list(self, request, *args, **kwargs): List custom membership labels", "prompted_full_text": "Implement the Python class `CustomMembershipLabelViewSet` described below.\n\nClass description:\nCustom membership label view set\n\nMethod signatures and docstrings:\n- def get_permissions(self): Get permissions\n- def get_serializer_class(self): Get serializer class\n- def list(self, request, *args, **kwargs): List custom membership labels\n\n<|skeleton|>\nclass CustomMembershipLabelViewSet:\n \"\"\"Custom membership label view set\"\"\"\n\n def get_permissions(self):\n \"\"\"Get permissions\"\"\"\n <|body_0|>\n\n def get_serializer_class(self):\n \"\"\"Get serializer class\"\"\"\n <|body_1|>\n\n def list(self, request, *args, **kwargs):\n \"\"\"List custom membership labels\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.request.method == 'GET':\n return (IsInActiveCommunity(), IsInPubliclyVisibleCommunity())\n elif self.request.method == 'POST':\n return (permissions.IsAuthenticated(),)\n elif self.request.method in ('PUT', 'PATCH'):\n return (permissions.IsAuthenticated(), IsInActiveCommunity(), IsAbleToUpdateCustomMembershipLabel())\n elif self.request.method == 'DELETE':\n return (permissions.IsAuthenticated(), IsInActiveCommunity(), IsDeputyLeaderOfCommunity())\n return tuple()\n<|end_body_0|>\n\n<|body_start_1|>\n if self.request.method == 'POST':\n return NotExistingCustomMembershipLabelSerializer\n return ExistingCustomMembershipLabelSerializer\n<|end_body_1|>\n\n<|body_start_2|>\n queryset = self.get_queryset()\n queryset = filter_queryset_permission(queryset, request, self.get_permissions())\n queryset = filter_queryset(queryset, request, target_param='membership', is_foreign_key=False)\n if request.query_params.get('membership') is not 
None and len(queryset) == 0:\n return Response(status=status.HTTP_404_NOT_FOUND)\n elif request.query_params.get('membership') is not None and len(queryset) == 1:\n serializer = self.get_serializer(queryset[0], many=False)\n else:\n serializer = self.get_serializer(queryset, many=True)\n return Response(serializer.data)\n<|end_body_2|>\n", "revision_id": "cf429f43251ad7e77c0d9bc9fe91bb030ca8bae8", "skeleton": "<|skeleton|>\nclass CustomMembershipLabelViewSet:\n \"\"\"Custom membership label view set\"\"\"\n\n def get_permissions(self):\n \"\"\"Get permissions\"\"\"\n <|body_0|>\n\n def get_serializer_class(self):\n \"\"\"Get serializer class\"\"\"\n <|body_1|>\n\n def list(self, request, *args, **kwargs):\n \"\"\"List custom membership labels\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CustomMembershipLabelViewSet:\n \"\"\"Custom membership label view set\"\"\"\n\n def get_permissions(self):\n \"\"\"Get permissions\"\"\"\n if self.request.method == 'GET':\n return (IsInActiveCommunity(), IsInPubliclyVisibleCommunity())\n elif self.request.method == 'POST':\n return (permissions.IsAuthenticated(),)\n elif self.request.method in ('PUT', 'PATCH'):\n return (permissions.IsAuthenticated(), IsInActiveCommunity(), IsAbleToUpdateCustomMembershipLabel())\n elif self.request.method == 'DELETE':\n return (permissions.IsAuthenticated(), IsInActiveCommunity(), IsDeputyLeaderOfCommunity())\n return tuple()\n\n def get_serializer_class(self):\n \"\"\"Get serializer class\"\"\"\n if self.request.method == 'POST':\n return NotExistingCustomMembershipLabelSerializer\n return ExistingCustomMembershipLabelSerializer\n\n def list(self, request, *args, **kwargs):\n \"\"\"List custom membership labels\"\"\"\n queryset = self.get_queryset()\n queryset = filter_queryset_permission(queryset, request, self.get_permissions())\n queryset = filter_queryset(queryset, request, target_param='membership', is_foreign_key=False)\n if request.query_params.get('membership') is not None and len(queryset) == 0:\n return Response(status=status.HTTP_404_NOT_FOUND)\n elif request.query_params.get('membership') is not None and len(queryset) == 1:\n serializer = self.get_serializer(queryset[0], many=False)\n else:\n serializer = self.get_serializer(queryset, many=True)\n return Response(serializer.data)\n", "source": "the_stack_v2_python_sparse", "source_path": "membership/views.py", "source_repo": "810Teams/clubs-and-events-backend", "split": "val", "star_events_count": 3}
{"blob_id": "95270ce02581a7b9dfeac28f5b2a49c32632d901", "bodies": ["self.mask_func = mask_func\nself.resolution = resolution\nself.use_seed = use_seed", "if target is not None:\n target = T.to_tensor(target)\n max_value = attrs['max']\nelse:\n target = torch.tensor(0)\n max_value = 0.0\nkspace = T.to_tensor(kspace)\nseed = None if not self.use_seed else tuple(map(ord, fname))\nacq_start = attrs['padding_left']\nacq_end = attrs['padding_right']\nif self.mask_func:\n masked_kspace, mask = T.apply_mask(kspace, self.mask_func, seed, (acq_start, acq_end))\nelse:\n masked_kspace = kspace\n shape = np.array(kspace.shape)\n num_cols = shape[-2]\n shape[:-3] = 1\n mask_shape = [1 for _ in shape]\n mask_shape[-2] = num_cols\n mask = torch.from_numpy(mask.reshape(*mask_shape).astype(np.float32))\n mask[:, :, :acq_start] = 0\n mask[:, :, acq_end:] = 0\nreturn (masked_kspace, mask.byte(), target, fname, max_value)"], "bodies_text": "<|body_start_0|>\n self.mask_func = mask_func\n self.resolution = resolution\n self.use_seed = use_seed\n<|end_body_0|>\n\n<|body_start_1|>\n if target is not None:\n target = T.to_tensor(target)\n max_value = attrs['max']\n else:\n target = torch.tensor(0)\n max_value = 0.0\n kspace = T.to_tensor(kspace)\n seed = None if not self.use_seed else tuple(map(ord, fname))\n acq_start = attrs['padding_left']\n acq_end = attrs['padding_right']\n if self.mask_func:\n masked_kspace, mask = T.apply_mask(kspace, self.mask_func, seed, (acq_start, acq_end))\n else:\n masked_kspace = kspace\n shape = np.array(kspace.shape)\n num_cols = shape[-2]\n shape[:-3] = 1\n mask_shape = [1 for _ in shape]\n mask_shape[-2] = num_cols\n mask = torch.from_numpy(mask.reshape(*mask_shape).astype(np.float32))\n mask[:, :, :acq_start] = 0\n mask[:, :, acq_end:] = 0\n return (masked_kspace, mask.byte(), target, fname, max_value)\n<|end_body_1|>\n", "class_docstring": "Data Transformer for training Var Net models.", "class_name": "DataTransform", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DataTransform:\n \"\"\"Data Transformer for training Var Net models.\"\"\"\n\n def __init__(self, resolution, mask_func=None, use_seed=True):\n \"\"\"Args: mask_func (common.subsample.MaskFunc): A function that can create a mask of appropriate shape. resolution (int): Resolution of the image. use_seed (bool): If true, this class computes a pseudo random number generator seed from the filename. This ensures that the same mask is used for all the slices of a given volume every time.\"\"\"\n <|body_0|>\n\n def __call__(self, kspace, mask, target, attrs, fname):\n \"\"\"Args: kspace (numpy.array): Input k-space of shape (num_coils, rows, cols, 2) for multi-coil data or (rows, cols, 2) for single coil data. mask (numpy.array): Mask from the test dataset target (numpy.array): Target image attrs (dict): Acquisition related information stored in the HDF5 object. fname (str): File name slice (int): Serial number of the slice. Returns: (tuple): tuple containing: masked_kspace (torch.Tensor): Masked k-space mask (torch.Tensor): Mask target (torch.Tensor): Target image converted to a torch Tensor. fname (str): File name slice (int): Serial number of the slice. 
max_value (numpy.array): Maximum value in the image volume\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.mask_func = mask_func\n self.resolution = resolution\n self.use_seed = use_seed\n<|end_body_0|>\n\n<|body_start_1|>\n if target is not None:\n target = T.to_tensor(target)\n max_value = attrs['max']\n else:\n target = torch.tensor(0)\n max_value = 0.0\n kspace = T.to_tensor(kspace)\n seed = None if not self.use_seed else tuple(map(ord, fname))\n acq_start = attrs['padding_left']\n acq_end = attrs['padding_right']\n if self.mask_func:\n masked_kspace, mask = T.apply_mask(kspace, self.mask_func, seed, (acq_start, acq_end))\n else:\n masked_kspace = kspace\n shape = np.array(kspace.shape)\n num_cols = shape[-2]\n shape[:-3] = 1\n mask_shape = [1 for _ in shape]\n mask_shape[-2] = num_cols\n mask = torch.from_numpy(mask.reshape(*mask_shape).astype(np.float32))\n mask[:, :, :acq_start] = 0\n mask[:, :, acq_end:] = 0\n return (masked_kspace, mask.byte(), target, fname, max_value)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000283", "length_bytes": 6605, "license_type": "no_license", "methods": [{"docstring": "Args: mask_func (common.subsample.MaskFunc): A function that can create a mask of appropriate shape. resolution (int): Resolution of the image. use_seed (bool): If true, this class computes a pseudo random number generator seed from the filename. This ensures that the same mask is used for all the slices of a given volume every time.", "name": "__init__", "signature": "def __init__(self, resolution, mask_func=None, use_seed=True)"}, {"docstring": "Args: kspace (numpy.array): Input k-space of shape (num_coils, rows, cols, 2) for multi-coil data or (rows, cols, 2) for single coil data. mask (numpy.array): Mask from the test dataset target (numpy.array): Target image attrs (dict): Acquisition related information stored in the HDF5 object. fname (str): File name slice (int): Serial number of the slice. Returns: (tuple): tuple containing: masked_kspace (torch.Tensor): Masked k-space mask (torch.Tensor): Mask target (torch.Tensor): Target image converted to a torch Tensor. fname (str): File name slice (int): Serial number of the slice. max_value (numpy.array): Maximum value in the image volume", "name": "__call__", "signature": "def __call__(self, kspace, mask, target, attrs, fname)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_024267", "prompt": "Implement the Python class `DataTransform` described below.\n\nClass description:\nData Transformer for training Var Net models.\n\nMethod signatures and docstrings:\n- def __init__(self, resolution, mask_func=None, use_seed=True): Args: mask_func (common.subsample.MaskFunc): A function that can create a mask of appropriate shape. resolution (int): Resolution of the image. use_seed (bool): If true, this class computes a pseudo random number generator seed from the filename. This ensures that the same mask is used for all the slices of a given volume every time.\n- def __call__(self, kspace, mask, target, attrs, fname): Args: kspace (numpy.array): Input k-space of shape (num_coils, rows, cols, 2) for multi-coil data or (rows, cols, 2) for single coil data. mask (numpy.array): Mask from the test dataset target (numpy.array): Target image attrs (dict): Acquisition related information stored in the HDF5 object. fname (str): File name slice (int): Serial number of the slice. 
Returns: (tuple): tuple containing: masked_kspace (torch.Tensor): Masked k-space mask (torch.Tensor): Mask target (torch.Tensor): Target image converted to a torch Tensor. fname (str): File name slice (int): Serial number of the slice. max_value (numpy.array): Maximum value in the image volume", "prompted_full_text": "Implement the Python class `DataTransform` described below.\n\nClass description:\nData Transformer for training Var Net models.\n\nMethod signatures and docstrings:\n- def __init__(self, resolution, mask_func=None, use_seed=True): Args: mask_func (common.subsample.MaskFunc): A function that can create a mask of appropriate shape. resolution (int): Resolution of the image. use_seed (bool): If true, this class computes a pseudo random number generator seed from the filename. This ensures that the same mask is used for all the slices of a given volume every time.\n- def __call__(self, kspace, mask, target, attrs, fname): Args: kspace (numpy.array): Input k-space of shape (num_coils, rows, cols, 2) for multi-coil data or (rows, cols, 2) for single coil data. mask (numpy.array): Mask from the test dataset target (numpy.array): Target image attrs (dict): Acquisition related information stored in the HDF5 object. fname (str): File name slice (int): Serial number of the slice. Returns: (tuple): tuple containing: masked_kspace (torch.Tensor): Masked k-space mask (torch.Tensor): Mask target (torch.Tensor): Target image converted to a torch Tensor. fname (str): File name slice (int): Serial number of the slice. max_value (numpy.array): Maximum value in the image volume\n\n<|skeleton|>\nclass DataTransform:\n \"\"\"Data Transformer for training Var Net models.\"\"\"\n\n def __init__(self, resolution, mask_func=None, use_seed=True):\n \"\"\"Args: mask_func (common.subsample.MaskFunc): A function that can create a mask of appropriate shape. resolution (int): Resolution of the image. use_seed (bool): If true, this class computes a pseudo random number generator seed from the filename. This ensures that the same mask is used for all the slices of a given volume every time.\"\"\"\n <|body_0|>\n\n def __call__(self, kspace, mask, target, attrs, fname):\n \"\"\"Args: kspace (numpy.array): Input k-space of shape (num_coils, rows, cols, 2) for multi-coil data or (rows, cols, 2) for single coil data. mask (numpy.array): Mask from the test dataset target (numpy.array): Target image attrs (dict): Acquisition related information stored in the HDF5 object. fname (str): File name slice (int): Serial number of the slice. Returns: (tuple): tuple containing: masked_kspace (torch.Tensor): Masked k-space mask (torch.Tensor): Mask target (torch.Tensor): Target image converted to a torch Tensor. fname (str): File name slice (int): Serial number of the slice. 
max_value (numpy.array): Maximum value in the image volume\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.mask_func = mask_func\n self.resolution = resolution\n self.use_seed = use_seed\n<|end_body_0|>\n\n<|body_start_1|>\n if target is not None:\n target = T.to_tensor(target)\n max_value = attrs['max']\n else:\n target = torch.tensor(0)\n max_value = 0.0\n kspace = T.to_tensor(kspace)\n seed = None if not self.use_seed else tuple(map(ord, fname))\n acq_start = attrs['padding_left']\n acq_end = attrs['padding_right']\n if self.mask_func:\n masked_kspace, mask = T.apply_mask(kspace, self.mask_func, seed, (acq_start, acq_end))\n else:\n masked_kspace = kspace\n shape = np.array(kspace.shape)\n num_cols = shape[-2]\n shape[:-3] = 1\n mask_shape = [1 for _ in shape]\n mask_shape[-2] = num_cols\n mask = torch.from_numpy(mask.reshape(*mask_shape).astype(np.float32))\n mask[:, :, :acq_start] = 0\n mask[:, :, acq_end:] = 0\n return (masked_kspace, mask.byte(), target, fname, max_value)\n<|end_body_1|>\n", "revision_id": "219652c8a08c4f2f682acd9f95a4e1b3fd36b70b", "skeleton": "<|skeleton|>\nclass DataTransform:\n \"\"\"Data Transformer for training Var Net models.\"\"\"\n\n def __init__(self, resolution, mask_func=None, use_seed=True):\n \"\"\"Args: mask_func (common.subsample.MaskFunc): A function that can create a mask of appropriate shape. resolution (int): Resolution of the image. use_seed (bool): If true, this class computes a pseudo random number generator seed from the filename. This ensures that the same mask is used for all the slices of a given volume every time.\"\"\"\n <|body_0|>\n\n def __call__(self, kspace, mask, target, attrs, fname):\n \"\"\"Args: kspace (numpy.array): Input k-space of shape (num_coils, rows, cols, 2) for multi-coil data or (rows, cols, 2) for single coil data. mask (numpy.array): Mask from the test dataset target (numpy.array): Target image attrs (dict): Acquisition related information stored in the HDF5 object. fname (str): File name slice (int): Serial number of the slice. Returns: (tuple): tuple containing: masked_kspace (torch.Tensor): Masked k-space mask (torch.Tensor): Mask target (torch.Tensor): Target image converted to a torch Tensor. fname (str): File name slice (int): Serial number of the slice. max_value (numpy.array): Maximum value in the image volume\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DataTransform:\n \"\"\"Data Transformer for training Var Net models.\"\"\"\n\n def __init__(self, resolution, mask_func=None, use_seed=True):\n \"\"\"Args: mask_func (common.subsample.MaskFunc): A function that can create a mask of appropriate shape. resolution (int): Resolution of the image. use_seed (bool): If true, this class computes a pseudo random number generator seed from the filename. This ensures that the same mask is used for all the slices of a given volume every time.\"\"\"\n self.mask_func = mask_func\n self.resolution = resolution\n self.use_seed = use_seed\n\n def __call__(self, kspace, mask, target, attrs, fname):\n \"\"\"Args: kspace (numpy.array): Input k-space of shape (num_coils, rows, cols, 2) for multi-coil data or (rows, cols, 2) for single coil data. mask (numpy.array): Mask from the test dataset target (numpy.array): Target image attrs (dict): Acquisition related information stored in the HDF5 object. 
fname (str): File name slice (int): Serial number of the slice. Returns: (tuple): tuple containing: masked_kspace (torch.Tensor): Masked k-space mask (torch.Tensor): Mask target (torch.Tensor): Target image converted to a torch Tensor. fname (str): File name slice (int): Serial number of the slice. max_value (numpy.array): Maximum value in the image volume\"\"\"\n if target is not None:\n target = T.to_tensor(target)\n max_value = attrs['max']\n else:\n target = torch.tensor(0)\n max_value = 0.0\n kspace = T.to_tensor(kspace)\n seed = None if not self.use_seed else tuple(map(ord, fname))\n acq_start = attrs['padding_left']\n acq_end = attrs['padding_right']\n if self.mask_func:\n masked_kspace, mask = T.apply_mask(kspace, self.mask_func, seed, (acq_start, acq_end))\n else:\n masked_kspace = kspace\n shape = np.array(kspace.shape)\n num_cols = shape[-2]\n shape[:-3] = 1\n mask_shape = [1 for _ in shape]\n mask_shape[-2] = num_cols\n mask = torch.from_numpy(mask.reshape(*mask_shape).astype(np.float32))\n mask[:, :, :acq_start] = 0\n mask[:, :, acq_end:] = 0\n return (masked_kspace, mask.byte(), target, fname, max_value)\n", "source": "the_stack_v2_python_sparse", "source_path": "fastmri_fixed_sensitivity_variationaldc/valid.py", "source_repo": "Bala93/Holistic-MRI-Reconstruction", "split": "val", "star_events_count": 1}
{"blob_id": "dfe91c59d774f4d7d93386b091ea20b446abf875", "bodies": ["context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')"], "bodies_text": "<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n\n<|body_start_2|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_2|>\n\n<|body_start_3|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_3|>\n\n<|body_start_4|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_4|>\n", "class_docstring": "", "class_name": "DeviceTagConfigServiceServicer", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DeviceTagConfigServiceServicer:\n\n def GetOne(self, request, context):\n \"\"\"GetOne returns a unary model as specified by the key in the request. The key must be provided and all fields populated (unless otherwise specified).\"\"\"\n <|body_0|>\n\n def GetAll(self, request, context):\n \"\"\"GetAll returns all entities for this model, with optional filtering.\"\"\"\n <|body_1|>\n\n def Subscribe(self, request, context):\n \"\"\"Subscribe first returns all initial state known to the system, then will send deltas as entities are changed.\"\"\"\n <|body_2|>\n\n def Set(self, request, context):\n \"\"\"Set allows setting values for the entity specified by the key in the request. The key must be provided and all fields set (unless otherwise specified).\"\"\"\n <|body_3|>\n\n def Delete(self, request, context):\n \"\"\"Delete will remove the entity specified by the key within the request. 
The key must be provided and all fields populated (unless otherwise specified).\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n\n<|body_start_2|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_2|>\n\n<|body_start_3|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_3|>\n\n<|body_start_4|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000284", "length_bytes": 30872, "license_type": "permissive", "methods": [{"docstring": "GetOne returns a unary model as specified by the key in the request. The key must be provided and all fields populated (unless otherwise specified).", "name": "GetOne", "signature": "def GetOne(self, request, context)"}, {"docstring": "GetAll returns all entities for this model, with optional filtering.", "name": "GetAll", "signature": "def GetAll(self, request, context)"}, {"docstring": "Subscribe first returns all initial state known to the system, then will send deltas as entities are changed.", "name": "Subscribe", "signature": "def Subscribe(self, request, context)"}, {"docstring": "Set allows setting values for the entity specified by the key in the request. The key must be provided and all fields set (unless otherwise specified).", "name": "Set", "signature": "def Set(self, request, context)"}, {"docstring": "Delete will remove the entity specified by the key within the request. The key must be provided and all fields populated (unless otherwise specified).", "name": "Delete", "signature": "def Delete(self, request, context)"}], "n_methods": 5, "prompt": "Implement the Python class `DeviceTagConfigServiceServicer` described below.\n\nClass description:\nImplement the DeviceTagConfigServiceServicer class.\n\nMethod signatures and docstrings:\n- def GetOne(self, request, context): GetOne returns a unary model as specified by the key in the request. The key must be provided and all fields populated (unless otherwise specified).\n- def GetAll(self, request, context): GetAll returns all entities for this model, with optional filtering.\n- def Subscribe(self, request, context): Subscribe first returns all initial state known to the system, then will send deltas as entities are changed.\n- def Set(self, request, context): Set allows setting values for the entity specified by the key in the request. The key must be provided and all fields set (unless otherwise specified).\n- def Delete(self, request, context): Delete will remove the entity specified by the key within the request. 
The key must be provided and all fields populated (unless otherwise specified).", "prompted_full_text": "Implement the Python class `DeviceTagConfigServiceServicer` described below.\n\nClass description:\nImplement the DeviceTagConfigServiceServicer class.\n\nMethod signatures and docstrings:\n- def GetOne(self, request, context): GetOne returns a unary model as specified by the key in the request. The key must be provided and all fields populated (unless otherwise specified).\n- def GetAll(self, request, context): GetAll returns all entities for this model, with optional filtering.\n- def Subscribe(self, request, context): Subscribe first returns all initial state known to the system, then will send deltas as entities are changed.\n- def Set(self, request, context): Set allows setting values for the entity specified by the key in the request. The key must be provided and all fields set (unless otherwise specified).\n- def Delete(self, request, context): Delete will remove the entity specified by the key within the request. The key must be provided and all fields populated (unless otherwise specified).\n\n<|skeleton|>\nclass DeviceTagConfigServiceServicer:\n\n def GetOne(self, request, context):\n \"\"\"GetOne returns a unary model as specified by the key in the request. The key must be provided and all fields populated (unless otherwise specified).\"\"\"\n <|body_0|>\n\n def GetAll(self, request, context):\n \"\"\"GetAll returns all entities for this model, with optional filtering.\"\"\"\n <|body_1|>\n\n def Subscribe(self, request, context):\n \"\"\"Subscribe first returns all initial state known to the system, then will send deltas as entities are changed.\"\"\"\n <|body_2|>\n\n def Set(self, request, context):\n \"\"\"Set allows setting values for the entity specified by the key in the request. The key must be provided and all fields set (unless otherwise specified).\"\"\"\n <|body_3|>\n\n def Delete(self, request, context):\n \"\"\"Delete will remove the entity specified by the key within the request. The key must be provided and all fields populated (unless otherwise specified).\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n\n<|body_start_2|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_2|>\n\n<|body_start_3|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_3|>\n\n<|body_start_4|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_4|>\n", "revision_id": "d93b5f66a00b1e3710257d607d62f9d43736304e", "skeleton": "<|skeleton|>\nclass DeviceTagConfigServiceServicer:\n\n def GetOne(self, request, context):\n \"\"\"GetOne returns a unary model as specified by the key in the request. 
The key must be provided and all fields populated (unless otherwise specified).\"\"\"\n <|body_0|>\n\n def GetAll(self, request, context):\n \"\"\"GetAll returns all entities for this model, with optional filtering.\"\"\"\n <|body_1|>\n\n def Subscribe(self, request, context):\n \"\"\"Subscribe first returns all initial state known to the system, then will send deltas as entities are changed.\"\"\"\n <|body_2|>\n\n def Set(self, request, context):\n \"\"\"Set allows setting values for the entity specified by the key in the request. The key must be provided and all fields set (unless otherwise specified).\"\"\"\n <|body_3|>\n\n def Delete(self, request, context):\n \"\"\"Delete will remove the entity specified by the key within the request. The key must be provided and all fields populated (unless otherwise specified).\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DeviceTagConfigServiceServicer:\n def GetOne(self, request, context):\n \"\"\"GetOne returns a unary model as specified by the key in the request. The key must be provided and all fields populated (unless otherwise specified).\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def GetAll(self, request, context):\n \"\"\"GetAll returns all entities for this model, with optional filtering.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Subscribe(self, request, context):\n \"\"\"Subscribe first returns all initial state known to the system, then will send deltas as entities are changed.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Set(self, request, context):\n \"\"\"Set allows setting values for the entity specified by the key in the request. The key must be provided and all fields set (unless otherwise specified).\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Delete(self, request, context):\n \"\"\"Delete will remove the entity specified by the key within the request. The key must be provided and all fields populated (unless otherwise specified).\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n", "source": "the_stack_v2_python_sparse", "source_path": "CVP_API/Snapshot_Utils/getSnapshots_Resource_API/cloudvision-python/arista/tag/v1/services/gen_pb2_grpc.py", "source_repo": "Hugh-Adams/Example_Scripts", "split": "val", "star_events_count": 4}
{"blob_id": "2b985e52be982d5294acdb410ef0087d463c9e4e", "bodies": ["sstream = RosettaFunctionConstructs._ATOMPAIR\nsstream += 'BOUNDED {lower_bound: >.3f} {upper_bound: >.3f} 1 0.5 #'\nreturn sstream", "sstream = RosettaFunctionConstructs._ATOMPAIR\nsstream += RosettaFunctionConstructs._SCALARWEIGHTED\nsstream += 'BOUNDED 0 {lower_bound: >.3f} 1 0.5'\nreturn sstream", "sstream = RosettaFunctionConstructs._ATOMPAIR\nsstream += 'FADE -10 19 10 {energy_bonus: >5.2f} 0'\nreturn sstream", "sstream = RosettaFunctionConstructs._ATOMPAIR\nsstream += 'FADE -10 19 10 -15.00 0'\nreturn sstream", "sstream = RosettaFunctionConstructs._ATOMPAIR\nsstream += 'SIGMOID 8.00 1.00 #ContactMap: {raw_score: >4.3f}'\nreturn sstream", "sstream = RosettaFunctionConstructs._ATOMPAIR\nsstream += RosettaFunctionConstructs._SCALARWEIGHTED\nsstream += 'SUMFUNC 2 SIGMOID {sigmoid_cutoff: >6.3f} {sigmoid_slope: >6.3f} CONSTANTFUNC -0.5'\nreturn sstream"], "bodies_text": "<|body_start_0|>\n sstream = RosettaFunctionConstructs._ATOMPAIR\n sstream += 'BOUNDED {lower_bound: >.3f} {upper_bound: >.3f} 1 0.5 #'\n return sstream\n<|end_body_0|>\n\n<|body_start_1|>\n sstream = RosettaFunctionConstructs._ATOMPAIR\n sstream += RosettaFunctionConstructs._SCALARWEIGHTED\n sstream += 'BOUNDED 0 {lower_bound: >.3f} 1 0.5'\n return sstream\n<|end_body_1|>\n\n<|body_start_2|>\n sstream = RosettaFunctionConstructs._ATOMPAIR\n sstream += 'FADE -10 19 10 {energy_bonus: >5.2f} 0'\n return sstream\n<|end_body_2|>\n\n<|body_start_3|>\n sstream = RosettaFunctionConstructs._ATOMPAIR\n sstream += 'FADE -10 19 10 -15.00 0'\n return sstream\n<|end_body_3|>\n\n<|body_start_4|>\n sstream = RosettaFunctionConstructs._ATOMPAIR\n sstream += 'SIGMOID 8.00 1.00 #ContactMap: {raw_score: >4.3f}'\n return sstream\n<|end_body_4|>\n\n<|body_start_5|>\n sstream = RosettaFunctionConstructs._ATOMPAIR\n sstream += RosettaFunctionConstructs._SCALARWEIGHTED\n sstream += 'SUMFUNC 2 SIGMOID {sigmoid_cutoff: >6.3f} {sigmoid_slope: >6.3f} CONSTANTFUNC -0.5'\n return sstream\n<|end_body_5|>\n", "class_docstring": "Storage for string formats of different Rosetta energy function constructs For more information on the different energy functions, please refer to the corresponding references or the official `RosettaCommons documentation `_", "class_name": "RosettaFunctionConstructs", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RosettaFunctionConstructs:\n \"\"\"Storage for string formats of different Rosetta energy function constructs For more information on the different energy functions, please refer to the corresponding references or the official `RosettaCommons documentation `_\"\"\"\n\n def BOUNDED_default(self):\n \"\"\"Simple bounded energy function\"\"\"\n <|body_0|>\n\n def BOUNDED_gremlin(self):\n \"\"\"Energy function according to [#]_ References ---------- .. [#] Ovchinnekov et al. (2015). Large-scale determination of previously unsolved protein structures using evolutionary information. Elife 3(4), e09248.\"\"\"\n <|body_1|>\n\n def FADE(self):\n \"\"\"Energy function according to [#]_ and [#]_ References ---------- .. [#] Simkovic et al. (2016). Residue contacts predicted by evolutionary covariance extend the application of ab initio molecular replacement to larger and more challenging protein folds. IUCrJ 3(Pt 4), 259-270. .. [#] Michel et al. (2014). PconsFold: improved contact predictions improve protein models. 
Bioinformatics 30(17), i482-i488\"\"\"\n <|body_2|>\n\n def FADE_default(self):\n \"\"\"Energy function according to [#]_ References ---------- .. [#] Michel et al. (2014). PconsFold: improved contact predictions improve protein models. Bioinformatics 30(17), i482-i488\"\"\"\n <|body_3|>\n\n def SIGMOID_default(self):\n \"\"\"Simple sigmoidal energy function\"\"\"\n <|body_4|>\n\n def SIGMOID_gremlin(self):\n \"\"\"Energy function according to [#]_ References ---------- .. [#] Ovchinnekov et al. (2015). Large-scale determination of previously unsolved protein structures using evolutionary information. Elife 4, e09248.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n sstream = RosettaFunctionConstructs._ATOMPAIR\n sstream += 'BOUNDED {lower_bound: >.3f} {upper_bound: >.3f} 1 0.5 #'\n return sstream\n<|end_body_0|>\n\n<|body_start_1|>\n sstream = RosettaFunctionConstructs._ATOMPAIR\n sstream += RosettaFunctionConstructs._SCALARWEIGHTED\n sstream += 'BOUNDED 0 {lower_bound: >.3f} 1 0.5'\n return sstream\n<|end_body_1|>\n\n<|body_start_2|>\n sstream = RosettaFunctionConstructs._ATOMPAIR\n sstream += 'FADE -10 19 10 {energy_bonus: >5.2f} 0'\n return sstream\n<|end_body_2|>\n\n<|body_start_3|>\n sstream = RosettaFunctionConstructs._ATOMPAIR\n sstream += 'FADE -10 19 10 -15.00 0'\n return sstream\n<|end_body_3|>\n\n<|body_start_4|>\n sstream = RosettaFunctionConstructs._ATOMPAIR\n sstream += 'SIGMOID 8.00 1.00 #ContactMap: {raw_score: >4.3f}'\n return sstream\n<|end_body_4|>\n\n<|body_start_5|>\n sstream = RosettaFunctionConstructs._ATOMPAIR\n sstream += RosettaFunctionConstructs._SCALARWEIGHTED\n sstream += 'SUMFUNC 2 SIGMOID {sigmoid_cutoff: >6.3f} {sigmoid_slope: >6.3f} CONSTANTFUNC -0.5'\n return sstream\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000285", "length_bytes": 4869, "license_type": "permissive", "methods": [{"docstring": "Simple bounded energy function", "name": "BOUNDED_default", "signature": "def BOUNDED_default(self)"}, {"docstring": "Energy function according to [#]_ References ---------- .. [#] Ovchinnekov et al. (2015). Large-scale determination of previously unsolved protein structures using evolutionary information. Elife 3(4), e09248.", "name": "BOUNDED_gremlin", "signature": "def BOUNDED_gremlin(self)"}, {"docstring": "Energy function according to [#]_ and [#]_ References ---------- .. [#] Simkovic et al. (2016). Residue contacts predicted by evolutionary covariance extend the application of ab initio molecular replacement to larger and more challenging protein folds. IUCrJ 3(Pt 4), 259-270. .. [#] Michel et al. (2014). PconsFold: improved contact predictions improve protein models. Bioinformatics 30(17), i482-i488", "name": "FADE", "signature": "def FADE(self)"}, {"docstring": "Energy function according to [#]_ References ---------- .. [#] Michel et al. (2014). PconsFold: improved contact predictions improve protein models. Bioinformatics 30(17), i482-i488", "name": "FADE_default", "signature": "def FADE_default(self)"}, {"docstring": "Simple sigmoidal energy function", "name": "SIGMOID_default", "signature": "def SIGMOID_default(self)"}, {"docstring": "Energy function according to [#]_ References ---------- .. [#] Ovchinnekov et al. (2015). Large-scale determination of previously unsolved protein structures using evolutionary information. 
Elife 4, e09248.", "name": "SIGMOID_gremlin", "signature": "def SIGMOID_gremlin(self)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_train_015670", "prompt": "Implement the Python class `RosettaFunctionConstructs` described below.\n\nClass description:\nStorage for string formats of different Rosetta energy function constructs For more information on the different energy functions, please refer to the corresponding references or the official `RosettaCommons documentation `_\n\nMethod signatures and docstrings:\n- def BOUNDED_default(self): Simple bounded energy function\n- def BOUNDED_gremlin(self): Energy function according to [#]_ References ---------- .. [#] Ovchinnekov et al. (2015). Large-scale determination of previously unsolved protein structures using evolutionary information. Elife 3(4), e09248.\n- def FADE(self): Energy function according to [#]_ and [#]_ References ---------- .. [#] Simkovic et al. (2016). Residue contacts predicted by evolutionary covariance extend the application of ab initio molecular replacement to larger and more challenging protein folds. IUCrJ 3(Pt 4), 259-270. .. [#] Michel et al. (2014). PconsFold: improved contact predictions improve protein models. Bioinformatics 30(17), i482-i488\n- def FADE_default(self): Energy function according to [#]_ References ---------- .. [#] Michel et al. (2014). PconsFold: improved contact predictions improve protein models. Bioinformatics 30(17), i482-i488\n- def SIGMOID_default(self): Simple sigmoidal energy function\n- def SIGMOID_gremlin(self): Energy function according to [#]_ References ---------- .. [#] Ovchinnekov et al. (2015). Large-scale determination of previously unsolved protein structures using evolutionary information. Elife 4, e09248.", "prompted_full_text": "Implement the Python class `RosettaFunctionConstructs` described below.\n\nClass description:\nStorage for string formats of different Rosetta energy function constructs For more information on the different energy functions, please refer to the corresponding references or the official `RosettaCommons documentation `_\n\nMethod signatures and docstrings:\n- def BOUNDED_default(self): Simple bounded energy function\n- def BOUNDED_gremlin(self): Energy function according to [#]_ References ---------- .. [#] Ovchinnekov et al. (2015). Large-scale determination of previously unsolved protein structures using evolutionary information. Elife 3(4), e09248.\n- def FADE(self): Energy function according to [#]_ and [#]_ References ---------- .. [#] Simkovic et al. (2016). Residue contacts predicted by evolutionary covariance extend the application of ab initio molecular replacement to larger and more challenging protein folds. IUCrJ 3(Pt 4), 259-270. .. [#] Michel et al. (2014). PconsFold: improved contact predictions improve protein models. Bioinformatics 30(17), i482-i488\n- def FADE_default(self): Energy function according to [#]_ References ---------- .. [#] Michel et al. (2014). PconsFold: improved contact predictions improve protein models. Bioinformatics 30(17), i482-i488\n- def SIGMOID_default(self): Simple sigmoidal energy function\n- def SIGMOID_gremlin(self): Energy function according to [#]_ References ---------- .. [#] Ovchinnekov et al. (2015). Large-scale determination of previously unsolved protein structures using evolutionary information. 
Elife 4, e09248.\n\n<|skeleton|>\nclass RosettaFunctionConstructs:\n \"\"\"Storage for string formats of different Rosetta energy function constructs For more information on the different energy functions, please refer to the corresponding references or the official `RosettaCommons documentation `_\"\"\"\n\n def BOUNDED_default(self):\n \"\"\"Simple bounded energy function\"\"\"\n <|body_0|>\n\n def BOUNDED_gremlin(self):\n \"\"\"Energy function according to [#]_ References ---------- .. [#] Ovchinnekov et al. (2015). Large-scale determination of previously unsolved protein structures using evolutionary information. Elife 3(4), e09248.\"\"\"\n <|body_1|>\n\n def FADE(self):\n \"\"\"Energy function according to [#]_ and [#]_ References ---------- .. [#] Simkovic et al. (2016). Residue contacts predicted by evolutionary covariance extend the application of ab initio molecular replacement to larger and more challenging protein folds. IUCrJ 3(Pt 4), 259-270. .. [#] Michel et al. (2014). PconsFold: improved contact predictions improve protein models. Bioinformatics 30(17), i482-i488\"\"\"\n <|body_2|>\n\n def FADE_default(self):\n \"\"\"Energy function according to [#]_ References ---------- .. [#] Michel et al. (2014). PconsFold: improved contact predictions improve protein models. Bioinformatics 30(17), i482-i488\"\"\"\n <|body_3|>\n\n def SIGMOID_default(self):\n \"\"\"Simple sigmoidal energy function\"\"\"\n <|body_4|>\n\n def SIGMOID_gremlin(self):\n \"\"\"Energy function according to [#]_ References ---------- .. [#] Ovchinnekov et al. (2015). Large-scale determination of previously unsolved protein structures using evolutionary information. Elife 4, e09248.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n sstream = RosettaFunctionConstructs._ATOMPAIR\n sstream += 'BOUNDED {lower_bound: >.3f} {upper_bound: >.3f} 1 0.5 #'\n return sstream\n<|end_body_0|>\n\n<|body_start_1|>\n sstream = RosettaFunctionConstructs._ATOMPAIR\n sstream += RosettaFunctionConstructs._SCALARWEIGHTED\n sstream += 'BOUNDED 0 {lower_bound: >.3f} 1 0.5'\n return sstream\n<|end_body_1|>\n\n<|body_start_2|>\n sstream = RosettaFunctionConstructs._ATOMPAIR\n sstream += 'FADE -10 19 10 {energy_bonus: >5.2f} 0'\n return sstream\n<|end_body_2|>\n\n<|body_start_3|>\n sstream = RosettaFunctionConstructs._ATOMPAIR\n sstream += 'FADE -10 19 10 -15.00 0'\n return sstream\n<|end_body_3|>\n\n<|body_start_4|>\n sstream = RosettaFunctionConstructs._ATOMPAIR\n sstream += 'SIGMOID 8.00 1.00 #ContactMap: {raw_score: >4.3f}'\n return sstream\n<|end_body_4|>\n\n<|body_start_5|>\n sstream = RosettaFunctionConstructs._ATOMPAIR\n sstream += RosettaFunctionConstructs._SCALARWEIGHTED\n sstream += 'SUMFUNC 2 SIGMOID {sigmoid_cutoff: >6.3f} {sigmoid_slope: >6.3f} CONSTANTFUNC -0.5'\n return sstream\n<|end_body_5|>\n", "revision_id": "926f194a660d95350e9172d236c9c002e8a921a3", "skeleton": "<|skeleton|>\nclass RosettaFunctionConstructs:\n \"\"\"Storage for string formats of different Rosetta energy function constructs For more information on the different energy functions, please refer to the corresponding references or the official `RosettaCommons documentation `_\"\"\"\n\n def BOUNDED_default(self):\n \"\"\"Simple bounded energy function\"\"\"\n <|body_0|>\n\n def BOUNDED_gremlin(self):\n \"\"\"Energy function according to [#]_ References ---------- .. [#] Ovchinnekov et al. (2015). Large-scale determination of previously unsolved protein structures using evolutionary information. 
Elife 3(4), e09248.\"\"\"\n <|body_1|>\n\n def FADE(self):\n \"\"\"Energy function according to [#]_ and [#]_ References ---------- .. [#] Simkovic et al. (2016). Residue contacts predicted by evolutionary covariance extend the application of ab initio molecular replacement to larger and more challenging protein folds. IUCrJ 3(Pt 4), 259-270. .. [#] Michel et al. (2014). PconsFold: improved contact predictions improve protein models. Bioinformatics 30(17), i482-i488\"\"\"\n <|body_2|>\n\n def FADE_default(self):\n \"\"\"Energy function according to [#]_ References ---------- .. [#] Michel et al. (2014). PconsFold: improved contact predictions improve protein models. Bioinformatics 30(17), i482-i488\"\"\"\n <|body_3|>\n\n def SIGMOID_default(self):\n \"\"\"Simple sigmoidal energy function\"\"\"\n <|body_4|>\n\n def SIGMOID_gremlin(self):\n \"\"\"Energy function according to [#]_ References ---------- .. [#] Ovchinnekov et al. (2015). Large-scale determination of previously unsolved protein structures using evolutionary information. Elife 4, e09248.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class RosettaFunctionConstructs:\n \"\"\"Storage for string formats of different Rosetta energy function constructs For more information on the different energy functions, please refer to the corresponding references or the official `RosettaCommons documentation `_\"\"\"\n\n def BOUNDED_default(self):\n \"\"\"Simple bounded energy function\"\"\"\n sstream = RosettaFunctionConstructs._ATOMPAIR\n sstream += 'BOUNDED {lower_bound: >.3f} {upper_bound: >.3f} 1 0.5 #'\n return sstream\n\n def BOUNDED_gremlin(self):\n \"\"\"Energy function according to [#]_ References ---------- .. [#] Ovchinnekov et al. (2015). Large-scale determination of previously unsolved protein structures using evolutionary information. Elife 3(4), e09248.\"\"\"\n sstream = RosettaFunctionConstructs._ATOMPAIR\n sstream += RosettaFunctionConstructs._SCALARWEIGHTED\n sstream += 'BOUNDED 0 {lower_bound: >.3f} 1 0.5'\n return sstream\n\n def FADE(self):\n \"\"\"Energy function according to [#]_ and [#]_ References ---------- .. [#] Simkovic et al. (2016). Residue contacts predicted by evolutionary covariance extend the application of ab initio molecular replacement to larger and more challenging protein folds. IUCrJ 3(Pt 4), 259-270. .. [#] Michel et al. (2014). PconsFold: improved contact predictions improve protein models. Bioinformatics 30(17), i482-i488\"\"\"\n sstream = RosettaFunctionConstructs._ATOMPAIR\n sstream += 'FADE -10 19 10 {energy_bonus: >5.2f} 0'\n return sstream\n\n def FADE_default(self):\n \"\"\"Energy function according to [#]_ References ---------- .. [#] Michel et al. (2014). PconsFold: improved contact predictions improve protein models. Bioinformatics 30(17), i482-i488\"\"\"\n sstream = RosettaFunctionConstructs._ATOMPAIR\n sstream += 'FADE -10 19 10 -15.00 0'\n return sstream\n\n def SIGMOID_default(self):\n \"\"\"Simple sigmoidal energy function\"\"\"\n sstream = RosettaFunctionConstructs._ATOMPAIR\n sstream += 'SIGMOID 8.00 1.00 #ContactMap: {raw_score: >4.3f}'\n return sstream\n\n def SIGMOID_gremlin(self):\n \"\"\"Energy function according to [#]_ References ---------- .. [#] Ovchinnekov et al. (2015). Large-scale determination of previously unsolved protein structures using evolutionary information. 
Elife 4, e09248.\"\"\"\n sstream = RosettaFunctionConstructs._ATOMPAIR\n sstream += RosettaFunctionConstructs._SCALARWEIGHTED\n sstream += 'SUMFUNC 2 SIGMOID {sigmoid_cutoff: >6.3f} {sigmoid_slope: >6.3f} CONSTANTFUNC -0.5'\n return sstream\n", "source": "the_stack_v2_python_sparse", "source_path": "conkit/misc/energyfunction.py", "source_repo": "rigdenlab/conkit", "split": "val", "star_events_count": 19}
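The record above catalogs Rosetta constraint-file templates, but its skeleton elides the two class attributes every method body references (`_ATOMPAIR` and `_SCALARWEIGHTED`), so the bodies cannot run as shown. Below is a minimal, self-contained sketch with assumed attribute values (illustrative guesses, not the originals from conkit) that fills one template into a concrete AtomPair constraint line. Incidentally, the cited reference is the GREMLIN paper by Ovchinnikov et al., eLife 4:e09248 (2015); the record spells the name "Ovchinnekov" and cites the volume inconsistently ("3(4)" vs. "4", where 4 is correct).

class RosettaFunctionConstructs:
    # Assumed template prefixes; the real conkit module defines its own values.
    _ATOMPAIR = 'AtomPair {atom1: >4} {res1_seq: >4} {atom2: >4} {res2_seq: >4} '
    _SCALARWEIGHTED = 'SCALARWEIGHTEDFUNC {scalar_score: >5.3f} '

    def BOUNDED_default(self):
        """Simple bounded energy function"""
        sstream = RosettaFunctionConstructs._ATOMPAIR
        sstream += 'BOUNDED {lower_bound: >.3f} {upper_bound: >.3f} 1 0.5 #'
        return sstream

# Filling the template produces one Rosetta constraint line:
line = RosettaFunctionConstructs().BOUNDED_default().format(
    atom1='CB', res1_seq=5, atom2='CB', res2_seq=37,
    lower_bound=3.5, upper_bound=8.0)
print(line)  # e.g. "AtomPair   CB    5   CB   37 BOUNDED 3.500 8.000 1 0.5 #"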
{"blob_id": "135eac01c2cd0327162077e57d59532a23d20bde", "bodies": ["super().__init__(model_serialization_path, model_name)\nself.latent_dimension = latent_dimension\nself.generate_model(input_shape, input_shape)", "hidden_neurons_1 = input_shape // 4\nhidden_neurons_2 = 3000\nhidden_neurons_4 = 5000\nhidden_neurons_6 = 500\nhidden_neurons_7 = 300\nencoder_input_layer = Input(shape=input_shape)\nencoder_layer_1 = Dense(hidden_neurons_1, activation='relu')(encoder_input_layer)\nreshaped_layer = Reshape((hidden_neurons_1, 1))(encoder_layer_1)\nencoder_layer_2 = Conv1D(256, 4, activation='relu')(reshaped_layer)\nencoder_layer_11 = Conv1D(1, 4, activation='relu')(encoder_layer_2)\nflattened = Flatten()(encoder_layer_11)\nencoder_output = Dense(self.latent_dimension, activation='sigmoid')(flattened)\nself.generate_encoder(encoder_input_layer, encoder_output)\ndecoder_input_layer = Input(shape=self.latent_dimension)\ndecoder_layer_1 = Dense(hidden_neurons_7, activation='relu')(decoder_input_layer)\ndecoder_layer_2 = Dense(hidden_neurons_6, activation='relu')(decoder_layer_1)\ndecoder_layer_4 = Dense(hidden_neurons_4, activation='relu')(decoder_layer_2)\ndecoder_layer_6 = Dense(hidden_neurons_2, activation='relu')(decoder_layer_4)\ndecoder_output = Dense(output_shape, activation='sigmoid')(decoder_layer_6)\nself.generate_decoder(decoder_input_layer, decoder_output)\nmodel_input = Input(shape=input_shape)\ncode = self.encoder_layer(model_input)\nmodel_output = self.decoder_layer(code)\nself.compile_model(model_input, model_output, Adam(lr=0.001), CosineSimilarity())"], "bodies_text": "<|body_start_0|>\n super().__init__(model_serialization_path, model_name)\n self.latent_dimension = latent_dimension\n self.generate_model(input_shape, input_shape)\n<|end_body_0|>\n\n<|body_start_1|>\n hidden_neurons_1 = input_shape // 4\n hidden_neurons_2 = 3000\n hidden_neurons_4 = 5000\n hidden_neurons_6 = 500\n hidden_neurons_7 = 300\n encoder_input_layer = Input(shape=input_shape)\n encoder_layer_1 = Dense(hidden_neurons_1, activation='relu')(encoder_input_layer)\n reshaped_layer = Reshape((hidden_neurons_1, 1))(encoder_layer_1)\n encoder_layer_2 = Conv1D(256, 4, activation='relu')(reshaped_layer)\n encoder_layer_11 = Conv1D(1, 4, activation='relu')(encoder_layer_2)\n flattened = Flatten()(encoder_layer_11)\n encoder_output = Dense(self.latent_dimension, activation='sigmoid')(flattened)\n self.generate_encoder(encoder_input_layer, encoder_output)\n decoder_input_layer = Input(shape=self.latent_dimension)\n decoder_layer_1 = Dense(hidden_neurons_7, activation='relu')(decoder_input_layer)\n decoder_layer_2 = Dense(hidden_neurons_6, activation='relu')(decoder_layer_1)\n decoder_layer_4 = Dense(hidden_neurons_4, activation='relu')(decoder_layer_2)\n decoder_layer_6 = Dense(hidden_neurons_2, activation='relu')(decoder_layer_4)\n decoder_output = Dense(output_shape, activation='sigmoid')(decoder_layer_6)\n self.generate_decoder(decoder_input_layer, decoder_output)\n model_input = Input(shape=input_shape)\n code = self.encoder_layer(model_input)\n model_output = self.decoder_layer(code)\n self.compile_model(model_input, model_output, Adam(lr=0.001), CosineSimilarity())\n<|end_body_1|>\n", "class_docstring": "An autoencoder for mRNA data", "class_name": "ShallowConvolutionalAE", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ShallowConvolutionalAE:\n \"\"\"An autoencoder for mRNA data\"\"\"\n\n def __init__(self, input_shape, latent_dimension=100, 
model_serialization_path='models/autoencoder/', model_name='autoencoder'):\n \"\"\"Class constructor :param input_shape: the size of the input :param latent_dimension: the size of the output :param model_serialization_path: the path where to save the model :param model_name: the model name\"\"\"\n <|body_0|>\n\n def generate_model(self, input_shape, output_shape):\n \"\"\"Instantiates encoder, decoder and model as NN with 3 hidden layers :param input_shape: the input shape :param output_shape: the output shape :return: None\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(model_serialization_path, model_name)\n self.latent_dimension = latent_dimension\n self.generate_model(input_shape, input_shape)\n<|end_body_0|>\n\n<|body_start_1|>\n hidden_neurons_1 = input_shape // 4\n hidden_neurons_2 = 3000\n hidden_neurons_4 = 5000\n hidden_neurons_6 = 500\n hidden_neurons_7 = 300\n encoder_input_layer = Input(shape=input_shape)\n encoder_layer_1 = Dense(hidden_neurons_1, activation='relu')(encoder_input_layer)\n reshaped_layer = Reshape((hidden_neurons_1, 1))(encoder_layer_1)\n encoder_layer_2 = Conv1D(256, 4, activation='relu')(reshaped_layer)\n encoder_layer_11 = Conv1D(1, 4, activation='relu')(encoder_layer_2)\n flattened = Flatten()(encoder_layer_11)\n encoder_output = Dense(self.latent_dimension, activation='sigmoid')(flattened)\n self.generate_encoder(encoder_input_layer, encoder_output)\n decoder_input_layer = Input(shape=self.latent_dimension)\n decoder_layer_1 = Dense(hidden_neurons_7, activation='relu')(decoder_input_layer)\n decoder_layer_2 = Dense(hidden_neurons_6, activation='relu')(decoder_layer_1)\n decoder_layer_4 = Dense(hidden_neurons_4, activation='relu')(decoder_layer_2)\n decoder_layer_6 = Dense(hidden_neurons_2, activation='relu')(decoder_layer_4)\n decoder_output = Dense(output_shape, activation='sigmoid')(decoder_layer_6)\n self.generate_decoder(decoder_input_layer, decoder_output)\n model_input = Input(shape=input_shape)\n code = self.encoder_layer(model_input)\n model_output = self.decoder_layer(code)\n self.compile_model(model_input, model_output, Adam(lr=0.001), CosineSimilarity())\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000286", "length_bytes": 15750, "license_type": "no_license", "methods": [{"docstring": "Class constructor :param input_shape: the size of the input :param latent_dimension: the size of the output :param model_serialization_path: the path where to save the model :param model_name: the model name", "name": "__init__", "signature": "def __init__(self, input_shape, latent_dimension=100, model_serialization_path='models/autoencoder/', model_name='autoencoder')"}, {"docstring": "Instantiates encoder, decoder and model as NN with 3 hidden layers :param input_shape: the input shape :param output_shape: the output shape :return: None", "name": "generate_model", "signature": "def generate_model(self, input_shape, output_shape)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_041359", "prompt": "Implement the Python class `ShallowConvolutionalAE` described below.\n\nClass description:\nAn autoencoder for mRNA data\n\nMethod signatures and docstrings:\n- def __init__(self, input_shape, latent_dimension=100, model_serialization_path='models/autoencoder/', model_name='autoencoder'): Class constructor :param input_shape: the size of the input :param latent_dimension: the size of the output :param model_serialization_path: the path where to save the model :param model_name: the model name\n- def 
generate_model(self, input_shape, output_shape): Instantiates encoder, decoder and model as NN with 3 hidden layers :param input_shape: the input shape :param output_shape: the output shape :return: None", "prompted_full_text": "Implement the Python class `ShallowConvolutionalAE` described below.\n\nClass description:\nAn autoencoder for mRNA data\n\nMethod signatures and docstrings:\n- def __init__(self, input_shape, latent_dimension=100, model_serialization_path='models/autoencoder/', model_name='autoencoder'): Class constructor :param input_shape: the size of the input :param latent_dimension: the size of the output :param model_serialization_path: the path where to save the model :param model_name: the model name\n- def generate_model(self, input_shape, output_shape): Instantiates encoder, decoder and model as NN with 3 hidden layers :param input_shape: the input shape :param output_shape: the output shape :return: None\n\n<|skeleton|>\nclass ShallowConvolutionalAE:\n \"\"\"An autoencoder for mRNA data\"\"\"\n\n def __init__(self, input_shape, latent_dimension=100, model_serialization_path='models/autoencoder/', model_name='autoencoder'):\n \"\"\"Class constructor :param input_shape: the size of the input :param latent_dimension: the size of the output :param model_serialization_path: the path where to save the model :param model_name: the model name\"\"\"\n <|body_0|>\n\n def generate_model(self, input_shape, output_shape):\n \"\"\"Instantiates encoder, decoder and model as NN with 3 hidden layers :param input_shape: the input shape :param output_shape: the output shape :return: None\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(model_serialization_path, model_name)\n self.latent_dimension = latent_dimension\n self.generate_model(input_shape, input_shape)\n<|end_body_0|>\n\n<|body_start_1|>\n hidden_neurons_1 = input_shape // 4\n hidden_neurons_2 = 3000\n hidden_neurons_4 = 5000\n hidden_neurons_6 = 500\n hidden_neurons_7 = 300\n encoder_input_layer = Input(shape=input_shape)\n encoder_layer_1 = Dense(hidden_neurons_1, activation='relu')(encoder_input_layer)\n reshaped_layer = Reshape((hidden_neurons_1, 1))(encoder_layer_1)\n encoder_layer_2 = Conv1D(256, 4, activation='relu')(reshaped_layer)\n encoder_layer_11 = Conv1D(1, 4, activation='relu')(encoder_layer_2)\n flattened = Flatten()(encoder_layer_11)\n encoder_output = Dense(self.latent_dimension, activation='sigmoid')(flattened)\n self.generate_encoder(encoder_input_layer, encoder_output)\n decoder_input_layer = Input(shape=self.latent_dimension)\n decoder_layer_1 = Dense(hidden_neurons_7, activation='relu')(decoder_input_layer)\n decoder_layer_2 = Dense(hidden_neurons_6, activation='relu')(decoder_layer_1)\n decoder_layer_4 = Dense(hidden_neurons_4, activation='relu')(decoder_layer_2)\n decoder_layer_6 = Dense(hidden_neurons_2, activation='relu')(decoder_layer_4)\n decoder_output = Dense(output_shape, activation='sigmoid')(decoder_layer_6)\n self.generate_decoder(decoder_input_layer, decoder_output)\n model_input = Input(shape=input_shape)\n code = self.encoder_layer(model_input)\n model_output = self.decoder_layer(code)\n self.compile_model(model_input, model_output, Adam(lr=0.001), CosineSimilarity())\n<|end_body_1|>\n", "revision_id": "9d26bbd1c01e16c3b4f6ee0d425d62945d4e31aa", "skeleton": "<|skeleton|>\nclass ShallowConvolutionalAE:\n \"\"\"An autoencoder for mRNA data\"\"\"\n\n def __init__(self, input_shape, latent_dimension=100, model_serialization_path='models/autoencoder/', 
model_name='autoencoder'):\n \"\"\"Class constructor :param input_shape: the size of the input :param latent_dimension: the size of the output :param model_serialization_path: the path where to save the model :param model_name: the model name\"\"\"\n <|body_0|>\n\n def generate_model(self, input_shape, output_shape):\n \"\"\"Instantiates encoder, decoder and model as NN with 3 hidden layers :param input_shape: the input shape :param output_shape: the output shape :return: None\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ShallowConvolutionalAE:\n \"\"\"An autoencoder for mRNA data\"\"\"\n\n def __init__(self, input_shape, latent_dimension=100, model_serialization_path='models/autoencoder/', model_name='autoencoder'):\n \"\"\"Class constructor :param input_shape: the size of the input :param latent_dimension: the size of the output :param model_serialization_path: the path where to save the model :param model_name: the model name\"\"\"\n super().__init__(model_serialization_path, model_name)\n self.latent_dimension = latent_dimension\n self.generate_model(input_shape, input_shape)\n\n def generate_model(self, input_shape, output_shape):\n \"\"\"Instantiates encoder, decoder and model as NN with 3 hidden layers :param input_shape: the input shape :param output_shape: the output shape :return: None\"\"\"\n hidden_neurons_1 = input_shape // 4\n hidden_neurons_2 = 3000\n hidden_neurons_4 = 5000\n hidden_neurons_6 = 500\n hidden_neurons_7 = 300\n encoder_input_layer = Input(shape=input_shape)\n encoder_layer_1 = Dense(hidden_neurons_1, activation='relu')(encoder_input_layer)\n reshaped_layer = Reshape((hidden_neurons_1, 1))(encoder_layer_1)\n encoder_layer_2 = Conv1D(256, 4, activation='relu')(reshaped_layer)\n encoder_layer_11 = Conv1D(1, 4, activation='relu')(encoder_layer_2)\n flattened = Flatten()(encoder_layer_11)\n encoder_output = Dense(self.latent_dimension, activation='sigmoid')(flattened)\n self.generate_encoder(encoder_input_layer, encoder_output)\n decoder_input_layer = Input(shape=self.latent_dimension)\n decoder_layer_1 = Dense(hidden_neurons_7, activation='relu')(decoder_input_layer)\n decoder_layer_2 = Dense(hidden_neurons_6, activation='relu')(decoder_layer_1)\n decoder_layer_4 = Dense(hidden_neurons_4, activation='relu')(decoder_layer_2)\n decoder_layer_6 = Dense(hidden_neurons_2, activation='relu')(decoder_layer_4)\n decoder_output = Dense(output_shape, activation='sigmoid')(decoder_layer_6)\n self.generate_decoder(decoder_input_layer, decoder_output)\n model_input = Input(shape=input_shape)\n code = self.encoder_layer(model_input)\n model_output = self.decoder_layer(code)\n self.compile_model(model_input, model_output, Adam(lr=0.001), CosineSimilarity())\n", "source": "the_stack_v2_python_sparse", "source_path": "models/autoencoders.py", "source_repo": "TestaDiRapa/asimov", "split": "val", "star_events_count": 0}
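Two things in the ShallowConvolutionalAE record deserve a note. First, `__init__` calls `super().__init__(model_serialization_path, model_name)` even though the skeleton declares no base class, so the code only works when the class actually inherits from a base that accepts those arguments and provides `generate_encoder`, `generate_decoder`, `compile_model`, `encoder_layer`, and `decoder_layer`. Second, the bodies assume a set of Keras imports and use the deprecated `lr=` keyword (recent Keras releases only accept `learning_rate=`). A hedged sketch of that assumed scaffolding, with a hypothetical base class standing in for the repo's real one:

from tensorflow.keras.layers import Input, Dense, Reshape, Conv1D, Flatten
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import CosineSimilarity

class SerializableAE:  # hypothetical stand-in for the repo's actual base class
    def __init__(self, model_serialization_path, model_name):
        self.model_serialization_path = model_serialization_path
        self.model_name = model_name

    def generate_encoder(self, inputs, outputs):
        # Wrap the encoder graph in a reusable Keras Model.
        self.encoder_layer = Model(inputs, outputs, name='encoder')

    def generate_decoder(self, inputs, outputs):
        self.decoder_layer = Model(inputs, outputs, name='decoder')

    def compile_model(self, model_input, model_output, optimizer, loss):
        self.model = Model(model_input, model_output, name=self.model_name)
        self.model.compile(optimizer=optimizer, loss=loss)

With a base like that in place, the record's encoder narrows the input to `latent_dimension`; note that both Conv1D layers use kernel size 4 with the default `padding='valid'`, so the flattened width entering the final Dense layer is `(input_shape // 4) - 6`.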
{"blob_id": "8fec3623d6d98be9ef6276c3cf82ee30bd017dd4", "bodies": ["super().__init__(method_serializers={'GET': search_serializers, 'POST': record_serializers}, default_method_media_type={'GET': default_media_type, 'POST': default_media_type}, default_media_type=default_media_type, **kwargs)\nself.pid_type = pid_type\nself.minter = current_pidstore.minters[minter_name]\nself.pid_fetcher = current_pidstore.fetchers[pid_fetcher]\nself.read_permission_factory = read_permission_factory\nself.create_permission_factory = create_permission_factory or current_records_rest.create_permission_factory\nself.list_permission_factory = list_permission_factory or current_records_rest.list_permission_factory\nself.search_class = search_class\nself.max_result_window = max_result_window or 10000\nself.search_factory = partial(search_factory, self)\nself.item_links_factory = item_links_factory\nself.loaders = record_loaders or current_records_rest.loaders\nself.record_class = record_class or Record\nself.indexer_class = indexer_class", "urlkwargs = dict()\nsearch_obj = self.search_class()\nsearch = search_obj.with_preference_param().params(version=True)\nsearch = search[pagination['from_idx']:pagination['to_idx']]\nsearch = search.extra(track_total_hits=True)\nsearch, qs_kwargs = self.search_factory(search)\nurlkwargs.update(qs_kwargs)\nsearch_result = search.execute()\ntotal = search_result.hits.total['value']\nendpoint = '.{0}_list'.format(current_records_rest.default_endpoint_prefixes[self.pid_type])\nurlkwargs.update(size=pagination['size'], _external=True)\nlinks = {}\n\ndef _link(name):\n urlkwargs.update(pagination['links'][name])\n links[name] = url_for(endpoint, **urlkwargs)\n_link('self')\nif pagination['from_idx'] >= 1:\n _link('prev')\nif pagination['to_idx'] < min(total, self.max_result_window):\n _link('next')\nreturn self.make_response(pid_fetcher=self.pid_fetcher, search_result=search_result.to_dict(), links=links, item_links_factory=self.item_links_factory)", "if request.mimetype not in self.loaders:\n raise UnsupportedMediaRESTError(request.mimetype)\ndata = self.loaders[request.mimetype]()\nif data is None:\n raise InvalidDataRESTError()\npermission_factory = self.create_permission_factory\nif permission_factory:\n verify_record_permission(permission_factory, data)\nrecord_uuid = uuid.uuid4()\npid = self.minter(record_uuid, data=data)\nrecord = self.record_class.create(data, id_=record_uuid)\ndb.session.commit()\nif self.indexer_class:\n self.indexer_class().index(record)\nresponse = self.make_response(pid, record, 201, links_factory=self.item_links_factory)\nendpoint = '.{0}_item'.format(current_records_rest.default_endpoint_prefixes[pid.pid_type])\nlocation = url_for(endpoint, pid_value=pid.pid_value, _external=True)\nresponse.headers.extend(dict(location=location))\nreturn response"], "bodies_text": "<|body_start_0|>\n super().__init__(method_serializers={'GET': search_serializers, 'POST': record_serializers}, default_method_media_type={'GET': default_media_type, 'POST': default_media_type}, default_media_type=default_media_type, **kwargs)\n self.pid_type = pid_type\n self.minter = current_pidstore.minters[minter_name]\n self.pid_fetcher = current_pidstore.fetchers[pid_fetcher]\n self.read_permission_factory = read_permission_factory\n self.create_permission_factory = create_permission_factory or current_records_rest.create_permission_factory\n self.list_permission_factory = list_permission_factory or current_records_rest.list_permission_factory\n self.search_class = search_class\n 
self.max_result_window = max_result_window or 10000\n self.search_factory = partial(search_factory, self)\n self.item_links_factory = item_links_factory\n self.loaders = record_loaders or current_records_rest.loaders\n self.record_class = record_class or Record\n self.indexer_class = indexer_class\n<|end_body_0|>\n\n<|body_start_1|>\n urlkwargs = dict()\n search_obj = self.search_class()\n search = search_obj.with_preference_param().params(version=True)\n search = search[pagination['from_idx']:pagination['to_idx']]\n search = search.extra(track_total_hits=True)\n search, qs_kwargs = self.search_factory(search)\n urlkwargs.update(qs_kwargs)\n search_result = search.execute()\n total = search_result.hits.total['value']\n endpoint = '.{0}_list'.format(current_records_rest.default_endpoint_prefixes[self.pid_type])\n urlkwargs.update(size=pagination['size'], _external=True)\n links = {}\n\n def _link(name):\n urlkwargs.update(pagination['links'][name])\n links[name] = url_for(endpoint, **urlkwargs)\n _link('self')\n if pagination['from_idx'] >= 1:\n _link('prev')\n if pagination['to_idx'] < min(total, self.max_result_window):\n _link('next')\n return self.make_response(pid_fetcher=self.pid_fetcher, search_result=search_result.to_dict(), links=links, item_links_factory=self.item_links_factory)\n<|end_body_1|>\n\n<|body_start_2|>\n if request.mimetype not in self.loaders:\n raise UnsupportedMediaRESTError(request.mimetype)\n data = self.loaders[request.mimetype]()\n if data is None:\n raise InvalidDataRESTError()\n permission_factory = self.create_permission_factory\n if permission_factory:\n verify_record_permission(permission_factory, data)\n record_uuid = uuid.uuid4()\n pid = self.minter(record_uuid, data=data)\n record = self.record_class.create(data, id_=record_uuid)\n db.session.commit()\n if self.indexer_class:\n self.indexer_class().index(record)\n response = self.make_response(pid, record, 201, links_factory=self.item_links_factory)\n endpoint = '.{0}_item'.format(current_records_rest.default_endpoint_prefixes[pid.pid_type])\n location = url_for(endpoint, pid_value=pid.pid_value, _external=True)\n response.headers.extend(dict(location=location))\n return response\n<|end_body_2|>\n", "class_docstring": "Resource for records listing.", "class_name": "RecordsListResource", "detected_licenses": ["MIT", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RecordsListResource:\n \"\"\"Resource for records listing.\"\"\"\n\n def __init__(self, minter_name=None, pid_type=None, pid_fetcher=None, read_permission_factory=None, create_permission_factory=None, list_permission_factory=None, search_class=None, record_serializers=None, record_loaders=None, search_serializers=None, default_media_type=None, max_result_window=None, search_factory=None, item_links_factory=None, record_class=None, indexer_class=None, **kwargs):\n \"\"\"Constructor.\"\"\"\n <|body_0|>\n\n def get(self, pagination=None, **kwargs):\n \"\"\"Search records. Permissions: the `list_permission_factory` permissions are checked. :returns: Search result containing hits and aggregations as returned by invenio-search.\"\"\"\n <|body_1|>\n\n def post(self, **kwargs):\n \"\"\"Create a record. Permissions: ``create_permission_factory`` Procedure description: #. First of all, the `create_permission_factory` permissions are checked. #. Then, the record is deserialized by the proper loader. #. 
A second call to the `create_permission_factory` factory is done: it differs from the previous call because this time the record is passed as parameter. #. A `uuid` is generated for the record and the minter is called. #. The record class is called to create the record. #. The HTTP response is built with the help of the item link factory. :returns: The created record.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(method_serializers={'GET': search_serializers, 'POST': record_serializers}, default_method_media_type={'GET': default_media_type, 'POST': default_media_type}, default_media_type=default_media_type, **kwargs)\n self.pid_type = pid_type\n self.minter = current_pidstore.minters[minter_name]\n self.pid_fetcher = current_pidstore.fetchers[pid_fetcher]\n self.read_permission_factory = read_permission_factory\n self.create_permission_factory = create_permission_factory or current_records_rest.create_permission_factory\n self.list_permission_factory = list_permission_factory or current_records_rest.list_permission_factory\n self.search_class = search_class\n self.max_result_window = max_result_window or 10000\n self.search_factory = partial(search_factory, self)\n self.item_links_factory = item_links_factory\n self.loaders = record_loaders or current_records_rest.loaders\n self.record_class = record_class or Record\n self.indexer_class = indexer_class\n<|end_body_0|>\n\n<|body_start_1|>\n urlkwargs = dict()\n search_obj = self.search_class()\n search = search_obj.with_preference_param().params(version=True)\n search = search[pagination['from_idx']:pagination['to_idx']]\n search = search.extra(track_total_hits=True)\n search, qs_kwargs = self.search_factory(search)\n urlkwargs.update(qs_kwargs)\n search_result = search.execute()\n total = search_result.hits.total['value']\n endpoint = '.{0}_list'.format(current_records_rest.default_endpoint_prefixes[self.pid_type])\n urlkwargs.update(size=pagination['size'], _external=True)\n links = {}\n\n def _link(name):\n urlkwargs.update(pagination['links'][name])\n links[name] = url_for(endpoint, **urlkwargs)\n _link('self')\n if pagination['from_idx'] >= 1:\n _link('prev')\n if pagination['to_idx'] < min(total, self.max_result_window):\n _link('next')\n return self.make_response(pid_fetcher=self.pid_fetcher, search_result=search_result.to_dict(), links=links, item_links_factory=self.item_links_factory)\n<|end_body_1|>\n\n<|body_start_2|>\n if request.mimetype not in self.loaders:\n raise UnsupportedMediaRESTError(request.mimetype)\n data = self.loaders[request.mimetype]()\n if data is None:\n raise InvalidDataRESTError()\n permission_factory = self.create_permission_factory\n if permission_factory:\n verify_record_permission(permission_factory, data)\n record_uuid = uuid.uuid4()\n pid = self.minter(record_uuid, data=data)\n record = self.record_class.create(data, id_=record_uuid)\n db.session.commit()\n if self.indexer_class:\n self.indexer_class().index(record)\n response = self.make_response(pid, record, 201, links_factory=self.item_links_factory)\n endpoint = '.{0}_item'.format(current_records_rest.default_endpoint_prefixes[pid.pid_type])\n location = url_for(endpoint, pid_value=pid.pid_value, _external=True)\n response.headers.extend(dict(location=location))\n return response\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000287", "length_bytes": 34926, "license_type": "permissive", "methods": [{"docstring": "Constructor.", "name": "__init__", "signature": "def __init__(self, minter_name=None, 
pid_type=None, pid_fetcher=None, read_permission_factory=None, create_permission_factory=None, list_permission_factory=None, search_class=None, record_serializers=None, record_loaders=None, search_serializers=None, default_media_type=None, max_result_window=None, search_factory=None, item_links_factory=None, record_class=None, indexer_class=None, **kwargs)"}, {"docstring": "Search records. Permissions: the `list_permission_factory` permissions are checked. :returns: Search result containing hits and aggregations as returned by invenio-search.", "name": "get", "signature": "def get(self, pagination=None, **kwargs)"}, {"docstring": "Create a record. Permissions: ``create_permission_factory`` Procedure description: #. First of all, the `create_permission_factory` permissions are checked. #. Then, the record is deserialized by the proper loader. #. A second call to the `create_permission_factory` factory is done: it differs from the previous call because this time the record is passed as parameter. #. A `uuid` is generated for the record and the minter is called. #. The record class is called to create the record. #. The HTTP response is built with the help of the item link factory. :returns: The created record.", "name": "post", "signature": "def post(self, **kwargs)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_025242", "prompt": "Implement the Python class `RecordsListResource` described below.\n\nClass description:\nResource for records listing.\n\nMethod signatures and docstrings:\n- def __init__(self, minter_name=None, pid_type=None, pid_fetcher=None, read_permission_factory=None, create_permission_factory=None, list_permission_factory=None, search_class=None, record_serializers=None, record_loaders=None, search_serializers=None, default_media_type=None, max_result_window=None, search_factory=None, item_links_factory=None, record_class=None, indexer_class=None, **kwargs): Constructor.\n- def get(self, pagination=None, **kwargs): Search records. Permissions: the `list_permission_factory` permissions are checked. :returns: Search result containing hits and aggregations as returned by invenio-search.\n- def post(self, **kwargs): Create a record. Permissions: ``create_permission_factory`` Procedure description: #. First of all, the `create_permission_factory` permissions are checked. #. Then, the record is deserialized by the proper loader. #. A second call to the `create_permission_factory` factory is done: it differs from the previous call because this time the record is passed as parameter. #. A `uuid` is generated for the record and the minter is called. #. The record class is called to create the record. #. The HTTP response is built with the help of the item link factory. :returns: The created record.", "prompted_full_text": "Implement the Python class `RecordsListResource` described below.\n\nClass description:\nResource for records listing.\n\nMethod signatures and docstrings:\n- def __init__(self, minter_name=None, pid_type=None, pid_fetcher=None, read_permission_factory=None, create_permission_factory=None, list_permission_factory=None, search_class=None, record_serializers=None, record_loaders=None, search_serializers=None, default_media_type=None, max_result_window=None, search_factory=None, item_links_factory=None, record_class=None, indexer_class=None, **kwargs): Constructor.\n- def get(self, pagination=None, **kwargs): Search records. Permissions: the `list_permission_factory` permissions are checked. 
:returns: Search result containing hits and aggregations as returned by invenio-search.\n- def post(self, **kwargs): Create a record. Permissions: ``create_permission_factory`` Procedure description: #. First of all, the `create_permission_factory` permissions are checked. #. Then, the record is deserialized by the proper loader. #. A second call to the `create_permission_factory` factory is done: it differs from the previous call because this time the record is passed as parameter. #. A `uuid` is generated for the record and the minter is called. #. The record class is called to create the record. #. The HTTP response is built with the help of the item link factory. :returns: The created record.\n\n<|skeleton|>\nclass RecordsListResource:\n \"\"\"Resource for records listing.\"\"\"\n\n def __init__(self, minter_name=None, pid_type=None, pid_fetcher=None, read_permission_factory=None, create_permission_factory=None, list_permission_factory=None, search_class=None, record_serializers=None, record_loaders=None, search_serializers=None, default_media_type=None, max_result_window=None, search_factory=None, item_links_factory=None, record_class=None, indexer_class=None, **kwargs):\n \"\"\"Constructor.\"\"\"\n <|body_0|>\n\n def get(self, pagination=None, **kwargs):\n \"\"\"Search records. Permissions: the `list_permission_factory` permissions are checked. :returns: Search result containing hits and aggregations as returned by invenio-search.\"\"\"\n <|body_1|>\n\n def post(self, **kwargs):\n \"\"\"Create a record. Permissions: ``create_permission_factory`` Procedure description: #. First of all, the `create_permission_factory` permissions are checked. #. Then, the record is deserialized by the proper loader. #. A second call to the `create_permission_factory` factory is done: it differs from the previous call because this time the record is passed as parameter. #. A `uuid` is generated for the record and the minter is called. #. The record class is called to create the record. #. The HTTP response is built with the help of the item link factory. 
:returns: The created record.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(method_serializers={'GET': search_serializers, 'POST': record_serializers}, default_method_media_type={'GET': default_media_type, 'POST': default_media_type}, default_media_type=default_media_type, **kwargs)\n self.pid_type = pid_type\n self.minter = current_pidstore.minters[minter_name]\n self.pid_fetcher = current_pidstore.fetchers[pid_fetcher]\n self.read_permission_factory = read_permission_factory\n self.create_permission_factory = create_permission_factory or current_records_rest.create_permission_factory\n self.list_permission_factory = list_permission_factory or current_records_rest.list_permission_factory\n self.search_class = search_class\n self.max_result_window = max_result_window or 10000\n self.search_factory = partial(search_factory, self)\n self.item_links_factory = item_links_factory\n self.loaders = record_loaders or current_records_rest.loaders\n self.record_class = record_class or Record\n self.indexer_class = indexer_class\n<|end_body_0|>\n\n<|body_start_1|>\n urlkwargs = dict()\n search_obj = self.search_class()\n search = search_obj.with_preference_param().params(version=True)\n search = search[pagination['from_idx']:pagination['to_idx']]\n search = search.extra(track_total_hits=True)\n search, qs_kwargs = self.search_factory(search)\n urlkwargs.update(qs_kwargs)\n search_result = search.execute()\n total = search_result.hits.total['value']\n endpoint = '.{0}_list'.format(current_records_rest.default_endpoint_prefixes[self.pid_type])\n urlkwargs.update(size=pagination['size'], _external=True)\n links = {}\n\n def _link(name):\n urlkwargs.update(pagination['links'][name])\n links[name] = url_for(endpoint, **urlkwargs)\n _link('self')\n if pagination['from_idx'] >= 1:\n _link('prev')\n if pagination['to_idx'] < min(total, self.max_result_window):\n _link('next')\n return self.make_response(pid_fetcher=self.pid_fetcher, search_result=search_result.to_dict(), links=links, item_links_factory=self.item_links_factory)\n<|end_body_1|>\n\n<|body_start_2|>\n if request.mimetype not in self.loaders:\n raise UnsupportedMediaRESTError(request.mimetype)\n data = self.loaders[request.mimetype]()\n if data is None:\n raise InvalidDataRESTError()\n permission_factory = self.create_permission_factory\n if permission_factory:\n verify_record_permission(permission_factory, data)\n record_uuid = uuid.uuid4()\n pid = self.minter(record_uuid, data=data)\n record = self.record_class.create(data, id_=record_uuid)\n db.session.commit()\n if self.indexer_class:\n self.indexer_class().index(record)\n response = self.make_response(pid, record, 201, links_factory=self.item_links_factory)\n endpoint = '.{0}_item'.format(current_records_rest.default_endpoint_prefixes[pid.pid_type])\n location = url_for(endpoint, pid_value=pid.pid_value, _external=True)\n response.headers.extend(dict(location=location))\n return response\n<|end_body_2|>\n", "revision_id": "bcb241206c712aa37ef5179555a4027b2b0c1bda", "skeleton": "<|skeleton|>\nclass RecordsListResource:\n \"\"\"Resource for records listing.\"\"\"\n\n def __init__(self, minter_name=None, pid_type=None, pid_fetcher=None, read_permission_factory=None, create_permission_factory=None, list_permission_factory=None, search_class=None, record_serializers=None, record_loaders=None, search_serializers=None, default_media_type=None, max_result_window=None, search_factory=None, item_links_factory=None, record_class=None, indexer_class=None, **kwargs):\n 
\"\"\"Constructor.\"\"\"\n <|body_0|>\n\n def get(self, pagination=None, **kwargs):\n \"\"\"Search records. Permissions: the `list_permission_factory` permissions are checked. :returns: Search result containing hits and aggregations as returned by invenio-search.\"\"\"\n <|body_1|>\n\n def post(self, **kwargs):\n \"\"\"Create a record. Permissions: ``create_permission_factory`` Procedure description: #. First of all, the `create_permission_factory` permissions are checked. #. Then, the record is deserialized by the proper loader. #. A second call to the `create_permission_factory` factory is done: it differs from the previous call because this time the record is passed as parameter. #. A `uuid` is generated for the record and the minter is called. #. The record class is called to create the record. #. The HTTP response is built with the help of the item link factory. :returns: The created record.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class RecordsListResource:\n \"\"\"Resource for records listing.\"\"\"\n\n def __init__(self, minter_name=None, pid_type=None, pid_fetcher=None, read_permission_factory=None, create_permission_factory=None, list_permission_factory=None, search_class=None, record_serializers=None, record_loaders=None, search_serializers=None, default_media_type=None, max_result_window=None, search_factory=None, item_links_factory=None, record_class=None, indexer_class=None, **kwargs):\n \"\"\"Constructor.\"\"\"\n super().__init__(method_serializers={'GET': search_serializers, 'POST': record_serializers}, default_method_media_type={'GET': default_media_type, 'POST': default_media_type}, default_media_type=default_media_type, **kwargs)\n self.pid_type = pid_type\n self.minter = current_pidstore.minters[minter_name]\n self.pid_fetcher = current_pidstore.fetchers[pid_fetcher]\n self.read_permission_factory = read_permission_factory\n self.create_permission_factory = create_permission_factory or current_records_rest.create_permission_factory\n self.list_permission_factory = list_permission_factory or current_records_rest.list_permission_factory\n self.search_class = search_class\n self.max_result_window = max_result_window or 10000\n self.search_factory = partial(search_factory, self)\n self.item_links_factory = item_links_factory\n self.loaders = record_loaders or current_records_rest.loaders\n self.record_class = record_class or Record\n self.indexer_class = indexer_class\n\n def get(self, pagination=None, **kwargs):\n \"\"\"Search records. Permissions: the `list_permission_factory` permissions are checked. 
:returns: Search result containing hits and aggregations as returned by invenio-search.\"\"\"\n urlkwargs = dict()\n search_obj = self.search_class()\n search = search_obj.with_preference_param().params(version=True)\n search = search[pagination['from_idx']:pagination['to_idx']]\n search = search.extra(track_total_hits=True)\n search, qs_kwargs = self.search_factory(search)\n urlkwargs.update(qs_kwargs)\n search_result = search.execute()\n total = search_result.hits.total['value']\n endpoint = '.{0}_list'.format(current_records_rest.default_endpoint_prefixes[self.pid_type])\n urlkwargs.update(size=pagination['size'], _external=True)\n links = {}\n\n def _link(name):\n urlkwargs.update(pagination['links'][name])\n links[name] = url_for(endpoint, **urlkwargs)\n _link('self')\n if pagination['from_idx'] >= 1:\n _link('prev')\n if pagination['to_idx'] < min(total, self.max_result_window):\n _link('next')\n return self.make_response(pid_fetcher=self.pid_fetcher, search_result=search_result.to_dict(), links=links, item_links_factory=self.item_links_factory)\n\n def post(self, **kwargs):\n \"\"\"Create a record. Permissions: ``create_permission_factory`` Procedure description: #. First of all, the `create_permission_factory` permissions are checked. #. Then, the record is deserialized by the proper loader. #. A second call to the `create_permission_factory` factory is done: it differs from the previous call because this time the record is passed as parameter. #. A `uuid` is generated for the record and the minter is called. #. The record class is called to create the record. #. The HTTP response is built with the help of the item link factory. :returns: The created record.\"\"\"\n if request.mimetype not in self.loaders:\n raise UnsupportedMediaRESTError(request.mimetype)\n data = self.loaders[request.mimetype]()\n if data is None:\n raise InvalidDataRESTError()\n permission_factory = self.create_permission_factory\n if permission_factory:\n verify_record_permission(permission_factory, data)\n record_uuid = uuid.uuid4()\n pid = self.minter(record_uuid, data=data)\n record = self.record_class.create(data, id_=record_uuid)\n db.session.commit()\n if self.indexer_class:\n self.indexer_class().index(record)\n response = self.make_response(pid, record, 201, links_factory=self.item_links_factory)\n endpoint = '.{0}_item'.format(current_records_rest.default_endpoint_prefixes[pid.pid_type])\n location = url_for(endpoint, pid_value=pid.pid_value, _external=True)\n response.headers.extend(dict(location=location))\n return response\n", "source": "the_stack_v2_python_sparse", "source_path": "invenio_records_rest/views.py", "source_repo": "inveniosoftware/invenio-records-rest", "split": "val", "star_events_count": 5}
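The RecordsListResource bodies reference roughly a dozen names that the skeleton never defines. Their likely homes in invenio-records-rest and its companion packages are listed below; these paths are from memory of the Invenio codebase and should be verified against the pinned release rather than taken as authoritative. One behavioral detail worth flagging: `search_result.hits.total['value']` assumes the Elasticsearch 7+ response shape, where `hits.total` is an object, which is also why the `get` body sets `track_total_hits=True`.

import uuid
from functools import partial

from flask import request, url_for
from invenio_db import db
from invenio_pidstore import current_pidstore
from invenio_records.api import Record

# Assumed base: super().__init__(method_serializers=...) and make_response()
# match invenio-rest's content-negotiating method view.
from invenio_rest import ContentNegotiatedMethodView

# Inside invenio_records_rest/views.py itself these would be relative imports.
from invenio_records_rest.errors import (
    InvalidDataRESTError,
    UnsupportedMediaRESTError,
)
from invenio_records_rest.proxies import current_records_rest
from invenio_records_rest.utils import verify_record_permission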
{"blob_id": "c46a70515c76527af4b3923e771c11cda7f03345", "bodies": ["super().__init__(observation_space=observation_space, action_space=action_space, model_config_dict=model_config_dict)\nself.model_size = self._model_config_dict['model_size']\nself.is_img_space = len(self.observation_space.shape) in [2, 3]\nself.is_gray_scale = self.is_img_space and len(self.observation_space.shape) == 2", "if framework != 'tf2':\n raise NotImplementedError\nif self.is_img_space:\n from ray.rllib.algorithms.dreamerv3.tf.models.components.cnn_atari import CNNAtari\n return CNNAtari(model_size=self.model_size)\nelse:\n from ray.rllib.algorithms.dreamerv3.tf.models.components.mlp import MLP\n return MLP(model_size=self.model_size, name='vector_encoder')", "if framework != 'tf2':\n raise NotImplementedError\nif self.is_img_space:\n from ray.rllib.algorithms.dreamerv3.tf.models.components import conv_transpose_atari\n return conv_transpose_atari.ConvTransposeAtari(model_size=self.model_size, gray_scaled=self.is_gray_scale)\nelse:\n from ray.rllib.algorithms.dreamerv3.tf.models.components import vector_decoder\n return vector_decoder.VectorDecoder(model_size=self.model_size, observation_space=self.observation_space)"], "bodies_text": "<|body_start_0|>\n super().__init__(observation_space=observation_space, action_space=action_space, model_config_dict=model_config_dict)\n self.model_size = self._model_config_dict['model_size']\n self.is_img_space = len(self.observation_space.shape) in [2, 3]\n self.is_gray_scale = self.is_img_space and len(self.observation_space.shape) == 2\n<|end_body_0|>\n\n<|body_start_1|>\n if framework != 'tf2':\n raise NotImplementedError\n if self.is_img_space:\n from ray.rllib.algorithms.dreamerv3.tf.models.components.cnn_atari import CNNAtari\n return CNNAtari(model_size=self.model_size)\n else:\n from ray.rllib.algorithms.dreamerv3.tf.models.components.mlp import MLP\n return MLP(model_size=self.model_size, name='vector_encoder')\n<|end_body_1|>\n\n<|body_start_2|>\n if framework != 'tf2':\n raise NotImplementedError\n if self.is_img_space:\n from ray.rllib.algorithms.dreamerv3.tf.models.components import conv_transpose_atari\n return conv_transpose_atari.ConvTransposeAtari(model_size=self.model_size, gray_scaled=self.is_gray_scale)\n else:\n from ray.rllib.algorithms.dreamerv3.tf.models.components import vector_decoder\n return vector_decoder.VectorDecoder(model_size=self.model_size, observation_space=self.observation_space)\n<|end_body_2|>\n", "class_docstring": "The Catalog class used to build all the models needed for DreamerV3 training.", "class_name": "DreamerV3Catalog", "detected_licenses": ["MIT", "BSD-3-Clause", "Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DreamerV3Catalog:\n \"\"\"The Catalog class used to build all the models needed for DreamerV3 training.\"\"\"\n\n def __init__(self, observation_space: gym.Space, action_space: gym.Space, model_config_dict: dict):\n \"\"\"Initializes a DreamerV3Catalog instance. Args: observation_space: The observation space of the environment. action_space: The action space of the environment. 
model_config_dict: The model config to use.\"\"\"\n <|body_0|>\n\n def build_encoder(self, framework: str) -> Encoder:\n \"\"\"Builds the World-Model's encoder network depending on the obs space.\"\"\"\n <|body_1|>\n\n def build_decoder(self, framework: str) -> Model:\n \"\"\"Builds the World-Model's decoder network depending on the obs space.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(observation_space=observation_space, action_space=action_space, model_config_dict=model_config_dict)\n self.model_size = self._model_config_dict['model_size']\n self.is_img_space = len(self.observation_space.shape) in [2, 3]\n self.is_gray_scale = self.is_img_space and len(self.observation_space.shape) == 2\n<|end_body_0|>\n\n<|body_start_1|>\n if framework != 'tf2':\n raise NotImplementedError\n if self.is_img_space:\n from ray.rllib.algorithms.dreamerv3.tf.models.components.cnn_atari import CNNAtari\n return CNNAtari(model_size=self.model_size)\n else:\n from ray.rllib.algorithms.dreamerv3.tf.models.components.mlp import MLP\n return MLP(model_size=self.model_size, name='vector_encoder')\n<|end_body_1|>\n\n<|body_start_2|>\n if framework != 'tf2':\n raise NotImplementedError\n if self.is_img_space:\n from ray.rllib.algorithms.dreamerv3.tf.models.components import conv_transpose_atari\n return conv_transpose_atari.ConvTransposeAtari(model_size=self.model_size, gray_scaled=self.is_gray_scale)\n else:\n from ray.rllib.algorithms.dreamerv3.tf.models.components import vector_decoder\n return vector_decoder.VectorDecoder(model_size=self.model_size, observation_space=self.observation_space)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000288", "length_bytes": 2876, "license_type": "permissive", "methods": [{"docstring": "Initializes a DreamerV3Catalog instance. Args: observation_space: The observation space of the environment. action_space: The action space of the environment. model_config_dict: The model config to use.", "name": "__init__", "signature": "def __init__(self, observation_space: gym.Space, action_space: gym.Space, model_config_dict: dict)"}, {"docstring": "Builds the World-Model's encoder network depending on the obs space.", "name": "build_encoder", "signature": "def build_encoder(self, framework: str) -> Encoder"}, {"docstring": "Builds the World-Model's decoder network depending on the obs space.", "name": "build_decoder", "signature": "def build_decoder(self, framework: str) -> Model"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_015814", "prompt": "Implement the Python class `DreamerV3Catalog` described below.\n\nClass description:\nThe Catalog class used to build all the models needed for DreamerV3 training.\n\nMethod signatures and docstrings:\n- def __init__(self, observation_space: gym.Space, action_space: gym.Space, model_config_dict: dict): Initializes a DreamerV3Catalog instance. Args: observation_space: The observation space of the environment. action_space: The action space of the environment. 
model_config_dict: The model config to use.\n- def build_encoder(self, framework: str) -> Encoder: Builds the World-Model's encoder network depending on the obs space.\n- def build_decoder(self, framework: str) -> Model: Builds the World-Model's decoder network depending on the obs space.", "prompted_full_text": "Implement the Python class `DreamerV3Catalog` described below.\n\nClass description:\nThe Catalog class used to build all the models needed for DreamerV3 training.\n\nMethod signatures and docstrings:\n- def __init__(self, observation_space: gym.Space, action_space: gym.Space, model_config_dict: dict): Initializes a DreamerV3Catalog instance. Args: observation_space: The observation space of the environment. action_space: The action space of the environment. model_config_dict: The model config to use.\n- def build_encoder(self, framework: str) -> Encoder: Builds the World-Model's encoder network depending on the obs space.\n- def build_decoder(self, framework: str) -> Model: Builds the World-Model's decoder network depending on the obs space.\n\n<|skeleton|>\nclass DreamerV3Catalog:\n \"\"\"The Catalog class used to build all the models needed for DreamerV3 training.\"\"\"\n\n def __init__(self, observation_space: gym.Space, action_space: gym.Space, model_config_dict: dict):\n \"\"\"Initializes a DreamerV3Catalog instance. Args: observation_space: The observation space of the environment. action_space: The action space of the environment. model_config_dict: The model config to use.\"\"\"\n <|body_0|>\n\n def build_encoder(self, framework: str) -> Encoder:\n \"\"\"Builds the World-Model's encoder network depending on the obs space.\"\"\"\n <|body_1|>\n\n def build_decoder(self, framework: str) -> Model:\n \"\"\"Builds the World-Model's decoder network depending on the obs space.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(observation_space=observation_space, action_space=action_space, model_config_dict=model_config_dict)\n self.model_size = self._model_config_dict['model_size']\n self.is_img_space = len(self.observation_space.shape) in [2, 3]\n self.is_gray_scale = self.is_img_space and len(self.observation_space.shape) == 2\n<|end_body_0|>\n\n<|body_start_1|>\n if framework != 'tf2':\n raise NotImplementedError\n if self.is_img_space:\n from ray.rllib.algorithms.dreamerv3.tf.models.components.cnn_atari import CNNAtari\n return CNNAtari(model_size=self.model_size)\n else:\n from ray.rllib.algorithms.dreamerv3.tf.models.components.mlp import MLP\n return MLP(model_size=self.model_size, name='vector_encoder')\n<|end_body_1|>\n\n<|body_start_2|>\n if framework != 'tf2':\n raise NotImplementedError\n if self.is_img_space:\n from ray.rllib.algorithms.dreamerv3.tf.models.components import conv_transpose_atari\n return conv_transpose_atari.ConvTransposeAtari(model_size=self.model_size, gray_scaled=self.is_gray_scale)\n else:\n from ray.rllib.algorithms.dreamerv3.tf.models.components import vector_decoder\n return vector_decoder.VectorDecoder(model_size=self.model_size, observation_space=self.observation_space)\n<|end_body_2|>\n", "revision_id": "edba68c3e7cf255d1d6479329f305adb7fa4c3ed", "skeleton": "<|skeleton|>\nclass DreamerV3Catalog:\n \"\"\"The Catalog class used to build all the models needed for DreamerV3 training.\"\"\"\n\n def __init__(self, observation_space: gym.Space, action_space: gym.Space, model_config_dict: dict):\n \"\"\"Initializes a DreamerV3Catalog instance. Args: observation_space: The observation space of the environment. 
action_space: The action space of the environment. model_config_dict: The model config to use.\"\"\"\n <|body_0|>\n\n def build_encoder(self, framework: str) -> Encoder:\n \"\"\"Builds the World-Model's encoder network depending on the obs space.\"\"\"\n <|body_1|>\n\n def build_decoder(self, framework: str) -> Model:\n \"\"\"Builds the World-Model's decoder network depending on the obs space.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DreamerV3Catalog:\n \"\"\"The Catalog class used to build all the models needed for DreamerV3 training.\"\"\"\n\n def __init__(self, observation_space: gym.Space, action_space: gym.Space, model_config_dict: dict):\n \"\"\"Initializes a DreamerV3Catalog instance. Args: observation_space: The observation space of the environment. action_space: The action space of the environment. model_config_dict: The model config to use.\"\"\"\n super().__init__(observation_space=observation_space, action_space=action_space, model_config_dict=model_config_dict)\n self.model_size = self._model_config_dict['model_size']\n self.is_img_space = len(self.observation_space.shape) in [2, 3]\n self.is_gray_scale = self.is_img_space and len(self.observation_space.shape) == 2\n\n def build_encoder(self, framework: str) -> Encoder:\n \"\"\"Builds the World-Model's encoder network depending on the obs space.\"\"\"\n if framework != 'tf2':\n raise NotImplementedError\n if self.is_img_space:\n from ray.rllib.algorithms.dreamerv3.tf.models.components.cnn_atari import CNNAtari\n return CNNAtari(model_size=self.model_size)\n else:\n from ray.rllib.algorithms.dreamerv3.tf.models.components.mlp import MLP\n return MLP(model_size=self.model_size, name='vector_encoder')\n\n def build_decoder(self, framework: str) -> Model:\n \"\"\"Builds the World-Model's decoder network depending on the obs space.\"\"\"\n if framework != 'tf2':\n raise NotImplementedError\n if self.is_img_space:\n from ray.rllib.algorithms.dreamerv3.tf.models.components import conv_transpose_atari\n return conv_transpose_atari.ConvTransposeAtari(model_size=self.model_size, gray_scaled=self.is_gray_scale)\n else:\n from ray.rllib.algorithms.dreamerv3.tf.models.components import vector_decoder\n return vector_decoder.VectorDecoder(model_size=self.model_size, observation_space=self.observation_space)\n", "source": "the_stack_v2_python_sparse", "source_path": "rllib/algorithms/dreamerv3/dreamerv3_catalog.py", "source_repo": "ray-project/ray", "split": "val", "star_events_count": 29482}
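The DreamerV3Catalog record above dispatches on two axes at once: the framework string (only 'tf2' is implemented) and the rank of the observation space (rank 2 or 3 means an image, rank 1 a vector). A minimal sketch of that dispatch pattern, with hypothetical stand-in encoder classes in place of RLlib's CNNAtari and MLP components:

# Minimal sketch of the dispatch logic in the record above. The
# encoder classes are hypothetical stand-ins; only the branching
# mirrors the original.
class DummyCNNEncoder:
    def __init__(self, model_size):
        self.model_size = model_size

class DummyMLPEncoder:
    def __init__(self, model_size, name):
        self.model_size, self.name = model_size, name

def build_encoder(obs_shape, model_size, framework='tf2'):
    if framework != 'tf2':
        raise NotImplementedError(framework)
    # rank-2 (gray image) or rank-3 (RGB image) -> CNN, rank-1 -> MLP
    if len(obs_shape) in (2, 3):
        return DummyCNNEncoder(model_size)
    return DummyMLPEncoder(model_size, name='vector_encoder')

assert isinstance(build_encoder((64, 64, 3), 'S'), DummyCNNEncoder)
assert isinstance(build_encoder((8,), 'S'), DummyMLPEncoder)

Deferring the framework-specific imports to inside each branch, as the original solution does, keeps TensorFlow out of the import path until an encoder or decoder is actually built.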
{"blob_id": "c03e5835cc9ed223a07943effda86b211d7bc535", "bodies": ["self.user = User.objects.create_user(username='test', password='testpassword')\nProduct.objects.create(name='testname', description='test description', offer=10, price=20, category='', stock=3, vendor=User.objects.get(username='test'))\nProduct.objects.create(name='testname2', description='test description', offer=10, price=20, category='', stock=3)", "self.client.login(username='test', password='testpassword')\nresponse = self.client.get(reverse('user_products'))\nproduct_names = [x.name for x in response.context['products']]\nself.assertIn('testname', product_names)\nself.assertNotIn('testname2', product_names)\nself.assertTemplateUsed('user_products.html')"], "bodies_text": "<|body_start_0|>\n self.user = User.objects.create_user(username='test', password='testpassword')\n Product.objects.create(name='testname', description='test description', offer=10, price=20, category='', stock=3, vendor=User.objects.get(username='test'))\n Product.objects.create(name='testname2', description='test description', offer=10, price=20, category='', stock=3)\n<|end_body_0|>\n\n<|body_start_1|>\n self.client.login(username='test', password='testpassword')\n response = self.client.get(reverse('user_products'))\n product_names = [x.name for x in response.context['products']]\n self.assertIn('testname', product_names)\n self.assertNotIn('testname2', product_names)\n self.assertTemplateUsed('user_products.html')\n<|end_body_1|>\n", "class_docstring": "", "class_name": "TestUserProducts", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestUserProducts:\n\n def setUp(self):\n \"\"\"pre setting database and user logged in\"\"\"\n <|body_0|>\n\n def test_get_vendor_products(self):\n \"\"\"test only products by logged in user are visible\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.user = User.objects.create_user(username='test', password='testpassword')\n Product.objects.create(name='testname', description='test description', offer=10, price=20, category='', stock=3, vendor=User.objects.get(username='test'))\n Product.objects.create(name='testname2', description='test description', offer=10, price=20, category='', stock=3)\n<|end_body_0|>\n\n<|body_start_1|>\n self.client.login(username='test', password='testpassword')\n response = self.client.get(reverse('user_products'))\n product_names = [x.name for x in response.context['products']]\n self.assertIn('testname', product_names)\n self.assertNotIn('testname2', product_names)\n self.assertTemplateUsed('user_products.html')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000289", "length_bytes": 1308, "license_type": "no_license", "methods": [{"docstring": "pre setting database and user logged in", "name": "setUp", "signature": "def setUp(self)"}, {"docstring": "test only products by logged in user are visible", "name": "test_get_vendor_products", "signature": "def test_get_vendor_products(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_030217", "prompt": "Implement the Python class `TestUserProducts` described below.\n\nClass description:\nImplement the TestUserProducts class.\n\nMethod signatures and docstrings:\n- def setUp(self): pre setting database and user logged in\n- def test_get_vendor_products(self): test only products by logged in user are visible", "prompted_full_text": "Implement the Python class `TestUserProducts` described below.\n\nClass 
description:\nImplement the TestUserProducts class.\n\nMethod signatures and docstrings:\n- def setUp(self): pre setting database and user logged in\n- def test_get_vendor_products(self): test only products by logged in user are visible\n\n<|skeleton|>\nclass TestUserProducts:\n\n def setUp(self):\n \"\"\"pre setting database and user logged in\"\"\"\n <|body_0|>\n\n def test_get_vendor_products(self):\n \"\"\"test only products by logged in user are visible\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.user = User.objects.create_user(username='test', password='testpassword')\n Product.objects.create(name='testname', description='test description', offer=10, price=20, category='', stock=3, vendor=User.objects.get(username='test'))\n Product.objects.create(name='testname2', description='test description', offer=10, price=20, category='', stock=3)\n<|end_body_0|>\n\n<|body_start_1|>\n self.client.login(username='test', password='testpassword')\n response = self.client.get(reverse('user_products'))\n product_names = [x.name for x in response.context['products']]\n self.assertIn('testname', product_names)\n self.assertNotIn('testname2', product_names)\n self.assertTemplateUsed('user_products.html')\n<|end_body_1|>\n", "revision_id": "c25fe47d386357d929242e2a6dd36666328195b0", "skeleton": "<|skeleton|>\nclass TestUserProducts:\n\n def setUp(self):\n \"\"\"pre setting database and user logged in\"\"\"\n <|body_0|>\n\n def test_get_vendor_products(self):\n \"\"\"test only products by logged in user are visible\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TestUserProducts:\n def setUp(self):\n \"\"\"pre setting database and user logged in\"\"\"\n self.user = User.objects.create_user(username='test', password='testpassword')\n Product.objects.create(name='testname', description='test description', offer=10, price=20, category='', stock=3, vendor=User.objects.get(username='test'))\n Product.objects.create(name='testname2', description='test description', offer=10, price=20, category='', stock=3)\n\n def test_get_vendor_products(self):\n \"\"\"test only products by logged in user are visible\"\"\"\n self.client.login(username='test', password='testpassword')\n response = self.client.get(reverse('user_products'))\n product_names = [x.name for x in response.context['products']]\n self.assertIn('testname', product_names)\n self.assertNotIn('testname2', product_names)\n self.assertTemplateUsed('user_products.html')\n", "source": "the_stack_v2_python_sparse", "source_path": "product_manager/tests.py", "source_repo": "SalvatoreFiengo/myecommerce", "split": "val", "star_events_count": 0}
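The TestUserProducts record above checks that a user_products view exposes only the logged-in vendor's products under the 'products' context key. A hypothetical Django view satisfying that contract; the import paths and the Product model location are assumptions, since the record only shows the tests:

# Hypothetical view implied by the test record above. Assumes a
# Django project with an app-local models module defining Product.
from django.contrib.auth.decorators import login_required
from django.shortcuts import render

from .models import Product  # assumption: app-local model

@login_required
def user_products(request):
    # only products whose vendor FK is the logged-in user are shown
    products = Product.objects.filter(vendor=request.user)
    return render(request, 'user_products.html', {'products': products})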
{"blob_id": "adf22b6675e44bb974367c57ff9a0dfe13870cb1", "bodies": ["self.model = Sequential()\nself.inp = input_shape\nself.recf = receptive_field\nself.nfilt = n_filters\nself.nneur = n_neurons_connected\nself.categories = len(np.unique(labels))\nself.eta = eta\nself.lmbd = lmbd", "self.model.add(Conv2D(self.nfilt, (self.recf, self.recf), input_shape=self.inp, activation='relu', padding='same'))\nself.model.add(MaxPooling2D(pool_size=(2, 2)))\nself.model.add(Conv2D(self.nfilt, (self.recf, self.recf), input_shape=self.inp, activation='relu', padding='same'))\nself.model.add(MaxPooling2D(pool_size=(2, 2)))\n'\\n Before we can add dense layers, the output from previous\\n 3D layers must be flatten (i.e. convert to vector form)\\n '\nself.model.add(Flatten())\nself.model.add(Dense(self.nneur, activation='relu', kernel_regularizer=regularizers.l2(self.lmbd)))\nself.model.add(Dense(self.categories, activation='softmax', kernel_regularizer=regularizers.l2(self.lmbd)))\nsgd = optimizers.SGD(lr=self.eta)\nself.model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])\nif show_model == True:\n self.model.summary()"], "bodies_text": "<|body_start_0|>\n self.model = Sequential()\n self.inp = input_shape\n self.recf = receptive_field\n self.nfilt = n_filters\n self.nneur = n_neurons_connected\n self.categories = len(np.unique(labels))\n self.eta = eta\n self.lmbd = lmbd\n<|end_body_0|>\n\n<|body_start_1|>\n self.model.add(Conv2D(self.nfilt, (self.recf, self.recf), input_shape=self.inp, activation='relu', padding='same'))\n self.model.add(MaxPooling2D(pool_size=(2, 2)))\n self.model.add(Conv2D(self.nfilt, (self.recf, self.recf), input_shape=self.inp, activation='relu', padding='same'))\n self.model.add(MaxPooling2D(pool_size=(2, 2)))\n '\\n Before we can add dense layers, the output from previous\\n 3D layers must be flatten (i.e. 
convert to vector form)\\n '\n self.model.add(Flatten())\n self.model.add(Dense(self.nneur, activation='relu', kernel_regularizer=regularizers.l2(self.lmbd)))\n self.model.add(Dense(self.categories, activation='softmax', kernel_regularizer=regularizers.l2(self.lmbd)))\n sgd = optimizers.SGD(lr=self.eta)\n self.model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])\n if show_model == True:\n self.model.summary()\n<|end_body_1|>\n", "class_docstring": "", "class_name": "CNN_keras", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CNN_keras:\n\n def __init__(self, input_shape, receptive_field, n_filters, n_neurons_connected, labels, eta, lmbd):\n \"\"\"input_shape: the shape of input data receptive_field: size of receptive fields/kernels/filters (NxN) n_filters: number of fields/kernels/filters n_neurons_connected: neurons in dense layer labels: labels of data to recognise\"\"\"\n <|body_0|>\n\n def add_layer(self, show_model=False):\n \"\"\"First convolutional layer must contain the input shape, the other layers is not dependent of it\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.model = Sequential()\n self.inp = input_shape\n self.recf = receptive_field\n self.nfilt = n_filters\n self.nneur = n_neurons_connected\n self.categories = len(np.unique(labels))\n self.eta = eta\n self.lmbd = lmbd\n<|end_body_0|>\n\n<|body_start_1|>\n self.model.add(Conv2D(self.nfilt, (self.recf, self.recf), input_shape=self.inp, activation='relu', padding='same'))\n self.model.add(MaxPooling2D(pool_size=(2, 2)))\n self.model.add(Conv2D(self.nfilt, (self.recf, self.recf), input_shape=self.inp, activation='relu', padding='same'))\n self.model.add(MaxPooling2D(pool_size=(2, 2)))\n '\\n Before we can add dense layers, the output from previous\\n 3D layers must be flatten (i.e. 
convert to vector form)\\n '\n self.model.add(Flatten())\n self.model.add(Dense(self.nneur, activation='relu', kernel_regularizer=regularizers.l2(self.lmbd)))\n self.model.add(Dense(self.categories, activation='softmax', kernel_regularizer=regularizers.l2(self.lmbd)))\n sgd = optimizers.SGD(lr=self.eta)\n self.model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])\n if show_model == True:\n self.model.summary()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000290", "length_bytes": 2195, "license_type": "no_license", "methods": [{"docstring": "input_shape: the shape of input data receptive_field: size of receptive fields/kernels/filters (NxN) n_filters: number of fields/kernels/filters n_neurons_connected: neurons in dense layer labels: labels of data to recognise", "name": "__init__", "signature": "def __init__(self, input_shape, receptive_field, n_filters, n_neurons_connected, labels, eta, lmbd)"}, {"docstring": "First convolutional layer must contain the input shape, the other layers is not dependent of it", "name": "add_layer", "signature": "def add_layer(self, show_model=False)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_032951", "prompt": "Implement the Python class `CNN_keras` described below.\n\nClass description:\nImplement the CNN_keras class.\n\nMethod signatures and docstrings:\n- def __init__(self, input_shape, receptive_field, n_filters, n_neurons_connected, labels, eta, lmbd): input_shape: the shape of input data receptive_field: size of receptive fields/kernels/filters (NxN) n_filters: number of fields/kernels/filters n_neurons_connected: neurons in dense layer labels: labels of data to recognise\n- def add_layer(self, show_model=False): First convolutional layer must contain the input shape, the other layers is not dependent of it", "prompted_full_text": "Implement the Python class `CNN_keras` described below.\n\nClass description:\nImplement the CNN_keras class.\n\nMethod signatures and docstrings:\n- def __init__(self, input_shape, receptive_field, n_filters, n_neurons_connected, labels, eta, lmbd): input_shape: the shape of input data receptive_field: size of receptive fields/kernels/filters (NxN) n_filters: number of fields/kernels/filters n_neurons_connected: neurons in dense layer labels: labels of data to recognise\n- def add_layer(self, show_model=False): First convolutional layer must contain the input shape, the other layers is not dependent of it\n\n<|skeleton|>\nclass CNN_keras:\n\n def __init__(self, input_shape, receptive_field, n_filters, n_neurons_connected, labels, eta, lmbd):\n \"\"\"input_shape: the shape of input data receptive_field: size of receptive fields/kernels/filters (NxN) n_filters: number of fields/kernels/filters n_neurons_connected: neurons in dense layer labels: labels of data to recognise\"\"\"\n <|body_0|>\n\n def add_layer(self, show_model=False):\n \"\"\"First convolutional layer must contain the input shape, the other layers is not dependent of it\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.model = Sequential()\n self.inp = input_shape\n self.recf = receptive_field\n self.nfilt = n_filters\n self.nneur = n_neurons_connected\n self.categories = len(np.unique(labels))\n self.eta = eta\n self.lmbd = lmbd\n<|end_body_0|>\n\n<|body_start_1|>\n self.model.add(Conv2D(self.nfilt, (self.recf, self.recf), input_shape=self.inp, activation='relu', padding='same'))\n self.model.add(MaxPooling2D(pool_size=(2, 2)))\n self.model.add(Conv2D(self.nfilt, 
(self.recf, self.recf), input_shape=self.inp, activation='relu', padding='same'))\n self.model.add(MaxPooling2D(pool_size=(2, 2)))\n '\\n Before we can add dense layers, the output from previous\\n 3D layers must be flatten (i.e. convert to vector form)\\n '\n self.model.add(Flatten())\n self.model.add(Dense(self.nneur, activation='relu', kernel_regularizer=regularizers.l2(self.lmbd)))\n self.model.add(Dense(self.categories, activation='softmax', kernel_regularizer=regularizers.l2(self.lmbd)))\n sgd = optimizers.SGD(lr=self.eta)\n self.model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])\n if show_model == True:\n self.model.summary()\n<|end_body_1|>\n", "revision_id": "d4f3345146c46d5f9fa0ebd39e7bf3618bf79044", "skeleton": "<|skeleton|>\nclass CNN_keras:\n\n def __init__(self, input_shape, receptive_field, n_filters, n_neurons_connected, labels, eta, lmbd):\n \"\"\"input_shape: the shape of input data receptive_field: size of receptive fields/kernels/filters (NxN) n_filters: number of fields/kernels/filters n_neurons_connected: neurons in dense layer labels: labels of data to recognise\"\"\"\n <|body_0|>\n\n def add_layer(self, show_model=False):\n \"\"\"First convolutional layer must contain the input shape, the other layers is not dependent of it\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CNN_keras:\n def __init__(self, input_shape, receptive_field, n_filters, n_neurons_connected, labels, eta, lmbd):\n \"\"\"input_shape: the shape of input data receptive_field: size of receptive fields/kernels/filters (NxN) n_filters: number of fields/kernels/filters n_neurons_connected: neurons in dense layer labels: labels of data to recognise\"\"\"\n self.model = Sequential()\n self.inp = input_shape\n self.recf = receptive_field\n self.nfilt = n_filters\n self.nneur = n_neurons_connected\n self.categories = len(np.unique(labels))\n self.eta = eta\n self.lmbd = lmbd\n\n def add_layer(self, show_model=False):\n \"\"\"First convolutional layer must contain the input shape, the other layers is not dependent of it\"\"\"\n self.model.add(Conv2D(self.nfilt, (self.recf, self.recf), input_shape=self.inp, activation='relu', padding='same'))\n self.model.add(MaxPooling2D(pool_size=(2, 2)))\n self.model.add(Conv2D(self.nfilt, (self.recf, self.recf), input_shape=self.inp, activation='relu', padding='same'))\n self.model.add(MaxPooling2D(pool_size=(2, 2)))\n '\\n Before we can add dense layers, the output from previous\\n 3D layers must be flatten (i.e. convert to vector form)\\n '\n self.model.add(Flatten())\n self.model.add(Dense(self.nneur, activation='relu', kernel_regularizer=regularizers.l2(self.lmbd)))\n self.model.add(Dense(self.categories, activation='softmax', kernel_regularizer=regularizers.l2(self.lmbd)))\n sgd = optimizers.SGD(lr=self.eta)\n self.model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])\n if show_model == True:\n self.model.summary()\n", "source": "the_stack_v2_python_sparse", "source_path": "Project3/src/CNN_class.py", "source_repo": "jacobllie/FYS-STK4155", "split": "val", "star_events_count": 2}
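A usage sketch for the CNN_keras record above, assuming MNIST-shaped 28x28 gray-scale input and that the class plus its Keras dependencies are already imported; the values only exercise construction and compilation, not training:

# Usage sketch (assumes TensorFlow/Keras is installed and the class
# above is in scope). Shapes follow MNIST: 28x28 single-channel images.
import numpy as np

labels = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])  # ten categories
net = CNN_keras(input_shape=(28, 28, 1),
                receptive_field=3,         # 3x3 kernels
                n_filters=16,
                n_neurons_connected=64,
                labels=labels,
                eta=0.01,                  # SGD learning rate
                lmbd=1e-4)                 # L2 penalty
net.add_layer(show_model=True)             # builds and compiles the model

Note that recent Keras releases renamed SGD's lr argument to learning_rate, so the record's optimizers.SGD(lr=...) call may need adjusting on current installs.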
{"blob_id": "b2a9ad8ecbf82e7a7524c5b33130ac6ac745bd2d", "bodies": ["order = get_a_order(order_id)\nif not order:\n api.abort(404)\nelse:\n return order", "data = request.json\norder = update_order(data=data, id=order_id)\nif not order:\n api.abort(404)\nelse:\n return order", "order = delete_a_order(id=order_id)\nif not order:\n api.abort(404)\nelse:\n return order"], "bodies_text": "<|body_start_0|>\n order = get_a_order(order_id)\n if not order:\n api.abort(404)\n else:\n return order\n<|end_body_0|>\n\n<|body_start_1|>\n data = request.json\n order = update_order(data=data, id=order_id)\n if not order:\n api.abort(404)\n else:\n return order\n<|end_body_1|>\n\n<|body_start_2|>\n order = delete_a_order(id=order_id)\n if not order:\n api.abort(404)\n else:\n return order\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Order", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Order:\n\n def get(self, order_id):\n \"\"\"get a order given its identifier\"\"\"\n <|body_0|>\n\n def put(self, order_id):\n \"\"\"Updates a new Order\"\"\"\n <|body_1|>\n\n def delete(self, order_id):\n \"\"\"Deletes a new Order\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n order = get_a_order(order_id)\n if not order:\n api.abort(404)\n else:\n return order\n<|end_body_0|>\n\n<|body_start_1|>\n data = request.json\n order = update_order(data=data, id=order_id)\n if not order:\n api.abort(404)\n else:\n return order\n<|end_body_1|>\n\n<|body_start_2|>\n order = delete_a_order(id=order_id)\n if not order:\n api.abort(404)\n else:\n return order\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000291", "length_bytes": 2618, "license_type": "no_license", "methods": [{"docstring": "get a order given its identifier", "name": "get", "signature": "def get(self, order_id)"}, {"docstring": "Updates a new Order", "name": "put", "signature": "def put(self, order_id)"}, {"docstring": "Deletes a new Order", "name": "delete", "signature": "def delete(self, order_id)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_040189", "prompt": "Implement the Python class `Order` described below.\n\nClass description:\nImplement the Order class.\n\nMethod signatures and docstrings:\n- def get(self, order_id): get a order given its identifier\n- def put(self, order_id): Updates a new Order\n- def delete(self, order_id): Deletes a new Order", "prompted_full_text": "Implement the Python class `Order` described below.\n\nClass description:\nImplement the Order class.\n\nMethod signatures and docstrings:\n- def get(self, order_id): get a order given its identifier\n- def put(self, order_id): Updates a new Order\n- def delete(self, order_id): Deletes a new Order\n\n<|skeleton|>\nclass Order:\n\n def get(self, order_id):\n \"\"\"get a order given its identifier\"\"\"\n <|body_0|>\n\n def put(self, order_id):\n \"\"\"Updates a new Order\"\"\"\n <|body_1|>\n\n def delete(self, order_id):\n \"\"\"Deletes a new Order\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n order = get_a_order(order_id)\n if not order:\n api.abort(404)\n else:\n return order\n<|end_body_0|>\n\n<|body_start_1|>\n data = request.json\n order = update_order(data=data, id=order_id)\n if not order:\n api.abort(404)\n else:\n return order\n<|end_body_1|>\n\n<|body_start_2|>\n order = delete_a_order(id=order_id)\n if not order:\n api.abort(404)\n else:\n return order\n<|end_body_2|>\n", "revision_id": "3f33450a1c556724ff131ccf0f3afeb590b859b8", 
"skeleton": "<|skeleton|>\nclass Order:\n\n def get(self, order_id):\n \"\"\"get a order given its identifier\"\"\"\n <|body_0|>\n\n def put(self, order_id):\n \"\"\"Updates a new Order\"\"\"\n <|body_1|>\n\n def delete(self, order_id):\n \"\"\"Deletes a new Order\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Order:\n def get(self, order_id):\n \"\"\"get a order given its identifier\"\"\"\n order = get_a_order(order_id)\n if not order:\n api.abort(404)\n else:\n return order\n\n def put(self, order_id):\n \"\"\"Updates a new Order\"\"\"\n data = request.json\n order = update_order(data=data, id=order_id)\n if not order:\n api.abort(404)\n else:\n return order\n\n def delete(self, order_id):\n \"\"\"Deletes a new Order\"\"\"\n order = delete_a_order(id=order_id)\n if not order:\n api.abort(404)\n else:\n return order\n", "source": "the_stack_v2_python_sparse", "source_path": "app/main/controller/order_controller.py", "source_repo": "TheJina/orderquick", "split": "val", "star_events_count": 0}
{"blob_id": "0538a9ca72119847168c6aac82aa7df1b261d462", "bodies": ["user_id = uid\nride_request_ref = RideRequestGenericDao().rideRequestCollectionRef.document(rideRequestId)\nride_request = RideRequestGenericDao().get(ride_request_ref)\nprint('userId: {}, rideRequestId: {}'.format(user_id, rideRequestId))\nresponse_dict = ride_request.to_dict_view()['baggages']\nreturn (response_dict, 200)", "args = ride_request_parsers.luggage_parser.parse_args()\nluggage_list = args['luggages']\nluggages = Luggages()\nluggages.add_from_list(luggage_list)\nluggage_actions.put_luggages(ride_request_id=rideRequestId, luggages=luggages)\nresponse_dict = {'newLuggageValues': luggages.to_dict()}\nreturn (response_dict, 200)"], "bodies_text": "<|body_start_0|>\n user_id = uid\n ride_request_ref = RideRequestGenericDao().rideRequestCollectionRef.document(rideRequestId)\n ride_request = RideRequestGenericDao().get(ride_request_ref)\n print('userId: {}, rideRequestId: {}'.format(user_id, rideRequestId))\n response_dict = ride_request.to_dict_view()['baggages']\n return (response_dict, 200)\n<|end_body_0|>\n\n<|body_start_1|>\n args = ride_request_parsers.luggage_parser.parse_args()\n luggage_list = args['luggages']\n luggages = Luggages()\n luggages.add_from_list(luggage_list)\n luggage_actions.put_luggages(ride_request_id=rideRequestId, luggages=luggages)\n response_dict = {'newLuggageValues': luggages.to_dict()}\n return (response_dict, 200)\n<|end_body_1|>\n", "class_docstring": "/rideRequest/:rideRequestId/luggage/", "class_name": "LuggageService", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LuggageService:\n \"\"\"/rideRequest/:rideRequestId/luggage/\"\"\"\n\n def get(self, rideRequestId, uid):\n \"\"\"Get the JSON for the luggage associatedd with ride request :param rideRequestId: :param uid: :return:\"\"\"\n <|body_0|>\n\n def put(self, rideRequestId, uid):\n \"\"\":param rideRequestId: :param uid: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n user_id = uid\n ride_request_ref = RideRequestGenericDao().rideRequestCollectionRef.document(rideRequestId)\n ride_request = RideRequestGenericDao().get(ride_request_ref)\n print('userId: {}, rideRequestId: {}'.format(user_id, rideRequestId))\n response_dict = ride_request.to_dict_view()['baggages']\n return (response_dict, 200)\n<|end_body_0|>\n\n<|body_start_1|>\n args = ride_request_parsers.luggage_parser.parse_args()\n luggage_list = args['luggages']\n luggages = Luggages()\n luggages.add_from_list(luggage_list)\n luggage_actions.put_luggages(ride_request_id=rideRequestId, luggages=luggages)\n response_dict = {'newLuggageValues': luggages.to_dict()}\n return (response_dict, 200)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000292", "length_bytes": 6960, "license_type": "no_license", "methods": [{"docstring": "Get the JSON for the luggage associatedd with ride request :param rideRequestId: :param uid: :return:", "name": "get", "signature": "def get(self, rideRequestId, uid)"}, {"docstring": ":param rideRequestId: :param uid: :return:", "name": "put", "signature": "def put(self, rideRequestId, uid)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_035509", "prompt": "Implement the Python class `LuggageService` described below.\n\nClass description:\n/rideRequest/:rideRequestId/luggage/\n\nMethod signatures and docstrings:\n- def get(self, rideRequestId, uid): Get the JSON for the luggage associatedd with ride request :param 
rideRequestId: :param uid: :return:\n- def put(self, rideRequestId, uid): :param rideRequestId: :param uid: :return:", "prompted_full_text": "Implement the Python class `LuggageService` described below.\n\nClass description:\n/rideRequest/:rideRequestId/luggage/\n\nMethod signatures and docstrings:\n- def get(self, rideRequestId, uid): Get the JSON for the luggage associatedd with ride request :param rideRequestId: :param uid: :return:\n- def put(self, rideRequestId, uid): :param rideRequestId: :param uid: :return:\n\n<|skeleton|>\nclass LuggageService:\n \"\"\"/rideRequest/:rideRequestId/luggage/\"\"\"\n\n def get(self, rideRequestId, uid):\n \"\"\"Get the JSON for the luggage associatedd with ride request :param rideRequestId: :param uid: :return:\"\"\"\n <|body_0|>\n\n def put(self, rideRequestId, uid):\n \"\"\":param rideRequestId: :param uid: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n user_id = uid\n ride_request_ref = RideRequestGenericDao().rideRequestCollectionRef.document(rideRequestId)\n ride_request = RideRequestGenericDao().get(ride_request_ref)\n print('userId: {}, rideRequestId: {}'.format(user_id, rideRequestId))\n response_dict = ride_request.to_dict_view()['baggages']\n return (response_dict, 200)\n<|end_body_0|>\n\n<|body_start_1|>\n args = ride_request_parsers.luggage_parser.parse_args()\n luggage_list = args['luggages']\n luggages = Luggages()\n luggages.add_from_list(luggage_list)\n luggage_actions.put_luggages(ride_request_id=rideRequestId, luggages=luggages)\n response_dict = {'newLuggageValues': luggages.to_dict()}\n return (response_dict, 200)\n<|end_body_1|>\n", "revision_id": "ff6b4d99764d2b9cc1a100489e4a0bce7aa69e2d", "skeleton": "<|skeleton|>\nclass LuggageService:\n \"\"\"/rideRequest/:rideRequestId/luggage/\"\"\"\n\n def get(self, rideRequestId, uid):\n \"\"\"Get the JSON for the luggage associatedd with ride request :param rideRequestId: :param uid: :return:\"\"\"\n <|body_0|>\n\n def put(self, rideRequestId, uid):\n \"\"\":param rideRequestId: :param uid: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class LuggageService:\n \"\"\"/rideRequest/:rideRequestId/luggage/\"\"\"\n\n def get(self, rideRequestId, uid):\n \"\"\"Get the JSON for the luggage associatedd with ride request :param rideRequestId: :param uid: :return:\"\"\"\n user_id = uid\n ride_request_ref = RideRequestGenericDao().rideRequestCollectionRef.document(rideRequestId)\n ride_request = RideRequestGenericDao().get(ride_request_ref)\n print('userId: {}, rideRequestId: {}'.format(user_id, rideRequestId))\n response_dict = ride_request.to_dict_view()['baggages']\n return (response_dict, 200)\n\n def put(self, rideRequestId, uid):\n \"\"\":param rideRequestId: :param uid: :return:\"\"\"\n args = ride_request_parsers.luggage_parser.parse_args()\n luggage_list = args['luggages']\n luggages = Luggages()\n luggages.add_from_list(luggage_list)\n luggage_actions.put_luggages(ride_request_id=rideRequestId, luggages=luggages)\n response_dict = {'newLuggageValues': luggages.to_dict()}\n return (response_dict, 200)\n", "source": "the_stack_v2_python_sparse", "source_path": "gravitate/api_server/ride_request/services.py", "source_repo": "lw75251/Gravitate-Backend", "split": "val", "star_events_count": 1}
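The put handler in the LuggageService record above implies a Luggages value object with add_from_list and to_dict methods. A hypothetical sketch of that object's minimal surface, purely to make the handler's data flow concrete; the field names are invented:

# Hypothetical shape of the Luggages object implied above:
# add_from_list ingests parsed request entries, to_dict serializes
# them back for the response payload.
class Luggages:
    def __init__(self):
        self._items = []

    def add_from_list(self, luggage_list):
        # each entry is assumed to be a dict such as
        # {'luggage_type': 'large', 'weight_in_lbs': 50}
        self._items.extend(luggage_list)

    def to_dict(self):
        return {'luggages': list(self._items), 'total': len(self._items)}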
{"blob_id": "796f29d2394c085913bf5f3159ff0ba5d873141b", "bodies": ["answer = ''\nfor i in range(len(strs)):\n answer += str(len(strs[i]))\n answer += ':'\n answer += strs[i]\nreturn answer", "answer = []\ni = 0\nwhile i < len(s):\n temp = int(s[i])\n while s[i + 1] != ':':\n i += 1\n temp = temp * 10 + int(s[i])\n i += 2\n j = i + temp\n answer.append(s[i:j])\n i = j\nreturn answer"], "bodies_text": "<|body_start_0|>\n answer = ''\n for i in range(len(strs)):\n answer += str(len(strs[i]))\n answer += ':'\n answer += strs[i]\n return answer\n<|end_body_0|>\n\n<|body_start_1|>\n answer = []\n i = 0\n while i < len(s):\n temp = int(s[i])\n while s[i + 1] != ':':\n i += 1\n temp = temp * 10 + int(s[i])\n i += 2\n j = i + temp\n answer.append(s[i:j])\n i = j\n return answer\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Codec", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Codec:\n\n def encode(self, strs):\n \"\"\"Encodes a list of strings to a single string. :type strs: List[str] :rtype: str\"\"\"\n <|body_0|>\n\n def decode(self, s):\n \"\"\"Decodes a single string to a list of strings. :type s: str :rtype: List[str]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n answer = ''\n for i in range(len(strs)):\n answer += str(len(strs[i]))\n answer += ':'\n answer += strs[i]\n return answer\n<|end_body_0|>\n\n<|body_start_1|>\n answer = []\n i = 0\n while i < len(s):\n temp = int(s[i])\n while s[i + 1] != ':':\n i += 1\n temp = temp * 10 + int(s[i])\n i += 2\n j = i + temp\n answer.append(s[i:j])\n i = j\n return answer\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000293", "length_bytes": 987, "license_type": "no_license", "methods": [{"docstring": "Encodes a list of strings to a single string. :type strs: List[str] :rtype: str", "name": "encode", "signature": "def encode(self, strs)"}, {"docstring": "Decodes a single string to a list of strings. :type s: str :rtype: List[str]", "name": "decode", "signature": "def decode(self, s)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_test_001667", "prompt": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def encode(self, strs): Encodes a list of strings to a single string. :type strs: List[str] :rtype: str\n- def decode(self, s): Decodes a single string to a list of strings. :type s: str :rtype: List[str]", "prompted_full_text": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def encode(self, strs): Encodes a list of strings to a single string. :type strs: List[str] :rtype: str\n- def decode(self, s): Decodes a single string to a list of strings. :type s: str :rtype: List[str]\n\n<|skeleton|>\nclass Codec:\n\n def encode(self, strs):\n \"\"\"Encodes a list of strings to a single string. :type strs: List[str] :rtype: str\"\"\"\n <|body_0|>\n\n def decode(self, s):\n \"\"\"Decodes a single string to a list of strings. 
:type s: str :rtype: List[str]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n answer = ''\n for i in range(len(strs)):\n answer += str(len(strs[i]))\n answer += ':'\n answer += strs[i]\n return answer\n<|end_body_0|>\n\n<|body_start_1|>\n answer = []\n i = 0\n while i < len(s):\n temp = int(s[i])\n while s[i + 1] != ':':\n i += 1\n temp = temp * 10 + int(s[i])\n i += 2\n j = i + temp\n answer.append(s[i:j])\n i = j\n return answer\n<|end_body_1|>\n", "revision_id": "c2b6220eae870f3b9a88283148512d600a9585ed", "skeleton": "<|skeleton|>\nclass Codec:\n\n def encode(self, strs):\n \"\"\"Encodes a list of strings to a single string. :type strs: List[str] :rtype: str\"\"\"\n <|body_0|>\n\n def decode(self, s):\n \"\"\"Decodes a single string to a list of strings. :type s: str :rtype: List[str]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Codec:\n def encode(self, strs):\n \"\"\"Encodes a list of strings to a single string. :type strs: List[str] :rtype: str\"\"\"\n answer = ''\n for i in range(len(strs)):\n answer += str(len(strs[i]))\n answer += ':'\n answer += strs[i]\n return answer\n\n def decode(self, s):\n \"\"\"Decodes a single string to a list of strings. :type s: str :rtype: List[str]\"\"\"\n answer = []\n i = 0\n while i < len(s):\n temp = int(s[i])\n while s[i + 1] != ':':\n i += 1\n temp = temp * 10 + int(s[i])\n i += 2\n j = i + temp\n answer.append(s[i:j])\n i = j\n return answer\n", "source": "the_stack_v2_python_sparse", "source_path": "Encode Decode String.py", "source_repo": "verazhou823/Leetcode", "split": "val", "star_events_count": 0}
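The length-prefix scheme in the Codec record above is worth seeing end to end: the digit prefix, not the colon, delimits each chunk, so colons (and empty strings) inside the payload are handled safely. A round-trip check using the class exactly as given:

# Round-trip demo of the length-prefix codec above.
codec = Codec()
strs = ['ab', 'c:d', '', 'hello']
encoded = codec.encode(strs)
print(encoded)                     # -> 2:ab3:c:d0:5:hello
assert codec.decode(encoded) == strs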
{"blob_id": "241bb6da6f869f43c7ba6e691e28c397a370ff40", "bodies": ["def serializeHelper(node, vals):\n if node:\n vals.append(node.val)\n serializeHelper(node.left, vals)\n serializeHelper(node.right, vals)\nvals = []\nserializeHelper(root, vals)\nreturn ' '.join(map(str, vals))", "def deserializeHelper(minVal, maxVal, vals):\n if not vals:\n return None\n if minVal < vals[0] < maxVal:\n val = vals.popleft()\n node = TreeNode(val)\n node.left = deserializeHelper(minVal, val, vals)\n node.right = deserializeHelper(val, maxVal, vals)\n return node\n else:\n return None\nvals = collections.deque([int(val) for val in data.split()])\nreturn deserializeHelper(float('-inf'), float('inf'), vals)"], "bodies_text": "<|body_start_0|>\n def serializeHelper(node, vals):\n if node:\n vals.append(node.val)\n serializeHelper(node.left, vals)\n serializeHelper(node.right, vals)\n vals = []\n serializeHelper(root, vals)\n return ' '.join(map(str, vals))\n<|end_body_0|>\n\n<|body_start_1|>\n def deserializeHelper(minVal, maxVal, vals):\n if not vals:\n return None\n if minVal < vals[0] < maxVal:\n val = vals.popleft()\n node = TreeNode(val)\n node.left = deserializeHelper(minVal, val, vals)\n node.right = deserializeHelper(val, maxVal, vals)\n return node\n else:\n return None\n vals = collections.deque([int(val) for val in data.split()])\n return deserializeHelper(float('-inf'), float('inf'), vals)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Codec", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def serializeHelper(node, vals):\n if node:\n vals.append(node.val)\n serializeHelper(node.left, vals)\n serializeHelper(node.right, vals)\n vals = []\n serializeHelper(root, vals)\n return ' '.join(map(str, vals))\n<|end_body_0|>\n\n<|body_start_1|>\n def deserializeHelper(minVal, maxVal, vals):\n if not vals:\n return None\n if minVal < vals[0] < maxVal:\n val = vals.popleft()\n node = TreeNode(val)\n node.left = deserializeHelper(minVal, val, vals)\n node.right = deserializeHelper(val, maxVal, vals)\n return node\n else:\n return None\n vals = collections.deque([int(val) for val in data.split()])\n return deserializeHelper(float('-inf'), float('inf'), vals)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000294", "length_bytes": 1344, "license_type": "permissive", "methods": [{"docstring": "Encodes a tree to a single string. :type root: TreeNode :rtype: str", "name": "serialize", "signature": "def serialize(self, root)"}, {"docstring": "Decodes your encoded data to tree. :type data: str :rtype: TreeNode", "name": "deserialize", "signature": "def deserialize(self, data)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_046112", "prompt": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: TreeNode :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. 
:type data: str :rtype: TreeNode", "prompted_full_text": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: TreeNode :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: TreeNode\n\n<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def serializeHelper(node, vals):\n if node:\n vals.append(node.val)\n serializeHelper(node.left, vals)\n serializeHelper(node.right, vals)\n vals = []\n serializeHelper(root, vals)\n return ' '.join(map(str, vals))\n<|end_body_0|>\n\n<|body_start_1|>\n def deserializeHelper(minVal, maxVal, vals):\n if not vals:\n return None\n if minVal < vals[0] < maxVal:\n val = vals.popleft()\n node = TreeNode(val)\n node.left = deserializeHelper(minVal, val, vals)\n node.right = deserializeHelper(val, maxVal, vals)\n return node\n else:\n return None\n vals = collections.deque([int(val) for val in data.split()])\n return deserializeHelper(float('-inf'), float('inf'), vals)\n<|end_body_1|>\n", "revision_id": "4dc4e6642dc92f1983c13564cc0fd99917cab358", "skeleton": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Codec:\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n def serializeHelper(node, vals):\n if node:\n vals.append(node.val)\n serializeHelper(node.left, vals)\n serializeHelper(node.right, vals)\n vals = []\n serializeHelper(root, vals)\n return ' '.join(map(str, vals))\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n def deserializeHelper(minVal, maxVal, vals):\n if not vals:\n return None\n if minVal < vals[0] < maxVal:\n val = vals.popleft()\n node = TreeNode(val)\n node.left = deserializeHelper(minVal, val, vals)\n node.right = deserializeHelper(val, maxVal, vals)\n return node\n else:\n return None\n vals = collections.deque([int(val) for val in data.split()])\n return deserializeHelper(float('-inf'), float('inf'), vals)\n", "source": "the_stack_v2_python_sparse", "source_path": "Python/serialize-and-deserialize-bst.py", "source_repo": "kamyu104/LeetCode-Solutions", "split": "val", "star_events_count": 4549}
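Because the input is a BST, the preorder sequence alone determines the tree: the (minVal, maxVal) bounds in deserializeHelper decide where each subtree ends, so no null markers are needed. A standalone round trip, including the TreeNode definition and the collections import the record's solution assumes:

# Round-trip demo for the BST codec above. TreeNode is the usual
# minimal node class the solution assumes.
import collections

class TreeNode:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

root = TreeNode(2)                 #     2
root.left = TreeNode(1)            #    / \
root.right = TreeNode(3)           #   1   3

codec = Codec()
data = codec.serialize(root)       # preorder: '2 1 3'
rebuilt = codec.deserialize(data)
assert codec.serialize(rebuilt) == data == '2 1 3'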
{"blob_id": "7d260fc3f3b9de7d635f8b1acfd65fbd72ca8f14", "bodies": ["super().__init__(d_model, q, v, h, attention_size, **kwargs)\nself._window_size = window_size\nself._padding = padding\nself._q = q\nself._v = v\nself._step = self._window_size - 2 * self._padding\nself._future_mask = nn.Parameter(torch.triu(torch.ones((self._window_size, self._window_size)), diagonal=1).bool(), requires_grad=False)\nif self._attention_size is not None:\n self._attention_mask = nn.Parameter(generate_local_map_mask(self._window_size, self._attention_size), requires_grad=False)", "batch_size = query.shape[0]\nquery = F.pad(query.transpose(1, 2), (self._padding, self._padding), 'replicate').transpose(1, 2)\nkey = F.pad(key.transpose(1, 2), (self._padding, self._padding), 'replicate').transpose(1, 2)\nvalue = F.pad(value.transpose(1, 2), (self._padding, self._padding), 'replicate').transpose(1, 2)\nqueries = torch.cat(self._W_q(query).chunk(self._h, dim=-1), dim=0)\nkeys = torch.cat(self._W_k(key).chunk(self._h, dim=-1), dim=0)\nvalues = torch.cat(self._W_v(value).chunk(self._h, dim=-1), dim=0)\nqueries = queries.unfold(dimension=1, size=self._window_size, step=self._step).reshape((-1, self._q, self._window_size)).transpose(1, 2)\nkeys = keys.unfold(dimension=1, size=self._window_size, step=self._step).reshape((-1, self._q, self._window_size)).transpose(1, 2)\nvalues = values.unfold(dimension=1, size=self._window_size, step=self._step).reshape((-1, self._v, self._window_size)).transpose(1, 2)\nself._scores = torch.bmm(queries, keys.transpose(1, 2)) / np.sqrt(self._window_size)\nif self._attention_size is not None:\n self._scores = self._scores.masked_fill(self._attention_mask, float('-inf'))\nif mask == 'subsequent':\n self._scores = self._scores.masked_fill(self._future_mask, float('-inf'))\nself._scores = F.softmax(self._scores, dim=-1)\nattention = torch.bmm(self._scores, values)\nattention = attention.reshape((batch_size * self._h, -1, self._window_size, self._v))\nattention = attention[:, :, self._padding:-self._padding, :]\nattention = attention.reshape((batch_size * self._h, -1, self._v))\nattention_heads = torch.cat(attention.chunk(self._h, dim=0), dim=-1)\nself_attention = self._W_o(attention_heads)\nreturn self_attention"], "bodies_text": "<|body_start_0|>\n super().__init__(d_model, q, v, h, attention_size, **kwargs)\n self._window_size = window_size\n self._padding = padding\n self._q = q\n self._v = v\n self._step = self._window_size - 2 * self._padding\n self._future_mask = nn.Parameter(torch.triu(torch.ones((self._window_size, self._window_size)), diagonal=1).bool(), requires_grad=False)\n if self._attention_size is not None:\n self._attention_mask = nn.Parameter(generate_local_map_mask(self._window_size, self._attention_size), requires_grad=False)\n<|end_body_0|>\n\n<|body_start_1|>\n batch_size = query.shape[0]\n query = F.pad(query.transpose(1, 2), (self._padding, self._padding), 'replicate').transpose(1, 2)\n key = F.pad(key.transpose(1, 2), (self._padding, self._padding), 'replicate').transpose(1, 2)\n value = F.pad(value.transpose(1, 2), (self._padding, self._padding), 'replicate').transpose(1, 2)\n queries = torch.cat(self._W_q(query).chunk(self._h, dim=-1), dim=0)\n keys = torch.cat(self._W_k(key).chunk(self._h, dim=-1), dim=0)\n values = torch.cat(self._W_v(value).chunk(self._h, dim=-1), dim=0)\n queries = queries.unfold(dimension=1, size=self._window_size, step=self._step).reshape((-1, self._q, self._window_size)).transpose(1, 2)\n keys = keys.unfold(dimension=1, 
size=self._window_size, step=self._step).reshape((-1, self._q, self._window_size)).transpose(1, 2)\n values = values.unfold(dimension=1, size=self._window_size, step=self._step).reshape((-1, self._v, self._window_size)).transpose(1, 2)\n self._scores = torch.bmm(queries, keys.transpose(1, 2)) / np.sqrt(self._window_size)\n if self._attention_size is not None:\n self._scores = self._scores.masked_fill(self._attention_mask, float('-inf'))\n if mask == 'subsequent':\n self._scores = self._scores.masked_fill(self._future_mask, float('-inf'))\n self._scores = F.softmax(self._scores, dim=-1)\n attention = torch.bmm(self._scores, values)\n attention = attention.reshape((batch_size * self._h, -1, self._window_size, self._v))\n attention = attention[:, :, self._padding:-self._padding, :]\n attention = attention.reshape((batch_size * self._h, -1, self._v))\n attention_heads = torch.cat(attention.chunk(self._h, dim=0), dim=-1)\n self_attention = self._W_o(attention_heads)\n return self_attention\n<|end_body_1|>\n", "class_docstring": "Multi Head Attention block with moving window. Given 3 inputs of shape (batch_size, K, d_model), that will be used to compute query, keys and values, we output a self attention tensor of shape (batch_size, K, d_model). Queries, keys and values are divided in chunks using a moving window. Parameters ---------- d_model: Dimension of the input vector. q: Dimension of all query matrix. v: Dimension of all value matrix. h: Number of heads. attention_size: Number of backward elements to apply attention. Deactivated if ``None``. Default is ``None``. window_size: Size of the window used to extract chunks. Default is 168 padding: Padding around each window. Padding will be applied to input sequence.", "class_name": "MultiHeadAttentionWindow", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MultiHeadAttentionWindow:\n \"\"\"Multi Head Attention block with moving window. Given 3 inputs of shape (batch_size, K, d_model), that will be used to compute query, keys and values, we output a self attention tensor of shape (batch_size, K, d_model). Queries, keys and values are divided in chunks using a moving window. Parameters ---------- d_model: Dimension of the input vector. q: Dimension of all query matrix. v: Dimension of all value matrix. h: Number of heads. attention_size: Number of backward elements to apply attention. Deactivated if ``None``. Default is ``None``. window_size: Size of the window used to extract chunks. Default is 168 padding: Padding around each window. Padding will be applied to input sequence.\"\"\"\n\n def __init__(self, d_model: int, q: int, v: int, h: int, attention_size: int=None, window_size: Optional[int]=168, padding: Optional[int]=168 // 4, **kwargs):\n \"\"\"Initialize the Multi Head Block.\"\"\"\n <|body_0|>\n\n def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, mask: Optional[str]=None) -> torch.Tensor:\n \"\"\"Propagate forward the input through the MHB. We compute for each head the queries, keys and values matrices, followed by the Scaled Dot-Product. The result is concatenated and returned with shape (batch_size, K, d_model). Parameters ---------- query: Input tensor with shape (batch_size, K, d_model) used to compute queries. key: Input tensor with shape (batch_size, K, d_model) used to compute keys. value: Input tensor with shape (batch_size, K, d_model) used to compute values. mask: Mask to apply on scores before computing attention. 
One of ``'subsequent'``, None. Default is None. Returns ------- Self attention tensor with shape (batch_size, K, d_model).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(d_model, q, v, h, attention_size, **kwargs)\n self._window_size = window_size\n self._padding = padding\n self._q = q\n self._v = v\n self._step = self._window_size - 2 * self._padding\n self._future_mask = nn.Parameter(torch.triu(torch.ones((self._window_size, self._window_size)), diagonal=1).bool(), requires_grad=False)\n if self._attention_size is not None:\n self._attention_mask = nn.Parameter(generate_local_map_mask(self._window_size, self._attention_size), requires_grad=False)\n<|end_body_0|>\n\n<|body_start_1|>\n batch_size = query.shape[0]\n query = F.pad(query.transpose(1, 2), (self._padding, self._padding), 'replicate').transpose(1, 2)\n key = F.pad(key.transpose(1, 2), (self._padding, self._padding), 'replicate').transpose(1, 2)\n value = F.pad(value.transpose(1, 2), (self._padding, self._padding), 'replicate').transpose(1, 2)\n queries = torch.cat(self._W_q(query).chunk(self._h, dim=-1), dim=0)\n keys = torch.cat(self._W_k(key).chunk(self._h, dim=-1), dim=0)\n values = torch.cat(self._W_v(value).chunk(self._h, dim=-1), dim=0)\n queries = queries.unfold(dimension=1, size=self._window_size, step=self._step).reshape((-1, self._q, self._window_size)).transpose(1, 2)\n keys = keys.unfold(dimension=1, size=self._window_size, step=self._step).reshape((-1, self._q, self._window_size)).transpose(1, 2)\n values = values.unfold(dimension=1, size=self._window_size, step=self._step).reshape((-1, self._v, self._window_size)).transpose(1, 2)\n self._scores = torch.bmm(queries, keys.transpose(1, 2)) / np.sqrt(self._window_size)\n if self._attention_size is not None:\n self._scores = self._scores.masked_fill(self._attention_mask, float('-inf'))\n if mask == 'subsequent':\n self._scores = self._scores.masked_fill(self._future_mask, float('-inf'))\n self._scores = F.softmax(self._scores, dim=-1)\n attention = torch.bmm(self._scores, values)\n attention = attention.reshape((batch_size * self._h, -1, self._window_size, self._v))\n attention = attention[:, :, self._padding:-self._padding, :]\n attention = attention.reshape((batch_size * self._h, -1, self._v))\n attention_heads = torch.cat(attention.chunk(self._h, dim=0), dim=-1)\n self_attention = self._W_o(attention_heads)\n return self_attention\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000295", "length_bytes": 13552, "license_type": "permissive", "methods": [{"docstring": "Initialize the Multi Head Block.", "name": "__init__", "signature": "def __init__(self, d_model: int, q: int, v: int, h: int, attention_size: int=None, window_size: Optional[int]=168, padding: Optional[int]=168 // 4, **kwargs)"}, {"docstring": "Propagate forward the input through the MHB. We compute for each head the queries, keys and values matrices, followed by the Scaled Dot-Product. The result is concatenated and returned with shape (batch_size, K, d_model). Parameters ---------- query: Input tensor with shape (batch_size, K, d_model) used to compute queries. key: Input tensor with shape (batch_size, K, d_model) used to compute keys. value: Input tensor with shape (batch_size, K, d_model) used to compute values. mask: Mask to apply on scores before computing attention. One of ``'subsequent'``, None. Default is None. 
Returns ------- Self attention tensor with shape (batch_size, K, d_model).", "name": "forward", "signature": "def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, mask: Optional[str]=None) -> torch.Tensor"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_043357", "prompt": "Implement the Python class `MultiHeadAttentionWindow` described below.\n\nClass description:\nMulti Head Attention block with moving window. Given 3 inputs of shape (batch_size, K, d_model), that will be used to compute query, keys and values, we output a self attention tensor of shape (batch_size, K, d_model). Queries, keys and values are divided in chunks using a moving window. Parameters ---------- d_model: Dimension of the input vector. q: Dimension of all query matrix. v: Dimension of all value matrix. h: Number of heads. attention_size: Number of backward elements to apply attention. Deactivated if ``None``. Default is ``None``. window_size: Size of the window used to extract chunks. Default is 168 padding: Padding around each window. Padding will be applied to input sequence.\n\nMethod signatures and docstrings:\n- def __init__(self, d_model: int, q: int, v: int, h: int, attention_size: int=None, window_size: Optional[int]=168, padding: Optional[int]=168 // 4, **kwargs): Initialize the Multi Head Block.\n- def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, mask: Optional[str]=None) -> torch.Tensor: Propagate forward the input through the MHB. We compute for each head the queries, keys and values matrices, followed by the Scaled Dot-Product. The result is concatenated and returned with shape (batch_size, K, d_model). Parameters ---------- query: Input tensor with shape (batch_size, K, d_model) used to compute queries. key: Input tensor with shape (batch_size, K, d_model) used to compute keys. value: Input tensor with shape (batch_size, K, d_model) used to compute values. mask: Mask to apply on scores before computing attention. One of ``'subsequent'``, None. Default is None. Returns ------- Self attention tensor with shape (batch_size, K, d_model).", "prompted_full_text": "Implement the Python class `MultiHeadAttentionWindow` described below.\n\nClass description:\nMulti Head Attention block with moving window. Given 3 inputs of shape (batch_size, K, d_model), that will be used to compute query, keys and values, we output a self attention tensor of shape (batch_size, K, d_model). Queries, keys and values are divided in chunks using a moving window. Parameters ---------- d_model: Dimension of the input vector. q: Dimension of all query matrix. v: Dimension of all value matrix. h: Number of heads. attention_size: Number of backward elements to apply attention. Deactivated if ``None``. Default is ``None``. window_size: Size of the window used to extract chunks. Default is 168 padding: Padding around each window. Padding will be applied to input sequence.\n\nMethod signatures and docstrings:\n- def __init__(self, d_model: int, q: int, v: int, h: int, attention_size: int=None, window_size: Optional[int]=168, padding: Optional[int]=168 // 4, **kwargs): Initialize the Multi Head Block.\n- def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, mask: Optional[str]=None) -> torch.Tensor: Propagate forward the input through the MHB. We compute for each head the queries, keys and values matrices, followed by the Scaled Dot-Product. The result is concatenated and returned with shape (batch_size, K, d_model). 
Parameters ---------- query: Input tensor with shape (batch_size, K, d_model) used to compute queries. key: Input tensor with shape (batch_size, K, d_model) used to compute keys. value: Input tensor with shape (batch_size, K, d_model) used to compute values. mask: Mask to apply on scores before computing attention. One of ``'subsequent'``, None. Default is None. Returns ------- Self attention tensor with shape (batch_size, K, d_model).\n\n<|skeleton|>\nclass MultiHeadAttentionWindow:\n \"\"\"Multi Head Attention block with moving window. Given 3 inputs of shape (batch_size, K, d_model), that will be used to compute query, keys and values, we output a self attention tensor of shape (batch_size, K, d_model). Queries, keys and values are divided in chunks using a moving window. Parameters ---------- d_model: Dimension of the input vector. q: Dimension of all query matrix. v: Dimension of all value matrix. h: Number of heads. attention_size: Number of backward elements to apply attention. Deactivated if ``None``. Default is ``None``. window_size: Size of the window used to extract chunks. Default is 168 padding: Padding around each window. Padding will be applied to input sequence.\"\"\"\n\n def __init__(self, d_model: int, q: int, v: int, h: int, attention_size: int=None, window_size: Optional[int]=168, padding: Optional[int]=168 // 4, **kwargs):\n \"\"\"Initialize the Multi Head Block.\"\"\"\n <|body_0|>\n\n def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, mask: Optional[str]=None) -> torch.Tensor:\n \"\"\"Propagate forward the input through the MHB. We compute for each head the queries, keys and values matrices, followed by the Scaled Dot-Product. The result is concatenated and returned with shape (batch_size, K, d_model). Parameters ---------- query: Input tensor with shape (batch_size, K, d_model) used to compute queries. key: Input tensor with shape (batch_size, K, d_model) used to compute keys. value: Input tensor with shape (batch_size, K, d_model) used to compute values. mask: Mask to apply on scores before computing attention. One of ``'subsequent'``, None. Default is None. 
Returns ------- Self attention tensor with shape (batch_size, K, d_model).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(d_model, q, v, h, attention_size, **kwargs)\n self._window_size = window_size\n self._padding = padding\n self._q = q\n self._v = v\n self._step = self._window_size - 2 * self._padding\n self._future_mask = nn.Parameter(torch.triu(torch.ones((self._window_size, self._window_size)), diagonal=1).bool(), requires_grad=False)\n if self._attention_size is not None:\n self._attention_mask = nn.Parameter(generate_local_map_mask(self._window_size, self._attention_size), requires_grad=False)\n<|end_body_0|>\n\n<|body_start_1|>\n batch_size = query.shape[0]\n query = F.pad(query.transpose(1, 2), (self._padding, self._padding), 'replicate').transpose(1, 2)\n key = F.pad(key.transpose(1, 2), (self._padding, self._padding), 'replicate').transpose(1, 2)\n value = F.pad(value.transpose(1, 2), (self._padding, self._padding), 'replicate').transpose(1, 2)\n queries = torch.cat(self._W_q(query).chunk(self._h, dim=-1), dim=0)\n keys = torch.cat(self._W_k(key).chunk(self._h, dim=-1), dim=0)\n values = torch.cat(self._W_v(value).chunk(self._h, dim=-1), dim=0)\n queries = queries.unfold(dimension=1, size=self._window_size, step=self._step).reshape((-1, self._q, self._window_size)).transpose(1, 2)\n keys = keys.unfold(dimension=1, size=self._window_size, step=self._step).reshape((-1, self._q, self._window_size)).transpose(1, 2)\n values = values.unfold(dimension=1, size=self._window_size, step=self._step).reshape((-1, self._v, self._window_size)).transpose(1, 2)\n self._scores = torch.bmm(queries, keys.transpose(1, 2)) / np.sqrt(self._window_size)\n if self._attention_size is not None:\n self._scores = self._scores.masked_fill(self._attention_mask, float('-inf'))\n if mask == 'subsequent':\n self._scores = self._scores.masked_fill(self._future_mask, float('-inf'))\n self._scores = F.softmax(self._scores, dim=-1)\n attention = torch.bmm(self._scores, values)\n attention = attention.reshape((batch_size * self._h, -1, self._window_size, self._v))\n attention = attention[:, :, self._padding:-self._padding, :]\n attention = attention.reshape((batch_size * self._h, -1, self._v))\n attention_heads = torch.cat(attention.chunk(self._h, dim=0), dim=-1)\n self_attention = self._W_o(attention_heads)\n return self_attention\n<|end_body_1|>\n", "revision_id": "0b801d2d2e828ac480d1097cb3bdd82b1e25c15b", "skeleton": "<|skeleton|>\nclass MultiHeadAttentionWindow:\n \"\"\"Multi Head Attention block with moving window. Given 3 inputs of shape (batch_size, K, d_model), that will be used to compute query, keys and values, we output a self attention tensor of shape (batch_size, K, d_model). Queries, keys and values are divided in chunks using a moving window. Parameters ---------- d_model: Dimension of the input vector. q: Dimension of all query matrix. v: Dimension of all value matrix. h: Number of heads. attention_size: Number of backward elements to apply attention. Deactivated if ``None``. Default is ``None``. window_size: Size of the window used to extract chunks. Default is 168 padding: Padding around each window. 
Padding will be applied to input sequence.\"\"\"\n\n def __init__(self, d_model: int, q: int, v: int, h: int, attention_size: int=None, window_size: Optional[int]=168, padding: Optional[int]=168 // 4, **kwargs):\n \"\"\"Initialize the Multi Head Block.\"\"\"\n <|body_0|>\n\n def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, mask: Optional[str]=None) -> torch.Tensor:\n \"\"\"Propagate forward the input through the MHB. We compute for each head the queries, keys and values matrices, followed by the Scaled Dot-Product. The result is concatenated and returned with shape (batch_size, K, d_model). Parameters ---------- query: Input tensor with shape (batch_size, K, d_model) used to compute queries. key: Input tensor with shape (batch_size, K, d_model) used to compute keys. value: Input tensor with shape (batch_size, K, d_model) used to compute values. mask: Mask to apply on scores before computing attention. One of ``'subsequent'``, None. Default is None. Returns ------- Self attention tensor with shape (batch_size, K, d_model).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class MultiHeadAttentionWindow:\n \"\"\"Multi Head Attention block with moving window. Given 3 inputs of shape (batch_size, K, d_model), that will be used to compute query, keys and values, we output a self attention tensor of shape (batch_size, K, d_model). Queries, keys and values are divided in chunks using a moving window. Parameters ---------- d_model: Dimension of the input vector. q: Dimension of all query matrix. v: Dimension of all value matrix. h: Number of heads. attention_size: Number of backward elements to apply attention. Deactivated if ``None``. Default is ``None``. window_size: Size of the window used to extract chunks. Default is 168 padding: Padding around each window. Padding will be applied to input sequence.\"\"\"\n\n def __init__(self, d_model: int, q: int, v: int, h: int, attention_size: int=None, window_size: Optional[int]=168, padding: Optional[int]=168 // 4, **kwargs):\n \"\"\"Initialize the Multi Head Block.\"\"\"\n super().__init__(d_model, q, v, h, attention_size, **kwargs)\n self._window_size = window_size\n self._padding = padding\n self._q = q\n self._v = v\n self._step = self._window_size - 2 * self._padding\n self._future_mask = nn.Parameter(torch.triu(torch.ones((self._window_size, self._window_size)), diagonal=1).bool(), requires_grad=False)\n if self._attention_size is not None:\n self._attention_mask = nn.Parameter(generate_local_map_mask(self._window_size, self._attention_size), requires_grad=False)\n\n def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, mask: Optional[str]=None) -> torch.Tensor:\n \"\"\"Propagate forward the input through the MHB. We compute for each head the queries, keys and values matrices, followed by the Scaled Dot-Product. The result is concatenated and returned with shape (batch_size, K, d_model). Parameters ---------- query: Input tensor with shape (batch_size, K, d_model) used to compute queries. key: Input tensor with shape (batch_size, K, d_model) used to compute keys. value: Input tensor with shape (batch_size, K, d_model) used to compute values. mask: Mask to apply on scores before computing attention. One of ``'subsequent'``, None. Default is None. 
Returns ------- Self attention tensor with shape (batch_size, K, d_model).\"\"\"\n batch_size = query.shape[0]\n query = F.pad(query.transpose(1, 2), (self._padding, self._padding), 'replicate').transpose(1, 2)\n key = F.pad(key.transpose(1, 2), (self._padding, self._padding), 'replicate').transpose(1, 2)\n value = F.pad(value.transpose(1, 2), (self._padding, self._padding), 'replicate').transpose(1, 2)\n queries = torch.cat(self._W_q(query).chunk(self._h, dim=-1), dim=0)\n keys = torch.cat(self._W_k(key).chunk(self._h, dim=-1), dim=0)\n values = torch.cat(self._W_v(value).chunk(self._h, dim=-1), dim=0)\n queries = queries.unfold(dimension=1, size=self._window_size, step=self._step).reshape((-1, self._q, self._window_size)).transpose(1, 2)\n keys = keys.unfold(dimension=1, size=self._window_size, step=self._step).reshape((-1, self._q, self._window_size)).transpose(1, 2)\n values = values.unfold(dimension=1, size=self._window_size, step=self._step).reshape((-1, self._v, self._window_size)).transpose(1, 2)\n self._scores = torch.bmm(queries, keys.transpose(1, 2)) / np.sqrt(self._window_size)\n if self._attention_size is not None:\n self._scores = self._scores.masked_fill(self._attention_mask, float('-inf'))\n if mask == 'subsequent':\n self._scores = self._scores.masked_fill(self._future_mask, float('-inf'))\n self._scores = F.softmax(self._scores, dim=-1)\n attention = torch.bmm(self._scores, values)\n attention = attention.reshape((batch_size * self._h, -1, self._window_size, self._v))\n attention = attention[:, :, self._padding:-self._padding, :]\n attention = attention.reshape((batch_size * self._h, -1, self._v))\n attention_heads = torch.cat(attention.chunk(self._h, dim=0), dim=-1)\n self_attention = self._W_o(attention_heads)\n return self_attention\n", "source": "the_stack_v2_python_sparse", "source_path": "code/deep/adarnn/tst/multiHeadAttention.py", "source_repo": "jindongwang/transferlearning", "split": "val", "star_events_count": 12773}
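Editor's note: a minimal, self-contained sketch (not taken from the record above) of the moving-window chunking that MultiHeadAttentionWindow.forward performs with F.pad and Tensor.unfold. The sizes below are illustrative assumptions; the final assertion checks the key property the forward pass relies on when it crops `padding` frames from each window of the attention output — the window interiors tile the original sequence exactly.

import torch
import torch.nn.functional as F

batch_size, K, d = 2, 336, 8
window_size, padding = 168, 168 // 4
step = window_size - 2 * padding  # 84: consecutive windows overlap by 2 * padding

x = torch.randn(batch_size, K, d)
# 'replicate' padding acts on the last dim, hence the transpose round-trip.
x_padded = F.pad(x.transpose(1, 2), (padding, padding), 'replicate').transpose(1, 2)
# Slide a length-window_size window along the time axis: (B, n_windows, d, window_size).
chunks = x_padded.unfold(dimension=1, size=window_size, step=step)
# Dropping `padding` frames from both ends of every window leaves disjoint
# interiors whose concatenation reassembles the original sequence.
interiors = chunks[..., padding:-padding]                      # (B, n_windows, d, step)
recombined = interiors.permute(0, 1, 3, 2).reshape(batch_size, -1, d)
assert recombined.shape == (batch_size, K, d)
assert torch.equal(recombined, x)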
{"blob_id": "5004fa8aba4cb2886827adcd43ee749aef0996e6", "bodies": ["result = volumeManager.getVolumeInfo('/')\nself.assertTrue(result is not None, 'getVolumeInfo returned None for the root volume')\nself.assertTrue('diskType' in result, 'getVolumeInfo did not get a diskType for the root volume')\nself.assertEqual(result['diskType'], 'Hard Drive', 'The boot volume diskType was not \"Hard Drive\" as expected, but rather: ' + str(result['diskType']))\nself.assertTrue('volumeFormat' in result, 'getVolumeInfo did not get a volumeFormat for the root volume')\nself.assertEqual(result['volumeFormat'], 'Journaled HFS+', 'The boot volume volumeFormat was not \"Journaled HFS+\" as expected, but rather: ' + str(result['volumeFormat']))\nself.assertTrue('mountPath' in result, 'getVolumeInfo did not get a mountPath for the root volume')\nself.assertEqual(result['mountPath'], '/', 'The boot volume volumeFormat was not \"/\" as expected, but rather: ' + str(result['mountPath']))\nself.assertTrue('volumeName' in result, 'getVolumeInfo did not get a volumeName for the root volume')\nself.assertTrue('diskBsdName' in result, 'getVolumeInfo did not get a diskBsdName for the root volume')\nself.assertTrue(result['diskBsdName'].startswith('disk'), 'The boot volume diskBsdName did not start with \"disk\" as expected, but rather was: ' + str(result['diskBsdName']))\nself.assertTrue('bsdPath' in result, 'getVolumeInfo did not get a bsdPath for the root volume')\nself.assertTrue(result['bsdPath'].startswith('/dev/disk'), 'The boot volume bsdPath did not start with \"/dev/disk\" as expected, but rather was: ' + str(result['bsdPath']))\nself.assertTrue(result['bsdPath'].startswith('/dev/' + result['diskBsdName'] + 's'), 'The boot volume bsdPath did not start with the diskBsdName (%s) as expected, but rather was: %s' % (result['diskBsdName'], str(result['bsdPath'])))", "rootResult = volumeManager.getVolumeInfo('/')\nself.assertTrue(rootResult is not None, 'getVolumeInfo returned None for the root volume')\napplicationsResult = volumeManager.getVolumeInfo('/Applications')\nself.assertTrue(applicationsResult is not None, 'getVolumeInfo returned None for the Applications folder')\nself.assertEqual(rootResult, applicationsResult, 'getVolumeInfo did not return the same information for the root volume as it did for Applications')", "version, build = volumeManager.getMacOSVersionAndBuildOfVolume('/')\nself.assertTrue(version is not None, 'getMacOSVersionAndBuildOfVolume got None as the version of MacOS on the root volume')\nself.assertTrue(version.startswith('10.'), 'The value that getMacOSVersionAndBuildOfVolume returned for the version of MacOS on the root volume did not start with \"10.\": ' + version)\nself.assertTrue(build is not None, 'getMacOSVersionAndBuildOfVolume got None as the build of MacOS on the root volume')\nself.assertTrue(re.match('^\\\\d+[A-Z]\\\\d+[a-zA-Z]?$', build), 'The value that getMacOSVersionAndBuildOfVolume returned for the build of MacOS on the root volume did not look correct: ' + build)", "mountedVolumes = volumeManager.getMountedVolumes(excludeRoot=False)\nself.assertTrue(hasattr(mountedVolumes, '__iter__'), 'The output of getMountedVolumes including root was not an array')\nself.assertTrue('/' in mountedVolumes, 'The output of getMountedVolumes including root did not include \"/\"')\nfor thisMountPoint in mountedVolumes:\n self.assertTrue(os.path.ismount(thisMountPoint), 'An item returned from getMountedVolumes was not a volume: ' + str(thisMountPoint))\nmountedVolumes = 
volumeManager.getMountedVolumes()\nself.assertFalse('/' in mountedVolumes, 'The output of getMountedVolumes not including root still included \"/\"')", "root = volumeManager('/')\nself.assertTrue(root.diskType is not None, 'After being created with the root path, the volumeManager object did not have a diskType value')\nself.assertEqual(root.diskType, 'Hard Drive', \"After being created with the root path, the volumeManager object's diskType was not 'Hard Drive' as expectd, but rather: \" + root.diskType)\nself.assertTrue(root.mountPath is not None, 'After being created with the root path, the volumeManager object did not have a mountPath value')\nself.assertEqual(root.mountPath, '/', \"After being created with the root path, the volumeManager object's mountPath was not '/' as expectd, but rather: \" + root.mountPath)\nself.assertTrue(root.volumeFormat is not None, 'After being created with the root path, the volumeManager object did not have a volumeFormat value')\nself.assertEqual(root.volumeFormat, 'Journaled HFS+', \"After being created with the root path, the volumeManager object's volumeFormat was not 'Mac OS Extended (Journaled)' as expectd, but rather: \" + root.volumeFormat)\nself.assertTrue(root.isMounted(), 'The root object is not reporting being mounted')"], "bodies_text": "<|body_start_0|>\n result = volumeManager.getVolumeInfo('/')\n self.assertTrue(result is not None, 'getVolumeInfo returned None for the root volume')\n self.assertTrue('diskType' in result, 'getVolumeInfo did not get a diskType for the root volume')\n self.assertEqual(result['diskType'], 'Hard Drive', 'The boot volume diskType was not \"Hard Drive\" as expected, but rather: ' + str(result['diskType']))\n self.assertTrue('volumeFormat' in result, 'getVolumeInfo did not get a volumeFormat for the root volume')\n self.assertEqual(result['volumeFormat'], 'Journaled HFS+', 'The boot volume volumeFormat was not \"Journaled HFS+\" as expected, but rather: ' + str(result['volumeFormat']))\n self.assertTrue('mountPath' in result, 'getVolumeInfo did not get a mountPath for the root volume')\n self.assertEqual(result['mountPath'], '/', 'The boot volume volumeFormat was not \"/\" as expected, but rather: ' + str(result['mountPath']))\n self.assertTrue('volumeName' in result, 'getVolumeInfo did not get a volumeName for the root volume')\n self.assertTrue('diskBsdName' in result, 'getVolumeInfo did not get a diskBsdName for the root volume')\n self.assertTrue(result['diskBsdName'].startswith('disk'), 'The boot volume diskBsdName did not start with \"disk\" as expected, but rather was: ' + str(result['diskBsdName']))\n self.assertTrue('bsdPath' in result, 'getVolumeInfo did not get a bsdPath for the root volume')\n self.assertTrue(result['bsdPath'].startswith('/dev/disk'), 'The boot volume bsdPath did not start with \"/dev/disk\" as expected, but rather was: ' + str(result['bsdPath']))\n self.assertTrue(result['bsdPath'].startswith('/dev/' + result['diskBsdName'] + 's'), 'The boot volume bsdPath did not start with the diskBsdName (%s) as expected, but rather was: %s' % (result['diskBsdName'], str(result['bsdPath'])))\n<|end_body_0|>\n\n<|body_start_1|>\n rootResult = volumeManager.getVolumeInfo('/')\n self.assertTrue(rootResult is not None, 'getVolumeInfo returned None for the root volume')\n applicationsResult = volumeManager.getVolumeInfo('/Applications')\n self.assertTrue(applicationsResult is not None, 'getVolumeInfo returned None for the Applications folder')\n self.assertEqual(rootResult, applicationsResult, 
'getVolumeInfo did not return the same information for the root volume as it did for Applications')\n<|end_body_1|>\n\n<|body_start_2|>\n version, build = volumeManager.getMacOSVersionAndBuildOfVolume('/')\n self.assertTrue(version is not None, 'getMacOSVersionAndBuildOfVolume got None as the version of MacOS on the root volume')\n self.assertTrue(version.startswith('10.'), 'The value that getMacOSVersionAndBuildOfVolume returned for the version of MacOS on the root volume did not start with \"10.\": ' + version)\n self.assertTrue(build is not None, 'getMacOSVersionAndBuildOfVolume got None as the build of MacOS on the root volume')\n self.assertTrue(re.match('^\\\\d+[A-Z]\\\\d+[a-zA-Z]?$', build), 'The value that getMacOSVersionAndBuildOfVolume returned for the build of MacOS on the root volume did not look correct: ' + build)\n<|end_body_2|>\n\n<|body_start_3|>\n mountedVolumes = volumeManager.getMountedVolumes(excludeRoot=False)\n self.assertTrue(hasattr(mountedVolumes, '__iter__'), 'The output of getMountedVolumes including root was not an array')\n self.assertTrue('/' in mountedVolumes, 'The output of getMountedVolumes including root did not include \"/\"')\n for thisMountPoint in mountedVolumes:\n self.assertTrue(os.path.ismount(thisMountPoint), 'An item returned from getMountedVolumes was not a volume: ' + str(thisMountPoint))\n mountedVolumes = volumeManager.getMountedVolumes()\n self.assertFalse('/' in mountedVolumes, 'The output of getMountedVolumes not including root still included \"/\"')\n<|end_body_3|>\n\n<|body_start_4|>\n root = volumeManager('/')\n self.assertTrue(root.diskType is not None, 'After being created with the root path, the volumeManager object did not have a diskType value')\n self.assertEqual(root.diskType, 'Hard Drive', \"After being created with the root path, the volumeManager object's diskType was not 'Hard Drive' as expectd, but rather: \" + root.diskType)\n self.assertTrue(root.mountPath is not None, 'After being created with the root path, the volumeManager object did not have a mountPath value')\n self.assertEqual(root.mountPath, '/', \"After being created with the root path, the volumeManager object's mountPath was not '/' as expectd, but rather: \" + root.mountPath)\n self.assertTrue(root.volumeFormat is not None, 'After being created with the root path, the volumeManager object did not have a volumeFormat value')\n self.assertEqual(root.volumeFormat, 'Journaled HFS+', \"After being created with the root path, the volumeManager object's volumeFormat was not 'Mac OS Extended (Journaled)' as expectd, but rather: \" + root.volumeFormat)\n self.assertTrue(root.isMounted(), 'The root object is not reporting being mounted')\n<|end_body_4|>\n", "class_docstring": "Test the diskUtility routines", "class_name": "volumeManagerTests", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass volumeManagerTests:\n \"\"\"Test the diskUtility routines\"\"\"\n\n def test_getVolumeInfo_root(self):\n \"\"\"Test getVolumeInfo on the root volume, expecting that it is a HFS+ volume\"\"\"\n <|body_0|>\n\n def test_getVolumeInfo_Applications(self):\n \"\"\"Test that using getVolumeInfo on an item inside of a volume returns the volume's info\"\"\"\n <|body_1|>\n\n def test_getMacOSVersionAndBuildOfVolume(self):\n \"\"\"Test that getMacOSVersionAndBuildOfVolume can get the information from the root volume\"\"\"\n <|body_2|>\n\n def test_getMountedVolumes(self):\n \"\"\"Test that getMountedVolumes is returning a list of 
volumes\"\"\"\n <|body_3|>\n\n def test_volumeManager_onRoot(self):\n \"\"\"Test volumeManager by creating a new instance with root's path\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n result = volumeManager.getVolumeInfo('/')\n self.assertTrue(result is not None, 'getVolumeInfo returned None for the root volume')\n self.assertTrue('diskType' in result, 'getVolumeInfo did not get a diskType for the root volume')\n self.assertEqual(result['diskType'], 'Hard Drive', 'The boot volume diskType was not \"Hard Drive\" as expected, but rather: ' + str(result['diskType']))\n self.assertTrue('volumeFormat' in result, 'getVolumeInfo did not get a volumeFormat for the root volume')\n self.assertEqual(result['volumeFormat'], 'Journaled HFS+', 'The boot volume volumeFormat was not \"Journaled HFS+\" as expected, but rather: ' + str(result['volumeFormat']))\n self.assertTrue('mountPath' in result, 'getVolumeInfo did not get a mountPath for the root volume')\n self.assertEqual(result['mountPath'], '/', 'The boot volume volumeFormat was not \"/\" as expected, but rather: ' + str(result['mountPath']))\n self.assertTrue('volumeName' in result, 'getVolumeInfo did not get a volumeName for the root volume')\n self.assertTrue('diskBsdName' in result, 'getVolumeInfo did not get a diskBsdName for the root volume')\n self.assertTrue(result['diskBsdName'].startswith('disk'), 'The boot volume diskBsdName did not start with \"disk\" as expected, but rather was: ' + str(result['diskBsdName']))\n self.assertTrue('bsdPath' in result, 'getVolumeInfo did not get a bsdPath for the root volume')\n self.assertTrue(result['bsdPath'].startswith('/dev/disk'), 'The boot volume bsdPath did not start with \"/dev/disk\" as expected, but rather was: ' + str(result['bsdPath']))\n self.assertTrue(result['bsdPath'].startswith('/dev/' + result['diskBsdName'] + 's'), 'The boot volume bsdPath did not start with the diskBsdName (%s) as expected, but rather was: %s' % (result['diskBsdName'], str(result['bsdPath'])))\n<|end_body_0|>\n\n<|body_start_1|>\n rootResult = volumeManager.getVolumeInfo('/')\n self.assertTrue(rootResult is not None, 'getVolumeInfo returned None for the root volume')\n applicationsResult = volumeManager.getVolumeInfo('/Applications')\n self.assertTrue(applicationsResult is not None, 'getVolumeInfo returned None for the Applications folder')\n self.assertEqual(rootResult, applicationsResult, 'getVolumeInfo did not return the same information for the root volume as it did for Applications')\n<|end_body_1|>\n\n<|body_start_2|>\n version, build = volumeManager.getMacOSVersionAndBuildOfVolume('/')\n self.assertTrue(version is not None, 'getMacOSVersionAndBuildOfVolume got None as the version of MacOS on the root volume')\n self.assertTrue(version.startswith('10.'), 'The value that getMacOSVersionAndBuildOfVolume returned for the version of MacOS on the root volume did not start with \"10.\": ' + version)\n self.assertTrue(build is not None, 'getMacOSVersionAndBuildOfVolume got None as the build of MacOS on the root volume')\n self.assertTrue(re.match('^\\\\d+[A-Z]\\\\d+[a-zA-Z]?$', build), 'The value that getMacOSVersionAndBuildOfVolume returned for the build of MacOS on the root volume did not look correct: ' + build)\n<|end_body_2|>\n\n<|body_start_3|>\n mountedVolumes = volumeManager.getMountedVolumes(excludeRoot=False)\n self.assertTrue(hasattr(mountedVolumes, '__iter__'), 'The output of getMountedVolumes including root was not an array')\n self.assertTrue('/' in mountedVolumes, 'The output of 
getMountedVolumes including root did not include \"/\"')\n for thisMountPoint in mountedVolumes:\n self.assertTrue(os.path.ismount(thisMountPoint), 'An item returned from getMountedVolumes was not a volume: ' + str(thisMountPoint))\n mountedVolumes = volumeManager.getMountedVolumes()\n self.assertFalse('/' in mountedVolumes, 'The output of getMountedVolumes not including root still included \"/\"')\n<|end_body_3|>\n\n<|body_start_4|>\n root = volumeManager('/')\n self.assertTrue(root.diskType is not None, 'After being created with the root path, the volumeManager object did not have a diskType value')\n self.assertEqual(root.diskType, 'Hard Drive', \"After being created with the root path, the volumeManager object's diskType was not 'Hard Drive' as expectd, but rather: \" + root.diskType)\n self.assertTrue(root.mountPath is not None, 'After being created with the root path, the volumeManager object did not have a mountPath value')\n self.assertEqual(root.mountPath, '/', \"After being created with the root path, the volumeManager object's mountPath was not '/' as expectd, but rather: \" + root.mountPath)\n self.assertTrue(root.volumeFormat is not None, 'After being created with the root path, the volumeManager object did not have a volumeFormat value')\n self.assertEqual(root.volumeFormat, 'Journaled HFS+', \"After being created with the root path, the volumeManager object's volumeFormat was not 'Mac OS Extended (Journaled)' as expectd, but rather: \" + root.volumeFormat)\n self.assertTrue(root.isMounted(), 'The root object is not reporting being mounted')\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000296", "length_bytes": 6435, "license_type": "no_license", "methods": [{"docstring": "Test getVolumeInfo on the root volume, expecting that it is a HFS+ volume", "name": "test_getVolumeInfo_root", "signature": "def test_getVolumeInfo_root(self)"}, {"docstring": "Test that using getVolumeInfo on an item inside of a volume returns the volume's info", "name": "test_getVolumeInfo_Applications", "signature": "def test_getVolumeInfo_Applications(self)"}, {"docstring": "Test that getMacOSVersionAndBuildOfVolume can get the information from the root volume", "name": "test_getMacOSVersionAndBuildOfVolume", "signature": "def test_getMacOSVersionAndBuildOfVolume(self)"}, {"docstring": "Test that getMountedVolumes is returning a list of volumes", "name": "test_getMountedVolumes", "signature": "def test_getMountedVolumes(self)"}, {"docstring": "Test volumeManager by creating a new instance with root's path", "name": "test_volumeManager_onRoot", "signature": "def test_volumeManager_onRoot(self)"}], "n_methods": 5, "prompt": "Implement the Python class `volumeManagerTests` described below.\n\nClass description:\nTest the diskUtility routines\n\nMethod signatures and docstrings:\n- def test_getVolumeInfo_root(self): Test getVolumeInfo on the root volume, expecting that it is a HFS+ volume\n- def test_getVolumeInfo_Applications(self): Test that using getVolumeInfo on an item inside of a volume returns the volume's info\n- def test_getMacOSVersionAndBuildOfVolume(self): Test that getMacOSVersionAndBuildOfVolume can get the information from the root volume\n- def test_getMountedVolumes(self): Test that getMountedVolumes is returning a list of volumes\n- def test_volumeManager_onRoot(self): Test volumeManager by creating a new instance with root's path", "prompted_full_text": "Implement the Python class `volumeManagerTests` described below.\n\nClass description:\nTest the diskUtility 
routines\n\nMethod signatures and docstrings:\n- def test_getVolumeInfo_root(self): Test getVolumeInfo on the root volume, expecting that it is a HFS+ volume\n- def test_getVolumeInfo_Applications(self): Test that using getVolumeInfo on an item inside of a volume returns the volume's info\n- def test_getMacOSVersionAndBuildOfVolume(self): Test that getMacOSVersionAndBuildOfVolume can get the information from the root volume\n- def test_getMountedVolumes(self): Test that getMountedVolumes is returning a list of volumes\n- def test_volumeManager_onRoot(self): Test volumeManager by creating a new instance with root's path\n\n<|skeleton|>\nclass volumeManagerTests:\n \"\"\"Test the diskUtility routines\"\"\"\n\n def test_getVolumeInfo_root(self):\n \"\"\"Test getVolumeInfo on the root volume, expecting that it is a HFS+ volume\"\"\"\n <|body_0|>\n\n def test_getVolumeInfo_Applications(self):\n \"\"\"Test that using getVolumeInfo on an item inside of a volume returns the volume's info\"\"\"\n <|body_1|>\n\n def test_getMacOSVersionAndBuildOfVolume(self):\n \"\"\"Test that getMacOSVersionAndBuildOfVolume can get the information from the root volume\"\"\"\n <|body_2|>\n\n def test_getMountedVolumes(self):\n \"\"\"Test that getMountedVolumes is returning a list of volumes\"\"\"\n <|body_3|>\n\n def test_volumeManager_onRoot(self):\n \"\"\"Test volumeManager by creating a new instance with root's path\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n result = volumeManager.getVolumeInfo('/')\n self.assertTrue(result is not None, 'getVolumeInfo returned None for the root volume')\n self.assertTrue('diskType' in result, 'getVolumeInfo did not get a diskType for the root volume')\n self.assertEqual(result['diskType'], 'Hard Drive', 'The boot volume diskType was not \"Hard Drive\" as expected, but rather: ' + str(result['diskType']))\n self.assertTrue('volumeFormat' in result, 'getVolumeInfo did not get a volumeFormat for the root volume')\n self.assertEqual(result['volumeFormat'], 'Journaled HFS+', 'The boot volume volumeFormat was not \"Journaled HFS+\" as expected, but rather: ' + str(result['volumeFormat']))\n self.assertTrue('mountPath' in result, 'getVolumeInfo did not get a mountPath for the root volume')\n self.assertEqual(result['mountPath'], '/', 'The boot volume volumeFormat was not \"/\" as expected, but rather: ' + str(result['mountPath']))\n self.assertTrue('volumeName' in result, 'getVolumeInfo did not get a volumeName for the root volume')\n self.assertTrue('diskBsdName' in result, 'getVolumeInfo did not get a diskBsdName for the root volume')\n self.assertTrue(result['diskBsdName'].startswith('disk'), 'The boot volume diskBsdName did not start with \"disk\" as expected, but rather was: ' + str(result['diskBsdName']))\n self.assertTrue('bsdPath' in result, 'getVolumeInfo did not get a bsdPath for the root volume')\n self.assertTrue(result['bsdPath'].startswith('/dev/disk'), 'The boot volume bsdPath did not start with \"/dev/disk\" as expected, but rather was: ' + str(result['bsdPath']))\n self.assertTrue(result['bsdPath'].startswith('/dev/' + result['diskBsdName'] + 's'), 'The boot volume bsdPath did not start with the diskBsdName (%s) as expected, but rather was: %s' % (result['diskBsdName'], str(result['bsdPath'])))\n<|end_body_0|>\n\n<|body_start_1|>\n rootResult = volumeManager.getVolumeInfo('/')\n self.assertTrue(rootResult is not None, 'getVolumeInfo returned None for the root volume')\n applicationsResult = volumeManager.getVolumeInfo('/Applications')\n 
self.assertTrue(applicationsResult is not None, 'getVolumeInfo returned None for the Applications folder')\n self.assertEqual(rootResult, applicationsResult, 'getVolumeInfo did not return the same information for the root volume as it did for Applications')\n<|end_body_1|>\n\n<|body_start_2|>\n version, build = volumeManager.getMacOSVersionAndBuildOfVolume('/')\n self.assertTrue(version is not None, 'getMacOSVersionAndBuildOfVolume got None as the version of MacOS on the root volume')\n self.assertTrue(version.startswith('10.'), 'The value that getMacOSVersionAndBuildOfVolume returned for the version of MacOS on the root volume did not start with \"10.\": ' + version)\n self.assertTrue(build is not None, 'getMacOSVersionAndBuildOfVolume got None as the build of MacOS on the root volume')\n self.assertTrue(re.match('^\\\\d+[A-Z]\\\\d+[a-zA-Z]?$', build), 'The value that getMacOSVersionAndBuildOfVolume returned for the build of MacOS on the root volume did not look correct: ' + build)\n<|end_body_2|>\n\n<|body_start_3|>\n mountedVolumes = volumeManager.getMountedVolumes(excludeRoot=False)\n self.assertTrue(hasattr(mountedVolumes, '__iter__'), 'The output of getMountedVolumes including root was not an array')\n self.assertTrue('/' in mountedVolumes, 'The output of getMountedVolumes including root did not include \"/\"')\n for thisMountPoint in mountedVolumes:\n self.assertTrue(os.path.ismount(thisMountPoint), 'An item returned from getMountedVolumes was not a volume: ' + str(thisMountPoint))\n mountedVolumes = volumeManager.getMountedVolumes()\n self.assertFalse('/' in mountedVolumes, 'The output of getMountedVolumes not including root still included \"/\"')\n<|end_body_3|>\n\n<|body_start_4|>\n root = volumeManager('/')\n self.assertTrue(root.diskType is not None, 'After being created with the root path, the volumeManager object did not have a diskType value')\n self.assertEqual(root.diskType, 'Hard Drive', \"After being created with the root path, the volumeManager object's diskType was not 'Hard Drive' as expectd, but rather: \" + root.diskType)\n self.assertTrue(root.mountPath is not None, 'After being created with the root path, the volumeManager object did not have a mountPath value')\n self.assertEqual(root.mountPath, '/', \"After being created with the root path, the volumeManager object's mountPath was not '/' as expectd, but rather: \" + root.mountPath)\n self.assertTrue(root.volumeFormat is not None, 'After being created with the root path, the volumeManager object did not have a volumeFormat value')\n self.assertEqual(root.volumeFormat, 'Journaled HFS+', \"After being created with the root path, the volumeManager object's volumeFormat was not 'Mac OS Extended (Journaled)' as expectd, but rather: \" + root.volumeFormat)\n self.assertTrue(root.isMounted(), 'The root object is not reporting being mounted')\n<|end_body_4|>\n", "revision_id": "22642cfab2e14fd84c2c8a9a43ff3cb6c9f328c1", "skeleton": "<|skeleton|>\nclass volumeManagerTests:\n \"\"\"Test the diskUtility routines\"\"\"\n\n def test_getVolumeInfo_root(self):\n \"\"\"Test getVolumeInfo on the root volume, expecting that it is a HFS+ volume\"\"\"\n <|body_0|>\n\n def test_getVolumeInfo_Applications(self):\n \"\"\"Test that using getVolumeInfo on an item inside of a volume returns the volume's info\"\"\"\n <|body_1|>\n\n def test_getMacOSVersionAndBuildOfVolume(self):\n \"\"\"Test that getMacOSVersionAndBuildOfVolume can get the information from the root volume\"\"\"\n <|body_2|>\n\n def test_getMountedVolumes(self):\n 
\"\"\"Test that getMountedVolumes is returning a list of volumes\"\"\"\n <|body_3|>\n\n def test_volumeManager_onRoot(self):\n \"\"\"Test volumeManager by creating a new instance with root's path\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class volumeManagerTests:\n \"\"\"Test the diskUtility routines\"\"\"\n\n def test_getVolumeInfo_root(self):\n \"\"\"Test getVolumeInfo on the root volume, expecting that it is a HFS+ volume\"\"\"\n result = volumeManager.getVolumeInfo('/')\n self.assertTrue(result is not None, 'getVolumeInfo returned None for the root volume')\n self.assertTrue('diskType' in result, 'getVolumeInfo did not get a diskType for the root volume')\n self.assertEqual(result['diskType'], 'Hard Drive', 'The boot volume diskType was not \"Hard Drive\" as expected, but rather: ' + str(result['diskType']))\n self.assertTrue('volumeFormat' in result, 'getVolumeInfo did not get a volumeFormat for the root volume')\n self.assertEqual(result['volumeFormat'], 'Journaled HFS+', 'The boot volume volumeFormat was not \"Journaled HFS+\" as expected, but rather: ' + str(result['volumeFormat']))\n self.assertTrue('mountPath' in result, 'getVolumeInfo did not get a mountPath for the root volume')\n self.assertEqual(result['mountPath'], '/', 'The boot volume volumeFormat was not \"/\" as expected, but rather: ' + str(result['mountPath']))\n self.assertTrue('volumeName' in result, 'getVolumeInfo did not get a volumeName for the root volume')\n self.assertTrue('diskBsdName' in result, 'getVolumeInfo did not get a diskBsdName for the root volume')\n self.assertTrue(result['diskBsdName'].startswith('disk'), 'The boot volume diskBsdName did not start with \"disk\" as expected, but rather was: ' + str(result['diskBsdName']))\n self.assertTrue('bsdPath' in result, 'getVolumeInfo did not get a bsdPath for the root volume')\n self.assertTrue(result['bsdPath'].startswith('/dev/disk'), 'The boot volume bsdPath did not start with \"/dev/disk\" as expected, but rather was: ' + str(result['bsdPath']))\n self.assertTrue(result['bsdPath'].startswith('/dev/' + result['diskBsdName'] + 's'), 'The boot volume bsdPath did not start with the diskBsdName (%s) as expected, but rather was: %s' % (result['diskBsdName'], str(result['bsdPath'])))\n\n def test_getVolumeInfo_Applications(self):\n \"\"\"Test that using getVolumeInfo on an item inside of a volume returns the volume's info\"\"\"\n rootResult = volumeManager.getVolumeInfo('/')\n self.assertTrue(rootResult is not None, 'getVolumeInfo returned None for the root volume')\n applicationsResult = volumeManager.getVolumeInfo('/Applications')\n self.assertTrue(applicationsResult is not None, 'getVolumeInfo returned None for the Applications folder')\n self.assertEqual(rootResult, applicationsResult, 'getVolumeInfo did not return the same information for the root volume as it did for Applications')\n\n def test_getMacOSVersionAndBuildOfVolume(self):\n \"\"\"Test that getMacOSVersionAndBuildOfVolume can get the information from the root volume\"\"\"\n version, build = volumeManager.getMacOSVersionAndBuildOfVolume('/')\n self.assertTrue(version is not None, 'getMacOSVersionAndBuildOfVolume got None as the version of MacOS on the root volume')\n self.assertTrue(version.startswith('10.'), 'The value that getMacOSVersionAndBuildOfVolume returned for the version of MacOS on the root volume did not start with \"10.\": ' + 
version)\n self.assertTrue(build is not None, 'getMacOSVersionAndBuildOfVolume got None as the build of MacOS on the root volume')\n self.assertTrue(re.match('^\\\\d+[A-Z]\\\\d+[a-zA-Z]?$', build), 'The value that getMacOSVersionAndBuildOfVolume returned for the build of MacOS on the root volume did not look correct: ' + build)\n\n def test_getMountedVolumes(self):\n \"\"\"Test that getMountedVolumes is returning a list of volumes\"\"\"\n mountedVolumes = volumeManager.getMountedVolumes(excludeRoot=False)\n self.assertTrue(hasattr(mountedVolumes, '__iter__'), 'The output of getMountedVolumes including root was not an array')\n self.assertTrue('/' in mountedVolumes, 'The output of getMountedVolumes including root did not include \"/\"')\n for thisMountPoint in mountedVolumes:\n self.assertTrue(os.path.ismount(thisMountPoint), 'An item returned from getMountedVolumes was not a volume: ' + str(thisMountPoint))\n mountedVolumes = volumeManager.getMountedVolumes()\n self.assertFalse('/' in mountedVolumes, 'The output of getMountedVolumes not including root still included \"/\"')\n\n def test_volumeManager_onRoot(self):\n \"\"\"Test volumeManager by creating a new instance with root's path\"\"\"\n root = volumeManager('/')\n self.assertTrue(root.diskType is not None, 'After being created with the root path, the volumeManager object did not have a diskType value')\n self.assertEqual(root.diskType, 'Hard Drive', \"After being created with the root path, the volumeManager object's diskType was not 'Hard Drive' as expectd, but rather: \" + root.diskType)\n self.assertTrue(root.mountPath is not None, 'After being created with the root path, the volumeManager object did not have a mountPath value')\n self.assertEqual(root.mountPath, '/', \"After being created with the root path, the volumeManager object's mountPath was not '/' as expectd, but rather: \" + root.mountPath)\n self.assertTrue(root.volumeFormat is not None, 'After being created with the root path, the volumeManager object did not have a volumeFormat value')\n self.assertEqual(root.volumeFormat, 'Journaled HFS+', \"After being created with the root path, the volumeManager object's volumeFormat was not 'Mac OS Extended (Journaled)' as expectd, but rather: \" + root.volumeFormat)\n self.assertTrue(root.isMounted(), 'The root object is not reporting being mounted')\n", "source": "the_stack_v2_python_sparse", "source_path": "AddOns/InstaUp2Date/Resources/volumeManager_test.py", "source_repo": "n8felton/instadmg", "split": "val", "star_events_count": 1}
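Editor's note: the volumeManager class under test is not included in the record, so the following is only a hypothetical stand-in sketching what a getMountedVolumes-style helper could look like on macOS — enumerate /Volumes and keep real mount points, optionally including the root volume, which is what the assertions above (hasattr(…, '__iter__'), os.path.ismount, excludeRoot behavior) exercise.

import os

def list_mounted_volumes(exclude_root=True):
    """Return mount points under /Volumes, plus '/' unless excluded."""
    volumes = [] if exclude_root else ['/']
    volumes_dir = '/Volumes'  # macOS convention; absent on other platforms
    if os.path.isdir(volumes_dir):
        for name in os.listdir(volumes_dir):
            path = os.path.join(volumes_dir, name)
            # Symlinks such as /Volumes/<boot volume name> -> / fail ismount's
            # check only when they are not themselves mount points.
            if os.path.ismount(path):
                volumes.append(path)
    return volumes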
{"blob_id": "67a745bc693d1dac4abd6e4836ad8c8c92574098", "bodies": ["super(SGD, self).__init__(name=name)\nself.loss = loss\nself.lr = tf.cast(lr, float)\nself.n_iter = tf.cast(n_iter, tf.int32)\nself.decay = tf.cast(decay, float)\nself.grad_threshold = tf.cast(grad_threshold, float)", "@tf.function\ndef body(i, z, lr, update_size):\n with tf.GradientTape() as tape:\n tape.watch(z)\n loss = self.loss(log_w_x, w_x, z, log_w_y, w_y, y)\n grad = tape.gradient(loss, z)\n max_grad = tf.reduce_max(grad)\n return (i + 1, z - lr * grad, lr * self.decay, max_grad)\n\n@tf.function\ndef cond(i, _z, _lr, update_size):\n return tf.logical_and(i < self.n_iter, update_size > self.grad_threshold)\ni_0 = tf.constant(0)\n_, res, _, _ = tf.while_loop(cond, body, [i_0, x, self.lr, 2.0 * self.grad_threshold])\nreturn res"], "bodies_text": "<|body_start_0|>\n super(SGD, self).__init__(name=name)\n self.loss = loss\n self.lr = tf.cast(lr, float)\n self.n_iter = tf.cast(n_iter, tf.int32)\n self.decay = tf.cast(decay, float)\n self.grad_threshold = tf.cast(grad_threshold, float)\n<|end_body_0|>\n\n<|body_start_1|>\n @tf.function\n def body(i, z, lr, update_size):\n with tf.GradientTape() as tape:\n tape.watch(z)\n loss = self.loss(log_w_x, w_x, z, log_w_y, w_y, y)\n grad = tape.gradient(loss, z)\n max_grad = tf.reduce_max(grad)\n return (i + 1, z - lr * grad, lr * self.decay, max_grad)\n\n @tf.function\n def cond(i, _z, _lr, update_size):\n return tf.logical_and(i < self.n_iter, update_size > self.grad_threshold)\n i_0 = tf.constant(0)\n _, res, _, _ = tf.while_loop(cond, body, [i_0, x, self.lr, 2.0 * self.grad_threshold])\n return res\n<|end_body_1|>\n", "class_docstring": "Optimizer.", "class_name": "SGD", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SGD:\n \"\"\"Optimizer.\"\"\"\n\n def __init__(self, loss, lr=0.1, n_iter=50, decay=0.5, grad_threshold=0.0001, name='SGD'):\n \"\"\"Needs a nice :param loss: :param lr: :param n_iter: :param name:\"\"\"\n <|body_0|>\n\n def __call__(self, log_w_x, w_x, x, log_w_y, w_y, y):\n \"\"\"Needs a nice docstring :param log_w_x: :param w_x: :param x: :param log_w_y: :param w_y: :param y: :return: The optimized point cloud starting from x :rtype: tf.Tensor\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(SGD, self).__init__(name=name)\n self.loss = loss\n self.lr = tf.cast(lr, float)\n self.n_iter = tf.cast(n_iter, tf.int32)\n self.decay = tf.cast(decay, float)\n self.grad_threshold = tf.cast(grad_threshold, float)\n<|end_body_0|>\n\n<|body_start_1|>\n @tf.function\n def body(i, z, lr, update_size):\n with tf.GradientTape() as tape:\n tape.watch(z)\n loss = self.loss(log_w_x, w_x, z, log_w_y, w_y, y)\n grad = tape.gradient(loss, z)\n max_grad = tf.reduce_max(grad)\n return (i + 1, z - lr * grad, lr * self.decay, max_grad)\n\n @tf.function\n def cond(i, _z, _lr, update_size):\n return tf.logical_and(i < self.n_iter, update_size > self.grad_threshold)\n i_0 = tf.constant(0)\n _, res, _, _ = tf.while_loop(cond, body, [i_0, x, self.lr, 2.0 * self.grad_threshold])\n return res\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000297", "length_bytes": 1905, "license_type": "permissive", "methods": [{"docstring": "Needs a nice :param loss: :param lr: :param n_iter: :param name:", "name": "__init__", "signature": "def __init__(self, loss, lr=0.1, n_iter=50, decay=0.5, grad_threshold=0.0001, name='SGD')"}, {"docstring": "Needs a nice docstring :param log_w_x: :param w_x: 
:param x: :param log_w_y: :param w_y: :param y: :return: The optimized point cloud starting from x :rtype: tf.Tensor", "name": "__call__", "signature": "def __call__(self, log_w_x, w_x, x, log_w_y, w_y, y)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_001007", "prompt": "Implement the Python class `SGD` described below.\n\nClass description:\nOptimizer.\n\nMethod signatures and docstrings:\n- def __init__(self, loss, lr=0.1, n_iter=50, decay=0.5, grad_threshold=0.0001, name='SGD'): Needs a nice :param loss: :param lr: :param n_iter: :param name:\n- def __call__(self, log_w_x, w_x, x, log_w_y, w_y, y): Needs a nice docstring :param log_w_x: :param w_x: :param x: :param log_w_y: :param w_y: :param y: :return: The optimized point cloud starting from x :rtype: tf.Tensor", "prompted_full_text": "Implement the Python class `SGD` described below.\n\nClass description:\nOptimizer.\n\nMethod signatures and docstrings:\n- def __init__(self, loss, lr=0.1, n_iter=50, decay=0.5, grad_threshold=0.0001, name='SGD'): Needs a nice :param loss: :param lr: :param n_iter: :param name:\n- def __call__(self, log_w_x, w_x, x, log_w_y, w_y, y): Needs a nice docstring :param log_w_x: :param w_x: :param x: :param log_w_y: :param w_y: :param y: :return: The optimized point cloud starting from x :rtype: tf.Tensor\n\n<|skeleton|>\nclass SGD:\n \"\"\"Optimizer.\"\"\"\n\n def __init__(self, loss, lr=0.1, n_iter=50, decay=0.5, grad_threshold=0.0001, name='SGD'):\n \"\"\"Needs a nice :param loss: :param lr: :param n_iter: :param name:\"\"\"\n <|body_0|>\n\n def __call__(self, log_w_x, w_x, x, log_w_y, w_y, y):\n \"\"\"Needs a nice docstring :param log_w_x: :param w_x: :param x: :param log_w_y: :param w_y: :param y: :return: The optimized point cloud starting from x :rtype: tf.Tensor\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(SGD, self).__init__(name=name)\n self.loss = loss\n self.lr = tf.cast(lr, float)\n self.n_iter = tf.cast(n_iter, tf.int32)\n self.decay = tf.cast(decay, float)\n self.grad_threshold = tf.cast(grad_threshold, float)\n<|end_body_0|>\n\n<|body_start_1|>\n @tf.function\n def body(i, z, lr, update_size):\n with tf.GradientTape() as tape:\n tape.watch(z)\n loss = self.loss(log_w_x, w_x, z, log_w_y, w_y, y)\n grad = tape.gradient(loss, z)\n max_grad = tf.reduce_max(grad)\n return (i + 1, z - lr * grad, lr * self.decay, max_grad)\n\n @tf.function\n def cond(i, _z, _lr, update_size):\n return tf.logical_and(i < self.n_iter, update_size > self.grad_threshold)\n i_0 = tf.constant(0)\n _, res, _, _ = tf.while_loop(cond, body, [i_0, x, self.lr, 2.0 * self.grad_threshold])\n return res\n<|end_body_1|>\n", "revision_id": "5d8300ba247c4c17e1a301a22560c24fd0670bfe", "skeleton": "<|skeleton|>\nclass SGD:\n \"\"\"Optimizer.\"\"\"\n\n def __init__(self, loss, lr=0.1, n_iter=50, decay=0.5, grad_threshold=0.0001, name='SGD'):\n \"\"\"Needs a nice :param loss: :param lr: :param n_iter: :param name:\"\"\"\n <|body_0|>\n\n def __call__(self, log_w_x, w_x, x, log_w_y, w_y, y):\n \"\"\"Needs a nice docstring :param log_w_x: :param w_x: :param x: :param log_w_y: :param w_y: :param y: :return: The optimized point cloud starting from x :rtype: tf.Tensor\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SGD:\n \"\"\"Optimizer.\"\"\"\n\n def __init__(self, loss, lr=0.1, n_iter=50, decay=0.5, grad_threshold=0.0001, name='SGD'):\n 
\"\"\"Needs a nice :param loss: :param lr: :param n_iter: :param name:\"\"\"\n super(SGD, self).__init__(name=name)\n self.loss = loss\n self.lr = tf.cast(lr, float)\n self.n_iter = tf.cast(n_iter, tf.int32)\n self.decay = tf.cast(decay, float)\n self.grad_threshold = tf.cast(grad_threshold, float)\n\n def __call__(self, log_w_x, w_x, x, log_w_y, w_y, y):\n \"\"\"Needs a nice docstring :param log_w_x: :param w_x: :param x: :param log_w_y: :param w_y: :param y: :return: The optimized point cloud starting from x :rtype: tf.Tensor\"\"\"\n @tf.function\n def body(i, z, lr, update_size):\n with tf.GradientTape() as tape:\n tape.watch(z)\n loss = self.loss(log_w_x, w_x, z, log_w_y, w_y, y)\n grad = tape.gradient(loss, z)\n max_grad = tf.reduce_max(grad)\n return (i + 1, z - lr * grad, lr * self.decay, max_grad)\n\n @tf.function\n def cond(i, _z, _lr, update_size):\n return tf.logical_and(i < self.n_iter, update_size > self.grad_threshold)\n i_0 = tf.constant(0)\n _, res, _, _ = tf.while_loop(cond, body, [i_0, x, self.lr, 2.0 * self.grad_threshold])\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "filterflow/resampling/differentiable/optimizer/sgd.py", "source_repo": "JTT94/filterflow", "split": "val", "star_events_count": 39}
{"blob_id": "43634fbe2f5843204dee533fbfce91a6845e3060", "bodies": ["help_wanted = HelpWanted.objects.get(owner=owner, repo=repo)\nserializer = HelpWantedSerializer(help_wanted)\nreturn Response(serializer.data, status=status.HTTP_200_OK)", "data = HelpWanted.objects.filter(owner=owner, repo=repo)\nif data:\n serializer = HelpWantedSerializer(data[0])\n return Response(serializer.data, status=status.HTTP_200_OK)\nurl = '{0}{1}/{2}'.format(constants.MAIN_URL, owner, repo)\ntotal_issues, help_wanted_issues = self.get_total_helpwanted(url, token_auth)\nif total_issues == 0:\n rate = 0\nelse:\n rate = help_wanted_issues / total_issues\ndata = HelpWanted.objects.create(owner=owner, repo=repo, total_issues=total_issues, help_wanted_issues=help_wanted_issues, help_wanted_rate=rate, help_wanted_max_rate=constants.HELP_WANTED_MAX_RATE)\nserializer = HelpWantedSerializer(data)\nreturn Response(serializer.data, status=status.HTTP_201_CREATED)", "url = '{0}{1}/{2}'.format(constants.MAIN_URL, owner, repo)\ntotal_issues, help_wanted_issues = self.get_total_helpwanted(url, token_auth)\nif total_issues == 0:\n rate = 0\nelse:\n rate = help_wanted_issues / total_issues\ndata = HelpWanted.objects.get(owner=owner, repo=repo)\ndata.total_issues = total_issues\ndata.help_wanted_issues = help_wanted_issues\ndata.help_wanted_rate = rate\ndata.help_wanted_max_rate = constants.HELP_WANTED_MAX_RATE\ndata.save()\nserializer = HelpWantedSerializer(data)\nreturn Response(serializer.data, status=status.HTTP_200_OK)", "username = os.environ['NAME']\ntoken = os.environ['TOKEN']\ntotal_issues = 0\nhelp_wanted_issues = 0\ninfo_repo = requests.get(url, headers={'Authorization': 'token ' + token_auth}).json()\ntotal_issues = info_repo['open_issues_count']\npage = '&page=1'\nlabel_url = url + constants.LABEL_HELP_ESPACE_WANTED\nresult = requests.get(label_url + page, headers={'Authorization': 'token ' + token_auth}).json()\n'\\n checks possibilities for different aliases of help wanted\\n '\nif result:\n help_wanted_issues = count_all_label(label_url, result, token_auth)\nelse:\n label_url = url + constants.LABEL_HELPWANTED\n result = requests.get(label_url + page, headers={'Authorization': 'token ' + token_auth}).json()\n if result:\n help_wanted_issues = count_all_label(label_url, result, token_auth)\n else:\n label_url = url + constants.LABEL_HELP_WANTED\n result = requests.get(label_url + page, headers={'Authorization': 'token ' + token_auth}).json()\n if result:\n help_wanted_issues = count_all_label(label_url, result, token_auth)\nreturn (total_issues, help_wanted_issues)"], "bodies_text": "<|body_start_0|>\n help_wanted = HelpWanted.objects.get(owner=owner, repo=repo)\n serializer = HelpWantedSerializer(help_wanted)\n return Response(serializer.data, status=status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n data = HelpWanted.objects.filter(owner=owner, repo=repo)\n if data:\n serializer = HelpWantedSerializer(data[0])\n return Response(serializer.data, status=status.HTTP_200_OK)\n url = '{0}{1}/{2}'.format(constants.MAIN_URL, owner, repo)\n total_issues, help_wanted_issues = self.get_total_helpwanted(url, token_auth)\n if total_issues == 0:\n rate = 0\n else:\n rate = help_wanted_issues / total_issues\n data = HelpWanted.objects.create(owner=owner, repo=repo, total_issues=total_issues, help_wanted_issues=help_wanted_issues, help_wanted_rate=rate, help_wanted_max_rate=constants.HELP_WANTED_MAX_RATE)\n serializer = HelpWantedSerializer(data)\n return Response(serializer.data, 
status=status.HTTP_201_CREATED)\n<|end_body_1|>\n\n<|body_start_2|>\n url = '{0}{1}/{2}'.format(constants.MAIN_URL, owner, repo)\n total_issues, help_wanted_issues = self.get_total_helpwanted(url, token_auth)\n if total_issues == 0:\n rate = 0\n else:\n rate = help_wanted_issues / total_issues\n data = HelpWanted.objects.get(owner=owner, repo=repo)\n data.total_issues = total_issues\n data.help_wanted_issues = help_wanted_issues\n data.help_wanted_rate = rate\n data.help_wanted_max_rate = constants.HELP_WANTED_MAX_RATE\n data.save()\n serializer = HelpWantedSerializer(data)\n return Response(serializer.data, status=status.HTTP_200_OK)\n<|end_body_2|>\n\n<|body_start_3|>\n username = os.environ['NAME']\n token = os.environ['TOKEN']\n total_issues = 0\n help_wanted_issues = 0\n info_repo = requests.get(url, headers={'Authorization': 'token ' + token_auth}).json()\n total_issues = info_repo['open_issues_count']\n page = '&page=1'\n label_url = url + constants.LABEL_HELP_ESPACE_WANTED\n result = requests.get(label_url + page, headers={'Authorization': 'token ' + token_auth}).json()\n '\\n checks possibilities for different aliases of help wanted\\n '\n if result:\n help_wanted_issues = count_all_label(label_url, result, token_auth)\n else:\n label_url = url + constants.LABEL_HELPWANTED\n result = requests.get(label_url + page, headers={'Authorization': 'token ' + token_auth}).json()\n if result:\n help_wanted_issues = count_all_label(label_url, result, token_auth)\n else:\n label_url = url + constants.LABEL_HELP_WANTED\n result = requests.get(label_url + page, headers={'Authorization': 'token ' + token_auth}).json()\n if result:\n help_wanted_issues = count_all_label(label_url, result, token_auth)\n return (total_issues, help_wanted_issues)\n<|end_body_3|>\n", "class_docstring": "", "class_name": "HelpWantedView", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass HelpWantedView:\n\n def get(self, request, owner, repo, token_auth):\n \"\"\"returns help wanted issue rate\"\"\"\n <|body_0|>\n\n def post(self, request, owner, repo, token_auth):\n \"\"\"Create help wanted object\"\"\"\n <|body_1|>\n\n def put(self, request, owner, repo, token_auth):\n \"\"\"Update help hanted object\"\"\"\n <|body_2|>\n\n def get_total_helpwanted(self, url, token_auth):\n \"\"\"returns the number of all issues and the issues with help wanted label\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n help_wanted = HelpWanted.objects.get(owner=owner, repo=repo)\n serializer = HelpWantedSerializer(help_wanted)\n return Response(serializer.data, status=status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n data = HelpWanted.objects.filter(owner=owner, repo=repo)\n if data:\n serializer = HelpWantedSerializer(data[0])\n return Response(serializer.data, status=status.HTTP_200_OK)\n url = '{0}{1}/{2}'.format(constants.MAIN_URL, owner, repo)\n total_issues, help_wanted_issues = self.get_total_helpwanted(url, token_auth)\n if total_issues == 0:\n rate = 0\n else:\n rate = help_wanted_issues / total_issues\n data = HelpWanted.objects.create(owner=owner, repo=repo, total_issues=total_issues, help_wanted_issues=help_wanted_issues, help_wanted_rate=rate, help_wanted_max_rate=constants.HELP_WANTED_MAX_RATE)\n serializer = HelpWantedSerializer(data)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n<|end_body_1|>\n\n<|body_start_2|>\n url = '{0}{1}/{2}'.format(constants.MAIN_URL, owner, repo)\n total_issues, help_wanted_issues = 
self.get_total_helpwanted(url, token_auth)\n if total_issues == 0:\n rate = 0\n else:\n rate = help_wanted_issues / total_issues\n data = HelpWanted.objects.get(owner=owner, repo=repo)\n data.total_issues = total_issues\n data.help_wanted_issues = help_wanted_issues\n data.help_wanted_rate = rate\n data.help_wanted_max_rate = constants.HELP_WANTED_MAX_RATE\n data.save()\n serializer = HelpWantedSerializer(data)\n return Response(serializer.data, status=status.HTTP_200_OK)\n<|end_body_2|>\n\n<|body_start_3|>\n username = os.environ['NAME']\n token = os.environ['TOKEN']\n total_issues = 0\n help_wanted_issues = 0\n info_repo = requests.get(url, headers={'Authorization': 'token ' + token_auth}).json()\n total_issues = info_repo['open_issues_count']\n page = '&page=1'\n label_url = url + constants.LABEL_HELP_ESPACE_WANTED\n result = requests.get(label_url + page, headers={'Authorization': 'token ' + token_auth}).json()\n '\\n checks possibilities for different aliases of help wanted\\n '\n if result:\n help_wanted_issues = count_all_label(label_url, result, token_auth)\n else:\n label_url = url + constants.LABEL_HELPWANTED\n result = requests.get(label_url + page, headers={'Authorization': 'token ' + token_auth}).json()\n if result:\n help_wanted_issues = count_all_label(label_url, result, token_auth)\n else:\n label_url = url + constants.LABEL_HELP_WANTED\n result = requests.get(label_url + page, headers={'Authorization': 'token ' + token_auth}).json()\n if result:\n help_wanted_issues = count_all_label(label_url, result, token_auth)\n return (total_issues, help_wanted_issues)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000298", "length_bytes": 4585, "license_type": "permissive", "methods": [{"docstring": "returns help wanted issue rate", "name": "get", "signature": "def get(self, request, owner, repo, token_auth)"}, {"docstring": "Create help wanted object", "name": "post", "signature": "def post(self, request, owner, repo, token_auth)"}, {"docstring": "Update help hanted object", "name": "put", "signature": "def put(self, request, owner, repo, token_auth)"}, {"docstring": "returns the number of all issues and the issues with help wanted label", "name": "get_total_helpwanted", "signature": "def get_total_helpwanted(self, url, token_auth)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_045056", "prompt": "Implement the Python class `HelpWantedView` described below.\n\nClass description:\nImplement the HelpWantedView class.\n\nMethod signatures and docstrings:\n- def get(self, request, owner, repo, token_auth): returns help wanted issue rate\n- def post(self, request, owner, repo, token_auth): Create help wanted object\n- def put(self, request, owner, repo, token_auth): Update help hanted object\n- def get_total_helpwanted(self, url, token_auth): returns the number of all issues and the issues with help wanted label", "prompted_full_text": "Implement the Python class `HelpWantedView` described below.\n\nClass description:\nImplement the HelpWantedView class.\n\nMethod signatures and docstrings:\n- def get(self, request, owner, repo, token_auth): returns help wanted issue rate\n- def post(self, request, owner, repo, token_auth): Create help wanted object\n- def put(self, request, owner, repo, token_auth): Update help hanted object\n- def get_total_helpwanted(self, url, token_auth): returns the number of all issues and the issues with help wanted label\n\n<|skeleton|>\nclass HelpWantedView:\n\n def get(self, request, owner, repo, token_auth):\n 
\"\"\"returns help wanted issue rate\"\"\"\n <|body_0|>\n\n def post(self, request, owner, repo, token_auth):\n \"\"\"Create help wanted object\"\"\"\n <|body_1|>\n\n def put(self, request, owner, repo, token_auth):\n \"\"\"Update help hanted object\"\"\"\n <|body_2|>\n\n def get_total_helpwanted(self, url, token_auth):\n \"\"\"returns the number of all issues and the issues with help wanted label\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n help_wanted = HelpWanted.objects.get(owner=owner, repo=repo)\n serializer = HelpWantedSerializer(help_wanted)\n return Response(serializer.data, status=status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n data = HelpWanted.objects.filter(owner=owner, repo=repo)\n if data:\n serializer = HelpWantedSerializer(data[0])\n return Response(serializer.data, status=status.HTTP_200_OK)\n url = '{0}{1}/{2}'.format(constants.MAIN_URL, owner, repo)\n total_issues, help_wanted_issues = self.get_total_helpwanted(url, token_auth)\n if total_issues == 0:\n rate = 0\n else:\n rate = help_wanted_issues / total_issues\n data = HelpWanted.objects.create(owner=owner, repo=repo, total_issues=total_issues, help_wanted_issues=help_wanted_issues, help_wanted_rate=rate, help_wanted_max_rate=constants.HELP_WANTED_MAX_RATE)\n serializer = HelpWantedSerializer(data)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n<|end_body_1|>\n\n<|body_start_2|>\n url = '{0}{1}/{2}'.format(constants.MAIN_URL, owner, repo)\n total_issues, help_wanted_issues = self.get_total_helpwanted(url, token_auth)\n if total_issues == 0:\n rate = 0\n else:\n rate = help_wanted_issues / total_issues\n data = HelpWanted.objects.get(owner=owner, repo=repo)\n data.total_issues = total_issues\n data.help_wanted_issues = help_wanted_issues\n data.help_wanted_rate = rate\n data.help_wanted_max_rate = constants.HELP_WANTED_MAX_RATE\n data.save()\n serializer = HelpWantedSerializer(data)\n return Response(serializer.data, status=status.HTTP_200_OK)\n<|end_body_2|>\n\n<|body_start_3|>\n username = os.environ['NAME']\n token = os.environ['TOKEN']\n total_issues = 0\n help_wanted_issues = 0\n info_repo = requests.get(url, headers={'Authorization': 'token ' + token_auth}).json()\n total_issues = info_repo['open_issues_count']\n page = '&page=1'\n label_url = url + constants.LABEL_HELP_ESPACE_WANTED\n result = requests.get(label_url + page, headers={'Authorization': 'token ' + token_auth}).json()\n '\\n checks possibilities for different aliases of help wanted\\n '\n if result:\n help_wanted_issues = count_all_label(label_url, result, token_auth)\n else:\n label_url = url + constants.LABEL_HELPWANTED\n result = requests.get(label_url + page, headers={'Authorization': 'token ' + token_auth}).json()\n if result:\n help_wanted_issues = count_all_label(label_url, result, token_auth)\n else:\n label_url = url + constants.LABEL_HELP_WANTED\n result = requests.get(label_url + page, headers={'Authorization': 'token ' + token_auth}).json()\n if result:\n help_wanted_issues = count_all_label(label_url, result, token_auth)\n return (total_issues, help_wanted_issues)\n<|end_body_3|>\n", "revision_id": "3f031eac9559a10fdcf70a88ee4c548cf93e4ac2", "skeleton": "<|skeleton|>\nclass HelpWantedView:\n\n def get(self, request, owner, repo, token_auth):\n \"\"\"returns help wanted issue rate\"\"\"\n <|body_0|>\n\n def post(self, request, owner, repo, token_auth):\n \"\"\"Create help wanted object\"\"\"\n <|body_1|>\n\n def put(self, request, owner, repo, token_auth):\n \"\"\"Update help hanted 
object\"\"\"\n <|body_2|>\n\n def get_total_helpwanted(self, url, token_auth):\n \"\"\"returns the number of all issues and the issues with help wanted label\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class HelpWantedView:\n def get(self, request, owner, repo, token_auth):\n \"\"\"returns help wanted issue rate\"\"\"\n help_wanted = HelpWanted.objects.get(owner=owner, repo=repo)\n serializer = HelpWantedSerializer(help_wanted)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n def post(self, request, owner, repo, token_auth):\n \"\"\"Create help wanted object\"\"\"\n data = HelpWanted.objects.filter(owner=owner, repo=repo)\n if data:\n serializer = HelpWantedSerializer(data[0])\n return Response(serializer.data, status=status.HTTP_200_OK)\n url = '{0}{1}/{2}'.format(constants.MAIN_URL, owner, repo)\n total_issues, help_wanted_issues = self.get_total_helpwanted(url, token_auth)\n if total_issues == 0:\n rate = 0\n else:\n rate = help_wanted_issues / total_issues\n data = HelpWanted.objects.create(owner=owner, repo=repo, total_issues=total_issues, help_wanted_issues=help_wanted_issues, help_wanted_rate=rate, help_wanted_max_rate=constants.HELP_WANTED_MAX_RATE)\n serializer = HelpWantedSerializer(data)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n def put(self, request, owner, repo, token_auth):\n \"\"\"Update help hanted object\"\"\"\n url = '{0}{1}/{2}'.format(constants.MAIN_URL, owner, repo)\n total_issues, help_wanted_issues = self.get_total_helpwanted(url, token_auth)\n if total_issues == 0:\n rate = 0\n else:\n rate = help_wanted_issues / total_issues\n data = HelpWanted.objects.get(owner=owner, repo=repo)\n data.total_issues = total_issues\n data.help_wanted_issues = help_wanted_issues\n data.help_wanted_rate = rate\n data.help_wanted_max_rate = constants.HELP_WANTED_MAX_RATE\n data.save()\n serializer = HelpWantedSerializer(data)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n def get_total_helpwanted(self, url, token_auth):\n \"\"\"returns the number of all issues and the issues with help wanted label\"\"\"\n username = os.environ['NAME']\n token = os.environ['TOKEN']\n total_issues = 0\n help_wanted_issues = 0\n info_repo = requests.get(url, headers={'Authorization': 'token ' + token_auth}).json()\n total_issues = info_repo['open_issues_count']\n page = '&page=1'\n label_url = url + constants.LABEL_HELP_ESPACE_WANTED\n result = requests.get(label_url + page, headers={'Authorization': 'token ' + token_auth}).json()\n '\\n checks possibilities for different aliases of help wanted\\n '\n if result:\n help_wanted_issues = count_all_label(label_url, result, token_auth)\n else:\n label_url = url + constants.LABEL_HELPWANTED\n result = requests.get(label_url + page, headers={'Authorization': 'token ' + token_auth}).json()\n if result:\n help_wanted_issues = count_all_label(label_url, result, token_auth)\n else:\n label_url = url + constants.LABEL_HELP_WANTED\n result = requests.get(label_url + page, headers={'Authorization': 'token ' + token_auth}).json()\n if result:\n help_wanted_issues = count_all_label(label_url, result, token_auth)\n return (total_issues, help_wanted_issues)\n", "source": "the_stack_v2_python_sparse", "source_path": "hubcare/metrics/issue_metrics/help_wanted/views.py", "source_repo": "fga-eps-mds/2019.1-hubcare-api", "split": "val", "star_events_count": 7}
{"blob_id": "68edf82bd69e43f3e9ee4f17a5379968d1262545", "bodies": ["List = []\ncount = 0\nwhile head:\n List.append(head)\n head = head.next\n count = count + 1\nif count == 1:\n return None\nif List[-n].next is None:\n List[-n - 1].next = None\n return List[0]\nelse:\n List[-n].val = List[-n].next.val\n List[-n].next = List[-n].next.next\nreturn List[0]", "dummy = ListNode(0)\ndummy.next = head\np1 = p2 = dummy\nfor i in range(n):\n p1 = p1.next\nwhile p1.next:\n p1 = p1.next\n p2 = p2.next\np2.next = p2.next.next\nreturn dummy.next"], "bodies_text": "<|body_start_0|>\n List = []\n count = 0\n while head:\n List.append(head)\n head = head.next\n count = count + 1\n if count == 1:\n return None\n if List[-n].next is None:\n List[-n - 1].next = None\n return List[0]\n else:\n List[-n].val = List[-n].next.val\n List[-n].next = List[-n].next.next\n return List[0]\n<|end_body_0|>\n\n<|body_start_1|>\n dummy = ListNode(0)\n dummy.next = head\n p1 = p2 = dummy\n for i in range(n):\n p1 = p1.next\n while p1.next:\n p1 = p1.next\n p2 = p2.next\n p2.next = p2.next.next\n return dummy.next\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def removeNthFromEnd(self, head, n):\n \"\"\":type head: ListNode :type n: int :rtype: ListNode\"\"\"\n <|body_0|>\n\n def removeNthFromEnd2(self, head, n):\n \"\"\":type head: ListNode :type n: int :rtype: ListNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n List = []\n count = 0\n while head:\n List.append(head)\n head = head.next\n count = count + 1\n if count == 1:\n return None\n if List[-n].next is None:\n List[-n - 1].next = None\n return List[0]\n else:\n List[-n].val = List[-n].next.val\n List[-n].next = List[-n].next.next\n return List[0]\n<|end_body_0|>\n\n<|body_start_1|>\n dummy = ListNode(0)\n dummy.next = head\n p1 = p2 = dummy\n for i in range(n):\n p1 = p1.next\n while p1.next:\n p1 = p1.next\n p2 = p2.next\n p2.next = p2.next.next\n return dummy.next\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000299", "length_bytes": 1241, "license_type": "no_license", "methods": [{"docstring": ":type head: ListNode :type n: int :rtype: ListNode", "name": "removeNthFromEnd", "signature": "def removeNthFromEnd(self, head, n)"}, {"docstring": ":type head: ListNode :type n: int :rtype: ListNode", "name": "removeNthFromEnd2", "signature": "def removeNthFromEnd2(self, head, n)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def removeNthFromEnd(self, head, n): :type head: ListNode :type n: int :rtype: ListNode\n- def removeNthFromEnd2(self, head, n): :type head: ListNode :type n: int :rtype: ListNode", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def removeNthFromEnd(self, head, n): :type head: ListNode :type n: int :rtype: ListNode\n- def removeNthFromEnd2(self, head, n): :type head: ListNode :type n: int :rtype: ListNode\n\n<|skeleton|>\nclass Solution:\n\n def removeNthFromEnd(self, head, n):\n \"\"\":type head: ListNode :type n: int :rtype: ListNode\"\"\"\n <|body_0|>\n\n def removeNthFromEnd2(self, head, n):\n \"\"\":type head: ListNode :type n: int :rtype: ListNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n List 
= []\n count = 0\n while head:\n List.append(head)\n head = head.next\n count = count + 1\n if count == 1:\n return None\n if List[-n].next is None:\n List[-n - 1].next = None\n return List[0]\n else:\n List[-n].val = List[-n].next.val\n List[-n].next = List[-n].next.next\n return List[0]\n<|end_body_0|>\n\n<|body_start_1|>\n dummy = ListNode(0)\n dummy.next = head\n p1 = p2 = dummy\n for i in range(n):\n p1 = p1.next\n while p1.next:\n p1 = p1.next\n p2 = p2.next\n p2.next = p2.next.next\n return dummy.next\n<|end_body_1|>\n", "revision_id": "829f918a0d4d94da5fd3004768421974fbe056e7", "skeleton": "<|skeleton|>\nclass Solution:\n\n def removeNthFromEnd(self, head, n):\n \"\"\":type head: ListNode :type n: int :rtype: ListNode\"\"\"\n <|body_0|>\n\n def removeNthFromEnd2(self, head, n):\n \"\"\":type head: ListNode :type n: int :rtype: ListNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def removeNthFromEnd(self, head, n):\n \"\"\":type head: ListNode :type n: int :rtype: ListNode\"\"\"\n List = []\n count = 0\n while head:\n List.append(head)\n head = head.next\n count = count + 1\n if count == 1:\n return None\n if List[-n].next is None:\n List[-n - 1].next = None\n return List[0]\n else:\n List[-n].val = List[-n].next.val\n List[-n].next = List[-n].next.next\n return List[0]\n\n def removeNthFromEnd2(self, head, n):\n \"\"\":type head: ListNode :type n: int :rtype: ListNode\"\"\"\n dummy = ListNode(0)\n dummy.next = head\n p1 = p2 = dummy\n for i in range(n):\n p1 = p1.next\n while p1.next:\n p1 = p1.next\n p2 = p2.next\n p2.next = p2.next.next\n return dummy.next\n", "source": "the_stack_v2_python_sparse", "source_path": "leetcode/medium/19_删除链表的倒数第N个节点.py", "source_repo": "Weikoi/OJ_Python", "split": "val", "star_events_count": 0}
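Both bodies in the record above solve the same remove-nth-from-end problem; the second avoids materializing the whole list by keeping two pointers n nodes apart. A self-contained sketch of that two-pointer pass, assuming only the usual ListNode shape (defined here, since the record leaves it implicit):

class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

def remove_nth_from_end(head, n):
    dummy = ListNode(0, head)
    fast = slow = dummy
    for _ in range(n):              # advance fast n nodes ahead of slow
        fast = fast.next
    while fast.next:                # march both until fast reaches the tail
        fast = fast.next
        slow = slow.next
    slow.next = slow.next.next      # slow now sits just before the target
    return dummy.next

head = ListNode(1, ListNode(2, ListNode(3)))
head = remove_nth_from_end(head, 2)   # removes the 2; the list is now 1 -> 3
assert head.val == 1 and head.next.val == 3 and head.next.next is None

The dummy head is what lets the same splice handle removal of the first node without a special case.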
{"blob_id": "e6c91b7b4412d4a58b171de49d1289c051abad3b", "bodies": ["fornecedor = get_a_vendor('id', id)\nif not fornecedor:\n api.abort(404)\nelse:\n return fornecedor", "fornecedor = get_a_vendor('id', id)\ndata = request.json\nif not fornecedor:\n api.abort(404, 'Fornecedor não encontrado.')\nif not data:\n api.abort(400, 'Payload vazio.')\nreturn update_vendor(fornecedor, data=data)"], "bodies_text": "<|body_start_0|>\n fornecedor = get_a_vendor('id', id)\n if not fornecedor:\n api.abort(404)\n else:\n return fornecedor\n<|end_body_0|>\n\n<|body_start_1|>\n fornecedor = get_a_vendor('id', id)\n data = request.json\n if not fornecedor:\n api.abort(404, 'Fornecedor não encontrado.')\n if not data:\n api.abort(400, 'Payload vazio.')\n return update_vendor(fornecedor, data=data)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Fornecedor", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Fornecedor:\n\n def get(self, id):\n \"\"\"Obtem informações de um fornecedor com base no seu id\"\"\"\n <|body_0|>\n\n def patch(self, id):\n \"\"\"Atualiza um fornecedor Obs: para inativar, coloque 'ativo': false\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n fornecedor = get_a_vendor('id', id)\n if not fornecedor:\n api.abort(404)\n else:\n return fornecedor\n<|end_body_0|>\n\n<|body_start_1|>\n fornecedor = get_a_vendor('id', id)\n data = request.json\n if not fornecedor:\n api.abort(404, 'Fornecedor não encontrado.')\n if not data:\n api.abort(400, 'Payload vazio.')\n return update_vendor(fornecedor, data=data)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000300", "length_bytes": 3446, "license_type": "no_license", "methods": [{"docstring": "Obtem informações de um fornecedor com base no seu id", "name": "get", "signature": "def get(self, id)"}, {"docstring": "Atualiza um fornecedor Obs: para inativar, coloque 'ativo': false", "name": "patch", "signature": "def patch(self, id)"}], "n_methods": 2, "prompt": "Implement the Python class `Fornecedor` described below.\n\nClass description:\nImplement the Fornecedor class.\n\nMethod signatures and docstrings:\n- def get(self, id): Obtem informações de um fornecedor com base no seu id\n- def patch(self, id): Atualiza um fornecedor Obs: para inativar, coloque 'ativo': false", "prompted_full_text": "Implement the Python class `Fornecedor` described below.\n\nClass description:\nImplement the Fornecedor class.\n\nMethod signatures and docstrings:\n- def get(self, id): Obtem informações de um fornecedor com base no seu id\n- def patch(self, id): Atualiza um fornecedor Obs: para inativar, coloque 'ativo': false\n\n<|skeleton|>\nclass Fornecedor:\n\n def get(self, id):\n \"\"\"Obtem informações de um fornecedor com base no seu id\"\"\"\n <|body_0|>\n\n def patch(self, id):\n \"\"\"Atualiza um fornecedor Obs: para inativar, coloque 'ativo': false\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n fornecedor = get_a_vendor('id', id)\n if not fornecedor:\n api.abort(404)\n else:\n return fornecedor\n<|end_body_0|>\n\n<|body_start_1|>\n fornecedor = get_a_vendor('id', id)\n data = request.json\n if not fornecedor:\n api.abort(404, 'Fornecedor não encontrado.')\n if not data:\n api.abort(400, 'Payload vazio.')\n return update_vendor(fornecedor, data=data)\n<|end_body_1|>\n", "revision_id": "a86fcb085af8567a661d47876f8b9f13d7b062a9", "skeleton": "<|skeleton|>\nclass Fornecedor:\n\n def get(self, id):\n \"\"\"Obtem informações de um fornecedor com base no 
seu id\"\"\"\n <|body_0|>\n\n def patch(self, id):\n \"\"\"Atualiza um fornecedor Obs: para inativar, coloque 'ativo': false\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Fornecedor:\n def get(self, id):\n \"\"\"Obtem informações de um fornecedor com base no seu id\"\"\"\n fornecedor = get_a_vendor('id', id)\n if not fornecedor:\n api.abort(404)\n else:\n return fornecedor\n\n def patch(self, id):\n \"\"\"Atualiza um fornecedor Obs: para inativar, coloque 'ativo': false\"\"\"\n fornecedor = get_a_vendor('id', id)\n data = request.json\n if not fornecedor:\n api.abort(404, 'Fornecedor não encontrado.')\n if not data:\n api.abort(400, 'Payload vazio.')\n return update_vendor(fornecedor, data=data)\n", "source": "the_stack_v2_python_sparse", "source_path": "backend/app/main/controller/fornecedor_controller.py", "source_repo": "AnderSilva/ozomali", "split": "val", "star_events_count": 1}
{"blob_id": "f8c7437ef757340b5bdf0fbbee2c2d58793f3038", "bodies": ["Parametre.__init__(self, 'objectif', 'objective')\nself.tronquer = True\nself.schema = '()'\nself.aide_courte = 'consulte ou modifie les objectifs'\nself.aide_longue = 'Les objectifs sont des ordres permanents qui peuvent rester en sommeil pendant un temps indéfini. Généralement, seul le premier objectif est actif, les autres sont en attente, même si ils peuvent influencer la manoeuvre. Concrètement, un objectif est un but fixé pour un équipage, comme \"se rendre à un point indiqué\", \"attaquer le navire que l\\'on voit à l\\'horizon\", \"chercher à accoster dans le port voisin\". La différence avec de simples ordres, c\\'est qu\\'ils sont maintenus pendant toute la durée de leur accomplissement et sont faits pour évoluer au cours d la manoeuvre. Par exemple, aller vers une côte ou un autre navire peut être une combinaison d\\'ordres assez simples, mais il faut tenir compte de plusieurs facteurs qui pourraient modifier la manoeuvre, comme la position du vent si il tourne, l\\'apparition d\\'un autre navire sur la trajectoire assignée, la modification du cap si le navire cible se déplace. Un objectif crée généralement des contrôles : à la différence des objectifs, les contrôles ne peuvent se contredire. Vous pourriez avoir deux objectifs actifs : l\\'un demandant à se rendre vers une côte située vers l\\'est, l\\'autre demandant d\\'attaquer un navire à l\\'ouest. Dans ce cas, le commandant choisit l\\'objectif le plus prioritaire (celui en haut de la liste) et donne les ordres pour atteindre la côte à l\\'est. En revanche, si l\\'objectif pour attaquer le navire cible est maintenu, pendant l\\'accomplissement du premier objectif, une manoeuvre pourrait permettre à la cible d\\'être à portée de canon. C\\'est pour cela que les objectifs sont maintenus même si ils s\\'opposent en apparence. Vous pouvez entrer cette commande sans paramètre pour voir les objectifs actuellement donnés à votre équipage. Notez que le premier (numéroté |ent|1|ff|) est considéré comme l\\'objectif actif, celui par lequel les décisions conflictuelles sont tranchées. Les autres objectifs sont conservés mais ne font pas partie des décisions du commandant, sauf si il n\\'y a aucun conflit dans les objectifs non prioritaires. 
Vous pouvez également entrer cette commande en précisant un numéro : l\\'objectif du numéro indiqué sera retiré et ne fera plus parti des décisions prises par le commandant.'", "salle = personnage.salle\nnavire = getattr(salle, 'navire', None)\nif navire is None:\n personnage << \"|err|Vous n'êtes pas sur un navire.|ff|\"\n return\nif not navire.a_le_droit(personnage, 'officier'):\n personnage << \"|err|Vous ne pouvez donner d'ordre sur ce navire.|ff|\"\n return\nequipage = salle.navire.equipage\nnombre = dic_masques['nombre']\nif nombre:\n nombre = nombre.nombre\n if nombre < 0 or nombre > len(equipage.objectifs):\n personnage << \"|err|Numéro d'objectif invalide.|ff|\"\n return\n nombre = nombre - 1\n objectif = equipage.objectifs[nombre]\n equipage.retirer_objectif(nombre)\n personnage << \"L'objectif a bien été supprimé : {}.\".format(objectif.afficher())\n return\nif not equipage.objectifs:\n personnage << 'Aucun objectif actif sur cet équipage.'\n return\nmsg = 'Objectifs actifs :\\n'\nfor i, objectif in enumerate(equipage.objectifs):\n msg += '\\n|ent|{}|ff| - {}'.format(str(i + 1).rjust(2), objectif.afficher())\npersonnage << msg"], "bodies_text": "<|body_start_0|>\n Parametre.__init__(self, 'objectif', 'objective')\n self.tronquer = True\n self.schema = '()'\n self.aide_courte = 'consulte ou modifie les objectifs'\n self.aide_longue = 'Les objectifs sont des ordres permanents qui peuvent rester en sommeil pendant un temps indéfini. Généralement, seul le premier objectif est actif, les autres sont en attente, même si ils peuvent influencer la manoeuvre. Concrètement, un objectif est un but fixé pour un équipage, comme \"se rendre à un point indiqué\", \"attaquer le navire que l\\'on voit à l\\'horizon\", \"chercher à accoster dans le port voisin\". La différence avec de simples ordres, c\\'est qu\\'ils sont maintenus pendant toute la durée de leur accomplissement et sont faits pour évoluer au cours d la manoeuvre. Par exemple, aller vers une côte ou un autre navire peut être une combinaison d\\'ordres assez simples, mais il faut tenir compte de plusieurs facteurs qui pourraient modifier la manoeuvre, comme la position du vent si il tourne, l\\'apparition d\\'un autre navire sur la trajectoire assignée, la modification du cap si le navire cible se déplace. Un objectif crée généralement des contrôles : à la différence des objectifs, les contrôles ne peuvent se contredire. Vous pourriez avoir deux objectifs actifs : l\\'un demandant à se rendre vers une côte située vers l\\'est, l\\'autre demandant d\\'attaquer un navire à l\\'ouest. Dans ce cas, le commandant choisit l\\'objectif le plus prioritaire (celui en haut de la liste) et donne les ordres pour atteindre la côte à l\\'est. En revanche, si l\\'objectif pour attaquer le navire cible est maintenu, pendant l\\'accomplissement du premier objectif, une manoeuvre pourrait permettre à la cible d\\'être à portée de canon. C\\'est pour cela que les objectifs sont maintenus même si ils s\\'opposent en apparence. Vous pouvez entrer cette commande sans paramètre pour voir les objectifs actuellement donnés à votre équipage. Notez que le premier (numéroté |ent|1|ff|) est considéré comme l\\'objectif actif, celui par lequel les décisions conflictuelles sont tranchées. Les autres objectifs sont conservés mais ne font pas partie des décisions du commandant, sauf si il n\\'y a aucun conflit dans les objectifs non prioritaires. 
Vous pouvez également entrer cette commande en précisant un numéro : l\\'objectif du numéro indiqué sera retiré et ne fera plus parti des décisions prises par le commandant.'\n<|end_body_0|>\n\n<|body_start_1|>\n salle = personnage.salle\n navire = getattr(salle, 'navire', None)\n if navire is None:\n personnage << \"|err|Vous n'êtes pas sur un navire.|ff|\"\n return\n if not navire.a_le_droit(personnage, 'officier'):\n personnage << \"|err|Vous ne pouvez donner d'ordre sur ce navire.|ff|\"\n return\n equipage = salle.navire.equipage\n nombre = dic_masques['nombre']\n if nombre:\n nombre = nombre.nombre\n if nombre < 0 or nombre > len(equipage.objectifs):\n personnage << \"|err|Numéro d'objectif invalide.|ff|\"\n return\n nombre = nombre - 1\n objectif = equipage.objectifs[nombre]\n equipage.retirer_objectif(nombre)\n personnage << \"L'objectif a bien été supprimé : {}.\".format(objectif.afficher())\n return\n if not equipage.objectifs:\n personnage << 'Aucun objectif actif sur cet équipage.'\n return\n msg = 'Objectifs actifs :\\n'\n for i, objectif in enumerate(equipage.objectifs):\n msg += '\\n|ent|{}|ff| - {}'.format(str(i + 1).rjust(2), objectif.afficher())\n personnage << msg\n<|end_body_1|>\n", "class_docstring": "Commande 'équipage objectif'.", "class_name": "PrmObjectif", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PrmObjectif:\n \"\"\"Commande 'équipage objectif'.\"\"\"\n\n def __init__(self):\n \"\"\"Constructeur du paramètre\"\"\"\n <|body_0|>\n\n def interpreter(self, personnage, dic_masques):\n \"\"\"Interprétation du paramètre\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Parametre.__init__(self, 'objectif', 'objective')\n self.tronquer = True\n self.schema = '()'\n self.aide_courte = 'consulte ou modifie les objectifs'\n self.aide_longue = 'Les objectifs sont des ordres permanents qui peuvent rester en sommeil pendant un temps indéfini. Généralement, seul le premier objectif est actif, les autres sont en attente, même si ils peuvent influencer la manoeuvre. Concrètement, un objectif est un but fixé pour un équipage, comme \"se rendre à un point indiqué\", \"attaquer le navire que l\\'on voit à l\\'horizon\", \"chercher à accoster dans le port voisin\". La différence avec de simples ordres, c\\'est qu\\'ils sont maintenus pendant toute la durée de leur accomplissement et sont faits pour évoluer au cours d la manoeuvre. Par exemple, aller vers une côte ou un autre navire peut être une combinaison d\\'ordres assez simples, mais il faut tenir compte de plusieurs facteurs qui pourraient modifier la manoeuvre, comme la position du vent si il tourne, l\\'apparition d\\'un autre navire sur la trajectoire assignée, la modification du cap si le navire cible se déplace. Un objectif crée généralement des contrôles : à la différence des objectifs, les contrôles ne peuvent se contredire. Vous pourriez avoir deux objectifs actifs : l\\'un demandant à se rendre vers une côte située vers l\\'est, l\\'autre demandant d\\'attaquer un navire à l\\'ouest. Dans ce cas, le commandant choisit l\\'objectif le plus prioritaire (celui en haut de la liste) et donne les ordres pour atteindre la côte à l\\'est. En revanche, si l\\'objectif pour attaquer le navire cible est maintenu, pendant l\\'accomplissement du premier objectif, une manoeuvre pourrait permettre à la cible d\\'être à portée de canon. C\\'est pour cela que les objectifs sont maintenus même si ils s\\'opposent en apparence. 
Vous pouvez entrer cette commande sans paramètre pour voir les objectifs actuellement donnés à votre équipage. Notez que le premier (numéroté |ent|1|ff|) est considéré comme l\\'objectif actif, celui par lequel les décisions conflictuelles sont tranchées. Les autres objectifs sont conservés mais ne font pas partie des décisions du commandant, sauf si il n\\'y a aucun conflit dans les objectifs non prioritaires. Vous pouvez également entrer cette commande en précisant un numéro : l\\'objectif du numéro indiqué sera retiré et ne fera plus parti des décisions prises par le commandant.'\n<|end_body_0|>\n\n<|body_start_1|>\n salle = personnage.salle\n navire = getattr(salle, 'navire', None)\n if navire is None:\n personnage << \"|err|Vous n'êtes pas sur un navire.|ff|\"\n return\n if not navire.a_le_droit(personnage, 'officier'):\n personnage << \"|err|Vous ne pouvez donner d'ordre sur ce navire.|ff|\"\n return\n equipage = salle.navire.equipage\n nombre = dic_masques['nombre']\n if nombre:\n nombre = nombre.nombre\n if nombre < 0 or nombre > len(equipage.objectifs):\n personnage << \"|err|Numéro d'objectif invalide.|ff|\"\n return\n nombre = nombre - 1\n objectif = equipage.objectifs[nombre]\n equipage.retirer_objectif(nombre)\n personnage << \"L'objectif a bien été supprimé : {}.\".format(objectif.afficher())\n return\n if not equipage.objectifs:\n personnage << 'Aucun objectif actif sur cet équipage.'\n return\n msg = 'Objectifs actifs :\\n'\n for i, objectif in enumerate(equipage.objectifs):\n msg += '\\n|ent|{}|ff| - {}'.format(str(i + 1).rjust(2), objectif.afficher())\n personnage << msg\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000301", "length_bytes": 6446, "license_type": "permissive", "methods": [{"docstring": "Constructeur du paramètre", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Interprétation du paramètre", "name": "interpreter", "signature": "def interpreter(self, personnage, dic_masques)"}], "n_methods": 2, "prompt": "Implement the Python class `PrmObjectif` described below.\n\nClass description:\nCommande 'équipage objectif'.\n\nMethod signatures and docstrings:\n- def __init__(self): Constructeur du paramètre\n- def interpreter(self, personnage, dic_masques): Interprétation du paramètre", "prompted_full_text": "Implement the Python class `PrmObjectif` described below.\n\nClass description:\nCommande 'équipage objectif'.\n\nMethod signatures and docstrings:\n- def __init__(self): Constructeur du paramètre\n- def interpreter(self, personnage, dic_masques): Interprétation du paramètre\n\n<|skeleton|>\nclass PrmObjectif:\n \"\"\"Commande 'équipage objectif'.\"\"\"\n\n def __init__(self):\n \"\"\"Constructeur du paramètre\"\"\"\n <|body_0|>\n\n def interpreter(self, personnage, dic_masques):\n \"\"\"Interprétation du paramètre\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Parametre.__init__(self, 'objectif', 'objective')\n self.tronquer = True\n self.schema = '()'\n self.aide_courte = 'consulte ou modifie les objectifs'\n self.aide_longue = 'Les objectifs sont des ordres permanents qui peuvent rester en sommeil pendant un temps indéfini. Généralement, seul le premier objectif est actif, les autres sont en attente, même si ils peuvent influencer la manoeuvre. Concrètement, un objectif est un but fixé pour un équipage, comme \"se rendre à un point indiqué\", \"attaquer le navire que l\\'on voit à l\\'horizon\", \"chercher à accoster dans le port voisin\". 
La différence avec de simples ordres, c\\'est qu\\'ils sont maintenus pendant toute la durée de leur accomplissement et sont faits pour évoluer au cours d la manoeuvre. Par exemple, aller vers une côte ou un autre navire peut être une combinaison d\\'ordres assez simples, mais il faut tenir compte de plusieurs facteurs qui pourraient modifier la manoeuvre, comme la position du vent si il tourne, l\\'apparition d\\'un autre navire sur la trajectoire assignée, la modification du cap si le navire cible se déplace. Un objectif crée généralement des contrôles : à la différence des objectifs, les contrôles ne peuvent se contredire. Vous pourriez avoir deux objectifs actifs : l\\'un demandant à se rendre vers une côte située vers l\\'est, l\\'autre demandant d\\'attaquer un navire à l\\'ouest. Dans ce cas, le commandant choisit l\\'objectif le plus prioritaire (celui en haut de la liste) et donne les ordres pour atteindre la côte à l\\'est. En revanche, si l\\'objectif pour attaquer le navire cible est maintenu, pendant l\\'accomplissement du premier objectif, une manoeuvre pourrait permettre à la cible d\\'être à portée de canon. C\\'est pour cela que les objectifs sont maintenus même si ils s\\'opposent en apparence. Vous pouvez entrer cette commande sans paramètre pour voir les objectifs actuellement donnés à votre équipage. Notez que le premier (numéroté |ent|1|ff|) est considéré comme l\\'objectif actif, celui par lequel les décisions conflictuelles sont tranchées. Les autres objectifs sont conservés mais ne font pas partie des décisions du commandant, sauf si il n\\'y a aucun conflit dans les objectifs non prioritaires. Vous pouvez également entrer cette commande en précisant un numéro : l\\'objectif du numéro indiqué sera retiré et ne fera plus parti des décisions prises par le commandant.'\n<|end_body_0|>\n\n<|body_start_1|>\n salle = personnage.salle\n navire = getattr(salle, 'navire', None)\n if navire is None:\n personnage << \"|err|Vous n'êtes pas sur un navire.|ff|\"\n return\n if not navire.a_le_droit(personnage, 'officier'):\n personnage << \"|err|Vous ne pouvez donner d'ordre sur ce navire.|ff|\"\n return\n equipage = salle.navire.equipage\n nombre = dic_masques['nombre']\n if nombre:\n nombre = nombre.nombre\n if nombre < 0 or nombre > len(equipage.objectifs):\n personnage << \"|err|Numéro d'objectif invalide.|ff|\"\n return\n nombre = nombre - 1\n objectif = equipage.objectifs[nombre]\n equipage.retirer_objectif(nombre)\n personnage << \"L'objectif a bien été supprimé : {}.\".format(objectif.afficher())\n return\n if not equipage.objectifs:\n personnage << 'Aucun objectif actif sur cet équipage.'\n return\n msg = 'Objectifs actifs :\\n'\n for i, objectif in enumerate(equipage.objectifs):\n msg += '\\n|ent|{}|ff| - {}'.format(str(i + 1).rjust(2), objectif.afficher())\n personnage << msg\n<|end_body_1|>\n", "revision_id": "7e93bff08cdf891352efba587e89c40f3b4a2301", "skeleton": "<|skeleton|>\nclass PrmObjectif:\n \"\"\"Commande 'équipage objectif'.\"\"\"\n\n def __init__(self):\n \"\"\"Constructeur du paramètre\"\"\"\n <|body_0|>\n\n def interpreter(self, personnage, dic_masques):\n \"\"\"Interprétation du paramètre\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class PrmObjectif:\n \"\"\"Commande 'équipage objectif'.\"\"\"\n\n def __init__(self):\n \"\"\"Constructeur du paramètre\"\"\"\n Parametre.__init__(self, 'objectif', 
'objective')\n self.tronquer = True\n self.schema = '()'\n self.aide_courte = 'consulte ou modifie les objectifs'\n self.aide_longue = 'Les objectifs sont des ordres permanents qui peuvent rester en sommeil pendant un temps indéfini. Généralement, seul le premier objectif est actif, les autres sont en attente, même si ils peuvent influencer la manoeuvre. Concrètement, un objectif est un but fixé pour un équipage, comme \"se rendre à un point indiqué\", \"attaquer le navire que l\\'on voit à l\\'horizon\", \"chercher à accoster dans le port voisin\". La différence avec de simples ordres, c\\'est qu\\'ils sont maintenus pendant toute la durée de leur accomplissement et sont faits pour évoluer au cours d la manoeuvre. Par exemple, aller vers une côte ou un autre navire peut être une combinaison d\\'ordres assez simples, mais il faut tenir compte de plusieurs facteurs qui pourraient modifier la manoeuvre, comme la position du vent si il tourne, l\\'apparition d\\'un autre navire sur la trajectoire assignée, la modification du cap si le navire cible se déplace. Un objectif crée généralement des contrôles : à la différence des objectifs, les contrôles ne peuvent se contredire. Vous pourriez avoir deux objectifs actifs : l\\'un demandant à se rendre vers une côte située vers l\\'est, l\\'autre demandant d\\'attaquer un navire à l\\'ouest. Dans ce cas, le commandant choisit l\\'objectif le plus prioritaire (celui en haut de la liste) et donne les ordres pour atteindre la côte à l\\'est. En revanche, si l\\'objectif pour attaquer le navire cible est maintenu, pendant l\\'accomplissement du premier objectif, une manoeuvre pourrait permettre à la cible d\\'être à portée de canon. C\\'est pour cela que les objectifs sont maintenus même si ils s\\'opposent en apparence. Vous pouvez entrer cette commande sans paramètre pour voir les objectifs actuellement donnés à votre équipage. Notez que le premier (numéroté |ent|1|ff|) est considéré comme l\\'objectif actif, celui par lequel les décisions conflictuelles sont tranchées. Les autres objectifs sont conservés mais ne font pas partie des décisions du commandant, sauf si il n\\'y a aucun conflit dans les objectifs non prioritaires. 
Vous pouvez également entrer cette commande en précisant un numéro : l\\'objectif du numéro indiqué sera retiré et ne fera plus parti des décisions prises par le commandant.'\n\n def interpreter(self, personnage, dic_masques):\n \"\"\"Interprétation du paramètre\"\"\"\n salle = personnage.salle\n navire = getattr(salle, 'navire', None)\n if navire is None:\n personnage << \"|err|Vous n'êtes pas sur un navire.|ff|\"\n return\n if not navire.a_le_droit(personnage, 'officier'):\n personnage << \"|err|Vous ne pouvez donner d'ordre sur ce navire.|ff|\"\n return\n equipage = salle.navire.equipage\n nombre = dic_masques['nombre']\n if nombre:\n nombre = nombre.nombre\n if nombre < 0 or nombre > len(equipage.objectifs):\n personnage << \"|err|Numéro d'objectif invalide.|ff|\"\n return\n nombre = nombre - 1\n objectif = equipage.objectifs[nombre]\n equipage.retirer_objectif(nombre)\n personnage << \"L'objectif a bien été supprimé : {}.\".format(objectif.afficher())\n return\n if not equipage.objectifs:\n personnage << 'Aucun objectif actif sur cet équipage.'\n return\n msg = 'Objectifs actifs :\\n'\n for i, objectif in enumerate(equipage.objectifs):\n msg += '\\n|ent|{}|ff| - {}'.format(str(i + 1).rjust(2), objectif.afficher())\n personnage << msg\n", "source": "the_stack_v2_python_sparse", "source_path": "src/secondaires/navigation/commandes/equipage/objectif.py", "source_repo": "vincent-lg/tsunami", "split": "val", "star_events_count": 5}
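Beneath the long French help text, the interpreter body in the PrmObjectif record above does one technical thing: remove an objective by its 1-based display number. Worth noting: the record's bounds check (nombre < 0 or nombre > len(...)) lets 0 through, and nombre - 1 then becomes -1, which Python's negative indexing resolves to the last objective. A small sketch of the removal with the stricter check; retirer_objectif here is a free function for illustration, not the equipage method of the same name.

def retirer_objectif(objectifs, numero):
    # Remove and return the objective at 1-based position `numero`,
    # or None when the number is out of range (including 0).
    if not 1 <= numero <= len(objectifs):
        return None
    return objectifs.pop(numero - 1)

objectifs = ['rejoindre la côte', 'attaquer le navire']
assert retirer_objectif(objectifs, 3) is None          # out of range
assert retirer_objectif(objectifs, 1) == 'rejoindre la côte'
assert objectifs == ['attaquer le navire']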
{"blob_id": "0314a088022a6428982143b4d6f403ecb1b5be49", "bodies": ["words = sentence.split()\ndict.sort(key=lambda x: (x, len(x)))\nfor i, word in enumerate(words):\n new_word = list(filter(lambda x: word.startswith(x), dict))\n if len(new_word) > 0:\n words[i] = new_word[0]\nreturn ' '.join(words)", "dict_set = set(dict)\n\ndef replace(word):\n \"\"\"\n Replace the word which runs in O(W^2)\n\n Args:\n word(str):\n\n Returns:\n str:\n\n \"\"\"\n for i in range(1, len(word) + 1):\n if word[:i] in dict_set:\n return word[:i]\n return word\nreturn ' '.join(map(replace, sentence.split()))", "TRIE = lambda: defaultdict(TRIE)\nEND = True\ntrie = TRIE()\nfor root in roots:\n current = trie\n for char in root:\n current = current[char]\n current[END] = root\n\ndef replace(word):\n \"\"\"\n Replace the string. This runs in O(n)\n\n Args:\n word:\n\n Returns:\n\n \"\"\"\n current = trie\n for char in word:\n if char not in current:\n break\n current = current[char]\n if END in current:\n return current[END]\n return word\nreturn ' '.join(map(replace, sentence.split()))"], "bodies_text": "<|body_start_0|>\n words = sentence.split()\n dict.sort(key=lambda x: (x, len(x)))\n for i, word in enumerate(words):\n new_word = list(filter(lambda x: word.startswith(x), dict))\n if len(new_word) > 0:\n words[i] = new_word[0]\n return ' '.join(words)\n<|end_body_0|>\n\n<|body_start_1|>\n dict_set = set(dict)\n\n def replace(word):\n \"\"\"\n Replace the word which runs in O(W^2)\n\n Args:\n word(str):\n\n Returns:\n str:\n\n \"\"\"\n for i in range(1, len(word) + 1):\n if word[:i] in dict_set:\n return word[:i]\n return word\n return ' '.join(map(replace, sentence.split()))\n<|end_body_1|>\n\n<|body_start_2|>\n TRIE = lambda: defaultdict(TRIE)\n END = True\n trie = TRIE()\n for root in roots:\n current = trie\n for char in root:\n current = current[char]\n current[END] = root\n\n def replace(word):\n \"\"\"\n Replace the string. This runs in O(n)\n\n Args:\n word:\n\n Returns:\n\n \"\"\"\n current = trie\n for char in word:\n if char not in current:\n break\n current = current[char]\n if END in current:\n return current[END]\n return word\n return ' '.join(map(replace, sentence.split()))\n<|end_body_2|>\n", "class_docstring": "Runtime: 84 ms, faster than 79.35% of Python3 online submissions for Replace Words. Memory Usage: 28.6 MB, less than 50.00% of Python3 online submissions for Replace Words.", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n \"\"\"Runtime: 84 ms, faster than 79.35% of Python3 online submissions for Replace Words. 
Memory Usage: 28.6 MB, less than 50.00% of Python3 online submissions for Replace Words.\"\"\"\n\n def brute_force(self, dict: List[str], sentence: str) -> str:\n \"\"\"Brute force solution that I come up with Args: dict(list[str]): sentence(str): Returns: str:\"\"\"\n <|body_0|>\n\n def sub_optimal(self, dict: List[str], sentence: str) -> str:\n \"\"\"Slightly better solution which runs in O(M + W^2) where M = len(dict) W = max(len(word)in sentence) Args: dict(list[str]): sentence(str): Returns: str:\"\"\"\n <|body_1|>\n\n def optimal(self, roots, sentence):\n \"\"\"Optimal solution which runs in O(Mm + Nn) where M = len(roots), m = len(roots[i]) N = len(sentence), n = len(word) Args: roots(list[str]): sentence(str): Returns: str:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n words = sentence.split()\n dict.sort(key=lambda x: (x, len(x)))\n for i, word in enumerate(words):\n new_word = list(filter(lambda x: word.startswith(x), dict))\n if len(new_word) > 0:\n words[i] = new_word[0]\n return ' '.join(words)\n<|end_body_0|>\n\n<|body_start_1|>\n dict_set = set(dict)\n\n def replace(word):\n \"\"\"\n Replace the word which runs in O(W^2)\n\n Args:\n word(str):\n\n Returns:\n str:\n\n \"\"\"\n for i in range(1, len(word) + 1):\n if word[:i] in dict_set:\n return word[:i]\n return word\n return ' '.join(map(replace, sentence.split()))\n<|end_body_1|>\n\n<|body_start_2|>\n TRIE = lambda: defaultdict(TRIE)\n END = True\n trie = TRIE()\n for root in roots:\n current = trie\n for char in root:\n current = current[char]\n current[END] = root\n\n def replace(word):\n \"\"\"\n Replace the string. This runs in O(n)\n\n Args:\n word:\n\n Returns:\n\n \"\"\"\n current = trie\n for char in word:\n if char not in current:\n break\n current = current[char]\n if END in current:\n return current[END]\n return word\n return ' '.join(map(replace, sentence.split()))\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000302", "length_bytes": 3247, "license_type": "no_license", "methods": [{"docstring": "Brute force solution that I come up with Args: dict(list[str]): sentence(str): Returns: str:", "name": "brute_force", "signature": "def brute_force(self, dict: List[str], sentence: str) -> str"}, {"docstring": "Slightly better solution which runs in O(M + W^2) where M = len(dict) W = max(len(word)in sentence) Args: dict(list[str]): sentence(str): Returns: str:", "name": "sub_optimal", "signature": "def sub_optimal(self, dict: List[str], sentence: str) -> str"}, {"docstring": "Optimal solution which runs in O(Mm + Nn) where M = len(roots), m = len(roots[i]) N = len(sentence), n = len(word) Args: roots(list[str]): sentence(str): Returns: str:", "name": "optimal", "signature": "def optimal(self, roots, sentence)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_048368", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nRuntime: 84 ms, faster than 79.35% of Python3 online submissions for Replace Words. 
Memory Usage: 28.6 MB, less than 50.00% of Python3 online submissions for Replace Words.\n\nMethod signatures and docstrings:\n- def brute_force(self, dict: List[str], sentence: str) -> str: Brute force solution that I come up with Args: dict(list[str]): sentence(str): Returns: str:\n- def sub_optimal(self, dict: List[str], sentence: str) -> str: Slightly better solution which runs in O(M + W^2) where M = len(dict) W = max(len(word)in sentence) Args: dict(list[str]): sentence(str): Returns: str:\n- def optimal(self, roots, sentence): Optimal solution which runs in O(Mm + Nn) where M = len(roots), m = len(roots[i]) N = len(sentence), n = len(word) Args: roots(list[str]): sentence(str): Returns: str:", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nRuntime: 84 ms, faster than 79.35% of Python3 online submissions for Replace Words. Memory Usage: 28.6 MB, less than 50.00% of Python3 online submissions for Replace Words.\n\nMethod signatures and docstrings:\n- def brute_force(self, dict: List[str], sentence: str) -> str: Brute force solution that I come up with Args: dict(list[str]): sentence(str): Returns: str:\n- def sub_optimal(self, dict: List[str], sentence: str) -> str: Slightly better solution which runs in O(M + W^2) where M = len(dict) W = max(len(word)in sentence) Args: dict(list[str]): sentence(str): Returns: str:\n- def optimal(self, roots, sentence): Optimal solution which runs in O(Mm + Nn) where M = len(roots), m = len(roots[i]) N = len(sentence), n = len(word) Args: roots(list[str]): sentence(str): Returns: str:\n\n<|skeleton|>\nclass Solution:\n \"\"\"Runtime: 84 ms, faster than 79.35% of Python3 online submissions for Replace Words. Memory Usage: 28.6 MB, less than 50.00% of Python3 online submissions for Replace Words.\"\"\"\n\n def brute_force(self, dict: List[str], sentence: str) -> str:\n \"\"\"Brute force solution that I come up with Args: dict(list[str]): sentence(str): Returns: str:\"\"\"\n <|body_0|>\n\n def sub_optimal(self, dict: List[str], sentence: str) -> str:\n \"\"\"Slightly better solution which runs in O(M + W^2) where M = len(dict) W = max(len(word)in sentence) Args: dict(list[str]): sentence(str): Returns: str:\"\"\"\n <|body_1|>\n\n def optimal(self, roots, sentence):\n \"\"\"Optimal solution which runs in O(Mm + Nn) where M = len(roots), m = len(roots[i]) N = len(sentence), n = len(word) Args: roots(list[str]): sentence(str): Returns: str:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n words = sentence.split()\n dict.sort(key=lambda x: (x, len(x)))\n for i, word in enumerate(words):\n new_word = list(filter(lambda x: word.startswith(x), dict))\n if len(new_word) > 0:\n words[i] = new_word[0]\n return ' '.join(words)\n<|end_body_0|>\n\n<|body_start_1|>\n dict_set = set(dict)\n\n def replace(word):\n \"\"\"\n Replace the word which runs in O(W^2)\n\n Args:\n word(str):\n\n Returns:\n str:\n\n \"\"\"\n for i in range(1, len(word) + 1):\n if word[:i] in dict_set:\n return word[:i]\n return word\n return ' '.join(map(replace, sentence.split()))\n<|end_body_1|>\n\n<|body_start_2|>\n TRIE = lambda: defaultdict(TRIE)\n END = True\n trie = TRIE()\n for root in roots:\n current = trie\n for char in root:\n current = current[char]\n current[END] = root\n\n def replace(word):\n \"\"\"\n Replace the string. 
This runs in O(n)\n\n Args:\n word:\n\n Returns:\n\n \"\"\"\n current = trie\n for char in word:\n if char not in current:\n break\n current = current[char]\n if END in current:\n return current[END]\n return word\n return ' '.join(map(replace, sentence.split()))\n<|end_body_2|>\n", "revision_id": "01fe893ba2e37c9bda79e3081c556698f0b6d2f0", "skeleton": "<|skeleton|>\nclass Solution:\n \"\"\"Runtime: 84 ms, faster than 79.35% of Python3 online submissions for Replace Words. Memory Usage: 28.6 MB, less than 50.00% of Python3 online submissions for Replace Words.\"\"\"\n\n def brute_force(self, dict: List[str], sentence: str) -> str:\n \"\"\"Brute force solution that I come up with Args: dict(list[str]): sentence(str): Returns: str:\"\"\"\n <|body_0|>\n\n def sub_optimal(self, dict: List[str], sentence: str) -> str:\n \"\"\"Slightly better solution which runs in O(M + W^2) where M = len(dict) W = max(len(word)in sentence) Args: dict(list[str]): sentence(str): Returns: str:\"\"\"\n <|body_1|>\n\n def optimal(self, roots, sentence):\n \"\"\"Optimal solution which runs in O(Mm + Nn) where M = len(roots), m = len(roots[i]) N = len(sentence), n = len(word) Args: roots(list[str]): sentence(str): Returns: str:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n \"\"\"Runtime: 84 ms, faster than 79.35% of Python3 online submissions for Replace Words. Memory Usage: 28.6 MB, less than 50.00% of Python3 online submissions for Replace Words.\"\"\"\n\n def brute_force(self, dict: List[str], sentence: str) -> str:\n \"\"\"Brute force solution that I come up with Args: dict(list[str]): sentence(str): Returns: str:\"\"\"\n words = sentence.split()\n dict.sort(key=lambda x: (x, len(x)))\n for i, word in enumerate(words):\n new_word = list(filter(lambda x: word.startswith(x), dict))\n if len(new_word) > 0:\n words[i] = new_word[0]\n return ' '.join(words)\n\n def sub_optimal(self, dict: List[str], sentence: str) -> str:\n \"\"\"Slightly better solution which runs in O(M + W^2) where M = len(dict) W = max(len(word)in sentence) Args: dict(list[str]): sentence(str): Returns: str:\"\"\"\n dict_set = set(dict)\n\n def replace(word):\n \"\"\"\n Replace the word which runs in O(W^2)\n\n Args:\n word(str):\n\n Returns:\n str:\n\n \"\"\"\n for i in range(1, len(word) + 1):\n if word[:i] in dict_set:\n return word[:i]\n return word\n return ' '.join(map(replace, sentence.split()))\n\n def optimal(self, roots, sentence):\n \"\"\"Optimal solution which runs in O(Mm + Nn) where M = len(roots), m = len(roots[i]) N = len(sentence), n = len(word) Args: roots(list[str]): sentence(str): Returns: str:\"\"\"\n TRIE = lambda: defaultdict(TRIE)\n END = True\n trie = TRIE()\n for root in roots:\n current = trie\n for char in root:\n current = current[char]\n current[END] = root\n\n def replace(word):\n \"\"\"\n Replace the string. This runs in O(n)\n\n Args:\n word:\n\n Returns:\n\n \"\"\"\n current = trie\n for char in word:\n if char not in current:\n break\n current = current[char]\n if END in current:\n return current[END]\n return word\n return ' '.join(map(replace, sentence.split()))\n", "source": "the_stack_v2_python_sparse", "source_path": "LeetCode/648_replace_words.py", "source_repo": "KKosukeee/CodingQuestions", "split": "val", "star_events_count": 1}
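The optimal body in the Replace Words record above builds a trie out of nested defaultdicts via a self-referencing lambda, so looking up a missing character grows a fresh node. A self-contained sketch of that construction and the shortest-root replacement walk, runnable as-is (the example inputs are the classic test case for this problem, not taken from the record):

from collections import defaultdict

def replace_words(roots, sentence):
    TRIE = lambda: defaultdict(TRIE)   # a missing key grows a fresh node
    END = True
    trie = TRIE()
    for root in roots:
        node = trie
        for ch in root:
            node = node[ch]
        node[END] = root               # terminal node remembers its root string

    def replace(word):
        node = trie
        for ch in word:
            if ch not in node:         # membership test avoids growing the trie
                break
            node = node[ch]
            if END in node:
                return node[END]       # first END on the walk = shortest root
        return word

    return ' '.join(map(replace, sentence.split()))

assert replace_words(['cat', 'bat', 'rat'],
                     'the cattle was rattled by the battery') \
       == 'the cat was rat by the bat'

Returning at the first END hit during the walk is what guarantees the shortest matching root wins.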
{"blob_id": "e8c339aa2d79fc5be8e4267acd9b40b784f4382b", "bodies": ["def rserialize(root, s):\n if not root:\n s += 'null,'\n return s\n s += '{},'.format(root.val)\n s = rserialize(root.left, s)\n s = rserialize(root.right, s)\n return s\nreturn rserialize(root, '')", "l = []\n_tmp = data.split(',')\nfor item in _tmp:\n if item:\n l.append(item)\nl.reverse()\n\ndef rdeserialize():\n if not l:\n return None\n val = l.pop()\n if val == 'null':\n return None\n root = TreeNode(val)\n root.left = rdeserialize()\n root.right = rdeserialize()\n return root\nreturn rdeserialize()"], "bodies_text": "<|body_start_0|>\n def rserialize(root, s):\n if not root:\n s += 'null,'\n return s\n s += '{},'.format(root.val)\n s = rserialize(root.left, s)\n s = rserialize(root.right, s)\n return s\n return rserialize(root, '')\n<|end_body_0|>\n\n<|body_start_1|>\n l = []\n _tmp = data.split(',')\n for item in _tmp:\n if item:\n l.append(item)\n l.reverse()\n\n def rdeserialize():\n if not l:\n return None\n val = l.pop()\n if val == 'null':\n return None\n root = TreeNode(val)\n root.left = rdeserialize()\n root.right = rdeserialize()\n return root\n return rdeserialize()\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Codec", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def rserialize(root, s):\n if not root:\n s += 'null,'\n return s\n s += '{},'.format(root.val)\n s = rserialize(root.left, s)\n s = rserialize(root.right, s)\n return s\n return rserialize(root, '')\n<|end_body_0|>\n\n<|body_start_1|>\n l = []\n _tmp = data.split(',')\n for item in _tmp:\n if item:\n l.append(item)\n l.reverse()\n\n def rdeserialize():\n if not l:\n return None\n val = l.pop()\n if val == 'null':\n return None\n root = TreeNode(val)\n root.left = rdeserialize()\n root.right = rdeserialize()\n return root\n return rdeserialize()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000303", "length_bytes": 1475, "license_type": "permissive", "methods": [{"docstring": "Encodes a tree to a single string. :type root: TreeNode :rtype: str", "name": "serialize", "signature": "def serialize(self, root)"}, {"docstring": "Decodes your encoded data to tree. :type data: str :rtype: TreeNode", "name": "deserialize", "signature": "def deserialize(self, data)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_040135", "prompt": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: TreeNode :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: TreeNode", "prompted_full_text": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: TreeNode :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: TreeNode\n\n<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. 
:type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def rserialize(root, s):\n if not root:\n s += 'null,'\n return s\n s += '{},'.format(root.val)\n s = rserialize(root.left, s)\n s = rserialize(root.right, s)\n return s\n return rserialize(root, '')\n<|end_body_0|>\n\n<|body_start_1|>\n l = []\n _tmp = data.split(',')\n for item in _tmp:\n if item:\n l.append(item)\n l.reverse()\n\n def rdeserialize():\n if not l:\n return None\n val = l.pop()\n if val == 'null':\n return None\n root = TreeNode(val)\n root.left = rdeserialize()\n root.right = rdeserialize()\n return root\n return rdeserialize()\n<|end_body_1|>\n", "revision_id": "ad5e724d20a8492b8eba03fc0f24e4ff5964b3ea", "skeleton": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Codec:\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n def rserialize(root, s):\n if not root:\n s += 'null,'\n return s\n s += '{},'.format(root.val)\n s = rserialize(root.left, s)\n s = rserialize(root.right, s)\n return s\n return rserialize(root, '')\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n l = []\n _tmp = data.split(',')\n for item in _tmp:\n if item:\n l.append(item)\n l.reverse()\n\n def rdeserialize():\n if not l:\n return None\n val = l.pop()\n if val == 'null':\n return None\n root = TreeNode(val)\n root.left = rdeserialize()\n root.right = rdeserialize()\n return root\n return rdeserialize()\n", "source": "the_stack_v2_python_sparse", "source_path": "dailyQuestion/2020/2020-06/06-15/python/solution_dfs.py", "source_repo": "russellgao/algorithm", "split": "val", "star_events_count": 3}
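The Codec record above serializes a tree as comma-separated preorder tokens with explicit null markers, then rebuilds it recursively in the same order. A runnable sketch of the same scheme; it consumes tokens through an iterator rather than the record's reversed-list-and-pop and parses values back to int, so treat it as a variant, not the record's exact code (TreeNode is defined here since the record assumes it exists):

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

def serialize(root):
    # Preorder with explicit null markers, as in the record's rserialize.
    if not root:
        return 'null,'
    return '{},'.format(root.val) + serialize(root.left) + serialize(root.right)

def deserialize(data):
    tokens = iter(data.split(','))
    def build():
        val = next(tokens)
        if val in ('null', ''):       # '' absorbs the trailing comma's empty token
            return None
        node = TreeNode(int(val))
        node.left = build()
        node.right = build()
        return node
    return build()

tree = TreeNode(1, TreeNode(2), TreeNode(3))
assert serialize(tree) == '1,2,null,null,3,null,null,'
assert serialize(deserialize(serialize(tree))) == serialize(tree)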
{"blob_id": "4266ad5e6986392a5701650af71f579bada8c925", "bodies": ["self.is_released = False\ntk.Frame.__init__(self, parent)\nparent.title('Add a Doctor')\nself._close_cb = close_callback\nself.grid(rowspan=2, columnspan=2, padx=30, pady=30)\nttk.Label(self, text='ID:').grid(row=1, column=1)\nself.id = ttk.Entry(self)\nself.id.grid(row=1, column=2)\nttk.Label(self, text='First Name:').grid(row=2, column=1)\nself.first_name = ttk.Entry(self)\nself.first_name.grid(row=2, column=2)\nttk.Label(self, text='Last Name:').grid(row=3, column=1)\nself.last_name = ttk.Entry(self)\nself.last_name.grid(row=3, column=2)\nttk.Label(self, text='Address:').grid(row=5, column=1)\nself.address = ttk.Entry(self)\nttk.Label(self, text='Date of Birth:').grid(row=4, column=1)\nttk.Button(self, text='Calendar', command=self._date_of_birth).grid(row=4, column=2)\nself.address.grid(row=5, column=2)\nttk.Label(self, text='Office Number:').grid(row=7, column=1)\nself.office_num = ttk.Entry(self)\nself.office_num.grid(row=7, column=2)\nttk.Label(self, text='Income:').grid(row=8, column=1)\nself.income = ttk.Entry(self)\nself.income.grid(row=8, column=2)\nttk.Button(self, text='Submit', command=self._submit_cb).grid(row=9, column=1, pady=20)\nttk.Button(self, text='Close', command=self._close_cb).grid(row=9, column=2)", "new_wins = tk.Toplevel()\ncal = DateEntry(new_wins, width=15, background='blue', foreground='red', borderwidth=3, date_pattern='y-mm-dd')\ncal.pack(padx=10, pady=10)\nself.date_of_birth = cal.get_date()", "data = {}\ndata['first_name'] = self.first_name.get()\ndata['last_name'] = self.last_name.get()\ndata['date_of_birth'] = self.date_of_birth.strftime('%d-%b-%Y')\ndata['address'] = self.address.get()\ndata['id'] = self.id.get()\ndata['is_released'] = self.is_released\ntry:\n data['office_num'] = int(self.office_num.get())\n data['income'] = int(self.income.get())\nexcept TypeError:\n messagebox.showerror('Error', 'Office number or Income must be greater than or equal 0, and they are an integer', icon='error')\nregex_1 = re.compile('[a-zA-Z]', re.I)\nmatch_1 = regex_1.match(str(data['first_name']))\nregex_2 = re.compile('[a-zA-Z]', re.I)\nmatch_2 = regex_2.match(str(data['last_name']))\ntry:\n response = requests.post('http://127.0.0.1:5000/department/Doctor', json=data)\n if response.status_code == 200:\n print(response.text)\n else:\n messagebox.showerror('Error', response.text)\nexcept ValueError as err:\n messagebox.showerror(title='Invalid Value', message=err, icon='error')"], "bodies_text": "<|body_start_0|>\n self.is_released = False\n tk.Frame.__init__(self, parent)\n parent.title('Add a Doctor')\n self._close_cb = close_callback\n self.grid(rowspan=2, columnspan=2, padx=30, pady=30)\n ttk.Label(self, text='ID:').grid(row=1, column=1)\n self.id = ttk.Entry(self)\n self.id.grid(row=1, column=2)\n ttk.Label(self, text='First Name:').grid(row=2, column=1)\n self.first_name = ttk.Entry(self)\n self.first_name.grid(row=2, column=2)\n ttk.Label(self, text='Last Name:').grid(row=3, column=1)\n self.last_name = ttk.Entry(self)\n self.last_name.grid(row=3, column=2)\n ttk.Label(self, text='Address:').grid(row=5, column=1)\n self.address = ttk.Entry(self)\n ttk.Label(self, text='Date of Birth:').grid(row=4, column=1)\n ttk.Button(self, text='Calendar', command=self._date_of_birth).grid(row=4, column=2)\n self.address.grid(row=5, column=2)\n ttk.Label(self, text='Office Number:').grid(row=7, column=1)\n self.office_num = ttk.Entry(self)\n self.office_num.grid(row=7, column=2)\n ttk.Label(self, 
text='Income:').grid(row=8, column=1)\n self.income = ttk.Entry(self)\n self.income.grid(row=8, column=2)\n ttk.Button(self, text='Submit', command=self._submit_cb).grid(row=9, column=1, pady=20)\n ttk.Button(self, text='Close', command=self._close_cb).grid(row=9, column=2)\n<|end_body_0|>\n\n<|body_start_1|>\n new_wins = tk.Toplevel()\n cal = DateEntry(new_wins, width=15, background='blue', foreground='red', borderwidth=3, date_pattern='y-mm-dd')\n cal.pack(padx=10, pady=10)\n self.date_of_birth = cal.get_date()\n<|end_body_1|>\n\n<|body_start_2|>\n data = {}\n data['first_name'] = self.first_name.get()\n data['last_name'] = self.last_name.get()\n data['date_of_birth'] = self.date_of_birth.strftime('%d-%b-%Y')\n data['address'] = self.address.get()\n data['id'] = self.id.get()\n data['is_released'] = self.is_released\n try:\n data['office_num'] = int(self.office_num.get())\n data['income'] = int(self.income.get())\n except TypeError:\n messagebox.showerror('Error', 'Office number or Income must be greater than or equal 0, and they are an integer', icon='error')\n regex_1 = re.compile('[a-zA-Z]', re.I)\n match_1 = regex_1.match(str(data['first_name']))\n regex_2 = re.compile('[a-zA-Z]', re.I)\n match_2 = regex_2.match(str(data['last_name']))\n try:\n response = requests.post('http://127.0.0.1:5000/department/Doctor', json=data)\n if response.status_code == 200:\n print(response.text)\n else:\n messagebox.showerror('Error', response.text)\n except ValueError as err:\n messagebox.showerror(title='Invalid Value', message=err, icon='error')\n<|end_body_2|>\n", "class_docstring": "Popup Frame to Add a Doctor", "class_name": "AddDoctorPopup", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AddDoctorPopup:\n \"\"\"Popup Frame to Add a Doctor\"\"\"\n\n def __init__(self, parent, close_callback):\n \"\"\"Constructor\"\"\"\n <|body_0|>\n\n def _date_of_birth(self):\n \"\"\"Calendar popup\"\"\"\n <|body_1|>\n\n def _submit_cb(self):\n \"\"\"Submit the Add Doctor button\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.is_released = False\n tk.Frame.__init__(self, parent)\n parent.title('Add a Doctor')\n self._close_cb = close_callback\n self.grid(rowspan=2, columnspan=2, padx=30, pady=30)\n ttk.Label(self, text='ID:').grid(row=1, column=1)\n self.id = ttk.Entry(self)\n self.id.grid(row=1, column=2)\n ttk.Label(self, text='First Name:').grid(row=2, column=1)\n self.first_name = ttk.Entry(self)\n self.first_name.grid(row=2, column=2)\n ttk.Label(self, text='Last Name:').grid(row=3, column=1)\n self.last_name = ttk.Entry(self)\n self.last_name.grid(row=3, column=2)\n ttk.Label(self, text='Address:').grid(row=5, column=1)\n self.address = ttk.Entry(self)\n ttk.Label(self, text='Date of Birth:').grid(row=4, column=1)\n ttk.Button(self, text='Calendar', command=self._date_of_birth).grid(row=4, column=2)\n self.address.grid(row=5, column=2)\n ttk.Label(self, text='Office Number:').grid(row=7, column=1)\n self.office_num = ttk.Entry(self)\n self.office_num.grid(row=7, column=2)\n ttk.Label(self, text='Income:').grid(row=8, column=1)\n self.income = ttk.Entry(self)\n self.income.grid(row=8, column=2)\n ttk.Button(self, text='Submit', command=self._submit_cb).grid(row=9, column=1, pady=20)\n ttk.Button(self, text='Close', command=self._close_cb).grid(row=9, column=2)\n<|end_body_0|>\n\n<|body_start_1|>\n new_wins = tk.Toplevel()\n cal = DateEntry(new_wins, width=15, background='blue', foreground='red', borderwidth=3, 
date_pattern='y-mm-dd')\n cal.pack(padx=10, pady=10)\n self.date_of_birth = cal.get_date()\n<|end_body_1|>\n\n<|body_start_2|>\n data = {}\n data['first_name'] = self.first_name.get()\n data['last_name'] = self.last_name.get()\n data['date_of_birth'] = self.date_of_birth.strftime('%d-%b-%Y')\n data['address'] = self.address.get()\n data['id'] = self.id.get()\n data['is_released'] = self.is_released\n try:\n data['office_num'] = int(self.office_num.get())\n data['income'] = int(self.income.get())\n except TypeError:\n messagebox.showerror('Error', 'Office number or Income must be greater than or equal 0, and they are an integer', icon='error')\n regex_1 = re.compile('[a-zA-Z]', re.I)\n match_1 = regex_1.match(str(data['first_name']))\n regex_2 = re.compile('[a-zA-Z]', re.I)\n match_2 = regex_2.match(str(data['last_name']))\n try:\n response = requests.post('http://127.0.0.1:5000/department/Doctor', json=data)\n if response.status_code == 200:\n print(response.text)\n else:\n messagebox.showerror('Error', response.text)\n except ValueError as err:\n messagebox.showerror(title='Invalid Value', message=err, icon='error')\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000304", "length_bytes": 3431, "license_type": "no_license", "methods": [{"docstring": "Constructor", "name": "__init__", "signature": "def __init__(self, parent, close_callback)"}, {"docstring": "Calendar popup", "name": "_date_of_birth", "signature": "def _date_of_birth(self)"}, {"docstring": "Submit the Add Doctor button", "name": "_submit_cb", "signature": "def _submit_cb(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_031890", "prompt": "Implement the Python class `AddDoctorPopup` described below.\n\nClass description:\nPopup Frame to Add a Doctor\n\nMethod signatures and docstrings:\n- def __init__(self, parent, close_callback): Constructor\n- def _date_of_birth(self): Calendar popup\n- def _submit_cb(self): Submit the Add Doctor button", "prompted_full_text": "Implement the Python class `AddDoctorPopup` described below.\n\nClass description:\nPopup Frame to Add a Doctor\n\nMethod signatures and docstrings:\n- def __init__(self, parent, close_callback): Constructor\n- def _date_of_birth(self): Calendar popup\n- def _submit_cb(self): Submit the Add Doctor button\n\n<|skeleton|>\nclass AddDoctorPopup:\n \"\"\"Popup Frame to Add a Doctor\"\"\"\n\n def __init__(self, parent, close_callback):\n \"\"\"Constructor\"\"\"\n <|body_0|>\n\n def _date_of_birth(self):\n \"\"\"Calendar popup\"\"\"\n <|body_1|>\n\n def _submit_cb(self):\n \"\"\"Submit the Add Doctor button\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.is_released = False\n tk.Frame.__init__(self, parent)\n parent.title('Add a Doctor')\n self._close_cb = close_callback\n self.grid(rowspan=2, columnspan=2, padx=30, pady=30)\n ttk.Label(self, text='ID:').grid(row=1, column=1)\n self.id = ttk.Entry(self)\n self.id.grid(row=1, column=2)\n ttk.Label(self, text='First Name:').grid(row=2, column=1)\n self.first_name = ttk.Entry(self)\n self.first_name.grid(row=2, column=2)\n ttk.Label(self, text='Last Name:').grid(row=3, column=1)\n self.last_name = ttk.Entry(self)\n self.last_name.grid(row=3, column=2)\n ttk.Label(self, text='Address:').grid(row=5, column=1)\n self.address = ttk.Entry(self)\n ttk.Label(self, text='Date of Birth:').grid(row=4, column=1)\n ttk.Button(self, text='Calendar', command=self._date_of_birth).grid(row=4, column=2)\n self.address.grid(row=5, column=2)\n ttk.Label(self, text='Office 
Number:').grid(row=7, column=1)\n self.office_num = ttk.Entry(self)\n self.office_num.grid(row=7, column=2)\n ttk.Label(self, text='Income:').grid(row=8, column=1)\n self.income = ttk.Entry(self)\n self.income.grid(row=8, column=2)\n ttk.Button(self, text='Submit', command=self._submit_cb).grid(row=9, column=1, pady=20)\n ttk.Button(self, text='Close', command=self._close_cb).grid(row=9, column=2)\n<|end_body_0|>\n\n<|body_start_1|>\n new_wins = tk.Toplevel()\n cal = DateEntry(new_wins, width=15, background='blue', foreground='red', borderwidth=3, date_pattern='y-mm-dd')\n cal.pack(padx=10, pady=10)\n self.date_of_birth = cal.get_date()\n<|end_body_1|>\n\n<|body_start_2|>\n data = {}\n data['first_name'] = self.first_name.get()\n data['last_name'] = self.last_name.get()\n data['date_of_birth'] = self.date_of_birth.strftime('%d-%b-%Y')\n data['address'] = self.address.get()\n data['id'] = self.id.get()\n data['is_released'] = self.is_released\n try:\n data['office_num'] = int(self.office_num.get())\n data['income'] = int(self.income.get())\n except TypeError:\n messagebox.showerror('Error', 'Office number or Income must be greater than or equal 0, and they are an integer', icon='error')\n regex_1 = re.compile('[a-zA-Z]', re.I)\n match_1 = regex_1.match(str(data['first_name']))\n regex_2 = re.compile('[a-zA-Z]', re.I)\n match_2 = regex_2.match(str(data['last_name']))\n try:\n response = requests.post('http://127.0.0.1:5000/department/Doctor', json=data)\n if response.status_code == 200:\n print(response.text)\n else:\n messagebox.showerror('Error', response.text)\n except ValueError as err:\n messagebox.showerror(title='Invalid Value', message=err, icon='error')\n<|end_body_2|>\n", "revision_id": "0178908002a9fb9a0a561b9bbc8caee0239ac7c4", "skeleton": "<|skeleton|>\nclass AddDoctorPopup:\n \"\"\"Popup Frame to Add a Doctor\"\"\"\n\n def __init__(self, parent, close_callback):\n \"\"\"Constructor\"\"\"\n <|body_0|>\n\n def _date_of_birth(self):\n \"\"\"Calendar popup\"\"\"\n <|body_1|>\n\n def _submit_cb(self):\n \"\"\"Submit the Add Doctor button\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class AddDoctorPopup:\n \"\"\"Popup Frame to Add a Doctor\"\"\"\n\n def __init__(self, parent, close_callback):\n \"\"\"Constructor\"\"\"\n self.is_released = False\n tk.Frame.__init__(self, parent)\n parent.title('Add a Doctor')\n self._close_cb = close_callback\n self.grid(rowspan=2, columnspan=2, padx=30, pady=30)\n ttk.Label(self, text='ID:').grid(row=1, column=1)\n self.id = ttk.Entry(self)\n self.id.grid(row=1, column=2)\n ttk.Label(self, text='First Name:').grid(row=2, column=1)\n self.first_name = ttk.Entry(self)\n self.first_name.grid(row=2, column=2)\n ttk.Label(self, text='Last Name:').grid(row=3, column=1)\n self.last_name = ttk.Entry(self)\n self.last_name.grid(row=3, column=2)\n ttk.Label(self, text='Address:').grid(row=5, column=1)\n self.address = ttk.Entry(self)\n ttk.Label(self, text='Date of Birth:').grid(row=4, column=1)\n ttk.Button(self, text='Calendar', command=self._date_of_birth).grid(row=4, column=2)\n self.address.grid(row=5, column=2)\n ttk.Label(self, text='Office Number:').grid(row=7, column=1)\n self.office_num = ttk.Entry(self)\n self.office_num.grid(row=7, column=2)\n ttk.Label(self, text='Income:').grid(row=8, column=1)\n self.income = ttk.Entry(self)\n self.income.grid(row=8, column=2)\n ttk.Button(self, text='Submit', 
command=self._submit_cb).grid(row=9, column=1, pady=20)\n ttk.Button(self, text='Close', command=self._close_cb).grid(row=9, column=2)\n\n def _date_of_birth(self):\n \"\"\"Calendar popup\"\"\"\n new_wins = tk.Toplevel()\n cal = DateEntry(new_wins, width=15, background='blue', foreground='red', borderwidth=3, date_pattern='y-mm-dd')\n cal.pack(padx=10, pady=10)\n self.date_of_birth = cal.get_date()\n\n def _submit_cb(self):\n \"\"\"Submit the Add Doctor button\"\"\"\n data = {}\n data['first_name'] = self.first_name.get()\n data['last_name'] = self.last_name.get()\n data['date_of_birth'] = self.date_of_birth.strftime('%d-%b-%Y')\n data['address'] = self.address.get()\n data['id'] = self.id.get()\n data['is_released'] = self.is_released\n try:\n data['office_num'] = int(self.office_num.get())\n data['income'] = int(self.income.get())\n except TypeError:\n messagebox.showerror('Error', 'Office number or Income must be greater than or equal 0, and they are an integer', icon='error')\n regex_1 = re.compile('[a-zA-Z]', re.I)\n match_1 = regex_1.match(str(data['first_name']))\n regex_2 = re.compile('[a-zA-Z]', re.I)\n match_2 = regex_2.match(str(data['last_name']))\n try:\n response = requests.post('http://127.0.0.1:5000/department/Doctor', json=data)\n if response.status_code == 200:\n print(response.text)\n else:\n messagebox.showerror('Error', response.text)\n except ValueError as err:\n messagebox.showerror(title='Invalid Value', message=err, icon='error')\n", "source": "the_stack_v2_python_sparse", "source_path": "Assignment 4/GUI/add_doctor_popup.py", "source_repo": "ntran46/Python_OOP", "split": "val", "star_events_count": 0}
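Two likely bugs in the `AddDoctorPopup` record deserve a flag: `int()` on non-numeric `Entry` text raises `ValueError`, not `TypeError`, so the `except TypeError` branch can never catch bad input; and `self.date_of_birth` is only assigned inside `_date_of_birth`, so submitting without ever opening the calendar raises `AttributeError`. The two compiled regexes (`match_1`, `match_2`) are also computed but never used. A GUI-free sketch of how the numeric validation might look (field names mirror the record; this is an illustrative rework, not the original author's code):

def parse_non_negative_int(raw, field):
    # int('abc') raises ValueError (not TypeError), so that is what we catch.
    try:
        value = int(raw)
    except ValueError:
        raise ValueError('%s must be an integer greater than or equal to 0' % field)
    if value < 0:
        raise ValueError('%s must be greater than or equal to 0' % field)
    return value

assert parse_non_negative_int('42', 'office_num') == 42
try:
    parse_non_negative_int('abc', 'income')
except ValueError as err:
    print(err)  # income must be an integer greater than or equal to 0

A guard such as `if not hasattr(self, 'date_of_birth'): ...` before building the payload would close the calendar gap.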
{"blob_id": "05972d13a01a975f94d66cb71288d4f13210f062", "bodies": ["description = ' '.join(response.css('.vc_col-sm-6 .wpb_wrapper p *::text').extract())\nlocation = self.location\nif 'virtual' in description.lower():\n location = {'name': 'Virtual', 'address': ''}\nelif 'cancel' not in description.lower() and '301 East Cermak' not in description:\n raise ValueError('Meeting location has changed')\nfor item in response.css('.supsystic-table tr')[3:]:\n title = self._parse_title(item)\n classification = self._parse_classification(title)\n meeting = Meeting(title=title, description='', classification=classification, start=self._parse_start(item, classification), end=None, all_day=False, time_notes='Refer to notice for start time', location=location, links=self._parse_links(item), source=response.url)\n meeting['status'] = self._get_status(meeting, text=item.css('td:nth-child(2)::text').extract_first() or '')\n meeting['id'] = self._get_id(meeting)\n yield meeting", "title_str = item.css('td:nth-child(2)::text').extract_first()\nif 'committee' in title_str.lower():\n return title_str.replace('Meeting', '').strip()\nreturn 'Board of Directors'", "if 'committee' in title.lower():\n return COMMITTEE\nreturn BOARD", "date_str = item.css('td::text').extract_first().strip()\ndate_obj = datetime.strptime(date_str, '%B %d, %Y').date()\ntime_obj = time(9)\nif classification == COMMITTEE:\n time_obj = time(13, 30)\nreturn datetime.combine(date_obj, time_obj)", "links = []\nfor link in item.css('a'):\n links.append({'href': link.attrib['href'], 'title': link.xpath('./text()').extract_first()})\nreturn links"], "bodies_text": "<|body_start_0|>\n description = ' '.join(response.css('.vc_col-sm-6 .wpb_wrapper p *::text').extract())\n location = self.location\n if 'virtual' in description.lower():\n location = {'name': 'Virtual', 'address': ''}\n elif 'cancel' not in description.lower() and '301 East Cermak' not in description:\n raise ValueError('Meeting location has changed')\n for item in response.css('.supsystic-table tr')[3:]:\n title = self._parse_title(item)\n classification = self._parse_classification(title)\n meeting = Meeting(title=title, description='', classification=classification, start=self._parse_start(item, classification), end=None, all_day=False, time_notes='Refer to notice for start time', location=location, links=self._parse_links(item), source=response.url)\n meeting['status'] = self._get_status(meeting, text=item.css('td:nth-child(2)::text').extract_first() or '')\n meeting['id'] = self._get_id(meeting)\n yield meeting\n<|end_body_0|>\n\n<|body_start_1|>\n title_str = item.css('td:nth-child(2)::text').extract_first()\n if 'committee' in title_str.lower():\n return title_str.replace('Meeting', '').strip()\n return 'Board of Directors'\n<|end_body_1|>\n\n<|body_start_2|>\n if 'committee' in title.lower():\n return COMMITTEE\n return BOARD\n<|end_body_2|>\n\n<|body_start_3|>\n date_str = item.css('td::text').extract_first().strip()\n date_obj = datetime.strptime(date_str, '%B %d, %Y').date()\n time_obj = time(9)\n if classification == COMMITTEE:\n time_obj = time(13, 30)\n return datetime.combine(date_obj, time_obj)\n<|end_body_3|>\n\n<|body_start_4|>\n links = []\n for link in item.css('a'):\n links.append({'href': link.attrib['href'], 'title': link.xpath('./text()').extract_first()})\n return links\n<|end_body_4|>\n", "class_docstring": "", "class_name": "ChiMetroPierExpositionSpider", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": 
"<|skeleton|>\nclass ChiMetroPierExpositionSpider:\n\n def parse(self, response):\n \"\"\"`parse` should always `yield` Meeting items. Change the `_parse_title`, `_parse_start`, etc methods to fit your scraping needs.\"\"\"\n <|body_0|>\n\n def _parse_title(self, item):\n \"\"\"Parse or generate meeting title.\"\"\"\n <|body_1|>\n\n def _parse_classification(self, title):\n \"\"\"Parse or generate classification from allowed options.\"\"\"\n <|body_2|>\n\n def _parse_start(self, item, classification):\n \"\"\"Parse start datetime as a naive datetime object.\"\"\"\n <|body_3|>\n\n def _parse_links(self, item):\n \"\"\"Parse or generate links.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n description = ' '.join(response.css('.vc_col-sm-6 .wpb_wrapper p *::text').extract())\n location = self.location\n if 'virtual' in description.lower():\n location = {'name': 'Virtual', 'address': ''}\n elif 'cancel' not in description.lower() and '301 East Cermak' not in description:\n raise ValueError('Meeting location has changed')\n for item in response.css('.supsystic-table tr')[3:]:\n title = self._parse_title(item)\n classification = self._parse_classification(title)\n meeting = Meeting(title=title, description='', classification=classification, start=self._parse_start(item, classification), end=None, all_day=False, time_notes='Refer to notice for start time', location=location, links=self._parse_links(item), source=response.url)\n meeting['status'] = self._get_status(meeting, text=item.css('td:nth-child(2)::text').extract_first() or '')\n meeting['id'] = self._get_id(meeting)\n yield meeting\n<|end_body_0|>\n\n<|body_start_1|>\n title_str = item.css('td:nth-child(2)::text').extract_first()\n if 'committee' in title_str.lower():\n return title_str.replace('Meeting', '').strip()\n return 'Board of Directors'\n<|end_body_1|>\n\n<|body_start_2|>\n if 'committee' in title.lower():\n return COMMITTEE\n return BOARD\n<|end_body_2|>\n\n<|body_start_3|>\n date_str = item.css('td::text').extract_first().strip()\n date_obj = datetime.strptime(date_str, '%B %d, %Y').date()\n time_obj = time(9)\n if classification == COMMITTEE:\n time_obj = time(13, 30)\n return datetime.combine(date_obj, time_obj)\n<|end_body_3|>\n\n<|body_start_4|>\n links = []\n for link in item.css('a'):\n links.append({'href': link.attrib['href'], 'title': link.xpath('./text()').extract_first()})\n return links\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000305", "length_bytes": 3441, "license_type": "permissive", "methods": [{"docstring": "`parse` should always `yield` Meeting items. 
Change the `_parse_title`, `_parse_start`, etc methods to fit your scraping needs.", "name": "parse", "signature": "def parse(self, response)"}, {"docstring": "Parse or generate meeting title.", "name": "_parse_title", "signature": "def _parse_title(self, item)"}, {"docstring": "Parse or generate classification from allowed options.", "name": "_parse_classification", "signature": "def _parse_classification(self, title)"}, {"docstring": "Parse start datetime as a naive datetime object.", "name": "_parse_start", "signature": "def _parse_start(self, item, classification)"}, {"docstring": "Parse or generate links.", "name": "_parse_links", "signature": "def _parse_links(self, item)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_053005", "prompt": "Implement the Python class `ChiMetroPierExpositionSpider` described below.\n\nClass description:\nImplement the ChiMetroPierExpositionSpider class.\n\nMethod signatures and docstrings:\n- def parse(self, response): `parse` should always `yield` Meeting items. Change the `_parse_title`, `_parse_start`, etc methods to fit your scraping needs.\n- def _parse_title(self, item): Parse or generate meeting title.\n- def _parse_classification(self, title): Parse or generate classification from allowed options.\n- def _parse_start(self, item, classification): Parse start datetime as a naive datetime object.\n- def _parse_links(self, item): Parse or generate links.", "prompted_full_text": "Implement the Python class `ChiMetroPierExpositionSpider` described below.\n\nClass description:\nImplement the ChiMetroPierExpositionSpider class.\n\nMethod signatures and docstrings:\n- def parse(self, response): `parse` should always `yield` Meeting items. Change the `_parse_title`, `_parse_start`, etc methods to fit your scraping needs.\n- def _parse_title(self, item): Parse or generate meeting title.\n- def _parse_classification(self, title): Parse or generate classification from allowed options.\n- def _parse_start(self, item, classification): Parse start datetime as a naive datetime object.\n- def _parse_links(self, item): Parse or generate links.\n\n<|skeleton|>\nclass ChiMetroPierExpositionSpider:\n\n def parse(self, response):\n \"\"\"`parse` should always `yield` Meeting items. 
Change the `_parse_title`, `_parse_start`, etc methods to fit your scraping needs.\"\"\"\n <|body_0|>\n\n def _parse_title(self, item):\n \"\"\"Parse or generate meeting title.\"\"\"\n <|body_1|>\n\n def _parse_classification(self, title):\n \"\"\"Parse or generate classification from allowed options.\"\"\"\n <|body_2|>\n\n def _parse_start(self, item, classification):\n \"\"\"Parse start datetime as a naive datetime object.\"\"\"\n <|body_3|>\n\n def _parse_links(self, item):\n \"\"\"Parse or generate links.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n description = ' '.join(response.css('.vc_col-sm-6 .wpb_wrapper p *::text').extract())\n location = self.location\n if 'virtual' in description.lower():\n location = {'name': 'Virtual', 'address': ''}\n elif 'cancel' not in description.lower() and '301 East Cermak' not in description:\n raise ValueError('Meeting location has changed')\n for item in response.css('.supsystic-table tr')[3:]:\n title = self._parse_title(item)\n classification = self._parse_classification(title)\n meeting = Meeting(title=title, description='', classification=classification, start=self._parse_start(item, classification), end=None, all_day=False, time_notes='Refer to notice for start time', location=location, links=self._parse_links(item), source=response.url)\n meeting['status'] = self._get_status(meeting, text=item.css('td:nth-child(2)::text').extract_first() or '')\n meeting['id'] = self._get_id(meeting)\n yield meeting\n<|end_body_0|>\n\n<|body_start_1|>\n title_str = item.css('td:nth-child(2)::text').extract_first()\n if 'committee' in title_str.lower():\n return title_str.replace('Meeting', '').strip()\n return 'Board of Directors'\n<|end_body_1|>\n\n<|body_start_2|>\n if 'committee' in title.lower():\n return COMMITTEE\n return BOARD\n<|end_body_2|>\n\n<|body_start_3|>\n date_str = item.css('td::text').extract_first().strip()\n date_obj = datetime.strptime(date_str, '%B %d, %Y').date()\n time_obj = time(9)\n if classification == COMMITTEE:\n time_obj = time(13, 30)\n return datetime.combine(date_obj, time_obj)\n<|end_body_3|>\n\n<|body_start_4|>\n links = []\n for link in item.css('a'):\n links.append({'href': link.attrib['href'], 'title': link.xpath('./text()').extract_first()})\n return links\n<|end_body_4|>\n", "revision_id": "611fce6a2705446e25a2fc33e32090a571eb35d1", "skeleton": "<|skeleton|>\nclass ChiMetroPierExpositionSpider:\n\n def parse(self, response):\n \"\"\"`parse` should always `yield` Meeting items. Change the `_parse_title`, `_parse_start`, etc methods to fit your scraping needs.\"\"\"\n <|body_0|>\n\n def _parse_title(self, item):\n \"\"\"Parse or generate meeting title.\"\"\"\n <|body_1|>\n\n def _parse_classification(self, title):\n \"\"\"Parse or generate classification from allowed options.\"\"\"\n <|body_2|>\n\n def _parse_start(self, item, classification):\n \"\"\"Parse start datetime as a naive datetime object.\"\"\"\n <|body_3|>\n\n def _parse_links(self, item):\n \"\"\"Parse or generate links.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ChiMetroPierExpositionSpider:\n def parse(self, response):\n \"\"\"`parse` should always `yield` Meeting items. 
Change the `_parse_title`, `_parse_start`, etc methods to fit your scraping needs.\"\"\"\n description = ' '.join(response.css('.vc_col-sm-6 .wpb_wrapper p *::text').extract())\n location = self.location\n if 'virtual' in description.lower():\n location = {'name': 'Virtual', 'address': ''}\n elif 'cancel' not in description.lower() and '301 East Cermak' not in description:\n raise ValueError('Meeting location has changed')\n for item in response.css('.supsystic-table tr')[3:]:\n title = self._parse_title(item)\n classification = self._parse_classification(title)\n meeting = Meeting(title=title, description='', classification=classification, start=self._parse_start(item, classification), end=None, all_day=False, time_notes='Refer to notice for start time', location=location, links=self._parse_links(item), source=response.url)\n meeting['status'] = self._get_status(meeting, text=item.css('td:nth-child(2)::text').extract_first() or '')\n meeting['id'] = self._get_id(meeting)\n yield meeting\n\n def _parse_title(self, item):\n \"\"\"Parse or generate meeting title.\"\"\"\n title_str = item.css('td:nth-child(2)::text').extract_first()\n if 'committee' in title_str.lower():\n return title_str.replace('Meeting', '').strip()\n return 'Board of Directors'\n\n def _parse_classification(self, title):\n \"\"\"Parse or generate classification from allowed options.\"\"\"\n if 'committee' in title.lower():\n return COMMITTEE\n return BOARD\n\n def _parse_start(self, item, classification):\n \"\"\"Parse start datetime as a naive datetime object.\"\"\"\n date_str = item.css('td::text').extract_first().strip()\n date_obj = datetime.strptime(date_str, '%B %d, %Y').date()\n time_obj = time(9)\n if classification == COMMITTEE:\n time_obj = time(13, 30)\n return datetime.combine(date_obj, time_obj)\n\n def _parse_links(self, item):\n \"\"\"Parse or generate links.\"\"\"\n links = []\n for link in item.css('a'):\n links.append({'href': link.attrib['href'], 'title': link.xpath('./text()').extract_first()})\n return links\n", "source": "the_stack_v2_python_sparse", "source_path": "city_scrapers/spiders/chi_metro_pier_exposition.py", "source_repo": "City-Bureau/city-scrapers", "split": "val", "star_events_count": 308}
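The spider record's `_parse_start` combines a date parsed from the first table cell with a classification-dependent default time: 9:00 a.m. for board meetings, 1:30 p.m. for committees (the table itself carries no time, hence the 'Refer to notice for start time' note). A self-contained sketch of that step follows; `BOARD` and `COMMITTEE` are assumed to be plain string constants, since the record imports the real ones from elsewhere in the project:

from datetime import datetime, time

BOARD = 'Board'          # assumed values; the record imports the real constants
COMMITTEE = 'Committee'

def parse_start(date_str, classification):
    # 'January 27, 2021' -> date, then attach the default start time.
    date_obj = datetime.strptime(date_str.strip(), '%B %d, %Y').date()
    time_obj = time(13, 30) if classification == COMMITTEE else time(9)
    return datetime.combine(date_obj, time_obj)

assert parse_start('January 27, 2021', BOARD) == datetime(2021, 1, 27, 9)
assert parse_start('January 27, 2021', COMMITTEE) == datetime(2021, 1, 27, 13, 30)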
{"blob_id": "e59a8db10ff05797345353182cb7d141482091ec", "bodies": ["self.explanation_type = explanation_type\nself._internal_obj = internal_obj\nself.feature_names = feature_names\nself.feature_types = feature_types\nself.name = name\nself.selector = selector", "if key is None:\n return self._internal_obj['overall']\nreturn None", "from ..visual.plot import plot_performance_curve\ndata_dict = self.data(key)\nif data_dict is None:\n return None\nreturn plot_performance_curve(data_dict, xtitle='Recall', ytitle='Precision', baseline=False, title='PR Curve: ' + self.name, auc_prefix='Average Precision')"], "bodies_text": "<|body_start_0|>\n self.explanation_type = explanation_type\n self._internal_obj = internal_obj\n self.feature_names = feature_names\n self.feature_types = feature_types\n self.name = name\n self.selector = selector\n<|end_body_0|>\n\n<|body_start_1|>\n if key is None:\n return self._internal_obj['overall']\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n from ..visual.plot import plot_performance_curve\n data_dict = self.data(key)\n if data_dict is None:\n return None\n return plot_performance_curve(data_dict, xtitle='Recall', ytitle='Precision', baseline=False, title='PR Curve: ' + self.name, auc_prefix='Average Precision')\n<|end_body_2|>\n", "class_docstring": "Explanation object specific to PR explainer.", "class_name": "PRExplanation", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PRExplanation:\n \"\"\"Explanation object specific to PR explainer.\"\"\"\n\n def __init__(self, explanation_type, internal_obj, feature_names=None, feature_types=None, name=None, selector=None):\n \"\"\"Initializes class. Args: explanation_type: Type of explanation. internal_obj: A jsonable object that backs the explanation. feature_names: List of feature names. feature_types: List of feature types. name: User-defined name of explanation. selector: A dataframe whose indices correspond to explanation entries.\"\"\"\n <|body_0|>\n\n def data(self, key=None):\n \"\"\"Provides specific explanation data. Args: key: A number/string that references a specific data item. Returns: A serializable dictionary.\"\"\"\n <|body_1|>\n\n def visualize(self, key=None):\n \"\"\"Provides interactive visualizations. Args: key: Either a scalar or list that indexes the internal object for sub-plotting. If an overall visualization is requested, pass None. Returns: A Plotly figure.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.explanation_type = explanation_type\n self._internal_obj = internal_obj\n self.feature_names = feature_names\n self.feature_types = feature_types\n self.name = name\n self.selector = selector\n<|end_body_0|>\n\n<|body_start_1|>\n if key is None:\n return self._internal_obj['overall']\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n from ..visual.plot import plot_performance_curve\n data_dict = self.data(key)\n if data_dict is None:\n return None\n return plot_performance_curve(data_dict, xtitle='Recall', ytitle='Precision', baseline=False, title='PR Curve: ' + self.name, auc_prefix='Average Precision')\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000306", "length_bytes": 10362, "license_type": "permissive", "methods": [{"docstring": "Initializes class. Args: explanation_type: Type of explanation. internal_obj: A jsonable object that backs the explanation. feature_names: List of feature names. feature_types: List of feature types. name: User-defined name of explanation. 
selector: A dataframe whose indices correspond to explanation entries.", "name": "__init__", "signature": "def __init__(self, explanation_type, internal_obj, feature_names=None, feature_types=None, name=None, selector=None)"}, {"docstring": "Provides specific explanation data. Args: key: A number/string that references a specific data item. Returns: A serializable dictionary.", "name": "data", "signature": "def data(self, key=None)"}, {"docstring": "Provides interactive visualizations. Args: key: Either a scalar or list that indexes the internal object for sub-plotting. If an overall visualization is requested, pass None. Returns: A Plotly figure.", "name": "visualize", "signature": "def visualize(self, key=None)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_029045", "prompt": "Implement the Python class `PRExplanation` described below.\n\nClass description:\nExplanation object specific to PR explainer.\n\nMethod signatures and docstrings:\n- def __init__(self, explanation_type, internal_obj, feature_names=None, feature_types=None, name=None, selector=None): Initializes class. Args: explanation_type: Type of explanation. internal_obj: A jsonable object that backs the explanation. feature_names: List of feature names. feature_types: List of feature types. name: User-defined name of explanation. selector: A dataframe whose indices correspond to explanation entries.\n- def data(self, key=None): Provides specific explanation data. Args: key: A number/string that references a specific data item. Returns: A serializable dictionary.\n- def visualize(self, key=None): Provides interactive visualizations. Args: key: Either a scalar or list that indexes the internal object for sub-plotting. If an overall visualization is requested, pass None. Returns: A Plotly figure.", "prompted_full_text": "Implement the Python class `PRExplanation` described below.\n\nClass description:\nExplanation object specific to PR explainer.\n\nMethod signatures and docstrings:\n- def __init__(self, explanation_type, internal_obj, feature_names=None, feature_types=None, name=None, selector=None): Initializes class. Args: explanation_type: Type of explanation. internal_obj: A jsonable object that backs the explanation. feature_names: List of feature names. feature_types: List of feature types. name: User-defined name of explanation. selector: A dataframe whose indices correspond to explanation entries.\n- def data(self, key=None): Provides specific explanation data. Args: key: A number/string that references a specific data item. Returns: A serializable dictionary.\n- def visualize(self, key=None): Provides interactive visualizations. Args: key: Either a scalar or list that indexes the internal object for sub-plotting. If an overall visualization is requested, pass None. Returns: A Plotly figure.\n\n<|skeleton|>\nclass PRExplanation:\n \"\"\"Explanation object specific to PR explainer.\"\"\"\n\n def __init__(self, explanation_type, internal_obj, feature_names=None, feature_types=None, name=None, selector=None):\n \"\"\"Initializes class. Args: explanation_type: Type of explanation. internal_obj: A jsonable object that backs the explanation. feature_names: List of feature names. feature_types: List of feature types. name: User-defined name of explanation. selector: A dataframe whose indices correspond to explanation entries.\"\"\"\n <|body_0|>\n\n def data(self, key=None):\n \"\"\"Provides specific explanation data. Args: key: A number/string that references a specific data item. 
Returns: A serializable dictionary.\"\"\"\n <|body_1|>\n\n def visualize(self, key=None):\n \"\"\"Provides interactive visualizations. Args: key: Either a scalar or list that indexes the internal object for sub-plotting. If an overall visualization is requested, pass None. Returns: A Plotly figure.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.explanation_type = explanation_type\n self._internal_obj = internal_obj\n self.feature_names = feature_names\n self.feature_types = feature_types\n self.name = name\n self.selector = selector\n<|end_body_0|>\n\n<|body_start_1|>\n if key is None:\n return self._internal_obj['overall']\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n from ..visual.plot import plot_performance_curve\n data_dict = self.data(key)\n if data_dict is None:\n return None\n return plot_performance_curve(data_dict, xtitle='Recall', ytitle='Precision', baseline=False, title='PR Curve: ' + self.name, auc_prefix='Average Precision')\n<|end_body_2|>\n", "revision_id": "e6f38ea195aecbbd9d28c7183a83c65ada16e1ae", "skeleton": "<|skeleton|>\nclass PRExplanation:\n \"\"\"Explanation object specific to PR explainer.\"\"\"\n\n def __init__(self, explanation_type, internal_obj, feature_names=None, feature_types=None, name=None, selector=None):\n \"\"\"Initializes class. Args: explanation_type: Type of explanation. internal_obj: A jsonable object that backs the explanation. feature_names: List of feature names. feature_types: List of feature types. name: User-defined name of explanation. selector: A dataframe whose indices correspond to explanation entries.\"\"\"\n <|body_0|>\n\n def data(self, key=None):\n \"\"\"Provides specific explanation data. Args: key: A number/string that references a specific data item. Returns: A serializable dictionary.\"\"\"\n <|body_1|>\n\n def visualize(self, key=None):\n \"\"\"Provides interactive visualizations. Args: key: Either a scalar or list that indexes the internal object for sub-plotting. If an overall visualization is requested, pass None. Returns: A Plotly figure.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class PRExplanation:\n \"\"\"Explanation object specific to PR explainer.\"\"\"\n\n def __init__(self, explanation_type, internal_obj, feature_names=None, feature_types=None, name=None, selector=None):\n \"\"\"Initializes class. Args: explanation_type: Type of explanation. internal_obj: A jsonable object that backs the explanation. feature_names: List of feature names. feature_types: List of feature types. name: User-defined name of explanation. selector: A dataframe whose indices correspond to explanation entries.\"\"\"\n self.explanation_type = explanation_type\n self._internal_obj = internal_obj\n self.feature_names = feature_names\n self.feature_types = feature_types\n self.name = name\n self.selector = selector\n\n def data(self, key=None):\n \"\"\"Provides specific explanation data. Args: key: A number/string that references a specific data item. Returns: A serializable dictionary.\"\"\"\n if key is None:\n return self._internal_obj['overall']\n return None\n\n def visualize(self, key=None):\n \"\"\"Provides interactive visualizations. Args: key: Either a scalar or list that indexes the internal object for sub-plotting. If an overall visualization is requested, pass None. 
Returns: A Plotly figure.\"\"\"\n from ..visual.plot import plot_performance_curve\n data_dict = self.data(key)\n if data_dict is None:\n return None\n return plot_performance_curve(data_dict, xtitle='Recall', ytitle='Precision', baseline=False, title='PR Curve: ' + self.name, auc_prefix='Average Precision')\n", "source": "the_stack_v2_python_sparse", "source_path": "python/interpret-core/interpret/perf/_curve.py", "source_repo": "interpretml/interpret", "split": "val", "star_events_count": 3731}
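The `PRExplanation` record follows interpret's explanation protocol: `data(key)` returns a serializable dict (only the overall entry is supported here) and `visualize` hands it to `plot_performance_curve`. As a hedged illustration of the kind of data such an `internal_obj` could carry, here is a toy precision/recall sweep in pure Python; none of these names come from the interpret library:

def pr_points(y_true, scores, thresholds):
    # One (recall, precision) pair per decision threshold.
    points = []
    for t in thresholds:
        preds = [s >= t for s in scores]
        tp = sum(1 for p, y in zip(preds, y_true) if p and y)
        fp = sum(1 for p, y in zip(preds, y_true) if p and not y)
        fn = sum(1 for p, y in zip(preds, y_true) if not p and y)
        precision = tp / (tp + fp) if tp + fp else 1.0
        recall = tp / (tp + fn) if tp + fn else 0.0
        points.append((recall, precision))
    return points

overall = {'pr_points': pr_points([1, 0, 1, 1], [0.9, 0.6, 0.4, 0.8], [0.5, 0.7])}
print(overall['pr_points'])  # [(0.666..., 0.666...), (0.666..., 1.0)]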
{"blob_id": "aac0d5477577bb48862068d44bd862f2a34d51bf", "bodies": ["if num <= 0:\n return False\nreturn num & 1431655765 != 0 and (not num & num - 1)", "if num <= 0:\n return False\nreturn not num & num - 1 and (num - 1) % 3 == 0"], "bodies_text": "<|body_start_0|>\n if num <= 0:\n return False\n return num & 1431655765 != 0 and (not num & num - 1)\n<|end_body_0|>\n\n<|body_start_1|>\n if num <= 0:\n return False\n return not num & num - 1 and (num - 1) % 3 == 0\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def isPowerOfFour(self, num):\n \"\"\":type num: int :rtype: bool\"\"\"\n <|body_0|>\n\n def isPowerOfFour(self, num):\n \"\"\":type num: int :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if num <= 0:\n return False\n return num & 1431655765 != 0 and (not num & num - 1)\n<|end_body_0|>\n\n<|body_start_1|>\n if num <= 0:\n return False\n return not num & num - 1 and (num - 1) % 3 == 0\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000307", "length_bytes": 801, "license_type": "no_license", "methods": [{"docstring": ":type num: int :rtype: bool", "name": "isPowerOfFour", "signature": "def isPowerOfFour(self, num)"}, {"docstring": ":type num: int :rtype: bool", "name": "isPowerOfFour", "signature": "def isPowerOfFour(self, num)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_038566", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def isPowerOfFour(self, num): :type num: int :rtype: bool\n- def isPowerOfFour(self, num): :type num: int :rtype: bool", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def isPowerOfFour(self, num): :type num: int :rtype: bool\n- def isPowerOfFour(self, num): :type num: int :rtype: bool\n\n<|skeleton|>\nclass Solution:\n\n def isPowerOfFour(self, num):\n \"\"\":type num: int :rtype: bool\"\"\"\n <|body_0|>\n\n def isPowerOfFour(self, num):\n \"\"\":type num: int :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if num <= 0:\n return False\n return num & 1431655765 != 0 and (not num & num - 1)\n<|end_body_0|>\n\n<|body_start_1|>\n if num <= 0:\n return False\n return not num & num - 1 and (num - 1) % 3 == 0\n<|end_body_1|>\n", "revision_id": "6fec95b9b4d735727160905e754a698513bfb7d8", "skeleton": "<|skeleton|>\nclass Solution:\n\n def isPowerOfFour(self, num):\n \"\"\":type num: int :rtype: bool\"\"\"\n <|body_0|>\n\n def isPowerOfFour(self, num):\n \"\"\":type num: int :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def isPowerOfFour(self, num):\n \"\"\":type num: int :rtype: bool\"\"\"\n if num <= 0:\n return False\n return num & 1431655765 != 0 and (not num & num - 1)\n\n def isPowerOfFour(self, num):\n \"\"\":type num: int :rtype: bool\"\"\"\n if num <= 0:\n return False\n return not num & num - 1 and (num - 1) % 3 == 0\n", "source": "the_stack_v2_python_sparse", "source_path": "leetcode/bit-manipulation/power-of-four.py", "source_repo": "jwyx3/practices", "split": "val", "star_events_count": 2}
{"blob_id": "f6615216591df7501328ba8e7ae0433db1bacc88", "bodies": ["content_path_dict = {}\ntry:\n for file_name in os.listdir(directory_path):\n file_path = directory_path + '/' + file_name\n image_content = ImageInfoExtractor.parse_image_text(file_path)\n if image_content is not None:\n content_path_dict[image_content] = file_path\n return content_path_dict\nexcept OSError as e:\n logging.warning('could not open path')\n return None", "image_content = api.ocr_file(file_path)\nif image_content != '':\n image_content.replace('\\r\\n', '\\n')\n logging.info('the following content was detected by the api: ' + image_content + ' from this image path: ' + file_path)\n return image_content\nelse:\n logging.warning('the api did not succeed to detect image content from the following image path: ' + file_path + '\\nTherefore, the decision is not saved to database')"], "bodies_text": "<|body_start_0|>\n content_path_dict = {}\n try:\n for file_name in os.listdir(directory_path):\n file_path = directory_path + '/' + file_name\n image_content = ImageInfoExtractor.parse_image_text(file_path)\n if image_content is not None:\n content_path_dict[image_content] = file_path\n return content_path_dict\n except OSError as e:\n logging.warning('could not open path')\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n image_content = api.ocr_file(file_path)\n if image_content != '':\n image_content.replace('\\r\\n', '\\n')\n logging.info('the following content was detected by the api: ' + image_content + ' from this image path: ' + file_path)\n return image_content\n else:\n logging.warning('the api did not succeed to detect image content from the following image path: ' + file_path + '\\nTherefore, the decision is not saved to database')\n<|end_body_1|>\n", "class_docstring": "", "class_name": "ImageInfoExtractor", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ImageInfoExtractor:\n\n def parse_images_text_from_dir_path(directory_path='images'):\n \"\"\"@:param (str) directory path from which it iterates through all images, sends a request to ocrspace API and get plate content for each image @:returns (dict) key(str): licence plate content. 
value(str): image path\"\"\"\n <|body_0|>\n\n def parse_image_text(file_path):\n \"\"\"@:param (str) image path, the method sends a request to ocrspace API with the image path and get plate content for each image @:returns image plate licence content\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n content_path_dict = {}\n try:\n for file_name in os.listdir(directory_path):\n file_path = directory_path + '/' + file_name\n image_content = ImageInfoExtractor.parse_image_text(file_path)\n if image_content is not None:\n content_path_dict[image_content] = file_path\n return content_path_dict\n except OSError as e:\n logging.warning('could not open path')\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n image_content = api.ocr_file(file_path)\n if image_content != '':\n image_content.replace('\\r\\n', '\\n')\n logging.info('the following content was detected by the api: ' + image_content + ' from this image path: ' + file_path)\n return image_content\n else:\n logging.warning('the api did not succeed to detect image content from the following image path: ' + file_path + '\\nTherefore, the decision is not saved to database')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000308", "length_bytes": 1781, "license_type": "no_license", "methods": [{"docstring": "@:param (str) directory path from which it iterates through all images, sends a request to ocrspace API and get plate content for each image @:returns (dict) key(str): licence plate content. value(str): image path", "name": "parse_images_text_from_dir_path", "signature": "def parse_images_text_from_dir_path(directory_path='images')"}, {"docstring": "@:param (str) image path, the method sends a request to ocrspace API with the image path and get plate content for each image @:returns image plate licence content", "name": "parse_image_text", "signature": "def parse_image_text(file_path)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_012196", "prompt": "Implement the Python class `ImageInfoExtractor` described below.\n\nClass description:\nImplement the ImageInfoExtractor class.\n\nMethod signatures and docstrings:\n- def parse_images_text_from_dir_path(directory_path='images'): @:param (str) directory path from which it iterates through all images, sends a request to ocrspace API and get plate content for each image @:returns (dict) key(str): licence plate content. value(str): image path\n- def parse_image_text(file_path): @:param (str) image path, the method sends a request to ocrspace API with the image path and get plate content for each image @:returns image plate licence content", "prompted_full_text": "Implement the Python class `ImageInfoExtractor` described below.\n\nClass description:\nImplement the ImageInfoExtractor class.\n\nMethod signatures and docstrings:\n- def parse_images_text_from_dir_path(directory_path='images'): @:param (str) directory path from which it iterates through all images, sends a request to ocrspace API and get plate content for each image @:returns (dict) key(str): licence plate content. 
value(str): image path\n- def parse_image_text(file_path): @:param (str) image path, the method sends a request to ocrspace API with the image path and get plate content for each image @:returns image plate licence content\n\n<|skeleton|>\nclass ImageInfoExtractor:\n\n def parse_images_text_from_dir_path(directory_path='images'):\n \"\"\"@:param (str) directory path from which it iterates through all images, sends a request to ocrspace API and get plate content for each image @:returns (dict) key(str): licence plate content. value(str): image path\"\"\"\n <|body_0|>\n\n def parse_image_text(file_path):\n \"\"\"@:param (str) image path, the method sends a request to ocrspace API with the image path and get plate content for each image @:returns image plate licence content\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n content_path_dict = {}\n try:\n for file_name in os.listdir(directory_path):\n file_path = directory_path + '/' + file_name\n image_content = ImageInfoExtractor.parse_image_text(file_path)\n if image_content is not None:\n content_path_dict[image_content] = file_path\n return content_path_dict\n except OSError as e:\n logging.warning('could not open path')\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n image_content = api.ocr_file(file_path)\n if image_content != '':\n image_content.replace('\\r\\n', '\\n')\n logging.info('the following content was detected by the api: ' + image_content + ' from this image path: ' + file_path)\n return image_content\n else:\n logging.warning('the api did not succeed to detect image content from the following image path: ' + file_path + '\\nTherefore, the decision is not saved to database')\n<|end_body_1|>\n", "revision_id": "55f3df08b65957502a123d265df14f56b8788859", "skeleton": "<|skeleton|>\nclass ImageInfoExtractor:\n\n def parse_images_text_from_dir_path(directory_path='images'):\n \"\"\"@:param (str) directory path from which it iterates through all images, sends a request to ocrspace API and get plate content for each image @:returns (dict) key(str): licence plate content. value(str): image path\"\"\"\n <|body_0|>\n\n def parse_image_text(file_path):\n \"\"\"@:param (str) image path, the method sends a request to ocrspace API with the image path and get plate content for each image @:returns image plate licence content\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ImageInfoExtractor:\n def parse_images_text_from_dir_path(directory_path='images'):\n \"\"\"@:param (str) directory path from which it iterates through all images, sends a request to ocrspace API and get plate content for each image @:returns (dict) key(str): licence plate content. 
value(str): image path\"\"\"\n content_path_dict = {}\n try:\n for file_name in os.listdir(directory_path):\n file_path = directory_path + '/' + file_name\n image_content = ImageInfoExtractor.parse_image_text(file_path)\n if image_content is not None:\n content_path_dict[image_content] = file_path\n return content_path_dict\n except OSError as e:\n logging.warning('could not open path')\n return None\n\n def parse_image_text(file_path):\n \"\"\"@:param (str) image path, the method sends a request to ocrspace API with the image path and get plate content for each image @:returns image plate licence content\"\"\"\n image_content = api.ocr_file(file_path)\n if image_content != '':\n image_content.replace('\\r\\n', '\\n')\n logging.info('the following content was detected by the api: ' + image_content + ' from this image path: ' + file_path)\n return image_content\n else:\n logging.warning('the api did not succeed to detect image content from the following image path: ' + file_path + '\\nTherefore, the decision is not saved to database')\n", "source": "the_stack_v2_python_sparse", "source_path": "ImageInfoExtractor/ImageInfoExtractor.py", "source_repo": "Itayventura/ParkingLot", "split": "val", "star_events_count": 0}
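Two fixes suggest themselves for the `ImageInfoExtractor` record: strings are immutable, so the bare `image_content.replace('\r\n', '\n')` discards its result and must be reassigned, and both methods take no `self`, so they only work as plain attribute access on the class and would read better as `@staticmethod`s (using `os.path.join` over '/'-concatenation is also more portable). A hedged rework of the directory walk; `ocr_file` stands in for the record's external ocrspace `api` object and is a stub, not the real API:

import logging
import os

def ocr_file(path):
    # Stub for the ocrspace API call used in the record (assumed behaviour).
    return 'ABC-1234'

def parse_images_text_from_dir_path(directory_path='images'):
    # Map detected plate text -> image path, skipping images the OCR cannot read.
    content_path_dict = {}
    try:
        names = os.listdir(directory_path)
    except OSError:
        logging.warning('could not open path %s', directory_path)
        return None
    for file_name in names:
        file_path = os.path.join(directory_path, file_name)
        content = ocr_file(file_path)
        if content:
            content = content.replace('\r\n', '\n')  # reassign: replace() is not in-place
            content_path_dict[content] = file_path
    return content_path_dict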
{"blob_id": "8910402101459a694216f3362416699afe2f1d60", "bodies": ["if use_filemanager is None:\n super().__init__()\nelse:\n super().__init__(use_filemanager)\nself.prefix = prefix", "result_string = '\\n# Statistical Callback\\n'\nprint('\\n# Statistical Callback')\nfor key in data_dict:\n print(f'## {key}')\n print(data_dict[key].to_pandas().describe())\n result_string += f'## {key}\\n {data_dict[key].to_pandas().describe()} \\n'\nwith open(self.get_path(f'{self.prefix}_Statistics.md'), 'w') as file:\n file.write(result_string)"], "bodies_text": "<|body_start_0|>\n if use_filemanager is None:\n super().__init__()\n else:\n super().__init__(use_filemanager)\n self.prefix = prefix\n<|end_body_0|>\n\n<|body_start_1|>\n result_string = '\\n# Statistical Callback\\n'\n print('\\n# Statistical Callback')\n for key in data_dict:\n print(f'## {key}')\n print(data_dict[key].to_pandas().describe())\n result_string += f'## {key}\\n {data_dict[key].to_pandas().describe()} \\n'\n with open(self.get_path(f'{self.prefix}_Statistics.md'), 'w') as file:\n file.write(result_string)\n<|end_body_1|>\n", "class_docstring": "Statistic callback class to print out statistical information about the results into terminal for better understanding and debugging. :param BaseCallback: Base callback class. :type BaseCallback: BaseCallback", "class_name": "StatisticCallback", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass StatisticCallback:\n \"\"\"Statistic callback class to print out statistical information about the results into terminal for better understanding and debugging. :param BaseCallback: Base callback class. :type BaseCallback: BaseCallback\"\"\"\n\n def __init__(self, prefix: str, use_filemanager: Optional[bool]=None):\n \"\"\"Initialise Statistical callback object given a filename and optional use_filemanager flag. :param prefix: Prefix to use for the line plot output file. :type prefix: str :param use_filemanager: Flag to denote if the filemanager of the pipeline should be used. :type use_filemanager: Optional[bool]\"\"\"\n <|body_0|>\n\n def __call__(self, data_dict: Dict[str, xr.DataArray]):\n \"\"\"Implementation of abstract base method to print out pipeline statistical information of step results into terminal. :param data_dict: Dict of DataArrays that statistical information should be printed out. :type data_dict: Dict[str, xr.DataArray]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if use_filemanager is None:\n super().__init__()\n else:\n super().__init__(use_filemanager)\n self.prefix = prefix\n<|end_body_0|>\n\n<|body_start_1|>\n result_string = '\\n# Statistical Callback\\n'\n print('\\n# Statistical Callback')\n for key in data_dict:\n print(f'## {key}')\n print(data_dict[key].to_pandas().describe())\n result_string += f'## {key}\\n {data_dict[key].to_pandas().describe()} \\n'\n with open(self.get_path(f'{self.prefix}_Statistics.md'), 'w') as file:\n file.write(result_string)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000309", "length_bytes": 2615, "license_type": "permissive", "methods": [{"docstring": "Initialise Statistical callback object given a filename and optional use_filemanager flag. :param prefix: Prefix to use for the line plot output file. :type prefix: str :param use_filemanager: Flag to denote if the filemanager of the pipeline should be used. 
:type use_filemanager: Optional[bool]", "name": "__init__", "signature": "def __init__(self, prefix: str, use_filemanager: Optional[bool]=None)"}, {"docstring": "Implementation of abstract base method to print out pipeline statistical information of step results into terminal. :param data_dict: Dict of DataArrays that statistical information should be printed out. :type data_dict: Dict[str, xr.DataArray]", "name": "__call__", "signature": "def __call__(self, data_dict: Dict[str, xr.DataArray])"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002260", "prompt": "Implement the Python class `StatisticCallback` described below.\n\nClass description:\nStatistic callback class to print out statistical information about the results into terminal for better understanding and debugging. :param BaseCallback: Base callback class. :type BaseCallback: BaseCallback\n\nMethod signatures and docstrings:\n- def __init__(self, prefix: str, use_filemanager: Optional[bool]=None): Initialise Statistical callback object given a filename and optional use_filemanager flag. :param prefix: Prefix to use for the line plot output file. :type prefix: str :param use_filemanager: Flag to denote if the filemanager of the pipeline should be used. :type use_filemanager: Optional[bool]\n- def __call__(self, data_dict: Dict[str, xr.DataArray]): Implementation of abstract base method to print out pipeline statistical information of step results into terminal. :param data_dict: Dict of DataArrays that statistical information should be printed out. :type data_dict: Dict[str, xr.DataArray]", "prompted_full_text": "Implement the Python class `StatisticCallback` described below.\n\nClass description:\nStatistic callback class to print out statistical information about the results into terminal for better understanding and debugging. :param BaseCallback: Base callback class. :type BaseCallback: BaseCallback\n\nMethod signatures and docstrings:\n- def __init__(self, prefix: str, use_filemanager: Optional[bool]=None): Initialise Statistical callback object given a filename and optional use_filemanager flag. :param prefix: Prefix to use for the line plot output file. :type prefix: str :param use_filemanager: Flag to denote if the filemanager of the pipeline should be used. :type use_filemanager: Optional[bool]\n- def __call__(self, data_dict: Dict[str, xr.DataArray]): Implementation of abstract base method to print out pipeline statistical information of step results into terminal. :param data_dict: Dict of DataArrays that statistical information should be printed out. :type data_dict: Dict[str, xr.DataArray]\n\n<|skeleton|>\nclass StatisticCallback:\n \"\"\"Statistic callback class to print out statistical information about the results into terminal for better understanding and debugging. :param BaseCallback: Base callback class. :type BaseCallback: BaseCallback\"\"\"\n\n def __init__(self, prefix: str, use_filemanager: Optional[bool]=None):\n \"\"\"Initialise Statistical callback object given a filename and optional use_filemanager flag. :param prefix: Prefix to use for the line plot output file. :type prefix: str :param use_filemanager: Flag to denote if the filemanager of the pipeline should be used. :type use_filemanager: Optional[bool]\"\"\"\n <|body_0|>\n\n def __call__(self, data_dict: Dict[str, xr.DataArray]):\n \"\"\"Implementation of abstract base method to print out pipeline statistical information of step results into terminal. 
:param data_dict: Dict of DataArrays that statistical information should be printed out. :type data_dict: Dict[str, xr.DataArray]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if use_filemanager is None:\n super().__init__()\n else:\n super().__init__(use_filemanager)\n self.prefix = prefix\n<|end_body_0|>\n\n<|body_start_1|>\n result_string = '\\n# Statistical Callback\\n'\n print('\\n# Statistical Callback')\n for key in data_dict:\n print(f'## {key}')\n print(data_dict[key].to_pandas().describe())\n result_string += f'## {key}\\n {data_dict[key].to_pandas().describe()} \\n'\n with open(self.get_path(f'{self.prefix}_Statistics.md'), 'w') as file:\n file.write(result_string)\n<|end_body_1|>\n", "revision_id": "af956f8b1cedf87366259b6010a9f73e6daf5522", "skeleton": "<|skeleton|>\nclass StatisticCallback:\n \"\"\"Statistic callback class to print out statistical information about the results into terminal for better understanding and debugging. :param BaseCallback: Base callback class. :type BaseCallback: BaseCallback\"\"\"\n\n def __init__(self, prefix: str, use_filemanager: Optional[bool]=None):\n \"\"\"Initialise Statistical callback object given a filename and optional use_filemanager flag. :param prefix: Prefix to use for the line plot output file. :type prefix: str :param use_filemanager: Flag to denote if the filemanager of the pipeline should be used. :type use_filemanager: Optional[bool]\"\"\"\n <|body_0|>\n\n def __call__(self, data_dict: Dict[str, xr.DataArray]):\n \"\"\"Implementation of abstract base method to print out pipeline statistical information of step results into terminal. :param data_dict: Dict of DataArrays that statistical information should be printed out. :type data_dict: Dict[str, xr.DataArray]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class StatisticCallback:\n \"\"\"Statistic callback class to print out statistical information about the results into terminal for better understanding and debugging. :param BaseCallback: Base callback class. :type BaseCallback: BaseCallback\"\"\"\n\n def __init__(self, prefix: str, use_filemanager: Optional[bool]=None):\n \"\"\"Initialise Statistical callback object given a filename and optional use_filemanager flag. :param prefix: Prefix to use for the line plot output file. :type prefix: str :param use_filemanager: Flag to denote if the filemanager of the pipeline should be used. :type use_filemanager: Optional[bool]\"\"\"\n if use_filemanager is None:\n super().__init__()\n else:\n super().__init__(use_filemanager)\n self.prefix = prefix\n\n def __call__(self, data_dict: Dict[str, xr.DataArray]):\n \"\"\"Implementation of abstract base method to print out pipeline statistical information of step results into terminal. :param data_dict: Dict of DataArrays that statistical information should be printed out. 
:type data_dict: Dict[str, xr.DataArray]\"\"\"\n result_string = '\\n# Statistical Callback\\n'\n print('\\n# Statistical Callback')\n for key in data_dict:\n print(f'## {key}')\n print(data_dict[key].to_pandas().describe())\n result_string += f'## {key}\\n {data_dict[key].to_pandas().describe()} \\n'\n with open(self.get_path(f'{self.prefix}_Statistics.md'), 'w') as file:\n file.write(result_string)\n", "source": "the_stack_v2_python_sparse", "source_path": "pywatts/callbacks/debug_callback.py", "source_repo": "KIT-IAI/pyWATTS", "split": "val", "star_events_count": 47}
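The StatisticCallback record above boils down to one pattern: for each key in the pipeline's result dict, run `DataArray.to_pandas().describe()`, echo it to the terminal, and append it to a Markdown report. A minimal standalone sketch of that pattern follows; the sample arrays and the `demo_Statistics.md` path are illustrative stand-ins, since `BaseCallback` and `self.get_path` belong to the pyWATTS pipeline and are not reproduced here.

```python
# Sketch of the describe-and-report loop from StatisticCallback.__call__,
# assuming a Dict[str, xr.DataArray] input; data and output path are stand-ins.
import numpy as np
import xarray as xr

data_dict = {
    "load_forecast": xr.DataArray(np.random.rand(96), dims="time"),
    "residuals": xr.DataArray(np.random.randn(96), dims="time"),
}

result_string = "\n# Statistical Callback\n"
print("\n# Statistical Callback")
for key, array in data_dict.items():
    stats = array.to_pandas().describe()  # count/mean/std/min/quartiles/max
    print(f"## {key}")
    print(stats)
    result_string += f"## {key}\n {stats} \n"

with open("demo_Statistics.md", "w") as file:  # stands in for self.get_path(...)
    file.write(result_string)
```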
{"blob_id": "b30e52c39672c8c520c71d0e4f39bf9ad27ea887", "bodies": ["n = nums[0]\nm = len(nums)\nif m == 1:\n return [[n]]\nnew_perm = []\nperms = self.permute(nums[1:])\nfor perm in perms:\n for i in range(m):\n new_perm.append(perm[:i] + [n] + perm[i:])\nreturn new_perm", "n = nums[0]\nm = len(nums)\nif m == 1:\n return [[n]]\nnew_perm = []\nperms = self.permuteUnique(nums[1:])\nfor perm in perms:\n for i in range(m):\n l = perm[:i] + [n] + perm[i:]\n new_perm.append(l)\n if i < m - 1 and perm[i] == n:\n break\nreturn new_perm", "m = len(nums)\nj = m - 1\nwhile j > 0 and nums[j] <= nums[j - 1]:\n j = j - 1\nif j == 0:\n return nums[::-1]\nn = nums[j - 1]\nleft, right = (j, m - 1)\nwhile left < right:\n middle = (left + right + 1) // 2\n if nums[middle] > n:\n left = middle\n else:\n right = middle - 1\nnums[j - 1] = nums[left]\nnums[left] = n\nreturn nums[:j] + nums[m - 1:j - 1:-1]"], "bodies_text": "<|body_start_0|>\n n = nums[0]\n m = len(nums)\n if m == 1:\n return [[n]]\n new_perm = []\n perms = self.permute(nums[1:])\n for perm in perms:\n for i in range(m):\n new_perm.append(perm[:i] + [n] + perm[i:])\n return new_perm\n<|end_body_0|>\n\n<|body_start_1|>\n n = nums[0]\n m = len(nums)\n if m == 1:\n return [[n]]\n new_perm = []\n perms = self.permuteUnique(nums[1:])\n for perm in perms:\n for i in range(m):\n l = perm[:i] + [n] + perm[i:]\n new_perm.append(l)\n if i < m - 1 and perm[i] == n:\n break\n return new_perm\n<|end_body_1|>\n\n<|body_start_2|>\n m = len(nums)\n j = m - 1\n while j > 0 and nums[j] <= nums[j - 1]:\n j = j - 1\n if j == 0:\n return nums[::-1]\n n = nums[j - 1]\n left, right = (j, m - 1)\n while left < right:\n middle = (left + right + 1) // 2\n if nums[middle] > n:\n left = middle\n else:\n right = middle - 1\n nums[j - 1] = nums[left]\n nums[left] = n\n return nums[:j] + nums[m - 1:j - 1:-1]\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def permute(self, nums):\n \"\"\"distinct numbers\"\"\"\n <|body_0|>\n\n def permuteUnique(self, nums):\n \"\"\"might contain duplicates\"\"\"\n <|body_1|>\n\n def nextPermutation(self, nums):\n \"\"\"next dict permutation\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n n = nums[0]\n m = len(nums)\n if m == 1:\n return [[n]]\n new_perm = []\n perms = self.permute(nums[1:])\n for perm in perms:\n for i in range(m):\n new_perm.append(perm[:i] + [n] + perm[i:])\n return new_perm\n<|end_body_0|>\n\n<|body_start_1|>\n n = nums[0]\n m = len(nums)\n if m == 1:\n return [[n]]\n new_perm = []\n perms = self.permuteUnique(nums[1:])\n for perm in perms:\n for i in range(m):\n l = perm[:i] + [n] + perm[i:]\n new_perm.append(l)\n if i < m - 1 and perm[i] == n:\n break\n return new_perm\n<|end_body_1|>\n\n<|body_start_2|>\n m = len(nums)\n j = m - 1\n while j > 0 and nums[j] <= nums[j - 1]:\n j = j - 1\n if j == 0:\n return nums[::-1]\n n = nums[j - 1]\n left, right = (j, m - 1)\n while left < right:\n middle = (left + right + 1) // 2\n if nums[middle] > n:\n left = middle\n else:\n right = middle - 1\n nums[j - 1] = nums[left]\n nums[left] = n\n return nums[:j] + nums[m - 1:j - 1:-1]\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000310", "length_bytes": 1609, "license_type": "no_license", "methods": [{"docstring": "distinct numbers", "name": "permute", "signature": "def permute(self, nums)"}, {"docstring": "might contain duplicates", "name": "permuteUnique", 
"signature": "def permuteUnique(self, nums)"}, {"docstring": "next dict permutation", "name": "nextPermutation", "signature": "def nextPermutation(self, nums)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_test_000937", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def permute(self, nums): distinct numbers\n- def permuteUnique(self, nums): might contain duplicates\n- def nextPermutation(self, nums): next dict permutation", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def permute(self, nums): distinct numbers\n- def permuteUnique(self, nums): might contain duplicates\n- def nextPermutation(self, nums): next dict permutation\n\n<|skeleton|>\nclass Solution:\n\n def permute(self, nums):\n \"\"\"distinct numbers\"\"\"\n <|body_0|>\n\n def permuteUnique(self, nums):\n \"\"\"might contain duplicates\"\"\"\n <|body_1|>\n\n def nextPermutation(self, nums):\n \"\"\"next dict permutation\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n n = nums[0]\n m = len(nums)\n if m == 1:\n return [[n]]\n new_perm = []\n perms = self.permute(nums[1:])\n for perm in perms:\n for i in range(m):\n new_perm.append(perm[:i] + [n] + perm[i:])\n return new_perm\n<|end_body_0|>\n\n<|body_start_1|>\n n = nums[0]\n m = len(nums)\n if m == 1:\n return [[n]]\n new_perm = []\n perms = self.permuteUnique(nums[1:])\n for perm in perms:\n for i in range(m):\n l = perm[:i] + [n] + perm[i:]\n new_perm.append(l)\n if i < m - 1 and perm[i] == n:\n break\n return new_perm\n<|end_body_1|>\n\n<|body_start_2|>\n m = len(nums)\n j = m - 1\n while j > 0 and nums[j] <= nums[j - 1]:\n j = j - 1\n if j == 0:\n return nums[::-1]\n n = nums[j - 1]\n left, right = (j, m - 1)\n while left < right:\n middle = (left + right + 1) // 2\n if nums[middle] > n:\n left = middle\n else:\n right = middle - 1\n nums[j - 1] = nums[left]\n nums[left] = n\n return nums[:j] + nums[m - 1:j - 1:-1]\n<|end_body_2|>\n", "revision_id": "c9fb0b623501b3746444b05da55405e3a6c42bbf", "skeleton": "<|skeleton|>\nclass Solution:\n\n def permute(self, nums):\n \"\"\"distinct numbers\"\"\"\n <|body_0|>\n\n def permuteUnique(self, nums):\n \"\"\"might contain duplicates\"\"\"\n <|body_1|>\n\n def nextPermutation(self, nums):\n \"\"\"next dict permutation\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def permute(self, nums):\n \"\"\"distinct numbers\"\"\"\n n = nums[0]\n m = len(nums)\n if m == 1:\n return [[n]]\n new_perm = []\n perms = self.permute(nums[1:])\n for perm in perms:\n for i in range(m):\n new_perm.append(perm[:i] + [n] + perm[i:])\n return new_perm\n\n def permuteUnique(self, nums):\n \"\"\"might contain duplicates\"\"\"\n n = nums[0]\n m = len(nums)\n if m == 1:\n return [[n]]\n new_perm = []\n perms = self.permuteUnique(nums[1:])\n for perm in perms:\n for i in range(m):\n l = perm[:i] + [n] + perm[i:]\n new_perm.append(l)\n if i < m - 1 and perm[i] == n:\n break\n return new_perm\n\n def nextPermutation(self, nums):\n \"\"\"next dict permutation\"\"\"\n m = len(nums)\n j = m - 1\n while j > 0 and nums[j] <= nums[j - 1]:\n j = j - 1\n if j == 0:\n return nums[::-1]\n n = nums[j - 1]\n left, right = (j, m - 1)\n while left < right:\n 
middle = (left + right + 1) // 2\n if nums[middle] > n:\n left = middle\n else:\n right = middle - 1\n nums[j - 1] = nums[left]\n nums[left] = n\n return nums[:j] + nums[m - 1:j - 1:-1]\n", "source": "the_stack_v2_python_sparse", "source_path": "Archive-1/Permutations.py", "source_repo": "smsxgz/my-leetcode", "split": "val", "star_events_count": 0}
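A quick cross-check of the three methods in the Solution record above, assuming the class is in scope exactly as captured. Note that permute and permuteUnique read nums[0] unconditionally, so they expect non-empty input; itertools.permutations serves as the oracle here.

```python
# Cross-check against itertools; Solution is assumed defined as in the record.
from itertools import permutations

s = Solution()

# permute: all 3! orderings of distinct numbers.
assert sorted(s.permute([1, 2, 3])) == sorted(map(list, permutations([1, 2, 3])))

# permuteUnique: the duplicate 1s collapse 6 orderings down to 3 unique ones.
assert sorted(s.permuteUnique([1, 1, 2])) == sorted(map(list, set(permutations([1, 1, 2]))))

# nextPermutation: find the rightmost ascent, binary-search the swap partner,
# reverse the suffix; the last ordering wraps around to the first.
assert s.nextPermutation([1, 3, 2]) == [2, 1, 3]
assert s.nextPermutation([3, 2, 1]) == [1, 2, 3]
```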
{"blob_id": "5696f37f25d8be00fb98a60000a38f24bbbf14ab", "bodies": ["es_client = new_es_client()\npub = BaseESPublication(project_id=project_id, revision=revision, using=es_client)\nlatest_revision = IndexedPublication.max_revision(project_id=project_id, using=es_client)\nlatest_pub_dict = None\nif latest_revision > 0 and latest_revision != revision:\n latest_pub = BaseESPublication(project_id=project_id, revision=latest_revision, using=es_client)\n if latest_pub is not None and hasattr(latest_pub, 'project'):\n latest_pub_dict = latest_pub.to_dict()\nif pub is not None and hasattr(pub, 'project'):\n pub_dict = pub.to_dict()\n if pub_dict['project']['value']['projectType'] != 'other':\n metrics.info('Data Depot', extra={'user': request.user.username, 'sessionId': getattr(request.session, 'session_key', ''), 'operation': 'listing', 'agent': request.META.get('HTTP_USER_AGENT'), 'ip': get_client_ip(request), 'info': {'api': 'agave', 'systemId': 'designsafe.storage.published', 'filePath': project_id, 'query': {}}})\n if latest_pub_dict:\n pub_dict['latestRevision'] = latest_pub_dict\n return JsonResponse(pub_dict)\nelse:\n return JsonResponse({'status': 404, 'message': 'Not found'}, status=404)", "if request.is_ajax():\n data = json.loads(request.body)\nelse:\n data = request.POST\nstatus = data.get('status', 'saved')\nrevision = data.get('revision', None)\nrevision_text = data.get('revisionText', None)\nrevision_titles = data.get('revisionTitles', None)\nrevised_authors = data.get('revisionAuthors', None)\nselected_files = data.get('selectedFiles', None)\nproject_id = data['publication']['project']['value']['projectId']\ncurrent_revision = None\nif revision:\n latest_revision = IndexedPublication.max_revision(project_id=project_id)\n current_revision = latest_revision + 1 if latest_revision >= 2 else 2\npub = initilize_publication(data['publication'], status, revision=current_revision, revision_text=revision_text, revision_titles=revision_titles)\nif data.get('status', 'save').startswith('publish'):\n (tasks.freeze_publication_meta.s(project_id=pub.projectId, entity_uuids=data.get('mainEntityUuids'), revision=current_revision, revised_authors=revised_authors).set(queue='api') | group(tasks.save_publication.si(project_id=pub.projectId, entity_uuids=data.get('mainEntityUuids'), revision=current_revision, revised_authors=revised_authors).set(queue='files', countdown=60), tasks.copy_publication_files_to_corral.si(project_id=pub.projectId, revision=current_revision, selected_files=selected_files).set(queue='files', countdown=60)) | tasks.swap_file_tag_uuids.si(pub.projectId, revision=current_revision) | tasks.set_publish_status.si(project_id=pub.projectId, entity_uuids=data.get('mainEntityUuids'), revision=current_revision) | tasks.zip_publication_files.si(pub.projectId, revision=current_revision) | tasks.email_user_publication_request_confirmation.si(request.user.username) | tasks.check_published_files.si(pub.projectId, revision=current_revision, selected_files=selected_files)).apply_async()\nreturn JsonResponse({'success': 'Project is publishing.'}, status=200)"], "bodies_text": "<|body_start_0|>\n es_client = new_es_client()\n pub = BaseESPublication(project_id=project_id, revision=revision, using=es_client)\n latest_revision = IndexedPublication.max_revision(project_id=project_id, using=es_client)\n latest_pub_dict = None\n if latest_revision > 0 and latest_revision != revision:\n latest_pub = BaseESPublication(project_id=project_id, revision=latest_revision, using=es_client)\n if latest_pub is 
not None and hasattr(latest_pub, 'project'):\n latest_pub_dict = latest_pub.to_dict()\n if pub is not None and hasattr(pub, 'project'):\n pub_dict = pub.to_dict()\n if pub_dict['project']['value']['projectType'] != 'other':\n metrics.info('Data Depot', extra={'user': request.user.username, 'sessionId': getattr(request.session, 'session_key', ''), 'operation': 'listing', 'agent': request.META.get('HTTP_USER_AGENT'), 'ip': get_client_ip(request), 'info': {'api': 'agave', 'systemId': 'designsafe.storage.published', 'filePath': project_id, 'query': {}}})\n if latest_pub_dict:\n pub_dict['latestRevision'] = latest_pub_dict\n return JsonResponse(pub_dict)\n else:\n return JsonResponse({'status': 404, 'message': 'Not found'}, status=404)\n<|end_body_0|>\n\n<|body_start_1|>\n if request.is_ajax():\n data = json.loads(request.body)\n else:\n data = request.POST\n status = data.get('status', 'saved')\n revision = data.get('revision', None)\n revision_text = data.get('revisionText', None)\n revision_titles = data.get('revisionTitles', None)\n revised_authors = data.get('revisionAuthors', None)\n selected_files = data.get('selectedFiles', None)\n project_id = data['publication']['project']['value']['projectId']\n current_revision = None\n if revision:\n latest_revision = IndexedPublication.max_revision(project_id=project_id)\n current_revision = latest_revision + 1 if latest_revision >= 2 else 2\n pub = initilize_publication(data['publication'], status, revision=current_revision, revision_text=revision_text, revision_titles=revision_titles)\n if data.get('status', 'save').startswith('publish'):\n (tasks.freeze_publication_meta.s(project_id=pub.projectId, entity_uuids=data.get('mainEntityUuids'), revision=current_revision, revised_authors=revised_authors).set(queue='api') | group(tasks.save_publication.si(project_id=pub.projectId, entity_uuids=data.get('mainEntityUuids'), revision=current_revision, revised_authors=revised_authors).set(queue='files', countdown=60), tasks.copy_publication_files_to_corral.si(project_id=pub.projectId, revision=current_revision, selected_files=selected_files).set(queue='files', countdown=60)) | tasks.swap_file_tag_uuids.si(pub.projectId, revision=current_revision) | tasks.set_publish_status.si(project_id=pub.projectId, entity_uuids=data.get('mainEntityUuids'), revision=current_revision) | tasks.zip_publication_files.si(pub.projectId, revision=current_revision) | tasks.email_user_publication_request_confirmation.si(request.user.username) | tasks.check_published_files.si(pub.projectId, revision=current_revision, selected_files=selected_files)).apply_async()\n return JsonResponse({'success': 'Project is publishing.'}, status=200)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "PublicationView", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PublicationView:\n\n def get(self, request, project_id, revision=None):\n \"\"\"Get a publication. If a revision is not supplied, return the \"Original\" publication. 
Include the latest version if it is not being queried.\"\"\"\n <|body_0|>\n\n def post(self, request, **kwargs):\n \"\"\"Publish a project or version a publication\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n es_client = new_es_client()\n pub = BaseESPublication(project_id=project_id, revision=revision, using=es_client)\n latest_revision = IndexedPublication.max_revision(project_id=project_id, using=es_client)\n latest_pub_dict = None\n if latest_revision > 0 and latest_revision != revision:\n latest_pub = BaseESPublication(project_id=project_id, revision=latest_revision, using=es_client)\n if latest_pub is not None and hasattr(latest_pub, 'project'):\n latest_pub_dict = latest_pub.to_dict()\n if pub is not None and hasattr(pub, 'project'):\n pub_dict = pub.to_dict()\n if pub_dict['project']['value']['projectType'] != 'other':\n metrics.info('Data Depot', extra={'user': request.user.username, 'sessionId': getattr(request.session, 'session_key', ''), 'operation': 'listing', 'agent': request.META.get('HTTP_USER_AGENT'), 'ip': get_client_ip(request), 'info': {'api': 'agave', 'systemId': 'designsafe.storage.published', 'filePath': project_id, 'query': {}}})\n if latest_pub_dict:\n pub_dict['latestRevision'] = latest_pub_dict\n return JsonResponse(pub_dict)\n else:\n return JsonResponse({'status': 404, 'message': 'Not found'}, status=404)\n<|end_body_0|>\n\n<|body_start_1|>\n if request.is_ajax():\n data = json.loads(request.body)\n else:\n data = request.POST\n status = data.get('status', 'saved')\n revision = data.get('revision', None)\n revision_text = data.get('revisionText', None)\n revision_titles = data.get('revisionTitles', None)\n revised_authors = data.get('revisionAuthors', None)\n selected_files = data.get('selectedFiles', None)\n project_id = data['publication']['project']['value']['projectId']\n current_revision = None\n if revision:\n latest_revision = IndexedPublication.max_revision(project_id=project_id)\n current_revision = latest_revision + 1 if latest_revision >= 2 else 2\n pub = initilize_publication(data['publication'], status, revision=current_revision, revision_text=revision_text, revision_titles=revision_titles)\n if data.get('status', 'save').startswith('publish'):\n (tasks.freeze_publication_meta.s(project_id=pub.projectId, entity_uuids=data.get('mainEntityUuids'), revision=current_revision, revised_authors=revised_authors).set(queue='api') | group(tasks.save_publication.si(project_id=pub.projectId, entity_uuids=data.get('mainEntityUuids'), revision=current_revision, revised_authors=revised_authors).set(queue='files', countdown=60), tasks.copy_publication_files_to_corral.si(project_id=pub.projectId, revision=current_revision, selected_files=selected_files).set(queue='files', countdown=60)) | tasks.swap_file_tag_uuids.si(pub.projectId, revision=current_revision) | tasks.set_publish_status.si(project_id=pub.projectId, entity_uuids=data.get('mainEntityUuids'), revision=current_revision) | tasks.zip_publication_files.si(pub.projectId, revision=current_revision) | tasks.email_user_publication_request_confirmation.si(request.user.username) | tasks.check_published_files.si(pub.projectId, revision=current_revision, selected_files=selected_files)).apply_async()\n return JsonResponse({'success': 'Project is publishing.'}, status=200)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000311", "length_bytes": 28639, "license_type": "no_license", "methods": [{"docstring": "Get a publication. 
If a revision is not supplied, return the \"Original\" publication. Include the latest version if it is not being queried.", "name": "get", "signature": "def get(self, request, project_id, revision=None)"}, {"docstring": "Publish a project or version a publication", "name": "post", "signature": "def post(self, request, **kwargs)"}], "n_methods": 2, "prompt": "Implement the Python class `PublicationView` described below.\n\nClass description:\nImplement the PublicationView class.\n\nMethod signatures and docstrings:\n- def get(self, request, project_id, revision=None): Get a publication. If a revision is not supplied, return the \"Original\" publication. Include the latest version if it is not being queried.\n- def post(self, request, **kwargs): Publish a project or version a publication", "prompted_full_text": "Implement the Python class `PublicationView` described below.\n\nClass description:\nImplement the PublicationView class.\n\nMethod signatures and docstrings:\n- def get(self, request, project_id, revision=None): Get a publication. If a revision is not supplied, return the \"Original\" publication. Include the latest version if it is not being queried.\n- def post(self, request, **kwargs): Publish a project or version a publication\n\n<|skeleton|>\nclass PublicationView:\n\n def get(self, request, project_id, revision=None):\n \"\"\"Get a publication. If a revision is not supplied, return the \"Original\" publication. Include the latest version if it is not being queried.\"\"\"\n <|body_0|>\n\n def post(self, request, **kwargs):\n \"\"\"Publish a project or version a publication\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n es_client = new_es_client()\n pub = BaseESPublication(project_id=project_id, revision=revision, using=es_client)\n latest_revision = IndexedPublication.max_revision(project_id=project_id, using=es_client)\n latest_pub_dict = None\n if latest_revision > 0 and latest_revision != revision:\n latest_pub = BaseESPublication(project_id=project_id, revision=latest_revision, using=es_client)\n if latest_pub is not None and hasattr(latest_pub, 'project'):\n latest_pub_dict = latest_pub.to_dict()\n if pub is not None and hasattr(pub, 'project'):\n pub_dict = pub.to_dict()\n if pub_dict['project']['value']['projectType'] != 'other':\n metrics.info('Data Depot', extra={'user': request.user.username, 'sessionId': getattr(request.session, 'session_key', ''), 'operation': 'listing', 'agent': request.META.get('HTTP_USER_AGENT'), 'ip': get_client_ip(request), 'info': {'api': 'agave', 'systemId': 'designsafe.storage.published', 'filePath': project_id, 'query': {}}})\n if latest_pub_dict:\n pub_dict['latestRevision'] = latest_pub_dict\n return JsonResponse(pub_dict)\n else:\n return JsonResponse({'status': 404, 'message': 'Not found'}, status=404)\n<|end_body_0|>\n\n<|body_start_1|>\n if request.is_ajax():\n data = json.loads(request.body)\n else:\n data = request.POST\n status = data.get('status', 'saved')\n revision = data.get('revision', None)\n revision_text = data.get('revisionText', None)\n revision_titles = data.get('revisionTitles', None)\n revised_authors = data.get('revisionAuthors', None)\n selected_files = data.get('selectedFiles', None)\n project_id = data['publication']['project']['value']['projectId']\n current_revision = None\n if revision:\n latest_revision = IndexedPublication.max_revision(project_id=project_id)\n current_revision = latest_revision + 1 if latest_revision >= 2 else 2\n pub = initilize_publication(data['publication'], status, 
revision=current_revision, revision_text=revision_text, revision_titles=revision_titles)\n if data.get('status', 'save').startswith('publish'):\n (tasks.freeze_publication_meta.s(project_id=pub.projectId, entity_uuids=data.get('mainEntityUuids'), revision=current_revision, revised_authors=revised_authors).set(queue='api') | group(tasks.save_publication.si(project_id=pub.projectId, entity_uuids=data.get('mainEntityUuids'), revision=current_revision, revised_authors=revised_authors).set(queue='files', countdown=60), tasks.copy_publication_files_to_corral.si(project_id=pub.projectId, revision=current_revision, selected_files=selected_files).set(queue='files', countdown=60)) | tasks.swap_file_tag_uuids.si(pub.projectId, revision=current_revision) | tasks.set_publish_status.si(project_id=pub.projectId, entity_uuids=data.get('mainEntityUuids'), revision=current_revision) | tasks.zip_publication_files.si(pub.projectId, revision=current_revision) | tasks.email_user_publication_request_confirmation.si(request.user.username) | tasks.check_published_files.si(pub.projectId, revision=current_revision, selected_files=selected_files)).apply_async()\n return JsonResponse({'success': 'Project is publishing.'}, status=200)\n<|end_body_1|>\n", "revision_id": "040e0d88eac6037703a6128d6f4644b5a99ea11b", "skeleton": "<|skeleton|>\nclass PublicationView:\n\n def get(self, request, project_id, revision=None):\n \"\"\"Get a publication. If a revision is not supplied, return the \"Original\" publication. Include the latest version if it is not being queried.\"\"\"\n <|body_0|>\n\n def post(self, request, **kwargs):\n \"\"\"Publish a project or version a publication\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class PublicationView:\n def get(self, request, project_id, revision=None):\n \"\"\"Get a publication. If a revision is not supplied, return the \"Original\" publication. 
Include the latest version if it is not being queried.\"\"\"\n es_client = new_es_client()\n pub = BaseESPublication(project_id=project_id, revision=revision, using=es_client)\n latest_revision = IndexedPublication.max_revision(project_id=project_id, using=es_client)\n latest_pub_dict = None\n if latest_revision > 0 and latest_revision != revision:\n latest_pub = BaseESPublication(project_id=project_id, revision=latest_revision, using=es_client)\n if latest_pub is not None and hasattr(latest_pub, 'project'):\n latest_pub_dict = latest_pub.to_dict()\n if pub is not None and hasattr(pub, 'project'):\n pub_dict = pub.to_dict()\n if pub_dict['project']['value']['projectType'] != 'other':\n metrics.info('Data Depot', extra={'user': request.user.username, 'sessionId': getattr(request.session, 'session_key', ''), 'operation': 'listing', 'agent': request.META.get('HTTP_USER_AGENT'), 'ip': get_client_ip(request), 'info': {'api': 'agave', 'systemId': 'designsafe.storage.published', 'filePath': project_id, 'query': {}}})\n if latest_pub_dict:\n pub_dict['latestRevision'] = latest_pub_dict\n return JsonResponse(pub_dict)\n else:\n return JsonResponse({'status': 404, 'message': 'Not found'}, status=404)\n\n def post(self, request, **kwargs):\n \"\"\"Publish a project or version a publication\"\"\"\n if request.is_ajax():\n data = json.loads(request.body)\n else:\n data = request.POST\n status = data.get('status', 'saved')\n revision = data.get('revision', None)\n revision_text = data.get('revisionText', None)\n revision_titles = data.get('revisionTitles', None)\n revised_authors = data.get('revisionAuthors', None)\n selected_files = data.get('selectedFiles', None)\n project_id = data['publication']['project']['value']['projectId']\n current_revision = None\n if revision:\n latest_revision = IndexedPublication.max_revision(project_id=project_id)\n current_revision = latest_revision + 1 if latest_revision >= 2 else 2\n pub = initilize_publication(data['publication'], status, revision=current_revision, revision_text=revision_text, revision_titles=revision_titles)\n if data.get('status', 'save').startswith('publish'):\n (tasks.freeze_publication_meta.s(project_id=pub.projectId, entity_uuids=data.get('mainEntityUuids'), revision=current_revision, revised_authors=revised_authors).set(queue='api') | group(tasks.save_publication.si(project_id=pub.projectId, entity_uuids=data.get('mainEntityUuids'), revision=current_revision, revised_authors=revised_authors).set(queue='files', countdown=60), tasks.copy_publication_files_to_corral.si(project_id=pub.projectId, revision=current_revision, selected_files=selected_files).set(queue='files', countdown=60)) | tasks.swap_file_tag_uuids.si(pub.projectId, revision=current_revision) | tasks.set_publish_status.si(project_id=pub.projectId, entity_uuids=data.get('mainEntityUuids'), revision=current_revision) | tasks.zip_publication_files.si(pub.projectId, revision=current_revision) | tasks.email_user_publication_request_confirmation.si(request.user.username) | tasks.check_published_files.si(pub.projectId, revision=current_revision, selected_files=selected_files)).apply_async()\n return JsonResponse({'success': 'Project is publishing.'}, status=200)\n", "source": "the_stack_v2_python_sparse", "source_path": "designsafe/apps/api/projects/views.py", "source_repo": "DesignSafe-CI/portal", "split": "val", "star_events_count": 12}
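One easy-to-misread detail in PublicationView.post() above is the revision numbering: the first published version is the "Original" and carries no revision number, so the first numbered revision is forced to 2 even when max_revision reports 0 or 1. Here is that rule isolated as a pure function (`next_revision` is a hypothetical name, for illustration only):

```python
# Restates: current_revision = latest_revision + 1 if latest_revision >= 2 else 2
def next_revision(latest_revision: int) -> int:
    # The "Original" publication is unnumbered, so numbered revisions start at 2.
    return latest_revision + 1 if latest_revision >= 2 else 2

assert next_revision(0) == 2  # first revision of an original publication
assert next_revision(2) == 3  # afterwards, plain increments
assert next_revision(7) == 8
```

The Celery canvas in the same method then chains the freeze/copy/zip/notify tasks; that part needs a running broker and worker queues and is left as captured.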
{"blob_id": "48485edcfe31c6acf0c9ee5c2ca8a66b185fad98", "bodies": ["if not kwargs.get('obj_ids'):\n obj_model = facade.get_peer_group_by_search(self.search)\n objects = obj_model['query_set']\n only_main_property = False\nelse:\n ids = kwargs.get('obj_ids').split(';')\n objects = facade.get_peer_group_by_ids(ids)\n only_main_property = True\n obj_model = None\nserializer = serializers.PeerGroupV4Serializer(objects, many=True, fields=self.fields, include=self.include, exclude=self.exclude, kind=self.kind)\ndata = render_to_json(serializer, main_property='peer_groups', obj_model=obj_model, request=request, only_main_property=only_main_property)\nreturn Response(data, status=status.HTTP_200_OK)", "objects = request.DATA\njson_validate(SPECS.get('peer_group_post_v4')).validate(objects)\nresponse = list()\nfor obj in objects['peer_groups']:\n created_obj = facade.create_peer_group(obj, request.user)\n response.append({'id': created_obj.id})\nreturn Response(response, status=status.HTTP_201_CREATED)", "objects = request.DATA\njson_validate(SPECS.get('peer_group_put_v4')).validate(objects)\nresponse = list()\nfor obj in objects['peer_groups']:\n created_obj = facade.update_peer_group(obj, request.user)\n response.append({'id': created_obj.id})\nreturn Response(response, status=status.HTTP_200_OK)", "obj_ids = kwargs['obj_ids'].split(';')\nfacade.delete_peer_group(obj_ids)\nreturn Response({}, status=status.HTTP_200_OK)"], "bodies_text": "<|body_start_0|>\n if not kwargs.get('obj_ids'):\n obj_model = facade.get_peer_group_by_search(self.search)\n objects = obj_model['query_set']\n only_main_property = False\n else:\n ids = kwargs.get('obj_ids').split(';')\n objects = facade.get_peer_group_by_ids(ids)\n only_main_property = True\n obj_model = None\n serializer = serializers.PeerGroupV4Serializer(objects, many=True, fields=self.fields, include=self.include, exclude=self.exclude, kind=self.kind)\n data = render_to_json(serializer, main_property='peer_groups', obj_model=obj_model, request=request, only_main_property=only_main_property)\n return Response(data, status=status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n objects = request.DATA\n json_validate(SPECS.get('peer_group_post_v4')).validate(objects)\n response = list()\n for obj in objects['peer_groups']:\n created_obj = facade.create_peer_group(obj, request.user)\n response.append({'id': created_obj.id})\n return Response(response, status=status.HTTP_201_CREATED)\n<|end_body_1|>\n\n<|body_start_2|>\n objects = request.DATA\n json_validate(SPECS.get('peer_group_put_v4')).validate(objects)\n response = list()\n for obj in objects['peer_groups']:\n created_obj = facade.update_peer_group(obj, request.user)\n response.append({'id': created_obj.id})\n return Response(response, status=status.HTTP_200_OK)\n<|end_body_2|>\n\n<|body_start_3|>\n obj_ids = kwargs['obj_ids'].split(';')\n facade.delete_peer_group(obj_ids)\n return Response({}, status=status.HTTP_200_OK)\n<|end_body_3|>\n", "class_docstring": "", "class_name": "PeerGroupDBView", "detected_licenses": ["Apache-2.0", "BSD-3-Clause", "MIT", "LicenseRef-scancode-public-domain", "BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PeerGroupDBView:\n\n def get(self, request, *args, **kwargs):\n \"\"\"Returns a list of PeerGroups by ids ou dict.\"\"\"\n <|body_0|>\n\n def post(self, request, *args, **kwargs):\n \"\"\"Create new PeerGroup.\"\"\"\n <|body_1|>\n\n def put(self, request, *args, **kwargs):\n \"\"\"Update PeerGroup.\"\"\"\n 
<|body_2|>\n\n def delete(self, request, *args, **kwargs):\n \"\"\"Delete PeerGroup.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not kwargs.get('obj_ids'):\n obj_model = facade.get_peer_group_by_search(self.search)\n objects = obj_model['query_set']\n only_main_property = False\n else:\n ids = kwargs.get('obj_ids').split(';')\n objects = facade.get_peer_group_by_ids(ids)\n only_main_property = True\n obj_model = None\n serializer = serializers.PeerGroupV4Serializer(objects, many=True, fields=self.fields, include=self.include, exclude=self.exclude, kind=self.kind)\n data = render_to_json(serializer, main_property='peer_groups', obj_model=obj_model, request=request, only_main_property=only_main_property)\n return Response(data, status=status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n objects = request.DATA\n json_validate(SPECS.get('peer_group_post_v4')).validate(objects)\n response = list()\n for obj in objects['peer_groups']:\n created_obj = facade.create_peer_group(obj, request.user)\n response.append({'id': created_obj.id})\n return Response(response, status=status.HTTP_201_CREATED)\n<|end_body_1|>\n\n<|body_start_2|>\n objects = request.DATA\n json_validate(SPECS.get('peer_group_put_v4')).validate(objects)\n response = list()\n for obj in objects['peer_groups']:\n created_obj = facade.update_peer_group(obj, request.user)\n response.append({'id': created_obj.id})\n return Response(response, status=status.HTTP_200_OK)\n<|end_body_2|>\n\n<|body_start_3|>\n obj_ids = kwargs['obj_ids'].split(';')\n facade.delete_peer_group(obj_ids)\n return Response({}, status=status.HTTP_200_OK)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000312", "length_bytes": 4049, "license_type": "permissive", "methods": [{"docstring": "Returns a list of PeerGroups by ids ou dict.", "name": "get", "signature": "def get(self, request, *args, **kwargs)"}, {"docstring": "Create new PeerGroup.", "name": "post", "signature": "def post(self, request, *args, **kwargs)"}, {"docstring": "Update PeerGroup.", "name": "put", "signature": "def put(self, request, *args, **kwargs)"}, {"docstring": "Delete PeerGroup.", "name": "delete", "signature": "def delete(self, request, *args, **kwargs)"}], "n_methods": 4, "prompt": "Implement the Python class `PeerGroupDBView` described below.\n\nClass description:\nImplement the PeerGroupDBView class.\n\nMethod signatures and docstrings:\n- def get(self, request, *args, **kwargs): Returns a list of PeerGroups by ids ou dict.\n- def post(self, request, *args, **kwargs): Create new PeerGroup.\n- def put(self, request, *args, **kwargs): Update PeerGroup.\n- def delete(self, request, *args, **kwargs): Delete PeerGroup.", "prompted_full_text": "Implement the Python class `PeerGroupDBView` described below.\n\nClass description:\nImplement the PeerGroupDBView class.\n\nMethod signatures and docstrings:\n- def get(self, request, *args, **kwargs): Returns a list of PeerGroups by ids ou dict.\n- def post(self, request, *args, **kwargs): Create new PeerGroup.\n- def put(self, request, *args, **kwargs): Update PeerGroup.\n- def delete(self, request, *args, **kwargs): Delete PeerGroup.\n\n<|skeleton|>\nclass PeerGroupDBView:\n\n def get(self, request, *args, **kwargs):\n \"\"\"Returns a list of PeerGroups by ids ou dict.\"\"\"\n <|body_0|>\n\n def post(self, request, *args, **kwargs):\n \"\"\"Create new PeerGroup.\"\"\"\n <|body_1|>\n\n def put(self, request, *args, **kwargs):\n \"\"\"Update PeerGroup.\"\"\"\n <|body_2|>\n\n def delete(self, request, *args, 
**kwargs):\n \"\"\"Delete PeerGroup.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not kwargs.get('obj_ids'):\n obj_model = facade.get_peer_group_by_search(self.search)\n objects = obj_model['query_set']\n only_main_property = False\n else:\n ids = kwargs.get('obj_ids').split(';')\n objects = facade.get_peer_group_by_ids(ids)\n only_main_property = True\n obj_model = None\n serializer = serializers.PeerGroupV4Serializer(objects, many=True, fields=self.fields, include=self.include, exclude=self.exclude, kind=self.kind)\n data = render_to_json(serializer, main_property='peer_groups', obj_model=obj_model, request=request, only_main_property=only_main_property)\n return Response(data, status=status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n objects = request.DATA\n json_validate(SPECS.get('peer_group_post_v4')).validate(objects)\n response = list()\n for obj in objects['peer_groups']:\n created_obj = facade.create_peer_group(obj, request.user)\n response.append({'id': created_obj.id})\n return Response(response, status=status.HTTP_201_CREATED)\n<|end_body_1|>\n\n<|body_start_2|>\n objects = request.DATA\n json_validate(SPECS.get('peer_group_put_v4')).validate(objects)\n response = list()\n for obj in objects['peer_groups']:\n created_obj = facade.update_peer_group(obj, request.user)\n response.append({'id': created_obj.id})\n return Response(response, status=status.HTTP_200_OK)\n<|end_body_2|>\n\n<|body_start_3|>\n obj_ids = kwargs['obj_ids'].split(';')\n facade.delete_peer_group(obj_ids)\n return Response({}, status=status.HTTP_200_OK)\n<|end_body_3|>\n", "revision_id": "eb27e1d977a1c4bb1fee8fb51b8d8050c64696d9", "skeleton": "<|skeleton|>\nclass PeerGroupDBView:\n\n def get(self, request, *args, **kwargs):\n \"\"\"Returns a list of PeerGroups by ids ou dict.\"\"\"\n <|body_0|>\n\n def post(self, request, *args, **kwargs):\n \"\"\"Create new PeerGroup.\"\"\"\n <|body_1|>\n\n def put(self, request, *args, **kwargs):\n \"\"\"Update PeerGroup.\"\"\"\n <|body_2|>\n\n def delete(self, request, *args, **kwargs):\n \"\"\"Delete PeerGroup.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class PeerGroupDBView:\n def get(self, request, *args, **kwargs):\n \"\"\"Returns a list of PeerGroups by ids ou dict.\"\"\"\n if not kwargs.get('obj_ids'):\n obj_model = facade.get_peer_group_by_search(self.search)\n objects = obj_model['query_set']\n only_main_property = False\n else:\n ids = kwargs.get('obj_ids').split(';')\n objects = facade.get_peer_group_by_ids(ids)\n only_main_property = True\n obj_model = None\n serializer = serializers.PeerGroupV4Serializer(objects, many=True, fields=self.fields, include=self.include, exclude=self.exclude, kind=self.kind)\n data = render_to_json(serializer, main_property='peer_groups', obj_model=obj_model, request=request, only_main_property=only_main_property)\n return Response(data, status=status.HTTP_200_OK)\n\n def post(self, request, *args, **kwargs):\n \"\"\"Create new PeerGroup.\"\"\"\n objects = request.DATA\n json_validate(SPECS.get('peer_group_post_v4')).validate(objects)\n response = list()\n for obj in objects['peer_groups']:\n created_obj = facade.create_peer_group(obj, request.user)\n response.append({'id': created_obj.id})\n return Response(response, status=status.HTTP_201_CREATED)\n\n def put(self, request, *args, **kwargs):\n \"\"\"Update PeerGroup.\"\"\"\n objects = request.DATA\n 
json_validate(SPECS.get('peer_group_put_v4')).validate(objects)\n response = list()\n for obj in objects['peer_groups']:\n created_obj = facade.update_peer_group(obj, request.user)\n response.append({'id': created_obj.id})\n return Response(response, status=status.HTTP_200_OK)\n\n def delete(self, request, *args, **kwargs):\n \"\"\"Delete PeerGroup.\"\"\"\n obj_ids = kwargs['obj_ids'].split(';')\n facade.delete_peer_group(obj_ids)\n return Response({}, status=status.HTTP_200_OK)\n", "source": "the_stack_v2_python_sparse", "source_path": "networkapi/api_peer_group/v4/views.py", "source_repo": "globocom/GloboNetworkAPI", "split": "val", "star_events_count": 86}
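All four PeerGroupDBView handlers above share two small conventions worth making explicit: bulk payloads under a 'peer_groups' key are answered with a list of {'id': ...} dicts, and GET-by-ids/DELETE take ';'-separated id lists from the URL kwargs. A self-contained sketch with the facade stubbed out (`fake_create_peer_group` is an assumption standing in for `facade.create_peer_group`, which persists Django models):

```python
# Bulk-response shape used by post()/put(), with a stubbed facade.
def fake_create_peer_group(obj, user):
    return type("Created", (), {"id": obj["id"]})()  # stand-in for a saved model

payload = {"peer_groups": [{"id": 10}, {"id": 11}]}
response = [{"id": fake_create_peer_group(obj, user=None).id}
            for obj in payload["peer_groups"]]
assert response == [{"id": 10}, {"id": 11}]

# GET by ids and DELETE share the ';'-separated id convention from the URL:
assert "3;5;8".split(";") == ["3", "5", "8"]
```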
{"blob_id": "ce74b113a86905b244a95cc0841316fd2ad1264a", "bodies": ["def decorated_view(request: HttpRequest, course_id: int, *args, **kwargs):\n course = Course.objects.get(id=course_id)\n user = request.user\n is_instructor = user in course.instructors.all()\n is_student = user in course.students.all()\n if is_instructor or is_student:\n return view(request, course_id, *args, **kwargs)\n return HttpResponse('Unauthorized', status=401)\nreturn decorated_view", "payload = loads(request.body)\nresult = []\nif payload.get('query', False):\n course = Course.objects.get(id=course_id)\n query_obj = Q(title__contains=payload['query']) | Q(content=payload['query'])\n search_results = course.posts.filter(query_obj).all()\n is_instructor = request.user in course.instructors.all()\n for post in search_results:\n if post.visibility == Post.Visibility.PUBLIC or post.author == request.user or request.user in post.course.instructors.all():\n result.append(PostService.post_to_dict(post, is_instructor=is_instructor))\nreturn HttpResponse(dumps(result))"], "bodies_text": "<|body_start_0|>\n def decorated_view(request: HttpRequest, course_id: int, *args, **kwargs):\n course = Course.objects.get(id=course_id)\n user = request.user\n is_instructor = user in course.instructors.all()\n is_student = user in course.students.all()\n if is_instructor or is_student:\n return view(request, course_id, *args, **kwargs)\n return HttpResponse('Unauthorized', status=401)\n return decorated_view\n<|end_body_0|>\n\n<|body_start_1|>\n payload = loads(request.body)\n result = []\n if payload.get('query', False):\n course = Course.objects.get(id=course_id)\n query_obj = Q(title__contains=payload['query']) | Q(content=payload['query'])\n search_results = course.posts.filter(query_obj).all()\n is_instructor = request.user in course.instructors.all()\n for post in search_results:\n if post.visibility == Post.Visibility.PUBLIC or post.author == request.user or request.user in post.course.instructors.all():\n result.append(PostService.post_to_dict(post, is_instructor=is_instructor))\n return HttpResponse(dumps(result))\n<|end_body_1|>\n", "class_docstring": "", "class_name": "SearchView", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SearchView:\n\n def permissioned(view):\n \"\"\"View decorator that checks if a user has permission to access/edit the post specified in the url.\"\"\"\n <|body_0|>\n\n def post(self, request: HttpRequest, course_id: int) -> HttpResponse:\n \"\"\"Post function for searching in courses\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def decorated_view(request: HttpRequest, course_id: int, *args, **kwargs):\n course = Course.objects.get(id=course_id)\n user = request.user\n is_instructor = user in course.instructors.all()\n is_student = user in course.students.all()\n if is_instructor or is_student:\n return view(request, course_id, *args, **kwargs)\n return HttpResponse('Unauthorized', status=401)\n return decorated_view\n<|end_body_0|>\n\n<|body_start_1|>\n payload = loads(request.body)\n result = []\n if payload.get('query', False):\n course = Course.objects.get(id=course_id)\n query_obj = Q(title__contains=payload['query']) | Q(content=payload['query'])\n search_results = course.posts.filter(query_obj).all()\n is_instructor = request.user in course.instructors.all()\n for post in search_results:\n if post.visibility == Post.Visibility.PUBLIC or post.author == request.user or request.user in 
post.course.instructors.all():\n result.append(PostService.post_to_dict(post, is_instructor=is_instructor))\n return HttpResponse(dumps(result))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000313", "length_bytes": 2222, "license_type": "permissive", "methods": [{"docstring": "View decorator that checks if a user has permission to access/edit the post specified in the url.", "name": "permissioned", "signature": "def permissioned(view)"}, {"docstring": "Post function for searching in courses", "name": "post", "signature": "def post(self, request: HttpRequest, course_id: int) -> HttpResponse"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_031499", "prompt": "Implement the Python class `SearchView` described below.\n\nClass description:\nImplement the SearchView class.\n\nMethod signatures and docstrings:\n- def permissioned(view): View decorator that checks if a user has permission to access/edit the post specified in the url.\n- def post(self, request: HttpRequest, course_id: int) -> HttpResponse: Post function for searching in courses", "prompted_full_text": "Implement the Python class `SearchView` described below.\n\nClass description:\nImplement the SearchView class.\n\nMethod signatures and docstrings:\n- def permissioned(view): View decorator that checks if a user has permission to access/edit the post specified in the url.\n- def post(self, request: HttpRequest, course_id: int) -> HttpResponse: Post function for searching in courses\n\n<|skeleton|>\nclass SearchView:\n\n def permissioned(view):\n \"\"\"View decorator that checks if a user has permission to access/edit the post specified in the url.\"\"\"\n <|body_0|>\n\n def post(self, request: HttpRequest, course_id: int) -> HttpResponse:\n \"\"\"Post function for searching in courses\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def decorated_view(request: HttpRequest, course_id: int, *args, **kwargs):\n course = Course.objects.get(id=course_id)\n user = request.user\n is_instructor = user in course.instructors.all()\n is_student = user in course.students.all()\n if is_instructor or is_student:\n return view(request, course_id, *args, **kwargs)\n return HttpResponse('Unauthorized', status=401)\n return decorated_view\n<|end_body_0|>\n\n<|body_start_1|>\n payload = loads(request.body)\n result = []\n if payload.get('query', False):\n course = Course.objects.get(id=course_id)\n query_obj = Q(title__contains=payload['query']) | Q(content=payload['query'])\n search_results = course.posts.filter(query_obj).all()\n is_instructor = request.user in course.instructors.all()\n for post in search_results:\n if post.visibility == Post.Visibility.PUBLIC or post.author == request.user or request.user in post.course.instructors.all():\n result.append(PostService.post_to_dict(post, is_instructor=is_instructor))\n return HttpResponse(dumps(result))\n<|end_body_1|>\n", "revision_id": "6b688c28c79e56df5cc667d704db72ba30141f7a", "skeleton": "<|skeleton|>\nclass SearchView:\n\n def permissioned(view):\n \"\"\"View decorator that checks if a user has permission to access/edit the post specified in the url.\"\"\"\n <|body_0|>\n\n def post(self, request: HttpRequest, course_id: int) -> HttpResponse:\n \"\"\"Post function for searching in courses\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SearchView:\n def permissioned(view):\n \"\"\"View decorator 
that checks if a user has permission to access/edit the post specified in the url.\"\"\"\n def decorated_view(request: HttpRequest, course_id: int, *args, **kwargs):\n course = Course.objects.get(id=course_id)\n user = request.user\n is_instructor = user in course.instructors.all()\n is_student = user in course.students.all()\n if is_instructor or is_student:\n return view(request, course_id, *args, **kwargs)\n return HttpResponse('Unauthorized', status=401)\n return decorated_view\n\n def post(self, request: HttpRequest, course_id: int) -> HttpResponse:\n \"\"\"Post function for searching in courses\"\"\"\n payload = loads(request.body)\n result = []\n if payload.get('query', False):\n course = Course.objects.get(id=course_id)\n query_obj = Q(title__contains=payload['query']) | Q(content=payload['query'])\n search_results = course.posts.filter(query_obj).all()\n is_instructor = request.user in course.instructors.all()\n for post in search_results:\n if post.visibility == Post.Visibility.PUBLIC or post.author == request.user or request.user in post.course.instructors.all():\n result.append(PostService.post_to_dict(post, is_instructor=is_instructor))\n return HttpResponse(dumps(result))\n", "source": "the_stack_v2_python_sparse", "source_path": "backend/api/views/SearchView.py", "source_repo": "CaoRuiming/CS1320-Final-Project", "split": "val", "star_events_count": 0}
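The search loop in SearchView.post() above applies a three-way visibility rule to each title/content match: public posts, the requester's own posts, and everything for course instructors. Reduced to a plain predicate so it runs without Django (Post and Course are models in the record; the dicts and names below are illustrative stand-ins):

```python
# Visibility rule from SearchView.post(), as a pure predicate on stand-in dicts.
def visible_to(post: dict, user: str, instructors: set) -> bool:
    return (
        post["visibility"] == "public"  # Post.Visibility.PUBLIC in the record
        or post["author"] == user
        or user in instructors
    )

posts = [
    {"title": "hw1", "visibility": "public", "author": "alice"},
    {"title": "grades", "visibility": "private", "author": "alice"},
]
instructors = {"prof"}
assert [p["title"] for p in posts if visible_to(p, "bob", instructors)] == ["hw1"]
assert [p["title"] for p in posts if visible_to(p, "prof", instructors)] == ["hw1", "grades"]
```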
{"blob_id": "0e4356a4b08ef76bd377d80ff3124a9e5609dd51", "bodies": ["self.f = pyedflib.EdfReader(file_name)\nself.k = self.f.signals_in_file\nself.N = self.f.getNSamples()[0]\nself.fs = self.f.getSampleFrequency(0)", "data = np.zeros((self.k, self.N))\nfor i in np.arange(self.k):\n data[i, :] = self.f.readSignal(i)\nreturn data", "data = self.build_data()\nA_matrix = cp.mvarmodel.Mvar.fit(data)[0]\nV_matrix = cp.mvarmodel.Mvar.fit(data)[1]\ndtf = cp.conn.dtf_fun(A_matrix, V_matrix, self.fs, self.fs // 2)\nfreq_selection = dtf[7:13]\nreturn freq_selection", "freq_selection = self.MVar_DTF()\nmean_matrix = np.mean(freq_selection, 0)\nmatrix_no_diagonal = mean_matrix - np.triu(np.tril(mean_matrix))\nreturn matrix_no_diagonal", "matrix_no_diagonal = self.final_dtf_matrix()\nthresholds = np.linspace(0.0, 1.0, 10000)\ndensities = []\nfor t in thresholds:\n adj_mat = np.zeros((self.k, self.k))\n adj_mat[matrix_no_diagonal >= t] = 1\n adj_mat[matrix_no_diagonal < t] = 0\n graph = nx.from_numpy_matrix(adj_mat, create_using=nx.DiGraph)\n densities.append(nx.density(graph))\ndensities = np.asarray(densities)\nwhere = np.abs(densities - density).argmin()\nth = thresholds[where]\nresult_adj_mat = np.zeros((self.k, self.k))\nresult_adj_mat[matrix_no_diagonal >= th] = 1\nresult_adj_mat[matrix_no_diagonal < th] = 0\nreturn result_adj_mat", "adj_mat = self.adj_matrix(density)\nfig, ax = plt.subplots()\ncmap = mpl.colors.ListedColormap(['k', 'c'])\nbounds = [0.0, 0.5, 1.0]\nnorm = mpl.colors.BoundaryNorm(bounds, cmap.N)\nplt.title('Using DTF \\n File: %s ' % file_name + ' Density: %f ' % density)\nax.imshow(adj_mat, interpolation='none', cmap=cmap, norm=norm)\nplt.show()"], "bodies_text": "<|body_start_0|>\n self.f = pyedflib.EdfReader(file_name)\n self.k = self.f.signals_in_file\n self.N = self.f.getNSamples()[0]\n self.fs = self.f.getSampleFrequency(0)\n<|end_body_0|>\n\n<|body_start_1|>\n data = np.zeros((self.k, self.N))\n for i in np.arange(self.k):\n data[i, :] = self.f.readSignal(i)\n return data\n<|end_body_1|>\n\n<|body_start_2|>\n data = self.build_data()\n A_matrix = cp.mvarmodel.Mvar.fit(data)[0]\n V_matrix = cp.mvarmodel.Mvar.fit(data)[1]\n dtf = cp.conn.dtf_fun(A_matrix, V_matrix, self.fs, self.fs // 2)\n freq_selection = dtf[7:13]\n return freq_selection\n<|end_body_2|>\n\n<|body_start_3|>\n freq_selection = self.MVar_DTF()\n mean_matrix = np.mean(freq_selection, 0)\n matrix_no_diagonal = mean_matrix - np.triu(np.tril(mean_matrix))\n return matrix_no_diagonal\n<|end_body_3|>\n\n<|body_start_4|>\n matrix_no_diagonal = self.final_dtf_matrix()\n thresholds = np.linspace(0.0, 1.0, 10000)\n densities = []\n for t in thresholds:\n adj_mat = np.zeros((self.k, self.k))\n adj_mat[matrix_no_diagonal >= t] = 1\n adj_mat[matrix_no_diagonal < t] = 0\n graph = nx.from_numpy_matrix(adj_mat, create_using=nx.DiGraph)\n densities.append(nx.density(graph))\n densities = np.asarray(densities)\n where = np.abs(densities - density).argmin()\n th = thresholds[where]\n result_adj_mat = np.zeros((self.k, self.k))\n result_adj_mat[matrix_no_diagonal >= th] = 1\n result_adj_mat[matrix_no_diagonal < th] = 0\n return result_adj_mat\n<|end_body_4|>\n\n<|body_start_5|>\n adj_mat = self.adj_matrix(density)\n fig, ax = plt.subplots()\n cmap = mpl.colors.ListedColormap(['k', 'c'])\n bounds = [0.0, 0.5, 1.0]\n norm = mpl.colors.BoundaryNorm(bounds, cmap.N)\n plt.title('Using DTF \\n File: %s ' % file_name + ' Density: %f ' % density)\n ax.imshow(adj_mat, interpolation='none', cmap=cmap, norm=norm)\n 
plt.show()\n<|end_body_5|>\n", "class_docstring": "", "class_name": "DTF", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DTF:\n\n def __init__(self, file_name):\n \"\"\"- file_name: edf file name string;\"\"\"\n <|body_0|>\n\n def build_data(self):\n \"\"\"Returns: an array of data: rows must be channels and cols must be data points.\"\"\"\n <|body_1|>\n\n def MVar_DTF(self):\n \"\"\"Fits Multivariate model on our data. Computes the DTF estimator. Returns: an array containing as many matrices as the frequency range dimension.\"\"\"\n <|body_2|>\n\n def final_dtf_matrix(self):\n \"\"\"Returns: final dtf matrix, without diagonal (no self-loops).\"\"\"\n <|body_3|>\n\n def adj_matrix(self, density):\n \"\"\"Returns: The adjacency matrix obtained by applying the threshold (such that density = 20%) on dtf matrix.\"\"\"\n <|body_4|>\n\n def binary_heatmap(self, density, file_name):\n \"\"\"Returns: binary heatmap of adjacency matrix.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.f = pyedflib.EdfReader(file_name)\n self.k = self.f.signals_in_file\n self.N = self.f.getNSamples()[0]\n self.fs = self.f.getSampleFrequency(0)\n<|end_body_0|>\n\n<|body_start_1|>\n data = np.zeros((self.k, self.N))\n for i in np.arange(self.k):\n data[i, :] = self.f.readSignal(i)\n return data\n<|end_body_1|>\n\n<|body_start_2|>\n data = self.build_data()\n A_matrix = cp.mvarmodel.Mvar.fit(data)[0]\n V_matrix = cp.mvarmodel.Mvar.fit(data)[1]\n dtf = cp.conn.dtf_fun(A_matrix, V_matrix, self.fs, self.fs // 2)\n freq_selection = dtf[7:13]\n return freq_selection\n<|end_body_2|>\n\n<|body_start_3|>\n freq_selection = self.MVar_DTF()\n mean_matrix = np.mean(freq_selection, 0)\n matrix_no_diagonal = mean_matrix - np.triu(np.tril(mean_matrix))\n return matrix_no_diagonal\n<|end_body_3|>\n\n<|body_start_4|>\n matrix_no_diagonal = self.final_dtf_matrix()\n thresholds = np.linspace(0.0, 1.0, 10000)\n densities = []\n for t in thresholds:\n adj_mat = np.zeros((self.k, self.k))\n adj_mat[matrix_no_diagonal >= t] = 1\n adj_mat[matrix_no_diagonal < t] = 0\n graph = nx.from_numpy_matrix(adj_mat, create_using=nx.DiGraph)\n densities.append(nx.density(graph))\n densities = np.asarray(densities)\n where = np.abs(densities - density).argmin()\n th = thresholds[where]\n result_adj_mat = np.zeros((self.k, self.k))\n result_adj_mat[matrix_no_diagonal >= th] = 1\n result_adj_mat[matrix_no_diagonal < th] = 0\n return result_adj_mat\n<|end_body_4|>\n\n<|body_start_5|>\n adj_mat = self.adj_matrix(density)\n fig, ax = plt.subplots()\n cmap = mpl.colors.ListedColormap(['k', 'c'])\n bounds = [0.0, 0.5, 1.0]\n norm = mpl.colors.BoundaryNorm(bounds, cmap.N)\n plt.title('Using DTF \\n File: %s ' % file_name + ' Density: %f ' % density)\n ax.imshow(adj_mat, interpolation='none', cmap=cmap, norm=norm)\n plt.show()\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000314", "length_bytes": 5856, "license_type": "no_license", "methods": [{"docstring": "- file_name: edf file name string;", "name": "__init__", "signature": "def __init__(self, file_name)"}, {"docstring": "Returns: an array of data: rows must be channels and cols must be data points.", "name": "build_data", "signature": "def build_data(self)"}, {"docstring": "Fits Multivariate model on our data. Computes the DTF estimator. Returns: an array containing as many matrices as the frequency range dimension.", "name": "MVar_DTF", "signature": "def MVar_DTF(self)"}, {"docstring": "Returns: final dtf matrix, without diagonal (no self-loops).", "name": "final_dtf_matrix", "signature": "def final_dtf_matrix(self)"}, {"docstring": "Returns: The adjacency matrix obtained by applying the threshold (such that density = 20%) on dtf matrix.", "name": "adj_matrix", "signature": "def adj_matrix(self, density)"}, {"docstring": "Returns: binary heatmap of adjacency matrix.", "name": "binary_heatmap", "signature": "def binary_heatmap(self, density, file_name)"}], "n_methods": 6, "prompt": "Implement the Python class `DTF` described below.\n\nClass description:\nImplement the DTF class.\n\nMethod signatures and docstrings:\n- def __init__(self, file_name): - file_name: edf file name string;\n- def build_data(self): Returns: an array of data: rows must be channels and cols must be data points.\n- def MVar_DTF(self): Fits Multivariate model on our data. Computes the DTF estimator. Returns: an array containing as many matrices as the frequency range dimension.\n- def final_dtf_matrix(self): Returns: final dtf matrix, without diagonal (no self-loops).\n- def adj_matrix(self, density): Returns: The adjacency matrix obtained by applying the threshold (such that density = 20%) on dtf matrix.\n- def binary_heatmap(self, density, file_name): Returns: binary heatmap of adjacency matrix.", "prompted_full_text": "Implement the Python class `DTF` described below.\n\nClass description:\nImplement the DTF class.\n\nMethod signatures and docstrings:\n- def __init__(self, file_name): - file_name: edf file name string;\n- def build_data(self): Returns: an array of data: rows must be channels and cols must be data points.\n- def MVar_DTF(self): Fits Multivariate model on our data. Computes the DTF estimator. Returns: an array containing as many matrices as the frequency range dimension.\n- def final_dtf_matrix(self): Returns: final dtf matrix, without diagonal (no self-loops).\n- def adj_matrix(self, density): Returns: The adjacency matrix obtained by applying the threshold (such that density = 20%) on dtf matrix.\n- def binary_heatmap(self, density, file_name): Returns: binary heatmap of adjacency matrix.\n\n<|skeleton|>\nclass DTF:\n\n def __init__(self, file_name):\n \"\"\"- file_name: edf file name string;\"\"\"\n <|body_0|>\n\n def build_data(self):\n \"\"\"Returns: an array of data: rows must be channels and cols must be data points.\"\"\"\n <|body_1|>\n\n def MVar_DTF(self):\n \"\"\"Fits Multivariate model on our data. Computes the DTF estimator. Returns: an array containing as many matrices as the frequency range dimension.\"\"\"\n <|body_2|>\n\n def final_dtf_matrix(self):\n \"\"\"Returns: final dtf matrix, without diagonal (no self-loops).\"\"\"\n <|body_3|>\n\n def adj_matrix(self, density):\n \"\"\"Returns: The adjacency matrix obtained by applying the threshold (such that density = 20%) on dtf matrix.\"\"\"\n <|body_4|>\n\n def binary_heatmap(self, density, file_name):\n \"\"\"Returns: binary heatmap of adjacency matrix.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.f = pyedflib.EdfReader(file_name)\n self.k = self.f.signals_in_file\n self.N = self.f.getNSamples()[0]\n self.fs = self.f.getSampleFrequency(0)\n<|end_body_0|>\n\n<|body_start_1|>\n data = np.zeros((self.k, self.N))\n for i in np.arange(self.k):\n data[i, :] = self.f.readSignal(i)\n return data\n<|end_body_1|>\n\n<|body_start_2|>\n data = self.build_data()\n A_matrix = cp.mvarmodel.Mvar.fit(data)[0]\n V_matrix = cp.mvarmodel.Mvar.fit(data)[1]\n dtf = cp.conn.dtf_fun(A_matrix, V_matrix, self.fs, self.fs // 2)\n freq_selection = dtf[7:13]\n return freq_selection\n<|end_body_2|>\n\n<|body_start_3|>\n freq_selection = self.MVar_DTF()\n mean_matrix = np.mean(freq_selection, 0)\n matrix_no_diagonal = mean_matrix - np.triu(np.tril(mean_matrix))\n return matrix_no_diagonal\n<|end_body_3|>\n\n<|body_start_4|>\n matrix_no_diagonal = self.final_dtf_matrix()\n thresholds = np.linspace(0.0, 1.0, 10000)\n densities = []\n for t in thresholds:\n adj_mat = np.zeros((self.k, self.k))\n adj_mat[matrix_no_diagonal >= t] = 1\n adj_mat[matrix_no_diagonal < t] = 0\n graph = nx.from_numpy_matrix(adj_mat, create_using=nx.DiGraph)\n densities.append(nx.density(graph))\n densities = np.asarray(densities)\n where = np.abs(densities - density).argmin()\n th = thresholds[where]\n result_adj_mat = np.zeros((self.k, self.k))\n result_adj_mat[matrix_no_diagonal >= th] = 1\n result_adj_mat[matrix_no_diagonal < th] = 0\n return result_adj_mat\n<|end_body_4|>\n\n<|body_start_5|>\n adj_mat = self.adj_matrix(density)\n fig, ax = plt.subplots()\n cmap = mpl.colors.ListedColormap(['k', 'c'])\n bounds = [0.0, 0.5, 1.0]\n norm = mpl.colors.BoundaryNorm(bounds, cmap.N)\n plt.title('Using DTF \\n File: %s ' % file_name + ' Density: %f ' % density)\n ax.imshow(adj_mat, interpolation='none', cmap=cmap, norm=norm)\n plt.show()\n<|end_body_5|>\n", "revision_id": "a345c1c944b5b48defea0fe2c41b9d229efd9950", "skeleton": "<|skeleton|>\nclass DTF:\n\n def __init__(self, file_name):\n \"\"\"- file_name: edf file name string;\"\"\"\n <|body_0|>\n\n def build_data(self):\n \"\"\"Returns: an array of data: rows must be channels and cols must be data points.\"\"\"\n <|body_1|>\n\n def MVar_DTF(self):\n \"\"\"Fits Multivariate model on our data. Computes the DTF estimator. Returns: an array containing as many matrices as the frequency range dimension.\"\"\"\n <|body_2|>\n\n def final_dtf_matrix(self):\n \"\"\"Returns: final dtf matrix, without diagonal (no self-loops).\"\"\"\n <|body_3|>\n\n def adj_matrix(self, density):\n \"\"\"Returns: The adjacency matrix obtained by applying the threshold (such that density = 20%) on dtf matrix.\"\"\"\n <|body_4|>\n\n def binary_heatmap(self, density, file_name):\n \"\"\"Returns: binary heatmap of adjacency matrix.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DTF:\n def __init__(self, file_name):\n \"\"\"- file_name: edf file name string;\"\"\"\n self.f = pyedflib.EdfReader(file_name)\n self.k = self.f.signals_in_file\n self.N = self.f.getNSamples()[0]\n self.fs = self.f.getSampleFrequency(0)\n\n def build_data(self):\n \"\"\"Returns: an array of data: rows must be channels and cols must be data points.\"\"\"\n data = np.zeros((self.k, self.N))\n for i in np.arange(self.k):\n data[i, :] = self.f.readSignal(i)\n return data\n\n def MVar_DTF(self):\n \"\"\"Fits Multivariate model on our data. Computes the DTF estimator. Returns: an array containing as many matrices as the frequency range dimension.\"\"\"\n data = self.build_data()\n A_matrix = cp.mvarmodel.Mvar.fit(data)[0]\n V_matrix = cp.mvarmodel.Mvar.fit(data)[1]\n dtf = cp.conn.dtf_fun(A_matrix, V_matrix, self.fs, self.fs // 2)\n freq_selection = dtf[7:13]\n return freq_selection\n\n def final_dtf_matrix(self):\n \"\"\"Returns: final dtf matrix, without diagonal (no self-loops).\"\"\"\n freq_selection = self.MVar_DTF()\n mean_matrix = np.mean(freq_selection, 0)\n matrix_no_diagonal = mean_matrix - np.triu(np.tril(mean_matrix))\n return matrix_no_diagonal\n\n def adj_matrix(self, density):\n \"\"\"Returns: The adjacency matrix obtained by applying the threshold (such that density = 20%) on dtf matrix.\"\"\"\n matrix_no_diagonal = self.final_dtf_matrix()\n thresholds = np.linspace(0.0, 1.0, 10000)\n densities = []\n for t in thresholds:\n adj_mat = np.zeros((self.k, self.k))\n adj_mat[matrix_no_diagonal >= t] = 1\n adj_mat[matrix_no_diagonal < t] = 0\n graph = nx.from_numpy_matrix(adj_mat, create_using=nx.DiGraph)\n densities.append(nx.density(graph))\n densities = np.asarray(densities)\n where = np.abs(densities - density).argmin()\n th = thresholds[where]\n result_adj_mat = np.zeros((self.k, self.k))\n result_adj_mat[matrix_no_diagonal >= th] = 1\n result_adj_mat[matrix_no_diagonal < th] = 0\n return result_adj_mat\n\n def binary_heatmap(self, density, file_name):\n \"\"\"Returns: binary heatmap of adjacency matrix.\"\"\"\n adj_mat = self.adj_matrix(density)\n fig, ax = plt.subplots()\n cmap = mpl.colors.ListedColormap(['k', 'c'])\n bounds = [0.0, 0.5, 1.0]\n norm = mpl.colors.BoundaryNorm(bounds, cmap.N)\n plt.title('Using DTF \\n File: %s ' % file_name + ' Density: %f ' % density)\n ax.imshow(adj_mat, interpolation='none', cmap=cmap, norm=norm)\n plt.show()\n", "source": "the_stack_v2_python_sparse", "source_path": "task_1_2.py", "source_repo": "LivLilli/Brain-network-study-during-resting-states", "split": "val", "star_events_count": 0}
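The density-targeted thresholding inside adj_matrix is the core of the record above; here is a minimal standalone sketch of that step, with a random matrix standing in for a real DTF estimate and nx.from_numpy_array used as the current NetworkX name for the from_numpy_matrix call in the record.

import numpy as np
import networkx as nx

rng = np.random.default_rng(0)
k = 8
W = rng.random((k, k))
np.fill_diagonal(W, 0.0)  # no self-loops, mirroring final_dtf_matrix

target = 0.2  # desired graph density
thresholds = np.linspace(0.0, 1.0, 1000)
densities = np.array([
    nx.density(nx.from_numpy_array((W >= t).astype(int), create_using=nx.DiGraph))
    for t in thresholds
])
th = thresholds[np.abs(densities - target).argmin()]  # threshold whose density is closest to target
adj = (W >= th).astype(int)
print(th, nx.density(nx.from_numpy_array(adj, create_using=nx.DiGraph)))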
{"blob_id": "77d3f6f2924e00fa9d0d2566ab5a499f73b3296d", "bodies": ["res = 0\nfor i in range(len(nums)):\n for j in range(i, len(nums)):\n res += bin(nums[i] ^ nums[j]).count('1')\nreturn res", "ones, N = (collections.defaultdict(int), len(nums))\nfor n in nums:\n s = bin(n)[::-1]\n for i in range(0, len(s)):\n if s[i] == '1':\n ones[i] += 1\nreturn sum([(N - ones[i]) * ones[i] for i in ones])"], "bodies_text": "<|body_start_0|>\n res = 0\n for i in range(len(nums)):\n for j in range(i, len(nums)):\n res += bin(nums[i] ^ nums[j]).count('1')\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n ones, N = (collections.defaultdict(int), len(nums))\n for n in nums:\n s = bin(n)[::-1]\n for i in range(0, len(s)):\n if s[i] == '1':\n ones[i] += 1\n return sum([(N - ones[i]) * ones[i] for i in ones])\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def totalHammingDistance_bruteForce(self, nums: List[int]) -> int:\n \"\"\"Brute Force solution\"\"\"\n <|body_0|>\n\n def totalHammingDistance(self, nums: List[int]) -> int:\n \"\"\"For each position i, count the frequency ones[i] of ones in nums. Total contribution from position i is (len(nums)-ones[i])*ones[i].\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n res = 0\n for i in range(len(nums)):\n for j in range(i, len(nums)):\n res += bin(nums[i] ^ nums[j]).count('1')\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n ones, N = (collections.defaultdict(int), len(nums))\n for n in nums:\n s = bin(n)[::-1]\n for i in range(0, len(s)):\n if s[i] == '1':\n ones[i] += 1\n return sum([(N - ones[i]) * ones[i] for i in ones])\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000315", "length_bytes": 1805, "license_type": "no_license", "methods": [{"docstring": "Brute Force solution", "name": "totalHammingDistance_bruteForce", "signature": "def totalHammingDistance_bruteForce(self, nums: List[int]) -> int"}, {"docstring": "For each position i, count the frequency ones[i] of ones in nums. Total contribution from position i is (len(nums)-ones[i])*ones[i].", "name": "totalHammingDistance", "signature": "def totalHammingDistance(self, nums: List[int]) -> int"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def totalHammingDistance_bruteForce(self, nums: List[int]) -> int: Brute Force solution\n- def totalHammingDistance(self, nums: List[int]) -> int: For each position i, count the frequency ones[i] of ones in nums. Total contribution from position i is (len(nums)-ones[i])*ones[i].", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def totalHammingDistance_bruteForce(self, nums: List[int]) -> int: Brute Force solution\n- def totalHammingDistance(self, nums: List[int]) -> int: For each position i, count the frequency ones[i] of ones in nums. Total contribution from position i is (len(nums)-ones[i])*ones[i].\n\n<|skeleton|>\nclass Solution:\n\n def totalHammingDistance_bruteForce(self, nums: List[int]) -> int:\n \"\"\"Brute Force solution\"\"\"\n <|body_0|>\n\n def totalHammingDistance(self, nums: List[int]) -> int:\n \"\"\"For each position i, count the frequency ones[i] of ones in nums. 
Total contribution from position i is (len(nums)-ones[i])*ones[i].\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n res = 0\n for i in range(len(nums)):\n for j in range(i, len(nums)):\n res += bin(nums[i] ^ nums[j]).count('1')\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n ones, N = (collections.defaultdict(int), len(nums))\n for n in nums:\n s = bin(n)[::-1]\n for i in range(0, len(s)):\n if s[i] == '1':\n ones[i] += 1\n return sum([(N - ones[i]) * ones[i] for i in ones])\n<|end_body_1|>\n", "revision_id": "791fc1b43beef89d668788de6d12f5c643431b8f", "skeleton": "<|skeleton|>\nclass Solution:\n\n def totalHammingDistance_bruteForce(self, nums: List[int]) -> int:\n \"\"\"Brute Force solution\"\"\"\n <|body_0|>\n\n def totalHammingDistance(self, nums: List[int]) -> int:\n \"\"\"For each position i, count the frequency ones[i] of ones in nums. Total contribution from position i is (len(nums)-ones[i])*ones[i].\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def totalHammingDistance_bruteForce(self, nums: List[int]) -> int:\n \"\"\"Brute Force solution\"\"\"\n res = 0\n for i in range(len(nums)):\n for j in range(i, len(nums)):\n res += bin(nums[i] ^ nums[j]).count('1')\n return res\n\n def totalHammingDistance(self, nums: List[int]) -> int:\n \"\"\"For each position i, count the frequency ones[i] of ones in nums. Total contribution from position i is (len(nums)-ones[i])*ones[i].\"\"\"\n ones, N = (collections.defaultdict(int), len(nums))\n for n in nums:\n s = bin(n)[::-1]\n for i in range(0, len(s)):\n if s[i] == '1':\n ones[i] += 1\n return sum([(N - ones[i]) * ones[i] for i in ones])\n", "source": "the_stack_v2_python_sparse", "source_path": "477.total-hamming-distance.py", "source_repo": "Code-Wen/LeetCode_Notes", "split": "val", "star_events_count": 1}
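A quick check of the per-bit counting identity behind totalHammingDistance in the record above: at each bit position, the number of differing pairs is ones * (N - ones), which matches the brute-force pairwise XOR popcount.

nums = [4, 14, 2]
N = len(nums)
total = 0
for bit in range(max(nums).bit_length()):
    ones = sum((n >> bit) & 1 for n in nums)  # how many numbers have this bit set
    total += ones * (N - ones)                # pairs that differ at this bit
print(total)  # 6, same as summing bin(a ^ b).count('1') over all pairs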
{"blob_id": "1771ed036f391c7282c3e12ab47daf6d6c1a3573", "bodies": ["belong = request.GET.get('belong', 'all')\norder_status = request.GET.get('order_status', '-1')\nmall_type = request.user_profile.webapp_type\nwoid = request.webapp_owner_id\nexport2data = {}\nif belong == 'audit':\n second_nav_name = export.ORDER_AUDIT\n has_order = util.is_has_order(request, True)\n page_type = u'财务审核'\n export_jobs = ExportJob.objects.filter(woid=woid, type=3, is_download=0).order_by('-id')\n if export_jobs:\n export2data = {'woid': int(export_jobs[0].woid), 'status': 1 if export_jobs[0].status else 0, 'is_download': 1 if export_jobs[0].is_download else 0, 'id': int(export_jobs[0].id)}\nelse:\n second_nav_name = export.ORDER_ALL\n has_order = util.is_has_order(request)\n page_type = u'所有订单'\n export_jobs = ExportJob.objects.filter(woid=woid, type=1, is_download=0).order_by('-id')\n if export_jobs:\n export2data = {'woid': int(export_jobs[0].woid), 'status': 1 if export_jobs[0].status else 0, 'is_download': 1 if export_jobs[0].is_download else 0, 'id': int(export_jobs[0].id)}\nif not export2data:\n export2data = {'woid': 0, 'status': 1, 'is_download': 1, 'id': 0, 'file_path': 0}\nc = RequestContext(request, {'first_nav_name': FIRST_NAV, 'second_navs': export.get_mall_order_second_navs(request), 'second_nav_name': second_nav_name, 'has_order': has_order, 'page_type': page_type, 'order_status': order_status, 'mall_type': mall_type, 'export2data': export2data})\nreturn render_to_response('mall/editor/orders.html', c)", "belong = request.GET.get('belong', 'all')\nif belong == 'all':\n return util.get_orders_response(request)\nelse:\n return util.get_orders_response(request, True)"], "bodies_text": "<|body_start_0|>\n belong = request.GET.get('belong', 'all')\n order_status = request.GET.get('order_status', '-1')\n mall_type = request.user_profile.webapp_type\n woid = request.webapp_owner_id\n export2data = {}\n if belong == 'audit':\n second_nav_name = export.ORDER_AUDIT\n has_order = util.is_has_order(request, True)\n page_type = u'财务审核'\n export_jobs = ExportJob.objects.filter(woid=woid, type=3, is_download=0).order_by('-id')\n if export_jobs:\n export2data = {'woid': int(export_jobs[0].woid), 'status': 1 if export_jobs[0].status else 0, 'is_download': 1 if export_jobs[0].is_download else 0, 'id': int(export_jobs[0].id)}\n else:\n second_nav_name = export.ORDER_ALL\n has_order = util.is_has_order(request)\n page_type = u'所有订单'\n export_jobs = ExportJob.objects.filter(woid=woid, type=1, is_download=0).order_by('-id')\n if export_jobs:\n export2data = {'woid': int(export_jobs[0].woid), 'status': 1 if export_jobs[0].status else 0, 'is_download': 1 if export_jobs[0].is_download else 0, 'id': int(export_jobs[0].id)}\n if not export2data:\n export2data = {'woid': 0, 'status': 1, 'is_download': 1, 'id': 0, 'file_path': 0}\n c = RequestContext(request, {'first_nav_name': FIRST_NAV, 'second_navs': export.get_mall_order_second_navs(request), 'second_nav_name': second_nav_name, 'has_order': has_order, 'page_type': page_type, 'order_status': order_status, 'mall_type': mall_type, 'export2data': export2data})\n return render_to_response('mall/editor/orders.html', c)\n<|end_body_0|>\n\n<|body_start_1|>\n belong = request.GET.get('belong', 'all')\n if belong == 'all':\n return util.get_orders_response(request)\n else:\n return util.get_orders_response(request, True)\n<|end_body_1|>\n", "class_docstring": "订单列表资源", "class_name": "OrderList", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", 
"full_text": "<|skeleton|>\nclass OrderList:\n \"\"\"订单列表资源\"\"\"\n\n def get(request):\n \"\"\"显示订单列表\"\"\"\n <|body_0|>\n\n def api_get(request):\n \"\"\"advanced table中订单列表\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n belong = request.GET.get('belong', 'all')\n order_status = request.GET.get('order_status', '-1')\n mall_type = request.user_profile.webapp_type\n woid = request.webapp_owner_id\n export2data = {}\n if belong == 'audit':\n second_nav_name = export.ORDER_AUDIT\n has_order = util.is_has_order(request, True)\n page_type = u'财务审核'\n export_jobs = ExportJob.objects.filter(woid=woid, type=3, is_download=0).order_by('-id')\n if export_jobs:\n export2data = {'woid': int(export_jobs[0].woid), 'status': 1 if export_jobs[0].status else 0, 'is_download': 1 if export_jobs[0].is_download else 0, 'id': int(export_jobs[0].id)}\n else:\n second_nav_name = export.ORDER_ALL\n has_order = util.is_has_order(request)\n page_type = u'所有订单'\n export_jobs = ExportJob.objects.filter(woid=woid, type=1, is_download=0).order_by('-id')\n if export_jobs:\n export2data = {'woid': int(export_jobs[0].woid), 'status': 1 if export_jobs[0].status else 0, 'is_download': 1 if export_jobs[0].is_download else 0, 'id': int(export_jobs[0].id)}\n if not export2data:\n export2data = {'woid': 0, 'status': 1, 'is_download': 1, 'id': 0, 'file_path': 0}\n c = RequestContext(request, {'first_nav_name': FIRST_NAV, 'second_navs': export.get_mall_order_second_navs(request), 'second_nav_name': second_nav_name, 'has_order': has_order, 'page_type': page_type, 'order_status': order_status, 'mall_type': mall_type, 'export2data': export2data})\n return render_to_response('mall/editor/orders.html', c)\n<|end_body_0|>\n\n<|body_start_1|>\n belong = request.GET.get('belong', 'all')\n if belong == 'all':\n return util.get_orders_response(request)\n else:\n return util.get_orders_response(request, True)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000316", "length_bytes": 36586, "license_type": "no_license", "methods": [{"docstring": "显示订单列表", "name": "get", "signature": "def get(request)"}, {"docstring": "advanced table中订单列表", "name": "api_get", "signature": "def api_get(request)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_053271", "prompt": "Implement the Python class `OrderList` described below.\n\nClass description:\n订单列表资源\n\nMethod signatures and docstrings:\n- def get(request): 显示订单列表\n- def api_get(request): advanced table中订单列表", "prompted_full_text": "Implement the Python class `OrderList` described below.\n\nClass description:\n订单列表资源\n\nMethod signatures and docstrings:\n- def get(request): 显示订单列表\n- def api_get(request): advanced table中订单列表\n\n<|skeleton|>\nclass OrderList:\n \"\"\"订单列表资源\"\"\"\n\n def get(request):\n \"\"\"显示订单列表\"\"\"\n <|body_0|>\n\n def api_get(request):\n \"\"\"advanced table中订单列表\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n belong = request.GET.get('belong', 'all')\n order_status = request.GET.get('order_status', '-1')\n mall_type = request.user_profile.webapp_type\n woid = request.webapp_owner_id\n export2data = {}\n if belong == 'audit':\n second_nav_name = export.ORDER_AUDIT\n has_order = util.is_has_order(request, True)\n page_type = u'财务审核'\n export_jobs = ExportJob.objects.filter(woid=woid, type=3, is_download=0).order_by('-id')\n if export_jobs:\n export2data = {'woid': int(export_jobs[0].woid), 'status': 1 if export_jobs[0].status else 0, 'is_download': 1 if export_jobs[0].is_download else 0, 'id': 
int(export_jobs[0].id)}\n else:\n second_nav_name = export.ORDER_ALL\n has_order = util.is_has_order(request)\n page_type = u'所有订单'\n export_jobs = ExportJob.objects.filter(woid=woid, type=1, is_download=0).order_by('-id')\n if export_jobs:\n export2data = {'woid': int(export_jobs[0].woid), 'status': 1 if export_jobs[0].status else 0, 'is_download': 1 if export_jobs[0].is_download else 0, 'id': int(export_jobs[0].id)}\n if not export2data:\n export2data = {'woid': 0, 'status': 1, 'is_download': 1, 'id': 0, 'file_path': 0}\n c = RequestContext(request, {'first_nav_name': FIRST_NAV, 'second_navs': export.get_mall_order_second_navs(request), 'second_nav_name': second_nav_name, 'has_order': has_order, 'page_type': page_type, 'order_status': order_status, 'mall_type': mall_type, 'export2data': export2data})\n return render_to_response('mall/editor/orders.html', c)\n<|end_body_0|>\n\n<|body_start_1|>\n belong = request.GET.get('belong', 'all')\n if belong == 'all':\n return util.get_orders_response(request)\n else:\n return util.get_orders_response(request, True)\n<|end_body_1|>\n", "revision_id": "8b2f7befe92841bcc35e0e60cac5958ef3f3af54", "skeleton": "<|skeleton|>\nclass OrderList:\n \"\"\"订单列表资源\"\"\"\n\n def get(request):\n \"\"\"显示订单列表\"\"\"\n <|body_0|>\n\n def api_get(request):\n \"\"\"advanced table中订单列表\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class OrderList:\n \"\"\"订单列表资源\"\"\"\n\n def get(request):\n \"\"\"显示订单列表\"\"\"\n belong = request.GET.get('belong', 'all')\n order_status = request.GET.get('order_status', '-1')\n mall_type = request.user_profile.webapp_type\n woid = request.webapp_owner_id\n export2data = {}\n if belong == 'audit':\n second_nav_name = export.ORDER_AUDIT\n has_order = util.is_has_order(request, True)\n page_type = u'财务审核'\n export_jobs = ExportJob.objects.filter(woid=woid, type=3, is_download=0).order_by('-id')\n if export_jobs:\n export2data = {'woid': int(export_jobs[0].woid), 'status': 1 if export_jobs[0].status else 0, 'is_download': 1 if export_jobs[0].is_download else 0, 'id': int(export_jobs[0].id)}\n else:\n second_nav_name = export.ORDER_ALL\n has_order = util.is_has_order(request)\n page_type = u'所有订单'\n export_jobs = ExportJob.objects.filter(woid=woid, type=1, is_download=0).order_by('-id')\n if export_jobs:\n export2data = {'woid': int(export_jobs[0].woid), 'status': 1 if export_jobs[0].status else 0, 'is_download': 1 if export_jobs[0].is_download else 0, 'id': int(export_jobs[0].id)}\n if not export2data:\n export2data = {'woid': 0, 'status': 1, 'is_download': 1, 'id': 0, 'file_path': 0}\n c = RequestContext(request, {'first_nav_name': FIRST_NAV, 'second_navs': export.get_mall_order_second_navs(request), 'second_nav_name': second_nav_name, 'has_order': has_order, 'page_type': page_type, 'order_status': order_status, 'mall_type': mall_type, 'export2data': export2data})\n return render_to_response('mall/editor/orders.html', c)\n\n def api_get(request):\n \"\"\"advanced table中订单列表\"\"\"\n belong = request.GET.get('belong', 'all')\n if belong == 'all':\n return util.get_orders_response(request)\n else:\n return util.get_orders_response(request, True)\n", "source": "the_stack_v2_python_sparse", "source_path": "weapp/mall/order/order.py", "source_repo": "chengdg/weizoom", "split": "val", "star_events_count": 1}
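The reusable piece of OrderList.get above is the export-job status payload with its fallback; a standalone sketch follows, with a plain list of dicts standing in for the Django ExportJob queryset (assumed newest-first, as with order_by('-id')).

def export_payload(jobs):
    for job in jobs:  # only the newest job matters
        return {'woid': int(job['woid']),
                'status': 1 if job['status'] else 0,
                'is_download': 1 if job['is_download'] else 0,
                'id': int(job['id'])}
    return {'woid': 0, 'status': 1, 'is_download': 1, 'id': 0, 'file_path': 0}

print(export_payload([]))
print(export_payload([{'woid': 7, 'status': True, 'is_download': False, 'id': 3}]))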
{"blob_id": "a4d98f5d57b730590af14185b984e342a0cef813", "bodies": ["x = self.lrelu(self.conv1(x_in))\nx = self.lrelu(self.conv2(x))\nx = self.lrelu(self.conv3(x))\nx = self.conv4(x)\nreturn x", "super(MidNet2, self).__init__()\nself.lrelu = nn.LeakyReLU()\nself.conv1 = nn.Conv2d(in_channels, 64, 3, 1, 2, 2)\nself.conv2 = nn.Conv2d(64, 64, 3, 1, 2, 2)\nself.conv3 = nn.Conv2d(64, 64, 3, 1, 2, 2)\nself.conv4 = nn.Conv2d(64, 64, 3, 1, 2, 2)"], "bodies_text": "<|body_start_0|>\n x = self.lrelu(self.conv1(x_in))\n x = self.lrelu(self.conv2(x))\n x = self.lrelu(self.conv3(x))\n x = self.conv4(x)\n return x\n<|end_body_0|>\n\n<|body_start_1|>\n super(MidNet2, self).__init__()\n self.lrelu = nn.LeakyReLU()\n self.conv1 = nn.Conv2d(in_channels, 64, 3, 1, 2, 2)\n self.conv2 = nn.Conv2d(64, 64, 3, 1, 2, 2)\n self.conv3 = nn.Conv2d(64, 64, 3, 1, 2, 2)\n self.conv4 = nn.Conv2d(64, 64, 3, 1, 2, 2)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "MidNet2", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MidNet2:\n\n def forward(self, x_in):\n \"\"\"Network with dilation rate 2 :param x_in: input convolutional features :returns: processed convolutional features :rtype: Tensor\"\"\"\n <|body_0|>\n\n def __init__(self, in_channels=16):\n \"\"\"FIXME! briefly describe function :param in_channels: Input channels :returns: N/A :rtype: N/A\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n x = self.lrelu(self.conv1(x_in))\n x = self.lrelu(self.conv2(x))\n x = self.lrelu(self.conv3(x))\n x = self.conv4(x)\n return x\n<|end_body_0|>\n\n<|body_start_1|>\n super(MidNet2, self).__init__()\n self.lrelu = nn.LeakyReLU()\n self.conv1 = nn.Conv2d(in_channels, 64, 3, 1, 2, 2)\n self.conv2 = nn.Conv2d(64, 64, 3, 1, 2, 2)\n self.conv3 = nn.Conv2d(64, 64, 3, 1, 2, 2)\n self.conv4 = nn.Conv2d(64, 64, 3, 1, 2, 2)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000317", "length_bytes": 8922, "license_type": "permissive", "methods": [{"docstring": "Network with dilation rate 2 :param x_in: input convolutional features :returns: processed convolutional features :rtype: Tensor", "name": "forward", "signature": "def forward(self, x_in)"}, {"docstring": "FIXME! briefly describe function :param in_channels: Input channels :returns: N/A :rtype: N/A", "name": "__init__", "signature": "def __init__(self, in_channels=16)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_031635", "prompt": "Implement the Python class `MidNet2` described below.\n\nClass description:\nImplement the MidNet2 class.\n\nMethod signatures and docstrings:\n- def forward(self, x_in): Network with dilation rate 2 :param x_in: input convolutional features :returns: processed convolutional features :rtype: Tensor\n- def __init__(self, in_channels=16): FIXME! briefly describe function :param in_channels: Input channels :returns: N/A :rtype: N/A", "prompted_full_text": "Implement the Python class `MidNet2` described below.\n\nClass description:\nImplement the MidNet2 class.\n\nMethod signatures and docstrings:\n- def forward(self, x_in): Network with dilation rate 2 :param x_in: input convolutional features :returns: processed convolutional features :rtype: Tensor\n- def __init__(self, in_channels=16): FIXME! 
briefly describe function :param in_channels: Input channels :returns: N/A :rtype: N/A\n\n<|skeleton|>\nclass MidNet2:\n\n def forward(self, x_in):\n \"\"\"Network with dilation rate 2 :param x_in: input convolutional features :returns: processed convolutional features :rtype: Tensor\"\"\"\n <|body_0|>\n\n def __init__(self, in_channels=16):\n \"\"\"FIXME! briefly describe function :param in_channels: Input channels :returns: N/A :rtype: N/A\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n x = self.lrelu(self.conv1(x_in))\n x = self.lrelu(self.conv2(x))\n x = self.lrelu(self.conv3(x))\n x = self.conv4(x)\n return x\n<|end_body_0|>\n\n<|body_start_1|>\n super(MidNet2, self).__init__()\n self.lrelu = nn.LeakyReLU()\n self.conv1 = nn.Conv2d(in_channels, 64, 3, 1, 2, 2)\n self.conv2 = nn.Conv2d(64, 64, 3, 1, 2, 2)\n self.conv3 = nn.Conv2d(64, 64, 3, 1, 2, 2)\n self.conv4 = nn.Conv2d(64, 64, 3, 1, 2, 2)\n<|end_body_1|>\n", "revision_id": "82c49c36b76987a46dec8479793f7cf0150839c6", "skeleton": "<|skeleton|>\nclass MidNet2:\n\n def forward(self, x_in):\n \"\"\"Network with dilation rate 2 :param x_in: input convolutional features :returns: processed convolutional features :rtype: Tensor\"\"\"\n <|body_0|>\n\n def __init__(self, in_channels=16):\n \"\"\"FIXME! briefly describe function :param in_channels: Input channels :returns: N/A :rtype: N/A\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class MidNet2:\n def forward(self, x_in):\n \"\"\"Network with dilation rate 2 :param x_in: input convolutional features :returns: processed convolutional features :rtype: Tensor\"\"\"\n x = self.lrelu(self.conv1(x_in))\n x = self.lrelu(self.conv2(x))\n x = self.lrelu(self.conv3(x))\n x = self.conv4(x)\n return x\n\n def __init__(self, in_channels=16):\n \"\"\"FIXME! briefly describe function :param in_channels: Input channels :returns: N/A :rtype: N/A\"\"\"\n super(MidNet2, self).__init__()\n self.lrelu = nn.LeakyReLU()\n self.conv1 = nn.Conv2d(in_channels, 64, 3, 1, 2, 2)\n self.conv2 = nn.Conv2d(64, 64, 3, 1, 2, 2)\n self.conv3 = nn.Conv2d(64, 64, 3, 1, 2, 2)\n self.conv4 = nn.Conv2d(64, 64, 3, 1, 2, 2)\n", "source": "the_stack_v2_python_sparse", "source_path": "CURL/rgb_ted.py", "source_repo": "huawei-noah/noah-research", "split": "val", "star_events_count": 816}
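The positional nn.Conv2d arguments in the MidNet2 record above read as (in_channels, out_channels, kernel_size=3, stride=1, padding=2, dilation=2), a combination that leaves spatial size unchanged; a shape check follows, assuming the class subclasses nn.Module (the skeleton format omits base classes).

import torch
import torch.nn as nn

conv = nn.Conv2d(16, 64, 3, 1, 2, 2)  # kernel 3, stride 1, padding 2, dilation 2
x = torch.randn(1, 16, 32, 32)
print(conv(x).shape)  # torch.Size([1, 64, 32, 32]): H and W are preserved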
{"blob_id": "d77ad6bf461c93196744e599479e1c95c6e2d95d", "bodies": ["grpc_obj = GrpcService(self.request)\nresp = await grpc_obj.get_redis_info()\nreturn ResponseMsg(data=resp)", "json_data = await self.request.json()\nhost = json_data.get('host')\nport = json_data.get('port')\ntoken = json_data.get('token')\ngrpc_obj = GrpcService(self.request)\nresp = await grpc_obj.put_redis_info(host=host, port=port, token=token)\nreturn ResponseMsg(data=resp)"], "bodies_text": "<|body_start_0|>\n grpc_obj = GrpcService(self.request)\n resp = await grpc_obj.get_redis_info()\n return ResponseMsg(data=resp)\n<|end_body_0|>\n\n<|body_start_1|>\n json_data = await self.request.json()\n host = json_data.get('host')\n port = json_data.get('port')\n token = json_data.get('token')\n grpc_obj = GrpcService(self.request)\n resp = await grpc_obj.put_redis_info(host=host, port=port, token=token)\n return ResponseMsg(data=resp)\n<|end_body_1|>\n", "class_docstring": "redis", "class_name": "RedisListController", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RedisListController:\n \"\"\"redis\"\"\"\n\n async def get(self):\n \"\"\"获取 配置的 redis 信息 :return: @id->int redis uuid @host->string redis host @port->string redis port @token->string redis password @status->bool redis is ok? @delay_time->int delay time\"\"\"\n <|body_0|>\n\n async def post(self):\n \"\"\"注册 配置的redis信息 @host->string redis host @port->string redis port @token->string redis password :return: @id->int redis uuid @status->bool redis is ok? @delay_time->int delay time\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n grpc_obj = GrpcService(self.request)\n resp = await grpc_obj.get_redis_info()\n return ResponseMsg(data=resp)\n<|end_body_0|>\n\n<|body_start_1|>\n json_data = await self.request.json()\n host = json_data.get('host')\n port = json_data.get('port')\n token = json_data.get('token')\n grpc_obj = GrpcService(self.request)\n resp = await grpc_obj.put_redis_info(host=host, port=port, token=token)\n return ResponseMsg(data=resp)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000318", "length_bytes": 5386, "license_type": "no_license", "methods": [{"docstring": "获取 配置的 redis 信息 :return: @id->int redis uuid @host->string redis host @port->string redis port @token->string redis password @status->bool redis is ok? @delay_time->int delay time", "name": "get", "signature": "async def get(self)"}, {"docstring": "注册 配置的redis信息 @host->string redis host @port->string redis port @token->string redis password :return: @id->int redis uuid @status->bool redis is ok? @delay_time->int delay time", "name": "post", "signature": "async def post(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_011221", "prompt": "Implement the Python class `RedisListController` described below.\n\nClass description:\nredis\n\nMethod signatures and docstrings:\n- async def get(self): 获取 配置的 redis 信息 :return: @id->int redis uuid @host->string redis host @port->string redis port @token->string redis password @status->bool redis is ok? @delay_time->int delay time\n- async def post(self): 注册 配置的redis信息 @host->string redis host @port->string redis port @token->string redis password :return: @id->int redis uuid @status->bool redis is ok? 
@delay_time->int delay time", "prompted_full_text": "Implement the Python class `RedisListController` described below.\n\nClass description:\nredis\n\nMethod signatures and docstrings:\n- async def get(self): 获取 配置的 redis 信息 :return: @id->int redis uuid @host->string redis host @port->string redis port @token->string redis password @status->bool redis is ok? @delay_time->int delay time\n- async def post(self): 注册 配置的redis信息 @host->string redis host @port->string redis port @token->string redis password :return: @id->int redis uuid @status->bool redis is ok? @delay_time->int delay time\n\n<|skeleton|>\nclass RedisListController:\n \"\"\"redis\"\"\"\n\n async def get(self):\n \"\"\"获取 配置的 redis 信息 :return: @id->int redis uuid @host->string redis host @port->string redis port @token->string redis password @status->bool redis is ok? @delay_time->int delay time\"\"\"\n <|body_0|>\n\n async def post(self):\n \"\"\"注册 配置的redis信息 @host->string redis host @port->string redis port @token->string redis password :return: @id->int redis uuid @status->bool redis is ok? @delay_time->int delay time\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n grpc_obj = GrpcService(self.request)\n resp = await grpc_obj.get_redis_info()\n return ResponseMsg(data=resp)\n<|end_body_0|>\n\n<|body_start_1|>\n json_data = await self.request.json()\n host = json_data.get('host')\n port = json_data.get('port')\n token = json_data.get('token')\n grpc_obj = GrpcService(self.request)\n resp = await grpc_obj.put_redis_info(host=host, port=port, token=token)\n return ResponseMsg(data=resp)\n<|end_body_1|>\n", "revision_id": "c6fdd48dae3bc98f9c41c603bef20d10c15476d7", "skeleton": "<|skeleton|>\nclass RedisListController:\n \"\"\"redis\"\"\"\n\n async def get(self):\n \"\"\"获取 配置的 redis 信息 :return: @id->int redis uuid @host->string redis host @port->string redis port @token->string redis password @status->bool redis is ok? @delay_time->int delay time\"\"\"\n <|body_0|>\n\n async def post(self):\n \"\"\"注册 配置的redis信息 @host->string redis host @port->string redis port @token->string redis password :return: @id->int redis uuid @status->bool redis is ok? @delay_time->int delay time\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class RedisListController:\n \"\"\"redis\"\"\"\n\n async def get(self):\n \"\"\"获取 配置的 redis 信息 :return: @id->int redis uuid @host->string redis host @port->string redis port @token->string redis password @status->bool redis is ok? @delay_time->int delay time\"\"\"\n grpc_obj = GrpcService(self.request)\n resp = await grpc_obj.get_redis_info()\n return ResponseMsg(data=resp)\n\n async def post(self):\n \"\"\"注册 配置的redis信息 @host->string redis host @port->string redis port @token->string redis password :return: @id->int redis uuid @status->bool redis is ok? @delay_time->int delay time\"\"\"\n json_data = await self.request.json()\n host = json_data.get('host')\n port = json_data.get('port')\n token = json_data.get('token')\n grpc_obj = GrpcService(self.request)\n resp = await grpc_obj.put_redis_info(host=host, port=port, token=token)\n return ResponseMsg(data=resp)\n", "source": "the_stack_v2_python_sparse", "source_path": "api/redis_controller.py", "source_repo": "Fosity/rpc_web", "split": "val", "star_events_count": 0}
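A runnable sketch of the POST flow in the record above, with stub classes standing in for the async request object and the project-local GrpcService wrapper (both stubs are assumptions, not the real APIs):

import asyncio

class StubRequest:
    async def json(self):
        return {'host': '127.0.0.1', 'port': '6379', 'token': 's3cret'}

class StubGrpcService:
    def __init__(self, request):
        self.request = request
    async def put_redis_info(self, host, port, token):
        return {'id': 1, 'status': True, 'delay_time': 2}  # pretend registration result

async def post(request):
    json_data = await request.json()
    grpc_obj = StubGrpcService(request)
    return await grpc_obj.put_redis_info(host=json_data.get('host'),
                                         port=json_data.get('port'),
                                         token=json_data.get('token'))

print(asyncio.run(post(StubRequest())))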
{"blob_id": "987ece94fee63037dda0c82508864cb6e8e996e7", "bodies": ["super(RelaxedTransformerLoss, self).__init__(fix_im)\nself.regularization_constant = regularization_constant\nself.transformation_loss = transformation_loss\nself.transform_norm_kwargs = transform_norm_kwargs or {}", "transformer = kwargs['transformer']\nassert isinstance(transformer, st.ParameterizedTransformation)\ntransformer_norm = self.regularization_constant * transformer.norm(**self.transform_norm_kwargs)\ndelta = self.transformer.forward(self.fix_im) - examples\ntransformation_loss = self.transformation_loss(delta)\nreturn transformation_loss + transformer_norm"], "bodies_text": "<|body_start_0|>\n super(RelaxedTransformerLoss, self).__init__(fix_im)\n self.regularization_constant = regularization_constant\n self.transformation_loss = transformation_loss\n self.transform_norm_kwargs = transform_norm_kwargs or {}\n<|end_body_0|>\n\n<|body_start_1|>\n transformer = kwargs['transformer']\n assert isinstance(transformer, st.ParameterizedTransformation)\n transformer_norm = self.regularization_constant * transformer.norm(**self.transform_norm_kwargs)\n delta = self.transformer.forward(self.fix_im) - examples\n transformation_loss = self.transformation_loss(delta)\n return transformation_loss + transformer_norm\n<|end_body_1|>\n", "class_docstring": "Relaxed version of transformer loss: assumes that the adversarial examples are of the form Y=S(X) + delta for some S in the transformation class and some small delta perturbation outside the perturbation. In this case, we just compute ||delta|| + c||S|| This saves us from having to do the inner minmization step", "class_name": "RelaxedTransformerLoss", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RelaxedTransformerLoss:\n \"\"\"Relaxed version of transformer loss: assumes that the adversarial examples are of the form Y=S(X) + delta for some S in the transformation class and some small delta perturbation outside the perturbation. 
In this case, we just compute ||delta|| + c||S|| This saves us from having to do the inner minmization step\"\"\"\n\n def __init__(self, fix_im, regularization_constant=1.0, transformation_loss=partial(utils.summed_lp_norm, lp=2), transform_norm_kwargs=None):\n \"\"\"Takes in a reference fix im and a class of transformations we need to search over to compute forward.\"\"\"\n <|body_0|>\n\n def forward(self, examples, *args, **kwargs):\n \"\"\"Computes the distance between examples and args ARGS: examples : NxCxHxW Variable - 'adversarially' perturbed image from the self.fix_im KWARGS: optimization stuff here\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(RelaxedTransformerLoss, self).__init__(fix_im)\n self.regularization_constant = regularization_constant\n self.transformation_loss = transformation_loss\n self.transform_norm_kwargs = transform_norm_kwargs or {}\n<|end_body_0|>\n\n<|body_start_1|>\n transformer = kwargs['transformer']\n assert isinstance(transformer, st.ParameterizedTransformation)\n transformer_norm = self.regularization_constant * transformer.norm(**self.transform_norm_kwargs)\n delta = self.transformer.forward(self.fix_im) - examples\n transformation_loss = self.transformation_loss(delta)\n return transformation_loss + transformer_norm\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000319", "length_bytes": 22033, "license_type": "permissive", "methods": [{"docstring": "Takes in a reference fix im and a class of transformations we need to search over to compute forward.", "name": "__init__", "signature": "def __init__(self, fix_im, regularization_constant=1.0, transformation_loss=partial(utils.summed_lp_norm, lp=2), transform_norm_kwargs=None)"}, {"docstring": "Computes the distance between examples and args ARGS: examples : NxCxHxW Variable - 'adversarially' perturbed image from the self.fix_im KWARGS: optimization stuff here", "name": "forward", "signature": "def forward(self, examples, *args, **kwargs)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_025042", "prompt": "Implement the Python class `RelaxedTransformerLoss` described below.\n\nClass description:\nRelaxed version of transformer loss: assumes that the adversarial examples are of the form Y=S(X) + delta for some S in the transformation class and some small delta perturbation outside the perturbation. In this case, we just compute ||delta|| + c||S|| This saves us from having to do the inner minmization step\n\nMethod signatures and docstrings:\n- def __init__(self, fix_im, regularization_constant=1.0, transformation_loss=partial(utils.summed_lp_norm, lp=2), transform_norm_kwargs=None): Takes in a reference fix im and a class of transformations we need to search over to compute forward.\n- def forward(self, examples, *args, **kwargs): Computes the distance between examples and args ARGS: examples : NxCxHxW Variable - 'adversarially' perturbed image from the self.fix_im KWARGS: optimization stuff here", "prompted_full_text": "Implement the Python class `RelaxedTransformerLoss` described below.\n\nClass description:\nRelaxed version of transformer loss: assumes that the adversarial examples are of the form Y=S(X) + delta for some S in the transformation class and some small delta perturbation outside the perturbation. 
In this case, we just compute ||delta|| + c||S|| This saves us from having to do the inner minmization step\n\nMethod signatures and docstrings:\n- def __init__(self, fix_im, regularization_constant=1.0, transformation_loss=partial(utils.summed_lp_norm, lp=2), transform_norm_kwargs=None): Takes in a reference fix im and a class of transformations we need to search over to compute forward.\n- def forward(self, examples, *args, **kwargs): Computes the distance between examples and args ARGS: examples : NxCxHxW Variable - 'adversarially' perturbed image from the self.fix_im KWARGS: optimization stuff here\n\n<|skeleton|>\nclass RelaxedTransformerLoss:\n \"\"\"Relaxed version of transformer loss: assumes that the adversarial examples are of the form Y=S(X) + delta for some S in the transformation class and some small delta perturbation outside the perturbation. In this case, we just compute ||delta|| + c||S|| This saves us from having to do the inner minmization step\"\"\"\n\n def __init__(self, fix_im, regularization_constant=1.0, transformation_loss=partial(utils.summed_lp_norm, lp=2), transform_norm_kwargs=None):\n \"\"\"Takes in a reference fix im and a class of transformations we need to search over to compute forward.\"\"\"\n <|body_0|>\n\n def forward(self, examples, *args, **kwargs):\n \"\"\"Computes the distance between examples and args ARGS: examples : NxCxHxW Variable - 'adversarially' perturbed image from the self.fix_im KWARGS: optimization stuff here\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(RelaxedTransformerLoss, self).__init__(fix_im)\n self.regularization_constant = regularization_constant\n self.transformation_loss = transformation_loss\n self.transform_norm_kwargs = transform_norm_kwargs or {}\n<|end_body_0|>\n\n<|body_start_1|>\n transformer = kwargs['transformer']\n assert isinstance(transformer, st.ParameterizedTransformation)\n transformer_norm = self.regularization_constant * transformer.norm(**self.transform_norm_kwargs)\n delta = self.transformer.forward(self.fix_im) - examples\n transformation_loss = self.transformation_loss(delta)\n return transformation_loss + transformer_norm\n<|end_body_1|>\n", "revision_id": "c030c009fcf2842d9951ced1296b0d6578cee151", "skeleton": "<|skeleton|>\nclass RelaxedTransformerLoss:\n \"\"\"Relaxed version of transformer loss: assumes that the adversarial examples are of the form Y=S(X) + delta for some S in the transformation class and some small delta perturbation outside the perturbation. 
In this case, we just compute ||delta|| + c||S|| This saves us from having to do the inner minmization step\"\"\"\n\n def __init__(self, fix_im, regularization_constant=1.0, transformation_loss=partial(utils.summed_lp_norm, lp=2), transform_norm_kwargs=None):\n \"\"\"Takes in a reference fix im and a class of transformations we need to search over to compute forward.\"\"\"\n <|body_0|>\n\n def forward(self, examples, *args, **kwargs):\n \"\"\"Computes the distance between examples and args ARGS: examples : NxCxHxW Variable - 'adversarially' perturbed image from the self.fix_im KWARGS: optimization stuff here\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class RelaxedTransformerLoss:\n \"\"\"Relaxed version of transformer loss: assumes that the adversarial examples are of the form Y=S(X) + delta for some S in the transformation class and some small delta perturbation outside the perturbation. In this case, we just compute ||delta|| + c||S|| This saves us from having to do the inner minmization step\"\"\"\n\n def __init__(self, fix_im, regularization_constant=1.0, transformation_loss=partial(utils.summed_lp_norm, lp=2), transform_norm_kwargs=None):\n \"\"\"Takes in a reference fix im and a class of transformations we need to search over to compute forward.\"\"\"\n super(RelaxedTransformerLoss, self).__init__(fix_im)\n self.regularization_constant = regularization_constant\n self.transformation_loss = transformation_loss\n self.transform_norm_kwargs = transform_norm_kwargs or {}\n\n def forward(self, examples, *args, **kwargs):\n \"\"\"Computes the distance between examples and args ARGS: examples : NxCxHxW Variable - 'adversarially' perturbed image from the self.fix_im KWARGS: optimization stuff here\"\"\"\n transformer = kwargs['transformer']\n assert isinstance(transformer, st.ParameterizedTransformation)\n transformer_norm = self.regularization_constant * transformer.norm(**self.transform_norm_kwargs)\n delta = self.transformer.forward(self.fix_im) - examples\n transformation_loss = self.transformation_loss(delta)\n return transformation_loss + transformer_norm\n", "source": "the_stack_v2_python_sparse", "source_path": "recoloradv/mister_ed/loss_functions.py", "source_repo": "cassidylaidlaw/ReColorAdv", "split": "val", "star_events_count": 32}
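A toy version of the relaxed objective ||delta|| + c||S|| from the record above, with a fixed pixel shift standing in for the parameterized spatial transformer and the shift magnitude standing in for its norm; this illustrates the structure only and is not the mister_ed API.

import torch

fix_im = torch.rand(1, 3, 8, 8)
shift = 1                                                 # stand-in transformation S
transformed = torch.roll(fix_im, shifts=shift, dims=-1)
examples = transformed + 0.01 * torch.randn_like(fix_im)  # Y = S(X) + delta

c = 1.0                                                   # regularization constant
delta = transformed - examples
loss = delta.norm(p=2) + c * float(shift)                 # ||delta|| + c||S||
print(loss.item())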
{"blob_id": "85afc79ffaad0a653fdc60cebb86522403db5483", "bodies": ["if isinstance(key, int):\n return ErrorCode(key)\nif key not in ErrorCode._member_map_:\n extend_enum(ErrorCode, key, default)\nreturn ErrorCode[key]", "if not (isinstance(value, int) and 0 <= value <= 4294967295):\n raise ValueError('%r is not a valid %s' % (value, cls.__name__))\nif 14 <= value <= 4294967295:\n temp = hex(value)[2:].upper().zfill(8)\n extend_enum(cls, 'Unassigned_0x%s' % (temp[:4] + '_' + temp[4:]), value)\n return cls(value)\nreturn super()._missing_(value)"], "bodies_text": "<|body_start_0|>\n if isinstance(key, int):\n return ErrorCode(key)\n if key not in ErrorCode._member_map_:\n extend_enum(ErrorCode, key, default)\n return ErrorCode[key]\n<|end_body_0|>\n\n<|body_start_1|>\n if not (isinstance(value, int) and 0 <= value <= 4294967295):\n raise ValueError('%r is not a valid %s' % (value, cls.__name__))\n if 14 <= value <= 4294967295:\n temp = hex(value)[2:].upper().zfill(8)\n extend_enum(cls, 'Unassigned_0x%s' % (temp[:4] + '_' + temp[4:]), value)\n return cls(value)\n return super()._missing_(value)\n<|end_body_1|>\n", "class_docstring": "[ErrorCode] HTTP/2 Error Code", "class_name": "ErrorCode", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ErrorCode:\n \"\"\"[ErrorCode] HTTP/2 Error Code\"\"\"\n\n def get(key, default=-1):\n \"\"\"Backport support for original codes.\"\"\"\n <|body_0|>\n\n def _missing_(cls, value):\n \"\"\"Lookup function used when value is not found.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if isinstance(key, int):\n return ErrorCode(key)\n if key not in ErrorCode._member_map_:\n extend_enum(ErrorCode, key, default)\n return ErrorCode[key]\n<|end_body_0|>\n\n<|body_start_1|>\n if not (isinstance(value, int) and 0 <= value <= 4294967295):\n raise ValueError('%r is not a valid %s' % (value, cls.__name__))\n if 14 <= value <= 4294967295:\n temp = hex(value)[2:].upper().zfill(8)\n extend_enum(cls, 'Unassigned_0x%s' % (temp[:4] + '_' + temp[4:]), value)\n return cls(value)\n return super()._missing_(value)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000320", "length_bytes": 1709, "license_type": "permissive", "methods": [{"docstring": "Backport support for original codes.", "name": "get", "signature": "def get(key, default=-1)"}, {"docstring": "Lookup function used when value is not found.", "name": "_missing_", "signature": "def _missing_(cls, value)"}], "n_methods": 2, "prompt": "Implement the Python class `ErrorCode` described below.\n\nClass description:\n[ErrorCode] HTTP/2 Error Code\n\nMethod signatures and docstrings:\n- def get(key, default=-1): Backport support for original codes.\n- def _missing_(cls, value): Lookup function used when value is not found.", "prompted_full_text": "Implement the Python class `ErrorCode` described below.\n\nClass description:\n[ErrorCode] HTTP/2 Error Code\n\nMethod signatures and docstrings:\n- def get(key, default=-1): Backport support for original codes.\n- def _missing_(cls, value): Lookup function used when value is not found.\n\n<|skeleton|>\nclass ErrorCode:\n \"\"\"[ErrorCode] HTTP/2 Error Code\"\"\"\n\n def get(key, default=-1):\n \"\"\"Backport support for original codes.\"\"\"\n <|body_0|>\n\n def _missing_(cls, value):\n \"\"\"Lookup function used when value is not found.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if isinstance(key, int):\n return ErrorCode(key)\n if key not in 
ErrorCode._member_map_:\n extend_enum(ErrorCode, key, default)\n return ErrorCode[key]\n<|end_body_0|>\n\n<|body_start_1|>\n if not (isinstance(value, int) and 0 <= value <= 4294967295):\n raise ValueError('%r is not a valid %s' % (value, cls.__name__))\n if 14 <= value <= 4294967295:\n temp = hex(value)[2:].upper().zfill(8)\n extend_enum(cls, 'Unassigned_0x%s' % (temp[:4] + '_' + temp[4:]), value)\n return cls(value)\n return super()._missing_(value)\n<|end_body_1|>\n", "revision_id": "90cd07d67df28d5c5ab0585bc60f467a78d9db33", "skeleton": "<|skeleton|>\nclass ErrorCode:\n \"\"\"[ErrorCode] HTTP/2 Error Code\"\"\"\n\n def get(key, default=-1):\n \"\"\"Backport support for original codes.\"\"\"\n <|body_0|>\n\n def _missing_(cls, value):\n \"\"\"Lookup function used when value is not found.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ErrorCode:\n \"\"\"[ErrorCode] HTTP/2 Error Code\"\"\"\n\n def get(key, default=-1):\n \"\"\"Backport support for original codes.\"\"\"\n if isinstance(key, int):\n return ErrorCode(key)\n if key not in ErrorCode._member_map_:\n extend_enum(ErrorCode, key, default)\n return ErrorCode[key]\n\n def _missing_(cls, value):\n \"\"\"Lookup function used when value is not found.\"\"\"\n if not (isinstance(value, int) and 0 <= value <= 4294967295):\n raise ValueError('%r is not a valid %s' % (value, cls.__name__))\n if 14 <= value <= 4294967295:\n temp = hex(value)[2:].upper().zfill(8)\n extend_enum(cls, 'Unassigned_0x%s' % (temp[:4] + '_' + temp[4:]), value)\n return cls(value)\n return super()._missing_(value)\n", "source": "the_stack_v2_python_sparse", "source_path": "pcapkit/const/http/error_code.py", "source_repo": "stjordanis/PyPCAPKit", "split": "val", "star_events_count": 0}
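The _missing_ plus extend_enum pattern in the record above registers unknown values on the fly; a small self-contained demo follows, using the third-party aenum package, which provides extend_enum.

from aenum import IntEnum, extend_enum

class Code(IntEnum):
    NO_ERROR = 0

    @classmethod
    def _missing_(cls, value):
        if isinstance(value, int) and 0 <= value <= 0xFFFFFFFF:
            extend_enum(cls, 'Unassigned_0x%08X' % value, value)
            return cls(value)  # lookup now succeeds without recursing
        return super()._missing_(value)

print(Code(0))     # Code.NO_ERROR
print(Code(0x20))  # Code.Unassigned_0x00000020, created on demand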
{"blob_id": "e557217b93cdd29f210d226db1238abb6afcb0a0", "bodies": ["super().__init__()\nif nTh < 2:\n print('nTh must be an integer >= 2')\n exit(1)\nself.d = d\nself.m = m\nself.nTh = nTh\nself.layers = nn.ModuleList([])\nself.layers.append(nn.Linear(d + 1, m, bias=True))\nself.layers.append(nn.Linear(m, m, bias=True))\nfor i in range(nTh - 2):\n self.layers.append(copy.deepcopy(self.layers[1]))\nself.act = antiderivTanh\nself.h = 1.0 / (self.nTh - 1)", "x = self.act(self.layers[0].forward(x))\nfor i in range(1, self.nTh):\n x = x + self.h * self.act(self.layers[i](x))\nreturn x"], "bodies_text": "<|body_start_0|>\n super().__init__()\n if nTh < 2:\n print('nTh must be an integer >= 2')\n exit(1)\n self.d = d\n self.m = m\n self.nTh = nTh\n self.layers = nn.ModuleList([])\n self.layers.append(nn.Linear(d + 1, m, bias=True))\n self.layers.append(nn.Linear(m, m, bias=True))\n for i in range(nTh - 2):\n self.layers.append(copy.deepcopy(self.layers[1]))\n self.act = antiderivTanh\n self.h = 1.0 / (self.nTh - 1)\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.act(self.layers[0].forward(x))\n for i in range(1, self.nTh):\n x = x + self.h * self.act(self.layers[i](x))\n return x\n<|end_body_1|>\n", "class_docstring": "", "class_name": "ResNN", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ResNN:\n\n def __init__(self, d, m, nTh=2):\n \"\"\"ResNet N portion of Phi :param d: int, dimension of space input (expect inputs to be d+1 for space-time) :param m: int, hidden dimension :param nTh: int, number of resNet layers , (number of theta layers)\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"N(s;theta). the forward propogation of the ResNet :param x: tensor nex-by-d+1, inputs :return: tensor nex-by-m, outputs\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n if nTh < 2:\n print('nTh must be an integer >= 2')\n exit(1)\n self.d = d\n self.m = m\n self.nTh = nTh\n self.layers = nn.ModuleList([])\n self.layers.append(nn.Linear(d + 1, m, bias=True))\n self.layers.append(nn.Linear(m, m, bias=True))\n for i in range(nTh - 2):\n self.layers.append(copy.deepcopy(self.layers[1]))\n self.act = antiderivTanh\n self.h = 1.0 / (self.nTh - 1)\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.act(self.layers[0].forward(x))\n for i in range(1, self.nTh):\n x = x + self.h * self.act(self.layers[i](x))\n return x\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000321", "length_bytes": 5671, "license_type": "permissive", "methods": [{"docstring": "ResNet N portion of Phi :param d: int, dimension of space input (expect inputs to be d+1 for space-time) :param m: int, hidden dimension :param nTh: int, number of resNet layers , (number of theta layers)", "name": "__init__", "signature": "def __init__(self, d, m, nTh=2)"}, {"docstring": "N(s;theta). 
the forward propogation of the ResNet :param x: tensor nex-by-d+1, inputs :return: tensor nex-by-m, outputs", "name": "forward", "signature": "def forward(self, x)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001330", "prompt": "Implement the Python class `ResNN` described below.\n\nClass description:\nImplement the ResNN class.\n\nMethod signatures and docstrings:\n- def __init__(self, d, m, nTh=2): ResNet N portion of Phi :param d: int, dimension of space input (expect inputs to be d+1 for space-time) :param m: int, hidden dimension :param nTh: int, number of resNet layers , (number of theta layers)\n- def forward(self, x): N(s;theta). the forward propogation of the ResNet :param x: tensor nex-by-d+1, inputs :return: tensor nex-by-m, outputs", "prompted_full_text": "Implement the Python class `ResNN` described below.\n\nClass description:\nImplement the ResNN class.\n\nMethod signatures and docstrings:\n- def __init__(self, d, m, nTh=2): ResNet N portion of Phi :param d: int, dimension of space input (expect inputs to be d+1 for space-time) :param m: int, hidden dimension :param nTh: int, number of resNet layers , (number of theta layers)\n- def forward(self, x): N(s;theta). the forward propogation of the ResNet :param x: tensor nex-by-d+1, inputs :return: tensor nex-by-m, outputs\n\n<|skeleton|>\nclass ResNN:\n\n def __init__(self, d, m, nTh=2):\n \"\"\"ResNet N portion of Phi :param d: int, dimension of space input (expect inputs to be d+1 for space-time) :param m: int, hidden dimension :param nTh: int, number of resNet layers , (number of theta layers)\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"N(s;theta). the forward propogation of the ResNet :param x: tensor nex-by-d+1, inputs :return: tensor nex-by-m, outputs\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n if nTh < 2:\n print('nTh must be an integer >= 2')\n exit(1)\n self.d = d\n self.m = m\n self.nTh = nTh\n self.layers = nn.ModuleList([])\n self.layers.append(nn.Linear(d + 1, m, bias=True))\n self.layers.append(nn.Linear(m, m, bias=True))\n for i in range(nTh - 2):\n self.layers.append(copy.deepcopy(self.layers[1]))\n self.act = antiderivTanh\n self.h = 1.0 / (self.nTh - 1)\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.act(self.layers[0].forward(x))\n for i in range(1, self.nTh):\n x = x + self.h * self.act(self.layers[i](x))\n return x\n<|end_body_1|>\n", "revision_id": "25465cbfec7abae9c029154fdd7de48b4facbc61", "skeleton": "<|skeleton|>\nclass ResNN:\n\n def __init__(self, d, m, nTh=2):\n \"\"\"ResNet N portion of Phi :param d: int, dimension of space input (expect inputs to be d+1 for space-time) :param m: int, hidden dimension :param nTh: int, number of resNet layers , (number of theta layers)\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"N(s;theta). 
the forward propogation of the ResNet :param x: tensor nex-by-d+1, inputs :return: tensor nex-by-m, outputs\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ResNN:\n def __init__(self, d, m, nTh=2):\n \"\"\"ResNet N portion of Phi :param d: int, dimension of space input (expect inputs to be d+1 for space-time) :param m: int, hidden dimension :param nTh: int, number of resNet layers , (number of theta layers)\"\"\"\n super().__init__()\n if nTh < 2:\n print('nTh must be an integer >= 2')\n exit(1)\n self.d = d\n self.m = m\n self.nTh = nTh\n self.layers = nn.ModuleList([])\n self.layers.append(nn.Linear(d + 1, m, bias=True))\n self.layers.append(nn.Linear(m, m, bias=True))\n for i in range(nTh - 2):\n self.layers.append(copy.deepcopy(self.layers[1]))\n self.act = antiderivTanh\n self.h = 1.0 / (self.nTh - 1)\n\n def forward(self, x):\n \"\"\"N(s;theta). the forward propogation of the ResNet :param x: tensor nex-by-d+1, inputs :return: tensor nex-by-m, outputs\"\"\"\n x = self.act(self.layers[0].forward(x))\n for i in range(1, self.nTh):\n x = x + self.h * self.act(self.layers[i](x))\n return x\n", "source": "the_stack_v2_python_sparse", "source_path": "src/Phi.py", "source_repo": "Zhangyj98/NeuralOC", "split": "val", "star_events_count": 0}
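A minimal runnable version of the ResNN forward pass above: one opening layer, then nTh-1 residual steps x <- x + h * act(layer(x)) with step size h = 1/(nTh-1); torch.tanh stands in for the repo's antiderivTanh activation.

import torch
import torch.nn as nn

d, m, nTh = 2, 16, 3
h = 1.0 / (nTh - 1)
opening = nn.Linear(d + 1, m)  # maps the space-time input up to width m
layers = nn.ModuleList([nn.Linear(m, m) for _ in range(nTh - 1)])

x = torch.randn(5, d + 1)      # nex-by-(d+1) inputs
x = torch.tanh(opening(x))
for layer in layers:
    x = x + h * torch.tanh(layer(x))  # forward-Euler style residual step
print(x.shape)                 # torch.Size([5, 16])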
{"blob_id": "dbc03d641b62560eafe9eee9cef1639629c6e76e", "bodies": ["data = {}\nfor k in cls.serialization_fields:\n data[k] = pickle.loads((path / k).read_bytes())\nreturn cls(**data)", "path.mkdir(exist_ok=True)\nfor k in self.serialization_fields:\n data = getattr(self, k)\n with (path / k).open('wb') as f:\n pickle.dump(data, f)"], "bodies_text": "<|body_start_0|>\n data = {}\n for k in cls.serialization_fields:\n data[k] = pickle.loads((path / k).read_bytes())\n return cls(**data)\n<|end_body_0|>\n\n<|body_start_1|>\n path.mkdir(exist_ok=True)\n for k in self.serialization_fields:\n data = getattr(self, k)\n with (path / k).open('wb') as f:\n pickle.dump(data, f)\n<|end_body_1|>\n", "class_docstring": "SerDe Mixin for arbitrary class. Any class can implement `SerDe` if their fields to be saved can be dumped as pickle. Attributes: serialization_fields: ClassVar for indicating which fields to be saved. Examples: >>> class Foo(SerializationMixin): serialization_fields = [\"foo\"] def __init__(self): self.foo = 'foo' # this field will be saved. self.bar = 'bar' # but this is not because not listed in `serialization_fields`. >>> obj = Foo() >>> camphr.serde.to_disk(obj, \"path/to/save\")", "class_name": "SerializationMixin", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SerializationMixin:\n \"\"\"SerDe Mixin for arbitrary class. Any class can implement `SerDe` if their fields to be saved can be dumped as pickle. Attributes: serialization_fields: ClassVar for indicating which fields to be saved. Examples: >>> class Foo(SerializationMixin): serialization_fields = [\"foo\"] def __init__(self): self.foo = 'foo' # this field will be saved. self.bar = 'bar' # but this is not because not listed in `serialization_fields`. >>> obj = Foo() >>> camphr.serde.to_disk(obj, \"path/to/save\")\"\"\"\n\n def from_disk(cls: Type[T_Ser], path: Path) -> T_Ser:\n \"\"\"Save fields in `serialization_fields` as pickle.\"\"\"\n <|body_0|>\n\n def to_disk(self, path: Path):\n \"\"\"Load fields from pickle file, then create the instance.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n data = {}\n for k in cls.serialization_fields:\n data[k] = pickle.loads((path / k).read_bytes())\n return cls(**data)\n<|end_body_0|>\n\n<|body_start_1|>\n path.mkdir(exist_ok=True)\n for k in self.serialization_fields:\n data = getattr(self, k)\n with (path / k).open('wb') as f:\n pickle.dump(data, f)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000322", "length_bytes": 4927, "license_type": "permissive", "methods": [{"docstring": "Save fields in `serialization_fields` as pickle.", "name": "from_disk", "signature": "def from_disk(cls: Type[T_Ser], path: Path) -> T_Ser"}, {"docstring": "Load fields from pickle file, then create the instance.", "name": "to_disk", "signature": "def to_disk(self, path: Path)"}], "n_methods": 2, "prompt": "Implement the Python class `SerializationMixin` described below.\n\nClass description:\nSerDe Mixin for arbitrary class. Any class can implement `SerDe` if their fields to be saved can be dumped as pickle. Attributes: serialization_fields: ClassVar for indicating which fields to be saved. Examples: >>> class Foo(SerializationMixin): serialization_fields = [\"foo\"] def __init__(self): self.foo = 'foo' # this field will be saved. self.bar = 'bar' # but this is not because not listed in `serialization_fields`. 
>>> obj = Foo() >>> camphr.serde.to_disk(obj, \"path/to/save\")\n\nMethod signatures and docstrings:\n- def from_disk(cls: Type[T_Ser], path: Path) -> T_Ser: Save fields in `serialization_fields` as pickle.\n- def to_disk(self, path: Path): Load fields from pickle file, then create the instance.", "prompted_full_text": "Implement the Python class `SerializationMixin` described below.\n\nClass description:\nSerDe Mixin for arbitrary class. Any class can implement `SerDe` if their fields to be saved can be dumped as pickle. Attributes: serialization_fields: ClassVar for indicating which fields to be saved. Examples: >>> class Foo(SerializationMixin): serialization_fields = [\"foo\"] def __init__(self): self.foo = 'foo' # this field will be saved. self.bar = 'bar' # but this is not because not listed in `serialization_fields`. >>> obj = Foo() >>> camphr.serde.to_disk(obj, \"path/to/save\")\n\nMethod signatures and docstrings:\n- def from_disk(cls: Type[T_Ser], path: Path) -> T_Ser: Save fields in `serialization_fields` as pickle.\n- def to_disk(self, path: Path): Load fields from pickle file, then create the instance.\n\n<|skeleton|>\nclass SerializationMixin:\n \"\"\"SerDe Mixin for arbitrary class. Any class can implement `SerDe` if their fields to be saved can be dumped as pickle. Attributes: serialization_fields: ClassVar for indicating which fields to be saved. Examples: >>> class Foo(SerializationMixin): serialization_fields = [\"foo\"] def __init__(self): self.foo = 'foo' # this field will be saved. self.bar = 'bar' # but this is not because not listed in `serialization_fields`. >>> obj = Foo() >>> camphr.serde.to_disk(obj, \"path/to/save\")\"\"\"\n\n def from_disk(cls: Type[T_Ser], path: Path) -> T_Ser:\n \"\"\"Save fields in `serialization_fields` as pickle.\"\"\"\n <|body_0|>\n\n def to_disk(self, path: Path):\n \"\"\"Load fields from pickle file, then create the instance.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n data = {}\n for k in cls.serialization_fields:\n data[k] = pickle.loads((path / k).read_bytes())\n return cls(**data)\n<|end_body_0|>\n\n<|body_start_1|>\n path.mkdir(exist_ok=True)\n for k in self.serialization_fields:\n data = getattr(self, k)\n with (path / k).open('wb') as f:\n pickle.dump(data, f)\n<|end_body_1|>\n", "revision_id": "d464d079e71fed0d53f78d45a42c1fdf6637c10a", "skeleton": "<|skeleton|>\nclass SerializationMixin:\n \"\"\"SerDe Mixin for arbitrary class. Any class can implement `SerDe` if their fields to be saved can be dumped as pickle. Attributes: serialization_fields: ClassVar for indicating which fields to be saved. Examples: >>> class Foo(SerializationMixin): serialization_fields = [\"foo\"] def __init__(self): self.foo = 'foo' # this field will be saved. self.bar = 'bar' # but this is not because not listed in `serialization_fields`. >>> obj = Foo() >>> camphr.serde.to_disk(obj, \"path/to/save\")\"\"\"\n\n def from_disk(cls: Type[T_Ser], path: Path) -> T_Ser:\n \"\"\"Save fields in `serialization_fields` as pickle.\"\"\"\n <|body_0|>\n\n def to_disk(self, path: Path):\n \"\"\"Load fields from pickle file, then create the instance.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SerializationMixin:\n \"\"\"SerDe Mixin for arbitrary class. Any class can implement `SerDe` if their fields to be saved can be dumped as pickle. 
Attributes: serialization_fields: ClassVar for indicating which fields to be saved. Examples: >>> class Foo(SerializationMixin): serialization_fields = [\"foo\"] def __init__(self): self.foo = 'foo' # this field will be saved. self.bar = 'bar' # but this is not because not listed in `serialization_fields`. >>> obj = Foo() >>> camphr.serde.to_disk(obj, \"path/to/save\")\"\"\"\n\n def from_disk(cls: Type[T_Ser], path: Path) -> T_Ser:\n \"\"\"Save fields in `serialization_fields` as pickle.\"\"\"\n data = {}\n for k in cls.serialization_fields:\n data[k] = pickle.loads((path / k).read_bytes())\n return cls(**data)\n\n def to_disk(self, path: Path):\n \"\"\"Load fields from pickle file, then create the instance.\"\"\"\n path.mkdir(exist_ok=True)\n for k in self.serialization_fields:\n data = getattr(self, k)\n with (path / k).open('wb') as f:\n pickle.dump(data, f)\n", "source": "the_stack_v2_python_sparse", "source_path": "packages/camphr/camphr/serde.py", "source_repo": "PKSHATechnology-Research/camphr", "split": "val", "star_events_count": 373}
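Note that the record's `from_disk`/`to_disk` docstrings were swapped relative to the method bodies (the `cls` variant reads pickles and constructs an instance; the `self` variant writes them); they are corrected above. A round-trip usage sketch follows; the record's bodies assume `pickle`, `pathlib.Path`, and a `T_Ser` TypeVar from the surrounding camphr module, and `from_disk` takes `cls`, so the `@classmethod` decorator is an assumption added to make the sketch run:

```python
# Hedged sketch: self-contained round trip for the mixin record above.
import pickle
import tempfile
from pathlib import Path
from typing import ClassVar, List, Type, TypeVar

T_Ser = TypeVar('T_Ser', bound='SerializationMixin')


class SerializationMixin:
    serialization_fields: ClassVar[List[str]] = []

    @classmethod  # assumption: the record's signature takes cls
    def from_disk(cls: Type[T_Ser], path: Path) -> T_Ser:
        """Load fields from pickle file, then create the instance."""
        data = {k: pickle.loads((path / k).read_bytes()) for k in cls.serialization_fields}
        return cls(**data)

    def to_disk(self, path: Path) -> None:
        """Save fields in `serialization_fields` as pickle."""
        path.mkdir(exist_ok=True)
        for k in self.serialization_fields:
            with (path / k).open('wb') as f:
                pickle.dump(getattr(self, k), f)


class Foo(SerializationMixin):
    serialization_fields = ['foo']

    def __init__(self, foo: str = 'foo'):
        self.foo = foo    # listed, so it is saved
        self.bar = 'bar'  # not listed, so it is not saved


with tempfile.TemporaryDirectory() as tmp:
    target = Path(tmp) / 'foo_dir'
    Foo('hello').to_disk(target)
    assert Foo.from_disk(target).foo == 'hello'
```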
{"blob_id": "b241bdb66c696cf457b36d9911d6564df2576cde", "bodies": ["cap = cv.VideoCapture('/Users/liuweijie/Desktop/output.mp4')\nfeature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7, blockSize=7)\nlk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03))\ncolor = np.random.randint(0, 255, (100, 3))\nret, old_frame = cap.read()\nold_gray = cv.cvtColor(old_frame, cv.COLOR_BGR2GRAY)\np0 = cv.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\nmask = np.zeros_like(old_frame)\nwhile 1:\n ret, frame = cap.read()\n frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\n p1, st, err = cv.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)\n good_new = p1[st == 1]\n good_old = p0[st == 1]\n for i, (new, old) in enumerate(zip(good_new, good_old)):\n a, b = new.ravel()\n c, d = old.ravel()\n mask = cv.line(mask, (a, b), (c, d), color[i].tolist(), 2)\n frame = cv.circle(frame, (a, b), 5, color[i].tolist(), -1)\n img = cv.add(frame, mask)\n cv.imshow('frame', img)\n k = cv.waitKey(30) & 255\n if k == 27:\n break\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1, 1, 2)\ncv.destroyAllWindows()\ncap.release()", "cap = cv.VideoCapture('/Users/liuweijie/Desktop/output.mp4')\nret, frame1 = cap.read()\nprvs = cv.cvtColor(frame1, cv.COLOR_BGR2GRAY)\nhsv = np.zeros_like(frame1)\nhsv[..., 1] = 255\nwhile True:\n ret, frame2 = cap.read()\n next = cv.cvtColor(frame2, cv.COLOR_BGR2GRAY)\n flow = cv.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 15, 3, 5, 1.2, 0)\n mag, ang = cv.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv.normalize(mag, None, 0, 255, cv.NORM_MINMAX)\n rgb = cv.cvtColor(hsv, cv.COLOR_HSV2BGR)\n cv.imshow('frame2', rgb)\n cv.waitKey(30)\n prvs = next\ncap.release()\ncv.destroyAllWindows()"], "bodies_text": "<|body_start_0|>\n cap = cv.VideoCapture('/Users/liuweijie/Desktop/output.mp4')\n feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7, blockSize=7)\n lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03))\n color = np.random.randint(0, 255, (100, 3))\n ret, old_frame = cap.read()\n old_gray = cv.cvtColor(old_frame, cv.COLOR_BGR2GRAY)\n p0 = cv.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\n mask = np.zeros_like(old_frame)\n while 1:\n ret, frame = cap.read()\n frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\n p1, st, err = cv.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)\n good_new = p1[st == 1]\n good_old = p0[st == 1]\n for i, (new, old) in enumerate(zip(good_new, good_old)):\n a, b = new.ravel()\n c, d = old.ravel()\n mask = cv.line(mask, (a, b), (c, d), color[i].tolist(), 2)\n frame = cv.circle(frame, (a, b), 5, color[i].tolist(), -1)\n img = cv.add(frame, mask)\n cv.imshow('frame', img)\n k = cv.waitKey(30) & 255\n if k == 27:\n break\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1, 1, 2)\n cv.destroyAllWindows()\n cap.release()\n<|end_body_0|>\n\n<|body_start_1|>\n cap = cv.VideoCapture('/Users/liuweijie/Desktop/output.mp4')\n ret, frame1 = cap.read()\n prvs = cv.cvtColor(frame1, cv.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n while True:\n ret, frame2 = cap.read()\n next = cv.cvtColor(frame2, cv.COLOR_BGR2GRAY)\n flow = cv.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 15, 3, 5, 1.2, 0)\n mag, ang = cv.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n 
hsv[..., 2] = cv.normalize(mag, None, 0, 255, cv.NORM_MINMAX)\n rgb = cv.cvtColor(hsv, cv.COLOR_HSV2BGR)\n cv.imshow('frame2', rgb)\n cv.waitKey(30)\n prvs = next\n cap.release()\n cv.destroyAllWindows()\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Example", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Example:\n\n def lucas_kanade_tracker(self):\n \"\"\"Feature point tracking based on optical flow\"\"\"\n <|body_0|>\n\n def optical_flow(cls):\n \"\"\"Use optical flow to find moving regions\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cap = cv.VideoCapture('/Users/liuweijie/Desktop/output.mp4')\n feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7, blockSize=7)\n lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03))\n color = np.random.randint(0, 255, (100, 3))\n ret, old_frame = cap.read()\n old_gray = cv.cvtColor(old_frame, cv.COLOR_BGR2GRAY)\n p0 = cv.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\n mask = np.zeros_like(old_frame)\n while 1:\n ret, frame = cap.read()\n frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\n p1, st, err = cv.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)\n good_new = p1[st == 1]\n good_old = p0[st == 1]\n for i, (new, old) in enumerate(zip(good_new, good_old)):\n a, b = new.ravel()\n c, d = old.ravel()\n mask = cv.line(mask, (a, b), (c, d), color[i].tolist(), 2)\n frame = cv.circle(frame, (a, b), 5, color[i].tolist(), -1)\n img = cv.add(frame, mask)\n cv.imshow('frame', img)\n k = cv.waitKey(30) & 255\n if k == 27:\n break\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1, 1, 2)\n cv.destroyAllWindows()\n cap.release()\n<|end_body_0|>\n\n<|body_start_1|>\n cap = cv.VideoCapture('/Users/liuweijie/Desktop/output.mp4')\n ret, frame1 = cap.read()\n prvs = cv.cvtColor(frame1, cv.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n while True:\n ret, frame2 = cap.read()\n next = cv.cvtColor(frame2, cv.COLOR_BGR2GRAY)\n flow = cv.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 15, 3, 5, 1.2, 0)\n mag, ang = cv.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv.normalize(mag, None, 0, 255, cv.NORM_MINMAX)\n rgb = cv.cvtColor(hsv, cv.COLOR_HSV2BGR)\n cv.imshow('frame2', rgb)\n cv.waitKey(30)\n prvs = next\n cap.release()\n cv.destroyAllWindows()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000323", "length_bytes": 4094, "license_type": "no_license", "methods": [{"docstring": "Feature point tracking based on optical flow", "name": "lucas_kanade_tracker", "signature": "def lucas_kanade_tracker(self)"}, {"docstring": "Use optical flow to find moving regions", "name": "optical_flow", "signature": "def optical_flow(cls)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_036211", "prompt": "Implement the Python class `Example` described below.\n\nClass description:\nImplement the Example class.\n\nMethod signatures and docstrings:\n- def lucas_kanade_tracker(self): Feature point tracking based on optical flow\n- def optical_flow(cls): Use optical flow to find moving regions", "prompted_full_text": "Implement the Python class `Example` described below.\n\nClass description:\nImplement the Example class.\n\nMethod signatures and docstrings:\n- def lucas_kanade_tracker(self): Feature point tracking based on optical flow\n- def optical_flow(cls): Use optical flow to find moving regions\n\n<|skeleton|>\nclass Example:\n\n def lucas_kanade_tracker(self):\n \"\"\"Feature point tracking based on optical flow\"\"\"\n <|body_0|>\n\n def optical_flow(cls):\n \"\"\"Use optical flow to find moving regions\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cap = 
cv.VideoCapture('/Users/liuweijie/Desktop/output.mp4')\n feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7, blockSize=7)\n lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03))\n color = np.random.randint(0, 255, (100, 3))\n ret, old_frame = cap.read()\n old_gray = cv.cvtColor(old_frame, cv.COLOR_BGR2GRAY)\n p0 = cv.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\n mask = np.zeros_like(old_frame)\n while 1:\n ret, frame = cap.read()\n frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\n p1, st, err = cv.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)\n good_new = p1[st == 1]\n good_old = p0[st == 1]\n for i, (new, old) in enumerate(zip(good_new, good_old)):\n a, b = new.ravel()\n c, d = old.ravel()\n mask = cv.line(mask, (a, b), (c, d), color[i].tolist(), 2)\n frame = cv.circle(frame, (a, b), 5, color[i].tolist(), -1)\n img = cv.add(frame, mask)\n cv.imshow('frame', img)\n k = cv.waitKey(30) & 255\n if k == 27:\n break\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1, 1, 2)\n cv.destroyAllWindows()\n cap.release()\n<|end_body_0|>\n\n<|body_start_1|>\n cap = cv.VideoCapture('/Users/liuweijie/Desktop/output.mp4')\n ret, frame1 = cap.read()\n prvs = cv.cvtColor(frame1, cv.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n while True:\n ret, frame2 = cap.read()\n next = cv.cvtColor(frame2, cv.COLOR_BGR2GRAY)\n flow = cv.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 15, 3, 5, 1.2, 0)\n mag, ang = cv.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv.normalize(mag, None, 0, 255, cv.NORM_MINMAX)\n rgb = cv.cvtColor(hsv, cv.COLOR_HSV2BGR)\n cv.imshow('frame2', rgb)\n cv.waitKey(30)\n prvs = next\n cap.release()\n cv.destroyAllWindows()\n<|end_body_1|>\n", "revision_id": "890fa5e92c69b1bd067ca8fbac6a281c9770bb9f", "skeleton": "<|skeleton|>\nclass Example:\n\n def lucas_kanade_tracker(self):\n \"\"\"Feature point tracking based on optical flow\"\"\"\n <|body_0|>\n\n def optical_flow(cls):\n \"\"\"Use optical flow to find moving regions\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Example:\n def lucas_kanade_tracker(self):\n \"\"\"Feature point tracking based on optical flow\"\"\"\n cap = cv.VideoCapture('/Users/liuweijie/Desktop/output.mp4')\n feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7, blockSize=7)\n lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03))\n color = np.random.randint(0, 255, (100, 3))\n ret, old_frame = cap.read()\n old_gray = cv.cvtColor(old_frame, cv.COLOR_BGR2GRAY)\n p0 = cv.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\n mask = np.zeros_like(old_frame)\n while 1:\n ret, frame = cap.read()\n frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\n p1, st, err = cv.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)\n good_new = p1[st == 1]\n good_old = p0[st == 1]\n for i, (new, old) in enumerate(zip(good_new, good_old)):\n a, b = new.ravel()\n c, d = old.ravel()\n mask = cv.line(mask, (a, b), (c, d), color[i].tolist(), 2)\n frame = cv.circle(frame, (a, b), 5, color[i].tolist(), -1)\n img = cv.add(frame, mask)\n cv.imshow('frame', img)\n k = cv.waitKey(30) & 255\n if k == 27:\n break\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1, 1, 2)\n cv.destroyAllWindows()\n cap.release()\n\n def optical_flow(cls):\n 
\"\"\"Use optical flow to find moving regions\"\"\"\n cap = cv.VideoCapture('/Users/liuweijie/Desktop/output.mp4')\n ret, frame1 = cap.read()\n prvs = cv.cvtColor(frame1, cv.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n while True:\n ret, frame2 = cap.read()\n next = cv.cvtColor(frame2, cv.COLOR_BGR2GRAY)\n flow = cv.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 15, 3, 5, 1.2, 0)\n mag, ang = cv.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv.normalize(mag, None, 0, 255, cv.NORM_MINMAX)\n rgb = cv.cvtColor(hsv, cv.COLOR_HSV2BGR)\n cv.imshow('frame2', rgb)\n cv.waitKey(30)\n prvs = next\n cap.release()\n cv.destroyAllWindows()\n", "source": "the_stack_v2_python_sparse", "source_path": "python/Part.VI.VideoAnalysis/40.opticalflow.py", "source_repo": "chuckpu/ComputerVersion", "split": "val", "star_events_count": 0}
{"blob_id": "c91dd992709d88651fec3722fb89e5d336d91db0", "bodies": ["ans = []\nself.dfs_search(ans, '', num, target, 0, 0, 0)\nreturn ans", "if pos == len(num):\n if value == target:\n ans.append(path)\n return\nfor i in range(pos + 1, len(num) + 1):\n cur_str, cur_n = (num[pos:i], int(num[pos:i]))\n if i > pos + 1 and num[pos] == '0':\n break\n if pos == 0:\n self.dfs_search(ans, path + cur_str, num, target, i, cur_n, cur_n)\n else:\n self.dfs_search(ans, path + '+' + cur_str, num, target, i, cur_n, value + cur_n)\n self.dfs_search(ans, path + '-' + cur_str, num, target, i, -cur_n, value - cur_n)\n self.dfs_search(ans, path + '*' + cur_str, num, target, i, pre_num * cur_n, value - pre_num + pre_num * cur_n)"], "bodies_text": "<|body_start_0|>\n ans = []\n self.dfs_search(ans, '', num, target, 0, 0, 0)\n return ans\n<|end_body_0|>\n\n<|body_start_1|>\n if pos == len(num):\n if value == target:\n ans.append(path)\n return\n for i in range(pos + 1, len(num) + 1):\n cur_str, cur_n = (num[pos:i], int(num[pos:i]))\n if i > pos + 1 and num[pos] == '0':\n break\n if pos == 0:\n self.dfs_search(ans, path + cur_str, num, target, i, cur_n, cur_n)\n else:\n self.dfs_search(ans, path + '+' + cur_str, num, target, i, cur_n, value + cur_n)\n self.dfs_search(ans, path + '-' + cur_str, num, target, i, -cur_n, value - cur_n)\n self.dfs_search(ans, path + '*' + cur_str, num, target, i, pre_num * cur_n, value - pre_num + pre_num * cur_n)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def addOperators(self, num, target):\n \"\"\"Once you can understand the solution space tree, you just get it. Refer to: https://discuss.leetcode.com/topic/24523/java-standard-backtrace-ac-solutoin-short-and-clear\"\"\"\n <|body_0|>\n\n def dfs_search(self, ans, path, num, target, pos, pre_num, value):\n \"\"\"Put binary operator in pos, and then calculate the new value. @pre_num: when process *, we need to know the previous number.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ans = []\n self.dfs_search(ans, '', num, target, 0, 0, 0)\n return ans\n<|end_body_0|>\n\n<|body_start_1|>\n if pos == len(num):\n if value == target:\n ans.append(path)\n return\n for i in range(pos + 1, len(num) + 1):\n cur_str, cur_n = (num[pos:i], int(num[pos:i]))\n if i > pos + 1 and num[pos] == '0':\n break\n if pos == 0:\n self.dfs_search(ans, path + cur_str, num, target, i, cur_n, cur_n)\n else:\n self.dfs_search(ans, path + '+' + cur_str, num, target, i, cur_n, value + cur_n)\n self.dfs_search(ans, path + '-' + cur_str, num, target, i, -cur_n, value - cur_n)\n self.dfs_search(ans, path + '*' + cur_str, num, target, i, pre_num * cur_n, value - pre_num + pre_num * cur_n)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000324", "length_bytes": 1759, "license_type": "no_license", "methods": [{"docstring": "Once you can understand the solution space tree, you just get it. Refer to: https://discuss.leetcode.com/topic/24523/java-standard-backtrace-ac-solutoin-short-and-clear", "name": "addOperators", "signature": "def addOperators(self, num, target)"}, {"docstring": "Put binary operator in pos, and then calculate the new value. 
@pre_num: when process *, we need to know the previous number.", "name": "dfs_search", "signature": "def dfs_search(self, ans, path, num, target, pos, pre_num, value)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_036655", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def addOperators(self, num, target): Once you can understand the solution space tree, you just get it. Refer to: https://discuss.leetcode.com/topic/24523/java-standard-backtrace-ac-solutoin-short-and-clear\n- def dfs_search(self, ans, path, num, target, pos, pre_num, value): Put binary operator in pos, and then calculate the new value. @pre_num: when process *, we need to know the previous number.", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def addOperators(self, num, target): Once you can understand the solution space tree, you just get it. Refer to: https://discuss.leetcode.com/topic/24523/java-standard-backtrace-ac-solutoin-short-and-clear\n- def dfs_search(self, ans, path, num, target, pos, pre_num, value): Put binary operator in pos, and then calculate the new value. @pre_num: when process *, we need to know the previous number.\n\n<|skeleton|>\nclass Solution:\n\n def addOperators(self, num, target):\n \"\"\"Once you can understand the solution space tree, you just get it. Refer to: https://discuss.leetcode.com/topic/24523/java-standard-backtrace-ac-solutoin-short-and-clear\"\"\"\n <|body_0|>\n\n def dfs_search(self, ans, path, num, target, pos, pre_num, value):\n \"\"\"Put binary operator in pos, and then calculate the new value. @pre_num: when process *, we need to know the previous number.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ans = []\n self.dfs_search(ans, '', num, target, 0, 0, 0)\n return ans\n<|end_body_0|>\n\n<|body_start_1|>\n if pos == len(num):\n if value == target:\n ans.append(path)\n return\n for i in range(pos + 1, len(num) + 1):\n cur_str, cur_n = (num[pos:i], int(num[pos:i]))\n if i > pos + 1 and num[pos] == '0':\n break\n if pos == 0:\n self.dfs_search(ans, path + cur_str, num, target, i, cur_n, cur_n)\n else:\n self.dfs_search(ans, path + '+' + cur_str, num, target, i, cur_n, value + cur_n)\n self.dfs_search(ans, path + '-' + cur_str, num, target, i, -cur_n, value - cur_n)\n self.dfs_search(ans, path + '*' + cur_str, num, target, i, pre_num * cur_n, value - pre_num + pre_num * cur_n)\n<|end_body_1|>\n", "revision_id": "be331826b490b73f0a176e6abed86ef68ff2dd2b", "skeleton": "<|skeleton|>\nclass Solution:\n\n def addOperators(self, num, target):\n \"\"\"Once you can understand the solution space tree, you just get it. Refer to: https://discuss.leetcode.com/topic/24523/java-standard-backtrace-ac-solutoin-short-and-clear\"\"\"\n <|body_0|>\n\n def dfs_search(self, ans, path, num, target, pos, pre_num, value):\n \"\"\"Put binary operator in pos, and then calculate the new value. @pre_num: when process *, we need to know the previous number.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def addOperators(self, num, target):\n \"\"\"Once you can understand the solution space tree, you just get it. 
Refer to: https://discuss.leetcode.com/topic/24523/java-standard-backtrace-ac-solutoin-short-and-clear\"\"\"\n ans = []\n self.dfs_search(ans, '', num, target, 0, 0, 0)\n return ans\n\n def dfs_search(self, ans, path, num, target, pos, pre_num, value):\n \"\"\"Put binary operator in pos, and then calculate the new value. @pre_num: when process *, we need to know the previous number.\"\"\"\n if pos == len(num):\n if value == target:\n ans.append(path)\n return\n for i in range(pos + 1, len(num) + 1):\n cur_str, cur_n = (num[pos:i], int(num[pos:i]))\n if i > pos + 1 and num[pos] == '0':\n break\n if pos == 0:\n self.dfs_search(ans, path + cur_str, num, target, i, cur_n, cur_n)\n else:\n self.dfs_search(ans, path + '+' + cur_str, num, target, i, cur_n, value + cur_n)\n self.dfs_search(ans, path + '-' + cur_str, num, target, i, -cur_n, value - cur_n)\n self.dfs_search(ans, path + '*' + cur_str, num, target, i, pre_num * cur_n, value - pre_num + pre_num * cur_n)\n", "source": "the_stack_v2_python_sparse", "source_path": "125_algorithms/_examples/_algorithms_challenges/leetcode/leetCode/DepthFirstSearch/282_ExpressionAddOperators.py", "source_repo": "syurskyi/Python_Topics", "split": "val", "star_events_count": 3}
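A short usage sketch for the expression-search record above, assuming the `Solution` class is in scope. The `pre_num` bookkeeping is what makes `*` work: in `value - pre_num + pre_num * cur_n`, the last additive term is undone and re-applied multiplied, which respects operator precedence.

```python
# Hedged usage sketch for the Solution record above.
sol = Solution()

# "123" -> 6: both an additive and a multiplicative expression exist
assert sorted(sol.addOperators('123', 6)) == ['1*2*3', '1+2+3']

# leading-zero operands such as "05" are pruned by the num[pos] == '0' break
assert sorted(sol.addOperators('105', 5)) == ['1*0+5', '10-5']
```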
{"blob_id": "fec24415a50c7052efc5f9f29001f500f27bd724", "bodies": ["super().__init__(model)\nself.base_graph = base_graph\nself._convert_graph()", "if node.is_function():\n val = node.function()\nelif node.is_input():\n val = 'input_placeholder'\nelif node.is_output():\n val = 'output_placeholder'\nelif node.is_method():\n val = node.method()\nelif node.is_get_attr():\n val = 'get_attr'\nelif node.is_module():\n val = node.module()\nelse:\n raise NotImplementedError(f'{node} is unsupported')\nnew_node = ModuleNode(node.name, val)\nreturn new_node", "base_graph = self.base_graph\nmodule_graph = ModuleGraph.copy_from(base_graph, self._node_converter)\nself.graph = module_graph"], "bodies_text": "<|body_start_0|>\n super().__init__(model)\n self.base_graph = base_graph\n self._convert_graph()\n<|end_body_0|>\n\n<|body_start_1|>\n if node.is_function():\n val = node.function()\n elif node.is_input():\n val = 'input_placeholder'\n elif node.is_output():\n val = 'output_placeholder'\n elif node.is_method():\n val = node.method()\n elif node.is_get_attr():\n val = 'get_attr'\n elif node.is_module():\n val = node.module()\n else:\n raise NotImplementedError(f'{node} is unsupported')\n new_node = ModuleNode(node.name, val)\n return new_node\n<|end_body_1|>\n\n<|body_start_2|>\n base_graph = self.base_graph\n module_graph = ModuleGraph.copy_from(base_graph, self._node_converter)\n self.graph = module_graph\n<|end_body_2|>\n", "class_docstring": "Use fx tracer to parse model, and generate module-graph.", "class_name": "FxTracerToGraphConverter", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FxTracerToGraphConverter:\n \"\"\"Use fx tracer to parse model, and generate module-graph.\"\"\"\n\n def __init__(self, base_graph, model=None) -> None:\n \"\"\"Args: model (Module): the model which will be parsed is_extra_leaf_module (Callable): a function used to determine, if a module is a leaf module except torch pre-defined modules\"\"\"\n <|body_0|>\n\n def _node_converter(self, node: FxBaseNode):\n \"\"\"Convert a fxnode to a module-node.\"\"\"\n <|body_1|>\n\n def _convert_graph(self):\n \"\"\"Convert a torch-graph to a module-graph.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(model)\n self.base_graph = base_graph\n self._convert_graph()\n<|end_body_0|>\n\n<|body_start_1|>\n if node.is_function():\n val = node.function()\n elif node.is_input():\n val = 'input_placeholder'\n elif node.is_output():\n val = 'output_placeholder'\n elif node.is_method():\n val = node.method()\n elif node.is_get_attr():\n val = 'get_attr'\n elif node.is_module():\n val = node.module()\n else:\n raise NotImplementedError(f'{node} is unsupported')\n new_node = ModuleNode(node.name, val)\n return new_node\n<|end_body_1|>\n\n<|body_start_2|>\n base_graph = self.base_graph\n module_graph = ModuleGraph.copy_from(base_graph, self._node_converter)\n self.graph = module_graph\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000325", "length_bytes": 17905, "license_type": "permissive", "methods": [{"docstring": "Args: model (Module): the model which will be parsed is_extra_leaf_module (Callable): a function used to determine, if a module is a leaf module except torch pre-defined modules", "name": "__init__", "signature": "def __init__(self, base_graph, model=None) -> None"}, {"docstring": "Convert a fxnode to a module-node.", "name": "_node_converter", "signature": "def _node_converter(self, node: FxBaseNode)"}, 
{"docstring": "Convert a torch-graph to a module-graph.", "name": "_convert_graph", "signature": "def _convert_graph(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_044608", "prompt": "Implement the Python class `FxTracerToGraphConverter` described below.\n\nClass description:\nUse fx tracer to parse model, and generate module-graph.\n\nMethod signatures and docstrings:\n- def __init__(self, base_graph, model=None) -> None: Args: model (Module): the model which will be parsed is_extra_leaf_module (Callable): a function used to determine, if a module is a leaf module except torch pre-defined modules\n- def _node_converter(self, node: FxBaseNode): Convert a fxnode to a module-node.\n- def _convert_graph(self): Convert a torch-graph to a module-graph.", "prompted_full_text": "Implement the Python class `FxTracerToGraphConverter` described below.\n\nClass description:\nUse fx tracer to parse model, and generate module-graph.\n\nMethod signatures and docstrings:\n- def __init__(self, base_graph, model=None) -> None: Args: model (Module): the model which will be parsed is_extra_leaf_module (Callable): a function used to determine, if a module is a leaf module except torch pre-defined modules\n- def _node_converter(self, node: FxBaseNode): Convert a fxnode to a module-node.\n- def _convert_graph(self): Convert a torch-graph to a module-graph.\n\n<|skeleton|>\nclass FxTracerToGraphConverter:\n \"\"\"Use fx tracer to parse model, and generate module-graph.\"\"\"\n\n def __init__(self, base_graph, model=None) -> None:\n \"\"\"Args: model (Module): the model which will be parsed is_extra_leaf_module (Callable): a function used to determine, if a module is a leaf module except torch pre-defined modules\"\"\"\n <|body_0|>\n\n def _node_converter(self, node: FxBaseNode):\n \"\"\"Convert a fxnode to a module-node.\"\"\"\n <|body_1|>\n\n def _convert_graph(self):\n \"\"\"Convert a torch-graph to a module-graph.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(model)\n self.base_graph = base_graph\n self._convert_graph()\n<|end_body_0|>\n\n<|body_start_1|>\n if node.is_function():\n val = node.function()\n elif node.is_input():\n val = 'input_placeholder'\n elif node.is_output():\n val = 'output_placeholder'\n elif node.is_method():\n val = node.method()\n elif node.is_get_attr():\n val = 'get_attr'\n elif node.is_module():\n val = node.module()\n else:\n raise NotImplementedError(f'{node} is unsupported')\n new_node = ModuleNode(node.name, val)\n return new_node\n<|end_body_1|>\n\n<|body_start_2|>\n base_graph = self.base_graph\n module_graph = ModuleGraph.copy_from(base_graph, self._node_converter)\n self.graph = module_graph\n<|end_body_2|>\n", "revision_id": "9d643e88946fc4a24f2d4d073c08b05ea693f4c5", "skeleton": "<|skeleton|>\nclass FxTracerToGraphConverter:\n \"\"\"Use fx tracer to parse model, and generate module-graph.\"\"\"\n\n def __init__(self, base_graph, model=None) -> None:\n \"\"\"Args: model (Module): the model which will be parsed is_extra_leaf_module (Callable): a function used to determine, if a module is a leaf module except torch pre-defined modules\"\"\"\n <|body_0|>\n\n def _node_converter(self, node: FxBaseNode):\n \"\"\"Convert a fxnode to a module-node.\"\"\"\n <|body_1|>\n\n def _convert_graph(self):\n \"\"\"Convert a torch-graph to a module-graph.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 
75829, "solution": "class FxTracerToGraphConverter:\n \"\"\"Use fx tracer to parse model, and generate module-graph.\"\"\"\n\n def __init__(self, base_graph, model=None) -> None:\n \"\"\"Args: model (Module): the model which will be parsed is_extra_leaf_module (Callable): a function used to determine, if a module is a leaf module except torch pre-defined modules\"\"\"\n super().__init__(model)\n self.base_graph = base_graph\n self._convert_graph()\n\n def _node_converter(self, node: FxBaseNode):\n \"\"\"Convert a fxnode to a module-node.\"\"\"\n if node.is_function():\n val = node.function()\n elif node.is_input():\n val = 'input_placeholder'\n elif node.is_output():\n val = 'output_placeholder'\n elif node.is_method():\n val = node.method()\n elif node.is_get_attr():\n val = 'get_attr'\n elif node.is_module():\n val = node.module()\n else:\n raise NotImplementedError(f'{node} is unsupported')\n new_node = ModuleNode(node.name, val)\n return new_node\n\n def _convert_graph(self):\n \"\"\"Convert a torch-graph to a module-graph.\"\"\"\n base_graph = self.base_graph\n module_graph = ModuleGraph.copy_from(base_graph, self._node_converter)\n self.graph = module_graph\n", "source": "the_stack_v2_python_sparse", "source_path": "cv/distiller/CWD/pytorch/mmrazor/mmrazor/structures/graph/module_graph.py", "source_repo": "Deep-Spark/DeepSparkHub", "split": "val", "star_events_count": 7}
{"blob_id": "0d1e59ee5c9a339e86b002e5a6ef51efb5b5f1f2", "bodies": ["self.low = low\nself.high = high\nloc_constraint = kwargs.pop('loc_constraint', None)\nif loc_constraint is None:\n loc_constraint = tf.keras.constraints.NonNeg()\nkwargs.update({'loc_constraint': loc_constraint})\nsuper(EmbeddingTruncatedNormalDiag, self).__init__(input_dim, output_dim, **kwargs)", "self.loc = self.add_weight(name='loc', shape=[self.input_dim, self.output_dim], dtype=dtype, initializer=self.loc_initializer, regularizer=self.loc_regularizer, trainable=self.loc_trainable, constraint=self.loc_constraint)\nself.untransformed_scale = self.add_weight(name='untransformed_scale', shape=[self.input_dim, self.output_dim], dtype=dtype, initializer=self.scale_initializer, regularizer=self.scale_regularizer, trainable=self.scale_trainable, constraint=self.scale_constraint)\nscale = tfp.util.DeferredTensor(self.untransformed_scale, lambda x: K.epsilon() + tf.nn.softplus(x))\ndist = psiz.distributions.TruncatedNormal(self.loc, scale, self.low, self.high)\nbatch_ndims = tf.size(dist.batch_shape_tensor())\nreturn tfp.distributions.Independent(dist, reinterpreted_batch_ndims=batch_ndims)", "[inputs_loc, inputs_scale] = super().call(inputs)\ndist_batch = psiz.distributions.TruncatedNormal(inputs_loc, inputs_scale, self.low, self.high)\nreturn dist_batch.sample(self.n_sample)", "config = super().get_config()\nconfig.update({'low': float(self.low), 'high': float(self.high)})\nreturn config"], "bodies_text": "<|body_start_0|>\n self.low = low\n self.high = high\n loc_constraint = kwargs.pop('loc_constraint', None)\n if loc_constraint is None:\n loc_constraint = tf.keras.constraints.NonNeg()\n kwargs.update({'loc_constraint': loc_constraint})\n super(EmbeddingTruncatedNormalDiag, self).__init__(input_dim, output_dim, **kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n self.loc = self.add_weight(name='loc', shape=[self.input_dim, self.output_dim], dtype=dtype, initializer=self.loc_initializer, regularizer=self.loc_regularizer, trainable=self.loc_trainable, constraint=self.loc_constraint)\n self.untransformed_scale = self.add_weight(name='untransformed_scale', shape=[self.input_dim, self.output_dim], dtype=dtype, initializer=self.scale_initializer, regularizer=self.scale_regularizer, trainable=self.scale_trainable, constraint=self.scale_constraint)\n scale = tfp.util.DeferredTensor(self.untransformed_scale, lambda x: K.epsilon() + tf.nn.softplus(x))\n dist = psiz.distributions.TruncatedNormal(self.loc, scale, self.low, self.high)\n batch_ndims = tf.size(dist.batch_shape_tensor())\n return tfp.distributions.Independent(dist, reinterpreted_batch_ndims=batch_ndims)\n<|end_body_1|>\n\n<|body_start_2|>\n [inputs_loc, inputs_scale] = super().call(inputs)\n dist_batch = psiz.distributions.TruncatedNormal(inputs_loc, inputs_scale, self.low, self.high)\n return dist_batch.sample(self.n_sample)\n<|end_body_2|>\n\n<|body_start_3|>\n config = super().get_config()\n config.update({'low': float(self.low), 'high': float(self.high)})\n return config\n<|end_body_3|>\n", "class_docstring": "A distribution-based embedding. Each embedding point is characterized by a Truncated Normal distribution with a diagonal scale matrix.", "class_name": "EmbeddingTruncatedNormalDiag", "detected_licenses": ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass EmbeddingTruncatedNormalDiag:\n \"\"\"A distribution-based embedding. 
Each embedding point is characterized by a Truncated Normal distribution with a diagonal scale matrix.\"\"\"\n\n def __init__(self, input_dim, output_dim, low=0.0, high=1000000.0, **kwargs):\n \"\"\"Initialize.\"\"\"\n <|body_0|>\n\n def _build_embeddings_distribution(self, dtype):\n \"\"\"Build embeddings distribution.\"\"\"\n <|body_1|>\n\n def call(self, inputs):\n \"\"\"Call.\"\"\"\n <|body_2|>\n\n def get_config(self):\n \"\"\"Return layer configuration.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.low = low\n self.high = high\n loc_constraint = kwargs.pop('loc_constraint', None)\n if loc_constraint is None:\n loc_constraint = tf.keras.constraints.NonNeg()\n kwargs.update({'loc_constraint': loc_constraint})\n super(EmbeddingTruncatedNormalDiag, self).__init__(input_dim, output_dim, **kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n self.loc = self.add_weight(name='loc', shape=[self.input_dim, self.output_dim], dtype=dtype, initializer=self.loc_initializer, regularizer=self.loc_regularizer, trainable=self.loc_trainable, constraint=self.loc_constraint)\n self.untransformed_scale = self.add_weight(name='untransformed_scale', shape=[self.input_dim, self.output_dim], dtype=dtype, initializer=self.scale_initializer, regularizer=self.scale_regularizer, trainable=self.scale_trainable, constraint=self.scale_constraint)\n scale = tfp.util.DeferredTensor(self.untransformed_scale, lambda x: K.epsilon() + tf.nn.softplus(x))\n dist = psiz.distributions.TruncatedNormal(self.loc, scale, self.low, self.high)\n batch_ndims = tf.size(dist.batch_shape_tensor())\n return tfp.distributions.Independent(dist, reinterpreted_batch_ndims=batch_ndims)\n<|end_body_1|>\n\n<|body_start_2|>\n [inputs_loc, inputs_scale] = super().call(inputs)\n dist_batch = psiz.distributions.TruncatedNormal(inputs_loc, inputs_scale, self.low, self.high)\n return dist_batch.sample(self.n_sample)\n<|end_body_2|>\n\n<|body_start_3|>\n config = super().get_config()\n config.update({'low': float(self.low), 'high': float(self.high)})\n return config\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000326", "length_bytes": 32786, "license_type": "permissive", "methods": [{"docstring": "Initialize.", "name": "__init__", "signature": "def __init__(self, input_dim, output_dim, low=0.0, high=1000000.0, **kwargs)"}, {"docstring": "Build embeddings distribution.", "name": "_build_embeddings_distribution", "signature": "def _build_embeddings_distribution(self, dtype)"}, {"docstring": "Call.", "name": "call", "signature": "def call(self, inputs)"}, {"docstring": "Return layer configuration.", "name": "get_config", "signature": "def get_config(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_033862", "prompt": "Implement the Python class `EmbeddingTruncatedNormalDiag` described below.\n\nClass description:\nA distribution-based embedding. Each embedding point is characterized by a Truncated Normal distribution with a diagonal scale matrix.\n\nMethod signatures and docstrings:\n- def __init__(self, input_dim, output_dim, low=0.0, high=1000000.0, **kwargs): Initialize.\n- def _build_embeddings_distribution(self, dtype): Build embeddings distribution.\n- def call(self, inputs): Call.\n- def get_config(self): Return layer configuration.", "prompted_full_text": "Implement the Python class `EmbeddingTruncatedNormalDiag` described below.\n\nClass description:\nA distribution-based embedding. 
Each embedding point is characterized by a Truncated Normal distribution with a diagonal scale matrix.\n\nMethod signatures and docstrings:\n- def __init__(self, input_dim, output_dim, low=0.0, high=1000000.0, **kwargs): Initialize.\n- def _build_embeddings_distribution(self, dtype): Build embeddings distribution.\n- def call(self, inputs): Call.\n- def get_config(self): Return layer configuration.\n\n<|skeleton|>\nclass EmbeddingTruncatedNormalDiag:\n \"\"\"A distribution-based embedding. Each embedding point is characterized by a Truncated Normal distribution with a diagonal scale matrix.\"\"\"\n\n def __init__(self, input_dim, output_dim, low=0.0, high=1000000.0, **kwargs):\n \"\"\"Initialize.\"\"\"\n <|body_0|>\n\n def _build_embeddings_distribution(self, dtype):\n \"\"\"Build embeddings distribution.\"\"\"\n <|body_1|>\n\n def call(self, inputs):\n \"\"\"Call.\"\"\"\n <|body_2|>\n\n def get_config(self):\n \"\"\"Return layer configuration.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.low = low\n self.high = high\n loc_constraint = kwargs.pop('loc_constraint', None)\n if loc_constraint is None:\n loc_constraint = tf.keras.constraints.NonNeg()\n kwargs.update({'loc_constraint': loc_constraint})\n super(EmbeddingTruncatedNormalDiag, self).__init__(input_dim, output_dim, **kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n self.loc = self.add_weight(name='loc', shape=[self.input_dim, self.output_dim], dtype=dtype, initializer=self.loc_initializer, regularizer=self.loc_regularizer, trainable=self.loc_trainable, constraint=self.loc_constraint)\n self.untransformed_scale = self.add_weight(name='untransformed_scale', shape=[self.input_dim, self.output_dim], dtype=dtype, initializer=self.scale_initializer, regularizer=self.scale_regularizer, trainable=self.scale_trainable, constraint=self.scale_constraint)\n scale = tfp.util.DeferredTensor(self.untransformed_scale, lambda x: K.epsilon() + tf.nn.softplus(x))\n dist = psiz.distributions.TruncatedNormal(self.loc, scale, self.low, self.high)\n batch_ndims = tf.size(dist.batch_shape_tensor())\n return tfp.distributions.Independent(dist, reinterpreted_batch_ndims=batch_ndims)\n<|end_body_1|>\n\n<|body_start_2|>\n [inputs_loc, inputs_scale] = super().call(inputs)\n dist_batch = psiz.distributions.TruncatedNormal(inputs_loc, inputs_scale, self.low, self.high)\n return dist_batch.sample(self.n_sample)\n<|end_body_2|>\n\n<|body_start_3|>\n config = super().get_config()\n config.update({'low': float(self.low), 'high': float(self.high)})\n return config\n<|end_body_3|>\n", "revision_id": "4f05348cf43d2d53ff9cc6dee633de385df883e3", "skeleton": "<|skeleton|>\nclass EmbeddingTruncatedNormalDiag:\n \"\"\"A distribution-based embedding. Each embedding point is characterized by a Truncated Normal distribution with a diagonal scale matrix.\"\"\"\n\n def __init__(self, input_dim, output_dim, low=0.0, high=1000000.0, **kwargs):\n \"\"\"Initialize.\"\"\"\n <|body_0|>\n\n def _build_embeddings_distribution(self, dtype):\n \"\"\"Build embeddings distribution.\"\"\"\n <|body_1|>\n\n def call(self, inputs):\n \"\"\"Call.\"\"\"\n <|body_2|>\n\n def get_config(self):\n \"\"\"Return layer configuration.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class EmbeddingTruncatedNormalDiag:\n \"\"\"A distribution-based embedding. 
Each embedding point is characterized by a Truncated Normal distribution with a diagonal scale matrix.\"\"\"\n\n def __init__(self, input_dim, output_dim, low=0.0, high=1000000.0, **kwargs):\n \"\"\"Initialize.\"\"\"\n self.low = low\n self.high = high\n loc_constraint = kwargs.pop('loc_constraint', None)\n if loc_constraint is None:\n loc_constraint = tf.keras.constraints.NonNeg()\n kwargs.update({'loc_constraint': loc_constraint})\n super(EmbeddingTruncatedNormalDiag, self).__init__(input_dim, output_dim, **kwargs)\n\n def _build_embeddings_distribution(self, dtype):\n \"\"\"Build embeddings distribution.\"\"\"\n self.loc = self.add_weight(name='loc', shape=[self.input_dim, self.output_dim], dtype=dtype, initializer=self.loc_initializer, regularizer=self.loc_regularizer, trainable=self.loc_trainable, constraint=self.loc_constraint)\n self.untransformed_scale = self.add_weight(name='untransformed_scale', shape=[self.input_dim, self.output_dim], dtype=dtype, initializer=self.scale_initializer, regularizer=self.scale_regularizer, trainable=self.scale_trainable, constraint=self.scale_constraint)\n scale = tfp.util.DeferredTensor(self.untransformed_scale, lambda x: K.epsilon() + tf.nn.softplus(x))\n dist = psiz.distributions.TruncatedNormal(self.loc, scale, self.low, self.high)\n batch_ndims = tf.size(dist.batch_shape_tensor())\n return tfp.distributions.Independent(dist, reinterpreted_batch_ndims=batch_ndims)\n\n def call(self, inputs):\n \"\"\"Call.\"\"\"\n [inputs_loc, inputs_scale] = super().call(inputs)\n dist_batch = psiz.distributions.TruncatedNormal(inputs_loc, inputs_scale, self.low, self.high)\n return dist_batch.sample(self.n_sample)\n\n def get_config(self):\n \"\"\"Return layer configuration.\"\"\"\n config = super().get_config()\n config.update({'low': float(self.low), 'high': float(self.high)})\n return config\n", "source": "the_stack_v2_python_sparse", "source_path": "psiz/keras/layers/embeddings.py", "source_repo": "asuiconlab/psiz", "split": "val", "star_events_count": 0}
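The core trick in `_build_embeddings_distribution` above is storing an unconstrained variable and exposing a strictly positive scale through a softplus transform. A standalone sketch with tensorflow_probability, where Keras's `K.epsilon()` is replaced by its default literal `1e-07` and psiz's `TruncatedNormal` by tfp's (both substitutions are assumptions):

```python
# Hedged sketch: softplus-parameterized scale for a truncated normal,
# standalone; psiz's TruncatedNormal is swapped for tfp's here.
import tensorflow as tf
import tensorflow_probability as tfp

untransformed_scale = tf.Variable(tf.zeros([3, 2]))  # unconstrained weights
scale = tfp.util.DeferredTensor(
    untransformed_scale, lambda x: 1e-07 + tf.nn.softplus(x))  # always > 0

dist = tfp.distributions.TruncatedNormal(
    loc=tf.zeros([3, 2]), scale=scale, low=0.0, high=1000000.0)
# reinterpret the [3, 2] batch of scalars as one event, as the record does
ind = tfp.distributions.Independent(dist, reinterpreted_batch_ndims=2)

sample = ind.sample()
assert sample.shape == (3, 2)
```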
{"blob_id": "3a246305936b631fc4b4a3ad21aa16d5defee301", "bodies": ["ans = collections.defaultdict(list)\nfor s in strs:\n ans[tuple(sorted(s))].append(s)\nreturn ans.values()", "ans = collections.defaultdict(list)\nfor s in strs:\n count = [0] * 26\n for c in s:\n count[ord(c) - ord('a')] += 1\n ans[tuple(count)].append(s)\nreturn ans.values()"], "bodies_text": "<|body_start_0|>\n ans = collections.defaultdict(list)\n for s in strs:\n ans[tuple(sorted(s))].append(s)\n return ans.values()\n<|end_body_0|>\n\n<|body_start_1|>\n ans = collections.defaultdict(list)\n for s in strs:\n count = [0] * 26\n for c in s:\n count[ord(c) - ord('a')] += 1\n ans[tuple(count)].append(s)\n return ans.values()\n<|end_body_1|>\n", "class_docstring": "", "class_name": "OfficialSolution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass OfficialSolution:\n\n def group_anagrams(self, strs: List[str]) -> List[List[str]]:\n \"\"\"排序数组分类。\"\"\"\n <|body_0|>\n\n def group_anagrams_2(self, strs: List[str]) -> List[List[str]]:\n \"\"\"按计数分类。\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ans = collections.defaultdict(list)\n for s in strs:\n ans[tuple(sorted(s))].append(s)\n return ans.values()\n<|end_body_0|>\n\n<|body_start_1|>\n ans = collections.defaultdict(list)\n for s in strs:\n count = [0] * 26\n for c in s:\n count[ord(c) - ord('a')] += 1\n ans[tuple(count)].append(s)\n return ans.values()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000327", "length_bytes": 3871, "license_type": "no_license", "methods": [{"docstring": "排序数组分类。", "name": "group_anagrams", "signature": "def group_anagrams(self, strs: List[str]) -> List[List[str]]"}, {"docstring": "按计数分类。", "name": "group_anagrams_2", "signature": "def group_anagrams_2(self, strs: List[str]) -> List[List[str]]"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_046507", "prompt": "Implement the Python class `OfficialSolution` described below.\n\nClass description:\nImplement the OfficialSolution class.\n\nMethod signatures and docstrings:\n- def group_anagrams(self, strs: List[str]) -> List[List[str]]: 排序数组分类。\n- def group_anagrams_2(self, strs: List[str]) -> List[List[str]]: 按计数分类。", "prompted_full_text": "Implement the Python class `OfficialSolution` described below.\n\nClass description:\nImplement the OfficialSolution class.\n\nMethod signatures and docstrings:\n- def group_anagrams(self, strs: List[str]) -> List[List[str]]: 排序数组分类。\n- def group_anagrams_2(self, strs: List[str]) -> List[List[str]]: 按计数分类。\n\n<|skeleton|>\nclass OfficialSolution:\n\n def group_anagrams(self, strs: List[str]) -> List[List[str]]:\n \"\"\"排序数组分类。\"\"\"\n <|body_0|>\n\n def group_anagrams_2(self, strs: List[str]) -> List[List[str]]:\n \"\"\"按计数分类。\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ans = collections.defaultdict(list)\n for s in strs:\n ans[tuple(sorted(s))].append(s)\n return ans.values()\n<|end_body_0|>\n\n<|body_start_1|>\n ans = collections.defaultdict(list)\n for s in strs:\n count = [0] * 26\n for c in s:\n count[ord(c) - ord('a')] += 1\n ans[tuple(count)].append(s)\n return ans.values()\n<|end_body_1|>\n", "revision_id": "6932d69353b94ec824dd0ddc86a92453f6673232", "skeleton": "<|skeleton|>\nclass OfficialSolution:\n\n def group_anagrams(self, strs: List[str]) -> List[List[str]]:\n \"\"\"排序数组分类。\"\"\"\n <|body_0|>\n\n def group_anagrams_2(self, strs: List[str]) -> List[List[str]]:\n \"\"\"按计数分类。\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", 
"snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class OfficialSolution:\n def group_anagrams(self, strs: List[str]) -> List[List[str]]:\n \"\"\"排序数组分类。\"\"\"\n ans = collections.defaultdict(list)\n for s in strs:\n ans[tuple(sorted(s))].append(s)\n return ans.values()\n\n def group_anagrams_2(self, strs: List[str]) -> List[List[str]]:\n \"\"\"按计数分类。\"\"\"\n ans = collections.defaultdict(list)\n for s in strs:\n count = [0] * 26\n for c in s:\n count[ord(c) - ord('a')] += 1\n ans[tuple(count)].append(s)\n return ans.values()\n", "source": "the_stack_v2_python_sparse", "source_path": "0049_group-anagrams.py", "source_repo": "Nigirimeshi/leetcode", "split": "val", "star_events_count": 0}
{"blob_id": "e04a74fd21078da61cfc4cd4a72409108cfc4fd6", "bodies": ["context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')"], "bodies_text": "<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n\n<|body_start_2|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_2|>\n\n<|body_start_3|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_3|>\n", "class_docstring": "Missing associated documentation comment in .proto file.", "class_name": "ChatManagerServicer", "detected_licenses": ["Unlicense"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ChatManagerServicer:\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n\n def create_chat(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_0|>\n\n def get_chat(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_1|>\n\n def list_chats(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_2|>\n\n def update_chat(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n\n<|body_start_2|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_2|>\n\n<|body_start_3|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000328", "length_bytes": 7948, "license_type": "permissive", "methods": [{"docstring": "Missing associated documentation comment in .proto file.", "name": "create_chat", "signature": "def create_chat(self, request, context)"}, {"docstring": "Missing associated documentation comment in .proto file.", "name": "get_chat", "signature": "def get_chat(self, request, context)"}, {"docstring": "Missing associated documentation 
comment in .proto file.", "name": "list_chats", "signature": "def list_chats(self, request, context)"}, {"docstring": "Missing associated documentation comment in .proto file.", "name": "update_chat", "signature": "def update_chat(self, request, context)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_027802", "prompt": "Implement the Python class `ChatManagerServicer` described below.\n\nClass description:\nMissing associated documentation comment in .proto file.\n\nMethod signatures and docstrings:\n- def create_chat(self, request, context): Missing associated documentation comment in .proto file.\n- def get_chat(self, request, context): Missing associated documentation comment in .proto file.\n- def list_chats(self, request, context): Missing associated documentation comment in .proto file.\n- def update_chat(self, request, context): Missing associated documentation comment in .proto file.", "prompted_full_text": "Implement the Python class `ChatManagerServicer` described below.\n\nClass description:\nMissing associated documentation comment in .proto file.\n\nMethod signatures and docstrings:\n- def create_chat(self, request, context): Missing associated documentation comment in .proto file.\n- def get_chat(self, request, context): Missing associated documentation comment in .proto file.\n- def list_chats(self, request, context): Missing associated documentation comment in .proto file.\n- def update_chat(self, request, context): Missing associated documentation comment in .proto file.\n\n<|skeleton|>\nclass ChatManagerServicer:\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n\n def create_chat(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_0|>\n\n def get_chat(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_1|>\n\n def list_chats(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_2|>\n\n def update_chat(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n\n<|body_start_2|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_2|>\n\n<|body_start_3|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_3|>\n", "revision_id": "7db858386f1a20e8d49bc16f53bfd7f1e4d03f7e", "skeleton": "<|skeleton|>\nclass ChatManagerServicer:\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n\n def create_chat(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_0|>\n\n def get_chat(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_1|>\n\n def list_chats(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_2|>\n\n def update_chat(self, request, 
context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ChatManagerServicer:\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n\n def create_chat(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def get_chat(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def list_chats(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def update_chat(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n", "source": "the_stack_v2_python_sparse", "source_path": "idm/api/proto/chat_manager_service_pb2_grpc.py", "source_repo": "MrHamdu/hyperboria", "split": "val", "star_events_count": 0}
{"blob_id": "ac875b95b27dcee8a8a830585481915e7a206a47", "bodies": ["if not isinstance(condition, PassPredicate):\n raise TypeError(f'Expected PassPredicate, got {type(condition)}.')\nself.condition = condition\nself.workflow = Workflow(loop_body)", "while self.condition(circuit, data):\n _logger.debug('Loop body executing...')\n await self.workflow.run(circuit, data)"], "bodies_text": "<|body_start_0|>\n if not isinstance(condition, PassPredicate):\n raise TypeError(f'Expected PassPredicate, got {type(condition)}.')\n self.condition = condition\n self.workflow = Workflow(loop_body)\n<|end_body_0|>\n\n<|body_start_1|>\n while self.condition(circuit, data):\n _logger.debug('Loop body executing...')\n await self.workflow.run(circuit, data)\n<|end_body_1|>\n", "class_docstring": "The WhileLoopPass class. This is a control pass that conditionally executes a workflow in a loop.", "class_name": "WhileLoopPass", "detected_licenses": ["LicenseRef-scancode-unknown-license-reference", "BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass WhileLoopPass:\n \"\"\"The WhileLoopPass class. This is a control pass that conditionally executes a workflow in a loop.\"\"\"\n\n def __init__(self, condition: PassPredicate, loop_body: WorkflowLike) -> None:\n \"\"\"Construct a WhileLoopPass. Args: condition (PassPredicate): The condition checked. loop_body (WorkflowLike): The pass or passes to execute while `condition` is true.\"\"\"\n <|body_0|>\n\n async def run(self, circuit: Circuit, data: PassData) -> None:\n \"\"\"Perform the pass's operation, see :class:`BasePass` for more.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not isinstance(condition, PassPredicate):\n raise TypeError(f'Expected PassPredicate, got {type(condition)}.')\n self.condition = condition\n self.workflow = Workflow(loop_body)\n<|end_body_0|>\n\n<|body_start_1|>\n while self.condition(circuit, data):\n _logger.debug('Loop body executing...')\n await self.workflow.run(circuit, data)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000329", "length_bytes": 1516, "license_type": "permissive", "methods": [{"docstring": "Construct a WhileLoopPass. Args: condition (PassPredicate): The condition checked. loop_body (WorkflowLike): The pass or passes to execute while `condition` is true.", "name": "__init__", "signature": "def __init__(self, condition: PassPredicate, loop_body: WorkflowLike) -> None"}, {"docstring": "Perform the pass's operation, see :class:`BasePass` for more.", "name": "run", "signature": "async def run(self, circuit: Circuit, data: PassData) -> None"}], "n_methods": 2, "prompt": "Implement the Python class `WhileLoopPass` described below.\n\nClass description:\nThe WhileLoopPass class. This is a control pass that conditionally executes a workflow in a loop.\n\nMethod signatures and docstrings:\n- def __init__(self, condition: PassPredicate, loop_body: WorkflowLike) -> None: Construct a WhileLoopPass. Args: condition (PassPredicate): The condition checked. loop_body (WorkflowLike): The pass or passes to execute while `condition` is true.\n- async def run(self, circuit: Circuit, data: PassData) -> None: Perform the pass's operation, see :class:`BasePass` for more.", "prompted_full_text": "Implement the Python class `WhileLoopPass` described below.\n\nClass description:\nThe WhileLoopPass class. 
This is a control pass that conditionally executes a workflow in a loop.\n\nMethod signatures and docstrings:\n- def __init__(self, condition: PassPredicate, loop_body: WorkflowLike) -> None: Construct a WhileLoopPass. Args: condition (PassPredicate): The condition checked. loop_body (WorkflowLike): The pass or passes to execute while `condition` is true.\n- async def run(self, circuit: Circuit, data: PassData) -> None: Perform the pass's operation, see :class:`BasePass` for more.\n\n<|skeleton|>\nclass WhileLoopPass:\n \"\"\"The WhileLoopPass class. This is a control pass that conditionally executes a workflow in a loop.\"\"\"\n\n def __init__(self, condition: PassPredicate, loop_body: WorkflowLike) -> None:\n \"\"\"Construct a WhileLoopPass. Args: condition (PassPredicate): The condition checked. loop_body (WorkflowLike): The pass or passes to execute while `condition` is true.\"\"\"\n <|body_0|>\n\n async def run(self, circuit: Circuit, data: PassData) -> None:\n \"\"\"Perform the pass's operation, see :class:`BasePass` for more.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not isinstance(condition, PassPredicate):\n raise TypeError(f'Expected PassPredicate, got {type(condition)}.')\n self.condition = condition\n self.workflow = Workflow(loop_body)\n<|end_body_0|>\n\n<|body_start_1|>\n while self.condition(circuit, data):\n _logger.debug('Loop body executing...')\n await self.workflow.run(circuit, data)\n<|end_body_1|>\n", "revision_id": "c89112d15072e8ffffb68cf1757b184e2aeb3dc8", "skeleton": "<|skeleton|>\nclass WhileLoopPass:\n \"\"\"The WhileLoopPass class. This is a control pass that conditionally executes a workflow in a loop.\"\"\"\n\n def __init__(self, condition: PassPredicate, loop_body: WorkflowLike) -> None:\n \"\"\"Construct a WhileLoopPass. Args: condition (PassPredicate): The condition checked. loop_body (WorkflowLike): The pass or passes to execute while `condition` is true.\"\"\"\n <|body_0|>\n\n async def run(self, circuit: Circuit, data: PassData) -> None:\n \"\"\"Perform the pass's operation, see :class:`BasePass` for more.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class WhileLoopPass:\n \"\"\"The WhileLoopPass class. This is a control pass that conditionally executes a workflow in a loop.\"\"\"\n\n def __init__(self, condition: PassPredicate, loop_body: WorkflowLike) -> None:\n \"\"\"Construct a WhileLoopPass. Args: condition (PassPredicate): The condition checked. loop_body (WorkflowLike): The pass or passes to execute while `condition` is true.\"\"\"\n if not isinstance(condition, PassPredicate):\n raise TypeError(f'Expected PassPredicate, got {type(condition)}.')\n self.condition = condition\n self.workflow = Workflow(loop_body)\n\n async def run(self, circuit: Circuit, data: PassData) -> None:\n \"\"\"Perform the pass's operation, see :class:`BasePass` for more.\"\"\"\n while self.condition(circuit, data):\n _logger.debug('Loop body executing...')\n await self.workflow.run(circuit, data)\n", "source": "the_stack_v2_python_sparse", "source_path": "bqskit/passes/control/whileloop.py", "source_repo": "BQSKit/bqskit", "split": "val", "star_events_count": 54}
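The WhileLoopPass record above re-checks its predicate against the evolving circuit and data before every run of the loop body. A self-contained asyncio sketch of that control flow follows; the predicate and body here are simplified stand-ins, not BQSKit's actual PassPredicate and Workflow types.

import asyncio


class GateCountPredicate:
    """True while the circuit still has more than `target` gates."""

    def __init__(self, target: int) -> None:
        self.target = target

    def __call__(self, circuit: dict, data: dict) -> bool:
        return circuit['num_gates'] > self.target


async def shrink_once(circuit: dict, data: dict) -> None:
    circuit['num_gates'] -= 1  # stand-in for one optimization pass


async def while_loop(condition, body, circuit: dict, data: dict) -> None:
    # Mirror of WhileLoopPass.run: re-evaluate the condition on each iteration.
    while condition(circuit, data):
        await body(circuit, data)


asyncio.run(while_loop(GateCountPredicate(3), shrink_once, {'num_gates': 7}, {}))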
{"blob_id": "24644bc5f431cba1d0ff807e16cd3a880a99781d", "bodies": ["test = application.orm.get_test(test_id)\ntest_schema = TestsSchema()\nif test is None:\n return fail_response('Test is not found', code=404)\nres = test_schema.dump(test)\nquestions = []\nquestions_schema = QuestionsSchema()\nfor question_id in res.data['questions_tests']:\n obj = application.orm.get_question(question_id)\n questions.append(questions_schema.dump(obj).data)\nres.data.update({'questions': questions})\nreturn jsonify(res.data)", "test_name = args['test'].test_name\ntest_time = args['test'].max_time\nreq_test_id = args['test'].id\nif test_name is None or test_time is None or req_test_id is None:\n return fail_response(msg='Wrong input data', code=400)\ntest_id = application.orm.update_test(test_id=args['test'].id, test_name=args['test'].test_name, max_time=args['test'].max_time)\nif test_id is None:\n return fail_response('Test is not found', code=404)\nfor question in args['questions']:\n if question.id is None:\n application.orm.add_question(question=question.question, question_type=question.question_type, answer=question.answer, manually_grading=question.manually_grading, points=question.points, test_id=int(test_id))\n else:\n application.orm.update_question(question=question.question, question_type=question.question_type, answer=question.answer, manually_grading=question.manually_grading, points=question.points, test_id=int(test_id), question_id=question.id)\nreturn generic_response(status='Success', msg='Test changed', code=201)", "success = application.orm.delete_test(test_id)\nif success:\n return generic_response(status='success', code=201, msg='Deleted')\nreturn fail_response(msg=\"Can't delete test\", code=406)"], "bodies_text": "<|body_start_0|>\n test = application.orm.get_test(test_id)\n test_schema = TestsSchema()\n if test is None:\n return fail_response('Test is not found', code=404)\n res = test_schema.dump(test)\n questions = []\n questions_schema = QuestionsSchema()\n for question_id in res.data['questions_tests']:\n obj = application.orm.get_question(question_id)\n questions.append(questions_schema.dump(obj).data)\n res.data.update({'questions': questions})\n return jsonify(res.data)\n<|end_body_0|>\n\n<|body_start_1|>\n test_name = args['test'].test_name\n test_time = args['test'].max_time\n req_test_id = args['test'].id\n if test_name is None or test_time is None or req_test_id is None:\n return fail_response(msg='Wrong input data', code=400)\n test_id = application.orm.update_test(test_id=args['test'].id, test_name=args['test'].test_name, max_time=args['test'].max_time)\n if test_id is None:\n return fail_response('Test is not found', code=404)\n for question in args['questions']:\n if question.id is None:\n application.orm.add_question(question=question.question, question_type=question.question_type, answer=question.answer, manually_grading=question.manually_grading, points=question.points, test_id=int(test_id))\n else:\n application.orm.update_question(question=question.question, question_type=question.question_type, answer=question.answer, manually_grading=question.manually_grading, points=question.points, test_id=int(test_id), question_id=question.id)\n return generic_response(status='Success', msg='Test changed', code=201)\n<|end_body_1|>\n\n<|body_start_2|>\n success = application.orm.delete_test(test_id)\n if success:\n return generic_response(status='success', code=201, msg='Deleted')\n return fail_response(msg=\"Can't delete test\", code=406)\n<|end_body_2|>\n", 
"class_docstring": "", "class_name": "TestManagement", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestManagement:\n\n def get(self, test_id):\n \"\"\"--- summary: Get test by id description: All test questions with test metainfo parameters: - in: path name: user_id schema: type: integer required: true description: Numeric ID of the user to get responses: 200: description: OK content: application/json: schema: TestsSchema example: { \"archived\": false, \"id\": 8, \"max_time\": null, \"questions\": [ { \"answer\": \"1\", \"id\": 5, \"manually_grading\": true, \"points\": 0, \"question\": \"2\", \"question_type\": 0, \"test\": 5 }, { \"answer\": \"1\", \"id\": 9, \"manually_grading\": true, \"points\": 0, \"question\": \"2\", \"question_type\": 0, \"test\": 9 } ], \"questions_tests\": [ 5, 9 ], \"test_name\": \"45\" } 404: description: Not found content: application/json: schema: ErrorSche\"\"\"\n <|body_0|>\n\n def put(self, args, test_id):\n \"\"\"--- summary: Change test by id description: Changes test parameters: - in: path required: true name: test_id schema: type: int requestBody: required: true content: application/json: schema: TestsSchema responses: 201: description: OK 404: description: Not found content: application/json: schema: ErrorSchema example: message: [Test not found] 400: description: Bad request content: application/json: schema: ErrorSchema example: message: [Wrong input data]\"\"\"\n <|body_1|>\n\n def delete(self, test_id):\n \"\"\"--- summary: Delete test by id description: Deletes test responses: 201: description: OK 404: description: Not found content: application/json: schema: ErrorSchema example: message: [Test not found]\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n test = application.orm.get_test(test_id)\n test_schema = TestsSchema()\n if test is None:\n return fail_response('Test is not found', code=404)\n res = test_schema.dump(test)\n questions = []\n questions_schema = QuestionsSchema()\n for question_id in res.data['questions_tests']:\n obj = application.orm.get_question(question_id)\n questions.append(questions_schema.dump(obj).data)\n res.data.update({'questions': questions})\n return jsonify(res.data)\n<|end_body_0|>\n\n<|body_start_1|>\n test_name = args['test'].test_name\n test_time = args['test'].max_time\n req_test_id = args['test'].id\n if test_name is None or test_time is None or req_test_id is None:\n return fail_response(msg='Wrong input data', code=400)\n test_id = application.orm.update_test(test_id=args['test'].id, test_name=args['test'].test_name, max_time=args['test'].max_time)\n if test_id is None:\n return fail_response('Test is not found', code=404)\n for question in args['questions']:\n if question.id is None:\n application.orm.add_question(question=question.question, question_type=question.question_type, answer=question.answer, manually_grading=question.manually_grading, points=question.points, test_id=int(test_id))\n else:\n application.orm.update_question(question=question.question, question_type=question.question_type, answer=question.answer, manually_grading=question.manually_grading, points=question.points, test_id=int(test_id), question_id=question.id)\n return generic_response(status='Success', msg='Test changed', code=201)\n<|end_body_1|>\n\n<|body_start_2|>\n success = application.orm.delete_test(test_id)\n if success:\n return generic_response(status='success', code=201, msg='Deleted')\n return fail_response(msg=\"Can't delete test\", 
code=406)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000330", "length_bytes": 12238, "license_type": "no_license", "methods": [{"docstring": "--- summary: Get test by id description: All test questions with test metainfo parameters: - in: path name: user_id schema: type: integer required: true description: Numeric ID of the user to get responses: 200: description: OK content: application/json: schema: TestsSchema example: { \"archived\": false, \"id\": 8, \"max_time\": null, \"questions\": [ { \"answer\": \"1\", \"id\": 5, \"manually_grading\": true, \"points\": 0, \"question\": \"2\", \"question_type\": 0, \"test\": 5 }, { \"answer\": \"1\", \"id\": 9, \"manually_grading\": true, \"points\": 0, \"question\": \"2\", \"question_type\": 0, \"test\": 9 } ], \"questions_tests\": [ 5, 9 ], \"test_name\": \"45\" } 404: description: Not found content: application/json: schema: ErrorSche", "name": "get", "signature": "def get(self, test_id)"}, {"docstring": "--- summary: Change test by id description: Changes test parameters: - in: path required: true name: test_id schema: type: int requestBody: required: true content: application/json: schema: TestsSchema responses: 201: description: OK 404: description: Not found content: application/json: schema: ErrorSchema example: message: [Test not found] 400: description: Bad request content: application/json: schema: ErrorSchema example: message: [Wrong input data]", "name": "put", "signature": "def put(self, args, test_id)"}, {"docstring": "--- summary: Delete test by id description: Deletes test responses: 201: description: OK 404: description: Not found content: application/json: schema: ErrorSchema example: message: [Test not found]", "name": "delete", "signature": "def delete(self, test_id)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_023834", "prompt": "Implement the Python class `TestManagement` described below.\n\nClass description:\nImplement the TestManagement class.\n\nMethod signatures and docstrings:\n- def get(self, test_id): --- summary: Get test by id description: All test questions with test metainfo parameters: - in: path name: user_id schema: type: integer required: true description: Numeric ID of the user to get responses: 200: description: OK content: application/json: schema: TestsSchema example: { \"archived\": false, \"id\": 8, \"max_time\": null, \"questions\": [ { \"answer\": \"1\", \"id\": 5, \"manually_grading\": true, \"points\": 0, \"question\": \"2\", \"question_type\": 0, \"test\": 5 }, { \"answer\": \"1\", \"id\": 9, \"manually_grading\": true, \"points\": 0, \"question\": \"2\", \"question_type\": 0, \"test\": 9 } ], \"questions_tests\": [ 5, 9 ], \"test_name\": \"45\" } 404: description: Not found content: application/json: schema: ErrorSche\n- def put(self, args, test_id): --- summary: Change test by id description: Changes test parameters: - in: path required: true name: test_id schema: type: int requestBody: required: true content: application/json: schema: TestsSchema responses: 201: description: OK 404: description: Not found content: application/json: schema: ErrorSchema example: message: [Test not found] 400: description: Bad request content: application/json: schema: ErrorSchema example: message: [Wrong input data]\n- def delete(self, test_id): --- summary: Delete test by id description: Deletes test responses: 201: description: OK 404: description: Not found content: application/json: schema: ErrorSchema example: message: [Test not found]", "prompted_full_text": 
"Implement the Python class `TestManagement` described below.\n\nClass description:\nImplement the TestManagement class.\n\nMethod signatures and docstrings:\n- def get(self, test_id): --- summary: Get test by id description: All test questions with test metainfo parameters: - in: path name: user_id schema: type: integer required: true description: Numeric ID of the user to get responses: 200: description: OK content: application/json: schema: TestsSchema example: { \"archived\": false, \"id\": 8, \"max_time\": null, \"questions\": [ { \"answer\": \"1\", \"id\": 5, \"manually_grading\": true, \"points\": 0, \"question\": \"2\", \"question_type\": 0, \"test\": 5 }, { \"answer\": \"1\", \"id\": 9, \"manually_grading\": true, \"points\": 0, \"question\": \"2\", \"question_type\": 0, \"test\": 9 } ], \"questions_tests\": [ 5, 9 ], \"test_name\": \"45\" } 404: description: Not found content: application/json: schema: ErrorSche\n- def put(self, args, test_id): --- summary: Change test by id description: Changes test parameters: - in: path required: true name: test_id schema: type: int requestBody: required: true content: application/json: schema: TestsSchema responses: 201: description: OK 404: description: Not found content: application/json: schema: ErrorSchema example: message: [Test not found] 400: description: Bad request content: application/json: schema: ErrorSchema example: message: [Wrong input data]\n- def delete(self, test_id): --- summary: Delete test by id description: Deletes test responses: 201: description: OK 404: description: Not found content: application/json: schema: ErrorSchema example: message: [Test not found]\n\n<|skeleton|>\nclass TestManagement:\n\n def get(self, test_id):\n \"\"\"--- summary: Get test by id description: All test questions with test metainfo parameters: - in: path name: user_id schema: type: integer required: true description: Numeric ID of the user to get responses: 200: description: OK content: application/json: schema: TestsSchema example: { \"archived\": false, \"id\": 8, \"max_time\": null, \"questions\": [ { \"answer\": \"1\", \"id\": 5, \"manually_grading\": true, \"points\": 0, \"question\": \"2\", \"question_type\": 0, \"test\": 5 }, { \"answer\": \"1\", \"id\": 9, \"manually_grading\": true, \"points\": 0, \"question\": \"2\", \"question_type\": 0, \"test\": 9 } ], \"questions_tests\": [ 5, 9 ], \"test_name\": \"45\" } 404: description: Not found content: application/json: schema: ErrorSche\"\"\"\n <|body_0|>\n\n def put(self, args, test_id):\n \"\"\"--- summary: Change test by id description: Changes test parameters: - in: path required: true name: test_id schema: type: int requestBody: required: true content: application/json: schema: TestsSchema responses: 201: description: OK 404: description: Not found content: application/json: schema: ErrorSchema example: message: [Test not found] 400: description: Bad request content: application/json: schema: ErrorSchema example: message: [Wrong input data]\"\"\"\n <|body_1|>\n\n def delete(self, test_id):\n \"\"\"--- summary: Delete test by id description: Deletes test responses: 201: description: OK 404: description: Not found content: application/json: schema: ErrorSchema example: message: [Test not found]\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n test = application.orm.get_test(test_id)\n test_schema = TestsSchema()\n if test is None:\n return fail_response('Test is not found', code=404)\n res = test_schema.dump(test)\n questions = []\n questions_schema = QuestionsSchema()\n 
for question_id in res.data['questions_tests']:\n obj = application.orm.get_question(question_id)\n questions.append(questions_schema.dump(obj).data)\n res.data.update({'questions': questions})\n return jsonify(res.data)\n<|end_body_0|>\n\n<|body_start_1|>\n test_name = args['test'].test_name\n test_time = args['test'].max_time\n req_test_id = args['test'].id\n if test_name is None or test_time is None or req_test_id is None:\n return fail_response(msg='Wrong input data', code=400)\n test_id = application.orm.update_test(test_id=args['test'].id, test_name=args['test'].test_name, max_time=args['test'].max_time)\n if test_id is None:\n return fail_response('Test is not found', code=404)\n for question in args['questions']:\n if question.id is None:\n application.orm.add_question(question=question.question, question_type=question.question_type, answer=question.answer, manually_grading=question.manually_grading, points=question.points, test_id=int(test_id))\n else:\n application.orm.update_question(question=question.question, question_type=question.question_type, answer=question.answer, manually_grading=question.manually_grading, points=question.points, test_id=int(test_id), question_id=question.id)\n return generic_response(status='Success', msg='Test changed', code=201)\n<|end_body_1|>\n\n<|body_start_2|>\n success = application.orm.delete_test(test_id)\n if success:\n return generic_response(status='success', code=201, msg='Deleted')\n return fail_response(msg=\"Can't delete test\", code=406)\n<|end_body_2|>\n", "revision_id": "171f990754f1c89cefe2b416001d1b7e3a6a430d", "skeleton": "<|skeleton|>\nclass TestManagement:\n\n def get(self, test_id):\n \"\"\"--- summary: Get test by id description: All test questions with test metainfo parameters: - in: path name: user_id schema: type: integer required: true description: Numeric ID of the user to get responses: 200: description: OK content: application/json: schema: TestsSchema example: { \"archived\": false, \"id\": 8, \"max_time\": null, \"questions\": [ { \"answer\": \"1\", \"id\": 5, \"manually_grading\": true, \"points\": 0, \"question\": \"2\", \"question_type\": 0, \"test\": 5 }, { \"answer\": \"1\", \"id\": 9, \"manually_grading\": true, \"points\": 0, \"question\": \"2\", \"question_type\": 0, \"test\": 9 } ], \"questions_tests\": [ 5, 9 ], \"test_name\": \"45\" } 404: description: Not found content: application/json: schema: ErrorSche\"\"\"\n <|body_0|>\n\n def put(self, args, test_id):\n \"\"\"--- summary: Change test by id description: Changes test parameters: - in: path required: true name: test_id schema: type: int requestBody: required: true content: application/json: schema: TestsSchema responses: 201: description: OK 404: description: Not found content: application/json: schema: ErrorSchema example: message: [Test not found] 400: description: Bad request content: application/json: schema: ErrorSchema example: message: [Wrong input data]\"\"\"\n <|body_1|>\n\n def delete(self, test_id):\n \"\"\"--- summary: Delete test by id description: Deletes test responses: 201: description: OK 404: description: Not found content: application/json: schema: ErrorSchema example: message: [Test not found]\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TestManagement:\n def get(self, test_id):\n \"\"\"--- summary: Get test by id description: All test questions with test metainfo parameters: - 
in: path name: user_id schema: type: integer required: true description: Numeric ID of the user to get responses: 200: description: OK content: application/json: schema: TestsSchema example: { \"archived\": false, \"id\": 8, \"max_time\": null, \"questions\": [ { \"answer\": \"1\", \"id\": 5, \"manually_grading\": true, \"points\": 0, \"question\": \"2\", \"question_type\": 0, \"test\": 5 }, { \"answer\": \"1\", \"id\": 9, \"manually_grading\": true, \"points\": 0, \"question\": \"2\", \"question_type\": 0, \"test\": 9 } ], \"questions_tests\": [ 5, 9 ], \"test_name\": \"45\" } 404: description: Not found content: application/json: schema: ErrorSche\"\"\"\n test = application.orm.get_test(test_id)\n test_schema = TestsSchema()\n if test is None:\n return fail_response('Test is not found', code=404)\n res = test_schema.dump(test)\n questions = []\n questions_schema = QuestionsSchema()\n for question_id in res.data['questions_tests']:\n obj = application.orm.get_question(question_id)\n questions.append(questions_schema.dump(obj).data)\n res.data.update({'questions': questions})\n return jsonify(res.data)\n\n def put(self, args, test_id):\n \"\"\"--- summary: Change test by id description: Changes test parameters: - in: path required: true name: test_id schema: type: int requestBody: required: true content: application/json: schema: TestsSchema responses: 201: description: OK 404: description: Not found content: application/json: schema: ErrorSchema example: message: [Test not found] 400: description: Bad request content: application/json: schema: ErrorSchema example: message: [Wrong input data]\"\"\"\n test_name = args['test'].test_name\n test_time = args['test'].max_time\n req_test_id = args['test'].id\n if test_name is None or test_time is None or req_test_id is None:\n return fail_response(msg='Wrong input data', code=400)\n test_id = application.orm.update_test(test_id=args['test'].id, test_name=args['test'].test_name, max_time=args['test'].max_time)\n if test_id is None:\n return fail_response('Test is not found', code=404)\n for question in args['questions']:\n if question.id is None:\n application.orm.add_question(question=question.question, question_type=question.question_type, answer=question.answer, manually_grading=question.manually_grading, points=question.points, test_id=int(test_id))\n else:\n application.orm.update_question(question=question.question, question_type=question.question_type, answer=question.answer, manually_grading=question.manually_grading, points=question.points, test_id=int(test_id), question_id=question.id)\n return generic_response(status='Success', msg='Test changed', code=201)\n\n def delete(self, test_id):\n \"\"\"--- summary: Delete test by id description: Deletes test responses: 201: description: OK 404: description: Not found content: application/json: schema: ErrorSchema example: message: [Test not found]\"\"\"\n success = application.orm.delete_test(test_id)\n if success:\n return generic_response(status='success', code=201, msg='Deleted')\n return fail_response(msg=\"Can't delete test\", code=406)\n", "source": "the_stack_v2_python_sparse", "source_path": "backend/api/test.py", "source_repo": "ssd-courseproject/adminssion-forms-backend", "split": "val", "star_events_count": 0}
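In the put() body of the TestManagement record above, a question whose id is None is created and a question with an id is updated, i.e. a per-question upsert keyed on the presence of an id. The stand-alone sketch below isolates that dispatch; the dict is a hypothetical stand-in for the record's application.orm, which is not part of the record.

from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple


@dataclass
class Question:
    question: str
    answer: str
    id: Optional[int] = None


def upsert_questions(store: Dict[int, Tuple[int, Question]], questions: List[Question], test_id: int) -> None:
    for q in questions:
        if q.id is None:               # no id yet: create (mirrors orm.add_question)
            q.id = max(store, default=0) + 1
        store[q.id] = (test_id, q)     # id present: update (mirrors orm.update_question)


store: Dict[int, Tuple[int, Question]] = {}
upsert_questions(store, [Question('2+2?', '4'), Question('1+1?', '2', id=7)], test_id=5)
assert sorted(store) == [1, 7]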
{"blob_id": "b3db424730844be0762c4e5eb6f52da978c4dd4f", "bodies": ["super().__init__(auth_module, setup_schema, user_id)\nself._auth_module: NotifyAuthModule = auth_module\nself._available_notify_services = available_notify_services\nself._secret: str | None = None\nself._count: int | None = None\nself._notify_service: str | None = None\nself._target: str | None = None", "errors: dict[str, str] = {}\nhass = self._auth_module.hass\nif user_input:\n self._notify_service = user_input['notify_service']\n self._target = user_input.get('target')\n self._secret = await hass.async_add_executor_job(_generate_secret)\n self._count = await hass.async_add_executor_job(_generate_random)\n return await self.async_step_setup()\nif not self._available_notify_services:\n return self.async_abort(reason='no_available_service')\nschema: dict[str, Any] = OrderedDict()\nschema['notify_service'] = vol.In(self._available_notify_services)\nschema['target'] = vol.Optional(str)\nreturn self.async_show_form(step_id='init', data_schema=vol.Schema(schema), errors=errors)", "errors: dict[str, str] = {}\nhass = self._auth_module.hass\nassert self._secret and self._count\nif user_input:\n verified = await hass.async_add_executor_job(_verify_otp, self._secret, user_input['code'], self._count)\n if verified:\n await self._auth_module.async_setup_user(self._user_id, {'notify_service': self._notify_service, 'target': self._target})\n return self.async_create_entry(data={})\n errors['base'] = 'invalid_code'\ncode = await hass.async_add_executor_job(_generate_otp, self._secret, self._count)\nassert self._notify_service\ntry:\n await self._auth_module.async_notify(code, self._notify_service, self._target)\nexcept ServiceNotFound:\n return self.async_abort(reason='notify_service_not_exist')\nreturn self.async_show_form(step_id='setup', data_schema=self._setup_schema, description_placeholders={'notify_service': self._notify_service}, errors=errors)"], "bodies_text": "<|body_start_0|>\n super().__init__(auth_module, setup_schema, user_id)\n self._auth_module: NotifyAuthModule = auth_module\n self._available_notify_services = available_notify_services\n self._secret: str | None = None\n self._count: int | None = None\n self._notify_service: str | None = None\n self._target: str | None = None\n<|end_body_0|>\n\n<|body_start_1|>\n errors: dict[str, str] = {}\n hass = self._auth_module.hass\n if user_input:\n self._notify_service = user_input['notify_service']\n self._target = user_input.get('target')\n self._secret = await hass.async_add_executor_job(_generate_secret)\n self._count = await hass.async_add_executor_job(_generate_random)\n return await self.async_step_setup()\n if not self._available_notify_services:\n return self.async_abort(reason='no_available_service')\n schema: dict[str, Any] = OrderedDict()\n schema['notify_service'] = vol.In(self._available_notify_services)\n schema['target'] = vol.Optional(str)\n return self.async_show_form(step_id='init', data_schema=vol.Schema(schema), errors=errors)\n<|end_body_1|>\n\n<|body_start_2|>\n errors: dict[str, str] = {}\n hass = self._auth_module.hass\n assert self._secret and self._count\n if user_input:\n verified = await hass.async_add_executor_job(_verify_otp, self._secret, user_input['code'], self._count)\n if verified:\n await self._auth_module.async_setup_user(self._user_id, {'notify_service': self._notify_service, 'target': self._target})\n return self.async_create_entry(data={})\n errors['base'] = 'invalid_code'\n code = await hass.async_add_executor_job(_generate_otp, 
self._secret, self._count)\n assert self._notify_service\n try:\n await self._auth_module.async_notify(code, self._notify_service, self._target)\n except ServiceNotFound:\n return self.async_abort(reason='notify_service_not_exist')\n return self.async_show_form(step_id='setup', data_schema=self._setup_schema, description_placeholders={'notify_service': self._notify_service}, errors=errors)\n<|end_body_2|>\n", "class_docstring": "Handler for the setup flow.", "class_name": "NotifySetupFlow", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass NotifySetupFlow:\n \"\"\"Handler for the setup flow.\"\"\"\n\n def __init__(self, auth_module: NotifyAuthModule, setup_schema: vol.Schema, user_id: str, available_notify_services: list[str]) -> None:\n \"\"\"Initialize the setup flow.\"\"\"\n <|body_0|>\n\n async def async_step_init(self, user_input: dict[str, str] | None=None) -> FlowResult:\n \"\"\"Let user select available notify services.\"\"\"\n <|body_1|>\n\n async def async_step_setup(self, user_input: dict[str, str] | None=None) -> FlowResult:\n \"\"\"Verify user can receive one-time password.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(auth_module, setup_schema, user_id)\n self._auth_module: NotifyAuthModule = auth_module\n self._available_notify_services = available_notify_services\n self._secret: str | None = None\n self._count: int | None = None\n self._notify_service: str | None = None\n self._target: str | None = None\n<|end_body_0|>\n\n<|body_start_1|>\n errors: dict[str, str] = {}\n hass = self._auth_module.hass\n if user_input:\n self._notify_service = user_input['notify_service']\n self._target = user_input.get('target')\n self._secret = await hass.async_add_executor_job(_generate_secret)\n self._count = await hass.async_add_executor_job(_generate_random)\n return await self.async_step_setup()\n if not self._available_notify_services:\n return self.async_abort(reason='no_available_service')\n schema: dict[str, Any] = OrderedDict()\n schema['notify_service'] = vol.In(self._available_notify_services)\n schema['target'] = vol.Optional(str)\n return self.async_show_form(step_id='init', data_schema=vol.Schema(schema), errors=errors)\n<|end_body_1|>\n\n<|body_start_2|>\n errors: dict[str, str] = {}\n hass = self._auth_module.hass\n assert self._secret and self._count\n if user_input:\n verified = await hass.async_add_executor_job(_verify_otp, self._secret, user_input['code'], self._count)\n if verified:\n await self._auth_module.async_setup_user(self._user_id, {'notify_service': self._notify_service, 'target': self._target})\n return self.async_create_entry(data={})\n errors['base'] = 'invalid_code'\n code = await hass.async_add_executor_job(_generate_otp, self._secret, self._count)\n assert self._notify_service\n try:\n await self._auth_module.async_notify(code, self._notify_service, self._target)\n except ServiceNotFound:\n return self.async_abort(reason='notify_service_not_exist')\n return self.async_show_form(step_id='setup', data_schema=self._setup_schema, description_placeholders={'notify_service': self._notify_service}, errors=errors)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000331", "length_bytes": 11980, "license_type": "permissive", "methods": [{"docstring": "Initialize the setup flow.", "name": "__init__", "signature": "def __init__(self, auth_module: NotifyAuthModule, setup_schema: vol.Schema, user_id: str, available_notify_services: list[str]) -> 
None"}, {"docstring": "Let user select available notify services.", "name": "async_step_init", "signature": "async def async_step_init(self, user_input: dict[str, str] | None=None) -> FlowResult"}, {"docstring": "Verify user can receive one-time password.", "name": "async_step_setup", "signature": "async def async_step_setup(self, user_input: dict[str, str] | None=None) -> FlowResult"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_042600", "prompt": "Implement the Python class `NotifySetupFlow` described below.\n\nClass description:\nHandler for the setup flow.\n\nMethod signatures and docstrings:\n- def __init__(self, auth_module: NotifyAuthModule, setup_schema: vol.Schema, user_id: str, available_notify_services: list[str]) -> None: Initialize the setup flow.\n- async def async_step_init(self, user_input: dict[str, str] | None=None) -> FlowResult: Let user select available notify services.\n- async def async_step_setup(self, user_input: dict[str, str] | None=None) -> FlowResult: Verify user can receive one-time password.", "prompted_full_text": "Implement the Python class `NotifySetupFlow` described below.\n\nClass description:\nHandler for the setup flow.\n\nMethod signatures and docstrings:\n- def __init__(self, auth_module: NotifyAuthModule, setup_schema: vol.Schema, user_id: str, available_notify_services: list[str]) -> None: Initialize the setup flow.\n- async def async_step_init(self, user_input: dict[str, str] | None=None) -> FlowResult: Let user select available notify services.\n- async def async_step_setup(self, user_input: dict[str, str] | None=None) -> FlowResult: Verify user can receive one-time password.\n\n<|skeleton|>\nclass NotifySetupFlow:\n \"\"\"Handler for the setup flow.\"\"\"\n\n def __init__(self, auth_module: NotifyAuthModule, setup_schema: vol.Schema, user_id: str, available_notify_services: list[str]) -> None:\n \"\"\"Initialize the setup flow.\"\"\"\n <|body_0|>\n\n async def async_step_init(self, user_input: dict[str, str] | None=None) -> FlowResult:\n \"\"\"Let user select available notify services.\"\"\"\n <|body_1|>\n\n async def async_step_setup(self, user_input: dict[str, str] | None=None) -> FlowResult:\n \"\"\"Verify user can receive one-time password.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(auth_module, setup_schema, user_id)\n self._auth_module: NotifyAuthModule = auth_module\n self._available_notify_services = available_notify_services\n self._secret: str | None = None\n self._count: int | None = None\n self._notify_service: str | None = None\n self._target: str | None = None\n<|end_body_0|>\n\n<|body_start_1|>\n errors: dict[str, str] = {}\n hass = self._auth_module.hass\n if user_input:\n self._notify_service = user_input['notify_service']\n self._target = user_input.get('target')\n self._secret = await hass.async_add_executor_job(_generate_secret)\n self._count = await hass.async_add_executor_job(_generate_random)\n return await self.async_step_setup()\n if not self._available_notify_services:\n return self.async_abort(reason='no_available_service')\n schema: dict[str, Any] = OrderedDict()\n schema['notify_service'] = vol.In(self._available_notify_services)\n schema['target'] = vol.Optional(str)\n return self.async_show_form(step_id='init', data_schema=vol.Schema(schema), errors=errors)\n<|end_body_1|>\n\n<|body_start_2|>\n errors: dict[str, str] = {}\n hass = self._auth_module.hass\n assert self._secret and self._count\n if user_input:\n verified = await 
hass.async_add_executor_job(_verify_otp, self._secret, user_input['code'], self._count)\n if verified:\n await self._auth_module.async_setup_user(self._user_id, {'notify_service': self._notify_service, 'target': self._target})\n return self.async_create_entry(data={})\n errors['base'] = 'invalid_code'\n code = await hass.async_add_executor_job(_generate_otp, self._secret, self._count)\n assert self._notify_service\n try:\n await self._auth_module.async_notify(code, self._notify_service, self._target)\n except ServiceNotFound:\n return self.async_abort(reason='notify_service_not_exist')\n return self.async_show_form(step_id='setup', data_schema=self._setup_schema, description_placeholders={'notify_service': self._notify_service}, errors=errors)\n<|end_body_2|>\n", "revision_id": "80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743", "skeleton": "<|skeleton|>\nclass NotifySetupFlow:\n \"\"\"Handler for the setup flow.\"\"\"\n\n def __init__(self, auth_module: NotifyAuthModule, setup_schema: vol.Schema, user_id: str, available_notify_services: list[str]) -> None:\n \"\"\"Initialize the setup flow.\"\"\"\n <|body_0|>\n\n async def async_step_init(self, user_input: dict[str, str] | None=None) -> FlowResult:\n \"\"\"Let user select available notify services.\"\"\"\n <|body_1|>\n\n async def async_step_setup(self, user_input: dict[str, str] | None=None) -> FlowResult:\n \"\"\"Verify user can receive one-time password.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class NotifySetupFlow:\n \"\"\"Handler for the setup flow.\"\"\"\n\n def __init__(self, auth_module: NotifyAuthModule, setup_schema: vol.Schema, user_id: str, available_notify_services: list[str]) -> None:\n \"\"\"Initialize the setup flow.\"\"\"\n super().__init__(auth_module, setup_schema, user_id)\n self._auth_module: NotifyAuthModule = auth_module\n self._available_notify_services = available_notify_services\n self._secret: str | None = None\n self._count: int | None = None\n self._notify_service: str | None = None\n self._target: str | None = None\n\n async def async_step_init(self, user_input: dict[str, str] | None=None) -> FlowResult:\n \"\"\"Let user select available notify services.\"\"\"\n errors: dict[str, str] = {}\n hass = self._auth_module.hass\n if user_input:\n self._notify_service = user_input['notify_service']\n self._target = user_input.get('target')\n self._secret = await hass.async_add_executor_job(_generate_secret)\n self._count = await hass.async_add_executor_job(_generate_random)\n return await self.async_step_setup()\n if not self._available_notify_services:\n return self.async_abort(reason='no_available_service')\n schema: dict[str, Any] = OrderedDict()\n schema['notify_service'] = vol.In(self._available_notify_services)\n schema['target'] = vol.Optional(str)\n return self.async_show_form(step_id='init', data_schema=vol.Schema(schema), errors=errors)\n\n async def async_step_setup(self, user_input: dict[str, str] | None=None) -> FlowResult:\n \"\"\"Verify user can receive one-time password.\"\"\"\n errors: dict[str, str] = {}\n hass = self._auth_module.hass\n assert self._secret and self._count\n if user_input:\n verified = await hass.async_add_executor_job(_verify_otp, self._secret, user_input['code'], self._count)\n if verified:\n await self._auth_module.async_setup_user(self._user_id, {'notify_service': self._notify_service, 'target': self._target})\n return 
self.async_create_entry(data={})\n errors['base'] = 'invalid_code'\n code = await hass.async_add_executor_job(_generate_otp, self._secret, self._count)\n assert self._notify_service\n try:\n await self._auth_module.async_notify(code, self._notify_service, self._target)\n except ServiceNotFound:\n return self.async_abort(reason='notify_service_not_exist')\n return self.async_show_form(step_id='setup', data_schema=self._setup_schema, description_placeholders={'notify_service': self._notify_service}, errors=errors)\n", "source": "the_stack_v2_python_sparse", "source_path": "homeassistant/auth/mfa_modules/notify.py", "source_repo": "home-assistant/core", "split": "val", "star_events_count": 35501}
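The NotifySetupFlow record above leans on module-level helpers (_generate_secret, _generate_random, _generate_otp, _verify_otp) that sit outside the class and are not included in the record; they produce and check a counter-based one-time password. Purely as an illustration of that idea, here is a minimal RFC 4226 HOTP sketch; it is not Home Assistant's actual helper code.

import hashlib
import hmac
import struct


def hotp(secret: bytes, counter: int, digits: int = 6) -> str:
    # HMAC-SHA1 over the big-endian 8-byte counter, then dynamic truncation.
    mac = hmac.new(secret, struct.pack('>Q', counter), hashlib.sha1).digest()
    offset = mac[-1] & 0x0F
    code = struct.unpack('>I', mac[offset:offset + 4])[0] & 0x7FFFFFFF
    return str(code % 10 ** digits).zfill(digits)


def verify(secret: bytes, counter: int, code: str) -> bool:
    return hmac.compare_digest(hotp(secret, counter), code)


assert verify(b'12345678901234567890', 0, '755224')  # RFC 4226 test vector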
{"blob_id": "6adabbed041444abab2016d473b77df470ca0cdc", "bodies": ["self.graph = [[] for _ in range(n)]\nfor edge in edges:\n self.graph[edge[0]].append(edge[1])\n self.graph[edge[1]].append(edge[0])\nself.height0 = [0] * n\nself.height = [0] * n\nself.dfs0(0)\nself.dfs1(0)\nh = n\nans = []\nfor i in range(n):\n if self.height[i] < h:\n h = self.height[i]\n ans = []\n if self.height[i] == h:\n ans.append(i)\nreturn ans", "self.height0[u] = 1\nh = 0\nfor v in self.graph[u]:\n if self.height0[v] != 0:\n continue\n self.dfs0(v)\n h = max(h, self.height0[v])\nself.height0[u] = h + 1", "first, second = (0, 0)\nfor v in self.graph[u]:\n if self.height0[v] > first:\n second = first\n first = self.height0[v]\n elif self.height0[v] > second:\n second = self.height0[v]\nfor v in self.graph[u]:\n if self.height[v] != 0:\n continue\n self.height0[u] = (first if self.height0[v] != first else second) + 1\n self.height[v] = max(self.height0[v], self.height0[u] + 1)\n self.dfs1(v)"], "bodies_text": "<|body_start_0|>\n self.graph = [[] for _ in range(n)]\n for edge in edges:\n self.graph[edge[0]].append(edge[1])\n self.graph[edge[1]].append(edge[0])\n self.height0 = [0] * n\n self.height = [0] * n\n self.dfs0(0)\n self.dfs1(0)\n h = n\n ans = []\n for i in range(n):\n if self.height[i] < h:\n h = self.height[i]\n ans = []\n if self.height[i] == h:\n ans.append(i)\n return ans\n<|end_body_0|>\n\n<|body_start_1|>\n self.height0[u] = 1\n h = 0\n for v in self.graph[u]:\n if self.height0[v] != 0:\n continue\n self.dfs0(v)\n h = max(h, self.height0[v])\n self.height0[u] = h + 1\n<|end_body_1|>\n\n<|body_start_2|>\n first, second = (0, 0)\n for v in self.graph[u]:\n if self.height0[v] > first:\n second = first\n first = self.height0[v]\n elif self.height0[v] > second:\n second = self.height0[v]\n for v in self.graph[u]:\n if self.height[v] != 0:\n continue\n self.height0[u] = (first if self.height0[v] != first else second) + 1\n self.height[v] = max(self.height0[v], self.height0[u] + 1)\n self.dfs1(v)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def findMinHeightTrees(self, n, edges):\n \"\"\"u 为根 a1\\\\ /b1 a2-u----v-b2 a3/ \b3 先计算在 u 为根的树中,以各节点为根的子树高度 height_u[u] 表示“在 u 为根的树中,以 u 为根的子树高度”,即树高 height_u[v] 表示“在 u 为根的树中,以 v 为根的子树高度” u 换根成 v,我们想求以 v 为根的树高 a1\\\\ 换根 /b1 a2-u----v-b2 a3/ \b3 height[v] = height_v[v] = max(height_u[v], height_v[u]+1) height_v[u] 的高度怎么求呢?我们注意到: 以 a1-a3, b1-b3 为根的子树高度都保持不变, 对这些节点来说其 height_v 就等于 height_u 1. height_v[u] 可以通过求 a1-a3 的最大值来求得 2. 
height_v[u] 还可以通过记录在 以 u 为根的树中,a1-a3 和 v 为根的子树高度最大值和次大值 - 如果v 为根的子树高度是最大的,a1-a3 的值中只有次大值 - 如果v 为根的子树高度不是最大的,a1-a3 的值中还有最大值 :type n: int :type edges: List[List[int]] :rtype: List[int]\"\"\"\n <|body_0|>\n\n def dfs0(self, u):\n \"\"\"在以 0 号节点为根的树中,根为 u 节点的子树高度\"\"\"\n <|body_1|>\n\n def dfs1(self, u):\n \"\"\"进行换根动态规划,计算出以各节点为根的所有树的高度 原先以 u 为根,换成以 v 为根,其中 v 为 u 的某个子节点 self.height0 的含义 self.height_u,即以 u 为根的树\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.graph = [[] for _ in range(n)]\n for edge in edges:\n self.graph[edge[0]].append(edge[1])\n self.graph[edge[1]].append(edge[0])\n self.height0 = [0] * n\n self.height = [0] * n\n self.dfs0(0)\n self.dfs1(0)\n h = n\n ans = []\n for i in range(n):\n if self.height[i] < h:\n h = self.height[i]\n ans = []\n if self.height[i] == h:\n ans.append(i)\n return ans\n<|end_body_0|>\n\n<|body_start_1|>\n self.height0[u] = 1\n h = 0\n for v in self.graph[u]:\n if self.height0[v] != 0:\n continue\n self.dfs0(v)\n h = max(h, self.height0[v])\n self.height0[u] = h + 1\n<|end_body_1|>\n\n<|body_start_2|>\n first, second = (0, 0)\n for v in self.graph[u]:\n if self.height0[v] > first:\n second = first\n first = self.height0[v]\n elif self.height0[v] > second:\n second = self.height0[v]\n for v in self.graph[u]:\n if self.height[v] != 0:\n continue\n self.height0[u] = (first if self.height0[v] != first else second) + 1\n self.height[v] = max(self.height0[v], self.height0[u] + 1)\n self.dfs1(v)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000332", "length_bytes": 5788, "license_type": "no_license", "methods": [{"docstring": "u 为根 a1\\\\ /b1 a2-u----v-b2 a3/ \b3 先计算在 u 为根的树中,以各节点为根的子树高度 height_u[u] 表示“在 u 为根的树中,以 u 为根的子树高度”,即树高 height_u[v] 表示“在 u 为根的树中,以 v 为根的子树高度” u 换根成 v,我们想求以 v 为根的树高 a1\\\\ 换根 /b1 a2-u----v-b2 a3/ \b3 height[v] = height_v[v] = max(height_u[v], height_v[u]+1) height_v[u] 的高度怎么求呢?我们注意到: 以 a1-a3, b1-b3 为根的子树高度都保持不变, 对这些节点来说其 height_v 就等于 height_u 1. height_v[u] 可以通过求 a1-a3 的最大值来求得 2. height_v[u] 还可以通过记录在 以 u 为根的树中,a1-a3 和 v 为根的子树高度最大值和次大值 - 如果v 为根的子树高度是最大的,a1-a3 的值中只有次大值 - 如果v 为根的子树高度不是最大的,a1-a3 的值中还有最大值 :type n: int :type edges: List[List[int]] :rtype: List[int]", "name": "findMinHeightTrees", "signature": "def findMinHeightTrees(self, n, edges)"}, {"docstring": "在以 0 号节点为根的树中,根为 u 节点的子树高度", "name": "dfs0", "signature": "def dfs0(self, u)"}, {"docstring": "进行换根动态规划,计算出以各节点为根的所有树的高度 原先以 u 为根,换成以 v 为根,其中 v 为 u 的某个子节点 self.height0 的含义 self.height_u,即以 u 为根的树", "name": "dfs1", "signature": "def dfs1(self, u)"}], "n_methods": 3, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def findMinHeightTrees(self, n, edges): u 为根 a1\\\\ /b1 a2-u----v-b2 a3/ \b3 先计算在 u 为根的树中,以各节点为根的子树高度 height_u[u] 表示“在 u 为根的树中,以 u 为根的子树高度”,即树高 height_u[v] 表示“在 u 为根的树中,以 v 为根的子树高度” u 换根成 v,我们想求以 v 为根的树高 a1\\\\ 换根 /b1 a2-u----v-b2 a3/ \b3 height[v] = height_v[v] = max(height_u[v], height_v[u]+1) height_v[u] 的高度怎么求呢?我们注意到: 以 a1-a3, b1-b3 为根的子树高度都保持不变, 对这些节点来说其 height_v 就等于 height_u 1. height_v[u] 可以通过求 a1-a3 的最大值来求得 2. 
height_v[u] 还可以通过记录在 以 u 为根的树中,a1-a3 和 v 为根的子树高度最大值和次大值 - 如果v 为根的子树高度是最大的,a1-a3 的值中只有次大值 - 如果v 为根的子树高度不是最大的,a1-a3 的值中还有最大值 :type n: int :type edges: List[List[int]] :rtype: List[int]\n- def dfs0(self, u): 在以 0 号节点为根的树中,根为 u 节点的子树高度\n- def dfs1(self, u): 进行换根动态规划,计算出以各节点为根的所有树的高度 原先以 u 为根,换成以 v 为根,其中 v 为 u 的某个子节点 self.height0 的含义 self.height_u,即以 u 为根的树", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def findMinHeightTrees(self, n, edges): u 为根 a1\\\\ /b1 a2-u----v-b2 a3/ \b3 先计算在 u 为根的树中,以各节点为根的子树高度 height_u[u] 表示“在 u 为根的树中,以 u 为根的子树高度”,即树高 height_u[v] 表示“在 u 为根的树中,以 v 为根的子树高度” u 换根成 v,我们想求以 v 为根的树高 a1\\\\ 换根 /b1 a2-u----v-b2 a3/ \b3 height[v] = height_v[v] = max(height_u[v], height_v[u]+1) height_v[u] 的高度怎么求呢?我们注意到: 以 a1-a3, b1-b3 为根的子树高度都保持不变, 对这些节点来说其 height_v 就等于 height_u 1. height_v[u] 可以通过求 a1-a3 的最大值来求得 2. height_v[u] 还可以通过记录在 以 u 为根的树中,a1-a3 和 v 为根的子树高度最大值和次大值 - 如果v 为根的子树高度是最大的,a1-a3 的值中只有次大值 - 如果v 为根的子树高度不是最大的,a1-a3 的值中还有最大值 :type n: int :type edges: List[List[int]] :rtype: List[int]\n- def dfs0(self, u): 在以 0 号节点为根的树中,根为 u 节点的子树高度\n- def dfs1(self, u): 进行换根动态规划,计算出以各节点为根的所有树的高度 原先以 u 为根,换成以 v 为根,其中 v 为 u 的某个子节点 self.height0 的含义 self.height_u,即以 u 为根的树\n\n<|skeleton|>\nclass Solution:\n\n def findMinHeightTrees(self, n, edges):\n \"\"\"u 为根 a1\\\\ /b1 a2-u----v-b2 a3/ \b3 先计算在 u 为根的树中,以各节点为根的子树高度 height_u[u] 表示“在 u 为根的树中,以 u 为根的子树高度”,即树高 height_u[v] 表示“在 u 为根的树中,以 v 为根的子树高度” u 换根成 v,我们想求以 v 为根的树高 a1\\\\ 换根 /b1 a2-u----v-b2 a3/ \b3 height[v] = height_v[v] = max(height_u[v], height_v[u]+1) height_v[u] 的高度怎么求呢?我们注意到: 以 a1-a3, b1-b3 为根的子树高度都保持不变, 对这些节点来说其 height_v 就等于 height_u 1. height_v[u] 可以通过求 a1-a3 的最大值来求得 2. height_v[u] 还可以通过记录在 以 u 为根的树中,a1-a3 和 v 为根的子树高度最大值和次大值 - 如果v 为根的子树高度是最大的,a1-a3 的值中只有次大值 - 如果v 为根的子树高度不是最大的,a1-a3 的值中还有最大值 :type n: int :type edges: List[List[int]] :rtype: List[int]\"\"\"\n <|body_0|>\n\n def dfs0(self, u):\n \"\"\"在以 0 号节点为根的树中,根为 u 节点的子树高度\"\"\"\n <|body_1|>\n\n def dfs1(self, u):\n \"\"\"进行换根动态规划,计算出以各节点为根的所有树的高度 原先以 u 为根,换成以 v 为根,其中 v 为 u 的某个子节点 self.height0 的含义 self.height_u,即以 u 为根的树\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.graph = [[] for _ in range(n)]\n for edge in edges:\n self.graph[edge[0]].append(edge[1])\n self.graph[edge[1]].append(edge[0])\n self.height0 = [0] * n\n self.height = [0] * n\n self.dfs0(0)\n self.dfs1(0)\n h = n\n ans = []\n for i in range(n):\n if self.height[i] < h:\n h = self.height[i]\n ans = []\n if self.height[i] == h:\n ans.append(i)\n return ans\n<|end_body_0|>\n\n<|body_start_1|>\n self.height0[u] = 1\n h = 0\n for v in self.graph[u]:\n if self.height0[v] != 0:\n continue\n self.dfs0(v)\n h = max(h, self.height0[v])\n self.height0[u] = h + 1\n<|end_body_1|>\n\n<|body_start_2|>\n first, second = (0, 0)\n for v in self.graph[u]:\n if self.height0[v] > first:\n second = first\n first = self.height0[v]\n elif self.height0[v] > second:\n second = self.height0[v]\n for v in self.graph[u]:\n if self.height[v] != 0:\n continue\n self.height0[u] = (first if self.height0[v] != first else second) + 1\n self.height[v] = max(self.height0[v], self.height0[u] + 1)\n self.dfs1(v)\n<|end_body_2|>\n", "revision_id": "860590239da0618c52967a55eda8d6bbe00bfa96", "skeleton": "<|skeleton|>\nclass Solution:\n\n def findMinHeightTrees(self, n, edges):\n \"\"\"u 为根 a1\\\\ /b1 a2-u----v-b2 a3/ \b3 先计算在 u 为根的树中,以各节点为根的子树高度 height_u[u] 表示“在 u 为根的树中,以 u 为根的子树高度”,即树高 height_u[v] 表示“在 u 为根的树中,以 v 
为根的子树高度” u 换根成 v,我们想求以 v 为根的树高 a1\\\\ 换根 /b1 a2-u----v-b2 a3/ \b3 height[v] = height_v[v] = max(height_u[v], height_v[u]+1) height_v[u] 的高度怎么求呢?我们注意到: 以 a1-a3, b1-b3 为根的子树高度都保持不变, 对这些节点来说其 height_v 就等于 height_u 1. height_v[u] 可以通过求 a1-a3 的最大值来求得 2. height_v[u] 还可以通过记录在 以 u 为根的树中,a1-a3 和 v 为根的子树高度最大值和次大值 - 如果v 为根的子树高度是最大的,a1-a3 的值中只有次大值 - 如果v 为根的子树高度不是最大的,a1-a3 的值中还有最大值 :type n: int :type edges: List[List[int]] :rtype: List[int]\"\"\"\n <|body_0|>\n\n def dfs0(self, u):\n \"\"\"在以 0 号节点为根的树中,根为 u 节点的子树高度\"\"\"\n <|body_1|>\n\n def dfs1(self, u):\n \"\"\"进行换根动态规划,计算出以各节点为根的所有树的高度 原先以 u 为根,换成以 v 为根,其中 v 为 u 的某个子节点 self.height0 的含义 self.height_u,即以 u 为根的树\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def findMinHeightTrees(self, n, edges):\n \"\"\"u 为根 a1\\\\ /b1 a2-u----v-b2 a3/ \b3 先计算在 u 为根的树中,以各节点为根的子树高度 height_u[u] 表示“在 u 为根的树中,以 u 为根的子树高度”,即树高 height_u[v] 表示“在 u 为根的树中,以 v 为根的子树高度” u 换根成 v,我们想求以 v 为根的树高 a1\\\\ 换根 /b1 a2-u----v-b2 a3/ \b3 height[v] = height_v[v] = max(height_u[v], height_v[u]+1) height_v[u] 的高度怎么求呢?我们注意到: 以 a1-a3, b1-b3 为根的子树高度都保持不变, 对这些节点来说其 height_v 就等于 height_u 1. height_v[u] 可以通过求 a1-a3 的最大值来求得 2. height_v[u] 还可以通过记录在 以 u 为根的树中,a1-a3 和 v 为根的子树高度最大值和次大值 - 如果v 为根的子树高度是最大的,a1-a3 的值中只有次大值 - 如果v 为根的子树高度不是最大的,a1-a3 的值中还有最大值 :type n: int :type edges: List[List[int]] :rtype: List[int]\"\"\"\n self.graph = [[] for _ in range(n)]\n for edge in edges:\n self.graph[edge[0]].append(edge[1])\n self.graph[edge[1]].append(edge[0])\n self.height0 = [0] * n\n self.height = [0] * n\n self.dfs0(0)\n self.dfs1(0)\n h = n\n ans = []\n for i in range(n):\n if self.height[i] < h:\n h = self.height[i]\n ans = []\n if self.height[i] == h:\n ans.append(i)\n return ans\n\n def dfs0(self, u):\n \"\"\"在以 0 号节点为根的树中,根为 u 节点的子树高度\"\"\"\n self.height0[u] = 1\n h = 0\n for v in self.graph[u]:\n if self.height0[v] != 0:\n continue\n self.dfs0(v)\n h = max(h, self.height0[v])\n self.height0[u] = h + 1\n\n def dfs1(self, u):\n \"\"\"进行换根动态规划,计算出以各节点为根的所有树的高度 原先以 u 为根,换成以 v 为根,其中 v 为 u 的某个子节点 self.height0 的含义 self.height_u,即以 u 为根的树\"\"\"\n first, second = (0, 0)\n for v in self.graph[u]:\n if self.height0[v] > first:\n second = first\n first = self.height0[v]\n elif self.height0[v] > second:\n second = self.height0[v]\n for v in self.graph[u]:\n if self.height[v] != 0:\n continue\n self.height0[u] = (first if self.height0[v] != first else second) + 1\n self.height[v] = max(self.height0[v], self.height0[u] + 1)\n self.dfs1(v)\n", "source": "the_stack_v2_python_sparse", "source_path": "LeetCode/p0310/I/minimum-height-trees.py", "source_repo": "Ynjxsjmh/PracticeMakesPerfect", "split": "val", "star_events_count": 0}
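The rerooting solution in the record above computes the height of the tree rooted at every node in two DFS passes; the minimum-height roots it returns are the tree's one or two centers. The driver below cross-checks it against the classic leaf-peeling formulation of LeetCode 310, assuming the record's Solution class is in scope.

from collections import deque


def centers_by_peeling(n, edges):
    # Repeatedly strip the current layer of leaves until 1 or 2 nodes remain.
    if n == 1:
        return [0]
    adj = [set() for _ in range(n)]
    for a, b in edges:
        adj[a].add(b)
        adj[b].add(a)
    leaves = deque(i for i in range(n) if len(adj[i]) == 1)
    remaining = n
    while remaining > 2:
        for _ in range(len(leaves)):
            leaf = leaves.popleft()
            remaining -= 1
            (nbr,) = adj[leaf]
            adj[nbr].discard(leaf)
            if len(adj[nbr]) == 1:
                leaves.append(nbr)
    return sorted(leaves)


edges = [[3, 0], [3, 1], [3, 2], [3, 4], [5, 4]]
assert sorted(Solution().findMinHeightTrees(6, edges)) == centers_by_peeling(6, edges)  # [3, 4]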
{"blob_id": "77e962aba1379118a851595e611258fb5586b7d8", "bodies": ["if not board or not board[0]:\n return\nprevious = [0] * len(board[0])\nsurround = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)]\nfor i in range(len(board)):\n left, tmp_previous = (0, board[i].copy())\n for j in range(len(board[0])):\n sum_, tmp = (0, board[i][j])\n for dx, dy in surround:\n if dx == -1:\n if 0 <= j + dy < len(board[0]):\n tmp1 = previous[j + dy]\n else:\n tmp1 = 0\n sum_ += tmp1\n elif dx == 0 and dy == -1:\n sum_ += left\n left = tmp\n else:\n if i + dx < len(board) and 0 <= j + dy < len(board[0]):\n tmp2 = board[i + dx][j + dy]\n else:\n tmp2 = 0\n sum_ += tmp2\n if board[i][j] and (sum_ > 3 or sum_ < 2):\n board[i][j] = 0\n elif board[i][j] == 0 and sum_ == 3:\n board[i][j] = 1\n previous = tmp_previous", "if not board or not board[0]:\n return\n\ndef g(i: int, j: int) -> int:\n if 0 <= i < len(board) and 0 <= j < len(board[0]):\n return board[i][j] & 1\n return 0\nsurround = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)]\nfor i in range(len(board)):\n for j in range(len(board[0])):\n sum_ = 0\n for dx, dy in surround:\n sum_ += g(i + dx, j + dy)\n if board[i][j]:\n if sum_ < 2 or sum_ > 3:\n board[i][j] = 1\n else:\n board[i][j] = 3\n elif sum_ == 3:\n board[i][j] = 2\nfor i in range(len(board)):\n for j in range(len(board[0])):\n board[i][j] >>= 1"], "bodies_text": "<|body_start_0|>\n if not board or not board[0]:\n return\n previous = [0] * len(board[0])\n surround = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)]\n for i in range(len(board)):\n left, tmp_previous = (0, board[i].copy())\n for j in range(len(board[0])):\n sum_, tmp = (0, board[i][j])\n for dx, dy in surround:\n if dx == -1:\n if 0 <= j + dy < len(board[0]):\n tmp1 = previous[j + dy]\n else:\n tmp1 = 0\n sum_ += tmp1\n elif dx == 0 and dy == -1:\n sum_ += left\n left = tmp\n else:\n if i + dx < len(board) and 0 <= j + dy < len(board[0]):\n tmp2 = board[i + dx][j + dy]\n else:\n tmp2 = 0\n sum_ += tmp2\n if board[i][j] and (sum_ > 3 or sum_ < 2):\n board[i][j] = 0\n elif board[i][j] == 0 and sum_ == 3:\n board[i][j] = 1\n previous = tmp_previous\n<|end_body_0|>\n\n<|body_start_1|>\n if not board or not board[0]:\n return\n\n def g(i: int, j: int) -> int:\n if 0 <= i < len(board) and 0 <= j < len(board[0]):\n return board[i][j] & 1\n return 0\n surround = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)]\n for i in range(len(board)):\n for j in range(len(board[0])):\n sum_ = 0\n for dx, dy in surround:\n sum_ += g(i + dx, j + dy)\n if board[i][j]:\n if sum_ < 2 or sum_ > 3:\n board[i][j] = 1\n else:\n board[i][j] = 3\n elif sum_ == 3:\n board[i][j] = 2\n for i in range(len(board)):\n for j in range(len(board[0])):\n board[i][j] >>= 1\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def gameOfLife(self, board: List[List[int]]) -> None:\n \"\"\"Do not return anything, modify board in-place instead.\"\"\"\n <|body_0|>\n\n def gameOfLife1(self, board: List[List[int]]) -> None:\n \"\"\"Do not return anything, modify board in-place instead.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not board or not board[0]:\n return\n previous = [0] * len(board[0])\n surround = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)]\n for i in range(len(board)):\n left, tmp_previous = 
(0, board[i].copy())\n for j in range(len(board[0])):\n sum_, tmp = (0, board[i][j])\n for dx, dy in surround:\n if dx == -1:\n if 0 <= j + dy < len(board[0]):\n tmp1 = previous[j + dy]\n else:\n tmp1 = 0\n sum_ += tmp1\n elif dx == 0 and dy == -1:\n sum_ += left\n left = tmp\n else:\n if i + dx < len(board) and 0 <= j + dy < len(board[0]):\n tmp2 = board[i + dx][j + dy]\n else:\n tmp2 = 0\n sum_ += tmp2\n if board[i][j] and (sum_ > 3 or sum_ < 2):\n board[i][j] = 0\n elif board[i][j] == 0 and sum_ == 3:\n board[i][j] = 1\n previous = tmp_previous\n<|end_body_0|>\n\n<|body_start_1|>\n if not board or not board[0]:\n return\n\n def g(i: int, j: int) -> int:\n if 0 <= i < len(board) and 0 <= j < len(board[0]):\n return board[i][j] & 1\n return 0\n surround = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)]\n for i in range(len(board)):\n for j in range(len(board[0])):\n sum_ = 0\n for dx, dy in surround:\n sum_ += g(i + dx, j + dy)\n if board[i][j]:\n if sum_ < 2 or sum_ > 3:\n board[i][j] = 1\n else:\n board[i][j] = 3\n elif sum_ == 3:\n board[i][j] = 2\n for i in range(len(board)):\n for j in range(len(board[0])):\n board[i][j] >>= 1\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000333", "length_bytes": 2741, "license_type": "no_license", "methods": [{"docstring": "Do not return anything, modify board in-place instead.", "name": "gameOfLife", "signature": "def gameOfLife(self, board: List[List[int]]) -> None"}, {"docstring": "Do not return anything, modify board in-place instead.", "name": "gameOfLife1", "signature": "def gameOfLife1(self, board: List[List[int]]) -> None"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003761", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def gameOfLife(self, board: List[List[int]]) -> None: Do not return anything, modify board in-place instead.\n- def gameOfLife1(self, board: List[List[int]]) -> None: Do not return anything, modify board in-place instead.", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def gameOfLife(self, board: List[List[int]]) -> None: Do not return anything, modify board in-place instead.\n- def gameOfLife1(self, board: List[List[int]]) -> None: Do not return anything, modify board in-place instead.\n\n<|skeleton|>\nclass Solution:\n\n def gameOfLife(self, board: List[List[int]]) -> None:\n \"\"\"Do not return anything, modify board in-place instead.\"\"\"\n <|body_0|>\n\n def gameOfLife1(self, board: List[List[int]]) -> None:\n \"\"\"Do not return anything, modify board in-place instead.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not board or not board[0]:\n return\n previous = [0] * len(board[0])\n surround = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)]\n for i in range(len(board)):\n left, tmp_previous = (0, board[i].copy())\n for j in range(len(board[0])):\n sum_, tmp = (0, board[i][j])\n for dx, dy in surround:\n if dx == -1:\n if 0 <= j + dy < len(board[0]):\n tmp1 = previous[j + dy]\n else:\n tmp1 = 0\n sum_ += tmp1\n elif dx == 0 and dy == -1:\n sum_ += left\n left = tmp\n else:\n if i + dx < len(board) and 0 <= j + dy < len(board[0]):\n tmp2 = board[i + dx][j + dy]\n else:\n tmp2 = 0\n sum_ += tmp2\n if board[i][j] and (sum_ > 3 or sum_ < 2):\n board[i][j] = 0\n elif board[i][j] == 0 
and sum_ == 3:\n board[i][j] = 1\n previous = tmp_previous\n<|end_body_0|>\n\n<|body_start_1|>\n if not board or not board[0]:\n return\n\n def g(i: int, j: int) -> int:\n if 0 <= i < len(board) and 0 <= j < len(board[0]):\n return board[i][j] & 1\n return 0\n surround = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)]\n for i in range(len(board)):\n for j in range(len(board[0])):\n sum_ = 0\n for dx, dy in surround:\n sum_ += g(i + dx, j + dy)\n if board[i][j]:\n if sum_ < 2 or sum_ > 3:\n board[i][j] = 1\n else:\n board[i][j] = 3\n elif sum_ == 3:\n board[i][j] = 2\n for i in range(len(board)):\n for j in range(len(board[0])):\n board[i][j] >>= 1\n<|end_body_1|>\n", "revision_id": "e2fecd266bfced6208694b19a2d81182b13dacd6", "skeleton": "<|skeleton|>\nclass Solution:\n\n def gameOfLife(self, board: List[List[int]]) -> None:\n \"\"\"Do not return anything, modify board in-place instead.\"\"\"\n <|body_0|>\n\n def gameOfLife1(self, board: List[List[int]]) -> None:\n \"\"\"Do not return anything, modify board in-place instead.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def gameOfLife(self, board: List[List[int]]) -> None:\n \"\"\"Do not return anything, modify board in-place instead.\"\"\"\n if not board or not board[0]:\n return\n previous = [0] * len(board[0])\n surround = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)]\n for i in range(len(board)):\n left, tmp_previous = (0, board[i].copy())\n for j in range(len(board[0])):\n sum_, tmp = (0, board[i][j])\n for dx, dy in surround:\n if dx == -1:\n if 0 <= j + dy < len(board[0]):\n tmp1 = previous[j + dy]\n else:\n tmp1 = 0\n sum_ += tmp1\n elif dx == 0 and dy == -1:\n sum_ += left\n left = tmp\n else:\n if i + dx < len(board) and 0 <= j + dy < len(board[0]):\n tmp2 = board[i + dx][j + dy]\n else:\n tmp2 = 0\n sum_ += tmp2\n if board[i][j] and (sum_ > 3 or sum_ < 2):\n board[i][j] = 0\n elif board[i][j] == 0 and sum_ == 3:\n board[i][j] = 1\n previous = tmp_previous\n\n def gameOfLife1(self, board: List[List[int]]) -> None:\n \"\"\"Do not return anything, modify board in-place instead.\"\"\"\n if not board or not board[0]:\n return\n\n def g(i: int, j: int) -> int:\n if 0 <= i < len(board) and 0 <= j < len(board[0]):\n return board[i][j] & 1\n return 0\n surround = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)]\n for i in range(len(board)):\n for j in range(len(board[0])):\n sum_ = 0\n for dx, dy in surround:\n sum_ += g(i + dx, j + dy)\n if board[i][j]:\n if sum_ < 2 or sum_ > 3:\n board[i][j] = 1\n else:\n board[i][j] = 3\n elif sum_ == 3:\n board[i][j] = 2\n for i in range(len(board)):\n for j in range(len(board[0])):\n board[i][j] >>= 1\n", "source": "the_stack_v2_python_sparse", "source_path": "gameOfLife.py", "source_repo": "HuipengXu/leetcode", "split": "val", "star_events_count": 0}
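The two bodies in the record above are alternative in-place solutions to LeetCode's Game of Life: `gameOfLife` keeps a rolling copy of the previous row (plus a `left` scratch value) so already-overwritten neighbors can still be counted, while `gameOfLife1` packs the current state into bit 0 and the next state into bit 1, then shifts. A condensed, self-contained sketch of the bit-packing idea for reference; the function name and test grid are illustrative, not from the record, and the encoding is a slight variant (only the next-state bit is ever set):

from typing import List

def game_of_life(board: List[List[int]]) -> None:
    """Bit 0 holds the current state, bit 1 the next state."""
    rows, cols = len(board), len(board[0])

    def live(r: int, c: int) -> int:
        # The low bit is always the pre-update state, even after encoding.
        return board[r][c] & 1 if 0 <= r < rows and 0 <= c < cols else 0

    for r in range(rows):
        for c in range(cols):
            n = sum(live(r + dr, c + dc)
                    for dr in (-1, 0, 1) for dc in (-1, 0, 1)
                    if (dr, dc) != (0, 0))
            if board[r][c] & 1:
                if n in (2, 3):
                    board[r][c] |= 2   # survivor: set the next-state bit
            elif n == 3:
                board[r][c] |= 2       # birth
    for r in range(rows):
        for c in range(cols):
            board[r][c] >>= 1          # shift the next state into place

grid = [[0, 1, 0], [0, 0, 1], [1, 1, 1], [0, 0, 0]]
game_of_life(grid)
assert grid == [[0, 0, 0], [1, 0, 1], [0, 1, 1], [0, 1, 0]]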
{"blob_id": "d30efe73bd6c7c0a0d84a699d708d547debeae49", "bodies": ["self.config_entry = entry\nsession = async_get_clientsession(hass)\nself.tailscale = Tailscale(session=session, api_key=entry.data[CONF_API_KEY], tailnet=entry.data[CONF_TAILNET])\nsuper().__init__(hass, LOGGER, name=DOMAIN, update_interval=SCAN_INTERVAL)", "try:\n return await self.tailscale.devices()\nexcept TailscaleAuthenticationError as err:\n raise ConfigEntryAuthFailed from err"], "bodies_text": "<|body_start_0|>\n self.config_entry = entry\n session = async_get_clientsession(hass)\n self.tailscale = Tailscale(session=session, api_key=entry.data[CONF_API_KEY], tailnet=entry.data[CONF_TAILNET])\n super().__init__(hass, LOGGER, name=DOMAIN, update_interval=SCAN_INTERVAL)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n return await self.tailscale.devices()\n except TailscaleAuthenticationError as err:\n raise ConfigEntryAuthFailed from err\n<|end_body_1|>\n", "class_docstring": "The Tailscale Data Update Coordinator.", "class_name": "TailscaleDataUpdateCoordinator", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TailscaleDataUpdateCoordinator:\n \"\"\"The Tailscale Data Update Coordinator.\"\"\"\n\n def __init__(self, hass: HomeAssistant, entry: ConfigEntry) -> None:\n \"\"\"Initialize the Tailscale coordinator.\"\"\"\n <|body_0|>\n\n async def _async_update_data(self) -> dict[str, Device]:\n \"\"\"Fetch devices from Tailscale.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.config_entry = entry\n session = async_get_clientsession(hass)\n self.tailscale = Tailscale(session=session, api_key=entry.data[CONF_API_KEY], tailnet=entry.data[CONF_TAILNET])\n super().__init__(hass, LOGGER, name=DOMAIN, update_interval=SCAN_INTERVAL)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n return await self.tailscale.devices()\n except TailscaleAuthenticationError as err:\n raise ConfigEntryAuthFailed from err\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000334", "length_bytes": 1472, "license_type": "permissive", "methods": [{"docstring": "Initialize the Tailscale coordinator.", "name": "__init__", "signature": "def __init__(self, hass: HomeAssistant, entry: ConfigEntry) -> None"}, {"docstring": "Fetch devices from Tailscale.", "name": "_async_update_data", "signature": "async def _async_update_data(self) -> dict[str, Device]"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_044092", "prompt": "Implement the Python class `TailscaleDataUpdateCoordinator` described below.\n\nClass description:\nThe Tailscale Data Update Coordinator.\n\nMethod signatures and docstrings:\n- def __init__(self, hass: HomeAssistant, entry: ConfigEntry) -> None: Initialize the Tailscale coordinator.\n- async def _async_update_data(self) -> dict[str, Device]: Fetch devices from Tailscale.", "prompted_full_text": "Implement the Python class `TailscaleDataUpdateCoordinator` described below.\n\nClass description:\nThe Tailscale Data Update Coordinator.\n\nMethod signatures and docstrings:\n- def __init__(self, hass: HomeAssistant, entry: ConfigEntry) -> None: Initialize the Tailscale coordinator.\n- async def _async_update_data(self) -> dict[str, Device]: Fetch devices from Tailscale.\n\n<|skeleton|>\nclass TailscaleDataUpdateCoordinator:\n \"\"\"The Tailscale Data Update Coordinator.\"\"\"\n\n def __init__(self, hass: HomeAssistant, entry: ConfigEntry) -> None:\n \"\"\"Initialize the Tailscale coordinator.\"\"\"\n <|body_0|>\n\n async 
def _async_update_data(self) -> dict[str, Device]:\n \"\"\"Fetch devices from Tailscale.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.config_entry = entry\n session = async_get_clientsession(hass)\n self.tailscale = Tailscale(session=session, api_key=entry.data[CONF_API_KEY], tailnet=entry.data[CONF_TAILNET])\n super().__init__(hass, LOGGER, name=DOMAIN, update_interval=SCAN_INTERVAL)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n return await self.tailscale.devices()\n except TailscaleAuthenticationError as err:\n raise ConfigEntryAuthFailed from err\n<|end_body_1|>\n", "revision_id": "80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743", "skeleton": "<|skeleton|>\nclass TailscaleDataUpdateCoordinator:\n \"\"\"The Tailscale Data Update Coordinator.\"\"\"\n\n def __init__(self, hass: HomeAssistant, entry: ConfigEntry) -> None:\n \"\"\"Initialize the Tailscale coordinator.\"\"\"\n <|body_0|>\n\n async def _async_update_data(self) -> dict[str, Device]:\n \"\"\"Fetch devices from Tailscale.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TailscaleDataUpdateCoordinator:\n \"\"\"The Tailscale Data Update Coordinator.\"\"\"\n\n def __init__(self, hass: HomeAssistant, entry: ConfigEntry) -> None:\n \"\"\"Initialize the Tailscale coordinator.\"\"\"\n self.config_entry = entry\n session = async_get_clientsession(hass)\n self.tailscale = Tailscale(session=session, api_key=entry.data[CONF_API_KEY], tailnet=entry.data[CONF_TAILNET])\n super().__init__(hass, LOGGER, name=DOMAIN, update_interval=SCAN_INTERVAL)\n\n async def _async_update_data(self) -> dict[str, Device]:\n \"\"\"Fetch devices from Tailscale.\"\"\"\n try:\n return await self.tailscale.devices()\n except TailscaleAuthenticationError as err:\n raise ConfigEntryAuthFailed from err\n", "source": "the_stack_v2_python_sparse", "source_path": "homeassistant/components/tailscale/coordinator.py", "source_repo": "home-assistant/core", "split": "val", "star_events_count": 35501}
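The record's skeleton omits the base class, but the `super().__init__(hass, LOGGER, name=DOMAIN, update_interval=SCAN_INTERVAL)` call shows the class is meant to extend Home Assistant's `DataUpdateCoordinator`: poll on an interval, cache the snapshot, and convert an authentication error into a re-auth signal. A framework-free sketch of that shape, using stand-in names throughout (`Coordinator`, `AuthFailed`, and `PermissionError` in place of the real Home Assistant and Tailscale types):

import asyncio
from datetime import timedelta

class AuthFailed(Exception):
    """Stand-in for ConfigEntryAuthFailed: tells the app to re-authenticate."""

class Coordinator:
    """Stand-in for DataUpdateCoordinator: poll, cache, expose .data."""

    def __init__(self, update_interval: timedelta) -> None:
        self.update_interval = update_interval
        self.data = None

    async def _async_update_data(self):
        raise NotImplementedError

    async def async_refresh(self) -> None:
        # Subclasses only implement the fetch; caching lives here.
        self.data = await self._async_update_data()

class DeviceCoordinator(Coordinator):
    def __init__(self, fetch) -> None:
        super().__init__(update_interval=timedelta(seconds=60))
        self._fetch = fetch  # e.g. an API client's devices() coroutine

    async def _async_update_data(self):
        try:
            return await self._fetch()
        except PermissionError as err:  # stand-in for TailscaleAuthenticationError
            raise AuthFailed from err

async def demo() -> None:
    async def fake_devices():
        return {"device-1": {"online": True}}
    coord = DeviceCoordinator(fake_devices)
    await coord.async_refresh()
    print(coord.data)  # {'device-1': {'online': True}}

asyncio.run(demo())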
{"blob_id": "57d10aaf07070f881b63df75e0eefafd369596a9", "bodies": ["form.save()\nmessages.add_message(self.request, messages.SUCCESS, 'Successful sign in')\nreturn HttpResponseRedirect(self.get_success_url())", "errors = form.errors.get_json_data()\nfor error in errors:\n messages.add_message(self.request, messages.ERROR, f\"{errors[error][0]['message']}\")\nreturn self.render_to_response(self.get_context_data(form=form))"], "bodies_text": "<|body_start_0|>\n form.save()\n messages.add_message(self.request, messages.SUCCESS, 'Successful sign in')\n return HttpResponseRedirect(self.get_success_url())\n<|end_body_0|>\n\n<|body_start_1|>\n errors = form.errors.get_json_data()\n for error in errors:\n messages.add_message(self.request, messages.ERROR, f\"{errors[error][0]['message']}\")\n return self.render_to_response(self.get_context_data(form=form))\n<|end_body_1|>\n", "class_docstring": "RegisterView FormView for register user page.", "class_name": "RegisterView", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RegisterView:\n \"\"\"RegisterView FormView for register user page.\"\"\"\n\n def form_valid(self, form):\n \"\"\"If form is valid, save it and show a message. Args: form (obj): Form object Returns: A http response with a successful message\"\"\"\n <|body_0|>\n\n def form_invalid(self, form):\n \"\"\"If form is invalid, show an error message. Args: form (obj): Form object Returns: A http response with an error message\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n form.save()\n messages.add_message(self.request, messages.SUCCESS, 'Successful sign in')\n return HttpResponseRedirect(self.get_success_url())\n<|end_body_0|>\n\n<|body_start_1|>\n errors = form.errors.get_json_data()\n for error in errors:\n messages.add_message(self.request, messages.ERROR, f\"{errors[error][0]['message']}\")\n return self.render_to_response(self.get_context_data(form=form))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000335", "length_bytes": 1527, "license_type": "no_license", "methods": [{"docstring": "If form is valid, save it and show a message. Args: form (obj): Form object Returns: A http response with a successful message", "name": "form_valid", "signature": "def form_valid(self, form)"}, {"docstring": "If form is invalid, show an error message. Args: form (obj): Form object Returns: A http response with an error message", "name": "form_invalid", "signature": "def form_invalid(self, form)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_049674", "prompt": "Implement the Python class `RegisterView` described below.\n\nClass description:\nRegisterView FormView for register user page.\n\nMethod signatures and docstrings:\n- def form_valid(self, form): If form is valid, save it and show a message. Args: form (obj): Form object Returns: A http response with a successful message\n- def form_invalid(self, form): If form is invalid, show an error message. Args: form (obj): Form object Returns: A http response with an error message", "prompted_full_text": "Implement the Python class `RegisterView` described below.\n\nClass description:\nRegisterView FormView for register user page.\n\nMethod signatures and docstrings:\n- def form_valid(self, form): If form is valid, save it and show a message. Args: form (obj): Form object Returns: A http response with a successful message\n- def form_invalid(self, form): If form is invalid, show an error message. 
Args: form (obj): Form object Returns: A http response with an error message\n\n<|skeleton|>\nclass RegisterView:\n \"\"\"RegisterView FormView for register user page.\"\"\"\n\n def form_valid(self, form):\n \"\"\"If form is valid, save it and show a message. Args: form (obj): Form object Returns: A http response with a successful message\"\"\"\n <|body_0|>\n\n def form_invalid(self, form):\n \"\"\"If form is invalid, show an error message. Args: form (obj): Form object Returns: A http response with an error message\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n form.save()\n messages.add_message(self.request, messages.SUCCESS, 'Successful sign in')\n return HttpResponseRedirect(self.get_success_url())\n<|end_body_0|>\n\n<|body_start_1|>\n errors = form.errors.get_json_data()\n for error in errors:\n messages.add_message(self.request, messages.ERROR, f\"{errors[error][0]['message']}\")\n return self.render_to_response(self.get_context_data(form=form))\n<|end_body_1|>\n", "revision_id": "90503254f18df11b7f1e63a21fee17e814a41db0", "skeleton": "<|skeleton|>\nclass RegisterView:\n \"\"\"RegisterView FormView for register user page.\"\"\"\n\n def form_valid(self, form):\n \"\"\"If form is valid, save it and show a message. Args: form (obj): Form object Returns: A http response with a successful message\"\"\"\n <|body_0|>\n\n def form_invalid(self, form):\n \"\"\"If form is invalid, show an error message. Args: form (obj): Form object Returns: A http response with an error message\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class RegisterView:\n \"\"\"RegisterView FormView for register user page.\"\"\"\n\n def form_valid(self, form):\n \"\"\"If form is valid, save it and show a message. Args: form (obj): Form object Returns: A http response with a successful message\"\"\"\n form.save()\n messages.add_message(self.request, messages.SUCCESS, 'Successful sign in')\n return HttpResponseRedirect(self.get_success_url())\n\n def form_invalid(self, form):\n \"\"\"If form is invalid, show an error message. Args: form (obj): Form object Returns: A http response with an error message\"\"\"\n errors = form.errors.get_json_data()\n for error in errors:\n messages.add_message(self.request, messages.ERROR, f\"{errors[error][0]['message']}\")\n return self.render_to_response(self.get_context_data(form=form))\n", "source": "the_stack_v2_python_sparse", "source_path": "users/views.py", "source_repo": "milandre/qapp-project", "split": "val", "star_events_count": 0}
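`form.errors.get_json_data()` returns a mapping of field name to a list of `{'message': ..., 'code': ...}` dicts, and the record's `form_invalid` surfaces only the first message per field. A plain-Python sketch of that flattening, with sample data standing in for a real Django form:

# Shape produced by Django's form.errors.get_json_data():
errors = {
    "email": [{"message": "Enter a valid email address.", "code": "invalid"}],
    "password": [
        {"message": "This password is too short.", "code": "password_too_short"},
        {"message": "This password is too common.", "code": "password_too_common"},
    ],
}

def first_messages(errors: dict) -> list:
    # One message per field, mirroring the loop in form_invalid;
    # later errors on the same field are dropped.
    return [field_errors[0]["message"] for field_errors in errors.values()]

print(first_messages(errors))
# ['Enter a valid email address.', 'This password is too short.']

In the view itself, each string would be handed to `messages.add_message(self.request, messages.ERROR, ...)` before re-rendering the form.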
{"blob_id": "9c14cd7e07fb8d26491edc4fd9aa98a5c59a4916", "bodies": ["threading.Thread.__init__(self)\nself.maze = maze\nself.exit = False\nself.executor = Executor()\nself.exchanger = exchanger", "while not self.exit:\n self.executor.chunk(self.maze, self.maze.characters)\n os.system('cls' if os.name == 'nt' else 'clear')\n self.maze.printMazeArray(self.maze.map, self.maze.characters)\n self.exchanger.update(self.maze)\n self.exchanger.publish(self.maze)\n time.sleep(0.82)"], "bodies_text": "<|body_start_0|>\n threading.Thread.__init__(self)\n self.maze = maze\n self.exit = False\n self.executor = Executor()\n self.exchanger = exchanger\n<|end_body_0|>\n\n<|body_start_1|>\n while not self.exit:\n self.executor.chunk(self.maze, self.maze.characters)\n os.system('cls' if os.name == 'nt' else 'clear')\n self.maze.printMazeArray(self.maze.map, self.maze.characters)\n self.exchanger.update(self.maze)\n self.exchanger.publish(self.maze)\n time.sleep(0.82)\n<|end_body_1|>\n", "class_docstring": "A simulator class used to run the maze simulations based on a map and a set of characters", "class_name": "Simulator", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Simulator:\n \"\"\"A simulator class used to run the maze simulations based on a map and a set of characters\"\"\"\n\n def __init__(self, maze, exchanger):\n \"\"\"Initialize the simulator\"\"\"\n <|body_0|>\n\n def run(self):\n \"\"\"The run method for this thread\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n threading.Thread.__init__(self)\n self.maze = maze\n self.exit = False\n self.executor = Executor()\n self.exchanger = exchanger\n<|end_body_0|>\n\n<|body_start_1|>\n while not self.exit:\n self.executor.chunk(self.maze, self.maze.characters)\n os.system('cls' if os.name == 'nt' else 'clear')\n self.maze.printMazeArray(self.maze.map, self.maze.characters)\n self.exchanger.update(self.maze)\n self.exchanger.publish(self.maze)\n time.sleep(0.82)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000336", "length_bytes": 1003, "license_type": "no_license", "methods": [{"docstring": "Initialize the simulator", "name": "__init__", "signature": "def __init__(self, maze, exchanger)"}, {"docstring": "The run method for this thread", "name": "run", "signature": "def run(self)"}], "n_methods": 2, "prompt": "Implement the Python class `Simulator` described below.\n\nClass description:\nA simulator class used to run the maze simulations based on a map and a set of characters\n\nMethod signatures and docstrings:\n- def __init__(self, maze, exchanger): Initialize the simulator\n- def run(self): The run method for this thread", "prompted_full_text": "Implement the Python class `Simulator` described below.\n\nClass description:\nA simulator class used to run the maze simulations based on a map and a set of characters\n\nMethod signatures and docstrings:\n- def __init__(self, maze, exchanger): Initialize the simulator\n- def run(self): The run method for this thread\n\n<|skeleton|>\nclass Simulator:\n \"\"\"A simulator class used to run the maze simulations based on a map and a set of characters\"\"\"\n\n def __init__(self, maze, exchanger):\n \"\"\"Initialize the simulator\"\"\"\n <|body_0|>\n\n def run(self):\n \"\"\"The run method for this thread\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n threading.Thread.__init__(self)\n self.maze = maze\n self.exit = False\n self.executor = Executor()\n self.exchanger = 
exchanger\n<|end_body_0|>\n\n<|body_start_1|>\n while not self.exit:\n self.executor.chunk(self.maze, self.maze.characters)\n os.system('cls' if os.name == 'nt' else 'clear')\n self.maze.printMazeArray(self.maze.map, self.maze.characters)\n self.exchanger.update(self.maze)\n self.exchanger.publish(self.maze)\n time.sleep(0.82)\n<|end_body_1|>\n", "revision_id": "a6bd74648d97056bf7899b4de8e69e15af69afe1", "skeleton": "<|skeleton|>\nclass Simulator:\n \"\"\"A simulator class used to run the maze simulations based on a map and a set of characters\"\"\"\n\n def __init__(self, maze, exchanger):\n \"\"\"Initialize the simulator\"\"\"\n <|body_0|>\n\n def run(self):\n \"\"\"The run method for this thread\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Simulator:\n \"\"\"A simulator class used to run the maze simulations based on a map and a set of characters\"\"\"\n\n def __init__(self, maze, exchanger):\n \"\"\"Initialize the simulator\"\"\"\n threading.Thread.__init__(self)\n self.maze = maze\n self.exit = False\n self.executor = Executor()\n self.exchanger = exchanger\n\n def run(self):\n \"\"\"The run method for this thread\"\"\"\n while not self.exit:\n self.executor.chunk(self.maze, self.maze.characters)\n os.system('cls' if os.name == 'nt' else 'clear')\n self.maze.printMazeArray(self.maze.map, self.maze.characters)\n self.exchanger.update(self.maze)\n self.exchanger.publish(self.maze)\n time.sleep(0.82)\n", "source": "the_stack_v2_python_sparse", "source_path": "server/labyrinth/sim/simulator.py", "source_repo": "LeStarch/minoan-maze", "split": "val", "star_events_count": 0}
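`Simulator` calls `threading.Thread.__init__`, so `threading.Thread` is evidently the intended base class, and `self.exit` is a cooperative shutdown flag polled once per ~0.82 s tick. A minimal sketch of the same loop using `threading.Event`, which doubles as an interruptible sleep; all names here are illustrative, not from the record:

import threading
import time

class TickLoop(threading.Thread):
    def __init__(self, tick, interval: float = 0.82) -> None:
        super().__init__()
        self._tick = tick              # one simulation step, e.g. advance + redraw + publish
        self._interval = interval
        self._stop_event = threading.Event()

    def run(self) -> None:
        # Event.wait doubles as the sleep, so stop() takes effect promptly.
        while not self._stop_event.is_set():
            self._tick()
            self._stop_event.wait(self._interval)

    def stop(self) -> None:
        self._stop_event.set()

loop = TickLoop(lambda: print("tick"))
loop.start()
time.sleep(2)      # let a couple of ticks run
loop.stop()
loop.join()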
{"blob_id": "e3971317e01666e071acbfd95b35537556be8ab2", "bodies": ["super().__init__(coordinator)\nself.system_id = zone_data[AZD_SYSTEM_ID]\nself.zone_id = zone_id\nself._attr_device_info = DeviceInfo(identifiers={(DOMAIN, zone_id)}, manufacturer=MANUFACTURER, name=zone_data[AZD_NAME], via_device=(DOMAIN, self.system_id))", "value = None\nif (zone := self.coordinator.data[AZD_ZONES].get(self.zone_id)):\n value = zone.get(key)\nreturn value"], "bodies_text": "<|body_start_0|>\n super().__init__(coordinator)\n self.system_id = zone_data[AZD_SYSTEM_ID]\n self.zone_id = zone_id\n self._attr_device_info = DeviceInfo(identifiers={(DOMAIN, zone_id)}, manufacturer=MANUFACTURER, name=zone_data[AZD_NAME], via_device=(DOMAIN, self.system_id))\n<|end_body_0|>\n\n<|body_start_1|>\n value = None\n if (zone := self.coordinator.data[AZD_ZONES].get(self.zone_id)):\n value = zone.get(key)\n return value\n<|end_body_1|>\n", "class_docstring": "Define an Airzone Cloud Zone entity.", "class_name": "AirzoneZoneEntity", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AirzoneZoneEntity:\n \"\"\"Define an Airzone Cloud Zone entity.\"\"\"\n\n def __init__(self, coordinator: AirzoneUpdateCoordinator, zone_id: str, zone_data: dict[str, Any]) -> None:\n \"\"\"Initialize.\"\"\"\n <|body_0|>\n\n def get_airzone_value(self, key: str) -> Any:\n \"\"\"Return zone value by key.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(coordinator)\n self.system_id = zone_data[AZD_SYSTEM_ID]\n self.zone_id = zone_id\n self._attr_device_info = DeviceInfo(identifiers={(DOMAIN, zone_id)}, manufacturer=MANUFACTURER, name=zone_data[AZD_NAME], via_device=(DOMAIN, self.system_id))\n<|end_body_0|>\n\n<|body_start_1|>\n value = None\n if (zone := self.coordinator.data[AZD_ZONES].get(self.zone_id)):\n value = zone.get(key)\n return value\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000337", "length_bytes": 4444, "license_type": "permissive", "methods": [{"docstring": "Initialize.", "name": "__init__", "signature": "def __init__(self, coordinator: AirzoneUpdateCoordinator, zone_id: str, zone_data: dict[str, Any]) -> None"}, {"docstring": "Return zone value by key.", "name": "get_airzone_value", "signature": "def get_airzone_value(self, key: str) -> Any"}], "n_methods": 2, "prompt": "Implement the Python class `AirzoneZoneEntity` described below.\n\nClass description:\nDefine an Airzone Cloud Zone entity.\n\nMethod signatures and docstrings:\n- def __init__(self, coordinator: AirzoneUpdateCoordinator, zone_id: str, zone_data: dict[str, Any]) -> None: Initialize.\n- def get_airzone_value(self, key: str) -> Any: Return zone value by key.", "prompted_full_text": "Implement the Python class `AirzoneZoneEntity` described below.\n\nClass description:\nDefine an Airzone Cloud Zone entity.\n\nMethod signatures and docstrings:\n- def __init__(self, coordinator: AirzoneUpdateCoordinator, zone_id: str, zone_data: dict[str, Any]) -> None: Initialize.\n- def get_airzone_value(self, key: str) -> Any: Return zone value by key.\n\n<|skeleton|>\nclass AirzoneZoneEntity:\n \"\"\"Define an Airzone Cloud Zone entity.\"\"\"\n\n def __init__(self, coordinator: AirzoneUpdateCoordinator, zone_id: str, zone_data: dict[str, Any]) -> None:\n \"\"\"Initialize.\"\"\"\n <|body_0|>\n\n def get_airzone_value(self, key: str) -> Any:\n \"\"\"Return zone value by key.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n 
super().__init__(coordinator)\n self.system_id = zone_data[AZD_SYSTEM_ID]\n self.zone_id = zone_id\n self._attr_device_info = DeviceInfo(identifiers={(DOMAIN, zone_id)}, manufacturer=MANUFACTURER, name=zone_data[AZD_NAME], via_device=(DOMAIN, self.system_id))\n<|end_body_0|>\n\n<|body_start_1|>\n value = None\n if (zone := self.coordinator.data[AZD_ZONES].get(self.zone_id)):\n value = zone.get(key)\n return value\n<|end_body_1|>\n", "revision_id": "80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743", "skeleton": "<|skeleton|>\nclass AirzoneZoneEntity:\n \"\"\"Define an Airzone Cloud Zone entity.\"\"\"\n\n def __init__(self, coordinator: AirzoneUpdateCoordinator, zone_id: str, zone_data: dict[str, Any]) -> None:\n \"\"\"Initialize.\"\"\"\n <|body_0|>\n\n def get_airzone_value(self, key: str) -> Any:\n \"\"\"Return zone value by key.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class AirzoneZoneEntity:\n \"\"\"Define an Airzone Cloud Zone entity.\"\"\"\n\n def __init__(self, coordinator: AirzoneUpdateCoordinator, zone_id: str, zone_data: dict[str, Any]) -> None:\n \"\"\"Initialize.\"\"\"\n super().__init__(coordinator)\n self.system_id = zone_data[AZD_SYSTEM_ID]\n self.zone_id = zone_id\n self._attr_device_info = DeviceInfo(identifiers={(DOMAIN, zone_id)}, manufacturer=MANUFACTURER, name=zone_data[AZD_NAME], via_device=(DOMAIN, self.system_id))\n\n def get_airzone_value(self, key: str) -> Any:\n \"\"\"Return zone value by key.\"\"\"\n value = None\n if (zone := self.coordinator.data[AZD_ZONES].get(self.zone_id)):\n value = zone.get(key)\n return value\n", "source": "the_stack_v2_python_sparse", "source_path": "homeassistant/components/airzone_cloud/entity.py", "source_repo": "home-assistant/core", "split": "val", "star_events_count": 35501}
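`get_airzone_value` in the record above guards against a zone disappearing between coordinator updates: `dict.get` plus the walrus operator yields `None` instead of raising `KeyError`. The same pattern isolated, with a made-up data layout standing in for `AZD_ZONES`:

from typing import Any

data = {"zones": {"zone1": {"name": "Living room", "temp": 21.5}}}

def get_zone_value(zone_id: str, key: str) -> Any:
    value = None
    # The walrus binds the zone dict only when the lookup succeeds.
    if (zone := data["zones"].get(zone_id)):
        value = zone.get(key)
    return value

print(get_zone_value("zone1", "temp"))  # 21.5
print(get_zone_value("zone9", "temp"))  # None: unknown zone, no KeyError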
{"blob_id": "5f8d630ae8bed336ba8b3708f5cd597b7bdce910", "bodies": ["check_scalar(self.epsilon, 'epsilon', float, min_val=0.0)\nself.policy_name = f'logistic_ucb_{self.epsilon}'\nsuper().__post_init__()", "theta = np.array([model.predict_proba(context) for model in self.model_list]).flatten()\nstd = np.array([np.sqrt(np.sum(model._q ** (-1) * context ** 2)) for model in self.model_list]).flatten()\nucb_score = theta + self.epsilon * std\nreturn ucb_score.argsort()[::-1][:self.len_list]"], "bodies_text": "<|body_start_0|>\n check_scalar(self.epsilon, 'epsilon', float, min_val=0.0)\n self.policy_name = f'logistic_ucb_{self.epsilon}'\n super().__post_init__()\n<|end_body_0|>\n\n<|body_start_1|>\n theta = np.array([model.predict_proba(context) for model in self.model_list]).flatten()\n std = np.array([np.sqrt(np.sum(model._q ** (-1) * context ** 2)) for model in self.model_list]).flatten()\n ucb_score = theta + self.epsilon * std\n return ucb_score.argsort()[::-1][:self.len_list]\n<|end_body_1|>\n", "class_docstring": "Logistic Upper Confidence Bound. Parameters ------------ dim: int Number of dimensions of context vectors. n_actions: int Number of actions. len_list: int, default=1 Length of a list of actions recommended in each impression. When Open Bandit Dataset is used, 3 should be set. batch_size: int, default=1 Number of samples used in a batch parameter update. random_state: int, default=None Controls the random seed in sampling actions. alpha_: float, default=1. Prior parameter for the online logistic regression. lambda_: float, default=1. Regularization hyperparameter for the online logistic regression. epsilon: float, default=0. Exploration hyperparameter that must be greater than or equal to 0.0", "class_name": "LogisticUCB", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LogisticUCB:\n \"\"\"Logistic Upper Confidence Bound. Parameters ------------ dim: int Number of dimensions of context vectors. n_actions: int Number of actions. len_list: int, default=1 Length of a list of actions recommended in each impression. When Open Bandit Dataset is used, 3 should be set. batch_size: int, default=1 Number of samples used in a batch parameter update. random_state: int, default=None Controls the random seed in sampling actions. alpha_: float, default=1. Prior parameter for the online logistic regression. lambda_: float, default=1. Regularization hyperparameter for the online logistic regression. epsilon: float, default=0. Exploration hyperparameter that must be greater than or equal to 0.0\"\"\"\n\n def __post_init__(self) -> None:\n \"\"\"Initialize class.\"\"\"\n <|body_0|>\n\n def select_action(self, context: np.ndarray) -> np.ndarray:\n \"\"\"Select action for new data. Parameters ------------ context: array-like, shape (1, dim_context) Observed context vector. 
Returns ---------- selected_actions: array-like, shape (len_list, ) List of selected actions.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n check_scalar(self.epsilon, 'epsilon', float, min_val=0.0)\n self.policy_name = f'logistic_ucb_{self.epsilon}'\n super().__post_init__()\n<|end_body_0|>\n\n<|body_start_1|>\n theta = np.array([model.predict_proba(context) for model in self.model_list]).flatten()\n std = np.array([np.sqrt(np.sum(model._q ** (-1) * context ** 2)) for model in self.model_list]).flatten()\n ucb_score = theta + self.epsilon * std\n return ucb_score.argsort()[::-1][:self.len_list]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000338", "length_bytes": 10908, "license_type": "permissive", "methods": [{"docstring": "Initialize class.", "name": "__post_init__", "signature": "def __post_init__(self) -> None"}, {"docstring": "Select action for new data. Parameters ------------ context: array-like, shape (1, dim_context) Observed context vector. Returns ---------- selected_actions: array-like, shape (len_list, ) List of selected actions.", "name": "select_action", "signature": "def select_action(self, context: np.ndarray) -> np.ndarray"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_031963", "prompt": "Implement the Python class `LogisticUCB` described below.\n\nClass description:\nLogistic Upper Confidence Bound. Parameters ------------ dim: int Number of dimensions of context vectors. n_actions: int Number of actions. len_list: int, default=1 Length of a list of actions recommended in each impression. When Open Bandit Dataset is used, 3 should be set. batch_size: int, default=1 Number of samples used in a batch parameter update. random_state: int, default=None Controls the random seed in sampling actions. alpha_: float, default=1. Prior parameter for the online logistic regression. lambda_: float, default=1. Regularization hyperparameter for the online logistic regression. epsilon: float, default=0. Exploration hyperparameter that must be greater than or equal to 0.0\n\nMethod signatures and docstrings:\n- def __post_init__(self) -> None: Initialize class.\n- def select_action(self, context: np.ndarray) -> np.ndarray: Select action for new data. Parameters ------------ context: array-like, shape (1, dim_context) Observed context vector. Returns ---------- selected_actions: array-like, shape (len_list, ) List of selected actions.", "prompted_full_text": "Implement the Python class `LogisticUCB` described below.\n\nClass description:\nLogistic Upper Confidence Bound. Parameters ------------ dim: int Number of dimensions of context vectors. n_actions: int Number of actions. len_list: int, default=1 Length of a list of actions recommended in each impression. When Open Bandit Dataset is used, 3 should be set. batch_size: int, default=1 Number of samples used in a batch parameter update. random_state: int, default=None Controls the random seed in sampling actions. alpha_: float, default=1. Prior parameter for the online logistic regression. lambda_: float, default=1. Regularization hyperparameter for the online logistic regression. epsilon: float, default=0. Exploration hyperparameter that must be greater than or equal to 0.0\n\nMethod signatures and docstrings:\n- def __post_init__(self) -> None: Initialize class.\n- def select_action(self, context: np.ndarray) -> np.ndarray: Select action for new data. Parameters ------------ context: array-like, shape (1, dim_context) Observed context vector. 
Returns ---------- selected_actions: array-like, shape (len_list, ) List of selected actions.\n\n<|skeleton|>\nclass LogisticUCB:\n \"\"\"Logistic Upper Confidence Bound. Parameters ------------ dim: int Number of dimensions of context vectors. n_actions: int Number of actions. len_list: int, default=1 Length of a list of actions recommended in each impression. When Open Bandit Dataset is used, 3 should be set. batch_size: int, default=1 Number of samples used in a batch parameter update. random_state: int, default=None Controls the random seed in sampling actions. alpha_: float, default=1. Prior parameter for the online logistic regression. lambda_: float, default=1. Regularization hyperparameter for the online logistic regression. epsilon: float, default=0. Exploration hyperparameter that must be greater than or equal to 0.0\"\"\"\n\n def __post_init__(self) -> None:\n \"\"\"Initialize class.\"\"\"\n <|body_0|>\n\n def select_action(self, context: np.ndarray) -> np.ndarray:\n \"\"\"Select action for new data. Parameters ------------ context: array-like, shape (1, dim_context) Observed context vector. Returns ---------- selected_actions: array-like, shape (len_list, ) List of selected actions.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n check_scalar(self.epsilon, 'epsilon', float, min_val=0.0)\n self.policy_name = f'logistic_ucb_{self.epsilon}'\n super().__post_init__()\n<|end_body_0|>\n\n<|body_start_1|>\n theta = np.array([model.predict_proba(context) for model in self.model_list]).flatten()\n std = np.array([np.sqrt(np.sum(model._q ** (-1) * context ** 2)) for model in self.model_list]).flatten()\n ucb_score = theta + self.epsilon * std\n return ucb_score.argsort()[::-1][:self.len_list]\n<|end_body_1|>\n", "revision_id": "53598edab284b4364d127ec5662137de3f9c1206", "skeleton": "<|skeleton|>\nclass LogisticUCB:\n \"\"\"Logistic Upper Confidence Bound. Parameters ------------ dim: int Number of dimensions of context vectors. n_actions: int Number of actions. len_list: int, default=1 Length of a list of actions recommended in each impression. When Open Bandit Dataset is used, 3 should be set. batch_size: int, default=1 Number of samples used in a batch parameter update. random_state: int, default=None Controls the random seed in sampling actions. alpha_: float, default=1. Prior parameter for the online logistic regression. lambda_: float, default=1. Regularization hyperparameter for the online logistic regression. epsilon: float, default=0. Exploration hyperparameter that must be greater than or equal to 0.0\"\"\"\n\n def __post_init__(self) -> None:\n \"\"\"Initialize class.\"\"\"\n <|body_0|>\n\n def select_action(self, context: np.ndarray) -> np.ndarray:\n \"\"\"Select action for new data. Parameters ------------ context: array-like, shape (1, dim_context) Observed context vector. Returns ---------- selected_actions: array-like, shape (len_list, ) List of selected actions.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class LogisticUCB:\n \"\"\"Logistic Upper Confidence Bound. Parameters ------------ dim: int Number of dimensions of context vectors. n_actions: int Number of actions. len_list: int, default=1 Length of a list of actions recommended in each impression. When Open Bandit Dataset is used, 3 should be set. batch_size: int, default=1 Number of samples used in a batch parameter update. 
random_state: int, default=None Controls the random seed in sampling actions. alpha_: float, default=1. Prior parameter for the online logistic regression. lambda_: float, default=1. Regularization hyperparameter for the online logistic regression. epsilon: float, default=0. Exploration hyperparameter that must be greater than or equal to 0.0\"\"\"\n\n def __post_init__(self) -> None:\n \"\"\"Initialize class.\"\"\"\n check_scalar(self.epsilon, 'epsilon', float, min_val=0.0)\n self.policy_name = f'logistic_ucb_{self.epsilon}'\n super().__post_init__()\n\n def select_action(self, context: np.ndarray) -> np.ndarray:\n \"\"\"Select action for new data. Parameters ------------ context: array-like, shape (1, dim_context) Observed context vector. Returns ---------- selected_actions: array-like, shape (len_list, ) List of selected actions.\"\"\"\n theta = np.array([model.predict_proba(context) for model in self.model_list]).flatten()\n std = np.array([np.sqrt(np.sum(model._q ** (-1) * context ** 2)) for model in self.model_list]).flatten()\n ucb_score = theta + self.epsilon * std\n return ucb_score.argsort()[::-1][:self.len_list]\n", "source": "the_stack_v2_python_sparse", "source_path": "obp/policy/logistic.py", "source_repo": "han20192019/newRL", "split": "val", "star_events_count": 0}
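`select_action` in the record above reduces to "posterior mean plus epsilon-scaled uncertainty, then take the `len_list` highest-scoring actions"; `argsort()[::-1]` converts NumPy's ascending sort into a descending ranking. A NumPy-only sketch with made-up per-action estimates (no online logistic regression models here):

import numpy as np

theta = np.array([0.31, 0.52, 0.48, 0.10])  # predicted reward per action
std = np.array([0.20, 0.02, 0.15, 0.30])    # per-action uncertainty
epsilon, len_list = 1.0, 2

ucb_score = theta + epsilon * std           # optimism in the face of uncertainty
# argsort is ascending; [::-1] flips it to descending before truncating.
selected = ucb_score.argsort()[::-1][:len_list]
print(ucb_score)  # [0.51 0.54 0.63 0.4 ]
print(selected)   # [2 1]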
{"blob_id": "e79169cb4dd3e72ae82dc4ace9ff963f4bd274b8", "bodies": ["self.log_dev_name = device_name\nself.__driver_logger = logging.getLogger(__name__)\nself.__is_log_out = True\nif len(self.__driver_logger.handlers) == 0:\n self.__driver_logger.propagate = False\n if len(GlobalModule.EM_LOGGER.handlers) != 2:\n raise ValueError('not 2rotate handler')\n for i in range(len(GlobalModule.EM_LOGGER.handlers)):\n self._sethandler(i)", "if self.__is_log_out is False:\n return False\nif not self.log_dev_name and device_name:\n self.log_dev_name = device_name\nif not device_name:\n device_name = self.log_dev_name\nframe = inspect.currentframe(2)\nlog_line_no = str(frame.f_lineno)\nlog_func_name = frame.f_code.co_name\nout_message = '(%(module)s::%(funcName)s:%(lineno)s):{%(device)s}:%(message)s' % {'module': log_module, 'funcName': log_func_name, 'lineno': log_line_no, 'device': device_name, 'message': log_message}\nif log_level == 'DEBUG':\n self.__driver_logger.debug(out_message)\nelif log_level == 'INFO':\n self.__driver_logger.info(out_message)\nelif log_level == 'WARN':\n self.__driver_logger.warning(out_message)\nelif log_level == 'ERROR':\n self.__driver_logger.error(out_message)\nelse:\n return False\nreturn True", "time_rotate_handle = GlobalModule.EM_LOGGER.handlers[i]\nhandler = time_rotate_handle.getFileHandler()\nformatter = EmLoggingTool.Formatter('[%(asctime)s] [%(levelname)s] [tid=%(thread)d] %(message)s')\nhandler.setFormatter(formatter)\nself.__driver_logger.addHandler(handler)"], "bodies_text": "<|body_start_0|>\n self.log_dev_name = device_name\n self.__driver_logger = logging.getLogger(__name__)\n self.__is_log_out = True\n if len(self.__driver_logger.handlers) == 0:\n self.__driver_logger.propagate = False\n if len(GlobalModule.EM_LOGGER.handlers) != 2:\n raise ValueError('not 2rotate handler')\n for i in range(len(GlobalModule.EM_LOGGER.handlers)):\n self._sethandler(i)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.__is_log_out is False:\n return False\n if not self.log_dev_name and device_name:\n self.log_dev_name = device_name\n if not device_name:\n device_name = self.log_dev_name\n frame = inspect.currentframe(2)\n log_line_no = str(frame.f_lineno)\n log_func_name = frame.f_code.co_name\n out_message = '(%(module)s::%(funcName)s:%(lineno)s):{%(device)s}:%(message)s' % {'module': log_module, 'funcName': log_func_name, 'lineno': log_line_no, 'device': device_name, 'message': log_message}\n if log_level == 'DEBUG':\n self.__driver_logger.debug(out_message)\n elif log_level == 'INFO':\n self.__driver_logger.info(out_message)\n elif log_level == 'WARN':\n self.__driver_logger.warning(out_message)\n elif log_level == 'ERROR':\n self.__driver_logger.error(out_message)\n else:\n return False\n return True\n<|end_body_1|>\n\n<|body_start_2|>\n time_rotate_handle = GlobalModule.EM_LOGGER.handlers[i]\n handler = time_rotate_handle.getFileHandler()\n formatter = EmLoggingTool.Formatter('[%(asctime)s] [%(levelname)s] [tid=%(thread)d] %(message)s')\n handler.setFormatter(formatter)\n self.__driver_logger.addHandler(handler)\n<|end_body_2|>\n", "class_docstring": "Log output class for the individual section on the driver", "class_name": "EmDriverCommonUtilityLog", "detected_licenses": ["LicenseRef-scancode-unknown-license-reference", "Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass EmDriverCommonUtilityLog:\n \"\"\"Log output class for the individual section on the driver\"\"\"\n\n def __init__(self, device_name=None):\n 
\"\"\"Constructor\"\"\"\n <|body_0|>\n\n def logging(self, device_name=None, log_level=None, log_message=None, log_module=' '):\n \"\"\"Log output (Individual section on the driver) Explanation about parameter: device_name: Device name log_level: Log level (DEBUG,INFO,WARN,ERROR) log_message: Log message log_module: Module name (Make sure to input \"_name_\" as the argument.) Explanation about the return value: Log output result : Boolean\"\"\"\n <|body_1|>\n\n def _sethandler(self, i):\n \"\"\"Log output (Individual section on the driver) Explanation about parameter: i: place where handler is acquired\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.log_dev_name = device_name\n self.__driver_logger = logging.getLogger(__name__)\n self.__is_log_out = True\n if len(self.__driver_logger.handlers) == 0:\n self.__driver_logger.propagate = False\n if len(GlobalModule.EM_LOGGER.handlers) != 2:\n raise ValueError('not 2rotate handler')\n for i in range(len(GlobalModule.EM_LOGGER.handlers)):\n self._sethandler(i)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.__is_log_out is False:\n return False\n if not self.log_dev_name and device_name:\n self.log_dev_name = device_name\n if not device_name:\n device_name = self.log_dev_name\n frame = inspect.currentframe(2)\n log_line_no = str(frame.f_lineno)\n log_func_name = frame.f_code.co_name\n out_message = '(%(module)s::%(funcName)s:%(lineno)s):{%(device)s}:%(message)s' % {'module': log_module, 'funcName': log_func_name, 'lineno': log_line_no, 'device': device_name, 'message': log_message}\n if log_level == 'DEBUG':\n self.__driver_logger.debug(out_message)\n elif log_level == 'INFO':\n self.__driver_logger.info(out_message)\n elif log_level == 'WARN':\n self.__driver_logger.warning(out_message)\n elif log_level == 'ERROR':\n self.__driver_logger.error(out_message)\n else:\n return False\n return True\n<|end_body_1|>\n\n<|body_start_2|>\n time_rotate_handle = GlobalModule.EM_LOGGER.handlers[i]\n handler = time_rotate_handle.getFileHandler()\n formatter = EmLoggingTool.Formatter('[%(asctime)s] [%(levelname)s] [tid=%(thread)d] %(message)s')\n handler.setFormatter(formatter)\n self.__driver_logger.addHandler(handler)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000339", "length_bytes": 3487, "license_type": "permissive", "methods": [{"docstring": "Constructor", "name": "__init__", "signature": "def __init__(self, device_name=None)"}, {"docstring": "Log output (Individual section on the driver) Explanation about parameter: device_name: Device name log_level: Log level (DEBUG,INFO,WARN,ERROR) log_message: Log message log_module: Module name (Make sure to input \"_name_\" as the argument.) 
Explanation about the return value: Log output result : Boolean", "name": "logging", "signature": "def logging(self, device_name=None, log_level=None, log_message=None, log_module=' ')"}, {"docstring": "Log output (Individual section on the driver) Explanation about parameter: i: place where handler is acquired", "name": "_sethandler", "signature": "def _sethandler(self, i)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_test_001281", "prompt": "Implement the Python class `EmDriverCommonUtilityLog` described below.\n\nClass description:\nLog output class for the individual section on the driver\n\nMethod signatures and docstrings:\n- def __init__(self, device_name=None): Constructor\n- def logging(self, device_name=None, log_level=None, log_message=None, log_module=' '): Log output (Individual section on the driver) Explanation about parameter: device_name: Device name log_level: Log level (DEBUG,INFO,WARN,ERROR) log_message: Log message log_module: Module name (Make sure to input \"_name_\" as the argument.) Explanation about the return value: Log output result : Boolean\n- def _sethandler(self, i): Log output (Individual section on the driver) Explanation about parameter: i: place where handler is acquired", "prompted_full_text": "Implement the Python class `EmDriverCommonUtilityLog` described below.\n\nClass description:\nLog output class for the individual section on the driver\n\nMethod signatures and docstrings:\n- def __init__(self, device_name=None): Constructor\n- def logging(self, device_name=None, log_level=None, log_message=None, log_module=' '): Log output (Individual section on the driver) Explanation about parameter: device_name: Device name log_level: Log level (DEBUG,INFO,WARN,ERROR) log_message: Log message log_module: Module name (Make sure to input \"_name_\" as the argument.) Explanation about the return value: Log output result : Boolean\n- def _sethandler(self, i): Log output (Individual section on the driver) Explanation about parameter: i: place where handler is acquired\n\n<|skeleton|>\nclass EmDriverCommonUtilityLog:\n \"\"\"Log output class for the individual section on the driver\"\"\"\n\n def __init__(self, device_name=None):\n \"\"\"Constructor\"\"\"\n <|body_0|>\n\n def logging(self, device_name=None, log_level=None, log_message=None, log_module=' '):\n \"\"\"Log output (Individual section on the driver) Explanation about parameter: device_name: Device name log_level: Log level (DEBUG,INFO,WARN,ERROR) log_message: Log message log_module: Module name (Make sure to input \"_name_\" as the argument.) 
Explanation about the return value: Log output result : Boolean\"\"\"\n <|body_1|>\n\n def _sethandler(self, i):\n \"\"\"Log output (Individual section on the driver) Explanation about parameter: i: place where handler is acquired\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.log_dev_name = device_name\n self.__driver_logger = logging.getLogger(__name__)\n self.__is_log_out = True\n if len(self.__driver_logger.handlers) == 0:\n self.__driver_logger.propagate = False\n if len(GlobalModule.EM_LOGGER.handlers) != 2:\n raise ValueError('not 2rotate handler')\n for i in range(len(GlobalModule.EM_LOGGER.handlers)):\n self._sethandler(i)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.__is_log_out is False:\n return False\n if not self.log_dev_name and device_name:\n self.log_dev_name = device_name\n if not device_name:\n device_name = self.log_dev_name\n frame = inspect.currentframe(2)\n log_line_no = str(frame.f_lineno)\n log_func_name = frame.f_code.co_name\n out_message = '(%(module)s::%(funcName)s:%(lineno)s):{%(device)s}:%(message)s' % {'module': log_module, 'funcName': log_func_name, 'lineno': log_line_no, 'device': device_name, 'message': log_message}\n if log_level == 'DEBUG':\n self.__driver_logger.debug(out_message)\n elif log_level == 'INFO':\n self.__driver_logger.info(out_message)\n elif log_level == 'WARN':\n self.__driver_logger.warning(out_message)\n elif log_level == 'ERROR':\n self.__driver_logger.error(out_message)\n else:\n return False\n return True\n<|end_body_1|>\n\n<|body_start_2|>\n time_rotate_handle = GlobalModule.EM_LOGGER.handlers[i]\n handler = time_rotate_handle.getFileHandler()\n formatter = EmLoggingTool.Formatter('[%(asctime)s] [%(levelname)s] [tid=%(thread)d] %(message)s')\n handler.setFormatter(formatter)\n self.__driver_logger.addHandler(handler)\n<|end_body_2|>\n", "revision_id": "e550d1b5ec9419f1fb3eb6e058ce46b57c92ee2f", "skeleton": "<|skeleton|>\nclass EmDriverCommonUtilityLog:\n \"\"\"Log output class for the individual section on the driver\"\"\"\n\n def __init__(self, device_name=None):\n \"\"\"Constructor\"\"\"\n <|body_0|>\n\n def logging(self, device_name=None, log_level=None, log_message=None, log_module=' '):\n \"\"\"Log output (Individual section on the driver) Explanation about parameter: device_name: Device name log_level: Log level (DEBUG,INFO,WARN,ERROR) log_message: Log message log_module: Module name (Make sure to input \"_name_\" as the argument.) 
Explanation about the return value: Log output result : Boolean\"\"\"\n <|body_1|>\n\n def _sethandler(self, i):\n \"\"\"Log output (Individual section on the driver) Explanation about parameter: i: place where handler is acquired\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class EmDriverCommonUtilityLog:\n \"\"\"Log output class for the individual section on the driver\"\"\"\n\n def __init__(self, device_name=None):\n \"\"\"Constructor\"\"\"\n self.log_dev_name = device_name\n self.__driver_logger = logging.getLogger(__name__)\n self.__is_log_out = True\n if len(self.__driver_logger.handlers) == 0:\n self.__driver_logger.propagate = False\n if len(GlobalModule.EM_LOGGER.handlers) != 2:\n raise ValueError('not 2rotate handler')\n for i in range(len(GlobalModule.EM_LOGGER.handlers)):\n self._sethandler(i)\n\n def logging(self, device_name=None, log_level=None, log_message=None, log_module=' '):\n \"\"\"Log output (Individual section on the driver) Explanation about parameter: device_name: Device name log_level: Log level (DEBUG,INFO,WARN,ERROR) log_message: Log message log_module: Module name (Make sure to input \"_name_\" as the argument.) Explanation about the return value: Log output result : Boolean\"\"\"\n if self.__is_log_out is False:\n return False\n if not self.log_dev_name and device_name:\n self.log_dev_name = device_name\n if not device_name:\n device_name = self.log_dev_name\n frame = inspect.currentframe(2)\n log_line_no = str(frame.f_lineno)\n log_func_name = frame.f_code.co_name\n out_message = '(%(module)s::%(funcName)s:%(lineno)s):{%(device)s}:%(message)s' % {'module': log_module, 'funcName': log_func_name, 'lineno': log_line_no, 'device': device_name, 'message': log_message}\n if log_level == 'DEBUG':\n self.__driver_logger.debug(out_message)\n elif log_level == 'INFO':\n self.__driver_logger.info(out_message)\n elif log_level == 'WARN':\n self.__driver_logger.warning(out_message)\n elif log_level == 'ERROR':\n self.__driver_logger.error(out_message)\n else:\n return False\n return True\n\n def _sethandler(self, i):\n \"\"\"Log output (Individual section on the driver) Explanation about parameter: i: place where handler is acquired\"\"\"\n time_rotate_handle = GlobalModule.EM_LOGGER.handlers[i]\n handler = time_rotate_handle.getFileHandler()\n formatter = EmLoggingTool.Formatter('[%(asctime)s] [%(levelname)s] [tid=%(thread)d] %(message)s')\n handler.setFormatter(formatter)\n self.__driver_logger.addHandler(handler)\n", "source": "the_stack_v2_python_sparse", "source_path": "lib/DriverUtility/EmDriverCommonUtilityLog.py", "source_repo": "lixiaochun/element-manager", "split": "val", "star_events_count": 0}
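Note that `inspect.currentframe(2)` in the record above only works on Python 2, where `inspect.currentframe` was aliased to `sys._getframe` and therefore accepted a depth argument; on Python 3 it takes no arguments, and the closest equivalent is the CPython-specific `sys._getframe(depth)`. A small Python 3 sketch of the same caller-tagged formatting; the helper and device names are invented:

import sys

def caller_tagged(message: str, device: str, depth: int = 0) -> str:
    # sys._getframe is CPython-specific; depth + 1 skips this helper's own frame.
    frame = sys._getframe(depth + 1)
    return '(%s:%s):{%s}:%s' % (frame.f_code.co_name, frame.f_lineno, device, message)

def do_work() -> None:
    print(caller_tagged('link up', 'router-01'))

do_work()  # prints something like: (do_work:9):{router-01}:link up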
{"blob_id": "824298a29e4d1a8a99a242b3fed418152ee0cb87", "bodies": ["if root is None:\n return False\nif root.left is None and root.right is None:\n return root.val is sum_value\nreturn self.hasPathSum2(root.left, sum_value - root.val) or self.hasPathSum2(root.right, sum_value - root.val)", "if root is None:\n return False\nlast_poped = False\njourney = [(root, root.val)]\nwhile journey:\n node, s = (journey[-1][0], journey[-1][1])\n if not node.left and (not node.right):\n if s == sum_value:\n return True\n else:\n last_poped = journey.pop()[0]\n if node.left is last_poped or node.right is last_poped:\n last_poped = journey.pop()[0]\n continue\n if node.left and node.left is not last_poped:\n journey.append((node.left, s + node.left.val))\n if node.right and node.right is not last_poped:\n journey.append((node.right, s + node.right.val))\nreturn False"], "bodies_text": "<|body_start_0|>\n if root is None:\n return False\n if root.left is None and root.right is None:\n return root.val is sum_value\n return self.hasPathSum2(root.left, sum_value - root.val) or self.hasPathSum2(root.right, sum_value - root.val)\n<|end_body_0|>\n\n<|body_start_1|>\n if root is None:\n return False\n last_poped = False\n journey = [(root, root.val)]\n while journey:\n node, s = (journey[-1][0], journey[-1][1])\n if not node.left and (not node.right):\n if s == sum_value:\n return True\n else:\n last_poped = journey.pop()[0]\n if node.left is last_poped or node.right is last_poped:\n last_poped = journey.pop()[0]\n continue\n if node.left and node.left is not last_poped:\n journey.append((node.left, s + node.left.val))\n if node.right and node.right is not last_poped:\n journey.append((node.right, s + node.right.val))\n return False\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def hasPathSum2(self, root, sum_value):\n \"\"\":type root: TreeNode :type sum: int :rtype: bool recursion\"\"\"\n <|body_0|>\n\n def hasPathSum(self, root, sum_value):\n \"\"\":type root: TreeNode :type sum: int :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if root is None:\n return False\n if root.left is None and root.right is None:\n return root.val is sum_value\n return self.hasPathSum2(root.left, sum_value - root.val) or self.hasPathSum2(root.right, sum_value - root.val)\n<|end_body_0|>\n\n<|body_start_1|>\n if root is None:\n return False\n last_poped = False\n journey = [(root, root.val)]\n while journey:\n node, s = (journey[-1][0], journey[-1][1])\n if not node.left and (not node.right):\n if s == sum_value:\n return True\n else:\n last_poped = journey.pop()[0]\n if node.left is last_poped or node.right is last_poped:\n last_poped = journey.pop()[0]\n continue\n if node.left and node.left is not last_poped:\n journey.append((node.left, s + node.left.val))\n if node.right and node.right is not last_poped:\n journey.append((node.right, s + node.right.val))\n return False\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000340", "length_bytes": 3972, "license_type": "no_license", "methods": [{"docstring": ":type root: TreeNode :type sum: int :rtype: bool recursion", "name": "hasPathSum2", "signature": "def hasPathSum2(self, root, sum_value)"}, {"docstring": ":type root: TreeNode :type sum: int :rtype: bool", "name": "hasPathSum", "signature": "def hasPathSum(self, root, sum_value)"}], "n_methods": 2, "original_id": 
"stack_v2_sparse_classes_30k_train_036559", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def hasPathSum2(self, root, sum_value): :type root: TreeNode :type sum: int :rtype: bool recursion\n- def hasPathSum(self, root, sum_value): :type root: TreeNode :type sum: int :rtype: bool", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def hasPathSum2(self, root, sum_value): :type root: TreeNode :type sum: int :rtype: bool recursion\n- def hasPathSum(self, root, sum_value): :type root: TreeNode :type sum: int :rtype: bool\n\n<|skeleton|>\nclass Solution:\n\n def hasPathSum2(self, root, sum_value):\n \"\"\":type root: TreeNode :type sum: int :rtype: bool recursion\"\"\"\n <|body_0|>\n\n def hasPathSum(self, root, sum_value):\n \"\"\":type root: TreeNode :type sum: int :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if root is None:\n return False\n if root.left is None and root.right is None:\n return root.val is sum_value\n return self.hasPathSum2(root.left, sum_value - root.val) or self.hasPathSum2(root.right, sum_value - root.val)\n<|end_body_0|>\n\n<|body_start_1|>\n if root is None:\n return False\n last_poped = False\n journey = [(root, root.val)]\n while journey:\n node, s = (journey[-1][0], journey[-1][1])\n if not node.left and (not node.right):\n if s == sum_value:\n return True\n else:\n last_poped = journey.pop()[0]\n if node.left is last_poped or node.right is last_poped:\n last_poped = journey.pop()[0]\n continue\n if node.left and node.left is not last_poped:\n journey.append((node.left, s + node.left.val))\n if node.right and node.right is not last_poped:\n journey.append((node.right, s + node.right.val))\n return False\n<|end_body_1|>\n", "revision_id": "d2e8b2dca40fc955045eb62e576c776bad8ee5f1", "skeleton": "<|skeleton|>\nclass Solution:\n\n def hasPathSum2(self, root, sum_value):\n \"\"\":type root: TreeNode :type sum: int :rtype: bool recursion\"\"\"\n <|body_0|>\n\n def hasPathSum(self, root, sum_value):\n \"\"\":type root: TreeNode :type sum: int :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def hasPathSum2(self, root, sum_value):\n \"\"\":type root: TreeNode :type sum: int :rtype: bool recursion\"\"\"\n if root is None:\n return False\n if root.left is None and root.right is None:\n return root.val is sum_value\n return self.hasPathSum2(root.left, sum_value - root.val) or self.hasPathSum2(root.right, sum_value - root.val)\n\n def hasPathSum(self, root, sum_value):\n \"\"\":type root: TreeNode :type sum: int :rtype: bool\"\"\"\n if root is None:\n return False\n last_poped = False\n journey = [(root, root.val)]\n while journey:\n node, s = (journey[-1][0], journey[-1][1])\n if not node.left and (not node.right):\n if s == sum_value:\n return True\n else:\n last_poped = journey.pop()[0]\n if node.left is last_poped or node.right is last_poped:\n last_poped = journey.pop()[0]\n continue\n if node.left and node.left is not last_poped:\n journey.append((node.left, s + node.left.val))\n if node.right and node.right is not last_poped:\n journey.append((node.right, s + node.right.val))\n return False\n", "source": "the_stack_v2_python_sparse", 
"source_path": "path-sum/solution.py", "source_repo": "childe/leetcode", "split": "val", "star_events_count": 2}
{"blob_id": "5fca7db2c06a0ad68b6e70dfb0e5cab039fb98d5", "bodies": ["es_config = es_router.merge_es_config(destination_config)\nif not isinstance(data, (list, tuple)):\n data = [data]\n_param = param.get('fields') if param else {}\ninput_param = dict(data[0], **_param)\nes_config = es_router.route(es_config, input_param=input_param)\noperation = es_config.get('operation', 'create')\nself._add_private_field(es_config, data, input_param)\nif operation == 'create':\n es_adapter.batch_create(es_config, data, input_param)\nelif operation == 'update':\n es_adapter.batch_update(es_config, data, input_param)\nelif operation == 'delete':\n es_adapter.batch_delete(es_config, data, input_param)\nelif operation == 'ids_same_prop_update':\n es_adapter.batch_update_with_props_by_ids(es_config, data, input_param)", "if data is None:\n app_log.warning('destination clear fail, because data is null, {0}', destination_config)\n return\nif data == {} or data == [] or data == ():\n app_log.warning('destination clear maybe fail, because data is empty, {0}', destination_config)\n data = [{}]\nes_config = es_router.merge_es_config(destination_config)\nif not isinstance(data, (list, tuple)):\n data = [data]\nes_config = es_router.route(es_config, input_param=data[0])\nif 'clear_policy' not in es_config or not es_config.get('clear_policy'):\n return\nclear_policy = es_config.get('clear_policy')\ndata = data[0] if len(data) > 0 else data\ndata = data or {}\nif param:\n param = param['fields'] if 'fields' in param else param\n data = dict(data, **param)\nif clear_policy == 'every_msg,all':\n es_adapter.delete_all_doc_by_type(es_config, data)\nelif clear_policy == 'every_msg,by_adminId':\n if not data.get('adminId'):\n app_log.error('destination clear fail, because adminId is null {0}', data)\n return\n es_adapter.delete_by_field(es_config, data, '_adminId', data['adminId'])", "if not param or not data_list or (not es_config.get('add_admin_id_field')):\n return\nif not param.get('adminId'):\n return\nadmin_id = param['adminId']\nfor item in data_list:\n item['_adminId'] = admin_id"], "bodies_text": "<|body_start_0|>\n es_config = es_router.merge_es_config(destination_config)\n if not isinstance(data, (list, tuple)):\n data = [data]\n _param = param.get('fields') if param else {}\n input_param = dict(data[0], **_param)\n es_config = es_router.route(es_config, input_param=input_param)\n operation = es_config.get('operation', 'create')\n self._add_private_field(es_config, data, input_param)\n if operation == 'create':\n es_adapter.batch_create(es_config, data, input_param)\n elif operation == 'update':\n es_adapter.batch_update(es_config, data, input_param)\n elif operation == 'delete':\n es_adapter.batch_delete(es_config, data, input_param)\n elif operation == 'ids_same_prop_update':\n es_adapter.batch_update_with_props_by_ids(es_config, data, input_param)\n<|end_body_0|>\n\n<|body_start_1|>\n if data is None:\n app_log.warning('destination clear fail, because data is null, {0}', destination_config)\n return\n if data == {} or data == [] or data == ():\n app_log.warning('destination clear maybe fail, because data is empty, {0}', destination_config)\n data = [{}]\n es_config = es_router.merge_es_config(destination_config)\n if not isinstance(data, (list, tuple)):\n data = [data]\n es_config = es_router.route(es_config, input_param=data[0])\n if 'clear_policy' not in es_config or not es_config.get('clear_policy'):\n return\n clear_policy = es_config.get('clear_policy')\n data = data[0] if len(data) > 0 else data\n data = 
data or {}\n if param:\n param = param['fields'] if 'fields' in param else param\n data = dict(data, **param)\n if clear_policy == 'every_msg,all':\n es_adapter.delete_all_doc_by_type(es_config, data)\n elif clear_policy == 'every_msg,by_adminId':\n if not data.get('adminId'):\n app_log.error('destination clear fail, because adminId is null {0}', data)\n return\n es_adapter.delete_by_field(es_config, data, '_adminId', data['adminId'])\n<|end_body_1|>\n\n<|body_start_2|>\n if not param or not data_list or (not es_config.get('add_admin_id_field')):\n return\n if not param.get('adminId'):\n return\n admin_id = param['adminId']\n for item in data_list:\n item['_adminId'] = admin_id\n<|end_body_2|>\n", "class_docstring": "数据流目的地为Elasticsearch", "class_name": "ElasticSearchDestination", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ElasticSearchDestination:\n \"\"\"数据流目的地为Elasticsearch\"\"\"\n\n def push(self, destination_config, data, param=None):\n \"\"\"将数据推到ES中,数据流的最后一步 :param destination_config: :param data: :param param :return:\"\"\"\n <|body_0|>\n\n def clear(self, destination_config, data, param=None):\n \"\"\"清除掉ES数据源中得所有数据 :param destination_config: :param data: :return:\"\"\"\n <|body_1|>\n\n def _add_private_field(self, es_config, data_list, param):\n \"\"\"添加搜索平台私有字段,主要是将adminId作为私有字段添加到数据结构中 :param es_config :param data_list: :param param: :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n es_config = es_router.merge_es_config(destination_config)\n if not isinstance(data, (list, tuple)):\n data = [data]\n _param = param.get('fields') if param else {}\n input_param = dict(data[0], **_param)\n es_config = es_router.route(es_config, input_param=input_param)\n operation = es_config.get('operation', 'create')\n self._add_private_field(es_config, data, input_param)\n if operation == 'create':\n es_adapter.batch_create(es_config, data, input_param)\n elif operation == 'update':\n es_adapter.batch_update(es_config, data, input_param)\n elif operation == 'delete':\n es_adapter.batch_delete(es_config, data, input_param)\n elif operation == 'ids_same_prop_update':\n es_adapter.batch_update_with_props_by_ids(es_config, data, input_param)\n<|end_body_0|>\n\n<|body_start_1|>\n if data is None:\n app_log.warning('destination clear fail, because data is null, {0}', destination_config)\n return\n if data == {} or data == [] or data == ():\n app_log.warning('destination clear maybe fail, because data is empty, {0}', destination_config)\n data = [{}]\n es_config = es_router.merge_es_config(destination_config)\n if not isinstance(data, (list, tuple)):\n data = [data]\n es_config = es_router.route(es_config, input_param=data[0])\n if 'clear_policy' not in es_config or not es_config.get('clear_policy'):\n return\n clear_policy = es_config.get('clear_policy')\n data = data[0] if len(data) > 0 else data\n data = data or {}\n if param:\n param = param['fields'] if 'fields' in param else param\n data = dict(data, **param)\n if clear_policy == 'every_msg,all':\n es_adapter.delete_all_doc_by_type(es_config, data)\n elif clear_policy == 'every_msg,by_adminId':\n if not data.get('adminId'):\n app_log.error('destination clear fail, because adminId is null {0}', data)\n return\n es_adapter.delete_by_field(es_config, data, '_adminId', data['adminId'])\n<|end_body_1|>\n\n<|body_start_2|>\n if not param or not data_list or (not es_config.get('add_admin_id_field')):\n return\n if not param.get('adminId'):\n return\n admin_id 
= param['adminId']\n for item in data_list:\n item['_adminId'] = admin_id\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000341", "length_bytes": 9641, "license_type": "permissive", "methods": [{"docstring": "将数据推到ES中,数据流的最后一步 :param destination_config: :param data: :param param :return:", "name": "push", "signature": "def push(self, destination_config, data, param=None)"}, {"docstring": "清除掉ES数据源中得所有数据 :param destination_config: :param data: :return:", "name": "clear", "signature": "def clear(self, destination_config, data, param=None)"}, {"docstring": "添加搜索平台私有字段,主要是将adminId作为私有字段添加到数据结构中 :param es_config :param data_list: :param param: :return:", "name": "_add_private_field", "signature": "def _add_private_field(self, es_config, data_list, param)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_011359", "prompt": "Implement the Python class `ElasticSearchDestination` described below.\n\nClass description:\n数据流目的地为Elasticsearch\n\nMethod signatures and docstrings:\n- def push(self, destination_config, data, param=None): 将数据推到ES中,数据流的最后一步 :param destination_config: :param data: :param param :return:\n- def clear(self, destination_config, data, param=None): 清除掉ES数据源中得所有数据 :param destination_config: :param data: :return:\n- def _add_private_field(self, es_config, data_list, param): 添加搜索平台私有字段,主要是将adminId作为私有字段添加到数据结构中 :param es_config :param data_list: :param param: :return:", "prompted_full_text": "Implement the Python class `ElasticSearchDestination` described below.\n\nClass description:\n数据流目的地为Elasticsearch\n\nMethod signatures and docstrings:\n- def push(self, destination_config, data, param=None): 将数据推到ES中,数据流的最后一步 :param destination_config: :param data: :param param :return:\n- def clear(self, destination_config, data, param=None): 清除掉ES数据源中得所有数据 :param destination_config: :param data: :return:\n- def _add_private_field(self, es_config, data_list, param): 添加搜索平台私有字段,主要是将adminId作为私有字段添加到数据结构中 :param es_config :param data_list: :param param: :return:\n\n<|skeleton|>\nclass ElasticSearchDestination:\n \"\"\"数据流目的地为Elasticsearch\"\"\"\n\n def push(self, destination_config, data, param=None):\n \"\"\"将数据推到ES中,数据流的最后一步 :param destination_config: :param data: :param param :return:\"\"\"\n <|body_0|>\n\n def clear(self, destination_config, data, param=None):\n \"\"\"清除掉ES数据源中得所有数据 :param destination_config: :param data: :return:\"\"\"\n <|body_1|>\n\n def _add_private_field(self, es_config, data_list, param):\n \"\"\"添加搜索平台私有字段,主要是将adminId作为私有字段添加到数据结构中 :param es_config :param data_list: :param param: :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n es_config = es_router.merge_es_config(destination_config)\n if not isinstance(data, (list, tuple)):\n data = [data]\n _param = param.get('fields') if param else {}\n input_param = dict(data[0], **_param)\n es_config = es_router.route(es_config, input_param=input_param)\n operation = es_config.get('operation', 'create')\n self._add_private_field(es_config, data, input_param)\n if operation == 'create':\n es_adapter.batch_create(es_config, data, input_param)\n elif operation == 'update':\n es_adapter.batch_update(es_config, data, input_param)\n elif operation == 'delete':\n es_adapter.batch_delete(es_config, data, input_param)\n elif operation == 'ids_same_prop_update':\n es_adapter.batch_update_with_props_by_ids(es_config, data, input_param)\n<|end_body_0|>\n\n<|body_start_1|>\n if data is None:\n app_log.warning('destination clear fail, because data is null, {0}', destination_config)\n 
return\n if data == {} or data == [] or data == ():\n app_log.warning('destination clear maybe fail, because data is empty, {0}', destination_config)\n data = [{}]\n es_config = es_router.merge_es_config(destination_config)\n if not isinstance(data, (list, tuple)):\n data = [data]\n es_config = es_router.route(es_config, input_param=data[0])\n if 'clear_policy' not in es_config or not es_config.get('clear_policy'):\n return\n clear_policy = es_config.get('clear_policy')\n data = data[0] if len(data) > 0 else data\n data = data or {}\n if param:\n param = param['fields'] if 'fields' in param else param\n data = dict(data, **param)\n if clear_policy == 'every_msg,all':\n es_adapter.delete_all_doc_by_type(es_config, data)\n elif clear_policy == 'every_msg,by_adminId':\n if not data.get('adminId'):\n app_log.error('destination clear fail, because adminId is null {0}', data)\n return\n es_adapter.delete_by_field(es_config, data, '_adminId', data['adminId'])\n<|end_body_1|>\n\n<|body_start_2|>\n if not param or not data_list or (not es_config.get('add_admin_id_field')):\n return\n if not param.get('adminId'):\n return\n admin_id = param['adminId']\n for item in data_list:\n item['_adminId'] = admin_id\n<|end_body_2|>\n", "revision_id": "a72b4e4d78b4375f69887e75abcc1e6a6782c551", "skeleton": "<|skeleton|>\nclass ElasticSearchDestination:\n \"\"\"数据流目的地为Elasticsearch\"\"\"\n\n def push(self, destination_config, data, param=None):\n \"\"\"将数据推到ES中,数据流的最后一步 :param destination_config: :param data: :param param :return:\"\"\"\n <|body_0|>\n\n def clear(self, destination_config, data, param=None):\n \"\"\"清除掉ES数据源中得所有数据 :param destination_config: :param data: :return:\"\"\"\n <|body_1|>\n\n def _add_private_field(self, es_config, data_list, param):\n \"\"\"添加搜索平台私有字段,主要是将adminId作为私有字段添加到数据结构中 :param es_config :param data_list: :param param: :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ElasticSearchDestination:\n \"\"\"数据流目的地为Elasticsearch\"\"\"\n\n def push(self, destination_config, data, param=None):\n \"\"\"将数据推到ES中,数据流的最后一步 :param destination_config: :param data: :param param :return:\"\"\"\n es_config = es_router.merge_es_config(destination_config)\n if not isinstance(data, (list, tuple)):\n data = [data]\n _param = param.get('fields') if param else {}\n input_param = dict(data[0], **_param)\n es_config = es_router.route(es_config, input_param=input_param)\n operation = es_config.get('operation', 'create')\n self._add_private_field(es_config, data, input_param)\n if operation == 'create':\n es_adapter.batch_create(es_config, data, input_param)\n elif operation == 'update':\n es_adapter.batch_update(es_config, data, input_param)\n elif operation == 'delete':\n es_adapter.batch_delete(es_config, data, input_param)\n elif operation == 'ids_same_prop_update':\n es_adapter.batch_update_with_props_by_ids(es_config, data, input_param)\n\n def clear(self, destination_config, data, param=None):\n \"\"\"清除掉ES数据源中得所有数据 :param destination_config: :param data: :return:\"\"\"\n if data is None:\n app_log.warning('destination clear fail, because data is null, {0}', destination_config)\n return\n if data == {} or data == [] or data == ():\n app_log.warning('destination clear maybe fail, because data is empty, {0}', destination_config)\n data = [{}]\n es_config = es_router.merge_es_config(destination_config)\n if not isinstance(data, (list, tuple)):\n 
data = [data]\n es_config = es_router.route(es_config, input_param=data[0])\n if 'clear_policy' not in es_config or not es_config.get('clear_policy'):\n return\n clear_policy = es_config.get('clear_policy')\n data = data[0] if len(data) > 0 else data\n data = data or {}\n if param:\n param = param['fields'] if 'fields' in param else param\n data = dict(data, **param)\n if clear_policy == 'every_msg,all':\n es_adapter.delete_all_doc_by_type(es_config, data)\n elif clear_policy == 'every_msg,by_adminId':\n if not data.get('adminId'):\n app_log.error('destination clear fail, because adminId is null {0}', data)\n return\n es_adapter.delete_by_field(es_config, data, '_adminId', data['adminId'])\n\n def _add_private_field(self, es_config, data_list, param):\n \"\"\"添加搜索平台私有字段,主要是将adminId作为私有字段添加到数据结构中 :param es_config :param data_list: :param param: :return:\"\"\"\n if not param or not data_list or (not es_config.get('add_admin_id_field')):\n return\n if not param.get('adminId'):\n return\n admin_id = param['adminId']\n for item in data_list:\n item['_adminId'] = admin_id\n", "source": "the_stack_v2_python_sparse", "source_path": "river/destination.py", "source_repo": "RitterHou/search_platform", "split": "val", "star_events_count": 0}
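The docstrings in the record above are Chinese: the class docstring reads roughly "the destination of the data flow is Elasticsearch"; `push` is "push the data into ES, the final step of the data flow"; `clear` is "clear all data from the ES data source"; `_add_private_field` is "add the search platform's private fields, mainly adding adminId to the data structure as a private field". A hedged sketch of `push`'s normalize-and-dispatch shape follows; the stub adapter is invented for illustration, and the dispatch table is a refactoring of the record's if/elif chain, not the original code:

```python
class FakeAdapter:
    """Stand-in for the record's es_adapter; the real project wires Elasticsearch here."""

    def batch_create(self, es_config, data, input_param):
        print('create', len(data), 'doc(s), routed with', input_param)

    def batch_update(self, es_config, data, input_param):
        print('update', len(data), 'doc(s), routed with', input_param)


def push(adapter, es_config, data, param=None):
    # Normalize a single document into a one-element batch.
    if not isinstance(data, (list, tuple)):
        data = [data]
    # Routing parameters: the first document merged with explicit overrides.
    fields = param.get('fields') if param else {}
    input_param = dict(data[0], **fields)
    operation = es_config.get('operation', 'create')
    handlers = {'create': adapter.batch_create, 'update': adapter.batch_update}
    handler = handlers.get(operation)
    if handler is None:
        raise ValueError('unknown operation: %r' % operation)
    handler(es_config, data, input_param)


push(FakeAdapter(), {'operation': 'create'},
     {'id': 1, 'title': 'doc'}, param={'fields': {'adminId': 'a42'}})
```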
{"blob_id": "d10191e20a860bfd94bc4e9f5b8ecf559c4de9ed", "bodies": ["settings = self.settings\nexcept_list = split_comma_separated(settings.get('except', '').strip())\ncmdline = [config['exe_paths']['rubocop'], '--format=json', '--display-style-guide']\nif except_list:\n cmdline.append('--except=%s' % ','.join(except_list))\nreturn cmdline", "output = execute(base_command + [path], ignore_errors=True)\ntry:\n results = json.loads(output)\nexcept ValueError:\n lines = output.splitlines()\n f.comment('RuboCop could not analyze this file, due to the following errors:\\n\\n```%s```' % lines[0].strip(), first_line=None, rich_text=True)\n return\nif results['summary']['offense_count'] > 0:\n for offense in results['files'][0]['offenses']:\n cop_name = offense['cop_name']\n message = offense['message']\n location = offense['location']\n prefix = '%s: ' % cop_name\n if message.startswith(prefix):\n message = message[len(prefix):]\n first_line = location.get('start_line', location['line'])\n last_line = location.get('last_line', location['line'])\n start_column = location.get('start_column', location['column'])\n f.comment(message, first_line=first_line, num_lines=last_line - first_line + 1, start_column=start_column, severity=offense.get('severity'), error_code=cop_name, rich_text=True)"], "bodies_text": "<|body_start_0|>\n settings = self.settings\n except_list = split_comma_separated(settings.get('except', '').strip())\n cmdline = [config['exe_paths']['rubocop'], '--format=json', '--display-style-guide']\n if except_list:\n cmdline.append('--except=%s' % ','.join(except_list))\n return cmdline\n<|end_body_0|>\n\n<|body_start_1|>\n output = execute(base_command + [path], ignore_errors=True)\n try:\n results = json.loads(output)\n except ValueError:\n lines = output.splitlines()\n f.comment('RuboCop could not analyze this file, due to the following errors:\\n\\n```%s```' % lines[0].strip(), first_line=None, rich_text=True)\n return\n if results['summary']['offense_count'] > 0:\n for offense in results['files'][0]['offenses']:\n cop_name = offense['cop_name']\n message = offense['message']\n location = offense['location']\n prefix = '%s: ' % cop_name\n if message.startswith(prefix):\n message = message[len(prefix):]\n first_line = location.get('start_line', location['line'])\n last_line = location.get('last_line', location['line'])\n start_column = location.get('start_column', location['column'])\n f.comment(message, first_line=first_line, num_lines=last_line - first_line + 1, start_column=start_column, severity=offense.get('severity'), error_code=cop_name, rich_text=True)\n<|end_body_1|>\n", "class_docstring": "Review Bot tool to run rubocop.", "class_name": "RubocopTool", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RubocopTool:\n \"\"\"Review Bot tool to run rubocop.\"\"\"\n\n def build_base_command(self, **kwargs):\n \"\"\"Build the base command line used to review files. Args: **kwargs (dict, unused): Additional keyword arguments. Returns: list of unicode: The base command line.\"\"\"\n <|body_0|>\n\n def handle_file(self, f, path, base_command, **kwargs):\n \"\"\"Perform a review of a single file. Args: f (reviewbot.processing.review.File): The file to process. path (unicode): The local path to the patched file to review. base_command (list of unicode): The base command used to run rubocop. 
**kwargs (dict, unused): Additional keyword arguments.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n settings = self.settings\n except_list = split_comma_separated(settings.get('except', '').strip())\n cmdline = [config['exe_paths']['rubocop'], '--format=json', '--display-style-guide']\n if except_list:\n cmdline.append('--except=%s' % ','.join(except_list))\n return cmdline\n<|end_body_0|>\n\n<|body_start_1|>\n output = execute(base_command + [path], ignore_errors=True)\n try:\n results = json.loads(output)\n except ValueError:\n lines = output.splitlines()\n f.comment('RuboCop could not analyze this file, due to the following errors:\\n\\n```%s```' % lines[0].strip(), first_line=None, rich_text=True)\n return\n if results['summary']['offense_count'] > 0:\n for offense in results['files'][0]['offenses']:\n cop_name = offense['cop_name']\n message = offense['message']\n location = offense['location']\n prefix = '%s: ' % cop_name\n if message.startswith(prefix):\n message = message[len(prefix):]\n first_line = location.get('start_line', location['line'])\n last_line = location.get('last_line', location['line'])\n start_column = location.get('start_column', location['column'])\n f.comment(message, first_line=first_line, num_lines=last_line - first_line + 1, start_column=start_column, severity=offense.get('severity'), error_code=cop_name, rich_text=True)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000342", "length_bytes": 4161, "license_type": "permissive", "methods": [{"docstring": "Build the base command line used to review files. Args: **kwargs (dict, unused): Additional keyword arguments. Returns: list of unicode: The base command line.", "name": "build_base_command", "signature": "def build_base_command(self, **kwargs)"}, {"docstring": "Perform a review of a single file. Args: f (reviewbot.processing.review.File): The file to process. path (unicode): The local path to the patched file to review. base_command (list of unicode): The base command used to run rubocop. **kwargs (dict, unused): Additional keyword arguments.", "name": "handle_file", "signature": "def handle_file(self, f, path, base_command, **kwargs)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_032177", "prompt": "Implement the Python class `RubocopTool` described below.\n\nClass description:\nReview Bot tool to run rubocop.\n\nMethod signatures and docstrings:\n- def build_base_command(self, **kwargs): Build the base command line used to review files. Args: **kwargs (dict, unused): Additional keyword arguments. Returns: list of unicode: The base command line.\n- def handle_file(self, f, path, base_command, **kwargs): Perform a review of a single file. Args: f (reviewbot.processing.review.File): The file to process. path (unicode): The local path to the patched file to review. base_command (list of unicode): The base command used to run rubocop. **kwargs (dict, unused): Additional keyword arguments.", "prompted_full_text": "Implement the Python class `RubocopTool` described below.\n\nClass description:\nReview Bot tool to run rubocop.\n\nMethod signatures and docstrings:\n- def build_base_command(self, **kwargs): Build the base command line used to review files. Args: **kwargs (dict, unused): Additional keyword arguments. Returns: list of unicode: The base command line.\n- def handle_file(self, f, path, base_command, **kwargs): Perform a review of a single file. Args: f (reviewbot.processing.review.File): The file to process. 
path (unicode): The local path to the patched file to review. base_command (list of unicode): The base command used to run rubocop. **kwargs (dict, unused): Additional keyword arguments.\n\n<|skeleton|>\nclass RubocopTool:\n \"\"\"Review Bot tool to run rubocop.\"\"\"\n\n def build_base_command(self, **kwargs):\n \"\"\"Build the base command line used to review files. Args: **kwargs (dict, unused): Additional keyword arguments. Returns: list of unicode: The base command line.\"\"\"\n <|body_0|>\n\n def handle_file(self, f, path, base_command, **kwargs):\n \"\"\"Perform a review of a single file. Args: f (reviewbot.processing.review.File): The file to process. path (unicode): The local path to the patched file to review. base_command (list of unicode): The base command used to run rubocop. **kwargs (dict, unused): Additional keyword arguments.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n settings = self.settings\n except_list = split_comma_separated(settings.get('except', '').strip())\n cmdline = [config['exe_paths']['rubocop'], '--format=json', '--display-style-guide']\n if except_list:\n cmdline.append('--except=%s' % ','.join(except_list))\n return cmdline\n<|end_body_0|>\n\n<|body_start_1|>\n output = execute(base_command + [path], ignore_errors=True)\n try:\n results = json.loads(output)\n except ValueError:\n lines = output.splitlines()\n f.comment('RuboCop could not analyze this file, due to the following errors:\\n\\n```%s```' % lines[0].strip(), first_line=None, rich_text=True)\n return\n if results['summary']['offense_count'] > 0:\n for offense in results['files'][0]['offenses']:\n cop_name = offense['cop_name']\n message = offense['message']\n location = offense['location']\n prefix = '%s: ' % cop_name\n if message.startswith(prefix):\n message = message[len(prefix):]\n first_line = location.get('start_line', location['line'])\n last_line = location.get('last_line', location['line'])\n start_column = location.get('start_column', location['column'])\n f.comment(message, first_line=first_line, num_lines=last_line - first_line + 1, start_column=start_column, severity=offense.get('severity'), error_code=cop_name, rich_text=True)\n<|end_body_1|>\n", "revision_id": "b59b566e127b5ef1b08f3189f1aa0194b7437d94", "skeleton": "<|skeleton|>\nclass RubocopTool:\n \"\"\"Review Bot tool to run rubocop.\"\"\"\n\n def build_base_command(self, **kwargs):\n \"\"\"Build the base command line used to review files. Args: **kwargs (dict, unused): Additional keyword arguments. Returns: list of unicode: The base command line.\"\"\"\n <|body_0|>\n\n def handle_file(self, f, path, base_command, **kwargs):\n \"\"\"Perform a review of a single file. Args: f (reviewbot.processing.review.File): The file to process. path (unicode): The local path to the patched file to review. base_command (list of unicode): The base command used to run rubocop. **kwargs (dict, unused): Additional keyword arguments.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class RubocopTool:\n \"\"\"Review Bot tool to run rubocop.\"\"\"\n\n def build_base_command(self, **kwargs):\n \"\"\"Build the base command line used to review files. Args: **kwargs (dict, unused): Additional keyword arguments. 
Returns: list of unicode: The base command line.\"\"\"\n settings = self.settings\n except_list = split_comma_separated(settings.get('except', '').strip())\n cmdline = [config['exe_paths']['rubocop'], '--format=json', '--display-style-guide']\n if except_list:\n cmdline.append('--except=%s' % ','.join(except_list))\n return cmdline\n\n def handle_file(self, f, path, base_command, **kwargs):\n \"\"\"Perform a review of a single file. Args: f (reviewbot.processing.review.File): The file to process. path (unicode): The local path to the patched file to review. base_command (list of unicode): The base command used to run rubocop. **kwargs (dict, unused): Additional keyword arguments.\"\"\"\n output = execute(base_command + [path], ignore_errors=True)\n try:\n results = json.loads(output)\n except ValueError:\n lines = output.splitlines()\n f.comment('RuboCop could not analyze this file, due to the following errors:\\n\\n```%s```' % lines[0].strip(), first_line=None, rich_text=True)\n return\n if results['summary']['offense_count'] > 0:\n for offense in results['files'][0]['offenses']:\n cop_name = offense['cop_name']\n message = offense['message']\n location = offense['location']\n prefix = '%s: ' % cop_name\n if message.startswith(prefix):\n message = message[len(prefix):]\n first_line = location.get('start_line', location['line'])\n last_line = location.get('last_line', location['line'])\n start_column = location.get('start_column', location['column'])\n f.comment(message, first_line=first_line, num_lines=last_line - first_line + 1, start_column=start_column, severity=offense.get('severity'), error_code=cop_name, rich_text=True)\n", "source": "the_stack_v2_python_sparse", "source_path": "bot/reviewbot/tools/rubocop.py", "source_repo": "reviewboard/ReviewBot", "split": "val", "star_events_count": 110}
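`handle_file` above consumes RuboCop's `--format=json` output, stripping the repeated cop name from each message and reading a line span from the `location` block with fallbacks to the older `line`/`column` keys. A self-contained sketch of that parsing against a hand-written sample payload (the payload values are invented; the key layout follows RuboCop's JSON formatter):

```python
import json

sample = json.loads("""
{"summary": {"offense_count": 1},
 "files": [{"path": "app.rb",
            "offenses": [{"cop_name": "Style/StringLiterals",
                          "severity": "convention",
                          "message": "Style/StringLiterals: Prefer single-quoted strings.",
                          "location": {"line": 3, "column": 7,
                                       "start_line": 3, "last_line": 3,
                                       "start_column": 7}}]}]}
""")

for offense in sample['files'][0]['offenses']:
    cop = offense['cop_name']
    message = offense['message']
    # Drop the "CopName: " prefix so the comment does not repeat the error code.
    prefix = cop + ': '
    if message.startswith(prefix):
        message = message[len(prefix):]
    loc = offense['location']
    # Newer RuboCop emits start_line/last_line/start_column; fall back to
    # the single line/column keys of older releases.
    first = loc.get('start_line', loc['line'])
    last = loc.get('last_line', loc['line'])
    col = loc.get('start_column', loc['column'])
    print('%s L%d-%d col %d [%s]: %s'
          % (cop, first, last, col, offense['severity'], message))
```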
{"blob_id": "8f7e0dec1976d6cb361cd35f86a6a7b12fd5184f", "bodies": ["super(QBCStabilityAgent, self).__init__(candidate_data=candidate_data, seed_data=seed_data, n_query=n_query, hull_distance=hull_distance, parallel=parallel)\nself.alpha = alpha\nself.model = model\nself.n_members = n_members\nself.qbc = QBC(n_members=n_members, training_fraction=training_fraction, model=model)", "X_cand, X_seed, y_seed = self.update_data(candidate_data, seed_data)\nif not self.qbc.trained or retrain_committee:\n self.qbc.fit(X_seed, y_seed)\nself.cv_score = self.qbc.cv_score\npreds, stds = self.qbc.predict(X_cand)\nexpected = preds - stds * self.alpha\nself.update_candidate_stabilities(expected, sort=True, floor=-6.0)\nstability_filter = self.candidate_data['pred_stability'] <= self.hull_distance\nwithin_hull = self.candidate_data[stability_filter]\nreturn within_hull.head(self.n_query)"], "bodies_text": "<|body_start_0|>\n super(QBCStabilityAgent, self).__init__(candidate_data=candidate_data, seed_data=seed_data, n_query=n_query, hull_distance=hull_distance, parallel=parallel)\n self.alpha = alpha\n self.model = model\n self.n_members = n_members\n self.qbc = QBC(n_members=n_members, training_fraction=training_fraction, model=model)\n<|end_body_0|>\n\n<|body_start_1|>\n X_cand, X_seed, y_seed = self.update_data(candidate_data, seed_data)\n if not self.qbc.trained or retrain_committee:\n self.qbc.fit(X_seed, y_seed)\n self.cv_score = self.qbc.cv_score\n preds, stds = self.qbc.predict(X_cand)\n expected = preds - stds * self.alpha\n self.update_candidate_stabilities(expected, sort=True, floor=-6.0)\n stability_filter = self.candidate_data['pred_stability'] <= self.hull_distance\n within_hull = self.candidate_data[stability_filter]\n return within_hull.head(self.n_query)\n<|end_body_1|>\n", "class_docstring": "Agent which uses QBC to determine optimal hypotheses", "class_name": "QBCStabilityAgent", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass QBCStabilityAgent:\n \"\"\"Agent which uses QBC to determine optimal hypotheses\"\"\"\n\n def __init__(self, candidate_data=None, seed_data=None, n_query=1, hull_distance=0.0, parallel=cpu_count(), alpha=0.5, training_fraction=0.5, model=None, n_members=10):\n \"\"\"Args: candidate_data (DataFrame): data about the candidates seed_data (DataFrame): data which to fit the Agent to n_query (int): number of hypotheses to generate hull_distance (float): hull distance as a criteria for which to deem a given material as \"stable\" parallel (bool): whether to use multiprocessing for phase stability analysis training_fraction (float): fraction of data to use for training committee members alpha (float): weighting factor for the stdev in making best-case predictions of the stability model (sklearn-style regressor): regressor n_members (int): number of committee members for the qbc\"\"\"\n <|body_0|>\n\n def get_hypotheses(self, candidate_data, seed_data=None, retrain_committee=True):\n \"\"\"Get hypotheses method for QBCStabilityAgent Args: candidate_data (pandas.DataFrame): dataframe of candidates seed_data (pandas.DataFrame): dataframe of prior data on which to fit GPUCB retrain_committee (bool): whether to retrain committee each time Returns: (pandas.DataFrame): top candidates from the GPUCB algorithm\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(QBCStabilityAgent, self).__init__(candidate_data=candidate_data, seed_data=seed_data, n_query=n_query, hull_distance=hull_distance, 
parallel=parallel)\n self.alpha = alpha\n self.model = model\n self.n_members = n_members\n self.qbc = QBC(n_members=n_members, training_fraction=training_fraction, model=model)\n<|end_body_0|>\n\n<|body_start_1|>\n X_cand, X_seed, y_seed = self.update_data(candidate_data, seed_data)\n if not self.qbc.trained or retrain_committee:\n self.qbc.fit(X_seed, y_seed)\n self.cv_score = self.qbc.cv_score\n preds, stds = self.qbc.predict(X_cand)\n expected = preds - stds * self.alpha\n self.update_candidate_stabilities(expected, sort=True, floor=-6.0)\n stability_filter = self.candidate_data['pred_stability'] <= self.hull_distance\n within_hull = self.candidate_data[stability_filter]\n return within_hull.head(self.n_query)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000343", "length_bytes": 38060, "license_type": "permissive", "methods": [{"docstring": "Args: candidate_data (DataFrame): data about the candidates seed_data (DataFrame): data which to fit the Agent to n_query (int): number of hypotheses to generate hull_distance (float): hull distance as a criteria for which to deem a given material as \"stable\" parallel (bool): whether to use multiprocessing for phase stability analysis training_fraction (float): fraction of data to use for training committee members alpha (float): weighting factor for the stdev in making best-case predictions of the stability model (sklearn-style regressor): regressor n_members (int): number of committee members for the qbc", "name": "__init__", "signature": "def __init__(self, candidate_data=None, seed_data=None, n_query=1, hull_distance=0.0, parallel=cpu_count(), alpha=0.5, training_fraction=0.5, model=None, n_members=10)"}, {"docstring": "Get hypotheses method for QBCStabilityAgent Args: candidate_data (pandas.DataFrame): dataframe of candidates seed_data (pandas.DataFrame): dataframe of prior data on which to fit GPUCB retrain_committee (bool): whether to retrain committee each time Returns: (pandas.DataFrame): top candidates from the GPUCB algorithm", "name": "get_hypotheses", "signature": "def get_hypotheses(self, candidate_data, seed_data=None, retrain_committee=True)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_042245", "prompt": "Implement the Python class `QBCStabilityAgent` described below.\n\nClass description:\nAgent which uses QBC to determine optimal hypotheses\n\nMethod signatures and docstrings:\n- def __init__(self, candidate_data=None, seed_data=None, n_query=1, hull_distance=0.0, parallel=cpu_count(), alpha=0.5, training_fraction=0.5, model=None, n_members=10): Args: candidate_data (DataFrame): data about the candidates seed_data (DataFrame): data which to fit the Agent to n_query (int): number of hypotheses to generate hull_distance (float): hull distance as a criteria for which to deem a given material as \"stable\" parallel (bool): whether to use multiprocessing for phase stability analysis training_fraction (float): fraction of data to use for training committee members alpha (float): weighting factor for the stdev in making best-case predictions of the stability model (sklearn-style regressor): regressor n_members (int): number of committee members for the qbc\n- def get_hypotheses(self, candidate_data, seed_data=None, retrain_committee=True): Get hypotheses method for QBCStabilityAgent Args: candidate_data (pandas.DataFrame): dataframe of candidates seed_data (pandas.DataFrame): dataframe of prior data on which to fit GPUCB retrain_committee (bool): whether to retrain committee each time Returns: 
(pandas.DataFrame): top candidates from the GPUCB algorithm", "prompted_full_text": "Implement the Python class `QBCStabilityAgent` described below.\n\nClass description:\nAgent which uses QBC to determine optimal hypotheses\n\nMethod signatures and docstrings:\n- def __init__(self, candidate_data=None, seed_data=None, n_query=1, hull_distance=0.0, parallel=cpu_count(), alpha=0.5, training_fraction=0.5, model=None, n_members=10): Args: candidate_data (DataFrame): data about the candidates seed_data (DataFrame): data which to fit the Agent to n_query (int): number of hypotheses to generate hull_distance (float): hull distance as a criteria for which to deem a given material as \"stable\" parallel (bool): whether to use multiprocessing for phase stability analysis training_fraction (float): fraction of data to use for training committee members alpha (float): weighting factor for the stdev in making best-case predictions of the stability model (sklearn-style regressor): regressor n_members (int): number of committee members for the qbc\n- def get_hypotheses(self, candidate_data, seed_data=None, retrain_committee=True): Get hypotheses method for QBCStabilityAgent Args: candidate_data (pandas.DataFrame): dataframe of candidates seed_data (pandas.DataFrame): dataframe of prior data on which to fit GPUCB retrain_committee (bool): whether to retrain committee each time Returns: (pandas.DataFrame): top candidates from the GPUCB algorithm\n\n<|skeleton|>\nclass QBCStabilityAgent:\n \"\"\"Agent which uses QBC to determine optimal hypotheses\"\"\"\n\n def __init__(self, candidate_data=None, seed_data=None, n_query=1, hull_distance=0.0, parallel=cpu_count(), alpha=0.5, training_fraction=0.5, model=None, n_members=10):\n \"\"\"Args: candidate_data (DataFrame): data about the candidates seed_data (DataFrame): data which to fit the Agent to n_query (int): number of hypotheses to generate hull_distance (float): hull distance as a criteria for which to deem a given material as \"stable\" parallel (bool): whether to use multiprocessing for phase stability analysis training_fraction (float): fraction of data to use for training committee members alpha (float): weighting factor for the stdev in making best-case predictions of the stability model (sklearn-style regressor): regressor n_members (int): number of committee members for the qbc\"\"\"\n <|body_0|>\n\n def get_hypotheses(self, candidate_data, seed_data=None, retrain_committee=True):\n \"\"\"Get hypotheses method for QBCStabilityAgent Args: candidate_data (pandas.DataFrame): dataframe of candidates seed_data (pandas.DataFrame): dataframe of prior data on which to fit GPUCB retrain_committee (bool): whether to retrain committee each time Returns: (pandas.DataFrame): top candidates from the GPUCB algorithm\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(QBCStabilityAgent, self).__init__(candidate_data=candidate_data, seed_data=seed_data, n_query=n_query, hull_distance=hull_distance, parallel=parallel)\n self.alpha = alpha\n self.model = model\n self.n_members = n_members\n self.qbc = QBC(n_members=n_members, training_fraction=training_fraction, model=model)\n<|end_body_0|>\n\n<|body_start_1|>\n X_cand, X_seed, y_seed = self.update_data(candidate_data, seed_data)\n if not self.qbc.trained or retrain_committee:\n self.qbc.fit(X_seed, y_seed)\n self.cv_score = self.qbc.cv_score\n preds, stds = self.qbc.predict(X_cand)\n expected = preds - stds * self.alpha\n self.update_candidate_stabilities(expected, sort=True, floor=-6.0)\n 
stability_filter = self.candidate_data['pred_stability'] <= self.hull_distance\n within_hull = self.candidate_data[stability_filter]\n return within_hull.head(self.n_query)\n<|end_body_1|>\n", "revision_id": "905f5d577513d1ca5a54fac3d381525e0fe3576a", "skeleton": "<|skeleton|>\nclass QBCStabilityAgent:\n \"\"\"Agent which uses QBC to determine optimal hypotheses\"\"\"\n\n def __init__(self, candidate_data=None, seed_data=None, n_query=1, hull_distance=0.0, parallel=cpu_count(), alpha=0.5, training_fraction=0.5, model=None, n_members=10):\n \"\"\"Args: candidate_data (DataFrame): data about the candidates seed_data (DataFrame): data which to fit the Agent to n_query (int): number of hypotheses to generate hull_distance (float): hull distance as a criteria for which to deem a given material as \"stable\" parallel (bool): whether to use multiprocessing for phase stability analysis training_fraction (float): fraction of data to use for training committee members alpha (float): weighting factor for the stdev in making best-case predictions of the stability model (sklearn-style regressor): regressor n_members (int): number of committee members for the qbc\"\"\"\n <|body_0|>\n\n def get_hypotheses(self, candidate_data, seed_data=None, retrain_committee=True):\n \"\"\"Get hypotheses method for QBCStabilityAgent Args: candidate_data (pandas.DataFrame): dataframe of candidates seed_data (pandas.DataFrame): dataframe of prior data on which to fit GPUCB retrain_committee (bool): whether to retrain committee each time Returns: (pandas.DataFrame): top candidates from the GPUCB algorithm\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class QBCStabilityAgent:\n \"\"\"Agent which uses QBC to determine optimal hypotheses\"\"\"\n\n def __init__(self, candidate_data=None, seed_data=None, n_query=1, hull_distance=0.0, parallel=cpu_count(), alpha=0.5, training_fraction=0.5, model=None, n_members=10):\n \"\"\"Args: candidate_data (DataFrame): data about the candidates seed_data (DataFrame): data which to fit the Agent to n_query (int): number of hypotheses to generate hull_distance (float): hull distance as a criteria for which to deem a given material as \"stable\" parallel (bool): whether to use multiprocessing for phase stability analysis training_fraction (float): fraction of data to use for training committee members alpha (float): weighting factor for the stdev in making best-case predictions of the stability model (sklearn-style regressor): regressor n_members (int): number of committee members for the qbc\"\"\"\n super(QBCStabilityAgent, self).__init__(candidate_data=candidate_data, seed_data=seed_data, n_query=n_query, hull_distance=hull_distance, parallel=parallel)\n self.alpha = alpha\n self.model = model\n self.n_members = n_members\n self.qbc = QBC(n_members=n_members, training_fraction=training_fraction, model=model)\n\n def get_hypotheses(self, candidate_data, seed_data=None, retrain_committee=True):\n \"\"\"Get hypotheses method for QBCStabilityAgent Args: candidate_data (pandas.DataFrame): dataframe of candidates seed_data (pandas.DataFrame): dataframe of prior data on which to fit GPUCB retrain_committee (bool): whether to retrain committee each time Returns: (pandas.DataFrame): top candidates from the GPUCB algorithm\"\"\"\n X_cand, X_seed, y_seed = self.update_data(candidate_data, seed_data)\n if not self.qbc.trained or retrain_committee:\n 
self.qbc.fit(X_seed, y_seed)\n self.cv_score = self.qbc.cv_score\n preds, stds = self.qbc.predict(X_cand)\n expected = preds - stds * self.alpha\n self.update_candidate_stabilities(expected, sort=True, floor=-6.0)\n stability_filter = self.candidate_data['pred_stability'] <= self.hull_distance\n within_hull = self.candidate_data[stability_filter]\n return within_hull.head(self.n_query)\n", "source": "the_stack_v2_python_sparse", "source_path": "camd/agent/stability.py", "source_repo": "apalizha/CAMD", "split": "val", "star_events_count": 0}
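The scoring rule in the record above, `expected = preds - stds * alpha`, is an optimistic best-case stability estimate built from a committee's mean prediction and spread, with candidates kept when the estimate falls under the hull distance. A hedged, self-contained sketch of that query-by-committee scoring, using a plain bootstrap committee of scikit-learn regressors as a stand-in for the record's `QBC` class and synthetic arrays in place of featurized materials data:

```python
import numpy as np
from sklearn.linear_model import LinearRegression

rng = np.random.default_rng(0)
X_seed = rng.normal(size=(50, 3))
y_seed = X_seed @ np.array([0.5, -1.0, 0.2]) + rng.normal(scale=0.1, size=50)
X_cand = rng.normal(size=(10, 3))


def committee_predict(X_seed, y_seed, X_cand, n_members=10, fraction=0.5):
    # Fit each member on a random fraction of the seed data, then pool
    # the members' predictions into a mean and a disagreement (std).
    preds = []
    for _ in range(n_members):
        idx = rng.choice(len(X_seed), size=int(fraction * len(X_seed)),
                         replace=False)
        member = LinearRegression().fit(X_seed[idx], y_seed[idx])
        preds.append(member.predict(X_cand))
    preds = np.stack(preds)
    return preds.mean(axis=0), preds.std(axis=0)


mean, std = committee_predict(X_seed, y_seed, X_cand)
alpha, hull_distance = 0.5, 0.0
expected = mean - alpha * std  # optimistic ("best case") stability estimate
stable = np.flatnonzero(expected <= hull_distance)
print('candidates predicted stable:', stable)
```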
{"blob_id": "92c2f47565c71a667c4196a5b4ec0d08139e29e5", "bodies": ["count = 0\nm = len(obstacleGrid)\nn = len(obstacleGrid[0])\nif obstacleGrid[0][0] or obstacleGrid[-1][-1]:\n return 0\n\ndef uniquePathsR(x, y):\n nonlocal count\n if x == m - 1 and y == n - 1:\n count += 1\n return\n if x < m - 1 and (not obstacleGrid[x + 1][y]):\n uniquePathsR(x + 1, y)\n if y < n - 1 and (not obstacleGrid[x][y + 1]):\n uniquePathsR(x, y + 1)\nuniquePathsR(0, 0)\nreturn count", "if obstacleGrid[0][0] or obstacleGrid[-1][-1]:\n return 0\nn = len(obstacleGrid)\nm = len(obstacleGrid[0])\ndp = [[0] * m for _ in range(n)]\nfor i in range(m):\n if obstacleGrid[0][i] == 0:\n dp[0][i] = 1\n else:\n break\nfor i in range(n):\n if obstacleGrid[i][0] == 0:\n dp[i][0] = 1\n else:\n break\nfor i in range(1, n):\n for j in range(1, m):\n if obstacleGrid[i][j] == 0:\n dp[i][j] = dp[i - 1][j] + dp[i][j - 1]\nreturn dp[n - 1][m - 1]"], "bodies_text": "<|body_start_0|>\n count = 0\n m = len(obstacleGrid)\n n = len(obstacleGrid[0])\n if obstacleGrid[0][0] or obstacleGrid[-1][-1]:\n return 0\n\n def uniquePathsR(x, y):\n nonlocal count\n if x == m - 1 and y == n - 1:\n count += 1\n return\n if x < m - 1 and (not obstacleGrid[x + 1][y]):\n uniquePathsR(x + 1, y)\n if y < n - 1 and (not obstacleGrid[x][y + 1]):\n uniquePathsR(x, y + 1)\n uniquePathsR(0, 0)\n return count\n<|end_body_0|>\n\n<|body_start_1|>\n if obstacleGrid[0][0] or obstacleGrid[-1][-1]:\n return 0\n n = len(obstacleGrid)\n m = len(obstacleGrid[0])\n dp = [[0] * m for _ in range(n)]\n for i in range(m):\n if obstacleGrid[0][i] == 0:\n dp[0][i] = 1\n else:\n break\n for i in range(n):\n if obstacleGrid[i][0] == 0:\n dp[i][0] = 1\n else:\n break\n for i in range(1, n):\n for j in range(1, m):\n if obstacleGrid[i][j] == 0:\n dp[i][j] = dp[i - 1][j] + dp[i][j - 1]\n return dp[n - 1][m - 1]\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def uniquePathsWithObstacles1(self, obstacleGrid):\n \"\"\":type obstacleGrid: List[List[int]] :rtype: int\"\"\"\n <|body_0|>\n\n def uniquePathsWithObstacles(self, obstacleGrid):\n \"\"\":type obstacleGrid: List[List[int]] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n count = 0\n m = len(obstacleGrid)\n n = len(obstacleGrid[0])\n if obstacleGrid[0][0] or obstacleGrid[-1][-1]:\n return 0\n\n def uniquePathsR(x, y):\n nonlocal count\n if x == m - 1 and y == n - 1:\n count += 1\n return\n if x < m - 1 and (not obstacleGrid[x + 1][y]):\n uniquePathsR(x + 1, y)\n if y < n - 1 and (not obstacleGrid[x][y + 1]):\n uniquePathsR(x, y + 1)\n uniquePathsR(0, 0)\n return count\n<|end_body_0|>\n\n<|body_start_1|>\n if obstacleGrid[0][0] or obstacleGrid[-1][-1]:\n return 0\n n = len(obstacleGrid)\n m = len(obstacleGrid[0])\n dp = [[0] * m for _ in range(n)]\n for i in range(m):\n if obstacleGrid[0][i] == 0:\n dp[0][i] = 1\n else:\n break\n for i in range(n):\n if obstacleGrid[i][0] == 0:\n dp[i][0] = 1\n else:\n break\n for i in range(1, n):\n for j in range(1, m):\n if obstacleGrid[i][j] == 0:\n dp[i][j] = dp[i - 1][j] + dp[i][j - 1]\n return dp[n - 1][m - 1]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000344", "length_bytes": 1671, "license_type": "no_license", "methods": [{"docstring": ":type obstacleGrid: List[List[int]] :rtype: int", "name": "uniquePathsWithObstacles1", "signature": "def uniquePathsWithObstacles1(self, obstacleGrid)"}, 
{"docstring": ":type obstacleGrid: List[List[int]] :rtype: int", "name": "uniquePathsWithObstacles", "signature": "def uniquePathsWithObstacles(self, obstacleGrid)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_035179", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def uniquePathsWithObstacles1(self, obstacleGrid): :type obstacleGrid: List[List[int]] :rtype: int\n- def uniquePathsWithObstacles(self, obstacleGrid): :type obstacleGrid: List[List[int]] :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def uniquePathsWithObstacles1(self, obstacleGrid): :type obstacleGrid: List[List[int]] :rtype: int\n- def uniquePathsWithObstacles(self, obstacleGrid): :type obstacleGrid: List[List[int]] :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def uniquePathsWithObstacles1(self, obstacleGrid):\n \"\"\":type obstacleGrid: List[List[int]] :rtype: int\"\"\"\n <|body_0|>\n\n def uniquePathsWithObstacles(self, obstacleGrid):\n \"\"\":type obstacleGrid: List[List[int]] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n count = 0\n m = len(obstacleGrid)\n n = len(obstacleGrid[0])\n if obstacleGrid[0][0] or obstacleGrid[-1][-1]:\n return 0\n\n def uniquePathsR(x, y):\n nonlocal count\n if x == m - 1 and y == n - 1:\n count += 1\n return\n if x < m - 1 and (not obstacleGrid[x + 1][y]):\n uniquePathsR(x + 1, y)\n if y < n - 1 and (not obstacleGrid[x][y + 1]):\n uniquePathsR(x, y + 1)\n uniquePathsR(0, 0)\n return count\n<|end_body_0|>\n\n<|body_start_1|>\n if obstacleGrid[0][0] or obstacleGrid[-1][-1]:\n return 0\n n = len(obstacleGrid)\n m = len(obstacleGrid[0])\n dp = [[0] * m for _ in range(n)]\n for i in range(m):\n if obstacleGrid[0][i] == 0:\n dp[0][i] = 1\n else:\n break\n for i in range(n):\n if obstacleGrid[i][0] == 0:\n dp[i][0] = 1\n else:\n break\n for i in range(1, n):\n for j in range(1, m):\n if obstacleGrid[i][j] == 0:\n dp[i][j] = dp[i - 1][j] + dp[i][j - 1]\n return dp[n - 1][m - 1]\n<|end_body_1|>\n", "revision_id": "4a1747b6497305f3821612d9c358a6795b1690da", "skeleton": "<|skeleton|>\nclass Solution:\n\n def uniquePathsWithObstacles1(self, obstacleGrid):\n \"\"\":type obstacleGrid: List[List[int]] :rtype: int\"\"\"\n <|body_0|>\n\n def uniquePathsWithObstacles(self, obstacleGrid):\n \"\"\":type obstacleGrid: List[List[int]] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def uniquePathsWithObstacles1(self, obstacleGrid):\n \"\"\":type obstacleGrid: List[List[int]] :rtype: int\"\"\"\n count = 0\n m = len(obstacleGrid)\n n = len(obstacleGrid[0])\n if obstacleGrid[0][0] or obstacleGrid[-1][-1]:\n return 0\n\n def uniquePathsR(x, y):\n nonlocal count\n if x == m - 1 and y == n - 1:\n count += 1\n return\n if x < m - 1 and (not obstacleGrid[x + 1][y]):\n uniquePathsR(x + 1, y)\n if y < n - 1 and (not obstacleGrid[x][y + 1]):\n uniquePathsR(x, y + 1)\n uniquePathsR(0, 0)\n return count\n\n def uniquePathsWithObstacles(self, obstacleGrid):\n \"\"\":type obstacleGrid: List[List[int]] :rtype: int\"\"\"\n if obstacleGrid[0][0] or obstacleGrid[-1][-1]:\n return 0\n n = len(obstacleGrid)\n m = len(obstacleGrid[0])\n dp = [[0] * m for _ in 
range(n)]\n for i in range(m):\n if obstacleGrid[0][i] == 0:\n dp[0][i] = 1\n else:\n break\n for i in range(n):\n if obstacleGrid[i][0] == 0:\n dp[i][0] = 1\n else:\n break\n for i in range(1, n):\n for j in range(1, m):\n if obstacleGrid[i][j] == 0:\n dp[i][j] = dp[i - 1][j] + dp[i][j - 1]\n return dp[n - 1][m - 1]\n", "source": "the_stack_v2_python_sparse", "source_path": "DynamicProgramming/q063_unique_paths_ii.py", "source_repo": "sevenhe716/LeetCode", "split": "val", "star_events_count": 0}
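Both bodies in the record above are functionally correct; the second is the standard O(n·m) obstacle-grid DP seeded along the first row and column. For reference, the same recurrence admits a rolling single-row form — shown here as a space-optimized variant, not as the record's code:

```python
def unique_paths_with_obstacles(grid):
    if not grid or grid[0][0] or grid[-1][-1]:
        return 0
    m = len(grid[0])
    dp = [0] * m
    dp[0] = 1
    for row in grid:
        for j in range(m):
            if row[j]:
                dp[j] = 0           # an obstacle kills every path through this cell
            elif j > 0:
                dp[j] += dp[j - 1]  # paths from above (old dp[j]) plus from the left
    return dp[-1]


# Classic 3x3 grid with a single central obstacle: two paths remain.
assert unique_paths_with_obstacles([[0, 0, 0], [0, 1, 0], [0, 0, 0]]) == 2
```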
{"blob_id": "887da8ef8932c2e4934591b924c66abee5f91afa", "bodies": ["super().__init__(resourceurl, path_download_dir, config_path)\nself.pysftpref = pysftp\nself.sftpconnector = None\nself.remotepath = None", "try:\n host = self.parsed_url.hostname\n port = self.parsed_url.port\n if port is None:\n self.set_port_from_config()\n port = self.port\n username = self.parsed_url.username\n password = self.parsed_url.password\n self.sftpconnector = self.pysftpref.Connection(host, username=username, password=password, port=port)\n self.sftpconnector.timeout = self.timeout\n self.remotepath = self.org_file_name\n if self.remotedir:\n self.sftpconnector.cwd(self.remotedir)\n self.remotepath = os.path.join(self.remotedir, self.remotepath)\n self.connectionactive = True\n self.size_of_file_to_download = self.sftpconnector.stat(self.remotepath).st_size\n if self.size_of_file_to_download == 0:\n raise Exception(' Aborting, as not able to determine length of the content to be downloaded for url {0}', self.resourceurl)\nexcept:\n raise", "try:\n self.sftpconnector.close()\n self.connectionactive = False\nexcept:\n raise", "try:\n self.disconnect()\nexcept:\n raise\nfinally:\n self.delete_file()", "try:\n if self.configparser:\n ports = self.configparser['ports']\n port = ports.get(self.protocol, 22)\n self.port = int(port)\n else:\n self.port = 22\nexcept:\n self.port = 22", "try:\n super().download_resource(resourceidx)\n self.connect()\n\n def update_progress(bytestransferred, bytesleft):\n self.size_of_file_downloaded = bytestransferred\n self.sftpconnector.get(self.remotepath, self.path_downloaded_file, update_progress)\n try:\n self.disconnect()\n except:\n if self.size_of_file_to_download == self.size_of_file_downloaded:\n pass\n else:\n raise\nexcept:\n self.abortdownload()\n raise"], "bodies_text": "<|body_start_0|>\n super().__init__(resourceurl, path_download_dir, config_path)\n self.pysftpref = pysftp\n self.sftpconnector = None\n self.remotepath = None\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n host = self.parsed_url.hostname\n port = self.parsed_url.port\n if port is None:\n self.set_port_from_config()\n port = self.port\n username = self.parsed_url.username\n password = self.parsed_url.password\n self.sftpconnector = self.pysftpref.Connection(host, username=username, password=password, port=port)\n self.sftpconnector.timeout = self.timeout\n self.remotepath = self.org_file_name\n if self.remotedir:\n self.sftpconnector.cwd(self.remotedir)\n self.remotepath = os.path.join(self.remotedir, self.remotepath)\n self.connectionactive = True\n self.size_of_file_to_download = self.sftpconnector.stat(self.remotepath).st_size\n if self.size_of_file_to_download == 0:\n raise Exception(' Aborting, as not able to determine length of the content to be downloaded for url {0}', self.resourceurl)\n except:\n raise\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n self.sftpconnector.close()\n self.connectionactive = False\n except:\n raise\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n self.disconnect()\n except:\n raise\n finally:\n self.delete_file()\n<|end_body_3|>\n\n<|body_start_4|>\n try:\n if self.configparser:\n ports = self.configparser['ports']\n port = ports.get(self.protocol, 22)\n self.port = int(port)\n else:\n self.port = 22\n except:\n self.port = 22\n<|end_body_4|>\n\n<|body_start_5|>\n try:\n super().download_resource(resourceidx)\n self.connect()\n\n def update_progress(bytestransferred, bytesleft):\n self.size_of_file_downloaded = bytestransferred\n self.sftpconnector.get(self.remotepath, 
self.path_downloaded_file, update_progress)\n try:\n self.disconnect()\n except:\n if self.size_of_file_to_download == self.size_of_file_downloaded:\n pass\n else:\n raise\n except:\n self.abortdownload()\n raise\n<|end_body_5|>\n", "class_docstring": "", "class_name": "SFTPDownloader", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SFTPDownloader:\n\n def __init__(self, resourceurl, path_download_dir, config_path=None):\n \"\"\"Returns downloader object for SFTP resoucrce\"\"\"\n <|body_0|>\n\n def connect(self):\n \"\"\"This method extracts connection information from the url and tries to create a SFTP Connection\"\"\"\n <|body_1|>\n\n def disconnect(self):\n \"\"\"This method tries to stop all connections which were created while trying to download an SFTP resource\"\"\"\n <|body_2|>\n\n def abortdownload(self):\n \"\"\"This method tries to stop any active SFTP connections created and delete the downloaded SFTP resource\"\"\"\n <|body_3|>\n\n def set_port_from_config(self):\n \"\"\"Sets the default port to be used for downloading SFTP file\"\"\"\n <|body_4|>\n\n def download_resource(self, resourceidx):\n \"\"\"This method tries to download a SFTP resource attached with the class. In case of partial download tries to delete the file downloaded\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(resourceurl, path_download_dir, config_path)\n self.pysftpref = pysftp\n self.sftpconnector = None\n self.remotepath = None\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n host = self.parsed_url.hostname\n port = self.parsed_url.port\n if port is None:\n self.set_port_from_config()\n port = self.port\n username = self.parsed_url.username\n password = self.parsed_url.password\n self.sftpconnector = self.pysftpref.Connection(host, username=username, password=password, port=port)\n self.sftpconnector.timeout = self.timeout\n self.remotepath = self.org_file_name\n if self.remotedir:\n self.sftpconnector.cwd(self.remotedir)\n self.remotepath = os.path.join(self.remotedir, self.remotepath)\n self.connectionactive = True\n self.size_of_file_to_download = self.sftpconnector.stat(self.remotepath).st_size\n if self.size_of_file_to_download == 0:\n raise Exception(' Aborting, as not able to determine length of the content to be downloaded for url {0}', self.resourceurl)\n except:\n raise\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n self.sftpconnector.close()\n self.connectionactive = False\n except:\n raise\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n self.disconnect()\n except:\n raise\n finally:\n self.delete_file()\n<|end_body_3|>\n\n<|body_start_4|>\n try:\n if self.configparser:\n ports = self.configparser['ports']\n port = ports.get(self.protocol, 22)\n self.port = int(port)\n else:\n self.port = 22\n except:\n self.port = 22\n<|end_body_4|>\n\n<|body_start_5|>\n try:\n super().download_resource(resourceidx)\n self.connect()\n\n def update_progress(bytestransferred, bytesleft):\n self.size_of_file_downloaded = bytestransferred\n self.sftpconnector.get(self.remotepath, self.path_downloaded_file, update_progress)\n try:\n self.disconnect()\n except:\n if self.size_of_file_to_download == self.size_of_file_downloaded:\n pass\n else:\n raise\n except:\n self.abortdownload()\n raise\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000345", "length_bytes": 3741, "license_type": "permissive", "methods": [{"docstring": "Returns downloader object for SFTP resoucrce", "name": "__init__", "signature": "def 
__init__(self, resourceurl, path_download_dir, config_path=None)"}, {"docstring": "This method extracts connection information from the url and tries to create a SFTP Connection", "name": "connect", "signature": "def connect(self)"}, {"docstring": "This method tries to stop all connections which were created while trying to download an SFTP resource", "name": "disconnect", "signature": "def disconnect(self)"}, {"docstring": "This method tries to stop any active SFTP connections created and delete the downloaded SFTP resource", "name": "abortdownload", "signature": "def abortdownload(self)"}, {"docstring": "Sets the default port to be used for downloading SFTP file", "name": "set_port_from_config", "signature": "def set_port_from_config(self)"}, {"docstring": "This method tries to download a SFTP resource attached with the class. In case of partial download tries to delete the file downloaded", "name": "download_resource", "signature": "def download_resource(self, resourceidx)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_val_002327", "prompt": "Implement the Python class `SFTPDownloader` described below.\n\nClass description:\nImplement the SFTPDownloader class.\n\nMethod signatures and docstrings:\n- def __init__(self, resourceurl, path_download_dir, config_path=None): Returns downloader object for SFTP resoucrce\n- def connect(self): This method extracts connection information from the url and tries to create a SFTP Connection\n- def disconnect(self): This method tries to stop all connections which were created while trying to download an SFTP resource\n- def abortdownload(self): This method tries to stop any active SFTP connections created and delete the downloaded SFTP resource\n- def set_port_from_config(self): Sets the default port to be used for downloading SFTP file\n- def download_resource(self, resourceidx): This method tries to download a SFTP resource attached with the class. In case of partial download tries to delete the file downloaded", "prompted_full_text": "Implement the Python class `SFTPDownloader` described below.\n\nClass description:\nImplement the SFTPDownloader class.\n\nMethod signatures and docstrings:\n- def __init__(self, resourceurl, path_download_dir, config_path=None): Returns downloader object for SFTP resoucrce\n- def connect(self): This method extracts connection information from the url and tries to create a SFTP Connection\n- def disconnect(self): This method tries to stop all connections which were created while trying to download an SFTP resource\n- def abortdownload(self): This method tries to stop any active SFTP connections created and delete the downloaded SFTP resource\n- def set_port_from_config(self): Sets the default port to be used for downloading SFTP file\n- def download_resource(self, resourceidx): This method tries to download a SFTP resource attached with the class. 
In case of partial download tries to delete the file downloaded\n\n<|skeleton|>\nclass SFTPDownloader:\n\n def __init__(self, resourceurl, path_download_dir, config_path=None):\n \"\"\"Returns downloader object for SFTP resoucrce\"\"\"\n <|body_0|>\n\n def connect(self):\n \"\"\"This method extracts connection information from the url and tries to create a SFTP Connection\"\"\"\n <|body_1|>\n\n def disconnect(self):\n \"\"\"This method tries to stop all connections which were created while trying to download an SFTP resource\"\"\"\n <|body_2|>\n\n def abortdownload(self):\n \"\"\"This method tries to stop any active SFTP connections created and delete the downloaded SFTP resource\"\"\"\n <|body_3|>\n\n def set_port_from_config(self):\n \"\"\"Sets the default port to be used for downloading SFTP file\"\"\"\n <|body_4|>\n\n def download_resource(self, resourceidx):\n \"\"\"This method tries to download a SFTP resource attached with the class. In case of partial download tries to delete the file downloaded\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(resourceurl, path_download_dir, config_path)\n self.pysftpref = pysftp\n self.sftpconnector = None\n self.remotepath = None\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n host = self.parsed_url.hostname\n port = self.parsed_url.port\n if port is None:\n self.set_port_from_config()\n port = self.port\n username = self.parsed_url.username\n password = self.parsed_url.password\n self.sftpconnector = self.pysftpref.Connection(host, username=username, password=password, port=port)\n self.sftpconnector.timeout = self.timeout\n self.remotepath = self.org_file_name\n if self.remotedir:\n self.sftpconnector.cwd(self.remotedir)\n self.remotepath = os.path.join(self.remotedir, self.remotepath)\n self.connectionactive = True\n self.size_of_file_to_download = self.sftpconnector.stat(self.remotepath).st_size\n if self.size_of_file_to_download == 0:\n raise Exception(' Aborting, as not able to determine length of the content to be downloaded for url {0}', self.resourceurl)\n except:\n raise\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n self.sftpconnector.close()\n self.connectionactive = False\n except:\n raise\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n self.disconnect()\n except:\n raise\n finally:\n self.delete_file()\n<|end_body_3|>\n\n<|body_start_4|>\n try:\n if self.configparser:\n ports = self.configparser['ports']\n port = ports.get(self.protocol, 22)\n self.port = int(port)\n else:\n self.port = 22\n except:\n self.port = 22\n<|end_body_4|>\n\n<|body_start_5|>\n try:\n super().download_resource(resourceidx)\n self.connect()\n\n def update_progress(bytestransferred, bytesleft):\n self.size_of_file_downloaded = bytestransferred\n self.sftpconnector.get(self.remotepath, self.path_downloaded_file, update_progress)\n try:\n self.disconnect()\n except:\n if self.size_of_file_to_download == self.size_of_file_downloaded:\n pass\n else:\n raise\n except:\n self.abortdownload()\n raise\n<|end_body_5|>\n", "revision_id": "7809e6d731a9d010e8514270f400408157ad7ab3", "skeleton": "<|skeleton|>\nclass SFTPDownloader:\n\n def __init__(self, resourceurl, path_download_dir, config_path=None):\n \"\"\"Returns downloader object for SFTP resoucrce\"\"\"\n <|body_0|>\n\n def connect(self):\n \"\"\"This method extracts connection information from the url and tries to create a SFTP Connection\"\"\"\n <|body_1|>\n\n def disconnect(self):\n \"\"\"This method tries to stop all connections which were created while trying to download an SFTP 
resource\"\"\"\n <|body_2|>\n\n def abortdownload(self):\n \"\"\"This method tries to stop any active SFTP connections created and delete the downloaded SFTP resource\"\"\"\n <|body_3|>\n\n def set_port_from_config(self):\n \"\"\"Sets the default port to be used for downloading SFTP file\"\"\"\n <|body_4|>\n\n def download_resource(self, resourceidx):\n \"\"\"This method tries to download a SFTP resource attached with the class. In case of partial download tries to delete the file downloaded\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SFTPDownloader:\n def __init__(self, resourceurl, path_download_dir, config_path=None):\n \"\"\"Returns downloader object for SFTP resoucrce\"\"\"\n super().__init__(resourceurl, path_download_dir, config_path)\n self.pysftpref = pysftp\n self.sftpconnector = None\n self.remotepath = None\n\n def connect(self):\n \"\"\"This method extracts connection information from the url and tries to create a SFTP Connection\"\"\"\n try:\n host = self.parsed_url.hostname\n port = self.parsed_url.port\n if port is None:\n self.set_port_from_config()\n port = self.port\n username = self.parsed_url.username\n password = self.parsed_url.password\n self.sftpconnector = self.pysftpref.Connection(host, username=username, password=password, port=port)\n self.sftpconnector.timeout = self.timeout\n self.remotepath = self.org_file_name\n if self.remotedir:\n self.sftpconnector.cwd(self.remotedir)\n self.remotepath = os.path.join(self.remotedir, self.remotepath)\n self.connectionactive = True\n self.size_of_file_to_download = self.sftpconnector.stat(self.remotepath).st_size\n if self.size_of_file_to_download == 0:\n raise Exception(' Aborting, as not able to determine length of the content to be downloaded for url {0}', self.resourceurl)\n except:\n raise\n\n def disconnect(self):\n \"\"\"This method tries to stop all connections which were created while trying to download an SFTP resource\"\"\"\n try:\n self.sftpconnector.close()\n self.connectionactive = False\n except:\n raise\n\n def abortdownload(self):\n \"\"\"This method tries to stop any active SFTP connections created and delete the downloaded SFTP resource\"\"\"\n try:\n self.disconnect()\n except:\n raise\n finally:\n self.delete_file()\n\n def set_port_from_config(self):\n \"\"\"Sets the default port to be used for downloading SFTP file\"\"\"\n try:\n if self.configparser:\n ports = self.configparser['ports']\n port = ports.get(self.protocol, 22)\n self.port = int(port)\n else:\n self.port = 22\n except:\n self.port = 22\n\n def download_resource(self, resourceidx):\n \"\"\"This method tries to download a SFTP resource attached with the class. In case of partial download tries to delete the file downloaded\"\"\"\n try:\n super().download_resource(resourceidx)\n self.connect()\n\n def update_progress(bytestransferred, bytesleft):\n self.size_of_file_downloaded = bytestransferred\n self.sftpconnector.get(self.remotepath, self.path_downloaded_file, update_progress)\n try:\n self.disconnect()\n except:\n if self.size_of_file_to_download == self.size_of_file_downloaded:\n pass\n else:\n raise\n except:\n self.abortdownload()\n raise\n", "source": "the_stack_v2_python_sparse", "source_path": "resourcedownloader/downloadservice/sftp_downloader.py", "source_repo": "abhishek9sharma/pydownloader", "split": "val", "star_events_count": 0}
{"blob_id": "8c35d01814ceaaa992a4cb5a448c4814ef34a939", "bodies": ["self._attr_name = 'IPv6' if ipv6 else None\nself._attr_unique_id = f'{hostname}_{ipv6}'\nself.hostname = hostname\nself.resolver = aiodns.DNSResolver()\nself.resolver.nameservers = [resolver]\nself.querytype = 'AAAA' if ipv6 else 'A'\nself._attr_extra_state_attributes = {'Resolver': resolver, 'Querytype': self.querytype}\nself._attr_device_info = DeviceInfo(entry_type=DeviceEntryType.SERVICE, identifiers={(DOMAIN, hostname)}, manufacturer='DNS', model=aiodns.__version__, name=name)", "try:\n response = await self.resolver.query(self.hostname, self.querytype)\nexcept DNSError as err:\n _LOGGER.warning('Exception while resolving host: %s', err)\n response = None\nif response:\n self._attr_native_value = response[0].host\n self._attr_available = True\nelse:\n self._attr_available = False"], "bodies_text": "<|body_start_0|>\n self._attr_name = 'IPv6' if ipv6 else None\n self._attr_unique_id = f'{hostname}_{ipv6}'\n self.hostname = hostname\n self.resolver = aiodns.DNSResolver()\n self.resolver.nameservers = [resolver]\n self.querytype = 'AAAA' if ipv6 else 'A'\n self._attr_extra_state_attributes = {'Resolver': resolver, 'Querytype': self.querytype}\n self._attr_device_info = DeviceInfo(entry_type=DeviceEntryType.SERVICE, identifiers={(DOMAIN, hostname)}, manufacturer='DNS', model=aiodns.__version__, name=name)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n response = await self.resolver.query(self.hostname, self.querytype)\n except DNSError as err:\n _LOGGER.warning('Exception while resolving host: %s', err)\n response = None\n if response:\n self._attr_native_value = response[0].host\n self._attr_available = True\n else:\n self._attr_available = False\n<|end_body_1|>\n", "class_docstring": "Implementation of a DNS IP sensor.", "class_name": "WanIpSensor", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass WanIpSensor:\n \"\"\"Implementation of a DNS IP sensor.\"\"\"\n\n def __init__(self, name: str, hostname: str, resolver: str, ipv6: bool) -> None:\n \"\"\"Initialize the DNS IP sensor.\"\"\"\n <|body_0|>\n\n async def async_update(self) -> None:\n \"\"\"Get the current DNS IP address for hostname.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._attr_name = 'IPv6' if ipv6 else None\n self._attr_unique_id = f'{hostname}_{ipv6}'\n self.hostname = hostname\n self.resolver = aiodns.DNSResolver()\n self.resolver.nameservers = [resolver]\n self.querytype = 'AAAA' if ipv6 else 'A'\n self._attr_extra_state_attributes = {'Resolver': resolver, 'Querytype': self.querytype}\n self._attr_device_info = DeviceInfo(entry_type=DeviceEntryType.SERVICE, identifiers={(DOMAIN, hostname)}, manufacturer='DNS', model=aiodns.__version__, name=name)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n response = await self.resolver.query(self.hostname, self.querytype)\n except DNSError as err:\n _LOGGER.warning('Exception while resolving host: %s', err)\n response = None\n if response:\n self._attr_native_value = response[0].host\n self._attr_available = True\n else:\n self._attr_available = False\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000346", "length_bytes": 2840, "license_type": "permissive", "methods": [{"docstring": "Initialize the DNS IP sensor.", "name": "__init__", "signature": "def __init__(self, name: str, hostname: str, resolver: str, ipv6: bool) -> None"}, {"docstring": "Get the current DNS IP address for hostname.", "name": 
"async_update", "signature": "async def async_update(self) -> None"}], "n_methods": 2, "prompt": "Implement the Python class `WanIpSensor` described below.\n\nClass description:\nImplementation of a DNS IP sensor.\n\nMethod signatures and docstrings:\n- def __init__(self, name: str, hostname: str, resolver: str, ipv6: bool) -> None: Initialize the DNS IP sensor.\n- async def async_update(self) -> None: Get the current DNS IP address for hostname.", "prompted_full_text": "Implement the Python class `WanIpSensor` described below.\n\nClass description:\nImplementation of a DNS IP sensor.\n\nMethod signatures and docstrings:\n- def __init__(self, name: str, hostname: str, resolver: str, ipv6: bool) -> None: Initialize the DNS IP sensor.\n- async def async_update(self) -> None: Get the current DNS IP address for hostname.\n\n<|skeleton|>\nclass WanIpSensor:\n \"\"\"Implementation of a DNS IP sensor.\"\"\"\n\n def __init__(self, name: str, hostname: str, resolver: str, ipv6: bool) -> None:\n \"\"\"Initialize the DNS IP sensor.\"\"\"\n <|body_0|>\n\n async def async_update(self) -> None:\n \"\"\"Get the current DNS IP address for hostname.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._attr_name = 'IPv6' if ipv6 else None\n self._attr_unique_id = f'{hostname}_{ipv6}'\n self.hostname = hostname\n self.resolver = aiodns.DNSResolver()\n self.resolver.nameservers = [resolver]\n self.querytype = 'AAAA' if ipv6 else 'A'\n self._attr_extra_state_attributes = {'Resolver': resolver, 'Querytype': self.querytype}\n self._attr_device_info = DeviceInfo(entry_type=DeviceEntryType.SERVICE, identifiers={(DOMAIN, hostname)}, manufacturer='DNS', model=aiodns.__version__, name=name)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n response = await self.resolver.query(self.hostname, self.querytype)\n except DNSError as err:\n _LOGGER.warning('Exception while resolving host: %s', err)\n response = None\n if response:\n self._attr_native_value = response[0].host\n self._attr_available = True\n else:\n self._attr_available = False\n<|end_body_1|>\n", "revision_id": "80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743", "skeleton": "<|skeleton|>\nclass WanIpSensor:\n \"\"\"Implementation of a DNS IP sensor.\"\"\"\n\n def __init__(self, name: str, hostname: str, resolver: str, ipv6: bool) -> None:\n \"\"\"Initialize the DNS IP sensor.\"\"\"\n <|body_0|>\n\n async def async_update(self) -> None:\n \"\"\"Get the current DNS IP address for hostname.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class WanIpSensor:\n \"\"\"Implementation of a DNS IP sensor.\"\"\"\n\n def __init__(self, name: str, hostname: str, resolver: str, ipv6: bool) -> None:\n \"\"\"Initialize the DNS IP sensor.\"\"\"\n self._attr_name = 'IPv6' if ipv6 else None\n self._attr_unique_id = f'{hostname}_{ipv6}'\n self.hostname = hostname\n self.resolver = aiodns.DNSResolver()\n self.resolver.nameservers = [resolver]\n self.querytype = 'AAAA' if ipv6 else 'A'\n self._attr_extra_state_attributes = {'Resolver': resolver, 'Querytype': self.querytype}\n self._attr_device_info = DeviceInfo(entry_type=DeviceEntryType.SERVICE, identifiers={(DOMAIN, hostname)}, manufacturer='DNS', model=aiodns.__version__, name=name)\n\n async def async_update(self) -> None:\n \"\"\"Get the current DNS IP address for hostname.\"\"\"\n try:\n response = await self.resolver.query(self.hostname, self.querytype)\n except DNSError as 
err:\n _LOGGER.warning('Exception while resolving host: %s', err)\n response = None\n if response:\n self._attr_native_value = response[0].host\n self._attr_available = True\n else:\n self._attr_available = False\n", "source": "the_stack_v2_python_sparse", "source_path": "homeassistant/components/dnsip/sensor.py", "source_repo": "home-assistant/core", "split": "val", "star_events_count": 35501}
{"blob_id": "97b804d172bd91582e35b5d6897bbe44f6130447", "bodies": ["self.item = item\nself.request = request\nself.users_feed = users_feed\nself.tpl = tpl\nself.tpl_base = 'widgets/items_feed_views/feed_item_views/'", "try:\n noop = self.item.platform\n return self.POST\nexcept AttributeError:\n return self.PRODUCT", "item_type = self.item_type()\ntpl = ''\ncontext = {'item': self.item, 'user': self.item.influencer.shelf_user.userprofile if item_type == self.POST else self.item.user_prof, 'users_feed': self.users_feed}\nif item_type == self.POST:\n platform_name = self.item.platform.platform_name\n if platform_name == 'Twitter':\n tpl = 'tweet.html'\n elif platform_name == 'Instagram':\n tpl = 'instagram.html'\n elif self.item.post_type == 'blog':\n tpl = 'blog_post.html'\n context['post_tags'] = self.item.brand_tags.split(',') if self.item.brand_tags else []\n context['products'] = self.item.pmsms_for_self\nelse:\n tpl = 'product.html'\n context['item_owner'] = self.item.get_original_instance().user_prof\ntpl = self.tpl or tpl\nreturn render_to_string('{base}{tpl}'.format(base=self.tpl_base, tpl=tpl), context, context_instance=RequestContext(self.request))"], "bodies_text": "<|body_start_0|>\n self.item = item\n self.request = request\n self.users_feed = users_feed\n self.tpl = tpl\n self.tpl_base = 'widgets/items_feed_views/feed_item_views/'\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n noop = self.item.platform\n return self.POST\n except AttributeError:\n return self.PRODUCT\n<|end_body_1|>\n\n<|body_start_2|>\n item_type = self.item_type()\n tpl = ''\n context = {'item': self.item, 'user': self.item.influencer.shelf_user.userprofile if item_type == self.POST else self.item.user_prof, 'users_feed': self.users_feed}\n if item_type == self.POST:\n platform_name = self.item.platform.platform_name\n if platform_name == 'Twitter':\n tpl = 'tweet.html'\n elif platform_name == 'Instagram':\n tpl = 'instagram.html'\n elif self.item.post_type == 'blog':\n tpl = 'blog_post.html'\n context['post_tags'] = self.item.brand_tags.split(',') if self.item.brand_tags else []\n context['products'] = self.item.pmsms_for_self\n else:\n tpl = 'product.html'\n context['item_owner'] = self.item.get_original_instance().user_prof\n tpl = self.tpl or tpl\n return render_to_string('{base}{tpl}'.format(base=self.tpl_base, tpl=tpl), context, context_instance=RequestContext(self.request))\n<|end_body_2|>\n", "class_docstring": "A feed item is an encapsulation of an item in the feed, which may be displayed in a different way depending on the type of item it is (i.e. Twitter tweet, Instagram Page, Blog post, etc). The available files we have for rendering are: * blog_post.html - renders a :class:`debra.models.Posts` instance having ``platform_name=='Blogspot' or platform_name=='Wordpress' or platform_name=='Custom'`` * collage_product.html - renders a single product shown in the feed of items when picking items for a *carousel* or *collage* * instagram.html - renders an :class:`debra.models.Posts` instance having ``platform_name=='Instagram'`` * product.html - renders an instance of a :class:`debra.models.ProductMode", "class_name": "FeedItem", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FeedItem:\n \"\"\"A feed item is an encapsulation of an item in the feed, which may be displayed in a different way depending on the type of item it is (i.e. Twitter tweet, Instagram Page, Blog post, etc). 
The available files we have for rendering are: * blog_post.html - renders a :class:`debra.models.Posts` instance having ``platform_name=='Blogspot' or platform_name=='Wordpress' or platform_name=='Custom'`` * collage_product.html - renders a single product shown in the feed of items when picking items for a *carousel* or *collage* * instagram.html - renders an :class:`debra.models.Posts` instance having ``platform_name=='Instagram'`` * product.html - renders an instance of a :class:`debra.models.ProductMode\"\"\"\n\n def __init__(self, item, request, users_feed=False, tpl=None):\n \"\"\":param item: either a :class:`debra.models.ProductModelShelfMap` or :class:`debra.models.Posts` instance to render :param request: an instance of a ``HttpRequest`` :param users_feed: if True, the user is viewing their own feed, False means they're looking at someone elses feed or the *inspiration feed* :param tpl: the name of the template file to use for rendering. If not set, the tpl is dynamically set based on the ``item`` type.\"\"\"\n <|body_0|>\n\n def item_type(self):\n \"\"\":return: a string representing the type of this **FeedItem**'s item\"\"\"\n <|body_1|>\n\n def render(self):\n \"\"\":return: this **FeedItem**'s item rendered to a string\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.item = item\n self.request = request\n self.users_feed = users_feed\n self.tpl = tpl\n self.tpl_base = 'widgets/items_feed_views/feed_item_views/'\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n noop = self.item.platform\n return self.POST\n except AttributeError:\n return self.PRODUCT\n<|end_body_1|>\n\n<|body_start_2|>\n item_type = self.item_type()\n tpl = ''\n context = {'item': self.item, 'user': self.item.influencer.shelf_user.userprofile if item_type == self.POST else self.item.user_prof, 'users_feed': self.users_feed}\n if item_type == self.POST:\n platform_name = self.item.platform.platform_name\n if platform_name == 'Twitter':\n tpl = 'tweet.html'\n elif platform_name == 'Instagram':\n tpl = 'instagram.html'\n elif self.item.post_type == 'blog':\n tpl = 'blog_post.html'\n context['post_tags'] = self.item.brand_tags.split(',') if self.item.brand_tags else []\n context['products'] = self.item.pmsms_for_self\n else:\n tpl = 'product.html'\n context['item_owner'] = self.item.get_original_instance().user_prof\n tpl = self.tpl or tpl\n return render_to_string('{base}{tpl}'.format(base=self.tpl_base, tpl=tpl), context, context_instance=RequestContext(self.request))\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000347", "length_bytes": 25658, "license_type": "no_license", "methods": [{"docstring": ":param item: either a :class:`debra.models.ProductModelShelfMap` or :class:`debra.models.Posts` instance to render :param request: an instance of a ``HttpRequest`` :param users_feed: if True, the user is viewing their own feed, False means they're looking at someone elses feed or the *inspiration feed* :param tpl: the name of the template file to use for rendering. 
If not set, the tpl is dynamically set based on the ``item`` type.", "name": "__init__", "signature": "def __init__(self, item, request, users_feed=False, tpl=None)"}, {"docstring": ":return: a string representing the type of this **FeedItem**'s item", "name": "item_type", "signature": "def item_type(self)"}, {"docstring": ":return: this **FeedItem**'s item rendered to a string", "name": "render", "signature": "def render(self)"}], "n_methods": 3, "prompt": "Implement the Python class `FeedItem` described below.\n\nClass description:\nA feed item is an encapsulation of an item in the feed, which may be displayed in a different way depending on the type of item it is (i.e. Twitter tweet, Instagram Page, Blog post, etc). The available files we have for rendering are: * blog_post.html - renders a :class:`debra.models.Posts` instance having ``platform_name=='Blogspot' or platform_name=='Wordpress' or platform_name=='Custom'`` * collage_product.html - renders a single product shown in the feed of items when picking items for a *carousel* or *collage* * instagram.html - renders an :class:`debra.models.Posts` instance having ``platform_name=='Instagram'`` * product.html - renders an instance of a :class:`debra.models.ProductMode\n\nMethod signatures and docstrings:\n- def __init__(self, item, request, users_feed=False, tpl=None): :param item: either a :class:`debra.models.ProductModelShelfMap` or :class:`debra.models.Posts` instance to render :param request: an instance of a ``HttpRequest`` :param users_feed: if True, the user is viewing their own feed, False means they're looking at someone elses feed or the *inspiration feed* :param tpl: the name of the template file to use for rendering. If not set, the tpl is dynamically set based on the ``item`` type.\n- def item_type(self): :return: a string representing the type of this **FeedItem**'s item\n- def render(self): :return: this **FeedItem**'s item rendered to a string", "prompted_full_text": "Implement the Python class `FeedItem` described below.\n\nClass description:\nA feed item is an encapsulation of an item in the feed, which may be displayed in a different way depending on the type of item it is (i.e. Twitter tweet, Instagram Page, Blog post, etc). The available files we have for rendering are: * blog_post.html - renders a :class:`debra.models.Posts` instance having ``platform_name=='Blogspot' or platform_name=='Wordpress' or platform_name=='Custom'`` * collage_product.html - renders a single product shown in the feed of items when picking items for a *carousel* or *collage* * instagram.html - renders an :class:`debra.models.Posts` instance having ``platform_name=='Instagram'`` * product.html - renders an instance of a :class:`debra.models.ProductMode\n\nMethod signatures and docstrings:\n- def __init__(self, item, request, users_feed=False, tpl=None): :param item: either a :class:`debra.models.ProductModelShelfMap` or :class:`debra.models.Posts` instance to render :param request: an instance of a ``HttpRequest`` :param users_feed: if True, the user is viewing their own feed, False means they're looking at someone elses feed or the *inspiration feed* :param tpl: the name of the template file to use for rendering. 
If not set, the tpl is dynamically set based on the ``item`` type.\n- def item_type(self): :return: a string representing the type of this **FeedItem**'s item\n- def render(self): :return: this **FeedItem**'s item rendered to a string\n\n<|skeleton|>\nclass FeedItem:\n \"\"\"A feed item is an encapsulation of an item in the feed, which may be displayed in a different way depending on the type of item it is (i.e. Twitter tweet, Instagram Page, Blog post, etc). The available files we have for rendering are: * blog_post.html - renders a :class:`debra.models.Posts` instance having ``platform_name=='Blogspot' or platform_name=='Wordpress' or platform_name=='Custom'`` * collage_product.html - renders a single product shown in the feed of items when picking items for a *carousel* or *collage* * instagram.html - renders an :class:`debra.models.Posts` instance having ``platform_name=='Instagram'`` * product.html - renders an instance of a :class:`debra.models.ProductMode\"\"\"\n\n def __init__(self, item, request, users_feed=False, tpl=None):\n \"\"\":param item: either a :class:`debra.models.ProductModelShelfMap` or :class:`debra.models.Posts` instance to render :param request: an instance of a ``HttpRequest`` :param users_feed: if True, the user is viewing their own feed, False means they're looking at someone elses feed or the *inspiration feed* :param tpl: the name of the template file to use for rendering. If not set, the tpl is dynamically set based on the ``item`` type.\"\"\"\n <|body_0|>\n\n def item_type(self):\n \"\"\":return: a string representing the type of this **FeedItem**'s item\"\"\"\n <|body_1|>\n\n def render(self):\n \"\"\":return: this **FeedItem**'s item rendered to a string\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.item = item\n self.request = request\n self.users_feed = users_feed\n self.tpl = tpl\n self.tpl_base = 'widgets/items_feed_views/feed_item_views/'\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n noop = self.item.platform\n return self.POST\n except AttributeError:\n return self.PRODUCT\n<|end_body_1|>\n\n<|body_start_2|>\n item_type = self.item_type()\n tpl = ''\n context = {'item': self.item, 'user': self.item.influencer.shelf_user.userprofile if item_type == self.POST else self.item.user_prof, 'users_feed': self.users_feed}\n if item_type == self.POST:\n platform_name = self.item.platform.platform_name\n if platform_name == 'Twitter':\n tpl = 'tweet.html'\n elif platform_name == 'Instagram':\n tpl = 'instagram.html'\n elif self.item.post_type == 'blog':\n tpl = 'blog_post.html'\n context['post_tags'] = self.item.brand_tags.split(',') if self.item.brand_tags else []\n context['products'] = self.item.pmsms_for_self\n else:\n tpl = 'product.html'\n context['item_owner'] = self.item.get_original_instance().user_prof\n tpl = self.tpl or tpl\n return render_to_string('{base}{tpl}'.format(base=self.tpl_base, tpl=tpl), context, context_instance=RequestContext(self.request))\n<|end_body_2|>\n", "revision_id": "2f15c4ddd8bbb112c407d222ae48746b626c674f", "skeleton": "<|skeleton|>\nclass FeedItem:\n \"\"\"A feed item is an encapsulation of an item in the feed, which may be displayed in a different way depending on the type of item it is (i.e. Twitter tweet, Instagram Page, Blog post, etc). 
The available files we have for rendering are: * blog_post.html - renders a :class:`debra.models.Posts` instance having ``platform_name=='Blogspot' or platform_name=='Wordpress' or platform_name=='Custom'`` * collage_product.html - renders a single product shown in the feed of items when picking items for a *carousel* or *collage* * instagram.html - renders an :class:`debra.models.Posts` instance having ``platform_name=='Instagram'`` * product.html - renders an instance of a :class:`debra.models.ProductMode\"\"\"\n\n def __init__(self, item, request, users_feed=False, tpl=None):\n \"\"\":param item: either a :class:`debra.models.ProductModelShelfMap` or :class:`debra.models.Posts` instance to render :param request: an instance of a ``HttpRequest`` :param users_feed: if True, the user is viewing their own feed, False means they're looking at someone elses feed or the *inspiration feed* :param tpl: the name of the template file to use for rendering. If not set, the tpl is dynamically set based on the ``item`` type.\"\"\"\n <|body_0|>\n\n def item_type(self):\n \"\"\":return: a string representing the type of this **FeedItem**'s item\"\"\"\n <|body_1|>\n\n def render(self):\n \"\"\":return: this **FeedItem**'s item rendered to a string\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class FeedItem:\n \"\"\"A feed item is an encapsulation of an item in the feed, which may be displayed in a different way depending on the type of item it is (i.e. Twitter tweet, Instagram Page, Blog post, etc). The available files we have for rendering are: * blog_post.html - renders a :class:`debra.models.Posts` instance having ``platform_name=='Blogspot' or platform_name=='Wordpress' or platform_name=='Custom'`` * collage_product.html - renders a single product shown in the feed of items when picking items for a *carousel* or *collage* * instagram.html - renders an :class:`debra.models.Posts` instance having ``platform_name=='Instagram'`` * product.html - renders an instance of a :class:`debra.models.ProductMode\"\"\"\n\n def __init__(self, item, request, users_feed=False, tpl=None):\n \"\"\":param item: either a :class:`debra.models.ProductModelShelfMap` or :class:`debra.models.Posts` instance to render :param request: an instance of a ``HttpRequest`` :param users_feed: if True, the user is viewing their own feed, False means they're looking at someone elses feed or the *inspiration feed* :param tpl: the name of the template file to use for rendering. 
If not set, the tpl is dynamically set based on the ``item`` type.\"\"\"\n self.item = item\n self.request = request\n self.users_feed = users_feed\n self.tpl = tpl\n self.tpl_base = 'widgets/items_feed_views/feed_item_views/'\n\n def item_type(self):\n \"\"\":return: a string representing the type of this **FeedItem**'s item\"\"\"\n try:\n noop = self.item.platform\n return self.POST\n except AttributeError:\n return self.PRODUCT\n\n def render(self):\n \"\"\":return: this **FeedItem**'s item rendered to a string\"\"\"\n item_type = self.item_type()\n tpl = ''\n context = {'item': self.item, 'user': self.item.influencer.shelf_user.userprofile if item_type == self.POST else self.item.user_prof, 'users_feed': self.users_feed}\n if item_type == self.POST:\n platform_name = self.item.platform.platform_name\n if platform_name == 'Twitter':\n tpl = 'tweet.html'\n elif platform_name == 'Instagram':\n tpl = 'instagram.html'\n elif self.item.post_type == 'blog':\n tpl = 'blog_post.html'\n context['post_tags'] = self.item.brand_tags.split(',') if self.item.brand_tags else []\n context['products'] = self.item.pmsms_for_self\n else:\n tpl = 'product.html'\n context['item_owner'] = self.item.get_original_instance().user_prof\n tpl = self.tpl or tpl\n return render_to_string('{base}{tpl}'.format(base=self.tpl_base, tpl=tpl), context, context_instance=RequestContext(self.request))\n", "source": "the_stack_v2_python_sparse", "source_path": "Projects/miami_metro/debra/widgets.py", "source_repo": "TopWebGhost/Angular-Influencer", "split": "val", "star_events_count": 1}
{"blob_id": "4069cd2c18ba1fc1e07b14ba1b053564b427db9d", "bodies": ["self.model = model\nself.y = y\nself.p0 = p0\nself.μs = [μ0]\nself.Fμs = [0]\nself.covs = [prior.cov]\nself.prior = prior\nself.obserr = obs_error\nself.counter = 0\nself.obs_measures = []\nself.state_measures = []\nself.costs = []\nself.γs = []", "μ = self.μs[-1]\nFμ, jac = self.model(μ, self.p0)\nrhs = jac.T @ self.obserr.covi @ (self.y - Fμ - self.obserr.mean) - self.prior.covi @ (μ - self.prior.mean)\ncovi = self.prior.covi + jac.T @ self.obserr.covi @ jac\nlhs = covi + γ * self.prior.covi\ndiff = np.linalg.solve(lhs, rhs)\nif only is not None:\n diff_ = diff\n diff = np.zeros_like(diff)\n diff[only] = diff_[only]\nself.μs.append(μ + diff)\nself.Fμs.append(Fμ)\nself.covs.append(np.linalg.inv(covi))\nself.counter += 1\nself.γs.append(γ)\nself.state_measures.append(float(diff.T @ covi @ diff))\nm = self.obserr.cov @ np.linalg.inv(jac @ self.prior.cov @ jac.T + self.obserr.cov) @ self.obserr.cov\nd = self.Fμs[-2] - self.Fμs[-1]\nself.obs_measures.append(float(d.T @ m @ d))\nv1 = self.y - Fμ - self.obserr.mean\nv2 = μ - self.prior.mean\ncost = v1.T @ self.obserr.covi @ v1 + v2.T @ self.prior.covi @ v2\nself.costs.append(float(cost))"], "bodies_text": "<|body_start_0|>\n self.model = model\n self.y = y\n self.p0 = p0\n self.μs = [μ0]\n self.Fμs = [0]\n self.covs = [prior.cov]\n self.prior = prior\n self.obserr = obs_error\n self.counter = 0\n self.obs_measures = []\n self.state_measures = []\n self.costs = []\n self.γs = []\n<|end_body_0|>\n\n<|body_start_1|>\n μ = self.μs[-1]\n Fμ, jac = self.model(μ, self.p0)\n rhs = jac.T @ self.obserr.covi @ (self.y - Fμ - self.obserr.mean) - self.prior.covi @ (μ - self.prior.mean)\n covi = self.prior.covi + jac.T @ self.obserr.covi @ jac\n lhs = covi + γ * self.prior.covi\n diff = np.linalg.solve(lhs, rhs)\n if only is not None:\n diff_ = diff\n diff = np.zeros_like(diff)\n diff[only] = diff_[only]\n self.μs.append(μ + diff)\n self.Fμs.append(Fμ)\n self.covs.append(np.linalg.inv(covi))\n self.counter += 1\n self.γs.append(γ)\n self.state_measures.append(float(diff.T @ covi @ diff))\n m = self.obserr.cov @ np.linalg.inv(jac @ self.prior.cov @ jac.T + self.obserr.cov) @ self.obserr.cov\n d = self.Fμs[-2] - self.Fμs[-1]\n self.obs_measures.append(float(d.T @ m @ d))\n v1 = self.y - Fμ - self.obserr.mean\n v2 = μ - self.prior.mean\n cost = v1.T @ self.obserr.covi @ v1 + v2.T @ self.prior.covi @ v2\n self.costs.append(float(cost))\n<|end_body_1|>\n", "class_docstring": "Iteration helper for optimal estimation retrievals. Automatically evaluates cost function values (.costs), observation vector distances (.obs_measures) and state vector distances (.state_measures) for determination of convergence.", "class_name": "OptimalEstimationRetrieval", "detected_licenses": ["MIT", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass OptimalEstimationRetrieval:\n \"\"\"Iteration helper for optimal estimation retrievals. Automatically evaluates cost function values (.costs), observation vector distances (.obs_measures) and state vector distances (.state_measures) for determination of convergence.\"\"\"\n\n def __init__(self, *, model, y, p0, μ0, prior, obs_error):\n \"\"\"Set up an optimal estimation retrieval. z: retrival grid model: forward model (accepts state vector and surface pressure, returns simulated observation and Jacobian) params: a sequence of parameters to control the Levenberg-Marquard minimization. 
Last value is repeated if sequence is too short. y: observation vector p0: surface pressure in hPa μ0: first guess of state vector prior: prior distribution of atmospheric state obs_error: observation/model error distribution\"\"\"\n <|body_0|>\n\n def iterate(self, γ, only=None):\n \"\"\"Levenberg-Marquard step with 5.36 from Rodgers (2000). This method does not update γ, instead the current γ has to be specified during the method call. The used value of γ is however added to .γs for later reference. The 'only' parameter is just for test purposes. Use the specialized Virtual HATPROs instead.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.model = model\n self.y = y\n self.p0 = p0\n self.μs = [μ0]\n self.Fμs = [0]\n self.covs = [prior.cov]\n self.prior = prior\n self.obserr = obs_error\n self.counter = 0\n self.obs_measures = []\n self.state_measures = []\n self.costs = []\n self.γs = []\n<|end_body_0|>\n\n<|body_start_1|>\n μ = self.μs[-1]\n Fμ, jac = self.model(μ, self.p0)\n rhs = jac.T @ self.obserr.covi @ (self.y - Fμ - self.obserr.mean) - self.prior.covi @ (μ - self.prior.mean)\n covi = self.prior.covi + jac.T @ self.obserr.covi @ jac\n lhs = covi + γ * self.prior.covi\n diff = np.linalg.solve(lhs, rhs)\n if only is not None:\n diff_ = diff\n diff = np.zeros_like(diff)\n diff[only] = diff_[only]\n self.μs.append(μ + diff)\n self.Fμs.append(Fμ)\n self.covs.append(np.linalg.inv(covi))\n self.counter += 1\n self.γs.append(γ)\n self.state_measures.append(float(diff.T @ covi @ diff))\n m = self.obserr.cov @ np.linalg.inv(jac @ self.prior.cov @ jac.T + self.obserr.cov) @ self.obserr.cov\n d = self.Fμs[-2] - self.Fμs[-1]\n self.obs_measures.append(float(d.T @ m @ d))\n v1 = self.y - Fμ - self.obserr.mean\n v2 = μ - self.prior.mean\n cost = v1.T @ self.obserr.covi @ v1 + v2.T @ self.prior.covi @ v2\n self.costs.append(float(cost))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000348", "length_bytes": 11637, "license_type": "permissive", "methods": [{"docstring": "Set up an optimal estimation retrieval. z: retrival grid model: forward model (accepts state vector and surface pressure, returns simulated observation and Jacobian) params: a sequence of parameters to control the Levenberg-Marquard minimization. Last value is repeated if sequence is too short. y: observation vector p0: surface pressure in hPa μ0: first guess of state vector prior: prior distribution of atmospheric state obs_error: observation/model error distribution", "name": "__init__", "signature": "def __init__(self, *, model, y, p0, μ0, prior, obs_error)"}, {"docstring": "Levenberg-Marquard step with 5.36 from Rodgers (2000). This method does not update γ, instead the current γ has to be specified during the method call. The used value of γ is however added to .γs for later reference. The 'only' parameter is just for test purposes. Use the specialized Virtual HATPROs instead.", "name": "iterate", "signature": "def iterate(self, γ, only=None)"}], "n_methods": 2, "prompt": "Implement the Python class `OptimalEstimationRetrieval` described below.\n\nClass description:\nIteration helper for optimal estimation retrievals. Automatically evaluates cost function values (.costs), observation vector distances (.obs_measures) and state vector distances (.state_measures) for determination of convergence.\n\nMethod signatures and docstrings:\n- def __init__(self, *, model, y, p0, μ0, prior, obs_error): Set up an optimal estimation retrieval. 
z: retrival grid model: forward model (accepts state vector and surface pressure, returns simulated observation and Jacobian) params: a sequence of parameters to control the Levenberg-Marquard minimization. Last value is repeated if sequence is too short. y: observation vector p0: surface pressure in hPa μ0: first guess of state vector prior: prior distribution of atmospheric state obs_error: observation/model error distribution\n- def iterate(self, γ, only=None): Levenberg-Marquard step with 5.36 from Rodgers (2000). This method does not update γ, instead the current γ has to be specified during the method call. The used value of γ is however added to .γs for later reference. The 'only' parameter is just for test purposes. Use the specialized Virtual HATPROs instead.", "prompted_full_text": "Implement the Python class `OptimalEstimationRetrieval` described below.\n\nClass description:\nIteration helper for optimal estimation retrievals. Automatically evaluates cost function values (.costs), observation vector distances (.obs_measures) and state vector distances (.state_measures) for determination of convergence.\n\nMethod signatures and docstrings:\n- def __init__(self, *, model, y, p0, μ0, prior, obs_error): Set up an optimal estimation retrieval. z: retrival grid model: forward model (accepts state vector and surface pressure, returns simulated observation and Jacobian) params: a sequence of parameters to control the Levenberg-Marquard minimization. Last value is repeated if sequence is too short. y: observation vector p0: surface pressure in hPa μ0: first guess of state vector prior: prior distribution of atmospheric state obs_error: observation/model error distribution\n- def iterate(self, γ, only=None): Levenberg-Marquard step with 5.36 from Rodgers (2000). This method does not update γ, instead the current γ has to be specified during the method call. The used value of γ is however added to .γs for later reference. The 'only' parameter is just for test purposes. Use the specialized Virtual HATPROs instead.\n\n<|skeleton|>\nclass OptimalEstimationRetrieval:\n \"\"\"Iteration helper for optimal estimation retrievals. Automatically evaluates cost function values (.costs), observation vector distances (.obs_measures) and state vector distances (.state_measures) for determination of convergence.\"\"\"\n\n def __init__(self, *, model, y, p0, μ0, prior, obs_error):\n \"\"\"Set up an optimal estimation retrieval. z: retrival grid model: forward model (accepts state vector and surface pressure, returns simulated observation and Jacobian) params: a sequence of parameters to control the Levenberg-Marquard minimization. Last value is repeated if sequence is too short. y: observation vector p0: surface pressure in hPa μ0: first guess of state vector prior: prior distribution of atmospheric state obs_error: observation/model error distribution\"\"\"\n <|body_0|>\n\n def iterate(self, γ, only=None):\n \"\"\"Levenberg-Marquard step with 5.36 from Rodgers (2000). This method does not update γ, instead the current γ has to be specified during the method call. The used value of γ is however added to .γs for later reference. The 'only' parameter is just for test purposes. 
Use the specialized Virtual HATPROs instead.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.model = model\n self.y = y\n self.p0 = p0\n self.μs = [μ0]\n self.Fμs = [0]\n self.covs = [prior.cov]\n self.prior = prior\n self.obserr = obs_error\n self.counter = 0\n self.obs_measures = []\n self.state_measures = []\n self.costs = []\n self.γs = []\n<|end_body_0|>\n\n<|body_start_1|>\n μ = self.μs[-1]\n Fμ, jac = self.model(μ, self.p0)\n rhs = jac.T @ self.obserr.covi @ (self.y - Fμ - self.obserr.mean) - self.prior.covi @ (μ - self.prior.mean)\n covi = self.prior.covi + jac.T @ self.obserr.covi @ jac\n lhs = covi + γ * self.prior.covi\n diff = np.linalg.solve(lhs, rhs)\n if only is not None:\n diff_ = diff\n diff = np.zeros_like(diff)\n diff[only] = diff_[only]\n self.μs.append(μ + diff)\n self.Fμs.append(Fμ)\n self.covs.append(np.linalg.inv(covi))\n self.counter += 1\n self.γs.append(γ)\n self.state_measures.append(float(diff.T @ covi @ diff))\n m = self.obserr.cov @ np.linalg.inv(jac @ self.prior.cov @ jac.T + self.obserr.cov) @ self.obserr.cov\n d = self.Fμs[-2] - self.Fμs[-1]\n self.obs_measures.append(float(d.T @ m @ d))\n v1 = self.y - Fμ - self.obserr.mean\n v2 = μ - self.prior.mean\n cost = v1.T @ self.obserr.covi @ v1 + v2.T @ self.prior.covi @ v2\n self.costs.append(float(cost))\n<|end_body_1|>\n", "revision_id": "52918f8452b6459cf19fc43a3103f2e37215fdae", "skeleton": "<|skeleton|>\nclass OptimalEstimationRetrieval:\n \"\"\"Iteration helper for optimal estimation retrievals. Automatically evaluates cost function values (.costs), observation vector distances (.obs_measures) and state vector distances (.state_measures) for determination of convergence.\"\"\"\n\n def __init__(self, *, model, y, p0, μ0, prior, obs_error):\n \"\"\"Set up an optimal estimation retrieval. z: retrival grid model: forward model (accepts state vector and surface pressure, returns simulated observation and Jacobian) params: a sequence of parameters to control the Levenberg-Marquard minimization. Last value is repeated if sequence is too short. y: observation vector p0: surface pressure in hPa μ0: first guess of state vector prior: prior distribution of atmospheric state obs_error: observation/model error distribution\"\"\"\n <|body_0|>\n\n def iterate(self, γ, only=None):\n \"\"\"Levenberg-Marquard step with 5.36 from Rodgers (2000). This method does not update γ, instead the current γ has to be specified during the method call. The used value of γ is however added to .γs for later reference. The 'only' parameter is just for test purposes. Use the specialized Virtual HATPROs instead.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class OptimalEstimationRetrieval:\n \"\"\"Iteration helper for optimal estimation retrievals. Automatically evaluates cost function values (.costs), observation vector distances (.obs_measures) and state vector distances (.state_measures) for determination of convergence.\"\"\"\n\n def __init__(self, *, model, y, p0, μ0, prior, obs_error):\n \"\"\"Set up an optimal estimation retrieval. z: retrival grid model: forward model (accepts state vector and surface pressure, returns simulated observation and Jacobian) params: a sequence of parameters to control the Levenberg-Marquard minimization. Last value is repeated if sequence is too short. 
y: observation vector p0: surface pressure in hPa μ0: first guess of state vector prior: prior distribution of atmospheric state obs_error: observation/model error distribution\"\"\"\n self.model = model\n self.y = y\n self.p0 = p0\n self.μs = [μ0]\n self.Fμs = [0]\n self.covs = [prior.cov]\n self.prior = prior\n self.obserr = obs_error\n self.counter = 0\n self.obs_measures = []\n self.state_measures = []\n self.costs = []\n self.γs = []\n\n def iterate(self, γ, only=None):\n \"\"\"Levenberg-Marquard step with 5.36 from Rodgers (2000). This method does not update γ, instead the current γ has to be specified during the method call. The used value of γ is however added to .γs for later reference. The 'only' parameter is just for test purposes. Use the specialized Virtual HATPROs instead.\"\"\"\n μ = self.μs[-1]\n Fμ, jac = self.model(μ, self.p0)\n rhs = jac.T @ self.obserr.covi @ (self.y - Fμ - self.obserr.mean) - self.prior.covi @ (μ - self.prior.mean)\n covi = self.prior.covi + jac.T @ self.obserr.covi @ jac\n lhs = covi + γ * self.prior.covi\n diff = np.linalg.solve(lhs, rhs)\n if only is not None:\n diff_ = diff\n diff = np.zeros_like(diff)\n diff[only] = diff_[only]\n self.μs.append(μ + diff)\n self.Fμs.append(Fμ)\n self.covs.append(np.linalg.inv(covi))\n self.counter += 1\n self.γs.append(γ)\n self.state_measures.append(float(diff.T @ covi @ diff))\n m = self.obserr.cov @ np.linalg.inv(jac @ self.prior.cov @ jac.T + self.obserr.cov) @ self.obserr.cov\n d = self.Fμs[-2] - self.Fμs[-1]\n self.obs_measures.append(float(d.T @ m @ d))\n v1 = self.y - Fμ - self.obserr.mean\n v2 = μ - self.prior.mean\n cost = v1.T @ self.obserr.covi @ v1 + v2.T @ self.prior.covi @ v2\n self.costs.append(float(cost))\n", "source": "the_stack_v2_python_sparse", "source_path": "software/optimal_estimation.py", "source_repo": "chpolste/MScAtmosphericSciences", "split": "val", "star_events_count": 3}
{"blob_id": "4bf8c143872fed0f9f4bc0f1e559ba9a345fa7a2", "bodies": ["super().__init__()\nself.query_layer = nn.Linear(50 * config.hidden_size, config.hidden_size, bias=True)\nself.key_layer = nn.Linear(config.hidden_size, config.hidden_size, bias=True)\nself.value_layer = nn.Linear(config.hidden_size, config.hidden_size, bias=True)\nself.dropout = nn.Dropout(0.1)", "embedded_query = x * (token_type_ids.unsqueeze(dim=-1) == 0)\nembedded_query = self.dropout(F.relu(embedded_query))\nembedded_query = embedded_query[:, :50, :]\nembedded_query = embedded_query.reshape((x.shape[0], 1, -1))\nembedded_query = self.query_layer(embedded_query)\nembedded_key = x * (token_type_ids.unsqueeze(dim=-1) == 1)\nembedded_key = self.key_layer(embedded_key)\nattention_rate = torch.matmul(embedded_key, torch.transpose(embedded_query, 1, 2))\nattention_rate = attention_rate / math.sqrt(embedded_key.shape[-1])\nattention_rate = attention_rate / 10\nattention_rate = F.softmax(attention_rate, 1)\nembedded_value = self.value_layer(x)\nembedded_value = embedded_value * attention_rate\nreturn embedded_value"], "bodies_text": "<|body_start_0|>\n super().__init__()\n self.query_layer = nn.Linear(50 * config.hidden_size, config.hidden_size, bias=True)\n self.key_layer = nn.Linear(config.hidden_size, config.hidden_size, bias=True)\n self.value_layer = nn.Linear(config.hidden_size, config.hidden_size, bias=True)\n self.dropout = nn.Dropout(0.1)\n<|end_body_0|>\n\n<|body_start_1|>\n embedded_query = x * (token_type_ids.unsqueeze(dim=-1) == 0)\n embedded_query = self.dropout(F.relu(embedded_query))\n embedded_query = embedded_query[:, :50, :]\n embedded_query = embedded_query.reshape((x.shape[0], 1, -1))\n embedded_query = self.query_layer(embedded_query)\n embedded_key = x * (token_type_ids.unsqueeze(dim=-1) == 1)\n embedded_key = self.key_layer(embedded_key)\n attention_rate = torch.matmul(embedded_key, torch.transpose(embedded_query, 1, 2))\n attention_rate = attention_rate / math.sqrt(embedded_key.shape[-1])\n attention_rate = attention_rate / 10\n attention_rate = F.softmax(attention_rate, 1)\n embedded_value = self.value_layer(x)\n embedded_value = embedded_value * attention_rate\n return embedded_value\n<|end_body_1|>\n", "class_docstring": "Attention for query embedding", "class_name": "AttentionLayer", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AttentionLayer:\n \"\"\"Attention for query embedding\"\"\"\n\n def __init__(self, config):\n \"\"\"Args: config (ModelArguments): ModelArguments\"\"\"\n <|body_0|>\n\n def forward(self, x: torch.Tensor, token_type_ids: torch.Tensor) -> torch.Tensor:\n \"\"\"Args: x (torch.Tensor): Layer input token_type_ids (torch.Tensor): Token type ids of input_ids Returns: torch.Tensor: embedded value (batch_size * max_seq_legth * hidden_size)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.query_layer = nn.Linear(50 * config.hidden_size, config.hidden_size, bias=True)\n self.key_layer = nn.Linear(config.hidden_size, config.hidden_size, bias=True)\n self.value_layer = nn.Linear(config.hidden_size, config.hidden_size, bias=True)\n self.dropout = nn.Dropout(0.1)\n<|end_body_0|>\n\n<|body_start_1|>\n embedded_query = x * (token_type_ids.unsqueeze(dim=-1) == 0)\n embedded_query = self.dropout(F.relu(embedded_query))\n embedded_query = embedded_query[:, :50, :]\n embedded_query = embedded_query.reshape((x.shape[0], 1, -1))\n embedded_query = self.query_layer(embedded_query)\n 
embedded_key = x * (token_type_ids.unsqueeze(dim=-1) == 1)\n embedded_key = self.key_layer(embedded_key)\n attention_rate = torch.matmul(embedded_key, torch.transpose(embedded_query, 1, 2))\n attention_rate = attention_rate / math.sqrt(embedded_key.shape[-1])\n attention_rate = attention_rate / 10\n attention_rate = F.softmax(attention_rate, 1)\n embedded_value = self.value_layer(x)\n embedded_value = embedded_value * attention_rate\n return embedded_value\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000349", "length_bytes": 4223, "license_type": "permissive", "methods": [{"docstring": "Args: config (ModelArguments): ModelArguments", "name": "__init__", "signature": "def __init__(self, config)"}, {"docstring": "Args: x (torch.Tensor): Layer input token_type_ids (torch.Tensor): Token type ids of input_ids Returns: torch.Tensor: embedded value (batch_size * max_seq_legth * hidden_size)", "name": "forward", "signature": "def forward(self, x: torch.Tensor, token_type_ids: torch.Tensor) -> torch.Tensor"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_035400", "prompt": "Implement the Python class `AttentionLayer` described below.\n\nClass description:\nAttention for query embedding\n\nMethod signatures and docstrings:\n- def __init__(self, config): Args: config (ModelArguments): ModelArguments\n- def forward(self, x: torch.Tensor, token_type_ids: torch.Tensor) -> torch.Tensor: Args: x (torch.Tensor): Layer input token_type_ids (torch.Tensor): Token type ids of input_ids Returns: torch.Tensor: embedded value (batch_size * max_seq_legth * hidden_size)", "prompted_full_text": "Implement the Python class `AttentionLayer` described below.\n\nClass description:\nAttention for query embedding\n\nMethod signatures and docstrings:\n- def __init__(self, config): Args: config (ModelArguments): ModelArguments\n- def forward(self, x: torch.Tensor, token_type_ids: torch.Tensor) -> torch.Tensor: Args: x (torch.Tensor): Layer input token_type_ids (torch.Tensor): Token type ids of input_ids Returns: torch.Tensor: embedded value (batch_size * max_seq_legth * hidden_size)\n\n<|skeleton|>\nclass AttentionLayer:\n \"\"\"Attention for query embedding\"\"\"\n\n def __init__(self, config):\n \"\"\"Args: config (ModelArguments): ModelArguments\"\"\"\n <|body_0|>\n\n def forward(self, x: torch.Tensor, token_type_ids: torch.Tensor) -> torch.Tensor:\n \"\"\"Args: x (torch.Tensor): Layer input token_type_ids (torch.Tensor): Token type ids of input_ids Returns: torch.Tensor: embedded value (batch_size * max_seq_legth * hidden_size)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.query_layer = nn.Linear(50 * config.hidden_size, config.hidden_size, bias=True)\n self.key_layer = nn.Linear(config.hidden_size, config.hidden_size, bias=True)\n self.value_layer = nn.Linear(config.hidden_size, config.hidden_size, bias=True)\n self.dropout = nn.Dropout(0.1)\n<|end_body_0|>\n\n<|body_start_1|>\n embedded_query = x * (token_type_ids.unsqueeze(dim=-1) == 0)\n embedded_query = self.dropout(F.relu(embedded_query))\n embedded_query = embedded_query[:, :50, :]\n embedded_query = embedded_query.reshape((x.shape[0], 1, -1))\n embedded_query = self.query_layer(embedded_query)\n embedded_key = x * (token_type_ids.unsqueeze(dim=-1) == 1)\n embedded_key = self.key_layer(embedded_key)\n attention_rate = torch.matmul(embedded_key, torch.transpose(embedded_query, 1, 2))\n attention_rate = attention_rate / math.sqrt(embedded_key.shape[-1])\n attention_rate = attention_rate 
/ 10\n attention_rate = F.softmax(attention_rate, 1)\n embedded_value = self.value_layer(x)\n embedded_value = embedded_value * attention_rate\n return embedded_value\n<|end_body_1|>\n", "revision_id": "ea60d7a7b0f22c9e2e3b71d1d80cc2f00805e3fa", "skeleton": "<|skeleton|>\nclass AttentionLayer:\n \"\"\"Attention for query embedding\"\"\"\n\n def __init__(self, config):\n \"\"\"Args: config (ModelArguments): ModelArguments\"\"\"\n <|body_0|>\n\n def forward(self, x: torch.Tensor, token_type_ids: torch.Tensor) -> torch.Tensor:\n \"\"\"Args: x (torch.Tensor): Layer input token_type_ids (torch.Tensor): Token type ids of input_ids Returns: torch.Tensor: embedded value (batch_size * max_seq_legth * hidden_size)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class AttentionLayer:\n \"\"\"Attention for query embedding\"\"\"\n\n def __init__(self, config):\n \"\"\"Args: config (ModelArguments): ModelArguments\"\"\"\n super().__init__()\n self.query_layer = nn.Linear(50 * config.hidden_size, config.hidden_size, bias=True)\n self.key_layer = nn.Linear(config.hidden_size, config.hidden_size, bias=True)\n self.value_layer = nn.Linear(config.hidden_size, config.hidden_size, bias=True)\n self.dropout = nn.Dropout(0.1)\n\n def forward(self, x: torch.Tensor, token_type_ids: torch.Tensor) -> torch.Tensor:\n \"\"\"Args: x (torch.Tensor): Layer input token_type_ids (torch.Tensor): Token type ids of input_ids Returns: torch.Tensor: embedded value (batch_size * max_seq_legth * hidden_size)\"\"\"\n embedded_query = x * (token_type_ids.unsqueeze(dim=-1) == 0)\n embedded_query = self.dropout(F.relu(embedded_query))\n embedded_query = embedded_query[:, :50, :]\n embedded_query = embedded_query.reshape((x.shape[0], 1, -1))\n embedded_query = self.query_layer(embedded_query)\n embedded_key = x * (token_type_ids.unsqueeze(dim=-1) == 1)\n embedded_key = self.key_layer(embedded_key)\n attention_rate = torch.matmul(embedded_key, torch.transpose(embedded_query, 1, 2))\n attention_rate = attention_rate / math.sqrt(embedded_key.shape[-1])\n attention_rate = attention_rate / 10\n attention_rate = F.softmax(attention_rate, 1)\n embedded_value = self.value_layer(x)\n embedded_value = embedded_value * attention_rate\n return embedded_value\n", "source": "the_stack_v2_python_sparse", "source_path": "solution/reader/architectures/modeling_utils.py", "source_repo": "boostcampaitech2/mrc-level2-nlp-14", "split": "val", "star_events_count": 7}
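Editor's sketch: driving the AttentionLayer above with dummy tensors. Two assumptions are made explicitly: the skeleton format strips base classes, so the source class is presumed to subclass nn.Module with math, torch, torch.nn as nn and torch.nn.functional as F imported in its module; and Cfg below is a stand-in for the repo's ModelArguments, of which only hidden_size is read. Note the layer hard-codes the query to the first 50 positions (token_type_id 0), so sequences must be at least 50 tokens long.

import torch

class Cfg:                 # hypothetical stand-in for ModelArguments
    hidden_size = 64

layer = AttentionLayer(Cfg())                          # assumes an nn.Module base
x = torch.randn(2, 128, Cfg.hidden_size)               # (batch, seq_len, hidden)
token_type_ids = torch.cat([torch.zeros(2, 50, dtype=torch.long),   # query part
                            torch.ones(2, 78, dtype=torch.long)], dim=1)
out = layer(x, token_type_ids)
print(out.shape)   # torch.Size([2, 128, 64]); attention_rate broadcasts as (2, 128, 1)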
{"blob_id": "296723e627096614f8c55abf90c049e54d29003b", "bodies": ["total_participants = len(participants)\nfirst_participant_index = 0\nremaining_days = []\nfor session_name, date_session_list in session_date_dict.items():\n current_participant_index = first_participant_index\n for session in date_session_list:\n current_participant = participants[current_participant_index]\n if session not in current_participant.leave_dates:\n current_participant.work_sessions.append(session)\n current_participant.session_count[session_name] += 1\n current_participant.total_working_sessions += 1\n current_participant.remaining_days -= 1\n else:\n remaining_days.append(session)\n current_participant_index = SequenceRosterAlgorithm.__get_next_index(total_participants, current_participant_index)\n first_participant_index = SequenceRosterAlgorithm.__get_start_index(total_participants, current_participant_index)\nreturn remaining_days", "next_index = current_index + 1\nif next_index == total_participants:\n next_index = 0\nreturn next_index", "next_start_index = current_start_index - 1\nif next_start_index <= 0:\n next_start_index = total_participants - 1\nreturn next_start_index"], "bodies_text": "<|body_start_0|>\n total_participants = len(participants)\n first_participant_index = 0\n remaining_days = []\n for session_name, date_session_list in session_date_dict.items():\n current_participant_index = first_participant_index\n for session in date_session_list:\n current_participant = participants[current_participant_index]\n if session not in current_participant.leave_dates:\n current_participant.work_sessions.append(session)\n current_participant.session_count[session_name] += 1\n current_participant.total_working_sessions += 1\n current_participant.remaining_days -= 1\n else:\n remaining_days.append(session)\n current_participant_index = SequenceRosterAlgorithm.__get_next_index(total_participants, current_participant_index)\n first_participant_index = SequenceRosterAlgorithm.__get_start_index(total_participants, current_participant_index)\n return remaining_days\n<|end_body_0|>\n\n<|body_start_1|>\n next_index = current_index + 1\n if next_index == total_participants:\n next_index = 0\n return next_index\n<|end_body_1|>\n\n<|body_start_2|>\n next_start_index = current_start_index - 1\n if next_start_index <= 0:\n next_start_index = total_participants - 1\n return next_start_index\n<|end_body_2|>\n", "class_docstring": "", "class_name": "SequenceRosterAlgorithm", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SequenceRosterAlgorithm:\n\n def set_work_list(self, participants: List[ModelService.get_participant_model_class()], session_date_dict: Dict):\n \"\"\"This method implements the algorithm\"\"\"\n <|body_0|>\n\n def __get_next_index(self, total_participants: int, current_index: int):\n \"\"\"This method gives the index of next participant to be selected in participants list\"\"\"\n <|body_1|>\n\n def __get_start_index(self, total_participants: int, current_start_index: int):\n \"\"\"This method gives the starting index of participant for each list\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n total_participants = len(participants)\n first_participant_index = 0\n remaining_days = []\n for session_name, date_session_list in session_date_dict.items():\n current_participant_index = first_participant_index\n for session in date_session_list:\n current_participant = participants[current_participant_index]\n if session not 
in current_participant.leave_dates:\n current_participant.work_sessions.append(session)\n current_participant.session_count[session_name] += 1\n current_participant.total_working_sessions += 1\n current_participant.remaining_days -= 1\n else:\n remaining_days.append(session)\n current_participant_index = SequenceRosterAlgorithm.__get_next_index(total_participants, current_participant_index)\n first_participant_index = SequenceRosterAlgorithm.__get_start_index(total_participants, current_participant_index)\n return remaining_days\n<|end_body_0|>\n\n<|body_start_1|>\n next_index = current_index + 1\n if next_index == total_participants:\n next_index = 0\n return next_index\n<|end_body_1|>\n\n<|body_start_2|>\n next_start_index = current_start_index - 1\n if next_start_index <= 0:\n next_start_index = total_participants - 1\n return next_start_index\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000350", "length_bytes": 2668, "license_type": "permissive", "methods": [{"docstring": "This method implements the algorithm", "name": "set_work_list", "signature": "def set_work_list(self, participants: List[ModelService.get_participant_model_class()], session_date_dict: Dict)"}, {"docstring": "This method gives the index of next participant to be selected in participants list", "name": "__get_next_index", "signature": "def __get_next_index(self, total_participants: int, current_index: int)"}, {"docstring": "This method gives the starting index of participant for each list", "name": "__get_start_index", "signature": "def __get_start_index(self, total_participants: int, current_start_index: int)"}], "n_methods": 3, "prompt": "Implement the Python class `SequenceRosterAlgorithm` described below.\n\nClass description:\nImplement the SequenceRosterAlgorithm class.\n\nMethod signatures and docstrings:\n- def set_work_list(self, participants: List[ModelService.get_participant_model_class()], session_date_dict: Dict): This method implements the algorithm\n- def __get_next_index(self, total_participants: int, current_index: int): This method gives the index of next participant to be selected in participants list\n- def __get_start_index(self, total_participants: int, current_start_index: int): This method gives the starting index of participant for each list", "prompted_full_text": "Implement the Python class `SequenceRosterAlgorithm` described below.\n\nClass description:\nImplement the SequenceRosterAlgorithm class.\n\nMethod signatures and docstrings:\n- def set_work_list(self, participants: List[ModelService.get_participant_model_class()], session_date_dict: Dict): This method implements the algorithm\n- def __get_next_index(self, total_participants: int, current_index: int): This method gives the index of next participant to be selected in participants list\n- def __get_start_index(self, total_participants: int, current_start_index: int): This method gives the starting index of participant for each list\n\n<|skeleton|>\nclass SequenceRosterAlgorithm:\n\n def set_work_list(self, participants: List[ModelService.get_participant_model_class()], session_date_dict: Dict):\n \"\"\"This method implements the algorithm\"\"\"\n <|body_0|>\n\n def __get_next_index(self, total_participants: int, current_index: int):\n \"\"\"This method gives the index of next participant to be selected in participants list\"\"\"\n <|body_1|>\n\n def __get_start_index(self, total_participants: int, current_start_index: int):\n \"\"\"This method gives the starting index of participant for each list\"\"\"\n 
<|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n total_participants = len(participants)\n first_participant_index = 0\n remaining_days = []\n for session_name, date_session_list in session_date_dict.items():\n current_participant_index = first_participant_index\n for session in date_session_list:\n current_participant = participants[current_participant_index]\n if session not in current_participant.leave_dates:\n current_participant.work_sessions.append(session)\n current_participant.session_count[session_name] += 1\n current_participant.total_working_sessions += 1\n current_participant.remaining_days -= 1\n else:\n remaining_days.append(session)\n current_participant_index = SequenceRosterAlgorithm.__get_next_index(total_participants, current_participant_index)\n first_participant_index = SequenceRosterAlgorithm.__get_start_index(total_participants, current_participant_index)\n return remaining_days\n<|end_body_0|>\n\n<|body_start_1|>\n next_index = current_index + 1\n if next_index == total_participants:\n next_index = 0\n return next_index\n<|end_body_1|>\n\n<|body_start_2|>\n next_start_index = current_start_index - 1\n if next_start_index <= 0:\n next_start_index = total_participants - 1\n return next_start_index\n<|end_body_2|>\n", "revision_id": "23131aff6e0c20497bde632ed32aadcad0947e56", "skeleton": "<|skeleton|>\nclass SequenceRosterAlgorithm:\n\n def set_work_list(self, participants: List[ModelService.get_participant_model_class()], session_date_dict: Dict):\n \"\"\"This method implements the algorithm\"\"\"\n <|body_0|>\n\n def __get_next_index(self, total_participants: int, current_index: int):\n \"\"\"This method gives the index of next participant to be selected in participants list\"\"\"\n <|body_1|>\n\n def __get_start_index(self, total_participants: int, current_start_index: int):\n \"\"\"This method gives the starting index of participant for each list\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SequenceRosterAlgorithm:\n def set_work_list(self, participants: List[ModelService.get_participant_model_class()], session_date_dict: Dict):\n \"\"\"This method implements the algorithm\"\"\"\n total_participants = len(participants)\n first_participant_index = 0\n remaining_days = []\n for session_name, date_session_list in session_date_dict.items():\n current_participant_index = first_participant_index\n for session in date_session_list:\n current_participant = participants[current_participant_index]\n if session not in current_participant.leave_dates:\n current_participant.work_sessions.append(session)\n current_participant.session_count[session_name] += 1\n current_participant.total_working_sessions += 1\n current_participant.remaining_days -= 1\n else:\n remaining_days.append(session)\n current_participant_index = SequenceRosterAlgorithm.__get_next_index(total_participants, current_participant_index)\n first_participant_index = SequenceRosterAlgorithm.__get_start_index(total_participants, current_participant_index)\n return remaining_days\n\n def __get_next_index(self, total_participants: int, current_index: int):\n \"\"\"This method gives the index of next participant to be selected in participants list\"\"\"\n next_index = current_index + 1\n if next_index == total_participants:\n next_index = 0\n return next_index\n\n def __get_start_index(self, total_participants: int, current_start_index: int):\n \"\"\"This method 
gives the starting index of participant for each list\"\"\"\n next_start_index = current_start_index - 1\n if next_start_index <= 0:\n next_start_index = total_participants - 1\n return next_start_index\n", "source": "the_stack_v2_python_sparse", "source_path": "roster-backend/roster_project/roster_api/utils/helpers/core_helpers/algorithms/core_algo/sequence_roster.py", "source_repo": "akhilanil/roster", "split": "val", "star_events_count": 0}
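Editor's note: in the record above, __get_next_index and __get_start_index take self, yet set_work_list calls them unbound through the class with only two arguments, which would raise TypeError at runtime. Declaring the helpers as static methods, as in the sketch below, matches the call sites; the wrap-around arithmetic is kept exactly as in the record, including the <= 0 test that means start index 0 is only ever used on the first pass.

class RosterIndexHelpers:
    @staticmethod
    def get_next_index(total_participants: int, current_index: int) -> int:
        # advance cyclically through the participant list
        next_index = current_index + 1
        return 0 if next_index == total_participants else next_index

    @staticmethod
    def get_start_index(total_participants: int, current_start_index: int) -> int:
        # each new session list starts one participant earlier, wrapping upward
        next_start_index = current_start_index - 1
        return total_participants - 1 if next_start_index <= 0 else next_start_index

assert RosterIndexHelpers.get_next_index(5, 4) == 0
assert RosterIndexHelpers.get_start_index(5, 1) == 4   # index 0 skipped, as in the record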
{"blob_id": "135e9cec5873d47fe68d3098bfb26f96166aea11", "bodies": ["dataframe = datasource.toDF()\nfor column_name, source_column, prefix in mappings:\n dataframe = dataframe.withColumn(column_name, format_string(prefix + '-%s', dataframe[source_column]))\nreturn DynamicFrame.fromDF(dataframe, datasource.glue_ctx, 'create_vertex_id_columns')", "dataframe = datasource.toDF()\ndataframe = dataframe.withColumn('~id', format_string('%s-%s', dataframe[from_column], dataframe[to_column]))\nreturn DynamicFrame.fromDF(dataframe, datasource.glue_ctx, 'create_edge_id_column')", "dataframe = datasource.toDF()\ndataframe = dataframe.withColumn('~label', lit(label))\nreturn DynamicFrame.fromDF(dataframe, datasource.glue_ctx, label)"], "bodies_text": "<|body_start_0|>\n dataframe = datasource.toDF()\n for column_name, source_column, prefix in mappings:\n dataframe = dataframe.withColumn(column_name, format_string(prefix + '-%s', dataframe[source_column]))\n return DynamicFrame.fromDF(dataframe, datasource.glue_ctx, 'create_vertex_id_columns')\n<|end_body_0|>\n\n<|body_start_1|>\n dataframe = datasource.toDF()\n dataframe = dataframe.withColumn('~id', format_string('%s-%s', dataframe[from_column], dataframe[to_column]))\n return DynamicFrame.fromDF(dataframe, datasource.glue_ctx, 'create_edge_id_column')\n<|end_body_1|>\n\n<|body_start_2|>\n dataframe = datasource.toDF()\n dataframe = dataframe.withColumn('~label', lit(label))\n return DynamicFrame.fromDF(dataframe, datasource.glue_ctx, label)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "GremlinCsvTransforms", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GremlinCsvTransforms:\n\n def create_prefixed_columns(cls, datasource, mappings):\n \"\"\"Creates columns in a DynamicFrame whose values are based on prefixed values from another column in the DynamicFrame. Example: >>> df = GremlinCsvTransforms.create_prefixed_columns(df, [('~id', 'productId', 'p'),('~to', 'supplierId', 's')])\"\"\"\n <|body_0|>\n\n def create_edge_id_column(cls, datasource, from_column, to_column):\n \"\"\"Creates an '~id' column in a DynamicFrame whose values are based on the specified from and to columns. Example: >>> df = GremlinCsvTransforms.create_edge_id_column(df, 'supplierId', 'productId')\"\"\"\n <|body_1|>\n\n def addLabel(cls, datasource, label):\n \"\"\"Adds a '~label' column to a DynamicFrame whose values comprise the supplier label. 
Example: >>> df = GremlinCsvTransforms.addLabel(df, 'Product')\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n dataframe = datasource.toDF()\n for column_name, source_column, prefix in mappings:\n dataframe = dataframe.withColumn(column_name, format_string(prefix + '-%s', dataframe[source_column]))\n return DynamicFrame.fromDF(dataframe, datasource.glue_ctx, 'create_vertex_id_columns')\n<|end_body_0|>\n\n<|body_start_1|>\n dataframe = datasource.toDF()\n dataframe = dataframe.withColumn('~id', format_string('%s-%s', dataframe[from_column], dataframe[to_column]))\n return DynamicFrame.fromDF(dataframe, datasource.glue_ctx, 'create_edge_id_column')\n<|end_body_1|>\n\n<|body_start_2|>\n dataframe = datasource.toDF()\n dataframe = dataframe.withColumn('~label', lit(label))\n return DynamicFrame.fromDF(dataframe, datasource.glue_ctx, label)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000351", "length_bytes": 2731, "license_type": "permissive", "methods": [{"docstring": "Creates columns in a DynamicFrame whose values are based on prefixed values from another column in the DynamicFrame. Example: >>> df = GremlinCsvTransforms.create_prefixed_columns(df, [('~id', 'productId', 'p'),('~to', 'supplierId', 's')])", "name": "create_prefixed_columns", "signature": "def create_prefixed_columns(cls, datasource, mappings)"}, {"docstring": "Creates an '~id' column in a DynamicFrame whose values are based on the specified from and to columns. Example: >>> df = GremlinCsvTransforms.create_edge_id_column(df, 'supplierId', 'productId')", "name": "create_edge_id_column", "signature": "def create_edge_id_column(cls, datasource, from_column, to_column)"}, {"docstring": "Adds a '~label' column to a DynamicFrame whose values comprise the supplier label. Example: >>> df = GremlinCsvTransforms.addLabel(df, 'Product')", "name": "addLabel", "signature": "def addLabel(cls, datasource, label)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_023086", "prompt": "Implement the Python class `GremlinCsvTransforms` described below.\n\nClass description:\nImplement the GremlinCsvTransforms class.\n\nMethod signatures and docstrings:\n- def create_prefixed_columns(cls, datasource, mappings): Creates columns in a DynamicFrame whose values are based on prefixed values from another column in the DynamicFrame. Example: >>> df = GremlinCsvTransforms.create_prefixed_columns(df, [('~id', 'productId', 'p'),('~to', 'supplierId', 's')])\n- def create_edge_id_column(cls, datasource, from_column, to_column): Creates an '~id' column in a DynamicFrame whose values are based on the specified from and to columns. Example: >>> df = GremlinCsvTransforms.create_edge_id_column(df, 'supplierId', 'productId')\n- def addLabel(cls, datasource, label): Adds a '~label' column to a DynamicFrame whose values comprise the supplier label. Example: >>> df = GremlinCsvTransforms.addLabel(df, 'Product')", "prompted_full_text": "Implement the Python class `GremlinCsvTransforms` described below.\n\nClass description:\nImplement the GremlinCsvTransforms class.\n\nMethod signatures and docstrings:\n- def create_prefixed_columns(cls, datasource, mappings): Creates columns in a DynamicFrame whose values are based on prefixed values from another column in the DynamicFrame. 
Example: >>> df = GremlinCsvTransforms.create_prefixed_columns(df, [('~id', 'productId', 'p'),('~to', 'supplierId', 's')])\n- def create_edge_id_column(cls, datasource, from_column, to_column): Creates an '~id' column in a DynamicFrame whose values are based on the specified from and to columns. Example: >>> df = GremlinCsvTransforms.create_edge_id_column(df, 'supplierId', 'productId')\n- def addLabel(cls, datasource, label): Adds a '~label' column to a DynamicFrame whose values comprise the supplier label. Example: >>> df = GremlinCsvTransforms.addLabel(df, 'Product')\n\n<|skeleton|>\nclass GremlinCsvTransforms:\n\n def create_prefixed_columns(cls, datasource, mappings):\n \"\"\"Creates columns in a DynamicFrame whose values are based on prefixed values from another column in the DynamicFrame. Example: >>> df = GremlinCsvTransforms.create_prefixed_columns(df, [('~id', 'productId', 'p'),('~to', 'supplierId', 's')])\"\"\"\n <|body_0|>\n\n def create_edge_id_column(cls, datasource, from_column, to_column):\n \"\"\"Creates an '~id' column in a DynamicFrame whose values are based on the specified from and to columns. Example: >>> df = GremlinCsvTransforms.create_edge_id_column(df, 'supplierId', 'productId')\"\"\"\n <|body_1|>\n\n def addLabel(cls, datasource, label):\n \"\"\"Adds a '~label' column to a DynamicFrame whose values comprise the supplier label. Example: >>> df = GremlinCsvTransforms.addLabel(df, 'Product')\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n dataframe = datasource.toDF()\n for column_name, source_column, prefix in mappings:\n dataframe = dataframe.withColumn(column_name, format_string(prefix + '-%s', dataframe[source_column]))\n return DynamicFrame.fromDF(dataframe, datasource.glue_ctx, 'create_vertex_id_columns')\n<|end_body_0|>\n\n<|body_start_1|>\n dataframe = datasource.toDF()\n dataframe = dataframe.withColumn('~id', format_string('%s-%s', dataframe[from_column], dataframe[to_column]))\n return DynamicFrame.fromDF(dataframe, datasource.glue_ctx, 'create_edge_id_column')\n<|end_body_1|>\n\n<|body_start_2|>\n dataframe = datasource.toDF()\n dataframe = dataframe.withColumn('~label', lit(label))\n return DynamicFrame.fromDF(dataframe, datasource.glue_ctx, label)\n<|end_body_2|>\n", "revision_id": "b81872022e9965e8a22b37cfed8d62bd9de6e017", "skeleton": "<|skeleton|>\nclass GremlinCsvTransforms:\n\n def create_prefixed_columns(cls, datasource, mappings):\n \"\"\"Creates columns in a DynamicFrame whose values are based on prefixed values from another column in the DynamicFrame. Example: >>> df = GremlinCsvTransforms.create_prefixed_columns(df, [('~id', 'productId', 'p'),('~to', 'supplierId', 's')])\"\"\"\n <|body_0|>\n\n def create_edge_id_column(cls, datasource, from_column, to_column):\n \"\"\"Creates an '~id' column in a DynamicFrame whose values are based on the specified from and to columns. Example: >>> df = GremlinCsvTransforms.create_edge_id_column(df, 'supplierId', 'productId')\"\"\"\n <|body_1|>\n\n def addLabel(cls, datasource, label):\n \"\"\"Adds a '~label' column to a DynamicFrame whose values comprise the supplier label. 
Example: >>> df = GremlinCsvTransforms.addLabel(df, 'Product')\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class GremlinCsvTransforms:\n def create_prefixed_columns(cls, datasource, mappings):\n \"\"\"Creates columns in a DynamicFrame whose values are based on prefixed values from another column in the DynamicFrame. Example: >>> df = GremlinCsvTransforms.create_prefixed_columns(df, [('~id', 'productId', 'p'),('~to', 'supplierId', 's')])\"\"\"\n dataframe = datasource.toDF()\n for column_name, source_column, prefix in mappings:\n dataframe = dataframe.withColumn(column_name, format_string(prefix + '-%s', dataframe[source_column]))\n return DynamicFrame.fromDF(dataframe, datasource.glue_ctx, 'create_vertex_id_columns')\n\n def create_edge_id_column(cls, datasource, from_column, to_column):\n \"\"\"Creates an '~id' column in a DynamicFrame whose values are based on the specified from and to columns. Example: >>> df = GremlinCsvTransforms.create_edge_id_column(df, 'supplierId', 'productId')\"\"\"\n dataframe = datasource.toDF()\n dataframe = dataframe.withColumn('~id', format_string('%s-%s', dataframe[from_column], dataframe[to_column]))\n return DynamicFrame.fromDF(dataframe, datasource.glue_ctx, 'create_edge_id_column')\n\n def addLabel(cls, datasource, label):\n \"\"\"Adds a '~label' column to a DynamicFrame whose values comprise the supplier label. Example: >>> df = GremlinCsvTransforms.addLabel(df, 'Product')\"\"\"\n dataframe = datasource.toDF()\n dataframe = dataframe.withColumn('~label', lit(label))\n return DynamicFrame.fromDF(dataframe, datasource.glue_ctx, label)\n", "source": "the_stack_v2_python_sparse", "source_path": "glue-neptune/glue_neptune/GremlinCsvTransforms.py", "source_repo": "vivgoyal-aws/amazon-neptune-tools", "split": "val", "star_events_count": 0}
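Editor's sketch: the same three transforms on a plain PySpark DataFrame. A Glue DynamicFrame needs a GlueContext, so this only demonstrates the format_string/lit column pattern that the record's methods wrap; the edge id goes to a separate ~edge_id column here purely to avoid clobbering the vertex ~id in one frame, whereas the record writes it to ~id on a dedicated edge frame.

from pyspark.sql import SparkSession
from pyspark.sql.functions import format_string, lit

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([(1, 10), (2, 20)], ['supplierId', 'productId'])

# prefixed vertex columns, e.g. ~id = 'p-10', ~to = 's-1'
for column_name, source_column, prefix in [('~id', 'productId', 'p'),
                                           ('~to', 'supplierId', 's')]:
    df = df.withColumn(column_name, format_string(prefix + '-%s', df[source_column]))

# edge id of the form '<from>-<to>', plus a constant label column
df = df.withColumn('~edge_id', format_string('%s-%s', df['supplierId'], df['productId']))
df = df.withColumn('~label', lit('Product'))
df.show()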
{"blob_id": "fe8540e1f9cd1760caa9b891945d32cb652b2039", "bodies": ["given = [[9, 8, 7, 6, 5, 4, 3, 2, 1, 0], [4, 1, 2, 1, 2, 4, 1, 2, 1, 4, 1], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [random.randrange(0, 10) for _ in range(10)], [random.randrange(0, 10) for _ in range(1000)], [random.randrange(0, 1000) for _ in range(1000)], [random.randrange(0, 100) for _ in range(10 ** 5)], [random.randrange(0, 10 ** 5) for _ in range(10 ** 5)]]\nactual = deepcopy(given)\nfor a in actual:\n qs.quick_sort(a)\nexpect = deepcopy(given)\nfor e in expect:\n e.sort()\nfor g, e, a in zip(given, expect, actual):\n msg = '\\n\\ngiven:\\n{}\\nexpected:\\n{}\\nactual:\\n{}'\n msg = msg.format(g, e, a)\n self.assertListEqual(e, a, msg=msg)", "given = [[9, 8, 7, 6, 5, 4, 3, 2, 1, 0], [4, 1, 2, 1, 2, 4, 1, 2, 1, 4, 1], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [random.randrange(0, 10) for _ in range(10)], [random.randrange(0, 10) for _ in range(1000)], [random.randrange(0, 1000) for _ in range(1000)], [random.randrange(0, 100) for _ in range(10 ** 5)], [random.randrange(0, 10 ** 5) for _ in range(10 ** 5)]]\nactual = deepcopy(given)\nfor a in actual:\n qs.quick_sort(a, algorithm='simple')\nexpect = deepcopy(given)\nfor e in expect:\n e.sort()\nfor g, e, a in zip(given, expect, actual):\n msg = '\\n\\ngiven:\\n{}\\nexpected:\\n{}\\nactual:\\n{}'\n msg = msg.format(g, e, a)\n self.assertListEqual(e, a, msg=msg)"], "bodies_text": "<|body_start_0|>\n given = [[9, 8, 7, 6, 5, 4, 3, 2, 1, 0], [4, 1, 2, 1, 2, 4, 1, 2, 1, 4, 1], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [random.randrange(0, 10) for _ in range(10)], [random.randrange(0, 10) for _ in range(1000)], [random.randrange(0, 1000) for _ in range(1000)], [random.randrange(0, 100) for _ in range(10 ** 5)], [random.randrange(0, 10 ** 5) for _ in range(10 ** 5)]]\n actual = deepcopy(given)\n for a in actual:\n qs.quick_sort(a)\n expect = deepcopy(given)\n for e in expect:\n e.sort()\n for g, e, a in zip(given, expect, actual):\n msg = '\\n\\ngiven:\\n{}\\nexpected:\\n{}\\nactual:\\n{}'\n msg = msg.format(g, e, a)\n self.assertListEqual(e, a, msg=msg)\n<|end_body_0|>\n\n<|body_start_1|>\n given = [[9, 8, 7, 6, 5, 4, 3, 2, 1, 0], [4, 1, 2, 1, 2, 4, 1, 2, 1, 4, 1], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [random.randrange(0, 10) for _ in range(10)], [random.randrange(0, 10) for _ in range(1000)], [random.randrange(0, 1000) for _ in range(1000)], [random.randrange(0, 100) for _ in range(10 ** 5)], [random.randrange(0, 10 ** 5) for _ in range(10 ** 5)]]\n actual = deepcopy(given)\n for a in actual:\n qs.quick_sort(a, algorithm='simple')\n expect = deepcopy(given)\n for e in expect:\n e.sort()\n for g, e, a in zip(given, expect, actual):\n msg = '\\n\\ngiven:\\n{}\\nexpected:\\n{}\\nactual:\\n{}'\n msg = msg.format(g, e, a)\n self.assertListEqual(e, a, msg=msg)\n<|end_body_1|>\n", "class_docstring": "Test the quick sort function", "class_name": "TestQuickSort", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestQuickSort:\n \"\"\"Test the quick sort function\"\"\"\n\n def test_quick_sort_median(self):\n \"\"\"Test the quick sort function for various cases. Should match the results of Python's sort func.\"\"\"\n <|body_0|>\n\n def test_quick_sort_simple(self):\n \"\"\"Test the quick sort function for various cases. This test case uses the 'simple' algorithm as the parameter. 
That means it simply chooses the middle index as pivot.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n given = [[9, 8, 7, 6, 5, 4, 3, 2, 1, 0], [4, 1, 2, 1, 2, 4, 1, 2, 1, 4, 1], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [random.randrange(0, 10) for _ in range(10)], [random.randrange(0, 10) for _ in range(1000)], [random.randrange(0, 1000) for _ in range(1000)], [random.randrange(0, 100) for _ in range(10 ** 5)], [random.randrange(0, 10 ** 5) for _ in range(10 ** 5)]]\n actual = deepcopy(given)\n for a in actual:\n qs.quick_sort(a)\n expect = deepcopy(given)\n for e in expect:\n e.sort()\n for g, e, a in zip(given, expect, actual):\n msg = '\\n\\ngiven:\\n{}\\nexpected:\\n{}\\nactual:\\n{}'\n msg = msg.format(g, e, a)\n self.assertListEqual(e, a, msg=msg)\n<|end_body_0|>\n\n<|body_start_1|>\n given = [[9, 8, 7, 6, 5, 4, 3, 2, 1, 0], [4, 1, 2, 1, 2, 4, 1, 2, 1, 4, 1], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [random.randrange(0, 10) for _ in range(10)], [random.randrange(0, 10) for _ in range(1000)], [random.randrange(0, 1000) for _ in range(1000)], [random.randrange(0, 100) for _ in range(10 ** 5)], [random.randrange(0, 10 ** 5) for _ in range(10 ** 5)]]\n actual = deepcopy(given)\n for a in actual:\n qs.quick_sort(a, algorithm='simple')\n expect = deepcopy(given)\n for e in expect:\n e.sort()\n for g, e, a in zip(given, expect, actual):\n msg = '\\n\\ngiven:\\n{}\\nexpected:\\n{}\\nactual:\\n{}'\n msg = msg.format(g, e, a)\n self.assertListEqual(e, a, msg=msg)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000352", "length_bytes": 3778, "license_type": "no_license", "methods": [{"docstring": "Test the quick sort function for various cases. Should match the results of Python's sort func.", "name": "test_quick_sort_median", "signature": "def test_quick_sort_median(self)"}, {"docstring": "Test the quick sort function for various cases. This test case uses the 'simple' algorithm as the parameter. That means it simply chooses the middle index as pivot.", "name": "test_quick_sort_simple", "signature": "def test_quick_sort_simple(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_002316", "prompt": "Implement the Python class `TestQuickSort` described below.\n\nClass description:\nTest the quick sort function\n\nMethod signatures and docstrings:\n- def test_quick_sort_median(self): Test the quick sort function for various cases. Should match the results of Python's sort func.\n- def test_quick_sort_simple(self): Test the quick sort function for various cases. This test case uses the 'simple' algorithm as the parameter. That means it simply chooses the middle index as pivot.", "prompted_full_text": "Implement the Python class `TestQuickSort` described below.\n\nClass description:\nTest the quick sort function\n\nMethod signatures and docstrings:\n- def test_quick_sort_median(self): Test the quick sort function for various cases. Should match the results of Python's sort func.\n- def test_quick_sort_simple(self): Test the quick sort function for various cases. This test case uses the 'simple' algorithm as the parameter. That means it simply chooses the middle index as pivot.\n\n<|skeleton|>\nclass TestQuickSort:\n \"\"\"Test the quick sort function\"\"\"\n\n def test_quick_sort_median(self):\n \"\"\"Test the quick sort function for various cases. Should match the results of Python's sort func.\"\"\"\n <|body_0|>\n\n def test_quick_sort_simple(self):\n \"\"\"Test the quick sort function for various cases. This test case uses the 'simple' algorithm as the parameter. 
That means it simply chooses the middle index as pivot.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n given = [[9, 8, 7, 6, 5, 4, 3, 2, 1, 0], [4, 1, 2, 1, 2, 4, 1, 2, 1, 4, 1], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [random.randrange(0, 10) for _ in range(10)], [random.randrange(0, 10) for _ in range(1000)], [random.randrange(0, 1000) for _ in range(1000)], [random.randrange(0, 100) for _ in range(10 ** 5)], [random.randrange(0, 10 ** 5) for _ in range(10 ** 5)]]\n actual = deepcopy(given)\n for a in actual:\n qs.quick_sort(a)\n expect = deepcopy(given)\n for e in expect:\n e.sort()\n for g, e, a in zip(given, expect, actual):\n msg = '\\n\\ngiven:\\n{}\\nexpected:\\n{}\\nactual:\\n{}'\n msg = msg.format(g, e, a)\n self.assertListEqual(e, a, msg=msg)\n<|end_body_0|>\n\n<|body_start_1|>\n given = [[9, 8, 7, 6, 5, 4, 3, 2, 1, 0], [4, 1, 2, 1, 2, 4, 1, 2, 1, 4, 1], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [random.randrange(0, 10) for _ in range(10)], [random.randrange(0, 10) for _ in range(1000)], [random.randrange(0, 1000) for _ in range(1000)], [random.randrange(0, 100) for _ in range(10 ** 5)], [random.randrange(0, 10 ** 5) for _ in range(10 ** 5)]]\n actual = deepcopy(given)\n for a in actual:\n qs.quick_sort(a, algorithm='simple')\n expect = deepcopy(given)\n for e in expect:\n e.sort()\n for g, e, a in zip(given, expect, actual):\n msg = '\\n\\ngiven:\\n{}\\nexpected:\\n{}\\nactual:\\n{}'\n msg = msg.format(g, e, a)\n self.assertListEqual(e, a, msg=msg)\n<|end_body_1|>\n", "revision_id": "0e8b528207faa44977f5b9d446d45d13c4fb430d", "skeleton": "<|skeleton|>\nclass TestQuickSort:\n \"\"\"Test the quick sort function\"\"\"\n\n def test_quick_sort_median(self):\n \"\"\"Test the quick sort function for various cases. Should match the results of Python's sort func.\"\"\"\n <|body_0|>\n\n def test_quick_sort_simple(self):\n \"\"\"Test the quick sort function for various cases. This test case uses the 'simple' algorithm as the parameter. That means it simply chooses the middle index as pivot.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TestQuickSort:\n \"\"\"Test the quick sort function\"\"\"\n\n def test_quick_sort_median(self):\n \"\"\"Test the quick sort function for various cases. Should match the results of Python's sort func.\"\"\"\n given = [[9, 8, 7, 6, 5, 4, 3, 2, 1, 0], [4, 1, 2, 1, 2, 4, 1, 2, 1, 4, 1], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [random.randrange(0, 10) for _ in range(10)], [random.randrange(0, 10) for _ in range(1000)], [random.randrange(0, 1000) for _ in range(1000)], [random.randrange(0, 100) for _ in range(10 ** 5)], [random.randrange(0, 10 ** 5) for _ in range(10 ** 5)]]\n actual = deepcopy(given)\n for a in actual:\n qs.quick_sort(a)\n expect = deepcopy(given)\n for e in expect:\n e.sort()\n for g, e, a in zip(given, expect, actual):\n msg = '\\n\\ngiven:\\n{}\\nexpected:\\n{}\\nactual:\\n{}'\n msg = msg.format(g, e, a)\n self.assertListEqual(e, a, msg=msg)\n\n def test_quick_sort_simple(self):\n \"\"\"Test the quick sort function for various cases. This test case uses the 'simple' algorithm as the parameter. 
That means it simply chooses the middle index as pivot.\"\"\"\n given = [[9, 8, 7, 6, 5, 4, 3, 2, 1, 0], [4, 1, 2, 1, 2, 4, 1, 2, 1, 4, 1], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [random.randrange(0, 10) for _ in range(10)], [random.randrange(0, 10) for _ in range(1000)], [random.randrange(0, 1000) for _ in range(1000)], [random.randrange(0, 100) for _ in range(10 ** 5)], [random.randrange(0, 10 ** 5) for _ in range(10 ** 5)]]\n actual = deepcopy(given)\n for a in actual:\n qs.quick_sort(a, algorithm='simple')\n expect = deepcopy(given)\n for e in expect:\n e.sort()\n for g, e, a in zip(given, expect, actual):\n msg = '\\n\\ngiven:\\n{}\\nexpected:\\n{}\\nactual:\\n{}'\n msg = msg.format(g, e, a)\n self.assertListEqual(e, a, msg=msg)\n", "source": "the_stack_v2_python_sparse", "source_path": "__test__/quick_sort_test.py", "source_repo": "marcus-grant/python-cs", "split": "val", "star_events_count": 0}
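Editor's sketch: the qs module under test is not part of the record, so the stand-in below only illustrates an in-place quick sort with the same call signature, where algorithm='simple' takes the middle element as pivot and the default uses a median-of-three pivot. Hoare-style partitioning keeps it well-behaved on the duplicate-heavy inputs the tests generate.

def quick_sort(a, algorithm='median', lo=0, hi=None):
    # hypothetical stand-in for qs.quick_sort; sorts a[lo:hi+1] in place
    if hi is None:
        hi = len(a) - 1
    if lo >= hi:
        return
    mid = (lo + hi) // 2
    if algorithm == 'simple':
        pivot = a[mid]                                   # plain middle element
    else:
        pivot = sorted((a[lo], a[mid], a[hi]))[1]        # median of three
    i, j = lo, hi
    while i <= j:                                        # Hoare-style partition
        while a[i] < pivot:
            i += 1
        while a[j] > pivot:
            j -= 1
        if i <= j:
            a[i], a[j] = a[j], a[i]
            i, j = i + 1, j - 1
    quick_sort(a, algorithm, lo, j)
    quick_sort(a, algorithm, i, hi)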
{"blob_id": "29902334611082682612dbba02191ce6e09b6100", "bodies": ["xml.sax.handler.ContentHandler.__init__(self)\nself.styleRegistry = styleRegistry\nif not tagAliases:\n tagAliases = XmlMarkupTagAliases()\nself.tagAliases = tagAliases", "self.style = CascadingStyleStack()\nself.document = None\nself.block = None\nself.glyphs = None", "styleDict = None\nstyleAttr = attrs.get('style', None)\nif styleAttr:\n styleDict = self.styleRegistry.findMatch(styleAttr)\nif styleDict == None:\n styleDict = self.styleRegistry.findMatch(name)\nif styleDict == None:\n raise ValueError('No style found for: %s, %s' % (name, str(styleAttr)))\nself.style.push(styleDict)", "if name == 'document':\n self._pushStyle(name, attrs)\n self.document = self.style.makeNewDocument()\nelif name == 'block':\n if not self.document:\n raise XmlMarkupUnexpectedElementError('Block element encountered outside of document element.')\n self._pushStyle(name, attrs)\n self.block = self.style.makeNewBlock()\n self.glyphs = []\nelif name == 'inline':\n if not self.block:\n raise XmlMarkupUnexpectedElementError('Inline element encountered outside of block element.')\n self._pushStyle(name, attrs)\nelif self.tagAliases.has(name):\n baseElement = self.tagAliases.get(name)\n self.startElement(baseElement, {'style': name})\nelse:\n raise XmlMarkupUnknownElementError(name)", "if name == 'document':\n self.style.pop()\n self.document.layout()\nelif name == 'block':\n ellipsisGlyph = self.style.makeNewGlyphs('…')[0]\n self.block.setEllipsisGlyph(ellipsisGlyph)\n self.style.pop()\n self.block.addGlyphs(self.glyphs)\n self.document.addBlock(self.block)\n self.block = None\n self.glyphs = None\nelif name == 'inline':\n self.style.pop()\nelse:\n baseElement = self.tagAliases.get(name)\n self.endElement(baseElement)", "if self.glyphs != None:\n self.glyphs.extend(self.style.makeNewGlyphs(content))\nelse:\n content = content.strip()\n if content:\n raise XmlMarkupUnexpectedCharactersError(content)"], "bodies_text": "<|body_start_0|>\n xml.sax.handler.ContentHandler.__init__(self)\n self.styleRegistry = styleRegistry\n if not tagAliases:\n tagAliases = XmlMarkupTagAliases()\n self.tagAliases = tagAliases\n<|end_body_0|>\n\n<|body_start_1|>\n self.style = CascadingStyleStack()\n self.document = None\n self.block = None\n self.glyphs = None\n<|end_body_1|>\n\n<|body_start_2|>\n styleDict = None\n styleAttr = attrs.get('style', None)\n if styleAttr:\n styleDict = self.styleRegistry.findMatch(styleAttr)\n if styleDict == None:\n styleDict = self.styleRegistry.findMatch(name)\n if styleDict == None:\n raise ValueError('No style found for: %s, %s' % (name, str(styleAttr)))\n self.style.push(styleDict)\n<|end_body_2|>\n\n<|body_start_3|>\n if name == 'document':\n self._pushStyle(name, attrs)\n self.document = self.style.makeNewDocument()\n elif name == 'block':\n if not self.document:\n raise XmlMarkupUnexpectedElementError('Block element encountered outside of document element.')\n self._pushStyle(name, attrs)\n self.block = self.style.makeNewBlock()\n self.glyphs = []\n elif name == 'inline':\n if not self.block:\n raise XmlMarkupUnexpectedElementError('Inline element encountered outside of block element.')\n self._pushStyle(name, attrs)\n elif self.tagAliases.has(name):\n baseElement = self.tagAliases.get(name)\n self.startElement(baseElement, {'style': name})\n else:\n raise XmlMarkupUnknownElementError(name)\n<|end_body_3|>\n\n<|body_start_4|>\n if name == 'document':\n self.style.pop()\n self.document.layout()\n elif name == 'block':\n 
ellipsisGlyph = self.style.makeNewGlyphs('…')[0]\n self.block.setEllipsisGlyph(ellipsisGlyph)\n self.style.pop()\n self.block.addGlyphs(self.glyphs)\n self.document.addBlock(self.block)\n self.block = None\n self.glyphs = None\n elif name == 'inline':\n self.style.pop()\n else:\n baseElement = self.tagAliases.get(name)\n self.endElement(baseElement)\n<|end_body_4|>\n\n<|body_start_5|>\n if self.glyphs != None:\n self.glyphs.extend(self.style.makeNewGlyphs(content))\n else:\n content = content.strip()\n if content:\n raise XmlMarkupUnexpectedCharactersError(content)\n<|end_body_5|>\n", "class_docstring": "XML content handler for XML text layout markup.", "class_name": "_XmlMarkupHandler", "detected_licenses": ["BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass _XmlMarkupHandler:\n \"\"\"XML content handler for XML text layout markup.\"\"\"\n\n def __init__(self, styleRegistry, tagAliases=None):\n \"\"\"Initializes the content handler with the given style registry and tag aliases.\"\"\"\n <|body_0|>\n\n def startDocument(self):\n \"\"\"Called by the XML parser at the beginning of parsing the XML document.\"\"\"\n <|body_1|>\n\n def _pushStyle(self, name, attrs):\n \"\"\"Sets the current style to the style defined by the \"style\" attribute of the given tag. If that style doesn't exist, we use the style named by the tag.\"\"\"\n <|body_2|>\n\n def startElement(self, name, attrs):\n \"\"\"Handles the beginning of an XML element.\"\"\"\n <|body_3|>\n\n def endElement(self, name):\n \"\"\"Handles the end of an XML element.\"\"\"\n <|body_4|>\n\n def characters(self, content):\n \"\"\"Handles XML character data.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n xml.sax.handler.ContentHandler.__init__(self)\n self.styleRegistry = styleRegistry\n if not tagAliases:\n tagAliases = XmlMarkupTagAliases()\n self.tagAliases = tagAliases\n<|end_body_0|>\n\n<|body_start_1|>\n self.style = CascadingStyleStack()\n self.document = None\n self.block = None\n self.glyphs = None\n<|end_body_1|>\n\n<|body_start_2|>\n styleDict = None\n styleAttr = attrs.get('style', None)\n if styleAttr:\n styleDict = self.styleRegistry.findMatch(styleAttr)\n if styleDict == None:\n styleDict = self.styleRegistry.findMatch(name)\n if styleDict == None:\n raise ValueError('No style found for: %s, %s' % (name, str(styleAttr)))\n self.style.push(styleDict)\n<|end_body_2|>\n\n<|body_start_3|>\n if name == 'document':\n self._pushStyle(name, attrs)\n self.document = self.style.makeNewDocument()\n elif name == 'block':\n if not self.document:\n raise XmlMarkupUnexpectedElementError('Block element encountered outside of document element.')\n self._pushStyle(name, attrs)\n self.block = self.style.makeNewBlock()\n self.glyphs = []\n elif name == 'inline':\n if not self.block:\n raise XmlMarkupUnexpectedElementError('Inline element encountered outside of block element.')\n self._pushStyle(name, attrs)\n elif self.tagAliases.has(name):\n baseElement = self.tagAliases.get(name)\n self.startElement(baseElement, {'style': name})\n else:\n raise XmlMarkupUnknownElementError(name)\n<|end_body_3|>\n\n<|body_start_4|>\n if name == 'document':\n self.style.pop()\n self.document.layout()\n elif name == 'block':\n ellipsisGlyph = self.style.makeNewGlyphs('…')[0]\n self.block.setEllipsisGlyph(ellipsisGlyph)\n self.style.pop()\n self.block.addGlyphs(self.glyphs)\n self.document.addBlock(self.block)\n self.block = None\n self.glyphs = None\n elif name == 'inline':\n self.style.pop()\n 
else:\n baseElement = self.tagAliases.get(name)\n self.endElement(baseElement)\n<|end_body_4|>\n\n<|body_start_5|>\n if self.glyphs != None:\n self.glyphs.extend(self.style.makeNewGlyphs(content))\n else:\n content = content.strip()\n if content:\n raise XmlMarkupUnexpectedCharactersError(content)\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000353", "length_bytes": 21802, "license_type": "permissive", "methods": [{"docstring": "Initializes the content handler with the given style registry and tag aliases.", "name": "__init__", "signature": "def __init__(self, styleRegistry, tagAliases=None)"}, {"docstring": "Called by the XML parser at the beginning of parsing the XML document.", "name": "startDocument", "signature": "def startDocument(self)"}, {"docstring": "Sets the current style to the style defined by the \"style\" attribute of the given tag. If that style doesn't exist, we use the style named by the tag.", "name": "_pushStyle", "signature": "def _pushStyle(self, name, attrs)"}, {"docstring": "Handles the beginning of an XML element.", "name": "startElement", "signature": "def startElement(self, name, attrs)"}, {"docstring": "Handles the end of an XML element.", "name": "endElement", "signature": "def endElement(self, name)"}, {"docstring": "Handles XML character data.", "name": "characters", "signature": "def characters(self, content)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_train_020052", "prompt": "Implement the Python class `_XmlMarkupHandler` described below.\n\nClass description:\nXML content handler for XML text layout markup.\n\nMethod signatures and docstrings:\n- def __init__(self, styleRegistry, tagAliases=None): Initializes the content handler with the given style registry and tag aliases.\n- def startDocument(self): Called by the XML parser at the beginning of parsing the XML document.\n- def _pushStyle(self, name, attrs): Sets the current style to the style defined by the \"style\" attribute of the given tag. If that style doesn't exist, we use the style named by the tag.\n- def startElement(self, name, attrs): Handles the beginning of an XML element.\n- def endElement(self, name): Handles the end of an XML element.\n- def characters(self, content): Handles XML character data.", "prompted_full_text": "Implement the Python class `_XmlMarkupHandler` described below.\n\nClass description:\nXML content handler for XML text layout markup.\n\nMethod signatures and docstrings:\n- def __init__(self, styleRegistry, tagAliases=None): Initializes the content handler with the given style registry and tag aliases.\n- def startDocument(self): Called by the XML parser at the beginning of parsing the XML document.\n- def _pushStyle(self, name, attrs): Sets the current style to the style defined by the \"style\" attribute of the given tag. 
If that style doesn't exist, we use the style named by the tag.\n- def startElement(self, name, attrs): Handles the beginning of an XML element.\n- def endElement(self, name): Handles the end of an XML element.\n- def characters(self, content): Handles XML character data.\n\n<|skeleton|>\nclass _XmlMarkupHandler:\n \"\"\"XML content handler for XML text layout markup.\"\"\"\n\n def __init__(self, styleRegistry, tagAliases=None):\n \"\"\"Initializes the content handler with the given style registry and tag aliases.\"\"\"\n <|body_0|>\n\n def startDocument(self):\n \"\"\"Called by the XML parser at the beginning of parsing the XML document.\"\"\"\n <|body_1|>\n\n def _pushStyle(self, name, attrs):\n \"\"\"Sets the current style to the style defined by the \"style\" attribute of the given tag. If that style doesn't exist, we use the style named by the tag.\"\"\"\n <|body_2|>\n\n def startElement(self, name, attrs):\n \"\"\"Handles the beginning of an XML element.\"\"\"\n <|body_3|>\n\n def endElement(self, name):\n \"\"\"Handles the end of an XML element.\"\"\"\n <|body_4|>\n\n def characters(self, content):\n \"\"\"Handles XML character data.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n xml.sax.handler.ContentHandler.__init__(self)\n self.styleRegistry = styleRegistry\n if not tagAliases:\n tagAliases = XmlMarkupTagAliases()\n self.tagAliases = tagAliases\n<|end_body_0|>\n\n<|body_start_1|>\n self.style = CascadingStyleStack()\n self.document = None\n self.block = None\n self.glyphs = None\n<|end_body_1|>\n\n<|body_start_2|>\n styleDict = None\n styleAttr = attrs.get('style', None)\n if styleAttr:\n styleDict = self.styleRegistry.findMatch(styleAttr)\n if styleDict == None:\n styleDict = self.styleRegistry.findMatch(name)\n if styleDict == None:\n raise ValueError('No style found for: %s, %s' % (name, str(styleAttr)))\n self.style.push(styleDict)\n<|end_body_2|>\n\n<|body_start_3|>\n if name == 'document':\n self._pushStyle(name, attrs)\n self.document = self.style.makeNewDocument()\n elif name == 'block':\n if not self.document:\n raise XmlMarkupUnexpectedElementError('Block element encountered outside of document element.')\n self._pushStyle(name, attrs)\n self.block = self.style.makeNewBlock()\n self.glyphs = []\n elif name == 'inline':\n if not self.block:\n raise XmlMarkupUnexpectedElementError('Inline element encountered outside of block element.')\n self._pushStyle(name, attrs)\n elif self.tagAliases.has(name):\n baseElement = self.tagAliases.get(name)\n self.startElement(baseElement, {'style': name})\n else:\n raise XmlMarkupUnknownElementError(name)\n<|end_body_3|>\n\n<|body_start_4|>\n if name == 'document':\n self.style.pop()\n self.document.layout()\n elif name == 'block':\n ellipsisGlyph = self.style.makeNewGlyphs('…')[0]\n self.block.setEllipsisGlyph(ellipsisGlyph)\n self.style.pop()\n self.block.addGlyphs(self.glyphs)\n self.document.addBlock(self.block)\n self.block = None\n self.glyphs = None\n elif name == 'inline':\n self.style.pop()\n else:\n baseElement = self.tagAliases.get(name)\n self.endElement(baseElement)\n<|end_body_4|>\n\n<|body_start_5|>\n if self.glyphs != None:\n self.glyphs.extend(self.style.makeNewGlyphs(content))\n else:\n content = content.strip()\n if content:\n raise XmlMarkupUnexpectedCharactersError(content)\n<|end_body_5|>\n", "revision_id": "61351f52f01367439e8810d2c482a9c9897545d8", "skeleton": "<|skeleton|>\nclass _XmlMarkupHandler:\n \"\"\"XML content handler for XML text layout markup.\"\"\"\n\n def __init__(self, styleRegistry, 
tagAliases=None):\n \"\"\"Initializes the content handler with the given style registry and tag aliases.\"\"\"\n <|body_0|>\n\n def startDocument(self):\n \"\"\"Called by the XML parser at the beginning of parsing the XML document.\"\"\"\n <|body_1|>\n\n def _pushStyle(self, name, attrs):\n \"\"\"Sets the current style to the style defined by the \"style\" attribute of the given tag. If that style doesn't exist, we use the style named by the tag.\"\"\"\n <|body_2|>\n\n def startElement(self, name, attrs):\n \"\"\"Handles the beginning of an XML element.\"\"\"\n <|body_3|>\n\n def endElement(self, name):\n \"\"\"Handles the end of an XML element.\"\"\"\n <|body_4|>\n\n def characters(self, content):\n \"\"\"Handles XML character data.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class _XmlMarkupHandler:\n \"\"\"XML content handler for XML text layout markup.\"\"\"\n\n def __init__(self, styleRegistry, tagAliases=None):\n \"\"\"Initializes the content handler with the given style registry and tag aliases.\"\"\"\n xml.sax.handler.ContentHandler.__init__(self)\n self.styleRegistry = styleRegistry\n if not tagAliases:\n tagAliases = XmlMarkupTagAliases()\n self.tagAliases = tagAliases\n\n def startDocument(self):\n \"\"\"Called by the XML parser at the beginning of parsing the XML document.\"\"\"\n self.style = CascadingStyleStack()\n self.document = None\n self.block = None\n self.glyphs = None\n\n def _pushStyle(self, name, attrs):\n \"\"\"Sets the current style to the style defined by the \"style\" attribute of the given tag. If that style doesn't exist, we use the style named by the tag.\"\"\"\n styleDict = None\n styleAttr = attrs.get('style', None)\n if styleAttr:\n styleDict = self.styleRegistry.findMatch(styleAttr)\n if styleDict == None:\n styleDict = self.styleRegistry.findMatch(name)\n if styleDict == None:\n raise ValueError('No style found for: %s, %s' % (name, str(styleAttr)))\n self.style.push(styleDict)\n\n def startElement(self, name, attrs):\n \"\"\"Handles the beginning of an XML element.\"\"\"\n if name == 'document':\n self._pushStyle(name, attrs)\n self.document = self.style.makeNewDocument()\n elif name == 'block':\n if not self.document:\n raise XmlMarkupUnexpectedElementError('Block element encountered outside of document element.')\n self._pushStyle(name, attrs)\n self.block = self.style.makeNewBlock()\n self.glyphs = []\n elif name == 'inline':\n if not self.block:\n raise XmlMarkupUnexpectedElementError('Inline element encountered outside of block element.')\n self._pushStyle(name, attrs)\n elif self.tagAliases.has(name):\n baseElement = self.tagAliases.get(name)\n self.startElement(baseElement, {'style': name})\n else:\n raise XmlMarkupUnknownElementError(name)\n\n def endElement(self, name):\n \"\"\"Handles the end of an XML element.\"\"\"\n if name == 'document':\n self.style.pop()\n self.document.layout()\n elif name == 'block':\n ellipsisGlyph = self.style.makeNewGlyphs('…')[0]\n self.block.setEllipsisGlyph(ellipsisGlyph)\n self.style.pop()\n self.block.addGlyphs(self.glyphs)\n self.document.addBlock(self.block)\n self.block = None\n self.glyphs = None\n elif name == 'inline':\n self.style.pop()\n else:\n baseElement = self.tagAliases.get(name)\n self.endElement(baseElement)\n\n def characters(self, content):\n \"\"\"Handles XML character data.\"\"\"\n if self.glyphs != None:\n 
self.glyphs.extend(self.style.makeNewGlyphs(content))\n else:\n content = content.strip()\n if content:\n raise XmlMarkupUnexpectedCharactersError(content)\n", "source": "the_stack_v2_python_sparse", "source_path": "enso/enso/graphics/xmltextlayout.py", "source_repo": "GChristensen/enso-portable", "split": "val", "star_events_count": 144}
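Editor's sketch of how a SAX content handler like the one above is typically driven. _XmlMarkupHandler is module-private in the record, and style_registry is hypothetical: its findMatch() must resolve the 'document', 'block', 'inline' and 'em' styles, or _pushStyle raises ValueError.

import xml.sax

markup = b'<document><block>Hello <inline style="em">world</inline></block></document>'
handler = _XmlMarkupHandler(style_registry)    # hypothetical registry instance
xml.sax.parseString(markup, handler)           # fires start/end/characters callbacks
document = handler.document                    # already laid out by endElement('document')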
{"blob_id": "4026ec5ce846e765bd87dbd5b56a0e05331efdff", "bodies": ["s2t.calculate_prensors\ns2t.calculate_prensors_with_graph\ns2t.get_default_options\ns2t.get_options_with_minimal_checks\ns2t.calculate_prensors_with_source_paths\ns2t.create_expression_from_prensor\ns2t.create_expression_from_file_descriptor_set\ns2t.create_expression_from_proto\ns2t.Expression\ns2t.create_path\ns2t.Path\ns2t.Step\ns2t.ChildNodeTensor\ns2t.LeafNodeTensor\ns2t.NodeTensor\ns2t.Prensor\ns2t.RootNodeTensor\ns2t.create_prensor_from_descendant_nodes\ns2t.create_prensor_from_root_and_children\ns2t.prensor_value", "from struct2tensor import expression_impl\nmodules = ['apply_schema', 'broadcast', 'depth_limit', 'filter_expression', 'index', 'map_prensor', 'map_prensor_to_prensor', 'map_values', 'parquet', 'placeholder', 'project', 'promote', 'promote_and_broadcast', 'proto', 'reroot', 'size', 'slice_expression']\nfor module in modules:\n getattr(expression_impl, module)"], "bodies_text": "<|body_start_0|>\n s2t.calculate_prensors\n s2t.calculate_prensors_with_graph\n s2t.get_default_options\n s2t.get_options_with_minimal_checks\n s2t.calculate_prensors_with_source_paths\n s2t.create_expression_from_prensor\n s2t.create_expression_from_file_descriptor_set\n s2t.create_expression_from_proto\n s2t.Expression\n s2t.create_path\n s2t.Path\n s2t.Step\n s2t.ChildNodeTensor\n s2t.LeafNodeTensor\n s2t.NodeTensor\n s2t.Prensor\n s2t.RootNodeTensor\n s2t.create_prensor_from_descendant_nodes\n s2t.create_prensor_from_root_and_children\n s2t.prensor_value\n<|end_body_0|>\n\n<|body_start_1|>\n from struct2tensor import expression_impl\n modules = ['apply_schema', 'broadcast', 'depth_limit', 'filter_expression', 'index', 'map_prensor', 'map_prensor_to_prensor', 'map_values', 'parquet', 'placeholder', 'project', 'promote', 'promote_and_broadcast', 'proto', 'reroot', 'size', 'slice_expression']\n for module in modules:\n getattr(expression_impl, module)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Struct2tensorModuleTest", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Struct2tensorModuleTest:\n\n def test_importing_struct2tensor_modules(self):\n \"\"\"This tests that the exposed packages in root __init__.py are found.\"\"\"\n <|body_0|>\n\n def test_importing_expression_impl_modules(self):\n \"\"\"This tests that the expression_impl/__init__.py imports are found.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n s2t.calculate_prensors\n s2t.calculate_prensors_with_graph\n s2t.get_default_options\n s2t.get_options_with_minimal_checks\n s2t.calculate_prensors_with_source_paths\n s2t.create_expression_from_prensor\n s2t.create_expression_from_file_descriptor_set\n s2t.create_expression_from_proto\n s2t.Expression\n s2t.create_path\n s2t.Path\n s2t.Step\n s2t.ChildNodeTensor\n s2t.LeafNodeTensor\n s2t.NodeTensor\n s2t.Prensor\n s2t.RootNodeTensor\n s2t.create_prensor_from_descendant_nodes\n s2t.create_prensor_from_root_and_children\n s2t.prensor_value\n<|end_body_0|>\n\n<|body_start_1|>\n from struct2tensor import expression_impl\n modules = ['apply_schema', 'broadcast', 'depth_limit', 'filter_expression', 'index', 'map_prensor', 'map_prensor_to_prensor', 'map_values', 'parquet', 'placeholder', 'project', 'promote', 'promote_and_broadcast', 'proto', 'reroot', 'size', 'slice_expression']\n for module in modules:\n getattr(expression_impl, module)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000354", "length_bytes": 
2247, "license_type": "permissive", "methods": [{"docstring": "This tests that the exposed packages in root __init__.py are found.", "name": "test_importing_struct2tensor_modules", "signature": "def test_importing_struct2tensor_modules(self)"}, {"docstring": "This tests that the expression_impl/__init__.py imports are found.", "name": "test_importing_expression_impl_modules", "signature": "def test_importing_expression_impl_modules(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_test_002452", "prompt": "Implement the Python class `Struct2tensorModuleTest` described below.\n\nClass description:\nImplement the Struct2tensorModuleTest class.\n\nMethod signatures and docstrings:\n- def test_importing_struct2tensor_modules(self): This tests that the exposed packages in root __init__.py are found.\n- def test_importing_expression_impl_modules(self): This tests that the expression_impl/__init__.py imports are found.", "prompted_full_text": "Implement the Python class `Struct2tensorModuleTest` described below.\n\nClass description:\nImplement the Struct2tensorModuleTest class.\n\nMethod signatures and docstrings:\n- def test_importing_struct2tensor_modules(self): This tests that the exposed packages in root __init__.py are found.\n- def test_importing_expression_impl_modules(self): This tests that the expression_impl/__init__.py imports are found.\n\n<|skeleton|>\nclass Struct2tensorModuleTest:\n\n def test_importing_struct2tensor_modules(self):\n \"\"\"This tests that the exposed packages in root __init__.py are found.\"\"\"\n <|body_0|>\n\n def test_importing_expression_impl_modules(self):\n \"\"\"This tests that the expression_impl/__init__.py imports are found.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n s2t.calculate_prensors\n s2t.calculate_prensors_with_graph\n s2t.get_default_options\n s2t.get_options_with_minimal_checks\n s2t.calculate_prensors_with_source_paths\n s2t.create_expression_from_prensor\n s2t.create_expression_from_file_descriptor_set\n s2t.create_expression_from_proto\n s2t.Expression\n s2t.create_path\n s2t.Path\n s2t.Step\n s2t.ChildNodeTensor\n s2t.LeafNodeTensor\n s2t.NodeTensor\n s2t.Prensor\n s2t.RootNodeTensor\n s2t.create_prensor_from_descendant_nodes\n s2t.create_prensor_from_root_and_children\n s2t.prensor_value\n<|end_body_0|>\n\n<|body_start_1|>\n from struct2tensor import expression_impl\n modules = ['apply_schema', 'broadcast', 'depth_limit', 'filter_expression', 'index', 'map_prensor', 'map_prensor_to_prensor', 'map_values', 'parquet', 'placeholder', 'project', 'promote', 'promote_and_broadcast', 'proto', 'reroot', 'size', 'slice_expression']\n for module in modules:\n getattr(expression_impl, module)\n<|end_body_1|>\n", "revision_id": "86d8676ac295697853be8a194460e4d71de3990f", "skeleton": "<|skeleton|>\nclass Struct2tensorModuleTest:\n\n def test_importing_struct2tensor_modules(self):\n \"\"\"This tests that the exposed packages in root __init__.py are found.\"\"\"\n <|body_0|>\n\n def test_importing_expression_impl_modules(self):\n \"\"\"This tests that the expression_impl/__init__.py imports are found.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Struct2tensorModuleTest:\n def test_importing_struct2tensor_modules(self):\n \"\"\"This tests that the exposed packages in root __init__.py are found.\"\"\"\n s2t.calculate_prensors\n s2t.calculate_prensors_with_graph\n 
s2t.get_default_options\n s2t.get_options_with_minimal_checks\n s2t.calculate_prensors_with_source_paths\n s2t.create_expression_from_prensor\n s2t.create_expression_from_file_descriptor_set\n s2t.create_expression_from_proto\n s2t.Expression\n s2t.create_path\n s2t.Path\n s2t.Step\n s2t.ChildNodeTensor\n s2t.LeafNodeTensor\n s2t.NodeTensor\n s2t.Prensor\n s2t.RootNodeTensor\n s2t.create_prensor_from_descendant_nodes\n s2t.create_prensor_from_root_and_children\n s2t.prensor_value\n\n def test_importing_expression_impl_modules(self):\n \"\"\"This tests that the expression_impl/__init__.py imports are found.\"\"\"\n from struct2tensor import expression_impl\n modules = ['apply_schema', 'broadcast', 'depth_limit', 'filter_expression', 'index', 'map_prensor', 'map_prensor_to_prensor', 'map_values', 'parquet', 'placeholder', 'project', 'promote', 'promote_and_broadcast', 'proto', 'reroot', 'size', 'slice_expression']\n for module in modules:\n getattr(expression_impl, module)\n", "source": "the_stack_v2_python_sparse", "source_path": "struct2tensor/struct2tensor_module_test.py", "source_repo": "google/struct2tensor", "split": "val", "star_events_count": 36}
{"blob_id": "3a97237460f401ce7245f6112d035bd6248e7a6a", "bodies": ["dp = [0] * (n + 1)\ndp[0] = dp[1] = 1\nfor i in range(2, n + 1):\n for j in range(i):\n dp[i] = dp[i] + dp[j] * dp[i - j - 1]\nreturn dp[n]", "if n == 1:\n return 1\nif n == 0:\n return 1\nres = 0\nfor i in range(n):\n res += self.f(i) * self.f(n - 1 - i)\nreturn res"], "bodies_text": "<|body_start_0|>\n dp = [0] * (n + 1)\n dp[0] = dp[1] = 1\n for i in range(2, n + 1):\n for j in range(i):\n dp[i] = dp[i] + dp[j] * dp[i - j - 1]\n return dp[n]\n<|end_body_0|>\n\n<|body_start_1|>\n if n == 1:\n return 1\n if n == 0:\n return 1\n res = 0\n for i in range(n):\n res += self.f(i) * self.f(n - 1 - i)\n return res\n<|end_body_1|>\n", "class_docstring": "https://segmentfault.com/a/1190000003811919", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n \"\"\"https://segmentfault.com/a/1190000003811919\"\"\"\n\n def numTrees(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_0|>\n\n def f(self, n):\n \"\"\"Time Limit Exceeded method\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n dp = [0] * (n + 1)\n dp[0] = dp[1] = 1\n for i in range(2, n + 1):\n for j in range(i):\n dp[i] = dp[i] + dp[j] * dp[i - j - 1]\n return dp[n]\n<|end_body_0|>\n\n<|body_start_1|>\n if n == 1:\n return 1\n if n == 0:\n return 1\n res = 0\n for i in range(n):\n res += self.f(i) * self.f(n - 1 - i)\n return res\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000355", "length_bytes": 646, "license_type": "no_license", "methods": [{"docstring": ":type n: int :rtype: int", "name": "numTrees", "signature": "def numTrees(self, n)"}, {"docstring": "Time Limit Exceeded method", "name": "f", "signature": "def f(self, n)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_047472", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nhttps://segmentfault.com/a/1190000003811919\n\nMethod signatures and docstrings:\n- def numTrees(self, n): :type n: int :rtype: int\n- def f(self, n): Time Limit Exceeded method", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nhttps://segmentfault.com/a/1190000003811919\n\nMethod signatures and docstrings:\n- def numTrees(self, n): :type n: int :rtype: int\n- def f(self, n): Time Limit Exceeded method\n\n<|skeleton|>\nclass Solution:\n \"\"\"https://segmentfault.com/a/1190000003811919\"\"\"\n\n def numTrees(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_0|>\n\n def f(self, n):\n \"\"\"Time Limit Exceeded method\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n dp = [0] * (n + 1)\n dp[0] = dp[1] = 1\n for i in range(2, n + 1):\n for j in range(i):\n dp[i] = dp[i] + dp[j] * dp[i - j - 1]\n return dp[n]\n<|end_body_0|>\n\n<|body_start_1|>\n if n == 1:\n return 1\n if n == 0:\n return 1\n res = 0\n for i in range(n):\n res += self.f(i) * self.f(n - 1 - i)\n return res\n<|end_body_1|>\n", "revision_id": "03d3e34522c8c819388634ab4b63077da864a4e1", "skeleton": "<|skeleton|>\nclass Solution:\n \"\"\"https://segmentfault.com/a/1190000003811919\"\"\"\n\n def numTrees(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_0|>\n\n def f(self, n):\n \"\"\"Time Limit Exceeded method\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n 
\"\"\"https://segmentfault.com/a/1190000003811919\"\"\"\n\n def numTrees(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n dp = [0] * (n + 1)\n dp[0] = dp[1] = 1\n for i in range(2, n + 1):\n for j in range(i):\n dp[i] = dp[i] + dp[j] * dp[i - j - 1]\n return dp[n]\n\n def f(self, n):\n \"\"\"Time Limit Exceeded method\"\"\"\n if n == 1:\n return 1\n if n == 0:\n return 1\n res = 0\n for i in range(n):\n res += self.f(i) * self.f(n - 1 - i)\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "problems/96_unique_binary_search_trees.py", "source_repo": "xueyuanl/leetcode-py", "split": "val", "star_events_count": 4}
{"blob_id": "bf432d82d93968be576231dfed80bdc5018d2a13", "bodies": ["dt_fpaths = list(self.dt_root_fpath.glob('*/per_sweep_annotations_amodal/*.json'))\ngt_fpaths = list(self.gt_root_fpath.glob('*/per_sweep_annotations_amodal/*.json'))\nassert len(dt_fpaths) == len(gt_fpaths)\ndata: DefaultDict[str, np.ndarray] = defaultdict(list)\ncls_to_ninst: DefaultDict[str, int] = defaultdict(int)\nargs = [(self.dt_root_fpath, gt_fpath, self.cfg) for gt_fpath in gt_fpaths]\nwith Pool(os.cpu_count()) as p:\n accum = p.starmap(accumulate, args)\nfor frame_stats, frame_cls_to_inst in accum:\n for cls_name, cls_stats in frame_stats.items():\n data[cls_name].append(cls_stats)\n for cls_name, num_inst in frame_cls_to_inst.items():\n cls_to_ninst[cls_name] += num_inst\ndata = defaultdict(np.ndarray, {k: np.vstack(v) for k, v in data.items()})\ninit_data = {dt_cls: self.cfg.summary_default_vals for dt_cls in self.cfg.dt_classes}\nsummary = pd.DataFrame.from_dict(init_data, orient='index', columns=STATISTIC_NAMES)\nsummary_update = pd.DataFrame.from_dict(self.summarize(data, cls_to_ninst), orient='index', columns=STATISTIC_NAMES)\nsummary.update(summary_update)\nsummary = summary.round(SIGNIFICANT_DIGITS)\nsummary.index = summary.index.str.title()\nsummary.loc['Average Metrics'] = summary.mean().round(SIGNIFICANT_DIGITS)\nreturn summary", "summary: DefaultDict[str, List[float]] = defaultdict(list)\nrecalls_interp = np.linspace(0, 1, self.cfg.n_rec_samples)\nnum_ths = len(self.cfg.affinity_threshs)\nif not self.figs_fpath.is_dir():\n self.figs_fpath.mkdir(parents=True, exist_ok=True)\nfor cls_name, cls_stats in data.items():\n ninst = cls_to_ninst[cls_name]\n ranks = cls_stats[:, -1].argsort()[::-1]\n cls_stats = cls_stats[ranks]\n for i, _ in enumerate(self.cfg.affinity_threshs):\n tp = cls_stats[:, i].astype(bool)\n ap_th, precisions_interp = calc_ap(tp, recalls_interp, ninst)\n summary[cls_name] += [ap_th]\n if self.cfg.save_figs:\n plot(recalls_interp, precisions_interp, cls_name, self.figs_fpath)\n ap = np.array(summary[cls_name][:num_ths]).mean()\n tp_metrics_mask = ~np.isnan(cls_stats[:, num_ths:num_ths + N_TP_ERRORS]).all(axis=1)\n if ~tp_metrics_mask.any():\n tp_metrics = self.cfg.tp_normalization_terms\n else:\n tp_metrics = np.mean(cls_stats[:, num_ths:num_ths + N_TP_ERRORS][tp_metrics_mask], axis=0)\n tp_scores = 1 - tp_metrics / self.cfg.tp_normalization_terms\n cds = ap * tp_scores.mean()\n summary[cls_name] = [ap, *tp_metrics, cds]\nlogger.info(f'summary = {summary}')\nreturn summary"], "bodies_text": "<|body_start_0|>\n dt_fpaths = list(self.dt_root_fpath.glob('*/per_sweep_annotations_amodal/*.json'))\n gt_fpaths = list(self.gt_root_fpath.glob('*/per_sweep_annotations_amodal/*.json'))\n assert len(dt_fpaths) == len(gt_fpaths)\n data: DefaultDict[str, np.ndarray] = defaultdict(list)\n cls_to_ninst: DefaultDict[str, int] = defaultdict(int)\n args = [(self.dt_root_fpath, gt_fpath, self.cfg) for gt_fpath in gt_fpaths]\n with Pool(os.cpu_count()) as p:\n accum = p.starmap(accumulate, args)\n for frame_stats, frame_cls_to_inst in accum:\n for cls_name, cls_stats in frame_stats.items():\n data[cls_name].append(cls_stats)\n for cls_name, num_inst in frame_cls_to_inst.items():\n cls_to_ninst[cls_name] += num_inst\n data = defaultdict(np.ndarray, {k: np.vstack(v) for k, v in data.items()})\n init_data = {dt_cls: self.cfg.summary_default_vals for dt_cls in self.cfg.dt_classes}\n summary = pd.DataFrame.from_dict(init_data, orient='index', columns=STATISTIC_NAMES)\n summary_update = 
pd.DataFrame.from_dict(self.summarize(data, cls_to_ninst), orient='index', columns=STATISTIC_NAMES)\n summary.update(summary_update)\n summary = summary.round(SIGNIFICANT_DIGITS)\n summary.index = summary.index.str.title()\n summary.loc['Average Metrics'] = summary.mean().round(SIGNIFICANT_DIGITS)\n return summary\n<|end_body_0|>\n\n<|body_start_1|>\n summary: DefaultDict[str, List[float]] = defaultdict(list)\n recalls_interp = np.linspace(0, 1, self.cfg.n_rec_samples)\n num_ths = len(self.cfg.affinity_threshs)\n if not self.figs_fpath.is_dir():\n self.figs_fpath.mkdir(parents=True, exist_ok=True)\n for cls_name, cls_stats in data.items():\n ninst = cls_to_ninst[cls_name]\n ranks = cls_stats[:, -1].argsort()[::-1]\n cls_stats = cls_stats[ranks]\n for i, _ in enumerate(self.cfg.affinity_threshs):\n tp = cls_stats[:, i].astype(bool)\n ap_th, precisions_interp = calc_ap(tp, recalls_interp, ninst)\n summary[cls_name] += [ap_th]\n if self.cfg.save_figs:\n plot(recalls_interp, precisions_interp, cls_name, self.figs_fpath)\n ap = np.array(summary[cls_name][:num_ths]).mean()\n tp_metrics_mask = ~np.isnan(cls_stats[:, num_ths:num_ths + N_TP_ERRORS]).all(axis=1)\n if ~tp_metrics_mask.any():\n tp_metrics = self.cfg.tp_normalization_terms\n else:\n tp_metrics = np.mean(cls_stats[:, num_ths:num_ths + N_TP_ERRORS][tp_metrics_mask], axis=0)\n tp_scores = 1 - tp_metrics / self.cfg.tp_normalization_terms\n cds = ap * tp_scores.mean()\n summary[cls_name] = [ap, *tp_metrics, cds]\n logger.info(f'summary = {summary}')\n return summary\n<|end_body_1|>\n", "class_docstring": "Instantiates a DetectionEvaluator object for evaluation. Args: dt_fpath_root: Path to the folder which contains the detections. gt_fpath_root: Path to the folder which contains the split of logs. figs_fpath: Path to the folder which will contain the output figures. cfg: Detection configuration settings.", "class_name": "DetectionEvaluator", "detected_licenses": ["MIT", "Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DetectionEvaluator:\n \"\"\"Instantiates a DetectionEvaluator object for evaluation. Args: dt_fpath_root: Path to the folder which contains the detections. gt_fpath_root: Path to the folder which contains the split of logs. figs_fpath: Path to the folder which will contain the output figures. cfg: Detection configuration settings.\"\"\"\n\n def evaluate(self) -> pd.DataFrame:\n \"\"\"Evaluate detection output and return metrics. The multiprocessing library is used for parallel assignment between detections and ground truth annotations. Returns: Evaluation metrics of shape (C + 1, K) where C + 1 is the number of classes. plus a row for their means. K refers to the number of evaluation metrics.\"\"\"\n <|body_0|>\n\n def summarize(self, data: DefaultDict[str, np.ndarray], cls_to_ninst: DefaultDict[str, int]) -> DefaultDict[str, List[float]]:\n \"\"\"Calculate and print the detection metrics. Args: data: The aggregated data used for summarization. cls_to_ninst: Map of classes to number of instances. 
Returns: summary: The summary statistics.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n dt_fpaths = list(self.dt_root_fpath.glob('*/per_sweep_annotations_amodal/*.json'))\n gt_fpaths = list(self.gt_root_fpath.glob('*/per_sweep_annotations_amodal/*.json'))\n assert len(dt_fpaths) == len(gt_fpaths)\n data: DefaultDict[str, np.ndarray] = defaultdict(list)\n cls_to_ninst: DefaultDict[str, int] = defaultdict(int)\n args = [(self.dt_root_fpath, gt_fpath, self.cfg) for gt_fpath in gt_fpaths]\n with Pool(os.cpu_count()) as p:\n accum = p.starmap(accumulate, args)\n for frame_stats, frame_cls_to_inst in accum:\n for cls_name, cls_stats in frame_stats.items():\n data[cls_name].append(cls_stats)\n for cls_name, num_inst in frame_cls_to_inst.items():\n cls_to_ninst[cls_name] += num_inst\n data = defaultdict(np.ndarray, {k: np.vstack(v) for k, v in data.items()})\n init_data = {dt_cls: self.cfg.summary_default_vals for dt_cls in self.cfg.dt_classes}\n summary = pd.DataFrame.from_dict(init_data, orient='index', columns=STATISTIC_NAMES)\n summary_update = pd.DataFrame.from_dict(self.summarize(data, cls_to_ninst), orient='index', columns=STATISTIC_NAMES)\n summary.update(summary_update)\n summary = summary.round(SIGNIFICANT_DIGITS)\n summary.index = summary.index.str.title()\n summary.loc['Average Metrics'] = summary.mean().round(SIGNIFICANT_DIGITS)\n return summary\n<|end_body_0|>\n\n<|body_start_1|>\n summary: DefaultDict[str, List[float]] = defaultdict(list)\n recalls_interp = np.linspace(0, 1, self.cfg.n_rec_samples)\n num_ths = len(self.cfg.affinity_threshs)\n if not self.figs_fpath.is_dir():\n self.figs_fpath.mkdir(parents=True, exist_ok=True)\n for cls_name, cls_stats in data.items():\n ninst = cls_to_ninst[cls_name]\n ranks = cls_stats[:, -1].argsort()[::-1]\n cls_stats = cls_stats[ranks]\n for i, _ in enumerate(self.cfg.affinity_threshs):\n tp = cls_stats[:, i].astype(bool)\n ap_th, precisions_interp = calc_ap(tp, recalls_interp, ninst)\n summary[cls_name] += [ap_th]\n if self.cfg.save_figs:\n plot(recalls_interp, precisions_interp, cls_name, self.figs_fpath)\n ap = np.array(summary[cls_name][:num_ths]).mean()\n tp_metrics_mask = ~np.isnan(cls_stats[:, num_ths:num_ths + N_TP_ERRORS]).all(axis=1)\n if ~tp_metrics_mask.any():\n tp_metrics = self.cfg.tp_normalization_terms\n else:\n tp_metrics = np.mean(cls_stats[:, num_ths:num_ths + N_TP_ERRORS][tp_metrics_mask], axis=0)\n tp_scores = 1 - tp_metrics / self.cfg.tp_normalization_terms\n cds = ap * tp_scores.mean()\n summary[cls_name] = [ap, *tp_metrics, cds]\n logger.info(f'summary = {summary}')\n return summary\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000356", "length_bytes": 8609, "license_type": "permissive", "methods": [{"docstring": "Evaluate detection output and return metrics. The multiprocessing library is used for parallel assignment between detections and ground truth annotations. Returns: Evaluation metrics of shape (C + 1, K) where C + 1 is the number of classes. plus a row for their means. K refers to the number of evaluation metrics.", "name": "evaluate", "signature": "def evaluate(self) -> pd.DataFrame"}, {"docstring": "Calculate and print the detection metrics. Args: data: The aggregated data used for summarization. cls_to_ninst: Map of classes to number of instances. 
Returns: summary: The summary statistics.", "name": "summarize", "signature": "def summarize(self, data: DefaultDict[str, np.ndarray], cls_to_ninst: DefaultDict[str, int]) -> DefaultDict[str, List[float]]"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_002058", "prompt": "Implement the Python class `DetectionEvaluator` described below.\n\nClass description:\nInstantiates a DetectionEvaluator object for evaluation. Args: dt_fpath_root: Path to the folder which contains the detections. gt_fpath_root: Path to the folder which contains the split of logs. figs_fpath: Path to the folder which will contain the output figures. cfg: Detection configuration settings.\n\nMethod signatures and docstrings:\n- def evaluate(self) -> pd.DataFrame: Evaluate detection output and return metrics. The multiprocessing library is used for parallel assignment between detections and ground truth annotations. Returns: Evaluation metrics of shape (C + 1, K) where C + 1 is the number of classes. plus a row for their means. K refers to the number of evaluation metrics.\n- def summarize(self, data: DefaultDict[str, np.ndarray], cls_to_ninst: DefaultDict[str, int]) -> DefaultDict[str, List[float]]: Calculate and print the detection metrics. Args: data: The aggregated data used for summarization. cls_to_ninst: Map of classes to number of instances. Returns: summary: The summary statistics.", "prompted_full_text": "Implement the Python class `DetectionEvaluator` described below.\n\nClass description:\nInstantiates a DetectionEvaluator object for evaluation. Args: dt_fpath_root: Path to the folder which contains the detections. gt_fpath_root: Path to the folder which contains the split of logs. figs_fpath: Path to the folder which will contain the output figures. cfg: Detection configuration settings.\n\nMethod signatures and docstrings:\n- def evaluate(self) -> pd.DataFrame: Evaluate detection output and return metrics. The multiprocessing library is used for parallel assignment between detections and ground truth annotations. Returns: Evaluation metrics of shape (C + 1, K) where C + 1 is the number of classes. plus a row for their means. K refers to the number of evaluation metrics.\n- def summarize(self, data: DefaultDict[str, np.ndarray], cls_to_ninst: DefaultDict[str, int]) -> DefaultDict[str, List[float]]: Calculate and print the detection metrics. Args: data: The aggregated data used for summarization. cls_to_ninst: Map of classes to number of instances. Returns: summary: The summary statistics.\n\n<|skeleton|>\nclass DetectionEvaluator:\n \"\"\"Instantiates a DetectionEvaluator object for evaluation. Args: dt_fpath_root: Path to the folder which contains the detections. gt_fpath_root: Path to the folder which contains the split of logs. figs_fpath: Path to the folder which will contain the output figures. cfg: Detection configuration settings.\"\"\"\n\n def evaluate(self) -> pd.DataFrame:\n \"\"\"Evaluate detection output and return metrics. The multiprocessing library is used for parallel assignment between detections and ground truth annotations. Returns: Evaluation metrics of shape (C + 1, K) where C + 1 is the number of classes. plus a row for their means. K refers to the number of evaluation metrics.\"\"\"\n <|body_0|>\n\n def summarize(self, data: DefaultDict[str, np.ndarray], cls_to_ninst: DefaultDict[str, int]) -> DefaultDict[str, List[float]]:\n \"\"\"Calculate and print the detection metrics. Args: data: The aggregated data used for summarization. 
cls_to_ninst: Map of classes to number of instances. Returns: summary: The summary statistics.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n dt_fpaths = list(self.dt_root_fpath.glob('*/per_sweep_annotations_amodal/*.json'))\n gt_fpaths = list(self.gt_root_fpath.glob('*/per_sweep_annotations_amodal/*.json'))\n assert len(dt_fpaths) == len(gt_fpaths)\n data: DefaultDict[str, np.ndarray] = defaultdict(list)\n cls_to_ninst: DefaultDict[str, int] = defaultdict(int)\n args = [(self.dt_root_fpath, gt_fpath, self.cfg) for gt_fpath in gt_fpaths]\n with Pool(os.cpu_count()) as p:\n accum = p.starmap(accumulate, args)\n for frame_stats, frame_cls_to_inst in accum:\n for cls_name, cls_stats in frame_stats.items():\n data[cls_name].append(cls_stats)\n for cls_name, num_inst in frame_cls_to_inst.items():\n cls_to_ninst[cls_name] += num_inst\n data = defaultdict(np.ndarray, {k: np.vstack(v) for k, v in data.items()})\n init_data = {dt_cls: self.cfg.summary_default_vals for dt_cls in self.cfg.dt_classes}\n summary = pd.DataFrame.from_dict(init_data, orient='index', columns=STATISTIC_NAMES)\n summary_update = pd.DataFrame.from_dict(self.summarize(data, cls_to_ninst), orient='index', columns=STATISTIC_NAMES)\n summary.update(summary_update)\n summary = summary.round(SIGNIFICANT_DIGITS)\n summary.index = summary.index.str.title()\n summary.loc['Average Metrics'] = summary.mean().round(SIGNIFICANT_DIGITS)\n return summary\n<|end_body_0|>\n\n<|body_start_1|>\n summary: DefaultDict[str, List[float]] = defaultdict(list)\n recalls_interp = np.linspace(0, 1, self.cfg.n_rec_samples)\n num_ths = len(self.cfg.affinity_threshs)\n if not self.figs_fpath.is_dir():\n self.figs_fpath.mkdir(parents=True, exist_ok=True)\n for cls_name, cls_stats in data.items():\n ninst = cls_to_ninst[cls_name]\n ranks = cls_stats[:, -1].argsort()[::-1]\n cls_stats = cls_stats[ranks]\n for i, _ in enumerate(self.cfg.affinity_threshs):\n tp = cls_stats[:, i].astype(bool)\n ap_th, precisions_interp = calc_ap(tp, recalls_interp, ninst)\n summary[cls_name] += [ap_th]\n if self.cfg.save_figs:\n plot(recalls_interp, precisions_interp, cls_name, self.figs_fpath)\n ap = np.array(summary[cls_name][:num_ths]).mean()\n tp_metrics_mask = ~np.isnan(cls_stats[:, num_ths:num_ths + N_TP_ERRORS]).all(axis=1)\n if ~tp_metrics_mask.any():\n tp_metrics = self.cfg.tp_normalization_terms\n else:\n tp_metrics = np.mean(cls_stats[:, num_ths:num_ths + N_TP_ERRORS][tp_metrics_mask], axis=0)\n tp_scores = 1 - tp_metrics / self.cfg.tp_normalization_terms\n cds = ap * tp_scores.mean()\n summary[cls_name] = [ap, *tp_metrics, cds]\n logger.info(f'summary = {summary}')\n return summary\n<|end_body_1|>\n", "revision_id": "2e2aed64d4a286821aece806134054c6d6e1d3cb", "skeleton": "<|skeleton|>\nclass DetectionEvaluator:\n \"\"\"Instantiates a DetectionEvaluator object for evaluation. Args: dt_fpath_root: Path to the folder which contains the detections. gt_fpath_root: Path to the folder which contains the split of logs. figs_fpath: Path to the folder which will contain the output figures. cfg: Detection configuration settings.\"\"\"\n\n def evaluate(self) -> pd.DataFrame:\n \"\"\"Evaluate detection output and return metrics. The multiprocessing library is used for parallel assignment between detections and ground truth annotations. Returns: Evaluation metrics of shape (C + 1, K) where C + 1 is the number of classes. plus a row for their means. 
K refers to the number of evaluation metrics.\"\"\"\n <|body_0|>\n\n def summarize(self, data: DefaultDict[str, np.ndarray], cls_to_ninst: DefaultDict[str, int]) -> DefaultDict[str, List[float]]:\n \"\"\"Calculate and print the detection metrics. Args: data: The aggregated data used for summarization. cls_to_ninst: Map of classes to number of instances. Returns: summary: The summary statistics.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DetectionEvaluator:\n \"\"\"Instantiates a DetectionEvaluator object for evaluation. Args: dt_fpath_root: Path to the folder which contains the detections. gt_fpath_root: Path to the folder which contains the split of logs. figs_fpath: Path to the folder which will contain the output figures. cfg: Detection configuration settings.\"\"\"\n\n def evaluate(self) -> pd.DataFrame:\n \"\"\"Evaluate detection output and return metrics. The multiprocessing library is used for parallel assignment between detections and ground truth annotations. Returns: Evaluation metrics of shape (C + 1, K) where C + 1 is the number of classes. plus a row for their means. K refers to the number of evaluation metrics.\"\"\"\n dt_fpaths = list(self.dt_root_fpath.glob('*/per_sweep_annotations_amodal/*.json'))\n gt_fpaths = list(self.gt_root_fpath.glob('*/per_sweep_annotations_amodal/*.json'))\n assert len(dt_fpaths) == len(gt_fpaths)\n data: DefaultDict[str, np.ndarray] = defaultdict(list)\n cls_to_ninst: DefaultDict[str, int] = defaultdict(int)\n args = [(self.dt_root_fpath, gt_fpath, self.cfg) for gt_fpath in gt_fpaths]\n with Pool(os.cpu_count()) as p:\n accum = p.starmap(accumulate, args)\n for frame_stats, frame_cls_to_inst in accum:\n for cls_name, cls_stats in frame_stats.items():\n data[cls_name].append(cls_stats)\n for cls_name, num_inst in frame_cls_to_inst.items():\n cls_to_ninst[cls_name] += num_inst\n data = defaultdict(np.ndarray, {k: np.vstack(v) for k, v in data.items()})\n init_data = {dt_cls: self.cfg.summary_default_vals for dt_cls in self.cfg.dt_classes}\n summary = pd.DataFrame.from_dict(init_data, orient='index', columns=STATISTIC_NAMES)\n summary_update = pd.DataFrame.from_dict(self.summarize(data, cls_to_ninst), orient='index', columns=STATISTIC_NAMES)\n summary.update(summary_update)\n summary = summary.round(SIGNIFICANT_DIGITS)\n summary.index = summary.index.str.title()\n summary.loc['Average Metrics'] = summary.mean().round(SIGNIFICANT_DIGITS)\n return summary\n\n def summarize(self, data: DefaultDict[str, np.ndarray], cls_to_ninst: DefaultDict[str, int]) -> DefaultDict[str, List[float]]:\n \"\"\"Calculate and print the detection metrics. Args: data: The aggregated data used for summarization. cls_to_ninst: Map of classes to number of instances. 
Returns: summary: The summary statistics.\"\"\"\n summary: DefaultDict[str, List[float]] = defaultdict(list)\n recalls_interp = np.linspace(0, 1, self.cfg.n_rec_samples)\n num_ths = len(self.cfg.affinity_threshs)\n if not self.figs_fpath.is_dir():\n self.figs_fpath.mkdir(parents=True, exist_ok=True)\n for cls_name, cls_stats in data.items():\n ninst = cls_to_ninst[cls_name]\n ranks = cls_stats[:, -1].argsort()[::-1]\n cls_stats = cls_stats[ranks]\n for i, _ in enumerate(self.cfg.affinity_threshs):\n tp = cls_stats[:, i].astype(bool)\n ap_th, precisions_interp = calc_ap(tp, recalls_interp, ninst)\n summary[cls_name] += [ap_th]\n if self.cfg.save_figs:\n plot(recalls_interp, precisions_interp, cls_name, self.figs_fpath)\n ap = np.array(summary[cls_name][:num_ths]).mean()\n tp_metrics_mask = ~np.isnan(cls_stats[:, num_ths:num_ths + N_TP_ERRORS]).all(axis=1)\n if ~tp_metrics_mask.any():\n tp_metrics = self.cfg.tp_normalization_terms\n else:\n tp_metrics = np.mean(cls_stats[:, num_ths:num_ths + N_TP_ERRORS][tp_metrics_mask], axis=0)\n tp_scores = 1 - tp_metrics / self.cfg.tp_normalization_terms\n cds = ap * tp_scores.mean()\n summary[cls_name] = [ap, *tp_metrics, cds]\n logger.info(f'summary = {summary}')\n return summary\n", "source": "the_stack_v2_python_sparse", "source_path": "argoverse/evaluation/detection/eval.py", "source_repo": "ChenChenGith/argoverse-api-ccuse", "split": "val", "star_events_count": 1}
{"blob_id": "9b4d32f3e8f632731c0b279ded68baf7f44b1cc8", "bodies": ["flags = [0, 1] + [True] * (max_num + 1)\ncount = 0\nprime = 2\nwhile prime <= math.sqrt(max_num):\n for i in range(prime * prime, len(flags), prime):\n flags[i] = False\n break\nreturn flags", "if n <= 1:\n return False\nfor x in range(2, n):\n if n % x == 0:\n return False\nreturn True", "if p <= 1:\n return 2\nprime = p\nfound = False\nwhile True:\n prime += 1\n if self.isPrime(prime):\n return prime\nreturn prime"], "bodies_text": "<|body_start_0|>\n flags = [0, 1] + [True] * (max_num + 1)\n count = 0\n prime = 2\n while prime <= math.sqrt(max_num):\n for i in range(prime * prime, len(flags), prime):\n flags[i] = False\n break\n return flags\n<|end_body_0|>\n\n<|body_start_1|>\n if n <= 1:\n return False\n for x in range(2, n):\n if n % x == 0:\n return False\n return True\n<|end_body_1|>\n\n<|body_start_2|>\n if p <= 1:\n return 2\n prime = p\n found = False\n while True:\n prime += 1\n if self.isPrime(prime):\n return prime\n return prime\n<|end_body_2|>\n", "class_docstring": "", "class_name": "PrimeGen", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PrimeGen:\n\n def sieve_of_e(self, max_num):\n \"\"\"Generating a List of Primes: The Sieve of Eratosthenes * All nonprime nums are divisible by a prime number * Alg: - Start w/ list of all numbers up to max - Cross off alt numbers divisible by 2, by 3, ... - This gives us a list of all primes # >>> pg = PrimeGen() # >>> pg.sieve_of_e(13)\"\"\"\n <|body_0|>\n\n def isPrime(self, n):\n \"\"\"Return bool for primality of `n` >>> pg = PrimeGen() >>> pg.isPrime(13) True >>> pg.isPrime(2) True >>> pg.isPrime(14) False\"\"\"\n <|body_1|>\n\n def getNextPrime(self, p):\n \"\"\"Return the smallest prime number greater than prime `p` >>> pg = PrimeGen() >>> pg.getNextPrime(0) 2 >>> pg.getNextPrime(2) 3 >>> pg.getNextPrime(6) 7 >>> pg.getNextPrime(16) 17\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n flags = [0, 1] + [True] * (max_num + 1)\n count = 0\n prime = 2\n while prime <= math.sqrt(max_num):\n for i in range(prime * prime, len(flags), prime):\n flags[i] = False\n break\n return flags\n<|end_body_0|>\n\n<|body_start_1|>\n if n <= 1:\n return False\n for x in range(2, n):\n if n % x == 0:\n return False\n return True\n<|end_body_1|>\n\n<|body_start_2|>\n if p <= 1:\n return 2\n prime = p\n found = False\n while True:\n prime += 1\n if self.isPrime(prime):\n return prime\n return prime\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000357", "length_bytes": 2359, "license_type": "no_license", "methods": [{"docstring": "Generating a List of Primes: The Sieve of Eratosthenes * All nonprime nums are divisible by a prime number * Alg: - Start w/ list of all numbers up to max - Cross off alt numbers divisible by 2, by 3, ... 
- This gives us a list of all primes # >>> pg = PrimeGen() # >>> pg.sieve_of_e(13)", "name": "sieve_of_e", "signature": "def sieve_of_e(self, max_num)"}, {"docstring": "Return bool for primality of `n` >>> pg = PrimeGen() >>> pg.isPrime(13) True >>> pg.isPrime(2) True >>> pg.isPrime(14) False", "name": "isPrime", "signature": "def isPrime(self, n)"}, {"docstring": "Return the smallest prime number greater than prime `p` >>> pg = PrimeGen() >>> pg.getNextPrime(0) 2 >>> pg.getNextPrime(2) 3 >>> pg.getNextPrime(6) 7 >>> pg.getNextPrime(16) 17", "name": "getNextPrime", "signature": "def getNextPrime(self, p)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_013833", "prompt": "Implement the Python class `PrimeGen` described below.\n\nClass description:\nImplement the PrimeGen class.\n\nMethod signatures and docstrings:\n- def sieve_of_e(self, max_num): Generating a List of Primes: The Sieve of Eratosthenes * All nonprime nums are divisible by a prime number * Alg: - Start w/ list of all numbers up to max - Cross off alt numbers divisible by 2, by 3, ... - This gives us a list of all primes # >>> pg = PrimeGen() # >>> pg.sieve_of_e(13)\n- def isPrime(self, n): Return bool for primality of `n` >>> pg = PrimeGen() >>> pg.isPrime(13) True >>> pg.isPrime(2) True >>> pg.isPrime(14) False\n- def getNextPrime(self, p): Return the smallest prime number greater than prime `p` >>> pg = PrimeGen() >>> pg.getNextPrime(0) 2 >>> pg.getNextPrime(2) 3 >>> pg.getNextPrime(6) 7 >>> pg.getNextPrime(16) 17", "prompted_full_text": "Implement the Python class `PrimeGen` described below.\n\nClass description:\nImplement the PrimeGen class.\n\nMethod signatures and docstrings:\n- def sieve_of_e(self, max_num): Generating a List of Primes: The Sieve of Eratosthenes * All nonprime nums are divisible by a prime number * Alg: - Start w/ list of all numbers up to max - Cross off alt numbers divisible by 2, by 3, ... - This gives us a list of all primes # >>> pg = PrimeGen() # >>> pg.sieve_of_e(13)\n- def isPrime(self, n): Return bool for primality of `n` >>> pg = PrimeGen() >>> pg.isPrime(13) True >>> pg.isPrime(2) True >>> pg.isPrime(14) False\n- def getNextPrime(self, p): Return the smallest prime number greater than prime `p` >>> pg = PrimeGen() >>> pg.getNextPrime(0) 2 >>> pg.getNextPrime(2) 3 >>> pg.getNextPrime(6) 7 >>> pg.getNextPrime(16) 17\n\n<|skeleton|>\nclass PrimeGen:\n\n def sieve_of_e(self, max_num):\n \"\"\"Generating a List of Primes: The Sieve of Eratosthenes * All nonprime nums are divisible by a prime number * Alg: - Start w/ list of all numbers up to max - Cross off alt numbers divisible by 2, by 3, ... 
- This gives us a list of all primes # >>> pg = PrimeGen() # >>> pg.sieve_of_e(13)\"\"\"\n <|body_0|>\n\n def isPrime(self, n):\n \"\"\"Return bool for primality of `n` >>> pg = PrimeGen() >>> pg.isPrime(13) True >>> pg.isPrime(2) True >>> pg.isPrime(14) False\"\"\"\n <|body_1|>\n\n def getNextPrime(self, p):\n \"\"\"Return the smallest prime number greater than prime `p` >>> pg = PrimeGen() >>> pg.getNextPrime(0) 2 >>> pg.getNextPrime(2) 3 >>> pg.getNextPrime(6) 7 >>> pg.getNextPrime(16) 17\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n flags = [0, 1] + [True] * (max_num + 1)\n count = 0\n prime = 2\n while prime <= math.sqrt(max_num):\n for i in range(prime * prime, len(flags), prime):\n flags[i] = False\n break\n return flags\n<|end_body_0|>\n\n<|body_start_1|>\n if n <= 1:\n return False\n for x in range(2, n):\n if n % x == 0:\n return False\n return True\n<|end_body_1|>\n\n<|body_start_2|>\n if p <= 1:\n return 2\n prime = p\n found = False\n while True:\n prime += 1\n if self.isPrime(prime):\n return prime\n return prime\n<|end_body_2|>\n", "revision_id": "13495f1d3cb9c6bb1800046507c07ea0295e51f2", "skeleton": "<|skeleton|>\nclass PrimeGen:\n\n def sieve_of_e(self, max_num):\n \"\"\"Generating a List of Primes: The Sieve of Eratosthenes * All nonprime nums are divisible by a prime number * Alg: - Start w/ list of all numbers up to max - Cross off alt numbers divisible by 2, by 3, ... - This gives us a list of all primes # >>> pg = PrimeGen() # >>> pg.sieve_of_e(13)\"\"\"\n <|body_0|>\n\n def isPrime(self, n):\n \"\"\"Return bool for primality of `n` >>> pg = PrimeGen() >>> pg.isPrime(13) True >>> pg.isPrime(2) True >>> pg.isPrime(14) False\"\"\"\n <|body_1|>\n\n def getNextPrime(self, p):\n \"\"\"Return the smallest prime number greater than prime `p` >>> pg = PrimeGen() >>> pg.getNextPrime(0) 2 >>> pg.getNextPrime(2) 3 >>> pg.getNextPrime(6) 7 >>> pg.getNextPrime(16) 17\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class PrimeGen:\n def sieve_of_e(self, max_num):\n \"\"\"Generating a List of Primes: The Sieve of Eratosthenes * All nonprime nums are divisible by a prime number * Alg: - Start w/ list of all numbers up to max - Cross off alt numbers divisible by 2, by 3, ... - This gives us a list of all primes # >>> pg = PrimeGen() # >>> pg.sieve_of_e(13)\"\"\"\n flags = [0, 1] + [True] * (max_num + 1)\n count = 0\n prime = 2\n while prime <= math.sqrt(max_num):\n for i in range(prime * prime, len(flags), prime):\n flags[i] = False\n break\n return flags\n\n def isPrime(self, n):\n \"\"\"Return bool for primality of `n` >>> pg = PrimeGen() >>> pg.isPrime(13) True >>> pg.isPrime(2) True >>> pg.isPrime(14) False\"\"\"\n if n <= 1:\n return False\n for x in range(2, n):\n if n % x == 0:\n return False\n return True\n\n def getNextPrime(self, p):\n \"\"\"Return the smallest prime number greater than prime `p` >>> pg = PrimeGen() >>> pg.getNextPrime(0) 2 >>> pg.getNextPrime(2) 3 >>> pg.getNextPrime(6) 7 >>> pg.getNextPrime(16) 17\"\"\"\n if p <= 1:\n return 2\n prime = p\n found = False\n while True:\n prime += 1\n if self.isPrime(prime):\n return prime\n return prime\n", "source": "the_stack_v2_python_sparse", "source_path": "CTCI_2020/logic-and-puzzles-ch6.py", "source_repo": "liv-yaa/Py_Code_Challenges", "split": "val", "star_events_count": 0}
{"blob_id": "aab5bd1e647b37ca5f7559ed125f5855a776face", "bodies": ["if not addon in PriorityAddon:\n PriorityAddon.add(addon)\n super(Priorities, self).add(addon)", "for addon in self:\n PriorityAddon.discard(addon)\nsuper(Priorities, self).clear()"], "bodies_text": "<|body_start_0|>\n if not addon in PriorityAddon:\n PriorityAddon.add(addon)\n super(Priorities, self).add(addon)\n<|end_body_0|>\n\n<|body_start_1|>\n for addon in self:\n PriorityAddon.discard(addon)\n super(Priorities, self).clear()\n<|end_body_1|>\n", "class_docstring": "Set to manager Priority Addons used in Warmup", "class_name": "Priorities", "detected_licenses": ["Artistic-1.0", "LicenseRef-scancode-public-domain"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Priorities:\n \"\"\"Set to manager Priority Addons used in Warmup\"\"\"\n\n def add(self, addon):\n \"\"\"Overriding method that also adds all addons to PriorityAddon\"\"\"\n <|body_0|>\n\n def clear(self):\n \"\"\"Overriding method that also removes addons from PriorityAddon\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not addon in PriorityAddon:\n PriorityAddon.add(addon)\n super(Priorities, self).add(addon)\n<|end_body_0|>\n\n<|body_start_1|>\n for addon in self:\n PriorityAddon.discard(addon)\n super(Priorities, self).clear()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000358", "length_bytes": 16792, "license_type": "permissive", "methods": [{"docstring": "Overriding method that also adds all addons to PriorityAddon", "name": "add", "signature": "def add(self, addon)"}, {"docstring": "Overriding method that also removes addons from PriorityAddon", "name": "clear", "signature": "def clear(self)"}], "n_methods": 2, "prompt": "Implement the Python class `Priorities` described below.\n\nClass description:\nSet to manager Priority Addons used in Warmup\n\nMethod signatures and docstrings:\n- def add(self, addon): Overriding method that also adds all addons to PriorityAddon\n- def clear(self): Overriding method that also removes addons from PriorityAddon", "prompted_full_text": "Implement the Python class `Priorities` described below.\n\nClass description:\nSet to manager Priority Addons used in Warmup\n\nMethod signatures and docstrings:\n- def add(self, addon): Overriding method that also adds all addons to PriorityAddon\n- def clear(self): Overriding method that also removes addons from PriorityAddon\n\n<|skeleton|>\nclass Priorities:\n \"\"\"Set to manager Priority Addons used in Warmup\"\"\"\n\n def add(self, addon):\n \"\"\"Overriding method that also adds all addons to PriorityAddon\"\"\"\n <|body_0|>\n\n def clear(self):\n \"\"\"Overriding method that also removes addons from PriorityAddon\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not addon in PriorityAddon:\n PriorityAddon.add(addon)\n super(Priorities, self).add(addon)\n<|end_body_0|>\n\n<|body_start_1|>\n for addon in self:\n PriorityAddon.discard(addon)\n super(Priorities, self).clear()\n<|end_body_1|>\n", "revision_id": "ebf4624626266f552189a32612b8d09cd5b4c5a3", "skeleton": "<|skeleton|>\nclass Priorities:\n \"\"\"Set to manager Priority Addons used in Warmup\"\"\"\n\n def add(self, addon):\n \"\"\"Overriding method that also adds all addons to PriorityAddon\"\"\"\n <|body_0|>\n\n def clear(self):\n \"\"\"Overriding method that also removes addons from PriorityAddon\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": 
"data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Priorities:\n \"\"\"Set to manager Priority Addons used in Warmup\"\"\"\n\n def add(self, addon):\n \"\"\"Overriding method that also adds all addons to PriorityAddon\"\"\"\n if not addon in PriorityAddon:\n PriorityAddon.add(addon)\n super(Priorities, self).add(addon)\n\n def clear(self):\n \"\"\"Overriding method that also removes addons from PriorityAddon\"\"\"\n for addon in self:\n PriorityAddon.discard(addon)\n super(Priorities, self).clear()\n", "source": "the_stack_v2_python_sparse", "source_path": "cstrike/addons/eventscripts/gungame51/scripts/included/gg_warmup_round/gg_warmup_round.py", "source_repo": "GunGame-Dev-Team/GunGame51", "split": "val", "star_events_count": 0}
{"blob_id": "f80a07cb8763c1dd3686ec7eef68755a3c43e78b", "bodies": ["m, n = (len(s1), len(s2))\nif n < m:\n return False\nl1 = [0] * 26\nfor c in s1:\n l1[ord(c) - ord('a')] += 1\nfor c in s2[:m]:\n l1[ord(c) - ord('a')] -= 1\nif not any(l1):\n return True\ni = 0\nfor c in s2[m:]:\n l1[ord(c) - ord('a')] -= 1\n l1[ord(s2[i]) - ord('a')] += 1\n if not any(l1):\n return True\n i += 1\nreturn False", "m, n = (len(s1), len(s2))\nif n < m:\n return False\nl1 = [0] * 26\nfor c in s1:\n l1[ord(c) - ord('a')] += 1\nl2 = [0] * 26\nfor c in s2[:m]:\n l2[ord(c) - ord('a')] += 1\nif l1 == l2:\n return True\ni = 0\nfor c in s2[m:]:\n l2[ord(c) - ord('a')] += 1\n l2[ord(s2[i]) - ord('a')] -= 1\n if l1 == l2:\n return True\n i += 1\nreturn False"], "bodies_text": "<|body_start_0|>\n m, n = (len(s1), len(s2))\n if n < m:\n return False\n l1 = [0] * 26\n for c in s1:\n l1[ord(c) - ord('a')] += 1\n for c in s2[:m]:\n l1[ord(c) - ord('a')] -= 1\n if not any(l1):\n return True\n i = 0\n for c in s2[m:]:\n l1[ord(c) - ord('a')] -= 1\n l1[ord(s2[i]) - ord('a')] += 1\n if not any(l1):\n return True\n i += 1\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n m, n = (len(s1), len(s2))\n if n < m:\n return False\n l1 = [0] * 26\n for c in s1:\n l1[ord(c) - ord('a')] += 1\n l2 = [0] * 26\n for c in s2[:m]:\n l2[ord(c) - ord('a')] += 1\n if l1 == l2:\n return True\n i = 0\n for c in s2[m:]:\n l2[ord(c) - ord('a')] += 1\n l2[ord(s2[i]) - ord('a')] -= 1\n if l1 == l2:\n return True\n i += 1\n return False\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def checkInclusion(self, s1: str, s2: str) -> bool:\n \"\"\"CREATED AT: 2022/2/11 Runtime: 92 ms, faster than 68.01% Memory Usage: 14 MB, less than 83.82% 1 <= s1.length, s2.length <= 10^4 s1 and s2 consist of lowercase English letters. :param s1: :param s2: :return:\"\"\"\n <|body_0|>\n\n def checkInclusion2(self, s1: str, s2: str) -> bool:\n \"\"\"CREATED AT: 2022/2/11 Runtime: 48 ms, faster than 99.79% Memory Usage: 14 MB, less than 93.32% 1 <= s1.length, s2.length <= 10^4 s1 and s2 consist of lowercase English letters. :param s1: :param s2: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n m, n = (len(s1), len(s2))\n if n < m:\n return False\n l1 = [0] * 26\n for c in s1:\n l1[ord(c) - ord('a')] += 1\n for c in s2[:m]:\n l1[ord(c) - ord('a')] -= 1\n if not any(l1):\n return True\n i = 0\n for c in s2[m:]:\n l1[ord(c) - ord('a')] -= 1\n l1[ord(s2[i]) - ord('a')] += 1\n if not any(l1):\n return True\n i += 1\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n m, n = (len(s1), len(s2))\n if n < m:\n return False\n l1 = [0] * 26\n for c in s1:\n l1[ord(c) - ord('a')] += 1\n l2 = [0] * 26\n for c in s2[:m]:\n l2[ord(c) - ord('a')] += 1\n if l1 == l2:\n return True\n i = 0\n for c in s2[m:]:\n l2[ord(c) - ord('a')] += 1\n l2[ord(s2[i]) - ord('a')] -= 1\n if l1 == l2:\n return True\n i += 1\n return False\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000359", "length_bytes": 2274, "license_type": "permissive", "methods": [{"docstring": "CREATED AT: 2022/2/11 Runtime: 92 ms, faster than 68.01% Memory Usage: 14 MB, less than 83.82% 1 <= s1.length, s2.length <= 10^4 s1 and s2 consist of lowercase English letters. 
:param s1: :param s2: :return:", "name": "checkInclusion", "signature": "def checkInclusion(self, s1: str, s2: str) -> bool"}, {"docstring": "CREATED AT: 2022/2/11 Runtime: 48 ms, faster than 99.79% Memory Usage: 14 MB, less than 93.32% 1 <= s1.length, s2.length <= 10^4 s1 and s2 consist of lowercase English letters. :param s1: :param s2: :return:", "name": "checkInclusion2", "signature": "def checkInclusion2(self, s1: str, s2: str) -> bool"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_000293", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def checkInclusion(self, s1: str, s2: str) -> bool: CREATED AT: 2022/2/11 Runtime: 92 ms, faster than 68.01% Memory Usage: 14 MB, less than 83.82% 1 <= s1.length, s2.length <= 10^4 s1 and s2 consist of lowercase English letters. :param s1: :param s2: :return:\n- def checkInclusion2(self, s1: str, s2: str) -> bool: CREATED AT: 2022/2/11 Runtime: 48 ms, faster than 99.79% Memory Usage: 14 MB, less than 93.32% 1 <= s1.length, s2.length <= 10^4 s1 and s2 consist of lowercase English letters. :param s1: :param s2: :return:", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def checkInclusion(self, s1: str, s2: str) -> bool: CREATED AT: 2022/2/11 Runtime: 92 ms, faster than 68.01% Memory Usage: 14 MB, less than 83.82% 1 <= s1.length, s2.length <= 10^4 s1 and s2 consist of lowercase English letters. :param s1: :param s2: :return:\n- def checkInclusion2(self, s1: str, s2: str) -> bool: CREATED AT: 2022/2/11 Runtime: 48 ms, faster than 99.79% Memory Usage: 14 MB, less than 93.32% 1 <= s1.length, s2.length <= 10^4 s1 and s2 consist of lowercase English letters. :param s1: :param s2: :return:\n\n<|skeleton|>\nclass Solution:\n\n def checkInclusion(self, s1: str, s2: str) -> bool:\n \"\"\"CREATED AT: 2022/2/11 Runtime: 92 ms, faster than 68.01% Memory Usage: 14 MB, less than 83.82% 1 <= s1.length, s2.length <= 10^4 s1 and s2 consist of lowercase English letters. :param s1: :param s2: :return:\"\"\"\n <|body_0|>\n\n def checkInclusion2(self, s1: str, s2: str) -> bool:\n \"\"\"CREATED AT: 2022/2/11 Runtime: 48 ms, faster than 99.79% Memory Usage: 14 MB, less than 93.32% 1 <= s1.length, s2.length <= 10^4 s1 and s2 consist of lowercase English letters. 
:param s1: :param s2: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n m, n = (len(s1), len(s2))\n if n < m:\n return False\n l1 = [0] * 26\n for c in s1:\n l1[ord(c) - ord('a')] += 1\n for c in s2[:m]:\n l1[ord(c) - ord('a')] -= 1\n if not any(l1):\n return True\n i = 0\n for c in s2[m:]:\n l1[ord(c) - ord('a')] -= 1\n l1[ord(s2[i]) - ord('a')] += 1\n if not any(l1):\n return True\n i += 1\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n m, n = (len(s1), len(s2))\n if n < m:\n return False\n l1 = [0] * 26\n for c in s1:\n l1[ord(c) - ord('a')] += 1\n l2 = [0] * 26\n for c in s2[:m]:\n l2[ord(c) - ord('a')] += 1\n if l1 == l2:\n return True\n i = 0\n for c in s2[m:]:\n l2[ord(c) - ord('a')] += 1\n l2[ord(s2[i]) - ord('a')] -= 1\n if l1 == l2:\n return True\n i += 1\n return False\n<|end_body_1|>\n", "revision_id": "4dd1e54d8d08f7e6590bc76abd08ecaacaf775e5", "skeleton": "<|skeleton|>\nclass Solution:\n\n def checkInclusion(self, s1: str, s2: str) -> bool:\n \"\"\"CREATED AT: 2022/2/11 Runtime: 92 ms, faster than 68.01% Memory Usage: 14 MB, less than 83.82% 1 <= s1.length, s2.length <= 10^4 s1 and s2 consist of lowercase English letters. :param s1: :param s2: :return:\"\"\"\n <|body_0|>\n\n def checkInclusion2(self, s1: str, s2: str) -> bool:\n \"\"\"CREATED AT: 2022/2/11 Runtime: 48 ms, faster than 99.79% Memory Usage: 14 MB, less than 93.32% 1 <= s1.length, s2.length <= 10^4 s1 and s2 consist of lowercase English letters. :param s1: :param s2: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def checkInclusion(self, s1: str, s2: str) -> bool:\n \"\"\"CREATED AT: 2022/2/11 Runtime: 92 ms, faster than 68.01% Memory Usage: 14 MB, less than 83.82% 1 <= s1.length, s2.length <= 10^4 s1 and s2 consist of lowercase English letters. :param s1: :param s2: :return:\"\"\"\n m, n = (len(s1), len(s2))\n if n < m:\n return False\n l1 = [0] * 26\n for c in s1:\n l1[ord(c) - ord('a')] += 1\n for c in s2[:m]:\n l1[ord(c) - ord('a')] -= 1\n if not any(l1):\n return True\n i = 0\n for c in s2[m:]:\n l1[ord(c) - ord('a')] -= 1\n l1[ord(s2[i]) - ord('a')] += 1\n if not any(l1):\n return True\n i += 1\n return False\n\n def checkInclusion2(self, s1: str, s2: str) -> bool:\n \"\"\"CREATED AT: 2022/2/11 Runtime: 48 ms, faster than 99.79% Memory Usage: 14 MB, less than 93.32% 1 <= s1.length, s2.length <= 10^4 s1 and s2 consist of lowercase English letters. :param s1: :param s2: :return:\"\"\"\n m, n = (len(s1), len(s2))\n if n < m:\n return False\n l1 = [0] * 26\n for c in s1:\n l1[ord(c) - ord('a')] += 1\n l2 = [0] * 26\n for c in s2[:m]:\n l2[ord(c) - ord('a')] += 1\n if l1 == l2:\n return True\n i = 0\n for c in s2[m:]:\n l2[ord(c) - ord('a')] += 1\n l2[ord(s2[i]) - ord('a')] -= 1\n if l1 == l2:\n return True\n i += 1\n return False\n", "source": "the_stack_v2_python_sparse", "source_path": "src/567-PermutationInString.py", "source_repo": "Jiezhi/myleetcode", "split": "val", "star_events_count": 1}
{"blob_id": "00f277ec85fc7b9958882e7659d44286a75fffa6", "bodies": ["try:\n self.predicted_intent = eval_store.intent_predictions[0]\nexcept LookupError:\n self.predicted_intent = None\nself.target_entities = eval_store.entity_targets\nself.predicted_entities = eval_store.entity_predictions\nintent = {'name': eval_store.intent_targets[0]}\nsuper().__init__(event.text, intent, eval_store.entity_targets, event.parse_data, event.timestamp, event.input_channel)", "from rasa.shared.core.events import format_message\nif force_comment_generation or self.predicted_intent != self.intent['name']:\n predicted_message = format_message(self.text, self.predicted_intent, self.predicted_entities)\n return f'predicted: {self.predicted_intent}: {predicted_message}'\nelse:\n return None", "if predicted['entity'] != entity['entity']:\n return 'predicted: ' + predicted['entity'] + ': ' + predicted['value']\nelse:\n return None", "from rasa.shared.core.events import format_message\ncorrect_message = format_message(self.text, self.intent.get('name'), self.entities)\nreturn f\"{self.intent.get('name')}: {correct_message} \""], "bodies_text": "<|body_start_0|>\n try:\n self.predicted_intent = eval_store.intent_predictions[0]\n except LookupError:\n self.predicted_intent = None\n self.target_entities = eval_store.entity_targets\n self.predicted_entities = eval_store.entity_predictions\n intent = {'name': eval_store.intent_targets[0]}\n super().__init__(event.text, intent, eval_store.entity_targets, event.parse_data, event.timestamp, event.input_channel)\n<|end_body_0|>\n\n<|body_start_1|>\n from rasa.shared.core.events import format_message\n if force_comment_generation or self.predicted_intent != self.intent['name']:\n predicted_message = format_message(self.text, self.predicted_intent, self.predicted_entities)\n return f'predicted: {self.predicted_intent}: {predicted_message}'\n else:\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n if predicted['entity'] != entity['entity']:\n return 'predicted: ' + predicted['entity'] + ': ' + predicted['value']\n else:\n return None\n<|end_body_2|>\n\n<|body_start_3|>\n from rasa.shared.core.events import format_message\n correct_message = format_message(self.text, self.intent.get('name'), self.entities)\n return f\"{self.intent.get('name')}: {correct_message} \"\n<|end_body_3|>\n", "class_docstring": "The NLU model predicted the wrong user utterance. Mostly used to mark wrong predictions and be able to dump them as stories.", "class_name": "WronglyClassifiedUserUtterance", "detected_licenses": ["LicenseRef-scancode-generic-cla", "Apache-2.0", "MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass WronglyClassifiedUserUtterance:\n \"\"\"The NLU model predicted the wrong user utterance. Mostly used to mark wrong predictions and be able to dump them as stories.\"\"\"\n\n def __init__(self, event: UserUttered, eval_store: EvaluationStore) -> None:\n \"\"\"Set `predicted_intent` and `predicted_entities` attributes.\"\"\"\n <|body_0|>\n\n def inline_comment(self, force_comment_generation: bool=False) -> Optional[Text]:\n \"\"\"A comment attached to this event. 
Used during dumping.\"\"\"\n <|body_1|>\n\n def inline_comment_for_entity(predicted: Dict[Text, Any], entity: Dict[Text, Any]) -> Optional[Text]:\n \"\"\"Returns the predicted entity which is then printed as a comment.\"\"\"\n <|body_2|>\n\n def as_story_string(self, e2e: bool=True) -> Text:\n \"\"\"Returns text representation of event.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n self.predicted_intent = eval_store.intent_predictions[0]\n except LookupError:\n self.predicted_intent = None\n self.target_entities = eval_store.entity_targets\n self.predicted_entities = eval_store.entity_predictions\n intent = {'name': eval_store.intent_targets[0]}\n super().__init__(event.text, intent, eval_store.entity_targets, event.parse_data, event.timestamp, event.input_channel)\n<|end_body_0|>\n\n<|body_start_1|>\n from rasa.shared.core.events import format_message\n if force_comment_generation or self.predicted_intent != self.intent['name']:\n predicted_message = format_message(self.text, self.predicted_intent, self.predicted_entities)\n return f'predicted: {self.predicted_intent}: {predicted_message}'\n else:\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n if predicted['entity'] != entity['entity']:\n return 'predicted: ' + predicted['entity'] + ': ' + predicted['value']\n else:\n return None\n<|end_body_2|>\n\n<|body_start_3|>\n from rasa.shared.core.events import format_message\n correct_message = format_message(self.text, self.intent.get('name'), self.entities)\n return f\"{self.intent.get('name')}: {correct_message} \"\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000360", "length_bytes": 48935, "license_type": "permissive", "methods": [{"docstring": "Set `predicted_intent` and `predicted_entities` attributes.", "name": "__init__", "signature": "def __init__(self, event: UserUttered, eval_store: EvaluationStore) -> None"}, {"docstring": "A comment attached to this event. Used during dumping.", "name": "inline_comment", "signature": "def inline_comment(self, force_comment_generation: bool=False) -> Optional[Text]"}, {"docstring": "Returns the predicted entity which is then printed as a comment.", "name": "inline_comment_for_entity", "signature": "def inline_comment_for_entity(predicted: Dict[Text, Any], entity: Dict[Text, Any]) -> Optional[Text]"}, {"docstring": "Returns text representation of event.", "name": "as_story_string", "signature": "def as_story_string(self, e2e: bool=True) -> Text"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_001658", "prompt": "Implement the Python class `WronglyClassifiedUserUtterance` described below.\n\nClass description:\nThe NLU model predicted the wrong user utterance. Mostly used to mark wrong predictions and be able to dump them as stories.\n\nMethod signatures and docstrings:\n- def __init__(self, event: UserUttered, eval_store: EvaluationStore) -> None: Set `predicted_intent` and `predicted_entities` attributes.\n- def inline_comment(self, force_comment_generation: bool=False) -> Optional[Text]: A comment attached to this event. Used during dumping.\n- def inline_comment_for_entity(predicted: Dict[Text, Any], entity: Dict[Text, Any]) -> Optional[Text]: Returns the predicted entity which is then printed as a comment.\n- def as_story_string(self, e2e: bool=True) -> Text: Returns text representation of event.", "prompted_full_text": "Implement the Python class `WronglyClassifiedUserUtterance` described below.\n\nClass description:\nThe NLU model predicted the wrong user utterance. 
Mostly used to mark wrong predictions and be able to dump them as stories.\n\nMethod signatures and docstrings:\n- def __init__(self, event: UserUttered, eval_store: EvaluationStore) -> None: Set `predicted_intent` and `predicted_entities` attributes.\n- def inline_comment(self, force_comment_generation: bool=False) -> Optional[Text]: A comment attached to this event. Used during dumping.\n- def inline_comment_for_entity(predicted: Dict[Text, Any], entity: Dict[Text, Any]) -> Optional[Text]: Returns the predicted entity which is then printed as a comment.\n- def as_story_string(self, e2e: bool=True) -> Text: Returns text representation of event.\n\n<|skeleton|>\nclass WronglyClassifiedUserUtterance:\n \"\"\"The NLU model predicted the wrong user utterance. Mostly used to mark wrong predictions and be able to dump them as stories.\"\"\"\n\n def __init__(self, event: UserUttered, eval_store: EvaluationStore) -> None:\n \"\"\"Set `predicted_intent` and `predicted_entities` attributes.\"\"\"\n <|body_0|>\n\n def inline_comment(self, force_comment_generation: bool=False) -> Optional[Text]:\n \"\"\"A comment attached to this event. Used during dumping.\"\"\"\n <|body_1|>\n\n def inline_comment_for_entity(predicted: Dict[Text, Any], entity: Dict[Text, Any]) -> Optional[Text]:\n \"\"\"Returns the predicted entity which is then printed as a comment.\"\"\"\n <|body_2|>\n\n def as_story_string(self, e2e: bool=True) -> Text:\n \"\"\"Returns text representation of event.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n self.predicted_intent = eval_store.intent_predictions[0]\n except LookupError:\n self.predicted_intent = None\n self.target_entities = eval_store.entity_targets\n self.predicted_entities = eval_store.entity_predictions\n intent = {'name': eval_store.intent_targets[0]}\n super().__init__(event.text, intent, eval_store.entity_targets, event.parse_data, event.timestamp, event.input_channel)\n<|end_body_0|>\n\n<|body_start_1|>\n from rasa.shared.core.events import format_message\n if force_comment_generation or self.predicted_intent != self.intent['name']:\n predicted_message = format_message(self.text, self.predicted_intent, self.predicted_entities)\n return f'predicted: {self.predicted_intent}: {predicted_message}'\n else:\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n if predicted['entity'] != entity['entity']:\n return 'predicted: ' + predicted['entity'] + ': ' + predicted['value']\n else:\n return None\n<|end_body_2|>\n\n<|body_start_3|>\n from rasa.shared.core.events import format_message\n correct_message = format_message(self.text, self.intent.get('name'), self.entities)\n return f\"{self.intent.get('name')}: {correct_message} \"\n<|end_body_3|>\n", "revision_id": "50857610bdf0c26dc61f3203a6cbb4bcf193768c", "skeleton": "<|skeleton|>\nclass WronglyClassifiedUserUtterance:\n \"\"\"The NLU model predicted the wrong user utterance. Mostly used to mark wrong predictions and be able to dump them as stories.\"\"\"\n\n def __init__(self, event: UserUttered, eval_store: EvaluationStore) -> None:\n \"\"\"Set `predicted_intent` and `predicted_entities` attributes.\"\"\"\n <|body_0|>\n\n def inline_comment(self, force_comment_generation: bool=False) -> Optional[Text]:\n \"\"\"A comment attached to this event. 
Used during dumping.\"\"\"\n <|body_1|>\n\n def inline_comment_for_entity(predicted: Dict[Text, Any], entity: Dict[Text, Any]) -> Optional[Text]:\n \"\"\"Returns the predicted entity which is then printed as a comment.\"\"\"\n <|body_2|>\n\n def as_story_string(self, e2e: bool=True) -> Text:\n \"\"\"Returns text representation of event.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class WronglyClassifiedUserUtterance:\n \"\"\"The NLU model predicted the wrong user utterance. Mostly used to mark wrong predictions and be able to dump them as stories.\"\"\"\n\n def __init__(self, event: UserUttered, eval_store: EvaluationStore) -> None:\n \"\"\"Set `predicted_intent` and `predicted_entities` attributes.\"\"\"\n try:\n self.predicted_intent = eval_store.intent_predictions[0]\n except LookupError:\n self.predicted_intent = None\n self.target_entities = eval_store.entity_targets\n self.predicted_entities = eval_store.entity_predictions\n intent = {'name': eval_store.intent_targets[0]}\n super().__init__(event.text, intent, eval_store.entity_targets, event.parse_data, event.timestamp, event.input_channel)\n\n def inline_comment(self, force_comment_generation: bool=False) -> Optional[Text]:\n \"\"\"A comment attached to this event. Used during dumping.\"\"\"\n from rasa.shared.core.events import format_message\n if force_comment_generation or self.predicted_intent != self.intent['name']:\n predicted_message = format_message(self.text, self.predicted_intent, self.predicted_entities)\n return f'predicted: {self.predicted_intent}: {predicted_message}'\n else:\n return None\n\n def inline_comment_for_entity(predicted: Dict[Text, Any], entity: Dict[Text, Any]) -> Optional[Text]:\n \"\"\"Returns the predicted entity which is then printed as a comment.\"\"\"\n if predicted['entity'] != entity['entity']:\n return 'predicted: ' + predicted['entity'] + ': ' + predicted['value']\n else:\n return None\n\n def as_story_string(self, e2e: bool=True) -> Text:\n \"\"\"Returns text representation of event.\"\"\"\n from rasa.shared.core.events import format_message\n correct_message = format_message(self.text, self.intent.get('name'), self.entities)\n return f\"{self.intent.get('name')}: {correct_message} \"\n", "source": "the_stack_v2_python_sparse", "source_path": "rasa/core/test.py", "source_repo": "RasaHQ/rasa", "split": "val", "star_events_count": 13167}
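The __init__ body in the record above guards eval_store.intent_predictions[0] with "except LookupError" rather than the narrower IndexError. A minimal, self-contained sketch of that guard; EvalStore here is a hypothetical stand-in, not Rasa's EvaluationStore:

# LookupError is the shared base class of IndexError and KeyError, so the
# guard keeps working whether the prediction store is list- or dict-backed.
class EvalStore:
    def __init__(self, intent_predictions):
        self.intent_predictions = intent_predictions

def first_prediction(store):
    try:
        return store.intent_predictions[0]
    except LookupError:
        return None

assert first_prediction(EvalStore(['greet'])) == 'greet'
assert first_prediction(EvalStore([])) is None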
{"blob_id": "a705c1dc4d518cdf234b011a27d35a60ed3cb0a9", "bodies": ["super().__init__(experiment)\nfrom dials.algorithms.shoebox import MaskEmpirical\nself.mask_empirical = MaskEmpirical(reference)\nself._reference = reference", "reflections = super().__call__(reflections, adjacency_list)\nif self.mask_empirical:\n self.mask_empirical(reflections)\nreturn reflections"], "bodies_text": "<|body_start_0|>\n super().__init__(experiment)\n from dials.algorithms.shoebox import MaskEmpirical\n self.mask_empirical = MaskEmpirical(reference)\n self._reference = reference\n<|end_body_0|>\n\n<|body_start_1|>\n reflections = super().__call__(reflections, adjacency_list)\n if self.mask_empirical:\n self.mask_empirical(reflections)\n return reflections\n<|end_body_1|>\n", "class_docstring": "A class to perform empirical masking", "class_name": "MaskerEmpirical", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MaskerEmpirical:\n \"\"\"A class to perform empirical masking\"\"\"\n\n def __init__(self, experiment, reference):\n \"\"\"Initialise the masking algorithms Params: experiment The experiment data\"\"\"\n <|body_0|>\n\n def __call__(self, reflections, adjacency_list=None):\n \"\"\"Mask the given reflections. Params: reflections The reflection list adjacency_list The adjacency_list (optional) Returns: The masked reflection list\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(experiment)\n from dials.algorithms.shoebox import MaskEmpirical\n self.mask_empirical = MaskEmpirical(reference)\n self._reference = reference\n<|end_body_0|>\n\n<|body_start_1|>\n reflections = super().__call__(reflections, adjacency_list)\n if self.mask_empirical:\n self.mask_empirical(reflections)\n return reflections\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000361", "length_bytes": 3387, "license_type": "permissive", "methods": [{"docstring": "Initialise the masking algorithms Params: experiment The experiment data", "name": "__init__", "signature": "def __init__(self, experiment, reference)"}, {"docstring": "Mask the given reflections. Params: reflections The reflection list adjacency_list The adjacency_list (optional) Returns: The masked reflection list", "name": "__call__", "signature": "def __call__(self, reflections, adjacency_list=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_044725", "prompt": "Implement the Python class `MaskerEmpirical` described below.\n\nClass description:\nA class to perform empirical masking\n\nMethod signatures and docstrings:\n- def __init__(self, experiment, reference): Initialise the masking algorithms Params: experiment The experiment data\n- def __call__(self, reflections, adjacency_list=None): Mask the given reflections. Params: reflections The reflection list adjacency_list The adjacency_list (optional) Returns: The masked reflection list", "prompted_full_text": "Implement the Python class `MaskerEmpirical` described below.\n\nClass description:\nA class to perform empirical masking\n\nMethod signatures and docstrings:\n- def __init__(self, experiment, reference): Initialise the masking algorithms Params: experiment The experiment data\n- def __call__(self, reflections, adjacency_list=None): Mask the given reflections. 
Params: reflections The reflection list adjacency_list The adjacency_list (optional) Returns: The masked reflection list\n\n<|skeleton|>\nclass MaskerEmpirical:\n \"\"\"A class to perform empirical masking\"\"\"\n\n def __init__(self, experiment, reference):\n \"\"\"Initialise the masking algorithms Params: experiment The experiment data\"\"\"\n <|body_0|>\n\n def __call__(self, reflections, adjacency_list=None):\n \"\"\"Mask the given reflections. Params: reflections The reflection list adjacency_list The adjacency_list (optional) Returns: The masked reflection list\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(experiment)\n from dials.algorithms.shoebox import MaskEmpirical\n self.mask_empirical = MaskEmpirical(reference)\n self._reference = reference\n<|end_body_0|>\n\n<|body_start_1|>\n reflections = super().__call__(reflections, adjacency_list)\n if self.mask_empirical:\n self.mask_empirical(reflections)\n return reflections\n<|end_body_1|>\n", "revision_id": "88bf7f7c5ac44defc046ebf0719cde748092cfff", "skeleton": "<|skeleton|>\nclass MaskerEmpirical:\n \"\"\"A class to perform empirical masking\"\"\"\n\n def __init__(self, experiment, reference):\n \"\"\"Initialise the masking algorithms Params: experiment The experiment data\"\"\"\n <|body_0|>\n\n def __call__(self, reflections, adjacency_list=None):\n \"\"\"Mask the given reflections. Params: reflections The reflection list adjacency_list The adjacency_list (optional) Returns: The masked reflection list\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class MaskerEmpirical:\n \"\"\"A class to perform empirical masking\"\"\"\n\n def __init__(self, experiment, reference):\n \"\"\"Initialise the masking algorithms Params: experiment The experiment data\"\"\"\n super().__init__(experiment)\n from dials.algorithms.shoebox import MaskEmpirical\n self.mask_empirical = MaskEmpirical(reference)\n self._reference = reference\n\n def __call__(self, reflections, adjacency_list=None):\n \"\"\"Mask the given reflections. Params: reflections The reflection list adjacency_list The adjacency_list (optional) Returns: The masked reflection list\"\"\"\n reflections = super().__call__(reflections, adjacency_list)\n if self.mask_empirical:\n self.mask_empirical(reflections)\n return reflections\n", "source": "the_stack_v2_python_sparse", "source_path": "src/dials/algorithms/shoebox/masker.py", "source_repo": "dials/dials", "split": "val", "star_events_count": 71}
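MaskerEmpirical's __call__ above is a delegate-then-refine hook: run the parent masker first, then let the empirical pass mutate the result in place. The function-local "from dials.algorithms.shoebox import MaskEmpirical" likely defers loading the compiled dials extension until construction time. A minimal sketch of the same shape, with Masker and the refine callable as illustrative stand-ins for the dials classes:

class Masker:
    def __call__(self, reflections, adjacency_list=None):
        # stand-in for the base masking step
        return [r for r in reflections if r is not None]

class EmpiricalMasker(Masker):
    def __init__(self, refine):
        self.refine = refine  # plays the role of MaskEmpirical: mutates in place

    def __call__(self, reflections, adjacency_list=None):
        reflections = super().__call__(reflections, adjacency_list)
        if self.refine:
            self.refine(reflections)
        return reflections

assert EmpiricalMasker(list.sort)([3, None, 1]) == [1, 3]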
{"blob_id": "38887582d053394b6e63d6d55f4b11ef026e593d", "bodies": ["newest = Biography.query.order_by(desc(Biography.BiographyID)).first_or_404()\ncontent = jsonify({'biography': [{'short': newest.Short, 'full': newest.Full, 'createdAt': newest.Created, 'updatedAt': newest.Updated}]})\nreturn make_response(content, 200)", "data = json.loads(request.data.decode())\nbio = Biography(Short=data['short'], Full=data['full'], Created=get_datetime())\ndb.session.add(bio)\ndb.session.commit()\nserver = socket.gethostname()\ncontents = 'Location: {}{}{}'.format(server, url_for('BiographyView:index'), bio.BiographyID)\nreturn make_response(jsonify(contents), 201)", "data = json.loads(request.data.decode())\nbio = Biography.query.order_by(desc(Biography.BiographyID)).first_or_404()\nbio.Short = data['short']\nbio.Full = data['full']\nbio.Updated = get_datetime()\ndb.session.commit()\nreturn make_response('', 200)", "bio = Biography.query.order_by(desc(Biography.BiographyID)).first_or_404()\nresult = []\nstatus_code = 204\ntry:\n result = patch_item(bio, request.get_json())\n db.session.commit()\nexcept Exception:\n result = {'success': False, 'error': 'Could not apply patch'}\n status_code = 422\nreturn make_response(jsonify(result), status_code)"], "bodies_text": "<|body_start_0|>\n newest = Biography.query.order_by(desc(Biography.BiographyID)).first_or_404()\n content = jsonify({'biography': [{'short': newest.Short, 'full': newest.Full, 'createdAt': newest.Created, 'updatedAt': newest.Updated}]})\n return make_response(content, 200)\n<|end_body_0|>\n\n<|body_start_1|>\n data = json.loads(request.data.decode())\n bio = Biography(Short=data['short'], Full=data['full'], Created=get_datetime())\n db.session.add(bio)\n db.session.commit()\n server = socket.gethostname()\n contents = 'Location: {}{}{}'.format(server, url_for('BiographyView:index'), bio.BiographyID)\n return make_response(jsonify(contents), 201)\n<|end_body_1|>\n\n<|body_start_2|>\n data = json.loads(request.data.decode())\n bio = Biography.query.order_by(desc(Biography.BiographyID)).first_or_404()\n bio.Short = data['short']\n bio.Full = data['full']\n bio.Updated = get_datetime()\n db.session.commit()\n return make_response('', 200)\n<|end_body_2|>\n\n<|body_start_3|>\n bio = Biography.query.order_by(desc(Biography.BiographyID)).first_or_404()\n result = []\n status_code = 204\n try:\n result = patch_item(bio, request.get_json())\n db.session.commit()\n except Exception:\n result = {'success': False, 'error': 'Could not apply patch'}\n status_code = 422\n return make_response(jsonify(result), status_code)\n<|end_body_3|>\n", "class_docstring": "", "class_name": "BiographyView", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BiographyView:\n\n def index(self):\n \"\"\"Return the newest Biography entry.\"\"\"\n <|body_0|>\n\n def post(self):\n \"\"\"Add a new Biography entry.\"\"\"\n <|body_1|>\n\n def put(self):\n \"\"\"Overwrite the newest Biography with a new one.\"\"\"\n <|body_2|>\n\n def patch(self):\n \"\"\"Update the newest Biography entry partially.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n newest = Biography.query.order_by(desc(Biography.BiographyID)).first_or_404()\n content = jsonify({'biography': [{'short': newest.Short, 'full': newest.Full, 'createdAt': newest.Created, 'updatedAt': newest.Updated}]})\n return make_response(content, 200)\n<|end_body_0|>\n\n<|body_start_1|>\n data = json.loads(request.data.decode())\n bio = 
Biography(Short=data['short'], Full=data['full'], Created=get_datetime())\n db.session.add(bio)\n db.session.commit()\n server = socket.gethostname()\n contents = 'Location: {}{}{}'.format(server, url_for('BiographyView:index'), bio.BiographyID)\n return make_response(jsonify(contents), 201)\n<|end_body_1|>\n\n<|body_start_2|>\n data = json.loads(request.data.decode())\n bio = Biography.query.order_by(desc(Biography.BiographyID)).first_or_404()\n bio.Short = data['short']\n bio.Full = data['full']\n bio.Updated = get_datetime()\n db.session.commit()\n return make_response('', 200)\n<|end_body_2|>\n\n<|body_start_3|>\n bio = Biography.query.order_by(desc(Biography.BiographyID)).first_or_404()\n result = []\n status_code = 204\n try:\n result = patch_item(bio, request.get_json())\n db.session.commit()\n except Exception:\n result = {'success': False, 'error': 'Could not apply patch'}\n status_code = 422\n return make_response(jsonify(result), status_code)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000362", "length_bytes": 2767, "license_type": "permissive", "methods": [{"docstring": "Return the newest Biography entry.", "name": "index", "signature": "def index(self)"}, {"docstring": "Add a new Biography entry.", "name": "post", "signature": "def post(self)"}, {"docstring": "Overwrite the newest Biography with a new one.", "name": "put", "signature": "def put(self)"}, {"docstring": "Update the newest Biography entry partially.", "name": "patch", "signature": "def patch(self)"}], "n_methods": 4, "prompt": "Implement the Python class `BiographyView` described below.\n\nClass description:\nImplement the BiographyView class.\n\nMethod signatures and docstrings:\n- def index(self): Return the newest Biography entry.\n- def post(self): Add a new Biography entry.\n- def put(self): Overwrite the newest Biography with a new one.\n- def patch(self): Update the newest Biography entry partially.", "prompted_full_text": "Implement the Python class `BiographyView` described below.\n\nClass description:\nImplement the BiographyView class.\n\nMethod signatures and docstrings:\n- def index(self): Return the newest Biography entry.\n- def post(self): Add a new Biography entry.\n- def put(self): Overwrite the newest Biography with a new one.\n- def patch(self): Update the newest Biography entry partially.\n\n<|skeleton|>\nclass BiographyView:\n\n def index(self):\n \"\"\"Return the newest Biography entry.\"\"\"\n <|body_0|>\n\n def post(self):\n \"\"\"Add a new Biography entry.\"\"\"\n <|body_1|>\n\n def put(self):\n \"\"\"Overwrite the newest Biography with a new one.\"\"\"\n <|body_2|>\n\n def patch(self):\n \"\"\"Update the newest Biography entry partially.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n newest = Biography.query.order_by(desc(Biography.BiographyID)).first_or_404()\n content = jsonify({'biography': [{'short': newest.Short, 'full': newest.Full, 'createdAt': newest.Created, 'updatedAt': newest.Updated}]})\n return make_response(content, 200)\n<|end_body_0|>\n\n<|body_start_1|>\n data = json.loads(request.data.decode())\n bio = Biography(Short=data['short'], Full=data['full'], Created=get_datetime())\n db.session.add(bio)\n db.session.commit()\n server = socket.gethostname()\n contents = 'Location: {}{}{}'.format(server, url_for('BiographyView:index'), bio.BiographyID)\n return make_response(jsonify(contents), 201)\n<|end_body_1|>\n\n<|body_start_2|>\n data = json.loads(request.data.decode())\n bio = 
Biography.query.order_by(desc(Biography.BiographyID)).first_or_404()\n bio.Short = data['short']\n bio.Full = data['full']\n bio.Updated = get_datetime()\n db.session.commit()\n return make_response('', 200)\n<|end_body_2|>\n\n<|body_start_3|>\n bio = Biography.query.order_by(desc(Biography.BiographyID)).first_or_404()\n result = []\n status_code = 204\n try:\n result = patch_item(bio, request.get_json())\n db.session.commit()\n except Exception:\n result = {'success': False, 'error': 'Could not apply patch'}\n status_code = 422\n return make_response(jsonify(result), status_code)\n<|end_body_3|>\n", "revision_id": "62f8e8e904e379541193f0cbb91a8434b47f538f", "skeleton": "<|skeleton|>\nclass BiographyView:\n\n def index(self):\n \"\"\"Return the newest Biography entry.\"\"\"\n <|body_0|>\n\n def post(self):\n \"\"\"Add a new Biography entry.\"\"\"\n <|body_1|>\n\n def put(self):\n \"\"\"Overwrite the newest Biography with a new one.\"\"\"\n <|body_2|>\n\n def patch(self):\n \"\"\"Update the newest Biography entry partially.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class BiographyView:\n def index(self):\n \"\"\"Return the newest Biography entry.\"\"\"\n newest = Biography.query.order_by(desc(Biography.BiographyID)).first_or_404()\n content = jsonify({'biography': [{'short': newest.Short, 'full': newest.Full, 'createdAt': newest.Created, 'updatedAt': newest.Updated}]})\n return make_response(content, 200)\n\n def post(self):\n \"\"\"Add a new Biography entry.\"\"\"\n data = json.loads(request.data.decode())\n bio = Biography(Short=data['short'], Full=data['full'], Created=get_datetime())\n db.session.add(bio)\n db.session.commit()\n server = socket.gethostname()\n contents = 'Location: {}{}{}'.format(server, url_for('BiographyView:index'), bio.BiographyID)\n return make_response(jsonify(contents), 201)\n\n def put(self):\n \"\"\"Overwrite the newest Biography with a new one.\"\"\"\n data = json.loads(request.data.decode())\n bio = Biography.query.order_by(desc(Biography.BiographyID)).first_or_404()\n bio.Short = data['short']\n bio.Full = data['full']\n bio.Updated = get_datetime()\n db.session.commit()\n return make_response('', 200)\n\n def patch(self):\n \"\"\"Update the newest Biography entry partially.\"\"\"\n bio = Biography.query.order_by(desc(Biography.BiographyID)).first_or_404()\n result = []\n status_code = 204\n try:\n result = patch_item(bio, request.get_json())\n db.session.commit()\n except Exception:\n result = {'success': False, 'error': 'Could not apply patch'}\n status_code = 422\n return make_response(jsonify(result), status_code)\n", "source": "the_stack_v2_python_sparse", "source_path": "apps/biography/views.py", "source_repo": "Torniojaws/vortech-backend", "split": "val", "star_events_count": 0}
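The patch() body above leans on a patch_item() helper that the record never defines. A hedged sketch of one plausible shape, treating the request JSON as a flat field-to-value mapping; the repository's real helper may instead implement RFC 6902 JSON Patch operations:

def patch_item(item, patch):
    # Apply each patch field to the model instance; an unknown field raises,
    # which the caller above converts into a 422 response.
    results = []
    for field, value in (patch or {}).items():
        if not hasattr(item, field):
            raise ValueError('unknown field: {}'.format(field))
        setattr(item, field, value)
        results.append({'field': field, 'success': True})
    return results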
{"blob_id": "33bca751e0ccaf5b34229871903c28a10e232594", "bodies": ["QListWidget.__init__(self)\nself.current_directory = '/data/lemi-archive-2016-04/*'\nself.add_items()\nself.image_display_figure = image_display_figure\nself.itemDoubleClicked.connect(self.item_click)", "item_text = '..'\nitem = QListWidgetItem(item_text)\nself.addItem(item)\nfile_list = glob.glob(self.current_directory)\nfor item_text in file_list:\n if os.path.isfile(item_text) and (item_text.endswith('fits') or item_text.endswith('fit')):\n try:\n naxis1 = fits.getval(item_text, 'NAXIS1')\n naxis2 = fits.getval(item_text, 'NAXIS2')\n objname = fits.getval(item_text, 'OBJNAME')\n observer = fits.getval(item_text, 'OBSERVER')\n exptime = fits.getval(item_text, 'EXPTIME')\n try:\n filters = fits.getval(item_text, 'FILTERS')\n except:\n filters = 'not defined'\n item_text = item_text + ' (' + str(naxis1) + ',' + str(naxis2) + ') ' + objname + ' ' + observer + ' ' + str(exptime) + ' ' + str(filters)\n item = QListWidgetItem(item_text)\n self.addItem(item)\n except Exception as e:\n print(str(e))\n print(item_text)\n print('error accessing FITS file or not a FITS file')\n else:\n item_text = item_text + '/'\n item = QListWidgetItem(item_text)\n self.addItem(item)", "dir_file = str(item.text())\ndir_file = dir_file.split()[0]\nif str(dir_file) == '..':\n if self.current_directory.endswith('*'):\n if os.path.dirname(self.current_directory[:-2]) != '/':\n new_dir = os.path.dirname(self.current_directory[:-2]) + '/*'\n else:\n new_dir = os.path.dirname(self.current_directory[:-2]) + '*'\n else:\n new_dir = os.path.dirname(self.current_directory) + '*'\n self.set_current_directory(new_dir)\nelif os.path.isfile(str(dir_file)):\n self.image_display_figure.update_image(dir_file)\nelse:\n self.set_current_directory(dir_file + '*')", "self.current_directory = new_dir\nself.clear()\nself.add_items()\nself.update()"], "bodies_text": "<|body_start_0|>\n QListWidget.__init__(self)\n self.current_directory = '/data/lemi-archive-2016-04/*'\n self.add_items()\n self.image_display_figure = image_display_figure\n self.itemDoubleClicked.connect(self.item_click)\n<|end_body_0|>\n\n<|body_start_1|>\n item_text = '..'\n item = QListWidgetItem(item_text)\n self.addItem(item)\n file_list = glob.glob(self.current_directory)\n for item_text in file_list:\n if os.path.isfile(item_text) and (item_text.endswith('fits') or item_text.endswith('fit')):\n try:\n naxis1 = fits.getval(item_text, 'NAXIS1')\n naxis2 = fits.getval(item_text, 'NAXIS2')\n objname = fits.getval(item_text, 'OBJNAME')\n observer = fits.getval(item_text, 'OBSERVER')\n exptime = fits.getval(item_text, 'EXPTIME')\n try:\n filters = fits.getval(item_text, 'FILTERS')\n except:\n filters = 'not defined'\n item_text = item_text + ' (' + str(naxis1) + ',' + str(naxis2) + ') ' + objname + ' ' + observer + ' ' + str(exptime) + ' ' + str(filters)\n item = QListWidgetItem(item_text)\n self.addItem(item)\n except Exception as e:\n print(str(e))\n print(item_text)\n print('error accessing FITS file or not a FITS file')\n else:\n item_text = item_text + '/'\n item = QListWidgetItem(item_text)\n self.addItem(item)\n<|end_body_1|>\n\n<|body_start_2|>\n dir_file = str(item.text())\n dir_file = dir_file.split()[0]\n if str(dir_file) == '..':\n if self.current_directory.endswith('*'):\n if os.path.dirname(self.current_directory[:-2]) != '/':\n new_dir = os.path.dirname(self.current_directory[:-2]) + '/*'\n else:\n new_dir = os.path.dirname(self.current_directory[:-2]) + '*'\n else:\n new_dir = 
os.path.dirname(self.current_directory) + '*'\n self.set_current_directory(new_dir)\n elif os.path.isfile(str(dir_file)):\n self.image_display_figure.update_image(dir_file)\n else:\n self.set_current_directory(dir_file + '*')\n<|end_body_2|>\n\n<|body_start_3|>\n self.current_directory = new_dir\n self.clear()\n self.add_items()\n self.update()\n<|end_body_3|>\n", "class_docstring": "A widget containing a list of files and/or directories.", "class_name": "FileList", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FileList:\n \"\"\"A widget containing a list of files and/or directories.\"\"\"\n\n def __init__(self, image_display_figure):\n \"\"\"Point the filelist widget to a default directory and fill it with the contents of that directory. Hook up a mouse double-click to call the 'item_click' method.\"\"\"\n <|body_0|>\n\n def add_items(self):\n \"\"\"Add items from the 'current_directory' to the list.\"\"\"\n <|body_1|>\n\n def item_click(self, item):\n \"\"\"The user clicked on an item, decide what to do based on the item.\"\"\"\n <|body_2|>\n\n def set_current_directory(self, new_dir):\n \"\"\"Set the current directory to 'new_dir' and then reload the list.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n QListWidget.__init__(self)\n self.current_directory = '/data/lemi-archive-2016-04/*'\n self.add_items()\n self.image_display_figure = image_display_figure\n self.itemDoubleClicked.connect(self.item_click)\n<|end_body_0|>\n\n<|body_start_1|>\n item_text = '..'\n item = QListWidgetItem(item_text)\n self.addItem(item)\n file_list = glob.glob(self.current_directory)\n for item_text in file_list:\n if os.path.isfile(item_text) and (item_text.endswith('fits') or item_text.endswith('fit')):\n try:\n naxis1 = fits.getval(item_text, 'NAXIS1')\n naxis2 = fits.getval(item_text, 'NAXIS2')\n objname = fits.getval(item_text, 'OBJNAME')\n observer = fits.getval(item_text, 'OBSERVER')\n exptime = fits.getval(item_text, 'EXPTIME')\n try:\n filters = fits.getval(item_text, 'FILTERS')\n except:\n filters = 'not defined'\n item_text = item_text + ' (' + str(naxis1) + ',' + str(naxis2) + ') ' + objname + ' ' + observer + ' ' + str(exptime) + ' ' + str(filters)\n item = QListWidgetItem(item_text)\n self.addItem(item)\n except Exception as e:\n print(str(e))\n print(item_text)\n print('error accessing FITS file or not a FITS file')\n else:\n item_text = item_text + '/'\n item = QListWidgetItem(item_text)\n self.addItem(item)\n<|end_body_1|>\n\n<|body_start_2|>\n dir_file = str(item.text())\n dir_file = dir_file.split()[0]\n if str(dir_file) == '..':\n if self.current_directory.endswith('*'):\n if os.path.dirname(self.current_directory[:-2]) != '/':\n new_dir = os.path.dirname(self.current_directory[:-2]) + '/*'\n else:\n new_dir = os.path.dirname(self.current_directory[:-2]) + '*'\n else:\n new_dir = os.path.dirname(self.current_directory) + '*'\n self.set_current_directory(new_dir)\n elif os.path.isfile(str(dir_file)):\n self.image_display_figure.update_image(dir_file)\n else:\n self.set_current_directory(dir_file + '*')\n<|end_body_2|>\n\n<|body_start_3|>\n self.current_directory = new_dir\n self.clear()\n self.add_items()\n self.update()\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000363", "length_bytes": 4121, "license_type": "no_license", "methods": [{"docstring": "Point the filelist widget to a default directory and fill it with the contents of that directory. 
Hook up a mouse double-click to call the 'item_click' method.", "name": "__init__", "signature": "def __init__(self, image_display_figure)"}, {"docstring": "Add items from the 'current_directory' to the list.", "name": "add_items", "signature": "def add_items(self)"}, {"docstring": "The user clicked on an item, decide what to do based on the item.", "name": "item_click", "signature": "def item_click(self, item)"}, {"docstring": "Set the current directory to 'new_dir' and then reload the list.", "name": "set_current_directory", "signature": "def set_current_directory(self, new_dir)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_009038", "prompt": "Implement the Python class `FileList` described below.\n\nClass description:\nA widget containing a list of files and/or directories.\n\nMethod signatures and docstrings:\n- def __init__(self, image_display_figure): Point the filelist widget to a default directory and fill it with the contents of that directory. Hook up a mouse double-click to call the 'item_click' method.\n- def add_items(self): Add items from the 'current_directory' to the list.\n- def item_click(self, item): The user clicked on an item, decide what to do based on the item.\n- def set_current_directory(self, new_dir): Set the current directory to 'new_dir' and then reload the list.", "prompted_full_text": "Implement the Python class `FileList` described below.\n\nClass description:\nA widget containing a list of files and/or directories.\n\nMethod signatures and docstrings:\n- def __init__(self, image_display_figure): Point the filelist widget to a default directory and fill it with the contents of that directory. Hook up a mouse double-click to call the 'item_click' method.\n- def add_items(self): Add items from the 'current_directory' to the list.\n- def item_click(self, item): The user clicked on an item, decide what to do based on the item.\n- def set_current_directory(self, new_dir): Set the current directory to 'new_dir' and then reload the list.\n\n<|skeleton|>\nclass FileList:\n \"\"\"A widget containing a list of files and/or directories.\"\"\"\n\n def __init__(self, image_display_figure):\n \"\"\"Point the filelist widget to a default directory and fill it with the contents of that directory. 
Hook up a mouse double-click to call the 'item_click' method.\"\"\"\n <|body_0|>\n\n def add_items(self):\n \"\"\"Add items from the 'current_directory' to the list.\"\"\"\n <|body_1|>\n\n def item_click(self, item):\n \"\"\"The user clicked on an item, decide what to do based on the item.\"\"\"\n <|body_2|>\n\n def set_current_directory(self, new_dir):\n \"\"\"Set the current directory to 'new_dir' and then reload the list.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n QListWidget.__init__(self)\n self.current_directory = '/data/lemi-archive-2016-04/*'\n self.add_items()\n self.image_display_figure = image_display_figure\n self.itemDoubleClicked.connect(self.item_click)\n<|end_body_0|>\n\n<|body_start_1|>\n item_text = '..'\n item = QListWidgetItem(item_text)\n self.addItem(item)\n file_list = glob.glob(self.current_directory)\n for item_text in file_list:\n if os.path.isfile(item_text) and (item_text.endswith('fits') or item_text.endswith('fit')):\n try:\n naxis1 = fits.getval(item_text, 'NAXIS1')\n naxis2 = fits.getval(item_text, 'NAXIS2')\n objname = fits.getval(item_text, 'OBJNAME')\n observer = fits.getval(item_text, 'OBSERVER')\n exptime = fits.getval(item_text, 'EXPTIME')\n try:\n filters = fits.getval(item_text, 'FILTERS')\n except:\n filters = 'not defined'\n item_text = item_text + ' (' + str(naxis1) + ',' + str(naxis2) + ') ' + objname + ' ' + observer + ' ' + str(exptime) + ' ' + str(filters)\n item = QListWidgetItem(item_text)\n self.addItem(item)\n except Exception as e:\n print(str(e))\n print(item_text)\n print('error accessing FITS file or not a FITS file')\n else:\n item_text = item_text + '/'\n item = QListWidgetItem(item_text)\n self.addItem(item)\n<|end_body_1|>\n\n<|body_start_2|>\n dir_file = str(item.text())\n dir_file = dir_file.split()[0]\n if str(dir_file) == '..':\n if self.current_directory.endswith('*'):\n if os.path.dirname(self.current_directory[:-2]) != '/':\n new_dir = os.path.dirname(self.current_directory[:-2]) + '/*'\n else:\n new_dir = os.path.dirname(self.current_directory[:-2]) + '*'\n else:\n new_dir = os.path.dirname(self.current_directory) + '*'\n self.set_current_directory(new_dir)\n elif os.path.isfile(str(dir_file)):\n self.image_display_figure.update_image(dir_file)\n else:\n self.set_current_directory(dir_file + '*')\n<|end_body_2|>\n\n<|body_start_3|>\n self.current_directory = new_dir\n self.clear()\n self.add_items()\n self.update()\n<|end_body_3|>\n", "revision_id": "dc719571a46b0185e6d96b3556c4a6c80f2ac77e", "skeleton": "<|skeleton|>\nclass FileList:\n \"\"\"A widget containing a list of files and/or directories.\"\"\"\n\n def __init__(self, image_display_figure):\n \"\"\"Point the filelist widget to a default directory and fill it with the contents of that directory. 
Hook up a mouse double-click to call the 'item_click' method.\"\"\"\n <|body_0|>\n\n def add_items(self):\n \"\"\"Add items from the 'current_directory' to the list.\"\"\"\n <|body_1|>\n\n def item_click(self, item):\n \"\"\"The user clicked on an item, decide what to do based on the item.\"\"\"\n <|body_2|>\n\n def set_current_directory(self, new_dir):\n \"\"\"Set the current directory to 'new_dir' and then reload the list.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class FileList:\n \"\"\"A widget containing a list of files and/or directories.\"\"\"\n\n def __init__(self, image_display_figure):\n \"\"\"Point the filelist widget to a default directory and fill it with the contents of that directory. Hook up a mouse double-click to call the 'item_click' method.\"\"\"\n QListWidget.__init__(self)\n self.current_directory = '/data/lemi-archive-2016-04/*'\n self.add_items()\n self.image_display_figure = image_display_figure\n self.itemDoubleClicked.connect(self.item_click)\n\n def add_items(self):\n \"\"\"Add items from the 'current_directory' to the list.\"\"\"\n item_text = '..'\n item = QListWidgetItem(item_text)\n self.addItem(item)\n file_list = glob.glob(self.current_directory)\n for item_text in file_list:\n if os.path.isfile(item_text) and (item_text.endswith('fits') or item_text.endswith('fit')):\n try:\n naxis1 = fits.getval(item_text, 'NAXIS1')\n naxis2 = fits.getval(item_text, 'NAXIS2')\n objname = fits.getval(item_text, 'OBJNAME')\n observer = fits.getval(item_text, 'OBSERVER')\n exptime = fits.getval(item_text, 'EXPTIME')\n try:\n filters = fits.getval(item_text, 'FILTERS')\n except:\n filters = 'not defined'\n item_text = item_text + ' (' + str(naxis1) + ',' + str(naxis2) + ') ' + objname + ' ' + observer + ' ' + str(exptime) + ' ' + str(filters)\n item = QListWidgetItem(item_text)\n self.addItem(item)\n except Exception as e:\n print(str(e))\n print(item_text)\n print('error accessing FITS file or not a FITS file')\n else:\n item_text = item_text + '/'\n item = QListWidgetItem(item_text)\n self.addItem(item)\n\n def item_click(self, item):\n \"\"\"The user clicked on an item, decide what to do based on the item.\"\"\"\n dir_file = str(item.text())\n dir_file = dir_file.split()[0]\n if str(dir_file) == '..':\n if self.current_directory.endswith('*'):\n if os.path.dirname(self.current_directory[:-2]) != '/':\n new_dir = os.path.dirname(self.current_directory[:-2]) + '/*'\n else:\n new_dir = os.path.dirname(self.current_directory[:-2]) + '*'\n else:\n new_dir = os.path.dirname(self.current_directory) + '*'\n self.set_current_directory(new_dir)\n elif os.path.isfile(str(dir_file)):\n self.image_display_figure.update_image(dir_file)\n else:\n self.set_current_directory(dir_file + '*')\n\n def set_current_directory(self, new_dir):\n \"\"\"Set the current directory to 'new_dir' and then reload the list.\"\"\"\n self.current_directory = new_dir\n self.clear()\n self.add_items()\n self.update()\n", "source": "the_stack_v2_python_sparse", "source_path": "00_ObjectOrientedProgramming/FileList.py", "source_repo": "mommermi/DeadParrots", "split": "val", "star_events_count": 0}
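add_items() in the record above opens each FITS file up to six times, since every fits.getval() call re-reads the file from disk. Assuming fits is astropy.io.fits (the record does not show its imports), the same summary line can be built from a single header read, with Header.get() replacing the nested try/except around FILTERS:

from astropy.io import fits

def fits_summary(path):
    header = fits.getheader(path)  # one open instead of six getval() calls
    filters = header.get('FILTERS', 'not defined')
    return '{} ({},{}) {} {} {} {}'.format(
        path, header['NAXIS1'], header['NAXIS2'],
        header['OBJNAME'], header['OBSERVER'], header['EXPTIME'], filters)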
{"blob_id": "abade0ee490c3ca9767004ff1acf4954c0fbebd9", "bodies": ["data = parser.parse_args()\narguments = sum([1 if value else 0 for value in data.values()])\nif arguments > 1:\n return ({'message': 'Please filter by only one feature'}, 400)\nif arguments <= 0:\n return ({'message': 'Please filter by at least one feature: id or google_token'}, 400)\nif data['id']:\n user = UsersModel.find_by_id(user_id=data['id'])\n search_by = ('id', data['id'])\nelif data['google_token']:\n user = UsersModel.find_by_google_token(google_token=data['google_token'])\n search_by = ('Google token', data['google_token'])\nelse:\n return ({'message': 'Please filter by a valid feature: id or google_token'}, 400)\nif user:\n return ({'user': user.json()}, 200)\nelse:\n return ({'message': 'User with {} [{}] Not found'.format(search_by[0], search_by[1])}, 404)", "data = parser.parse_args()\nif not data['mail']:\n return ({'message': {'mail': 'Mail cant be empty'}}, 400)\nif not data['google_token']:\n return ({'message': {'google_token': 'Google token cant be empty'}}, 400)\nif not data['role']:\n return ({'message': {'role': 'Role cant be empty'}}, 400)\nif UsersModel.find_by_mail(data['mail']):\n return ({'message': 'Account with mail [{}] already exists'.format(data['mail'])}, 409)\nuser = UsersModel(google_token=data['google_token'], mail=data['mail'], role=data['role'])\nif data['role'] != '0':\n try:\n user.save_to_db()\n return ({'user': UsersModel.find_by_mail(user.mail).json()}, 201)\n except:\n return ({'message': 'Error Description'}, 500)\nelse:\n try:\n user.save_to_db()\n return ({'user': UsersModel.find_by_mail(user.mail).json()}, 201)\n except:\n return ({'message': 'Error Description'}, 500)", "data = parser.parse_args()\nuser = UsersModel.find_by_id(user_id=user_id)\nif data['name']:\n user.name = data['name']\nif data['surname']:\n user.surname = data['surname']\nif data['national_id_document']:\n user.national_id_document = data['national_id_document']\nif data['country']:\n user.country = data['country']\nif data['mail']:\n user.mail = data['mail']\nif data['google_token']:\n user.google_token = data['google_token']\nif data['role']:\n user.role = data['role']\nif data['id_bank_data']:\n user.id_bank_data = data['id_bank_data']\ntry:\n user.save_to_db()\n return ({'user': UsersModel.find_by_id(user_id).json()}, 200)\nexcept:\n return ({'message': 'Error Description'}, 500)", "data = parser.parse_args()\nuser = UsersModel.find_by_id(user_id=user_id)\nif user:\n try:\n user.delete_from_db()\n return ({'message': 'User with id [{}] and all associated info deleted'.format(user_id)}, 200)\n except:\n return ({'message': 'Error Description'}, 500)\nelse:\n return ({'message': 'User with id [{}] Not found'.format(user_id)}, 404)"], "bodies_text": "<|body_start_0|>\n data = parser.parse_args()\n arguments = sum([1 if value else 0 for value in data.values()])\n if arguments > 1:\n return ({'message': 'Please filter by only one feature'}, 400)\n if arguments <= 0:\n return ({'message': 'Please filter by at least one feature: id or google_token'}, 400)\n if data['id']:\n user = UsersModel.find_by_id(user_id=data['id'])\n search_by = ('id', data['id'])\n elif data['google_token']:\n user = UsersModel.find_by_google_token(google_token=data['google_token'])\n search_by = ('Google token', data['google_token'])\n else:\n return ({'message': 'Please filter by a valid feature: id or google_token'}, 400)\n if user:\n return ({'user': user.json()}, 200)\n else:\n return ({'message': 'User with {} [{}] Not 
found'.format(search_by[0], search_by[1])}, 404)\n<|end_body_0|>\n\n<|body_start_1|>\n data = parser.parse_args()\n if not data['mail']:\n return ({'message': {'mail': 'Mail cant be empty'}}, 400)\n if not data['google_token']:\n return ({'message': {'google_token': 'Google token cant be empty'}}, 400)\n if not data['role']:\n return ({'message': {'role': 'Role cant be empty'}}, 400)\n if UsersModel.find_by_mail(data['mail']):\n return ({'message': 'Account with mail [{}] already exists'.format(data['mail'])}, 409)\n user = UsersModel(google_token=data['google_token'], mail=data['mail'], role=data['role'])\n if data['role'] != '0':\n try:\n user.save_to_db()\n return ({'user': UsersModel.find_by_mail(user.mail).json()}, 201)\n except:\n return ({'message': 'Error Description'}, 500)\n else:\n try:\n user.save_to_db()\n return ({'user': UsersModel.find_by_mail(user.mail).json()}, 201)\n except:\n return ({'message': 'Error Description'}, 500)\n<|end_body_1|>\n\n<|body_start_2|>\n data = parser.parse_args()\n user = UsersModel.find_by_id(user_id=user_id)\n if data['name']:\n user.name = data['name']\n if data['surname']:\n user.surname = data['surname']\n if data['national_id_document']:\n user.national_id_document = data['national_id_document']\n if data['country']:\n user.country = data['country']\n if data['mail']:\n user.mail = data['mail']\n if data['google_token']:\n user.google_token = data['google_token']\n if data['role']:\n user.role = data['role']\n if data['id_bank_data']:\n user.id_bank_data = data['id_bank_data']\n try:\n user.save_to_db()\n return ({'user': UsersModel.find_by_id(user_id).json()}, 200)\n except:\n return ({'message': 'Error Description'}, 500)\n<|end_body_2|>\n\n<|body_start_3|>\n data = parser.parse_args()\n user = UsersModel.find_by_id(user_id=user_id)\n if user:\n try:\n user.delete_from_db()\n return ({'message': 'User with id [{}] and all associated info deleted'.format(user_id)}, 200)\n except:\n return ({'message': 'Error Description'}, 500)\n else:\n return ({'message': 'User with id [{}] Not found'.format(user_id)}, 404)\n<|end_body_3|>\n", "class_docstring": "API Restful methods for Users", "class_name": "Users", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Users:\n \"\"\"API Restful methods for Users\"\"\"\n\n def get(self):\n \"\"\"GET method Gets an account by id or google token Param: string id or google token Return: dict (account ok / message)\"\"\"\n <|body_0|>\n\n def post(self):\n \"\"\"POST method Adds a new account Return: dict (account created / message)\"\"\"\n <|body_1|>\n\n def put(self, user_id):\n \"\"\"PUT method Modifies a user Param: id Return: dict (user created)\"\"\"\n <|body_2|>\n\n def delete(self, user_id):\n \"\"\"DELETE method Removes an account Param: string id Return: dict (message ok / message)\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n data = parser.parse_args()\n arguments = sum([1 if value else 0 for value in data.values()])\n if arguments > 1:\n return ({'message': 'Please filter by only one feature'}, 400)\n if arguments <= 0:\n return ({'message': 'Please filter by at least one feature: id or google_token'}, 400)\n if data['id']:\n user = UsersModel.find_by_id(user_id=data['id'])\n search_by = ('id', data['id'])\n elif data['google_token']:\n user = UsersModel.find_by_google_token(google_token=data['google_token'])\n search_by = ('Google token', data['google_token'])\n else:\n return ({'message': 'Please filter by a valid 
feature: id or google_token'}, 400)\n if user:\n return ({'user': user.json()}, 200)\n else:\n return ({'message': 'User with {} [{}] Not found'.format(search_by[0], search_by[1])}, 404)\n<|end_body_0|>\n\n<|body_start_1|>\n data = parser.parse_args()\n if not data['mail']:\n return ({'message': {'mail': 'Mail cant be empty'}}, 400)\n if not data['google_token']:\n return ({'message': {'google_token': 'Google token cant be empty'}}, 400)\n if not data['role']:\n return ({'message': {'role': 'Role cant be empty'}}, 400)\n if UsersModel.find_by_mail(data['mail']):\n return ({'message': 'Account with mail [{}] already exists'.format(data['mail'])}, 409)\n user = UsersModel(google_token=data['google_token'], mail=data['mail'], role=data['role'])\n if data['role'] != '0':\n try:\n user.save_to_db()\n return ({'user': UsersModel.find_by_mail(user.mail).json()}, 201)\n except:\n return ({'message': 'Error Description'}, 500)\n else:\n try:\n user.save_to_db()\n return ({'user': UsersModel.find_by_mail(user.mail).json()}, 201)\n except:\n return ({'message': 'Error Description'}, 500)\n<|end_body_1|>\n\n<|body_start_2|>\n data = parser.parse_args()\n user = UsersModel.find_by_id(user_id=user_id)\n if data['name']:\n user.name = data['name']\n if data['surname']:\n user.surname = data['surname']\n if data['national_id_document']:\n user.national_id_document = data['national_id_document']\n if data['country']:\n user.country = data['country']\n if data['mail']:\n user.mail = data['mail']\n if data['google_token']:\n user.google_token = data['google_token']\n if data['role']:\n user.role = data['role']\n if data['id_bank_data']:\n user.id_bank_data = data['id_bank_data']\n try:\n user.save_to_db()\n return ({'user': UsersModel.find_by_id(user_id).json()}, 200)\n except:\n return ({'message': 'Error Description'}, 500)\n<|end_body_2|>\n\n<|body_start_3|>\n data = parser.parse_args()\n user = UsersModel.find_by_id(user_id=user_id)\n if user:\n try:\n user.delete_from_db()\n return ({'message': 'User with id [{}] and all associated info deleted'.format(user_id)}, 200)\n except:\n return ({'message': 'Error Description'}, 500)\n else:\n return ({'message': 'User with id [{}] Not found'.format(user_id)}, 404)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000364", "length_bytes": 5913, "license_type": "no_license", "methods": [{"docstring": "GET method Gets an account by id or google token Param: string id or google token Return: dict (account ok / message)", "name": "get", "signature": "def get(self)"}, {"docstring": "POST method Adds a new account Return: dict (account created / message)", "name": "post", "signature": "def post(self)"}, {"docstring": "PUT method Modifies a user Param: id Return: dict (user created)", "name": "put", "signature": "def put(self, user_id)"}, {"docstring": "DELETE method Removes an account Param: string id Return: dict (message ok / message)", "name": "delete", "signature": "def delete(self, user_id)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_048571", "prompt": "Implement the Python class `Users` described below.\n\nClass description:\nAPI Restful methods for Users\n\nMethod signatures and docstrings:\n- def get(self): GET method Gets an account by id or google token Param: string id or google token Return: dict (account ok / message)\n- def post(self): POST method Adds a new account Return: dict (account created / message)\n- def put(self, user_id): PUT method Modifies a user Param: id Return: dict (user created)\n- def 
delete(self, user_id): DELETE method Removes an account Param: string id Return: dict (message ok / message)", "prompted_full_text": "Implement the Python class `Users` described below.\n\nClass description:\nAPI Restful methods for Users\n\nMethod signatures and docstrings:\n- def get(self): GET method Gets an account by id or google token Param: string id or google token Return: dict (account ok / message)\n- def post(self): POST method Adds a new account Return: dict (account created / message)\n- def put(self, user_id): PUT method Modifies a user Param: id Return: dict (user created)\n- def delete(self, user_id): DELETE method Removes an account Param: string id Return: dict (message ok / message)\n\n<|skeleton|>\nclass Users:\n \"\"\"API Restful methods for Users\"\"\"\n\n def get(self):\n \"\"\"GET method Gets an account by id or google token Param: string id or google token Return: dict (account ok / message)\"\"\"\n <|body_0|>\n\n def post(self):\n \"\"\"POST method Adds a new account Return: dict (account created / message)\"\"\"\n <|body_1|>\n\n def put(self, user_id):\n \"\"\"PUT method Modifies a user Param: id Return: dict (user created)\"\"\"\n <|body_2|>\n\n def delete(self, user_id):\n \"\"\"DELETE method Removes an account Param: string id Return: dict (message ok / message)\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n data = parser.parse_args()\n arguments = sum([1 if value else 0 for value in data.values()])\n if arguments > 1:\n return ({'message': 'Please filter by only one feature'}, 400)\n if arguments <= 0:\n return ({'message': 'Please filter by at least one feature: id or google_token'}, 400)\n if data['id']:\n user = UsersModel.find_by_id(user_id=data['id'])\n search_by = ('id', data['id'])\n elif data['google_token']:\n user = UsersModel.find_by_google_token(google_token=data['google_token'])\n search_by = ('Google token', data['google_token'])\n else:\n return ({'message': 'Please filter by a valid feature: id or google_token'}, 400)\n if user:\n return ({'user': user.json()}, 200)\n else:\n return ({'message': 'User with {} [{}] Not found'.format(search_by[0], search_by[1])}, 404)\n<|end_body_0|>\n\n<|body_start_1|>\n data = parser.parse_args()\n if not data['mail']:\n return ({'message': {'mail': 'Mail cant be empty'}}, 400)\n if not data['google_token']:\n return ({'message': {'google_token': 'Google token cant be empty'}}, 400)\n if not data['role']:\n return ({'message': {'role': 'Role cant be empty'}}, 400)\n if UsersModel.find_by_mail(data['mail']):\n return ({'message': 'Account with mail [{}] already exists'.format(data['mail'])}, 409)\n user = UsersModel(google_token=data['google_token'], mail=data['mail'], role=data['role'])\n if data['role'] != '0':\n try:\n user.save_to_db()\n return ({'user': UsersModel.find_by_mail(user.mail).json()}, 201)\n except:\n return ({'message': 'Error Description'}, 500)\n else:\n try:\n user.save_to_db()\n return ({'user': UsersModel.find_by_mail(user.mail).json()}, 201)\n except:\n return ({'message': 'Error Description'}, 500)\n<|end_body_1|>\n\n<|body_start_2|>\n data = parser.parse_args()\n user = UsersModel.find_by_id(user_id=user_id)\n if data['name']:\n user.name = data['name']\n if data['surname']:\n user.surname = data['surname']\n if data['national_id_document']:\n user.national_id_document = data['national_id_document']\n if data['country']:\n user.country = data['country']\n if data['mail']:\n user.mail = data['mail']\n if data['google_token']:\n user.google_token = data['google_token']\n if 
data['role']:\n user.role = data['role']\n if data['id_bank_data']:\n user.id_bank_data = data['id_bank_data']\n try:\n user.save_to_db()\n return ({'user': UsersModel.find_by_id(user_id).json()}, 200)\n except:\n return ({'message': 'Error Description'}, 500)\n<|end_body_2|>\n\n<|body_start_3|>\n data = parser.parse_args()\n user = UsersModel.find_by_id(user_id=user_id)\n if user:\n try:\n user.delete_from_db()\n return ({'message': 'User with id [{}] and all associated info deleted'.format(user_id)}, 200)\n except:\n return ({'message': 'Error Description'}, 500)\n else:\n return ({'message': 'User with id [{}] Not found'.format(user_id)}, 404)\n<|end_body_3|>\n", "revision_id": "1b05fd850b5dd56b183640e25e4370ca959ffe80", "skeleton": "<|skeleton|>\nclass Users:\n \"\"\"API Restful methods for Users\"\"\"\n\n def get(self):\n \"\"\"GET method Gets an account by id or google token Param: string id or google token Return: dict (account ok / message)\"\"\"\n <|body_0|>\n\n def post(self):\n \"\"\"POST method Adds a new account Return: dict (account created / message)\"\"\"\n <|body_1|>\n\n def put(self, user_id):\n \"\"\"PUT method Modifies a user Param: id Return: dict (user created)\"\"\"\n <|body_2|>\n\n def delete(self, user_id):\n \"\"\"DELETE method Removes an account Param: string id Return: dict (message ok / message)\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Users:\n \"\"\"API Restful methods for Users\"\"\"\n\n def get(self):\n \"\"\"GET method Gets an account by id or google token Param: string id or google token Return: dict (account ok / message)\"\"\"\n data = parser.parse_args()\n arguments = sum([1 if value else 0 for value in data.values()])\n if arguments > 1:\n return ({'message': 'Please filter by only one feature'}, 400)\n if arguments <= 0:\n return ({'message': 'Please filter by at least one feature: id or google_token'}, 400)\n if data['id']:\n user = UsersModel.find_by_id(user_id=data['id'])\n search_by = ('id', data['id'])\n elif data['google_token']:\n user = UsersModel.find_by_google_token(google_token=data['google_token'])\n search_by = ('Google token', data['google_token'])\n else:\n return ({'message': 'Please filter by a valid feature: id or google_token'}, 400)\n if user:\n return ({'user': user.json()}, 200)\n else:\n return ({'message': 'User with {} [{}] Not found'.format(search_by[0], search_by[1])}, 404)\n\n def post(self):\n \"\"\"POST method Adds a new account Return: dict (account created / message)\"\"\"\n data = parser.parse_args()\n if not data['mail']:\n return ({'message': {'mail': 'Mail cant be empty'}}, 400)\n if not data['google_token']:\n return ({'message': {'google_token': 'Google token cant be empty'}}, 400)\n if not data['role']:\n return ({'message': {'role': 'Role cant be empty'}}, 400)\n if UsersModel.find_by_mail(data['mail']):\n return ({'message': 'Account with mail [{}] already exists'.format(data['mail'])}, 409)\n user = UsersModel(google_token=data['google_token'], mail=data['mail'], role=data['role'])\n if data['role'] != '0':\n try:\n user.save_to_db()\n return ({'user': UsersModel.find_by_mail(user.mail).json()}, 201)\n except:\n return ({'message': 'Error Description'}, 500)\n else:\n try:\n user.save_to_db()\n return ({'user': UsersModel.find_by_mail(user.mail).json()}, 201)\n except:\n return ({'message': 'Error Description'}, 500)\n\n def put(self, user_id):\n 
\"\"\"PUT method Modifies a user Param: id Return: dict (user created)\"\"\"\n data = parser.parse_args()\n user = UsersModel.find_by_id(user_id=user_id)\n if data['name']:\n user.name = data['name']\n if data['surname']:\n user.surname = data['surname']\n if data['national_id_document']:\n user.national_id_document = data['national_id_document']\n if data['country']:\n user.country = data['country']\n if data['mail']:\n user.mail = data['mail']\n if data['google_token']:\n user.google_token = data['google_token']\n if data['role']:\n user.role = data['role']\n if data['id_bank_data']:\n user.id_bank_data = data['id_bank_data']\n try:\n user.save_to_db()\n return ({'user': UsersModel.find_by_id(user_id).json()}, 200)\n except:\n return ({'message': 'Error Description'}, 500)\n\n def delete(self, user_id):\n \"\"\"DELETE method Removes an account Param: string id Return: dict (message ok / message)\"\"\"\n data = parser.parse_args()\n user = UsersModel.find_by_id(user_id=user_id)\n if user:\n try:\n user.delete_from_db()\n return ({'message': 'User with id [{}] and all associated info deleted'.format(user_id)}, 200)\n except:\n return ({'message': 'Error Description'}, 500)\n else:\n return ({'message': 'User with id [{}] Not found'.format(user_id)}, 404)\n", "source": "the_stack_v2_python_sparse", "source_path": "Deployment/resources/users.py", "source_repo": "UB-ES-2020-Motorent/MotoRent", "split": "val", "star_events_count": 2}
{"blob_id": "6b39ff9c53a979931ce7bf0c667a608b44034da3", "bodies": ["super(CnnFnn, self).__init__()\nself.num_var = num_var\nself.kernel_size = kernel_size\nself.stride = stride\nself.cnns = nn.ModuleList([nn.Sequential(nn.Conv3d(1, 1, (1, self.kernel_size, self.kernel_size), (1, self.stride, self.stride)), nn.ReLU(inplace=True)) for i in range(self.num_var)])\nself.input_dim = input_dim\nself.output_dim = output_dim\nself.hidden_dim = hidden_dim\nself.num_layers = num_layers\nself.learning_rate = learning_rate\nself.num_epochs = num_epochs\nself.fnn = ReluNet(input_dim=self.input_dim * 4, output_dim=self.output_dim, hidden_dim=self.hidden_dim, num_layers=self.num_layers, num_epochs=self.num_epochs)", "for i in range(self.num_var):\n input_x = torch.as_tensor(src[i]).float()\n input_x = input_x.to(device)\n x_out = self.cnns[i](input_x).squeeze(axis=1)\n x_flat = x_out.view(x_out.shape[0], x_out.shape[1], -1)\n if i == 0:\n x_append = x_flat\n else:\n x_append = torch.cat((x_append, x_flat), axis=-1)\nx_append = x_append.view(x_append.shape[0], -1)\nself.fnn = self.fnn.to(device)\noutput = self.fnn(x_append)\nreturn output", "optimizer = optim.Adam(self.parameters(), self.learning_rate)\ncriterion = torch.nn.MSELoss(reduction='mean')\nmax_epoch = self.num_epochs\nfor epoch in range(max_epoch):\n self.train()\n train_epoch_loss = 0\n for j, (src, trg) in enumerate(train_loader):\n trg = torch.as_tensor(trg).float().to(device)\n train_output = self.forward(src, device)\n loss = criterion(train_output, trg)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n train_epoch_loss += loss.item()\n print('Epoch: {}/{} Train Loss: {:.4f}'.format(epoch, max_epoch, train_epoch_loss / (j + 1)))", "val_trg = torch.as_tensor(val_trg).float().to(device)\noptimizer = optim.Adam(self.parameters(), self.learning_rate)\ncriterion = torch.nn.MSELoss(reduction='mean')\nhistory = np.zeros((self.num_epochs, 2))\nmax_epoch = self.num_epochs\nfor epoch in range(max_epoch):\n self.train()\n train_epoch_loss = 0\n for j, (src, trg) in enumerate(train_loader):\n trg = torch.as_tensor(trg).float().to(device)\n train_output = self.forward(src, device)\n loss = criterion(train_output, trg)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n train_epoch_loss += loss.item()\n self.eval()\n val_output = self.forward(val_src, device)\n loss = criterion(val_output, val_trg)\n val_epoch_loss = loss.item()\n history[epoch] = [train_epoch_loss / (j + 1), val_epoch_loss]\n print('Epoch: {}/{} Train Loss: {:.4f} Validation Loss:{:.4f}'.format(epoch, self.num_epochs, train_epoch_loss / (j + 1), val_epoch_loss))\nreturn history", "self.eval()\nwith torch.no_grad():\n output = self.forward(src, device)\nreturn output.detach().cpu().numpy()"], "bodies_text": "<|body_start_0|>\n super(CnnFnn, self).__init__()\n self.num_var = num_var\n self.kernel_size = kernel_size\n self.stride = stride\n self.cnns = nn.ModuleList([nn.Sequential(nn.Conv3d(1, 1, (1, self.kernel_size, self.kernel_size), (1, self.stride, self.stride)), nn.ReLU(inplace=True)) for i in range(self.num_var)])\n self.input_dim = input_dim\n self.output_dim = output_dim\n self.hidden_dim = hidden_dim\n self.num_layers = num_layers\n self.learning_rate = learning_rate\n self.num_epochs = num_epochs\n self.fnn = ReluNet(input_dim=self.input_dim * 4, output_dim=self.output_dim, hidden_dim=self.hidden_dim, num_layers=self.num_layers, num_epochs=self.num_epochs)\n<|end_body_0|>\n\n<|body_start_1|>\n for i in range(self.num_var):\n input_x = 
torch.as_tensor(src[i]).float()\n input_x = input_x.to(device)\n x_out = self.cnns[i](input_x).squeeze(axis=1)\n x_flat = x_out.view(x_out.shape[0], x_out.shape[1], -1)\n if i == 0:\n x_append = x_flat\n else:\n x_append = torch.cat((x_append, x_flat), axis=-1)\n x_append = x_append.view(x_append.shape[0], -1)\n self.fnn = self.fnn.to(device)\n output = self.fnn(x_append)\n return output\n<|end_body_1|>\n\n<|body_start_2|>\n optimizer = optim.Adam(self.parameters(), self.learning_rate)\n criterion = torch.nn.MSELoss(reduction='mean')\n max_epoch = self.num_epochs\n for epoch in range(max_epoch):\n self.train()\n train_epoch_loss = 0\n for j, (src, trg) in enumerate(train_loader):\n trg = torch.as_tensor(trg).float().to(device)\n train_output = self.forward(src, device)\n loss = criterion(train_output, trg)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n train_epoch_loss += loss.item()\n print('Epoch: {}/{} Train Loss: {:.4f}'.format(epoch, max_epoch, train_epoch_loss / (j + 1)))\n<|end_body_2|>\n\n<|body_start_3|>\n val_trg = torch.as_tensor(val_trg).float().to(device)\n optimizer = optim.Adam(self.parameters(), self.learning_rate)\n criterion = torch.nn.MSELoss(reduction='mean')\n history = np.zeros((self.num_epochs, 2))\n max_epoch = self.num_epochs\n for epoch in range(max_epoch):\n self.train()\n train_epoch_loss = 0\n for j, (src, trg) in enumerate(train_loader):\n trg = torch.as_tensor(trg).float().to(device)\n train_output = self.forward(src, device)\n loss = criterion(train_output, trg)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n train_epoch_loss += loss.item()\n self.eval()\n val_output = self.forward(val_src, device)\n loss = criterion(val_output, val_trg)\n val_epoch_loss = loss.item()\n history[epoch] = [train_epoch_loss / (j + 1), val_epoch_loss]\n print('Epoch: {}/{} Train Loss: {:.4f} Validation Loss:{:.4f}'.format(epoch, self.num_epochs, train_epoch_loss / (j + 1), val_epoch_loss))\n return history\n<|end_body_3|>\n\n<|body_start_4|>\n self.eval()\n with torch.no_grad():\n output = self.forward(src, device)\n return output.detach().cpu().numpy()\n<|end_body_4|>\n", "class_docstring": "Class for CNN model", "class_name": "CnnFnn", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CnnFnn:\n \"\"\"Class for CNN model\"\"\"\n\n def __init__(self, num_var, input_dim, output_dim, kernel_size=9, stride=5, hidden_dim=100, num_layers=2, num_epochs=100, learning_rate=0.001):\n \"\"\"Initilize CNN model Args: num_var: int -- number of covariates as input, one CNN for each covariate input_dim: int -- dimension of the input for fully connected layers after apply cnn output_dim: int -- dimension of the output feature kernel_size: int -- Size of the convolving kernel srtide: int -- Stride of the convolution hidden_dim: int -- number of hidden units for fully connected layers num_layers: int -- number of hidden layers for fully connected layers num_epochs: int -- number of epochs to train learning_rate: float -- learning rate for ADAM\"\"\"\n <|body_0|>\n\n def forward(self, src, device):\n \"\"\"Forward function\"\"\"\n <|body_1|>\n\n def fit(self, train_loader, device):\n \"\"\"Fit function to CNN\"\"\"\n <|body_2|>\n\n def fit_cv(self, train_loader, val_src, val_trg, device):\n \"\"\"Fit function for hyper-parameter tuning\"\"\"\n <|body_3|>\n\n def predict(self, src, device):\n \"\"\"Predict function for trained CNN-FNN model to predict\"\"\"\n 
<|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(CnnFnn, self).__init__()\n self.num_var = num_var\n self.kernel_size = kernel_size\n self.stride = stride\n self.cnns = nn.ModuleList([nn.Sequential(nn.Conv3d(1, 1, (1, self.kernel_size, self.kernel_size), (1, self.stride, self.stride)), nn.ReLU(inplace=True)) for i in range(self.num_var)])\n self.input_dim = input_dim\n self.output_dim = output_dim\n self.hidden_dim = hidden_dim\n self.num_layers = num_layers\n self.learning_rate = learning_rate\n self.num_epochs = num_epochs\n self.fnn = ReluNet(input_dim=self.input_dim * 4, output_dim=self.output_dim, hidden_dim=self.hidden_dim, num_layers=self.num_layers, num_epochs=self.num_epochs)\n<|end_body_0|>\n\n<|body_start_1|>\n for i in range(self.num_var):\n input_x = torch.as_tensor(src[i]).float()\n input_x = input_x.to(device)\n x_out = self.cnns[i](input_x).squeeze(axis=1)\n x_flat = x_out.view(x_out.shape[0], x_out.shape[1], -1)\n if i == 0:\n x_append = x_flat\n else:\n x_append = torch.cat((x_append, x_flat), axis=-1)\n x_append = x_append.view(x_append.shape[0], -1)\n self.fnn = self.fnn.to(device)\n output = self.fnn(x_append)\n return output\n<|end_body_1|>\n\n<|body_start_2|>\n optimizer = optim.Adam(self.parameters(), self.learning_rate)\n criterion = torch.nn.MSELoss(reduction='mean')\n max_epoch = self.num_epochs\n for epoch in range(max_epoch):\n self.train()\n train_epoch_loss = 0\n for j, (src, trg) in enumerate(train_loader):\n trg = torch.as_tensor(trg).float().to(device)\n train_output = self.forward(src, device)\n loss = criterion(train_output, trg)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n train_epoch_loss += loss.item()\n print('Epoch: {}/{} Train Loss: {:.4f}'.format(epoch, max_epoch, train_epoch_loss / (j + 1)))\n<|end_body_2|>\n\n<|body_start_3|>\n val_trg = torch.as_tensor(val_trg).float().to(device)\n optimizer = optim.Adam(self.parameters(), self.learning_rate)\n criterion = torch.nn.MSELoss(reduction='mean')\n history = np.zeros((self.num_epochs, 2))\n max_epoch = self.num_epochs\n for epoch in range(max_epoch):\n self.train()\n train_epoch_loss = 0\n for j, (src, trg) in enumerate(train_loader):\n trg = torch.as_tensor(trg).float().to(device)\n train_output = self.forward(src, device)\n loss = criterion(train_output, trg)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n train_epoch_loss += loss.item()\n self.eval()\n val_output = self.forward(val_src, device)\n loss = criterion(val_output, val_trg)\n val_epoch_loss = loss.item()\n history[epoch] = [train_epoch_loss / (j + 1), val_epoch_loss]\n print('Epoch: {}/{} Train Loss: {:.4f} Validation Loss:{:.4f}'.format(epoch, self.num_epochs, train_epoch_loss / (j + 1), val_epoch_loss))\n return history\n<|end_body_3|>\n\n<|body_start_4|>\n self.eval()\n with torch.no_grad():\n output = self.forward(src, device)\n return output.detach().cpu().numpy()\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000365", "length_bytes": 6027, "license_type": "no_license", "methods": [{"docstring": "Initilize CNN model Args: num_var: int -- number of covariates as input, one CNN for each covariate input_dim: int -- dimension of the input for fully connected layers after apply cnn output_dim: int -- dimension of the output feature kernel_size: int -- Size of the convolving kernel srtide: int -- Stride of the convolution hidden_dim: int -- number of hidden units for fully connected layers num_layers: int -- number of hidden layers for fully connected layers num_epochs: int -- 
number of epochs to train learning_rate: float -- learning rate for ADAM", "name": "__init__", "signature": "def __init__(self, num_var, input_dim, output_dim, kernel_size=9, stride=5, hidden_dim=100, num_layers=2, num_epochs=100, learning_rate=0.001)"}, {"docstring": "Forward function", "name": "forward", "signature": "def forward(self, src, device)"}, {"docstring": "Fit function to CNN", "name": "fit", "signature": "def fit(self, train_loader, device)"}, {"docstring": "Fit function for hyper-parameter tuning", "name": "fit_cv", "signature": "def fit_cv(self, train_loader, val_src, val_trg, device)"}, {"docstring": "Predict function for trained CNN-FNN model to predict", "name": "predict", "signature": "def predict(self, src, device)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_val_001384", "prompt": "Implement the Python class `CnnFnn` described below.\n\nClass description:\nClass for CNN model\n\nMethod signatures and docstrings:\n- def __init__(self, num_var, input_dim, output_dim, kernel_size=9, stride=5, hidden_dim=100, num_layers=2, num_epochs=100, learning_rate=0.001): Initilize CNN model Args: num_var: int -- number of covariates as input, one CNN for each covariate input_dim: int -- dimension of the input for fully connected layers after apply cnn output_dim: int -- dimension of the output feature kernel_size: int -- Size of the convolving kernel srtide: int -- Stride of the convolution hidden_dim: int -- number of hidden units for fully connected layers num_layers: int -- number of hidden layers for fully connected layers num_epochs: int -- number of epochs to train learning_rate: float -- learning rate for ADAM\n- def forward(self, src, device): Forward function\n- def fit(self, train_loader, device): Fit function to CNN\n- def fit_cv(self, train_loader, val_src, val_trg, device): Fit function for hyper-parameter tuning\n- def predict(self, src, device): Predict function for trained CNN-FNN model to predict", "prompted_full_text": "Implement the Python class `CnnFnn` described below.\n\nClass description:\nClass for CNN model\n\nMethod signatures and docstrings:\n- def __init__(self, num_var, input_dim, output_dim, kernel_size=9, stride=5, hidden_dim=100, num_layers=2, num_epochs=100, learning_rate=0.001): Initilize CNN model Args: num_var: int -- number of covariates as input, one CNN for each covariate input_dim: int -- dimension of the input for fully connected layers after apply cnn output_dim: int -- dimension of the output feature kernel_size: int -- Size of the convolving kernel srtide: int -- Stride of the convolution hidden_dim: int -- number of hidden units for fully connected layers num_layers: int -- number of hidden layers for fully connected layers num_epochs: int -- number of epochs to train learning_rate: float -- learning rate for ADAM\n- def forward(self, src, device): Forward function\n- def fit(self, train_loader, device): Fit function to CNN\n- def fit_cv(self, train_loader, val_src, val_trg, device): Fit function for hyper-parameter tuning\n- def predict(self, src, device): Predict function for trained CNN-FNN model to predict\n\n<|skeleton|>\nclass CnnFnn:\n \"\"\"Class for CNN model\"\"\"\n\n def __init__(self, num_var, input_dim, output_dim, kernel_size=9, stride=5, hidden_dim=100, num_layers=2, num_epochs=100, learning_rate=0.001):\n \"\"\"Initilize CNN model Args: num_var: int -- number of covariates as input, one CNN for each covariate input_dim: int -- dimension of the input for fully connected layers after apply cnn output_dim: 
int -- dimension of the output feature kernel_size: int -- Size of the convolving kernel srtide: int -- Stride of the convolution hidden_dim: int -- number of hidden units for fully connected layers num_layers: int -- number of hidden layers for fully connected layers num_epochs: int -- number of epochs to train learning_rate: float -- learning rate for ADAM\"\"\"\n <|body_0|>\n\n def forward(self, src, device):\n \"\"\"Forward function\"\"\"\n <|body_1|>\n\n def fit(self, train_loader, device):\n \"\"\"Fit function to CNN\"\"\"\n <|body_2|>\n\n def fit_cv(self, train_loader, val_src, val_trg, device):\n \"\"\"Fit function for hyper-parameter tuning\"\"\"\n <|body_3|>\n\n def predict(self, src, device):\n \"\"\"Predict function for trained CNN-FNN model to predict\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(CnnFnn, self).__init__()\n self.num_var = num_var\n self.kernel_size = kernel_size\n self.stride = stride\n self.cnns = nn.ModuleList([nn.Sequential(nn.Conv3d(1, 1, (1, self.kernel_size, self.kernel_size), (1, self.stride, self.stride)), nn.ReLU(inplace=True)) for i in range(self.num_var)])\n self.input_dim = input_dim\n self.output_dim = output_dim\n self.hidden_dim = hidden_dim\n self.num_layers = num_layers\n self.learning_rate = learning_rate\n self.num_epochs = num_epochs\n self.fnn = ReluNet(input_dim=self.input_dim * 4, output_dim=self.output_dim, hidden_dim=self.hidden_dim, num_layers=self.num_layers, num_epochs=self.num_epochs)\n<|end_body_0|>\n\n<|body_start_1|>\n for i in range(self.num_var):\n input_x = torch.as_tensor(src[i]).float()\n input_x = input_x.to(device)\n x_out = self.cnns[i](input_x).squeeze(axis=1)\n x_flat = x_out.view(x_out.shape[0], x_out.shape[1], -1)\n if i == 0:\n x_append = x_flat\n else:\n x_append = torch.cat((x_append, x_flat), axis=-1)\n x_append = x_append.view(x_append.shape[0], -1)\n self.fnn = self.fnn.to(device)\n output = self.fnn(x_append)\n return output\n<|end_body_1|>\n\n<|body_start_2|>\n optimizer = optim.Adam(self.parameters(), self.learning_rate)\n criterion = torch.nn.MSELoss(reduction='mean')\n max_epoch = self.num_epochs\n for epoch in range(max_epoch):\n self.train()\n train_epoch_loss = 0\n for j, (src, trg) in enumerate(train_loader):\n trg = torch.as_tensor(trg).float().to(device)\n train_output = self.forward(src, device)\n loss = criterion(train_output, trg)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n train_epoch_loss += loss.item()\n print('Epoch: {}/{} Train Loss: {:.4f}'.format(epoch, max_epoch, train_epoch_loss / (j + 1)))\n<|end_body_2|>\n\n<|body_start_3|>\n val_trg = torch.as_tensor(val_trg).float().to(device)\n optimizer = optim.Adam(self.parameters(), self.learning_rate)\n criterion = torch.nn.MSELoss(reduction='mean')\n history = np.zeros((self.num_epochs, 2))\n max_epoch = self.num_epochs\n for epoch in range(max_epoch):\n self.train()\n train_epoch_loss = 0\n for j, (src, trg) in enumerate(train_loader):\n trg = torch.as_tensor(trg).float().to(device)\n train_output = self.forward(src, device)\n loss = criterion(train_output, trg)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n train_epoch_loss += loss.item()\n self.eval()\n val_output = self.forward(val_src, device)\n loss = criterion(val_output, val_trg)\n val_epoch_loss = loss.item()\n history[epoch] = [train_epoch_loss / (j + 1), val_epoch_loss]\n print('Epoch: {}/{} Train Loss: {:.4f} Validation Loss:{:.4f}'.format(epoch, self.num_epochs, train_epoch_loss / (j + 1), val_epoch_loss))\n return 
history\n<|end_body_3|>\n\n<|body_start_4|>\n self.eval()\n with torch.no_grad():\n output = self.forward(src, device)\n return output.detach().cpu().numpy()\n<|end_body_4|>\n", "revision_id": "d7e651024b07587b46497183d90934561a4839e2", "skeleton": "<|skeleton|>\nclass CnnFnn:\n \"\"\"Class for CNN model\"\"\"\n\n def __init__(self, num_var, input_dim, output_dim, kernel_size=9, stride=5, hidden_dim=100, num_layers=2, num_epochs=100, learning_rate=0.001):\n \"\"\"Initilize CNN model Args: num_var: int -- number of covariates as input, one CNN for each covariate input_dim: int -- dimension of the input for fully connected layers after apply cnn output_dim: int -- dimension of the output feature kernel_size: int -- Size of the convolving kernel srtide: int -- Stride of the convolution hidden_dim: int -- number of hidden units for fully connected layers num_layers: int -- number of hidden layers for fully connected layers num_epochs: int -- number of epochs to train learning_rate: float -- learning rate for ADAM\"\"\"\n <|body_0|>\n\n def forward(self, src, device):\n \"\"\"Forward function\"\"\"\n <|body_1|>\n\n def fit(self, train_loader, device):\n \"\"\"Fit function to CNN\"\"\"\n <|body_2|>\n\n def fit_cv(self, train_loader, val_src, val_trg, device):\n \"\"\"Fit function for hyper-parameter tuning\"\"\"\n <|body_3|>\n\n def predict(self, src, device):\n \"\"\"Predict function for trained CNN-FNN model to predict\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CnnFnn:\n \"\"\"Class for CNN model\"\"\"\n\n def __init__(self, num_var, input_dim, output_dim, kernel_size=9, stride=5, hidden_dim=100, num_layers=2, num_epochs=100, learning_rate=0.001):\n \"\"\"Initilize CNN model Args: num_var: int -- number of covariates as input, one CNN for each covariate input_dim: int -- dimension of the input for fully connected layers after apply cnn output_dim: int -- dimension of the output feature kernel_size: int -- Size of the convolving kernel srtide: int -- Stride of the convolution hidden_dim: int -- number of hidden units for fully connected layers num_layers: int -- number of hidden layers for fully connected layers num_epochs: int -- number of epochs to train learning_rate: float -- learning rate for ADAM\"\"\"\n super(CnnFnn, self).__init__()\n self.num_var = num_var\n self.kernel_size = kernel_size\n self.stride = stride\n self.cnns = nn.ModuleList([nn.Sequential(nn.Conv3d(1, 1, (1, self.kernel_size, self.kernel_size), (1, self.stride, self.stride)), nn.ReLU(inplace=True)) for i in range(self.num_var)])\n self.input_dim = input_dim\n self.output_dim = output_dim\n self.hidden_dim = hidden_dim\n self.num_layers = num_layers\n self.learning_rate = learning_rate\n self.num_epochs = num_epochs\n self.fnn = ReluNet(input_dim=self.input_dim * 4, output_dim=self.output_dim, hidden_dim=self.hidden_dim, num_layers=self.num_layers, num_epochs=self.num_epochs)\n\n def forward(self, src, device):\n \"\"\"Forward function\"\"\"\n for i in range(self.num_var):\n input_x = torch.as_tensor(src[i]).float()\n input_x = input_x.to(device)\n x_out = self.cnns[i](input_x).squeeze(axis=1)\n x_flat = x_out.view(x_out.shape[0], x_out.shape[1], -1)\n if i == 0:\n x_append = x_flat\n else:\n x_append = torch.cat((x_append, x_flat), axis=-1)\n x_append = x_append.view(x_append.shape[0], -1)\n self.fnn = self.fnn.to(device)\n output = self.fnn(x_append)\n return 
output\n\n def fit(self, train_loader, device):\n \"\"\"Fit function to CNN\"\"\"\n optimizer = optim.Adam(self.parameters(), self.learning_rate)\n criterion = torch.nn.MSELoss(reduction='mean')\n max_epoch = self.num_epochs\n for epoch in range(max_epoch):\n self.train()\n train_epoch_loss = 0\n for j, (src, trg) in enumerate(train_loader):\n trg = torch.as_tensor(trg).float().to(device)\n train_output = self.forward(src, device)\n loss = criterion(train_output, trg)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n train_epoch_loss += loss.item()\n print('Epoch: {}/{} Train Loss: {:.4f}'.format(epoch, max_epoch, train_epoch_loss / (j + 1)))\n\n def fit_cv(self, train_loader, val_src, val_trg, device):\n \"\"\"Fit function for hyper-parameter tuning\"\"\"\n val_trg = torch.as_tensor(val_trg).float().to(device)\n optimizer = optim.Adam(self.parameters(), self.learning_rate)\n criterion = torch.nn.MSELoss(reduction='mean')\n history = np.zeros((self.num_epochs, 2))\n max_epoch = self.num_epochs\n for epoch in range(max_epoch):\n self.train()\n train_epoch_loss = 0\n for j, (src, trg) in enumerate(train_loader):\n trg = torch.as_tensor(trg).float().to(device)\n train_output = self.forward(src, device)\n loss = criterion(train_output, trg)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n train_epoch_loss += loss.item()\n self.eval()\n val_output = self.forward(val_src, device)\n loss = criterion(val_output, val_trg)\n val_epoch_loss = loss.item()\n history[epoch] = [train_epoch_loss / (j + 1), val_epoch_loss]\n print('Epoch: {}/{} Train Loss: {:.4f} Validation Loss:{:.4f}'.format(epoch, self.num_epochs, train_epoch_loss / (j + 1), val_epoch_loss))\n return history\n\n def predict(self, src, device):\n \"\"\"Predict function for trained CNN-FNN model to predict\"\"\"\n self.eval()\n with torch.no_grad():\n output = self.forward(src, device)\n return output.detach().cpu().numpy()\n", "source": "the_stack_v2_python_sparse", "source_path": "model/cnn_fnn.py", "source_repo": "SSF-climate/SSF", "split": "val", "star_events_count": 7}
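Two things about the `CnnFnn` record above are worth spelling out. First, the skeleton declares `class CnnFnn:` with no base class, yet the bodies call `self.parameters()`, `self.train()`, `self.eval()` and use `nn.ModuleList`, so in practice the class must inherit from `torch.nn.Module`. Second, it constructs a `ReluNet` that is not part of the blob. Below is a hedged sketch of what that fully connected block might look like, assuming it is a plain MLP with ReLU activations; the real implementation in the source repo may differ.

import torch.nn as nn

class ReluNet(nn.Module):
    """Hypothetical MLP matching the constructor call in the record."""

    def __init__(self, input_dim, output_dim, hidden_dim=100,
                 num_layers=2, num_epochs=100):
        super().__init__()
        self.num_epochs = num_epochs  # kept for API parity; unused here
        layers = [nn.Linear(input_dim, hidden_dim), nn.ReLU(inplace=True)]
        for _ in range(num_layers - 1):
            layers += [nn.Linear(hidden_dim, hidden_dim), nn.ReLU(inplace=True)]
        layers.append(nn.Linear(hidden_dim, output_dim))
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)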
{"blob_id": "dd9da66b068ed0a0cd0e4bb90152487e71199a39", "bodies": ["self.update_graph('train')\nparams_net = self.model.get_net_parameters(grad_only=True)\nself.optimizer['train'].set_parameters(params_net)\nself.update_graph('valid')\nparams_arch = self.model.get_arch_parameters(grad_only=True)\nself.optimizer['valid'].set_parameters(params_arch)\nif self.comm.n_procs > 1:\n self._grads_net = [x.grad for x in params_net.values()]\n self._grads_arch = [x.grad for x in params_arch.values()]\n self.event.default_stream_synchronize()", "bz, p = (self.mbs_train, self.placeholder['train'])\nself.optimizer[key].zero_grad()\nif self.comm.n_procs > 1:\n self.event.default_stream_synchronize()\nfor _ in range(self.accum_train):\n self._load_data(p, self.dataloader['train'].next())\n p['loss'].forward(clear_no_need_grad=True)\n for k, m in p['metrics'].items():\n m.forward(clear_buffer=True)\n self.monitor.update(f'{k}/train', m.d.copy(), bz)\n p['loss'].backward(clear_buffer=True)\n loss = p['loss'].d.copy()\n self.monitor.update('loss/train', loss * self.accum_train, bz)\nif self.comm.n_procs > 1:\n self.comm.all_reduce(self._grads_net, division=True, inplace=False)\n self.event.add_default_stream_event()\nself.optimizer[key].update()", "bz, p = (self.mbs_valid, self.placeholder['valid'])\nself.optimizer['valid'].zero_grad()\nif self.comm.n_procs > 1:\n self.event.default_stream_synchronize()\nfor _ in range(self.accum_valid):\n self._load_data(p, self.dataloader['valid'].next())\n p['loss'].forward(clear_no_need_grad=True)\n for k, m in p['metrics'].items():\n m.forward(clear_buffer=True)\n self.monitor.update(f'{k}/valid', m.d.copy(), bz)\n p['loss'].backward(clear_buffer=True)\n loss = p['loss'].d.copy()\n self.monitor.update('loss/valid', loss * self.accum_valid, bz)\nif self.comm.n_procs > 1:\n self.comm.all_reduce(self._grads_arch, division=True, inplace=False)\n self.event.add_default_stream_event()\nself.optimizer['valid'].update()"], "bodies_text": "<|body_start_0|>\n self.update_graph('train')\n params_net = self.model.get_net_parameters(grad_only=True)\n self.optimizer['train'].set_parameters(params_net)\n self.update_graph('valid')\n params_arch = self.model.get_arch_parameters(grad_only=True)\n self.optimizer['valid'].set_parameters(params_arch)\n if self.comm.n_procs > 1:\n self._grads_net = [x.grad for x in params_net.values()]\n self._grads_arch = [x.grad for x in params_arch.values()]\n self.event.default_stream_synchronize()\n<|end_body_0|>\n\n<|body_start_1|>\n bz, p = (self.mbs_train, self.placeholder['train'])\n self.optimizer[key].zero_grad()\n if self.comm.n_procs > 1:\n self.event.default_stream_synchronize()\n for _ in range(self.accum_train):\n self._load_data(p, self.dataloader['train'].next())\n p['loss'].forward(clear_no_need_grad=True)\n for k, m in p['metrics'].items():\n m.forward(clear_buffer=True)\n self.monitor.update(f'{k}/train', m.d.copy(), bz)\n p['loss'].backward(clear_buffer=True)\n loss = p['loss'].d.copy()\n self.monitor.update('loss/train', loss * self.accum_train, bz)\n if self.comm.n_procs > 1:\n self.comm.all_reduce(self._grads_net, division=True, inplace=False)\n self.event.add_default_stream_event()\n self.optimizer[key].update()\n<|end_body_1|>\n\n<|body_start_2|>\n bz, p = (self.mbs_valid, self.placeholder['valid'])\n self.optimizer['valid'].zero_grad()\n if self.comm.n_procs > 1:\n self.event.default_stream_synchronize()\n for _ in range(self.accum_valid):\n self._load_data(p, self.dataloader['valid'].next())\n 
p['loss'].forward(clear_no_need_grad=True)\n for k, m in p['metrics'].items():\n m.forward(clear_buffer=True)\n self.monitor.update(f'{k}/valid', m.d.copy(), bz)\n p['loss'].backward(clear_buffer=True)\n loss = p['loss'].d.copy()\n self.monitor.update('loss/valid', loss * self.accum_valid, bz)\n if self.comm.n_procs > 1:\n self.comm.all_reduce(self._grads_arch, division=True, inplace=False)\n self.event.add_default_stream_event()\n self.optimizer['valid'].update()\n<|end_body_2|>\n", "class_docstring": "An implementation of DARTS: Differentiable Architecture Search.", "class_name": "DartsSearcher", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DartsSearcher:\n \"\"\"An implementation of DARTS: Differentiable Architecture Search.\"\"\"\n\n def callback_on_start(self):\n \"\"\"Builds the graphs and assigns parameters to the optimizers.\"\"\"\n <|body_0|>\n\n def train_on_batch(self, key='train'):\n \"\"\"Updates the model parameters.\"\"\"\n <|body_1|>\n\n def valid_on_batch(self):\n \"\"\"Updates the architecture parameters.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.update_graph('train')\n params_net = self.model.get_net_parameters(grad_only=True)\n self.optimizer['train'].set_parameters(params_net)\n self.update_graph('valid')\n params_arch = self.model.get_arch_parameters(grad_only=True)\n self.optimizer['valid'].set_parameters(params_arch)\n if self.comm.n_procs > 1:\n self._grads_net = [x.grad for x in params_net.values()]\n self._grads_arch = [x.grad for x in params_arch.values()]\n self.event.default_stream_synchronize()\n<|end_body_0|>\n\n<|body_start_1|>\n bz, p = (self.mbs_train, self.placeholder['train'])\n self.optimizer[key].zero_grad()\n if self.comm.n_procs > 1:\n self.event.default_stream_synchronize()\n for _ in range(self.accum_train):\n self._load_data(p, self.dataloader['train'].next())\n p['loss'].forward(clear_no_need_grad=True)\n for k, m in p['metrics'].items():\n m.forward(clear_buffer=True)\n self.monitor.update(f'{k}/train', m.d.copy(), bz)\n p['loss'].backward(clear_buffer=True)\n loss = p['loss'].d.copy()\n self.monitor.update('loss/train', loss * self.accum_train, bz)\n if self.comm.n_procs > 1:\n self.comm.all_reduce(self._grads_net, division=True, inplace=False)\n self.event.add_default_stream_event()\n self.optimizer[key].update()\n<|end_body_1|>\n\n<|body_start_2|>\n bz, p = (self.mbs_valid, self.placeholder['valid'])\n self.optimizer['valid'].zero_grad()\n if self.comm.n_procs > 1:\n self.event.default_stream_synchronize()\n for _ in range(self.accum_valid):\n self._load_data(p, self.dataloader['valid'].next())\n p['loss'].forward(clear_no_need_grad=True)\n for k, m in p['metrics'].items():\n m.forward(clear_buffer=True)\n self.monitor.update(f'{k}/valid', m.d.copy(), bz)\n p['loss'].backward(clear_buffer=True)\n loss = p['loss'].d.copy()\n self.monitor.update('loss/valid', loss * self.accum_valid, bz)\n if self.comm.n_procs > 1:\n self.comm.all_reduce(self._grads_arch, division=True, inplace=False)\n self.event.add_default_stream_event()\n self.optimizer['valid'].update()\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000366", "length_bytes": 3326, "license_type": "permissive", "methods": [{"docstring": "Builds the graphs and assigns parameters to the optimizers.", "name": "callback_on_start", "signature": "def callback_on_start(self)"}, {"docstring": "Updates the model parameters.", "name": "train_on_batch", "signature": "def 
train_on_batch(self, key='train')"}, {"docstring": "Updates the architecture parameters.", "name": "valid_on_batch", "signature": "def valid_on_batch(self)"}], "n_methods": 3, "prompt": "Implement the Python class `DartsSearcher` described below.\n\nClass description:\nAn implementation of DARTS: Differentiable Architecture Search.\n\nMethod signatures and docstrings:\n- def callback_on_start(self): Builds the graphs and assigns parameters to the optimizers.\n- def train_on_batch(self, key='train'): Updates the model parameters.\n- def valid_on_batch(self): Updates the architecture parameters.", "prompted_full_text": "Implement the Python class `DartsSearcher` described below.\n\nClass description:\nAn implementation of DARTS: Differentiable Architecture Search.\n\nMethod signatures and docstrings:\n- def callback_on_start(self): Builds the graphs and assigns parameters to the optimizers.\n- def train_on_batch(self, key='train'): Updates the model parameters.\n- def valid_on_batch(self): Updates the architecture parameters.\n\n<|skeleton|>\nclass DartsSearcher:\n \"\"\"An implementation of DARTS: Differentiable Architecture Search.\"\"\"\n\n def callback_on_start(self):\n \"\"\"Builds the graphs and assigns parameters to the optimizers.\"\"\"\n <|body_0|>\n\n def train_on_batch(self, key='train'):\n \"\"\"Updates the model parameters.\"\"\"\n <|body_1|>\n\n def valid_on_batch(self):\n \"\"\"Updates the architecture parameters.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.update_graph('train')\n params_net = self.model.get_net_parameters(grad_only=True)\n self.optimizer['train'].set_parameters(params_net)\n self.update_graph('valid')\n params_arch = self.model.get_arch_parameters(grad_only=True)\n self.optimizer['valid'].set_parameters(params_arch)\n if self.comm.n_procs > 1:\n self._grads_net = [x.grad for x in params_net.values()]\n self._grads_arch = [x.grad for x in params_arch.values()]\n self.event.default_stream_synchronize()\n<|end_body_0|>\n\n<|body_start_1|>\n bz, p = (self.mbs_train, self.placeholder['train'])\n self.optimizer[key].zero_grad()\n if self.comm.n_procs > 1:\n self.event.default_stream_synchronize()\n for _ in range(self.accum_train):\n self._load_data(p, self.dataloader['train'].next())\n p['loss'].forward(clear_no_need_grad=True)\n for k, m in p['metrics'].items():\n m.forward(clear_buffer=True)\n self.monitor.update(f'{k}/train', m.d.copy(), bz)\n p['loss'].backward(clear_buffer=True)\n loss = p['loss'].d.copy()\n self.monitor.update('loss/train', loss * self.accum_train, bz)\n if self.comm.n_procs > 1:\n self.comm.all_reduce(self._grads_net, division=True, inplace=False)\n self.event.add_default_stream_event()\n self.optimizer[key].update()\n<|end_body_1|>\n\n<|body_start_2|>\n bz, p = (self.mbs_valid, self.placeholder['valid'])\n self.optimizer['valid'].zero_grad()\n if self.comm.n_procs > 1:\n self.event.default_stream_synchronize()\n for _ in range(self.accum_valid):\n self._load_data(p, self.dataloader['valid'].next())\n p['loss'].forward(clear_no_need_grad=True)\n for k, m in p['metrics'].items():\n m.forward(clear_buffer=True)\n self.monitor.update(f'{k}/valid', m.d.copy(), bz)\n p['loss'].backward(clear_buffer=True)\n loss = p['loss'].d.copy()\n self.monitor.update('loss/valid', loss * self.accum_valid, bz)\n if self.comm.n_procs > 1:\n self.comm.all_reduce(self._grads_arch, division=True, inplace=False)\n self.event.add_default_stream_event()\n self.optimizer['valid'].update()\n<|end_body_2|>\n", "revision_id": 
"269deb8229fda0f0901c47d21ac5ce244f403f63", "skeleton": "<|skeleton|>\nclass DartsSearcher:\n \"\"\"An implementation of DARTS: Differentiable Architecture Search.\"\"\"\n\n def callback_on_start(self):\n \"\"\"Builds the graphs and assigns parameters to the optimizers.\"\"\"\n <|body_0|>\n\n def train_on_batch(self, key='train'):\n \"\"\"Updates the model parameters.\"\"\"\n <|body_1|>\n\n def valid_on_batch(self):\n \"\"\"Updates the architecture parameters.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DartsSearcher:\n \"\"\"An implementation of DARTS: Differentiable Architecture Search.\"\"\"\n\n def callback_on_start(self):\n \"\"\"Builds the graphs and assigns parameters to the optimizers.\"\"\"\n self.update_graph('train')\n params_net = self.model.get_net_parameters(grad_only=True)\n self.optimizer['train'].set_parameters(params_net)\n self.update_graph('valid')\n params_arch = self.model.get_arch_parameters(grad_only=True)\n self.optimizer['valid'].set_parameters(params_arch)\n if self.comm.n_procs > 1:\n self._grads_net = [x.grad for x in params_net.values()]\n self._grads_arch = [x.grad for x in params_arch.values()]\n self.event.default_stream_synchronize()\n\n def train_on_batch(self, key='train'):\n \"\"\"Updates the model parameters.\"\"\"\n bz, p = (self.mbs_train, self.placeholder['train'])\n self.optimizer[key].zero_grad()\n if self.comm.n_procs > 1:\n self.event.default_stream_synchronize()\n for _ in range(self.accum_train):\n self._load_data(p, self.dataloader['train'].next())\n p['loss'].forward(clear_no_need_grad=True)\n for k, m in p['metrics'].items():\n m.forward(clear_buffer=True)\n self.monitor.update(f'{k}/train', m.d.copy(), bz)\n p['loss'].backward(clear_buffer=True)\n loss = p['loss'].d.copy()\n self.monitor.update('loss/train', loss * self.accum_train, bz)\n if self.comm.n_procs > 1:\n self.comm.all_reduce(self._grads_net, division=True, inplace=False)\n self.event.add_default_stream_event()\n self.optimizer[key].update()\n\n def valid_on_batch(self):\n \"\"\"Updates the architecture parameters.\"\"\"\n bz, p = (self.mbs_valid, self.placeholder['valid'])\n self.optimizer['valid'].zero_grad()\n if self.comm.n_procs > 1:\n self.event.default_stream_synchronize()\n for _ in range(self.accum_valid):\n self._load_data(p, self.dataloader['valid'].next())\n p['loss'].forward(clear_no_need_grad=True)\n for k, m in p['metrics'].items():\n m.forward(clear_buffer=True)\n self.monitor.update(f'{k}/valid', m.d.copy(), bz)\n p['loss'].backward(clear_buffer=True)\n loss = p['loss'].d.copy()\n self.monitor.update('loss/valid', loss * self.accum_valid, bz)\n if self.comm.n_procs > 1:\n self.comm.all_reduce(self._grads_arch, division=True, inplace=False)\n self.event.add_default_stream_event()\n self.optimizer['valid'].update()\n", "source": "the_stack_v2_python_sparse", "source_path": "nnabla_nas/runner/searcher/darts.py", "source_repo": "jie311/nnabla-nas", "split": "val", "star_events_count": 0}
{"blob_id": "8a596f6fab3d5ccc43bb38f9713a6283aa43b37a", "bodies": ["try:\n return template_version_manager_api.get_by_id(pk, request=request)\nexcept exceptions.DoesNotExist:\n raise Http404", "try:\n template_version_manager_object = self.get_object(pk, request=request)\n serializer = TemplateVersionManagerSerializer(template_version_manager_object)\n return Response(serializer.data)\nexcept Http404:\n content = {'message': 'Template Version Manager not found.'}\n return Response(content, status=status.HTTP_404_NOT_FOUND)\nexcept Exception as api_exception:\n content = {'message': str(api_exception)}\n return Response(content, status=status.HTTP_500_INTERNAL_SERVER_ERROR)"], "bodies_text": "<|body_start_0|>\n try:\n return template_version_manager_api.get_by_id(pk, request=request)\n except exceptions.DoesNotExist:\n raise Http404\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n template_version_manager_object = self.get_object(pk, request=request)\n serializer = TemplateVersionManagerSerializer(template_version_manager_object)\n return Response(serializer.data)\n except Http404:\n content = {'message': 'Template Version Manager not found.'}\n return Response(content, status=status.HTTP_404_NOT_FOUND)\n except Exception as api_exception:\n content = {'message': str(api_exception)}\n return Response(content, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n<|end_body_1|>\n", "class_docstring": "Retrieve a TemplateVersionManager", "class_name": "TemplateVersionManagerDetail", "detected_licenses": ["NIST-Software"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TemplateVersionManagerDetail:\n \"\"\"Retrieve a TemplateVersionManager\"\"\"\n\n def get_object(self, pk, request):\n \"\"\"Get TemplateVersionManager from db Args: pk: ObjectId request: Returns: TemplateVersionManager\"\"\"\n <|body_0|>\n\n def get(self, request, pk):\n \"\"\"Retrieve a TemplateVersionManager Args: request: HTTP request pk: ObjectId Returns: - code: 200 content: TemplateVersionManager - code: 404 content: Object was not found - code: 500 content: Internal server error\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n return template_version_manager_api.get_by_id(pk, request=request)\n except exceptions.DoesNotExist:\n raise Http404\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n template_version_manager_object = self.get_object(pk, request=request)\n serializer = TemplateVersionManagerSerializer(template_version_manager_object)\n return Response(serializer.data)\n except Http404:\n content = {'message': 'Template Version Manager not found.'}\n return Response(content, status=status.HTTP_404_NOT_FOUND)\n except Exception as api_exception:\n content = {'message': str(api_exception)}\n return Response(content, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000367", "length_bytes": 12279, "license_type": "permissive", "methods": [{"docstring": "Get TemplateVersionManager from db Args: pk: ObjectId request: Returns: TemplateVersionManager", "name": "get_object", "signature": "def get_object(self, pk, request)"}, {"docstring": "Retrieve a TemplateVersionManager Args: request: HTTP request pk: ObjectId Returns: - code: 200 content: TemplateVersionManager - code: 404 content: Object was not found - code: 500 content: Internal server error", "name": "get", "signature": "def get(self, request, pk)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_045508", "prompt": "Implement the Python class 
`TemplateVersionManagerDetail` described below.\n\nClass description:\nRetrieve a TemplateVersionManager\n\nMethod signatures and docstrings:\n- def get_object(self, pk, request): Get TemplateVersionManager from db Args: pk: ObjectId request: Returns: TemplateVersionManager\n- def get(self, request, pk): Retrieve a TemplateVersionManager Args: request: HTTP request pk: ObjectId Returns: - code: 200 content: TemplateVersionManager - code: 404 content: Object was not found - code: 500 content: Internal server error", "prompted_full_text": "Implement the Python class `TemplateVersionManagerDetail` described below.\n\nClass description:\nRetrieve a TemplateVersionManager\n\nMethod signatures and docstrings:\n- def get_object(self, pk, request): Get TemplateVersionManager from db Args: pk: ObjectId request: Returns: TemplateVersionManager\n- def get(self, request, pk): Retrieve a TemplateVersionManager Args: request: HTTP request pk: ObjectId Returns: - code: 200 content: TemplateVersionManager - code: 404 content: Object was not found - code: 500 content: Internal server error\n\n<|skeleton|>\nclass TemplateVersionManagerDetail:\n \"\"\"Retrieve a TemplateVersionManager\"\"\"\n\n def get_object(self, pk, request):\n \"\"\"Get TemplateVersionManager from db Args: pk: ObjectId request: Returns: TemplateVersionManager\"\"\"\n <|body_0|>\n\n def get(self, request, pk):\n \"\"\"Retrieve a TemplateVersionManager Args: request: HTTP request pk: ObjectId Returns: - code: 200 content: TemplateVersionManager - code: 404 content: Object was not found - code: 500 content: Internal server error\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n return template_version_manager_api.get_by_id(pk, request=request)\n except exceptions.DoesNotExist:\n raise Http404\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n template_version_manager_object = self.get_object(pk, request=request)\n serializer = TemplateVersionManagerSerializer(template_version_manager_object)\n return Response(serializer.data)\n except Http404:\n content = {'message': 'Template Version Manager not found.'}\n return Response(content, status=status.HTTP_404_NOT_FOUND)\n except Exception as api_exception:\n content = {'message': str(api_exception)}\n return Response(content, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n<|end_body_1|>\n", "revision_id": "f032036d95076f92b164389fdbec7415567e7b0f", "skeleton": "<|skeleton|>\nclass TemplateVersionManagerDetail:\n \"\"\"Retrieve a TemplateVersionManager\"\"\"\n\n def get_object(self, pk, request):\n \"\"\"Get TemplateVersionManager from db Args: pk: ObjectId request: Returns: TemplateVersionManager\"\"\"\n <|body_0|>\n\n def get(self, request, pk):\n \"\"\"Retrieve a TemplateVersionManager Args: request: HTTP request pk: ObjectId Returns: - code: 200 content: TemplateVersionManager - code: 404 content: Object was not found - code: 500 content: Internal server error\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TemplateVersionManagerDetail:\n \"\"\"Retrieve a TemplateVersionManager\"\"\"\n\n def get_object(self, pk, request):\n \"\"\"Get TemplateVersionManager from db Args: pk: ObjectId request: Returns: TemplateVersionManager\"\"\"\n try:\n return template_version_manager_api.get_by_id(pk, request=request)\n except exceptions.DoesNotExist:\n raise Http404\n\n def get(self, request, pk):\n \"\"\"Retrieve a TemplateVersionManager Args: 
request: HTTP request pk: ObjectId Returns: - code: 200 content: TemplateVersionManager - code: 404 content: Object was not found - code: 500 content: Internal server error\"\"\"\n try:\n template_version_manager_object = self.get_object(pk, request=request)\n serializer = TemplateVersionManagerSerializer(template_version_manager_object)\n return Response(serializer.data)\n except Http404:\n content = {'message': 'Template Version Manager not found.'}\n return Response(content, status=status.HTTP_404_NOT_FOUND)\n except Exception as api_exception:\n content = {'message': str(api_exception)}\n return Response(content, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n", "source": "the_stack_v2_python_sparse", "source_path": "core_main_app/rest/template_version_manager/views.py", "source_repo": "usnistgov/core_main_app", "split": "val", "star_events_count": 3}
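The view in the record above follows the Django REST Framework `APIView` idiom (`Response`, `status`, `Http404`), although the skeleton strips the base class. A sketch of how such a view is typically wired into a urlconf, assuming `TemplateVersionManagerDetail` subclasses `APIView` so that `as_view()` exists; the route pattern and name are made up for illustration.

from django.urls import re_path

urlpatterns = [
    re_path(r'^template-version-manager/(?P<pk>\w+)/$',
            TemplateVersionManagerDetail.as_view(),
            name='template_version_manager_detail'),  # hypothetical name
]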
{"blob_id": "84646463273a373a72a32de11730d3b054953826", "bodies": ["n = len(nums)\nif n == 0:\n return\ndp = [0] * n\ndp[0] = nums[0]\nfor i in range(1, n):\n dp[i] = dp[i - 1] + nums[i]\nself.dp = dp", "\"\"\"(i, j) = (0, j) - (0, i-1)\"\"\"\nif self.dp is None:\n return 0\nif i == 0:\n return self.dp[j]\nreturn self.dp[j] - self.dp[i - 1]"], "bodies_text": "<|body_start_0|>\n n = len(nums)\n if n == 0:\n return\n dp = [0] * n\n dp[0] = nums[0]\n for i in range(1, n):\n dp[i] = dp[i - 1] + nums[i]\n self.dp = dp\n<|end_body_0|>\n\n<|body_start_1|>\n \"\"\"(i, j) = (0, j) - (0, i-1)\"\"\"\n if self.dp is None:\n return 0\n if i == 0:\n return self.dp[j]\n return self.dp[j] - self.dp[i - 1]\n<|end_body_1|>\n", "class_docstring": "", "class_name": "NumArray", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass NumArray:\n\n def __init__(self, nums: List[int]):\n \"\"\"Precompute, Time: O(n), Space: O(n)\"\"\"\n <|body_0|>\n\n def sumRange(self, i: int, j: int) -> int:\n \"\"\"Time: O(1)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n n = len(nums)\n if n == 0:\n return\n dp = [0] * n\n dp[0] = nums[0]\n for i in range(1, n):\n dp[i] = dp[i - 1] + nums[i]\n self.dp = dp\n<|end_body_0|>\n\n<|body_start_1|>\n \"\"\"(i, j) = (0, j) - (0, i-1)\"\"\"\n if self.dp is None:\n return 0\n if i == 0:\n return self.dp[j]\n return self.dp[j] - self.dp[i - 1]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000368", "length_bytes": 1378, "license_type": "no_license", "methods": [{"docstring": "Precompute, Time: O(n), Space: O(n)", "name": "__init__", "signature": "def __init__(self, nums: List[int])"}, {"docstring": "Time: O(1)", "name": "sumRange", "signature": "def sumRange(self, i: int, j: int) -> int"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005383", "prompt": "Implement the Python class `NumArray` described below.\n\nClass description:\nImplement the NumArray class.\n\nMethod signatures and docstrings:\n- def __init__(self, nums: List[int]): Precompute, Time: O(n), Space: O(n)\n- def sumRange(self, i: int, j: int) -> int: Time: O(1)", "prompted_full_text": "Implement the Python class `NumArray` described below.\n\nClass description:\nImplement the NumArray class.\n\nMethod signatures and docstrings:\n- def __init__(self, nums: List[int]): Precompute, Time: O(n), Space: O(n)\n- def sumRange(self, i: int, j: int) -> int: Time: O(1)\n\n<|skeleton|>\nclass NumArray:\n\n def __init__(self, nums: List[int]):\n \"\"\"Precompute, Time: O(n), Space: O(n)\"\"\"\n <|body_0|>\n\n def sumRange(self, i: int, j: int) -> int:\n \"\"\"Time: O(1)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n n = len(nums)\n if n == 0:\n return\n dp = [0] * n\n dp[0] = nums[0]\n for i in range(1, n):\n dp[i] = dp[i - 1] + nums[i]\n self.dp = dp\n<|end_body_0|>\n\n<|body_start_1|>\n \"\"\"(i, j) = (0, j) - (0, i-1)\"\"\"\n if self.dp is None:\n return 0\n if i == 0:\n return self.dp[j]\n return self.dp[j] - self.dp[i - 1]\n<|end_body_1|>\n", "revision_id": "72136e3487d239f5b37e2d6393e034262a6bf599", "skeleton": "<|skeleton|>\nclass NumArray:\n\n def __init__(self, nums: List[int]):\n \"\"\"Precompute, Time: O(n), Space: O(n)\"\"\"\n <|body_0|>\n\n def sumRange(self, i: int, j: int) -> int:\n \"\"\"Time: O(1)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class 
NumArray:\n def __init__(self, nums: List[int]):\n \"\"\"Precompute, Time: O(n), Space: O(n)\"\"\"\n n = len(nums)\n if n == 0:\n return\n dp = [0] * n\n dp[0] = nums[0]\n for i in range(1, n):\n dp[i] = dp[i - 1] + nums[i]\n self.dp = dp\n\n def sumRange(self, i: int, j: int) -> int:\n \"\"\"Time: O(1)\"\"\"\n \"\"\"(i, j) = (0, j) - (0, i-1)\"\"\"\n if self.dp is None:\n return 0\n if i == 0:\n return self.dp[j]\n return self.dp[j] - self.dp[i - 1]\n", "source": "the_stack_v2_python_sparse", "source_path": "python/303-Range Sum Query - Immutable.py", "source_repo": "cwza/leetcode", "split": "val", "star_events_count": 0}
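A quick check of the prefix-sum identity the `NumArray` record uses, sum(i, j) = dp[j] - dp[i-1], on the classic LeetCode 303 example. Note one latent wrinkle in the record: for an empty `nums` the early `return` leaves `self.dp` unset, so the `if self.dp is None` guard in `sumRange` would raise `AttributeError` rather than return 0.

nums = [-2, 0, 3, -5, 2, -1]
na = NumArray(nums)              # dp == [-2, -2, 1, -4, -2, -3]
assert na.sumRange(0, 2) == 1    # -2 + 0 + 3
assert na.sumRange(2, 5) == -1   # dp[5] - dp[1] == -3 - (-2)
assert na.sumRange(0, 5) == -3   # dp[5]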
{"blob_id": "870695019b596fccb346640fcc0cc1ea0d9c45d8", "bodies": ["self._dependents.append(name)\nif the_item is not None:\n self.add_value(name, the_item)", "the_item = super().load_item(*args, **kwargs)\nthe_item._dependents = self._dependents\nreturn the_item"], "bodies_text": "<|body_start_0|>\n self._dependents.append(name)\n if the_item is not None:\n self.add_value(name, the_item)\n<|end_body_0|>\n\n<|body_start_1|>\n the_item = super().load_item(*args, **kwargs)\n the_item._dependents = self._dependents\n return the_item\n<|end_body_1|>\n", "class_docstring": "base class for loading items from responses", "class_name": "BaseLoader", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BaseLoader:\n \"\"\"base class for loading items from responses\"\"\"\n\n def add_dependent(self, name, the_item):\n \"\"\"adds a dependent item to the item loader\"\"\"\n <|body_0|>\n\n def load_item(self, *args, **kwargs):\n \"\"\"subclasses load_item to handle dependents\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._dependents.append(name)\n if the_item is not None:\n self.add_value(name, the_item)\n<|end_body_0|>\n\n<|body_start_1|>\n the_item = super().load_item(*args, **kwargs)\n the_item._dependents = self._dependents\n return the_item\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000369", "length_bytes": 1609, "license_type": "permissive", "methods": [{"docstring": "adds a dependent item to the item loader", "name": "add_dependent", "signature": "def add_dependent(self, name, the_item)"}, {"docstring": "subclasses load_item to handle dependents", "name": "load_item", "signature": "def load_item(self, *args, **kwargs)"}], "n_methods": 2, "prompt": "Implement the Python class `BaseLoader` described below.\n\nClass description:\nbase class for loading items from responses\n\nMethod signatures and docstrings:\n- def add_dependent(self, name, the_item): adds a dependent item to the item loader\n- def load_item(self, *args, **kwargs): subclasses load_item to handle dependents", "prompted_full_text": "Implement the Python class `BaseLoader` described below.\n\nClass description:\nbase class for loading items from responses\n\nMethod signatures and docstrings:\n- def add_dependent(self, name, the_item): adds a dependent item to the item loader\n- def load_item(self, *args, **kwargs): subclasses load_item to handle dependents\n\n<|skeleton|>\nclass BaseLoader:\n \"\"\"base class for loading items from responses\"\"\"\n\n def add_dependent(self, name, the_item):\n \"\"\"adds a dependent item to the item loader\"\"\"\n <|body_0|>\n\n def load_item(self, *args, **kwargs):\n \"\"\"subclasses load_item to handle dependents\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._dependents.append(name)\n if the_item is not None:\n self.add_value(name, the_item)\n<|end_body_0|>\n\n<|body_start_1|>\n the_item = super().load_item(*args, **kwargs)\n the_item._dependents = self._dependents\n return the_item\n<|end_body_1|>\n", "revision_id": "c0c38c7b02f41f482b01f145b0348ecbb82952a9", "skeleton": "<|skeleton|>\nclass BaseLoader:\n \"\"\"base class for loading items from responses\"\"\"\n\n def add_dependent(self, name, the_item):\n \"\"\"adds a dependent item to the item loader\"\"\"\n <|body_0|>\n\n def load_item(self, *args, **kwargs):\n \"\"\"subclasses load_item to handle dependents\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", 
"snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class BaseLoader:\n \"\"\"base class for loading items from responses\"\"\"\n\n def add_dependent(self, name, the_item):\n \"\"\"adds a dependent item to the item loader\"\"\"\n self._dependents.append(name)\n if the_item is not None:\n self.add_value(name, the_item)\n\n def load_item(self, *args, **kwargs):\n \"\"\"subclasses load_item to handle dependents\"\"\"\n the_item = super().load_item(*args, **kwargs)\n the_item._dependents = self._dependents\n return the_item\n", "source": "the_stack_v2_python_sparse", "source_path": "slick/item.py", "source_repo": "underscorenygren/slick", "split": "val", "star_events_count": 1}
{"blob_id": "3b1be573dd86c4f7d7ab714d67d36a4482c1077c", "bodies": ["self.logger = logging.getLogger('simple')\nself.cfg = cfg\nself.executor = executor\nself.tasklist = []\nfor key, anl_params in cfg['analysis'].items():\n try:\n self.tasklist.append(get_analysis_task(key, anl_params, cfg['storage']))\n except NameError as e:\n self.logger.error(f'Could not find a suitable analysis task: {e}')\n continue\n self.logger.info(f'Added {key} to analysis task list')", "self.logger.info(f'Submitting timechunk {timechunk.tb.chunk_idx} to analysis tasklist')\nfor task in self.tasklist:\n task.execute(timechunk, self.executor)"], "bodies_text": "<|body_start_0|>\n self.logger = logging.getLogger('simple')\n self.cfg = cfg\n self.executor = executor\n self.tasklist = []\n for key, anl_params in cfg['analysis'].items():\n try:\n self.tasklist.append(get_analysis_task(key, anl_params, cfg['storage']))\n except NameError as e:\n self.logger.error(f'Could not find a suitable analysis task: {e}')\n continue\n self.logger.info(f'Added {key} to analysis task list')\n<|end_body_0|>\n\n<|body_start_1|>\n self.logger.info(f'Submitting timechunk {timechunk.tb.chunk_idx} to analysis tasklist')\n for task in self.tasklist:\n task.execute(timechunk, self.executor)\n<|end_body_1|>\n", "class_docstring": "Defines an analysis task list. This class defines an task list that is executed in parallel on an executor.", "class_name": "tasklist", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass tasklist:\n \"\"\"Defines an analysis task list. This class defines an task list that is executed in parallel on an executor.\"\"\"\n\n def __init__(self, executor, cfg):\n \"\"\"Configures the analysis tasklist from a dictionary. For each key-value pair in \"cfg_analysis\", an analysis task is instantiated and appended to a list. On a call to execute, all tasks are launched with the current data. Args: executor (PEP-3148 style executor): Executor on which all analysis tasks are launched cfg: Delta configuration Returns: None\"\"\"\n <|body_0|>\n\n def execute(self, timechunk):\n \"\"\"Execute all analysis tasks. Args: timechunk (timechunk): A time-chunk of 2D image data. Returns: None\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.logger = logging.getLogger('simple')\n self.cfg = cfg\n self.executor = executor\n self.tasklist = []\n for key, anl_params in cfg['analysis'].items():\n try:\n self.tasklist.append(get_analysis_task(key, anl_params, cfg['storage']))\n except NameError as e:\n self.logger.error(f'Could not find a suitable analysis task: {e}')\n continue\n self.logger.info(f'Added {key} to analysis task list')\n<|end_body_0|>\n\n<|body_start_1|>\n self.logger.info(f'Submitting timechunk {timechunk.tb.chunk_idx} to analysis tasklist')\n for task in self.tasklist:\n task.execute(timechunk, self.executor)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000370", "length_bytes": 1744, "license_type": "no_license", "methods": [{"docstring": "Configures the analysis tasklist from a dictionary. For each key-value pair in \"cfg_analysis\", an analysis task is instantiated and appended to a list. On a call to execute, all tasks are launched with the current data. Args: executor (PEP-3148 style executor): Executor on which all analysis tasks are launched cfg: Delta configuration Returns: None", "name": "__init__", "signature": "def __init__(self, executor, cfg)"}, {"docstring": "Execute all analysis tasks. 
Args: timechunk (timechunk): A time-chunk of 2D image data. Returns: None", "name": "execute", "signature": "def execute(self, timechunk)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_015866", "prompt": "Implement the Python class `tasklist` described below.\n\nClass description:\nDefines an analysis task list. This class defines an task list that is executed in parallel on an executor.\n\nMethod signatures and docstrings:\n- def __init__(self, executor, cfg): Configures the analysis tasklist from a dictionary. For each key-value pair in \"cfg_analysis\", an analysis task is instantiated and appended to a list. On a call to execute, all tasks are launched with the current data. Args: executor (PEP-3148 style executor): Executor on which all analysis tasks are launched cfg: Delta configuration Returns: None\n- def execute(self, timechunk): Execute all analysis tasks. Args: timechunk (timechunk): A time-chunk of 2D image data. Returns: None", "prompted_full_text": "Implement the Python class `tasklist` described below.\n\nClass description:\nDefines an analysis task list. This class defines an task list that is executed in parallel on an executor.\n\nMethod signatures and docstrings:\n- def __init__(self, executor, cfg): Configures the analysis tasklist from a dictionary. For each key-value pair in \"cfg_analysis\", an analysis task is instantiated and appended to a list. On a call to execute, all tasks are launched with the current data. Args: executor (PEP-3148 style executor): Executor on which all analysis tasks are launched cfg: Delta configuration Returns: None\n- def execute(self, timechunk): Execute all analysis tasks. Args: timechunk (timechunk): A time-chunk of 2D image data. Returns: None\n\n<|skeleton|>\nclass tasklist:\n \"\"\"Defines an analysis task list. This class defines an task list that is executed in parallel on an executor.\"\"\"\n\n def __init__(self, executor, cfg):\n \"\"\"Configures the analysis tasklist from a dictionary. For each key-value pair in \"cfg_analysis\", an analysis task is instantiated and appended to a list. On a call to execute, all tasks are launched with the current data. Args: executor (PEP-3148 style executor): Executor on which all analysis tasks are launched cfg: Delta configuration Returns: None\"\"\"\n <|body_0|>\n\n def execute(self, timechunk):\n \"\"\"Execute all analysis tasks. Args: timechunk (timechunk): A time-chunk of 2D image data. Returns: None\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.logger = logging.getLogger('simple')\n self.cfg = cfg\n self.executor = executor\n self.tasklist = []\n for key, anl_params in cfg['analysis'].items():\n try:\n self.tasklist.append(get_analysis_task(key, anl_params, cfg['storage']))\n except NameError as e:\n self.logger.error(f'Could not find a suitable analysis task: {e}')\n continue\n self.logger.info(f'Added {key} to analysis task list')\n<|end_body_0|>\n\n<|body_start_1|>\n self.logger.info(f'Submitting timechunk {timechunk.tb.chunk_idx} to analysis tasklist')\n for task in self.tasklist:\n task.execute(timechunk, self.executor)\n<|end_body_1|>\n", "revision_id": "7ce63705e18c427f448c8d720c950a54add07966", "skeleton": "<|skeleton|>\nclass tasklist:\n \"\"\"Defines an analysis task list. This class defines an task list that is executed in parallel on an executor.\"\"\"\n\n def __init__(self, executor, cfg):\n \"\"\"Configures the analysis tasklist from a dictionary. 
For each key-value pair in \"cfg_analysis\", an analysis task is instantiated and appended to a list. On a call to execute, all tasks are launched with the current data. Args: executor (PEP-3148 style executor): Executor on which all analysis tasks are launched cfg: Delta configuration Returns: None\"\"\"\n <|body_0|>\n\n def execute(self, timechunk):\n \"\"\"Execute all analysis tasks. Args: timechunk (timechunk): A time-chunk of 2D image data. Returns: None\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class tasklist:\n \"\"\"Defines an analysis task list. This class defines an task list that is executed in parallel on an executor.\"\"\"\n\n def __init__(self, executor, cfg):\n \"\"\"Configures the analysis tasklist from a dictionary. For each key-value pair in \"cfg_analysis\", an analysis task is instantiated and appended to a list. On a call to execute, all tasks are launched with the current data. Args: executor (PEP-3148 style executor): Executor on which all analysis tasks are launched cfg: Delta configuration Returns: None\"\"\"\n self.logger = logging.getLogger('simple')\n self.cfg = cfg\n self.executor = executor\n self.tasklist = []\n for key, anl_params in cfg['analysis'].items():\n try:\n self.tasklist.append(get_analysis_task(key, anl_params, cfg['storage']))\n except NameError as e:\n self.logger.error(f'Could not find a suitable analysis task: {e}')\n continue\n self.logger.info(f'Added {key} to analysis task list')\n\n def execute(self, timechunk):\n \"\"\"Execute all analysis tasks. Args: timechunk (timechunk): A time-chunk of 2D image data. Returns: None\"\"\"\n self.logger.info(f'Submitting timechunk {timechunk.tb.chunk_idx} to analysis tasklist')\n for task in self.tasklist:\n task.execute(timechunk, self.executor)\n", "source": "the_stack_v2_python_sparse", "source_path": "delta/analysis/task_list.py", "source_repo": "rkube/delta", "split": "val", "star_events_count": 7}
{"blob_id": "ce32b15ea03e11064a2f4ac2212e74a0e50bda54", "bodies": ["filepaths = []\nfor filename in cls.xml_filenames:\n filepath = os.path.join(cls._OUTPUT_SUBFOLDER, f'{cls._PREFIX}_{cls._CP_WRITE_UNIT_NUMBER}.save', filename)\n filepaths.append(filepath)\nreturn filepaths", "super().define(spec)\nspec.input('metadata.options.parser_name', valid_type=str, default='quantumespresso.cp')\nspec.output('output_trajectory', valid_type=orm.TrajectoryData)\nspec.output('output_parameters', valid_type=orm.Dict)\nspec.default_output_node = 'output_parameters'\nspec.exit_code(301, 'ERROR_NO_RETRIEVED_TEMPORARY_FOLDER', message='The retrieved temporary folder could not be accessed.')\nspec.exit_code(303, 'ERROR_MISSING_XML_FILE', message='The required XML file is not present in the retrieved folder.')\nspec.exit_code(304, 'ERROR_OUTPUT_XML_MULTIPLE', message='The retrieved folder contains multiple XML files.')\nspec.exit_code(320, 'ERROR_OUTPUT_XML_READ', message='The required XML file could not be read.')\nspec.exit_code(330, 'ERROR_READING_POS_FILE', message='The required POS file could not be read.')\nspec.exit_code(340, 'ERROR_READING_TRAJECTORY_DATA', message='The required trajectory data could not be read.')", "settings = kwargs['settings']\nautopilot = settings.pop('AUTOPILOT', [])\nif not autopilot:\n return ''\nautopilot_card = 'AUTOPILOT\\n'\ntry:\n for event in autopilot:\n if isinstance(event['newvalue'], str):\n autopilot_card += f\"ON_STEP = {event['onstep']} : '{event['what']}' = {event['newvalue']}\\n\"\n else:\n autopilot_card += f\"ON_STEP = {event['onstep']} : {event['what']} = {event['newvalue']}\\n\"\nexcept KeyError as exception:\n raise exceptions.InputValidationError(f\"AUTOPILOT input: you must specify a list of dictionaries like the following:\\n [\\n {{'onstep' : 10, 'what' : 'dt', 'newvalue' : 5.0 }},\\n {{'onstep' : 20, 'what' : 'whatever', 'newvalue' : 'pippo'}}\\n ]\\n You specified {autopilot}\\n \") from exception\nautopilot_card += 'ENDRULES\\n'\nreturn autopilot_card"], "bodies_text": "<|body_start_0|>\n filepaths = []\n for filename in cls.xml_filenames:\n filepath = os.path.join(cls._OUTPUT_SUBFOLDER, f'{cls._PREFIX}_{cls._CP_WRITE_UNIT_NUMBER}.save', filename)\n filepaths.append(filepath)\n return filepaths\n<|end_body_0|>\n\n<|body_start_1|>\n super().define(spec)\n spec.input('metadata.options.parser_name', valid_type=str, default='quantumespresso.cp')\n spec.output('output_trajectory', valid_type=orm.TrajectoryData)\n spec.output('output_parameters', valid_type=orm.Dict)\n spec.default_output_node = 'output_parameters'\n spec.exit_code(301, 'ERROR_NO_RETRIEVED_TEMPORARY_FOLDER', message='The retrieved temporary folder could not be accessed.')\n spec.exit_code(303, 'ERROR_MISSING_XML_FILE', message='The required XML file is not present in the retrieved folder.')\n spec.exit_code(304, 'ERROR_OUTPUT_XML_MULTIPLE', message='The retrieved folder contains multiple XML files.')\n spec.exit_code(320, 'ERROR_OUTPUT_XML_READ', message='The required XML file could not be read.')\n spec.exit_code(330, 'ERROR_READING_POS_FILE', message='The required POS file could not be read.')\n spec.exit_code(340, 'ERROR_READING_TRAJECTORY_DATA', message='The required trajectory data could not be read.')\n<|end_body_1|>\n\n<|body_start_2|>\n settings = kwargs['settings']\n autopilot = settings.pop('AUTOPILOT', [])\n if not autopilot:\n return ''\n autopilot_card = 'AUTOPILOT\\n'\n try:\n for event in autopilot:\n if isinstance(event['newvalue'], str):\n autopilot_card += f\"ON_STEP = 
{event['onstep']} : '{event['what']}' = {event['newvalue']}\\n\"\n else:\n autopilot_card += f\"ON_STEP = {event['onstep']} : {event['what']} = {event['newvalue']}\\n\"\n except KeyError as exception:\n raise exceptions.InputValidationError(f\"AUTOPILOT input: you must specify a list of dictionaries like the following:\\n [\\n {{'onstep' : 10, 'what' : 'dt', 'newvalue' : 5.0 }},\\n {{'onstep' : 20, 'what' : 'whatever', 'newvalue' : 'pippo'}}\\n ]\\n You specified {autopilot}\\n \") from exception\n autopilot_card += 'ENDRULES\\n'\n return autopilot_card\n<|end_body_2|>\n", "class_docstring": "`CalcJob` implementation for the cp.x code of Quantum ESPRESSO.", "class_name": "CpCalculation", "detected_licenses": ["MIT", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CpCalculation:\n \"\"\"`CalcJob` implementation for the cp.x code of Quantum ESPRESSO.\"\"\"\n\n def xml_filepaths(cls):\n \"\"\"Return a list of relative filepaths of XML files.\"\"\"\n <|body_0|>\n\n def define(cls, spec):\n \"\"\"Define the process specification.\"\"\"\n <|body_1|>\n\n def _generate_PWCP_input_tail(*args, **kwargs):\n \"\"\"Parse CP specific input parameters.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n filepaths = []\n for filename in cls.xml_filenames:\n filepath = os.path.join(cls._OUTPUT_SUBFOLDER, f'{cls._PREFIX}_{cls._CP_WRITE_UNIT_NUMBER}.save', filename)\n filepaths.append(filepath)\n return filepaths\n<|end_body_0|>\n\n<|body_start_1|>\n super().define(spec)\n spec.input('metadata.options.parser_name', valid_type=str, default='quantumespresso.cp')\n spec.output('output_trajectory', valid_type=orm.TrajectoryData)\n spec.output('output_parameters', valid_type=orm.Dict)\n spec.default_output_node = 'output_parameters'\n spec.exit_code(301, 'ERROR_NO_RETRIEVED_TEMPORARY_FOLDER', message='The retrieved temporary folder could not be accessed.')\n spec.exit_code(303, 'ERROR_MISSING_XML_FILE', message='The required XML file is not present in the retrieved folder.')\n spec.exit_code(304, 'ERROR_OUTPUT_XML_MULTIPLE', message='The retrieved folder contains multiple XML files.')\n spec.exit_code(320, 'ERROR_OUTPUT_XML_READ', message='The required XML file could not be read.')\n spec.exit_code(330, 'ERROR_READING_POS_FILE', message='The required POS file could not be read.')\n spec.exit_code(340, 'ERROR_READING_TRAJECTORY_DATA', message='The required trajectory data could not be read.')\n<|end_body_1|>\n\n<|body_start_2|>\n settings = kwargs['settings']\n autopilot = settings.pop('AUTOPILOT', [])\n if not autopilot:\n return ''\n autopilot_card = 'AUTOPILOT\\n'\n try:\n for event in autopilot:\n if isinstance(event['newvalue'], str):\n autopilot_card += f\"ON_STEP = {event['onstep']} : '{event['what']}' = {event['newvalue']}\\n\"\n else:\n autopilot_card += f\"ON_STEP = {event['onstep']} : {event['what']} = {event['newvalue']}\\n\"\n except KeyError as exception:\n raise exceptions.InputValidationError(f\"AUTOPILOT input: you must specify a list of dictionaries like the following:\\n [\\n {{'onstep' : 10, 'what' : 'dt', 'newvalue' : 5.0 }},\\n {{'onstep' : 20, 'what' : 'whatever', 'newvalue' : 'pippo'}}\\n ]\\n You specified {autopilot}\\n \") from exception\n autopilot_card += 'ENDRULES\\n'\n return autopilot_card\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000371", "length_bytes": 6243, "license_type": "permissive", "methods": [{"docstring": "Return a list of relative filepaths of XML 
files.", "name": "xml_filepaths", "signature": "def xml_filepaths(cls)"}, {"docstring": "Define the process specification.", "name": "define", "signature": "def define(cls, spec)"}, {"docstring": "Parse CP specific input parameters.", "name": "_generate_PWCP_input_tail", "signature": "def _generate_PWCP_input_tail(*args, **kwargs)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_039529", "prompt": "Implement the Python class `CpCalculation` described below.\n\nClass description:\n`CalcJob` implementation for the cp.x code of Quantum ESPRESSO.\n\nMethod signatures and docstrings:\n- def xml_filepaths(cls): Return a list of relative filepaths of XML files.\n- def define(cls, spec): Define the process specification.\n- def _generate_PWCP_input_tail(*args, **kwargs): Parse CP specific input parameters.", "prompted_full_text": "Implement the Python class `CpCalculation` described below.\n\nClass description:\n`CalcJob` implementation for the cp.x code of Quantum ESPRESSO.\n\nMethod signatures and docstrings:\n- def xml_filepaths(cls): Return a list of relative filepaths of XML files.\n- def define(cls, spec): Define the process specification.\n- def _generate_PWCP_input_tail(*args, **kwargs): Parse CP specific input parameters.\n\n<|skeleton|>\nclass CpCalculation:\n \"\"\"`CalcJob` implementation for the cp.x code of Quantum ESPRESSO.\"\"\"\n\n def xml_filepaths(cls):\n \"\"\"Return a list of relative filepaths of XML files.\"\"\"\n <|body_0|>\n\n def define(cls, spec):\n \"\"\"Define the process specification.\"\"\"\n <|body_1|>\n\n def _generate_PWCP_input_tail(*args, **kwargs):\n \"\"\"Parse CP specific input parameters.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n filepaths = []\n for filename in cls.xml_filenames:\n filepath = os.path.join(cls._OUTPUT_SUBFOLDER, f'{cls._PREFIX}_{cls._CP_WRITE_UNIT_NUMBER}.save', filename)\n filepaths.append(filepath)\n return filepaths\n<|end_body_0|>\n\n<|body_start_1|>\n super().define(spec)\n spec.input('metadata.options.parser_name', valid_type=str, default='quantumespresso.cp')\n spec.output('output_trajectory', valid_type=orm.TrajectoryData)\n spec.output('output_parameters', valid_type=orm.Dict)\n spec.default_output_node = 'output_parameters'\n spec.exit_code(301, 'ERROR_NO_RETRIEVED_TEMPORARY_FOLDER', message='The retrieved temporary folder could not be accessed.')\n spec.exit_code(303, 'ERROR_MISSING_XML_FILE', message='The required XML file is not present in the retrieved folder.')\n spec.exit_code(304, 'ERROR_OUTPUT_XML_MULTIPLE', message='The retrieved folder contains multiple XML files.')\n spec.exit_code(320, 'ERROR_OUTPUT_XML_READ', message='The required XML file could not be read.')\n spec.exit_code(330, 'ERROR_READING_POS_FILE', message='The required POS file could not be read.')\n spec.exit_code(340, 'ERROR_READING_TRAJECTORY_DATA', message='The required trajectory data could not be read.')\n<|end_body_1|>\n\n<|body_start_2|>\n settings = kwargs['settings']\n autopilot = settings.pop('AUTOPILOT', [])\n if not autopilot:\n return ''\n autopilot_card = 'AUTOPILOT\\n'\n try:\n for event in autopilot:\n if isinstance(event['newvalue'], str):\n autopilot_card += f\"ON_STEP = {event['onstep']} : '{event['what']}' = {event['newvalue']}\\n\"\n else:\n autopilot_card += f\"ON_STEP = {event['onstep']} : {event['what']} = {event['newvalue']}\\n\"\n except KeyError as exception:\n raise exceptions.InputValidationError(f\"AUTOPILOT input: you must specify a list of dictionaries like the following:\\n [\\n 
{{'onstep' : 10, 'what' : 'dt', 'newvalue' : 5.0 }},\\n {{'onstep' : 20, 'what' : 'whatever', 'newvalue' : 'pippo'}}\\n ]\\n You specified {autopilot}\\n \") from exception\n autopilot_card += 'ENDRULES\\n'\n return autopilot_card\n<|end_body_2|>\n", "revision_id": "7263f92ccabcfc9f828b9da5473e1aefbc4b8eca", "skeleton": "<|skeleton|>\nclass CpCalculation:\n \"\"\"`CalcJob` implementation for the cp.x code of Quantum ESPRESSO.\"\"\"\n\n def xml_filepaths(cls):\n \"\"\"Return a list of relative filepaths of XML files.\"\"\"\n <|body_0|>\n\n def define(cls, spec):\n \"\"\"Define the process specification.\"\"\"\n <|body_1|>\n\n def _generate_PWCP_input_tail(*args, **kwargs):\n \"\"\"Parse CP specific input parameters.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CpCalculation:\n \"\"\"`CalcJob` implementation for the cp.x code of Quantum ESPRESSO.\"\"\"\n\n def xml_filepaths(cls):\n \"\"\"Return a list of relative filepaths of XML files.\"\"\"\n filepaths = []\n for filename in cls.xml_filenames:\n filepath = os.path.join(cls._OUTPUT_SUBFOLDER, f'{cls._PREFIX}_{cls._CP_WRITE_UNIT_NUMBER}.save', filename)\n filepaths.append(filepath)\n return filepaths\n\n def define(cls, spec):\n \"\"\"Define the process specification.\"\"\"\n super().define(spec)\n spec.input('metadata.options.parser_name', valid_type=str, default='quantumespresso.cp')\n spec.output('output_trajectory', valid_type=orm.TrajectoryData)\n spec.output('output_parameters', valid_type=orm.Dict)\n spec.default_output_node = 'output_parameters'\n spec.exit_code(301, 'ERROR_NO_RETRIEVED_TEMPORARY_FOLDER', message='The retrieved temporary folder could not be accessed.')\n spec.exit_code(303, 'ERROR_MISSING_XML_FILE', message='The required XML file is not present in the retrieved folder.')\n spec.exit_code(304, 'ERROR_OUTPUT_XML_MULTIPLE', message='The retrieved folder contains multiple XML files.')\n spec.exit_code(320, 'ERROR_OUTPUT_XML_READ', message='The required XML file could not be read.')\n spec.exit_code(330, 'ERROR_READING_POS_FILE', message='The required POS file could not be read.')\n spec.exit_code(340, 'ERROR_READING_TRAJECTORY_DATA', message='The required trajectory data could not be read.')\n\n def _generate_PWCP_input_tail(*args, **kwargs):\n \"\"\"Parse CP specific input parameters.\"\"\"\n settings = kwargs['settings']\n autopilot = settings.pop('AUTOPILOT', [])\n if not autopilot:\n return ''\n autopilot_card = 'AUTOPILOT\\n'\n try:\n for event in autopilot:\n if isinstance(event['newvalue'], str):\n autopilot_card += f\"ON_STEP = {event['onstep']} : '{event['what']}' = {event['newvalue']}\\n\"\n else:\n autopilot_card += f\"ON_STEP = {event['onstep']} : {event['what']} = {event['newvalue']}\\n\"\n except KeyError as exception:\n raise exceptions.InputValidationError(f\"AUTOPILOT input: you must specify a list of dictionaries like the following:\\n [\\n {{'onstep' : 10, 'what' : 'dt', 'newvalue' : 5.0 }},\\n {{'onstep' : 20, 'what' : 'whatever', 'newvalue' : 'pippo'}}\\n ]\\n You specified {autopilot}\\n \") from exception\n autopilot_card += 'ENDRULES\\n'\n return autopilot_card\n", "source": "the_stack_v2_python_sparse", "source_path": "src/aiida_quantumespresso/calculations/cp.py", "source_repo": "aiidateam/aiida-quantumespresso", "split": "val", "star_events_count": 56}
{"blob_id": "4565c7fe39e7d0ca3ad1c5afe5379b128b0f94e2", "bodies": ["matrix = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\nresult = maximal_square(matrix)\nself.assertEqual(result, 0)", "matrix = [[0, 0, 1], [0, 0, 0], [0, 0, 0]]\nresult = maximal_square(matrix)\nself.assertEqual(result, 1)", "matrix = [[1, 1, 1, 1, 0, 0], [0, 1, 1, 1, 0, 1], [1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 0], [1, 1, 1, 0, 0, 0]]\nresult = maximal_square(matrix)\nself.assertEqual(result, 9)"], "bodies_text": "<|body_start_0|>\n matrix = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\n result = maximal_square(matrix)\n self.assertEqual(result, 0)\n<|end_body_0|>\n\n<|body_start_1|>\n matrix = [[0, 0, 1], [0, 0, 0], [0, 0, 0]]\n result = maximal_square(matrix)\n self.assertEqual(result, 1)\n<|end_body_1|>\n\n<|body_start_2|>\n matrix = [[1, 1, 1, 1, 0, 0], [0, 1, 1, 1, 0, 1], [1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 0], [1, 1, 1, 0, 0, 0]]\n result = maximal_square(matrix)\n self.assertEqual(result, 9)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "TestMaximalSquare", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestMaximalSquare:\n\n def test_returns_zero_if_no_ones(self):\n \"\"\"Takes in a matrix of 0s and returns 0\"\"\"\n <|body_0|>\n\n def test_returns_one_for_one_cell(self):\n \"\"\"Takes in a matrix with only one 1 and returns 1\"\"\"\n <|body_1|>\n\n def test_can_dynamically_calculate_larger_square_areas(self):\n \"\"\"Takes in a large matrix with a large square area and returns result\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n matrix = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\n result = maximal_square(matrix)\n self.assertEqual(result, 0)\n<|end_body_0|>\n\n<|body_start_1|>\n matrix = [[0, 0, 1], [0, 0, 0], [0, 0, 0]]\n result = maximal_square(matrix)\n self.assertEqual(result, 1)\n<|end_body_1|>\n\n<|body_start_2|>\n matrix = [[1, 1, 1, 1, 0, 0], [0, 1, 1, 1, 0, 1], [1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 0], [1, 1, 1, 0, 0, 0]]\n result = maximal_square(matrix)\n self.assertEqual(result, 9)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000372", "length_bytes": 1119, "license_type": "permissive", "methods": [{"docstring": "Takes in a matrix of 0s and returns 0", "name": "test_returns_zero_if_no_ones", "signature": "def test_returns_zero_if_no_ones(self)"}, {"docstring": "Takes in a matrix with only one 1 and returns 1", "name": "test_returns_one_for_one_cell", "signature": "def test_returns_one_for_one_cell(self)"}, {"docstring": "Takes in a large matrix with a large square area and returns result", "name": "test_can_dynamically_calculate_larger_square_areas", "signature": "def test_can_dynamically_calculate_larger_square_areas(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_013356", "prompt": "Implement the Python class `TestMaximalSquare` described below.\n\nClass description:\nImplement the TestMaximalSquare class.\n\nMethod signatures and docstrings:\n- def test_returns_zero_if_no_ones(self): Takes in a matrix of 0s and returns 0\n- def test_returns_one_for_one_cell(self): Takes in a matrix with only one 1 and returns 1\n- def test_can_dynamically_calculate_larger_square_areas(self): Takes in a large matrix with a large square area and returns result", "prompted_full_text": "Implement the Python class `TestMaximalSquare` described below.\n\nClass description:\nImplement the TestMaximalSquare class.\n\nMethod signatures and docstrings:\n- def test_returns_zero_if_no_ones(self): Takes in a matrix of 
0s and returns 0\n- def test_returns_one_for_one_cell(self): Takes in a matrix with only one 1 and returns 1\n- def test_can_dynamically_calculate_larger_square_areas(self): Takes in a large matrix with a large square area and returns result\n\n<|skeleton|>\nclass TestMaximalSquare:\n\n def test_returns_zero_if_no_ones(self):\n \"\"\"Takes in a matrix of 0s and returns 0\"\"\"\n <|body_0|>\n\n def test_returns_one_for_one_cell(self):\n \"\"\"Takes in a matrix with only one 1 and returns 1\"\"\"\n <|body_1|>\n\n def test_can_dynamically_calculate_larger_square_areas(self):\n \"\"\"Takes in a large matrix with a large square area and returns result\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n matrix = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\n result = maximal_square(matrix)\n self.assertEqual(result, 0)\n<|end_body_0|>\n\n<|body_start_1|>\n matrix = [[0, 0, 1], [0, 0, 0], [0, 0, 0]]\n result = maximal_square(matrix)\n self.assertEqual(result, 1)\n<|end_body_1|>\n\n<|body_start_2|>\n matrix = [[1, 1, 1, 1, 0, 0], [0, 1, 1, 1, 0, 1], [1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 0], [1, 1, 1, 0, 0, 0]]\n result = maximal_square(matrix)\n self.assertEqual(result, 9)\n<|end_body_2|>\n", "revision_id": "27ffb6b32d6d18d279c51cfa45bf305a409be5c2", "skeleton": "<|skeleton|>\nclass TestMaximalSquare:\n\n def test_returns_zero_if_no_ones(self):\n \"\"\"Takes in a matrix of 0s and returns 0\"\"\"\n <|body_0|>\n\n def test_returns_one_for_one_cell(self):\n \"\"\"Takes in a matrix with only one 1 and returns 1\"\"\"\n <|body_1|>\n\n def test_can_dynamically_calculate_larger_square_areas(self):\n \"\"\"Takes in a large matrix with a large square area and returns result\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TestMaximalSquare:\n def test_returns_zero_if_no_ones(self):\n \"\"\"Takes in a matrix of 0s and returns 0\"\"\"\n matrix = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\n result = maximal_square(matrix)\n self.assertEqual(result, 0)\n\n def test_returns_one_for_one_cell(self):\n \"\"\"Takes in a matrix with only one 1 and returns 1\"\"\"\n matrix = [[0, 0, 1], [0, 0, 0], [0, 0, 0]]\n result = maximal_square(matrix)\n self.assertEqual(result, 1)\n\n def test_can_dynamically_calculate_larger_square_areas(self):\n \"\"\"Takes in a large matrix with a large square area and returns result\"\"\"\n matrix = [[1, 1, 1, 1, 0, 0], [0, 1, 1, 1, 0, 1], [1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 0], [1, 1, 1, 0, 0, 0]]\n result = maximal_square(matrix)\n self.assertEqual(result, 9)\n", "source": "the_stack_v2_python_sparse", "source_path": "src/leetcode/medium/maximal-square/test_maximal_square.py", "source_repo": "nwthomas/code-challenges", "split": "val", "star_events_count": 2}
{"blob_id": "0a9572e2c8f4b384d1e302617f2e8573c1ca4a8b", "bodies": ["for direction, direction_value in [('N', '6'), ('S', '7')]:\n for zone in range(1, 61):\n classdict[f'UTM_{zone}{direction}'] = f'32{direction_value}{zone:02}'\nreturn super().__new__(mcs, cls, bases, classdict)", "crs_value = cls._parse_crs(crs_value)\nif isinstance(crs_value, str) and (not cls.has_value(crs_value)) and crs_value.isdigit() and (len(crs_value) >= 4):\n crs_name = f'EPSG_{crs_value}'\n extend_enum(cls, crs_name, crs_value)\nreturn super().__call__(crs_value, *args, **kwargs)", "if isinstance(value, dict) and 'init' in value:\n value = value['init']\nif hasattr(value, 'to_epsg'):\n if value == CRSMeta._UNSUPPORTED_CRS:\n message = 'sentinelhub-py supports only WGS 84 coordinate reference system with coordinate order lng-lat. Given pyproj.CRS(4326) has coordinate order lat-lng. Be careful to use the correct order of coordinates.'\n warnings.warn(message, category=SHUserWarning)\n epsg_code = value.to_epsg()\n if epsg_code is not None:\n return str(epsg_code)\n if value == CRS.WGS84.pyproj_crs():\n return '4326'\n error_message = f'Failed to determine an EPSG code of the given CRS:\\n{repr(value)}'\n maybe_epsg = value.to_epsg(min_confidence=0)\n if maybe_epsg is not None:\n error_message = f'{error_message}\\nIt might be EPSG {maybe_epsg} but pyproj is not confident enough.'\n raise ValueError(error_message)\nif isinstance(value, (int, np.integer)):\n return str(value)\nif isinstance(value, str):\n if 'urn:ogc:def:crs' in value.lower():\n crs_template = re.compile('urn:ogc:def:crs:.+::(?P.+)', re.IGNORECASE)\n match = crs_template.match(value)\n if match is None:\n raise ValueError(f'The value {value} could not be parsed to a CRS.')\n value = match.group('code')\n if value.upper() == 'CRS84':\n return '4326'\n return value.lower().replace('epsg:', '').strip()\nreturn value"], "bodies_text": "<|body_start_0|>\n for direction, direction_value in [('N', '6'), ('S', '7')]:\n for zone in range(1, 61):\n classdict[f'UTM_{zone}{direction}'] = f'32{direction_value}{zone:02}'\n return super().__new__(mcs, cls, bases, classdict)\n<|end_body_0|>\n\n<|body_start_1|>\n crs_value = cls._parse_crs(crs_value)\n if isinstance(crs_value, str) and (not cls.has_value(crs_value)) and crs_value.isdigit() and (len(crs_value) >= 4):\n crs_name = f'EPSG_{crs_value}'\n extend_enum(cls, crs_name, crs_value)\n return super().__call__(crs_value, *args, **kwargs)\n<|end_body_1|>\n\n<|body_start_2|>\n if isinstance(value, dict) and 'init' in value:\n value = value['init']\n if hasattr(value, 'to_epsg'):\n if value == CRSMeta._UNSUPPORTED_CRS:\n message = 'sentinelhub-py supports only WGS 84 coordinate reference system with coordinate order lng-lat. Given pyproj.CRS(4326) has coordinate order lat-lng. 
Be careful to use the correct order of coordinates.'\n warnings.warn(message, category=SHUserWarning)\n epsg_code = value.to_epsg()\n if epsg_code is not None:\n return str(epsg_code)\n if value == CRS.WGS84.pyproj_crs():\n return '4326'\n error_message = f'Failed to determine an EPSG code of the given CRS:\\n{repr(value)}'\n maybe_epsg = value.to_epsg(min_confidence=0)\n if maybe_epsg is not None:\n error_message = f'{error_message}\\nIt might be EPSG {maybe_epsg} but pyproj is not confident enough.'\n raise ValueError(error_message)\n if isinstance(value, (int, np.integer)):\n return str(value)\n if isinstance(value, str):\n if 'urn:ogc:def:crs' in value.lower():\n crs_template = re.compile('urn:ogc:def:crs:.+::(?P.+)', re.IGNORECASE)\n match = crs_template.match(value)\n if match is None:\n raise ValueError(f'The value {value} could not be parsed to a CRS.')\n value = match.group('code')\n if value.upper() == 'CRS84':\n return '4326'\n return value.lower().replace('epsg:', '').strip()\n return value\n<|end_body_2|>\n", "class_docstring": "Metaclass used for building CRS Enum class", "class_name": "CRSMeta", "detected_licenses": ["LicenseRef-scancode-unknown-license-reference", "MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CRSMeta:\n \"\"\"Metaclass used for building CRS Enum class\"\"\"\n\n def __new__(mcs, cls, bases, classdict):\n \"\"\"This is executed at the beginning of runtime when CRS class is created\"\"\"\n <|body_0|>\n\n def __call__(cls, crs_value, *args, **kwargs):\n \"\"\"This is executed whenever CRS('something') is called\"\"\"\n <|body_1|>\n\n def _parse_crs(value: object) -> object:\n \"\"\"Method for parsing different inputs representing the same CRS enum. Examples: - 4326 - 'EPSG:3857' - {'init': 32633} - geojson['crs']['properties']['name'] string (urn:ogc:def:crs:...) - pyproj.CRS(32743)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for direction, direction_value in [('N', '6'), ('S', '7')]:\n for zone in range(1, 61):\n classdict[f'UTM_{zone}{direction}'] = f'32{direction_value}{zone:02}'\n return super().__new__(mcs, cls, bases, classdict)\n<|end_body_0|>\n\n<|body_start_1|>\n crs_value = cls._parse_crs(crs_value)\n if isinstance(crs_value, str) and (not cls.has_value(crs_value)) and crs_value.isdigit() and (len(crs_value) >= 4):\n crs_name = f'EPSG_{crs_value}'\n extend_enum(cls, crs_name, crs_value)\n return super().__call__(crs_value, *args, **kwargs)\n<|end_body_1|>\n\n<|body_start_2|>\n if isinstance(value, dict) and 'init' in value:\n value = value['init']\n if hasattr(value, 'to_epsg'):\n if value == CRSMeta._UNSUPPORTED_CRS:\n message = 'sentinelhub-py supports only WGS 84 coordinate reference system with coordinate order lng-lat. Given pyproj.CRS(4326) has coordinate order lat-lng. 
Be careful to use the correct order of coordinates.'\n warnings.warn(message, category=SHUserWarning)\n epsg_code = value.to_epsg()\n if epsg_code is not None:\n return str(epsg_code)\n if value == CRS.WGS84.pyproj_crs():\n return '4326'\n error_message = f'Failed to determine an EPSG code of the given CRS:\\n{repr(value)}'\n maybe_epsg = value.to_epsg(min_confidence=0)\n if maybe_epsg is not None:\n error_message = f'{error_message}\\nIt might be EPSG {maybe_epsg} but pyproj is not confident enough.'\n raise ValueError(error_message)\n if isinstance(value, (int, np.integer)):\n return str(value)\n if isinstance(value, str):\n if 'urn:ogc:def:crs' in value.lower():\n crs_template = re.compile('urn:ogc:def:crs:.+::(?P.+)', re.IGNORECASE)\n match = crs_template.match(value)\n if match is None:\n raise ValueError(f'The value {value} could not be parsed to a CRS.')\n value = match.group('code')\n if value.upper() == 'CRS84':\n return '4326'\n return value.lower().replace('epsg:', '').strip()\n return value\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000373", "length_bytes": 13474, "license_type": "permissive", "methods": [{"docstring": "This is executed at the beginning of runtime when CRS class is created", "name": "__new__", "signature": "def __new__(mcs, cls, bases, classdict)"}, {"docstring": "This is executed whenever CRS('something') is called", "name": "__call__", "signature": "def __call__(cls, crs_value, *args, **kwargs)"}, {"docstring": "Method for parsing different inputs representing the same CRS enum. Examples: - 4326 - 'EPSG:3857' - {'init': 32633} - geojson['crs']['properties']['name'] string (urn:ogc:def:crs:...) - pyproj.CRS(32743)", "name": "_parse_crs", "signature": "def _parse_crs(value: object) -> object"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_031830", "prompt": "Implement the Python class `CRSMeta` described below.\n\nClass description:\nMetaclass used for building CRS Enum class\n\nMethod signatures and docstrings:\n- def __new__(mcs, cls, bases, classdict): This is executed at the beginning of runtime when CRS class is created\n- def __call__(cls, crs_value, *args, **kwargs): This is executed whenever CRS('something') is called\n- def _parse_crs(value: object) -> object: Method for parsing different inputs representing the same CRS enum. Examples: - 4326 - 'EPSG:3857' - {'init': 32633} - geojson['crs']['properties']['name'] string (urn:ogc:def:crs:...) - pyproj.CRS(32743)", "prompted_full_text": "Implement the Python class `CRSMeta` described below.\n\nClass description:\nMetaclass used for building CRS Enum class\n\nMethod signatures and docstrings:\n- def __new__(mcs, cls, bases, classdict): This is executed at the beginning of runtime when CRS class is created\n- def __call__(cls, crs_value, *args, **kwargs): This is executed whenever CRS('something') is called\n- def _parse_crs(value: object) -> object: Method for parsing different inputs representing the same CRS enum. Examples: - 4326 - 'EPSG:3857' - {'init': 32633} - geojson['crs']['properties']['name'] string (urn:ogc:def:crs:...) 
- pyproj.CRS(32743)\n\n<|skeleton|>\nclass CRSMeta:\n \"\"\"Metaclass used for building CRS Enum class\"\"\"\n\n def __new__(mcs, cls, bases, classdict):\n \"\"\"This is executed at the beginning of runtime when CRS class is created\"\"\"\n <|body_0|>\n\n def __call__(cls, crs_value, *args, **kwargs):\n \"\"\"This is executed whenever CRS('something') is called\"\"\"\n <|body_1|>\n\n def _parse_crs(value: object) -> object:\n \"\"\"Method for parsing different inputs representing the same CRS enum. Examples: - 4326 - 'EPSG:3857' - {'init': 32633} - geojson['crs']['properties']['name'] string (urn:ogc:def:crs:...) - pyproj.CRS(32743)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for direction, direction_value in [('N', '6'), ('S', '7')]:\n for zone in range(1, 61):\n classdict[f'UTM_{zone}{direction}'] = f'32{direction_value}{zone:02}'\n return super().__new__(mcs, cls, bases, classdict)\n<|end_body_0|>\n\n<|body_start_1|>\n crs_value = cls._parse_crs(crs_value)\n if isinstance(crs_value, str) and (not cls.has_value(crs_value)) and crs_value.isdigit() and (len(crs_value) >= 4):\n crs_name = f'EPSG_{crs_value}'\n extend_enum(cls, crs_name, crs_value)\n return super().__call__(crs_value, *args, **kwargs)\n<|end_body_1|>\n\n<|body_start_2|>\n if isinstance(value, dict) and 'init' in value:\n value = value['init']\n if hasattr(value, 'to_epsg'):\n if value == CRSMeta._UNSUPPORTED_CRS:\n message = 'sentinelhub-py supports only WGS 84 coordinate reference system with coordinate order lng-lat. Given pyproj.CRS(4326) has coordinate order lat-lng. Be careful to use the correct order of coordinates.'\n warnings.warn(message, category=SHUserWarning)\n epsg_code = value.to_epsg()\n if epsg_code is not None:\n return str(epsg_code)\n if value == CRS.WGS84.pyproj_crs():\n return '4326'\n error_message = f'Failed to determine an EPSG code of the given CRS:\\n{repr(value)}'\n maybe_epsg = value.to_epsg(min_confidence=0)\n if maybe_epsg is not None:\n error_message = f'{error_message}\\nIt might be EPSG {maybe_epsg} but pyproj is not confident enough.'\n raise ValueError(error_message)\n if isinstance(value, (int, np.integer)):\n return str(value)\n if isinstance(value, str):\n if 'urn:ogc:def:crs' in value.lower():\n crs_template = re.compile('urn:ogc:def:crs:.+::(?P.+)', re.IGNORECASE)\n match = crs_template.match(value)\n if match is None:\n raise ValueError(f'The value {value} could not be parsed to a CRS.')\n value = match.group('code')\n if value.upper() == 'CRS84':\n return '4326'\n return value.lower().replace('epsg:', '').strip()\n return value\n<|end_body_2|>\n", "revision_id": "98d0327e3929999ec07645f77b16fceb7f9c88b9", "skeleton": "<|skeleton|>\nclass CRSMeta:\n \"\"\"Metaclass used for building CRS Enum class\"\"\"\n\n def __new__(mcs, cls, bases, classdict):\n \"\"\"This is executed at the beginning of runtime when CRS class is created\"\"\"\n <|body_0|>\n\n def __call__(cls, crs_value, *args, **kwargs):\n \"\"\"This is executed whenever CRS('something') is called\"\"\"\n <|body_1|>\n\n def _parse_crs(value: object) -> object:\n \"\"\"Method for parsing different inputs representing the same CRS enum. Examples: - 4326 - 'EPSG:3857' - {'init': 32633} - geojson['crs']['properties']['name'] string (urn:ogc:def:crs:...) 
- pyproj.CRS(32743)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CRSMeta:\n \"\"\"Metaclass used for building CRS Enum class\"\"\"\n\n def __new__(mcs, cls, bases, classdict):\n \"\"\"This is executed at the beginning of runtime when CRS class is created\"\"\"\n for direction, direction_value in [('N', '6'), ('S', '7')]:\n for zone in range(1, 61):\n classdict[f'UTM_{zone}{direction}'] = f'32{direction_value}{zone:02}'\n return super().__new__(mcs, cls, bases, classdict)\n\n def __call__(cls, crs_value, *args, **kwargs):\n \"\"\"This is executed whenever CRS('something') is called\"\"\"\n crs_value = cls._parse_crs(crs_value)\n if isinstance(crs_value, str) and (not cls.has_value(crs_value)) and crs_value.isdigit() and (len(crs_value) >= 4):\n crs_name = f'EPSG_{crs_value}'\n extend_enum(cls, crs_name, crs_value)\n return super().__call__(crs_value, *args, **kwargs)\n\n def _parse_crs(value: object) -> object:\n \"\"\"Method for parsing different inputs representing the same CRS enum. Examples: - 4326 - 'EPSG:3857' - {'init': 32633} - geojson['crs']['properties']['name'] string (urn:ogc:def:crs:...) - pyproj.CRS(32743)\"\"\"\n if isinstance(value, dict) and 'init' in value:\n value = value['init']\n if hasattr(value, 'to_epsg'):\n if value == CRSMeta._UNSUPPORTED_CRS:\n message = 'sentinelhub-py supports only WGS 84 coordinate reference system with coordinate order lng-lat. Given pyproj.CRS(4326) has coordinate order lat-lng. Be careful to use the correct order of coordinates.'\n warnings.warn(message, category=SHUserWarning)\n epsg_code = value.to_epsg()\n if epsg_code is not None:\n return str(epsg_code)\n if value == CRS.WGS84.pyproj_crs():\n return '4326'\n error_message = f'Failed to determine an EPSG code of the given CRS:\\n{repr(value)}'\n maybe_epsg = value.to_epsg(min_confidence=0)\n if maybe_epsg is not None:\n error_message = f'{error_message}\\nIt might be EPSG {maybe_epsg} but pyproj is not confident enough.'\n raise ValueError(error_message)\n if isinstance(value, (int, np.integer)):\n return str(value)\n if isinstance(value, str):\n if 'urn:ogc:def:crs' in value.lower():\n crs_template = re.compile('urn:ogc:def:crs:.+::(?P.+)', re.IGNORECASE)\n match = crs_template.match(value)\n if match is None:\n raise ValueError(f'The value {value} could not be parsed to a CRS.')\n value = match.group('code')\n if value.upper() == 'CRS84':\n return '4326'\n return value.lower().replace('epsg:', '').strip()\n return value\n", "source": "the_stack_v2_python_sparse", "source_path": "sentinelhub/constants.py", "source_repo": "sentinel-hub/sentinelhub-py", "split": "val", "star_events_count": 704}
{"blob_id": "01b6df14387be33a3b5d3799fe92f2424b671c44", "bodies": ["self.arrayOne = [10, 15, 8, 12, 94, 81, 5, 2, 11]\nself.arrayTwo = [10, 8, 5, 15, 2, 12, 11, 94, 81]\nself.output = True\nreturn (self.arrayOne, self.arrayTwo, self.output)", "arrayOne, arrayTwo, output = self.setUp()\noutput_method = sameBSTs(arrayOne, arrayTwo)\nself.assertEqual(output, output_method)"], "bodies_text": "<|body_start_0|>\n self.arrayOne = [10, 15, 8, 12, 94, 81, 5, 2, 11]\n self.arrayTwo = [10, 8, 5, 15, 2, 12, 11, 94, 81]\n self.output = True\n return (self.arrayOne, self.arrayTwo, self.output)\n<|end_body_0|>\n\n<|body_start_1|>\n arrayOne, arrayTwo, output = self.setUp()\n output_method = sameBSTs(arrayOne, arrayTwo)\n self.assertEqual(output, output_method)\n<|end_body_1|>\n", "class_docstring": "Class with unittests for SameBSTs.py", "class_name": "test_SameBSTs", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass test_SameBSTs:\n \"\"\"Class with unittests for SameBSTs.py\"\"\"\n\n def setUp(self):\n \"\"\"Sets up input.\"\"\"\n <|body_0|>\n\n def test_user_input(self):\n \"\"\"Checks if method works properly. Input cannot be empty string.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.arrayOne = [10, 15, 8, 12, 94, 81, 5, 2, 11]\n self.arrayTwo = [10, 8, 5, 15, 2, 12, 11, 94, 81]\n self.output = True\n return (self.arrayOne, self.arrayTwo, self.output)\n<|end_body_0|>\n\n<|body_start_1|>\n arrayOne, arrayTwo, output = self.setUp()\n output_method = sameBSTs(arrayOne, arrayTwo)\n self.assertEqual(output, output_method)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000374", "length_bytes": 952, "license_type": "no_license", "methods": [{"docstring": "Sets up input.", "name": "setUp", "signature": "def setUp(self)"}, {"docstring": "Checks if method works properly. Input cannot be empty string.", "name": "test_user_input", "signature": "def test_user_input(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_009961", "prompt": "Implement the Python class `test_SameBSTs` described below.\n\nClass description:\nClass with unittests for SameBSTs.py\n\nMethod signatures and docstrings:\n- def setUp(self): Sets up input.\n- def test_user_input(self): Checks if method works properly. Input cannot be empty string.", "prompted_full_text": "Implement the Python class `test_SameBSTs` described below.\n\nClass description:\nClass with unittests for SameBSTs.py\n\nMethod signatures and docstrings:\n- def setUp(self): Sets up input.\n- def test_user_input(self): Checks if method works properly. Input cannot be empty string.\n\n<|skeleton|>\nclass test_SameBSTs:\n \"\"\"Class with unittests for SameBSTs.py\"\"\"\n\n def setUp(self):\n \"\"\"Sets up input.\"\"\"\n <|body_0|>\n\n def test_user_input(self):\n \"\"\"Checks if method works properly. 
Input cannot be empty string.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.arrayOne = [10, 15, 8, 12, 94, 81, 5, 2, 11]\n self.arrayTwo = [10, 8, 5, 15, 2, 12, 11, 94, 81]\n self.output = True\n return (self.arrayOne, self.arrayTwo, self.output)\n<|end_body_0|>\n\n<|body_start_1|>\n arrayOne, arrayTwo, output = self.setUp()\n output_method = sameBSTs(arrayOne, arrayTwo)\n self.assertEqual(output, output_method)\n<|end_body_1|>\n", "revision_id": "3aa62ad36c3b06b2a3b05f1f8e2a9e21d68b371f", "skeleton": "<|skeleton|>\nclass test_SameBSTs:\n \"\"\"Class with unittests for SameBSTs.py\"\"\"\n\n def setUp(self):\n \"\"\"Sets up input.\"\"\"\n <|body_0|>\n\n def test_user_input(self):\n \"\"\"Checks if method works properly. Input cannot be empty string.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class test_SameBSTs:\n \"\"\"Class with unittests for SameBSTs.py\"\"\"\n\n def setUp(self):\n \"\"\"Sets up input.\"\"\"\n self.arrayOne = [10, 15, 8, 12, 94, 81, 5, 2, 11]\n self.arrayTwo = [10, 8, 5, 15, 2, 12, 11, 94, 81]\n self.output = True\n return (self.arrayOne, self.arrayTwo, self.output)\n\n def test_user_input(self):\n \"\"\"Checks if method works properly. Input cannot be empty string.\"\"\"\n arrayOne, arrayTwo, output = self.setUp()\n output_method = sameBSTs(arrayOne, arrayTwo)\n self.assertEqual(output, output_method)\n", "source": "the_stack_v2_python_sparse", "source_path": "AlgoExpert_algorithms/Hard/SameBSTs/test_SameBSTs.py", "source_repo": "JakubKazimierski/PythonPortfolio", "split": "val", "star_events_count": 9}
{"blob_id": "e71b41c8c13ba836918437e412749a802aea56b0", "bodies": ["if len(nums) == 0:\n return []\ni, j = (0, 0)\nwhile j < len(nums):\n if nums[j] % 2 != 0:\n nums[i], nums[j] = (nums[j], nums[i])\n i += 1\n j += 1\nreturn nums", "if len(nums) == 0:\n return []\ni, j = (0, len(nums) - 1)\nwhile i < j:\n while i < j and nums[i] % 2 != 0:\n i += 1\n while i < j and nums[j] % 2 == 0:\n j -= 1\n nums[i], nums[j] = (nums[j], nums[i])\nreturn nums"], "bodies_text": "<|body_start_0|>\n if len(nums) == 0:\n return []\n i, j = (0, 0)\n while j < len(nums):\n if nums[j] % 2 != 0:\n nums[i], nums[j] = (nums[j], nums[i])\n i += 1\n j += 1\n return nums\n<|end_body_0|>\n\n<|body_start_1|>\n if len(nums) == 0:\n return []\n i, j = (0, len(nums) - 1)\n while i < j:\n while i < j and nums[i] % 2 != 0:\n i += 1\n while i < j and nums[j] % 2 == 0:\n j -= 1\n nums[i], nums[j] = (nums[j], nums[i])\n return nums\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def exchange(self, nums: List[int]) -> List[int]:\n \"\"\"双指针, 动态规划 使i为奇偶边缘的第一个偶数下标 j向右走 :param nums: :return:\"\"\"\n <|body_0|>\n\n def exchange(self, nums: List[int]) -> List[int]:\n \"\"\"双指针 i开头 j末尾 :param nums: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if len(nums) == 0:\n return []\n i, j = (0, 0)\n while j < len(nums):\n if nums[j] % 2 != 0:\n nums[i], nums[j] = (nums[j], nums[i])\n i += 1\n j += 1\n return nums\n<|end_body_0|>\n\n<|body_start_1|>\n if len(nums) == 0:\n return []\n i, j = (0, len(nums) - 1)\n while i < j:\n while i < j and nums[i] % 2 != 0:\n i += 1\n while i < j and nums[j] % 2 == 0:\n j -= 1\n nums[i], nums[j] = (nums[j], nums[i])\n return nums\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000375", "length_bytes": 1963, "license_type": "no_license", "methods": [{"docstring": "双指针, 动态规划 使i为奇偶边缘的第一个偶数下标 j向右走 :param nums: :return:", "name": "exchange", "signature": "def exchange(self, nums: List[int]) -> List[int]"}, {"docstring": "双指针 i开头 j末尾 :param nums: :return:", "name": "exchange", "signature": "def exchange(self, nums: List[int]) -> List[int]"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_025995", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def exchange(self, nums: List[int]) -> List[int]: 双指针, 动态规划 使i为奇偶边缘的第一个偶数下标 j向右走 :param nums: :return:\n- def exchange(self, nums: List[int]) -> List[int]: 双指针 i开头 j末尾 :param nums: :return:", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def exchange(self, nums: List[int]) -> List[int]: 双指针, 动态规划 使i为奇偶边缘的第一个偶数下标 j向右走 :param nums: :return:\n- def exchange(self, nums: List[int]) -> List[int]: 双指针 i开头 j末尾 :param nums: :return:\n\n<|skeleton|>\nclass Solution:\n\n def exchange(self, nums: List[int]) -> List[int]:\n \"\"\"双指针, 动态规划 使i为奇偶边缘的第一个偶数下标 j向右走 :param nums: :return:\"\"\"\n <|body_0|>\n\n def exchange(self, nums: List[int]) -> List[int]:\n \"\"\"双指针 i开头 j末尾 :param nums: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if len(nums) == 0:\n return []\n i, j = (0, 0)\n while j < len(nums):\n if nums[j] % 2 != 0:\n nums[i], nums[j] = (nums[j], nums[i])\n i += 1\n j += 1\n return 
nums\n<|end_body_0|>\n\n<|body_start_1|>\n if len(nums) == 0:\n return []\n i, j = (0, len(nums) - 1)\n while i < j:\n while i < j and nums[i] % 2 != 0:\n i += 1\n while i < j and nums[j] % 2 == 0:\n j -= 1\n nums[i], nums[j] = (nums[j], nums[i])\n return nums\n<|end_body_1|>\n", "revision_id": "b1680014ce3f55ba952a1e64241c0cbb783cc436", "skeleton": "<|skeleton|>\nclass Solution:\n\n def exchange(self, nums: List[int]) -> List[int]:\n \"\"\"双指针, 动态规划 使i为奇偶边缘的第一个偶数下标 j向右走 :param nums: :return:\"\"\"\n <|body_0|>\n\n def exchange(self, nums: List[int]) -> List[int]:\n \"\"\"双指针 i开头 j末尾 :param nums: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def exchange(self, nums: List[int]) -> List[int]:\n \"\"\"双指针, 动态规划 使i为奇偶边缘的第一个偶数下标 j向右走 :param nums: :return:\"\"\"\n if len(nums) == 0:\n return []\n i, j = (0, 0)\n while j < len(nums):\n if nums[j] % 2 != 0:\n nums[i], nums[j] = (nums[j], nums[i])\n i += 1\n j += 1\n return nums\n\n def exchange(self, nums: List[int]) -> List[int]:\n \"\"\"双指针 i开头 j末尾 :param nums: :return:\"\"\"\n if len(nums) == 0:\n return []\n i, j = (0, len(nums) - 1)\n while i < j:\n while i < j and nums[i] % 2 != 0:\n i += 1\n while i < j and nums[j] % 2 == 0:\n j -= 1\n nums[i], nums[j] = (nums[j], nums[i])\n return nums\n", "source": "the_stack_v2_python_sparse", "source_path": "21.py", "source_repo": "sun510001/leetcode_jianzhi_offer_2", "split": "val", "star_events_count": 0}
{"blob_id": "580d6f77880dacd5e5c3d74b1516cfde6f8b9ba6", "bodies": ["try:\n if not isinstance(data['project_id'], int) or not isinstance(data['id'], int):\n return JsonResponse(code='999995', msg='参数有误!')\n if not data['name'] or not data['host']:\n return JsonResponse(code='999995', msg='参数有误!')\nexcept KeyError:\n return JsonResponse(code='999995', msg='参数有误!')", "data = JSONParser().parse(request)\nresult = self.parameter_check(data)\nif result:\n return result\ntry:\n pro_data = Project.objects.get(id=data['project_id'])\n if not request.user.is_superuser and pro_data.user.is_superuser:\n return JsonResponse(code='999983', msg='无操作权限!')\nexcept ObjectDoesNotExist:\n return JsonResponse(code='999995', msg='项目不存在!')\npro_data = ProjectSerializer(pro_data)\nif not pro_data.data['status']:\n return JsonResponse(code='999985', msg='该项目已禁用')\ntry:\n obi = GlobalHost.objects.get(id=data['id'])\nexcept ObjectDoesNotExist:\n return JsonResponse(code='999992', msg='host不存在!')\nhost_name = GlobalHost.objects.filter(name=data['name']).exclude(id=data['id'])\nif len(host_name):\n return JsonResponse(code='999997', msg='存在相同名称!')\nelse:\n serializer = GlobalHostSerializer(data=data)\n with transaction.atomic():\n if serializer.is_valid():\n serializer.update(instance=obi, validated_data=data)\n record_dynamic(project=data['project_id'], _type='修改', operationObject='域名', user=request.user.pk, data=data['name'])\n return JsonResponse(code='999999', msg='成功!')\n return JsonResponse(code='999998', msg='失败!')"], "bodies_text": "<|body_start_0|>\n try:\n if not isinstance(data['project_id'], int) or not isinstance(data['id'], int):\n return JsonResponse(code='999995', msg='参数有误!')\n if not data['name'] or not data['host']:\n return JsonResponse(code='999995', msg='参数有误!')\n except KeyError:\n return JsonResponse(code='999995', msg='参数有误!')\n<|end_body_0|>\n\n<|body_start_1|>\n data = JSONParser().parse(request)\n result = self.parameter_check(data)\n if result:\n return result\n try:\n pro_data = Project.objects.get(id=data['project_id'])\n if not request.user.is_superuser and pro_data.user.is_superuser:\n return JsonResponse(code='999983', msg='无操作权限!')\n except ObjectDoesNotExist:\n return JsonResponse(code='999995', msg='项目不存在!')\n pro_data = ProjectSerializer(pro_data)\n if not pro_data.data['status']:\n return JsonResponse(code='999985', msg='该项目已禁用')\n try:\n obi = GlobalHost.objects.get(id=data['id'])\n except ObjectDoesNotExist:\n return JsonResponse(code='999992', msg='host不存在!')\n host_name = GlobalHost.objects.filter(name=data['name']).exclude(id=data['id'])\n if len(host_name):\n return JsonResponse(code='999997', msg='存在相同名称!')\n else:\n serializer = GlobalHostSerializer(data=data)\n with transaction.atomic():\n if serializer.is_valid():\n serializer.update(instance=obi, validated_data=data)\n record_dynamic(project=data['project_id'], _type='修改', operationObject='域名', user=request.user.pk, data=data['name'])\n return JsonResponse(code='999999', msg='成功!')\n return JsonResponse(code='999998', msg='失败!')\n<|end_body_1|>\n", "class_docstring": "", "class_name": "UpdateHost", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass UpdateHost:\n\n def parameter_check(self, data):\n \"\"\"校验参数 :param data: :return:\"\"\"\n <|body_0|>\n\n def post(self, request):\n \"\"\"修改host域名 :param request: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n if not isinstance(data['project_id'], int) or not 
isinstance(data['id'], int):\n return JsonResponse(code='999995', msg='参数有误!')\n if not data['name'] or not data['host']:\n return JsonResponse(code='999995', msg='参数有误!')\n except KeyError:\n return JsonResponse(code='999995', msg='参数有误!')\n<|end_body_0|>\n\n<|body_start_1|>\n data = JSONParser().parse(request)\n result = self.parameter_check(data)\n if result:\n return result\n try:\n pro_data = Project.objects.get(id=data['project_id'])\n if not request.user.is_superuser and pro_data.user.is_superuser:\n return JsonResponse(code='999983', msg='无操作权限!')\n except ObjectDoesNotExist:\n return JsonResponse(code='999995', msg='项目不存在!')\n pro_data = ProjectSerializer(pro_data)\n if not pro_data.data['status']:\n return JsonResponse(code='999985', msg='该项目已禁用')\n try:\n obi = GlobalHost.objects.get(id=data['id'])\n except ObjectDoesNotExist:\n return JsonResponse(code='999992', msg='host不存在!')\n host_name = GlobalHost.objects.filter(name=data['name']).exclude(id=data['id'])\n if len(host_name):\n return JsonResponse(code='999997', msg='存在相同名称!')\n else:\n serializer = GlobalHostSerializer(data=data)\n with transaction.atomic():\n if serializer.is_valid():\n serializer.update(instance=obi, validated_data=data)\n record_dynamic(project=data['project_id'], _type='修改', operationObject='域名', user=request.user.pk, data=data['name'])\n return JsonResponse(code='999999', msg='成功!')\n return JsonResponse(code='999998', msg='失败!')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000376", "length_bytes": 12982, "license_type": "permissive", "methods": [{"docstring": "校验参数 :param data: :return:", "name": "parameter_check", "signature": "def parameter_check(self, data)"}, {"docstring": "修改host域名 :param request: :return:", "name": "post", "signature": "def post(self, request)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_051990", "prompt": "Implement the Python class `UpdateHost` described below.\n\nClass description:\nImplement the UpdateHost class.\n\nMethod signatures and docstrings:\n- def parameter_check(self, data): 校验参数 :param data: :return:\n- def post(self, request): 修改host域名 :param request: :return:", "prompted_full_text": "Implement the Python class `UpdateHost` described below.\n\nClass description:\nImplement the UpdateHost class.\n\nMethod signatures and docstrings:\n- def parameter_check(self, data): 校验参数 :param data: :return:\n- def post(self, request): 修改host域名 :param request: :return:\n\n<|skeleton|>\nclass UpdateHost:\n\n def parameter_check(self, data):\n \"\"\"校验参数 :param data: :return:\"\"\"\n <|body_0|>\n\n def post(self, request):\n \"\"\"修改host域名 :param request: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n if not isinstance(data['project_id'], int) or not isinstance(data['id'], int):\n return JsonResponse(code='999995', msg='参数有误!')\n if not data['name'] or not data['host']:\n return JsonResponse(code='999995', msg='参数有误!')\n except KeyError:\n return JsonResponse(code='999995', msg='参数有误!')\n<|end_body_0|>\n\n<|body_start_1|>\n data = JSONParser().parse(request)\n result = self.parameter_check(data)\n if result:\n return result\n try:\n pro_data = Project.objects.get(id=data['project_id'])\n if not request.user.is_superuser and pro_data.user.is_superuser:\n return JsonResponse(code='999983', msg='无操作权限!')\n except ObjectDoesNotExist:\n return JsonResponse(code='999995', msg='项目不存在!')\n pro_data = ProjectSerializer(pro_data)\n if not pro_data.data['status']:\n return JsonResponse(code='999985', msg='该项目已禁用')\n 
try:\n obi = GlobalHost.objects.get(id=data['id'])\n except ObjectDoesNotExist:\n return JsonResponse(code='999992', msg='host不存在!')\n host_name = GlobalHost.objects.filter(name=data['name']).exclude(id=data['id'])\n if len(host_name):\n return JsonResponse(code='999997', msg='存在相同名称!')\n else:\n serializer = GlobalHostSerializer(data=data)\n with transaction.atomic():\n if serializer.is_valid():\n serializer.update(instance=obi, validated_data=data)\n record_dynamic(project=data['project_id'], _type='修改', operationObject='域名', user=request.user.pk, data=data['name'])\n return JsonResponse(code='999999', msg='成功!')\n return JsonResponse(code='999998', msg='失败!')\n<|end_body_1|>\n", "revision_id": "6d08f58fa6985415ef7beae733e6f8147026806e", "skeleton": "<|skeleton|>\nclass UpdateHost:\n\n def parameter_check(self, data):\n \"\"\"校验参数 :param data: :return:\"\"\"\n <|body_0|>\n\n def post(self, request):\n \"\"\"修改host域名 :param request: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class UpdateHost:\n def parameter_check(self, data):\n \"\"\"校验参数 :param data: :return:\"\"\"\n try:\n if not isinstance(data['project_id'], int) or not isinstance(data['id'], int):\n return JsonResponse(code='999995', msg='参数有误!')\n if not data['name'] or not data['host']:\n return JsonResponse(code='999995', msg='参数有误!')\n except KeyError:\n return JsonResponse(code='999995', msg='参数有误!')\n\n def post(self, request):\n \"\"\"修改host域名 :param request: :return:\"\"\"\n data = JSONParser().parse(request)\n result = self.parameter_check(data)\n if result:\n return result\n try:\n pro_data = Project.objects.get(id=data['project_id'])\n if not request.user.is_superuser and pro_data.user.is_superuser:\n return JsonResponse(code='999983', msg='无操作权限!')\n except ObjectDoesNotExist:\n return JsonResponse(code='999995', msg='项目不存在!')\n pro_data = ProjectSerializer(pro_data)\n if not pro_data.data['status']:\n return JsonResponse(code='999985', msg='该项目已禁用')\n try:\n obi = GlobalHost.objects.get(id=data['id'])\n except ObjectDoesNotExist:\n return JsonResponse(code='999992', msg='host不存在!')\n host_name = GlobalHost.objects.filter(name=data['name']).exclude(id=data['id'])\n if len(host_name):\n return JsonResponse(code='999997', msg='存在相同名称!')\n else:\n serializer = GlobalHostSerializer(data=data)\n with transaction.atomic():\n if serializer.is_valid():\n serializer.update(instance=obi, validated_data=data)\n record_dynamic(project=data['project_id'], _type='修改', operationObject='域名', user=request.user.pk, data=data['name'])\n return JsonResponse(code='999999', msg='成功!')\n return JsonResponse(code='999998', msg='失败!')\n", "source": "the_stack_v2_python_sparse", "source_path": "api_test/api/global_parameter.py", "source_repo": "yourant/tapi", "split": "val", "star_events_count": 0}
{"blob_id": "04fe3b247339b0fe3ca60c638607d661ef5c3a28", "bodies": ["if not access_token:\n raise ValueError('Access token must be specified.')\nif not books_path:\n books_path = DropboxDownloader.DEFAULT_DROPBOX_PATH\nself.access_token = access_token\nself.books_path = books_path\nself.workers = workers\nself.logger = logger if logger else logging.getLogger(__name__)", "if not path and (not self.books_path):\n raise ValueError('Path to read data from is not specified')\nif not path:\n path = self.books_path\nclient = dropbox.client.DropboxClient(self.access_token)\nmeta = client.metadata(path)\nfiles = filepaths_from_metadata(meta)\nmoonreader_files = get_moonreader_files_from_filelist(files)\nif book_count is not None:\n file_pairs = get_same_book_files(moonreader_files)[:book_count]\nelse:\n file_pairs = get_same_book_files(moonreader_files)\nfor book_dict in dicts_from_pairs(client, file_pairs, workers=self.workers):\n try:\n book = Book.from_fobj_dict(book_dict)\n yield book\n except Exception:\n err_msg = 'Exception occured when creating book object.'\n logging.exception(err_msg)"], "bodies_text": "<|body_start_0|>\n if not access_token:\n raise ValueError('Access token must be specified.')\n if not books_path:\n books_path = DropboxDownloader.DEFAULT_DROPBOX_PATH\n self.access_token = access_token\n self.books_path = books_path\n self.workers = workers\n self.logger = logger if logger else logging.getLogger(__name__)\n<|end_body_0|>\n\n<|body_start_1|>\n if not path and (not self.books_path):\n raise ValueError('Path to read data from is not specified')\n if not path:\n path = self.books_path\n client = dropbox.client.DropboxClient(self.access_token)\n meta = client.metadata(path)\n files = filepaths_from_metadata(meta)\n moonreader_files = get_moonreader_files_from_filelist(files)\n if book_count is not None:\n file_pairs = get_same_book_files(moonreader_files)[:book_count]\n else:\n file_pairs = get_same_book_files(moonreader_files)\n for book_dict in dicts_from_pairs(client, file_pairs, workers=self.workers):\n try:\n book = Book.from_fobj_dict(book_dict)\n yield book\n except Exception:\n err_msg = 'Exception occured when creating book object.'\n logging.exception(err_msg)\n<|end_body_1|>\n", "class_docstring": "Class to obtain bookdata from dropbox syncronized account", "class_name": "DropboxDownloader", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DropboxDownloader:\n \"\"\"Class to obtain bookdata from dropbox syncronized account\"\"\"\n\n def __init__(self, access_token, books_path='', workers=8, logger=None):\n \"\"\":param access_token: Dropbox access token :param books_path: Absolute path to dropbox's dir with syncronized notes :param workers: number of concurrent workers to download data from Dropbox :param logger: class responsible for logging\"\"\"\n <|body_0|>\n\n def get_books(self, path='', book_count=None):\n \"\"\"Obtains book objects from dropbox folder :param path: Dropbox directory with syncronized book data :param book_count: number of books to read\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not access_token:\n raise ValueError('Access token must be specified.')\n if not books_path:\n books_path = DropboxDownloader.DEFAULT_DROPBOX_PATH\n self.access_token = access_token\n self.books_path = books_path\n self.workers = workers\n self.logger = logger if logger else logging.getLogger(__name__)\n<|end_body_0|>\n\n<|body_start_1|>\n if not path and (not self.books_path):\n 
raise ValueError('Path to read data from is not specified')\n if not path:\n path = self.books_path\n client = dropbox.client.DropboxClient(self.access_token)\n meta = client.metadata(path)\n files = filepaths_from_metadata(meta)\n moonreader_files = get_moonreader_files_from_filelist(files)\n if book_count is not None:\n file_pairs = get_same_book_files(moonreader_files)[:book_count]\n else:\n file_pairs = get_same_book_files(moonreader_files)\n for book_dict in dicts_from_pairs(client, file_pairs, workers=self.workers):\n try:\n book = Book.from_fobj_dict(book_dict)\n yield book\n except Exception:\n err_msg = 'Exception occured when creating book object.'\n logging.exception(err_msg)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000377", "length_bytes": 2414, "license_type": "permissive", "methods": [{"docstring": ":param access_token: Dropbox access token :param books_path: Absolute path to dropbox's dir with syncronized notes :param workers: number of concurrent workers to download data from Dropbox :param logger: class responsible for logging", "name": "__init__", "signature": "def __init__(self, access_token, books_path='', workers=8, logger=None)"}, {"docstring": "Obtains book objects from dropbox folder :param path: Dropbox directory with syncronized book data :param book_count: number of books to read", "name": "get_books", "signature": "def get_books(self, path='', book_count=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_049192", "prompt": "Implement the Python class `DropboxDownloader` described below.\n\nClass description:\nClass to obtain bookdata from dropbox syncronized account\n\nMethod signatures and docstrings:\n- def __init__(self, access_token, books_path='', workers=8, logger=None): :param access_token: Dropbox access token :param books_path: Absolute path to dropbox's dir with syncronized notes :param workers: number of concurrent workers to download data from Dropbox :param logger: class responsible for logging\n- def get_books(self, path='', book_count=None): Obtains book objects from dropbox folder :param path: Dropbox directory with syncronized book data :param book_count: number of books to read", "prompted_full_text": "Implement the Python class `DropboxDownloader` described below.\n\nClass description:\nClass to obtain bookdata from dropbox syncronized account\n\nMethod signatures and docstrings:\n- def __init__(self, access_token, books_path='', workers=8, logger=None): :param access_token: Dropbox access token :param books_path: Absolute path to dropbox's dir with syncronized notes :param workers: number of concurrent workers to download data from Dropbox :param logger: class responsible for logging\n- def get_books(self, path='', book_count=None): Obtains book objects from dropbox folder :param path: Dropbox directory with syncronized book data :param book_count: number of books to read\n\n<|skeleton|>\nclass DropboxDownloader:\n \"\"\"Class to obtain bookdata from dropbox syncronized account\"\"\"\n\n def __init__(self, access_token, books_path='', workers=8, logger=None):\n \"\"\":param access_token: Dropbox access token :param books_path: Absolute path to dropbox's dir with syncronized notes :param workers: number of concurrent workers to download data from Dropbox :param logger: class responsible for logging\"\"\"\n <|body_0|>\n\n def get_books(self, path='', book_count=None):\n \"\"\"Obtains book objects from dropbox folder :param path: Dropbox directory with syncronized book data :param book_count: number 
of books to read\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not access_token:\n raise ValueError('Access token must be specified.')\n if not books_path:\n books_path = DropboxDownloader.DEFAULT_DROPBOX_PATH\n self.access_token = access_token\n self.books_path = books_path\n self.workers = workers\n self.logger = logger if logger else logging.getLogger(__name__)\n<|end_body_0|>\n\n<|body_start_1|>\n if not path and (not self.books_path):\n raise ValueError('Path to read data from is not specified')\n if not path:\n path = self.books_path\n client = dropbox.client.DropboxClient(self.access_token)\n meta = client.metadata(path)\n files = filepaths_from_metadata(meta)\n moonreader_files = get_moonreader_files_from_filelist(files)\n if book_count is not None:\n file_pairs = get_same_book_files(moonreader_files)[:book_count]\n else:\n file_pairs = get_same_book_files(moonreader_files)\n for book_dict in dicts_from_pairs(client, file_pairs, workers=self.workers):\n try:\n book = Book.from_fobj_dict(book_dict)\n yield book\n except Exception:\n err_msg = 'Exception occured when creating book object.'\n logging.exception(err_msg)\n<|end_body_1|>\n", "revision_id": "d938e082d141e2e720d17ce573ac342e7653146d", "skeleton": "<|skeleton|>\nclass DropboxDownloader:\n \"\"\"Class to obtain bookdata from dropbox syncronized account\"\"\"\n\n def __init__(self, access_token, books_path='', workers=8, logger=None):\n \"\"\":param access_token: Dropbox access token :param books_path: Absolute path to dropbox's dir with syncronized notes :param workers: number of concurrent workers to download data from Dropbox :param logger: class responsible for logging\"\"\"\n <|body_0|>\n\n def get_books(self, path='', book_count=None):\n \"\"\"Obtains book objects from dropbox folder :param path: Dropbox directory with syncronized book data :param book_count: number of books to read\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DropboxDownloader:\n \"\"\"Class to obtain bookdata from dropbox syncronized account\"\"\"\n\n def __init__(self, access_token, books_path='', workers=8, logger=None):\n \"\"\":param access_token: Dropbox access token :param books_path: Absolute path to dropbox's dir with syncronized notes :param workers: number of concurrent workers to download data from Dropbox :param logger: class responsible for logging\"\"\"\n if not access_token:\n raise ValueError('Access token must be specified.')\n if not books_path:\n books_path = DropboxDownloader.DEFAULT_DROPBOX_PATH\n self.access_token = access_token\n self.books_path = books_path\n self.workers = workers\n self.logger = logger if logger else logging.getLogger(__name__)\n\n def get_books(self, path='', book_count=None):\n \"\"\"Obtains book objects from dropbox folder :param path: Dropbox directory with syncronized book data :param book_count: number of books to read\"\"\"\n if not path and (not self.books_path):\n raise ValueError('Path to read data from is not specified')\n if not path:\n path = self.books_path\n client = dropbox.client.DropboxClient(self.access_token)\n meta = client.metadata(path)\n files = filepaths_from_metadata(meta)\n moonreader_files = get_moonreader_files_from_filelist(files)\n if book_count is not None:\n file_pairs = get_same_book_files(moonreader_files)[:book_count]\n else:\n file_pairs = get_same_book_files(moonreader_files)\n for book_dict in 
dicts_from_pairs(client, file_pairs, workers=self.workers):\n try:\n book = Book.from_fobj_dict(book_dict)\n yield book\n except Exception:\n err_msg = 'Exception occured when creating book object.'\n logging.exception(err_msg)\n", "source": "the_stack_v2_python_sparse", "source_path": "moonreader_tools/handlers/dropbox.py", "source_repo": "Python3pkg/MoonReader_tools", "split": "val", "star_events_count": 0}
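get_books in the record above wraps each Book construction in try/except so that one corrupt entry is logged and skipped instead of aborting the whole generator. A hedged reduction of that pattern (parse_items and the int parser are stand-ins, not the repo's code):

import logging

def parse_items(raw_items, parse):
    # Yield parsed items; log and skip any item whose parse raises,
    # keeping the rest of the stream alive.
    for raw in raw_items:
        try:
            yield parse(raw)
        except Exception:
            logging.exception('skipping malformed item %r', raw)

assert list(parse_items(['1', 'x', '3'], int)) == [1, 3]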
{"blob_id": "d223e3078bd71735dbae8d125303765bb890b809", "bodies": ["team = Team.get(id_=team_id)\nif not team:\n self.error(404, 'Team not found')\nform = forms.Team()\nform.name.data = team.name\nself.render('team.html', title=u'Team: {}'.format(team.name), form=form, edit=True, users=Users.get(), team=team, members=Users_team.get(team_id=team_id))", "team = Team.get(id_=team_id)\nif not team:\n self.error(404, 'Team not found')\nform = forms.Team(self.request.arguments)\nif form.validate():\n Team.update(id_=team_id, name=form.name.data)\n self.redirect('/team/{}'.format(team.id))\n return\nself.render('team.html', title='New team', form=form, edit=True, users=Users.get(), team=team, members=Users_team.get(team_id=team_id))"], "bodies_text": "<|body_start_0|>\n team = Team.get(id_=team_id)\n if not team:\n self.error(404, 'Team not found')\n form = forms.Team()\n form.name.data = team.name\n self.render('team.html', title=u'Team: {}'.format(team.name), form=form, edit=True, users=Users.get(), team=team, members=Users_team.get(team_id=team_id))\n<|end_body_0|>\n\n<|body_start_1|>\n team = Team.get(id_=team_id)\n if not team:\n self.error(404, 'Team not found')\n form = forms.Team(self.request.arguments)\n if form.validate():\n Team.update(id_=team_id, name=form.name.data)\n self.redirect('/team/{}'.format(team.id))\n return\n self.render('team.html', title='New team', form=form, edit=True, users=Users.get(), team=team, members=Users_team.get(team_id=team_id))\n<|end_body_1|>\n", "class_docstring": "Handles the editing of a team.", "class_name": "Edit_handler", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Edit_handler:\n \"\"\"Handles the editing of a team.\"\"\"\n\n def get(self, team_id):\n \"\"\"Renders the edit team form. :param team_id: int\"\"\"\n <|body_0|>\n\n def post(self, team_id):\n \"\"\"Validates and updates the team. Redirects to the new team if successful. :param team_id: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n team = Team.get(id_=team_id)\n if not team:\n self.error(404, 'Team not found')\n form = forms.Team()\n form.name.data = team.name\n self.render('team.html', title=u'Team: {}'.format(team.name), form=form, edit=True, users=Users.get(), team=team, members=Users_team.get(team_id=team_id))\n<|end_body_0|>\n\n<|body_start_1|>\n team = Team.get(id_=team_id)\n if not team:\n self.error(404, 'Team not found')\n form = forms.Team(self.request.arguments)\n if form.validate():\n Team.update(id_=team_id, name=form.name.data)\n self.redirect('/team/{}'.format(team.id))\n return\n self.render('team.html', title='New team', form=form, edit=True, users=Users.get(), team=team, members=Users_team.get(team_id=team_id))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000378", "length_bytes": 3222, "license_type": "no_license", "methods": [{"docstring": "Renders the edit team form. :param team_id: int", "name": "get", "signature": "def get(self, team_id)"}, {"docstring": "Validates and updates the team. Redirects to the new team if successful. :param team_id: int", "name": "post", "signature": "def post(self, team_id)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_040888", "prompt": "Implement the Python class `Edit_handler` described below.\n\nClass description:\nHandles the editing of a team.\n\nMethod signatures and docstrings:\n- def get(self, team_id): Renders the edit team form. 
:param team_id: int\n- def post(self, team_id): Validates and updates the team. Redirects to the new team if successful. :param team_id: int", "prompted_full_text": "Implement the Python class `Edit_handler` described below.\n\nClass description:\nHandles the editing of a team.\n\nMethod signatures and docstrings:\n- def get(self, team_id): Renders the edit team form. :param team_id: int\n- def post(self, team_id): Validates and updates the team. Redirects to the new team if successful. :param team_id: int\n\n<|skeleton|>\nclass Edit_handler:\n \"\"\"Handles the editing of a team.\"\"\"\n\n def get(self, team_id):\n \"\"\"Renders the edit team form. :param team_id: int\"\"\"\n <|body_0|>\n\n def post(self, team_id):\n \"\"\"Validates and updates the team. Redirects to the new team if successful. :param team_id: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n team = Team.get(id_=team_id)\n if not team:\n self.error(404, 'Team not found')\n form = forms.Team()\n form.name.data = team.name\n self.render('team.html', title=u'Team: {}'.format(team.name), form=form, edit=True, users=Users.get(), team=team, members=Users_team.get(team_id=team_id))\n<|end_body_0|>\n\n<|body_start_1|>\n team = Team.get(id_=team_id)\n if not team:\n self.error(404, 'Team not found')\n form = forms.Team(self.request.arguments)\n if form.validate():\n Team.update(id_=team_id, name=form.name.data)\n self.redirect('/team/{}'.format(team.id))\n return\n self.render('team.html', title='New team', form=form, edit=True, users=Users.get(), team=team, members=Users_team.get(team_id=team_id))\n<|end_body_1|>\n", "revision_id": "3f331c7169c90d1fac0d1922b011b56eebbd086a", "skeleton": "<|skeleton|>\nclass Edit_handler:\n \"\"\"Handles the editing of a team.\"\"\"\n\n def get(self, team_id):\n \"\"\"Renders the edit team form. :param team_id: int\"\"\"\n <|body_0|>\n\n def post(self, team_id):\n \"\"\"Validates and updates the team. Redirects to the new team if successful. :param team_id: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Edit_handler:\n \"\"\"Handles the editing of a team.\"\"\"\n\n def get(self, team_id):\n \"\"\"Renders the edit team form. :param team_id: int\"\"\"\n team = Team.get(id_=team_id)\n if not team:\n self.error(404, 'Team not found')\n form = forms.Team()\n form.name.data = team.name\n self.render('team.html', title=u'Team: {}'.format(team.name), form=form, edit=True, users=Users.get(), team=team, members=Users_team.get(team_id=team_id))\n\n def post(self, team_id):\n \"\"\"Validates and updates the team. Redirects to the new team if successful. :param team_id: int\"\"\"\n team = Team.get(id_=team_id)\n if not team:\n self.error(404, 'Team not found')\n form = forms.Team(self.request.arguments)\n if form.validate():\n Team.update(id_=team_id, name=form.name.data)\n self.redirect('/team/{}'.format(team.id))\n return\n self.render('team.html', title='New team', form=form, edit=True, users=Users.get(), team=team, members=Users_team.get(team_id=team_id))\n", "source": "the_stack_v2_python_sparse", "source_path": "src/tlog/web/handlers/team.py", "source_repo": "thomaserlang/TLog", "split": "val", "star_events_count": 2}
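The Edit_handler record follows the usual GET-prefill / POST-validate-or-rerender shape. A guessed, framework-free reduction of that flow (FakeForm and handle_post are invented stand-ins for the repo's form and handler plumbing):

class FakeForm:
    def __init__(self, args=None):
        self.name = (args or {}).get('name', '')
    def validate(self):
        return bool(self.name.strip())

def handle_post(args, save, redirect, rerender):
    # Valid input is persisted and redirected away from the form;
    # invalid input re-renders so the user's entries are not lost.
    form = FakeForm(args)
    if form.validate():
        save(form.name)
        return redirect('/team/1')
    return rerender(form)

out = handle_post({'name': 'A-Team'}, save=lambda n: None,
                  redirect=lambda url: ('redirect', url),
                  rerender=lambda f: ('render', f))
assert out == ('redirect', '/team/1')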
{"blob_id": "d75b29568f6455a63668499a1049e96d83460512", "bodies": ["self.key = key\nself.value = value\nself.lifetime = lifetime\nself.resetTimeout()", "if self.timeout == 0:\n return True\nelse:\n return self.timeout > time.time()", "if self.lifetime == 0:\n self.timeout = 0\nelse:\n self.timeout = time.time() + self.lifetime"], "bodies_text": "<|body_start_0|>\n self.key = key\n self.value = value\n self.lifetime = lifetime\n self.resetTimeout()\n<|end_body_0|>\n\n<|body_start_1|>\n if self.timeout == 0:\n return True\n else:\n return self.timeout > time.time()\n<|end_body_1|>\n\n<|body_start_2|>\n if self.lifetime == 0:\n self.timeout = 0\n else:\n self.timeout = time.time() + self.lifetime\n<|end_body_2|>\n", "class_docstring": "Cache Item", "class_name": "CacheItem", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CacheItem:\n \"\"\"Cache Item\"\"\"\n\n def __init__(self, key, value, lifetime=0):\n \"\"\"Constructor\"\"\"\n <|body_0|>\n\n def checkTimeout(self):\n \"\"\"Check timeout\"\"\"\n <|body_1|>\n\n def resetTimeout(self):\n \"\"\"Reset timeout\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.key = key\n self.value = value\n self.lifetime = lifetime\n self.resetTimeout()\n<|end_body_0|>\n\n<|body_start_1|>\n if self.timeout == 0:\n return True\n else:\n return self.timeout > time.time()\n<|end_body_1|>\n\n<|body_start_2|>\n if self.lifetime == 0:\n self.timeout = 0\n else:\n self.timeout = time.time() + self.lifetime\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000379", "length_bytes": 8313, "license_type": "permissive", "methods": [{"docstring": "Constructor", "name": "__init__", "signature": "def __init__(self, key, value, lifetime=0)"}, {"docstring": "Check timeout", "name": "checkTimeout", "signature": "def checkTimeout(self)"}, {"docstring": "Reset timeout", "name": "resetTimeout", "signature": "def resetTimeout(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_003365", "prompt": "Implement the Python class `CacheItem` described below.\n\nClass description:\nCache Item\n\nMethod signatures and docstrings:\n- def __init__(self, key, value, lifetime=0): Constructor\n- def checkTimeout(self): Check timeout\n- def resetTimeout(self): Reset timeout", "prompted_full_text": "Implement the Python class `CacheItem` described below.\n\nClass description:\nCache Item\n\nMethod signatures and docstrings:\n- def __init__(self, key, value, lifetime=0): Constructor\n- def checkTimeout(self): Check timeout\n- def resetTimeout(self): Reset timeout\n\n<|skeleton|>\nclass CacheItem:\n \"\"\"Cache Item\"\"\"\n\n def __init__(self, key, value, lifetime=0):\n \"\"\"Constructor\"\"\"\n <|body_0|>\n\n def checkTimeout(self):\n \"\"\"Check timeout\"\"\"\n <|body_1|>\n\n def resetTimeout(self):\n \"\"\"Reset timeout\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.key = key\n self.value = value\n self.lifetime = lifetime\n self.resetTimeout()\n<|end_body_0|>\n\n<|body_start_1|>\n if self.timeout == 0:\n return True\n else:\n return self.timeout > time.time()\n<|end_body_1|>\n\n<|body_start_2|>\n if self.lifetime == 0:\n self.timeout = 0\n else:\n self.timeout = time.time() + self.lifetime\n<|end_body_2|>\n", "revision_id": "7afc157e63533548a5f258222db3c20a1eb21dfb", "skeleton": "<|skeleton|>\nclass CacheItem:\n \"\"\"Cache Item\"\"\"\n\n def __init__(self, key, value, lifetime=0):\n \"\"\"Constructor\"\"\"\n <|body_0|>\n\n def checkTimeout(self):\n 
\"\"\"Check timeout\"\"\"\n <|body_1|>\n\n def resetTimeout(self):\n \"\"\"Reset timeout\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CacheItem:\n \"\"\"Cache Item\"\"\"\n\n def __init__(self, key, value, lifetime=0):\n \"\"\"Constructor\"\"\"\n self.key = key\n self.value = value\n self.lifetime = lifetime\n self.resetTimeout()\n\n def checkTimeout(self):\n \"\"\"Check timeout\"\"\"\n if self.timeout == 0:\n return True\n else:\n return self.timeout > time.time()\n\n def resetTimeout(self):\n \"\"\"Reset timeout\"\"\"\n if self.lifetime == 0:\n self.timeout = 0\n else:\n self.timeout = time.time() + self.lifetime\n", "source": "the_stack_v2_python_sparse", "source_path": "src/Core/Cache.py", "source_repo": "ilausuch/CacheServer", "split": "val", "star_events_count": 0}
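The CacheItem record is small enough to exercise directly. The self-contained snippet below condenses the record's class and checks its lifetime semantics, where lifetime=0 means "never expires":

import time

class CacheItem:
    def __init__(self, key, value, lifetime=0):
        self.key, self.value, self.lifetime = key, value, lifetime
        self.resetTimeout()
    def checkTimeout(self):
        # timeout == 0 encodes "no expiry"; otherwise compare to the clock.
        return True if self.timeout == 0 else self.timeout > time.time()
    def resetTimeout(self):
        self.timeout = 0 if self.lifetime == 0 else time.time() + self.lifetime

item = CacheItem('answer', 42, lifetime=0.05)   # expires after ~50 ms
assert item.checkTimeout()
time.sleep(0.06)
assert not item.checkTimeout()
item.resetTimeout()                              # restart the clock
assert item.checkTimeout()
assert CacheItem('pi', 3.14).checkTimeout()      # default lifetime: never expires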
{"blob_id": "8accc383aaa6aaee6c02b33b413589ee235316db", "bodies": ["response = jsonify(get_student(id))\nresponse.headers.add('Access-Control-Allow-Origin', '*')\nreturn response", "response = jsonify(delete_student(id))\nresponse.headers.add('Access-Control-Allow-Origin', '*')\nreturn response", "json_data = request.get_json(force=True)\nresponse = jsonify(update_student(id, **json_data))\nresponse.headers.add('Access-Control-Allow-Origin', '*')\nreturn response"], "bodies_text": "<|body_start_0|>\n response = jsonify(get_student(id))\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\n<|end_body_0|>\n\n<|body_start_1|>\n response = jsonify(delete_student(id))\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\n<|end_body_1|>\n\n<|body_start_2|>\n json_data = request.get_json(force=True)\n response = jsonify(update_student(id, **json_data))\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\n<|end_body_2|>\n", "class_docstring": "", "class_name": "studentByID", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass studentByID:\n\n def get(self, id):\n \"\"\"Returns details of a student\"\"\"\n <|body_0|>\n\n def delete(self, id):\n \"\"\"Delete a student\"\"\"\n <|body_1|>\n\n def put(self, id):\n \"\"\"Update a student\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n response = jsonify(get_student(id))\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\n<|end_body_0|>\n\n<|body_start_1|>\n response = jsonify(delete_student(id))\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\n<|end_body_1|>\n\n<|body_start_2|>\n json_data = request.get_json(force=True)\n response = jsonify(update_student(id, **json_data))\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000380", "length_bytes": 2530, "license_type": "permissive", "methods": [{"docstring": "Returns details of a student", "name": "get", "signature": "def get(self, id)"}, {"docstring": "Delete a student", "name": "delete", "signature": "def delete(self, id)"}, {"docstring": "Update a student", "name": "put", "signature": "def put(self, id)"}], "n_methods": 3, "prompt": "Implement the Python class `studentByID` described below.\n\nClass description:\nImplement the studentByID class.\n\nMethod signatures and docstrings:\n- def get(self, id): Returns details of a student\n- def delete(self, id): Delete a student\n- def put(self, id): Update a student", "prompted_full_text": "Implement the Python class `studentByID` described below.\n\nClass description:\nImplement the studentByID class.\n\nMethod signatures and docstrings:\n- def get(self, id): Returns details of a student\n- def delete(self, id): Delete a student\n- def put(self, id): Update a student\n\n<|skeleton|>\nclass studentByID:\n\n def get(self, id):\n \"\"\"Returns details of a student\"\"\"\n <|body_0|>\n\n def delete(self, id):\n \"\"\"Delete a student\"\"\"\n <|body_1|>\n\n def put(self, id):\n \"\"\"Update a student\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n response = jsonify(get_student(id))\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\n<|end_body_0|>\n\n<|body_start_1|>\n response = jsonify(delete_student(id))\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\n<|end_body_1|>\n\n<|body_start_2|>\n 
json_data = request.get_json(force=True)\n response = jsonify(update_student(id, **json_data))\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\n<|end_body_2|>\n", "revision_id": "09ab988f6570bd8fe64316afb5801f0ae837e3c6", "skeleton": "<|skeleton|>\nclass studentByID:\n\n def get(self, id):\n \"\"\"Returns details of a student\"\"\"\n <|body_0|>\n\n def delete(self, id):\n \"\"\"Delete a student\"\"\"\n <|body_1|>\n\n def put(self, id):\n \"\"\"Update a student\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class studentByID:\n def get(self, id):\n \"\"\"Returns details of a student\"\"\"\n response = jsonify(get_student(id))\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\n\n def delete(self, id):\n \"\"\"Delete a student\"\"\"\n response = jsonify(delete_student(id))\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\n\n def put(self, id):\n \"\"\"Update a student\"\"\"\n json_data = request.get_json(force=True)\n response = jsonify(update_student(id, **json_data))\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\n", "source": "the_stack_v2_python_sparse", "source_path": "src/api/endpoints/students.py", "source_repo": "douglasramos/pcs3443-escola-aviacao", "split": "val", "star_events_count": 1}
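All three studentByID handlers repeat the same Access-Control-Allow-Origin header; where the framework permits, that is a natural candidate for a decorator. A hedged, Flask-free sketch (with_cors and the dict-shaped response are assumptions for illustration):

def with_cors(handler):
    # Wrap a handler so every response it returns carries the
    # permissive CORS header the record sets by hand.
    def wrapper(*args, **kwargs):
        response = handler(*args, **kwargs)
        response.setdefault('headers', {})['Access-Control-Allow-Origin'] = '*'
        return response
    return wrapper

@with_cors
def get_student_view(student_id):
    return {'body': {'id': student_id}}

assert get_student_view(7)['headers']['Access-Control-Allow-Origin'] == '*'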
{"blob_id": "654ca8b7292df6eb880b4b68a6a5b0db3564bc39", "bodies": ["super().__init__()\nself.view = View()\nself.publishers = {}\nself.subscribers = {}\nself.view.publishers.itemDoubleClicked.connect(self.register)\nself.view.subscribers.itemClicked.connect(self.update_feed)\nself.view.send_button.clicked.connect(self.send)\nself.view.update_button.clicked.connect(self.update)\np1 = Publisher('Der beste Verlag')\np1.add_newspaper('Die beste Zeitung')\np1.add_newspaper('Die besten News')\np2 = Publisher('Der coolste Verlag')\np2.add_newspaper('Die coolste Zeitung')\nself.publishers[p1.name] = p1\nself.publishers[p2.name] = p2\ns1 = Subscriber('Hans')\ns2 = Subscriber('Peter')\ns3 = Subscriber('Günther')\nself.subscribers[s1.name] = s1\nself.subscribers[s2.name] = s2\nself.subscribers[s3.name] = s3\nself.update()", "self.view.publishers.clear()\nself.view.subscribers.clear()\nfor publisher in self.publishers.values():\n p_widget = QTreeWidgetItem()\n p_widget.setText(0, publisher.name)\n for newspaper in publisher.newspapers:\n n_widget = QTreeWidgetItem(p_widget)\n n_widget.setText(0, newspaper)\n self.view.publishers.addTopLevelItem(p_widget)\nfor subscriber in self.subscribers:\n s_widget = QTreeWidgetItem()\n s_widget.setText(0, subscriber)\n self.view.subscribers.addTopLevelItem(s_widget)", "subscriber = self.subscribers[item.text(column)]\nif subscriber:\n self.view.feed.setText('\\n'.join(subscriber.feed))", "for selected in self.view.publishers.selectedItems():\n if selected.parent():\n newspaper = self.publishers[selected.parent().text(0)].newspapers[selected.text(0)]\n newspaper.publish(self.view.input.toPlainText())\n print('Updating newspaper', newspaper.name)\nself.view.input.clear()", "for selected_subscriber in self.view.subscribers.selectedItems():\n subscriber = self.subscribers[selected_subscriber.text(column)]\n for selected_observable in self.view.publishers.selectedItems():\n if selected_observable.parent():\n observable = self.publishers[selected_observable.parent().text(0)].newspapers[selected_observable.text(0)]\n else:\n observable = self.publishers[item.text(column)]\n if subscriber and observable:\n observable.register(subscriber)\n print('%s subscribed to %s' % (subscriber.name, observable.name))"], "bodies_text": "<|body_start_0|>\n super().__init__()\n self.view = View()\n self.publishers = {}\n self.subscribers = {}\n self.view.publishers.itemDoubleClicked.connect(self.register)\n self.view.subscribers.itemClicked.connect(self.update_feed)\n self.view.send_button.clicked.connect(self.send)\n self.view.update_button.clicked.connect(self.update)\n p1 = Publisher('Der beste Verlag')\n p1.add_newspaper('Die beste Zeitung')\n p1.add_newspaper('Die besten News')\n p2 = Publisher('Der coolste Verlag')\n p2.add_newspaper('Die coolste Zeitung')\n self.publishers[p1.name] = p1\n self.publishers[p2.name] = p2\n s1 = Subscriber('Hans')\n s2 = Subscriber('Peter')\n s3 = Subscriber('Günther')\n self.subscribers[s1.name] = s1\n self.subscribers[s2.name] = s2\n self.subscribers[s3.name] = s3\n self.update()\n<|end_body_0|>\n\n<|body_start_1|>\n self.view.publishers.clear()\n self.view.subscribers.clear()\n for publisher in self.publishers.values():\n p_widget = QTreeWidgetItem()\n p_widget.setText(0, publisher.name)\n for newspaper in publisher.newspapers:\n n_widget = QTreeWidgetItem(p_widget)\n n_widget.setText(0, newspaper)\n self.view.publishers.addTopLevelItem(p_widget)\n for subscriber in self.subscribers:\n s_widget = QTreeWidgetItem()\n s_widget.setText(0, 
subscriber)\n self.view.subscribers.addTopLevelItem(s_widget)\n<|end_body_1|>\n\n<|body_start_2|>\n subscriber = self.subscribers[item.text(column)]\n if subscriber:\n self.view.feed.setText('\\n'.join(subscriber.feed))\n<|end_body_2|>\n\n<|body_start_3|>\n for selected in self.view.publishers.selectedItems():\n if selected.parent():\n newspaper = self.publishers[selected.parent().text(0)].newspapers[selected.text(0)]\n newspaper.publish(self.view.input.toPlainText())\n print('Updating newspaper', newspaper.name)\n self.view.input.clear()\n<|end_body_3|>\n\n<|body_start_4|>\n for selected_subscriber in self.view.subscribers.selectedItems():\n subscriber = self.subscribers[selected_subscriber.text(column)]\n for selected_observable in self.view.publishers.selectedItems():\n if selected_observable.parent():\n observable = self.publishers[selected_observable.parent().text(0)].newspapers[selected_observable.text(0)]\n else:\n observable = self.publishers[item.text(column)]\n if subscriber and observable:\n observable.register(subscriber)\n print('%s subscribed to %s' % (subscriber.name, observable.name))\n<|end_body_4|>\n", "class_docstring": "", "class_name": "Control", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Control:\n\n def __init__(self):\n \"\"\"The GUI's control unit used to update data and process user input\"\"\"\n <|body_0|>\n\n def update(self):\n \"\"\"Updates listed publishers, newspapers and subscribers\"\"\"\n <|body_1|>\n\n def update_feed(self, item, column):\n \"\"\"Updates the news feed whenever a new subscriber is selected :param item: QTreeWidgetItem :param column: int\"\"\"\n <|body_2|>\n\n def send(self):\n \"\"\"Sends the text currently in the input box to a selected newspaper\"\"\"\n <|body_3|>\n\n def register(self, item, column):\n \"\"\"Registers the selected subscriber to the selected publisher's and / or newspaper's observer list :param item: QTreeWidgetItem :param column: int\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.view = View()\n self.publishers = {}\n self.subscribers = {}\n self.view.publishers.itemDoubleClicked.connect(self.register)\n self.view.subscribers.itemClicked.connect(self.update_feed)\n self.view.send_button.clicked.connect(self.send)\n self.view.update_button.clicked.connect(self.update)\n p1 = Publisher('Der beste Verlag')\n p1.add_newspaper('Die beste Zeitung')\n p1.add_newspaper('Die besten News')\n p2 = Publisher('Der coolste Verlag')\n p2.add_newspaper('Die coolste Zeitung')\n self.publishers[p1.name] = p1\n self.publishers[p2.name] = p2\n s1 = Subscriber('Hans')\n s2 = Subscriber('Peter')\n s3 = Subscriber('Günther')\n self.subscribers[s1.name] = s1\n self.subscribers[s2.name] = s2\n self.subscribers[s3.name] = s3\n self.update()\n<|end_body_0|>\n\n<|body_start_1|>\n self.view.publishers.clear()\n self.view.subscribers.clear()\n for publisher in self.publishers.values():\n p_widget = QTreeWidgetItem()\n p_widget.setText(0, publisher.name)\n for newspaper in publisher.newspapers:\n n_widget = QTreeWidgetItem(p_widget)\n n_widget.setText(0, newspaper)\n self.view.publishers.addTopLevelItem(p_widget)\n for subscriber in self.subscribers:\n s_widget = QTreeWidgetItem()\n s_widget.setText(0, subscriber)\n self.view.subscribers.addTopLevelItem(s_widget)\n<|end_body_1|>\n\n<|body_start_2|>\n subscriber = self.subscribers[item.text(column)]\n if subscriber:\n 
self.view.feed.setText('\\n'.join(subscriber.feed))\n<|end_body_2|>\n\n<|body_start_3|>\n for selected in self.view.publishers.selectedItems():\n if selected.parent():\n newspaper = self.publishers[selected.parent().text(0)].newspapers[selected.text(0)]\n newspaper.publish(self.view.input.toPlainText())\n print('Updating newspaper', newspaper.name)\n self.view.input.clear()\n<|end_body_3|>\n\n<|body_start_4|>\n for selected_subscriber in self.view.subscribers.selectedItems():\n subscriber = self.subscribers[selected_subscriber.text(column)]\n for selected_observable in self.view.publishers.selectedItems():\n if selected_observable.parent():\n observable = self.publishers[selected_observable.parent().text(0)].newspapers[selected_observable.text(0)]\n else:\n observable = self.publishers[item.text(column)]\n if subscriber and observable:\n observable.register(subscriber)\n print('%s subscribed to %s' % (subscriber.name, observable.name))\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000381", "length_bytes": 3768, "license_type": "no_license", "methods": [{"docstring": "The GUI's control unit used to update data and process user input", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Updates listed publishers, newspapers and subscribers", "name": "update", "signature": "def update(self)"}, {"docstring": "Updates the news feed whenever a new subscriber is selected :param item: QTreeWidgetItem :param column: int", "name": "update_feed", "signature": "def update_feed(self, item, column)"}, {"docstring": "Sends the text currently in the input box to a selected newspaper", "name": "send", "signature": "def send(self)"}, {"docstring": "Registers the selected subscriber to the selected publisher's and / or newspaper's observer list :param item: QTreeWidgetItem :param column: int", "name": "register", "signature": "def register(self, item, column)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_040768", "prompt": "Implement the Python class `Control` described below.\n\nClass description:\nImplement the Control class.\n\nMethod signatures and docstrings:\n- def __init__(self): The GUI's control unit used to update data and process user input\n- def update(self): Updates listed publishers, newspapers and subscribers\n- def update_feed(self, item, column): Updates the news feed whenever a new subscriber is selected :param item: QTreeWidgetItem :param column: int\n- def send(self): Sends the text currently in the input box to a selected newspaper\n- def register(self, item, column): Registers the selected subscriber to the selected publisher's and / or newspaper's observer list :param item: QTreeWidgetItem :param column: int", "prompted_full_text": "Implement the Python class `Control` described below.\n\nClass description:\nImplement the Control class.\n\nMethod signatures and docstrings:\n- def __init__(self): The GUI's control unit used to update data and process user input\n- def update(self): Updates listed publishers, newspapers and subscribers\n- def update_feed(self, item, column): Updates the news feed whenever a new subscriber is selected :param item: QTreeWidgetItem :param column: int\n- def send(self): Sends the text currently in the input box to a selected newspaper\n- def register(self, item, column): Registers the selected subscriber to the selected publisher's and / or newspaper's observer list :param item: QTreeWidgetItem :param column: int\n\n<|skeleton|>\nclass Control:\n\n def __init__(self):\n \"\"\"The GUI's control unit 
used to update data and process user input\"\"\"\n <|body_0|>\n\n def update(self):\n \"\"\"Updates listed publishers, newspapers and subscribers\"\"\"\n <|body_1|>\n\n def update_feed(self, item, column):\n \"\"\"Updates the news feed whenever a new subscriber is selected :param item: QTreeWidgetItem :param column: int\"\"\"\n <|body_2|>\n\n def send(self):\n \"\"\"Sends the text currently in the input box to a selected newspaper\"\"\"\n <|body_3|>\n\n def register(self, item, column):\n \"\"\"Registers the selected subscriber to the selected publisher's and / or newspaper's observer list :param item: QTreeWidgetItem :param column: int\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.view = View()\n self.publishers = {}\n self.subscribers = {}\n self.view.publishers.itemDoubleClicked.connect(self.register)\n self.view.subscribers.itemClicked.connect(self.update_feed)\n self.view.send_button.clicked.connect(self.send)\n self.view.update_button.clicked.connect(self.update)\n p1 = Publisher('Der beste Verlag')\n p1.add_newspaper('Die beste Zeitung')\n p1.add_newspaper('Die besten News')\n p2 = Publisher('Der coolste Verlag')\n p2.add_newspaper('Die coolste Zeitung')\n self.publishers[p1.name] = p1\n self.publishers[p2.name] = p2\n s1 = Subscriber('Hans')\n s2 = Subscriber('Peter')\n s3 = Subscriber('Günther')\n self.subscribers[s1.name] = s1\n self.subscribers[s2.name] = s2\n self.subscribers[s3.name] = s3\n self.update()\n<|end_body_0|>\n\n<|body_start_1|>\n self.view.publishers.clear()\n self.view.subscribers.clear()\n for publisher in self.publishers.values():\n p_widget = QTreeWidgetItem()\n p_widget.setText(0, publisher.name)\n for newspaper in publisher.newspapers:\n n_widget = QTreeWidgetItem(p_widget)\n n_widget.setText(0, newspaper)\n self.view.publishers.addTopLevelItem(p_widget)\n for subscriber in self.subscribers:\n s_widget = QTreeWidgetItem()\n s_widget.setText(0, subscriber)\n self.view.subscribers.addTopLevelItem(s_widget)\n<|end_body_1|>\n\n<|body_start_2|>\n subscriber = self.subscribers[item.text(column)]\n if subscriber:\n self.view.feed.setText('\\n'.join(subscriber.feed))\n<|end_body_2|>\n\n<|body_start_3|>\n for selected in self.view.publishers.selectedItems():\n if selected.parent():\n newspaper = self.publishers[selected.parent().text(0)].newspapers[selected.text(0)]\n newspaper.publish(self.view.input.toPlainText())\n print('Updating newspaper', newspaper.name)\n self.view.input.clear()\n<|end_body_3|>\n\n<|body_start_4|>\n for selected_subscriber in self.view.subscribers.selectedItems():\n subscriber = self.subscribers[selected_subscriber.text(column)]\n for selected_observable in self.view.publishers.selectedItems():\n if selected_observable.parent():\n observable = self.publishers[selected_observable.parent().text(0)].newspapers[selected_observable.text(0)]\n else:\n observable = self.publishers[item.text(column)]\n if subscriber and observable:\n observable.register(subscriber)\n print('%s subscribed to %s' % (subscriber.name, observable.name))\n<|end_body_4|>\n", "revision_id": "113cee20f8ac8c94b7cd7ffa2bb6e2c0b1478412", "skeleton": "<|skeleton|>\nclass Control:\n\n def __init__(self):\n \"\"\"The GUI's control unit used to update data and process user input\"\"\"\n <|body_0|>\n\n def update(self):\n \"\"\"Updates listed publishers, newspapers and subscribers\"\"\"\n <|body_1|>\n\n def update_feed(self, item, column):\n \"\"\"Updates the news feed whenever a new subscriber is selected :param item: QTreeWidgetItem :param 
column: int\"\"\"\n <|body_2|>\n\n def send(self):\n \"\"\"Sends the text currently in the input box to a selected newspaper\"\"\"\n <|body_3|>\n\n def register(self, item, column):\n \"\"\"Registers the selected subscriber to the selected publisher's and / or newspaper's observer list :param item: QTreeWidgetItem :param column: int\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Control:\n def __init__(self):\n \"\"\"The GUI's control unit used to update data and process user input\"\"\"\n super().__init__()\n self.view = View()\n self.publishers = {}\n self.subscribers = {}\n self.view.publishers.itemDoubleClicked.connect(self.register)\n self.view.subscribers.itemClicked.connect(self.update_feed)\n self.view.send_button.clicked.connect(self.send)\n self.view.update_button.clicked.connect(self.update)\n p1 = Publisher('Der beste Verlag')\n p1.add_newspaper('Die beste Zeitung')\n p1.add_newspaper('Die besten News')\n p2 = Publisher('Der coolste Verlag')\n p2.add_newspaper('Die coolste Zeitung')\n self.publishers[p1.name] = p1\n self.publishers[p2.name] = p2\n s1 = Subscriber('Hans')\n s2 = Subscriber('Peter')\n s3 = Subscriber('Günther')\n self.subscribers[s1.name] = s1\n self.subscribers[s2.name] = s2\n self.subscribers[s3.name] = s3\n self.update()\n\n def update(self):\n \"\"\"Updates listed publishers, newspapers and subscribers\"\"\"\n self.view.publishers.clear()\n self.view.subscribers.clear()\n for publisher in self.publishers.values():\n p_widget = QTreeWidgetItem()\n p_widget.setText(0, publisher.name)\n for newspaper in publisher.newspapers:\n n_widget = QTreeWidgetItem(p_widget)\n n_widget.setText(0, newspaper)\n self.view.publishers.addTopLevelItem(p_widget)\n for subscriber in self.subscribers:\n s_widget = QTreeWidgetItem()\n s_widget.setText(0, subscriber)\n self.view.subscribers.addTopLevelItem(s_widget)\n\n def update_feed(self, item, column):\n \"\"\"Updates the news feed whenever a new subscriber is selected :param item: QTreeWidgetItem :param column: int\"\"\"\n subscriber = self.subscribers[item.text(column)]\n if subscriber:\n self.view.feed.setText('\\n'.join(subscriber.feed))\n\n def send(self):\n \"\"\"Sends the text currently in the input box to a selected newspaper\"\"\"\n for selected in self.view.publishers.selectedItems():\n if selected.parent():\n newspaper = self.publishers[selected.parent().text(0)].newspapers[selected.text(0)]\n newspaper.publish(self.view.input.toPlainText())\n print('Updating newspaper', newspaper.name)\n self.view.input.clear()\n\n def register(self, item, column):\n \"\"\"Registers the selected subscriber to the selected publisher's and / or newspaper's observer list :param item: QTreeWidgetItem :param column: int\"\"\"\n for selected_subscriber in self.view.subscribers.selectedItems():\n subscriber = self.subscribers[selected_subscriber.text(column)]\n for selected_observable in self.view.publishers.selectedItems():\n if selected_observable.parent():\n observable = self.publishers[selected_observable.parent().text(0)].newspapers[selected_observable.text(0)]\n else:\n observable = self.publishers[item.text(column)]\n if subscriber and observable:\n observable.register(subscriber)\n print('%s subscribed to %s' % (subscriber.name, observable.name))\n", "source": "the_stack_v2_python_sparse", "source_path": "13-observer/control/control.py", "source_repo": "mreichl-tgm/sew-4", "split": 
"val", "star_events_count": 0}
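The Control record wires a GUI onto Publisher/Newspaper/Subscriber objects defined outside the record, so their shape can only be inferred. A guessed minimal observer core consistent with the calls made above (register, publish, and the .feed list); this is a sketch, not the repo's actual classes:

class Observable:
    def __init__(self, name):
        self.name, self.observers = name, []
    def register(self, observer):
        # Idempotent subscription, matching the double-click handler's intent.
        if observer not in self.observers:
            self.observers.append(observer)
    def publish(self, message):
        for observer in self.observers:
            observer.feed.append(message)

class Subscriber:
    def __init__(self, name):
        self.name, self.feed = name, []

paper = Observable('Die beste Zeitung')
hans = Subscriber('Hans')
paper.register(hans)
paper.publish('Breaking news')
assert hans.feed == ['Breaking news']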
{"blob_id": "d8f2da941f0f28ce849d05c5b387b053fcb42a6d", "bodies": ["if not self.auth_based():\n self.set_secure_cookie('username', 'none')\n self.redirect('/')\n return\nif self.get_current_user():\n self.redirect('/')\n return\nself.render('login.html', error=self.get_argument('error', ''))", "username = self.get_argument('username', '')\npassword = self.get_argument('password', '')\naccounts_manager = srv_or_die('accountsmanager')\nif accounts_manager.check_permission(username, password):\n self.set_secure_cookie('username', username)\n self.redirect('/index.html')\nelse:\n self.clear_cookie('username')\n self.redirect('/auth/login?error=Wrong credentials!')"], "bodies_text": "<|body_start_0|>\n if not self.auth_based():\n self.set_secure_cookie('username', 'none')\n self.redirect('/')\n return\n if self.get_current_user():\n self.redirect('/')\n return\n self.render('login.html', error=self.get_argument('error', ''))\n<|end_body_0|>\n\n<|body_start_1|>\n username = self.get_argument('username', '')\n password = self.get_argument('password', '')\n accounts_manager = srv_or_die('accountsmanager')\n if accounts_manager.check_permission(username, password):\n self.set_secure_cookie('username', username)\n self.redirect('/index.html')\n else:\n self.clear_cookie('username')\n self.redirect('/auth/login?error=Wrong credentials!')\n<|end_body_1|>\n", "class_docstring": "Login page handler.", "class_name": "AuthLoginHandler", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AuthLoginHandler:\n \"\"\"Login page handler.\"\"\"\n\n def get(self):\n \"\"\"Render login page.\"\"\"\n <|body_0|>\n\n def post(self):\n \"\"\"Process login credentials.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not self.auth_based():\n self.set_secure_cookie('username', 'none')\n self.redirect('/')\n return\n if self.get_current_user():\n self.redirect('/')\n return\n self.render('login.html', error=self.get_argument('error', ''))\n<|end_body_0|>\n\n<|body_start_1|>\n username = self.get_argument('username', '')\n password = self.get_argument('password', '')\n accounts_manager = srv_or_die('accountsmanager')\n if accounts_manager.check_permission(username, password):\n self.set_secure_cookie('username', username)\n self.redirect('/index.html')\n else:\n self.clear_cookie('username')\n self.redirect('/auth/login?error=Wrong credentials!')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000382", "length_bytes": 16584, "license_type": "permissive", "methods": [{"docstring": "Render login page.", "name": "get", "signature": "def get(self)"}, {"docstring": "Process login credentials.", "name": "post", "signature": "def post(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_021465", "prompt": "Implement the Python class `AuthLoginHandler` described below.\n\nClass description:\nLogin page handler.\n\nMethod signatures and docstrings:\n- def get(self): Render login page.\n- def post(self): Process login credentials.", "prompted_full_text": "Implement the Python class `AuthLoginHandler` described below.\n\nClass description:\nLogin page handler.\n\nMethod signatures and docstrings:\n- def get(self): Render login page.\n- def post(self): Process login credentials.\n\n<|skeleton|>\nclass AuthLoginHandler:\n \"\"\"Login page handler.\"\"\"\n\n def get(self):\n \"\"\"Render login page.\"\"\"\n <|body_0|>\n\n def post(self):\n \"\"\"Process login credentials.\"\"\"\n 
<|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not self.auth_based():\n self.set_secure_cookie('username', 'none')\n self.redirect('/')\n return\n if self.get_current_user():\n self.redirect('/')\n return\n self.render('login.html', error=self.get_argument('error', ''))\n<|end_body_0|>\n\n<|body_start_1|>\n username = self.get_argument('username', '')\n password = self.get_argument('password', '')\n accounts_manager = srv_or_die('accountsmanager')\n if accounts_manager.check_permission(username, password):\n self.set_secure_cookie('username', username)\n self.redirect('/index.html')\n else:\n self.clear_cookie('username')\n self.redirect('/auth/login?error=Wrong credentials!')\n<|end_body_1|>\n", "revision_id": "38eac8eebf57da4bec07518383ab65a5544445fe", "skeleton": "<|skeleton|>\nclass AuthLoginHandler:\n \"\"\"Login page handler.\"\"\"\n\n def get(self):\n \"\"\"Render login page.\"\"\"\n <|body_0|>\n\n def post(self):\n \"\"\"Process login credentials.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class AuthLoginHandler:\n \"\"\"Login page handler.\"\"\"\n\n def get(self):\n \"\"\"Render login page.\"\"\"\n if not self.auth_based():\n self.set_secure_cookie('username', 'none')\n self.redirect('/')\n return\n if self.get_current_user():\n self.redirect('/')\n return\n self.render('login.html', error=self.get_argument('error', ''))\n\n def post(self):\n \"\"\"Process login credentials.\"\"\"\n username = self.get_argument('username', '')\n password = self.get_argument('password', '')\n accounts_manager = srv_or_die('accountsmanager')\n if accounts_manager.check_permission(username, password):\n self.set_secure_cookie('username', username)\n self.redirect('/index.html')\n else:\n self.clear_cookie('username')\n self.redirect('/auth/login?error=Wrong credentials!')\n", "source": "the_stack_v2_python_sparse", "source_path": "empower_core/apimanager/apimanager.py", "source_repo": "5g-empower/empower-core", "split": "val", "star_events_count": 3}
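AuthLoginHandler.post above reduces to: check credentials, set or clear the session cookie, redirect accordingly. A framework-free sketch with injected stand-ins (login and its parameters are illustrative, not the record's Tornado API):

def login(username, password, check_permission, set_cookie, clear_cookie):
    # A successful check establishes the session; a failure scrubs any
    # stale cookie and bounces back to the login page with an error hint.
    if check_permission(username, password):
        set_cookie('username', username)
        return '/index.html'
    clear_cookie('username')
    return '/auth/login?error=Wrong credentials!'

cookies = {}
dest = login('admin', 's3cret',
             check_permission=lambda u, p: (u, p) == ('admin', 's3cret'),
             set_cookie=cookies.__setitem__,
             clear_cookie=lambda k: cookies.pop(k, None))
assert dest == '/index.html' and cookies['username'] == 'admin'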
{"blob_id": "e55444e692aa513bb160f8ef4edc66ac950c9a42", "bodies": ["self.dimensionality = len(search_space)\nself.search_space = search_space\nself.numOfGeneratedPoints = 0\nself.returned_points = []\nself.hypercube_coordinates = []\nfor dimension in search_space:\n dim_indexes = [float(x) for x in range(len(dimension))]\n self.hypercube_coordinates.append(dim_indexes)\nself.hypercube = list(product(*self.hypercube_coordinates))", "sequence = sobol_seq.i4_sobol_generate(self.dimensionality, skip + number_of_data_points)[skip:]\nself.numOfGeneratedPoints += number_of_data_points\nreturn sequence", "skip = self.numOfGeneratedPoints\npoint = self.__generate_sobol_seq(skip=skip)[0]\npoint = [len(self.hypercube_coordinates[dimension_index]) * dimension_value for dimension_index, dimension_value in enumerate(point)]\ndistances_dict = {}\nfor hypercube_point in self.hypercube:\n if len(distances_dict) < self.numOfGeneratedPoints:\n distances_dict[euclidean(point, hypercube_point)] = hypercube_point\n elif len(distances_dict) == self.numOfGeneratedPoints:\n distances = list(distances_dict.keys())\n distances.sort()\n del distances_dict[distances.pop()]\n distances_dict[euclidean(point, hypercube_point)] = hypercube_point\ndistances = list(distances_dict.keys())\ndistances.sort()\nfor current_distance in distances:\n if distances_dict[current_distance] not in self.returned_points:\n point = distances_dict[current_distance]\n self.returned_points.append(point)\n break\nresult_to_return = [self.search_space[int(dimension_index)][int(dimension_value)] for dimension_index, dimension_value in enumerate(point)]\nreturn result_to_return"], "bodies_text": "<|body_start_0|>\n self.dimensionality = len(search_space)\n self.search_space = search_space\n self.numOfGeneratedPoints = 0\n self.returned_points = []\n self.hypercube_coordinates = []\n for dimension in search_space:\n dim_indexes = [float(x) for x in range(len(dimension))]\n self.hypercube_coordinates.append(dim_indexes)\n self.hypercube = list(product(*self.hypercube_coordinates))\n<|end_body_0|>\n\n<|body_start_1|>\n sequence = sobol_seq.i4_sobol_generate(self.dimensionality, skip + number_of_data_points)[skip:]\n self.numOfGeneratedPoints += number_of_data_points\n return sequence\n<|end_body_1|>\n\n<|body_start_2|>\n skip = self.numOfGeneratedPoints\n point = self.__generate_sobol_seq(skip=skip)[0]\n point = [len(self.hypercube_coordinates[dimension_index]) * dimension_value for dimension_index, dimension_value in enumerate(point)]\n distances_dict = {}\n for hypercube_point in self.hypercube:\n if len(distances_dict) < self.numOfGeneratedPoints:\n distances_dict[euclidean(point, hypercube_point)] = hypercube_point\n elif len(distances_dict) == self.numOfGeneratedPoints:\n distances = list(distances_dict.keys())\n distances.sort()\n del distances_dict[distances.pop()]\n distances_dict[euclidean(point, hypercube_point)] = hypercube_point\n distances = list(distances_dict.keys())\n distances.sort()\n for current_distance in distances:\n if distances_dict[current_distance] not in self.returned_points:\n point = distances_dict[current_distance]\n self.returned_points.append(point)\n break\n result_to_return = [self.search_space[int(dimension_index)][int(dimension_value)] for dimension_index, dimension_value in enumerate(point)]\n return result_to_return\n<|end_body_2|>\n", "class_docstring": "", "class_name": "SobolSequence", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass 
SobolSequence:\n\n def __init__(self, selection_algorithm_config, search_space):\n \"\"\"Creates SobolSequence instance that stores information about number of generated points :param selection_algorithm_config: Dict with configuration of selection algorithm. :param search_space: list of dimensions that describes a\"\"\"\n <|body_0|>\n\n def __generate_sobol_seq(self, number_of_data_points=1, skip=0):\n \"\"\"Generates sobol sequence of uniformly distributed data points in N dimensional space. :param number_of_data_points: int - number of points that needed to be generated in this iteration :param skip: int - number of points to skip from the beginning of sequence, because sobol_seq.i4_sobol_generate stateless. :return: sobol sequence as numpy array.\"\"\"\n <|body_1|>\n\n def get_next_point(self):\n \"\"\"Will return next data point from initiated Sobol sequence imposed to the search space. :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.dimensionality = len(search_space)\n self.search_space = search_space\n self.numOfGeneratedPoints = 0\n self.returned_points = []\n self.hypercube_coordinates = []\n for dimension in search_space:\n dim_indexes = [float(x) for x in range(len(dimension))]\n self.hypercube_coordinates.append(dim_indexes)\n self.hypercube = list(product(*self.hypercube_coordinates))\n<|end_body_0|>\n\n<|body_start_1|>\n sequence = sobol_seq.i4_sobol_generate(self.dimensionality, skip + number_of_data_points)[skip:]\n self.numOfGeneratedPoints += number_of_data_points\n return sequence\n<|end_body_1|>\n\n<|body_start_2|>\n skip = self.numOfGeneratedPoints\n point = self.__generate_sobol_seq(skip=skip)[0]\n point = [len(self.hypercube_coordinates[dimension_index]) * dimension_value for dimension_index, dimension_value in enumerate(point)]\n distances_dict = {}\n for hypercube_point in self.hypercube:\n if len(distances_dict) < self.numOfGeneratedPoints:\n distances_dict[euclidean(point, hypercube_point)] = hypercube_point\n elif len(distances_dict) == self.numOfGeneratedPoints:\n distances = list(distances_dict.keys())\n distances.sort()\n del distances_dict[distances.pop()]\n distances_dict[euclidean(point, hypercube_point)] = hypercube_point\n distances = list(distances_dict.keys())\n distances.sort()\n for current_distance in distances:\n if distances_dict[current_distance] not in self.returned_points:\n point = distances_dict[current_distance]\n self.returned_points.append(point)\n break\n result_to_return = [self.search_space[int(dimension_index)][int(dimension_value)] for dimension_index, dimension_value in enumerate(point)]\n return result_to_return\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000383", "length_bytes": 4166, "license_type": "permissive", "methods": [{"docstring": "Creates SobolSequence instance that stores information about number of generated points :param selection_algorithm_config: Dict with configuration of selection algorithm. :param search_space: list of dimensions that describes a", "name": "__init__", "signature": "def __init__(self, selection_algorithm_config, search_space)"}, {"docstring": "Generates sobol sequence of uniformly distributed data points in N dimensional space. :param number_of_data_points: int - number of points that needed to be generated in this iteration :param skip: int - number of points to skip from the beginning of sequence, because sobol_seq.i4_sobol_generate stateless. 
:return: sobol sequence as numpy array.", "name": "__generate_sobol_seq", "signature": "def __generate_sobol_seq(self, number_of_data_points=1, skip=0)"}, {"docstring": "Will return next data point from initiated Sobol sequence imposed to the search space. :return:", "name": "get_next_point", "signature": "def get_next_point(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_015012", "prompt": "Implement the Python class `SobolSequence` described below.\n\nClass description:\nImplement the SobolSequence class.\n\nMethod signatures and docstrings:\n- def __init__(self, selection_algorithm_config, search_space): Creates SobolSequence instance that stores information about number of generated points :param selection_algorithm_config: Dict with configuration of selection algorithm. :param search_space: list of dimensions that describes a\n- def __generate_sobol_seq(self, number_of_data_points=1, skip=0): Generates sobol sequence of uniformly distributed data points in N dimensional space. :param number_of_data_points: int - number of points that needed to be generated in this iteration :param skip: int - number of points to skip from the beginning of sequence, because sobol_seq.i4_sobol_generate stateless. :return: sobol sequence as numpy array.\n- def get_next_point(self): Will return next data point from initiated Sobol sequence imposed to the search space. :return:", "prompted_full_text": "Implement the Python class `SobolSequence` described below.\n\nClass description:\nImplement the SobolSequence class.\n\nMethod signatures and docstrings:\n- def __init__(self, selection_algorithm_config, search_space): Creates SobolSequence instance that stores information about number of generated points :param selection_algorithm_config: Dict with configuration of selection algorithm. :param search_space: list of dimensions that describes a\n- def __generate_sobol_seq(self, number_of_data_points=1, skip=0): Generates sobol sequence of uniformly distributed data points in N dimensional space. :param number_of_data_points: int - number of points that needed to be generated in this iteration :param skip: int - number of points to skip from the beginning of sequence, because sobol_seq.i4_sobol_generate stateless. :return: sobol sequence as numpy array.\n- def get_next_point(self): Will return next data point from initiated Sobol sequence imposed to the search space. :return:\n\n<|skeleton|>\nclass SobolSequence:\n\n def __init__(self, selection_algorithm_config, search_space):\n \"\"\"Creates SobolSequence instance that stores information about number of generated points :param selection_algorithm_config: Dict with configuration of selection algorithm. :param search_space: list of dimensions that describes a\"\"\"\n <|body_0|>\n\n def __generate_sobol_seq(self, number_of_data_points=1, skip=0):\n \"\"\"Generates sobol sequence of uniformly distributed data points in N dimensional space. :param number_of_data_points: int - number of points that needed to be generated in this iteration :param skip: int - number of points to skip from the beginning of sequence, because sobol_seq.i4_sobol_generate stateless. :return: sobol sequence as numpy array.\"\"\"\n <|body_1|>\n\n def get_next_point(self):\n \"\"\"Will return next data point from initiated Sobol sequence imposed to the search space. 
:return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.dimensionality = len(search_space)\n self.search_space = search_space\n self.numOfGeneratedPoints = 0\n self.returned_points = []\n self.hypercube_coordinates = []\n for dimension in search_space:\n dim_indexes = [float(x) for x in range(len(dimension))]\n self.hypercube_coordinates.append(dim_indexes)\n self.hypercube = list(product(*self.hypercube_coordinates))\n<|end_body_0|>\n\n<|body_start_1|>\n sequence = sobol_seq.i4_sobol_generate(self.dimensionality, skip + number_of_data_points)[skip:]\n self.numOfGeneratedPoints += number_of_data_points\n return sequence\n<|end_body_1|>\n\n<|body_start_2|>\n skip = self.numOfGeneratedPoints\n point = self.__generate_sobol_seq(skip=skip)[0]\n point = [len(self.hypercube_coordinates[dimension_index]) * dimension_value for dimension_index, dimension_value in enumerate(point)]\n distances_dict = {}\n for hypercube_point in self.hypercube:\n if len(distances_dict) < self.numOfGeneratedPoints:\n distances_dict[euclidean(point, hypercube_point)] = hypercube_point\n elif len(distances_dict) == self.numOfGeneratedPoints:\n distances = list(distances_dict.keys())\n distances.sort()\n del distances_dict[distances.pop()]\n distances_dict[euclidean(point, hypercube_point)] = hypercube_point\n distances = list(distances_dict.keys())\n distances.sort()\n for current_distance in distances:\n if distances_dict[current_distance] not in self.returned_points:\n point = distances_dict[current_distance]\n self.returned_points.append(point)\n break\n result_to_return = [self.search_space[int(dimension_index)][int(dimension_value)] for dimension_index, dimension_value in enumerate(point)]\n return result_to_return\n<|end_body_2|>\n", "revision_id": "2b99e0a069ce866cb1d436a8ab18cc8dea206b15", "skeleton": "<|skeleton|>\nclass SobolSequence:\n\n def __init__(self, selection_algorithm_config, search_space):\n \"\"\"Creates SobolSequence instance that stores information about number of generated points :param selection_algorithm_config: Dict with configuration of selection algorithm. :param search_space: list of dimensions that describes a\"\"\"\n <|body_0|>\n\n def __generate_sobol_seq(self, number_of_data_points=1, skip=0):\n \"\"\"Generates sobol sequence of uniformly distributed data points in N dimensional space. :param number_of_data_points: int - number of points that needed to be generated in this iteration :param skip: int - number of points to skip from the beginning of sequence, because sobol_seq.i4_sobol_generate stateless. :return: sobol sequence as numpy array.\"\"\"\n <|body_1|>\n\n def get_next_point(self):\n \"\"\"Will return next data point from initiated Sobol sequence imposed to the search space. :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SobolSequence:\n def __init__(self, selection_algorithm_config, search_space):\n \"\"\"Creates SobolSequence instance that stores information about number of generated points :param selection_algorithm_config: Dict with configuration of selection algorithm. 
:param search_space: list of dimensions that describes a\"\"\"\n self.dimensionality = len(search_space)\n self.search_space = search_space\n self.numOfGeneratedPoints = 0\n self.returned_points = []\n self.hypercube_coordinates = []\n for dimension in search_space:\n dim_indexes = [float(x) for x in range(len(dimension))]\n self.hypercube_coordinates.append(dim_indexes)\n self.hypercube = list(product(*self.hypercube_coordinates))\n\n def __generate_sobol_seq(self, number_of_data_points=1, skip=0):\n \"\"\"Generates sobol sequence of uniformly distributed data points in N dimensional space. :param number_of_data_points: int - number of points that needed to be generated in this iteration :param skip: int - number of points to skip from the beginning of sequence, because sobol_seq.i4_sobol_generate stateless. :return: sobol sequence as numpy array.\"\"\"\n sequence = sobol_seq.i4_sobol_generate(self.dimensionality, skip + number_of_data_points)[skip:]\n self.numOfGeneratedPoints += number_of_data_points\n return sequence\n\n def get_next_point(self):\n \"\"\"Will return next data point from initiated Sobol sequence imposed to the search space. :return:\"\"\"\n skip = self.numOfGeneratedPoints\n point = self.__generate_sobol_seq(skip=skip)[0]\n point = [len(self.hypercube_coordinates[dimension_index]) * dimension_value for dimension_index, dimension_value in enumerate(point)]\n distances_dict = {}\n for hypercube_point in self.hypercube:\n if len(distances_dict) < self.numOfGeneratedPoints:\n distances_dict[euclidean(point, hypercube_point)] = hypercube_point\n elif len(distances_dict) == self.numOfGeneratedPoints:\n distances = list(distances_dict.keys())\n distances.sort()\n del distances_dict[distances.pop()]\n distances_dict[euclidean(point, hypercube_point)] = hypercube_point\n distances = list(distances_dict.keys())\n distances.sort()\n for current_distance in distances:\n if distances_dict[current_distance] not in self.returned_points:\n point = distances_dict[current_distance]\n self.returned_points.append(point)\n break\n result_to_return = [self.search_space[int(dimension_index)][int(dimension_value)] for dimension_index, dimension_value in enumerate(point)]\n return result_to_return\n", "source": "the_stack_v2_python_sparse", "source_path": "main-node/selection/sobol.py", "source_repo": "Valavanca/benchmark", "split": "val", "star_events_count": 0}
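The get_next_point body in the SobolSequence record above maps a quasi-random point from the unit hypercube onto a discrete search space: each coordinate is scaled by the cardinality of its dimension, and the scaled point is then matched against grid points by Euclidean distance, skipping any grid point already returned. A minimal sketch of the scale-and-clamp core follows; it uses scipy.stats.qmc.Sobol instead of the sobol_seq package the record imports (an assumption about the available environment), and it omits the nearest-unreturned-point search, so it illustrates the mapping only.

from scipy.stats import qmc

def sobol_grid_point(search_space, index):
    # Draw a base-2 block of Sobol points in [0, 1)^d and take the index-th one.
    sampler = qmc.Sobol(d=len(search_space), scramble=False)
    point = sampler.random_base2(m=3)[index]        # 2**3 = 8 points; index < 8
    choice = []
    for dim_values, coord in zip(search_space, point):
        # Scale the unit coordinate into index space and clamp to a valid slot.
        i = min(int(coord * len(dim_values)), len(dim_values) - 1)
        choice.append(dim_values[i])
    return choice

space = [[0.001, 0.01, 0.1], [16, 32, 64, 128]]     # 2-D discrete search space
print(sobol_grid_point(space, 1))                   # e.g. [0.01, 64]

One fragility of the record's version worth noting: distances_dict is keyed by float distances, so two hypercube points at exactly the same distance from the Sobol point silently overwrite one another; keeping (distance, point) pairs in a list or a heap would avoid that loss.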
{"blob_id": "d5b1a3a90afbce4c4d262058ae6d71fc57075a07", "bodies": ["self.pos = f_positive\nself.neg = f_negative\nself.max_lines = 100000\nself.lemmatizer = WordNetLemmatizer()\nself.lexicon = self._create_lexicon()", "lexicon = []\nwith open(self.pos, 'r') as f_handle:\n contents = f_handle.readlines()\n for word in contents[:self.max_lines]:\n all_words = word_tokenize(word)\n lexicon += list(all_words)\nwith open(self.neg, 'r') as f_handle:\n contents = f_handle.readlines()\n for word in contents[:self.max_lines]:\n all_words = word_tokenize(word)\n lexicon += list(all_words)\nlexicon = [self.lemmatizer.lemmatize(i) for i in lexicon]\nw_counts = Counter(lexicon)\nresult = []\nfor count in w_counts:\n if 1000 > w_counts[count] > 50:\n result.append(count)\nprint(len(result))\nreturn result", "featureset = []\nlexicon = self.lexicon\nwith open(filename, 'r') as f_handle:\n contents = f_handle.readlines()\n for line_word in contents[:self.max_lines]:\n _current_words = word_tokenize(line_word.lower())\n current_words = [self.lemmatizer.lemmatize(i) for i in _current_words]\n features = np.zeros(len(lexicon))\n for word in current_words:\n if word.lower() in lexicon:\n index_value = lexicon.index(word.lower())\n features[index_value] += 1\n features = list(features)\n featureset.append([features, classification])\nreturn featureset", "_features = []\n_features += self._sample_handling(self.pos, [1, 0])\n_features += self._sample_handling(self.neg, [0, 1])\nrandom.shuffle(_features)\nfeatures = np.array(_features)\ntesting_size = int(test_size * len(features))\ntraining_vectors = list(features[:, 0][:-testing_size])\ntraining_labels = list(features[:, 1][:-testing_size])\ntest_vectors = list(features[:, 0][-testing_size:])\ntest_labels = list(features[:, 1][-testing_size:])\nreturn (training_vectors, training_labels, test_vectors, test_labels)"], "bodies_text": "<|body_start_0|>\n self.pos = f_positive\n self.neg = f_negative\n self.max_lines = 100000\n self.lemmatizer = WordNetLemmatizer()\n self.lexicon = self._create_lexicon()\n<|end_body_0|>\n\n<|body_start_1|>\n lexicon = []\n with open(self.pos, 'r') as f_handle:\n contents = f_handle.readlines()\n for word in contents[:self.max_lines]:\n all_words = word_tokenize(word)\n lexicon += list(all_words)\n with open(self.neg, 'r') as f_handle:\n contents = f_handle.readlines()\n for word in contents[:self.max_lines]:\n all_words = word_tokenize(word)\n lexicon += list(all_words)\n lexicon = [self.lemmatizer.lemmatize(i) for i in lexicon]\n w_counts = Counter(lexicon)\n result = []\n for count in w_counts:\n if 1000 > w_counts[count] > 50:\n result.append(count)\n print(len(result))\n return result\n<|end_body_1|>\n\n<|body_start_2|>\n featureset = []\n lexicon = self.lexicon\n with open(filename, 'r') as f_handle:\n contents = f_handle.readlines()\n for line_word in contents[:self.max_lines]:\n _current_words = word_tokenize(line_word.lower())\n current_words = [self.lemmatizer.lemmatize(i) for i in _current_words]\n features = np.zeros(len(lexicon))\n for word in current_words:\n if word.lower() in lexicon:\n index_value = lexicon.index(word.lower())\n features[index_value] += 1\n features = list(features)\n featureset.append([features, classification])\n return featureset\n<|end_body_2|>\n\n<|body_start_3|>\n _features = []\n _features += self._sample_handling(self.pos, [1, 0])\n _features += self._sample_handling(self.neg, [0, 1])\n random.shuffle(_features)\n features = np.array(_features)\n testing_size = int(test_size * 
len(features))\n training_vectors = list(features[:, 0][:-testing_size])\n training_labels = list(features[:, 1][:-testing_size])\n test_vectors = list(features[:, 0][-testing_size:])\n test_labels = list(features[:, 1][-testing_size:])\n return (training_vectors, training_labels, test_vectors, test_labels)\n<|end_body_3|>\n", "class_docstring": "Process Sentiment Data.", "class_name": "Data", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Data:\n \"\"\"Process Sentiment Data.\"\"\"\n\n def __init__(self, f_positive, f_negative):\n \"\"\"Method to instantiate the class. Args: f_positive: File with positive sentiments f_negative: File with negative sentiments Returns: None\"\"\"\n <|body_0|>\n\n def _create_lexicon(self):\n \"\"\"Create the lexicon from files. Args: None Returns: result: Output\"\"\"\n <|body_1|>\n\n def _sample_handling(self, filename, classification):\n \"\"\"Handle samples from file. Args: filename: Name of file classification: Classification of featureset for lines in file Returns: result: Output\"\"\"\n <|body_2|>\n\n def create_feature_sets_and_labels(self, test_size=0.1):\n \"\"\"Create feature sets and labels. Args: test_size: Size of test Returns: result: Output\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.pos = f_positive\n self.neg = f_negative\n self.max_lines = 100000\n self.lemmatizer = WordNetLemmatizer()\n self.lexicon = self._create_lexicon()\n<|end_body_0|>\n\n<|body_start_1|>\n lexicon = []\n with open(self.pos, 'r') as f_handle:\n contents = f_handle.readlines()\n for word in contents[:self.max_lines]:\n all_words = word_tokenize(word)\n lexicon += list(all_words)\n with open(self.neg, 'r') as f_handle:\n contents = f_handle.readlines()\n for word in contents[:self.max_lines]:\n all_words = word_tokenize(word)\n lexicon += list(all_words)\n lexicon = [self.lemmatizer.lemmatize(i) for i in lexicon]\n w_counts = Counter(lexicon)\n result = []\n for count in w_counts:\n if 1000 > w_counts[count] > 50:\n result.append(count)\n print(len(result))\n return result\n<|end_body_1|>\n\n<|body_start_2|>\n featureset = []\n lexicon = self.lexicon\n with open(filename, 'r') as f_handle:\n contents = f_handle.readlines()\n for line_word in contents[:self.max_lines]:\n _current_words = word_tokenize(line_word.lower())\n current_words = [self.lemmatizer.lemmatize(i) for i in _current_words]\n features = np.zeros(len(lexicon))\n for word in current_words:\n if word.lower() in lexicon:\n index_value = lexicon.index(word.lower())\n features[index_value] += 1\n features = list(features)\n featureset.append([features, classification])\n return featureset\n<|end_body_2|>\n\n<|body_start_3|>\n _features = []\n _features += self._sample_handling(self.pos, [1, 0])\n _features += self._sample_handling(self.neg, [0, 1])\n random.shuffle(_features)\n features = np.array(_features)\n testing_size = int(test_size * len(features))\n training_vectors = list(features[:, 0][:-testing_size])\n training_labels = list(features[:, 1][:-testing_size])\n test_vectors = list(features[:, 0][-testing_size:])\n test_labels = list(features[:, 1][-testing_size:])\n return (training_vectors, training_labels, test_vectors, test_labels)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000384", "length_bytes": 8117, "license_type": "no_license", "methods": [{"docstring": "Method to instantiate the class. 
Args: f_positive: File with positive sentiments f_negative: File with negative sentiments Returns: None", "name": "__init__", "signature": "def __init__(self, f_positive, f_negative)"}, {"docstring": "Create the lexicon from files. Args: None Returns: result: Output", "name": "_create_lexicon", "signature": "def _create_lexicon(self)"}, {"docstring": "Handle samples from file. Args: filename: Name of file classification: Classification of featureset for lines in file Returns: result: Output", "name": "_sample_handling", "signature": "def _sample_handling(self, filename, classification)"}, {"docstring": "Create feature sets and labels. Args: test_size: Size of test Returns: result: Output", "name": "create_feature_sets_and_labels", "signature": "def create_feature_sets_and_labels(self, test_size=0.1)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_021992", "prompt": "Implement the Python class `Data` described below.\n\nClass description:\nProcess Sentiment Data.\n\nMethod signatures and docstrings:\n- def __init__(self, f_positive, f_negative): Method to instantiate the class. Args: f_positive: File with positive sentiments f_negative: File with negative sentiments Returns: None\n- def _create_lexicon(self): Create the lexicon from files. Args: None Returns: result: Output\n- def _sample_handling(self, filename, classification): Handle samples from file. Args: filename: Name of file classification: Classification of featureset for lines in file Returns: result: Output\n- def create_feature_sets_and_labels(self, test_size=0.1): Create feature sets and labels. Args: test_size: Size of test Returns: result: Output", "prompted_full_text": "Implement the Python class `Data` described below.\n\nClass description:\nProcess Sentiment Data.\n\nMethod signatures and docstrings:\n- def __init__(self, f_positive, f_negative): Method to instantiate the class. Args: f_positive: File with positive sentiments f_negative: File with negative sentiments Returns: None\n- def _create_lexicon(self): Create the lexicon from files. Args: None Returns: result: Output\n- def _sample_handling(self, filename, classification): Handle samples from file. Args: filename: Name of file classification: Classification of featureset for lines in file Returns: result: Output\n- def create_feature_sets_and_labels(self, test_size=0.1): Create feature sets and labels. Args: test_size: Size of test Returns: result: Output\n\n<|skeleton|>\nclass Data:\n \"\"\"Process Sentiment Data.\"\"\"\n\n def __init__(self, f_positive, f_negative):\n \"\"\"Method to instantiate the class. Args: f_positive: File with positive sentiments f_negative: File with negative sentiments Returns: None\"\"\"\n <|body_0|>\n\n def _create_lexicon(self):\n \"\"\"Create the lexicon from files. Args: None Returns: result: Output\"\"\"\n <|body_1|>\n\n def _sample_handling(self, filename, classification):\n \"\"\"Handle samples from file. Args: filename: Name of file classification: Classification of featureset for lines in file Returns: result: Output\"\"\"\n <|body_2|>\n\n def create_feature_sets_and_labels(self, test_size=0.1):\n \"\"\"Create feature sets and labels. 
Args: test_size: Size of test Returns: result: Output\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.pos = f_positive\n self.neg = f_negative\n self.max_lines = 100000\n self.lemmatizer = WordNetLemmatizer()\n self.lexicon = self._create_lexicon()\n<|end_body_0|>\n\n<|body_start_1|>\n lexicon = []\n with open(self.pos, 'r') as f_handle:\n contents = f_handle.readlines()\n for word in contents[:self.max_lines]:\n all_words = word_tokenize(word)\n lexicon += list(all_words)\n with open(self.neg, 'r') as f_handle:\n contents = f_handle.readlines()\n for word in contents[:self.max_lines]:\n all_words = word_tokenize(word)\n lexicon += list(all_words)\n lexicon = [self.lemmatizer.lemmatize(i) for i in lexicon]\n w_counts = Counter(lexicon)\n result = []\n for count in w_counts:\n if 1000 > w_counts[count] > 50:\n result.append(count)\n print(len(result))\n return result\n<|end_body_1|>\n\n<|body_start_2|>\n featureset = []\n lexicon = self.lexicon\n with open(filename, 'r') as f_handle:\n contents = f_handle.readlines()\n for line_word in contents[:self.max_lines]:\n _current_words = word_tokenize(line_word.lower())\n current_words = [self.lemmatizer.lemmatize(i) for i in _current_words]\n features = np.zeros(len(lexicon))\n for word in current_words:\n if word.lower() in lexicon:\n index_value = lexicon.index(word.lower())\n features[index_value] += 1\n features = list(features)\n featureset.append([features, classification])\n return featureset\n<|end_body_2|>\n\n<|body_start_3|>\n _features = []\n _features += self._sample_handling(self.pos, [1, 0])\n _features += self._sample_handling(self.neg, [0, 1])\n random.shuffle(_features)\n features = np.array(_features)\n testing_size = int(test_size * len(features))\n training_vectors = list(features[:, 0][:-testing_size])\n training_labels = list(features[:, 1][:-testing_size])\n test_vectors = list(features[:, 0][-testing_size:])\n test_labels = list(features[:, 1][-testing_size:])\n return (training_vectors, training_labels, test_vectors, test_labels)\n<|end_body_3|>\n", "revision_id": "36a7996b140cccb9003cba8367364645e2d65d85", "skeleton": "<|skeleton|>\nclass Data:\n \"\"\"Process Sentiment Data.\"\"\"\n\n def __init__(self, f_positive, f_negative):\n \"\"\"Method to instantiate the class. Args: f_positive: File with positive sentiments f_negative: File with negative sentiments Returns: None\"\"\"\n <|body_0|>\n\n def _create_lexicon(self):\n \"\"\"Create the lexicon from files. Args: None Returns: result: Output\"\"\"\n <|body_1|>\n\n def _sample_handling(self, filename, classification):\n \"\"\"Handle samples from file. Args: filename: Name of file classification: Classification of featureset for lines in file Returns: result: Output\"\"\"\n <|body_2|>\n\n def create_feature_sets_and_labels(self, test_size=0.1):\n \"\"\"Create feature sets and labels. Args: test_size: Size of test Returns: result: Output\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Data:\n \"\"\"Process Sentiment Data.\"\"\"\n\n def __init__(self, f_positive, f_negative):\n \"\"\"Method to instantiate the class. 
Args: f_positive: File with positive sentiments f_negative: File with negative sentiments Returns: None\"\"\"\n self.pos = f_positive\n self.neg = f_negative\n self.max_lines = 100000\n self.lemmatizer = WordNetLemmatizer()\n self.lexicon = self._create_lexicon()\n\n def _create_lexicon(self):\n \"\"\"Create the lexicon from files. Args: None Returns: result: Output\"\"\"\n lexicon = []\n with open(self.pos, 'r') as f_handle:\n contents = f_handle.readlines()\n for word in contents[:self.max_lines]:\n all_words = word_tokenize(word)\n lexicon += list(all_words)\n with open(self.neg, 'r') as f_handle:\n contents = f_handle.readlines()\n for word in contents[:self.max_lines]:\n all_words = word_tokenize(word)\n lexicon += list(all_words)\n lexicon = [self.lemmatizer.lemmatize(i) for i in lexicon]\n w_counts = Counter(lexicon)\n result = []\n for count in w_counts:\n if 1000 > w_counts[count] > 50:\n result.append(count)\n print(len(result))\n return result\n\n def _sample_handling(self, filename, classification):\n \"\"\"Handle samples from file. Args: filename: Name of file classification: Classification of featureset for lines in file Returns: result: Output\"\"\"\n featureset = []\n lexicon = self.lexicon\n with open(filename, 'r') as f_handle:\n contents = f_handle.readlines()\n for line_word in contents[:self.max_lines]:\n _current_words = word_tokenize(line_word.lower())\n current_words = [self.lemmatizer.lemmatize(i) for i in _current_words]\n features = np.zeros(len(lexicon))\n for word in current_words:\n if word.lower() in lexicon:\n index_value = lexicon.index(word.lower())\n features[index_value] += 1\n features = list(features)\n featureset.append([features, classification])\n return featureset\n\n def create_feature_sets_and_labels(self, test_size=0.1):\n \"\"\"Create feature sets and labels. Args: test_size: Size of test Returns: result: Output\"\"\"\n _features = []\n _features += self._sample_handling(self.pos, [1, 0])\n _features += self._sample_handling(self.neg, [0, 1])\n random.shuffle(_features)\n features = np.array(_features)\n testing_size = int(test_size * len(features))\n training_vectors = list(features[:, 0][:-testing_size])\n training_labels = list(features[:, 1][:-testing_size])\n test_vectors = list(features[:, 0][-testing_size:])\n test_labels = list(features[:, 1][-testing_size:])\n return (training_vectors, training_labels, test_vectors, test_labels)\n", "source": "the_stack_v2_python_sparse", "source_path": "general/sentdex/tf-nltk-multilayer-perceptron.py", "source_repo": "palisadoes/AI", "split": "val", "star_events_count": 1}
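The _sample_handling body in the Data record above looks up every token with lexicon.index(word.lower()), a linear scan of the lexicon per token. The sketch below shows the same bag-of-words featurization with a one-time inverted index, making each lookup O(1); it uses a plain str.split in place of nltk's word_tokenize and skips lemmatization to stay self-contained, and the function name is invented.

import numpy as np

def featurize(line, lexicon):
    # One-time inverted index; replaces the O(|lexicon|) list.index() per token.
    index = {word: i for i, word in enumerate(lexicon)}
    features = np.zeros(len(lexicon))
    for word in line.lower().split():       # the record uses nltk word_tokenize here
        if word in index:
            features[index[word]] += 1      # count occurrences of lexicon words
    return features

print(featurize('good good movie', ['good', 'bad', 'movie']))   # [2. 0. 1.]

For the record's stated file sizes (up to 100000 lines), this changes the featurization cost from O(lines x tokens x |lexicon|) to O(lines x tokens) after the one-time index build.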
{"blob_id": "a66d7e55d9d8a0d1ef4be039653a7afbe3d3ed21", "bodies": ["self.relaxed = relaxed\nself.structType = structType\nself.interstitType = interstitType\nself.cellDims = cellDims\nself.runCalcs = runCalcs", "cellStr = '_'.join([str(x) for x in self.cellDims])\noutObj = refDataStruct.getSelfInterstitialPlaneWaveStruct(self.structType, self.interstitType, self.relaxed, cellStr)\nreturn outObj"], "bodies_text": "<|body_start_0|>\n self.relaxed = relaxed\n self.structType = structType\n self.interstitType = interstitType\n self.cellDims = cellDims\n self.runCalcs = runCalcs\n<|end_body_0|>\n\n<|body_start_1|>\n cellStr = '_'.join([str(x) for x in self.cellDims])\n outObj = refDataStruct.getSelfInterstitialPlaneWaveStruct(self.structType, self.interstitType, self.relaxed, cellStr)\n return outObj\n<|end_body_1|>\n", "class_docstring": "Defines a type of interstitial independent of element and method used.", "class_name": "InterstitialType", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass InterstitialType:\n \"\"\"Defines a type of interstitial independent of element and method used.\"\"\"\n\n def __init__(self, relaxed, structType, interstitType, cellDims, runCalcs):\n \"\"\"Description of function Args: relaxed: str, unrelaxed/relaxed_constant_p/relaxed_constant_v structType: str, the base crystal type (hcp/bcc/fcc) interstitType: str describing the deformation (e.g. octahedral) cellDims: iter(3 element), number of unit cells in each direction runCalcs: Bool, Whether to run calculations for this\"\"\"\n <|body_0|>\n\n def getInterStructFromRefDataStruct(self, refDataStruct):\n \"\"\"Gets the relevant structures using a reference object ( Args: refDataStruct: RefElementalDataBase object for one element Returns outStructs: UCell structures Raises: Errors\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.relaxed = relaxed\n self.structType = structType\n self.interstitType = interstitType\n self.cellDims = cellDims\n self.runCalcs = runCalcs\n<|end_body_0|>\n\n<|body_start_1|>\n cellStr = '_'.join([str(x) for x in self.cellDims])\n outObj = refDataStruct.getSelfInterstitialPlaneWaveStruct(self.structType, self.interstitType, self.relaxed, cellStr)\n return outObj\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000385", "length_bytes": 5576, "license_type": "no_license", "methods": [{"docstring": "Description of function Args: relaxed: str, unrelaxed/relaxed_constant_p/relaxed_constant_v structType: str, the base crystal type (hcp/bcc/fcc) interstitType: str describing the deformation (e.g. 
octahedral) cellDims: iter(3 element), number of unit cells in each direction runCalcs: Bool, Whether to run calculations for this", "name": "__init__", "signature": "def __init__(self, relaxed, structType, interstitType, cellDims, runCalcs)"}, {"docstring": "Gets the relevant structures using a reference object ( Args: refDataStruct: RefElementalDataBase object for one element Returns outStructs: UCell structures Raises: Errors", "name": "getInterStructFromRefDataStruct", "signature": "def getInterStructFromRefDataStruct(self, refDataStruct)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_048351", "prompt": "Implement the Python class `InterstitialType` described below.\n\nClass description:\nDefines a type of interstitial independent of element and method used.\n\nMethod signatures and docstrings:\n- def __init__(self, relaxed, structType, interstitType, cellDims, runCalcs): Description of function Args: relaxed: str, unrelaxed/relaxed_constant_p/relaxed_constant_v structType: str, the base crystal type (hcp/bcc/fcc) interstitType: str describing the deformation (e.g. octahedral) cellDims: iter(3 element), number of unit cells in each direction runCalcs: Bool, Whether to run calculations for this\n- def getInterStructFromRefDataStruct(self, refDataStruct): Gets the relevant structures using a reference object ( Args: refDataStruct: RefElementalDataBase object for one element Returns outStructs: UCell structures Raises: Errors", "prompted_full_text": "Implement the Python class `InterstitialType` described below.\n\nClass description:\nDefines a type of interstitial independent of element and method used.\n\nMethod signatures and docstrings:\n- def __init__(self, relaxed, structType, interstitType, cellDims, runCalcs): Description of function Args: relaxed: str, unrelaxed/relaxed_constant_p/relaxed_constant_v structType: str, the base crystal type (hcp/bcc/fcc) interstitType: str describing the deformation (e.g. octahedral) cellDims: iter(3 element), number of unit cells in each direction runCalcs: Bool, Whether to run calculations for this\n- def getInterStructFromRefDataStruct(self, refDataStruct): Gets the relevant structures using a reference object ( Args: refDataStruct: RefElementalDataBase object for one element Returns outStructs: UCell structures Raises: Errors\n\n<|skeleton|>\nclass InterstitialType:\n \"\"\"Defines a type of interstitial independent of element and method used.\"\"\"\n\n def __init__(self, relaxed, structType, interstitType, cellDims, runCalcs):\n \"\"\"Description of function Args: relaxed: str, unrelaxed/relaxed_constant_p/relaxed_constant_v structType: str, the base crystal type (hcp/bcc/fcc) interstitType: str describing the deformation (e.g. 
octahedral) cellDims: iter(3 element), number of unit cells in each direction runCalcs: Bool, Whether to run calculations for this\"\"\"\n <|body_0|>\n\n def getInterStructFromRefDataStruct(self, refDataStruct):\n \"\"\"Gets the relevant structures using a reference object ( Args: refDataStruct: RefElementalDataBase object for one element Returns outStructs: UCell structures Raises: Errors\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.relaxed = relaxed\n self.structType = structType\n self.interstitType = interstitType\n self.cellDims = cellDims\n self.runCalcs = runCalcs\n<|end_body_0|>\n\n<|body_start_1|>\n cellStr = '_'.join([str(x) for x in self.cellDims])\n outObj = refDataStruct.getSelfInterstitialPlaneWaveStruct(self.structType, self.interstitType, self.relaxed, cellStr)\n return outObj\n<|end_body_1|>\n", "revision_id": "8469a51c1580b923ca35a56811e92c065b424d68", "skeleton": "<|skeleton|>\nclass InterstitialType:\n \"\"\"Defines a type of interstitial independent of element and method used.\"\"\"\n\n def __init__(self, relaxed, structType, interstitType, cellDims, runCalcs):\n \"\"\"Description of function Args: relaxed: str, unrelaxed/relaxed_constant_p/relaxed_constant_v structType: str, the base crystal type (hcp/bcc/fcc) interstitType: str describing the deformation (e.g. octahedral) cellDims: iter(3 element), number of unit cells in each direction runCalcs: Bool, Whether to run calculations for this\"\"\"\n <|body_0|>\n\n def getInterStructFromRefDataStruct(self, refDataStruct):\n \"\"\"Gets the relevant structures using a reference object ( Args: refDataStruct: RefElementalDataBase object for one element Returns outStructs: UCell structures Raises: Errors\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class InterstitialType:\n \"\"\"Defines a type of interstitial independent of element and method used.\"\"\"\n\n def __init__(self, relaxed, structType, interstitType, cellDims, runCalcs):\n \"\"\"Description of function Args: relaxed: str, unrelaxed/relaxed_constant_p/relaxed_constant_v structType: str, the base crystal type (hcp/bcc/fcc) interstitType: str describing the deformation (e.g. octahedral) cellDims: iter(3 element), number of unit cells in each direction runCalcs: Bool, Whether to run calculations for this\"\"\"\n self.relaxed = relaxed\n self.structType = structType\n self.interstitType = interstitType\n self.cellDims = cellDims\n self.runCalcs = runCalcs\n\n def getInterStructFromRefDataStruct(self, refDataStruct):\n \"\"\"Gets the relevant structures using a reference object ( Args: refDataStruct: RefElementalDataBase object for one element Returns outStructs: UCell structures Raises: Errors\"\"\"\n cellStr = '_'.join([str(x) for x in self.cellDims])\n outObj = refDataStruct.getSelfInterstitialPlaneWaveStruct(self.structType, self.interstitType, self.relaxed, cellStr)\n return outObj\n", "source": "the_stack_v2_python_sparse", "source_path": "gen_basis_helpers/job_utils/interstit_helpers.py", "source_repo": "RFogarty1/plato_gen_basis_helpers", "split": "val", "star_events_count": 3}
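The InterstitialType record above is essentially a value object plus one delegating call. As a point of comparison only, not the project's actual API, the same shape reads naturally as a dataclass; the field names are copied from the record, and the stub reference object is invented to make the sketch runnable.

from dataclasses import dataclass

@dataclass
class InterstitialTypeSketch:
    relaxed: str         # unrelaxed / relaxed_constant_p / relaxed_constant_v
    structType: str      # base crystal type: hcp / bcc / fcc
    interstitType: str   # e.g. octahedral
    cellDims: tuple      # number of unit cells in each of 3 directions
    runCalcs: bool       # whether to run calculations for this type

    def getInterStructFromRefDataStruct(self, refDataStruct):
        cellStr = '_'.join(str(x) for x in self.cellDims)
        return refDataStruct.getSelfInterstitialPlaneWaveStruct(
            self.structType, self.interstitType, self.relaxed, cellStr)

class _StubRefData:
    # Invented stand-in for the RefElementalDataBase object named in the docstring.
    def getSelfInterstitialPlaneWaveStruct(self, structType, interstitType, relaxed, cellStr):
        return f'{structType}/{interstitType}/{relaxed}/{cellStr}'

it = InterstitialTypeSketch('unrelaxed', 'hcp', 'octahedral', (3, 3, 2), True)
print(it.getInterStructFromRefDataStruct(_StubRefData()))   # hcp/octahedral/unrelaxed/3_3_2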
{"blob_id": "2d9a5c3abedaa7198d3a77028afff933471f9fe9", "bodies": ["for item in kdddx:\n if l0(item):\n return item", "count = 0\nfor item in kdddx:\n if l3(item):\n count += 1\nreturn count", "for i in kdddx1:\n if l(i):\n yield i", "mins = l1(kdddx2[0])\nfor c in range(1, len(kdddx2)):\n if l1(kdddx2[c]) < mins:\n mins = l1(kdddx2[c])\nreturn mins", "for i1 in range(len(kdddx3) - 1):\n for i2 in range(i1 + 1, len(kdddx3)):\n if l2(kdddx3[i2]) < l2(kdddx3[i1]):\n kdddx3[i2], kdddx3[i1] = (kdddx3[i1], kdddx3[i2])\nreturn kdddx3"], "bodies_text": "<|body_start_0|>\n for item in kdddx:\n if l0(item):\n return item\n<|end_body_0|>\n\n<|body_start_1|>\n count = 0\n for item in kdddx:\n if l3(item):\n count += 1\n return count\n<|end_body_1|>\n\n<|body_start_2|>\n for i in kdddx1:\n if l(i):\n yield i\n<|end_body_2|>\n\n<|body_start_3|>\n mins = l1(kdddx2[0])\n for c in range(1, len(kdddx2)):\n if l1(kdddx2[c]) < mins:\n mins = l1(kdddx2[c])\n return mins\n<|end_body_3|>\n\n<|body_start_4|>\n for i1 in range(len(kdddx3) - 1):\n for i2 in range(i1 + 1, len(kdddx3)):\n if l2(kdddx3[i2]) < l2(kdddx3[i1]):\n kdddx3[i2], kdddx3[i1] = (kdddx3[i1], kdddx3[i2])\n return kdddx3\n<|end_body_4|>\n", "class_docstring": "", "class_name": "Iterablehelper", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Iterablehelper:\n\n def find_singel01(kdddx, l0):\n \"\"\":param kdddx: 输入可迭代对象 :param l0: 目标函数 :return: 返回单一值\"\"\"\n <|body_0|>\n\n def find_count(kdddx, l3):\n \"\"\":param kdddx: 输入可迭代对象 :param l3: 目标函数 :return: 返回目标数量\"\"\"\n <|body_1|>\n\n def find_all(kdddx1, l):\n \"\"\":param kdddx1: 输入可迭代对象 :param l: 目标函数 :return: 返回一列生成器\"\"\"\n <|body_2|>\n\n def find_min(kdddx2, l1):\n \"\"\":param kdddx2: 输入可迭代对象 :param l1: 目标函数 :return: 返回最小值\"\"\"\n <|body_3|>\n\n def order_by(kdddx3, l2):\n \"\"\":param kdddx3: 输入可迭代对象 :param l2: 目标函数 :return: 根据函数属性对可迭代进行升序处理\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for item in kdddx:\n if l0(item):\n return item\n<|end_body_0|>\n\n<|body_start_1|>\n count = 0\n for item in kdddx:\n if l3(item):\n count += 1\n return count\n<|end_body_1|>\n\n<|body_start_2|>\n for i in kdddx1:\n if l(i):\n yield i\n<|end_body_2|>\n\n<|body_start_3|>\n mins = l1(kdddx2[0])\n for c in range(1, len(kdddx2)):\n if l1(kdddx2[c]) < mins:\n mins = l1(kdddx2[c])\n return mins\n<|end_body_3|>\n\n<|body_start_4|>\n for i1 in range(len(kdddx3) - 1):\n for i2 in range(i1 + 1, len(kdddx3)):\n if l2(kdddx3[i2]) < l2(kdddx3[i1]):\n kdddx3[i2], kdddx3[i1] = (kdddx3[i1], kdddx3[i2])\n return kdddx3\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000386", "length_bytes": 1646, "license_type": "permissive", "methods": [{"docstring": ":param kdddx: 输入可迭代对象 :param l0: 目标函数 :return: 返回单一值", "name": "find_singel01", "signature": "def find_singel01(kdddx, l0)"}, {"docstring": ":param kdddx: 输入可迭代对象 :param l3: 目标函数 :return: 返回目标数量", "name": "find_count", "signature": "def find_count(kdddx, l3)"}, {"docstring": ":param kdddx1: 输入可迭代对象 :param l: 目标函数 :return: 返回一列生成器", "name": "find_all", "signature": "def find_all(kdddx1, l)"}, {"docstring": ":param kdddx2: 输入可迭代对象 :param l1: 目标函数 :return: 返回最小值", "name": "find_min", "signature": "def find_min(kdddx2, l1)"}, {"docstring": ":param kdddx3: 输入可迭代对象 :param l2: 目标函数 :return: 根据函数属性对可迭代进行升序处理", "name": "order_by", "signature": "def order_by(kdddx3, l2)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_033586", "prompt": "Implement the Python 
class `Iterablehelper` described below.\n\nClass description:\nImplement the Iterablehelper class.\n\nMethod signatures and docstrings:\n- def find_singel01(kdddx, l0): :param kdddx: 输入可迭代对象 :param l0: 目标函数 :return: 返回单一值\n- def find_count(kdddx, l3): :param kdddx: 输入可迭代对象 :param l3: 目标函数 :return: 返回目标数量\n- def find_all(kdddx1, l): :param kdddx1: 输入可迭代对象 :param l: 目标函数 :return: 返回一列生成器\n- def find_min(kdddx2, l1): :param kdddx2: 输入可迭代对象 :param l1: 目标函数 :return: 返回最小值\n- def order_by(kdddx3, l2): :param kdddx3: 输入可迭代对象 :param l2: 目标函数 :return: 根据函数属性对可迭代进行升序处理", "prompted_full_text": "Implement the Python class `Iterablehelper` described below.\n\nClass description:\nImplement the Iterablehelper class.\n\nMethod signatures and docstrings:\n- def find_singel01(kdddx, l0): :param kdddx: 输入可迭代对象 :param l0: 目标函数 :return: 返回单一值\n- def find_count(kdddx, l3): :param kdddx: 输入可迭代对象 :param l3: 目标函数 :return: 返回目标数量\n- def find_all(kdddx1, l): :param kdddx1: 输入可迭代对象 :param l: 目标函数 :return: 返回一列生成器\n- def find_min(kdddx2, l1): :param kdddx2: 输入可迭代对象 :param l1: 目标函数 :return: 返回最小值\n- def order_by(kdddx3, l2): :param kdddx3: 输入可迭代对象 :param l2: 目标函数 :return: 根据函数属性对可迭代进行升序处理\n\n<|skeleton|>\nclass Iterablehelper:\n\n def find_singel01(kdddx, l0):\n \"\"\":param kdddx: 输入可迭代对象 :param l0: 目标函数 :return: 返回单一值\"\"\"\n <|body_0|>\n\n def find_count(kdddx, l3):\n \"\"\":param kdddx: 输入可迭代对象 :param l3: 目标函数 :return: 返回目标数量\"\"\"\n <|body_1|>\n\n def find_all(kdddx1, l):\n \"\"\":param kdddx1: 输入可迭代对象 :param l: 目标函数 :return: 返回一列生成器\"\"\"\n <|body_2|>\n\n def find_min(kdddx2, l1):\n \"\"\":param kdddx2: 输入可迭代对象 :param l1: 目标函数 :return: 返回最小值\"\"\"\n <|body_3|>\n\n def order_by(kdddx3, l2):\n \"\"\":param kdddx3: 输入可迭代对象 :param l2: 目标函数 :return: 根据函数属性对可迭代进行升序处理\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for item in kdddx:\n if l0(item):\n return item\n<|end_body_0|>\n\n<|body_start_1|>\n count = 0\n for item in kdddx:\n if l3(item):\n count += 1\n return count\n<|end_body_1|>\n\n<|body_start_2|>\n for i in kdddx1:\n if l(i):\n yield i\n<|end_body_2|>\n\n<|body_start_3|>\n mins = l1(kdddx2[0])\n for c in range(1, len(kdddx2)):\n if l1(kdddx2[c]) < mins:\n mins = l1(kdddx2[c])\n return mins\n<|end_body_3|>\n\n<|body_start_4|>\n for i1 in range(len(kdddx3) - 1):\n for i2 in range(i1 + 1, len(kdddx3)):\n if l2(kdddx3[i2]) < l2(kdddx3[i1]):\n kdddx3[i2], kdddx3[i1] = (kdddx3[i1], kdddx3[i2])\n return kdddx3\n<|end_body_4|>\n", "revision_id": "d8ba30ea4bc2b662a2d6a87d247f813e5680d63e", "skeleton": "<|skeleton|>\nclass Iterablehelper:\n\n def find_singel01(kdddx, l0):\n \"\"\":param kdddx: 输入可迭代对象 :param l0: 目标函数 :return: 返回单一值\"\"\"\n <|body_0|>\n\n def find_count(kdddx, l3):\n \"\"\":param kdddx: 输入可迭代对象 :param l3: 目标函数 :return: 返回目标数量\"\"\"\n <|body_1|>\n\n def find_all(kdddx1, l):\n \"\"\":param kdddx1: 输入可迭代对象 :param l: 目标函数 :return: 返回一列生成器\"\"\"\n <|body_2|>\n\n def find_min(kdddx2, l1):\n \"\"\":param kdddx2: 输入可迭代对象 :param l1: 目标函数 :return: 返回最小值\"\"\"\n <|body_3|>\n\n def order_by(kdddx3, l2):\n \"\"\":param kdddx3: 输入可迭代对象 :param l2: 目标函数 :return: 根据函数属性对可迭代进行升序处理\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Iterablehelper:\n def find_singel01(kdddx, l0):\n \"\"\":param kdddx: 输入可迭代对象 :param l0: 目标函数 :return: 返回单一值\"\"\"\n for item in kdddx:\n if l0(item):\n return item\n\n def find_count(kdddx, l3):\n \"\"\":param kdddx: 输入可迭代对象 :param 
l3: 目标函数 :return: 返回目标数量\"\"\"\n count = 0\n for item in kdddx:\n if l3(item):\n count += 1\n return count\n\n def find_all(kdddx1, l):\n \"\"\":param kdddx1: 输入可迭代对象 :param l: 目标函数 :return: 返回一列生成器\"\"\"\n for i in kdddx1:\n if l(i):\n yield i\n\n def find_min(kdddx2, l1):\n \"\"\":param kdddx2: 输入可迭代对象 :param l1: 目标函数 :return: 返回最小值\"\"\"\n mins = l1(kdddx2[0])\n for c in range(1, len(kdddx2)):\n if l1(kdddx2[c]) < mins:\n mins = l1(kdddx2[c])\n return mins\n\n def order_by(kdddx3, l2):\n \"\"\":param kdddx3: 输入可迭代对象 :param l2: 目标函数 :return: 根据函数属性对可迭代进行升序处理\"\"\"\n for i1 in range(len(kdddx3) - 1):\n for i2 in range(i1 + 1, len(kdddx3)):\n if l2(kdddx3[i2]) < l2(kdddx3[i1]):\n kdddx3[i2], kdddx3[i1] = (kdddx3[i1], kdddx3[i2])\n return kdddx3\n", "source": "the_stack_v2_python_sparse", "source_path": "1-mouth01/gongju/gongjuren.py", "source_repo": "gary-gggggg/gary", "split": "val", "star_events_count": 4}
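The five Iterablehelper methods above hand-roll behaviour the standard library already provides. For reference, the one-line equivalents follow, with invented stand-ins for the record's predicate and key functions. Note that find_min returns the minimum of the mapped values, not the element attaining it, so the faithful equivalent is a min over the mapped values rather than min(items, key=key).

items = [3, 1, 4, 1, 5]
pred = lambda x: x > 2        # stand-in for the record's l0 / l3 / l
key = lambda x: x % 3         # stand-in for the record's l1 / l2

first   = next((i for i in items if pred(i)), None)   # find_singel01
count   = sum(1 for i in items if pred(i))            # find_count
matches = [i for i in items if pred(i)]               # find_all (the record yields lazily)
minimum = min(key(i) for i in items)                  # find_min: min of mapped values
ordered = sorted(items, key=key)                      # order_by, without in-place bubble sort
print(first, count, matches, minimum, ordered)        # 3 3 [3, 4, 5] 0 [3, 1, 4, 1, 5]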
{"blob_id": "87448e454d17b39a79fc10d21c170edd1e05635d", "bodies": ["await self.async_set_unique_id(username)\ntry:\n self._abort_if_unique_id_configured({CONF_TOKEN: token})\nexcept AbortFlow:\n await async_create_import_issue(self.hass, self.context['source'], '', True)\n raise\nreturn self.async_create_entry(title=username, data={CONF_USERNAME: username, CONF_TOKEN: token})", "try:\n async with asyncio.timeout(10):\n if (acquired_token := token) is None:\n acquired_token = await pymelcloud.login(username, password, async_get_clientsession(self.hass))\n await pymelcloud.get_devices(acquired_token, async_get_clientsession(self.hass))\nexcept ClientResponseError as err:\n if err.status in (HTTPStatus.UNAUTHORIZED, HTTPStatus.FORBIDDEN):\n await async_create_import_issue(self.hass, self.context['source'], 'invalid_auth')\n return self.async_abort(reason='invalid_auth')\n await async_create_import_issue(self.hass, self.context['source'], 'cannot_connect')\n return self.async_abort(reason='cannot_connect')\nexcept (asyncio.TimeoutError, ClientError):\n await async_create_import_issue(self.hass, self.context['source'], 'cannot_connect')\n return self.async_abort(reason='cannot_connect')\nreturn await self._create_entry(username, acquired_token)", "if user_input is None:\n return self.async_show_form(step_id='user', data_schema=vol.Schema({vol.Required(CONF_USERNAME): str, vol.Required(CONF_PASSWORD): str}))\nusername = user_input[CONF_USERNAME]\nreturn await self._create_client(username, password=user_input[CONF_PASSWORD])", "result = await self._create_client(user_input[CONF_USERNAME], token=user_input[CONF_TOKEN])\nif result['type'] == FlowResultType.CREATE_ENTRY:\n await async_create_import_issue(self.hass, self.context['source'], '', True)\nreturn result"], "bodies_text": "<|body_start_0|>\n await self.async_set_unique_id(username)\n try:\n self._abort_if_unique_id_configured({CONF_TOKEN: token})\n except AbortFlow:\n await async_create_import_issue(self.hass, self.context['source'], '', True)\n raise\n return self.async_create_entry(title=username, data={CONF_USERNAME: username, CONF_TOKEN: token})\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n async with asyncio.timeout(10):\n if (acquired_token := token) is None:\n acquired_token = await pymelcloud.login(username, password, async_get_clientsession(self.hass))\n await pymelcloud.get_devices(acquired_token, async_get_clientsession(self.hass))\n except ClientResponseError as err:\n if err.status in (HTTPStatus.UNAUTHORIZED, HTTPStatus.FORBIDDEN):\n await async_create_import_issue(self.hass, self.context['source'], 'invalid_auth')\n return self.async_abort(reason='invalid_auth')\n await async_create_import_issue(self.hass, self.context['source'], 'cannot_connect')\n return self.async_abort(reason='cannot_connect')\n except (asyncio.TimeoutError, ClientError):\n await async_create_import_issue(self.hass, self.context['source'], 'cannot_connect')\n return self.async_abort(reason='cannot_connect')\n return await self._create_entry(username, acquired_token)\n<|end_body_1|>\n\n<|body_start_2|>\n if user_input is None:\n return self.async_show_form(step_id='user', data_schema=vol.Schema({vol.Required(CONF_USERNAME): str, vol.Required(CONF_PASSWORD): str}))\n username = user_input[CONF_USERNAME]\n return await self._create_client(username, password=user_input[CONF_PASSWORD])\n<|end_body_2|>\n\n<|body_start_3|>\n result = await self._create_client(user_input[CONF_USERNAME], token=user_input[CONF_TOKEN])\n if result['type'] == 
FlowResultType.CREATE_ENTRY:\n await async_create_import_issue(self.hass, self.context['source'], '', True)\n return result\n<|end_body_3|>\n", "class_docstring": "Handle a config flow.", "class_name": "FlowHandler", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FlowHandler:\n \"\"\"Handle a config flow.\"\"\"\n\n async def _create_entry(self, username: str, token: str):\n \"\"\"Register new entry.\"\"\"\n <|body_0|>\n\n async def _create_client(self, username: str, *, password: str | None=None, token: str | None=None):\n \"\"\"Create client.\"\"\"\n <|body_1|>\n\n async def async_step_user(self, user_input=None):\n \"\"\"User initiated config flow.\"\"\"\n <|body_2|>\n\n async def async_step_import(self, user_input):\n \"\"\"Import a config entry.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n await self.async_set_unique_id(username)\n try:\n self._abort_if_unique_id_configured({CONF_TOKEN: token})\n except AbortFlow:\n await async_create_import_issue(self.hass, self.context['source'], '', True)\n raise\n return self.async_create_entry(title=username, data={CONF_USERNAME: username, CONF_TOKEN: token})\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n async with asyncio.timeout(10):\n if (acquired_token := token) is None:\n acquired_token = await pymelcloud.login(username, password, async_get_clientsession(self.hass))\n await pymelcloud.get_devices(acquired_token, async_get_clientsession(self.hass))\n except ClientResponseError as err:\n if err.status in (HTTPStatus.UNAUTHORIZED, HTTPStatus.FORBIDDEN):\n await async_create_import_issue(self.hass, self.context['source'], 'invalid_auth')\n return self.async_abort(reason='invalid_auth')\n await async_create_import_issue(self.hass, self.context['source'], 'cannot_connect')\n return self.async_abort(reason='cannot_connect')\n except (asyncio.TimeoutError, ClientError):\n await async_create_import_issue(self.hass, self.context['source'], 'cannot_connect')\n return self.async_abort(reason='cannot_connect')\n return await self._create_entry(username, acquired_token)\n<|end_body_1|>\n\n<|body_start_2|>\n if user_input is None:\n return self.async_show_form(step_id='user', data_schema=vol.Schema({vol.Required(CONF_USERNAME): str, vol.Required(CONF_PASSWORD): str}))\n username = user_input[CONF_USERNAME]\n return await self._create_client(username, password=user_input[CONF_PASSWORD])\n<|end_body_2|>\n\n<|body_start_3|>\n result = await self._create_client(user_input[CONF_USERNAME], token=user_input[CONF_TOKEN])\n if result['type'] == FlowResultType.CREATE_ENTRY:\n await async_create_import_issue(self.hass, self.context['source'], '', True)\n return result\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000387", "length_bytes": 4601, "license_type": "permissive", "methods": [{"docstring": "Register new entry.", "name": "_create_entry", "signature": "async def _create_entry(self, username: str, token: str)"}, {"docstring": "Create client.", "name": "_create_client", "signature": "async def _create_client(self, username: str, *, password: str | None=None, token: str | None=None)"}, {"docstring": "User initiated config flow.", "name": "async_step_user", "signature": "async def async_step_user(self, user_input=None)"}, {"docstring": "Import a config entry.", "name": "async_step_import", "signature": "async def async_step_import(self, user_input)"}], "n_methods": 4, "prompt": "Implement the Python class `FlowHandler` described below.\n\nClass 
description:\nHandle a config flow.\n\nMethod signatures and docstrings:\n- async def _create_entry(self, username: str, token: str): Register new entry.\n- async def _create_client(self, username: str, *, password: str | None=None, token: str | None=None): Create client.\n- async def async_step_user(self, user_input=None): User initiated config flow.\n- async def async_step_import(self, user_input): Import a config entry.", "prompted_full_text": "Implement the Python class `FlowHandler` described below.\n\nClass description:\nHandle a config flow.\n\nMethod signatures and docstrings:\n- async def _create_entry(self, username: str, token: str): Register new entry.\n- async def _create_client(self, username: str, *, password: str | None=None, token: str | None=None): Create client.\n- async def async_step_user(self, user_input=None): User initiated config flow.\n- async def async_step_import(self, user_input): Import a config entry.\n\n<|skeleton|>\nclass FlowHandler:\n \"\"\"Handle a config flow.\"\"\"\n\n async def _create_entry(self, username: str, token: str):\n \"\"\"Register new entry.\"\"\"\n <|body_0|>\n\n async def _create_client(self, username: str, *, password: str | None=None, token: str | None=None):\n \"\"\"Create client.\"\"\"\n <|body_1|>\n\n async def async_step_user(self, user_input=None):\n \"\"\"User initiated config flow.\"\"\"\n <|body_2|>\n\n async def async_step_import(self, user_input):\n \"\"\"Import a config entry.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n await self.async_set_unique_id(username)\n try:\n self._abort_if_unique_id_configured({CONF_TOKEN: token})\n except AbortFlow:\n await async_create_import_issue(self.hass, self.context['source'], '', True)\n raise\n return self.async_create_entry(title=username, data={CONF_USERNAME: username, CONF_TOKEN: token})\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n async with asyncio.timeout(10):\n if (acquired_token := token) is None:\n acquired_token = await pymelcloud.login(username, password, async_get_clientsession(self.hass))\n await pymelcloud.get_devices(acquired_token, async_get_clientsession(self.hass))\n except ClientResponseError as err:\n if err.status in (HTTPStatus.UNAUTHORIZED, HTTPStatus.FORBIDDEN):\n await async_create_import_issue(self.hass, self.context['source'], 'invalid_auth')\n return self.async_abort(reason='invalid_auth')\n await async_create_import_issue(self.hass, self.context['source'], 'cannot_connect')\n return self.async_abort(reason='cannot_connect')\n except (asyncio.TimeoutError, ClientError):\n await async_create_import_issue(self.hass, self.context['source'], 'cannot_connect')\n return self.async_abort(reason='cannot_connect')\n return await self._create_entry(username, acquired_token)\n<|end_body_1|>\n\n<|body_start_2|>\n if user_input is None:\n return self.async_show_form(step_id='user', data_schema=vol.Schema({vol.Required(CONF_USERNAME): str, vol.Required(CONF_PASSWORD): str}))\n username = user_input[CONF_USERNAME]\n return await self._create_client(username, password=user_input[CONF_PASSWORD])\n<|end_body_2|>\n\n<|body_start_3|>\n result = await self._create_client(user_input[CONF_USERNAME], token=user_input[CONF_TOKEN])\n if result['type'] == FlowResultType.CREATE_ENTRY:\n await async_create_import_issue(self.hass, self.context['source'], '', True)\n return result\n<|end_body_3|>\n", "revision_id": "80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743", "skeleton": "<|skeleton|>\nclass FlowHandler:\n \"\"\"Handle a config flow.\"\"\"\n\n async def _create_entry(self, 
username: str, token: str):\n \"\"\"Register new entry.\"\"\"\n <|body_0|>\n\n async def _create_client(self, username: str, *, password: str | None=None, token: str | None=None):\n \"\"\"Create client.\"\"\"\n <|body_1|>\n\n async def async_step_user(self, user_input=None):\n \"\"\"User initiated config flow.\"\"\"\n <|body_2|>\n\n async def async_step_import(self, user_input):\n \"\"\"Import a config entry.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class FlowHandler:\n \"\"\"Handle a config flow.\"\"\"\n\n async def _create_entry(self, username: str, token: str):\n \"\"\"Register new entry.\"\"\"\n await self.async_set_unique_id(username)\n try:\n self._abort_if_unique_id_configured({CONF_TOKEN: token})\n except AbortFlow:\n await async_create_import_issue(self.hass, self.context['source'], '', True)\n raise\n return self.async_create_entry(title=username, data={CONF_USERNAME: username, CONF_TOKEN: token})\n\n async def _create_client(self, username: str, *, password: str | None=None, token: str | None=None):\n \"\"\"Create client.\"\"\"\n try:\n async with asyncio.timeout(10):\n if (acquired_token := token) is None:\n acquired_token = await pymelcloud.login(username, password, async_get_clientsession(self.hass))\n await pymelcloud.get_devices(acquired_token, async_get_clientsession(self.hass))\n except ClientResponseError as err:\n if err.status in (HTTPStatus.UNAUTHORIZED, HTTPStatus.FORBIDDEN):\n await async_create_import_issue(self.hass, self.context['source'], 'invalid_auth')\n return self.async_abort(reason='invalid_auth')\n await async_create_import_issue(self.hass, self.context['source'], 'cannot_connect')\n return self.async_abort(reason='cannot_connect')\n except (asyncio.TimeoutError, ClientError):\n await async_create_import_issue(self.hass, self.context['source'], 'cannot_connect')\n return self.async_abort(reason='cannot_connect')\n return await self._create_entry(username, acquired_token)\n\n async def async_step_user(self, user_input=None):\n \"\"\"User initiated config flow.\"\"\"\n if user_input is None:\n return self.async_show_form(step_id='user', data_schema=vol.Schema({vol.Required(CONF_USERNAME): str, vol.Required(CONF_PASSWORD): str}))\n username = user_input[CONF_USERNAME]\n return await self._create_client(username, password=user_input[CONF_PASSWORD])\n\n async def async_step_import(self, user_input):\n \"\"\"Import a config entry.\"\"\"\n result = await self._create_client(user_input[CONF_USERNAME], token=user_input[CONF_TOKEN])\n if result['type'] == FlowResultType.CREATE_ENTRY:\n await async_create_import_issue(self.hass, self.context['source'], '', True)\n return result\n", "source": "the_stack_v2_python_sparse", "source_path": "homeassistant/components/melcloud/config_flow.py", "source_repo": "home-assistant/core", "split": "val", "star_events_count": 35501}
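The _create_client body in the FlowHandler record above bounds the MELCloud login with asyncio.timeout(10) and maps each failure class onto a config-flow abort reason. A standalone sketch of that timeout-plus-error-mapping pattern follows, with the Home Assistant and pymelcloud specifics replaced by invented stand-ins; asyncio.timeout requires Python 3.11 or newer, which the record's own use of it already implies.

import asyncio

async def validate(login):
    # Same shape as the record: bounded wait, then map failures to abort reasons.
    try:
        async with asyncio.timeout(10):     # Python 3.11+; the record uses 10 s too
            return ('ok', await login())
    except TimeoutError:
        return ('cannot_connect', None)     # the record also maps aiohttp ClientError here
    except PermissionError:                 # invented stand-in for HTTP 401/403 handling
        return ('invalid_auth', None)

async def _demo():
    async def fake_login():
        await asyncio.sleep(0)
        return 'token-123'
    print(await validate(fake_login))       # ('ok', 'token-123')

asyncio.run(_demo())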
{"blob_id": "3e18e9ca5fce435798621d717e44599cb0102bfc", "bodies": ["global secretSpace\nfor i in string.digits:\n for j in string.digits:\n for k in string.digits:\n secretSpace.append(i + j + k)", "global secretSpace\nlastGuess = guessHistory[-1]\nlastClue = clueHistory[-1]\nnewSpace = []\nfor element in secretSpace:\n if getClues(element, lastGuess) == lastClue:\n newSpace.append(element)\nsecretSpace = newSpace", "if player1 == 'human':\n return getHumanGuess()\nelse:\n secret = ''\n for i in range(3):\n secret += random.choice(string.digits)\n secretHistory.append(secret)\n return secret", "fermis = picos = bagels = 0\n' Record whether each digit in secret has been matched with a digit\\n in guess'\nmatched = [False, False, False]\n' Count fermis '\nfor i in range(3):\n if guess[i] == secret[i]:\n matched[i] = True\n fermis += 1\n' Count picos '\nfor guessIdx in range(3):\n for secretIdx in range(3):\n ' Guess digit must be in different position than secret digit '\n if guessIdx == secretIdx:\n continue\n ' Secret digit must not already be matched '\n if matched[secretIdx] == True:\n continue\n if guess[guessIdx] == secret[secretIdx]:\n matched[secretIdx] = True\n picos += 1\n break\n' Each guess digit gets a clue, so clues add up to 3 '\nbagels = 3 - fermis - picos\nreturn (fermis, picos, bagels)"], "bodies_text": "<|body_start_0|>\n global secretSpace\n for i in string.digits:\n for j in string.digits:\n for k in string.digits:\n secretSpace.append(i + j + k)\n<|end_body_0|>\n\n<|body_start_1|>\n global secretSpace\n lastGuess = guessHistory[-1]\n lastClue = clueHistory[-1]\n newSpace = []\n for element in secretSpace:\n if getClues(element, lastGuess) == lastClue:\n newSpace.append(element)\n secretSpace = newSpace\n<|end_body_1|>\n\n<|body_start_2|>\n if player1 == 'human':\n return getHumanGuess()\n else:\n secret = ''\n for i in range(3):\n secret += random.choice(string.digits)\n secretHistory.append(secret)\n return secret\n<|end_body_2|>\n\n<|body_start_3|>\n fermis = picos = bagels = 0\n ' Record whether each digit in secret has been matched with a digit\\n in guess'\n matched = [False, False, False]\n ' Count fermis '\n for i in range(3):\n if guess[i] == secret[i]:\n matched[i] = True\n fermis += 1\n ' Count picos '\n for guessIdx in range(3):\n for secretIdx in range(3):\n ' Guess digit must be in different position than secret digit '\n if guessIdx == secretIdx:\n continue\n ' Secret digit must not already be matched '\n if matched[secretIdx] == True:\n continue\n if guess[guessIdx] == secret[secretIdx]:\n matched[secretIdx] = True\n picos += 1\n break\n ' Each guess digit gets a clue, so clues add up to 3 '\n bagels = 3 - fermis - picos\n return (fermis, picos, bagels)\n<|end_body_3|>\n", "class_docstring": "", "class_name": "CompPlayer", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CompPlayer:\n\n def populateSecretSpace(self):\n \"\"\"populate space of possible secrets with strings 000-999\"\"\"\n <|body_0|>\n\n def pruneSecretSpace(self):\n \"\"\"remove all secrets from secretSpace that yield a different set of clues when tested against the most recent guess\"\"\"\n <|body_1|>\n\n def makeSecret(self, player1):\n \"\"\"Return a three character string composed of random digits (000-999).\"\"\"\n <|body_2|>\n\n def getClues(self, secret, guess):\n \"\"\"In a game of bagels, given secret -- a three character string of digits guess -- a three character string of digits calculate and return a three-tuple 
of clues (fermis, picos, bagels). A fermi is a guess digit that matches the secret in value and position. A pico is a guess digit that matches a secret digit in value but not in position. A bagel is a guess digit that does not match a secret digit. Each guess digit may match at most one secret digit, and is assigned exactly one clue. Each secret digit may match at most one guess digit.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n global secretSpace\n for i in string.digits:\n for j in string.digits:\n for k in string.digits:\n secretSpace.append(i + j + k)\n<|end_body_0|>\n\n<|body_start_1|>\n global secretSpace\n lastGuess = guessHistory[-1]\n lastClue = clueHistory[-1]\n newSpace = []\n for element in secretSpace:\n if getClues(element, lastGuess) == lastClue:\n newSpace.append(element)\n secretSpace = newSpace\n<|end_body_1|>\n\n<|body_start_2|>\n if player1 == 'human':\n return getHumanGuess()\n else:\n secret = ''\n for i in range(3):\n secret += random.choice(string.digits)\n secretHistory.append(secret)\n return secret\n<|end_body_2|>\n\n<|body_start_3|>\n fermis = picos = bagels = 0\n ' Record whether each digit in secret has been matched with a digit\\n in guess'\n matched = [False, False, False]\n ' Count fermis '\n for i in range(3):\n if guess[i] == secret[i]:\n matched[i] = True\n fermis += 1\n ' Count picos '\n for guessIdx in range(3):\n for secretIdx in range(3):\n ' Guess digit must be in different position than secret digit '\n if guessIdx == secretIdx:\n continue\n ' Secret digit must not already be matched '\n if matched[secretIdx] == True:\n continue\n if guess[guessIdx] == secret[secretIdx]:\n matched[secretIdx] = True\n picos += 1\n break\n ' Each guess digit gets a clue, so clues add up to 3 '\n bagels = 3 - fermis - picos\n return (fermis, picos, bagels)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000388", "length_bytes": 6273, "license_type": "no_license", "methods": [{"docstring": "populate space of possible secrets with strings 000-999", "name": "populateSecretSpace", "signature": "def populateSecretSpace(self)"}, {"docstring": "remove all secrets from secretSpace that yield a different set of clues when tested against the most recent guess", "name": "pruneSecretSpace", "signature": "def pruneSecretSpace(self)"}, {"docstring": "Return a three character string composed of random digits (000-999).", "name": "makeSecret", "signature": "def makeSecret(self, player1)"}, {"docstring": "In a game of bagels, given secret -- a three character string of digits guess -- a three character string of digits calculate and return a three-tuple of clues (fermis, picos, bagels). A fermi is a guess digit that matches the secret in value and position. A pico is a guess digit that matches a secret digit in value but not in position. A bagel is a guess digit that does not match a secret digit. Each guess digit may match at most one secret digit, and is assigned exactly one clue. 
Each secret digit may match at most one guess digit.", "name": "getClues", "signature": "def getClues(self, secret, guess)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_028569", "prompt": "Implement the Python class `CompPlayer` described below.\n\nClass description:\nImplement the CompPlayer class.\n\nMethod signatures and docstrings:\n- def populateSecretSpace(self): populate space of possible secrets with strings 000-999\n- def pruneSecretSpace(self): remove all secrets from secretSpace that yield a different set of clues when tested against the most recent guess\n- def makeSecret(self, player1): Return a three character string composed of random digits (000-999).\n- def getClues(self, secret, guess): In a game of bagels, given secret -- a three character string of digits guess -- a three character string of digits calculate and return a three-tuple of clues (fermis, picos, bagels). A fermi is a guess digit that matches the secret in value and position. A pico is a guess digit that matches a secret digit in value but not in position. A bagel is a guess digit that does not match a secret digit. Each guess digit may match at most one secret digit, and is assigned exactly one clue. Each secret digit may match at most one guess digit.", "prompted_full_text": "Implement the Python class `CompPlayer` described below.\n\nClass description:\nImplement the CompPlayer class.\n\nMethod signatures and docstrings:\n- def populateSecretSpace(self): populate space of possible secrets with strings 000-999\n- def pruneSecretSpace(self): remove all secrets from secretSpace that yield a different set of clues when tested against the most recent guess\n- def makeSecret(self, player1): Return a three character string composed of random digits (000-999).\n- def getClues(self, secret, guess): In a game of bagels, given secret -- a three character string of digits guess -- a three character string of digits calculate and return a three-tuple of clues (fermis, picos, bagels). A fermi is a guess digit that matches the secret in value and position. A pico is a guess digit that matches a secret digit in value but not in position. A bagel is a guess digit that does not match a secret digit. Each guess digit may match at most one secret digit, and is assigned exactly one clue. Each secret digit may match at most one guess digit.\n\n<|skeleton|>\nclass CompPlayer:\n\n def populateSecretSpace(self):\n \"\"\"populate space of possible secrets with strings 000-999\"\"\"\n <|body_0|>\n\n def pruneSecretSpace(self):\n \"\"\"remove all secrets from secretSpace that yield a different set of clues when tested against the most recent guess\"\"\"\n <|body_1|>\n\n def makeSecret(self, player1):\n \"\"\"Return a three character string composed of random digits (000-999).\"\"\"\n <|body_2|>\n\n def getClues(self, secret, guess):\n \"\"\"In a game of bagels, given secret -- a three character string of digits guess -- a three character string of digits calculate and return a three-tuple of clues (fermis, picos, bagels). A fermi is a guess digit that matches the secret in value and position. A pico is a guess digit that matches a secret digit in value but not in position. A bagel is a guess digit that does not match a secret digit. Each guess digit may match at most one secret digit, and is assigned exactly one clue. 
Each secret digit may match at most one guess digit.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n global secretSpace\n for i in string.digits:\n for j in string.digits:\n for k in string.digits:\n secretSpace.append(i + j + k)\n<|end_body_0|>\n\n<|body_start_1|>\n global secretSpace\n lastGuess = guessHistory[-1]\n lastClue = clueHistory[-1]\n newSpace = []\n for element in secretSpace:\n if getClues(element, lastGuess) == lastClue:\n newSpace.append(element)\n secretSpace = newSpace\n<|end_body_1|>\n\n<|body_start_2|>\n if player1 == 'human':\n return getHumanGuess()\n else:\n secret = ''\n for i in range(3):\n secret += random.choice(string.digits)\n secretHistory.append(secret)\n return secret\n<|end_body_2|>\n\n<|body_start_3|>\n fermis = picos = bagels = 0\n ' Record whether each digit in secret has been matched with a digit\\n in guess'\n matched = [False, False, False]\n ' Count fermis '\n for i in range(3):\n if guess[i] == secret[i]:\n matched[i] = True\n fermis += 1\n ' Count picos '\n for guessIdx in range(3):\n for secretIdx in range(3):\n ' Guess digit must be in different position than secret digit '\n if guessIdx == secretIdx:\n continue\n ' Secret digit must not already be matched '\n if matched[secretIdx] == True:\n continue\n if guess[guessIdx] == secret[secretIdx]:\n matched[secretIdx] = True\n picos += 1\n break\n ' Each guess digit gets a clue, so clues add up to 3 '\n bagels = 3 - fermis - picos\n return (fermis, picos, bagels)\n<|end_body_3|>\n", "revision_id": "bdc54979f91bce6d74fcab78e0f05b95545f6930", "skeleton": "<|skeleton|>\nclass CompPlayer:\n\n def populateSecretSpace(self):\n \"\"\"populate space of possible secrets with strings 000-999\"\"\"\n <|body_0|>\n\n def pruneSecretSpace(self):\n \"\"\"remove all secrets from secretSpace that yield a different set of clues when tested against the most recent guess\"\"\"\n <|body_1|>\n\n def makeSecret(self, player1):\n \"\"\"Return a three character string composed of random digits (000-999).\"\"\"\n <|body_2|>\n\n def getClues(self, secret, guess):\n \"\"\"In a game of bagels, given secret -- a three character string of digits guess -- a three character string of digits calculate and return a three-tuple of clues (fermis, picos, bagels). A fermi is a guess digit that matches the secret in value and position. A pico is a guess digit that matches a secret digit in value but not in position. A bagel is a guess digit that does not match a secret digit. Each guess digit may match at most one secret digit, and is assigned exactly one clue. 
Each secret digit may match at most one guess digit.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CompPlayer:\n def populateSecretSpace(self):\n \"\"\"populate space of possible secrets with strings 000-999\"\"\"\n global secretSpace\n for i in string.digits:\n for j in string.digits:\n for k in string.digits:\n secretSpace.append(i + j + k)\n\n def pruneSecretSpace(self):\n \"\"\"remove all secrets from secretSpace that yield a different set of clues when tested against the most recent guess\"\"\"\n global secretSpace\n lastGuess = guessHistory[-1]\n lastClue = clueHistory[-1]\n newSpace = []\n for element in secretSpace:\n if getClues(element, lastGuess) == lastClue:\n newSpace.append(element)\n secretSpace = newSpace\n\n def makeSecret(self, player1):\n \"\"\"Return a three character string composed of random digits (000-999).\"\"\"\n if player1 == 'human':\n return getHumanGuess()\n else:\n secret = ''\n for i in range(3):\n secret += random.choice(string.digits)\n secretHistory.append(secret)\n return secret\n\n def getClues(self, secret, guess):\n \"\"\"In a game of bagels, given secret -- a three character string of digits guess -- a three character string of digits calculate and return a three-tuple of clues (fermis, picos, bagels). A fermi is a guess digit that matches the secret in value and position. A pico is a guess digit that matches a secret digit in value but not in position. A bagel is a guess digit that does not match a secret digit. Each guess digit may match at most one secret digit, and is assigned exactly one clue. Each secret digit may match at most one guess digit.\"\"\"\n fermis = picos = bagels = 0\n ' Record whether each digit in secret has been matched with a digit\\n in guess'\n matched = [False, False, False]\n ' Count fermis '\n for i in range(3):\n if guess[i] == secret[i]:\n matched[i] = True\n fermis += 1\n ' Count picos '\n for guessIdx in range(3):\n for secretIdx in range(3):\n ' Guess digit must be in different position than secret digit '\n if guessIdx == secretIdx:\n continue\n ' Secret digit must not already be matched '\n if matched[secretIdx] == True:\n continue\n if guess[guessIdx] == secret[secretIdx]:\n matched[secretIdx] = True\n picos += 1\n break\n ' Each guess digit gets a clue, so clues add up to 3 '\n bagels = 3 - fermis - picos\n return (fermis, picos, bagels)\n", "source": "the_stack_v2_python_sparse", "source_path": "HW16_AbrarRouf.py", "source_repo": "Cloud-IV/CS100-Programs", "split": "val", "star_events_count": 0}
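The CompPlayer record above keeps its state in module-level globals (`secretSpace`, `guessHistory`, `clueHistory`) and calls `getClues` without `self`, so the bodies only run inside their original module. Its pico loop also never excludes guess digits that already scored a fermi, which over-counts in cases such as secret `'110'` vs guess `'100'` (it reports `(2, 1, 0)` where `(2, 0, 1)` is correct). Below is a minimal self-contained sketch of the same clue-counting and space-pruning logic with that guard added; `get_clues` and the inline pruning are illustrative, not part of the record.

```python
from itertools import product

def get_clues(secret: str, guess: str) -> tuple:
    """Return (fermis, picos, bagels) for two 3-digit strings."""
    n = len(secret)
    secret_used = [False] * n  # secret digit already consumed by a clue
    guess_used = [False] * n   # guess digit already assigned a clue
    fermis = picos = 0
    for i in range(n):         # fermis: same digit, same position
        if guess[i] == secret[i]:
            secret_used[i] = guess_used[i] = True
            fermis += 1
    for g in range(n):         # picos: same digit, different position
        if guess_used[g]:      # the guard the record's version is missing
            continue
        for s in range(n):
            if s == g or secret_used[s]:
                continue
            if guess[g] == secret[s]:
                secret_used[s] = True
                picos += 1
                break
    return (fermis, picos, n - fermis - picos)

# Populate the 000-999 search space and prune it against one observed clue,
# as populateSecretSpace/pruneSecretSpace do, but without globals.
space = [''.join(d) for d in product('0123456789', repeat=3)]
clue = get_clues('042', '047')                      # -> (2, 0, 1)
space = [s for s in space if get_clues(s, '047') == clue]
print(len(space))                                   # 27 candidates remain
```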
{"blob_id": "268c21986fe539544344a86cb088d0a84dd94964", "bodies": ["import socket\npath_7z = shutil.which('7z')\nif path_7z is None:\n raise RuntimeError('Could not find 7z on the PATH')\nself._path_7z = path_7z\nmy_host = socket.gethostname()\nmy_rank = self.comm.rank\nall_host_ranks = self.comm.allgather((my_host, my_rank))\nunique_hosts = {}\nfor host, rank in all_host_ranks:\n if host not in unique_hosts:\n unique_hosts[host] = rank\n elif unique_hosts[host] > rank:\n unique_hosts[host] = rank\nself._num_hosts = len(unique_hosts)\nif unique_hosts[my_host] != my_rank:\n self._host_rank = None\nelse:\n self._host_rank = sorted(unique_hosts).index(my_host)\n self.log.debug(f'Lowest rank on {my_host}')", "if self._host_rank is not None:\n my_containers = self.containers[self._host_rank::self._num_hosts]\n for container in my_containers:\n self.log.info(f'Zipping {container}')\n if not container.endswith('.zarr') or not os.path.isdir(container):\n raise ValueError(f'{container} is not a valid .zarr directory')\n dest_file = container + '.zip'\n src_dir = container + '/.'\n command = [self._path_7z, 'a', '-tzip', '-mx=0', dest_file, src_dir]\n status = subprocess.run(command, capture_output=True)\n if status.returncode != 0:\n self.log.debug('Error occurred while zipping. Debug logs follow...')\n self.log.debug(f'stdout={status.stdout}')\n self.log.debug(f'stderr={status.stderr}')\n raise RuntimeError(f'Error occurred while zipping {container}.')\n self.log.info(f'Done zipping. Generated {dest_file}.')\n if self.remove:\n shutil.rmtree(container)\n self.log.info(f'Removed original container {container}.')\nself.comm.Barrier()\nraise pipeline.PipelineStopIteration"], "bodies_text": "<|body_start_0|>\n import socket\n path_7z = shutil.which('7z')\n if path_7z is None:\n raise RuntimeError('Could not find 7z on the PATH')\n self._path_7z = path_7z\n my_host = socket.gethostname()\n my_rank = self.comm.rank\n all_host_ranks = self.comm.allgather((my_host, my_rank))\n unique_hosts = {}\n for host, rank in all_host_ranks:\n if host not in unique_hosts:\n unique_hosts[host] = rank\n elif unique_hosts[host] > rank:\n unique_hosts[host] = rank\n self._num_hosts = len(unique_hosts)\n if unique_hosts[my_host] != my_rank:\n self._host_rank = None\n else:\n self._host_rank = sorted(unique_hosts).index(my_host)\n self.log.debug(f'Lowest rank on {my_host}')\n<|end_body_0|>\n\n<|body_start_1|>\n if self._host_rank is not None:\n my_containers = self.containers[self._host_rank::self._num_hosts]\n for container in my_containers:\n self.log.info(f'Zipping {container}')\n if not container.endswith('.zarr') or not os.path.isdir(container):\n raise ValueError(f'{container} is not a valid .zarr directory')\n dest_file = container + '.zip'\n src_dir = container + '/.'\n command = [self._path_7z, 'a', '-tzip', '-mx=0', dest_file, src_dir]\n status = subprocess.run(command, capture_output=True)\n if status.returncode != 0:\n self.log.debug('Error occurred while zipping. Debug logs follow...')\n self.log.debug(f'stdout={status.stdout}')\n self.log.debug(f'stderr={status.stderr}')\n raise RuntimeError(f'Error occurred while zipping {container}.')\n self.log.info(f'Done zipping. Generated {dest_file}.')\n if self.remove:\n shutil.rmtree(container)\n self.log.info(f'Removed original container {container}.')\n self.comm.Barrier()\n raise pipeline.PipelineStopIteration\n<|end_body_1|>\n", "class_docstring": "Zip up a Zarr container into a single file. 
This is useful to save on file quota and speed up IO by combining the chunk data into a single file. Note that the file cannot really be updated after this process has been performed. As this process is IO limited in most cases, it will attempt to parallelise the compression across different distinct nodes. That means at most only one rank per node will participate. Attributes ---------- containers : list The names of the Zarr containers to compress. The zipped files will have the same names with `.zip` appended. remove : bool Remove the original data when finished. Defaults to True.", "class_name": "ZipZarrContainers", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ZipZarrContainers:\n \"\"\"Zip up a Zarr container into a single file. This is useful to save on file quota and speed up IO by combining the chunk data into a single file. Note that the file cannot really be updated after this process has been performed. As this process is IO limited in most cases, it will attempt to parallelise the compression across different distinct nodes. That means at most only one rank per node will participate. Attributes ---------- containers : list The names of the Zarr containers to compress. The zipped files will have the same names with `.zip` appended. remove : bool Remove the original data when finished. Defaults to True.\"\"\"\n\n def setup(self, _=None):\n \"\"\"Setup the task. This routine does nothing at all with the input, but it means the process won't run until the (optional) requirement is received. This can be used to delay evaluation until you know that all the files are available.\"\"\"\n <|body_0|>\n\n def process(self):\n \"\"\"Compress the listed zarr containers. Only the lowest rank on each node will participate.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n import socket\n path_7z = shutil.which('7z')\n if path_7z is None:\n raise RuntimeError('Could not find 7z on the PATH')\n self._path_7z = path_7z\n my_host = socket.gethostname()\n my_rank = self.comm.rank\n all_host_ranks = self.comm.allgather((my_host, my_rank))\n unique_hosts = {}\n for host, rank in all_host_ranks:\n if host not in unique_hosts:\n unique_hosts[host] = rank\n elif unique_hosts[host] > rank:\n unique_hosts[host] = rank\n self._num_hosts = len(unique_hosts)\n if unique_hosts[my_host] != my_rank:\n self._host_rank = None\n else:\n self._host_rank = sorted(unique_hosts).index(my_host)\n self.log.debug(f'Lowest rank on {my_host}')\n<|end_body_0|>\n\n<|body_start_1|>\n if self._host_rank is not None:\n my_containers = self.containers[self._host_rank::self._num_hosts]\n for container in my_containers:\n self.log.info(f'Zipping {container}')\n if not container.endswith('.zarr') or not os.path.isdir(container):\n raise ValueError(f'{container} is not a valid .zarr directory')\n dest_file = container + '.zip'\n src_dir = container + '/.'\n command = [self._path_7z, 'a', '-tzip', '-mx=0', dest_file, src_dir]\n status = subprocess.run(command, capture_output=True)\n if status.returncode != 0:\n self.log.debug('Error occurred while zipping. Debug logs follow...')\n self.log.debug(f'stdout={status.stdout}')\n self.log.debug(f'stderr={status.stderr}')\n raise RuntimeError(f'Error occurred while zipping {container}.')\n self.log.info(f'Done zipping. 
Generated {dest_file}.')\n if self.remove:\n shutil.rmtree(container)\n self.log.info(f'Removed original container {container}.')\n self.comm.Barrier()\n raise pipeline.PipelineStopIteration\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000389", "length_bytes": 38215, "license_type": "permissive", "methods": [{"docstring": "Setup the task. This routine does nothing at all with the input, but it means the process won't run until the (optional) requirement is received. This can be used to delay evaluation until you know that all the files are available.", "name": "setup", "signature": "def setup(self, _=None)"}, {"docstring": "Compress the listed zarr containers. Only the lowest rank on each node will participate.", "name": "process", "signature": "def process(self)"}], "n_methods": 2, "prompt": "Implement the Python class `ZipZarrContainers` described below.\n\nClass description:\nZip up a Zarr container into a single file. This is useful to save on file quota and speed up IO by combining the chunk data into a single file. Note that the file cannot really be updated after this process has been performed. As this process is IO limited in most cases, it will attempt to parallelise the compression across different distinct nodes. That means at most only one rank per node will participate. Attributes ---------- containers : list The names of the Zarr containers to compress. The zipped files will have the same names with `.zip` appended. remove : bool Remove the original data when finished. Defaults to True.\n\nMethod signatures and docstrings:\n- def setup(self, _=None): Setup the task. This routine does nothing at all with the input, but it means the process won't run until the (optional) requirement is received. This can be used to delay evaluation until you know that all the files are available.\n- def process(self): Compress the listed zarr containers. Only the lowest rank on each node will participate.", "prompted_full_text": "Implement the Python class `ZipZarrContainers` described below.\n\nClass description:\nZip up a Zarr container into a single file. This is useful to save on file quota and speed up IO by combining the chunk data into a single file. Note that the file cannot really be updated after this process has been performed. As this process is IO limited in most cases, it will attempt to parallelise the compression across different distinct nodes. That means at most only one rank per node will participate. Attributes ---------- containers : list The names of the Zarr containers to compress. The zipped files will have the same names with `.zip` appended. remove : bool Remove the original data when finished. Defaults to True.\n\nMethod signatures and docstrings:\n- def setup(self, _=None): Setup the task. This routine does nothing at all with the input, but it means the process won't run until the (optional) requirement is received. This can be used to delay evaluation until you know that all the files are available.\n- def process(self): Compress the listed zarr containers. Only the lowest rank on each node will participate.\n\n<|skeleton|>\nclass ZipZarrContainers:\n \"\"\"Zip up a Zarr container into a single file. This is useful to save on file quota and speed up IO by combining the chunk data into a single file. Note that the file cannot really be updated after this process has been performed. As this process is IO limited in most cases, it will attempt to parallelise the compression across different distinct nodes. 
That means at most only one rank per node will participate. Attributes ---------- containers : list The names of the Zarr containers to compress. The zipped files will have the same names with `.zip` appended. remove : bool Remove the original data when finished. Defaults to True.\"\"\"\n\n def setup(self, _=None):\n \"\"\"Setup the task. This routine does nothing at all with the input, but it means the process won't run until the (optional) requirement is received. This can be used to delay evaluation until you know that all the files are available.\"\"\"\n <|body_0|>\n\n def process(self):\n \"\"\"Compress the listed zarr containers. Only the lowest rank on each node will participate.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n import socket\n path_7z = shutil.which('7z')\n if path_7z is None:\n raise RuntimeError('Could not find 7z on the PATH')\n self._path_7z = path_7z\n my_host = socket.gethostname()\n my_rank = self.comm.rank\n all_host_ranks = self.comm.allgather((my_host, my_rank))\n unique_hosts = {}\n for host, rank in all_host_ranks:\n if host not in unique_hosts:\n unique_hosts[host] = rank\n elif unique_hosts[host] > rank:\n unique_hosts[host] = rank\n self._num_hosts = len(unique_hosts)\n if unique_hosts[my_host] != my_rank:\n self._host_rank = None\n else:\n self._host_rank = sorted(unique_hosts).index(my_host)\n self.log.debug(f'Lowest rank on {my_host}')\n<|end_body_0|>\n\n<|body_start_1|>\n if self._host_rank is not None:\n my_containers = self.containers[self._host_rank::self._num_hosts]\n for container in my_containers:\n self.log.info(f'Zipping {container}')\n if not container.endswith('.zarr') or not os.path.isdir(container):\n raise ValueError(f'{container} is not a valid .zarr directory')\n dest_file = container + '.zip'\n src_dir = container + '/.'\n command = [self._path_7z, 'a', '-tzip', '-mx=0', dest_file, src_dir]\n status = subprocess.run(command, capture_output=True)\n if status.returncode != 0:\n self.log.debug('Error occurred while zipping. Debug logs follow...')\n self.log.debug(f'stdout={status.stdout}')\n self.log.debug(f'stderr={status.stderr}')\n raise RuntimeError(f'Error occurred while zipping {container}.')\n self.log.info(f'Done zipping. Generated {dest_file}.')\n if self.remove:\n shutil.rmtree(container)\n self.log.info(f'Removed original container {container}.')\n self.comm.Barrier()\n raise pipeline.PipelineStopIteration\n<|end_body_1|>\n", "revision_id": "544e485c03c125d260eb22ef467ae4d2e4dfed09", "skeleton": "<|skeleton|>\nclass ZipZarrContainers:\n \"\"\"Zip up a Zarr container into a single file. This is useful to save on file quota and speed up IO by combining the chunk data into a single file. Note that the file cannot really be updated after this process has been performed. As this process is IO limited in most cases, it will attempt to parallelise the compression across different distinct nodes. That means at most only one rank per node will participate. Attributes ---------- containers : list The names of the Zarr containers to compress. The zipped files will have the same names with `.zip` appended. remove : bool Remove the original data when finished. Defaults to True.\"\"\"\n\n def setup(self, _=None):\n \"\"\"Setup the task. This routine does nothing at all with the input, but it means the process won't run until the (optional) requirement is received. 
This can be used to delay evaluation until you know that all the files are available.\"\"\"\n <|body_0|>\n\n def process(self):\n \"\"\"Compress the listed zarr containers. Only the lowest rank on each node will participate.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ZipZarrContainers:\n \"\"\"Zip up a Zarr container into a single file. This is useful to save on file quota and speed up IO by combining the chunk data into a single file. Note that the file cannot really be updated after this process has been performed. As this process is IO limited in most cases, it will attempt to parallelise the compression across different distinct nodes. That means at most only one rank per node will participate. Attributes ---------- containers : list The names of the Zarr containers to compress. The zipped files will have the same names with `.zip` appended. remove : bool Remove the original data when finished. Defaults to True.\"\"\"\n\n def setup(self, _=None):\n \"\"\"Setup the task. This routine does nothing at all with the input, but it means the process won't run until the (optional) requirement is received. This can be used to delay evaluation until you know that all the files are available.\"\"\"\n import socket\n path_7z = shutil.which('7z')\n if path_7z is None:\n raise RuntimeError('Could not find 7z on the PATH')\n self._path_7z = path_7z\n my_host = socket.gethostname()\n my_rank = self.comm.rank\n all_host_ranks = self.comm.allgather((my_host, my_rank))\n unique_hosts = {}\n for host, rank in all_host_ranks:\n if host not in unique_hosts:\n unique_hosts[host] = rank\n elif unique_hosts[host] > rank:\n unique_hosts[host] = rank\n self._num_hosts = len(unique_hosts)\n if unique_hosts[my_host] != my_rank:\n self._host_rank = None\n else:\n self._host_rank = sorted(unique_hosts).index(my_host)\n self.log.debug(f'Lowest rank on {my_host}')\n\n def process(self):\n \"\"\"Compress the listed zarr containers. Only the lowest rank on each node will participate.\"\"\"\n if self._host_rank is not None:\n my_containers = self.containers[self._host_rank::self._num_hosts]\n for container in my_containers:\n self.log.info(f'Zipping {container}')\n if not container.endswith('.zarr') or not os.path.isdir(container):\n raise ValueError(f'{container} is not a valid .zarr directory')\n dest_file = container + '.zip'\n src_dir = container + '/.'\n command = [self._path_7z, 'a', '-tzip', '-mx=0', dest_file, src_dir]\n status = subprocess.run(command, capture_output=True)\n if status.returncode != 0:\n self.log.debug('Error occurred while zipping. Debug logs follow...')\n self.log.debug(f'stdout={status.stdout}')\n self.log.debug(f'stderr={status.stderr}')\n raise RuntimeError(f'Error occurred while zipping {container}.')\n self.log.info(f'Done zipping. Generated {dest_file}.')\n if self.remove:\n shutil.rmtree(container)\n self.log.info(f'Removed original container {container}.')\n self.comm.Barrier()\n raise pipeline.PipelineStopIteration\n", "source": "the_stack_v2_python_sparse", "source_path": "draco/core/io.py", "source_repo": "radiocosmology/draco", "split": "val", "star_events_count": 8}
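The ZipZarrContainers record above shells out to `7z a -tzip -mx=0`, i.e. a ZIP archive in store mode: zarr chunks are already compressed, so the point is only to collapse many small files into one, and the `containers[self._host_rank::self._num_hosts]` slice stripes the work round-robin across nodes. A sketch of the same packing step using only the standard library (no external 7z binary, no MPI host selection) might look like this; `zip_zarr` is an illustrative name, not part of the record.

```python
import os
import shutil
import zipfile

def zip_zarr(container: str, remove: bool = False) -> str:
    """Pack a .zarr directory into container.zip without recompressing."""
    if not container.endswith('.zarr') or not os.path.isdir(container):
        raise ValueError(f'{container} is not a valid .zarr directory')
    dest = container + '.zip'
    # ZIP_STORED matches 7z's -mx=0: entries are stored, not deflated.
    with zipfile.ZipFile(dest, 'w', compression=zipfile.ZIP_STORED) as zf:
        for root, _dirs, files in os.walk(container):
            for name in files:
                path = os.path.join(root, name)
                # Archive paths relative to the container root, matching
                # the `container + '/.'` source convention used with 7z.
                zf.write(path, os.path.relpath(path, container))
    if remove:
        shutil.rmtree(container)
    return dest
```

If the surrounding pipeline later reads the archive back, zarr 2.x's `ZipStore` can open such a file directly, which is presumably why store mode is used.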
{"blob_id": "0fe738215ae948139fc20c99688f79c768c44899", "bodies": ["self.X = X_init\nself.Y = Y_init\nself.l = l\nself.sigma_f = sigma_f\nself.K = self.kernel(X_init, X_init)", "first = np.sum(X1 ** 2, axis=1).reshape(-1, 1)\nsecond = np.sum(X2 ** 2, axis=1)\nthird = -2 * np.dot(X1, X2.T)\nsqdist = first + second + third\nkernel_1 = self.sigma_f ** 2\nkernel_2 = np.exp(-0.5 / self.l ** 2 * sqdist)\nkernel = kernel_1 * kernel_2\nreturn kernel"], "bodies_text": "<|body_start_0|>\n self.X = X_init\n self.Y = Y_init\n self.l = l\n self.sigma_f = sigma_f\n self.K = self.kernel(X_init, X_init)\n<|end_body_0|>\n\n<|body_start_1|>\n first = np.sum(X1 ** 2, axis=1).reshape(-1, 1)\n second = np.sum(X2 ** 2, axis=1)\n third = -2 * np.dot(X1, X2.T)\n sqdist = first + second + third\n kernel_1 = self.sigma_f ** 2\n kernel_2 = np.exp(-0.5 / self.l ** 2 * sqdist)\n kernel = kernel_1 * kernel_2\n return kernel\n<|end_body_1|>\n", "class_docstring": "Gaussian Process", "class_name": "GaussianProcess", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GaussianProcess:\n \"\"\"Gaussian Process\"\"\"\n\n def __init__(self, X_init, Y_init, l=1, sigma_f=1):\n \"\"\"* X_init is a numpy.ndarray of shape (t, 1) representing the inputs already sampled with the black-box function * Y_init is a numpy.ndarray of shape (t, 1) representing the outputs of the black-box function for each input in X_init - t is the number of initial samples * l is the length parameter for the kernel * sigma_f is the standard deviation given to the output of the black-box function * Sets the public instance attributes X, Y, l, and sigma_f corresponding to the respective constructor inputs * Sets the public instance attribute K, representing the current covariance kernel matrix for the Gaussian process Public instance method def kernel(self, X1, X2): that calculates the covariance k\"\"\"\n <|body_0|>\n\n def kernel(self, X1, X2):\n \"\"\"Isotropic squared exponential kernel. Computes a covariance matrix from points in X1 and X2. * X1 numpy.ndarray of shape (t, 1). * X2 numpy.ndarray of shape (t, 1). 
Returns Covariance matrix (m x n).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.X = X_init\n self.Y = Y_init\n self.l = l\n self.sigma_f = sigma_f\n self.K = self.kernel(X_init, X_init)\n<|end_body_0|>\n\n<|body_start_1|>\n first = np.sum(X1 ** 2, axis=1).reshape(-1, 1)\n second = np.sum(X2 ** 2, axis=1)\n third = -2 * np.dot(X1, X2.T)\n sqdist = first + second + third\n kernel_1 = self.sigma_f ** 2\n kernel_2 = np.exp(-0.5 / self.l ** 2 * sqdist)\n kernel = kernel_1 * kernel_2\n return kernel\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000390", "length_bytes": 2086, "license_type": "no_license", "methods": [{"docstring": "* X_init is a numpy.ndarray of shape (t, 1) representing the inputs already sampled with the black-box function * Y_init is a numpy.ndarray of shape (t, 1) representing the outputs of the black-box function for each input in X_init - t is the number of initial samples * l is the length parameter for the kernel * sigma_f is the standard deviation given to the output of the black-box function * Sets the public instance attributes X, Y, l, and sigma_f corresponding to the respective constructor inputs * Sets the public instance attribute K, representing the current covariance kernel matrix for the Gaussian process Public instance method def kernel(self, X1, X2): that calculates the covariance k", "name": "__init__", "signature": "def __init__(self, X_init, Y_init, l=1, sigma_f=1)"}, {"docstring": "Isotropic squared exponential kernel. Computes a covariance matrix from points in X1 and X2. * X1 numpy.ndarray of shape (t, 1). * X2 numpy.ndarray of shape (t, 1). Returns Covariance matrix (m x n).", "name": "kernel", "signature": "def kernel(self, X1, X2)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_014993", "prompt": "Implement the Python class `GaussianProcess` described below.\n\nClass description:\nGaussian Process\n\nMethod signatures and docstrings:\n- def __init__(self, X_init, Y_init, l=1, sigma_f=1): * X_init is a numpy.ndarray of shape (t, 1) representing the inputs already sampled with the black-box function * Y_init is a numpy.ndarray of shape (t, 1) representing the outputs of the black-box function for each input in X_init - t is the number of initial samples * l is the length parameter for the kernel * sigma_f is the standard deviation given to the output of the black-box function * Sets the public instance attributes X, Y, l, and sigma_f corresponding to the respective constructor inputs * Sets the public instance attribute K, representing the current covariance kernel matrix for the Gaussian process Public instance method def kernel(self, X1, X2): that calculates the covariance k\n- def kernel(self, X1, X2): Isotropic squared exponential kernel. Computes a covariance matrix from points in X1 and X2. * X1 numpy.ndarray of shape (t, 1). * X2 numpy.ndarray of shape (t, 1). 
Returns Covariance matrix (m x n).", "prompted_full_text": "Implement the Python class `GaussianProcess` described below.\n\nClass description:\nGaussian Process\n\nMethod signatures and docstrings:\n- def __init__(self, X_init, Y_init, l=1, sigma_f=1): * X_init is a numpy.ndarray of shape (t, 1) representing the inputs already sampled with the black-box function * Y_init is a numpy.ndarray of shape (t, 1) representing the outputs of the black-box function for each input in X_init - t is the number of initial samples * l is the length parameter for the kernel * sigma_f is the standard deviation given to the output of the black-box function * Sets the public instance attributes X, Y, l, and sigma_f corresponding to the respective constructor inputs * Sets the public instance attribute K, representing the current covariance kernel matrix for the Gaussian process Public instance method def kernel(self, X1, X2): that calculates the covariance k\n- def kernel(self, X1, X2): Isotropic squared exponential kernel. Computes a covariance matrix from points in X1 and X2. * X1 numpy.ndarray of shape (t, 1). * X2 numpy.ndarray of shape (t, 1). Returns Covariance matrix (m x n).\n\n<|skeleton|>\nclass GaussianProcess:\n \"\"\"Gaussian Process\"\"\"\n\n def __init__(self, X_init, Y_init, l=1, sigma_f=1):\n \"\"\"* X_init is a numpy.ndarray of shape (t, 1) representing the inputs already sampled with the black-box function * Y_init is a numpy.ndarray of shape (t, 1) representing the outputs of the black-box function for each input in X_init - t is the number of initial samples * l is the length parameter for the kernel * sigma_f is the standard deviation given to the output of the black-box function * Sets the public instance attributes X, Y, l, and sigma_f corresponding to the respective constructor inputs * Sets the public instance attribute K, representing the current covariance kernel matrix for the Gaussian process Public instance method def kernel(self, X1, X2): that calculates the covariance k\"\"\"\n <|body_0|>\n\n def kernel(self, X1, X2):\n \"\"\"Isotropic squared exponential kernel. Computes a covariance matrix from points in X1 and X2. * X1 numpy.ndarray of shape (t, 1). * X2 numpy.ndarray of shape (t, 1). 
Returns Covariance matrix (m x n).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.X = X_init\n self.Y = Y_init\n self.l = l\n self.sigma_f = sigma_f\n self.K = self.kernel(X_init, X_init)\n<|end_body_0|>\n\n<|body_start_1|>\n first = np.sum(X1 ** 2, axis=1).reshape(-1, 1)\n second = np.sum(X2 ** 2, axis=1)\n third = -2 * np.dot(X1, X2.T)\n sqdist = first + second + third\n kernel_1 = self.sigma_f ** 2\n kernel_2 = np.exp(-0.5 / self.l ** 2 * sqdist)\n kernel = kernel_1 * kernel_2\n return kernel\n<|end_body_1|>\n", "revision_id": "8ad4c2594ff78b345dbd92e9d54d2a143ac4071a", "skeleton": "<|skeleton|>\nclass GaussianProcess:\n \"\"\"Gaussian Process\"\"\"\n\n def __init__(self, X_init, Y_init, l=1, sigma_f=1):\n \"\"\"* X_init is a numpy.ndarray of shape (t, 1) representing the inputs already sampled with the black-box function * Y_init is a numpy.ndarray of shape (t, 1) representing the outputs of the black-box function for each input in X_init - t is the number of initial samples * l is the length parameter for the kernel * sigma_f is the standard deviation given to the output of the black-box function * Sets the public instance attributes X, Y, l, and sigma_f corresponding to the respective constructor inputs * Sets the public instance attribute K, representing the current covariance kernel matrix for the Gaussian process Public instance method def kernel(self, X1, X2): that calculates the covariance k\"\"\"\n <|body_0|>\n\n def kernel(self, X1, X2):\n \"\"\"Isotropic squared exponential kernel. Computes a covariance matrix from points in X1 and X2. * X1 numpy.ndarray of shape (t, 1). * X2 numpy.ndarray of shape (t, 1). Returns Covariance matrix (m x n).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class GaussianProcess:\n \"\"\"Gaussian Process\"\"\"\n\n def __init__(self, X_init, Y_init, l=1, sigma_f=1):\n \"\"\"* X_init is a numpy.ndarray of shape (t, 1) representing the inputs already sampled with the black-box function * Y_init is a numpy.ndarray of shape (t, 1) representing the outputs of the black-box function for each input in X_init - t is the number of initial samples * l is the length parameter for the kernel * sigma_f is the standard deviation given to the output of the black-box function * Sets the public instance attributes X, Y, l, and sigma_f corresponding to the respective constructor inputs * Sets the public instance attribute K, representing the current covariance kernel matrix for the Gaussian process Public instance method def kernel(self, X1, X2): that calculates the covariance k\"\"\"\n self.X = X_init\n self.Y = Y_init\n self.l = l\n self.sigma_f = sigma_f\n self.K = self.kernel(X_init, X_init)\n\n def kernel(self, X1, X2):\n \"\"\"Isotropic squared exponential kernel. Computes a covariance matrix from points in X1 and X2. * X1 numpy.ndarray of shape (t, 1). * X2 numpy.ndarray of shape (t, 1). 
Returns Covariance matrix (m x n).\"\"\"\n first = np.sum(X1 ** 2, axis=1).reshape(-1, 1)\n second = np.sum(X2 ** 2, axis=1)\n third = -2 * np.dot(X1, X2.T)\n sqdist = first + second + third\n kernel_1 = self.sigma_f ** 2\n kernel_2 = np.exp(-0.5 / self.l ** 2 * sqdist)\n kernel = kernel_1 * kernel_2\n return kernel\n", "source": "the_stack_v2_python_sparse", "source_path": "unsupervised_learning/0x03-hyperparameter_tuning/0-gp.py", "source_repo": "jorgezafra94/holbertonschool-machine_learning", "split": "val", "star_events_count": 1}
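The `kernel` body above vectorises the pairwise squared distance through the expansion `||a - b||^2 = a^2 + b^2 - 2ab` and then applies the RBF formula `k(a, b) = sigma_f^2 * exp(-||a - b||^2 / (2 l^2))`. Here is a compact usage sketch that restates the class and checks the vectorised matrix against a direct two-loop computation (it assumes `numpy` is imported as `np`, as the record's bodies do; the test values are arbitrary).

```python
import numpy as np

class GaussianProcess:
    """Restated compactly from the record above; same math."""

    def __init__(self, X_init, Y_init, l=1, sigma_f=1):
        self.X, self.Y, self.l, self.sigma_f = X_init, Y_init, l, sigma_f
        self.K = self.kernel(X_init, X_init)

    def kernel(self, X1, X2):
        # ||a - b||^2 expanded so the whole matrix is one broadcast.
        sqdist = (np.sum(X1 ** 2, axis=1).reshape(-1, 1)
                  + np.sum(X2 ** 2, axis=1) - 2 * X1 @ X2.T)
        return self.sigma_f ** 2 * np.exp(-0.5 * sqdist / self.l ** 2)

rng = np.random.default_rng(0)
X = rng.uniform(-1, 1, size=(5, 1))
gp = GaussianProcess(X, np.sin(X), l=0.6, sigma_f=2.0)
brute = np.array([[4.0 * np.exp(-0.5 * (a - b) ** 2 / 0.36)
                   for b in X[:, 0]] for a in X[:, 0]])
assert np.allclose(gp.K, brute)   # vectorised == element-by-element
print(gp.K.shape)                 # (5, 5)
```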
{"blob_id": "f8f4073eab5da078fc6cb79089d8964869315461", "bodies": ["annotations = load(self.ann_file)\nassert annotations, f'annotation file \"{self.ann_file}\" is empty.'\nif isinstance(annotations, list):\n raw_data_list = annotations\nelif isinstance(annotations, dict):\n if 'data_list' not in annotations or 'metainfo' not in annotations:\n raise ValueError('Annotation must have data_list and metainfo keys')\n metainfo = annotations['metainfo']\n raw_data_list = annotations['data_list']\n for k, v in metainfo.items():\n self._metainfo.setdefault(k, v)\nelse:\n raise TypeError(f'The annotations loaded from annotation file should be a list or dict, but got {type(annotations)}!')\ndata_list = []\nfor raw_data_info in raw_data_list:\n data_info = self.parse_data_info(raw_data_info)\n if isinstance(data_info, dict):\n data_list.append(data_info)\n elif isinstance(data_info, list):\n for item in data_info:\n if not isinstance(item, dict):\n raise TypeError(f'data_list must be list of dict, but got {type(item)}')\n data_list.extend(data_info)\n else:\n raise TypeError(f'data_info should be a dict or list of dict, but got {type(data_info)}')\nreturn data_list", "data_info = raw_data_info.copy()\nfor key in raw_data_info:\n data_info[key] = osp.join(self.data_root, data_info[key])\nreturn data_info"], "bodies_text": "<|body_start_0|>\n annotations = load(self.ann_file)\n assert annotations, f'annotation file \"{self.ann_file}\" is empty.'\n if isinstance(annotations, list):\n raw_data_list = annotations\n elif isinstance(annotations, dict):\n if 'data_list' not in annotations or 'metainfo' not in annotations:\n raise ValueError('Annotation must have data_list and metainfo keys')\n metainfo = annotations['metainfo']\n raw_data_list = annotations['data_list']\n for k, v in metainfo.items():\n self._metainfo.setdefault(k, v)\n else:\n raise TypeError(f'The annotations loaded from annotation file should be a list or dict, but got {type(annotations)}!')\n data_list = []\n for raw_data_info in raw_data_list:\n data_info = self.parse_data_info(raw_data_info)\n if isinstance(data_info, dict):\n data_list.append(data_info)\n elif isinstance(data_info, list):\n for item in data_info:\n if not isinstance(item, dict):\n raise TypeError(f'data_list must be list of dict, but got {type(item)}')\n data_list.extend(data_info)\n else:\n raise TypeError(f'data_info should be a dict or list of dict, but got {type(data_info)}')\n return data_list\n<|end_body_0|>\n\n<|body_start_1|>\n data_info = raw_data_info.copy()\n for key in raw_data_info:\n data_info[key] = osp.join(self.data_root, data_info[key])\n return data_info\n<|end_body_1|>\n", "class_docstring": "Adobe composition-1k dataset. The dataset loads (alpha, fg, bg) data and apply specified transforms to the data. You could specify whether composite merged image online or load composited merged image in pipeline. Example for online comp-1k dataset: :: [ { \"alpha\": 'alpha/000.png', \"fg\": 'fg/000.png', \"bg\": 'bg/000.png' }, { \"alpha\": 'alpha/001.png', \"fg\": 'fg/001.png', \"bg\": 'bg/001.png' }, ] Example for offline comp-1k dataset: :: [ { \"alpha\": 'alpha/000.png', \"merged\": 'merged/000.png', \"fg\": 'fg/000.png', \"bg\": 'bg/000.png' }, { \"alpha\": 'alpha/001.png', \"merged\": 'merged/001.png', \"fg\": 'fg/001.png', \"bg\": 'bg/001.png' }, ] Args: ann_file (str): Annotation file path. Defaults to ''. 
dat", "class_name": "AdobeComp1kDataset", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AdobeComp1kDataset:\n \"\"\"Adobe composition-1k dataset. The dataset loads (alpha, fg, bg) data and apply specified transforms to the data. You could specify whether composite merged image online or load composited merged image in pipeline. Example for online comp-1k dataset: :: [ { \"alpha\": 'alpha/000.png', \"fg\": 'fg/000.png', \"bg\": 'bg/000.png' }, { \"alpha\": 'alpha/001.png', \"fg\": 'fg/001.png', \"bg\": 'bg/001.png' }, ] Example for offline comp-1k dataset: :: [ { \"alpha\": 'alpha/000.png', \"merged\": 'merged/000.png', \"fg\": 'fg/000.png', \"bg\": 'bg/000.png' }, { \"alpha\": 'alpha/001.png', \"merged\": 'merged/001.png', \"fg\": 'fg/001.png', \"bg\": 'bg/001.png' }, ] Args: ann_file (str): Annotation file path. Defaults to ''. dat\"\"\"\n\n def load_data_list(self) -> List[dict]:\n \"\"\"Load annotations from an annotation file named as ``self.ann_file`` In order to be compatible to both new and old annotation format, we copy implementations from mmengine and do some modifications. Returns: list[dict]: A list of annotation.\"\"\"\n <|body_0|>\n\n def parse_data_info(self, raw_data_info: dict) -> Union[dict, List[dict]]:\n \"\"\"Join data_root to each path in data_info.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n annotations = load(self.ann_file)\n assert annotations, f'annotation file \"{self.ann_file}\" is empty.'\n if isinstance(annotations, list):\n raw_data_list = annotations\n elif isinstance(annotations, dict):\n if 'data_list' not in annotations or 'metainfo' not in annotations:\n raise ValueError('Annotation must have data_list and metainfo keys')\n metainfo = annotations['metainfo']\n raw_data_list = annotations['data_list']\n for k, v in metainfo.items():\n self._metainfo.setdefault(k, v)\n else:\n raise TypeError(f'The annotations loaded from annotation file should be a list or dict, but got {type(annotations)}!')\n data_list = []\n for raw_data_info in raw_data_list:\n data_info = self.parse_data_info(raw_data_info)\n if isinstance(data_info, dict):\n data_list.append(data_info)\n elif isinstance(data_info, list):\n for item in data_info:\n if not isinstance(item, dict):\n raise TypeError(f'data_list must be list of dict, but got {type(item)}')\n data_list.extend(data_info)\n else:\n raise TypeError(f'data_info should be a dict or list of dict, but got {type(data_info)}')\n return data_list\n<|end_body_0|>\n\n<|body_start_1|>\n data_info = raw_data_info.copy()\n for key in raw_data_info:\n data_info[key] = osp.join(self.data_root, data_info[key])\n return data_info\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000391", "length_bytes": 5210, "license_type": "permissive", "methods": [{"docstring": "Load annotations from an annotation file named as ``self.ann_file`` In order to be compatible to both new and old annotation format, we copy implementations from mmengine and do some modifications. Returns: list[dict]: A list of annotation.", "name": "load_data_list", "signature": "def load_data_list(self) -> List[dict]"}, {"docstring": "Join data_root to each path in data_info.", "name": "parse_data_info", "signature": "def parse_data_info(self, raw_data_info: dict) -> Union[dict, List[dict]]"}], "n_methods": 2, "prompt": "Implement the Python class `AdobeComp1kDataset` described below.\n\nClass description:\nAdobe composition-1k dataset. 
The dataset loads (alpha, fg, bg) data and apply specified transforms to the data. You could specify whether composite merged image online or load composited merged image in pipeline. Example for online comp-1k dataset: :: [ { \"alpha\": 'alpha/000.png', \"fg\": 'fg/000.png', \"bg\": 'bg/000.png' }, { \"alpha\": 'alpha/001.png', \"fg\": 'fg/001.png', \"bg\": 'bg/001.png' }, ] Example for offline comp-1k dataset: :: [ { \"alpha\": 'alpha/000.png', \"merged\": 'merged/000.png', \"fg\": 'fg/000.png', \"bg\": 'bg/000.png' }, { \"alpha\": 'alpha/001.png', \"merged\": 'merged/001.png', \"fg\": 'fg/001.png', \"bg\": 'bg/001.png' }, ] Args: ann_file (str): Annotation file path. Defaults to ''. dat\n\nMethod signatures and docstrings:\n- def load_data_list(self) -> List[dict]: Load annotations from an annotation file named as ``self.ann_file`` In order to be compatible to both new and old annotation format, we copy implementations from mmengine and do some modifications. Returns: list[dict]: A list of annotation.\n- def parse_data_info(self, raw_data_info: dict) -> Union[dict, List[dict]]: Join data_root to each path in data_info.", "prompted_full_text": "Implement the Python class `AdobeComp1kDataset` described below.\n\nClass description:\nAdobe composition-1k dataset. The dataset loads (alpha, fg, bg) data and apply specified transforms to the data. You could specify whether composite merged image online or load composited merged image in pipeline. Example for online comp-1k dataset: :: [ { \"alpha\": 'alpha/000.png', \"fg\": 'fg/000.png', \"bg\": 'bg/000.png' }, { \"alpha\": 'alpha/001.png', \"fg\": 'fg/001.png', \"bg\": 'bg/001.png' }, ] Example for offline comp-1k dataset: :: [ { \"alpha\": 'alpha/000.png', \"merged\": 'merged/000.png', \"fg\": 'fg/000.png', \"bg\": 'bg/000.png' }, { \"alpha\": 'alpha/001.png', \"merged\": 'merged/001.png', \"fg\": 'fg/001.png', \"bg\": 'bg/001.png' }, ] Args: ann_file (str): Annotation file path. Defaults to ''. dat\n\nMethod signatures and docstrings:\n- def load_data_list(self) -> List[dict]: Load annotations from an annotation file named as ``self.ann_file`` In order to be compatible to both new and old annotation format, we copy implementations from mmengine and do some modifications. Returns: list[dict]: A list of annotation.\n- def parse_data_info(self, raw_data_info: dict) -> Union[dict, List[dict]]: Join data_root to each path in data_info.\n\n<|skeleton|>\nclass AdobeComp1kDataset:\n \"\"\"Adobe composition-1k dataset. The dataset loads (alpha, fg, bg) data and apply specified transforms to the data. You could specify whether composite merged image online or load composited merged image in pipeline. Example for online comp-1k dataset: :: [ { \"alpha\": 'alpha/000.png', \"fg\": 'fg/000.png', \"bg\": 'bg/000.png' }, { \"alpha\": 'alpha/001.png', \"fg\": 'fg/001.png', \"bg\": 'bg/001.png' }, ] Example for offline comp-1k dataset: :: [ { \"alpha\": 'alpha/000.png', \"merged\": 'merged/000.png', \"fg\": 'fg/000.png', \"bg\": 'bg/000.png' }, { \"alpha\": 'alpha/001.png', \"merged\": 'merged/001.png', \"fg\": 'fg/001.png', \"bg\": 'bg/001.png' }, ] Args: ann_file (str): Annotation file path. Defaults to ''. dat\"\"\"\n\n def load_data_list(self) -> List[dict]:\n \"\"\"Load annotations from an annotation file named as ``self.ann_file`` In order to be compatible to both new and old annotation format, we copy implementations from mmengine and do some modifications. 
Returns: list[dict]: A list of annotation.\"\"\"\n <|body_0|>\n\n def parse_data_info(self, raw_data_info: dict) -> Union[dict, List[dict]]:\n \"\"\"Join data_root to each path in data_info.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n annotations = load(self.ann_file)\n assert annotations, f'annotation file \"{self.ann_file}\" is empty.'\n if isinstance(annotations, list):\n raw_data_list = annotations\n elif isinstance(annotations, dict):\n if 'data_list' not in annotations or 'metainfo' not in annotations:\n raise ValueError('Annotation must have data_list and metainfo keys')\n metainfo = annotations['metainfo']\n raw_data_list = annotations['data_list']\n for k, v in metainfo.items():\n self._metainfo.setdefault(k, v)\n else:\n raise TypeError(f'The annotations loaded from annotation file should be a list or dict, but got {type(annotations)}!')\n data_list = []\n for raw_data_info in raw_data_list:\n data_info = self.parse_data_info(raw_data_info)\n if isinstance(data_info, dict):\n data_list.append(data_info)\n elif isinstance(data_info, list):\n for item in data_info:\n if not isinstance(item, dict):\n raise TypeError(f'data_list must be list of dict, but got {type(item)}')\n data_list.extend(data_info)\n else:\n raise TypeError(f'data_info should be a dict or list of dict, but got {type(data_info)}')\n return data_list\n<|end_body_0|>\n\n<|body_start_1|>\n data_info = raw_data_info.copy()\n for key in raw_data_info:\n data_info[key] = osp.join(self.data_root, data_info[key])\n return data_info\n<|end_body_1|>\n", "revision_id": "a382f143c0fd20d227e1e5524831ba26a568190d", "skeleton": "<|skeleton|>\nclass AdobeComp1kDataset:\n \"\"\"Adobe composition-1k dataset. The dataset loads (alpha, fg, bg) data and apply specified transforms to the data. You could specify whether composite merged image online or load composited merged image in pipeline. Example for online comp-1k dataset: :: [ { \"alpha\": 'alpha/000.png', \"fg\": 'fg/000.png', \"bg\": 'bg/000.png' }, { \"alpha\": 'alpha/001.png', \"fg\": 'fg/001.png', \"bg\": 'bg/001.png' }, ] Example for offline comp-1k dataset: :: [ { \"alpha\": 'alpha/000.png', \"merged\": 'merged/000.png', \"fg\": 'fg/000.png', \"bg\": 'bg/000.png' }, { \"alpha\": 'alpha/001.png', \"merged\": 'merged/001.png', \"fg\": 'fg/001.png', \"bg\": 'bg/001.png' }, ] Args: ann_file (str): Annotation file path. Defaults to ''. dat\"\"\"\n\n def load_data_list(self) -> List[dict]:\n \"\"\"Load annotations from an annotation file named as ``self.ann_file`` In order to be compatible to both new and old annotation format, we copy implementations from mmengine and do some modifications. Returns: list[dict]: A list of annotation.\"\"\"\n <|body_0|>\n\n def parse_data_info(self, raw_data_info: dict) -> Union[dict, List[dict]]:\n \"\"\"Join data_root to each path in data_info.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class AdobeComp1kDataset:\n \"\"\"Adobe composition-1k dataset. The dataset loads (alpha, fg, bg) data and apply specified transforms to the data. You could specify whether composite merged image online or load composited merged image in pipeline. 
Example for online comp-1k dataset: :: [ { \"alpha\": 'alpha/000.png', \"fg\": 'fg/000.png', \"bg\": 'bg/000.png' }, { \"alpha\": 'alpha/001.png', \"fg\": 'fg/001.png', \"bg\": 'bg/001.png' }, ] Example for offline comp-1k dataset: :: [ { \"alpha\": 'alpha/000.png', \"merged\": 'merged/000.png', \"fg\": 'fg/000.png', \"bg\": 'bg/000.png' }, { \"alpha\": 'alpha/001.png', \"merged\": 'merged/001.png', \"fg\": 'fg/001.png', \"bg\": 'bg/001.png' }, ] Args: ann_file (str): Annotation file path. Defaults to ''. dat\"\"\"\n\n def load_data_list(self) -> List[dict]:\n \"\"\"Load annotations from an annotation file named as ``self.ann_file`` In order to be compatible to both new and old annotation format, we copy implementations from mmengine and do some modifications. Returns: list[dict]: A list of annotation.\"\"\"\n annotations = load(self.ann_file)\n assert annotations, f'annotation file \"{self.ann_file}\" is empty.'\n if isinstance(annotations, list):\n raw_data_list = annotations\n elif isinstance(annotations, dict):\n if 'data_list' not in annotations or 'metainfo' not in annotations:\n raise ValueError('Annotation must have data_list and metainfo keys')\n metainfo = annotations['metainfo']\n raw_data_list = annotations['data_list']\n for k, v in metainfo.items():\n self._metainfo.setdefault(k, v)\n else:\n raise TypeError(f'The annotations loaded from annotation file should be a list or dict, but got {type(annotations)}!')\n data_list = []\n for raw_data_info in raw_data_list:\n data_info = self.parse_data_info(raw_data_info)\n if isinstance(data_info, dict):\n data_list.append(data_info)\n elif isinstance(data_info, list):\n for item in data_info:\n if not isinstance(item, dict):\n raise TypeError(f'data_list must be list of dict, but got {type(item)}')\n data_list.extend(data_info)\n else:\n raise TypeError(f'data_info should be a dict or list of dict, but got {type(data_info)}')\n return data_list\n\n def parse_data_info(self, raw_data_info: dict) -> Union[dict, List[dict]]:\n \"\"\"Join data_root to each path in data_info.\"\"\"\n data_info = raw_data_info.copy()\n for key in raw_data_info:\n data_info[key] = osp.join(self.data_root, data_info[key])\n return data_info\n", "source": "the_stack_v2_python_sparse", "source_path": "mmagic/datasets/comp1k_dataset.py", "source_repo": "open-mmlab/mmagic", "split": "val", "star_events_count": 1370}
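`load_data_list` above accepts two annotation layouts — a bare list of records (the older format) or a dict carrying `metainfo` plus `data_list` (the newer mmengine format) — and `parse_data_info` then prefixes every path with `data_root`. A dependency-free sketch of just that branching and path join; `parse` is an illustrative stand-in for `parse_data_info`, and in-memory dicts replace the mmengine `load` call.

```python
import os.path as osp

old_style = [  # bare list of records
    {'alpha': 'alpha/000.png', 'fg': 'fg/000.png', 'bg': 'bg/000.png'},
]
new_style = {  # dict wrapping the same records with metainfo
    'metainfo': {'dataset_type': 'matting'},
    'data_list': old_style,
}

def parse(raw, data_root='data/comp1k'):
    # Mirrors parse_data_info: join data_root onto every path in the record.
    return {k: osp.join(data_root, v) for k, v in raw.items()}

for ann in (old_style, new_style):
    raw_list = ann if isinstance(ann, list) else ann['data_list']
    print([parse(r) for r in raw_list])
```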
{"blob_id": "a193284ae99705ea719e6dee5f6c92dde2c39085", "bodies": ["rlt = []\nself._less_equal(0, k, rlt)\nreturn rlt", "item = self._data[idx]\nif item._key <= k:\n rl.append((item._key, item._value))\nif self._has_left(idx):\n self._less_equal(self._left(idx), k, rl)\nif self._has_right(idx):\n self._less_equal(self._right(idx), k, rl)"], "bodies_text": "<|body_start_0|>\n rlt = []\n self._less_equal(0, k, rlt)\n return rlt\n<|end_body_0|>\n\n<|body_start_1|>\n item = self._data[idx]\n if item._key <= k:\n rl.append((item._key, item._value))\n if self._has_left(idx):\n self._less_equal(self._left(idx), k, rl)\n if self._has_right(idx):\n self._less_equal(self._right(idx), k, rl)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "PriorityQueue", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PriorityQueue:\n\n def less_equal(self, k):\n \"\"\"Return the elements that having a key less than or equal to k.\"\"\"\n <|body_0|>\n\n def _less_equal(self, idx, k, rl):\n \"\"\"Append the elements that having a key less than or equal to `k` to `rl`.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n rlt = []\n self._less_equal(0, k, rlt)\n return rlt\n<|end_body_0|>\n\n<|body_start_1|>\n item = self._data[idx]\n if item._key <= k:\n rl.append((item._key, item._value))\n if self._has_left(idx):\n self._less_equal(self._left(idx), k, rl)\n if self._has_right(idx):\n self._less_equal(self._right(idx), k, rl)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000392", "length_bytes": 1037, "license_type": "no_license", "methods": [{"docstring": "Return the elements that having a key less than or equal to k.", "name": "less_equal", "signature": "def less_equal(self, k)"}, {"docstring": "Append the elements that having a key less than or equal to `k` to `rl`.", "name": "_less_equal", "signature": "def _less_equal(self, idx, k, rl)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_016544", "prompt": "Implement the Python class `PriorityQueue` described below.\n\nClass description:\nImplement the PriorityQueue class.\n\nMethod signatures and docstrings:\n- def less_equal(self, k): Return the elements that having a key less than or equal to k.\n- def _less_equal(self, idx, k, rl): Append the elements that having a key less than or equal to `k` to `rl`.", "prompted_full_text": "Implement the Python class `PriorityQueue` described below.\n\nClass description:\nImplement the PriorityQueue class.\n\nMethod signatures and docstrings:\n- def less_equal(self, k): Return the elements that having a key less than or equal to k.\n- def _less_equal(self, idx, k, rl): Append the elements that having a key less than or equal to `k` to `rl`.\n\n<|skeleton|>\nclass PriorityQueue:\n\n def less_equal(self, k):\n \"\"\"Return the elements that having a key less than or equal to k.\"\"\"\n <|body_0|>\n\n def _less_equal(self, idx, k, rl):\n \"\"\"Append the elements that having a key less than or equal to `k` to `rl`.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n rlt = []\n self._less_equal(0, k, rlt)\n return rlt\n<|end_body_0|>\n\n<|body_start_1|>\n item = self._data[idx]\n if item._key <= k:\n rl.append((item._key, item._value))\n if self._has_left(idx):\n self._less_equal(self._left(idx), k, rl)\n if self._has_right(idx):\n self._less_equal(self._right(idx), k, rl)\n<|end_body_1|>\n", "revision_id": "70b23ead7a89e46a84d9d914e7c8fa678edd1f90", "skeleton": "<|skeleton|>\nclass PriorityQueue:\n\n 
def less_equal(self, k):\n \"\"\"Return the elements that having a key less than or equal to k.\"\"\"\n <|body_0|>\n\n def _less_equal(self, idx, k, rl):\n \"\"\"Append the elements that having a key less than or equal to `k` to `rl`.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class PriorityQueue:\n def less_equal(self, k):\n \"\"\"Return the elements that having a key less than or equal to k.\"\"\"\n rlt = []\n self._less_equal(0, k, rlt)\n return rlt\n\n def _less_equal(self, idx, k, rl):\n \"\"\"Append the elements that having a key less than or equal to `k` to `rl`.\"\"\"\n item = self._data[idx]\n if item._key <= k:\n rl.append((item._key, item._value))\n if self._has_left(idx):\n self._less_equal(self._left(idx), k, rl)\n if self._has_right(idx):\n self._less_equal(self._right(idx), k, rl)\n", "source": "the_stack_v2_python_sparse", "source_path": "priority_queue_ch09/creativity/less_equal_to_k_c9_35.py", "source_repo": "wanyikang/dsap", "split": "val", "star_events_count": 1}
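Annotation: the PriorityQueue record above drops its bodies into a class whose heap plumbing (_data, _left, _right, _has_left, _has_right, and items exposing _key/_value) is never shown. Below is a minimal runnable sketch of that assumed context with the recorded bodies reused nearly verbatim; the _Item wrapper, the sorted-list constructor, and the empty-heap guard in less_equal are assumptions, not part of the record.

class PriorityQueue:
    """Array-based binary min-heap; only what less_equal needs."""

    class _Item:
        __slots__ = ('_key', '_value')

        def __init__(self, key, value):
            self._key = key
            self._value = value

    def __init__(self, contents=()):
        # A list sorted by key is always a valid min-heap (parent index i
        # precedes child indices 2i+1 and 2i+2), which keeps the sketch short.
        self._data = [self._Item(k, v) for k, v in sorted(contents)]

    def _left(self, idx):
        return 2 * idx + 1

    def _right(self, idx):
        return 2 * idx + 2

    def _has_left(self, idx):
        return self._left(idx) < len(self._data)

    def _has_right(self, idx):
        return self._right(idx) < len(self._data)

    def less_equal(self, k):
        rlt = []
        if self._data:  # the recorded body indexes _data[0] unconditionally
            self._less_equal(0, k, rlt)
        return rlt

    def _less_equal(self, idx, k, rl):
        item = self._data[idx]
        if item._key <= k:
            rl.append((item._key, item._value))
        # Min-heap note: every descendant key is >= item._key, so both
        # recursions could be nested under the branch above as a pruning
        # step; the recorded body recurses unconditionally, which is
        # correct but visits subtrees that cannot contribute.
        if self._has_left(idx):
            self._less_equal(self._left(idx), k, rl)
        if self._has_right(idx):
            self._less_equal(self._right(idx), k, rl)


pq = PriorityQueue([(5, 'e'), (1, 'a'), (3, 'c'), (9, 'i')])
print(pq.less_equal(4))  # [(1, 'a'), (3, 'c')] -- order follows heap layout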
{"blob_id": "4adce9c92d97b54309dca1911899980e9df91dbe", "bodies": ["function = LegacyFunctionSpecification()\nfunction.addParameter('input', dtype='int32', direction=function.IN, description='Typical input parameter, the argument is passed by value to the function.')\nfunction.addParameter('output', dtype='float64', direction=function.OUT, description='Typical output parameter, the argument is passed by reference.\\nThe argument should point to a valid memory location.')\nfunction.addParameter('inout', dtype='float64', direction=function.INOUT, description='Some arguments can be both input as well as output. The function will update the value of the passed argument.')\nfunction.result_type = 'int32'\nfunction.result_doc = 'Function will return an error code.'\nreturn function", "function = LegacyFunctionSpecification()\nfunction.addParameter('value', dtype='float64', direction=function.OUT, description='The current value of the parameter.')\nfunction.result_type = 'int32'\nfunction.result_doc = '\\n 0 - OK\\n Current value was retrieved\\n -1 - ERROR\\n The code does not have support for this parameter, use this when\\n a code does not support a parameter pre-defined in a physical\\n domain\\n '\nreturn function", "function = LegacyFunctionSpecification()\nfunction.addParameter('value', dtype='float64', direction=function.IN, description='The new value of the parameter.')\nfunction.result_type = 'int32'\nfunction.result_doc = '\\n 0 - OK\\n New value of the parameter was set\\n -1 - ERROR\\n The code does not have support for this parameter\\n '\nreturn function", "function = LegacyFunctionSpecification()\nfunction.result_type = 'int32'\nfunction.result_doc = '\\n 0 - OK\\n Code is initialized\\n -1 - ERROR\\n Error happened during initialization, this error needs to be\\n further specified by every code implemention\\n '\nreturn function"], "bodies_text": "<|body_start_0|>\n function = LegacyFunctionSpecification()\n function.addParameter('input', dtype='int32', direction=function.IN, description='Typical input parameter, the argument is passed by value to the function.')\n function.addParameter('output', dtype='float64', direction=function.OUT, description='Typical output parameter, the argument is passed by reference.\\nThe argument should point to a valid memory location.')\n function.addParameter('inout', dtype='float64', direction=function.INOUT, description='Some arguments can be both input as well as output. 
The function will update the value of the passed argument.')\n function.result_type = 'int32'\n function.result_doc = 'Function will return an error code.'\n return function\n<|end_body_0|>\n\n<|body_start_1|>\n function = LegacyFunctionSpecification()\n function.addParameter('value', dtype='float64', direction=function.OUT, description='The current value of the parameter.')\n function.result_type = 'int32'\n function.result_doc = '\\n 0 - OK\\n Current value was retrieved\\n -1 - ERROR\\n The code does not have support for this parameter, use this when\\n a code does not support a parameter pre-defined in a physical\\n domain\\n '\n return function\n<|end_body_1|>\n\n<|body_start_2|>\n function = LegacyFunctionSpecification()\n function.addParameter('value', dtype='float64', direction=function.IN, description='The new value of the parameter.')\n function.result_type = 'int32'\n function.result_doc = '\\n 0 - OK\\n New value of the parameter was set\\n -1 - ERROR\\n The code does not have support for this parameter\\n '\n return function\n<|end_body_2|>\n\n<|body_start_3|>\n function = LegacyFunctionSpecification()\n function.result_type = 'int32'\n function.result_doc = '\\n 0 - OK\\n Code is initialized\\n -1 - ERROR\\n Error happened during initialization, this error needs to be\\n further specified by every code implemention\\n '\n return function\n<|end_body_3|>\n", "class_docstring": "", "class_name": "ExampleInterface", "detected_licenses": ["Apache-2.0", "LicenseRef-scancode-warranty-disclaimer"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ExampleInterface:\n\n def example_function():\n \"\"\"Example template for the other functions defined in this specification. All functions should follow this example..\"\"\"\n <|body_0|>\n\n def get_example_parameter():\n \"\"\"Retrieve the current value of the parameter. Note, values can be any of the supported types.\"\"\"\n <|body_1|>\n\n def set_example_parameter():\n \"\"\"Update the value of the parameter. The type of the new value argument must be the same as the :meth:`get_example_parameter` function.\"\"\"\n <|body_2|>\n\n def initialize_code():\n \"\"\"Let the code perform initialization actions after all parameters have been set. Should be called once per running code instance.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n function = LegacyFunctionSpecification()\n function.addParameter('input', dtype='int32', direction=function.IN, description='Typical input parameter, the argument is passed by value to the function.')\n function.addParameter('output', dtype='float64', direction=function.OUT, description='Typical output parameter, the argument is passed by reference.\\nThe argument should point to a valid memory location.')\n function.addParameter('inout', dtype='float64', direction=function.INOUT, description='Some arguments can be both input as well as output. 
The function will update the value of the passed argument.')\n function.result_type = 'int32'\n function.result_doc = 'Function will return an error code.'\n return function\n<|end_body_0|>\n\n<|body_start_1|>\n function = LegacyFunctionSpecification()\n function.addParameter('value', dtype='float64', direction=function.OUT, description='The current value of the parameter.')\n function.result_type = 'int32'\n function.result_doc = '\\n 0 - OK\\n Current value was retrieved\\n -1 - ERROR\\n The code does not have support for this parameter, use this when\\n a code does not support a parameter pre-defined in a physical\\n domain\\n '\n return function\n<|end_body_1|>\n\n<|body_start_2|>\n function = LegacyFunctionSpecification()\n function.addParameter('value', dtype='float64', direction=function.IN, description='The new value of the parameter.')\n function.result_type = 'int32'\n function.result_doc = '\\n 0 - OK\\n New value of the parameter was set\\n -1 - ERROR\\n The code does not have support for this parameter\\n '\n return function\n<|end_body_2|>\n\n<|body_start_3|>\n function = LegacyFunctionSpecification()\n function.result_type = 'int32'\n function.result_doc = '\\n 0 - OK\\n Code is initialized\\n -1 - ERROR\\n Error happened during initialization, this error needs to be\\n further specified by every code implemention\\n '\n return function\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000393", "length_bytes": 3439, "license_type": "permissive", "methods": [{"docstring": "Example template for the other functions defined in this specification. All functions should follow this example..", "name": "example_function", "signature": "def example_function()"}, {"docstring": "Retrieve the current value of the parameter. Note, values can be any of the supported types.", "name": "get_example_parameter", "signature": "def get_example_parameter()"}, {"docstring": "Update the value of the parameter. The type of the new value argument must be the same as the :meth:`get_example_parameter` function.", "name": "set_example_parameter", "signature": "def set_example_parameter()"}, {"docstring": "Let the code perform initialization actions after all parameters have been set. Should be called once per running code instance.", "name": "initialize_code", "signature": "def initialize_code()"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_031266", "prompt": "Implement the Python class `ExampleInterface` described below.\n\nClass description:\nImplement the ExampleInterface class.\n\nMethod signatures and docstrings:\n- def example_function(): Example template for the other functions defined in this specification. All functions should follow this example..\n- def get_example_parameter(): Retrieve the current value of the parameter. Note, values can be any of the supported types.\n- def set_example_parameter(): Update the value of the parameter. The type of the new value argument must be the same as the :meth:`get_example_parameter` function.\n- def initialize_code(): Let the code perform initialization actions after all parameters have been set. Should be called once per running code instance.", "prompted_full_text": "Implement the Python class `ExampleInterface` described below.\n\nClass description:\nImplement the ExampleInterface class.\n\nMethod signatures and docstrings:\n- def example_function(): Example template for the other functions defined in this specification. 
All functions should follow this example..\n- def get_example_parameter(): Retrieve the current value of the parameter. Note, values can be any of the supported types.\n- def set_example_parameter(): Update the value of the parameter. The type of the new value argument must be the same as the :meth:`get_example_parameter` function.\n- def initialize_code(): Let the code perform initialization actions after all parameters have been set. Should be called once per running code instance.\n\n<|skeleton|>\nclass ExampleInterface:\n\n def example_function():\n \"\"\"Example template for the other functions defined in this specification. All functions should follow this example..\"\"\"\n <|body_0|>\n\n def get_example_parameter():\n \"\"\"Retrieve the current value of the parameter. Note, values can be any of the supported types.\"\"\"\n <|body_1|>\n\n def set_example_parameter():\n \"\"\"Update the value of the parameter. The type of the new value argument must be the same as the :meth:`get_example_parameter` function.\"\"\"\n <|body_2|>\n\n def initialize_code():\n \"\"\"Let the code perform initialization actions after all parameters have been set. Should be called once per running code instance.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n function = LegacyFunctionSpecification()\n function.addParameter('input', dtype='int32', direction=function.IN, description='Typical input parameter, the argument is passed by value to the function.')\n function.addParameter('output', dtype='float64', direction=function.OUT, description='Typical output parameter, the argument is passed by reference.\\nThe argument should point to a valid memory location.')\n function.addParameter('inout', dtype='float64', direction=function.INOUT, description='Some arguments can be both input as well as output. The function will update the value of the passed argument.')\n function.result_type = 'int32'\n function.result_doc = 'Function will return an error code.'\n return function\n<|end_body_0|>\n\n<|body_start_1|>\n function = LegacyFunctionSpecification()\n function.addParameter('value', dtype='float64', direction=function.OUT, description='The current value of the parameter.')\n function.result_type = 'int32'\n function.result_doc = '\\n 0 - OK\\n Current value was retrieved\\n -1 - ERROR\\n The code does not have support for this parameter, use this when\\n a code does not support a parameter pre-defined in a physical\\n domain\\n '\n return function\n<|end_body_1|>\n\n<|body_start_2|>\n function = LegacyFunctionSpecification()\n function.addParameter('value', dtype='float64', direction=function.IN, description='The new value of the parameter.')\n function.result_type = 'int32'\n function.result_doc = '\\n 0 - OK\\n New value of the parameter was set\\n -1 - ERROR\\n The code does not have support for this parameter\\n '\n return function\n<|end_body_2|>\n\n<|body_start_3|>\n function = LegacyFunctionSpecification()\n function.result_type = 'int32'\n function.result_doc = '\\n 0 - OK\\n Code is initialized\\n -1 - ERROR\\n Error happened during initialization, this error needs to be\\n further specified by every code implemention\\n '\n return function\n<|end_body_3|>\n", "revision_id": "b57c1e2fda1457d5025307be105c2aa59b19b574", "skeleton": "<|skeleton|>\nclass ExampleInterface:\n\n def example_function():\n \"\"\"Example template for the other functions defined in this specification. 
All functions should follow this example..\"\"\"\n <|body_0|>\n\n def get_example_parameter():\n \"\"\"Retrieve the current value of the parameter. Note, values can be any of the supported types.\"\"\"\n <|body_1|>\n\n def set_example_parameter():\n \"\"\"Update the value of the parameter. The type of the new value argument must be the same as the :meth:`get_example_parameter` function.\"\"\"\n <|body_2|>\n\n def initialize_code():\n \"\"\"Let the code perform initialization actions after all parameters have been set. Should be called once per running code instance.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ExampleInterface:\n def example_function():\n \"\"\"Example template for the other functions defined in this specification. All functions should follow this example..\"\"\"\n function = LegacyFunctionSpecification()\n function.addParameter('input', dtype='int32', direction=function.IN, description='Typical input parameter, the argument is passed by value to the function.')\n function.addParameter('output', dtype='float64', direction=function.OUT, description='Typical output parameter, the argument is passed by reference.\\nThe argument should point to a valid memory location.')\n function.addParameter('inout', dtype='float64', direction=function.INOUT, description='Some arguments can be both input as well as output. The function will update the value of the passed argument.')\n function.result_type = 'int32'\n function.result_doc = 'Function will return an error code.'\n return function\n\n def get_example_parameter():\n \"\"\"Retrieve the current value of the parameter. Note, values can be any of the supported types.\"\"\"\n function = LegacyFunctionSpecification()\n function.addParameter('value', dtype='float64', direction=function.OUT, description='The current value of the parameter.')\n function.result_type = 'int32'\n function.result_doc = '\\n 0 - OK\\n Current value was retrieved\\n -1 - ERROR\\n The code does not have support for this parameter, use this when\\n a code does not support a parameter pre-defined in a physical\\n domain\\n '\n return function\n\n def set_example_parameter():\n \"\"\"Update the value of the parameter. The type of the new value argument must be the same as the :meth:`get_example_parameter` function.\"\"\"\n function = LegacyFunctionSpecification()\n function.addParameter('value', dtype='float64', direction=function.IN, description='The new value of the parameter.')\n function.result_type = 'int32'\n function.result_doc = '\\n 0 - OK\\n New value of the parameter was set\\n -1 - ERROR\\n The code does not have support for this parameter\\n '\n return function\n\n def initialize_code():\n \"\"\"Let the code perform initialization actions after all parameters have been set. Should be called once per running code instance.\"\"\"\n function = LegacyFunctionSpecification()\n function.result_type = 'int32'\n function.result_doc = '\\n 0 - OK\\n Code is initialized\\n -1 - ERROR\\n Error happened during initialization, this error needs to be\\n further specified by every code implemention\\n '\n return function\n", "source": "the_stack_v2_python_sparse", "source_path": "src/amuse/community/interface/example.py", "source_repo": "amusecode/amuse", "split": "val", "star_events_count": 158}
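Annotation: the ExampleInterface record is AMUSE's interface-specification template; each method returns a LegacyFunctionSpecification describing one remote function rather than executing anything. The stand-in below is hypothetical, just enough structure to run the recorded bodies outside AMUSE (the real class ships with the amuse package and does far more); it assumes the record's ExampleInterface class is defined alongside it.

class LegacyFunctionSpecification:
    """Hypothetical stand-in for AMUSE's specification object."""

    # Direction markers, reachable as function.IN etc. via class-attribute lookup.
    IN, OUT, INOUT = ('IN', 'OUT', 'INOUT')

    def __init__(self):
        self.parameters = []
        self.result_type = None
        self.result_doc = ''

    def addParameter(self, name, dtype, direction, description=''):
        self.parameters.append((name, dtype, direction, description))


# The recorded methods take no self (in AMUSE they are wrapped by a decorator),
# so in Python 3 they can be called directly off the class:
spec = ExampleInterface.example_function()
print(spec.result_type)                  # int32
print([p[0] for p in spec.parameters])   # ['input', 'output', 'inout']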
{"blob_id": "f163413944d747da568e3daa022813b03070f873", "bodies": ["if not kwargs.get('obj_ids'):\n obj_model = facade.get_vlan_by_search(self.search)\n vlans = obj_model['query_set']\n only_main_property = False\nelse:\n obj_ids = kwargs['obj_ids'].split(';')\n vlans = facade.get_vlan_by_ids(obj_ids)\n obj_model = None\n only_main_property = True\nserializer_vips = serializers.VlanV3Serializer(vlans, many=True, fields=self.fields, include=self.include, exclude=self.exclude, kind=self.kind)\nresponse = render_to_json(serializer_vips, main_property='vlans', obj_model=obj_model, request=request, only_main_property=only_main_property)\nreturn Response(response, status=status.HTTP_200_OK)", "data = request.DATA\njson_validate(SPECS.get('vlan_post')).validate(data)\nresponse = list()\nfor vlan in data['vlans']:\n vl = facade.create_vlan(vlan, request.user)\n response.append({'id': vl.id})\nreturn Response(response, status=status.HTTP_201_CREATED)", "data = request.DATA\njson_validate(SPECS.get('vlan_put')).validate(data)\nresponse = list()\nfor vlan in data['vlans']:\n vl = facade.update_vlan(vlan, request.user)\n response.append({'id': vl.id})\nreturn Response(response, status=status.HTTP_200_OK)", "obj_ids = kwargs['obj_ids'].split(';')\nfor obj_id in obj_ids:\n facade.delete_vlan(obj_id)\nreturn Response({}, status=status.HTTP_200_OK)"], "bodies_text": "<|body_start_0|>\n if not kwargs.get('obj_ids'):\n obj_model = facade.get_vlan_by_search(self.search)\n vlans = obj_model['query_set']\n only_main_property = False\n else:\n obj_ids = kwargs['obj_ids'].split(';')\n vlans = facade.get_vlan_by_ids(obj_ids)\n obj_model = None\n only_main_property = True\n serializer_vips = serializers.VlanV3Serializer(vlans, many=True, fields=self.fields, include=self.include, exclude=self.exclude, kind=self.kind)\n response = render_to_json(serializer_vips, main_property='vlans', obj_model=obj_model, request=request, only_main_property=only_main_property)\n return Response(response, status=status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n data = request.DATA\n json_validate(SPECS.get('vlan_post')).validate(data)\n response = list()\n for vlan in data['vlans']:\n vl = facade.create_vlan(vlan, request.user)\n response.append({'id': vl.id})\n return Response(response, status=status.HTTP_201_CREATED)\n<|end_body_1|>\n\n<|body_start_2|>\n data = request.DATA\n json_validate(SPECS.get('vlan_put')).validate(data)\n response = list()\n for vlan in data['vlans']:\n vl = facade.update_vlan(vlan, request.user)\n response.append({'id': vl.id})\n return Response(response, status=status.HTTP_200_OK)\n<|end_body_2|>\n\n<|body_start_3|>\n obj_ids = kwargs['obj_ids'].split(';')\n for obj_id in obj_ids:\n facade.delete_vlan(obj_id)\n return Response({}, status=status.HTTP_200_OK)\n<|end_body_3|>\n", "class_docstring": "", "class_name": "VlanDBView", "detected_licenses": ["Apache-2.0", "BSD-3-Clause", "MIT", "LicenseRef-scancode-public-domain", "BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass VlanDBView:\n\n def get(self, request, *args, **kwargs):\n \"\"\"Returns a list of vlans with details by ids ou dict.\"\"\"\n <|body_0|>\n\n def post(self, request, *args, **kwargs):\n \"\"\"Creates list of vlans.\"\"\"\n <|body_1|>\n\n def put(self, request, *args, **kwargs):\n \"\"\"Updates list of vlans.\"\"\"\n <|body_2|>\n\n def delete(self, request, *args, **kwargs):\n \"\"\"Deletes list of vlans.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not 
kwargs.get('obj_ids'):\n obj_model = facade.get_vlan_by_search(self.search)\n vlans = obj_model['query_set']\n only_main_property = False\n else:\n obj_ids = kwargs['obj_ids'].split(';')\n vlans = facade.get_vlan_by_ids(obj_ids)\n obj_model = None\n only_main_property = True\n serializer_vips = serializers.VlanV3Serializer(vlans, many=True, fields=self.fields, include=self.include, exclude=self.exclude, kind=self.kind)\n response = render_to_json(serializer_vips, main_property='vlans', obj_model=obj_model, request=request, only_main_property=only_main_property)\n return Response(response, status=status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n data = request.DATA\n json_validate(SPECS.get('vlan_post')).validate(data)\n response = list()\n for vlan in data['vlans']:\n vl = facade.create_vlan(vlan, request.user)\n response.append({'id': vl.id})\n return Response(response, status=status.HTTP_201_CREATED)\n<|end_body_1|>\n\n<|body_start_2|>\n data = request.DATA\n json_validate(SPECS.get('vlan_put')).validate(data)\n response = list()\n for vlan in data['vlans']:\n vl = facade.update_vlan(vlan, request.user)\n response.append({'id': vl.id})\n return Response(response, status=status.HTTP_200_OK)\n<|end_body_2|>\n\n<|body_start_3|>\n obj_ids = kwargs['obj_ids'].split(';')\n for obj_id in obj_ids:\n facade.delete_vlan(obj_id)\n return Response({}, status=status.HTTP_200_OK)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000394", "length_bytes": 6313, "license_type": "permissive", "methods": [{"docstring": "Returns a list of vlans with details by ids ou dict.", "name": "get", "signature": "def get(self, request, *args, **kwargs)"}, {"docstring": "Creates list of vlans.", "name": "post", "signature": "def post(self, request, *args, **kwargs)"}, {"docstring": "Updates list of vlans.", "name": "put", "signature": "def put(self, request, *args, **kwargs)"}, {"docstring": "Deletes list of vlans.", "name": "delete", "signature": "def delete(self, request, *args, **kwargs)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_019973", "prompt": "Implement the Python class `VlanDBView` described below.\n\nClass description:\nImplement the VlanDBView class.\n\nMethod signatures and docstrings:\n- def get(self, request, *args, **kwargs): Returns a list of vlans with details by ids ou dict.\n- def post(self, request, *args, **kwargs): Creates list of vlans.\n- def put(self, request, *args, **kwargs): Updates list of vlans.\n- def delete(self, request, *args, **kwargs): Deletes list of vlans.", "prompted_full_text": "Implement the Python class `VlanDBView` described below.\n\nClass description:\nImplement the VlanDBView class.\n\nMethod signatures and docstrings:\n- def get(self, request, *args, **kwargs): Returns a list of vlans with details by ids ou dict.\n- def post(self, request, *args, **kwargs): Creates list of vlans.\n- def put(self, request, *args, **kwargs): Updates list of vlans.\n- def delete(self, request, *args, **kwargs): Deletes list of vlans.\n\n<|skeleton|>\nclass VlanDBView:\n\n def get(self, request, *args, **kwargs):\n \"\"\"Returns a list of vlans with details by ids ou dict.\"\"\"\n <|body_0|>\n\n def post(self, request, *args, **kwargs):\n \"\"\"Creates list of vlans.\"\"\"\n <|body_1|>\n\n def put(self, request, *args, **kwargs):\n \"\"\"Updates list of vlans.\"\"\"\n <|body_2|>\n\n def delete(self, request, *args, **kwargs):\n \"\"\"Deletes list of vlans.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not 
kwargs.get('obj_ids'):\n obj_model = facade.get_vlan_by_search(self.search)\n vlans = obj_model['query_set']\n only_main_property = False\n else:\n obj_ids = kwargs['obj_ids'].split(';')\n vlans = facade.get_vlan_by_ids(obj_ids)\n obj_model = None\n only_main_property = True\n serializer_vips = serializers.VlanV3Serializer(vlans, many=True, fields=self.fields, include=self.include, exclude=self.exclude, kind=self.kind)\n response = render_to_json(serializer_vips, main_property='vlans', obj_model=obj_model, request=request, only_main_property=only_main_property)\n return Response(response, status=status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n data = request.DATA\n json_validate(SPECS.get('vlan_post')).validate(data)\n response = list()\n for vlan in data['vlans']:\n vl = facade.create_vlan(vlan, request.user)\n response.append({'id': vl.id})\n return Response(response, status=status.HTTP_201_CREATED)\n<|end_body_1|>\n\n<|body_start_2|>\n data = request.DATA\n json_validate(SPECS.get('vlan_put')).validate(data)\n response = list()\n for vlan in data['vlans']:\n vl = facade.update_vlan(vlan, request.user)\n response.append({'id': vl.id})\n return Response(response, status=status.HTTP_200_OK)\n<|end_body_2|>\n\n<|body_start_3|>\n obj_ids = kwargs['obj_ids'].split(';')\n for obj_id in obj_ids:\n facade.delete_vlan(obj_id)\n return Response({}, status=status.HTTP_200_OK)\n<|end_body_3|>\n", "revision_id": "eb27e1d977a1c4bb1fee8fb51b8d8050c64696d9", "skeleton": "<|skeleton|>\nclass VlanDBView:\n\n def get(self, request, *args, **kwargs):\n \"\"\"Returns a list of vlans with details by ids ou dict.\"\"\"\n <|body_0|>\n\n def post(self, request, *args, **kwargs):\n \"\"\"Creates list of vlans.\"\"\"\n <|body_1|>\n\n def put(self, request, *args, **kwargs):\n \"\"\"Updates list of vlans.\"\"\"\n <|body_2|>\n\n def delete(self, request, *args, **kwargs):\n \"\"\"Deletes list of vlans.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class VlanDBView:\n def get(self, request, *args, **kwargs):\n \"\"\"Returns a list of vlans with details by ids ou dict.\"\"\"\n if not kwargs.get('obj_ids'):\n obj_model = facade.get_vlan_by_search(self.search)\n vlans = obj_model['query_set']\n only_main_property = False\n else:\n obj_ids = kwargs['obj_ids'].split(';')\n vlans = facade.get_vlan_by_ids(obj_ids)\n obj_model = None\n only_main_property = True\n serializer_vips = serializers.VlanV3Serializer(vlans, many=True, fields=self.fields, include=self.include, exclude=self.exclude, kind=self.kind)\n response = render_to_json(serializer_vips, main_property='vlans', obj_model=obj_model, request=request, only_main_property=only_main_property)\n return Response(response, status=status.HTTP_200_OK)\n\n def post(self, request, *args, **kwargs):\n \"\"\"Creates list of vlans.\"\"\"\n data = request.DATA\n json_validate(SPECS.get('vlan_post')).validate(data)\n response = list()\n for vlan in data['vlans']:\n vl = facade.create_vlan(vlan, request.user)\n response.append({'id': vl.id})\n return Response(response, status=status.HTTP_201_CREATED)\n\n def put(self, request, *args, **kwargs):\n \"\"\"Updates list of vlans.\"\"\"\n data = request.DATA\n json_validate(SPECS.get('vlan_put')).validate(data)\n response = list()\n for vlan in data['vlans']:\n vl = facade.update_vlan(vlan, request.user)\n response.append({'id': vl.id})\n return Response(response, 
status=status.HTTP_200_OK)\n\n def delete(self, request, *args, **kwargs):\n \"\"\"Deletes list of vlans.\"\"\"\n obj_ids = kwargs['obj_ids'].split(';')\n for obj_id in obj_ids:\n facade.delete_vlan(obj_id)\n return Response({}, status=status.HTTP_200_OK)\n", "source": "the_stack_v2_python_sparse", "source_path": "networkapi/api_vlan/views/v3.py", "source_repo": "globocom/GloboNetworkAPI", "split": "val", "star_events_count": 86}
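Annotation: the VlanDBView record depends on Django REST Framework plus GloboNetworkAPI's facade and serializers, so it cannot run standalone. The sketch below isolates only the GET dispatch convention the body encodes -- obj_ids arrives as a single ';'-joined path segment and, when present, takes precedence over the search dict. The function, its return tags, and the search dict are hypothetical illustrations of that branching, not project API.

def dispatch_get(obj_ids=None, search=None):
    # Mirrors the recorded branching: explicit ids win over a search dict.
    if not obj_ids:
        return ('search', search)           # -> facade.get_vlan_by_search(search)
    return ('ids', obj_ids.split(';'))      # -> facade.get_vlan_by_ids([...])


print(dispatch_get(search={'query': 'prod-vlans'}))  # ('search', {'query': 'prod-vlans'})
print(dispatch_get(obj_ids='10;11;12'))              # ('ids', ['10', '11', '12'])

The same ';'-split convention drives the recorded delete() body, and the ids branch is also what flips only_main_property, so by-id responses carry just the 'vlans' list without the search envelope.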
{"blob_id": "284c2b0d2658265df786f9555853414f068ad81c", "bodies": ["super().__init__(**kwargs)\nself.conv_layers = []\nfor i in range(config.num_duration_conv_layers):\n self.conv_layers.append(tf.keras.layers.Conv1D(config.duration_predictor_filters, config.duration_predictor_kernel_sizes, padding='same', name='conv_._{}'.format(i)))\n self.conv_layers.append(tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm_._{}'.format(i)))\n self.conv_layers.append(tf.keras.layers.Activation(tf.nn.relu6))\n self.conv_layers.append(tf.keras.layers.Dropout(config.duration_predictor_dropout_probs))\nself.conv_layers_sequence = tf.keras.Sequential(self.conv_layers)\nself.output_layer = tf.keras.layers.Dense(1)", "encoder_hidden_states, attention_mask = inputs\nattention_mask = tf.cast(tf.expand_dims(attention_mask, 2), encoder_hidden_states.dtype)\nmasked_encoder_hidden_states = encoder_hidden_states * attention_mask\noutputs = self.conv_layers_sequence(masked_encoder_hidden_states)\noutputs = self.output_layer(outputs)\nmasked_outputs = outputs * attention_mask\nreturn tf.squeeze(tf.nn.relu6(masked_outputs), -1)"], "bodies_text": "<|body_start_0|>\n super().__init__(**kwargs)\n self.conv_layers = []\n for i in range(config.num_duration_conv_layers):\n self.conv_layers.append(tf.keras.layers.Conv1D(config.duration_predictor_filters, config.duration_predictor_kernel_sizes, padding='same', name='conv_._{}'.format(i)))\n self.conv_layers.append(tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm_._{}'.format(i)))\n self.conv_layers.append(tf.keras.layers.Activation(tf.nn.relu6))\n self.conv_layers.append(tf.keras.layers.Dropout(config.duration_predictor_dropout_probs))\n self.conv_layers_sequence = tf.keras.Sequential(self.conv_layers)\n self.output_layer = tf.keras.layers.Dense(1)\n<|end_body_0|>\n\n<|body_start_1|>\n encoder_hidden_states, attention_mask = inputs\n attention_mask = tf.cast(tf.expand_dims(attention_mask, 2), encoder_hidden_states.dtype)\n masked_encoder_hidden_states = encoder_hidden_states * attention_mask\n outputs = self.conv_layers_sequence(masked_encoder_hidden_states)\n outputs = self.output_layer(outputs)\n masked_outputs = outputs * attention_mask\n return tf.squeeze(tf.nn.relu6(masked_outputs), -1)\n<|end_body_1|>\n", "class_docstring": "FastSpeech duration predictor module.", "class_name": "TFFastSpeechDurationPredictor", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TFFastSpeechDurationPredictor:\n \"\"\"FastSpeech duration predictor module.\"\"\"\n\n def __init__(self, config, **kwargs):\n \"\"\"Init variables.\"\"\"\n <|body_0|>\n\n def call(self, inputs, training=False):\n \"\"\"Call logic.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(**kwargs)\n self.conv_layers = []\n for i in range(config.num_duration_conv_layers):\n self.conv_layers.append(tf.keras.layers.Conv1D(config.duration_predictor_filters, config.duration_predictor_kernel_sizes, padding='same', name='conv_._{}'.format(i)))\n self.conv_layers.append(tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm_._{}'.format(i)))\n self.conv_layers.append(tf.keras.layers.Activation(tf.nn.relu6))\n self.conv_layers.append(tf.keras.layers.Dropout(config.duration_predictor_dropout_probs))\n self.conv_layers_sequence = tf.keras.Sequential(self.conv_layers)\n self.output_layer = 
tf.keras.layers.Dense(1)\n<|end_body_0|>\n\n<|body_start_1|>\n encoder_hidden_states, attention_mask = inputs\n attention_mask = tf.cast(tf.expand_dims(attention_mask, 2), encoder_hidden_states.dtype)\n masked_encoder_hidden_states = encoder_hidden_states * attention_mask\n outputs = self.conv_layers_sequence(masked_encoder_hidden_states)\n outputs = self.output_layer(outputs)\n masked_outputs = outputs * attention_mask\n return tf.squeeze(tf.nn.relu6(masked_outputs), -1)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000395", "length_bytes": 33971, "license_type": "permissive", "methods": [{"docstring": "Init variables.", "name": "__init__", "signature": "def __init__(self, config, **kwargs)"}, {"docstring": "Call logic.", "name": "call", "signature": "def call(self, inputs, training=False)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002671", "prompt": "Implement the Python class `TFFastSpeechDurationPredictor` described below.\n\nClass description:\nFastSpeech duration predictor module.\n\nMethod signatures and docstrings:\n- def __init__(self, config, **kwargs): Init variables.\n- def call(self, inputs, training=False): Call logic.", "prompted_full_text": "Implement the Python class `TFFastSpeechDurationPredictor` described below.\n\nClass description:\nFastSpeech duration predictor module.\n\nMethod signatures and docstrings:\n- def __init__(self, config, **kwargs): Init variables.\n- def call(self, inputs, training=False): Call logic.\n\n<|skeleton|>\nclass TFFastSpeechDurationPredictor:\n \"\"\"FastSpeech duration predictor module.\"\"\"\n\n def __init__(self, config, **kwargs):\n \"\"\"Init variables.\"\"\"\n <|body_0|>\n\n def call(self, inputs, training=False):\n \"\"\"Call logic.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(**kwargs)\n self.conv_layers = []\n for i in range(config.num_duration_conv_layers):\n self.conv_layers.append(tf.keras.layers.Conv1D(config.duration_predictor_filters, config.duration_predictor_kernel_sizes, padding='same', name='conv_._{}'.format(i)))\n self.conv_layers.append(tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm_._{}'.format(i)))\n self.conv_layers.append(tf.keras.layers.Activation(tf.nn.relu6))\n self.conv_layers.append(tf.keras.layers.Dropout(config.duration_predictor_dropout_probs))\n self.conv_layers_sequence = tf.keras.Sequential(self.conv_layers)\n self.output_layer = tf.keras.layers.Dense(1)\n<|end_body_0|>\n\n<|body_start_1|>\n encoder_hidden_states, attention_mask = inputs\n attention_mask = tf.cast(tf.expand_dims(attention_mask, 2), encoder_hidden_states.dtype)\n masked_encoder_hidden_states = encoder_hidden_states * attention_mask\n outputs = self.conv_layers_sequence(masked_encoder_hidden_states)\n outputs = self.output_layer(outputs)\n masked_outputs = outputs * attention_mask\n return tf.squeeze(tf.nn.relu6(masked_outputs), -1)\n<|end_body_1|>\n", "revision_id": "136877136355c82d7ba474ceb7a8f133bd84767e", "skeleton": "<|skeleton|>\nclass TFFastSpeechDurationPredictor:\n \"\"\"FastSpeech duration predictor module.\"\"\"\n\n def __init__(self, config, **kwargs):\n \"\"\"Init variables.\"\"\"\n <|body_0|>\n\n def call(self, inputs, training=False):\n \"\"\"Call logic.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TFFastSpeechDurationPredictor:\n \"\"\"FastSpeech duration 
predictor module.\"\"\"\n\n def __init__(self, config, **kwargs):\n \"\"\"Init variables.\"\"\"\n super().__init__(**kwargs)\n self.conv_layers = []\n for i in range(config.num_duration_conv_layers):\n self.conv_layers.append(tf.keras.layers.Conv1D(config.duration_predictor_filters, config.duration_predictor_kernel_sizes, padding='same', name='conv_._{}'.format(i)))\n self.conv_layers.append(tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm_._{}'.format(i)))\n self.conv_layers.append(tf.keras.layers.Activation(tf.nn.relu6))\n self.conv_layers.append(tf.keras.layers.Dropout(config.duration_predictor_dropout_probs))\n self.conv_layers_sequence = tf.keras.Sequential(self.conv_layers)\n self.output_layer = tf.keras.layers.Dense(1)\n\n def call(self, inputs, training=False):\n \"\"\"Call logic.\"\"\"\n encoder_hidden_states, attention_mask = inputs\n attention_mask = tf.cast(tf.expand_dims(attention_mask, 2), encoder_hidden_states.dtype)\n masked_encoder_hidden_states = encoder_hidden_states * attention_mask\n outputs = self.conv_layers_sequence(masked_encoder_hidden_states)\n outputs = self.output_layer(outputs)\n masked_outputs = outputs * attention_mask\n return tf.squeeze(tf.nn.relu6(masked_outputs), -1)\n", "source": "the_stack_v2_python_sparse", "source_path": "tensorflow_tts/models/fastspeech.py", "source_repo": "TensorSpeech/TensorFlowTTS", "split": "val", "star_events_count": 2889}
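Annotation: the TFFastSpeechDurationPredictor record strips the base class from its skeleton, but the super().__init__(**kwargs) call and the call(inputs, training=False) signature imply tf.keras.layers.Layer in the original TensorFlowTTS source. The smoke test below runs under that assumption; the config field names come from the record, while the values and tensor shapes are arbitrary.

import types

import tensorflow as tf

config = types.SimpleNamespace(
    num_duration_conv_layers=2,
    duration_predictor_filters=64,
    duration_predictor_kernel_sizes=3,
    duration_predictor_dropout_probs=0.1,
    layer_norm_eps=1e-5,
)

layer = TFFastSpeechDurationPredictor(config)  # assumed to subclass tf.keras.layers.Layer
hidden = tf.random.normal([2, 7, 384])         # [batch, max_time, hidden_size]
mask = tf.sequence_mask([7, 5], maxlen=7, dtype=tf.int32)  # 1 = real frame, 0 = padding
durations = layer((hidden, mask))              # Layer.__call__ routes into call()
print(durations.shape)                         # (2, 7)

The two masking multiplies zero out padded frames both before and after the conv stack, so every padded step of the squeezed (batch, max_time) output is exactly 0, and relu6 clamps the predicted durations to [0, 6].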
{"blob_id": "81065031a3f5302400e118a6183efdb2dda1ac34", "bodies": ["if not parse_node:\n raise TypeError('parse_node cannot be null.')\nreturn TeleconferenceDeviceQuality()", "from .teleconference_device_media_quality import TeleconferenceDeviceMediaQuality\nfrom .teleconference_device_media_quality import TeleconferenceDeviceMediaQuality\nfields: Dict[str, Callable[[Any], None]] = {'callChainId': lambda n: setattr(self, 'call_chain_id', n.get_uuid_value()), 'cloudServiceDeploymentEnvironment': lambda n: setattr(self, 'cloud_service_deployment_environment', n.get_str_value()), 'cloudServiceDeploymentId': lambda n: setattr(self, 'cloud_service_deployment_id', n.get_str_value()), 'cloudServiceInstanceName': lambda n: setattr(self, 'cloud_service_instance_name', n.get_str_value()), 'cloudServiceName': lambda n: setattr(self, 'cloud_service_name', n.get_str_value()), 'deviceDescription': lambda n: setattr(self, 'device_description', n.get_str_value()), 'deviceName': lambda n: setattr(self, 'device_name', n.get_str_value()), 'mediaLegId': lambda n: setattr(self, 'media_leg_id', n.get_uuid_value()), 'mediaQualityList': lambda n: setattr(self, 'media_quality_list', n.get_collection_of_object_values(TeleconferenceDeviceMediaQuality)), '@odata.type': lambda n: setattr(self, 'odata_type', n.get_str_value()), 'participantId': lambda n: setattr(self, 'participant_id', n.get_uuid_value())}\nreturn fields", "if not writer:\n raise TypeError('writer cannot be null.')\nwriter.write_uuid_value('callChainId', self.call_chain_id)\nwriter.write_str_value('cloudServiceDeploymentEnvironment', self.cloud_service_deployment_environment)\nwriter.write_str_value('cloudServiceDeploymentId', self.cloud_service_deployment_id)\nwriter.write_str_value('cloudServiceInstanceName', self.cloud_service_instance_name)\nwriter.write_str_value('cloudServiceName', self.cloud_service_name)\nwriter.write_str_value('deviceDescription', self.device_description)\nwriter.write_str_value('deviceName', self.device_name)\nwriter.write_uuid_value('mediaLegId', self.media_leg_id)\nwriter.write_collection_of_object_values('mediaQualityList', self.media_quality_list)\nwriter.write_str_value('@odata.type', self.odata_type)\nwriter.write_uuid_value('participantId', self.participant_id)\nwriter.write_additional_data_value(self.additional_data)"], "bodies_text": "<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return TeleconferenceDeviceQuality()\n<|end_body_0|>\n\n<|body_start_1|>\n from .teleconference_device_media_quality import TeleconferenceDeviceMediaQuality\n from .teleconference_device_media_quality import TeleconferenceDeviceMediaQuality\n fields: Dict[str, Callable[[Any], None]] = {'callChainId': lambda n: setattr(self, 'call_chain_id', n.get_uuid_value()), 'cloudServiceDeploymentEnvironment': lambda n: setattr(self, 'cloud_service_deployment_environment', n.get_str_value()), 'cloudServiceDeploymentId': lambda n: setattr(self, 'cloud_service_deployment_id', n.get_str_value()), 'cloudServiceInstanceName': lambda n: setattr(self, 'cloud_service_instance_name', n.get_str_value()), 'cloudServiceName': lambda n: setattr(self, 'cloud_service_name', n.get_str_value()), 'deviceDescription': lambda n: setattr(self, 'device_description', n.get_str_value()), 'deviceName': lambda n: setattr(self, 'device_name', n.get_str_value()), 'mediaLegId': lambda n: setattr(self, 'media_leg_id', n.get_uuid_value()), 'mediaQualityList': lambda n: setattr(self, 'media_quality_list', 
n.get_collection_of_object_values(TeleconferenceDeviceMediaQuality)), '@odata.type': lambda n: setattr(self, 'odata_type', n.get_str_value()), 'participantId': lambda n: setattr(self, 'participant_id', n.get_uuid_value())}\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n writer.write_uuid_value('callChainId', self.call_chain_id)\n writer.write_str_value('cloudServiceDeploymentEnvironment', self.cloud_service_deployment_environment)\n writer.write_str_value('cloudServiceDeploymentId', self.cloud_service_deployment_id)\n writer.write_str_value('cloudServiceInstanceName', self.cloud_service_instance_name)\n writer.write_str_value('cloudServiceName', self.cloud_service_name)\n writer.write_str_value('deviceDescription', self.device_description)\n writer.write_str_value('deviceName', self.device_name)\n writer.write_uuid_value('mediaLegId', self.media_leg_id)\n writer.write_collection_of_object_values('mediaQualityList', self.media_quality_list)\n writer.write_str_value('@odata.type', self.odata_type)\n writer.write_uuid_value('participantId', self.participant_id)\n writer.write_additional_data_value(self.additional_data)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "TeleconferenceDeviceQuality", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TeleconferenceDeviceQuality:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> TeleconferenceDeviceQuality:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: TeleconferenceDeviceQuality\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return TeleconferenceDeviceQuality()\n<|end_body_0|>\n\n<|body_start_1|>\n from .teleconference_device_media_quality import TeleconferenceDeviceMediaQuality\n from .teleconference_device_media_quality import TeleconferenceDeviceMediaQuality\n fields: Dict[str, Callable[[Any], None]] = {'callChainId': lambda n: setattr(self, 'call_chain_id', n.get_uuid_value()), 'cloudServiceDeploymentEnvironment': lambda n: setattr(self, 'cloud_service_deployment_environment', n.get_str_value()), 'cloudServiceDeploymentId': lambda n: setattr(self, 'cloud_service_deployment_id', n.get_str_value()), 'cloudServiceInstanceName': lambda n: setattr(self, 'cloud_service_instance_name', n.get_str_value()), 'cloudServiceName': lambda n: setattr(self, 'cloud_service_name', n.get_str_value()), 'deviceDescription': lambda n: setattr(self, 'device_description', n.get_str_value()), 'deviceName': lambda n: setattr(self, 'device_name', n.get_str_value()), 'mediaLegId': lambda n: setattr(self, 'media_leg_id', n.get_uuid_value()), 'mediaQualityList': lambda n: setattr(self, 'media_quality_list', n.get_collection_of_object_values(TeleconferenceDeviceMediaQuality)), '@odata.type': lambda n: setattr(self, 'odata_type', n.get_str_value()), 'participantId': lambda n: 
setattr(self, 'participant_id', n.get_uuid_value())}\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n writer.write_uuid_value('callChainId', self.call_chain_id)\n writer.write_str_value('cloudServiceDeploymentEnvironment', self.cloud_service_deployment_environment)\n writer.write_str_value('cloudServiceDeploymentId', self.cloud_service_deployment_id)\n writer.write_str_value('cloudServiceInstanceName', self.cloud_service_instance_name)\n writer.write_str_value('cloudServiceName', self.cloud_service_name)\n writer.write_str_value('deviceDescription', self.device_description)\n writer.write_str_value('deviceName', self.device_name)\n writer.write_uuid_value('mediaLegId', self.media_leg_id)\n writer.write_collection_of_object_values('mediaQualityList', self.media_quality_list)\n writer.write_str_value('@odata.type', self.odata_type)\n writer.write_uuid_value('participantId', self.participant_id)\n writer.write_additional_data_value(self.additional_data)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000396", "length_bytes": 6036, "license_type": "permissive", "methods": [{"docstring": "Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: TeleconferenceDeviceQuality", "name": "create_from_discriminator_value", "signature": "def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> TeleconferenceDeviceQuality"}, {"docstring": "The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]", "name": "get_field_deserializers", "signature": "def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]"}, {"docstring": "Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "name": "serialize", "signature": "def serialize(self, writer: SerializationWriter) -> None"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_026130", "prompt": "Implement the Python class `TeleconferenceDeviceQuality` described below.\n\nClass description:\nImplement the TeleconferenceDeviceQuality class.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> TeleconferenceDeviceQuality: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: TeleconferenceDeviceQuality\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "prompted_full_text": "Implement the Python class `TeleconferenceDeviceQuality` described below.\n\nClass description:\nImplement the TeleconferenceDeviceQuality class.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> TeleconferenceDeviceQuality: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: TeleconferenceDeviceQuality\n- def get_field_deserializers(self) -> Dict[str, 
Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model\n\n<|skeleton|>\nclass TeleconferenceDeviceQuality:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> TeleconferenceDeviceQuality:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: TeleconferenceDeviceQuality\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return TeleconferenceDeviceQuality()\n<|end_body_0|>\n\n<|body_start_1|>\n from .teleconference_device_media_quality import TeleconferenceDeviceMediaQuality\n from .teleconference_device_media_quality import TeleconferenceDeviceMediaQuality\n fields: Dict[str, Callable[[Any], None]] = {'callChainId': lambda n: setattr(self, 'call_chain_id', n.get_uuid_value()), 'cloudServiceDeploymentEnvironment': lambda n: setattr(self, 'cloud_service_deployment_environment', n.get_str_value()), 'cloudServiceDeploymentId': lambda n: setattr(self, 'cloud_service_deployment_id', n.get_str_value()), 'cloudServiceInstanceName': lambda n: setattr(self, 'cloud_service_instance_name', n.get_str_value()), 'cloudServiceName': lambda n: setattr(self, 'cloud_service_name', n.get_str_value()), 'deviceDescription': lambda n: setattr(self, 'device_description', n.get_str_value()), 'deviceName': lambda n: setattr(self, 'device_name', n.get_str_value()), 'mediaLegId': lambda n: setattr(self, 'media_leg_id', n.get_uuid_value()), 'mediaQualityList': lambda n: setattr(self, 'media_quality_list', n.get_collection_of_object_values(TeleconferenceDeviceMediaQuality)), '@odata.type': lambda n: setattr(self, 'odata_type', n.get_str_value()), 'participantId': lambda n: setattr(self, 'participant_id', n.get_uuid_value())}\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n writer.write_uuid_value('callChainId', self.call_chain_id)\n writer.write_str_value('cloudServiceDeploymentEnvironment', self.cloud_service_deployment_environment)\n writer.write_str_value('cloudServiceDeploymentId', self.cloud_service_deployment_id)\n writer.write_str_value('cloudServiceInstanceName', self.cloud_service_instance_name)\n writer.write_str_value('cloudServiceName', self.cloud_service_name)\n writer.write_str_value('deviceDescription', self.device_description)\n writer.write_str_value('deviceName', self.device_name)\n writer.write_uuid_value('mediaLegId', self.media_leg_id)\n writer.write_collection_of_object_values('mediaQualityList', self.media_quality_list)\n writer.write_str_value('@odata.type', self.odata_type)\n writer.write_uuid_value('participantId', self.participant_id)\n writer.write_additional_data_value(self.additional_data)\n<|end_body_2|>\n", "revision_id": 
"27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949", "skeleton": "<|skeleton|>\nclass TeleconferenceDeviceQuality:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> TeleconferenceDeviceQuality:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: TeleconferenceDeviceQuality\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TeleconferenceDeviceQuality:\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> TeleconferenceDeviceQuality:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: TeleconferenceDeviceQuality\"\"\"\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return TeleconferenceDeviceQuality()\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n from .teleconference_device_media_quality import TeleconferenceDeviceMediaQuality\n from .teleconference_device_media_quality import TeleconferenceDeviceMediaQuality\n fields: Dict[str, Callable[[Any], None]] = {'callChainId': lambda n: setattr(self, 'call_chain_id', n.get_uuid_value()), 'cloudServiceDeploymentEnvironment': lambda n: setattr(self, 'cloud_service_deployment_environment', n.get_str_value()), 'cloudServiceDeploymentId': lambda n: setattr(self, 'cloud_service_deployment_id', n.get_str_value()), 'cloudServiceInstanceName': lambda n: setattr(self, 'cloud_service_instance_name', n.get_str_value()), 'cloudServiceName': lambda n: setattr(self, 'cloud_service_name', n.get_str_value()), 'deviceDescription': lambda n: setattr(self, 'device_description', n.get_str_value()), 'deviceName': lambda n: setattr(self, 'device_name', n.get_str_value()), 'mediaLegId': lambda n: setattr(self, 'media_leg_id', n.get_uuid_value()), 'mediaQualityList': lambda n: setattr(self, 'media_quality_list', n.get_collection_of_object_values(TeleconferenceDeviceMediaQuality)), '@odata.type': lambda n: setattr(self, 'odata_type', n.get_str_value()), 'participantId': lambda n: setattr(self, 'participant_id', n.get_uuid_value())}\n return fields\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n if not writer:\n raise TypeError('writer cannot be null.')\n writer.write_uuid_value('callChainId', self.call_chain_id)\n writer.write_str_value('cloudServiceDeploymentEnvironment', self.cloud_service_deployment_environment)\n writer.write_str_value('cloudServiceDeploymentId', self.cloud_service_deployment_id)\n writer.write_str_value('cloudServiceInstanceName', self.cloud_service_instance_name)\n 
writer.write_str_value('cloudServiceName', self.cloud_service_name)\n writer.write_str_value('deviceDescription', self.device_description)\n writer.write_str_value('deviceName', self.device_name)\n writer.write_uuid_value('mediaLegId', self.media_leg_id)\n writer.write_collection_of_object_values('mediaQualityList', self.media_quality_list)\n writer.write_str_value('@odata.type', self.odata_type)\n writer.write_uuid_value('participantId', self.participant_id)\n writer.write_additional_data_value(self.additional_data)\n", "source": "the_stack_v2_python_sparse", "source_path": "msgraph/generated/models/teleconference_device_quality.py", "source_repo": "microsoftgraph/msgraph-sdk-python", "split": "val", "star_events_count": 135}
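Annotation: the TeleconferenceDeviceQuality record follows the Kiota serialization pattern used throughout msgraph-sdk-python: get_field_deserializers returns a dict mapping wire field names to closures that setattr the matching snake_case attribute, so a parser can drive deserialization without reflection. The recorded method cannot be called standalone (its first line is a relative import into the msgraph package), so the toy below mirrors the pattern with hypothetical model and node classes.

class ToyNode:
    """Hypothetical stand-in for a Kiota parse node wrapping one raw value."""

    def __init__(self, value):
        self._value = value

    def get_str_value(self):
        return str(self._value)


class ToyModel:
    def __init__(self):
        self.device_name = None
        self.cloud_service_name = None

    def get_field_deserializers(self):
        # Wire name -> closure; the parser looks up each incoming field here
        # and never needs to know the model's attribute names.
        return {
            'deviceName': lambda n: setattr(self, 'device_name', n.get_str_value()),
            'cloudServiceName': lambda n: setattr(self, 'cloud_service_name', n.get_str_value()),
        }


obj = ToyModel()
fields = obj.get_field_deserializers()
for name, value in {'deviceName': 'polycom-01', 'cloudServiceName': 'teams-media'}.items():
    fields[name](ToyNode(value))
print(obj.device_name, obj.cloud_service_name)  # polycom-01 teams-media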
{"blob_id": "b59c1d34326e3f1876702753ebb9aff28d485e1f", "bodies": ["result_particles = []\nnd_timestamp, non_data, non_start, non_end = self._chunker.get_next_non_data_with_index(clean=False)\ntimestamp, chunk, start, end = self._chunker.get_next_data_with_index(clean=True)\nself.handle_non_data(non_data, non_end, start)\nwhile chunk is not None:\n header_match = SIO_HEADER_MATCHER.match(chunk)\n if header_match.group(SIO_HEADER_GROUP_ID) == 'CS':\n data_match = ENG_MATCHER.match(chunk)\n if data_match:\n posix_time = int(header_match.group(SIO_HEADER_GROUP_TIMESTAMP), 16)\n log.debug('utc timestamp %s', datetime.utcfromtimestamp(posix_time))\n timestamp = ntplib.system_to_ntp_time(float(posix_time))\n sample = self._extract_sample(self._particle_class, None, data_match, internal_timestamp=timestamp)\n if sample:\n result_particles.append(sample)\n else:\n log.warn('CS data does not match REGEX')\n self._exception_callback(SampleException('CS data does not match REGEX'))\n elif header_match.group(SIO_HEADER_GROUP_ID) != 'PS':\n message = 'Unexpected Sio Header ID %s' % header_match.group(SIO_HEADER_GROUP_ID)\n log.warn(message)\n self._exception_callback(UnexpectedDataException(message))\n nd_timestamp, non_data, non_start, non_end = self._chunker.get_next_non_data_with_index(clean=False)\n timestamp, chunk, start, end = self._chunker.get_next_data_with_index(clean=True)\n self.handle_non_data(non_data, non_end, start)\nreturn result_particles", "if non_data is not None and non_end <= start:\n message = 'Found %d bytes of un-expected non-data' % len(non_data)\n log.warn(message)\n self._exception_callback(UnexpectedDataException(message))"], "bodies_text": "<|body_start_0|>\n result_particles = []\n nd_timestamp, non_data, non_start, non_end = self._chunker.get_next_non_data_with_index(clean=False)\n timestamp, chunk, start, end = self._chunker.get_next_data_with_index(clean=True)\n self.handle_non_data(non_data, non_end, start)\n while chunk is not None:\n header_match = SIO_HEADER_MATCHER.match(chunk)\n if header_match.group(SIO_HEADER_GROUP_ID) == 'CS':\n data_match = ENG_MATCHER.match(chunk)\n if data_match:\n posix_time = int(header_match.group(SIO_HEADER_GROUP_TIMESTAMP), 16)\n log.debug('utc timestamp %s', datetime.utcfromtimestamp(posix_time))\n timestamp = ntplib.system_to_ntp_time(float(posix_time))\n sample = self._extract_sample(self._particle_class, None, data_match, internal_timestamp=timestamp)\n if sample:\n result_particles.append(sample)\n else:\n log.warn('CS data does not match REGEX')\n self._exception_callback(SampleException('CS data does not match REGEX'))\n elif header_match.group(SIO_HEADER_GROUP_ID) != 'PS':\n message = 'Unexpected Sio Header ID %s' % header_match.group(SIO_HEADER_GROUP_ID)\n log.warn(message)\n self._exception_callback(UnexpectedDataException(message))\n nd_timestamp, non_data, non_start, non_end = self._chunker.get_next_non_data_with_index(clean=False)\n timestamp, chunk, start, end = self._chunker.get_next_data_with_index(clean=True)\n self.handle_non_data(non_data, non_end, start)\n return result_particles\n<|end_body_0|>\n\n<|body_start_1|>\n if non_data is not None and non_end <= start:\n message = 'Found %d bytes of un-expected non-data' % len(non_data)\n log.warn(message)\n self._exception_callback(UnexpectedDataException(message))\n<|end_body_1|>\n", "class_docstring": "Abstract Class for parsing Sio Eng Sio files", "class_name": "SioEngSioParser", "detected_licenses": ["BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference"], 
"format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SioEngSioParser:\n \"\"\"Abstract Class for parsing Sio Eng Sio files\"\"\"\n\n def parse_chunks(self):\n \"\"\"Parse out any pending data chunks in the chunker. If it is a valid data piece, build a particle, update the position and timestamp. Go until the chunker has no more valid data. @retval a list of tuples with sample particles encountered in this parsing, plus the state. An empty list of nothing was parsed.\"\"\"\n <|body_0|>\n\n def handle_non_data(self, non_data, non_end, start):\n \"\"\"Check for and handle any non-data that is found in the file\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n result_particles = []\n nd_timestamp, non_data, non_start, non_end = self._chunker.get_next_non_data_with_index(clean=False)\n timestamp, chunk, start, end = self._chunker.get_next_data_with_index(clean=True)\n self.handle_non_data(non_data, non_end, start)\n while chunk is not None:\n header_match = SIO_HEADER_MATCHER.match(chunk)\n if header_match.group(SIO_HEADER_GROUP_ID) == 'CS':\n data_match = ENG_MATCHER.match(chunk)\n if data_match:\n posix_time = int(header_match.group(SIO_HEADER_GROUP_TIMESTAMP), 16)\n log.debug('utc timestamp %s', datetime.utcfromtimestamp(posix_time))\n timestamp = ntplib.system_to_ntp_time(float(posix_time))\n sample = self._extract_sample(self._particle_class, None, data_match, internal_timestamp=timestamp)\n if sample:\n result_particles.append(sample)\n else:\n log.warn('CS data does not match REGEX')\n self._exception_callback(SampleException('CS data does not match REGEX'))\n elif header_match.group(SIO_HEADER_GROUP_ID) != 'PS':\n message = 'Unexpected Sio Header ID %s' % header_match.group(SIO_HEADER_GROUP_ID)\n log.warn(message)\n self._exception_callback(UnexpectedDataException(message))\n nd_timestamp, non_data, non_start, non_end = self._chunker.get_next_non_data_with_index(clean=False)\n timestamp, chunk, start, end = self._chunker.get_next_data_with_index(clean=True)\n self.handle_non_data(non_data, non_end, start)\n return result_particles\n<|end_body_0|>\n\n<|body_start_1|>\n if non_data is not None and non_end <= start:\n message = 'Found %d bytes of un-expected non-data' % len(non_data)\n log.warn(message)\n self._exception_callback(UnexpectedDataException(message))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000397", "length_bytes": 6504, "license_type": "permissive", "methods": [{"docstring": "Parse out any pending data chunks in the chunker. If it is a valid data piece, build a particle, update the position and timestamp. Go until the chunker has no more valid data. @retval a list of tuples with sample particles encountered in this parsing, plus the state. An empty list of nothing was parsed.", "name": "parse_chunks", "signature": "def parse_chunks(self)"}, {"docstring": "Check for and handle any non-data that is found in the file", "name": "handle_non_data", "signature": "def handle_non_data(self, non_data, non_end, start)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_032411", "prompt": "Implement the Python class `SioEngSioParser` described below.\n\nClass description:\nAbstract Class for parsing Sio Eng Sio files\n\nMethod signatures and docstrings:\n- def parse_chunks(self): Parse out any pending data chunks in the chunker. If it is a valid data piece, build a particle, update the position and timestamp. Go until the chunker has no more valid data. 
@retval a list of tuples with sample particles encountered in this parsing, plus the state. An empty list of nothing was parsed.\n- def handle_non_data(self, non_data, non_end, start): Check for and handle any non-data that is found in the file", "prompted_full_text": "Implement the Python class `SioEngSioParser` described below.\n\nClass description:\nAbstract Class for parsing Sio Eng Sio files\n\nMethod signatures and docstrings:\n- def parse_chunks(self): Parse out any pending data chunks in the chunker. If it is a valid data piece, build a particle, update the position and timestamp. Go until the chunker has no more valid data. @retval a list of tuples with sample particles encountered in this parsing, plus the state. An empty list of nothing was parsed.\n- def handle_non_data(self, non_data, non_end, start): Check for and handle any non-data that is found in the file\n\n<|skeleton|>\nclass SioEngSioParser:\n \"\"\"Abstract Class for parsing Sio Eng Sio files\"\"\"\n\n def parse_chunks(self):\n \"\"\"Parse out any pending data chunks in the chunker. If it is a valid data piece, build a particle, update the position and timestamp. Go until the chunker has no more valid data. @retval a list of tuples with sample particles encountered in this parsing, plus the state. An empty list of nothing was parsed.\"\"\"\n <|body_0|>\n\n def handle_non_data(self, non_data, non_end, start):\n \"\"\"Check for and handle any non-data that is found in the file\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n result_particles = []\n nd_timestamp, non_data, non_start, non_end = self._chunker.get_next_non_data_with_index(clean=False)\n timestamp, chunk, start, end = self._chunker.get_next_data_with_index(clean=True)\n self.handle_non_data(non_data, non_end, start)\n while chunk is not None:\n header_match = SIO_HEADER_MATCHER.match(chunk)\n if header_match.group(SIO_HEADER_GROUP_ID) == 'CS':\n data_match = ENG_MATCHER.match(chunk)\n if data_match:\n posix_time = int(header_match.group(SIO_HEADER_GROUP_TIMESTAMP), 16)\n log.debug('utc timestamp %s', datetime.utcfromtimestamp(posix_time))\n timestamp = ntplib.system_to_ntp_time(float(posix_time))\n sample = self._extract_sample(self._particle_class, None, data_match, internal_timestamp=timestamp)\n if sample:\n result_particles.append(sample)\n else:\n log.warn('CS data does not match REGEX')\n self._exception_callback(SampleException('CS data does not match REGEX'))\n elif header_match.group(SIO_HEADER_GROUP_ID) != 'PS':\n message = 'Unexpected Sio Header ID %s' % header_match.group(SIO_HEADER_GROUP_ID)\n log.warn(message)\n self._exception_callback(UnexpectedDataException(message))\n nd_timestamp, non_data, non_start, non_end = self._chunker.get_next_non_data_with_index(clean=False)\n timestamp, chunk, start, end = self._chunker.get_next_data_with_index(clean=True)\n self.handle_non_data(non_data, non_end, start)\n return result_particles\n<|end_body_0|>\n\n<|body_start_1|>\n if non_data is not None and non_end <= start:\n message = 'Found %d bytes of un-expected non-data' % len(non_data)\n log.warn(message)\n self._exception_callback(UnexpectedDataException(message))\n<|end_body_1|>\n", "revision_id": "bdbf01f5614e7188ce19596704794466e5683b30", "skeleton": "<|skeleton|>\nclass SioEngSioParser:\n \"\"\"Abstract Class for parsing Sio Eng Sio files\"\"\"\n\n def parse_chunks(self):\n \"\"\"Parse out any pending data chunks in the chunker. If it is a valid data piece, build a particle, update the position and timestamp. 
Go until the chunker has no more valid data. @retval a list of tuples with sample particles encountered in this parsing, plus the state. An empty list of nothing was parsed.\"\"\"\n <|body_0|>\n\n def handle_non_data(self, non_data, non_end, start):\n \"\"\"Check for and handle any non-data that is found in the file\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SioEngSioParser:\n \"\"\"Abstract Class for parsing Sio Eng Sio files\"\"\"\n\n def parse_chunks(self):\n \"\"\"Parse out any pending data chunks in the chunker. If it is a valid data piece, build a particle, update the position and timestamp. Go until the chunker has no more valid data. @retval a list of tuples with sample particles encountered in this parsing, plus the state. An empty list of nothing was parsed.\"\"\"\n result_particles = []\n nd_timestamp, non_data, non_start, non_end = self._chunker.get_next_non_data_with_index(clean=False)\n timestamp, chunk, start, end = self._chunker.get_next_data_with_index(clean=True)\n self.handle_non_data(non_data, non_end, start)\n while chunk is not None:\n header_match = SIO_HEADER_MATCHER.match(chunk)\n if header_match.group(SIO_HEADER_GROUP_ID) == 'CS':\n data_match = ENG_MATCHER.match(chunk)\n if data_match:\n posix_time = int(header_match.group(SIO_HEADER_GROUP_TIMESTAMP), 16)\n log.debug('utc timestamp %s', datetime.utcfromtimestamp(posix_time))\n timestamp = ntplib.system_to_ntp_time(float(posix_time))\n sample = self._extract_sample(self._particle_class, None, data_match, internal_timestamp=timestamp)\n if sample:\n result_particles.append(sample)\n else:\n log.warn('CS data does not match REGEX')\n self._exception_callback(SampleException('CS data does not match REGEX'))\n elif header_match.group(SIO_HEADER_GROUP_ID) != 'PS':\n message = 'Unexpected Sio Header ID %s' % header_match.group(SIO_HEADER_GROUP_ID)\n log.warn(message)\n self._exception_callback(UnexpectedDataException(message))\n nd_timestamp, non_data, non_start, non_end = self._chunker.get_next_non_data_with_index(clean=False)\n timestamp, chunk, start, end = self._chunker.get_next_data_with_index(clean=True)\n self.handle_non_data(non_data, non_end, start)\n return result_particles\n\n def handle_non_data(self, non_data, non_end, start):\n \"\"\"Check for and handle any non-data that is found in the file\"\"\"\n if non_data is not None and non_end <= start:\n message = 'Found %d bytes of un-expected non-data' % len(non_data)\n log.warn(message)\n self._exception_callback(UnexpectedDataException(message))\n", "source": "the_stack_v2_python_sparse", "source_path": "mi/dataset/parser/sio_eng_sio.py", "source_repo": "oceanobservatories/mi-instrument", "split": "val", "star_events_count": 1}
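One concrete step in the parse_chunks body above is the timestamp handling: the SIO header carries POSIX seconds as a hex string, which the parser converts to UTC for logging and to NTP time for the particle. A worked example of just that conversion, with a made-up header value; the constant is the 1900-to-1970 epoch offset that ntplib.system_to_ntp_time applies:

    from datetime import datetime, timezone

    NTP_DELTA = 2208988800  # seconds from the NTP epoch (1900) to the Unix epoch (1970)

    header_ts = '5A0C6000'           # made-up SIO header timestamp (hex POSIX seconds)
    posix_time = int(header_ts, 16)  # 1510760448
    ntp_time = float(posix_time) + NTP_DELTA

    print(datetime.fromtimestamp(posix_time, tz=timezone.utc), ntp_time)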
{"blob_id": "e05e0fe9ec1047a1ba206a3e120e6b817c8998df", "bodies": ["super().__init__(name, **kwargs)\nself.website_id = 'gsmhosting'\nself.website_type = 'complaint'\nself.forum_list_xpath = '/html/body/div/div[1]/div/table[5]/tbody'\nself.forum_url_xpath = './tr/td/table/tr/td[3]/div/a/@href'\nself.post_list_xpath = '//*[@id=\"threadslist\"]/tbody[2]/tr'\nself.post_url_xpath = './td[3]/div/a/@href'\nself.post_list_url_xpath = '//a[@rel=\"next\"]/@href'\nself.post_comment_num = './td[5]/a/text()'\nself.post_view_num = './td[6]/text()'\nself.item_list_path = '//*[@id=\"posts\"]/div'\nself.item_path = dict()\nself.item_path['user_name'] = './/*[@class=\"bigusername\"]//text()'\nself.item_path['user_url'] = './/*[@class=\"bigusername\"]//@href'\nself.item_path['user_group'] = './div/div/div/table/tr[2]/td[1]/div[2]/text()'\nself.item_path['user_name'] = './/*[@class=\"bigusername\"]//text()'\nself.item_path['registration_date'] = './/*[contains(text(),\"Join Date\")]//text()'\nself.item_path['region'] = './/*[contains(text(),\"Location\")]//text()'\nself.item_path['age'] = './/*[contains(text(),\"Age\")]//text()'\nself.item_path['user_comment_num'] = './/*[contains(text(),\"Posts\")]//text()'\nself.item_path['points'] = './/*[contains(text(),\"Thanks Meter\")]//text()'\nself.item_path['title'] = './div/div/div/table/tr[2]/td[2]/div[1]/strong/text()'\nself.item_path['time'] = './div/div/div/table/tr[1]/td[1]/text()'\nself.item_path['main_body'] = './div/div/div/table/tr[2]/td[2]/div/text()'\nself.item_path['floor'] = './div/div/div/table/tr[1]/td[2]/a/strong/text()'\nself.item_url = '/html/body/div[3]/div[1]/div/table[1]/tr/td[2]/div/table/tr/td[5]/a/@href'", "forums = response.xpath(self.forum_list_xpath)\nfor forum in forums:\n forum_url = forum.xpath(self.forum_url_xpath).extract_first()\n if forum_url:\n yield response.follow(forum_url, callback=self.parse_post)", "posts = response.xpath(self.post_list_xpath)\nfor post in posts:\n post_url = post.xpath(self.post_url_xpath).extract_first()\n if post_url:\n comment_num = post.xpath(self.post_comment_num).extract_first()\n view_num = post.xpath(self.post_view_num).extract_first()\n yield response.follow(post_url, callback=self.parse_comment, meta={'comment_num': comment_num, 'view_num': view_num})\npost_list_url = response.xpath(self.post_list_url_xpath).extract_first()\nif post_list_url:\n yield response.follow(post_list_url, callback=self.parse_post)", "if response.meta.get('post_id'):\n post_id = response.meta['post_id']\nelse:\n post_id = None\ncomments = response.xpath(self.item_list_path)\nif comments:\n for comment_data in comments[:-1]:\n item = DataLoader(item=dict(), selector=comment_data)\n for field, xpath in self.item_path.items():\n item.add_xpath(field, xpath)\n item.add_value('content_url', response.url)\n item = item.load_item()\n if item.get('floor') == '1':\n item['content_comment_num'] = response.meta['comment_num']\n item['view_num'] = response.meta['view_num']\n yield self.padding_item(item, -1)\n post_id = item['content_id']\n else:\n yield self.padding_item(item, post_id)\nitem_url = response.xpath(self.item_url).extract_first()\nif item_url:\n yield response.follow(item_url, callback=self.parse_comment, meta={'post_id': post_id})"], "bodies_text": "<|body_start_0|>\n super().__init__(name, **kwargs)\n self.website_id = 'gsmhosting'\n self.website_type = 'complaint'\n self.forum_list_xpath = '/html/body/div/div[1]/div/table[5]/tbody'\n self.forum_url_xpath = './tr/td/table/tr/td[3]/div/a/@href'\n self.post_list_xpath = 
'//*[@id=\"threadslist\"]/tbody[2]/tr'\n self.post_url_xpath = './td[3]/div/a/@href'\n self.post_list_url_xpath = '//a[@rel=\"next\"]/@href'\n self.post_comment_num = './td[5]/a/text()'\n self.post_view_num = './td[6]/text()'\n self.item_list_path = '//*[@id=\"posts\"]/div'\n self.item_path = dict()\n self.item_path['user_name'] = './/*[@class=\"bigusername\"]//text()'\n self.item_path['user_url'] = './/*[@class=\"bigusername\"]//@href'\n self.item_path['user_group'] = './div/div/div/table/tr[2]/td[1]/div[2]/text()'\n self.item_path['user_name'] = './/*[@class=\"bigusername\"]//text()'\n self.item_path['registration_date'] = './/*[contains(text(),\"Join Date\")]//text()'\n self.item_path['region'] = './/*[contains(text(),\"Location\")]//text()'\n self.item_path['age'] = './/*[contains(text(),\"Age\")]//text()'\n self.item_path['user_comment_num'] = './/*[contains(text(),\"Posts\")]//text()'\n self.item_path['points'] = './/*[contains(text(),\"Thanks Meter\")]//text()'\n self.item_path['title'] = './div/div/div/table/tr[2]/td[2]/div[1]/strong/text()'\n self.item_path['time'] = './div/div/div/table/tr[1]/td[1]/text()'\n self.item_path['main_body'] = './div/div/div/table/tr[2]/td[2]/div/text()'\n self.item_path['floor'] = './div/div/div/table/tr[1]/td[2]/a/strong/text()'\n self.item_url = '/html/body/div[3]/div[1]/div/table[1]/tr/td[2]/div/table/tr/td[5]/a/@href'\n<|end_body_0|>\n\n<|body_start_1|>\n forums = response.xpath(self.forum_list_xpath)\n for forum in forums:\n forum_url = forum.xpath(self.forum_url_xpath).extract_first()\n if forum_url:\n yield response.follow(forum_url, callback=self.parse_post)\n<|end_body_1|>\n\n<|body_start_2|>\n posts = response.xpath(self.post_list_xpath)\n for post in posts:\n post_url = post.xpath(self.post_url_xpath).extract_first()\n if post_url:\n comment_num = post.xpath(self.post_comment_num).extract_first()\n view_num = post.xpath(self.post_view_num).extract_first()\n yield response.follow(post_url, callback=self.parse_comment, meta={'comment_num': comment_num, 'view_num': view_num})\n post_list_url = response.xpath(self.post_list_url_xpath).extract_first()\n if post_list_url:\n yield response.follow(post_list_url, callback=self.parse_post)\n<|end_body_2|>\n\n<|body_start_3|>\n if response.meta.get('post_id'):\n post_id = response.meta['post_id']\n else:\n post_id = None\n comments = response.xpath(self.item_list_path)\n if comments:\n for comment_data in comments[:-1]:\n item = DataLoader(item=dict(), selector=comment_data)\n for field, xpath in self.item_path.items():\n item.add_xpath(field, xpath)\n item.add_value('content_url', response.url)\n item = item.load_item()\n if item.get('floor') == '1':\n item['content_comment_num'] = response.meta['comment_num']\n item['view_num'] = response.meta['view_num']\n yield self.padding_item(item, -1)\n post_id = item['content_id']\n else:\n yield self.padding_item(item, post_id)\n item_url = response.xpath(self.item_url).extract_first()\n if item_url:\n yield response.follow(item_url, callback=self.parse_comment, meta={'post_id': post_id})\n<|end_body_3|>\n", "class_docstring": "解析数据和爬虫逻辑类", "class_name": "MySpider", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MySpider:\n \"\"\"解析数据和爬虫逻辑类\"\"\"\n\n def __init__(self, name=None, **kwargs):\n \"\"\"完成解析前的初始化工作,主要是将用的到 xpath 配合完成 :param self: 类的对象自身 :param name: scrapy 会将 name 属性传递进来 :param kwargs: 字典形式的参数,用于更新 self.__dict__ :return None\"\"\"\n <|body_0|>\n\n def parse(self, response):\n 
\"\"\"构造帖子页请求\"\"\"\n <|body_1|>\n\n def parse_post(self, response):\n \"\"\"构造评论页请求\"\"\"\n <|body_2|>\n\n def parse_comment(self, response):\n \"\"\"解析评论页数据以及构造下一评论页请求\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(name, **kwargs)\n self.website_id = 'gsmhosting'\n self.website_type = 'complaint'\n self.forum_list_xpath = '/html/body/div/div[1]/div/table[5]/tbody'\n self.forum_url_xpath = './tr/td/table/tr/td[3]/div/a/@href'\n self.post_list_xpath = '//*[@id=\"threadslist\"]/tbody[2]/tr'\n self.post_url_xpath = './td[3]/div/a/@href'\n self.post_list_url_xpath = '//a[@rel=\"next\"]/@href'\n self.post_comment_num = './td[5]/a/text()'\n self.post_view_num = './td[6]/text()'\n self.item_list_path = '//*[@id=\"posts\"]/div'\n self.item_path = dict()\n self.item_path['user_name'] = './/*[@class=\"bigusername\"]//text()'\n self.item_path['user_url'] = './/*[@class=\"bigusername\"]//@href'\n self.item_path['user_group'] = './div/div/div/table/tr[2]/td[1]/div[2]/text()'\n self.item_path['user_name'] = './/*[@class=\"bigusername\"]//text()'\n self.item_path['registration_date'] = './/*[contains(text(),\"Join Date\")]//text()'\n self.item_path['region'] = './/*[contains(text(),\"Location\")]//text()'\n self.item_path['age'] = './/*[contains(text(),\"Age\")]//text()'\n self.item_path['user_comment_num'] = './/*[contains(text(),\"Posts\")]//text()'\n self.item_path['points'] = './/*[contains(text(),\"Thanks Meter\")]//text()'\n self.item_path['title'] = './div/div/div/table/tr[2]/td[2]/div[1]/strong/text()'\n self.item_path['time'] = './div/div/div/table/tr[1]/td[1]/text()'\n self.item_path['main_body'] = './div/div/div/table/tr[2]/td[2]/div/text()'\n self.item_path['floor'] = './div/div/div/table/tr[1]/td[2]/a/strong/text()'\n self.item_url = '/html/body/div[3]/div[1]/div/table[1]/tr/td[2]/div/table/tr/td[5]/a/@href'\n<|end_body_0|>\n\n<|body_start_1|>\n forums = response.xpath(self.forum_list_xpath)\n for forum in forums:\n forum_url = forum.xpath(self.forum_url_xpath).extract_first()\n if forum_url:\n yield response.follow(forum_url, callback=self.parse_post)\n<|end_body_1|>\n\n<|body_start_2|>\n posts = response.xpath(self.post_list_xpath)\n for post in posts:\n post_url = post.xpath(self.post_url_xpath).extract_first()\n if post_url:\n comment_num = post.xpath(self.post_comment_num).extract_first()\n view_num = post.xpath(self.post_view_num).extract_first()\n yield response.follow(post_url, callback=self.parse_comment, meta={'comment_num': comment_num, 'view_num': view_num})\n post_list_url = response.xpath(self.post_list_url_xpath).extract_first()\n if post_list_url:\n yield response.follow(post_list_url, callback=self.parse_post)\n<|end_body_2|>\n\n<|body_start_3|>\n if response.meta.get('post_id'):\n post_id = response.meta['post_id']\n else:\n post_id = None\n comments = response.xpath(self.item_list_path)\n if comments:\n for comment_data in comments[:-1]:\n item = DataLoader(item=dict(), selector=comment_data)\n for field, xpath in self.item_path.items():\n item.add_xpath(field, xpath)\n item.add_value('content_url', response.url)\n item = item.load_item()\n if item.get('floor') == '1':\n item['content_comment_num'] = response.meta['comment_num']\n item['view_num'] = response.meta['view_num']\n yield self.padding_item(item, -1)\n post_id = item['content_id']\n else:\n yield self.padding_item(item, post_id)\n item_url = response.xpath(self.item_url).extract_first()\n if item_url:\n yield response.follow(item_url, callback=self.parse_comment, meta={'post_id': 
post_id})\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000398", "length_bytes": 4938, "license_type": "no_license", "methods": [{"docstring": "完成解析前的初始化工作,主要是将用的到 xpath 配合完成 :param self: 类的对象自身 :param name: scrapy 会将 name 属性传递进来 :param kwargs: 字典形式的参数,用于更新 self.__dict__ :return None", "name": "__init__", "signature": "def __init__(self, name=None, **kwargs)"}, {"docstring": "构造帖子页请求", "name": "parse", "signature": "def parse(self, response)"}, {"docstring": "构造评论页请求", "name": "parse_post", "signature": "def parse_post(self, response)"}, {"docstring": "解析评论页数据以及构造下一评论页请求", "name": "parse_comment", "signature": "def parse_comment(self, response)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_004067", "prompt": "Implement the Python class `MySpider` described below.\n\nClass description:\n解析数据和爬虫逻辑类\n\nMethod signatures and docstrings:\n- def __init__(self, name=None, **kwargs): 完成解析前的初始化工作,主要是将用的到 xpath 配合完成 :param self: 类的对象自身 :param name: scrapy 会将 name 属性传递进来 :param kwargs: 字典形式的参数,用于更新 self.__dict__ :return None\n- def parse(self, response): 构造帖子页请求\n- def parse_post(self, response): 构造评论页请求\n- def parse_comment(self, response): 解析评论页数据以及构造下一评论页请求", "prompted_full_text": "Implement the Python class `MySpider` described below.\n\nClass description:\n解析数据和爬虫逻辑类\n\nMethod signatures and docstrings:\n- def __init__(self, name=None, **kwargs): 完成解析前的初始化工作,主要是将用的到 xpath 配合完成 :param self: 类的对象自身 :param name: scrapy 会将 name 属性传递进来 :param kwargs: 字典形式的参数,用于更新 self.__dict__ :return None\n- def parse(self, response): 构造帖子页请求\n- def parse_post(self, response): 构造评论页请求\n- def parse_comment(self, response): 解析评论页数据以及构造下一评论页请求\n\n<|skeleton|>\nclass MySpider:\n \"\"\"解析数据和爬虫逻辑类\"\"\"\n\n def __init__(self, name=None, **kwargs):\n \"\"\"完成解析前的初始化工作,主要是将用的到 xpath 配合完成 :param self: 类的对象自身 :param name: scrapy 会将 name 属性传递进来 :param kwargs: 字典形式的参数,用于更新 self.__dict__ :return None\"\"\"\n <|body_0|>\n\n def parse(self, response):\n \"\"\"构造帖子页请求\"\"\"\n <|body_1|>\n\n def parse_post(self, response):\n \"\"\"构造评论页请求\"\"\"\n <|body_2|>\n\n def parse_comment(self, response):\n \"\"\"解析评论页数据以及构造下一评论页请求\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(name, **kwargs)\n self.website_id = 'gsmhosting'\n self.website_type = 'complaint'\n self.forum_list_xpath = '/html/body/div/div[1]/div/table[5]/tbody'\n self.forum_url_xpath = './tr/td/table/tr/td[3]/div/a/@href'\n self.post_list_xpath = '//*[@id=\"threadslist\"]/tbody[2]/tr'\n self.post_url_xpath = './td[3]/div/a/@href'\n self.post_list_url_xpath = '//a[@rel=\"next\"]/@href'\n self.post_comment_num = './td[5]/a/text()'\n self.post_view_num = './td[6]/text()'\n self.item_list_path = '//*[@id=\"posts\"]/div'\n self.item_path = dict()\n self.item_path['user_name'] = './/*[@class=\"bigusername\"]//text()'\n self.item_path['user_url'] = './/*[@class=\"bigusername\"]//@href'\n self.item_path['user_group'] = './div/div/div/table/tr[2]/td[1]/div[2]/text()'\n self.item_path['user_name'] = './/*[@class=\"bigusername\"]//text()'\n self.item_path['registration_date'] = './/*[contains(text(),\"Join Date\")]//text()'\n self.item_path['region'] = './/*[contains(text(),\"Location\")]//text()'\n self.item_path['age'] = './/*[contains(text(),\"Age\")]//text()'\n self.item_path['user_comment_num'] = './/*[contains(text(),\"Posts\")]//text()'\n self.item_path['points'] = './/*[contains(text(),\"Thanks Meter\")]//text()'\n self.item_path['title'] = './div/div/div/table/tr[2]/td[2]/div[1]/strong/text()'\n 
self.item_path['time'] = './div/div/div/table/tr[1]/td[1]/text()'\n self.item_path['main_body'] = './div/div/div/table/tr[2]/td[2]/div/text()'\n self.item_path['floor'] = './div/div/div/table/tr[1]/td[2]/a/strong/text()'\n self.item_url = '/html/body/div[3]/div[1]/div/table[1]/tr/td[2]/div/table/tr/td[5]/a/@href'\n<|end_body_0|>\n\n<|body_start_1|>\n forums = response.xpath(self.forum_list_xpath)\n for forum in forums:\n forum_url = forum.xpath(self.forum_url_xpath).extract_first()\n if forum_url:\n yield response.follow(forum_url, callback=self.parse_post)\n<|end_body_1|>\n\n<|body_start_2|>\n posts = response.xpath(self.post_list_xpath)\n for post in posts:\n post_url = post.xpath(self.post_url_xpath).extract_first()\n if post_url:\n comment_num = post.xpath(self.post_comment_num).extract_first()\n view_num = post.xpath(self.post_view_num).extract_first()\n yield response.follow(post_url, callback=self.parse_comment, meta={'comment_num': comment_num, 'view_num': view_num})\n post_list_url = response.xpath(self.post_list_url_xpath).extract_first()\n if post_list_url:\n yield response.follow(post_list_url, callback=self.parse_post)\n<|end_body_2|>\n\n<|body_start_3|>\n if response.meta.get('post_id'):\n post_id = response.meta['post_id']\n else:\n post_id = None\n comments = response.xpath(self.item_list_path)\n if comments:\n for comment_data in comments[:-1]:\n item = DataLoader(item=dict(), selector=comment_data)\n for field, xpath in self.item_path.items():\n item.add_xpath(field, xpath)\n item.add_value('content_url', response.url)\n item = item.load_item()\n if item.get('floor') == '1':\n item['content_comment_num'] = response.meta['comment_num']\n item['view_num'] = response.meta['view_num']\n yield self.padding_item(item, -1)\n post_id = item['content_id']\n else:\n yield self.padding_item(item, post_id)\n item_url = response.xpath(self.item_url).extract_first()\n if item_url:\n yield response.follow(item_url, callback=self.parse_comment, meta={'post_id': post_id})\n<|end_body_3|>\n", "revision_id": "1b42878b694fabc65a02228662ffdf819e5dcc71", "skeleton": "<|skeleton|>\nclass MySpider:\n \"\"\"解析数据和爬虫逻辑类\"\"\"\n\n def __init__(self, name=None, **kwargs):\n \"\"\"完成解析前的初始化工作,主要是将用的到 xpath 配合完成 :param self: 类的对象自身 :param name: scrapy 会将 name 属性传递进来 :param kwargs: 字典形式的参数,用于更新 self.__dict__ :return None\"\"\"\n <|body_0|>\n\n def parse(self, response):\n \"\"\"构造帖子页请求\"\"\"\n <|body_1|>\n\n def parse_post(self, response):\n \"\"\"构造评论页请求\"\"\"\n <|body_2|>\n\n def parse_comment(self, response):\n \"\"\"解析评论页数据以及构造下一评论页请求\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class MySpider:\n \"\"\"解析数据和爬虫逻辑类\"\"\"\n\n def __init__(self, name=None, **kwargs):\n \"\"\"完成解析前的初始化工作,主要是将用的到 xpath 配合完成 :param self: 类的对象自身 :param name: scrapy 会将 name 属性传递进来 :param kwargs: 字典形式的参数,用于更新 self.__dict__ :return None\"\"\"\n super().__init__(name, **kwargs)\n self.website_id = 'gsmhosting'\n self.website_type = 'complaint'\n self.forum_list_xpath = '/html/body/div/div[1]/div/table[5]/tbody'\n self.forum_url_xpath = './tr/td/table/tr/td[3]/div/a/@href'\n self.post_list_xpath = '//*[@id=\"threadslist\"]/tbody[2]/tr'\n self.post_url_xpath = './td[3]/div/a/@href'\n self.post_list_url_xpath = '//a[@rel=\"next\"]/@href'\n self.post_comment_num = './td[5]/a/text()'\n self.post_view_num = './td[6]/text()'\n self.item_list_path = '//*[@id=\"posts\"]/div'\n 
self.item_path = dict()\n self.item_path['user_name'] = './/*[@class=\"bigusername\"]//text()'\n self.item_path['user_url'] = './/*[@class=\"bigusername\"]//@href'\n self.item_path['user_group'] = './div/div/div/table/tr[2]/td[1]/div[2]/text()'\n self.item_path['user_name'] = './/*[@class=\"bigusername\"]//text()'\n self.item_path['registration_date'] = './/*[contains(text(),\"Join Date\")]//text()'\n self.item_path['region'] = './/*[contains(text(),\"Location\")]//text()'\n self.item_path['age'] = './/*[contains(text(),\"Age\")]//text()'\n self.item_path['user_comment_num'] = './/*[contains(text(),\"Posts\")]//text()'\n self.item_path['points'] = './/*[contains(text(),\"Thanks Meter\")]//text()'\n self.item_path['title'] = './div/div/div/table/tr[2]/td[2]/div[1]/strong/text()'\n self.item_path['time'] = './div/div/div/table/tr[1]/td[1]/text()'\n self.item_path['main_body'] = './div/div/div/table/tr[2]/td[2]/div/text()'\n self.item_path['floor'] = './div/div/div/table/tr[1]/td[2]/a/strong/text()'\n self.item_url = '/html/body/div[3]/div[1]/div/table[1]/tr/td[2]/div/table/tr/td[5]/a/@href'\n\n def parse(self, response):\n \"\"\"构造帖子页请求\"\"\"\n forums = response.xpath(self.forum_list_xpath)\n for forum in forums:\n forum_url = forum.xpath(self.forum_url_xpath).extract_first()\n if forum_url:\n yield response.follow(forum_url, callback=self.parse_post)\n\n def parse_post(self, response):\n \"\"\"构造评论页请求\"\"\"\n posts = response.xpath(self.post_list_xpath)\n for post in posts:\n post_url = post.xpath(self.post_url_xpath).extract_first()\n if post_url:\n comment_num = post.xpath(self.post_comment_num).extract_first()\n view_num = post.xpath(self.post_view_num).extract_first()\n yield response.follow(post_url, callback=self.parse_comment, meta={'comment_num': comment_num, 'view_num': view_num})\n post_list_url = response.xpath(self.post_list_url_xpath).extract_first()\n if post_list_url:\n yield response.follow(post_list_url, callback=self.parse_post)\n\n def parse_comment(self, response):\n \"\"\"解析评论页数据以及构造下一评论页请求\"\"\"\n if response.meta.get('post_id'):\n post_id = response.meta['post_id']\n else:\n post_id = None\n comments = response.xpath(self.item_list_path)\n if comments:\n for comment_data in comments[:-1]:\n item = DataLoader(item=dict(), selector=comment_data)\n for field, xpath in self.item_path.items():\n item.add_xpath(field, xpath)\n item.add_value('content_url', response.url)\n item = item.load_item()\n if item.get('floor') == '1':\n item['content_comment_num'] = response.meta['comment_num']\n item['view_num'] = response.meta['view_num']\n yield self.padding_item(item, -1)\n post_id = item['content_id']\n else:\n yield self.padding_item(item, post_id)\n item_url = response.xpath(self.item_url).extract_first()\n if item_url:\n yield response.follow(item_url, callback=self.parse_comment, meta={'post_id': post_id})\n", "source": "the_stack_v2_python_sparse", "source_path": "wujian/gsmhosting/gsmhosting/spiders/gsmhosting.py", "source_repo": "wangsanshi123/spiders", "split": "val", "star_events_count": 0}
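The spider's __init__ above builds a field-name to XPath table (item_path), and parse_comment simply loops over it to populate each item. That table-driven extraction works outside Scrapy as well; a small sketch with lxml, using made-up markup and XPaths rather than the gsmhosting ones:

    from lxml import html

    # Made-up post markup and XPath table, standing in for the spider's item_path.
    doc = html.fromstring(
        '<div class="post"><span class="user">alice</span>'
        '<span class="floor">1</span></div>')
    item_path = {
        'user_name': './/span[@class="user"]/text()',
        'floor': './/span[@class="floor"]/text()',
    }
    item = {field: doc.xpath(xpath) for field, xpath in item_path.items()}
    print(item)  # {'user_name': ['alice'], 'floor': ['1']}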
{"blob_id": "51d8ddf36e8da35e289a8c71f068a7de11866157", "bodies": ["prime_factor_5_num = 0\nwhile n / 5 == n / float(5):\n prime_factor_5_num += 1\n n /= 5\nreturn prime_factor_5_num", "trailing_zero_numbers = 0\nfor i in range(1, n + 1):\n prime_factor_5_num = self.count_prime_factor_5(i)\n trailing_zero_numbers += prime_factor_5_num\nreturn trailing_zero_numbers"], "bodies_text": "<|body_start_0|>\n prime_factor_5_num = 0\n while n / 5 == n / float(5):\n prime_factor_5_num += 1\n n /= 5\n return prime_factor_5_num\n<|end_body_0|>\n\n<|body_start_1|>\n trailing_zero_numbers = 0\n for i in range(1, n + 1):\n prime_factor_5_num = self.count_prime_factor_5(i)\n trailing_zero_numbers += prime_factor_5_num\n return trailing_zero_numbers\n<|end_body_1|>\n", "class_docstring": "Status: Time Limit Exceeded Submitted: 0 minutes ago Last executed input: 8362 It's better than solution1, but not good enough.", "class_name": "SolutionFailed_2", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SolutionFailed_2:\n \"\"\"Status: Time Limit Exceeded Submitted: 0 minutes ago Last executed input: 8362 It's better than solution1, but not good enough.\"\"\"\n\n def count_prime_factor_5(self, n):\n \"\"\"Count prime factor 5 for positive intergers. 0 is not accepted.\"\"\"\n <|body_0|>\n\n def trailingZeroes(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n prime_factor_5_num = 0\n while n / 5 == n / float(5):\n prime_factor_5_num += 1\n n /= 5\n return prime_factor_5_num\n<|end_body_0|>\n\n<|body_start_1|>\n trailing_zero_numbers = 0\n for i in range(1, n + 1):\n prime_factor_5_num = self.count_prime_factor_5(i)\n trailing_zero_numbers += prime_factor_5_num\n return trailing_zero_numbers\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000399", "length_bytes": 7335, "license_type": "no_license", "methods": [{"docstring": "Count prime factor 5 for positive intergers. 0 is not accepted.", "name": "count_prime_factor_5", "signature": "def count_prime_factor_5(self, n)"}, {"docstring": ":type n: int :rtype: int", "name": "trailingZeroes", "signature": "def trailingZeroes(self, n)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005276", "prompt": "Implement the Python class `SolutionFailed_2` described below.\n\nClass description:\nStatus: Time Limit Exceeded Submitted: 0 minutes ago Last executed input: 8362 It's better than solution1, but not good enough.\n\nMethod signatures and docstrings:\n- def count_prime_factor_5(self, n): Count prime factor 5 for positive intergers. 0 is not accepted.\n- def trailingZeroes(self, n): :type n: int :rtype: int", "prompted_full_text": "Implement the Python class `SolutionFailed_2` described below.\n\nClass description:\nStatus: Time Limit Exceeded Submitted: 0 minutes ago Last executed input: 8362 It's better than solution1, but not good enough.\n\nMethod signatures and docstrings:\n- def count_prime_factor_5(self, n): Count prime factor 5 for positive intergers. 0 is not accepted.\n- def trailingZeroes(self, n): :type n: int :rtype: int\n\n<|skeleton|>\nclass SolutionFailed_2:\n \"\"\"Status: Time Limit Exceeded Submitted: 0 minutes ago Last executed input: 8362 It's better than solution1, but not good enough.\"\"\"\n\n def count_prime_factor_5(self, n):\n \"\"\"Count prime factor 5 for positive intergers. 
0 is not accepted.\"\"\"\n <|body_0|>\n\n def trailingZeroes(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n prime_factor_5_num = 0\n while n / 5 == n / float(5):\n prime_factor_5_num += 1\n n /= 5\n return prime_factor_5_num\n<|end_body_0|>\n\n<|body_start_1|>\n trailing_zero_numbers = 0\n for i in range(1, n + 1):\n prime_factor_5_num = self.count_prime_factor_5(i)\n trailing_zero_numbers += prime_factor_5_num\n return trailing_zero_numbers\n<|end_body_1|>\n", "revision_id": "2a7401c6e407db533877de6e20a2b523f7964fdb", "skeleton": "<|skeleton|>\nclass SolutionFailed_2:\n \"\"\"Status: Time Limit Exceeded Submitted: 0 minutes ago Last executed input: 8362 It's better than solution1, but not good enough.\"\"\"\n\n def count_prime_factor_5(self, n):\n \"\"\"Count prime factor 5 for positive intergers. 0 is not accepted.\"\"\"\n <|body_0|>\n\n def trailingZeroes(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SolutionFailed_2:\n \"\"\"Status: Time Limit Exceeded Submitted: 0 minutes ago Last executed input: 8362 It's better than solution1, but not good enough.\"\"\"\n\n def count_prime_factor_5(self, n):\n \"\"\"Count prime factor 5 for positive intergers. 0 is not accepted.\"\"\"\n prime_factor_5_num = 0\n while n / 5 == n / float(5):\n prime_factor_5_num += 1\n n /= 5\n return prime_factor_5_num\n\n def trailingZeroes(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n trailing_zero_numbers = 0\n for i in range(1, n + 1):\n prime_factor_5_num = self.count_prime_factor_5(i)\n trailing_zero_numbers += prime_factor_5_num\n return trailing_zero_numbers\n", "source": "the_stack_v2_python_sparse", "source_path": "THEORIES/algorithm/leetcode/Y172_Factorial_Trailing_Zeroes.py", "source_repo": "bb2qqq/tech_notes", "split": "val", "star_events_count": 0}
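The class docstring in the record above already flags this solution as Time Limit Exceeded: it factors every integer from 1 to n separately. The standard fix counts the factor-5s in n! directly with Legendre's formula, floor(n/5) + floor(n/25) + floor(n/125) + ..., which runs in O(log n). A sketch of that approach (not part of the record):

    def trailing_zeroes(n):
        # Trailing zeros of n! = count of factor 5s in 1..n (factor 2s are never scarcer).
        count = 0
        power = 5
        while power <= n:
            count += n // power  # multiples of 5, then of 25, then of 125, ...
            power *= 5
        return count

    print(trailing_zeroes(25))  # 6: each of 5,10,15,20 contributes one 5; 25 contributes two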
{"blob_id": "cb0478a205e80701d2e4e66815ecfa700d6940c5", "bodies": ["cmd = new_object(createAccountCmd, account)\ncreated = self.api_client().createAccount(cmd, method='POST')\nreturn new_object(Account, created, set_all=True)", "cmd = new_object(updateAccountCmd, account)\nupdated = self.api_client().updateAccount(cmd, method='POST')\nreturn new_object(Account, updated, set_all=True)", "if isinstance(account, int):\n account_id = account\nelse:\n account_id = account.id\ncmd = deleteAccountCmd()\ncmd.id = account_id\nresult = self.api_client().deleteAccount(cmd, method='POST')\nif not result.success:\n raise CloudstackAPIFailureException('deletion failed for id %s' % account_id)", "cmd = listAccountsCmd()\ncmd.listall = True\nresults = self.api_client().listAccounts(cmd)\nfor account in args:\n for result in results:\n if account.name == result.name:\n self.delete(result)", "cmd = listAccountsCmd()\ncopy_to_object(cmd, kwargs)\nresults = self.api_client().listAccounts(cmd)\nif results is None:\n return []\nreturn list([new_object(Account, a, set_all=True) for a in results])", "if not 'listall' in kwargs:\n kwargs = dict(kwargs)\n kwargs['listall'] = True\nresults = self.list(**kwargs)\nmatches = len(results)\nif matches == 0:\n raise CloudstackNoResultsException('No results found matching %s' % repr(kwargs))\nelif matches == 1:\n return results[0]\nelse:\n raise CloudstackMultipleResultsException('%s results found matching %s' % (matches, repr(kwargs)))"], "bodies_text": "<|body_start_0|>\n cmd = new_object(createAccountCmd, account)\n created = self.api_client().createAccount(cmd, method='POST')\n return new_object(Account, created, set_all=True)\n<|end_body_0|>\n\n<|body_start_1|>\n cmd = new_object(updateAccountCmd, account)\n updated = self.api_client().updateAccount(cmd, method='POST')\n return new_object(Account, updated, set_all=True)\n<|end_body_1|>\n\n<|body_start_2|>\n if isinstance(account, int):\n account_id = account\n else:\n account_id = account.id\n cmd = deleteAccountCmd()\n cmd.id = account_id\n result = self.api_client().deleteAccount(cmd, method='POST')\n if not result.success:\n raise CloudstackAPIFailureException('deletion failed for id %s' % account_id)\n<|end_body_2|>\n\n<|body_start_3|>\n cmd = listAccountsCmd()\n cmd.listall = True\n results = self.api_client().listAccounts(cmd)\n for account in args:\n for result in results:\n if account.name == result.name:\n self.delete(result)\n<|end_body_3|>\n\n<|body_start_4|>\n cmd = listAccountsCmd()\n copy_to_object(cmd, kwargs)\n results = self.api_client().listAccounts(cmd)\n if results is None:\n return []\n return list([new_object(Account, a, set_all=True) for a in results])\n<|end_body_4|>\n\n<|body_start_5|>\n if not 'listall' in kwargs:\n kwargs = dict(kwargs)\n kwargs['listall'] = True\n results = self.list(**kwargs)\n matches = len(results)\n if matches == 0:\n raise CloudstackNoResultsException('No results found matching %s' % repr(kwargs))\n elif matches == 1:\n return results[0]\n else:\n raise CloudstackMultipleResultsException('%s results found matching %s' % (matches, repr(kwargs)))\n<|end_body_5|>\n", "class_docstring": "", "class_name": "AccountAPI", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AccountAPI:\n\n def create(self, account):\n \"\"\":type account: Account\"\"\"\n <|body_0|>\n\n def update(self, account):\n \"\"\":type account: Account\"\"\"\n <|body_1|>\n\n def delete(self, account):\n \"\"\":type account: 
Account|int\"\"\"\n <|body_2|>\n\n def delete_all(self, *args):\n \"\"\":type *args: list[Account]\"\"\"\n <|body_3|>\n\n def list(self, **kwargs):\n \"\"\":rtype: collections.Sequence[Account]\"\"\"\n <|body_4|>\n\n def find(self, **kwargs):\n \"\"\":rtype: Account\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cmd = new_object(createAccountCmd, account)\n created = self.api_client().createAccount(cmd, method='POST')\n return new_object(Account, created, set_all=True)\n<|end_body_0|>\n\n<|body_start_1|>\n cmd = new_object(updateAccountCmd, account)\n updated = self.api_client().updateAccount(cmd, method='POST')\n return new_object(Account, updated, set_all=True)\n<|end_body_1|>\n\n<|body_start_2|>\n if isinstance(account, int):\n account_id = account\n else:\n account_id = account.id\n cmd = deleteAccountCmd()\n cmd.id = account_id\n result = self.api_client().deleteAccount(cmd, method='POST')\n if not result.success:\n raise CloudstackAPIFailureException('deletion failed for id %s' % account_id)\n<|end_body_2|>\n\n<|body_start_3|>\n cmd = listAccountsCmd()\n cmd.listall = True\n results = self.api_client().listAccounts(cmd)\n for account in args:\n for result in results:\n if account.name == result.name:\n self.delete(result)\n<|end_body_3|>\n\n<|body_start_4|>\n cmd = listAccountsCmd()\n copy_to_object(cmd, kwargs)\n results = self.api_client().listAccounts(cmd)\n if results is None:\n return []\n return list([new_object(Account, a, set_all=True) for a in results])\n<|end_body_4|>\n\n<|body_start_5|>\n if not 'listall' in kwargs:\n kwargs = dict(kwargs)\n kwargs['listall'] = True\n results = self.list(**kwargs)\n matches = len(results)\n if matches == 0:\n raise CloudstackNoResultsException('No results found matching %s' % repr(kwargs))\n elif matches == 1:\n return results[0]\n else:\n raise CloudstackMultipleResultsException('%s results found matching %s' % (matches, repr(kwargs)))\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000400", "length_bytes": 3502, "license_type": "permissive", "methods": [{"docstring": ":type account: Account", "name": "create", "signature": "def create(self, account)"}, {"docstring": ":type account: Account", "name": "update", "signature": "def update(self, account)"}, {"docstring": ":type account: Account|int", "name": "delete", "signature": "def delete(self, account)"}, {"docstring": ":type *args: list[Account]", "name": "delete_all", "signature": "def delete_all(self, *args)"}, {"docstring": ":rtype: collections.Sequence[Account]", "name": "list", "signature": "def list(self, **kwargs)"}, {"docstring": ":rtype: Account", "name": "find", "signature": "def find(self, **kwargs)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_train_008993", "prompt": "Implement the Python class `AccountAPI` described below.\n\nClass description:\nImplement the AccountAPI class.\n\nMethod signatures and docstrings:\n- def create(self, account): :type account: Account\n- def update(self, account): :type account: Account\n- def delete(self, account): :type account: Account|int\n- def delete_all(self, *args): :type *args: list[Account]\n- def list(self, **kwargs): :rtype: collections.Sequence[Account]\n- def find(self, **kwargs): :rtype: Account", "prompted_full_text": "Implement the Python class `AccountAPI` described below.\n\nClass description:\nImplement the AccountAPI class.\n\nMethod signatures and docstrings:\n- def create(self, account): :type account: Account\n- def update(self, account): :type account: Account\n- def 
delete(self, account): :type account: Account|int\n- def delete_all(self, *args): :type *args: list[Account]\n- def list(self, **kwargs): :rtype: collections.Sequence[Account]\n- def find(self, **kwargs): :rtype: Account\n\n<|skeleton|>\nclass AccountAPI:\n\n def create(self, account):\n \"\"\":type account: Account\"\"\"\n <|body_0|>\n\n def update(self, account):\n \"\"\":type account: Account\"\"\"\n <|body_1|>\n\n def delete(self, account):\n \"\"\":type account: Account|int\"\"\"\n <|body_2|>\n\n def delete_all(self, *args):\n \"\"\":type *args: list[Account]\"\"\"\n <|body_3|>\n\n def list(self, **kwargs):\n \"\"\":rtype: collections.Sequence[Account]\"\"\"\n <|body_4|>\n\n def find(self, **kwargs):\n \"\"\":rtype: Account\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cmd = new_object(createAccountCmd, account)\n created = self.api_client().createAccount(cmd, method='POST')\n return new_object(Account, created, set_all=True)\n<|end_body_0|>\n\n<|body_start_1|>\n cmd = new_object(updateAccountCmd, account)\n updated = self.api_client().updateAccount(cmd, method='POST')\n return new_object(Account, updated, set_all=True)\n<|end_body_1|>\n\n<|body_start_2|>\n if isinstance(account, int):\n account_id = account\n else:\n account_id = account.id\n cmd = deleteAccountCmd()\n cmd.id = account_id\n result = self.api_client().deleteAccount(cmd, method='POST')\n if not result.success:\n raise CloudstackAPIFailureException('deletion failed for id %s' % account_id)\n<|end_body_2|>\n\n<|body_start_3|>\n cmd = listAccountsCmd()\n cmd.listall = True\n results = self.api_client().listAccounts(cmd)\n for account in args:\n for result in results:\n if account.name == result.name:\n self.delete(result)\n<|end_body_3|>\n\n<|body_start_4|>\n cmd = listAccountsCmd()\n copy_to_object(cmd, kwargs)\n results = self.api_client().listAccounts(cmd)\n if results is None:\n return []\n return list([new_object(Account, a, set_all=True) for a in results])\n<|end_body_4|>\n\n<|body_start_5|>\n if not 'listall' in kwargs:\n kwargs = dict(kwargs)\n kwargs['listall'] = True\n results = self.list(**kwargs)\n matches = len(results)\n if matches == 0:\n raise CloudstackNoResultsException('No results found matching %s' % repr(kwargs))\n elif matches == 1:\n return results[0]\n else:\n raise CloudstackMultipleResultsException('%s results found matching %s' % (matches, repr(kwargs)))\n<|end_body_5|>\n", "revision_id": "14e2d0186370cf2a23829d44068be1a48173ec65", "skeleton": "<|skeleton|>\nclass AccountAPI:\n\n def create(self, account):\n \"\"\":type account: Account\"\"\"\n <|body_0|>\n\n def update(self, account):\n \"\"\":type account: Account\"\"\"\n <|body_1|>\n\n def delete(self, account):\n \"\"\":type account: Account|int\"\"\"\n <|body_2|>\n\n def delete_all(self, *args):\n \"\"\":type *args: list[Account]\"\"\"\n <|body_3|>\n\n def list(self, **kwargs):\n \"\"\":rtype: collections.Sequence[Account]\"\"\"\n <|body_4|>\n\n def find(self, **kwargs):\n \"\"\":rtype: Account\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class AccountAPI:\n def create(self, account):\n \"\"\":type account: Account\"\"\"\n cmd = new_object(createAccountCmd, account)\n created = self.api_client().createAccount(cmd, method='POST')\n return new_object(Account, created, set_all=True)\n\n def update(self, account):\n \"\"\":type account: Account\"\"\"\n cmd = 
new_object(updateAccountCmd, account)\n updated = self.api_client().updateAccount(cmd, method='POST')\n return new_object(Account, updated, set_all=True)\n\n def delete(self, account):\n \"\"\":type account: Account|int\"\"\"\n if isinstance(account, int):\n account_id = account\n else:\n account_id = account.id\n cmd = deleteAccountCmd()\n cmd.id = account_id\n result = self.api_client().deleteAccount(cmd, method='POST')\n if not result.success:\n raise CloudstackAPIFailureException('deletion failed for id %s' % account_id)\n\n def delete_all(self, *args):\n \"\"\":type *args: list[Account]\"\"\"\n cmd = listAccountsCmd()\n cmd.listall = True\n results = self.api_client().listAccounts(cmd)\n for account in args:\n for result in results:\n if account.name == result.name:\n self.delete(result)\n\n def list(self, **kwargs):\n \"\"\":rtype: collections.Sequence[Account]\"\"\"\n cmd = listAccountsCmd()\n copy_to_object(cmd, kwargs)\n results = self.api_client().listAccounts(cmd)\n if results is None:\n return []\n return list([new_object(Account, a, set_all=True) for a in results])\n\n def find(self, **kwargs):\n \"\"\":rtype: Account\"\"\"\n if not 'listall' in kwargs:\n kwargs = dict(kwargs)\n kwargs['listall'] = True\n results = self.list(**kwargs)\n matches = len(results)\n if matches == 0:\n raise CloudstackNoResultsException('No results found matching %s' % repr(kwargs))\n elif matches == 1:\n return results[0]\n else:\n raise CloudstackMultipleResultsException('%s results found matching %s' % (matches, repr(kwargs)))\n", "source": "the_stack_v2_python_sparse", "source_path": "csapi/account.py", "source_repo": "lsimons/cstest", "split": "val", "star_events_count": 0}
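The find method in the record above is a common exactly-one lookup: run the list call, then fail loudly on zero or multiple matches instead of silently picking one. The same contract in a generic, dependency-free form (the names here are illustrative, not part of the csapi module):

    def find_one(items, **filters):
        # Return the single item whose attributes match every filter, else raise.
        matches = [item for item in items
                   if all(getattr(item, key, None) == value
                          for key, value in filters.items())]
        if len(matches) == 0:
            raise LookupError('No results found matching %r' % (filters,))
        if len(matches) > 1:
            raise LookupError('%d results found matching %r' % (len(matches), filters))
        return matches[0]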
{"blob_id": "8e43b38f3de0f04edfb7c836634d04ff97584180", "bodies": ["result = 0\nfor i in range(len(points)):\n group = collections.defaultdict(int)\n for j in range(len(points)):\n if j == i:\n continue\n dx, dy = (points[i][0] - points[j][0], points[i][1] - points[j][1])\n group[dx ** 2 + dy ** 2] += 1\n for _, v in group.items():\n if v > 1:\n result += v * (v - 1)\nreturn result", "cnt = 0\nfor a, i in enumerate(points):\n dis_list = []\n for b, k in enumerate(points[:a] + points[a + 1:]):\n dis_list.append((k[0] - i[0]) ** 2 + (k[1] - i[1]) ** 2)\n for z in list(collections.Counter(dis_list).values()):\n if z > 1:\n cnt += z * (z - 1)\nreturn cnt"], "bodies_text": "<|body_start_0|>\n result = 0\n for i in range(len(points)):\n group = collections.defaultdict(int)\n for j in range(len(points)):\n if j == i:\n continue\n dx, dy = (points[i][0] - points[j][0], points[i][1] - points[j][1])\n group[dx ** 2 + dy ** 2] += 1\n for _, v in group.items():\n if v > 1:\n result += v * (v - 1)\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n cnt = 0\n for a, i in enumerate(points):\n dis_list = []\n for b, k in enumerate(points[:a] + points[a + 1:]):\n dis_list.append((k[0] - i[0]) ** 2 + (k[1] - i[1]) ** 2)\n for z in list(collections.Counter(dis_list).values()):\n if z > 1:\n cnt += z * (z - 1)\n return cnt\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def numberOfBoomerangs(self, points):\n \"\"\":type points: List[List[int]] :rtype: int\"\"\"\n <|body_0|>\n\n def numberOfBoomerangs2(self, points):\n \"\"\":type points: List[List[int]] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n result = 0\n for i in range(len(points)):\n group = collections.defaultdict(int)\n for j in range(len(points)):\n if j == i:\n continue\n dx, dy = (points[i][0] - points[j][0], points[i][1] - points[j][1])\n group[dx ** 2 + dy ** 2] += 1\n for _, v in group.items():\n if v > 1:\n result += v * (v - 1)\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n cnt = 0\n for a, i in enumerate(points):\n dis_list = []\n for b, k in enumerate(points[:a] + points[a + 1:]):\n dis_list.append((k[0] - i[0]) ** 2 + (k[1] - i[1]) ** 2)\n for z in list(collections.Counter(dis_list).values()):\n if z > 1:\n cnt += z * (z - 1)\n return cnt\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000401", "length_bytes": 4025, "license_type": "no_license", "methods": [{"docstring": ":type points: List[List[int]] :rtype: int", "name": "numberOfBoomerangs", "signature": "def numberOfBoomerangs(self, points)"}, {"docstring": ":type points: List[List[int]] :rtype: int", "name": "numberOfBoomerangs2", "signature": "def numberOfBoomerangs2(self, points)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_029389", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def numberOfBoomerangs(self, points): :type points: List[List[int]] :rtype: int\n- def numberOfBoomerangs2(self, points): :type points: List[List[int]] :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def numberOfBoomerangs(self, points): :type points: List[List[int]] :rtype: int\n- def numberOfBoomerangs2(self, points): :type points: 
List[List[int]] :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def numberOfBoomerangs(self, points):\n \"\"\":type points: List[List[int]] :rtype: int\"\"\"\n <|body_0|>\n\n def numberOfBoomerangs2(self, points):\n \"\"\":type points: List[List[int]] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n result = 0\n for i in range(len(points)):\n group = collections.defaultdict(int)\n for j in range(len(points)):\n if j == i:\n continue\n dx, dy = (points[i][0] - points[j][0], points[i][1] - points[j][1])\n group[dx ** 2 + dy ** 2] += 1\n for _, v in group.items():\n if v > 1:\n result += v * (v - 1)\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n cnt = 0\n for a, i in enumerate(points):\n dis_list = []\n for b, k in enumerate(points[:a] + points[a + 1:]):\n dis_list.append((k[0] - i[0]) ** 2 + (k[1] - i[1]) ** 2)\n for z in list(collections.Counter(dis_list).values()):\n if z > 1:\n cnt += z * (z - 1)\n return cnt\n<|end_body_1|>\n", "revision_id": "035ef08434fa1ca781a6fb2f9eed3538b7d20c02", "skeleton": "<|skeleton|>\nclass Solution:\n\n def numberOfBoomerangs(self, points):\n \"\"\":type points: List[List[int]] :rtype: int\"\"\"\n <|body_0|>\n\n def numberOfBoomerangs2(self, points):\n \"\"\":type points: List[List[int]] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def numberOfBoomerangs(self, points):\n \"\"\":type points: List[List[int]] :rtype: int\"\"\"\n result = 0\n for i in range(len(points)):\n group = collections.defaultdict(int)\n for j in range(len(points)):\n if j == i:\n continue\n dx, dy = (points[i][0] - points[j][0], points[i][1] - points[j][1])\n group[dx ** 2 + dy ** 2] += 1\n for _, v in group.items():\n if v > 1:\n result += v * (v - 1)\n return result\n\n def numberOfBoomerangs2(self, points):\n \"\"\":type points: List[List[int]] :rtype: int\"\"\"\n cnt = 0\n for a, i in enumerate(points):\n dis_list = []\n for b, k in enumerate(points[:a] + points[a + 1:]):\n dis_list.append((k[0] - i[0]) ** 2 + (k[1] - i[1]) ** 2)\n for z in list(collections.Counter(dis_list).values()):\n if z > 1:\n cnt += z * (z - 1)\n return cnt\n", "source": "the_stack_v2_python_sparse", "source_path": "leetcode_python/Hash_table/number-of-boomerangs.py", "source_repo": "yennanliu/CS_basics", "split": "val", "star_events_count": 64}
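Both bodies in the record above hinge on one counting step: if v other points lie at the same distance from a point i, they yield v * (v - 1) ordered boomerang pairs (pick j, then a different k, among the v). A small brute-force check of that identity on a hand-made input:

    import collections
    import itertools

    points = [(0, 0), (1, 0), (-1, 0), (0, 1)]

    def dist2(a, b):  # squared distance avoids floats, as in the record
        return (a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2

    # Direct count of ordered triples (i, j, k) with d(i,j) == d(i,k).
    brute = sum(1 for i, j, k in itertools.permutations(range(len(points)), 3)
                if dist2(points[i], points[j]) == dist2(points[i], points[k]))

    # Grouped count: v equidistant points contribute v * (v - 1) pairs.
    grouped = 0
    for a, p in enumerate(points):
        groups = collections.Counter(
            dist2(p, q) for b, q in enumerate(points) if b != a)
        grouped += sum(v * (v - 1) for v in groups.values())

    print(brute, grouped)  # 8 8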
{"blob_id": "2397703ef22e296c010110c84d6e51e3bedf26e4", "bodies": ["cache = {0: (0, True)}\nfor c in coins:\n cache[c] = (1, True)\nself.cacheHit = 0\nself.minChange = {}\n\ndef cc(coins, amount):\n if amount in cache:\n self.cacheHit += 1\n return cache[amount]\n for c in coins:\n if amount >= c:\n cache.get(amount - c)\n num, Flag = cc(coins, amount - c)\n if Flag:\n minc, flag = cache.get(amount, (0, False))\n if not flag or 1 + num < minc:\n cache[amount] = (num + 1, True)\n if amount not in cache:\n cache[amount] = (0, False)\n return cache[amount]\nnum, flag = cc(sorted(coins, reverse=True), amount)\nprint('cacheHit, ', self.cacheHit)\nif flag:\n return num\nelse:\n return -1", "dp = [amount + 1 for _ in range(amount + 1)]\ndp[0] = 0\nfor i in range(1, amount + 1):\n for c in coins:\n if c <= i:\n dp[i] = min(dp[i], dp[i - c] + 1)\nreturn -1 if dp[amount] > amount else dp[amount]"], "bodies_text": "<|body_start_0|>\n cache = {0: (0, True)}\n for c in coins:\n cache[c] = (1, True)\n self.cacheHit = 0\n self.minChange = {}\n\n def cc(coins, amount):\n if amount in cache:\n self.cacheHit += 1\n return cache[amount]\n for c in coins:\n if amount >= c:\n cache.get(amount - c)\n num, Flag = cc(coins, amount - c)\n if Flag:\n minc, flag = cache.get(amount, (0, False))\n if not flag or 1 + num < minc:\n cache[amount] = (num + 1, True)\n if amount not in cache:\n cache[amount] = (0, False)\n return cache[amount]\n num, flag = cc(sorted(coins, reverse=True), amount)\n print('cacheHit, ', self.cacheHit)\n if flag:\n return num\n else:\n return -1\n<|end_body_0|>\n\n<|body_start_1|>\n dp = [amount + 1 for _ in range(amount + 1)]\n dp[0] = 0\n for i in range(1, amount + 1):\n for c in coins:\n if c <= i:\n dp[i] = min(dp[i], dp[i - c] + 1)\n return -1 if dp[amount] > amount else dp[amount]\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def coinChange(self, coins, amount):\n \"\"\":type coins: List[int] :type amount: int :rtype: int\"\"\"\n <|body_0|>\n\n def coinChange(self, coins, amount):\n \"\"\":type coins: List[int] :type amount: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cache = {0: (0, True)}\n for c in coins:\n cache[c] = (1, True)\n self.cacheHit = 0\n self.minChange = {}\n\n def cc(coins, amount):\n if amount in cache:\n self.cacheHit += 1\n return cache[amount]\n for c in coins:\n if amount >= c:\n cache.get(amount - c)\n num, Flag = cc(coins, amount - c)\n if Flag:\n minc, flag = cache.get(amount, (0, False))\n if not flag or 1 + num < minc:\n cache[amount] = (num + 1, True)\n if amount not in cache:\n cache[amount] = (0, False)\n return cache[amount]\n num, flag = cc(sorted(coins, reverse=True), amount)\n print('cacheHit, ', self.cacheHit)\n if flag:\n return num\n else:\n return -1\n<|end_body_0|>\n\n<|body_start_1|>\n dp = [amount + 1 for _ in range(amount + 1)]\n dp[0] = 0\n for i in range(1, amount + 1):\n for c in coins:\n if c <= i:\n dp[i] = min(dp[i], dp[i - c] + 1)\n return -1 if dp[amount] > amount else dp[amount]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000402", "length_bytes": 2777, "license_type": "no_license", "methods": [{"docstring": ":type coins: List[int] :type amount: int :rtype: int", "name": "coinChange", "signature": "def coinChange(self, coins, amount)"}, {"docstring": ":type coins: List[int] :type amount: int :rtype: int", "name": "coinChange", "signature": 
"def coinChange(self, coins, amount)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_045870", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def coinChange(self, coins, amount): :type coins: List[int] :type amount: int :rtype: int\n- def coinChange(self, coins, amount): :type coins: List[int] :type amount: int :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def coinChange(self, coins, amount): :type coins: List[int] :type amount: int :rtype: int\n- def coinChange(self, coins, amount): :type coins: List[int] :type amount: int :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def coinChange(self, coins, amount):\n \"\"\":type coins: List[int] :type amount: int :rtype: int\"\"\"\n <|body_0|>\n\n def coinChange(self, coins, amount):\n \"\"\":type coins: List[int] :type amount: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cache = {0: (0, True)}\n for c in coins:\n cache[c] = (1, True)\n self.cacheHit = 0\n self.minChange = {}\n\n def cc(coins, amount):\n if amount in cache:\n self.cacheHit += 1\n return cache[amount]\n for c in coins:\n if amount >= c:\n cache.get(amount - c)\n num, Flag = cc(coins, amount - c)\n if Flag:\n minc, flag = cache.get(amount, (0, False))\n if not flag or 1 + num < minc:\n cache[amount] = (num + 1, True)\n if amount not in cache:\n cache[amount] = (0, False)\n return cache[amount]\n num, flag = cc(sorted(coins, reverse=True), amount)\n print('cacheHit, ', self.cacheHit)\n if flag:\n return num\n else:\n return -1\n<|end_body_0|>\n\n<|body_start_1|>\n dp = [amount + 1 for _ in range(amount + 1)]\n dp[0] = 0\n for i in range(1, amount + 1):\n for c in coins:\n if c <= i:\n dp[i] = min(dp[i], dp[i - c] + 1)\n return -1 if dp[amount] > amount else dp[amount]\n<|end_body_1|>\n", "revision_id": "2a29426be1d690b6f90bc45b437900deee46d832", "skeleton": "<|skeleton|>\nclass Solution:\n\n def coinChange(self, coins, amount):\n \"\"\":type coins: List[int] :type amount: int :rtype: int\"\"\"\n <|body_0|>\n\n def coinChange(self, coins, amount):\n \"\"\":type coins: List[int] :type amount: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def coinChange(self, coins, amount):\n \"\"\":type coins: List[int] :type amount: int :rtype: int\"\"\"\n cache = {0: (0, True)}\n for c in coins:\n cache[c] = (1, True)\n self.cacheHit = 0\n self.minChange = {}\n\n def cc(coins, amount):\n if amount in cache:\n self.cacheHit += 1\n return cache[amount]\n for c in coins:\n if amount >= c:\n cache.get(amount - c)\n num, Flag = cc(coins, amount - c)\n if Flag:\n minc, flag = cache.get(amount, (0, False))\n if not flag or 1 + num < minc:\n cache[amount] = (num + 1, True)\n if amount not in cache:\n cache[amount] = (0, False)\n return cache[amount]\n num, flag = cc(sorted(coins, reverse=True), amount)\n print('cacheHit, ', self.cacheHit)\n if flag:\n return num\n else:\n return -1\n\n def coinChange(self, coins, amount):\n \"\"\":type coins: List[int] :type amount: int :rtype: int\"\"\"\n dp = [amount + 1 for _ in range(amount + 1)]\n dp[0] = 0\n for i in range(1, amount + 1):\n for c in coins:\n if c <= i:\n dp[i] = min(dp[i], 
dp[i - c] + 1)\n return -1 if dp[amount] > amount else dp[amount]\n", "source": "the_stack_v2_python_sparse", "source_path": "src/leet/322-coin-change.py", "source_repo": "sevenseablue/leetcode", "split": "val", "star_events_count": 0}
{"blob_id": "68d0fef31cabf173e2c550d0d890103ae32e8051", "bodies": ["tape = s\npattern = p\nm = len(tape)\nn = len(pattern)\ndp = [[False for _ in xrange(n + 1)] for _ in xrange(m + 1)]\ndp[m][n] = True\nfor j in xrange(n - 1, -1, -1):\n if pattern[j] == '*':\n dp[m][j] = dp[m][j + 1]\nfor i in xrange(m - 1, -1, -1):\n for j in xrange(n - 1, -1, -1):\n if tape[i] == pattern[j] or pattern[j] == '?':\n dp[i][j] = dp[i + 1][j + 1]\n elif pattern[j] == '*':\n dp[i][j] = dp[i][j + 1] or dp[i + 1][j]\n else:\n dp[i][j] = False\nreturn dp[0][0]", "tape = s\npattern = p\nm = len(tape)\nn = len(pattern)\nif n - list(pattern).count('*') > m:\n return False\ndp = [False for _ in xrange(m + 1)]\ndp[0] = True\nfor j in xrange(1, n + 1):\n if pattern[j - 1] == '*':\n k = 0\n while k < m + 1 and dp[k] != True:\n k += 1\n for i in xrange(k, m + 1):\n dp[i] = True\n else:\n for i in xrange(m, 0, -1):\n dp[i] = dp[i - 1] and (tape[i - 1] == pattern[j - 1] or pattern[j - 1] == '?')\n dp[0] = dp[0] and pattern[j - 1] == '*'\nreturn dp[m]"], "bodies_text": "<|body_start_0|>\n tape = s\n pattern = p\n m = len(tape)\n n = len(pattern)\n dp = [[False for _ in xrange(n + 1)] for _ in xrange(m + 1)]\n dp[m][n] = True\n for j in xrange(n - 1, -1, -1):\n if pattern[j] == '*':\n dp[m][j] = dp[m][j + 1]\n for i in xrange(m - 1, -1, -1):\n for j in xrange(n - 1, -1, -1):\n if tape[i] == pattern[j] or pattern[j] == '?':\n dp[i][j] = dp[i + 1][j + 1]\n elif pattern[j] == '*':\n dp[i][j] = dp[i][j + 1] or dp[i + 1][j]\n else:\n dp[i][j] = False\n return dp[0][0]\n<|end_body_0|>\n\n<|body_start_1|>\n tape = s\n pattern = p\n m = len(tape)\n n = len(pattern)\n if n - list(pattern).count('*') > m:\n return False\n dp = [False for _ in xrange(m + 1)]\n dp[0] = True\n for j in xrange(1, n + 1):\n if pattern[j - 1] == '*':\n k = 0\n while k < m + 1 and dp[k] != True:\n k += 1\n for i in xrange(k, m + 1):\n dp[i] = True\n else:\n for i in xrange(m, 0, -1):\n dp[i] = dp[i - 1] and (tape[i - 1] == pattern[j - 1] or pattern[j - 1] == '?')\n dp[0] = dp[0] and pattern[j - 1] == '*'\n return dp[m]\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def isMatch_MLE(self, s, p):\n \"\"\"dp, similar to 011 Regular Expression Matching. 
Backward dp but Memory Limit Exceeds :param s: tape, an input string :param p: pattern, a pattern string :return: boolean\"\"\"\n <|body_0|>\n\n def isMatch_forward(self, s, p):\n \"\"\"\"?\" is not the problem \"*\" is the problem Forward dp: dp starting from -1 if pattern[j]!=\"*\", dp[i][j] = dp[i-1][j-1] && tape[i] matches pattern[j] if pattern[j]==\"*\", dp[i][j] = any(dp[m][j-1]) w.r.t m Compact the 2-D dp to 1-D dp: iterate through j, since we only need know j-1 state, thus dropping the dimension for j in dp :param s: tape, an input string :param p: pattern, a pattern string :return: boolean\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n tape = s\n pattern = p\n m = len(tape)\n n = len(pattern)\n dp = [[False for _ in xrange(n + 1)] for _ in xrange(m + 1)]\n dp[m][n] = True\n for j in xrange(n - 1, -1, -1):\n if pattern[j] == '*':\n dp[m][j] = dp[m][j + 1]\n for i in xrange(m - 1, -1, -1):\n for j in xrange(n - 1, -1, -1):\n if tape[i] == pattern[j] or pattern[j] == '?':\n dp[i][j] = dp[i + 1][j + 1]\n elif pattern[j] == '*':\n dp[i][j] = dp[i][j + 1] or dp[i + 1][j]\n else:\n dp[i][j] = False\n return dp[0][0]\n<|end_body_0|>\n\n<|body_start_1|>\n tape = s\n pattern = p\n m = len(tape)\n n = len(pattern)\n if n - list(pattern).count('*') > m:\n return False\n dp = [False for _ in xrange(m + 1)]\n dp[0] = True\n for j in xrange(1, n + 1):\n if pattern[j - 1] == '*':\n k = 0\n while k < m + 1 and dp[k] != True:\n k += 1\n for i in xrange(k, m + 1):\n dp[i] = True\n else:\n for i in xrange(m, 0, -1):\n dp[i] = dp[i - 1] and (tape[i - 1] == pattern[j - 1] or pattern[j - 1] == '?')\n dp[0] = dp[0] and pattern[j - 1] == '*'\n return dp[m]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000403", "length_bytes": 3437, "license_type": "permissive", "methods": [{"docstring": "dp, similar to 011 Regular Expression Matching. Backward dp but Memory Limit Exceeds :param s: tape, an input string :param p: pattern, a pattern string :return: boolean", "name": "isMatch_MLE", "signature": "def isMatch_MLE(self, s, p)"}, {"docstring": "\"?\" is not the problem \"*\" is the problem Forward dp: dp starting from -1 if pattern[j]!=\"*\", dp[i][j] = dp[i-1][j-1] && tape[i] matches pattern[j] if pattern[j]==\"*\", dp[i][j] = any(dp[m][j-1]) w.r.t m Compact the 2-D dp to 1-D dp: iterate through j, since we only need know j-1 state, thus dropping the dimension for j in dp :param s: tape, an input string :param p: pattern, a pattern string :return: boolean", "name": "isMatch_forward", "signature": "def isMatch_forward(self, s, p)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006826", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def isMatch_MLE(self, s, p): dp, similar to 011 Regular Expression Matching. 
Backward dp but Memory Limit Exceeds :param s: tape, an input string :param p: pattern, a pattern string :return: boolean\n- def isMatch_forward(self, s, p): \"?\" is not the problem \"*\" is the problem Forward dp: dp starting from -1 if pattern[j]!=\"*\", dp[i][j] = dp[i-1][j-1] && tape[i] matches pattern[j] if pattern[j]==\"*\", dp[i][j] = any(dp[m][j-1]) w.r.t m Compact the 2-D dp to 1-D dp: iterate through j, since we only need know j-1 state, thus dropping the dimension for j in dp :param s: tape, an input string :param p: pattern, a pattern string :return: boolean", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def isMatch_MLE(self, s, p): dp, similar to 011 Regular Expression Matching. Backward dp but Memory Limit Exceeds :param s: tape, an input string :param p: pattern, a pattern string :return: boolean\n- def isMatch_forward(self, s, p): \"?\" is not the problem \"*\" is the problem Forward dp: dp starting from -1 if pattern[j]!=\"*\", dp[i][j] = dp[i-1][j-1] && tape[i] matches pattern[j] if pattern[j]==\"*\", dp[i][j] = any(dp[m][j-1]) w.r.t m Compact the 2-D dp to 1-D dp: iterate through j, since we only need know j-1 state, thus dropping the dimension for j in dp :param s: tape, an input string :param p: pattern, a pattern string :return: boolean\n\n<|skeleton|>\nclass Solution:\n\n def isMatch_MLE(self, s, p):\n \"\"\"dp, similar to 011 Regular Expression Matching. Backward dp but Memory Limit Exceeds :param s: tape, an input string :param p: pattern, a pattern string :return: boolean\"\"\"\n <|body_0|>\n\n def isMatch_forward(self, s, p):\n \"\"\"\"?\" is not the problem \"*\" is the problem Forward dp: dp starting from -1 if pattern[j]!=\"*\", dp[i][j] = dp[i-1][j-1] && tape[i] matches pattern[j] if pattern[j]==\"*\", dp[i][j] = any(dp[m][j-1]) w.r.t m Compact the 2-D dp to 1-D dp: iterate through j, since we only need know j-1 state, thus dropping the dimension for j in dp :param s: tape, an input string :param p: pattern, a pattern string :return: boolean\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n tape = s\n pattern = p\n m = len(tape)\n n = len(pattern)\n dp = [[False for _ in xrange(n + 1)] for _ in xrange(m + 1)]\n dp[m][n] = True\n for j in xrange(n - 1, -1, -1):\n if pattern[j] == '*':\n dp[m][j] = dp[m][j + 1]\n for i in xrange(m - 1, -1, -1):\n for j in xrange(n - 1, -1, -1):\n if tape[i] == pattern[j] or pattern[j] == '?':\n dp[i][j] = dp[i + 1][j + 1]\n elif pattern[j] == '*':\n dp[i][j] = dp[i][j + 1] or dp[i + 1][j]\n else:\n dp[i][j] = False\n return dp[0][0]\n<|end_body_0|>\n\n<|body_start_1|>\n tape = s\n pattern = p\n m = len(tape)\n n = len(pattern)\n if n - list(pattern).count('*') > m:\n return False\n dp = [False for _ in xrange(m + 1)]\n dp[0] = True\n for j in xrange(1, n + 1):\n if pattern[j - 1] == '*':\n k = 0\n while k < m + 1 and dp[k] != True:\n k += 1\n for i in xrange(k, m + 1):\n dp[i] = True\n else:\n for i in xrange(m, 0, -1):\n dp[i] = dp[i - 1] and (tape[i - 1] == pattern[j - 1] or pattern[j - 1] == '?')\n dp[0] = dp[0] and pattern[j - 1] == '*'\n return dp[m]\n<|end_body_1|>\n", "revision_id": "cbbd4a67ab342ada2421e13f82d660b1d47d4d20", "skeleton": "<|skeleton|>\nclass Solution:\n\n def isMatch_MLE(self, s, p):\n \"\"\"dp, similar to 011 Regular Expression Matching. 
Backward dp but Memory Limit Exceeds :param s: tape, an input string :param p: pattern, a pattern string :return: boolean\"\"\"\n <|body_0|>\n\n def isMatch_forward(self, s, p):\n \"\"\"\"?\" is not the problem \"*\" is the problem Forward dp: dp starting from -1 if pattern[j]!=\"*\", dp[i][j] = dp[i-1][j-1] && tape[i] matches pattern[j] if pattern[j]==\"*\", dp[i][j] = any(dp[m][j-1]) w.r.t m Compact the 2-D dp to 1-D dp: iterate through j, since we only need know j-1 state, thus dropping the dimension for j in dp :param s: tape, an input string :param p: pattern, a pattern string :return: boolean\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def isMatch_MLE(self, s, p):\n \"\"\"dp, similar to 011 Regular Expression Matching. Backward dp but Memory Limit Exceeds :param s: tape, an input string :param p: pattern, a pattern string :return: boolean\"\"\"\n tape = s\n pattern = p\n m = len(tape)\n n = len(pattern)\n dp = [[False for _ in xrange(n + 1)] for _ in xrange(m + 1)]\n dp[m][n] = True\n for j in xrange(n - 1, -1, -1):\n if pattern[j] == '*':\n dp[m][j] = dp[m][j + 1]\n for i in xrange(m - 1, -1, -1):\n for j in xrange(n - 1, -1, -1):\n if tape[i] == pattern[j] or pattern[j] == '?':\n dp[i][j] = dp[i + 1][j + 1]\n elif pattern[j] == '*':\n dp[i][j] = dp[i][j + 1] or dp[i + 1][j]\n else:\n dp[i][j] = False\n return dp[0][0]\n\n def isMatch_forward(self, s, p):\n \"\"\"\"?\" is not the problem \"*\" is the problem Forward dp: dp starting from -1 if pattern[j]!=\"*\", dp[i][j] = dp[i-1][j-1] && tape[i] matches pattern[j] if pattern[j]==\"*\", dp[i][j] = any(dp[m][j-1]) w.r.t m Compact the 2-D dp to 1-D dp: iterate through j, since we only need know j-1 state, thus dropping the dimension for j in dp :param s: tape, an input string :param p: pattern, a pattern string :return: boolean\"\"\"\n tape = s\n pattern = p\n m = len(tape)\n n = len(pattern)\n if n - list(pattern).count('*') > m:\n return False\n dp = [False for _ in xrange(m + 1)]\n dp[0] = True\n for j in xrange(1, n + 1):\n if pattern[j - 1] == '*':\n k = 0\n while k < m + 1 and dp[k] != True:\n k += 1\n for i in xrange(k, m + 1):\n dp[i] = True\n else:\n for i in xrange(m, 0, -1):\n dp[i] = dp[i - 1] and (tape[i - 1] == pattern[j - 1] or pattern[j - 1] == '?')\n dp[0] = dp[0] and pattern[j - 1] == '*'\n return dp[m]\n", "source": "the_stack_v2_python_sparse", "source_path": "043 Wildcard Matching.py", "source_repo": "Aminaba123/LeetCode", "split": "val", "star_events_count": 1}
{"blob_id": "e7db5105c4911d0b308635db8bdfe18ec66d5180", "bodies": ["super(BoxEmb, self).__init__()\nhalf_emb_dim = int(embedding_dim / 2)\nmin = torch.empty(vocab_size * half_emb_dim).normal_(mean=0, std=0.2)\ndelta = torch.log(torch.empty(vocab_size * half_emb_dim).normal_(mean=1, std=0.2))\ninit_emb = torch.stack([min, delta], dim=1)\ninit_emb = init_emb.reshape(vocab_size, embedding_dim)\nself.emb = EMB(vocab_size, embedding_dim, init_emb=init_emb)", "string_emb, mask = self.emb(torch.from_numpy(np.asarray(set_lkup)).to(device))\nstring_constrain_emb = self.constrain(string_emb)\nreturn (string_constrain_emb, mask)", "batch_size, max_len_token, emb_dim = emb.shape\nhalf_emb_dim = int(emb_dim / 2)\nemb_reshape = emb.reshape(batch_size * max_len_token, emb_dim)\nemb_reshape = emb_reshape.reshape(batch_size, max_len_token, half_emb_dim, 2)\nmin_val = emb_reshape[:, :, :, 0]\ndelta_val = torch.exp(emb_reshape[:, :, :, 1])\nconstrain_emb = torch.stack([min_val, delta_val], dim=3)\nreturn constrain_emb"], "bodies_text": "<|body_start_0|>\n super(BoxEmb, self).__init__()\n half_emb_dim = int(embedding_dim / 2)\n min = torch.empty(vocab_size * half_emb_dim).normal_(mean=0, std=0.2)\n delta = torch.log(torch.empty(vocab_size * half_emb_dim).normal_(mean=1, std=0.2))\n init_emb = torch.stack([min, delta], dim=1)\n init_emb = init_emb.reshape(vocab_size, embedding_dim)\n self.emb = EMB(vocab_size, embedding_dim, init_emb=init_emb)\n<|end_body_0|>\n\n<|body_start_1|>\n string_emb, mask = self.emb(torch.from_numpy(np.asarray(set_lkup)).to(device))\n string_constrain_emb = self.constrain(string_emb)\n return (string_constrain_emb, mask)\n<|end_body_1|>\n\n<|body_start_2|>\n batch_size, max_len_token, emb_dim = emb.shape\n half_emb_dim = int(emb_dim / 2)\n emb_reshape = emb.reshape(batch_size * max_len_token, emb_dim)\n emb_reshape = emb_reshape.reshape(batch_size, max_len_token, half_emb_dim, 2)\n min_val = emb_reshape[:, :, :, 0]\n delta_val = torch.exp(emb_reshape[:, :, :, 1])\n constrain_emb = torch.stack([min_val, delta_val], dim=3)\n return constrain_emb\n<|end_body_2|>\n", "class_docstring": "", "class_name": "BoxEmb", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BoxEmb:\n\n def __init__(self, vocab_size, embedding_dim):\n \"\"\"param emb: embedding class param embedding_dim: embedding dimension param rnn_hidden_size: rnn_hidden_size dimension param is_bidirectional: is the rnn bidirectional\"\"\"\n <|body_0|>\n\n def embed(self, set_lkup):\n \"\"\"Embed using average string embedding :param set_lkup: :return: constrained emb # [batch_size, max_len_token, emb_dim/2, 2]\"\"\"\n <|body_1|>\n\n def constrain(self, emb):\n \"\"\"Ensure that element at index 0 is less than element at index 1 :param emb: # [batch_size, max_len_token, emb_dim/2, 2] :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(BoxEmb, self).__init__()\n half_emb_dim = int(embedding_dim / 2)\n min = torch.empty(vocab_size * half_emb_dim).normal_(mean=0, std=0.2)\n delta = torch.log(torch.empty(vocab_size * half_emb_dim).normal_(mean=1, std=0.2))\n init_emb = torch.stack([min, delta], dim=1)\n init_emb = init_emb.reshape(vocab_size, embedding_dim)\n self.emb = EMB(vocab_size, embedding_dim, init_emb=init_emb)\n<|end_body_0|>\n\n<|body_start_1|>\n string_emb, mask = self.emb(torch.from_numpy(np.asarray(set_lkup)).to(device))\n string_constrain_emb = self.constrain(string_emb)\n return (string_constrain_emb, 
mask)\n<|end_body_1|>\n\n<|body_start_2|>\n batch_size, max_len_token, emb_dim = emb.shape\n half_emb_dim = int(emb_dim / 2)\n emb_reshape = emb.reshape(batch_size * max_len_token, emb_dim)\n emb_reshape = emb_reshape.reshape(batch_size, max_len_token, half_emb_dim, 2)\n min_val = emb_reshape[:, :, :, 0]\n delta_val = torch.exp(emb_reshape[:, :, :, 1])\n constrain_emb = torch.stack([min_val, delta_val], dim=3)\n return constrain_emb\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000404", "length_bytes": 3502, "license_type": "no_license", "methods": [{"docstring": "param emb: embedding class param embedding_dim: embedding dimension param rnn_hidden_size: rnn_hidden_size dimension param is_bidirectional: is the rnn bidirectional", "name": "__init__", "signature": "def __init__(self, vocab_size, embedding_dim)"}, {"docstring": "Embed using average string embedding :param set_lkup: :return: constrained emb # [batch_size, max_len_token, emb_dim/2, 2]", "name": "embed", "signature": "def embed(self, set_lkup)"}, {"docstring": "Ensure that element at index 0 is less than element at index 1 :param emb: # [batch_size, max_len_token, emb_dim/2, 2] :return:", "name": "constrain", "signature": "def constrain(self, emb)"}], "n_methods": 3, "prompt": "Implement the Python class `BoxEmb` described below.\n\nClass description:\nImplement the BoxEmb class.\n\nMethod signatures and docstrings:\n- def __init__(self, vocab_size, embedding_dim): param emb: embedding class param embedding_dim: embedding dimension param rnn_hidden_size: rnn_hidden_size dimension param is_bidirectional: is the rnn bidirectional\n- def embed(self, set_lkup): Embed using average string embedding :param set_lkup: :return: constrained emb # [batch_size, max_len_token, emb_dim/2, 2]\n- def constrain(self, emb): Ensure that element at index 0 is less than element at index 1 :param emb: # [batch_size, max_len_token, emb_dim/2, 2] :return:", "prompted_full_text": "Implement the Python class `BoxEmb` described below.\n\nClass description:\nImplement the BoxEmb class.\n\nMethod signatures and docstrings:\n- def __init__(self, vocab_size, embedding_dim): param emb: embedding class param embedding_dim: embedding dimension param rnn_hidden_size: rnn_hidden_size dimension param is_bidirectional: is the rnn bidirectional\n- def embed(self, set_lkup): Embed using average string embedding :param set_lkup: :return: constrained emb # [batch_size, max_len_token, emb_dim/2, 2]\n- def constrain(self, emb): Ensure that element at index 0 is less than element at index 1 :param emb: # [batch_size, max_len_token, emb_dim/2, 2] :return:\n\n<|skeleton|>\nclass BoxEmb:\n\n def __init__(self, vocab_size, embedding_dim):\n \"\"\"param emb: embedding class param embedding_dim: embedding dimension param rnn_hidden_size: rnn_hidden_size dimension param is_bidirectional: is the rnn bidirectional\"\"\"\n <|body_0|>\n\n def embed(self, set_lkup):\n \"\"\"Embed using average string embedding :param set_lkup: :return: constrained emb # [batch_size, max_len_token, emb_dim/2, 2]\"\"\"\n <|body_1|>\n\n def constrain(self, emb):\n \"\"\"Ensure that element at index 0 is less than element at index 1 :param emb: # [batch_size, max_len_token, emb_dim/2, 2] :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(BoxEmb, self).__init__()\n half_emb_dim = int(embedding_dim / 2)\n min = torch.empty(vocab_size * half_emb_dim).normal_(mean=0, std=0.2)\n delta = torch.log(torch.empty(vocab_size * half_emb_dim).normal_(mean=1, std=0.2))\n 
init_emb = torch.stack([min, delta], dim=1)\n init_emb = init_emb.reshape(vocab_size, embedding_dim)\n self.emb = EMB(vocab_size, embedding_dim, init_emb=init_emb)\n<|end_body_0|>\n\n<|body_start_1|>\n string_emb, mask = self.emb(torch.from_numpy(np.asarray(set_lkup)).to(device))\n string_constrain_emb = self.constrain(string_emb)\n return (string_constrain_emb, mask)\n<|end_body_1|>\n\n<|body_start_2|>\n batch_size, max_len_token, emb_dim = emb.shape\n half_emb_dim = int(emb_dim / 2)\n emb_reshape = emb.reshape(batch_size * max_len_token, emb_dim)\n emb_reshape = emb_reshape.reshape(batch_size, max_len_token, half_emb_dim, 2)\n min_val = emb_reshape[:, :, :, 0]\n delta_val = torch.exp(emb_reshape[:, :, :, 1])\n constrain_emb = torch.stack([min_val, delta_val], dim=3)\n return constrain_emb\n<|end_body_2|>\n", "revision_id": "c0b2f83a7d4c0d5fa5effb7584e0e0acc6f877a0", "skeleton": "<|skeleton|>\nclass BoxEmb:\n\n def __init__(self, vocab_size, embedding_dim):\n \"\"\"param emb: embedding class param embedding_dim: embedding dimension param rnn_hidden_size: rnn_hidden_size dimension param is_bidirectional: is the rnn bidirectional\"\"\"\n <|body_0|>\n\n def embed(self, set_lkup):\n \"\"\"Embed using average string embedding :param set_lkup: :return: constrained emb # [batch_size, max_len_token, emb_dim/2, 2]\"\"\"\n <|body_1|>\n\n def constrain(self, emb):\n \"\"\"Ensure that element at index 0 is less than element at index 1 :param emb: # [batch_size, max_len_token, emb_dim/2, 2] :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class BoxEmb:\n def __init__(self, vocab_size, embedding_dim):\n \"\"\"param emb: embedding class param embedding_dim: embedding dimension param rnn_hidden_size: rnn_hidden_size dimension param is_bidirectional: is the rnn bidirectional\"\"\"\n super(BoxEmb, self).__init__()\n half_emb_dim = int(embedding_dim / 2)\n min = torch.empty(vocab_size * half_emb_dim).normal_(mean=0, std=0.2)\n delta = torch.log(torch.empty(vocab_size * half_emb_dim).normal_(mean=1, std=0.2))\n init_emb = torch.stack([min, delta], dim=1)\n init_emb = init_emb.reshape(vocab_size, embedding_dim)\n self.emb = EMB(vocab_size, embedding_dim, init_emb=init_emb)\n\n def embed(self, set_lkup):\n \"\"\"Embed using average string embedding :param set_lkup: :return: constrained emb # [batch_size, max_len_token, emb_dim/2, 2]\"\"\"\n string_emb, mask = self.emb(torch.from_numpy(np.asarray(set_lkup)).to(device))\n string_constrain_emb = self.constrain(string_emb)\n return (string_constrain_emb, mask)\n\n def constrain(self, emb):\n \"\"\"Ensure that element at index 0 is less than element at index 1 :param emb: # [batch_size, max_len_token, emb_dim/2, 2] :return:\"\"\"\n batch_size, max_len_token, emb_dim = emb.shape\n half_emb_dim = int(emb_dim / 2)\n emb_reshape = emb.reshape(batch_size * max_len_token, emb_dim)\n emb_reshape = emb_reshape.reshape(batch_size, max_len_token, half_emb_dim, 2)\n min_val = emb_reshape[:, :, :, 0]\n delta_val = torch.exp(emb_reshape[:, :, :, 1])\n constrain_emb = torch.stack([min_val, delta_val], dim=3)\n return constrain_emb\n", "source": "the_stack_v2_python_sparse", "source_path": "src/main/base_models/embs/BoxEmb.py", "source_repo": "iesl/institution_hierarchies", "split": "val", "star_events_count": 3}
{"blob_id": "a09d7fcd85c38398535f08efde9d7a3c2c9b5f0d", "bodies": ["if not mr.auth.user_id:\n return {'error': 'User is not logged in.'}\njson_data = {}\nwith self.profiler.Phase('page processing'):\n json_data.update(self._GatherProjects(mr))\nreturn json_data", "with self.profiler.Phase('GetUserProjects'):\n project_lists = sitewide_helpers.GetUserProjects(mr.cnxn, self.services, mr.auth.user_pb, mr.auth.effective_ids, mr.auth.effective_ids)\n visible_ownership, _visible_deleted, visible_membership, visible_contrib = project_lists\nwith self.profiler.Phase('GetStarredProjects'):\n starred_projects = sitewide_helpers.GetViewableStarredProjects(mr.cnxn, self.services, mr.auth.user_id, mr.auth.effective_ids, mr.auth.user_pb)\nprojects_dict = {'memberof': [p.project_name for p in visible_membership], 'ownerof': [p.project_name for p in visible_ownership], 'contributorto': [p.project_name for p in visible_contrib], 'starred_projects': [p.project_name for p in starred_projects]}\nreturn projects_dict"], "bodies_text": "<|body_start_0|>\n if not mr.auth.user_id:\n return {'error': 'User is not logged in.'}\n json_data = {}\n with self.profiler.Phase('page processing'):\n json_data.update(self._GatherProjects(mr))\n return json_data\n<|end_body_0|>\n\n<|body_start_1|>\n with self.profiler.Phase('GetUserProjects'):\n project_lists = sitewide_helpers.GetUserProjects(mr.cnxn, self.services, mr.auth.user_pb, mr.auth.effective_ids, mr.auth.effective_ids)\n visible_ownership, _visible_deleted, visible_membership, visible_contrib = project_lists\n with self.profiler.Phase('GetStarredProjects'):\n starred_projects = sitewide_helpers.GetViewableStarredProjects(mr.cnxn, self.services, mr.auth.user_id, mr.auth.effective_ids, mr.auth.user_pb)\n projects_dict = {'memberof': [p.project_name for p in visible_membership], 'ownerof': [p.project_name for p in visible_ownership], 'contributorto': [p.project_name for p in visible_contrib], 'starred_projects': [p.project_name for p in starred_projects]}\n return projects_dict\n<|end_body_1|>\n", "class_docstring": "Servlet to get all of a user's projects in JSON format.", "class_name": "ProjectsJsonFeed", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ProjectsJsonFeed:\n \"\"\"Servlet to get all of a user's projects in JSON format.\"\"\"\n\n def HandleRequest(self, mr):\n \"\"\"Retrieve list of a user's projects for the \"My projects\" menu. Args: mr: common information parsed from the HTTP request. 
Returns: Results dictionary in JSON format\"\"\"\n <|body_0|>\n\n def _GatherProjects(self, mr):\n \"\"\"Return a dict of project names the current user is involved in.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not mr.auth.user_id:\n return {'error': 'User is not logged in.'}\n json_data = {}\n with self.profiler.Phase('page processing'):\n json_data.update(self._GatherProjects(mr))\n return json_data\n<|end_body_0|>\n\n<|body_start_1|>\n with self.profiler.Phase('GetUserProjects'):\n project_lists = sitewide_helpers.GetUserProjects(mr.cnxn, self.services, mr.auth.user_pb, mr.auth.effective_ids, mr.auth.effective_ids)\n visible_ownership, _visible_deleted, visible_membership, visible_contrib = project_lists\n with self.profiler.Phase('GetStarredProjects'):\n starred_projects = sitewide_helpers.GetViewableStarredProjects(mr.cnxn, self.services, mr.auth.user_id, mr.auth.effective_ids, mr.auth.user_pb)\n projects_dict = {'memberof': [p.project_name for p in visible_membership], 'ownerof': [p.project_name for p in visible_ownership], 'contributorto': [p.project_name for p in visible_contrib], 'starred_projects': [p.project_name for p in starred_projects]}\n return projects_dict\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000405", "length_bytes": 1870, "license_type": "permissive", "methods": [{"docstring": "Retrieve list of a user's projects for the \"My projects\" menu. Args: mr: common information parsed from the HTTP request. Returns: Results dictionary in JSON format", "name": "HandleRequest", "signature": "def HandleRequest(self, mr)"}, {"docstring": "Return a dict of project names the current user is involved in.", "name": "_GatherProjects", "signature": "def _GatherProjects(self, mr)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002540", "prompt": "Implement the Python class `ProjectsJsonFeed` described below.\n\nClass description:\nServlet to get all of a user's projects in JSON format.\n\nMethod signatures and docstrings:\n- def HandleRequest(self, mr): Retrieve list of a user's projects for the \"My projects\" menu. Args: mr: common information parsed from the HTTP request. Returns: Results dictionary in JSON format\n- def _GatherProjects(self, mr): Return a dict of project names the current user is involved in.", "prompted_full_text": "Implement the Python class `ProjectsJsonFeed` described below.\n\nClass description:\nServlet to get all of a user's projects in JSON format.\n\nMethod signatures and docstrings:\n- def HandleRequest(self, mr): Retrieve list of a user's projects for the \"My projects\" menu. Args: mr: common information parsed from the HTTP request. Returns: Results dictionary in JSON format\n- def _GatherProjects(self, mr): Return a dict of project names the current user is involved in.\n\n<|skeleton|>\nclass ProjectsJsonFeed:\n \"\"\"Servlet to get all of a user's projects in JSON format.\"\"\"\n\n def HandleRequest(self, mr):\n \"\"\"Retrieve list of a user's projects for the \"My projects\" menu. Args: mr: common information parsed from the HTTP request. 
Returns: Results dictionary in JSON format\"\"\"\n <|body_0|>\n\n def _GatherProjects(self, mr):\n \"\"\"Return a dict of project names the current user is involved in.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not mr.auth.user_id:\n return {'error': 'User is not logged in.'}\n json_data = {}\n with self.profiler.Phase('page processing'):\n json_data.update(self._GatherProjects(mr))\n return json_data\n<|end_body_0|>\n\n<|body_start_1|>\n with self.profiler.Phase('GetUserProjects'):\n project_lists = sitewide_helpers.GetUserProjects(mr.cnxn, self.services, mr.auth.user_pb, mr.auth.effective_ids, mr.auth.effective_ids)\n visible_ownership, _visible_deleted, visible_membership, visible_contrib = project_lists\n with self.profiler.Phase('GetStarredProjects'):\n starred_projects = sitewide_helpers.GetViewableStarredProjects(mr.cnxn, self.services, mr.auth.user_id, mr.auth.effective_ids, mr.auth.user_pb)\n projects_dict = {'memberof': [p.project_name for p in visible_membership], 'ownerof': [p.project_name for p in visible_ownership], 'contributorto': [p.project_name for p in visible_contrib], 'starred_projects': [p.project_name for p in starred_projects]}\n return projects_dict\n<|end_body_1|>\n", "revision_id": "09064105713603f7bf75c772e8354800a1bfa256", "skeleton": "<|skeleton|>\nclass ProjectsJsonFeed:\n \"\"\"Servlet to get all of a user's projects in JSON format.\"\"\"\n\n def HandleRequest(self, mr):\n \"\"\"Retrieve list of a user's projects for the \"My projects\" menu. Args: mr: common information parsed from the HTTP request. Returns: Results dictionary in JSON format\"\"\"\n <|body_0|>\n\n def _GatherProjects(self, mr):\n \"\"\"Return a dict of project names the current user is involved in.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ProjectsJsonFeed:\n \"\"\"Servlet to get all of a user's projects in JSON format.\"\"\"\n\n def HandleRequest(self, mr):\n \"\"\"Retrieve list of a user's projects for the \"My projects\" menu. Args: mr: common information parsed from the HTTP request. Returns: Results dictionary in JSON format\"\"\"\n if not mr.auth.user_id:\n return {'error': 'User is not logged in.'}\n json_data = {}\n with self.profiler.Phase('page processing'):\n json_data.update(self._GatherProjects(mr))\n return json_data\n\n def _GatherProjects(self, mr):\n \"\"\"Return a dict of project names the current user is involved in.\"\"\"\n with self.profiler.Phase('GetUserProjects'):\n project_lists = sitewide_helpers.GetUserProjects(mr.cnxn, self.services, mr.auth.user_pb, mr.auth.effective_ids, mr.auth.effective_ids)\n visible_ownership, _visible_deleted, visible_membership, visible_contrib = project_lists\n with self.profiler.Phase('GetStarredProjects'):\n starred_projects = sitewide_helpers.GetViewableStarredProjects(mr.cnxn, self.services, mr.auth.user_id, mr.auth.effective_ids, mr.auth.user_pb)\n projects_dict = {'memberof': [p.project_name for p in visible_membership], 'ownerof': [p.project_name for p in visible_ownership], 'contributorto': [p.project_name for p in visible_contrib], 'starred_projects': [p.project_name for p in starred_projects]}\n return projects_dict\n", "source": "the_stack_v2_python_sparse", "source_path": "appengine/monorail/sitewide/userprojects.py", "source_repo": "mcgreevy/chromium-infra", "split": "val", "star_events_count": 1}
{"blob_id": "64fe4ce8a91ee750082f5a674ebd15b825a17486", "bodies": ["if not parse_node:\n raise TypeError('parse_node cannot be null.')\nreturn CreatePostRequestBody()", "from ....models.print_certificate_signing_request import PrintCertificateSigningRequest\nfrom ....models.print_certificate_signing_request import PrintCertificateSigningRequest\nfields: Dict[str, Callable[[Any], None]] = {'certificateSigningRequest': lambda n: setattr(self, 'certificate_signing_request', n.get_object_value(PrintCertificateSigningRequest)), 'connectorId': lambda n: setattr(self, 'connector_id', n.get_str_value()), 'displayName': lambda n: setattr(self, 'display_name', n.get_str_value()), 'hasPhysicalDevice': lambda n: setattr(self, 'has_physical_device', n.get_bool_value()), 'manufacturer': lambda n: setattr(self, 'manufacturer', n.get_str_value()), 'model': lambda n: setattr(self, 'model', n.get_str_value()), 'physicalDeviceId': lambda n: setattr(self, 'physical_device_id', n.get_str_value())}\nreturn fields", "if not writer:\n raise TypeError('writer cannot be null.')\nwriter.write_object_value('certificateSigningRequest', self.certificate_signing_request)\nwriter.write_str_value('connectorId', self.connector_id)\nwriter.write_str_value('displayName', self.display_name)\nwriter.write_bool_value('hasPhysicalDevice', self.has_physical_device)\nwriter.write_str_value('manufacturer', self.manufacturer)\nwriter.write_str_value('model', self.model)\nwriter.write_str_value('physicalDeviceId', self.physical_device_id)\nwriter.write_additional_data_value(self.additional_data)"], "bodies_text": "<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return CreatePostRequestBody()\n<|end_body_0|>\n\n<|body_start_1|>\n from ....models.print_certificate_signing_request import PrintCertificateSigningRequest\n from ....models.print_certificate_signing_request import PrintCertificateSigningRequest\n fields: Dict[str, Callable[[Any], None]] = {'certificateSigningRequest': lambda n: setattr(self, 'certificate_signing_request', n.get_object_value(PrintCertificateSigningRequest)), 'connectorId': lambda n: setattr(self, 'connector_id', n.get_str_value()), 'displayName': lambda n: setattr(self, 'display_name', n.get_str_value()), 'hasPhysicalDevice': lambda n: setattr(self, 'has_physical_device', n.get_bool_value()), 'manufacturer': lambda n: setattr(self, 'manufacturer', n.get_str_value()), 'model': lambda n: setattr(self, 'model', n.get_str_value()), 'physicalDeviceId': lambda n: setattr(self, 'physical_device_id', n.get_str_value())}\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n writer.write_object_value('certificateSigningRequest', self.certificate_signing_request)\n writer.write_str_value('connectorId', self.connector_id)\n writer.write_str_value('displayName', self.display_name)\n writer.write_bool_value('hasPhysicalDevice', self.has_physical_device)\n writer.write_str_value('manufacturer', self.manufacturer)\n writer.write_str_value('model', self.model)\n writer.write_str_value('physicalDeviceId', self.physical_device_id)\n writer.write_additional_data_value(self.additional_data)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "CreatePostRequestBody", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CreatePostRequestBody:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> CreatePostRequestBody:\n \"\"\"Creates a 
new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: CreatePostRequestBody\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return CreatePostRequestBody()\n<|end_body_0|>\n\n<|body_start_1|>\n from ....models.print_certificate_signing_request import PrintCertificateSigningRequest\n from ....models.print_certificate_signing_request import PrintCertificateSigningRequest\n fields: Dict[str, Callable[[Any], None]] = {'certificateSigningRequest': lambda n: setattr(self, 'certificate_signing_request', n.get_object_value(PrintCertificateSigningRequest)), 'connectorId': lambda n: setattr(self, 'connector_id', n.get_str_value()), 'displayName': lambda n: setattr(self, 'display_name', n.get_str_value()), 'hasPhysicalDevice': lambda n: setattr(self, 'has_physical_device', n.get_bool_value()), 'manufacturer': lambda n: setattr(self, 'manufacturer', n.get_str_value()), 'model': lambda n: setattr(self, 'model', n.get_str_value()), 'physicalDeviceId': lambda n: setattr(self, 'physical_device_id', n.get_str_value())}\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n writer.write_object_value('certificateSigningRequest', self.certificate_signing_request)\n writer.write_str_value('connectorId', self.connector_id)\n writer.write_str_value('displayName', self.display_name)\n writer.write_bool_value('hasPhysicalDevice', self.has_physical_device)\n writer.write_str_value('manufacturer', self.manufacturer)\n writer.write_str_value('model', self.model)\n writer.write_str_value('physicalDeviceId', self.physical_device_id)\n writer.write_additional_data_value(self.additional_data)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000406", "length_bytes": 4101, "license_type": "permissive", "methods": [{"docstring": "Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: CreatePostRequestBody", "name": "create_from_discriminator_value", "signature": "def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> CreatePostRequestBody"}, {"docstring": "The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]", "name": "get_field_deserializers", "signature": "def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]"}, {"docstring": "Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "name": "serialize", "signature": "def serialize(self, writer: SerializationWriter) -> None"}], "n_methods": 3, "prompt": "Implement the Python class `CreatePostRequestBody` described below.\n\nClass description:\nImplement the CreatePostRequestBody class.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> CreatePostRequestBody: Creates a new 
instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: CreatePostRequestBody\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "prompted_full_text": "Implement the Python class `CreatePostRequestBody` described below.\n\nClass description:\nImplement the CreatePostRequestBody class.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> CreatePostRequestBody: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: CreatePostRequestBody\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model\n\n<|skeleton|>\nclass CreatePostRequestBody:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> CreatePostRequestBody:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: CreatePostRequestBody\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return CreatePostRequestBody()\n<|end_body_0|>\n\n<|body_start_1|>\n from ....models.print_certificate_signing_request import PrintCertificateSigningRequest\n from ....models.print_certificate_signing_request import PrintCertificateSigningRequest\n fields: Dict[str, Callable[[Any], None]] = {'certificateSigningRequest': lambda n: setattr(self, 'certificate_signing_request', n.get_object_value(PrintCertificateSigningRequest)), 'connectorId': lambda n: setattr(self, 'connector_id', n.get_str_value()), 'displayName': lambda n: setattr(self, 'display_name', n.get_str_value()), 'hasPhysicalDevice': lambda n: setattr(self, 'has_physical_device', n.get_bool_value()), 'manufacturer': lambda n: setattr(self, 'manufacturer', n.get_str_value()), 'model': lambda n: setattr(self, 'model', n.get_str_value()), 'physicalDeviceId': lambda n: setattr(self, 'physical_device_id', n.get_str_value())}\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n writer.write_object_value('certificateSigningRequest', self.certificate_signing_request)\n writer.write_str_value('connectorId', self.connector_id)\n writer.write_str_value('displayName', self.display_name)\n writer.write_bool_value('hasPhysicalDevice', 
self.has_physical_device)\n writer.write_str_value('manufacturer', self.manufacturer)\n writer.write_str_value('model', self.model)\n writer.write_str_value('physicalDeviceId', self.physical_device_id)\n writer.write_additional_data_value(self.additional_data)\n<|end_body_2|>\n", "revision_id": "27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949", "skeleton": "<|skeleton|>\nclass CreatePostRequestBody:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> CreatePostRequestBody:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: CreatePostRequestBody\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CreatePostRequestBody:\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> CreatePostRequestBody:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: CreatePostRequestBody\"\"\"\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return CreatePostRequestBody()\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n from ....models.print_certificate_signing_request import PrintCertificateSigningRequest\n from ....models.print_certificate_signing_request import PrintCertificateSigningRequest\n fields: Dict[str, Callable[[Any], None]] = {'certificateSigningRequest': lambda n: setattr(self, 'certificate_signing_request', n.get_object_value(PrintCertificateSigningRequest)), 'connectorId': lambda n: setattr(self, 'connector_id', n.get_str_value()), 'displayName': lambda n: setattr(self, 'display_name', n.get_str_value()), 'hasPhysicalDevice': lambda n: setattr(self, 'has_physical_device', n.get_bool_value()), 'manufacturer': lambda n: setattr(self, 'manufacturer', n.get_str_value()), 'model': lambda n: setattr(self, 'model', n.get_str_value()), 'physicalDeviceId': lambda n: setattr(self, 'physical_device_id', n.get_str_value())}\n return fields\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n if not writer:\n raise TypeError('writer cannot be null.')\n writer.write_object_value('certificateSigningRequest', self.certificate_signing_request)\n writer.write_str_value('connectorId', self.connector_id)\n writer.write_str_value('displayName', self.display_name)\n writer.write_bool_value('hasPhysicalDevice', self.has_physical_device)\n writer.write_str_value('manufacturer', self.manufacturer)\n writer.write_str_value('model', self.model)\n writer.write_str_value('physicalDeviceId', self.physical_device_id)\n 
writer.write_additional_data_value(self.additional_data)\n", "source": "the_stack_v2_python_sparse", "source_path": "msgraph/generated/print/printers/create/create_post_request_body.py", "source_repo": "microsoftgraph/msgraph-sdk-python", "split": "val", "star_events_count": 135}
{"blob_id": "e530f4bd6c0d89fcc03078e7d3044bfd7c2cd650", "bodies": ["Light.__init__(self, 'SpotLight')\nself.position = position\nself.direction = direction\nself.p = p\nself.dir_ray = Ray(position, self.direction - self.position)\nself.a = a\nself.b = b\nself.c = c", "d = shadowray.direction.length()\nf_att = np.clip(1 / (self.c + self.b * d + self.a * d * d), 0, 1)\ncos_angle = self.direction.dot(-shadowray.direction) / (self.direction.length() * shadowray.direction.length())\nreturn f_att * cos_angle ** self.p"], "bodies_text": "<|body_start_0|>\n Light.__init__(self, 'SpotLight')\n self.position = position\n self.direction = direction\n self.p = p\n self.dir_ray = Ray(position, self.direction - self.position)\n self.a = a\n self.b = b\n self.c = c\n<|end_body_0|>\n\n<|body_start_1|>\n d = shadowray.direction.length()\n f_att = np.clip(1 / (self.c + self.b * d + self.a * d * d), 0, 1)\n cos_angle = self.direction.dot(-shadowray.direction) / (self.direction.length() * shadowray.direction.length())\n return f_att * cos_angle ** self.p\n<|end_body_1|>\n", "class_docstring": "Class describing a spot light source", "class_name": "SpotLight", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SpotLight:\n \"\"\"Class describing a spot light source\"\"\"\n\n def __init__(self, position: Vec3, direction: Vec3, p: int, a=0.02, b=0.1, c=0) -> None:\n \"\"\"Constructor: param position: position of the light param direction: direction of the light param p: controls how much the spotlight is focussed params a, b, c: quadratic equation coefficients for ad^2 + bd + c\"\"\"\n <|body_0|>\n\n def intensity(self, shadowray):\n \"\"\"Point intensity calculation: param shadowray: ray from light to hitrecord point\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Light.__init__(self, 'SpotLight')\n self.position = position\n self.direction = direction\n self.p = p\n self.dir_ray = Ray(position, self.direction - self.position)\n self.a = a\n self.b = b\n self.c = c\n<|end_body_0|>\n\n<|body_start_1|>\n d = shadowray.direction.length()\n f_att = np.clip(1 / (self.c + self.b * d + self.a * d * d), 0, 1)\n cos_angle = self.direction.dot(-shadowray.direction) / (self.direction.length() * shadowray.direction.length())\n return f_att * cos_angle ** self.p\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000407", "length_bytes": 1416, "license_type": "permissive", "methods": [{"docstring": "Constructor: param position: position of the light param direction: direction of the light param p: controls how much the spotlight is focussed params a, b, c: quadratic equation coefficients for ad^2 + bd + c", "name": "__init__", "signature": "def __init__(self, position: Vec3, direction: Vec3, p: int, a=0.02, b=0.1, c=0) -> None"}, {"docstring": "Point intensity calculation: param shadowray: ray from light to hitrecord point", "name": "intensity", "signature": "def intensity(self, shadowray)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_010735", "prompt": "Implement the Python class `SpotLight` described below.\n\nClass description:\nClass describing a spot light source\n\nMethod signatures and docstrings:\n- def __init__(self, position: Vec3, direction: Vec3, p: int, a=0.02, b=0.1, c=0) -> None: Constructor: param position: position of the light param direction: direction of the light param p: controls how much the spotlight is focussed params a, b, c: quadratic equation coefficients for ad^2 + bd + c\n- def 
intensity(self, shadowray): Point intensity calculation: param shadowray: ray from light to hitrecord point", "prompted_full_text": "Implement the Python class `SpotLight` described below.\n\nClass description:\nClass describing a spot light source\n\nMethod signatures and docstrings:\n- def __init__(self, position: Vec3, direction: Vec3, p: int, a=0.02, b=0.1, c=0) -> None: Constructor: param position: position of the light param direction: direction of the light param p: controls how much the spotlight is focussed params a, b, c: quadratic equation coefficients for ad^2 + bd + c\n- def intensity(self, shadowray): Point intensity calculation: param shadowray: ray from light to hitrecord point\n\n<|skeleton|>\nclass SpotLight:\n \"\"\"Class describing a spot light source\"\"\"\n\n def __init__(self, position: Vec3, direction: Vec3, p: int, a=0.02, b=0.1, c=0) -> None:\n \"\"\"Constructor: param position: position of the light param direction: direction of the light param p: controls how much the spotlight is focussed params a, b, c: quadratic equation coefficients for ad^2 + bd + c\"\"\"\n <|body_0|>\n\n def intensity(self, shadowray):\n \"\"\"Point intensity calculation: param shadowray: ray from light to hitrecord point\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Light.__init__(self, 'SpotLight')\n self.position = position\n self.direction = direction\n self.p = p\n self.dir_ray = Ray(position, self.direction - self.position)\n self.a = a\n self.b = b\n self.c = c\n<|end_body_0|>\n\n<|body_start_1|>\n d = shadowray.direction.length()\n f_att = np.clip(1 / (self.c + self.b * d + self.a * d * d), 0, 1)\n cos_angle = self.direction.dot(-shadowray.direction) / (self.direction.length() * shadowray.direction.length())\n return f_att * cos_angle ** self.p\n<|end_body_1|>\n", "revision_id": "6fc0ccbc6fb24dcc2a8532aa22eb9574f1afdb3a", "skeleton": "<|skeleton|>\nclass SpotLight:\n \"\"\"Class describing a spot light source\"\"\"\n\n def __init__(self, position: Vec3, direction: Vec3, p: int, a=0.02, b=0.1, c=0) -> None:\n \"\"\"Constructor: param position: position of the light param direction: direction of the light param p: controls how much the spotlight is focussed params a, b, c: quadratic equation coefficients for ad^2 + bd + c\"\"\"\n <|body_0|>\n\n def intensity(self, shadowray):\n \"\"\"Point intensity calculation: param shadowray: ray from light to hitrecord point\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SpotLight:\n \"\"\"Class describing a spot light source\"\"\"\n\n def __init__(self, position: Vec3, direction: Vec3, p: int, a=0.02, b=0.1, c=0) -> None:\n \"\"\"Constructor: param position: position of the light param direction: direction of the light param p: controls how much the spotlight is focussed params a, b, c: quadratic equation coefficients for ad^2 + bd + c\"\"\"\n Light.__init__(self, 'SpotLight')\n self.position = position\n self.direction = direction\n self.p = p\n self.dir_ray = Ray(position, self.direction - self.position)\n self.a = a\n self.b = b\n self.c = c\n\n def intensity(self, shadowray):\n \"\"\"Point intensity calculation: param shadowray: ray from light to hitrecord point\"\"\"\n d = shadowray.direction.length()\n f_att = np.clip(1 / (self.c + self.b * d + self.a * d * d), 0, 1)\n cos_angle = self.direction.dot(-shadowray.direction) / (self.direction.length() * 
shadowray.direction.length())\n return f_att * cos_angle ** self.p\n", "source": "the_stack_v2_python_sparse", "source_path": "pyrt/light/spotlight.py", "source_repo": "martinchristen/pyRT", "split": "val", "star_events_count": 79}
{"blob_id": "334293a772a2a72baeeaaecdc4d7039d8ec914c9", "bodies": ["self.name = name\nself.index = index\nself.start = start[0]\nself.end = end[0]\nself.start_sub = start[1] if len(start) > 1 else False\nself.end_sub = end[1] if len(end) > 1 else False\nself.export = export_names.get(self.name, self.name)\nself.is_header = is_header\nself.node = None\nself.overlap_id = None", "if parent_node is not None:\n self.node = etree.SubElement(parent_node, self.export)\nelse:\n self.node = etree.Element(self.export)", "if self.export != self.name:\n return '<%s/%s %s %s-%s>' % (self.name, self.export, self.index, self.start, self.end)\nreturn '<%s %s %s-%s>' % (self.name, self.index, self.start, self.end)", "def get_sort_key(span, sub_positions=False, empty_span=False):\n \"\"\"Return a sort key for span which makes span comparison possible.\"\"\"\n hierarchy_index = elem_hierarchy.index(span.name) if span.name in elem_hierarchy else -1\n if empty_span:\n if sub_positions:\n return ((span.start, span.start_sub), hierarchy_index, (span.end, span.end_sub))\n else:\n return (span.start, hierarchy_index, span.end)\n elif sub_positions:\n return ((span.start, span.start_sub), (-span.end, -span.end_sub), hierarchy_index)\n else:\n return (span.start, -span.end, hierarchy_index)\nif (self.start, self.start_sub) == (self.end, self.end_sub) or (other_span.start, other_span.start_sub) == (other_span.end, other_span.end_sub):\n sort_key1 = get_sort_key(self, empty_span=True)\n sort_key2 = get_sort_key(other_span, empty_span=True)\nelif self.start_sub and other_span.start_sub:\n sort_key1 = get_sort_key(self, sub_positions=True)\n sort_key2 = get_sort_key(other_span, sub_positions=True)\nelse:\n sort_key1 = get_sort_key(self)\n sort_key2 = get_sort_key(other_span)\nreturn sort_key1 < sort_key2"], "bodies_text": "<|body_start_0|>\n self.name = name\n self.index = index\n self.start = start[0]\n self.end = end[0]\n self.start_sub = start[1] if len(start) > 1 else False\n self.end_sub = end[1] if len(end) > 1 else False\n self.export = export_names.get(self.name, self.name)\n self.is_header = is_header\n self.node = None\n self.overlap_id = None\n<|end_body_0|>\n\n<|body_start_1|>\n if parent_node is not None:\n self.node = etree.SubElement(parent_node, self.export)\n else:\n self.node = etree.Element(self.export)\n<|end_body_1|>\n\n<|body_start_2|>\n if self.export != self.name:\n return '<%s/%s %s %s-%s>' % (self.name, self.export, self.index, self.start, self.end)\n return '<%s %s %s-%s>' % (self.name, self.index, self.start, self.end)\n<|end_body_2|>\n\n<|body_start_3|>\n def get_sort_key(span, sub_positions=False, empty_span=False):\n \"\"\"Return a sort key for span which makes span comparison possible.\"\"\"\n hierarchy_index = elem_hierarchy.index(span.name) if span.name in elem_hierarchy else -1\n if empty_span:\n if sub_positions:\n return ((span.start, span.start_sub), hierarchy_index, (span.end, span.end_sub))\n else:\n return (span.start, hierarchy_index, span.end)\n elif sub_positions:\n return ((span.start, span.start_sub), (-span.end, -span.end_sub), hierarchy_index)\n else:\n return (span.start, -span.end, hierarchy_index)\n if (self.start, self.start_sub) == (self.end, self.end_sub) or (other_span.start, other_span.start_sub) == (other_span.end, other_span.end_sub):\n sort_key1 = get_sort_key(self, empty_span=True)\n sort_key2 = get_sort_key(other_span, empty_span=True)\n elif self.start_sub and other_span.start_sub:\n sort_key1 = get_sort_key(self, sub_positions=True)\n sort_key2 = 
get_sort_key(other_span, sub_positions=True)\n else:\n sort_key1 = get_sort_key(self)\n sort_key2 = get_sort_key(other_span)\n return sort_key1 < sort_key2\n<|end_body_3|>\n", "class_docstring": "Object to store span information.", "class_name": "Span", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Span:\n \"\"\"Object to store span information.\"\"\"\n\n def __init__(self, name, index, start, end, export_names, is_header):\n \"\"\"Set attributes.\"\"\"\n <|body_0|>\n\n def set_node(self, parent_node=None):\n \"\"\"Create an XML node under parent_node.\"\"\"\n <|body_1|>\n\n def __repr__(self):\n \"\"\"Stringify the most interesting span info (for debugging mostly).\"\"\"\n <|body_2|>\n\n def __lt__(self, other_span):\n \"\"\"Return True if other_span comes after this span. Sort spans according to their position and hierarchy. Sort by: 1. start position (smaller indices first) 2. end position (larger indices first) 3. the calculated element hierarchy\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.name = name\n self.index = index\n self.start = start[0]\n self.end = end[0]\n self.start_sub = start[1] if len(start) > 1 else False\n self.end_sub = end[1] if len(end) > 1 else False\n self.export = export_names.get(self.name, self.name)\n self.is_header = is_header\n self.node = None\n self.overlap_id = None\n<|end_body_0|>\n\n<|body_start_1|>\n if parent_node is not None:\n self.node = etree.SubElement(parent_node, self.export)\n else:\n self.node = etree.Element(self.export)\n<|end_body_1|>\n\n<|body_start_2|>\n if self.export != self.name:\n return '<%s/%s %s %s-%s>' % (self.name, self.export, self.index, self.start, self.end)\n return '<%s %s %s-%s>' % (self.name, self.index, self.start, self.end)\n<|end_body_2|>\n\n<|body_start_3|>\n def get_sort_key(span, sub_positions=False, empty_span=False):\n \"\"\"Return a sort key for span which makes span comparison possible.\"\"\"\n hierarchy_index = elem_hierarchy.index(span.name) if span.name in elem_hierarchy else -1\n if empty_span:\n if sub_positions:\n return ((span.start, span.start_sub), hierarchy_index, (span.end, span.end_sub))\n else:\n return (span.start, hierarchy_index, span.end)\n elif sub_positions:\n return ((span.start, span.start_sub), (-span.end, -span.end_sub), hierarchy_index)\n else:\n return (span.start, -span.end, hierarchy_index)\n if (self.start, self.start_sub) == (self.end, self.end_sub) or (other_span.start, other_span.start_sub) == (other_span.end, other_span.end_sub):\n sort_key1 = get_sort_key(self, empty_span=True)\n sort_key2 = get_sort_key(other_span, empty_span=True)\n elif self.start_sub and other_span.start_sub:\n sort_key1 = get_sort_key(self, sub_positions=True)\n sort_key2 = get_sort_key(other_span, sub_positions=True)\n else:\n sort_key1 = get_sort_key(self)\n sort_key2 = get_sort_key(other_span)\n return sort_key1 < sort_key2\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000408", "length_bytes": 32149, "license_type": "permissive", "methods": [{"docstring": "Set attributes.", "name": "__init__", "signature": "def __init__(self, name, index, start, end, export_names, is_header)"}, {"docstring": "Create an XML node under parent_node.", "name": "set_node", "signature": "def set_node(self, parent_node=None)"}, {"docstring": "Stringify the most interesting span info (for debugging mostly).", "name": "__repr__", "signature": "def __repr__(self)"}, {"docstring": "Return True if other_span comes after 
this span. Sort spans according to their position and hierarchy. Sort by: 1. start position (smaller indices first) 2. end position (larger indices first) 3. the calculated element hierarchy", "name": "__lt__", "signature": "def __lt__(self, other_span)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_033065", "prompt": "Implement the Python class `Span` described below.\n\nClass description:\nObject to store span information.\n\nMethod signatures and docstrings:\n- def __init__(self, name, index, start, end, export_names, is_header): Set attributes.\n- def set_node(self, parent_node=None): Create an XML node under parent_node.\n- def __repr__(self): Stringify the most interesting span info (for debugging mostly).\n- def __lt__(self, other_span): Return True if other_span comes after this span. Sort spans according to their position and hierarchy. Sort by: 1. start position (smaller indices first) 2. end position (larger indices first) 3. the calculated element hierarchy", "prompted_full_text": "Implement the Python class `Span` described below.\n\nClass description:\nObject to store span information.\n\nMethod signatures and docstrings:\n- def __init__(self, name, index, start, end, export_names, is_header): Set attributes.\n- def set_node(self, parent_node=None): Create an XML node under parent_node.\n- def __repr__(self): Stringify the most interesting span info (for debugging mostly).\n- def __lt__(self, other_span): Return True if other_span comes after this span. Sort spans according to their position and hierarchy. Sort by: 1. start position (smaller indices first) 2. end position (larger indices first) 3. the calculated element hierarchy\n\n<|skeleton|>\nclass Span:\n \"\"\"Object to store span information.\"\"\"\n\n def __init__(self, name, index, start, end, export_names, is_header):\n \"\"\"Set attributes.\"\"\"\n <|body_0|>\n\n def set_node(self, parent_node=None):\n \"\"\"Create an XML node under parent_node.\"\"\"\n <|body_1|>\n\n def __repr__(self):\n \"\"\"Stringify the most interesting span info (for debugging mostly).\"\"\"\n <|body_2|>\n\n def __lt__(self, other_span):\n \"\"\"Return True if other_span comes after this span. Sort spans according to their position and hierarchy. Sort by: 1. start position (smaller indices first) 2. end position (larger indices first) 3. 
the calculated element hierarchy\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.name = name\n self.index = index\n self.start = start[0]\n self.end = end[0]\n self.start_sub = start[1] if len(start) > 1 else False\n self.end_sub = end[1] if len(end) > 1 else False\n self.export = export_names.get(self.name, self.name)\n self.is_header = is_header\n self.node = None\n self.overlap_id = None\n<|end_body_0|>\n\n<|body_start_1|>\n if parent_node is not None:\n self.node = etree.SubElement(parent_node, self.export)\n else:\n self.node = etree.Element(self.export)\n<|end_body_1|>\n\n<|body_start_2|>\n if self.export != self.name:\n return '<%s/%s %s %s-%s>' % (self.name, self.export, self.index, self.start, self.end)\n return '<%s %s %s-%s>' % (self.name, self.index, self.start, self.end)\n<|end_body_2|>\n\n<|body_start_3|>\n def get_sort_key(span, sub_positions=False, empty_span=False):\n \"\"\"Return a sort key for span which makes span comparison possible.\"\"\"\n hierarchy_index = elem_hierarchy.index(span.name) if span.name in elem_hierarchy else -1\n if empty_span:\n if sub_positions:\n return ((span.start, span.start_sub), hierarchy_index, (span.end, span.end_sub))\n else:\n return (span.start, hierarchy_index, span.end)\n elif sub_positions:\n return ((span.start, span.start_sub), (-span.end, -span.end_sub), hierarchy_index)\n else:\n return (span.start, -span.end, hierarchy_index)\n if (self.start, self.start_sub) == (self.end, self.end_sub) or (other_span.start, other_span.start_sub) == (other_span.end, other_span.end_sub):\n sort_key1 = get_sort_key(self, empty_span=True)\n sort_key2 = get_sort_key(other_span, empty_span=True)\n elif self.start_sub and other_span.start_sub:\n sort_key1 = get_sort_key(self, sub_positions=True)\n sort_key2 = get_sort_key(other_span, sub_positions=True)\n else:\n sort_key1 = get_sort_key(self)\n sort_key2 = get_sort_key(other_span)\n return sort_key1 < sort_key2\n<|end_body_3|>\n", "revision_id": "d3eb0db9de7fca6b6945192dd7f0c9e4bbeebb55", "skeleton": "<|skeleton|>\nclass Span:\n \"\"\"Object to store span information.\"\"\"\n\n def __init__(self, name, index, start, end, export_names, is_header):\n \"\"\"Set attributes.\"\"\"\n <|body_0|>\n\n def set_node(self, parent_node=None):\n \"\"\"Create an XML node under parent_node.\"\"\"\n <|body_1|>\n\n def __repr__(self):\n \"\"\"Stringify the most interesting span info (for debugging mostly).\"\"\"\n <|body_2|>\n\n def __lt__(self, other_span):\n \"\"\"Return True if other_span comes after this span. Sort spans according to their position and hierarchy. Sort by: 1. start position (smaller indices first) 2. end position (larger indices first) 3. 
the calculated element hierarchy\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Span:\n \"\"\"Object to store span information.\"\"\"\n\n def __init__(self, name, index, start, end, export_names, is_header):\n \"\"\"Set attributes.\"\"\"\n self.name = name\n self.index = index\n self.start = start[0]\n self.end = end[0]\n self.start_sub = start[1] if len(start) > 1 else False\n self.end_sub = end[1] if len(end) > 1 else False\n self.export = export_names.get(self.name, self.name)\n self.is_header = is_header\n self.node = None\n self.overlap_id = None\n\n def set_node(self, parent_node=None):\n \"\"\"Create an XML node under parent_node.\"\"\"\n if parent_node is not None:\n self.node = etree.SubElement(parent_node, self.export)\n else:\n self.node = etree.Element(self.export)\n\n def __repr__(self):\n \"\"\"Stringify the most interesting span info (for debugging mostly).\"\"\"\n if self.export != self.name:\n return '<%s/%s %s %s-%s>' % (self.name, self.export, self.index, self.start, self.end)\n return '<%s %s %s-%s>' % (self.name, self.index, self.start, self.end)\n\n def __lt__(self, other_span):\n \"\"\"Return True if other_span comes after this span. Sort spans according to their position and hierarchy. Sort by: 1. start position (smaller indices first) 2. end position (larger indices first) 3. the calculated element hierarchy\"\"\"\n def get_sort_key(span, sub_positions=False, empty_span=False):\n \"\"\"Return a sort key for span which makes span comparison possible.\"\"\"\n hierarchy_index = elem_hierarchy.index(span.name) if span.name in elem_hierarchy else -1\n if empty_span:\n if sub_positions:\n return ((span.start, span.start_sub), hierarchy_index, (span.end, span.end_sub))\n else:\n return (span.start, hierarchy_index, span.end)\n elif sub_positions:\n return ((span.start, span.start_sub), (-span.end, -span.end_sub), hierarchy_index)\n else:\n return (span.start, -span.end, hierarchy_index)\n if (self.start, self.start_sub) == (self.end, self.end_sub) or (other_span.start, other_span.start_sub) == (other_span.end, other_span.end_sub):\n sort_key1 = get_sort_key(self, empty_span=True)\n sort_key2 = get_sort_key(other_span, empty_span=True)\n elif self.start_sub and other_span.start_sub:\n sort_key1 = get_sort_key(self, sub_positions=True)\n sort_key2 = get_sort_key(other_span, sub_positions=True)\n else:\n sort_key1 = get_sort_key(self)\n sort_key2 = get_sort_key(other_span)\n return sort_key1 < sort_key2\n", "source": "the_stack_v2_python_sparse", "source_path": "sparv/api/util/export.py", "source_repo": "spraakbanken/sparv-pipeline", "split": "val", "star_events_count": 22}
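A standalone sketch of the ordering used by Span.__lt__ in the record above: a key tuple of (start ascending, negated end, hierarchy index) makes enclosing spans sort before the spans they contain, with the element hierarchy breaking ties. DemoSpan and the hierarchy list here are illustrative stand-ins for the record's Span class and its module-level elem_hierarchy.

from collections import namedtuple

DemoSpan = namedtuple('DemoSpan', 'name start end')
elem_hierarchy = ['text', 'paragraph', 'sentence', 'token']  # assumed ordering

def sort_key(span):
    # Negating the end makes longer (enclosing) spans compare smaller.
    h = elem_hierarchy.index(span.name) if span.name in elem_hierarchy else -1
    return (span.start, -span.end, h)

spans = [DemoSpan('token', 0, 1), DemoSpan('sentence', 0, 5), DemoSpan('paragraph', 0, 5)]
print([s.name for s in sorted(spans, key=sort_key)])
# ['paragraph', 'sentence', 'token']: wider spans first, hierarchy breaks the tie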
{"blob_id": "9215a50d8192bf286b22af31e6d95d21ec732873", "bodies": ["if not isinstance(path, str):\n path = path.decode()\ntry:\n probe = ffmpeg.probe(path)\nexcept ffmpeg._run.Error as e:\n raise Exception('An error occurs with ffprobe (see ffprobe output below)\\n\\n{}'.format(e.stderr.decode()))\nif 'streams' not in probe or len(probe['streams']) == 0:\n raise Exception('No stream was found with ffprobe')\nmetadata = next((stream for stream in probe['streams'] if stream['codec_type'] == 'audio'))\nn_channels = metadata['channels']\nif sample_rate is None:\n sample_rate = metadata['sample_rate']\noutput_kwargs = {'format': 'f32le', 'ar': sample_rate}\nif duration is not None:\n output_kwargs['t'] = _to_ffmpeg_time(duration)\nif offset is not None:\n output_kwargs['ss'] = _to_ffmpeg_time(offset)\nprocess = ffmpeg.input(path).output('pipe:', **output_kwargs).run_async(pipe_stdout=True, pipe_stderr=True)\nbuffer, _ = process.communicate()\nwaveform = np.frombuffer(buffer, dtype='\n if not isinstance(path, str):\n path = path.decode()\n try:\n probe = ffmpeg.probe(path)\n except ffmpeg._run.Error as e:\n raise Exception('An error occurs with ffprobe (see ffprobe output below)\\n\\n{}'.format(e.stderr.decode()))\n if 'streams' not in probe or len(probe['streams']) == 0:\n raise Exception('No stream was found with ffprobe')\n metadata = next((stream for stream in probe['streams'] if stream['codec_type'] == 'audio'))\n n_channels = metadata['channels']\n if sample_rate is None:\n sample_rate = metadata['sample_rate']\n output_kwargs = {'format': 'f32le', 'ar': sample_rate}\n if duration is not None:\n output_kwargs['t'] = _to_ffmpeg_time(duration)\n if offset is not None:\n output_kwargs['ss'] = _to_ffmpeg_time(offset)\n process = ffmpeg.input(path).output('pipe:', **output_kwargs).run_async(pipe_stdout=True, pipe_stderr=True)\n buffer, _ = process.communicate()\n waveform = np.frombuffer(buffer, dtype='\n\n<|body_start_1|>\n directory = os.path.dirname(path)\n if not os.path.exists(directory):\n raise IOError(f'output directory does not exists: {directory}')\n input_kwargs = {'ar': sample_rate, 'ac': data.shape[1]}\n output_kwargs = {'ar': sample_rate, 'strict': '-2'}\n if bitrate:\n output_kwargs['audio_bitrate'] = bitrate\n if codec is not None and codec != 'wav':\n output_kwargs['codec'] = _to_ffmpeg_codec(codec)\n process = ffmpeg.input('pipe:', format='f32le', **input_kwargs).output(path, **output_kwargs).overwrite_output().run_async(pipe_stdin=True, pipe_stderr=True, quiet=True)\n try:\n process.stdin.write(data.astype('\n", "class_docstring": "[An AudioAdapter implementation that use FFMPEG binary through subprocess in order to perform I/O operation for audio processing. When created, FFMPEG binary path will be checked and expended, raising exception if not found. Such path could be infered using FFMPEG_PATH environment variable.]", "class_name": "FFMPEGProcessAudioAdapter", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FFMPEGProcessAudioAdapter:\n \"\"\"[An AudioAdapter implementation that use FFMPEG binary through subprocess in order to perform I/O operation for audio processing. When created, FFMPEG binary path will be checked and expended, raising exception if not found. 
Such path could be infered using FFMPEG_PATH environment variable.]\"\"\"\n\n def load(self, path, offset=None, duration=None, sample_rate=None, dtype=np.float32):\n \"\"\"[Loads the audio file denoted by the given path and returns it data as a waveform.] Arguments: path {[type]} -- [Path of the audio file to load data from.] Keyword Arguments: offset {[type]} -- [(Optional) Start offset to load from in seconds.] (default: {None}) duration {[type]} -- [(Optional) Duration to load in seconds.] (default: {None}) sample_rate {[type]} -- [(Optional) Sample rate to load audio with.] (default: {None}) dtype {[type]} -- [(Optional) Numpy data type to use, default to float32.] (default: {np.float32})\"\"\"\n <|body_0|>\n\n def save(self, path, data, sample_rate, codec=None, bitrate=None):\n \"\"\"[Write waveform data to the file denoted by the given path using FFMPEG process.] Arguments: path {[type]} -- [Path of the audio file to save data in.] data {[type]} -- [Waveform data to write.] sample_rate {[type]} -- [Sample rate to write file in.] Keyword Arguments: codec {[type]} -- [(Optional) Writing codec to use.] (default: {None}) bitrate {[type]} -- [(Optional) Bitrate of the written audio file.] (default: {None})\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not isinstance(path, str):\n path = path.decode()\n try:\n probe = ffmpeg.probe(path)\n except ffmpeg._run.Error as e:\n raise Exception('An error occurs with ffprobe (see ffprobe output below)\\n\\n{}'.format(e.stderr.decode()))\n if 'streams' not in probe or len(probe['streams']) == 0:\n raise Exception('No stream was found with ffprobe')\n metadata = next((stream for stream in probe['streams'] if stream['codec_type'] == 'audio'))\n n_channels = metadata['channels']\n if sample_rate is None:\n sample_rate = metadata['sample_rate']\n output_kwargs = {'format': 'f32le', 'ar': sample_rate}\n if duration is not None:\n output_kwargs['t'] = _to_ffmpeg_time(duration)\n if offset is not None:\n output_kwargs['ss'] = _to_ffmpeg_time(offset)\n process = ffmpeg.input(path).output('pipe:', **output_kwargs).run_async(pipe_stdout=True, pipe_stderr=True)\n buffer, _ = process.communicate()\n waveform = np.frombuffer(buffer, dtype='\n\n<|body_start_1|>\n directory = os.path.dirname(path)\n if not os.path.exists(directory):\n raise IOError(f'output directory does not exists: {directory}')\n input_kwargs = {'ar': sample_rate, 'ac': data.shape[1]}\n output_kwargs = {'ar': sample_rate, 'strict': '-2'}\n if bitrate:\n output_kwargs['audio_bitrate'] = bitrate\n if codec is not None and codec != 'wav':\n output_kwargs['codec'] = _to_ffmpeg_codec(codec)\n process = ffmpeg.input('pipe:', format='f32le', **input_kwargs).output(path, **output_kwargs).overwrite_output().run_async(pipe_stdin=True, pipe_stderr=True, quiet=True)\n try:\n process.stdin.write(data.astype('\n", "id": "stack_v2_sparse_classes_75kplus_val_000409", "length_bytes": 5005, "license_type": "no_license", "methods": [{"docstring": "[Loads the audio file denoted by the given path and returns it data as a waveform.] Arguments: path {[type]} -- [Path of the audio file to load data from.] Keyword Arguments: offset {[type]} -- [(Optional) Start offset to load from in seconds.] (default: {None}) duration {[type]} -- [(Optional) Duration to load in seconds.] (default: {None}) sample_rate {[type]} -- [(Optional) Sample rate to load audio with.] (default: {None}) dtype {[type]} -- [(Optional) Numpy data type to use, default to float32.] 
(default: {np.float32})", "name": "load", "signature": "def load(self, path, offset=None, duration=None, sample_rate=None, dtype=np.float32)"}, {"docstring": "[Write waveform data to the file denoted by the given path using FFMPEG process.] Arguments: path {[type]} -- [Path of the audio file to save data in.] data {[type]} -- [Waveform data to write.] sample_rate {[type]} -- [Sample rate to write file in.] Keyword Arguments: codec {[type]} -- [(Optional) Writing codec to use.] (default: {None}) bitrate {[type]} -- [(Optional) Bitrate of the written audio file.] (default: {None})", "name": "save", "signature": "def save(self, path, data, sample_rate, codec=None, bitrate=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_000491", "prompt": "Implement the Python class `FFMPEGProcessAudioAdapter` described below.\n\nClass description:\n[An AudioAdapter implementation that use FFMPEG binary through subprocess in order to perform I/O operation for audio processing. When created, FFMPEG binary path will be checked and expended, raising exception if not found. Such path could be infered using FFMPEG_PATH environment variable.]\n\nMethod signatures and docstrings:\n- def load(self, path, offset=None, duration=None, sample_rate=None, dtype=np.float32): [Loads the audio file denoted by the given path and returns it data as a waveform.] Arguments: path {[type]} -- [Path of the audio file to load data from.] Keyword Arguments: offset {[type]} -- [(Optional) Start offset to load from in seconds.] (default: {None}) duration {[type]} -- [(Optional) Duration to load in seconds.] (default: {None}) sample_rate {[type]} -- [(Optional) Sample rate to load audio with.] (default: {None}) dtype {[type]} -- [(Optional) Numpy data type to use, default to float32.] (default: {np.float32})\n- def save(self, path, data, sample_rate, codec=None, bitrate=None): [Write waveform data to the file denoted by the given path using FFMPEG process.] Arguments: path {[type]} -- [Path of the audio file to save data in.] data {[type]} -- [Waveform data to write.] sample_rate {[type]} -- [Sample rate to write file in.] Keyword Arguments: codec {[type]} -- [(Optional) Writing codec to use.] (default: {None}) bitrate {[type]} -- [(Optional) Bitrate of the written audio file.] (default: {None})", "prompted_full_text": "Implement the Python class `FFMPEGProcessAudioAdapter` described below.\n\nClass description:\n[An AudioAdapter implementation that use FFMPEG binary through subprocess in order to perform I/O operation for audio processing. When created, FFMPEG binary path will be checked and expended, raising exception if not found. Such path could be infered using FFMPEG_PATH environment variable.]\n\nMethod signatures and docstrings:\n- def load(self, path, offset=None, duration=None, sample_rate=None, dtype=np.float32): [Loads the audio file denoted by the given path and returns it data as a waveform.] Arguments: path {[type]} -- [Path of the audio file to load data from.] Keyword Arguments: offset {[type]} -- [(Optional) Start offset to load from in seconds.] (default: {None}) duration {[type]} -- [(Optional) Duration to load in seconds.] (default: {None}) sample_rate {[type]} -- [(Optional) Sample rate to load audio with.] (default: {None}) dtype {[type]} -- [(Optional) Numpy data type to use, default to float32.] (default: {np.float32})\n- def save(self, path, data, sample_rate, codec=None, bitrate=None): [Write waveform data to the file denoted by the given path using FFMPEG process.] 
Arguments: path {[type]} -- [Path of the audio file to save data in.] data {[type]} -- [Waveform data to write.] sample_rate {[type]} -- [Sample rate to write file in.] Keyword Arguments: codec {[type]} -- [(Optional) Writing codec to use.] (default: {None}) bitrate {[type]} -- [(Optional) Bitrate of the written audio file.] (default: {None})\n\n<|skeleton|>\nclass FFMPEGProcessAudioAdapter:\n \"\"\"[An AudioAdapter implementation that use FFMPEG binary through subprocess in order to perform I/O operation for audio processing. When created, FFMPEG binary path will be checked and expended, raising exception if not found. Such path could be infered using FFMPEG_PATH environment variable.]\"\"\"\n\n def load(self, path, offset=None, duration=None, sample_rate=None, dtype=np.float32):\n \"\"\"[Loads the audio file denoted by the given path and returns it data as a waveform.] Arguments: path {[type]} -- [Path of the audio file to load data from.] Keyword Arguments: offset {[type]} -- [(Optional) Start offset to load from in seconds.] (default: {None}) duration {[type]} -- [(Optional) Duration to load in seconds.] (default: {None}) sample_rate {[type]} -- [(Optional) Sample rate to load audio with.] (default: {None}) dtype {[type]} -- [(Optional) Numpy data type to use, default to float32.] (default: {np.float32})\"\"\"\n <|body_0|>\n\n def save(self, path, data, sample_rate, codec=None, bitrate=None):\n \"\"\"[Write waveform data to the file denoted by the given path using FFMPEG process.] Arguments: path {[type]} -- [Path of the audio file to save data in.] data {[type]} -- [Waveform data to write.] sample_rate {[type]} -- [Sample rate to write file in.] Keyword Arguments: codec {[type]} -- [(Optional) Writing codec to use.] (default: {None}) bitrate {[type]} -- [(Optional) Bitrate of the written audio file.] 
(default: {None})\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not isinstance(path, str):\n path = path.decode()\n try:\n probe = ffmpeg.probe(path)\n except ffmpeg._run.Error as e:\n raise Exception('An error occurs with ffprobe (see ffprobe output below)\\n\\n{}'.format(e.stderr.decode()))\n if 'streams' not in probe or len(probe['streams']) == 0:\n raise Exception('No stream was found with ffprobe')\n metadata = next((stream for stream in probe['streams'] if stream['codec_type'] == 'audio'))\n n_channels = metadata['channels']\n if sample_rate is None:\n sample_rate = metadata['sample_rate']\n output_kwargs = {'format': 'f32le', 'ar': sample_rate}\n if duration is not None:\n output_kwargs['t'] = _to_ffmpeg_time(duration)\n if offset is not None:\n output_kwargs['ss'] = _to_ffmpeg_time(offset)\n process = ffmpeg.input(path).output('pipe:', **output_kwargs).run_async(pipe_stdout=True, pipe_stderr=True)\n buffer, _ = process.communicate()\n waveform = np.frombuffer(buffer, dtype='\n\n<|body_start_1|>\n directory = os.path.dirname(path)\n if not os.path.exists(directory):\n raise IOError(f'output directory does not exists: {directory}')\n input_kwargs = {'ar': sample_rate, 'ac': data.shape[1]}\n output_kwargs = {'ar': sample_rate, 'strict': '-2'}\n if bitrate:\n output_kwargs['audio_bitrate'] = bitrate\n if codec is not None and codec != 'wav':\n output_kwargs['codec'] = _to_ffmpeg_codec(codec)\n process = ffmpeg.input('pipe:', format='f32le', **input_kwargs).output(path, **output_kwargs).overwrite_output().run_async(pipe_stdin=True, pipe_stderr=True, quiet=True)\n try:\n process.stdin.write(data.astype('\n", "revision_id": "d4b4f60b0a00b985da7a97f03596a855532240f2", "skeleton": "<|skeleton|>\nclass FFMPEGProcessAudioAdapter:\n \"\"\"[An AudioAdapter implementation that use FFMPEG binary through subprocess in order to perform I/O operation for audio processing. When created, FFMPEG binary path will be checked and expended, raising exception if not found. Such path could be infered using FFMPEG_PATH environment variable.]\"\"\"\n\n def load(self, path, offset=None, duration=None, sample_rate=None, dtype=np.float32):\n \"\"\"[Loads the audio file denoted by the given path and returns it data as a waveform.] Arguments: path {[type]} -- [Path of the audio file to load data from.] Keyword Arguments: offset {[type]} -- [(Optional) Start offset to load from in seconds.] (default: {None}) duration {[type]} -- [(Optional) Duration to load in seconds.] (default: {None}) sample_rate {[type]} -- [(Optional) Sample rate to load audio with.] (default: {None}) dtype {[type]} -- [(Optional) Numpy data type to use, default to float32.] (default: {np.float32})\"\"\"\n <|body_0|>\n\n def save(self, path, data, sample_rate, codec=None, bitrate=None):\n \"\"\"[Write waveform data to the file denoted by the given path using FFMPEG process.] Arguments: path {[type]} -- [Path of the audio file to save data in.] data {[type]} -- [Waveform data to write.] sample_rate {[type]} -- [Sample rate to write file in.] Keyword Arguments: codec {[type]} -- [(Optional) Writing codec to use.] (default: {None}) bitrate {[type]} -- [(Optional) Bitrate of the written audio file.] 
(default: {None})\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class FFMPEGProcessAudioAdapter:\n \"\"\"[An AudioAdapter implementation that use FFMPEG binary through subprocess in order to perform I/O operation for audio processing. When created, FFMPEG binary path will be checked and expended, raising exception if not found. Such path could be infered using FFMPEG_PATH environment variable.]\"\"\"\n\n def load(self, path, offset=None, duration=None, sample_rate=None, dtype=np.float32):\n \"\"\"[Loads the audio file denoted by the given path and returns it data as a waveform.] Arguments: path {[type]} -- [Path of the audio file to load data from.] Keyword Arguments: offset {[type]} -- [(Optional) Start offset to load from in seconds.] (default: {None}) duration {[type]} -- [(Optional) Duration to load in seconds.] (default: {None}) sample_rate {[type]} -- [(Optional) Sample rate to load audio with.] (default: {None}) dtype {[type]} -- [(Optional) Numpy data type to use, default to float32.] (default: {np.float32})\"\"\"\n if not isinstance(path, str):\n path = path.decode()\n try:\n probe = ffmpeg.probe(path)\n except ffmpeg._run.Error as e:\n raise Exception('An error occurs with ffprobe (see ffprobe output below)\\n\\n{}'.format(e.stderr.decode()))\n if 'streams' not in probe or len(probe['streams']) == 0:\n raise Exception('No stream was found with ffprobe')\n metadata = next((stream for stream in probe['streams'] if stream['codec_type'] == 'audio'))\n n_channels = metadata['channels']\n if sample_rate is None:\n sample_rate = metadata['sample_rate']\n output_kwargs = {'format': 'f32le', 'ar': sample_rate}\n if duration is not None:\n output_kwargs['t'] = _to_ffmpeg_time(duration)\n if offset is not None:\n output_kwargs['ss'] = _to_ffmpeg_time(offset)\n process = ffmpeg.input(path).output('pipe:', **output_kwargs).run_async(pipe_stdout=True, pipe_stderr=True)\n buffer, _ = process.communicate()\n waveform = np.frombuffer(buffer, dtype='\n if not request.user.has_perm('auth.view_group'):\n raise PermissionDenied()\n role_name = request.GET.get('name', None)\n data_source = Group.objects.prefetch_related('permissions').prefetch_related('user_set__groups')\n if role_name:\n self.queryset = data_source.filter(name=role_name)\n else:\n self.queryset = data_source.all()\n data = []\n for i in self.queryset:\n data.append({'id': i.id, 'name': i.name, 'default': i.default_group.default, 'permissions': build_cn_permission_list([p.codename for p in i.permissions.all()], 'short'), 'users': i.user_set.all().values('username', 'first_name')})\n return Response({'code': 0, 'msg': 'success', 'data': data})\n<|end_body_0|>\n\n<|body_start_1|>\n fields = [('*name', str, (verify_max_length, 20))]\n data = validate_post_data(request.body, fields)\n self.queryset, _ = Group.objects.update_or_create(name=data['name'])\n return Response({'code': 0, 'msg': 'success'}, status=HTTP_201_CREATED)\n<|end_body_1|>\n\n<|body_start_2|>\n group_id = request.GET.get('id', None)\n try:\n self.queryset = Group.objects.get(pk=int(group_id))\n except TypeError:\n raise ParseError('id is not a number')\n except Group.DoesNotExist:\n raise NotFound('not found this group')\n self.queryset.delete()\n return Response({'code': 0, 'msg': 'success'})\n<|end_body_2|>\n", "class_docstring": "", "class_name": "GroupEndpoint", "detected_licenses": [], "format_version": 
"stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GroupEndpoint:\n\n def get(self, request):\n \"\"\"查询所有可用的角色(组)\"\"\"\n <|body_0|>\n\n def post(self, request):\n \"\"\"新增一个角色\"\"\"\n <|body_1|>\n\n def delete(self, request):\n \"\"\"删除一个角色\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not request.user.has_perm('auth.view_group'):\n raise PermissionDenied()\n role_name = request.GET.get('name', None)\n data_source = Group.objects.prefetch_related('permissions').prefetch_related('user_set__groups')\n if role_name:\n self.queryset = data_source.filter(name=role_name)\n else:\n self.queryset = data_source.all()\n data = []\n for i in self.queryset:\n data.append({'id': i.id, 'name': i.name, 'default': i.default_group.default, 'permissions': build_cn_permission_list([p.codename for p in i.permissions.all()], 'short'), 'users': i.user_set.all().values('username', 'first_name')})\n return Response({'code': 0, 'msg': 'success', 'data': data})\n<|end_body_0|>\n\n<|body_start_1|>\n fields = [('*name', str, (verify_max_length, 20))]\n data = validate_post_data(request.body, fields)\n self.queryset, _ = Group.objects.update_or_create(name=data['name'])\n return Response({'code': 0, 'msg': 'success'}, status=HTTP_201_CREATED)\n<|end_body_1|>\n\n<|body_start_2|>\n group_id = request.GET.get('id', None)\n try:\n self.queryset = Group.objects.get(pk=int(group_id))\n except TypeError:\n raise ParseError('id is not a number')\n except Group.DoesNotExist:\n raise NotFound('not found this group')\n self.queryset.delete()\n return Response({'code': 0, 'msg': 'success'})\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000410", "length_bytes": 10426, "license_type": "no_license", "methods": [{"docstring": "查询所有可用的角色(组)", "name": "get", "signature": "def get(self, request)"}, {"docstring": "新增一个角色", "name": "post", "signature": "def post(self, request)"}, {"docstring": "删除一个角色", "name": "delete", "signature": "def delete(self, request)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_036471", "prompt": "Implement the Python class `GroupEndpoint` described below.\n\nClass description:\nImplement the GroupEndpoint class.\n\nMethod signatures and docstrings:\n- def get(self, request): 查询所有可用的角色(组)\n- def post(self, request): 新增一个角色\n- def delete(self, request): 删除一个角色", "prompted_full_text": "Implement the Python class `GroupEndpoint` described below.\n\nClass description:\nImplement the GroupEndpoint class.\n\nMethod signatures and docstrings:\n- def get(self, request): 查询所有可用的角色(组)\n- def post(self, request): 新增一个角色\n- def delete(self, request): 删除一个角色\n\n<|skeleton|>\nclass GroupEndpoint:\n\n def get(self, request):\n \"\"\"查询所有可用的角色(组)\"\"\"\n <|body_0|>\n\n def post(self, request):\n \"\"\"新增一个角色\"\"\"\n <|body_1|>\n\n def delete(self, request):\n \"\"\"删除一个角色\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not request.user.has_perm('auth.view_group'):\n raise PermissionDenied()\n role_name = request.GET.get('name', None)\n data_source = Group.objects.prefetch_related('permissions').prefetch_related('user_set__groups')\n if role_name:\n self.queryset = data_source.filter(name=role_name)\n else:\n self.queryset = data_source.all()\n data = []\n for i in self.queryset:\n data.append({'id': i.id, 'name': i.name, 'default': i.default_group.default, 'permissions': build_cn_permission_list([p.codename for p in i.permissions.all()], 'short'), 'users': i.user_set.all().values('username', 'first_name')})\n return 
Response({'code': 0, 'msg': 'success', 'data': data})\n<|end_body_0|>\n\n<|body_start_1|>\n fields = [('*name', str, (verify_max_length, 20))]\n data = validate_post_data(request.body, fields)\n self.queryset, _ = Group.objects.update_or_create(name=data['name'])\n return Response({'code': 0, 'msg': 'success'}, status=HTTP_201_CREATED)\n<|end_body_1|>\n\n<|body_start_2|>\n group_id = request.GET.get('id', None)\n try:\n self.queryset = Group.objects.get(pk=int(group_id))\n except TypeError:\n raise ParseError('id is not a number')\n except Group.DoesNotExist:\n raise NotFound('not found this group')\n self.queryset.delete()\n return Response({'code': 0, 'msg': 'success'})\n<|end_body_2|>\n", "revision_id": "41769039d6045306e3045bef9635126c9de99717", "skeleton": "<|skeleton|>\nclass GroupEndpoint:\n\n def get(self, request):\n \"\"\"查询所有可用的角色(组)\"\"\"\n <|body_0|>\n\n def post(self, request):\n \"\"\"新增一个角色\"\"\"\n <|body_1|>\n\n def delete(self, request):\n \"\"\"删除一个角色\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class GroupEndpoint:\n def get(self, request):\n \"\"\"查询所有可用的角色(组)\"\"\"\n if not request.user.has_perm('auth.view_group'):\n raise PermissionDenied()\n role_name = request.GET.get('name', None)\n data_source = Group.objects.prefetch_related('permissions').prefetch_related('user_set__groups')\n if role_name:\n self.queryset = data_source.filter(name=role_name)\n else:\n self.queryset = data_source.all()\n data = []\n for i in self.queryset:\n data.append({'id': i.id, 'name': i.name, 'default': i.default_group.default, 'permissions': build_cn_permission_list([p.codename for p in i.permissions.all()], 'short'), 'users': i.user_set.all().values('username', 'first_name')})\n return Response({'code': 0, 'msg': 'success', 'data': data})\n\n def post(self, request):\n \"\"\"新增一个角色\"\"\"\n fields = [('*name', str, (verify_max_length, 20))]\n data = validate_post_data(request.body, fields)\n self.queryset, _ = Group.objects.update_or_create(name=data['name'])\n return Response({'code': 0, 'msg': 'success'}, status=HTTP_201_CREATED)\n\n def delete(self, request):\n \"\"\"删除一个角色\"\"\"\n group_id = request.GET.get('id', None)\n try:\n self.queryset = Group.objects.get(pk=int(group_id))\n except TypeError:\n raise ParseError('id is not a number')\n except Group.DoesNotExist:\n raise NotFound('not found this group')\n self.queryset.delete()\n return Response({'code': 0, 'msg': 'success'})\n", "source": "the_stack_v2_python_sparse", "source_path": "user/user_group.py", "source_repo": "dengguibao/oss", "split": "val", "star_events_count": 0}
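One detail worth noting in the delete() body above: int(group_id) raises TypeError when the 'id' query parameter is absent (None), but ValueError when it is a non-numeric string, so only the missing-parameter case reaches the ParseError branch as written. A minimal standalone guard covering both cases, with a plain ValueError standing in for DRF's ParseError:

def parse_group_id(raw):
    """Parse a group id, rejecting both missing and non-numeric input."""
    try:
        return int(raw)
    except (TypeError, ValueError):
        raise ValueError('id is not a number')

print(parse_group_id('7'))   # 7
for bad in (None, 'abc'):
    try:
        parse_group_id(bad)
    except ValueError as e:
        print(bad, '->', e)  # both inputs are rejected uniformly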
{"blob_id": "687c2a9ce6d6e85c7ec828cfd17ddaee1e8c5e94", "bodies": ["data = parser.parse_args()\nproject_type = data.get('type')\nif project_type == 'annotation':\n return project_dao.all(project_type=TASK_ANNOTATION)\nelif project_type == 'fusion':\n return project_dao.all(project_type=TASK_FUSION)\nreturn project_dao.all()", "req_dict = api.payload\nname = req_dict.get('name')\nif not name:\n return abort(400, 'Invalid parameters.')\nreturn project_dao.create(api.payload)"], "bodies_text": "<|body_start_0|>\n data = parser.parse_args()\n project_type = data.get('type')\n if project_type == 'annotation':\n return project_dao.all(project_type=TASK_ANNOTATION)\n elif project_type == 'fusion':\n return project_dao.all(project_type=TASK_FUSION)\n return project_dao.all()\n<|end_body_0|>\n\n<|body_start_1|>\n req_dict = api.payload\n name = req_dict.get('name')\n if not name:\n return abort(400, 'Invalid parameters.')\n return project_dao.create(api.payload)\n<|end_body_1|>\n", "class_docstring": "Shows a list of all projects, and lets you to add new projects.", "class_name": "ProjectList", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ProjectList:\n \"\"\"Shows a list of all projects, and lets you to add new projects.\"\"\"\n\n def get(self):\n \"\"\"List all projects\"\"\"\n <|body_0|>\n\n def post(self):\n \"\"\"Create new project\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n data = parser.parse_args()\n project_type = data.get('type')\n if project_type == 'annotation':\n return project_dao.all(project_type=TASK_ANNOTATION)\n elif project_type == 'fusion':\n return project_dao.all(project_type=TASK_FUSION)\n return project_dao.all()\n<|end_body_0|>\n\n<|body_start_1|>\n req_dict = api.payload\n name = req_dict.get('name')\n if not name:\n return abort(400, 'Invalid parameters.')\n return project_dao.create(api.payload)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000411", "length_bytes": 5767, "license_type": "permissive", "methods": [{"docstring": "List all projects", "name": "get", "signature": "def get(self)"}, {"docstring": "Create new project", "name": "post", "signature": "def post(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_049692", "prompt": "Implement the Python class `ProjectList` described below.\n\nClass description:\nShows a list of all projects, and lets you to add new projects.\n\nMethod signatures and docstrings:\n- def get(self): List all projects\n- def post(self): Create new project", "prompted_full_text": "Implement the Python class `ProjectList` described below.\n\nClass description:\nShows a list of all projects, and lets you to add new projects.\n\nMethod signatures and docstrings:\n- def get(self): List all projects\n- def post(self): Create new project\n\n<|skeleton|>\nclass ProjectList:\n \"\"\"Shows a list of all projects, and lets you to add new projects.\"\"\"\n\n def get(self):\n \"\"\"List all projects\"\"\"\n <|body_0|>\n\n def post(self):\n \"\"\"Create new project\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n data = parser.parse_args()\n project_type = data.get('type')\n if project_type == 'annotation':\n return project_dao.all(project_type=TASK_ANNOTATION)\n elif project_type == 'fusion':\n return project_dao.all(project_type=TASK_FUSION)\n return project_dao.all()\n<|end_body_0|>\n\n<|body_start_1|>\n req_dict = api.payload\n name = req_dict.get('name')\n if not name:\n return abort(400, 'Invalid 
parameters.')\n return project_dao.create(api.payload)\n<|end_body_1|>\n", "revision_id": "953c2916c38906b0941c015136f80b2e64dd94f3", "skeleton": "<|skeleton|>\nclass ProjectList:\n \"\"\"Shows a list of all projects, and lets you to add new projects.\"\"\"\n\n def get(self):\n \"\"\"List all projects\"\"\"\n <|body_0|>\n\n def post(self):\n \"\"\"Create new project\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ProjectList:\n \"\"\"Shows a list of all projects, and lets you to add new projects.\"\"\"\n\n def get(self):\n \"\"\"List all projects\"\"\"\n data = parser.parse_args()\n project_type = data.get('type')\n if project_type == 'annotation':\n return project_dao.all(project_type=TASK_ANNOTATION)\n elif project_type == 'fusion':\n return project_dao.all(project_type=TASK_FUSION)\n return project_dao.all()\n\n def post(self):\n \"\"\"Create new project\"\"\"\n req_dict = api.payload\n name = req_dict.get('name')\n if not name:\n return abort(400, 'Invalid parameters.')\n return project_dao.create(api.payload)\n", "source": "the_stack_v2_python_sparse", "source_path": "kgeditor/api_1_0/project.py", "source_repo": "LaiXinyi823/KGEditor", "split": "val", "star_events_count": 0}
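The get/post bodies above lean on module-level plumbing (parser, api, project_dao, the TASK_* constants) that sits outside the record. A hedged sketch of the kind of Flask-RESTX wiring they appear to assume; the route, argument name, and the stubbed-out DAO call are guesses for illustration, not the repository's actual code.

from flask import Flask
from flask_restx import Api, Resource, reqparse

app = Flask(__name__)
api = Api(app)
parser = reqparse.RequestParser()
parser.add_argument('type', type=str, location='args')  # ?type=annotation|fusion

@api.route('/projects')
class ProjectList(Resource):
    def get(self):
        data = parser.parse_args()
        # A real implementation would dispatch to project_dao here.
        return {'filtered_by': data.get('type')}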
{"blob_id": "0b1dbf783dd0c563ed306300d36c881ac0e8e75c", "bodies": ["sites = Site.objects.filter(status=SiteStatusChoices.STATUS_ACTIVE)\nmissing_clusters = Site.objects.filter(clusters__isnull=True).exclude(tags__name__in=['no-cluster'])\nfor site in sites:\n if site in missing_clusters:\n self.log_failure(site, 'Missing VM cluster')\n else:\n self.log_success(site)", "sites = Site.objects.filter(status__in=[1, 2]).prefetch_related('tags').annotate(vm_count=Count('clusters__virtual_machines')).order_by('name')\nfor site in sites:\n tags = site.tags.names()\n desired_count = 2\n special_tag = ''\n if 'special_tag' in [tag for tag in tags]:\n desired_count = 3\n special_tag = ' special_tag'\n if not site.vm_count:\n self.log_failure(site, 'No VMs ({}/{})'.format(site.vm_count, desired_count))\n elif site.vm_count == desired_count:\n self.log_success(site)\n elif site.vm_count > desired_count:\n self.log_warning(site, 'Too many VMs ({}/{})'.format(site.vm_count, desired_count))\n elif site.vm_count < desired_count:\n self.log_warning(site, 'Too few VMs ({}/{}){}'.format(site.vm_count, desired_count, special_tag))\n else:\n self.log_info(site, 'Unknown status')"], "bodies_text": "<|body_start_0|>\n sites = Site.objects.filter(status=SiteStatusChoices.STATUS_ACTIVE)\n missing_clusters = Site.objects.filter(clusters__isnull=True).exclude(tags__name__in=['no-cluster'])\n for site in sites:\n if site in missing_clusters:\n self.log_failure(site, 'Missing VM cluster')\n else:\n self.log_success(site)\n<|end_body_0|>\n\n<|body_start_1|>\n sites = Site.objects.filter(status__in=[1, 2]).prefetch_related('tags').annotate(vm_count=Count('clusters__virtual_machines')).order_by('name')\n for site in sites:\n tags = site.tags.names()\n desired_count = 2\n special_tag = ''\n if 'special_tag' in [tag for tag in tags]:\n desired_count = 3\n special_tag = ' special_tag'\n if not site.vm_count:\n self.log_failure(site, 'No VMs ({}/{})'.format(site.vm_count, desired_count))\n elif site.vm_count == desired_count:\n self.log_success(site)\n elif site.vm_count > desired_count:\n self.log_warning(site, 'Too many VMs ({}/{})'.format(site.vm_count, desired_count))\n elif site.vm_count < desired_count:\n self.log_warning(site, 'Too few VMs ({}/{}){}'.format(site.vm_count, desired_count, special_tag))\n else:\n self.log_info(site, 'Unknown status')\n<|end_body_1|>\n", "class_docstring": "", "class_name": "VirtualizationReport", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass VirtualizationReport:\n\n def test_cluster_exists(self):\n \"\"\"Cluster exists for site.\"\"\"\n <|body_0|>\n\n def test_vms_exist(self):\n \"\"\"Correct number of VMs (account for special tag or not)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n sites = Site.objects.filter(status=SiteStatusChoices.STATUS_ACTIVE)\n missing_clusters = Site.objects.filter(clusters__isnull=True).exclude(tags__name__in=['no-cluster'])\n for site in sites:\n if site in missing_clusters:\n self.log_failure(site, 'Missing VM cluster')\n else:\n self.log_success(site)\n<|end_body_0|>\n\n<|body_start_1|>\n sites = Site.objects.filter(status__in=[1, 2]).prefetch_related('tags').annotate(vm_count=Count('clusters__virtual_machines')).order_by('name')\n for site in sites:\n tags = site.tags.names()\n desired_count = 2\n special_tag = ''\n if 'special_tag' in [tag for tag in tags]:\n desired_count = 3\n special_tag = ' special_tag'\n if not site.vm_count:\n self.log_failure(site, 'No VMs 
({}/{})'.format(site.vm_count, desired_count))\n elif site.vm_count == desired_count:\n self.log_success(site)\n elif site.vm_count > desired_count:\n self.log_warning(site, 'Too many VMs ({}/{})'.format(site.vm_count, desired_count))\n elif site.vm_count < desired_count:\n self.log_warning(site, 'Too few VMs ({}/{}){}'.format(site.vm_count, desired_count, special_tag))\n else:\n self.log_info(site, 'Unknown status')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000412", "length_bytes": 2158, "license_type": "permissive", "methods": [{"docstring": "Cluster exists for site.", "name": "test_cluster_exists", "signature": "def test_cluster_exists(self)"}, {"docstring": "Correct number of VMs (account for special tag or not)", "name": "test_vms_exist", "signature": "def test_vms_exist(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_044236", "prompt": "Implement the Python class `VirtualizationReport` described below.\n\nClass description:\nImplement the VirtualizationReport class.\n\nMethod signatures and docstrings:\n- def test_cluster_exists(self): Cluster exists for site.\n- def test_vms_exist(self): Correct number of VMs (account for special tag or not)", "prompted_full_text": "Implement the Python class `VirtualizationReport` described below.\n\nClass description:\nImplement the VirtualizationReport class.\n\nMethod signatures and docstrings:\n- def test_cluster_exists(self): Cluster exists for site.\n- def test_vms_exist(self): Correct number of VMs (account for special tag or not)\n\n<|skeleton|>\nclass VirtualizationReport:\n\n def test_cluster_exists(self):\n \"\"\"Cluster exists for site.\"\"\"\n <|body_0|>\n\n def test_vms_exist(self):\n \"\"\"Correct number of VMs (account for special tag or not)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n sites = Site.objects.filter(status=SiteStatusChoices.STATUS_ACTIVE)\n missing_clusters = Site.objects.filter(clusters__isnull=True).exclude(tags__name__in=['no-cluster'])\n for site in sites:\n if site in missing_clusters:\n self.log_failure(site, 'Missing VM cluster')\n else:\n self.log_success(site)\n<|end_body_0|>\n\n<|body_start_1|>\n sites = Site.objects.filter(status__in=[1, 2]).prefetch_related('tags').annotate(vm_count=Count('clusters__virtual_machines')).order_by('name')\n for site in sites:\n tags = site.tags.names()\n desired_count = 2\n special_tag = ''\n if 'special_tag' in [tag for tag in tags]:\n desired_count = 3\n special_tag = ' special_tag'\n if not site.vm_count:\n self.log_failure(site, 'No VMs ({}/{})'.format(site.vm_count, desired_count))\n elif site.vm_count == desired_count:\n self.log_success(site)\n elif site.vm_count > desired_count:\n self.log_warning(site, 'Too many VMs ({}/{})'.format(site.vm_count, desired_count))\n elif site.vm_count < desired_count:\n self.log_warning(site, 'Too few VMs ({}/{}){}'.format(site.vm_count, desired_count, special_tag))\n else:\n self.log_info(site, 'Unknown status')\n<|end_body_1|>\n", "revision_id": "0b074640df267065c1b748d4cbc1c2ef33a87f5b", "skeleton": "<|skeleton|>\nclass VirtualizationReport:\n\n def test_cluster_exists(self):\n \"\"\"Cluster exists for site.\"\"\"\n <|body_0|>\n\n def test_vms_exist(self):\n \"\"\"Correct number of VMs (account for special tag or not)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class VirtualizationReport:\n def 
test_cluster_exists(self):\n \"\"\"Cluster exists for site.\"\"\"\n sites = Site.objects.filter(status=SiteStatusChoices.STATUS_ACTIVE)\n missing_clusters = Site.objects.filter(clusters__isnull=True).exclude(tags__name__in=['no-cluster'])\n for site in sites:\n if site in missing_clusters:\n self.log_failure(site, 'Missing VM cluster')\n else:\n self.log_success(site)\n\n def test_vms_exist(self):\n \"\"\"Correct number of VMs (account for special tag or not)\"\"\"\n sites = Site.objects.filter(status__in=[1, 2]).prefetch_related('tags').annotate(vm_count=Count('clusters__virtual_machines')).order_by('name')\n for site in sites:\n tags = site.tags.names()\n desired_count = 2\n special_tag = ''\n if 'special_tag' in [tag for tag in tags]:\n desired_count = 3\n special_tag = ' special_tag'\n if not site.vm_count:\n self.log_failure(site, 'No VMs ({}/{})'.format(site.vm_count, desired_count))\n elif site.vm_count == desired_count:\n self.log_success(site)\n elif site.vm_count > desired_count:\n self.log_warning(site, 'Too many VMs ({}/{})'.format(site.vm_count, desired_count))\n elif site.vm_count < desired_count:\n self.log_warning(site, 'Too few VMs ({}/{}){}'.format(site.vm_count, desired_count, special_tag))\n else:\n self.log_info(site, 'Unknown status')\n", "source": "the_stack_v2_python_sparse", "source_path": "reports/virtualization-reports/vm_counts.py", "source_repo": "tyler-8/reports", "split": "val", "star_events_count": 0}
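The branching in test_vms_exist above is easier to eyeball as a pure function; this standalone restatement (illustrative names, not the NetBox report API) reproduces the same success/too-many/too-few decisions without the Django queryset.

def vm_status(vm_count, tags):
    # Sites carrying the special tag are expected to run one extra VM.
    desired = 3 if 'special_tag' in tags else 2
    if not vm_count:
        return 'failure: no VMs (0/{})'.format(desired)
    if vm_count == desired:
        return 'success'
    if vm_count > desired:
        return 'warning: too many VMs ({}/{})'.format(vm_count, desired)
    return 'warning: too few VMs ({}/{})'.format(vm_count, desired)

print(vm_status(2, []))               # success
print(vm_status(2, ['special_tag']))  # warning: too few VMs (2/3)
print(vm_status(4, []))               # warning: too many VMs (4/2)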
{"blob_id": "f0cd623056a2bb137ff21dcf8f159717f0f1d627", "bodies": ["ans = []\nstart = 0\nfor p, group in itertools.groupby([n - v for n, v in enumerate(nums)]):\n interval = len(list(group))\n end = start + interval\n if interval == 1:\n ans.append(str(nums[start]))\n else:\n ans.append('{}->{}'.format(nums[start], nums[end - 1]))\n start = end\nreturn ans", "start = 0\nans = []\nnums.append(float('INF'))\nfor i in range(1, len(nums)):\n if nums[i] > nums[i - 1] + 1:\n if i - 1 == start:\n ans.append(str(nums[start]))\n else:\n ans.append(str(nums[start]) + '->' + str(nums[i - 1]))\n start = i\nreturn ans"], "bodies_text": "<|body_start_0|>\n ans = []\n start = 0\n for p, group in itertools.groupby([n - v for n, v in enumerate(nums)]):\n interval = len(list(group))\n end = start + interval\n if interval == 1:\n ans.append(str(nums[start]))\n else:\n ans.append('{}->{}'.format(nums[start], nums[end - 1]))\n start = end\n return ans\n<|end_body_0|>\n\n<|body_start_1|>\n start = 0\n ans = []\n nums.append(float('INF'))\n for i in range(1, len(nums)):\n if nums[i] > nums[i - 1] + 1:\n if i - 1 == start:\n ans.append(str(nums[start]))\n else:\n ans.append(str(nums[start]) + '->' + str(nums[i - 1]))\n start = i\n return ans\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def summaryRanges_group(self, nums):\n \"\"\":type nums: List[int] :rtype: List[str]\"\"\"\n <|body_0|>\n\n def summaryRanges_onepass(self, nums):\n \"\"\":type nums: List[int] :rtype: List[str]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ans = []\n start = 0\n for p, group in itertools.groupby([n - v for n, v in enumerate(nums)]):\n interval = len(list(group))\n end = start + interval\n if interval == 1:\n ans.append(str(nums[start]))\n else:\n ans.append('{}->{}'.format(nums[start], nums[end - 1]))\n start = end\n return ans\n<|end_body_0|>\n\n<|body_start_1|>\n start = 0\n ans = []\n nums.append(float('INF'))\n for i in range(1, len(nums)):\n if nums[i] > nums[i - 1] + 1:\n if i - 1 == start:\n ans.append(str(nums[start]))\n else:\n ans.append(str(nums[start]) + '->' + str(nums[i - 1]))\n start = i\n return ans\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000413", "length_bytes": 1366, "license_type": "no_license", "methods": [{"docstring": ":type nums: List[int] :rtype: List[str]", "name": "summaryRanges_group", "signature": "def summaryRanges_group(self, nums)"}, {"docstring": ":type nums: List[int] :rtype: List[str]", "name": "summaryRanges_onepass", "signature": "def summaryRanges_onepass(self, nums)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_042840", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def summaryRanges_group(self, nums): :type nums: List[int] :rtype: List[str]\n- def summaryRanges_onepass(self, nums): :type nums: List[int] :rtype: List[str]", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def summaryRanges_group(self, nums): :type nums: List[int] :rtype: List[str]\n- def summaryRanges_onepass(self, nums): :type nums: List[int] :rtype: List[str]\n\n<|skeleton|>\nclass Solution:\n\n def summaryRanges_group(self, nums):\n \"\"\":type nums: List[int] :rtype: 
List[str]\"\"\"\n <|body_0|>\n\n def summaryRanges_onepass(self, nums):\n \"\"\":type nums: List[int] :rtype: List[str]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ans = []\n start = 0\n for p, group in itertools.groupby([n - v for n, v in enumerate(nums)]):\n interval = len(list(group))\n end = start + interval\n if interval == 1:\n ans.append(str(nums[start]))\n else:\n ans.append('{}->{}'.format(nums[start], nums[end - 1]))\n start = end\n return ans\n<|end_body_0|>\n\n<|body_start_1|>\n start = 0\n ans = []\n nums.append(float('INF'))\n for i in range(1, len(nums)):\n if nums[i] > nums[i - 1] + 1:\n if i - 1 == start:\n ans.append(str(nums[start]))\n else:\n ans.append(str(nums[start]) + '->' + str(nums[i - 1]))\n start = i\n return ans\n<|end_body_1|>\n", "revision_id": "0e99f9a5226507706b3ee66fd04bae813755ef40", "skeleton": "<|skeleton|>\nclass Solution:\n\n def summaryRanges_group(self, nums):\n \"\"\":type nums: List[int] :rtype: List[str]\"\"\"\n <|body_0|>\n\n def summaryRanges_onepass(self, nums):\n \"\"\":type nums: List[int] :rtype: List[str]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def summaryRanges_group(self, nums):\n \"\"\":type nums: List[int] :rtype: List[str]\"\"\"\n ans = []\n start = 0\n for p, group in itertools.groupby([n - v for n, v in enumerate(nums)]):\n interval = len(list(group))\n end = start + interval\n if interval == 1:\n ans.append(str(nums[start]))\n else:\n ans.append('{}->{}'.format(nums[start], nums[end - 1]))\n start = end\n return ans\n\n def summaryRanges_onepass(self, nums):\n \"\"\":type nums: List[int] :rtype: List[str]\"\"\"\n start = 0\n ans = []\n nums.append(float('INF'))\n for i in range(1, len(nums)):\n if nums[i] > nums[i - 1] + 1:\n if i - 1 == start:\n ans.append(str(nums[start]))\n else:\n ans.append(str(nums[start]) + '->' + str(nums[i - 1]))\n start = i\n return ans\n", "source": "the_stack_v2_python_sparse", "source_path": "medium/arrayandstring/test_228_Summary_Ranges.py", "source_repo": "wuxu1019/leetcode_sophia", "split": "val", "star_events_count": 1}
{"blob_id": "123e5b5c1b11e053571532f5b77d82d1821601e2", "bodies": ["freshCnt = 0\nfor r in grid:\n freshCnt += r.count(1)\nif freshCnt == 0:\n return 0\nres = 0\nwhile freshCnt != 0:\n grid, rottenCnt = self.after1min(grid)\n if rottenCnt == 0:\n return -1\n res, freshCnt = (res + 1, freshCnt - rottenCnt)\nreturn res", "rot = set()\nfor i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == 2:\n coordinates = [(0, 1), (1, 0), (-1, 0), (0, -1)]\n for c in coordinates:\n if 0 <= i + c[0] and i + c[0] < len(grid) and (0 <= j + c[1]) and (j + c[1] < len(grid[0])):\n if (i + c[0], j + c[1]) not in rot and grid[i + c[0]][j + c[1]] == 1:\n rot.add((i + c[0], j + c[1]))\nfor c in rot:\n grid[c[0]][c[1]] = 2\nreturn (grid, len(rot))"], "bodies_text": "<|body_start_0|>\n freshCnt = 0\n for r in grid:\n freshCnt += r.count(1)\n if freshCnt == 0:\n return 0\n res = 0\n while freshCnt != 0:\n grid, rottenCnt = self.after1min(grid)\n if rottenCnt == 0:\n return -1\n res, freshCnt = (res + 1, freshCnt - rottenCnt)\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n rot = set()\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == 2:\n coordinates = [(0, 1), (1, 0), (-1, 0), (0, -1)]\n for c in coordinates:\n if 0 <= i + c[0] and i + c[0] < len(grid) and (0 <= j + c[1]) and (j + c[1] < len(grid[0])):\n if (i + c[0], j + c[1]) not in rot and grid[i + c[0]][j + c[1]] == 1:\n rot.add((i + c[0], j + c[1]))\n for c in rot:\n grid[c[0]][c[1]] = 2\n return (grid, len(rot))\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def orangesRotting(self, grid):\n \"\"\":type grid: List[List[int]] :rtype: int\"\"\"\n <|body_0|>\n\n def after1min(self, grid):\n \"\"\"update the grid and return the rotten oranges in next min\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n freshCnt = 0\n for r in grid:\n freshCnt += r.count(1)\n if freshCnt == 0:\n return 0\n res = 0\n while freshCnt != 0:\n grid, rottenCnt = self.after1min(grid)\n if rottenCnt == 0:\n return -1\n res, freshCnt = (res + 1, freshCnt - rottenCnt)\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n rot = set()\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == 2:\n coordinates = [(0, 1), (1, 0), (-1, 0), (0, -1)]\n for c in coordinates:\n if 0 <= i + c[0] and i + c[0] < len(grid) and (0 <= j + c[1]) and (j + c[1] < len(grid[0])):\n if (i + c[0], j + c[1]) not in rot and grid[i + c[0]][j + c[1]] == 1:\n rot.add((i + c[0], j + c[1]))\n for c in rot:\n grid[c[0]][c[1]] = 2\n return (grid, len(rot))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000414", "length_bytes": 1586, "license_type": "no_license", "methods": [{"docstring": ":type grid: List[List[int]] :rtype: int", "name": "orangesRotting", "signature": "def orangesRotting(self, grid)"}, {"docstring": "update the grid and return the rotten oranges in next min", "name": "after1min", "signature": "def after1min(self, grid)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_test_001792", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def orangesRotting(self, grid): :type grid: List[List[int]] :rtype: int\n- def after1min(self, grid): update the grid and return the rotten oranges in next min", "prompted_full_text": "Implement the Python class 
`Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def orangesRotting(self, grid): :type grid: List[List[int]] :rtype: int\n- def after1min(self, grid): update the grid and return the rotten oranges in next min\n\n<|skeleton|>\nclass Solution:\n\n def orangesRotting(self, grid):\n \"\"\":type grid: List[List[int]] :rtype: int\"\"\"\n <|body_0|>\n\n def after1min(self, grid):\n \"\"\"update the grid and return the rotten oranges in next min\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n freshCnt = 0\n for r in grid:\n freshCnt += r.count(1)\n if freshCnt == 0:\n return 0\n res = 0\n while freshCnt != 0:\n grid, rottenCnt = self.after1min(grid)\n if rottenCnt == 0:\n return -1\n res, freshCnt = (res + 1, freshCnt - rottenCnt)\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n rot = set()\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == 2:\n coordinates = [(0, 1), (1, 0), (-1, 0), (0, -1)]\n for c in coordinates:\n if 0 <= i + c[0] and i + c[0] < len(grid) and (0 <= j + c[1]) and (j + c[1] < len(grid[0])):\n if (i + c[0], j + c[1]) not in rot and grid[i + c[0]][j + c[1]] == 1:\n rot.add((i + c[0], j + c[1]))\n for c in rot:\n grid[c[0]][c[1]] = 2\n return (grid, len(rot))\n<|end_body_1|>\n", "revision_id": "b4da922c4e8406c486760639b71e3ec50283ca43", "skeleton": "<|skeleton|>\nclass Solution:\n\n def orangesRotting(self, grid):\n \"\"\":type grid: List[List[int]] :rtype: int\"\"\"\n <|body_0|>\n\n def after1min(self, grid):\n \"\"\"update the grid and return the rotten oranges in next min\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def orangesRotting(self, grid):\n \"\"\":type grid: List[List[int]] :rtype: int\"\"\"\n freshCnt = 0\n for r in grid:\n freshCnt += r.count(1)\n if freshCnt == 0:\n return 0\n res = 0\n while freshCnt != 0:\n grid, rottenCnt = self.after1min(grid)\n if rottenCnt == 0:\n return -1\n res, freshCnt = (res + 1, freshCnt - rottenCnt)\n return res\n\n def after1min(self, grid):\n \"\"\"update the grid and return the rotten oranges in next min\"\"\"\n rot = set()\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == 2:\n coordinates = [(0, 1), (1, 0), (-1, 0), (0, -1)]\n for c in coordinates:\n if 0 <= i + c[0] and i + c[0] < len(grid) and (0 <= j + c[1]) and (j + c[1] < len(grid[0])):\n if (i + c[0], j + c[1]) not in rot and grid[i + c[0]][j + c[1]] == 1:\n rot.add((i + c[0], j + c[1]))\n for c in rot:\n grid[c[0]][c[1]] = 2\n return (grid, len(rot))\n", "source": "the_stack_v2_python_sparse", "source_path": "current_session/python/994.py", "source_repo": "YJL33/LeetCode", "split": "val", "star_events_count": 3}
{"blob_id": "5d11ebb9d268450dd3bd9977d91a5aab77672a16", "bodies": ["clID = request.data['clID']\nusername = request.data['username']\ntoken = request.data['token']\nuserType = request.data['userType']\ntokenResults = tokenAuthenticate(username, token)\nif len(tokenResults) != 0:\n client.close()\n return Response(tokenResults)\nif not isCM(userType):\n client.close()\n return Response({'error': 'invalid userType'})\nresults = retrieveCheckList(clID)\nclient.close()\nreturn Response(results)", "username = request.data['username']\ntoken = request.data['token']\nuserType = request.data['userType']\nclID = request.data['clID']\nresults = tokenAuthenticate(username, token)\nif len(results) != 0:\n client.close()\n return Response(results)\nif not isCM(userType):\n client.close()\n return Response({'error': 'invalid userType'})\nresults = logCheckList(clID, True)\nif 'error' in results:\n return Response(results)\nresults = deleteCheckList(clID)\nclient.close()\nreturn Response(results)"], "bodies_text": "<|body_start_0|>\n clID = request.data['clID']\n username = request.data['username']\n token = request.data['token']\n userType = request.data['userType']\n tokenResults = tokenAuthenticate(username, token)\n if len(tokenResults) != 0:\n client.close()\n return Response(tokenResults)\n if not isCM(userType):\n client.close()\n return Response({'error': 'invalid userType'})\n results = retrieveCheckList(clID)\n client.close()\n return Response(results)\n<|end_body_0|>\n\n<|body_start_1|>\n username = request.data['username']\n token = request.data['token']\n userType = request.data['userType']\n clID = request.data['clID']\n results = tokenAuthenticate(username, token)\n if len(results) != 0:\n client.close()\n return Response(results)\n if not isCM(userType):\n client.close()\n return Response({'error': 'invalid userType'})\n results = logCheckList(clID, True)\n if 'error' in results:\n return Response(results)\n results = deleteCheckList(clID)\n client.close()\n return Response(results)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "ManageCL", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ManageCL:\n\n def post(self, request):\n \"\"\"Retrieve checklist with the given clID.\"\"\"\n <|body_0|>\n\n def delete(self, request):\n \"\"\"Deletes checklist with the given clID.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n clID = request.data['clID']\n username = request.data['username']\n token = request.data['token']\n userType = request.data['userType']\n tokenResults = tokenAuthenticate(username, token)\n if len(tokenResults) != 0:\n client.close()\n return Response(tokenResults)\n if not isCM(userType):\n client.close()\n return Response({'error': 'invalid userType'})\n results = retrieveCheckList(clID)\n client.close()\n return Response(results)\n<|end_body_0|>\n\n<|body_start_1|>\n username = request.data['username']\n token = request.data['token']\n userType = request.data['userType']\n clID = request.data['clID']\n results = tokenAuthenticate(username, token)\n if len(results) != 0:\n client.close()\n return Response(results)\n if not isCM(userType):\n client.close()\n return Response({'error': 'invalid userType'})\n results = logCheckList(clID, True)\n if 'error' in results:\n return Response(results)\n results = deleteCheckList(clID)\n client.close()\n return Response(results)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000415", "length_bytes": 10395, "license_type": 
"permissive", "methods": [{"docstring": "Retrieve checklist with the given clID.", "name": "post", "signature": "def post(self, request)"}, {"docstring": "Deletes checklist with the given clID.", "name": "delete", "signature": "def delete(self, request)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_027742", "prompt": "Implement the Python class `ManageCL` described below.\n\nClass description:\nImplement the ManageCL class.\n\nMethod signatures and docstrings:\n- def post(self, request): Retrieve checklist with the given clID.\n- def delete(self, request): Deletes checklist with the given clID.", "prompted_full_text": "Implement the Python class `ManageCL` described below.\n\nClass description:\nImplement the ManageCL class.\n\nMethod signatures and docstrings:\n- def post(self, request): Retrieve checklist with the given clID.\n- def delete(self, request): Deletes checklist with the given clID.\n\n<|skeleton|>\nclass ManageCL:\n\n def post(self, request):\n \"\"\"Retrieve checklist with the given clID.\"\"\"\n <|body_0|>\n\n def delete(self, request):\n \"\"\"Deletes checklist with the given clID.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n clID = request.data['clID']\n username = request.data['username']\n token = request.data['token']\n userType = request.data['userType']\n tokenResults = tokenAuthenticate(username, token)\n if len(tokenResults) != 0:\n client.close()\n return Response(tokenResults)\n if not isCM(userType):\n client.close()\n return Response({'error': 'invalid userType'})\n results = retrieveCheckList(clID)\n client.close()\n return Response(results)\n<|end_body_0|>\n\n<|body_start_1|>\n username = request.data['username']\n token = request.data['token']\n userType = request.data['userType']\n clID = request.data['clID']\n results = tokenAuthenticate(username, token)\n if len(results) != 0:\n client.close()\n return Response(results)\n if not isCM(userType):\n client.close()\n return Response({'error': 'invalid userType'})\n results = logCheckList(clID, True)\n if 'error' in results:\n return Response(results)\n results = deleteCheckList(clID)\n client.close()\n return Response(results)\n<|end_body_1|>\n", "revision_id": "4e25d31a77f0338d2bb19b0c41a6ea32069d3f7c", "skeleton": "<|skeleton|>\nclass ManageCL:\n\n def post(self, request):\n \"\"\"Retrieve checklist with the given clID.\"\"\"\n <|body_0|>\n\n def delete(self, request):\n \"\"\"Deletes checklist with the given clID.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ManageCL:\n def post(self, request):\n \"\"\"Retrieve checklist with the given clID.\"\"\"\n clID = request.data['clID']\n username = request.data['username']\n token = request.data['token']\n userType = request.data['userType']\n tokenResults = tokenAuthenticate(username, token)\n if len(tokenResults) != 0:\n client.close()\n return Response(tokenResults)\n if not isCM(userType):\n client.close()\n return Response({'error': 'invalid userType'})\n results = retrieveCheckList(clID)\n client.close()\n return Response(results)\n\n def delete(self, request):\n \"\"\"Deletes checklist with the given clID.\"\"\"\n username = request.data['username']\n token = request.data['token']\n userType = request.data['userType']\n clID = request.data['clID']\n results = tokenAuthenticate(username, token)\n if len(results) != 0:\n client.close()\n return Response(results)\n if 
not isCM(userType):\n client.close()\n return Response({'error': 'invalid userType'})\n results = logCheckList(clID, True)\n if 'error' in results:\n return Response(results)\n results = deleteCheckList(clID)\n client.close()\n return Response(results)\n", "source": "the_stack_v2_python_sparse", "source_path": "SpringBoard/app/views/checklistViews.py", "source_repo": "ngjunxiang/NM3YM-SpringBoard-Backend", "split": "val", "star_events_count": 1}
{"blob_id": "16729f3a38b91c6e75c33555acb2b2a9eb74db9f", "bodies": ["sources = db_session.query(models.Sources).all()\nresults = [service_detail_from_source(source) for source in sources]\nreturn results", "variables = db_session.query(models.Variables).all()\nresults = [parameter_info_from_variable(variable) for variable in variables]\nreturn results", "variable = db_session.query(models.Variables).filter_by(VariableCode=TXHISParameterCode).one()\nresults = []\nfor mapping in variable.variable_mapping:\n source = mapping.SourceInfo\n source_with_param = SourceWithRemoteParamCode()\n source_with_param.WSDLLink = source.WSDLLink\n source_with_param.SourceName = source.NetworkName\n source_with_param.RemoteParamCode = mapping.RemoteVariableCode\n results.append(source_with_param)\nreturn results", "try:\n source = db_session.query(models.Sources).filter_by(NetworkName=SourceNetworkName, WSDLLink=WSDLLink).one()\nexcept sa_exceptions.NoResultFound:\n raise Exception('Could not match (network, parameter, wsdllink): (%s, %s, %s)' % (SourceNetworkName, TXHISParameterCode, WSDLLink))\nfor mapping in source.availableParameterInfo:\n if mapping.variable.VariableCode == TXHISParameterCode:\n return mapping.RemoteVariableCode"], "bodies_text": "<|body_start_0|>\n sources = db_session.query(models.Sources).all()\n results = [service_detail_from_source(source) for source in sources]\n return results\n<|end_body_0|>\n\n<|body_start_1|>\n variables = db_session.query(models.Variables).all()\n results = [parameter_info_from_variable(variable) for variable in variables]\n return results\n<|end_body_1|>\n\n<|body_start_2|>\n variable = db_session.query(models.Variables).filter_by(VariableCode=TXHISParameterCode).one()\n results = []\n for mapping in variable.variable_mapping:\n source = mapping.SourceInfo\n source_with_param = SourceWithRemoteParamCode()\n source_with_param.WSDLLink = source.WSDLLink\n source_with_param.SourceName = source.NetworkName\n source_with_param.RemoteParamCode = mapping.RemoteVariableCode\n results.append(source_with_param)\n return results\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n source = db_session.query(models.Sources).filter_by(NetworkName=SourceNetworkName, WSDLLink=WSDLLink).one()\n except sa_exceptions.NoResultFound:\n raise Exception('Could not match (network, parameter, wsdllink): (%s, %s, %s)' % (SourceNetworkName, TXHISParameterCode, WSDLLink))\n for mapping in source.availableParameterInfo:\n if mapping.variable.VariableCode == TXHISParameterCode:\n return mapping.RemoteVariableCode\n<|end_body_3|>\n", "class_docstring": "", "class_name": "CentralRegistryService", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CentralRegistryService:\n\n def GetSourcesGEMSS(self):\n \"\"\"Docstrings for service methods appear as documentation in the wsdl what fun @param name the name to say hello to @param the number of times to say hello @return the completed array\"\"\"\n <|body_0|>\n\n def GetTXHISParameters(self):\n \"\"\"Docstrings for service methods appear as documentation in the wsdl what fun @param name the name to say hello to @param the number of times to say hello @return the completed array\"\"\"\n <|body_1|>\n\n def GetHISParamAvailableSources(self, TXHISParameterCode):\n \"\"\"Docstrings for service methods appear as documentation in the wsdl what fun @param name the name to say hello to @param the number of times to say hello @return the completed array\"\"\"\n <|body_2|>\n\n def 
GetRemoteParameterCode(self, SourceNetworkName, TXHISParameterCode, WSDLLink):\n \"\"\"Docstrings for service methods appear as documentation in the wsdl what fun @param name the name to say hello to @param the number of times to say hello @return the completed array\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n sources = db_session.query(models.Sources).all()\n results = [service_detail_from_source(source) for source in sources]\n return results\n<|end_body_0|>\n\n<|body_start_1|>\n variables = db_session.query(models.Variables).all()\n results = [parameter_info_from_variable(variable) for variable in variables]\n return results\n<|end_body_1|>\n\n<|body_start_2|>\n variable = db_session.query(models.Variables).filter_by(VariableCode=TXHISParameterCode).one()\n results = []\n for mapping in variable.variable_mapping:\n source = mapping.SourceInfo\n source_with_param = SourceWithRemoteParamCode()\n source_with_param.WSDLLink = source.WSDLLink\n source_with_param.SourceName = source.NetworkName\n source_with_param.RemoteParamCode = mapping.RemoteVariableCode\n results.append(source_with_param)\n return results\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n source = db_session.query(models.Sources).filter_by(NetworkName=SourceNetworkName, WSDLLink=WSDLLink).one()\n except sa_exceptions.NoResultFound:\n raise Exception('Could not match (network, parameter, wsdllink): (%s, %s, %s)' % (SourceNetworkName, TXHISParameterCode, WSDLLink))\n for mapping in source.availableParameterInfo:\n if mapping.variable.VariableCode == TXHISParameterCode:\n return mapping.RemoteVariableCode\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000416", "length_bytes": 5465, "license_type": "no_license", "methods": [{"docstring": "Docstrings for service methods appear as documentation in the wsdl what fun @param name the name to say hello to @param the number of times to say hello @return the completed array", "name": "GetSourcesGEMSS", "signature": "def GetSourcesGEMSS(self)"}, {"docstring": "Docstrings for service methods appear as documentation in the wsdl what fun @param name the name to say hello to @param the number of times to say hello @return the completed array", "name": "GetTXHISParameters", "signature": "def GetTXHISParameters(self)"}, {"docstring": "Docstrings for service methods appear as documentation in the wsdl what fun @param name the name to say hello to @param the number of times to say hello @return the completed array", "name": "GetHISParamAvailableSources", "signature": "def GetHISParamAvailableSources(self, TXHISParameterCode)"}, {"docstring": "Docstrings for service methods appear as documentation in the wsdl what fun @param name the name to say hello to @param the number of times to say hello @return the completed array", "name": "GetRemoteParameterCode", "signature": "def GetRemoteParameterCode(self, SourceNetworkName, TXHISParameterCode, WSDLLink)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_042906", "prompt": "Implement the Python class `CentralRegistryService` described below.\n\nClass description:\nImplement the CentralRegistryService class.\n\nMethod signatures and docstrings:\n- def GetSourcesGEMSS(self): Docstrings for service methods appear as documentation in the wsdl what fun @param name the name to say hello to @param the number of times to say hello @return the completed array\n- def GetTXHISParameters(self): Docstrings for service methods appear as documentation in the wsdl what fun @param name the name to say hello to @param 
the number of times to say hello @return the completed array\n- def GetHISParamAvailableSources(self, TXHISParameterCode): Docstrings for service methods appear as documentation in the wsdl what fun @param name the name to say hello to @param the number of times to say hello @return the completed array\n- def GetRemoteParameterCode(self, SourceNetworkName, TXHISParameterCode, WSDLLink): Docstrings for service methods appear as documentation in the wsdl what fun @param name the name to say hello to @param the number of times to say hello @return the completed array", "prompted_full_text": "Implement the Python class `CentralRegistryService` described below.\n\nClass description:\nImplement the CentralRegistryService class.\n\nMethod signatures and docstrings:\n- def GetSourcesGEMSS(self): Docstrings for service methods appear as documentation in the wsdl what fun @param name the name to say hello to @param the number of times to say hello @return the completed array\n- def GetTXHISParameters(self): Docstrings for service methods appear as documentation in the wsdl what fun @param name the name to say hello to @param the number of times to say hello @return the completed array\n- def GetHISParamAvailableSources(self, TXHISParameterCode): Docstrings for service methods appear as documentation in the wsdl what fun @param name the name to say hello to @param the number of times to say hello @return the completed array\n- def GetRemoteParameterCode(self, SourceNetworkName, TXHISParameterCode, WSDLLink): Docstrings for service methods appear as documentation in the wsdl what fun @param name the name to say hello to @param the number of times to say hello @return the completed array\n\n<|skeleton|>\nclass CentralRegistryService:\n\n def GetSourcesGEMSS(self):\n \"\"\"Docstrings for service methods appear as documentation in the wsdl what fun @param name the name to say hello to @param the number of times to say hello @return the completed array\"\"\"\n <|body_0|>\n\n def GetTXHISParameters(self):\n \"\"\"Docstrings for service methods appear as documentation in the wsdl what fun @param name the name to say hello to @param the number of times to say hello @return the completed array\"\"\"\n <|body_1|>\n\n def GetHISParamAvailableSources(self, TXHISParameterCode):\n \"\"\"Docstrings for service methods appear as documentation in the wsdl what fun @param name the name to say hello to @param the number of times to say hello @return the completed array\"\"\"\n <|body_2|>\n\n def GetRemoteParameterCode(self, SourceNetworkName, TXHISParameterCode, WSDLLink):\n \"\"\"Docstrings for service methods appear as documentation in the wsdl what fun @param name the name to say hello to @param the number of times to say hello @return the completed array\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n sources = db_session.query(models.Sources).all()\n results = [service_detail_from_source(source) for source in sources]\n return results\n<|end_body_0|>\n\n<|body_start_1|>\n variables = db_session.query(models.Variables).all()\n results = [parameter_info_from_variable(variable) for variable in variables]\n return results\n<|end_body_1|>\n\n<|body_start_2|>\n variable = db_session.query(models.Variables).filter_by(VariableCode=TXHISParameterCode).one()\n results = []\n for mapping in variable.variable_mapping:\n source = mapping.SourceInfo\n source_with_param = SourceWithRemoteParamCode()\n source_with_param.WSDLLink = source.WSDLLink\n source_with_param.SourceName = source.NetworkName\n 
source_with_param.RemoteParamCode = mapping.RemoteVariableCode\n results.append(source_with_param)\n return results\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n source = db_session.query(models.Sources).filter_by(NetworkName=SourceNetworkName, WSDLLink=WSDLLink).one()\n except sa_exceptions.NoResultFound:\n raise Exception('Could not match (network, parameter, wsdllink): (%s, %s, %s)' % (SourceNetworkName, TXHISParameterCode, WSDLLink))\n for mapping in source.availableParameterInfo:\n if mapping.variable.VariableCode == TXHISParameterCode:\n return mapping.RemoteVariableCode\n<|end_body_3|>\n", "revision_id": "bc2e2bc0a347599a5dee127be9d03d6d082e7001", "skeleton": "<|skeleton|>\nclass CentralRegistryService:\n\n def GetSourcesGEMSS(self):\n \"\"\"Docstrings for service methods appear as documentation in the wsdl what fun @param name the name to say hello to @param the number of times to say hello @return the completed array\"\"\"\n <|body_0|>\n\n def GetTXHISParameters(self):\n \"\"\"Docstrings for service methods appear as documentation in the wsdl what fun @param name the name to say hello to @param the number of times to say hello @return the completed array\"\"\"\n <|body_1|>\n\n def GetHISParamAvailableSources(self, TXHISParameterCode):\n \"\"\"Docstrings for service methods appear as documentation in the wsdl what fun @param name the name to say hello to @param the number of times to say hello @return the completed array\"\"\"\n <|body_2|>\n\n def GetRemoteParameterCode(self, SourceNetworkName, TXHISParameterCode, WSDLLink):\n \"\"\"Docstrings for service methods appear as documentation in the wsdl what fun @param name the name to say hello to @param the number of times to say hello @return the completed array\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CentralRegistryService:\n def GetSourcesGEMSS(self):\n \"\"\"Docstrings for service methods appear as documentation in the wsdl what fun @param name the name to say hello to @param the number of times to say hello @return the completed array\"\"\"\n sources = db_session.query(models.Sources).all()\n results = [service_detail_from_source(source) for source in sources]\n return results\n\n def GetTXHISParameters(self):\n \"\"\"Docstrings for service methods appear as documentation in the wsdl what fun @param name the name to say hello to @param the number of times to say hello @return the completed array\"\"\"\n variables = db_session.query(models.Variables).all()\n results = [parameter_info_from_variable(variable) for variable in variables]\n return results\n\n def GetHISParamAvailableSources(self, TXHISParameterCode):\n \"\"\"Docstrings for service methods appear as documentation in the wsdl what fun @param name the name to say hello to @param the number of times to say hello @return the completed array\"\"\"\n variable = db_session.query(models.Variables).filter_by(VariableCode=TXHISParameterCode).one()\n results = []\n for mapping in variable.variable_mapping:\n source = mapping.SourceInfo\n source_with_param = SourceWithRemoteParamCode()\n source_with_param.WSDLLink = source.WSDLLink\n source_with_param.SourceName = source.NetworkName\n source_with_param.RemoteParamCode = mapping.RemoteVariableCode\n results.append(source_with_param)\n return results\n\n def GetRemoteParameterCode(self, SourceNetworkName, TXHISParameterCode, WSDLLink):\n \"\"\"Docstrings for service methods 
appear as documentation in the wsdl what fun @param name the name to say hello to @param the number of times to say hello @return the completed array\"\"\"\n try:\n source = db_session.query(models.Sources).filter_by(NetworkName=SourceNetworkName, WSDLLink=WSDLLink).one()\n except sa_exceptions.NoResultFound:\n raise Exception('Could not match (network, parameter, wsdllink): (%s, %s, %s)' % (SourceNetworkName, TXHISParameterCode, WSDLLink))\n for mapping in source.availableParameterInfo:\n if mapping.variable.VariableCode == TXHISParameterCode:\n return mapping.RemoteVariableCode\n", "source": "the_stack_v2_python_sparse", "source_path": "wdft_central/wdft_central/service.py", "source_repo": "twdb/txhis", "split": "val", "star_events_count": 0}
{"blob_id": "bd7647ef3faa4e0493ceeeaf97c5d85ee3f6f2bd", "bodies": ["super(DropItem, self).__init__()\nif itemType:\n self.device_type = itemType\nself.image = QtGui.QImage(environ['images'] + self.device_type + '.gif')\nif self.image.isNull():\n mainWidgets['log'].append('Unknown node type ' + str(self.device_type))\n return\nself.setCursor(QtCore.Qt.OpenHandCursor)\nif itemType in unimplementedTypes:\n self.setToolTip(self.device_type.center(13) + '\\nImplement me.')\n self.setEnabled(False)\nelse:\n self.setToolTip(self.device_type.center(21) + '\\nDrag onto the canvas.')", "painter.setRenderHint(QtGui.QPainter.SmoothPixmapTransform, options['smoothing'])\nif not self.isEnabled():\n transparency = QtGui.QImage(self.image)\n transparency.fill(QtGui.qRgba(0, 0, 0, 50))\n painter.drawImage(QtCore.QPoint(-self.image.width() / 2, -self.image.height() / 2), transparency)\npainter.drawImage(QtCore.QPoint(-self.image.width() / 2, -self.image.height() / 2), self.image)\ndevice_text = self.device_type\nif options['names']:\n painter.drawText(QtCore.QRectF(-70, self.image.height() / 2, 145, 60), device_text, QtGui.QTextOption(QtCore.Qt.AlignHCenter))", "rect = self.image.rect()\ntoR = QtCore.QRectF(rect.left() - rect.width() / 2, rect.top() - rect.height() / 2, rect.width(), rect.height())\nreturn toR", "if event.button() != QtCore.Qt.LeftButton:\n event.ignore()\n return\ndrag = QtGui.QDrag(event.widget())\nmime = QtCore.QMimeData()\nmime.setText(self.device_type)\ndrag.setMimeData(mime)\ndrag.setPixmap(QtGui.QPixmap.fromImage(self.image))\ndrag.setHotSpot(QtCore.QPoint(15, 30))\ndrag.start()"], "bodies_text": "<|body_start_0|>\n super(DropItem, self).__init__()\n if itemType:\n self.device_type = itemType\n self.image = QtGui.QImage(environ['images'] + self.device_type + '.gif')\n if self.image.isNull():\n mainWidgets['log'].append('Unknown node type ' + str(self.device_type))\n return\n self.setCursor(QtCore.Qt.OpenHandCursor)\n if itemType in unimplementedTypes:\n self.setToolTip(self.device_type.center(13) + '\\nImplement me.')\n self.setEnabled(False)\n else:\n self.setToolTip(self.device_type.center(21) + '\\nDrag onto the canvas.')\n<|end_body_0|>\n\n<|body_start_1|>\n painter.setRenderHint(QtGui.QPainter.SmoothPixmapTransform, options['smoothing'])\n if not self.isEnabled():\n transparency = QtGui.QImage(self.image)\n transparency.fill(QtGui.qRgba(0, 0, 0, 50))\n painter.drawImage(QtCore.QPoint(-self.image.width() / 2, -self.image.height() / 2), transparency)\n painter.drawImage(QtCore.QPoint(-self.image.width() / 2, -self.image.height() / 2), self.image)\n device_text = self.device_type\n if options['names']:\n painter.drawText(QtCore.QRectF(-70, self.image.height() / 2, 145, 60), device_text, QtGui.QTextOption(QtCore.Qt.AlignHCenter))\n<|end_body_1|>\n\n<|body_start_2|>\n rect = self.image.rect()\n toR = QtCore.QRectF(rect.left() - rect.width() / 2, rect.top() - rect.height() / 2, rect.width(), rect.height())\n return toR\n<|end_body_2|>\n\n<|body_start_3|>\n if event.button() != QtCore.Qt.LeftButton:\n event.ignore()\n return\n drag = QtGui.QDrag(event.widget())\n mime = QtCore.QMimeData()\n mime.setText(self.device_type)\n drag.setMimeData(mime)\n drag.setPixmap(QtGui.QPixmap.fromImage(self.image))\n drag.setHotSpot(QtCore.QPoint(15, 30))\n drag.start()\n<|end_body_3|>\n", "class_docstring": "", "class_name": "DropItem", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DropItem:\n\n def __init__(self, 
itemType=None):\n \"\"\"Create a draggable item, which can be dropped into the canvas.\"\"\"\n <|body_0|>\n\n def paint(self, painter, option, widget):\n \"\"\"Draw the representation.\"\"\"\n <|body_1|>\n\n def boundingRect(self):\n \"\"\"Get the bounding rectangle of the item.\"\"\"\n <|body_2|>\n\n def mousePressEvent(self, event):\n \"\"\"Handle the mouse events on this item.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(DropItem, self).__init__()\n if itemType:\n self.device_type = itemType\n self.image = QtGui.QImage(environ['images'] + self.device_type + '.gif')\n if self.image.isNull():\n mainWidgets['log'].append('Unknown node type ' + str(self.device_type))\n return\n self.setCursor(QtCore.Qt.OpenHandCursor)\n if itemType in unimplementedTypes:\n self.setToolTip(self.device_type.center(13) + '\\nImplement me.')\n self.setEnabled(False)\n else:\n self.setToolTip(self.device_type.center(21) + '\\nDrag onto the canvas.')\n<|end_body_0|>\n\n<|body_start_1|>\n painter.setRenderHint(QtGui.QPainter.SmoothPixmapTransform, options['smoothing'])\n if not self.isEnabled():\n transparency = QtGui.QImage(self.image)\n transparency.fill(QtGui.qRgba(0, 0, 0, 50))\n painter.drawImage(QtCore.QPoint(-self.image.width() / 2, -self.image.height() / 2), transparency)\n painter.drawImage(QtCore.QPoint(-self.image.width() / 2, -self.image.height() / 2), self.image)\n device_text = self.device_type\n if options['names']:\n painter.drawText(QtCore.QRectF(-70, self.image.height() / 2, 145, 60), device_text, QtGui.QTextOption(QtCore.Qt.AlignHCenter))\n<|end_body_1|>\n\n<|body_start_2|>\n rect = self.image.rect()\n toR = QtCore.QRectF(rect.left() - rect.width() / 2, rect.top() - rect.height() / 2, rect.width(), rect.height())\n return toR\n<|end_body_2|>\n\n<|body_start_3|>\n if event.button() != QtCore.Qt.LeftButton:\n event.ignore()\n return\n drag = QtGui.QDrag(event.widget())\n mime = QtCore.QMimeData()\n mime.setText(self.device_type)\n drag.setMimeData(mime)\n drag.setPixmap(QtGui.QPixmap.fromImage(self.image))\n drag.setHotSpot(QtCore.QPoint(15, 30))\n drag.start()\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000417", "length_bytes": 14238, "license_type": "permissive", "methods": [{"docstring": "Create a draggable item, which can be dropped into the canvas.", "name": "__init__", "signature": "def __init__(self, itemType=None)"}, {"docstring": "Draw the representation.", "name": "paint", "signature": "def paint(self, painter, option, widget)"}, {"docstring": "Get the bounding rectangle of the item.", "name": "boundingRect", "signature": "def boundingRect(self)"}, {"docstring": "Handle the mouse events on this item.", "name": "mousePressEvent", "signature": "def mousePressEvent(self, event)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_049689", "prompt": "Implement the Python class `DropItem` described below.\n\nClass description:\nImplement the DropItem class.\n\nMethod signatures and docstrings:\n- def __init__(self, itemType=None): Create a draggable item, which can be dropped into the canvas.\n- def paint(self, painter, option, widget): Draw the representation.\n- def boundingRect(self): Get the bounding rectangle of the item.\n- def mousePressEvent(self, event): Handle the mouse events on this item.", "prompted_full_text": "Implement the Python class `DropItem` described below.\n\nClass description:\nImplement the DropItem class.\n\nMethod signatures and docstrings:\n- def __init__(self, itemType=None): Create a draggable item, which 
can be dropped into the canvas.\n- def paint(self, painter, option, widget): Draw the representation.\n- def boundingRect(self): Get the bounding rectangle of the item.\n- def mousePressEvent(self, event): Handle the mouse events on this item.\n\n<|skeleton|>\nclass DropItem:\n\n def __init__(self, itemType=None):\n \"\"\"Create a draggable item, which can be dropped into the canvas.\"\"\"\n <|body_0|>\n\n def paint(self, painter, option, widget):\n \"\"\"Draw the representation.\"\"\"\n <|body_1|>\n\n def boundingRect(self):\n \"\"\"Get the bounding rectangle of the item.\"\"\"\n <|body_2|>\n\n def mousePressEvent(self, event):\n \"\"\"Handle the mouse events on this item.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(DropItem, self).__init__()\n if itemType:\n self.device_type = itemType\n self.image = QtGui.QImage(environ['images'] + self.device_type + '.gif')\n if self.image.isNull():\n mainWidgets['log'].append('Unknown node type ' + str(self.device_type))\n return\n self.setCursor(QtCore.Qt.OpenHandCursor)\n if itemType in unimplementedTypes:\n self.setToolTip(self.device_type.center(13) + '\\nImplement me.')\n self.setEnabled(False)\n else:\n self.setToolTip(self.device_type.center(21) + '\\nDrag onto the canvas.')\n<|end_body_0|>\n\n<|body_start_1|>\n painter.setRenderHint(QtGui.QPainter.SmoothPixmapTransform, options['smoothing'])\n if not self.isEnabled():\n transparency = QtGui.QImage(self.image)\n transparency.fill(QtGui.qRgba(0, 0, 0, 50))\n painter.drawImage(QtCore.QPoint(-self.image.width() / 2, -self.image.height() / 2), transparency)\n painter.drawImage(QtCore.QPoint(-self.image.width() / 2, -self.image.height() / 2), self.image)\n device_text = self.device_type\n if options['names']:\n painter.drawText(QtCore.QRectF(-70, self.image.height() / 2, 145, 60), device_text, QtGui.QTextOption(QtCore.Qt.AlignHCenter))\n<|end_body_1|>\n\n<|body_start_2|>\n rect = self.image.rect()\n toR = QtCore.QRectF(rect.left() - rect.width() / 2, rect.top() - rect.height() / 2, rect.width(), rect.height())\n return toR\n<|end_body_2|>\n\n<|body_start_3|>\n if event.button() != QtCore.Qt.LeftButton:\n event.ignore()\n return\n drag = QtGui.QDrag(event.widget())\n mime = QtCore.QMimeData()\n mime.setText(self.device_type)\n drag.setMimeData(mime)\n drag.setPixmap(QtGui.QPixmap.fromImage(self.image))\n drag.setHotSpot(QtCore.QPoint(15, 30))\n drag.start()\n<|end_body_3|>\n", "revision_id": "d095076113c1e84c33f52ef46a3df1f8bc8ffa43", "skeleton": "<|skeleton|>\nclass DropItem:\n\n def __init__(self, itemType=None):\n \"\"\"Create a draggable item, which can be dropped into the canvas.\"\"\"\n <|body_0|>\n\n def paint(self, painter, option, widget):\n \"\"\"Draw the representation.\"\"\"\n <|body_1|>\n\n def boundingRect(self):\n \"\"\"Get the bounding rectangle of the item.\"\"\"\n <|body_2|>\n\n def mousePressEvent(self, event):\n \"\"\"Handle the mouse events on this item.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DropItem:\n def __init__(self, itemType=None):\n \"\"\"Create a draggable item, which can be dropped into the canvas.\"\"\"\n super(DropItem, self).__init__()\n if itemType:\n self.device_type = itemType\n self.image = QtGui.QImage(environ['images'] + self.device_type + '.gif')\n if self.image.isNull():\n mainWidgets['log'].append('Unknown node type ' + str(self.device_type))\n return\n 
self.setCursor(QtCore.Qt.OpenHandCursor)\n if itemType in unimplementedTypes:\n self.setToolTip(self.device_type.center(13) + '\\nImplement me.')\n self.setEnabled(False)\n else:\n self.setToolTip(self.device_type.center(21) + '\\nDrag onto the canvas.')\n\n def paint(self, painter, option, widget):\n \"\"\"Draw the representation.\"\"\"\n painter.setRenderHint(QtGui.QPainter.SmoothPixmapTransform, options['smoothing'])\n if not self.isEnabled():\n transparency = QtGui.QImage(self.image)\n transparency.fill(QtGui.qRgba(0, 0, 0, 50))\n painter.drawImage(QtCore.QPoint(-self.image.width() / 2, -self.image.height() / 2), transparency)\n painter.drawImage(QtCore.QPoint(-self.image.width() / 2, -self.image.height() / 2), self.image)\n device_text = self.device_type\n if options['names']:\n painter.drawText(QtCore.QRectF(-70, self.image.height() / 2, 145, 60), device_text, QtGui.QTextOption(QtCore.Qt.AlignHCenter))\n\n def boundingRect(self):\n \"\"\"Get the bounding rectangle of the item.\"\"\"\n rect = self.image.rect()\n toR = QtCore.QRectF(rect.left() - rect.width() / 2, rect.top() - rect.height() / 2, rect.width(), rect.height())\n return toR\n\n def mousePressEvent(self, event):\n \"\"\"Handle the mouse events on this item.\"\"\"\n if event.button() != QtCore.Qt.LeftButton:\n event.ignore()\n return\n drag = QtGui.QDrag(event.widget())\n mime = QtCore.QMimeData()\n mime.setText(self.device_type)\n drag.setMimeData(mime)\n drag.setPixmap(QtGui.QPixmap.fromImage(self.image))\n drag.setHotSpot(QtCore.QPoint(15, 30))\n drag.start()\n", "source": "the_stack_v2_python_sparse", "source_path": "frontend/src/gbuilder/UI/Node.py", "source_repo": "citelab/gini5", "split": "val", "star_events_count": 12}
{"blob_id": "2c5b3255f2bc9fa3d96f5af3b5885f817ee45e34", "bodies": ["self._padding = padding\nself._padding_mode = padding_mode\nsuper(Pad1D, self).__init__(**kwargs)", "if self._padding_mode == 'zero':\n paddings = ((0, 0), self._padding, (0, 0))\n outputs = tf.pad(inputs, paddings)\nelif self._padding_mode == 'wrap':\n outputs = tf.concat([inputs[:, -self._padding[0]:, :], inputs, inputs[:, :self._padding[1], :]], axis=1)\nelif self._padding_mode == 'repeat':\n outputs = tf.concat([tf.repeat(inputs[:, :1, :], self._padding[0], axis=1), inputs, tf.repeat(inputs[:, -1:, :], self._padding[1], axis=1)], axis=1)\nelse:\n raise ValueError(f'Padding mode {self._padding_mode} not supported.')\nreturn outputs"], "bodies_text": "<|body_start_0|>\n self._padding = padding\n self._padding_mode = padding_mode\n super(Pad1D, self).__init__(**kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n if self._padding_mode == 'zero':\n paddings = ((0, 0), self._padding, (0, 0))\n outputs = tf.pad(inputs, paddings)\n elif self._padding_mode == 'wrap':\n outputs = tf.concat([inputs[:, -self._padding[0]:, :], inputs, inputs[:, :self._padding[1], :]], axis=1)\n elif self._padding_mode == 'repeat':\n outputs = tf.concat([tf.repeat(inputs[:, :1, :], self._padding[0], axis=1), inputs, tf.repeat(inputs[:, -1:, :], self._padding[1], axis=1)], axis=1)\n else:\n raise ValueError(f'Padding mode {self._padding_mode} not supported.')\n return outputs\n<|end_body_1|>\n", "class_docstring": "Pads a (batch, size, channels) tensor.", "class_name": "Pad1D", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Pad1D:\n \"\"\"Pads a (batch, size, channels) tensor.\"\"\"\n\n def __init__(self, padding, padding_mode, **kwargs):\n \"\"\"Creates the padding layer. Args: padding: (pad_left, pad_right) pair. How many elements to add to both sides of the second dimension. If this is used for the very first layer of the feelers CNN, this can be thought as the number of feeler entries to add before the first feeler entry and after the last. padding_mode: One of 'zero', 'wrap', or 'repeat'. - zero: For padding out with zeroes. - wrap: For padding wrapping around the tensor. - repeat: For padding with repeated values from the tensor edges. **kwargs: Other kwargs that should be sent to the layer superclass.\"\"\"\n <|body_0|>\n\n def call(self, inputs):\n \"\"\"Apply the layer into inputs. Args: inputs: A tensor with shape (batch, size, channels). Returns: Output tensor after applying the layer.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._padding = padding\n self._padding_mode = padding_mode\n super(Pad1D, self).__init__(**kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n if self._padding_mode == 'zero':\n paddings = ((0, 0), self._padding, (0, 0))\n outputs = tf.pad(inputs, paddings)\n elif self._padding_mode == 'wrap':\n outputs = tf.concat([inputs[:, -self._padding[0]:, :], inputs, inputs[:, :self._padding[1], :]], axis=1)\n elif self._padding_mode == 'repeat':\n outputs = tf.concat([tf.repeat(inputs[:, :1, :], self._padding[0], axis=1), inputs, tf.repeat(inputs[:, -1:, :], self._padding[1], axis=1)], axis=1)\n else:\n raise ValueError(f'Padding mode {self._padding_mode} not supported.')\n return outputs\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000418", "length_bytes": 14886, "license_type": "permissive", "methods": [{"docstring": "Creates the padding layer. Args: padding: (pad_left, pad_right) pair. 
How many elements to add to both sides of the second dimension. If this is used for the very first layer of the feelers CNN, this can be thought as the number of feeler entries to add before the first feeler entry and after the last. padding_mode: One of 'zero', 'wrap', or 'repeat'. - zero: For padding out with zeroes. - wrap: For padding wrapping around the tensor. - repeat: For padding with repeated values from the tensor edges. **kwargs: Other kwargs that should be sent to the layer superclass.", "name": "__init__", "signature": "def __init__(self, padding, padding_mode, **kwargs)"}, {"docstring": "Apply the layer into inputs. Args: inputs: A tensor with shape (batch, size, channels). Returns: Output tensor after applying the layer.", "name": "call", "signature": "def call(self, inputs)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_034099", "prompt": "Implement the Python class `Pad1D` described below.\n\nClass description:\nPads a (batch, size, channels) tensor.\n\nMethod signatures and docstrings:\n- def __init__(self, padding, padding_mode, **kwargs): Creates the padding layer. Args: padding: (pad_left, pad_right) pair. How many elements to add to both sides of the second dimension. If this is used for the very first layer of the feelers CNN, this can be thought as the number of feeler entries to add before the first feeler entry and after the last. padding_mode: One of 'zero', 'wrap', or 'repeat'. - zero: For padding out with zeroes. - wrap: For padding wrapping around the tensor. - repeat: For padding with repeated values from the tensor edges. **kwargs: Other kwargs that should be sent to the layer superclass.\n- def call(self, inputs): Apply the layer into inputs. Args: inputs: A tensor with shape (batch, size, channels). Returns: Output tensor after applying the layer.", "prompted_full_text": "Implement the Python class `Pad1D` described below.\n\nClass description:\nPads a (batch, size, channels) tensor.\n\nMethod signatures and docstrings:\n- def __init__(self, padding, padding_mode, **kwargs): Creates the padding layer. Args: padding: (pad_left, pad_right) pair. How many elements to add to both sides of the second dimension. If this is used for the very first layer of the feelers CNN, this can be thought as the number of feeler entries to add before the first feeler entry and after the last. padding_mode: One of 'zero', 'wrap', or 'repeat'. - zero: For padding out with zeroes. - wrap: For padding wrapping around the tensor. - repeat: For padding with repeated values from the tensor edges. **kwargs: Other kwargs that should be sent to the layer superclass.\n- def call(self, inputs): Apply the layer into inputs. Args: inputs: A tensor with shape (batch, size, channels). Returns: Output tensor after applying the layer.\n\n<|skeleton|>\nclass Pad1D:\n \"\"\"Pads a (batch, size, channels) tensor.\"\"\"\n\n def __init__(self, padding, padding_mode, **kwargs):\n \"\"\"Creates the padding layer. Args: padding: (pad_left, pad_right) pair. How many elements to add to both sides of the second dimension. If this is used for the very first layer of the feelers CNN, this can be thought as the number of feeler entries to add before the first feeler entry and after the last. padding_mode: One of 'zero', 'wrap', or 'repeat'. - zero: For padding out with zeroes. - wrap: For padding wrapping around the tensor. - repeat: For padding with repeated values from the tensor edges. 
**kwargs: Other kwargs that should be sent to the layer superclass.\"\"\"\n <|body_0|>\n\n def call(self, inputs):\n \"\"\"Apply the layer into inputs. Args: inputs: A tensor with shape (batch, size, channels). Returns: Output tensor after applying the layer.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._padding = padding\n self._padding_mode = padding_mode\n super(Pad1D, self).__init__(**kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n if self._padding_mode == 'zero':\n paddings = ((0, 0), self._padding, (0, 0))\n outputs = tf.pad(inputs, paddings)\n elif self._padding_mode == 'wrap':\n outputs = tf.concat([inputs[:, -self._padding[0]:, :], inputs, inputs[:, :self._padding[1], :]], axis=1)\n elif self._padding_mode == 'repeat':\n outputs = tf.concat([tf.repeat(inputs[:, :1, :], self._padding[0], axis=1), inputs, tf.repeat(inputs[:, -1:, :], self._padding[1], axis=1)], axis=1)\n else:\n raise ValueError(f'Padding mode {self._padding_mode} not supported.')\n return outputs\n<|end_body_1|>\n", "revision_id": "26ab377a6853463b2efce40970e54d44b91e79ca", "skeleton": "<|skeleton|>\nclass Pad1D:\n \"\"\"Pads a (batch, size, channels) tensor.\"\"\"\n\n def __init__(self, padding, padding_mode, **kwargs):\n \"\"\"Creates the padding layer. Args: padding: (pad_left, pad_right) pair. How many elements to add to both sides of the second dimension. If this is used for the very first layer of the feelers CNN, this can be thought as the number of feeler entries to add before the first feeler entry and after the last. padding_mode: One of 'zero', 'wrap', or 'repeat'. - zero: For padding out with zeroes. - wrap: For padding wrapping around the tensor. - repeat: For padding with repeated values from the tensor edges. **kwargs: Other kwargs that should be sent to the layer superclass.\"\"\"\n <|body_0|>\n\n def call(self, inputs):\n \"\"\"Apply the layer into inputs. Args: inputs: A tensor with shape (batch, size, channels). Returns: Output tensor after applying the layer.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Pad1D:\n \"\"\"Pads a (batch, size, channels) tensor.\"\"\"\n\n def __init__(self, padding, padding_mode, **kwargs):\n \"\"\"Creates the padding layer. Args: padding: (pad_left, pad_right) pair. How many elements to add to both sides of the second dimension. If this is used for the very first layer of the feelers CNN, this can be thought as the number of feeler entries to add before the first feeler entry and after the last. padding_mode: One of 'zero', 'wrap', or 'repeat'. - zero: For padding out with zeroes. - wrap: For padding wrapping around the tensor. - repeat: For padding with repeated values from the tensor edges. **kwargs: Other kwargs that should be sent to the layer superclass.\"\"\"\n self._padding = padding\n self._padding_mode = padding_mode\n super(Pad1D, self).__init__(**kwargs)\n\n def call(self, inputs):\n \"\"\"Apply the layer into inputs. Args: inputs: A tensor with shape (batch, size, channels). 
Returns: Output tensor after applying the layer.\"\"\"\n if self._padding_mode == 'zero':\n paddings = ((0, 0), self._padding, (0, 0))\n outputs = tf.pad(inputs, paddings)\n elif self._padding_mode == 'wrap':\n outputs = tf.concat([inputs[:, -self._padding[0]:, :], inputs, inputs[:, :self._padding[1], :]], axis=1)\n elif self._padding_mode == 'repeat':\n outputs = tf.concat([tf.repeat(inputs[:, :1, :], self._padding[0], axis=1), inputs, tf.repeat(inputs[:, -1:, :], self._padding[1], axis=1)], axis=1)\n else:\n raise ValueError(f'Padding mode {self._padding_mode} not supported.')\n return outputs\n", "source": "the_stack_v2_python_sparse", "source_path": "service/learner/brains/layers.py", "source_repo": "stewartmiles/falken", "split": "val", "star_events_count": 1}
{"blob_id": "6a85e86260d833c5bdacebb6338f77072fdcca23", "bodies": ["try:\n self._authenticate_user_dn(password)\n self._check_requirements()\nexcept self.AuthenticationFailed as e:\n logger.debug(u'Authentication failed for %s: %s' % (self._username, e))\n return False\nexcept ldap.LDAPError as e:\n logger.warning(u'Caught LDAPError while authenticating %s: %s', self._username, pprint.pformat(e))\n return False\nexcept Exception:\n logger.exception(u'Caught Exception while authenticating %s', self._username)\n raise\nreturn True", "user = None\ntry:\n self._get_or_create_user()\n user = self._user\nexcept self.AuthenticationFailed as e:\n logger.debug(u'Authentication failed for %s: %s' % (self._username, e))\nexcept ldap.LDAPError as e:\n logger.warning(u'Caught LDAPError while authenticating %s: %s', self._username, pprint.pformat(e))\nexcept Exception:\n logger.exception(u'Caught Exception while authenticating %s', self._username)\n raise\nreturn user"], "bodies_text": "<|body_start_0|>\n try:\n self._authenticate_user_dn(password)\n self._check_requirements()\n except self.AuthenticationFailed as e:\n logger.debug(u'Authentication failed for %s: %s' % (self._username, e))\n return False\n except ldap.LDAPError as e:\n logger.warning(u'Caught LDAPError while authenticating %s: %s', self._username, pprint.pformat(e))\n return False\n except Exception:\n logger.exception(u'Caught Exception while authenticating %s', self._username)\n raise\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n user = None\n try:\n self._get_or_create_user()\n user = self._user\n except self.AuthenticationFailed as e:\n logger.debug(u'Authentication failed for %s: %s' % (self._username, e))\n except ldap.LDAPError as e:\n logger.warning(u'Caught LDAPError while authenticating %s: %s', self._username, pprint.pformat(e))\n except Exception:\n logger.exception(u'Caught Exception while authenticating %s', self._username)\n raise\n return user\n<|end_body_1|>\n", "class_docstring": "", "class_name": "_PolyauthenticationLDAPUser", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass _PolyauthenticationLDAPUser:\n\n def ldap_authenticate(self, password):\n \"\"\"Searches LDAP user and populates his attributes\"\"\"\n <|body_0|>\n\n def add_user_to_db(self):\n \"\"\"Creating user id DB\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n self._authenticate_user_dn(password)\n self._check_requirements()\n except self.AuthenticationFailed as e:\n logger.debug(u'Authentication failed for %s: %s' % (self._username, e))\n return False\n except ldap.LDAPError as e:\n logger.warning(u'Caught LDAPError while authenticating %s: %s', self._username, pprint.pformat(e))\n return False\n except Exception:\n logger.exception(u'Caught Exception while authenticating %s', self._username)\n raise\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n user = None\n try:\n self._get_or_create_user()\n user = self._user\n except self.AuthenticationFailed as e:\n logger.debug(u'Authentication failed for %s: %s' % (self._username, e))\n except ldap.LDAPError as e:\n logger.warning(u'Caught LDAPError while authenticating %s: %s', self._username, pprint.pformat(e))\n except Exception:\n logger.exception(u'Caught Exception while authenticating %s', self._username)\n raise\n return user\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000419", "length_bytes": 3949, "license_type": "no_license", "methods": [{"docstring": "Searches LDAP user and populates his 
attributes", "name": "ldap_authenticate", "signature": "def ldap_authenticate(self, password)"}, {"docstring": "Creating user id DB", "name": "add_user_to_db", "signature": "def add_user_to_db(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006590", "prompt": "Implement the Python class `_PolyauthenticationLDAPUser` described below.\n\nClass description:\nImplement the _PolyauthenticationLDAPUser class.\n\nMethod signatures and docstrings:\n- def ldap_authenticate(self, password): Searches LDAP user and populates his attributes\n- def add_user_to_db(self): Creating user id DB", "prompted_full_text": "Implement the Python class `_PolyauthenticationLDAPUser` described below.\n\nClass description:\nImplement the _PolyauthenticationLDAPUser class.\n\nMethod signatures and docstrings:\n- def ldap_authenticate(self, password): Searches LDAP user and populates his attributes\n- def add_user_to_db(self): Creating user id DB\n\n<|skeleton|>\nclass _PolyauthenticationLDAPUser:\n\n def ldap_authenticate(self, password):\n \"\"\"Searches LDAP user and populates his attributes\"\"\"\n <|body_0|>\n\n def add_user_to_db(self):\n \"\"\"Creating user id DB\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n self._authenticate_user_dn(password)\n self._check_requirements()\n except self.AuthenticationFailed as e:\n logger.debug(u'Authentication failed for %s: %s' % (self._username, e))\n return False\n except ldap.LDAPError as e:\n logger.warning(u'Caught LDAPError while authenticating %s: %s', self._username, pprint.pformat(e))\n return False\n except Exception:\n logger.exception(u'Caught Exception while authenticating %s', self._username)\n raise\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n user = None\n try:\n self._get_or_create_user()\n user = self._user\n except self.AuthenticationFailed as e:\n logger.debug(u'Authentication failed for %s: %s' % (self._username, e))\n except ldap.LDAPError as e:\n logger.warning(u'Caught LDAPError while authenticating %s: %s', self._username, pprint.pformat(e))\n except Exception:\n logger.exception(u'Caught Exception while authenticating %s', self._username)\n raise\n return user\n<|end_body_1|>\n", "revision_id": "3bbf6d6347f8615f2d6a3052017f8f69a4244476", "skeleton": "<|skeleton|>\nclass _PolyauthenticationLDAPUser:\n\n def ldap_authenticate(self, password):\n \"\"\"Searches LDAP user and populates his attributes\"\"\"\n <|body_0|>\n\n def add_user_to_db(self):\n \"\"\"Creating user id DB\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class _PolyauthenticationLDAPUser:\n def ldap_authenticate(self, password):\n \"\"\"Searches LDAP user and populates his attributes\"\"\"\n try:\n self._authenticate_user_dn(password)\n self._check_requirements()\n except self.AuthenticationFailed as e:\n logger.debug(u'Authentication failed for %s: %s' % (self._username, e))\n return False\n except ldap.LDAPError as e:\n logger.warning(u'Caught LDAPError while authenticating %s: %s', self._username, pprint.pformat(e))\n return False\n except Exception:\n logger.exception(u'Caught Exception while authenticating %s', self._username)\n raise\n return True\n\n def add_user_to_db(self):\n \"\"\"Creating user id DB\"\"\"\n user = None\n try:\n self._get_or_create_user()\n user = self._user\n except self.AuthenticationFailed as e:\n logger.debug(u'Authentication failed for %s: %s' % 
(self._username, e))\n except ldap.LDAPError as e:\n logger.warning(u'Caught LDAPError while authenticating %s: %s', self._username, pprint.pformat(e))\n except Exception:\n logger.exception(u'Caught Exception while authenticating %s', self._username)\n raise\n return user\n", "source": "the_stack_v2_python_sparse", "source_path": "src/polyana-web-authentication/polyauthentication/backends.py", "source_repo": "KirpichenkovPavel/merger", "split": "val", "star_events_count": 0}
{"blob_id": "2b2fd3c7305379f2d1e963645bbe1b960fe1ff2d", "bodies": ["super(Attn, self).__init__()\nself.score_type = score_type\nself.hidden_size = hidden_size\nif score_type == 'general':\n self.attn = nn.Linear(hidden_size, hidden_size)\nelif score_type == 'concat':\n self.attn = nn.Linear(hidden_size * 2, hidden_size)\n self.v = nn.Parameter(torch.FloatTensor(1, hidden_size))", "if self.score_type == 'dot':\n energy = decoder_rnn_output.squeeze(0).dot(encoder_output.squeeze(0))\nelif self.score_type == 'general':\n energy = self.attn(encoder_output)\n energy = decoder_rnn_output.squeeze(0).dot(energy.squeeze(0))\nelif self.score_type == 'concat':\n h_o = torch.cat((decoder_rnn_output, encoder_output), 1)\n energy = self.attn(h_o)\n energy = self.v.squeeze(0).dot(energy.squeeze(0))\nreturn energy", "target_seqlen = rnn_outputs.size()[0]\ninput_seqlen = encoder_outputs.size()[0]\nbatch_size = encoder_outputs.size()[1]\nrnn_outputs = rnn_outputs.transpose(0, 1)\nencoder_outputs = encoder_outputs.transpose(0, 1)\nif self.score_type == 'general':\n encoder_outputs = self.attn(encoder_outputs).transpose(1, 2)\n attn_energies = rnn_outputs.bmm(encoder_outputs)\n res = my_log_softmax(attn_energies)\n return res\nattn_energies = get_variable(torch.zeros(batch_size, target_seqlen, input_seqlen))\nfor b in range(batch_size):\n decoder_rnn_output = rnn_outputs[b]\n for i in range(seq_len):\n encoder_output = encoder_outputs[i, b, :].squeeze(0)\n attn_energies[b, i] = self.score(decoder_rnn_output, encoder_output)\nattn_weights = get_variable(torch.zeros(this_batch_size, seq_len))\nfor b in range(this_batch_size):\n attn_weights[b] = F.softmax(attn_energies[b])\nreturn attn_weights"], "bodies_text": "<|body_start_0|>\n super(Attn, self).__init__()\n self.score_type = score_type\n self.hidden_size = hidden_size\n if score_type == 'general':\n self.attn = nn.Linear(hidden_size, hidden_size)\n elif score_type == 'concat':\n self.attn = nn.Linear(hidden_size * 2, hidden_size)\n self.v = nn.Parameter(torch.FloatTensor(1, hidden_size))\n<|end_body_0|>\n\n<|body_start_1|>\n if self.score_type == 'dot':\n energy = decoder_rnn_output.squeeze(0).dot(encoder_output.squeeze(0))\n elif self.score_type == 'general':\n energy = self.attn(encoder_output)\n energy = decoder_rnn_output.squeeze(0).dot(energy.squeeze(0))\n elif self.score_type == 'concat':\n h_o = torch.cat((decoder_rnn_output, encoder_output), 1)\n energy = self.attn(h_o)\n energy = self.v.squeeze(0).dot(energy.squeeze(0))\n return energy\n<|end_body_1|>\n\n<|body_start_2|>\n target_seqlen = rnn_outputs.size()[0]\n input_seqlen = encoder_outputs.size()[0]\n batch_size = encoder_outputs.size()[1]\n rnn_outputs = rnn_outputs.transpose(0, 1)\n encoder_outputs = encoder_outputs.transpose(0, 1)\n if self.score_type == 'general':\n encoder_outputs = self.attn(encoder_outputs).transpose(1, 2)\n attn_energies = rnn_outputs.bmm(encoder_outputs)\n res = my_log_softmax(attn_energies)\n return res\n attn_energies = get_variable(torch.zeros(batch_size, target_seqlen, input_seqlen))\n for b in range(batch_size):\n decoder_rnn_output = rnn_outputs[b]\n for i in range(seq_len):\n encoder_output = encoder_outputs[i, b, :].squeeze(0)\n attn_energies[b, i] = self.score(decoder_rnn_output, encoder_output)\n attn_weights = get_variable(torch.zeros(this_batch_size, seq_len))\n for b in range(this_batch_size):\n attn_weights[b] = F.softmax(attn_energies[b])\n return attn_weights\n<|end_body_2|>\n", "class_docstring": "计算对齐向量,只有general可以使用", "class_name": "Attn", 
"detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Attn:\n \"\"\"计算对齐向量,只有general可以使用\"\"\"\n\n def __init__(self, score_type, hidden_size):\n \"\"\"Args: score_type: 计算score的方法,'dot', 'general', 'concat' hidden_size: Encoder和Decoder的hidden_size\"\"\"\n <|body_0|>\n\n def score(self, decoder_rnn_output, encoder_output):\n \"\"\"计算Decoder中yt与Encoder中hs的打分。算出所有得分,再softmax就可以算出对齐向量。 下面均是单个batch Args: decoder_rnn_output: [1, h],Decoder中顶层RNN的输出[1,h] < [1,b,h] encoder_output: [1, h],Encoder最后的输出[1,h] < [s,b,h]> Returns: energy: 即Yt与Xs的得分\"\"\"\n <|body_1|>\n\n def forward(self, rnn_outputs, encoder_outputs):\n \"\"\"ts个时刻,计算ts个与is的对齐向量,也是注意力权值 Args: rnn_outputs: Decoder中GRU的输出[ts, b, h] encoder_outputs: Encoder的最后的输出, [is, b, h] Returns: attn_weights: Yt与所有Xs的注意力权值,[b, ts, is]\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Attn, self).__init__()\n self.score_type = score_type\n self.hidden_size = hidden_size\n if score_type == 'general':\n self.attn = nn.Linear(hidden_size, hidden_size)\n elif score_type == 'concat':\n self.attn = nn.Linear(hidden_size * 2, hidden_size)\n self.v = nn.Parameter(torch.FloatTensor(1, hidden_size))\n<|end_body_0|>\n\n<|body_start_1|>\n if self.score_type == 'dot':\n energy = decoder_rnn_output.squeeze(0).dot(encoder_output.squeeze(0))\n elif self.score_type == 'general':\n energy = self.attn(encoder_output)\n energy = decoder_rnn_output.squeeze(0).dot(energy.squeeze(0))\n elif self.score_type == 'concat':\n h_o = torch.cat((decoder_rnn_output, encoder_output), 1)\n energy = self.attn(h_o)\n energy = self.v.squeeze(0).dot(energy.squeeze(0))\n return energy\n<|end_body_1|>\n\n<|body_start_2|>\n target_seqlen = rnn_outputs.size()[0]\n input_seqlen = encoder_outputs.size()[0]\n batch_size = encoder_outputs.size()[1]\n rnn_outputs = rnn_outputs.transpose(0, 1)\n encoder_outputs = encoder_outputs.transpose(0, 1)\n if self.score_type == 'general':\n encoder_outputs = self.attn(encoder_outputs).transpose(1, 2)\n attn_energies = rnn_outputs.bmm(encoder_outputs)\n res = my_log_softmax(attn_energies)\n return res\n attn_energies = get_variable(torch.zeros(batch_size, target_seqlen, input_seqlen))\n for b in range(batch_size):\n decoder_rnn_output = rnn_outputs[b]\n for i in range(seq_len):\n encoder_output = encoder_outputs[i, b, :].squeeze(0)\n attn_energies[b, i] = self.score(decoder_rnn_output, encoder_output)\n attn_weights = get_variable(torch.zeros(this_batch_size, seq_len))\n for b in range(this_batch_size):\n attn_weights[b] = F.softmax(attn_energies[b])\n return attn_weights\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000420", "length_bytes": 12285, "license_type": "no_license", "methods": [{"docstring": "Args: score_type: 计算score的方法,'dot', 'general', 'concat' hidden_size: Encoder和Decoder的hidden_size", "name": "__init__", "signature": "def __init__(self, score_type, hidden_size)"}, {"docstring": "计算Decoder中yt与Encoder中hs的打分。算出所有得分,再softmax就可以算出对齐向量。 下面均是单个batch Args: decoder_rnn_output: [1, h],Decoder中顶层RNN的输出[1,h] < [1,b,h] encoder_output: [1, h],Encoder最后的输出[1,h] < [s,b,h]> Returns: energy: 即Yt与Xs的得分", "name": "score", "signature": "def score(self, decoder_rnn_output, encoder_output)"}, {"docstring": "ts个时刻,计算ts个与is的对齐向量,也是注意力权值 Args: rnn_outputs: Decoder中GRU的输出[ts, b, h] encoder_outputs: Encoder的最后的输出, [is, b, h] Returns: attn_weights: Yt与所有Xs的注意力权值,[b, ts, is]", "name": "forward", "signature": "def forward(self, rnn_outputs, encoder_outputs)"}], "n_methods": 3, 
"original_id": "stack_v2_sparse_classes_30k_train_023819", "prompt": "Implement the Python class `Attn` described below.\n\nClass description:\n计算对齐向量,只有general可以使用\n\nMethod signatures and docstrings:\n- def __init__(self, score_type, hidden_size): Args: score_type: 计算score的方法,'dot', 'general', 'concat' hidden_size: Encoder和Decoder的hidden_size\n- def score(self, decoder_rnn_output, encoder_output): 计算Decoder中yt与Encoder中hs的打分。算出所有得分,再softmax就可以算出对齐向量。 下面均是单个batch Args: decoder_rnn_output: [1, h],Decoder中顶层RNN的输出[1,h] < [1,b,h] encoder_output: [1, h],Encoder最后的输出[1,h] < [s,b,h]> Returns: energy: 即Yt与Xs的得分\n- def forward(self, rnn_outputs, encoder_outputs): ts个时刻,计算ts个与is的对齐向量,也是注意力权值 Args: rnn_outputs: Decoder中GRU的输出[ts, b, h] encoder_outputs: Encoder的最后的输出, [is, b, h] Returns: attn_weights: Yt与所有Xs的注意力权值,[b, ts, is]", "prompted_full_text": "Implement the Python class `Attn` described below.\n\nClass description:\n计算对齐向量,只有general可以使用\n\nMethod signatures and docstrings:\n- def __init__(self, score_type, hidden_size): Args: score_type: 计算score的方法,'dot', 'general', 'concat' hidden_size: Encoder和Decoder的hidden_size\n- def score(self, decoder_rnn_output, encoder_output): 计算Decoder中yt与Encoder中hs的打分。算出所有得分,再softmax就可以算出对齐向量。 下面均是单个batch Args: decoder_rnn_output: [1, h],Decoder中顶层RNN的输出[1,h] < [1,b,h] encoder_output: [1, h],Encoder最后的输出[1,h] < [s,b,h]> Returns: energy: 即Yt与Xs的得分\n- def forward(self, rnn_outputs, encoder_outputs): ts个时刻,计算ts个与is的对齐向量,也是注意力权值 Args: rnn_outputs: Decoder中GRU的输出[ts, b, h] encoder_outputs: Encoder的最后的输出, [is, b, h] Returns: attn_weights: Yt与所有Xs的注意力权值,[b, ts, is]\n\n<|skeleton|>\nclass Attn:\n \"\"\"计算对齐向量,只有general可以使用\"\"\"\n\n def __init__(self, score_type, hidden_size):\n \"\"\"Args: score_type: 计算score的方法,'dot', 'general', 'concat' hidden_size: Encoder和Decoder的hidden_size\"\"\"\n <|body_0|>\n\n def score(self, decoder_rnn_output, encoder_output):\n \"\"\"计算Decoder中yt与Encoder中hs的打分。算出所有得分,再softmax就可以算出对齐向量。 下面均是单个batch Args: decoder_rnn_output: [1, h],Decoder中顶层RNN的输出[1,h] < [1,b,h] encoder_output: [1, h],Encoder最后的输出[1,h] < [s,b,h]> Returns: energy: 即Yt与Xs的得分\"\"\"\n <|body_1|>\n\n def forward(self, rnn_outputs, encoder_outputs):\n \"\"\"ts个时刻,计算ts个与is的对齐向量,也是注意力权值 Args: rnn_outputs: Decoder中GRU的输出[ts, b, h] encoder_outputs: Encoder的最后的输出, [is, b, h] Returns: attn_weights: Yt与所有Xs的注意力权值,[b, ts, is]\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Attn, self).__init__()\n self.score_type = score_type\n self.hidden_size = hidden_size\n if score_type == 'general':\n self.attn = nn.Linear(hidden_size, hidden_size)\n elif score_type == 'concat':\n self.attn = nn.Linear(hidden_size * 2, hidden_size)\n self.v = nn.Parameter(torch.FloatTensor(1, hidden_size))\n<|end_body_0|>\n\n<|body_start_1|>\n if self.score_type == 'dot':\n energy = decoder_rnn_output.squeeze(0).dot(encoder_output.squeeze(0))\n elif self.score_type == 'general':\n energy = self.attn(encoder_output)\n energy = decoder_rnn_output.squeeze(0).dot(energy.squeeze(0))\n elif self.score_type == 'concat':\n h_o = torch.cat((decoder_rnn_output, encoder_output), 1)\n energy = self.attn(h_o)\n energy = self.v.squeeze(0).dot(energy.squeeze(0))\n return energy\n<|end_body_1|>\n\n<|body_start_2|>\n target_seqlen = rnn_outputs.size()[0]\n input_seqlen = encoder_outputs.size()[0]\n batch_size = encoder_outputs.size()[1]\n rnn_outputs = rnn_outputs.transpose(0, 1)\n encoder_outputs = encoder_outputs.transpose(0, 1)\n if self.score_type == 'general':\n encoder_outputs = 
self.attn(encoder_outputs).transpose(1, 2)\n attn_energies = rnn_outputs.bmm(encoder_outputs)\n res = my_log_softmax(attn_energies)\n return res\n attn_energies = get_variable(torch.zeros(batch_size, target_seqlen, input_seqlen))\n for b in range(batch_size):\n decoder_rnn_output = rnn_outputs[b]\n for i in range(seq_len):\n encoder_output = encoder_outputs[i, b, :].squeeze(0)\n attn_energies[b, i] = self.score(decoder_rnn_output, encoder_output)\n attn_weights = get_variable(torch.zeros(this_batch_size, seq_len))\n for b in range(this_batch_size):\n attn_weights[b] = F.softmax(attn_energies[b])\n return attn_weights\n<|end_body_2|>\n", "revision_id": "c54079e1b6bb84654ec3e3fc08597ba493f50d5e", "skeleton": "<|skeleton|>\nclass Attn:\n \"\"\"计算对齐向量,只有general可以使用\"\"\"\n\n def __init__(self, score_type, hidden_size):\n \"\"\"Args: score_type: 计算score的方法,'dot', 'general', 'concat' hidden_size: Encoder和Decoder的hidden_size\"\"\"\n <|body_0|>\n\n def score(self, decoder_rnn_output, encoder_output):\n \"\"\"计算Decoder中yt与Encoder中hs的打分。算出所有得分,再softmax就可以算出对齐向量。 下面均是单个batch Args: decoder_rnn_output: [1, h],Decoder中顶层RNN的输出[1,h] < [1,b,h] encoder_output: [1, h],Encoder最后的输出[1,h] < [s,b,h]> Returns: energy: 即Yt与Xs的得分\"\"\"\n <|body_1|>\n\n def forward(self, rnn_outputs, encoder_outputs):\n \"\"\"ts个时刻,计算ts个与is的对齐向量,也是注意力权值 Args: rnn_outputs: Decoder中GRU的输出[ts, b, h] encoder_outputs: Encoder的最后的输出, [is, b, h] Returns: attn_weights: Yt与所有Xs的注意力权值,[b, ts, is]\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Attn:\n \"\"\"计算对齐向量,只有general可以使用\"\"\"\n\n def __init__(self, score_type, hidden_size):\n \"\"\"Args: score_type: 计算score的方法,'dot', 'general', 'concat' hidden_size: Encoder和Decoder的hidden_size\"\"\"\n super(Attn, self).__init__()\n self.score_type = score_type\n self.hidden_size = hidden_size\n if score_type == 'general':\n self.attn = nn.Linear(hidden_size, hidden_size)\n elif score_type == 'concat':\n self.attn = nn.Linear(hidden_size * 2, hidden_size)\n self.v = nn.Parameter(torch.FloatTensor(1, hidden_size))\n\n def score(self, decoder_rnn_output, encoder_output):\n \"\"\"计算Decoder中yt与Encoder中hs的打分。算出所有得分,再softmax就可以算出对齐向量。 下面均是单个batch Args: decoder_rnn_output: [1, h],Decoder中顶层RNN的输出[1,h] < [1,b,h] encoder_output: [1, h],Encoder最后的输出[1,h] < [s,b,h]> Returns: energy: 即Yt与Xs的得分\"\"\"\n if self.score_type == 'dot':\n energy = decoder_rnn_output.squeeze(0).dot(encoder_output.squeeze(0))\n elif self.score_type == 'general':\n energy = self.attn(encoder_output)\n energy = decoder_rnn_output.squeeze(0).dot(energy.squeeze(0))\n elif self.score_type == 'concat':\n h_o = torch.cat((decoder_rnn_output, encoder_output), 1)\n energy = self.attn(h_o)\n energy = self.v.squeeze(0).dot(energy.squeeze(0))\n return energy\n\n def forward(self, rnn_outputs, encoder_outputs):\n \"\"\"ts个时刻,计算ts个与is的对齐向量,也是注意力权值 Args: rnn_outputs: Decoder中GRU的输出[ts, b, h] encoder_outputs: Encoder的最后的输出, [is, b, h] Returns: attn_weights: Yt与所有Xs的注意力权值,[b, ts, is]\"\"\"\n target_seqlen = rnn_outputs.size()[0]\n input_seqlen = encoder_outputs.size()[0]\n batch_size = encoder_outputs.size()[1]\n rnn_outputs = rnn_outputs.transpose(0, 1)\n encoder_outputs = encoder_outputs.transpose(0, 1)\n if self.score_type == 'general':\n encoder_outputs = self.attn(encoder_outputs).transpose(1, 2)\n attn_energies = rnn_outputs.bmm(encoder_outputs)\n res = my_log_softmax(attn_energies)\n return res\n 
attn_energies = get_variable(torch.zeros(batch_size, target_seqlen, input_seqlen))\n for b in range(batch_size):\n decoder_rnn_output = rnn_outputs[b]\n for i in range(seq_len):\n encoder_output = encoder_outputs[i, b, :].squeeze(0)\n attn_energies[b, i] = self.score(decoder_rnn_output, encoder_output)\n attn_weights = get_variable(torch.zeros(this_batch_size, seq_len))\n for b in range(this_batch_size):\n attn_weights[b] = F.softmax(attn_energies[b])\n return attn_weights\n", "source": "the_stack_v2_python_sparse", "source_path": "en-zh-translation/model.py", "source_repo": "JeremyKwok666/NLP-Demos", "split": "val", "star_events_count": 0}
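A note on the `Attn` record above: its Chinese class docstring reads, roughly, "computes the alignment vector; only 'general' is usable", and the code bears that out — the fallback path in `forward` references `seq_len` and `this_batch_size`, neither of which is defined, so any `score_type` other than `'general'` raises `NameError` there (and `F.softmax` without a `dim` argument is deprecated in current PyTorch). The batched `'general'` path itself is sound. A self-contained sketch of just that path against current PyTorch, with the record's `get_variable`/`my_log_softmax` helpers replaced by standard torch calls (an assumption):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class GeneralAttn(nn.Module):
    """Batched Luong-style 'general' attention scoring."""

    def __init__(self, hidden_size):
        super().__init__()
        self.attn = nn.Linear(hidden_size, hidden_size)

    def forward(self, rnn_outputs, encoder_outputs):
        # rnn_outputs: [ts, b, h]; encoder_outputs: [is, b, h] -> [b, ts, is]
        rnn_outputs = rnn_outputs.transpose(0, 1)          # [b, ts, h]
        encoder_outputs = encoder_outputs.transpose(0, 1)  # [b, is, h]
        energies = rnn_outputs.bmm(self.attn(encoder_outputs).transpose(1, 2))
        return F.log_softmax(energies, dim=-1)  # normalize over source positions

attn = GeneralAttn(8)
weights = attn(torch.randn(4, 2, 8), torch.randn(7, 2, 8))
print(weights.shape)  # torch.Size([2, 4, 7])
```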
{"blob_id": "69999a306e94579cff7357c9b3a6b767d7589861", "bodies": ["nasa0 = NASA(coeffs=[11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0], Tmin=(300.0, 'K'), Tmax=(1000.0, 'K'), comment='This data is completely made up and unphysical')\nnasa1 = NASA(coeffs=[21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0], Tmin=(1000.0, 'K'), Tmax=(6000.0, 'K'), comment='This data is also completely made up and unphysical')\nself.thermo = MultiNASA(polynomials=[nasa0, nasa1], Tmin=(300.0, 'K'), Tmax=(6000.0, 'K'), comment='This data is completely made up and unphysical')", "import cPickle\nthermo = cPickle.loads(cPickle.dumps(self.thermo))\nself.assertEqual(len(self.thermo.polynomials), len(thermo.polynomials))\nfor poly0, poly in zip(self.thermo.polynomials, thermo.polynomials):\n self.assertEqual(poly0.cm2, poly.cm2)\n self.assertEqual(poly0.cm1, poly.cm1)\n self.assertEqual(poly0.c0, poly.c0)\n self.assertEqual(poly0.c1, poly.c1)\n self.assertEqual(poly0.c2, poly.c2)\n self.assertEqual(poly0.c3, poly.c3)\n self.assertEqual(poly0.c4, poly.c4)\n self.assertEqual(poly0.c5, poly.c5)\n self.assertEqual(poly0.c6, poly.c6)\n self.assertEqual(poly0.Tmin.value, poly.Tmin.value)\n self.assertEqual(poly0.Tmin.units, poly.Tmin.units)\n self.assertEqual(poly0.Tmax.value, poly.Tmax.value)\n self.assertEqual(poly0.Tmax.units, poly.Tmax.units)\n self.assertEqual(poly0.comment, poly.comment)\nself.assertEqual(self.thermo.Tmin.value, thermo.Tmin.value)\nself.assertEqual(self.thermo.Tmin.units, thermo.Tmin.units)\nself.assertEqual(self.thermo.Tmax.value, thermo.Tmax.value)\nself.assertEqual(self.thermo.Tmax.units, thermo.Tmax.units)\nself.assertEqual(self.thermo.comment, thermo.comment)", "exec('thermo = {0!r}'.format(self.thermo))\nself.assertEqual(len(self.thermo.polynomials), len(thermo.polynomials))\nfor poly0, poly in zip(self.thermo.polynomials, thermo.polynomials):\n self.assertEqual(poly0.cm2, poly.cm2)\n self.assertEqual(poly0.cm1, poly.cm1)\n self.assertEqual(poly0.c0, poly.c0)\n self.assertEqual(poly0.c1, poly.c1)\n self.assertEqual(poly0.c2, poly.c2)\n self.assertEqual(poly0.c3, poly.c3)\n self.assertEqual(poly0.c4, poly.c4)\n self.assertEqual(poly0.c5, poly.c5)\n self.assertEqual(poly0.c6, poly.c6)\n self.assertEqual(poly0.Tmin.value, poly.Tmin.value)\n self.assertEqual(poly0.Tmin.units, poly.Tmin.units)\n self.assertEqual(poly0.Tmax.value, poly.Tmax.value)\n self.assertEqual(poly0.Tmax.units, poly.Tmax.units)\n self.assertEqual(poly0.comment, poly.comment)\nself.assertEqual(self.thermo.Tmin.value, thermo.Tmin.value)\nself.assertEqual(self.thermo.Tmin.units, thermo.Tmin.units)\nself.assertEqual(self.thermo.Tmax.value, thermo.Tmax.value)\nself.assertEqual(self.thermo.Tmax.units, thermo.Tmax.units)\nself.assertEqual(self.thermo.comment, thermo.comment)"], "bodies_text": "<|body_start_0|>\n nasa0 = NASA(coeffs=[11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0], Tmin=(300.0, 'K'), Tmax=(1000.0, 'K'), comment='This data is completely made up and unphysical')\n nasa1 = NASA(coeffs=[21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0], Tmin=(1000.0, 'K'), Tmax=(6000.0, 'K'), comment='This data is also completely made up and unphysical')\n self.thermo = MultiNASA(polynomials=[nasa0, nasa1], Tmin=(300.0, 'K'), Tmax=(6000.0, 'K'), comment='This data is completely made up and unphysical')\n<|end_body_0|>\n\n<|body_start_1|>\n import cPickle\n thermo = cPickle.loads(cPickle.dumps(self.thermo))\n self.assertEqual(len(self.thermo.polynomials), len(thermo.polynomials))\n for poly0, poly in zip(self.thermo.polynomials, thermo.polynomials):\n 
self.assertEqual(poly0.cm2, poly.cm2)\n self.assertEqual(poly0.cm1, poly.cm1)\n self.assertEqual(poly0.c0, poly.c0)\n self.assertEqual(poly0.c1, poly.c1)\n self.assertEqual(poly0.c2, poly.c2)\n self.assertEqual(poly0.c3, poly.c3)\n self.assertEqual(poly0.c4, poly.c4)\n self.assertEqual(poly0.c5, poly.c5)\n self.assertEqual(poly0.c6, poly.c6)\n self.assertEqual(poly0.Tmin.value, poly.Tmin.value)\n self.assertEqual(poly0.Tmin.units, poly.Tmin.units)\n self.assertEqual(poly0.Tmax.value, poly.Tmax.value)\n self.assertEqual(poly0.Tmax.units, poly.Tmax.units)\n self.assertEqual(poly0.comment, poly.comment)\n self.assertEqual(self.thermo.Tmin.value, thermo.Tmin.value)\n self.assertEqual(self.thermo.Tmin.units, thermo.Tmin.units)\n self.assertEqual(self.thermo.Tmax.value, thermo.Tmax.value)\n self.assertEqual(self.thermo.Tmax.units, thermo.Tmax.units)\n self.assertEqual(self.thermo.comment, thermo.comment)\n<|end_body_1|>\n\n<|body_start_2|>\n exec('thermo = {0!r}'.format(self.thermo))\n self.assertEqual(len(self.thermo.polynomials), len(thermo.polynomials))\n for poly0, poly in zip(self.thermo.polynomials, thermo.polynomials):\n self.assertEqual(poly0.cm2, poly.cm2)\n self.assertEqual(poly0.cm1, poly.cm1)\n self.assertEqual(poly0.c0, poly.c0)\n self.assertEqual(poly0.c1, poly.c1)\n self.assertEqual(poly0.c2, poly.c2)\n self.assertEqual(poly0.c3, poly.c3)\n self.assertEqual(poly0.c4, poly.c4)\n self.assertEqual(poly0.c5, poly.c5)\n self.assertEqual(poly0.c6, poly.c6)\n self.assertEqual(poly0.Tmin.value, poly.Tmin.value)\n self.assertEqual(poly0.Tmin.units, poly.Tmin.units)\n self.assertEqual(poly0.Tmax.value, poly.Tmax.value)\n self.assertEqual(poly0.Tmax.units, poly.Tmax.units)\n self.assertEqual(poly0.comment, poly.comment)\n self.assertEqual(self.thermo.Tmin.value, thermo.Tmin.value)\n self.assertEqual(self.thermo.Tmin.units, thermo.Tmin.units)\n self.assertEqual(self.thermo.Tmax.value, thermo.Tmax.value)\n self.assertEqual(self.thermo.Tmax.units, thermo.Tmax.units)\n self.assertEqual(self.thermo.comment, thermo.comment)\n<|end_body_2|>\n", "class_docstring": "Contains unit tests of the MultiNASA class.", "class_name": "TestMultiNASA", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestMultiNASA:\n \"\"\"Contains unit tests of the MultiNASA class.\"\"\"\n\n def setUp(self):\n \"\"\"A function run before each unit test in this class.\"\"\"\n <|body_0|>\n\n def testPickle(self):\n \"\"\"Test that a MultiNASA object can be successfully pickled and unpickled with no loss of information.\"\"\"\n <|body_1|>\n\n def testOutput(self):\n \"\"\"Test that we can reconstruct a MultiNASA object from its repr() output with no loss of information.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n nasa0 = NASA(coeffs=[11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0], Tmin=(300.0, 'K'), Tmax=(1000.0, 'K'), comment='This data is completely made up and unphysical')\n nasa1 = NASA(coeffs=[21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0], Tmin=(1000.0, 'K'), Tmax=(6000.0, 'K'), comment='This data is also completely made up and unphysical')\n self.thermo = MultiNASA(polynomials=[nasa0, nasa1], Tmin=(300.0, 'K'), Tmax=(6000.0, 'K'), comment='This data is completely made up and unphysical')\n<|end_body_0|>\n\n<|body_start_1|>\n import cPickle\n thermo = cPickle.loads(cPickle.dumps(self.thermo))\n self.assertEqual(len(self.thermo.polynomials), len(thermo.polynomials))\n for poly0, poly in zip(self.thermo.polynomials, thermo.polynomials):\n 
self.assertEqual(poly0.cm2, poly.cm2)\n self.assertEqual(poly0.cm1, poly.cm1)\n self.assertEqual(poly0.c0, poly.c0)\n self.assertEqual(poly0.c1, poly.c1)\n self.assertEqual(poly0.c2, poly.c2)\n self.assertEqual(poly0.c3, poly.c3)\n self.assertEqual(poly0.c4, poly.c4)\n self.assertEqual(poly0.c5, poly.c5)\n self.assertEqual(poly0.c6, poly.c6)\n self.assertEqual(poly0.Tmin.value, poly.Tmin.value)\n self.assertEqual(poly0.Tmin.units, poly.Tmin.units)\n self.assertEqual(poly0.Tmax.value, poly.Tmax.value)\n self.assertEqual(poly0.Tmax.units, poly.Tmax.units)\n self.assertEqual(poly0.comment, poly.comment)\n self.assertEqual(self.thermo.Tmin.value, thermo.Tmin.value)\n self.assertEqual(self.thermo.Tmin.units, thermo.Tmin.units)\n self.assertEqual(self.thermo.Tmax.value, thermo.Tmax.value)\n self.assertEqual(self.thermo.Tmax.units, thermo.Tmax.units)\n self.assertEqual(self.thermo.comment, thermo.comment)\n<|end_body_1|>\n\n<|body_start_2|>\n exec('thermo = {0!r}'.format(self.thermo))\n self.assertEqual(len(self.thermo.polynomials), len(thermo.polynomials))\n for poly0, poly in zip(self.thermo.polynomials, thermo.polynomials):\n self.assertEqual(poly0.cm2, poly.cm2)\n self.assertEqual(poly0.cm1, poly.cm1)\n self.assertEqual(poly0.c0, poly.c0)\n self.assertEqual(poly0.c1, poly.c1)\n self.assertEqual(poly0.c2, poly.c2)\n self.assertEqual(poly0.c3, poly.c3)\n self.assertEqual(poly0.c4, poly.c4)\n self.assertEqual(poly0.c5, poly.c5)\n self.assertEqual(poly0.c6, poly.c6)\n self.assertEqual(poly0.Tmin.value, poly.Tmin.value)\n self.assertEqual(poly0.Tmin.units, poly.Tmin.units)\n self.assertEqual(poly0.Tmax.value, poly.Tmax.value)\n self.assertEqual(poly0.Tmax.units, poly.Tmax.units)\n self.assertEqual(poly0.comment, poly.comment)\n self.assertEqual(self.thermo.Tmin.value, thermo.Tmin.value)\n self.assertEqual(self.thermo.Tmin.units, thermo.Tmin.units)\n self.assertEqual(self.thermo.Tmax.value, thermo.Tmax.value)\n self.assertEqual(self.thermo.Tmax.units, thermo.Tmax.units)\n self.assertEqual(self.thermo.comment, thermo.comment)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000421", "length_bytes": 32390, "license_type": "permissive", "methods": [{"docstring": "A function run before each unit test in this class.", "name": "setUp", "signature": "def setUp(self)"}, {"docstring": "Test that a MultiNASA object can be successfully pickled and unpickled with no loss of information.", "name": "testPickle", "signature": "def testPickle(self)"}, {"docstring": "Test that we can reconstruct a MultiNASA object from its repr() output with no loss of information.", "name": "testOutput", "signature": "def testOutput(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_test_002709", "prompt": "Implement the Python class `TestMultiNASA` described below.\n\nClass description:\nContains unit tests of the MultiNASA class.\n\nMethod signatures and docstrings:\n- def setUp(self): A function run before each unit test in this class.\n- def testPickle(self): Test that a MultiNASA object can be successfully pickled and unpickled with no loss of information.\n- def testOutput(self): Test that we can reconstruct a MultiNASA object from its repr() output with no loss of information.", "prompted_full_text": "Implement the Python class `TestMultiNASA` described below.\n\nClass description:\nContains unit tests of the MultiNASA class.\n\nMethod signatures and docstrings:\n- def setUp(self): A function run before each unit test in this class.\n- def testPickle(self): Test that a MultiNASA object can 
be successfully pickled and unpickled with no loss of information.\n- def testOutput(self): Test that we can reconstruct a MultiNASA object from its repr() output with no loss of information.\n\n<|skeleton|>\nclass TestMultiNASA:\n \"\"\"Contains unit tests of the MultiNASA class.\"\"\"\n\n def setUp(self):\n \"\"\"A function run before each unit test in this class.\"\"\"\n <|body_0|>\n\n def testPickle(self):\n \"\"\"Test that a MultiNASA object can be successfully pickled and unpickled with no loss of information.\"\"\"\n <|body_1|>\n\n def testOutput(self):\n \"\"\"Test that we can reconstruct a MultiNASA object from its repr() output with no loss of information.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n nasa0 = NASA(coeffs=[11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0], Tmin=(300.0, 'K'), Tmax=(1000.0, 'K'), comment='This data is completely made up and unphysical')\n nasa1 = NASA(coeffs=[21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0], Tmin=(1000.0, 'K'), Tmax=(6000.0, 'K'), comment='This data is also completely made up and unphysical')\n self.thermo = MultiNASA(polynomials=[nasa0, nasa1], Tmin=(300.0, 'K'), Tmax=(6000.0, 'K'), comment='This data is completely made up and unphysical')\n<|end_body_0|>\n\n<|body_start_1|>\n import cPickle\n thermo = cPickle.loads(cPickle.dumps(self.thermo))\n self.assertEqual(len(self.thermo.polynomials), len(thermo.polynomials))\n for poly0, poly in zip(self.thermo.polynomials, thermo.polynomials):\n self.assertEqual(poly0.cm2, poly.cm2)\n self.assertEqual(poly0.cm1, poly.cm1)\n self.assertEqual(poly0.c0, poly.c0)\n self.assertEqual(poly0.c1, poly.c1)\n self.assertEqual(poly0.c2, poly.c2)\n self.assertEqual(poly0.c3, poly.c3)\n self.assertEqual(poly0.c4, poly.c4)\n self.assertEqual(poly0.c5, poly.c5)\n self.assertEqual(poly0.c6, poly.c6)\n self.assertEqual(poly0.Tmin.value, poly.Tmin.value)\n self.assertEqual(poly0.Tmin.units, poly.Tmin.units)\n self.assertEqual(poly0.Tmax.value, poly.Tmax.value)\n self.assertEqual(poly0.Tmax.units, poly.Tmax.units)\n self.assertEqual(poly0.comment, poly.comment)\n self.assertEqual(self.thermo.Tmin.value, thermo.Tmin.value)\n self.assertEqual(self.thermo.Tmin.units, thermo.Tmin.units)\n self.assertEqual(self.thermo.Tmax.value, thermo.Tmax.value)\n self.assertEqual(self.thermo.Tmax.units, thermo.Tmax.units)\n self.assertEqual(self.thermo.comment, thermo.comment)\n<|end_body_1|>\n\n<|body_start_2|>\n exec('thermo = {0!r}'.format(self.thermo))\n self.assertEqual(len(self.thermo.polynomials), len(thermo.polynomials))\n for poly0, poly in zip(self.thermo.polynomials, thermo.polynomials):\n self.assertEqual(poly0.cm2, poly.cm2)\n self.assertEqual(poly0.cm1, poly.cm1)\n self.assertEqual(poly0.c0, poly.c0)\n self.assertEqual(poly0.c1, poly.c1)\n self.assertEqual(poly0.c2, poly.c2)\n self.assertEqual(poly0.c3, poly.c3)\n self.assertEqual(poly0.c4, poly.c4)\n self.assertEqual(poly0.c5, poly.c5)\n self.assertEqual(poly0.c6, poly.c6)\n self.assertEqual(poly0.Tmin.value, poly.Tmin.value)\n self.assertEqual(poly0.Tmin.units, poly.Tmin.units)\n self.assertEqual(poly0.Tmax.value, poly.Tmax.value)\n self.assertEqual(poly0.Tmax.units, poly.Tmax.units)\n self.assertEqual(poly0.comment, poly.comment)\n self.assertEqual(self.thermo.Tmin.value, thermo.Tmin.value)\n self.assertEqual(self.thermo.Tmin.units, thermo.Tmin.units)\n self.assertEqual(self.thermo.Tmax.value, thermo.Tmax.value)\n self.assertEqual(self.thermo.Tmax.units, thermo.Tmax.units)\n self.assertEqual(self.thermo.comment, thermo.comment)\n<|end_body_2|>\n", 
"revision_id": "7cc7c3bfb330786526c56113d98c785bcaaa161a", "skeleton": "<|skeleton|>\nclass TestMultiNASA:\n \"\"\"Contains unit tests of the MultiNASA class.\"\"\"\n\n def setUp(self):\n \"\"\"A function run before each unit test in this class.\"\"\"\n <|body_0|>\n\n def testPickle(self):\n \"\"\"Test that a MultiNASA object can be successfully pickled and unpickled with no loss of information.\"\"\"\n <|body_1|>\n\n def testOutput(self):\n \"\"\"Test that we can reconstruct a MultiNASA object from its repr() output with no loss of information.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TestMultiNASA:\n \"\"\"Contains unit tests of the MultiNASA class.\"\"\"\n\n def setUp(self):\n \"\"\"A function run before each unit test in this class.\"\"\"\n nasa0 = NASA(coeffs=[11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0], Tmin=(300.0, 'K'), Tmax=(1000.0, 'K'), comment='This data is completely made up and unphysical')\n nasa1 = NASA(coeffs=[21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0], Tmin=(1000.0, 'K'), Tmax=(6000.0, 'K'), comment='This data is also completely made up and unphysical')\n self.thermo = MultiNASA(polynomials=[nasa0, nasa1], Tmin=(300.0, 'K'), Tmax=(6000.0, 'K'), comment='This data is completely made up and unphysical')\n\n def testPickle(self):\n \"\"\"Test that a MultiNASA object can be successfully pickled and unpickled with no loss of information.\"\"\"\n import cPickle\n thermo = cPickle.loads(cPickle.dumps(self.thermo))\n self.assertEqual(len(self.thermo.polynomials), len(thermo.polynomials))\n for poly0, poly in zip(self.thermo.polynomials, thermo.polynomials):\n self.assertEqual(poly0.cm2, poly.cm2)\n self.assertEqual(poly0.cm1, poly.cm1)\n self.assertEqual(poly0.c0, poly.c0)\n self.assertEqual(poly0.c1, poly.c1)\n self.assertEqual(poly0.c2, poly.c2)\n self.assertEqual(poly0.c3, poly.c3)\n self.assertEqual(poly0.c4, poly.c4)\n self.assertEqual(poly0.c5, poly.c5)\n self.assertEqual(poly0.c6, poly.c6)\n self.assertEqual(poly0.Tmin.value, poly.Tmin.value)\n self.assertEqual(poly0.Tmin.units, poly.Tmin.units)\n self.assertEqual(poly0.Tmax.value, poly.Tmax.value)\n self.assertEqual(poly0.Tmax.units, poly.Tmax.units)\n self.assertEqual(poly0.comment, poly.comment)\n self.assertEqual(self.thermo.Tmin.value, thermo.Tmin.value)\n self.assertEqual(self.thermo.Tmin.units, thermo.Tmin.units)\n self.assertEqual(self.thermo.Tmax.value, thermo.Tmax.value)\n self.assertEqual(self.thermo.Tmax.units, thermo.Tmax.units)\n self.assertEqual(self.thermo.comment, thermo.comment)\n\n def testOutput(self):\n \"\"\"Test that we can reconstruct a MultiNASA object from its repr() output with no loss of information.\"\"\"\n exec('thermo = {0!r}'.format(self.thermo))\n self.assertEqual(len(self.thermo.polynomials), len(thermo.polynomials))\n for poly0, poly in zip(self.thermo.polynomials, thermo.polynomials):\n self.assertEqual(poly0.cm2, poly.cm2)\n self.assertEqual(poly0.cm1, poly.cm1)\n self.assertEqual(poly0.c0, poly.c0)\n self.assertEqual(poly0.c1, poly.c1)\n self.assertEqual(poly0.c2, poly.c2)\n self.assertEqual(poly0.c3, poly.c3)\n self.assertEqual(poly0.c4, poly.c4)\n self.assertEqual(poly0.c5, poly.c5)\n self.assertEqual(poly0.c6, poly.c6)\n self.assertEqual(poly0.Tmin.value, poly.Tmin.value)\n self.assertEqual(poly0.Tmin.units, poly.Tmin.units)\n self.assertEqual(poly0.Tmax.value, poly.Tmax.value)\n self.assertEqual(poly0.Tmax.units, 
poly.Tmax.units)\n self.assertEqual(poly0.comment, poly.comment)\n self.assertEqual(self.thermo.Tmin.value, thermo.Tmin.value)\n self.assertEqual(self.thermo.Tmin.units, thermo.Tmin.units)\n self.assertEqual(self.thermo.Tmax.value, thermo.Tmax.value)\n self.assertEqual(self.thermo.Tmax.units, thermo.Tmax.units)\n self.assertEqual(self.thermo.comment, thermo.comment)\n", "source": "the_stack_v2_python_sparse", "source_path": "unittest/thermoTest.py", "source_repo": "sean-v8/RMG-Py", "split": "val", "star_events_count": 0}
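Two portability notes on the `TestMultiNASA` record above: `import cPickle` exists only in Python 2 (the module was folded into `pickle` in Python 3), and `exec('thermo = {0!r}'.format(self.thermo))` only binds a usable local name under Python 2 — in Python 3, `exec()` inside a function cannot create function locals, so `thermo` would be undefined afterwards. A hedged sketch of the same two round-trip idioms in Python 3; the `NASA`/`MultiNASA` classes are assumed importable from the project under test:

```python
import pickle

def pickle_roundtrip(obj):
    # Python 3 replacement for cPickle.loads(cPickle.dumps(obj)).
    return pickle.loads(pickle.dumps(obj, pickle.HIGHEST_PROTOCOL))

def repr_roundtrip(obj, namespace):
    # Evaluate repr() output directly instead of exec-assigning it; the
    # namespace dict must supply the constructors repr() refers to,
    # e.g. {'NASA': NASA, 'MultiNASA': MultiNASA}.
    return eval(repr(obj), namespace)
```

The long run of per-coefficient `assertEqual` calls could equally be driven from a list of attribute names, which would keep the pickle and repr tests from drifting apart.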
{"blob_id": "646a7319cc86ef33fa426c9985e2edc83175c21f", "bodies": ["self.X = X_init\nself.Y = Y_init\nself.l = l\nself.sigma_f = sigma_f\nself.K = self.kernel(self.X, self.X)", "first = np.sum(X1 ** 2, 1).reshape(-1, 1)\nsecond = np.sum(X2 ** 2, 1)\ndist_sq = first + second - 2 * np.dot(X1, X2.T)\nkernel = self.sigma_f ** 2 * np.exp(-0.5 / self.l ** 2 * dist_sq)\nreturn kernel"], "bodies_text": "<|body_start_0|>\n self.X = X_init\n self.Y = Y_init\n self.l = l\n self.sigma_f = sigma_f\n self.K = self.kernel(self.X, self.X)\n<|end_body_0|>\n\n<|body_start_1|>\n first = np.sum(X1 ** 2, 1).reshape(-1, 1)\n second = np.sum(X2 ** 2, 1)\n dist_sq = first + second - 2 * np.dot(X1, X2.T)\n kernel = self.sigma_f ** 2 * np.exp(-0.5 / self.l ** 2 * dist_sq)\n return kernel\n<|end_body_1|>\n", "class_docstring": "A class that represents a gaussian process", "class_name": "GaussianProcess", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GaussianProcess:\n \"\"\"A class that represents a gaussian process\"\"\"\n\n def __init__(self, X_init, Y_init, l=1, sigma_f=1):\n \"\"\"constructor for class\"\"\"\n <|body_0|>\n\n def kernel(self, X1, X2):\n \"\"\"Returns the covariance matrix\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.X = X_init\n self.Y = Y_init\n self.l = l\n self.sigma_f = sigma_f\n self.K = self.kernel(self.X, self.X)\n<|end_body_0|>\n\n<|body_start_1|>\n first = np.sum(X1 ** 2, 1).reshape(-1, 1)\n second = np.sum(X2 ** 2, 1)\n dist_sq = first + second - 2 * np.dot(X1, X2.T)\n kernel = self.sigma_f ** 2 * np.exp(-0.5 / self.l ** 2 * dist_sq)\n return kernel\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000422", "length_bytes": 779, "license_type": "no_license", "methods": [{"docstring": "constructor for class", "name": "__init__", "signature": "def __init__(self, X_init, Y_init, l=1, sigma_f=1)"}, {"docstring": "Returns the covariance matrix", "name": "kernel", "signature": "def kernel(self, X1, X2)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_017786", "prompt": "Implement the Python class `GaussianProcess` described below.\n\nClass description:\nA class that represents a gaussian process\n\nMethod signatures and docstrings:\n- def __init__(self, X_init, Y_init, l=1, sigma_f=1): constructor for class\n- def kernel(self, X1, X2): Returns the covariance matrix", "prompted_full_text": "Implement the Python class `GaussianProcess` described below.\n\nClass description:\nA class that represents a gaussian process\n\nMethod signatures and docstrings:\n- def __init__(self, X_init, Y_init, l=1, sigma_f=1): constructor for class\n- def kernel(self, X1, X2): Returns the covariance matrix\n\n<|skeleton|>\nclass GaussianProcess:\n \"\"\"A class that represents a gaussian process\"\"\"\n\n def __init__(self, X_init, Y_init, l=1, sigma_f=1):\n \"\"\"constructor for class\"\"\"\n <|body_0|>\n\n def kernel(self, X1, X2):\n \"\"\"Returns the covariance matrix\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.X = X_init\n self.Y = Y_init\n self.l = l\n self.sigma_f = sigma_f\n self.K = self.kernel(self.X, self.X)\n<|end_body_0|>\n\n<|body_start_1|>\n first = np.sum(X1 ** 2, 1).reshape(-1, 1)\n second = np.sum(X2 ** 2, 1)\n dist_sq = first + second - 2 * np.dot(X1, X2.T)\n kernel = self.sigma_f ** 2 * np.exp(-0.5 / self.l ** 2 * dist_sq)\n return kernel\n<|end_body_1|>\n", "revision_id": "91300120d38acb6440a6dbb8c408b1193c07de88", "skeleton": "<|skeleton|>\nclass 
GaussianProcess:\n \"\"\"A class that represents a gaussian process\"\"\"\n\n def __init__(self, X_init, Y_init, l=1, sigma_f=1):\n \"\"\"constructor for class\"\"\"\n <|body_0|>\n\n def kernel(self, X1, X2):\n \"\"\"Returns the covariance matrix\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class GaussianProcess:\n \"\"\"A class that represents a gaussian process\"\"\"\n\n def __init__(self, X_init, Y_init, l=1, sigma_f=1):\n \"\"\"constructor for class\"\"\"\n self.X = X_init\n self.Y = Y_init\n self.l = l\n self.sigma_f = sigma_f\n self.K = self.kernel(self.X, self.X)\n\n def kernel(self, X1, X2):\n \"\"\"Returns the covariance matrix\"\"\"\n first = np.sum(X1 ** 2, 1).reshape(-1, 1)\n second = np.sum(X2 ** 2, 1)\n dist_sq = first + second - 2 * np.dot(X1, X2.T)\n kernel = self.sigma_f ** 2 * np.exp(-0.5 / self.l ** 2 * dist_sq)\n return kernel\n", "source": "the_stack_v2_python_sparse", "source_path": "unsupervised_learning/0x03-hyperparameter_tuning/0-gp.py", "source_repo": "anaruzz/holbertonschool-machine_learning", "split": "val", "star_events_count": 0}
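The `kernel` method in the `GaussianProcess` record above vectorizes the RBF covariance via the identity ||x1 - x2||^2 = ||x1||^2 + ||x2||^2 - 2 x1.x2: the two squared-norm terms broadcast into an (n1, n2) grid of pairwise squared distances with no Python loop. A quick usage check, assuming the record's class is in scope with `numpy` imported as `np`:

```python
import numpy as np

X = np.random.rand(5, 1)
Y = np.sin(X)
gp = GaussianProcess(X, Y, l=0.5, sigma_f=2.0)

# The broadcasted kernel should match an explicit pairwise loop.
K_loop = np.array([[gp.sigma_f ** 2 * np.exp(-0.5 * np.sum((a - b) ** 2) / gp.l ** 2)
                    for b in X] for a in X])
assert np.allclose(gp.K, K_loop)
print(gp.K.shape)  # (5, 5)
```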
{"blob_id": "328333e39e08143d2f5baa92665b415619f10277", "bodies": ["exc_type = type(exc)\nif exc_type in (exceptions.BadResponse, exceptions.ConnectionFailed):\n raise Recoverable('Encountered %s; try again' % exc) from exc\nelif exc_type is exceptions.RequestFailed:\n if exc.status_code >= 500:\n msg = 'Classifier service choked: %i' % exc.status_code\n raise Recoverable(msg) from exc\n self.fail(exc, 'Unrecoverable exception: %i' % exc.status_code)\nself.fail(exc, 'Unhandled exception')", "try:\n self.process_result(Classifier.classify(content), trigger, emit)\nexcept Exception as exc:\n self.handle_classifier_exception(exc)", "suggestions, flags, counts = result\nresults = [{'category': suggestion.category, 'probability': suggestion.probability} for suggestion in suggestions]\nemit(AddClassifierResults(creator=self.agent, results=results))\nfor flag in flags:\n now = datetime.now(UTC).isoformat()\n comment = 'flag from classification succeeded at %s' % now\n flag_type = self.CLASSIFIER_FLAGS.get(flag.key)\n if flag_type is None:\n continue\n emit(AddContentFlag(creator=self.agent, flag_type=flag_type, flag_data=flag.value, comment=comment))\nemit(AddFeature(creator=self.agent, feature_type=Feature.Type.CHARACTER_COUNT, feature_value=counts.chars))\nemit(AddFeature(creator=self.agent, feature_type=Feature.Type.PAGE_COUNT, feature_value=counts.pages))\nemit(AddFeature(creator=self.agent, feature_type=Feature.Type.STOPWORD_COUNT, feature_value=counts.stops))\nemit(AddFeature(creator=self.agent, feature_type=Feature.Type.WORD_COUNT, feature_value=counts.words))\nemit(AddFeature(creator=self.agent, feature_type=Feature.Type.STOPWORD_PERCENT, feature_value=counts.stops / counts.words))"], "bodies_text": "<|body_start_0|>\n exc_type = type(exc)\n if exc_type in (exceptions.BadResponse, exceptions.ConnectionFailed):\n raise Recoverable('Encountered %s; try again' % exc) from exc\n elif exc_type is exceptions.RequestFailed:\n if exc.status_code >= 500:\n msg = 'Classifier service choked: %i' % exc.status_code\n raise Recoverable(msg) from exc\n self.fail(exc, 'Unrecoverable exception: %i' % exc.status_code)\n self.fail(exc, 'Unhandled exception')\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n self.process_result(Classifier.classify(content), trigger, emit)\n except Exception as exc:\n self.handle_classifier_exception(exc)\n<|end_body_1|>\n\n<|body_start_2|>\n suggestions, flags, counts = result\n results = [{'category': suggestion.category, 'probability': suggestion.probability} for suggestion in suggestions]\n emit(AddClassifierResults(creator=self.agent, results=results))\n for flag in flags:\n now = datetime.now(UTC).isoformat()\n comment = 'flag from classification succeeded at %s' % now\n flag_type = self.CLASSIFIER_FLAGS.get(flag.key)\n if flag_type is None:\n continue\n emit(AddContentFlag(creator=self.agent, flag_type=flag_type, flag_data=flag.value, comment=comment))\n emit(AddFeature(creator=self.agent, feature_type=Feature.Type.CHARACTER_COUNT, feature_value=counts.chars))\n emit(AddFeature(creator=self.agent, feature_type=Feature.Type.PAGE_COUNT, feature_value=counts.pages))\n emit(AddFeature(creator=self.agent, feature_type=Feature.Type.STOPWORD_COUNT, feature_value=counts.stops))\n emit(AddFeature(creator=self.agent, feature_type=Feature.Type.WORD_COUNT, feature_value=counts.words))\n emit(AddFeature(creator=self.agent, feature_type=Feature.Type.STOPWORD_PERCENT, feature_value=counts.stops / counts.words))\n<|end_body_2|>\n", "class_docstring": "Extract plain text and poll the 
autoclassifier. In addition to generating classification suggestions, the current implementation of the autoclassifier also generates features (like word counts) and content flags (e.g. possible language issues, line numbers).", "class_name": "RunAutoclassifier", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RunAutoclassifier:\n \"\"\"Extract plain text and poll the autoclassifier. In addition to generating classification suggestions, the current implementation of the autoclassifier also generates features (like word counts) and content flags (e.g. possible language issues, line numbers).\"\"\"\n\n def handle_classifier_exception(self, exc: Exception) -> None:\n \"\"\"Handle exceptions raised when calling the classifier service.\"\"\"\n <|body_0|>\n\n def call_classifier(self, content: bytes, trigger: Trigger, emit: Callable) -> None:\n \"\"\"Send plain text content to the autoclassifier.\"\"\"\n <|body_1|>\n\n def process_result(self, result: Tuple, trigger: Trigger, emit: Callable) -> None:\n \"\"\"Process the results returned by the autoclassifier.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n exc_type = type(exc)\n if exc_type in (exceptions.BadResponse, exceptions.ConnectionFailed):\n raise Recoverable('Encountered %s; try again' % exc) from exc\n elif exc_type is exceptions.RequestFailed:\n if exc.status_code >= 500:\n msg = 'Classifier service choked: %i' % exc.status_code\n raise Recoverable(msg) from exc\n self.fail(exc, 'Unrecoverable exception: %i' % exc.status_code)\n self.fail(exc, 'Unhandled exception')\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n self.process_result(Classifier.classify(content), trigger, emit)\n except Exception as exc:\n self.handle_classifier_exception(exc)\n<|end_body_1|>\n\n<|body_start_2|>\n suggestions, flags, counts = result\n results = [{'category': suggestion.category, 'probability': suggestion.probability} for suggestion in suggestions]\n emit(AddClassifierResults(creator=self.agent, results=results))\n for flag in flags:\n now = datetime.now(UTC).isoformat()\n comment = 'flag from classification succeeded at %s' % now\n flag_type = self.CLASSIFIER_FLAGS.get(flag.key)\n if flag_type is None:\n continue\n emit(AddContentFlag(creator=self.agent, flag_type=flag_type, flag_data=flag.value, comment=comment))\n emit(AddFeature(creator=self.agent, feature_type=Feature.Type.CHARACTER_COUNT, feature_value=counts.chars))\n emit(AddFeature(creator=self.agent, feature_type=Feature.Type.PAGE_COUNT, feature_value=counts.pages))\n emit(AddFeature(creator=self.agent, feature_type=Feature.Type.STOPWORD_COUNT, feature_value=counts.stops))\n emit(AddFeature(creator=self.agent, feature_type=Feature.Type.WORD_COUNT, feature_value=counts.words))\n emit(AddFeature(creator=self.agent, feature_type=Feature.Type.STOPWORD_PERCENT, feature_value=counts.stops / counts.words))\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000423", "length_bytes": 8823, "license_type": "permissive", "methods": [{"docstring": "Handle exceptions raised when calling the classifier service.", "name": "handle_classifier_exception", "signature": "def handle_classifier_exception(self, exc: Exception) -> None"}, {"docstring": "Send plain text content to the autoclassifier.", "name": "call_classifier", "signature": "def call_classifier(self, content: bytes, trigger: Trigger, emit: Callable) -> None"}, {"docstring": "Process the results returned by the autoclassifier.", "name": "process_result", 
"signature": "def process_result(self, result: Tuple, trigger: Trigger, emit: Callable) -> None"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_val_001415", "prompt": "Implement the Python class `RunAutoclassifier` described below.\n\nClass description:\nExtract plain text and poll the autoclassifier. In addition to generating classification suggestions, the current implementation of the autoclassifier also generates features (like word counts) and content flags (e.g. possible language issues, line numbers).\n\nMethod signatures and docstrings:\n- def handle_classifier_exception(self, exc: Exception) -> None: Handle exceptions raised when calling the classifier service.\n- def call_classifier(self, content: bytes, trigger: Trigger, emit: Callable) -> None: Send plain text content to the autoclassifier.\n- def process_result(self, result: Tuple, trigger: Trigger, emit: Callable) -> None: Process the results returned by the autoclassifier.", "prompted_full_text": "Implement the Python class `RunAutoclassifier` described below.\n\nClass description:\nExtract plain text and poll the autoclassifier. In addition to generating classification suggestions, the current implementation of the autoclassifier also generates features (like word counts) and content flags (e.g. possible language issues, line numbers).\n\nMethod signatures and docstrings:\n- def handle_classifier_exception(self, exc: Exception) -> None: Handle exceptions raised when calling the classifier service.\n- def call_classifier(self, content: bytes, trigger: Trigger, emit: Callable) -> None: Send plain text content to the autoclassifier.\n- def process_result(self, result: Tuple, trigger: Trigger, emit: Callable) -> None: Process the results returned by the autoclassifier.\n\n<|skeleton|>\nclass RunAutoclassifier:\n \"\"\"Extract plain text and poll the autoclassifier. In addition to generating classification suggestions, the current implementation of the autoclassifier also generates features (like word counts) and content flags (e.g. 
possible language issues, line numbers).\"\"\"\n\n def handle_classifier_exception(self, exc: Exception) -> None:\n \"\"\"Handle exceptions raised when calling the classifier service.\"\"\"\n <|body_0|>\n\n def call_classifier(self, content: bytes, trigger: Trigger, emit: Callable) -> None:\n \"\"\"Send plain text content to the autoclassifier.\"\"\"\n <|body_1|>\n\n def process_result(self, result: Tuple, trigger: Trigger, emit: Callable) -> None:\n \"\"\"Process the results returned by the autoclassifier.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n exc_type = type(exc)\n if exc_type in (exceptions.BadResponse, exceptions.ConnectionFailed):\n raise Recoverable('Encountered %s; try again' % exc) from exc\n elif exc_type is exceptions.RequestFailed:\n if exc.status_code >= 500:\n msg = 'Classifier service choked: %i' % exc.status_code\n raise Recoverable(msg) from exc\n self.fail(exc, 'Unrecoverable exception: %i' % exc.status_code)\n self.fail(exc, 'Unhandled exception')\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n self.process_result(Classifier.classify(content), trigger, emit)\n except Exception as exc:\n self.handle_classifier_exception(exc)\n<|end_body_1|>\n\n<|body_start_2|>\n suggestions, flags, counts = result\n results = [{'category': suggestion.category, 'probability': suggestion.probability} for suggestion in suggestions]\n emit(AddClassifierResults(creator=self.agent, results=results))\n for flag in flags:\n now = datetime.now(UTC).isoformat()\n comment = 'flag from classification succeeded at %s' % now\n flag_type = self.CLASSIFIER_FLAGS.get(flag.key)\n if flag_type is None:\n continue\n emit(AddContentFlag(creator=self.agent, flag_type=flag_type, flag_data=flag.value, comment=comment))\n emit(AddFeature(creator=self.agent, feature_type=Feature.Type.CHARACTER_COUNT, feature_value=counts.chars))\n emit(AddFeature(creator=self.agent, feature_type=Feature.Type.PAGE_COUNT, feature_value=counts.pages))\n emit(AddFeature(creator=self.agent, feature_type=Feature.Type.STOPWORD_COUNT, feature_value=counts.stops))\n emit(AddFeature(creator=self.agent, feature_type=Feature.Type.WORD_COUNT, feature_value=counts.words))\n emit(AddFeature(creator=self.agent, feature_type=Feature.Type.STOPWORD_PERCENT, feature_value=counts.stops / counts.words))\n<|end_body_2|>\n", "revision_id": "6077ce4e0685d67ce7010800083a898857158112", "skeleton": "<|skeleton|>\nclass RunAutoclassifier:\n \"\"\"Extract plain text and poll the autoclassifier. In addition to generating classification suggestions, the current implementation of the autoclassifier also generates features (like word counts) and content flags (e.g. possible language issues, line numbers).\"\"\"\n\n def handle_classifier_exception(self, exc: Exception) -> None:\n \"\"\"Handle exceptions raised when calling the classifier service.\"\"\"\n <|body_0|>\n\n def call_classifier(self, content: bytes, trigger: Trigger, emit: Callable) -> None:\n \"\"\"Send plain text content to the autoclassifier.\"\"\"\n <|body_1|>\n\n def process_result(self, result: Tuple, trigger: Trigger, emit: Callable) -> None:\n \"\"\"Process the results returned by the autoclassifier.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class RunAutoclassifier:\n \"\"\"Extract plain text and poll the autoclassifier. 
In addition to generating classification suggestions, the current implementation of the autoclassifier also generates features (like word counts) and content flags (e.g. possible language issues, line numbers).\"\"\"\n\n def handle_classifier_exception(self, exc: Exception) -> None:\n \"\"\"Handle exceptions raised when calling the classifier service.\"\"\"\n exc_type = type(exc)\n if exc_type in (exceptions.BadResponse, exceptions.ConnectionFailed):\n raise Recoverable('Encountered %s; try again' % exc) from exc\n elif exc_type is exceptions.RequestFailed:\n if exc.status_code >= 500:\n msg = 'Classifier service choked: %i' % exc.status_code\n raise Recoverable(msg) from exc\n self.fail(exc, 'Unrecoverable exception: %i' % exc.status_code)\n self.fail(exc, 'Unhandled exception')\n\n def call_classifier(self, content: bytes, trigger: Trigger, emit: Callable) -> None:\n \"\"\"Send plain text content to the autoclassifier.\"\"\"\n try:\n self.process_result(Classifier.classify(content), trigger, emit)\n except Exception as exc:\n self.handle_classifier_exception(exc)\n\n def process_result(self, result: Tuple, trigger: Trigger, emit: Callable) -> None:\n \"\"\"Process the results returned by the autoclassifier.\"\"\"\n suggestions, flags, counts = result\n results = [{'category': suggestion.category, 'probability': suggestion.probability} for suggestion in suggestions]\n emit(AddClassifierResults(creator=self.agent, results=results))\n for flag in flags:\n now = datetime.now(UTC).isoformat()\n comment = 'flag from classification succeeded at %s' % now\n flag_type = self.CLASSIFIER_FLAGS.get(flag.key)\n if flag_type is None:\n continue\n emit(AddContentFlag(creator=self.agent, flag_type=flag_type, flag_data=flag.value, comment=comment))\n emit(AddFeature(creator=self.agent, feature_type=Feature.Type.CHARACTER_COUNT, feature_value=counts.chars))\n emit(AddFeature(creator=self.agent, feature_type=Feature.Type.PAGE_COUNT, feature_value=counts.pages))\n emit(AddFeature(creator=self.agent, feature_type=Feature.Type.STOPWORD_COUNT, feature_value=counts.stops))\n emit(AddFeature(creator=self.agent, feature_type=Feature.Type.WORD_COUNT, feature_value=counts.words))\n emit(AddFeature(creator=self.agent, feature_type=Feature.Type.STOPWORD_PERCENT, feature_value=counts.stops / counts.words))\n", "source": "the_stack_v2_python_sparse", "source_path": "agent/agent/process/classification_and_content.py", "source_repo": "arXiv/arxiv-submission-core", "split": "val", "star_events_count": 14}
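The `handle_classifier_exception` method in the `RunAutoclassifier` record above encodes a retry policy as exception translation: transport faults and 5xx responses become `Recoverable` (chained with `raise ... from exc` so the task runner can retry without losing the cause), while 4xx responses and unknown errors terminate the task via `self.fail`. A reduced, self-contained sketch of that policy; `RequestFailed` here is a hypothetical stand-in for the record's `exceptions.RequestFailed`:

```python
class Recoverable(Exception):
    """Transient fault; the task runner should retry later."""

class RequestFailed(Exception):
    """Stand-in for a service error that carries an HTTP status code."""
    def __init__(self, status_code):
        super().__init__('request failed with status %i' % status_code)
        self.status_code = status_code

def classify_with_policy(classify, content):
    try:
        return classify(content)
    except ConnectionError as exc:
        # Transport-level fault: always worth retrying.
        raise Recoverable('connection failed; try again') from exc
    except RequestFailed as exc:
        if exc.status_code >= 500:
            # Server-side fault: transient by assumption.
            raise Recoverable('service choked: %i' % exc.status_code) from exc
        raise  # 4xx: the request itself is bad; do not retry.
```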
{"blob_id": "980e7804901a30bc37d9f3ff8fa6e30ab1f8211c", "bodies": ["self.data_format = data_format\nself.channel_axis = 1 if data_format == 'channels_first' else 3\nself.load_path = load_path", "shortcut = x\nif expansion != 1:\n x = tf.layers.conv2d(x, input_filters * expansion, kernel_size=1, use_bias=False, padding='same', data_format=self.data_format)\n x = tf.layers.batch_normalization(x, axis=self.channel_axis, training=training)\n x = tf.nn.relu6(x)\nx = tf.keras.layers.DepthwiseConv2D(3, strides=stride, padding='same', data_format=self.data_format, use_bias=False)(x)\nx = tf.layers.batch_normalization(x, axis=self.channel_axis, training=training)\nx = tf.nn.relu6(x)\nx = tf.layers.conv2d(x, output_filters, kernel_size=1, use_bias=False, padding='same', data_format=self.data_format)\nx = tf.layers.batch_normalization(x, axis=self.channel_axis, training=training)\nif stride == 1 and input_filters == output_filters:\n return shortcut + x\nelse:\n return x", "input_filters = int(x.get_shape()[self.channel_axis])\nx = self.block(x, input_filters, output_filters, expansion, stride, training)\nfor _ in range(1, repeat):\n input_filters = output_filters\n x = self.block(x, input_filters, output_filters, expansion, 1, training)\nreturn x", "outs = []\nx = tf.layers.conv2d(x, 32, 3, strides=2, use_bias=False, padding='same', data_format=self.data_format)\nx = tf.layers.batch_normalization(x, axis=self.channel_axis, training=training)\nx = tf.nn.relu6(x)\nexpansion_list = [1] + [6] * 6\noutput_filter_list = [16, 24, 32, 64, 96, 160, 320]\nrepeat_list = [1, 2, 3, 4, 3, 3, 1]\nstride_list = [1, 2, 2, 2, 1, 2, 1]\nfor i in range(7):\n x = self.blocks(x, expansion_list[i], output_filter_list[i], repeat_list[i], stride_list[i], training)\n if i in [1, 2, 4, 6]:\n outs.append(x)\nreturn outs"], "bodies_text": "<|body_start_0|>\n self.data_format = data_format\n self.channel_axis = 1 if data_format == 'channels_first' else 3\n self.load_path = load_path\n<|end_body_0|>\n\n<|body_start_1|>\n shortcut = x\n if expansion != 1:\n x = tf.layers.conv2d(x, input_filters * expansion, kernel_size=1, use_bias=False, padding='same', data_format=self.data_format)\n x = tf.layers.batch_normalization(x, axis=self.channel_axis, training=training)\n x = tf.nn.relu6(x)\n x = tf.keras.layers.DepthwiseConv2D(3, strides=stride, padding='same', data_format=self.data_format, use_bias=False)(x)\n x = tf.layers.batch_normalization(x, axis=self.channel_axis, training=training)\n x = tf.nn.relu6(x)\n x = tf.layers.conv2d(x, output_filters, kernel_size=1, use_bias=False, padding='same', data_format=self.data_format)\n x = tf.layers.batch_normalization(x, axis=self.channel_axis, training=training)\n if stride == 1 and input_filters == output_filters:\n return shortcut + x\n else:\n return x\n<|end_body_1|>\n\n<|body_start_2|>\n input_filters = int(x.get_shape()[self.channel_axis])\n x = self.block(x, input_filters, output_filters, expansion, stride, training)\n for _ in range(1, repeat):\n input_filters = output_filters\n x = self.block(x, input_filters, output_filters, expansion, 1, training)\n return x\n<|end_body_2|>\n\n<|body_start_3|>\n outs = []\n x = tf.layers.conv2d(x, 32, 3, strides=2, use_bias=False, padding='same', data_format=self.data_format)\n x = tf.layers.batch_normalization(x, axis=self.channel_axis, training=training)\n x = tf.nn.relu6(x)\n expansion_list = [1] + [6] * 6\n output_filter_list = [16, 24, 32, 64, 96, 160, 320]\n repeat_list = [1, 2, 3, 4, 3, 3, 1]\n stride_list = [1, 2, 2, 2, 1, 2, 1]\n for i in 
range(7):\n x = self.blocks(x, expansion_list[i], output_filter_list[i], repeat_list[i], stride_list[i], training)\n if i in [1, 2, 4, 6]:\n outs.append(x)\n return outs\n<|end_body_3|>\n", "class_docstring": "Backbone of mobilenet v2.", "class_name": "MobileNetV2Backbone", "detected_licenses": ["Apache-2.0", "MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MobileNetV2Backbone:\n \"\"\"Backbone of mobilenet v2.\"\"\"\n\n def __init__(self, load_path=None, data_format='channels_first'):\n \"\"\"Construct MobileNetV2 class. :param load_path: path for saved model\"\"\"\n <|body_0|>\n\n def block(self, x, input_filters, output_filters, expansion, stride, training):\n \"\"\"Mobilenetv2 block.\"\"\"\n <|body_1|>\n\n def blocks(self, x, expansion, output_filters, repeat, stride, training):\n \"\"\"Mobilenetv2 blocks.\"\"\"\n <|body_2|>\n\n def __call__(self, x, training):\n \"\"\"Do an inference on MobileNetV2. :param x: input tensor :return: output tensor\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.data_format = data_format\n self.channel_axis = 1 if data_format == 'channels_first' else 3\n self.load_path = load_path\n<|end_body_0|>\n\n<|body_start_1|>\n shortcut = x\n if expansion != 1:\n x = tf.layers.conv2d(x, input_filters * expansion, kernel_size=1, use_bias=False, padding='same', data_format=self.data_format)\n x = tf.layers.batch_normalization(x, axis=self.channel_axis, training=training)\n x = tf.nn.relu6(x)\n x = tf.keras.layers.DepthwiseConv2D(3, strides=stride, padding='same', data_format=self.data_format, use_bias=False)(x)\n x = tf.layers.batch_normalization(x, axis=self.channel_axis, training=training)\n x = tf.nn.relu6(x)\n x = tf.layers.conv2d(x, output_filters, kernel_size=1, use_bias=False, padding='same', data_format=self.data_format)\n x = tf.layers.batch_normalization(x, axis=self.channel_axis, training=training)\n if stride == 1 and input_filters == output_filters:\n return shortcut + x\n else:\n return x\n<|end_body_1|>\n\n<|body_start_2|>\n input_filters = int(x.get_shape()[self.channel_axis])\n x = self.block(x, input_filters, output_filters, expansion, stride, training)\n for _ in range(1, repeat):\n input_filters = output_filters\n x = self.block(x, input_filters, output_filters, expansion, 1, training)\n return x\n<|end_body_2|>\n\n<|body_start_3|>\n outs = []\n x = tf.layers.conv2d(x, 32, 3, strides=2, use_bias=False, padding='same', data_format=self.data_format)\n x = tf.layers.batch_normalization(x, axis=self.channel_axis, training=training)\n x = tf.nn.relu6(x)\n expansion_list = [1] + [6] * 6\n output_filter_list = [16, 24, 32, 64, 96, 160, 320]\n repeat_list = [1, 2, 3, 4, 3, 3, 1]\n stride_list = [1, 2, 2, 2, 1, 2, 1]\n for i in range(7):\n x = self.blocks(x, expansion_list[i], output_filter_list[i], repeat_list[i], stride_list[i], training)\n if i in [1, 2, 4, 6]:\n outs.append(x)\n return outs\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000424", "length_bytes": 3285, "license_type": "permissive", "methods": [{"docstring": "Construct MobileNetV2 class. 
:param load_path: path for saved model", "name": "__init__", "signature": "def __init__(self, load_path=None, data_format='channels_first')"}, {"docstring": "Mobilenetv2 block.", "name": "block", "signature": "def block(self, x, input_filters, output_filters, expansion, stride, training)"}, {"docstring": "Mobilenetv2 blocks.", "name": "blocks", "signature": "def blocks(self, x, expansion, output_filters, repeat, stride, training)"}, {"docstring": "Do an inference on MobileNetV2. :param x: input tensor :return: output tensor", "name": "__call__", "signature": "def __call__(self, x, training)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_011003", "prompt": "Implement the Python class `MobileNetV2Backbone` described below.\n\nClass description:\nBackbone of mobilenet v2.\n\nMethod signatures and docstrings:\n- def __init__(self, load_path=None, data_format='channels_first'): Construct MobileNetV2 class. :param load_path: path for saved model\n- def block(self, x, input_filters, output_filters, expansion, stride, training): Mobilenetv2 block.\n- def blocks(self, x, expansion, output_filters, repeat, stride, training): Mobilenetv2 blocks.\n- def __call__(self, x, training): Do an inference on MobileNetV2. :param x: input tensor :return: output tensor", "prompted_full_text": "Implement the Python class `MobileNetV2Backbone` described below.\n\nClass description:\nBackbone of mobilenet v2.\n\nMethod signatures and docstrings:\n- def __init__(self, load_path=None, data_format='channels_first'): Construct MobileNetV2 class. :param load_path: path for saved model\n- def block(self, x, input_filters, output_filters, expansion, stride, training): Mobilenetv2 block.\n- def blocks(self, x, expansion, output_filters, repeat, stride, training): Mobilenetv2 blocks.\n- def __call__(self, x, training): Do an inference on MobileNetV2. :param x: input tensor :return: output tensor\n\n<|skeleton|>\nclass MobileNetV2Backbone:\n \"\"\"Backbone of mobilenet v2.\"\"\"\n\n def __init__(self, load_path=None, data_format='channels_first'):\n \"\"\"Construct MobileNetV2 class. :param load_path: path for saved model\"\"\"\n <|body_0|>\n\n def block(self, x, input_filters, output_filters, expansion, stride, training):\n \"\"\"Mobilenetv2 block.\"\"\"\n <|body_1|>\n\n def blocks(self, x, expansion, output_filters, repeat, stride, training):\n \"\"\"Mobilenetv2 blocks.\"\"\"\n <|body_2|>\n\n def __call__(self, x, training):\n \"\"\"Do an inference on MobileNetV2. 
:param x: input tensor :return: output tensor\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.data_format = data_format\n self.channel_axis = 1 if data_format == 'channels_first' else 3\n self.load_path = load_path\n<|end_body_0|>\n\n<|body_start_1|>\n shortcut = x\n if expansion != 1:\n x = tf.layers.conv2d(x, input_filters * expansion, kernel_size=1, use_bias=False, padding='same', data_format=self.data_format)\n x = tf.layers.batch_normalization(x, axis=self.channel_axis, training=training)\n x = tf.nn.relu6(x)\n x = tf.keras.layers.DepthwiseConv2D(3, strides=stride, padding='same', data_format=self.data_format, use_bias=False)(x)\n x = tf.layers.batch_normalization(x, axis=self.channel_axis, training=training)\n x = tf.nn.relu6(x)\n x = tf.layers.conv2d(x, output_filters, kernel_size=1, use_bias=False, padding='same', data_format=self.data_format)\n x = tf.layers.batch_normalization(x, axis=self.channel_axis, training=training)\n if stride == 1 and input_filters == output_filters:\n return shortcut + x\n else:\n return x\n<|end_body_1|>\n\n<|body_start_2|>\n input_filters = int(x.get_shape()[self.channel_axis])\n x = self.block(x, input_filters, output_filters, expansion, stride, training)\n for _ in range(1, repeat):\n input_filters = output_filters\n x = self.block(x, input_filters, output_filters, expansion, 1, training)\n return x\n<|end_body_2|>\n\n<|body_start_3|>\n outs = []\n x = tf.layers.conv2d(x, 32, 3, strides=2, use_bias=False, padding='same', data_format=self.data_format)\n x = tf.layers.batch_normalization(x, axis=self.channel_axis, training=training)\n x = tf.nn.relu6(x)\n expansion_list = [1] + [6] * 6\n output_filter_list = [16, 24, 32, 64, 96, 160, 320]\n repeat_list = [1, 2, 3, 4, 3, 3, 1]\n stride_list = [1, 2, 2, 2, 1, 2, 1]\n for i in range(7):\n x = self.blocks(x, expansion_list[i], output_filter_list[i], repeat_list[i], stride_list[i], training)\n if i in [1, 2, 4, 6]:\n outs.append(x)\n return outs\n<|end_body_3|>\n", "revision_id": "df51ed9c1d6dbde1deef63f2a037a369f8554406", "skeleton": "<|skeleton|>\nclass MobileNetV2Backbone:\n \"\"\"Backbone of mobilenet v2.\"\"\"\n\n def __init__(self, load_path=None, data_format='channels_first'):\n \"\"\"Construct MobileNetV2 class. :param load_path: path for saved model\"\"\"\n <|body_0|>\n\n def block(self, x, input_filters, output_filters, expansion, stride, training):\n \"\"\"Mobilenetv2 block.\"\"\"\n <|body_1|>\n\n def blocks(self, x, expansion, output_filters, repeat, stride, training):\n \"\"\"Mobilenetv2 blocks.\"\"\"\n <|body_2|>\n\n def __call__(self, x, training):\n \"\"\"Do an inference on MobileNetV2. :param x: input tensor :return: output tensor\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class MobileNetV2Backbone:\n \"\"\"Backbone of mobilenet v2.\"\"\"\n\n def __init__(self, load_path=None, data_format='channels_first'):\n \"\"\"Construct MobileNetV2 class. 
:param load_path: path for saved model\"\"\"\n self.data_format = data_format\n self.channel_axis = 1 if data_format == 'channels_first' else 3\n self.load_path = load_path\n\n def block(self, x, input_filters, output_filters, expansion, stride, training):\n \"\"\"Mobilenetv2 block.\"\"\"\n shortcut = x\n if expansion != 1:\n x = tf.layers.conv2d(x, input_filters * expansion, kernel_size=1, use_bias=False, padding='same', data_format=self.data_format)\n x = tf.layers.batch_normalization(x, axis=self.channel_axis, training=training)\n x = tf.nn.relu6(x)\n x = tf.keras.layers.DepthwiseConv2D(3, strides=stride, padding='same', data_format=self.data_format, use_bias=False)(x)\n x = tf.layers.batch_normalization(x, axis=self.channel_axis, training=training)\n x = tf.nn.relu6(x)\n x = tf.layers.conv2d(x, output_filters, kernel_size=1, use_bias=False, padding='same', data_format=self.data_format)\n x = tf.layers.batch_normalization(x, axis=self.channel_axis, training=training)\n if stride == 1 and input_filters == output_filters:\n return shortcut + x\n else:\n return x\n\n def blocks(self, x, expansion, output_filters, repeat, stride, training):\n \"\"\"Mobilenetv2 blocks.\"\"\"\n input_filters = int(x.get_shape()[self.channel_axis])\n x = self.block(x, input_filters, output_filters, expansion, stride, training)\n for _ in range(1, repeat):\n input_filters = output_filters\n x = self.block(x, input_filters, output_filters, expansion, 1, training)\n return x\n\n def __call__(self, x, training):\n \"\"\"Do an inference on MobileNetV2. :param x: input tensor :return: output tensor\"\"\"\n outs = []\n x = tf.layers.conv2d(x, 32, 3, strides=2, use_bias=False, padding='same', data_format=self.data_format)\n x = tf.layers.batch_normalization(x, axis=self.channel_axis, training=training)\n x = tf.nn.relu6(x)\n expansion_list = [1] + [6] * 6\n output_filter_list = [16, 24, 32, 64, 96, 160, 320]\n repeat_list = [1, 2, 3, 4, 3, 3, 1]\n stride_list = [1, 2, 2, 2, 1, 2, 1]\n for i in range(7):\n x = self.blocks(x, expansion_list[i], output_filter_list[i], repeat_list[i], stride_list[i], training)\n if i in [1, 2, 4, 6]:\n outs.append(x)\n return outs\n", "source": "the_stack_v2_python_sparse", "source_path": "built-in/TensorFlow/Official/cv/image_classification/ResnetVariant_for_TensorFlow/automl/vega/search_space/networks/tensorflow/customs/adelaide_nn/mobilenetv2_backbone.py", "source_repo": "Huawei-Ascend/modelzoo", "split": "val", "star_events_count": 1}
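Editor's note on the MobileNetV2Backbone record above: its bodies are written against the TF1-era tf.layers API, which is removed in TensorFlow 2. Below is a minimal sketch of the same inverted-residual block using tf.keras layers; the function name inverted_residual and the channels-last default are illustrative choices, not part of the record.

import tensorflow as tf

def inverted_residual(x, input_filters, output_filters, expansion, stride,
                      training, channel_axis=-1):
    # Expansion (1x1 conv) -> depthwise 3x3 conv -> linear projection (1x1 conv),
    # mirroring the record's block() with TF2 Keras layers.
    shortcut = x
    if expansion != 1:
        x = tf.keras.layers.Conv2D(input_filters * expansion, 1, padding='same',
                                   use_bias=False)(x)
        x = tf.keras.layers.BatchNormalization(axis=channel_axis)(x, training=training)
        x = tf.keras.layers.ReLU(max_value=6.0)(x)
    x = tf.keras.layers.DepthwiseConv2D(3, strides=stride, padding='same',
                                        use_bias=False)(x)
    x = tf.keras.layers.BatchNormalization(axis=channel_axis)(x, training=training)
    x = tf.keras.layers.ReLU(max_value=6.0)(x)
    x = tf.keras.layers.Conv2D(output_filters, 1, padding='same', use_bias=False)(x)
    x = tf.keras.layers.BatchNormalization(axis=channel_axis)(x, training=training)
    # The residual shortcut applies only when spatial size and channels are preserved.
    if stride == 1 and input_filters == output_filters:
        return tf.keras.layers.Add()([shortcut, x])
    return x

With the record's stem (stride 2) and stage strides [1, 2, 2, 2, 1, 2, 1], the feature maps collected after stages 1, 2, 4 and 6 sit at strides 4, 8, 16 and 32, which is why exactly those outputs are returned for a detection/segmentation neck.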
{"blob_id": "19f66a603a1a6c8d9aac34ac2f5c817779472c96", "bodies": ["self.hana_helper = HanaGeneratorHelper(config)\nself.config = config\nself._extend_config()", "self.hana_helper._build_folder_structure(ConfigConstants.PROJECT_TEMPLATE_BASE_SDA_STRUCT, self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_OUTPUT_PATH_HANA), self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_OUTPUT_PATH_MODULE))\nyaml_writer = MTAYamlWriter(self.config)\ngrant_writer = HDBGrantWriter(self.config)\nsynonym_writer = HDBSynonymWriter(self.config)\nrole_writer = HDBRoleWriter(self.config)\nconsumption_processor = HanaSDAConsumptionProcessor(self.config)\noutput_path = self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_OUTPUT_PATH_HANA)\napp_id = self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_APPID)\nmodule_name = self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_MODULE_NAME)\nversion = self.config.get_entry(ConfigConstants.CONFIG_KEY_VERSION)\nschema = self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_SCHEMA)\ngrant_service = self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_GRANT_SERVICE)\nyaml_writer.generate(output_path, app_id, module_name, version, schema, grant_service)\nremote_source = self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_REMOTE_SOURCE)\ngrant_writer.generate(self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_GRANTS_PATH), remote_access=True, remote_source=remote_source)\nsynonym_writer.generate(self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_SYNONYMS_PATH))\nrole_writer.generate(self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_ROLES_PATH), name=self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_MODULE_NAME))\nconsumption_processor.generate(model_only)\nreturn output_path", "project_name = self.config.get_entry(ConfigConstants.CONFIG_KEY_PROJECT_NAME)\nsda_module_name = project_name + '_sda'\nsda_app_id = sda_module_name\nsda_schema = '\"' + (sda_module_name + '_SCHEMA').upper() + '\"'\nsda_output_path_hana = os.path.join(self.config.get_entry(ConfigConstants.CONFIG_KEY_OUTPUT_PATH), ConfigConstants.SDA_HANA_BASE_PATH)\nsda_output_path_module = os.path.join(sda_output_path_hana, sda_module_name)\nsda_output_path_module_src = os.path.join(sda_output_path_module, ConfigConstants.MODULE_SOURCE_PATH)\nsda_grants_path = os.path.join(sda_output_path_module_src, ConfigConstants.GRANTS_SOURCE_PATH)\nsda_synonyms_path = os.path.join(sda_output_path_module_src, ConfigConstants.SYNONYMS_SOURCE_PATH)\nsda_procedures_path = os.path.join(sda_output_path_module_src, ConfigConstants.PROCEDURES_SOURCE_PATH)\nsda_roles_path = os.path.join(sda_output_path_module_src, ConfigConstants.ROLES_SOURCE_PATH)\nsda_virtual_table_path = os.path.join(sda_output_path_module_src, ConfigConstants.VIRTUAL_TABLE_SOURCE_PATH)\nsda_cds_path = os.path.join(sda_output_path_module_src, ConfigConstants.CDS_SOURCE_PATH)\nself.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_MODULE_NAME, sda_module_name)\nself.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_APPID, sda_app_id)\nself.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_OUTPUT_PATH_HANA, sda_output_path_hana)\nself.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_OUTPUT_PATH_MODULE, sda_output_path_module)\nself.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_OUTPUT_PATH_MODULE_SRC, sda_output_path_module_src)\nself.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_GRANTS_PATH, sda_grants_path)\nself.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_SYNONYMS_PATH, 
sda_synonyms_path)\nself.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_PROCEDURES_PATH, sda_procedures_path)\nself.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_ROLES_PATH, sda_roles_path)\nself.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_VIRTUALTABLE_PATH, sda_virtual_table_path)\nself.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_CDS_PATH, sda_cds_path)\nself.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_SCHEMA, sda_schema)"], "bodies_text": "<|body_start_0|>\n self.hana_helper = HanaGeneratorHelper(config)\n self.config = config\n self._extend_config()\n<|end_body_0|>\n\n<|body_start_1|>\n self.hana_helper._build_folder_structure(ConfigConstants.PROJECT_TEMPLATE_BASE_SDA_STRUCT, self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_OUTPUT_PATH_HANA), self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_OUTPUT_PATH_MODULE))\n yaml_writer = MTAYamlWriter(self.config)\n grant_writer = HDBGrantWriter(self.config)\n synonym_writer = HDBSynonymWriter(self.config)\n role_writer = HDBRoleWriter(self.config)\n consumption_processor = HanaSDAConsumptionProcessor(self.config)\n output_path = self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_OUTPUT_PATH_HANA)\n app_id = self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_APPID)\n module_name = self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_MODULE_NAME)\n version = self.config.get_entry(ConfigConstants.CONFIG_KEY_VERSION)\n schema = self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_SCHEMA)\n grant_service = self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_GRANT_SERVICE)\n yaml_writer.generate(output_path, app_id, module_name, version, schema, grant_service)\n remote_source = self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_REMOTE_SOURCE)\n grant_writer.generate(self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_GRANTS_PATH), remote_access=True, remote_source=remote_source)\n synonym_writer.generate(self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_SYNONYMS_PATH))\n role_writer.generate(self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_ROLES_PATH), name=self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_MODULE_NAME))\n consumption_processor.generate(model_only)\n return output_path\n<|end_body_1|>\n\n<|body_start_2|>\n project_name = self.config.get_entry(ConfigConstants.CONFIG_KEY_PROJECT_NAME)\n sda_module_name = project_name + '_sda'\n sda_app_id = sda_module_name\n sda_schema = '\"' + (sda_module_name + '_SCHEMA').upper() + '\"'\n sda_output_path_hana = os.path.join(self.config.get_entry(ConfigConstants.CONFIG_KEY_OUTPUT_PATH), ConfigConstants.SDA_HANA_BASE_PATH)\n sda_output_path_module = os.path.join(sda_output_path_hana, sda_module_name)\n sda_output_path_module_src = os.path.join(sda_output_path_module, ConfigConstants.MODULE_SOURCE_PATH)\n sda_grants_path = os.path.join(sda_output_path_module_src, ConfigConstants.GRANTS_SOURCE_PATH)\n sda_synonyms_path = os.path.join(sda_output_path_module_src, ConfigConstants.SYNONYMS_SOURCE_PATH)\n sda_procedures_path = os.path.join(sda_output_path_module_src, ConfigConstants.PROCEDURES_SOURCE_PATH)\n sda_roles_path = os.path.join(sda_output_path_module_src, ConfigConstants.ROLES_SOURCE_PATH)\n sda_virtual_table_path = os.path.join(sda_output_path_module_src, ConfigConstants.VIRTUAL_TABLE_SOURCE_PATH)\n sda_cds_path = os.path.join(sda_output_path_module_src, ConfigConstants.CDS_SOURCE_PATH)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_MODULE_NAME, sda_module_name)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_APPID, sda_app_id)\n 
self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_OUTPUT_PATH_HANA, sda_output_path_hana)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_OUTPUT_PATH_MODULE, sda_output_path_module)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_OUTPUT_PATH_MODULE_SRC, sda_output_path_module_src)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_GRANTS_PATH, sda_grants_path)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_SYNONYMS_PATH, sda_synonyms_path)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_PROCEDURES_PATH, sda_procedures_path)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_ROLES_PATH, sda_roles_path)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_VIRTUALTABLE_PATH, sda_virtual_table_path)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_CDS_PATH, sda_cds_path)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_SCHEMA, sda_schema)\n<|end_body_2|>\n", "class_docstring": "This class provides HANA specific generation functionality for the Smart Data Access (SDA) scenario. It only creates the artifact for the second SDA HDI container which loads and uses data out of the first container which has been created before this class os called. It also extend the config to cater for specific required config.", "class_name": "HanaSDAGenerator", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass HanaSDAGenerator:\n \"\"\"This class provides HANA specific generation functionality for the Smart Data Access (SDA) scenario. It only creates the artifact for the second SDA HDI container which loads and uses data out of the first container which has been created before this class os called. It also extend the config to cater for specific required config.\"\"\"\n\n def __init__(self, config):\n \"\"\"This is main entry point for generating the HANA related artifacts for the SDA scenario Parameters ---------- config : dict Central config object\"\"\"\n <|body_0|>\n\n def generate_artifacts(self, model_only=True):\n \"\"\"Generate the artifacts by first building up the required folder structure for artifact storage and then generating the different required files. Be aware that this method only generates the generic files and offloads the generation of artifacts where traversal of base and consumption layer elements is required. Parameters ---------- model_only: boolean In the sda case we are only interested in transferring the model using SDA. This forces the HANA artifact generation to cater only for this scenario. 
Returns ------- output_path : str Return the output path of the root folder where the related artifacts are stored.\"\"\"\n <|body_1|>\n\n def _extend_config(self):\n \"\"\"Extend the config to cater for HANA SDA generation specific config.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.hana_helper = HanaGeneratorHelper(config)\n self.config = config\n self._extend_config()\n<|end_body_0|>\n\n<|body_start_1|>\n self.hana_helper._build_folder_structure(ConfigConstants.PROJECT_TEMPLATE_BASE_SDA_STRUCT, self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_OUTPUT_PATH_HANA), self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_OUTPUT_PATH_MODULE))\n yaml_writer = MTAYamlWriter(self.config)\n grant_writer = HDBGrantWriter(self.config)\n synonym_writer = HDBSynonymWriter(self.config)\n role_writer = HDBRoleWriter(self.config)\n consumption_processor = HanaSDAConsumptionProcessor(self.config)\n output_path = self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_OUTPUT_PATH_HANA)\n app_id = self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_APPID)\n module_name = self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_MODULE_NAME)\n version = self.config.get_entry(ConfigConstants.CONFIG_KEY_VERSION)\n schema = self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_SCHEMA)\n grant_service = self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_GRANT_SERVICE)\n yaml_writer.generate(output_path, app_id, module_name, version, schema, grant_service)\n remote_source = self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_REMOTE_SOURCE)\n grant_writer.generate(self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_GRANTS_PATH), remote_access=True, remote_source=remote_source)\n synonym_writer.generate(self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_SYNONYMS_PATH))\n role_writer.generate(self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_ROLES_PATH), name=self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_MODULE_NAME))\n consumption_processor.generate(model_only)\n return output_path\n<|end_body_1|>\n\n<|body_start_2|>\n project_name = self.config.get_entry(ConfigConstants.CONFIG_KEY_PROJECT_NAME)\n sda_module_name = project_name + '_sda'\n sda_app_id = sda_module_name\n sda_schema = '\"' + (sda_module_name + '_SCHEMA').upper() + '\"'\n sda_output_path_hana = os.path.join(self.config.get_entry(ConfigConstants.CONFIG_KEY_OUTPUT_PATH), ConfigConstants.SDA_HANA_BASE_PATH)\n sda_output_path_module = os.path.join(sda_output_path_hana, sda_module_name)\n sda_output_path_module_src = os.path.join(sda_output_path_module, ConfigConstants.MODULE_SOURCE_PATH)\n sda_grants_path = os.path.join(sda_output_path_module_src, ConfigConstants.GRANTS_SOURCE_PATH)\n sda_synonyms_path = os.path.join(sda_output_path_module_src, ConfigConstants.SYNONYMS_SOURCE_PATH)\n sda_procedures_path = os.path.join(sda_output_path_module_src, ConfigConstants.PROCEDURES_SOURCE_PATH)\n sda_roles_path = os.path.join(sda_output_path_module_src, ConfigConstants.ROLES_SOURCE_PATH)\n sda_virtual_table_path = os.path.join(sda_output_path_module_src, ConfigConstants.VIRTUAL_TABLE_SOURCE_PATH)\n sda_cds_path = os.path.join(sda_output_path_module_src, ConfigConstants.CDS_SOURCE_PATH)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_MODULE_NAME, sda_module_name)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_APPID, sda_app_id)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_OUTPUT_PATH_HANA, sda_output_path_hana)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_OUTPUT_PATH_MODULE, sda_output_path_module)\n 
self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_OUTPUT_PATH_MODULE_SRC, sda_output_path_module_src)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_GRANTS_PATH, sda_grants_path)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_SYNONYMS_PATH, sda_synonyms_path)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_PROCEDURES_PATH, sda_procedures_path)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_ROLES_PATH, sda_roles_path)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_VIRTUALTABLE_PATH, sda_virtual_table_path)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_CDS_PATH, sda_cds_path)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_SCHEMA, sda_schema)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000425", "length_bytes": 35977, "license_type": "permissive", "methods": [{"docstring": "This is main entry point for generating the HANA related artifacts for the SDA scenario Parameters ---------- config : dict Central config object", "name": "__init__", "signature": "def __init__(self, config)"}, {"docstring": "Generate the artifacts by first building up the required folder structure for artifact storage and then generating the different required files. Be aware that this method only generates the generic files and offloads the generation of artifacts where traversal of base and consumption layer elements is required. Parameters ---------- model_only: boolean In the sda case we are only interested in transferring the model using SDA. This forces the HANA artifact generation to cater only for this scenario. Returns ------- output_path : str Return the output path of the root folder where the related artifacts are stored.", "name": "generate_artifacts", "signature": "def generate_artifacts(self, model_only=True)"}, {"docstring": "Extend the config to cater for HANA SDA generation specific config.", "name": "_extend_config", "signature": "def _extend_config(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_003552", "prompt": "Implement the Python class `HanaSDAGenerator` described below.\n\nClass description:\nThis class provides HANA specific generation functionality for the Smart Data Access (SDA) scenario. It only creates the artifact for the second SDA HDI container which loads and uses data out of the first container which has been created before this class os called. It also extend the config to cater for specific required config.\n\nMethod signatures and docstrings:\n- def __init__(self, config): This is main entry point for generating the HANA related artifacts for the SDA scenario Parameters ---------- config : dict Central config object\n- def generate_artifacts(self, model_only=True): Generate the artifacts by first building up the required folder structure for artifact storage and then generating the different required files. Be aware that this method only generates the generic files and offloads the generation of artifacts where traversal of base and consumption layer elements is required. Parameters ---------- model_only: boolean In the sda case we are only interested in transferring the model using SDA. This forces the HANA artifact generation to cater only for this scenario. 
Returns ------- output_path : str Return the output path of the root folder where the related artifacts are stored.\n- def _extend_config(self): Extend the config to cater for HANA SDA generation specific config.", "prompted_full_text": "Implement the Python class `HanaSDAGenerator` described below.\n\nClass description:\nThis class provides HANA specific generation functionality for the Smart Data Access (SDA) scenario. It only creates the artifact for the second SDA HDI container which loads and uses data out of the first container which has been created before this class os called. It also extend the config to cater for specific required config.\n\nMethod signatures and docstrings:\n- def __init__(self, config): This is main entry point for generating the HANA related artifacts for the SDA scenario Parameters ---------- config : dict Central config object\n- def generate_artifacts(self, model_only=True): Generate the artifacts by first building up the required folder structure for artifact storage and then generating the different required files. Be aware that this method only generates the generic files and offloads the generation of artifacts where traversal of base and consumption layer elements is required. Parameters ---------- model_only: boolean In the sda case we are only interested in transferring the model using SDA. This forces the HANA artifact generation to cater only for this scenario. Returns ------- output_path : str Return the output path of the root folder where the related artifacts are stored.\n- def _extend_config(self): Extend the config to cater for HANA SDA generation specific config.\n\n<|skeleton|>\nclass HanaSDAGenerator:\n \"\"\"This class provides HANA specific generation functionality for the Smart Data Access (SDA) scenario. It only creates the artifact for the second SDA HDI container which loads and uses data out of the first container which has been created before this class os called. It also extend the config to cater for specific required config.\"\"\"\n\n def __init__(self, config):\n \"\"\"This is main entry point for generating the HANA related artifacts for the SDA scenario Parameters ---------- config : dict Central config object\"\"\"\n <|body_0|>\n\n def generate_artifacts(self, model_only=True):\n \"\"\"Generate the artifacts by first building up the required folder structure for artifact storage and then generating the different required files. Be aware that this method only generates the generic files and offloads the generation of artifacts where traversal of base and consumption layer elements is required. Parameters ---------- model_only: boolean In the sda case we are only interested in transferring the model using SDA. This forces the HANA artifact generation to cater only for this scenario. 
Returns ------- output_path : str Return the output path of the root folder where the related artifacts are stored.\"\"\"\n <|body_1|>\n\n def _extend_config(self):\n \"\"\"Extend the config to cater for HANA SDA generation specific config.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.hana_helper = HanaGeneratorHelper(config)\n self.config = config\n self._extend_config()\n<|end_body_0|>\n\n<|body_start_1|>\n self.hana_helper._build_folder_structure(ConfigConstants.PROJECT_TEMPLATE_BASE_SDA_STRUCT, self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_OUTPUT_PATH_HANA), self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_OUTPUT_PATH_MODULE))\n yaml_writer = MTAYamlWriter(self.config)\n grant_writer = HDBGrantWriter(self.config)\n synonym_writer = HDBSynonymWriter(self.config)\n role_writer = HDBRoleWriter(self.config)\n consumption_processor = HanaSDAConsumptionProcessor(self.config)\n output_path = self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_OUTPUT_PATH_HANA)\n app_id = self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_APPID)\n module_name = self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_MODULE_NAME)\n version = self.config.get_entry(ConfigConstants.CONFIG_KEY_VERSION)\n schema = self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_SCHEMA)\n grant_service = self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_GRANT_SERVICE)\n yaml_writer.generate(output_path, app_id, module_name, version, schema, grant_service)\n remote_source = self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_REMOTE_SOURCE)\n grant_writer.generate(self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_GRANTS_PATH), remote_access=True, remote_source=remote_source)\n synonym_writer.generate(self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_SYNONYMS_PATH))\n role_writer.generate(self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_ROLES_PATH), name=self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_MODULE_NAME))\n consumption_processor.generate(model_only)\n return output_path\n<|end_body_1|>\n\n<|body_start_2|>\n project_name = self.config.get_entry(ConfigConstants.CONFIG_KEY_PROJECT_NAME)\n sda_module_name = project_name + '_sda'\n sda_app_id = sda_module_name\n sda_schema = '\"' + (sda_module_name + '_SCHEMA').upper() + '\"'\n sda_output_path_hana = os.path.join(self.config.get_entry(ConfigConstants.CONFIG_KEY_OUTPUT_PATH), ConfigConstants.SDA_HANA_BASE_PATH)\n sda_output_path_module = os.path.join(sda_output_path_hana, sda_module_name)\n sda_output_path_module_src = os.path.join(sda_output_path_module, ConfigConstants.MODULE_SOURCE_PATH)\n sda_grants_path = os.path.join(sda_output_path_module_src, ConfigConstants.GRANTS_SOURCE_PATH)\n sda_synonyms_path = os.path.join(sda_output_path_module_src, ConfigConstants.SYNONYMS_SOURCE_PATH)\n sda_procedures_path = os.path.join(sda_output_path_module_src, ConfigConstants.PROCEDURES_SOURCE_PATH)\n sda_roles_path = os.path.join(sda_output_path_module_src, ConfigConstants.ROLES_SOURCE_PATH)\n sda_virtual_table_path = os.path.join(sda_output_path_module_src, ConfigConstants.VIRTUAL_TABLE_SOURCE_PATH)\n sda_cds_path = os.path.join(sda_output_path_module_src, ConfigConstants.CDS_SOURCE_PATH)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_MODULE_NAME, sda_module_name)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_APPID, sda_app_id)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_OUTPUT_PATH_HANA, sda_output_path_hana)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_OUTPUT_PATH_MODULE, sda_output_path_module)\n 
self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_OUTPUT_PATH_MODULE_SRC, sda_output_path_module_src)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_GRANTS_PATH, sda_grants_path)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_SYNONYMS_PATH, sda_synonyms_path)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_PROCEDURES_PATH, sda_procedures_path)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_ROLES_PATH, sda_roles_path)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_VIRTUALTABLE_PATH, sda_virtual_table_path)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_CDS_PATH, sda_cds_path)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_SCHEMA, sda_schema)\n<|end_body_2|>\n", "revision_id": "72b512b05fe2c238c09e20027e3ae3cfcd976771", "skeleton": "<|skeleton|>\nclass HanaSDAGenerator:\n \"\"\"This class provides HANA specific generation functionality for the Smart Data Access (SDA) scenario. It only creates the artifact for the second SDA HDI container which loads and uses data out of the first container which has been created before this class os called. It also extend the config to cater for specific required config.\"\"\"\n\n def __init__(self, config):\n \"\"\"This is main entry point for generating the HANA related artifacts for the SDA scenario Parameters ---------- config : dict Central config object\"\"\"\n <|body_0|>\n\n def generate_artifacts(self, model_only=True):\n \"\"\"Generate the artifacts by first building up the required folder structure for artifact storage and then generating the different required files. Be aware that this method only generates the generic files and offloads the generation of artifacts where traversal of base and consumption layer elements is required. Parameters ---------- model_only: boolean In the sda case we are only interested in transferring the model using SDA. This forces the HANA artifact generation to cater only for this scenario. Returns ------- output_path : str Return the output path of the root folder where the related artifacts are stored.\"\"\"\n <|body_1|>\n\n def _extend_config(self):\n \"\"\"Extend the config to cater for HANA SDA generation specific config.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class HanaSDAGenerator:\n \"\"\"This class provides HANA specific generation functionality for the Smart Data Access (SDA) scenario. It only creates the artifact for the second SDA HDI container which loads and uses data out of the first container which has been created before this class os called. It also extend the config to cater for specific required config.\"\"\"\n\n def __init__(self, config):\n \"\"\"This is main entry point for generating the HANA related artifacts for the SDA scenario Parameters ---------- config : dict Central config object\"\"\"\n self.hana_helper = HanaGeneratorHelper(config)\n self.config = config\n self._extend_config()\n\n def generate_artifacts(self, model_only=True):\n \"\"\"Generate the artifacts by first building up the required folder structure for artifact storage and then generating the different required files. Be aware that this method only generates the generic files and offloads the generation of artifacts where traversal of base and consumption layer elements is required. Parameters ---------- model_only: boolean In the sda case we are only interested in transferring the model using SDA. 
This forces the HANA artifact generation to cater only for this scenario. Returns ------- output_path : str Return the output path of the root folder where the related artifacts are stored.\"\"\"\n self.hana_helper._build_folder_structure(ConfigConstants.PROJECT_TEMPLATE_BASE_SDA_STRUCT, self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_OUTPUT_PATH_HANA), self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_OUTPUT_PATH_MODULE))\n yaml_writer = MTAYamlWriter(self.config)\n grant_writer = HDBGrantWriter(self.config)\n synonym_writer = HDBSynonymWriter(self.config)\n role_writer = HDBRoleWriter(self.config)\n consumption_processor = HanaSDAConsumptionProcessor(self.config)\n output_path = self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_OUTPUT_PATH_HANA)\n app_id = self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_APPID)\n module_name = self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_MODULE_NAME)\n version = self.config.get_entry(ConfigConstants.CONFIG_KEY_VERSION)\n schema = self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_SCHEMA)\n grant_service = self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_GRANT_SERVICE)\n yaml_writer.generate(output_path, app_id, module_name, version, schema, grant_service)\n remote_source = self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_REMOTE_SOURCE)\n grant_writer.generate(self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_GRANTS_PATH), remote_access=True, remote_source=remote_source)\n synonym_writer.generate(self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_SYNONYMS_PATH))\n role_writer.generate(self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_ROLES_PATH), name=self.config.get_entry(ConfigConstants.CONFIG_KEY_SDA_MODULE_NAME))\n consumption_processor.generate(model_only)\n return output_path\n\n def _extend_config(self):\n \"\"\"Extend the config to cater for HANA SDA generation specific config.\"\"\"\n project_name = self.config.get_entry(ConfigConstants.CONFIG_KEY_PROJECT_NAME)\n sda_module_name = project_name + '_sda'\n sda_app_id = sda_module_name\n sda_schema = '\"' + (sda_module_name + '_SCHEMA').upper() + '\"'\n sda_output_path_hana = os.path.join(self.config.get_entry(ConfigConstants.CONFIG_KEY_OUTPUT_PATH), ConfigConstants.SDA_HANA_BASE_PATH)\n sda_output_path_module = os.path.join(sda_output_path_hana, sda_module_name)\n sda_output_path_module_src = os.path.join(sda_output_path_module, ConfigConstants.MODULE_SOURCE_PATH)\n sda_grants_path = os.path.join(sda_output_path_module_src, ConfigConstants.GRANTS_SOURCE_PATH)\n sda_synonyms_path = os.path.join(sda_output_path_module_src, ConfigConstants.SYNONYMS_SOURCE_PATH)\n sda_procedures_path = os.path.join(sda_output_path_module_src, ConfigConstants.PROCEDURES_SOURCE_PATH)\n sda_roles_path = os.path.join(sda_output_path_module_src, ConfigConstants.ROLES_SOURCE_PATH)\n sda_virtual_table_path = os.path.join(sda_output_path_module_src, ConfigConstants.VIRTUAL_TABLE_SOURCE_PATH)\n sda_cds_path = os.path.join(sda_output_path_module_src, ConfigConstants.CDS_SOURCE_PATH)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_MODULE_NAME, sda_module_name)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_APPID, sda_app_id)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_OUTPUT_PATH_HANA, sda_output_path_hana)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_OUTPUT_PATH_MODULE, sda_output_path_module)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_OUTPUT_PATH_MODULE_SRC, sda_output_path_module_src)\n 
self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_GRANTS_PATH, sda_grants_path)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_SYNONYMS_PATH, sda_synonyms_path)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_PROCEDURES_PATH, sda_procedures_path)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_ROLES_PATH, sda_roles_path)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_VIRTUALTABLE_PATH, sda_virtual_table_path)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_CDS_PATH, sda_cds_path)\n self.config.add_entry(ConfigConstants.CONFIG_KEY_SDA_SCHEMA, sda_schema)\n", "source": "the_stack_v2_python_sparse", "source_path": "Python-API/extension/artifact/src/hana_ml_artifact/generators/hana.py", "source_repo": "SAP-samples/hana-ml-samples", "split": "val", "star_events_count": 83}
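Editor's note on the HanaSDAGenerator record above: it assumes a central config object exposing get_entry/add_entry plus a set of ConfigConstants keys, none of which are defined in the record itself. For exercising _extend_config in isolation, a dict-backed stand-in like the following is enough (a hypothetical helper, not part of the hana_ml_artifact package):

class Config:
    """Minimal stand-in for the central config object the generator expects."""

    def __init__(self, entries=None):
        self._entries = dict(entries or {})

    def add_entry(self, key, value):
        self._entries[key] = value

    def get_entry(self, key):
        # The real object may raise a domain-specific error; KeyError is fine here.
        return self._entries[key]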
{"blob_id": "3aeab709699d99f931b9e6dc8a73d3796df1b450", "bodies": ["query = Plant.objects.order_by('-id')[:10]\nserializer = PlantSerializer(query, many=True)\nreturn Response(serializer.data)", "query_dict = dict(request.GET)\nfor key, value in query_dict.iteritems():\n if len(value) > 1:\n query_dict.pop('key')\n elif key == 'accepted' or key == 'public':\n if value[0] == 'True':\n query_dict[key] = True\n else:\n query_dict[key] = False\n else:\n query_dict[key] = value[0]\nquery = Plant.objects.filter(**query_dict)[:10]\nserializer = PlantSerializer(query, many=True)\nreturn Response(serializer.data)"], "bodies_text": "<|body_start_0|>\n query = Plant.objects.order_by('-id')[:10]\n serializer = PlantSerializer(query, many=True)\n return Response(serializer.data)\n<|end_body_0|>\n\n<|body_start_1|>\n query_dict = dict(request.GET)\n for key, value in query_dict.iteritems():\n if len(value) > 1:\n query_dict.pop('key')\n elif key == 'accepted' or key == 'public':\n if value[0] == 'True':\n query_dict[key] = True\n else:\n query_dict[key] = False\n else:\n query_dict[key] = value[0]\n query = Plant.objects.filter(**query_dict)[:10]\n serializer = PlantSerializer(query, many=True)\n return Response(serializer.data)\n<|end_body_1|>\n", "class_docstring": "This viewset is responsible for the ``plant`` endpoint.", "class_name": "PlantViewSet", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PlantViewSet:\n \"\"\"This viewset is responsible for the ``plant`` endpoint.\"\"\"\n\n def list(self, request):\n \"\"\"GET method implementation of listing the latest ``Plant`` models Instead of listing all of the models, we are only going to display the last 10\"\"\"\n <|body_0|>\n\n def search(self, request):\n \"\"\"GET method implementation of listing all the ``Plant`` models that meet the requirement\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n query = Plant.objects.order_by('-id')[:10]\n serializer = PlantSerializer(query, many=True)\n return Response(serializer.data)\n<|end_body_0|>\n\n<|body_start_1|>\n query_dict = dict(request.GET)\n for key, value in query_dict.iteritems():\n if len(value) > 1:\n query_dict.pop('key')\n elif key == 'accepted' or key == 'public':\n if value[0] == 'True':\n query_dict[key] = True\n else:\n query_dict[key] = False\n else:\n query_dict[key] = value[0]\n query = Plant.objects.filter(**query_dict)[:10]\n serializer = PlantSerializer(query, many=True)\n return Response(serializer.data)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000426", "length_bytes": 1767, "license_type": "no_license", "methods": [{"docstring": "GET method implementation of listing the latest ``Plant`` models Instead of listing all of the models, we are only going to display the last 10", "name": "list", "signature": "def list(self, request)"}, {"docstring": "GET method implementation of listing all the ``Plant`` models that meet the requirement", "name": "search", "signature": "def search(self, request)"}], "n_methods": 2, "prompt": "Implement the Python class `PlantViewSet` described below.\n\nClass description:\nThis viewset is responsible for the ``plant`` endpoint.\n\nMethod signatures and docstrings:\n- def list(self, request): GET method implementation of listing the latest ``Plant`` models Instead of listing all of the models, we are only going to display the last 10\n- def search(self, request): GET method implementation of listing all the ``Plant`` models that meet the requirement", 
"prompted_full_text": "Implement the Python class `PlantViewSet` described below.\n\nClass description:\nThis viewset is responsible for the ``plant`` endpoint.\n\nMethod signatures and docstrings:\n- def list(self, request): GET method implementation of listing the latest ``Plant`` models Instead of listing all of the models, we are only going to display the last 10\n- def search(self, request): GET method implementation of listing all the ``Plant`` models that meet the requirement\n\n<|skeleton|>\nclass PlantViewSet:\n \"\"\"This viewset is responsible for the ``plant`` endpoint.\"\"\"\n\n def list(self, request):\n \"\"\"GET method implementation of listing the latest ``Plant`` models Instead of listing all of the models, we are only going to display the last 10\"\"\"\n <|body_0|>\n\n def search(self, request):\n \"\"\"GET method implementation of listing all the ``Plant`` models that meet the requirement\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n query = Plant.objects.order_by('-id')[:10]\n serializer = PlantSerializer(query, many=True)\n return Response(serializer.data)\n<|end_body_0|>\n\n<|body_start_1|>\n query_dict = dict(request.GET)\n for key, value in query_dict.iteritems():\n if len(value) > 1:\n query_dict.pop('key')\n elif key == 'accepted' or key == 'public':\n if value[0] == 'True':\n query_dict[key] = True\n else:\n query_dict[key] = False\n else:\n query_dict[key] = value[0]\n query = Plant.objects.filter(**query_dict)[:10]\n serializer = PlantSerializer(query, many=True)\n return Response(serializer.data)\n<|end_body_1|>\n", "revision_id": "9ac5b045d7db43c4cd43d40cca8941cf1f53d642", "skeleton": "<|skeleton|>\nclass PlantViewSet:\n \"\"\"This viewset is responsible for the ``plant`` endpoint.\"\"\"\n\n def list(self, request):\n \"\"\"GET method implementation of listing the latest ``Plant`` models Instead of listing all of the models, we are only going to display the last 10\"\"\"\n <|body_0|>\n\n def search(self, request):\n \"\"\"GET method implementation of listing all the ``Plant`` models that meet the requirement\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class PlantViewSet:\n \"\"\"This viewset is responsible for the ``plant`` endpoint.\"\"\"\n\n def list(self, request):\n \"\"\"GET method implementation of listing the latest ``Plant`` models Instead of listing all of the models, we are only going to display the last 10\"\"\"\n query = Plant.objects.order_by('-id')[:10]\n serializer = PlantSerializer(query, many=True)\n return Response(serializer.data)\n\n def search(self, request):\n \"\"\"GET method implementation of listing all the ``Plant`` models that meet the requirement\"\"\"\n query_dict = dict(request.GET)\n for key, value in query_dict.iteritems():\n if len(value) > 1:\n query_dict.pop('key')\n elif key == 'accepted' or key == 'public':\n if value[0] == 'True':\n query_dict[key] = True\n else:\n query_dict[key] = False\n else:\n query_dict[key] = value[0]\n query = Plant.objects.filter(**query_dict)[:10]\n serializer = PlantSerializer(query, many=True)\n return Response(serializer.data)\n", "source": "the_stack_v2_python_sparse", "source_path": "terrarium/web/api/plants/views.py", "source_repo": "roystchiang/terrarium", "split": "val", "star_events_count": 0}
{"blob_id": "498349361381e88270775de2cdbd57bdc27e37bf", "bodies": ["self.patients = {}\nself.audit_time = []\nself.audit_beds = []\nself.bed_count = 0\nself.admissions = 0\nreturn", "self.audit_time.append(time)\nself.audit_beds.append(self.bed_count)\nreturn", "self.audit_report = pd.DataFrame()\nself.audit_report['Time'] = self.audit_time\nself.audit_report['Occupied_beds'] = self.audit_beds\nself.audit_report['Median_beds'] = self.audit_report['Occupied_beds'].quantile(0.5)\nself.audit_report['Beds_5_percent'] = self.audit_report['Occupied_beds'].quantile(0.05)\nself.audit_report['Beds_95_percent'] = self.audit_report['Occupied_beds'].quantile(0.95)\nreturn", "plt.plot(self.audit_report['Time'], self.audit_report['Occupied_beds'], color='k', marker='o', linestyle='solid', markevery=1, label='Occupied beds')\nplt.plot(self.audit_report['Time'], self.audit_report['Beds_5_percent'], color='0.5', linestyle='dashdot', markevery=1, label='5th percentile')\nplt.plot(self.audit_report['Time'], self.audit_report['Median_beds'], color='0.5', linestyle='dashed', label='Median')\nplt.plot(self.audit_report['Time'], self.audit_report['Beds_95_percent'], color='0.5', linestyle='dashdot', label='95th percentile')\nplt.xlabel('Day')\nplt.ylabel('Occupied beds')\nplt.title('Occupied beds (individual days with 5th, 50th and 95th ' + 'percentiles)')\nplt.show()\nreturn"], "bodies_text": "<|body_start_0|>\n self.patients = {}\n self.audit_time = []\n self.audit_beds = []\n self.bed_count = 0\n self.admissions = 0\n return\n<|end_body_0|>\n\n<|body_start_1|>\n self.audit_time.append(time)\n self.audit_beds.append(self.bed_count)\n return\n<|end_body_1|>\n\n<|body_start_2|>\n self.audit_report = pd.DataFrame()\n self.audit_report['Time'] = self.audit_time\n self.audit_report['Occupied_beds'] = self.audit_beds\n self.audit_report['Median_beds'] = self.audit_report['Occupied_beds'].quantile(0.5)\n self.audit_report['Beds_5_percent'] = self.audit_report['Occupied_beds'].quantile(0.05)\n self.audit_report['Beds_95_percent'] = self.audit_report['Occupied_beds'].quantile(0.95)\n return\n<|end_body_2|>\n\n<|body_start_3|>\n plt.plot(self.audit_report['Time'], self.audit_report['Occupied_beds'], color='k', marker='o', linestyle='solid', markevery=1, label='Occupied beds')\n plt.plot(self.audit_report['Time'], self.audit_report['Beds_5_percent'], color='0.5', linestyle='dashdot', markevery=1, label='5th percentile')\n plt.plot(self.audit_report['Time'], self.audit_report['Median_beds'], color='0.5', linestyle='dashed', label='Median')\n plt.plot(self.audit_report['Time'], self.audit_report['Beds_95_percent'], color='0.5', linestyle='dashdot', label='95th percentile')\n plt.xlabel('Day')\n plt.ylabel('Occupied beds')\n plt.title('Occupied beds (individual days with 5th, 50th and 95th ' + 'percentiles)')\n plt.show()\n return\n<|end_body_3|>\n", "class_docstring": "Hospital class holds: 1) Dictionary of patients present 2) List of audit times 3) List of beds occupied at each audit time 4) Current total beds occupied 5) Admissions to data Methods: __init__: Set up hospital instance audit: records number of beds occupied build_audit_report: builds audit report at end of run (calculate 5th, 50th and 95th percentile bed occupancy. 
chart: plot beds occupied over time (at end of run)", "class_name": "Hospital", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Hospital:\n \"\"\"Hospital class holds: 1) Dictionary of patients present 2) List of audit times 3) List of beds occupied at each audit time 4) Current total beds occupied 5) Admissions to data Methods: __init__: Set up hospital instance audit: records number of beds occupied build_audit_report: builds audit report at end of run (calculate 5th, 50th and 95th percentile bed occupancy. chart: plot beds occupied over time (at end of run)\"\"\"\n\n def __init__(self):\n \"\"\"Constructor method for hospital class\" Initialise object with attributes.\"\"\"\n <|body_0|>\n\n def audit(self, time):\n \"\"\"Audit method. When called appends current simulation time to audit_time list, and appends current bed count to audit_beds.\"\"\"\n <|body_1|>\n\n def build_audit_report(self):\n \"\"\"This method is called at end of run. It creates a pandas DataFrame, transfers audit times and bed counts to the DataFrame, and calculates/stores 5th, 50th and 95th percentiles.\"\"\"\n <|body_2|>\n\n def chart(self):\n \"\"\"This method is called at end of run. It plots beds occupancy over the model run, with 5%, 50% and 95% percentiles.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.patients = {}\n self.audit_time = []\n self.audit_beds = []\n self.bed_count = 0\n self.admissions = 0\n return\n<|end_body_0|>\n\n<|body_start_1|>\n self.audit_time.append(time)\n self.audit_beds.append(self.bed_count)\n return\n<|end_body_1|>\n\n<|body_start_2|>\n self.audit_report = pd.DataFrame()\n self.audit_report['Time'] = self.audit_time\n self.audit_report['Occupied_beds'] = self.audit_beds\n self.audit_report['Median_beds'] = self.audit_report['Occupied_beds'].quantile(0.5)\n self.audit_report['Beds_5_percent'] = self.audit_report['Occupied_beds'].quantile(0.05)\n self.audit_report['Beds_95_percent'] = self.audit_report['Occupied_beds'].quantile(0.95)\n return\n<|end_body_2|>\n\n<|body_start_3|>\n plt.plot(self.audit_report['Time'], self.audit_report['Occupied_beds'], color='k', marker='o', linestyle='solid', markevery=1, label='Occupied beds')\n plt.plot(self.audit_report['Time'], self.audit_report['Beds_5_percent'], color='0.5', linestyle='dashdot', markevery=1, label='5th percentile')\n plt.plot(self.audit_report['Time'], self.audit_report['Median_beds'], color='0.5', linestyle='dashed', label='Median')\n plt.plot(self.audit_report['Time'], self.audit_report['Beds_95_percent'], color='0.5', linestyle='dashdot', label='95th percentile')\n plt.xlabel('Day')\n plt.ylabel('Occupied beds')\n plt.title('Occupied beds (individual days with 5th, 50th and 95th ' + 'percentiles)')\n plt.show()\n return\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000427", "length_bytes": 11444, "license_type": "no_license", "methods": [{"docstring": "Constructor method for hospital class\" Initialise object with attributes.", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Audit method. When called appends current simulation time to audit_time list, and appends current bed count to audit_beds.", "name": "audit", "signature": "def audit(self, time)"}, {"docstring": "This method is called at end of run. 
It creates a pandas DataFrame, transfers audit times and bed counts to the DataFrame, and calculates/stores 5th, 50th and 95th percentiles.", "name": "build_audit_report", "signature": "def build_audit_report(self)"}, {"docstring": "This method is called at end of run. It plots beds occupancy over the model run, with 5%, 50% and 95% percentiles.", "name": "chart", "signature": "def chart(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_051645", "prompt": "Implement the Python class `Hospital` described below.\n\nClass description:\nHospital class holds: 1) Dictionary of patients present 2) List of audit times 3) List of beds occupied at each audit time 4) Current total beds occupied 5) Admissions to data Methods: __init__: Set up hospital instance audit: records number of beds occupied build_audit_report: builds audit report at end of run (calculate 5th, 50th and 95th percentile bed occupancy. chart: plot beds occupied over time (at end of run)\n\nMethod signatures and docstrings:\n- def __init__(self): Constructor method for hospital class\" Initialise object with attributes.\n- def audit(self, time): Audit method. When called appends current simulation time to audit_time list, and appends current bed count to audit_beds.\n- def build_audit_report(self): This method is called at end of run. It creates a pandas DataFrame, transfers audit times and bed counts to the DataFrame, and calculates/stores 5th, 50th and 95th percentiles.\n- def chart(self): This method is called at end of run. It plots beds occupancy over the model run, with 5%, 50% and 95% percentiles.", "prompted_full_text": "Implement the Python class `Hospital` described below.\n\nClass description:\nHospital class holds: 1) Dictionary of patients present 2) List of audit times 3) List of beds occupied at each audit time 4) Current total beds occupied 5) Admissions to data Methods: __init__: Set up hospital instance audit: records number of beds occupied build_audit_report: builds audit report at end of run (calculate 5th, 50th and 95th percentile bed occupancy. chart: plot beds occupied over time (at end of run)\n\nMethod signatures and docstrings:\n- def __init__(self): Constructor method for hospital class\" Initialise object with attributes.\n- def audit(self, time): Audit method. When called appends current simulation time to audit_time list, and appends current bed count to audit_beds.\n- def build_audit_report(self): This method is called at end of run. It creates a pandas DataFrame, transfers audit times and bed counts to the DataFrame, and calculates/stores 5th, 50th and 95th percentiles.\n- def chart(self): This method is called at end of run. It plots beds occupancy over the model run, with 5%, 50% and 95% percentiles.\n\n<|skeleton|>\nclass Hospital:\n \"\"\"Hospital class holds: 1) Dictionary of patients present 2) List of audit times 3) List of beds occupied at each audit time 4) Current total beds occupied 5) Admissions to data Methods: __init__: Set up hospital instance audit: records number of beds occupied build_audit_report: builds audit report at end of run (calculate 5th, 50th and 95th percentile bed occupancy. chart: plot beds occupied over time (at end of run)\"\"\"\n\n def __init__(self):\n \"\"\"Constructor method for hospital class\" Initialise object with attributes.\"\"\"\n <|body_0|>\n\n def audit(self, time):\n \"\"\"Audit method. 
When called appends current simulation time to audit_time list, and appends current bed count to audit_beds.\"\"\"\n <|body_1|>\n\n def build_audit_report(self):\n \"\"\"This method is called at end of run. It creates a pandas DataFrame, transfers audit times and bed counts to the DataFrame, and calculates/stores 5th, 50th and 95th percentiles.\"\"\"\n <|body_2|>\n\n def chart(self):\n \"\"\"This method is called at end of run. It plots beds occupancy over the model run, with 5%, 50% and 95% percentiles.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.patients = {}\n self.audit_time = []\n self.audit_beds = []\n self.bed_count = 0\n self.admissions = 0\n return\n<|end_body_0|>\n\n<|body_start_1|>\n self.audit_time.append(time)\n self.audit_beds.append(self.bed_count)\n return\n<|end_body_1|>\n\n<|body_start_2|>\n self.audit_report = pd.DataFrame()\n self.audit_report['Time'] = self.audit_time\n self.audit_report['Occupied_beds'] = self.audit_beds\n self.audit_report['Median_beds'] = self.audit_report['Occupied_beds'].quantile(0.5)\n self.audit_report['Beds_5_percent'] = self.audit_report['Occupied_beds'].quantile(0.05)\n self.audit_report['Beds_95_percent'] = self.audit_report['Occupied_beds'].quantile(0.95)\n return\n<|end_body_2|>\n\n<|body_start_3|>\n plt.plot(self.audit_report['Time'], self.audit_report['Occupied_beds'], color='k', marker='o', linestyle='solid', markevery=1, label='Occupied beds')\n plt.plot(self.audit_report['Time'], self.audit_report['Beds_5_percent'], color='0.5', linestyle='dashdot', markevery=1, label='5th percentile')\n plt.plot(self.audit_report['Time'], self.audit_report['Median_beds'], color='0.5', linestyle='dashed', label='Median')\n plt.plot(self.audit_report['Time'], self.audit_report['Beds_95_percent'], color='0.5', linestyle='dashdot', label='95th percentile')\n plt.xlabel('Day')\n plt.ylabel('Occupied beds')\n plt.title('Occupied beds (individual days with 5th, 50th and 95th ' + 'percentiles)')\n plt.show()\n return\n<|end_body_3|>\n", "revision_id": "b16f2b16f161cd68e0532e7a3381518e0d565bdf", "skeleton": "<|skeleton|>\nclass Hospital:\n \"\"\"Hospital class holds: 1) Dictionary of patients present 2) List of audit times 3) List of beds occupied at each audit time 4) Current total beds occupied 5) Admissions to data Methods: __init__: Set up hospital instance audit: records number of beds occupied build_audit_report: builds audit report at end of run (calculate 5th, 50th and 95th percentile bed occupancy. chart: plot beds occupied over time (at end of run)\"\"\"\n\n def __init__(self):\n \"\"\"Constructor method for hospital class\" Initialise object with attributes.\"\"\"\n <|body_0|>\n\n def audit(self, time):\n \"\"\"Audit method. When called appends current simulation time to audit_time list, and appends current bed count to audit_beds.\"\"\"\n <|body_1|>\n\n def build_audit_report(self):\n \"\"\"This method is called at end of run. It creates a pandas DataFrame, transfers audit times and bed counts to the DataFrame, and calculates/stores 5th, 50th and 95th percentiles.\"\"\"\n <|body_2|>\n\n def chart(self):\n \"\"\"This method is called at end of run. 
It plots beds occupancy over the model run, with 5%, 50% and 95% percentiles.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Hospital:\n \"\"\"Hospital class holds: 1) Dictionary of patients present 2) List of audit times 3) List of beds occupied at each audit time 4) Current total beds occupied 5) Admissions to data Methods: __init__: Set up hospital instance audit: records number of beds occupied build_audit_report: builds audit report at end of run (calculate 5th, 50th and 95th percentile bed occupancy. chart: plot beds occupied over time (at end of run)\"\"\"\n\n def __init__(self):\n \"\"\"Constructor method for hospital class\" Initialise object with attributes.\"\"\"\n self.patients = {}\n self.audit_time = []\n self.audit_beds = []\n self.bed_count = 0\n self.admissions = 0\n return\n\n def audit(self, time):\n \"\"\"Audit method. When called appends current simulation time to audit_time list, and appends current bed count to audit_beds.\"\"\"\n self.audit_time.append(time)\n self.audit_beds.append(self.bed_count)\n return\n\n def build_audit_report(self):\n \"\"\"This method is called at end of run. It creates a pandas DataFrame, transfers audit times and bed counts to the DataFrame, and calculates/stores 5th, 50th and 95th percentiles.\"\"\"\n self.audit_report = pd.DataFrame()\n self.audit_report['Time'] = self.audit_time\n self.audit_report['Occupied_beds'] = self.audit_beds\n self.audit_report['Median_beds'] = self.audit_report['Occupied_beds'].quantile(0.5)\n self.audit_report['Beds_5_percent'] = self.audit_report['Occupied_beds'].quantile(0.05)\n self.audit_report['Beds_95_percent'] = self.audit_report['Occupied_beds'].quantile(0.95)\n return\n\n def chart(self):\n \"\"\"This method is called at end of run. It plots beds occupancy over the model run, with 5%, 50% and 95% percentiles.\"\"\"\n plt.plot(self.audit_report['Time'], self.audit_report['Occupied_beds'], color='k', marker='o', linestyle='solid', markevery=1, label='Occupied beds')\n plt.plot(self.audit_report['Time'], self.audit_report['Beds_5_percent'], color='0.5', linestyle='dashdot', markevery=1, label='5th percentile')\n plt.plot(self.audit_report['Time'], self.audit_report['Median_beds'], color='0.5', linestyle='dashed', label='Median')\n plt.plot(self.audit_report['Time'], self.audit_report['Beds_95_percent'], color='0.5', linestyle='dashdot', label='95th percentile')\n plt.xlabel('Day')\n plt.ylabel('Occupied beds')\n plt.title('Occupied beds (individual days with 5th, 50th and 95th ' + 'percentiles)')\n plt.show()\n return\n", "source": "the_stack_v2_python_sparse", "source_path": "py_files/0087_bed_occupancy_object_based.py", "source_repo": "tospolkaw/CloanGit_PythonHealthcare", "split": "val", "star_events_count": 1}
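Editor's note on the Hospital record above: a quick usage sketch, assuming the class (with its module-level pandas/matplotlib imports as pd/plt) is in scope. Note that build_audit_report computes the 5th/50th/95th percentiles over the whole run and broadcasts them as constant columns, so chart() draws them as horizontal reference lines.

# Drive a few audit points by hand; in the full model the bed_count
# changes come from admission/discharge events.
hospital = Hospital()
for day in range(5):
    hospital.bed_count += 2
    hospital.audit(time=day)
hospital.build_audit_report()
print(hospital.audit_report[['Time', 'Occupied_beds', 'Median_beds']])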
{"blob_id": "90e08401cf50ef80d42fcfa6c388650e5fc09d54", "bodies": ["favorite_nodes = UserFavoriteNode.get_favorite_nodes(get_jwt_identity())\nschema = GenericNodeSchema(many=True)\nresponse = json.loads(schema.dumps(favorite_nodes).data)\nreturn jsonify_response(response, 200)", "user_id = get_jwt_identity()\ndata = json.loads(request.data)\nif 'nodeId' not in data or 'nodeType' not in data:\n return jsonify_response({'error': '`nodeId` or `nodeType` not provided.'}, 400)\nvertex_class = self.vertex_types[data['nodeType']]\nvertex = vertex_class.filter(id=data['nodeId'])\nif not vertex:\n return jsonify_response({'error': 'Node does not exist.'}, 404)\nvertex = vertex[0]\nexisting_edge = UserFavoriteNode.filter(outv_id=user_id, inv_id=vertex.id, outv_label='user', inv_label=data['nodeType'])\nif existing_edge:\n return jsonify_response({'error': 'Favorite node already exists.'}, 400)\nvertex_roles = vertex.get_user_permissions(user_id)\ndirect_vertex_role = vertex_roles['direct_role']\nindirect_vertex_roles = vertex_roles['indirect_roles']\nif direct_vertex_role or [i for i in indirect_vertex_roles if 'lead' in i or 'admin' in i]:\n edge = UserFavoriteNode.create(outv_id=user_id, inv_id=vertex.id, outv_label='user', inv_label=data['nodeType'])\n schema = GenericNodeSchema()\n response = json.loads(schema.dumps(vertex).data)\n return jsonify_response(response, 201)\nreturn jsonify_response({'error': 'User does not have access to node'}, 403)"], "bodies_text": "<|body_start_0|>\n favorite_nodes = UserFavoriteNode.get_favorite_nodes(get_jwt_identity())\n schema = GenericNodeSchema(many=True)\n response = json.loads(schema.dumps(favorite_nodes).data)\n return jsonify_response(response, 200)\n<|end_body_0|>\n\n<|body_start_1|>\n user_id = get_jwt_identity()\n data = json.loads(request.data)\n if 'nodeId' not in data or 'nodeType' not in data:\n return jsonify_response({'error': '`nodeId` or `nodeType` not provided.'}, 400)\n vertex_class = self.vertex_types[data['nodeType']]\n vertex = vertex_class.filter(id=data['nodeId'])\n if not vertex:\n return jsonify_response({'error': 'Node does not exist.'}, 404)\n vertex = vertex[0]\n existing_edge = UserFavoriteNode.filter(outv_id=user_id, inv_id=vertex.id, outv_label='user', inv_label=data['nodeType'])\n if existing_edge:\n return jsonify_response({'error': 'Favorite node already exists.'}, 400)\n vertex_roles = vertex.get_user_permissions(user_id)\n direct_vertex_role = vertex_roles['direct_role']\n indirect_vertex_roles = vertex_roles['indirect_roles']\n if direct_vertex_role or [i for i in indirect_vertex_roles if 'lead' in i or 'admin' in i]:\n edge = UserFavoriteNode.create(outv_id=user_id, inv_id=vertex.id, outv_label='user', inv_label=data['nodeType'])\n schema = GenericNodeSchema()\n response = json.loads(schema.dumps(vertex).data)\n return jsonify_response(response, 201)\n return jsonify_response({'error': 'User does not have access to node'}, 403)\n<|end_body_1|>\n", "class_docstring": "Container for the LIST and CREATE endpoints for favorite nodes", "class_name": "ListCreateFavoriteNodes", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ListCreateFavoriteNodes:\n \"\"\"Container for the LIST and CREATE endpoints for favorite nodes\"\"\"\n\n def get(self):\n \"\"\"Returns all of the user's favorite nodes that he has access to UNTESTED\"\"\"\n <|body_0|>\n\n def post(self):\n \"\"\"Endpoint used for favoriting a node for the currently authenticated user\"\"\"\n 
<|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n favorite_nodes = UserFavoriteNode.get_favorite_nodes(get_jwt_identity())\n schema = GenericNodeSchema(many=True)\n response = json.loads(schema.dumps(favorite_nodes).data)\n return jsonify_response(response, 200)\n<|end_body_0|>\n\n<|body_start_1|>\n user_id = get_jwt_identity()\n data = json.loads(request.data)\n if 'nodeId' not in data or 'nodeType' not in data:\n return jsonify_response({'error': '`nodeId` or `nodeType` not provided.'}, 400)\n vertex_class = self.vertex_types[data['nodeType']]\n vertex = vertex_class.filter(id=data['nodeId'])\n if not vertex:\n return jsonify_response({'error': 'Node does not exist.'}, 404)\n vertex = vertex[0]\n existing_edge = UserFavoriteNode.filter(outv_id=user_id, inv_id=vertex.id, outv_label='user', inv_label=data['nodeType'])\n if existing_edge:\n return jsonify_response({'error': 'Favorite node already exists.'}, 400)\n vertex_roles = vertex.get_user_permissions(user_id)\n direct_vertex_role = vertex_roles['direct_role']\n indirect_vertex_roles = vertex_roles['indirect_roles']\n if direct_vertex_role or [i for i in indirect_vertex_roles if 'lead' in i or 'admin' in i]:\n edge = UserFavoriteNode.create(outv_id=user_id, inv_id=vertex.id, outv_label='user', inv_label=data['nodeType'])\n schema = GenericNodeSchema()\n response = json.loads(schema.dumps(vertex).data)\n return jsonify_response(response, 201)\n return jsonify_response({'error': 'User does not have access to node'}, 403)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000428", "length_bytes": 44865, "license_type": "no_license", "methods": [{"docstring": "Returns all of the user's favorite nodes that he has access to UNTESTED", "name": "get", "signature": "def get(self)"}, {"docstring": "Endpoint used for favoriting a node for the currently authenticated user", "name": "post", "signature": "def post(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003534", "prompt": "Implement the Python class `ListCreateFavoriteNodes` described below.\n\nClass description:\nContainer for the LIST and CREATE endpoints for favorite nodes\n\nMethod signatures and docstrings:\n- def get(self): Returns all of the user's favorite nodes that he has access to UNTESTED\n- def post(self): Endpoint used for favoriting a node for the currently authenticated user", "prompted_full_text": "Implement the Python class `ListCreateFavoriteNodes` described below.\n\nClass description:\nContainer for the LIST and CREATE endpoints for favorite nodes\n\nMethod signatures and docstrings:\n- def get(self): Returns all of the user's favorite nodes that he has access to UNTESTED\n- def post(self): Endpoint used for favoriting a node for the currently authenticated user\n\n<|skeleton|>\nclass ListCreateFavoriteNodes:\n \"\"\"Container for the LIST and CREATE endpoints for favorite nodes\"\"\"\n\n def get(self):\n \"\"\"Returns all of the user's favorite nodes that he has access to UNTESTED\"\"\"\n <|body_0|>\n\n def post(self):\n \"\"\"Endpoint used for favoriting a node for the currently authenticated user\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n favorite_nodes = UserFavoriteNode.get_favorite_nodes(get_jwt_identity())\n schema = GenericNodeSchema(many=True)\n response = json.loads(schema.dumps(favorite_nodes).data)\n return jsonify_response(response, 200)\n<|end_body_0|>\n\n<|body_start_1|>\n user_id = get_jwt_identity()\n data = json.loads(request.data)\n if 'nodeId' not in data or 'nodeType' not in data:\n return 
jsonify_response({'error': '`nodeId` or `nodeType` not provided.'}, 400)\n vertex_class = self.vertex_types[data['nodeType']]\n vertex = vertex_class.filter(id=data['nodeId'])\n if not vertex:\n return jsonify_response({'error': 'Node does not exist.'}, 404)\n vertex = vertex[0]\n existing_edge = UserFavoriteNode.filter(outv_id=user_id, inv_id=vertex.id, outv_label='user', inv_label=data['nodeType'])\n if existing_edge:\n return jsonify_response({'error': 'Favorite node already exists.'}, 400)\n vertex_roles = vertex.get_user_permissions(user_id)\n direct_vertex_role = vertex_roles['direct_role']\n indirect_vertex_roles = vertex_roles['indirect_roles']\n if direct_vertex_role or [i for i in indirect_vertex_roles if 'lead' in i or 'admin' in i]:\n edge = UserFavoriteNode.create(outv_id=user_id, inv_id=vertex.id, outv_label='user', inv_label=data['nodeType'])\n schema = GenericNodeSchema()\n response = json.loads(schema.dumps(vertex).data)\n return jsonify_response(response, 201)\n return jsonify_response({'error': 'User does not have access to node'}, 403)\n<|end_body_1|>\n", "revision_id": "00434985013b65fe45b0a8c8a7f0b50bb727087a", "skeleton": "<|skeleton|>\nclass ListCreateFavoriteNodes:\n \"\"\"Container for the LIST and CREATE endpoints for favorite nodes\"\"\"\n\n def get(self):\n \"\"\"Returns all of the user's favorite nodes that he has access to UNTESTED\"\"\"\n <|body_0|>\n\n def post(self):\n \"\"\"Endpoint used for favoriting a node for the currently authenticated user\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ListCreateFavoriteNodes:\n \"\"\"Container for the LIST and CREATE endpoints for favorite nodes\"\"\"\n\n def get(self):\n \"\"\"Returns all of the user's favorite nodes that he has access to UNTESTED\"\"\"\n favorite_nodes = UserFavoriteNode.get_favorite_nodes(get_jwt_identity())\n schema = GenericNodeSchema(many=True)\n response = json.loads(schema.dumps(favorite_nodes).data)\n return jsonify_response(response, 200)\n\n def post(self):\n \"\"\"Endpoint used for favoriting a node for the currently authenticated user\"\"\"\n user_id = get_jwt_identity()\n data = json.loads(request.data)\n if 'nodeId' not in data or 'nodeType' not in data:\n return jsonify_response({'error': '`nodeId` or `nodeType` not provided.'}, 400)\n vertex_class = self.vertex_types[data['nodeType']]\n vertex = vertex_class.filter(id=data['nodeId'])\n if not vertex:\n return jsonify_response({'error': 'Node does not exist.'}, 404)\n vertex = vertex[0]\n existing_edge = UserFavoriteNode.filter(outv_id=user_id, inv_id=vertex.id, outv_label='user', inv_label=data['nodeType'])\n if existing_edge:\n return jsonify_response({'error': 'Favorite node already exists.'}, 400)\n vertex_roles = vertex.get_user_permissions(user_id)\n direct_vertex_role = vertex_roles['direct_role']\n indirect_vertex_roles = vertex_roles['indirect_roles']\n if direct_vertex_role or [i for i in indirect_vertex_roles if 'lead' in i or 'admin' in i]:\n edge = UserFavoriteNode.create(outv_id=user_id, inv_id=vertex.id, outv_label='user', inv_label=data['nodeType'])\n schema = GenericNodeSchema()\n response = json.loads(schema.dumps(vertex).data)\n return jsonify_response(response, 201)\n return jsonify_response({'error': 'User does not have access to node'}, 403)\n", "source": "the_stack_v2_python_sparse", "source_path": "core/views.py", "source_repo": 
"gingerComms/gingerCommsAPIs", "split": "val", "star_events_count": 0}
{"blob_id": "0ad745f2cc5287117b896134db8e161a88cf0526", "bodies": ["X = check_array(X, dtype=np.float32, accept_sparse='csc')\npreds = super().predict(X, check_input=check_input)\nif uncertainty is not None or quantiles is not None:\n if uncertainty is not None:\n quantiles = [uncertainty / 2, 1 - uncertainty / 2]\n else:\n quantiles = list(quantiles or [])\n X_leaves = self.apply(X)\n unique_leaves = np.unique(X_leaves)\n quantile_vals = np.empty((X.shape[0], len(quantiles)))\n for leaf in unique_leaves:\n for idx, quantile in enumerate(quantiles):\n X_leaf = X_leaves == leaf\n y_leaf = self.y_train_[self.y_train_leaves_ == leaf]\n quantile_vals[X_leaf, idx] = weighted_percentile(y_leaf, quantile)\n return (preds, quantile_vals)\nelse:\n return preds", "y = np.asarray(y)\nif np.ndim(y) == 2 and y.shape[1] == 1:\n y = np.ravel(y)\nX, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float32, multi_output=False)\nsuper().fit(X, y, sample_weight=sample_weight, check_input=check_input)\nself.y_train_ = y\nself.y_train_leaves_ = self.tree_.apply(X)\nreturn self"], "bodies_text": "<|body_start_0|>\n X = check_array(X, dtype=np.float32, accept_sparse='csc')\n preds = super().predict(X, check_input=check_input)\n if uncertainty is not None or quantiles is not None:\n if uncertainty is not None:\n quantiles = [uncertainty / 2, 1 - uncertainty / 2]\n else:\n quantiles = list(quantiles or [])\n X_leaves = self.apply(X)\n unique_leaves = np.unique(X_leaves)\n quantile_vals = np.empty((X.shape[0], len(quantiles)))\n for leaf in unique_leaves:\n for idx, quantile in enumerate(quantiles):\n X_leaf = X_leaves == leaf\n y_leaf = self.y_train_[self.y_train_leaves_ == leaf]\n quantile_vals[X_leaf, idx] = weighted_percentile(y_leaf, quantile)\n return (preds, quantile_vals)\n else:\n return preds\n<|end_body_0|>\n\n<|body_start_1|>\n y = np.asarray(y)\n if np.ndim(y) == 2 and y.shape[1] == 1:\n y = np.ravel(y)\n X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float32, multi_output=False)\n super().fit(X, y, sample_weight=sample_weight, check_input=check_input)\n self.y_train_ = y\n self.y_train_leaves_ = self.tree_.apply(X)\n return self\n<|end_body_1|>\n", "class_docstring": "", "class_name": "BaseTreeQuantileRegressor", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BaseTreeQuantileRegressor:\n\n def predict(self, X: np.ndarray, uncertainty: Optional[float]=None, quantiles: Optional[np.ndarray]=None, check_input: bool=True) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:\n \"\"\"Predict regression value for X. Args: X (array-like or sparse matrix): The input samples, of shape [n_samples, n_features]. Internally, it will be converted to `dtype=np.float32` and if a sparse matrix is provided to a sparse `csr_matrix`. uncertainty (float or None, optional): Value ranging from 0 to 1. If None then no prediction intervals will be returned. Defaults to None. quantiles (sequence of floats or None, optional): List of quantiles to output, as an alternative to the `uncertainty` argument, and will not be used if that argument is set. If None then `uncertainty` is used. Defaults to None. check_input (boolean, optional): Allow to bypass several input checking. Don't use this param\"\"\"\n <|body_0|>\n\n def fit(self, X: np.ndarray, y: np.ndarray, sample_weight: Optional[np.ndarray]=None, check_input: bool=True):\n \"\"\"Build a decision tree classifier from the training set (X, y). 
Args: X (array-like or sparse matrix) The training input samples, of shape [n_samples, n_features]. Internally, it will be converted to `dtype=np.float32` and if a sparse matrix is provided to a sparse `csc_matrix`. y (array-like): The target values (class labels) as integers or strings, of shape [n_samples] or [n_samples, n_outputs]. sample_weight (array-like or None, optional): Sample weights of shape = [n_samples]. If None, then samples are equally weighted. Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each node. Splits are also ignored if they would result in\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n X = check_array(X, dtype=np.float32, accept_sparse='csc')\n preds = super().predict(X, check_input=check_input)\n if uncertainty is not None or quantiles is not None:\n if uncertainty is not None:\n quantiles = [uncertainty / 2, 1 - uncertainty / 2]\n else:\n quantiles = list(quantiles or [])\n X_leaves = self.apply(X)\n unique_leaves = np.unique(X_leaves)\n quantile_vals = np.empty((X.shape[0], len(quantiles)))\n for leaf in unique_leaves:\n for idx, quantile in enumerate(quantiles):\n X_leaf = X_leaves == leaf\n y_leaf = self.y_train_[self.y_train_leaves_ == leaf]\n quantile_vals[X_leaf, idx] = weighted_percentile(y_leaf, quantile)\n return (preds, quantile_vals)\n else:\n return preds\n<|end_body_0|>\n\n<|body_start_1|>\n y = np.asarray(y)\n if np.ndim(y) == 2 and y.shape[1] == 1:\n y = np.ravel(y)\n X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float32, multi_output=False)\n super().fit(X, y, sample_weight=sample_weight, check_input=check_input)\n self.y_train_ = y\n self.y_train_leaves_ = self.tree_.apply(X)\n return self\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000429", "length_bytes": 10540, "license_type": "permissive", "methods": [{"docstring": "Predict regression value for X. Args: X (array-like or sparse matrix): The input samples, of shape [n_samples, n_features]. Internally, it will be converted to `dtype=np.float32` and if a sparse matrix is provided to a sparse `csr_matrix`. uncertainty (float or None, optional): Value ranging from 0 to 1. If None then no prediction intervals will be returned. Defaults to None. quantiles (sequence of floats or None, optional): List of quantiles to output, as an alternative to the `uncertainty` argument, and will not be used if that argument is set. If None then `uncertainty` is used. Defaults to None. check_input (boolean, optional): Allow to bypass several input checking. Don't use this param", "name": "predict", "signature": "def predict(self, X: np.ndarray, uncertainty: Optional[float]=None, quantiles: Optional[np.ndarray]=None, check_input: bool=True) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]"}, {"docstring": "Build a decision tree classifier from the training set (X, y). Args: X (array-like or sparse matrix) The training input samples, of shape [n_samples, n_features]. Internally, it will be converted to `dtype=np.float32` and if a sparse matrix is provided to a sparse `csc_matrix`. y (array-like): The target values (class labels) as integers or strings, of shape [n_samples] or [n_samples, n_outputs]. sample_weight (array-like or None, optional): Sample weights of shape = [n_samples]. If None, then samples are equally weighted. Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each node. 
Splits are also ignored if they would result in", "name": "fit", "signature": "def fit(self, X: np.ndarray, y: np.ndarray, sample_weight: Optional[np.ndarray]=None, check_input: bool=True)"}], "n_methods": 2, "prompt": "Implement the Python class `BaseTreeQuantileRegressor` described below.\n\nClass description:\nImplement the BaseTreeQuantileRegressor class.\n\nMethod signatures and docstrings:\n- def predict(self, X: np.ndarray, uncertainty: Optional[float]=None, quantiles: Optional[np.ndarray]=None, check_input: bool=True) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]: Predict regression value for X. Args: X (array-like or sparse matrix): The input samples, of shape [n_samples, n_features]. Internally, it will be converted to `dtype=np.float32` and if a sparse matrix is provided to a sparse `csr_matrix`. uncertainty (float or None, optional): Value ranging from 0 to 1. If None then no prediction intervals will be returned. Defaults to None. quantiles (sequence of floats or None, optional): List of quantiles to output, as an alternative to the `uncertainty` argument, and will not be used if that argument is set. If None then `uncertainty` is used. Defaults to None. check_input (boolean, optional): Allow to bypass several input checking. Don't use this param\n- def fit(self, X: np.ndarray, y: np.ndarray, sample_weight: Optional[np.ndarray]=None, check_input: bool=True): Build a decision tree classifier from the training set (X, y). Args: X (array-like or sparse matrix) The training input samples, of shape [n_samples, n_features]. Internally, it will be converted to `dtype=np.float32` and if a sparse matrix is provided to a sparse `csc_matrix`. y (array-like): The target values (class labels) as integers or strings, of shape [n_samples] or [n_samples, n_outputs]. sample_weight (array-like or None, optional): Sample weights of shape = [n_samples]. If None, then samples are equally weighted. Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each node. Splits are also ignored if they would result in", "prompted_full_text": "Implement the Python class `BaseTreeQuantileRegressor` described below.\n\nClass description:\nImplement the BaseTreeQuantileRegressor class.\n\nMethod signatures and docstrings:\n- def predict(self, X: np.ndarray, uncertainty: Optional[float]=None, quantiles: Optional[np.ndarray]=None, check_input: bool=True) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]: Predict regression value for X. Args: X (array-like or sparse matrix): The input samples, of shape [n_samples, n_features]. Internally, it will be converted to `dtype=np.float32` and if a sparse matrix is provided to a sparse `csr_matrix`. uncertainty (float or None, optional): Value ranging from 0 to 1. If None then no prediction intervals will be returned. Defaults to None. quantiles (sequence of floats or None, optional): List of quantiles to output, as an alternative to the `uncertainty` argument, and will not be used if that argument is set. If None then `uncertainty` is used. Defaults to None. check_input (boolean, optional): Allow to bypass several input checking. Don't use this param\n- def fit(self, X: np.ndarray, y: np.ndarray, sample_weight: Optional[np.ndarray]=None, check_input: bool=True): Build a decision tree classifier from the training set (X, y). Args: X (array-like or sparse matrix) The training input samples, of shape [n_samples, n_features]. 
Internally, it will be converted to `dtype=np.float32` and if a sparse matrix is provided to a sparse `csc_matrix`. y (array-like): The target values (class labels) as integers or strings, of shape [n_samples] or [n_samples, n_outputs]. sample_weight (array-like or None, optional): Sample weights of shape = [n_samples]. If None, then samples are equally weighted. Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each node. Splits are also ignored if they would result in\n\n<|skeleton|>\nclass BaseTreeQuantileRegressor:\n\n def predict(self, X: np.ndarray, uncertainty: Optional[float]=None, quantiles: Optional[np.ndarray]=None, check_input: bool=True) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:\n \"\"\"Predict regression value for X. Args: X (array-like or sparse matrix): The input samples, of shape [n_samples, n_features]. Internally, it will be converted to `dtype=np.float32` and if a sparse matrix is provided to a sparse `csr_matrix`. uncertainty (float or None, optional): Value ranging from 0 to 1. If None then no prediction intervals will be returned. Defaults to None. quantiles (sequence of floats or None, optional): List of quantiles to output, as an alternative to the `uncertainty` argument, and will not be used if that argument is set. If None then `uncertainty` is used. Defaults to None. check_input (boolean, optional): Allow to bypass several input checking. Don't use this param\"\"\"\n <|body_0|>\n\n def fit(self, X: np.ndarray, y: np.ndarray, sample_weight: Optional[np.ndarray]=None, check_input: bool=True):\n \"\"\"Build a decision tree classifier from the training set (X, y). Args: X (array-like or sparse matrix) The training input samples, of shape [n_samples, n_features]. Internally, it will be converted to `dtype=np.float32` and if a sparse matrix is provided to a sparse `csc_matrix`. y (array-like): The target values (class labels) as integers or strings, of shape [n_samples] or [n_samples, n_outputs]. sample_weight (array-like or None, optional): Sample weights of shape = [n_samples]. If None, then samples are equally weighted. Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each node. 
Splits are also ignored if they would result in\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n X = check_array(X, dtype=np.float32, accept_sparse='csc')\n preds = super().predict(X, check_input=check_input)\n if uncertainty is not None or quantiles is not None:\n if uncertainty is not None:\n quantiles = [uncertainty / 2, 1 - uncertainty / 2]\n else:\n quantiles = list(quantiles or [])\n X_leaves = self.apply(X)\n unique_leaves = np.unique(X_leaves)\n quantile_vals = np.empty((X.shape[0], len(quantiles)))\n for leaf in unique_leaves:\n for idx, quantile in enumerate(quantiles):\n X_leaf = X_leaves == leaf\n y_leaf = self.y_train_[self.y_train_leaves_ == leaf]\n quantile_vals[X_leaf, idx] = weighted_percentile(y_leaf, quantile)\n return (preds, quantile_vals)\n else:\n return preds\n<|end_body_0|>\n\n<|body_start_1|>\n y = np.asarray(y)\n if np.ndim(y) == 2 and y.shape[1] == 1:\n y = np.ravel(y)\n X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float32, multi_output=False)\n super().fit(X, y, sample_weight=sample_weight, check_input=check_input)\n self.y_train_ = y\n self.y_train_leaves_ = self.tree_.apply(X)\n return self\n<|end_body_1|>\n", "revision_id": "d31c31261b91f854d2529eab605165a1cfe5ec70", "skeleton": "<|skeleton|>\nclass BaseTreeQuantileRegressor:\n\n def predict(self, X: np.ndarray, uncertainty: Optional[float]=None, quantiles: Optional[np.ndarray]=None, check_input: bool=True) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:\n \"\"\"Predict regression value for X. Args: X (array-like or sparse matrix): The input samples, of shape [n_samples, n_features]. Internally, it will be converted to `dtype=np.float32` and if a sparse matrix is provided to a sparse `csr_matrix`. uncertainty (float or None, optional): Value ranging from 0 to 1. If None then no prediction intervals will be returned. Defaults to None. quantiles (sequence of floats or None, optional): List of quantiles to output, as an alternative to the `uncertainty` argument, and will not be used if that argument is set. If None then `uncertainty` is used. Defaults to None. check_input (boolean, optional): Allow to bypass several input checking. Don't use this param\"\"\"\n <|body_0|>\n\n def fit(self, X: np.ndarray, y: np.ndarray, sample_weight: Optional[np.ndarray]=None, check_input: bool=True):\n \"\"\"Build a decision tree classifier from the training set (X, y). Args: X (array-like or sparse matrix) The training input samples, of shape [n_samples, n_features]. Internally, it will be converted to `dtype=np.float32` and if a sparse matrix is provided to a sparse `csc_matrix`. y (array-like): The target values (class labels) as integers or strings, of shape [n_samples] or [n_samples, n_outputs]. sample_weight (array-like or None, optional): Sample weights of shape = [n_samples]. If None, then samples are equally weighted. Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each node. Splits are also ignored if they would result in\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class BaseTreeQuantileRegressor:\n def predict(self, X: np.ndarray, uncertainty: Optional[float]=None, quantiles: Optional[np.ndarray]=None, check_input: bool=True) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:\n \"\"\"Predict regression value for X. 
Args: X (array-like or sparse matrix): The input samples, of shape [n_samples, n_features]. Internally, it will be converted to `dtype=np.float32` and if a sparse matrix is provided to a sparse `csr_matrix`. uncertainty (float or None, optional): Value ranging from 0 to 1. If None then no prediction intervals will be returned. Defaults to None. quantiles (sequence of floats or None, optional): List of quantiles to output, as an alternative to the `uncertainty` argument, and will not be used if that argument is set. If None then `uncertainty` is used. Defaults to None. check_input (boolean, optional): Allow to bypass several input checking. Don't use this param\"\"\"\n X = check_array(X, dtype=np.float32, accept_sparse='csc')\n preds = super().predict(X, check_input=check_input)\n if uncertainty is not None or quantiles is not None:\n if uncertainty is not None:\n quantiles = [uncertainty / 2, 1 - uncertainty / 2]\n else:\n quantiles = list(quantiles or [])\n X_leaves = self.apply(X)\n unique_leaves = np.unique(X_leaves)\n quantile_vals = np.empty((X.shape[0], len(quantiles)))\n for leaf in unique_leaves:\n for idx, quantile in enumerate(quantiles):\n X_leaf = X_leaves == leaf\n y_leaf = self.y_train_[self.y_train_leaves_ == leaf]\n quantile_vals[X_leaf, idx] = weighted_percentile(y_leaf, quantile)\n return (preds, quantile_vals)\n else:\n return preds\n\n def fit(self, X: np.ndarray, y: np.ndarray, sample_weight: Optional[np.ndarray]=None, check_input: bool=True):\n \"\"\"Build a decision tree classifier from the training set (X, y). Args: X (array-like or sparse matrix) The training input samples, of shape [n_samples, n_features]. Internally, it will be converted to `dtype=np.float32` and if a sparse matrix is provided to a sparse `csc_matrix`. y (array-like): The target values (class labels) as integers or strings, of shape [n_samples] or [n_samples, n_outputs]. sample_weight (array-like or None, optional): Sample weights of shape = [n_samples]. If None, then samples are equally weighted. Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each node. Splits are also ignored if they would result in\"\"\"\n y = np.asarray(y)\n if np.ndim(y) == 2 and y.shape[1] == 1:\n y = np.ravel(y)\n X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float32, multi_output=False)\n super().fit(X, y, sample_weight=sample_weight, check_input=check_input)\n self.y_train_ = y\n self.y_train_leaves_ = self.tree_.apply(X)\n return self\n", "source": "the_stack_v2_python_sparse", "source_path": "src/doubt/models/tree/tree.py", "source_repo": "saattrupdan/doubt", "split": "val", "star_events_count": 46}
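Editor's note on the BaseTreeQuantileRegressor record above: predict() computes interval bounds by grouping training targets by the leaf they landed in (y_train_leaves_, stored at fit time) and taking per-leaf percentiles, and an uncertainty u is expanded to the quantile pair [u/2, 1 - u/2]. A self-contained sketch of that leaf-grouping step, with plain np.percentile standing in for the record's weighted_percentile helper (an assumption; the real helper is not shown in the record):

import numpy as np

def percentile_in_leaf(y_leaf, q):
    # Unweighted stand-in for weighted_percentile(y_leaf, q), q in [0, 1]
    return np.percentile(y_leaf, q * 100)

y_train = np.array([1.0, 2.0, 3.0, 10.0, 11.0, 12.0])
train_leaves = np.array([0, 0, 0, 1, 1, 1])   # leaf each training row fell in
test_leaves = np.array([0, 1, 0])             # leaf each test row fell in
quantiles = [0.05, 0.95]                      # i.e. uncertainty = 0.1

vals = np.empty((len(test_leaves), len(quantiles)))
for leaf in np.unique(test_leaves):
    y_leaf = y_train[train_leaves == leaf]    # training targets sharing the leaf
    for j, q in enumerate(quantiles):
        vals[test_leaves == leaf, j] = percentile_in_leaf(y_leaf, q)
print(vals)  # leaf-0 rows get [1.1, 2.9]; the leaf-1 row gets [10.1, 11.9]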
{"blob_id": "ede245c1153671c53c95964f6dc82857f549b61c", "bodies": ["mobile_phone = self.cleaned_data['mobile_phone']\nexists = models.UserInfo.objects.filter(mobile_phone=mobile_phone).exists()\nif not exists:\n raise ValidationError('手机号未注册,请先注册')\nreturn mobile_phone", "code = self.cleaned_data['code']\nmobile_phone = self.cleaned_data.get('mobile_phone')\nif not mobile_phone:\n return code\npool = redis.ConnectionPool(host='127.0.0.1', port=6379, encoding='utf-8', max_connections=1000)\nconn = redis.Redis(connection_pool=pool)\nredis_code = conn.get(mobile_phone)\nif not redis_code:\n raise ValidationError('验证码失效或未发送,请重新发送')\nredis_str_code = redis_code.decode('utf-8')\nif code.strip() != redis_str_code:\n raise ValidationError('验证码错误,请重新输入')\nreturn code"], "bodies_text": "<|body_start_0|>\n mobile_phone = self.cleaned_data['mobile_phone']\n exists = models.UserInfo.objects.filter(mobile_phone=mobile_phone).exists()\n if not exists:\n raise ValidationError('手机号未注册,请先注册')\n return mobile_phone\n<|end_body_0|>\n\n<|body_start_1|>\n code = self.cleaned_data['code']\n mobile_phone = self.cleaned_data.get('mobile_phone')\n if not mobile_phone:\n return code\n pool = redis.ConnectionPool(host='127.0.0.1', port=6379, encoding='utf-8', max_connections=1000)\n conn = redis.Redis(connection_pool=pool)\n redis_code = conn.get(mobile_phone)\n if not redis_code:\n raise ValidationError('验证码失效或未发送,请重新发送')\n redis_str_code = redis_code.decode('utf-8')\n if code.strip() != redis_str_code:\n raise ValidationError('验证码错误,请重新输入')\n return code\n<|end_body_1|>\n", "class_docstring": "添加手机号字段", "class_name": "LoginSMSForm", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LoginSMSForm:\n \"\"\"添加手机号字段\"\"\"\n\n def clean_mobile_phone(self):\n \"\"\"校验手机号\"\"\"\n <|body_0|>\n\n def clean_code(self):\n \"\"\"校验验证码\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n mobile_phone = self.cleaned_data['mobile_phone']\n exists = models.UserInfo.objects.filter(mobile_phone=mobile_phone).exists()\n if not exists:\n raise ValidationError('手机号未注册,请先注册')\n return mobile_phone\n<|end_body_0|>\n\n<|body_start_1|>\n code = self.cleaned_data['code']\n mobile_phone = self.cleaned_data.get('mobile_phone')\n if not mobile_phone:\n return code\n pool = redis.ConnectionPool(host='127.0.0.1', port=6379, encoding='utf-8', max_connections=1000)\n conn = redis.Redis(connection_pool=pool)\n redis_code = conn.get(mobile_phone)\n if not redis_code:\n raise ValidationError('验证码失效或未发送,请重新发送')\n redis_str_code = redis_code.decode('utf-8')\n if code.strip() != redis_str_code:\n raise ValidationError('验证码错误,请重新输入')\n return code\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000430", "length_bytes": 10735, "license_type": "no_license", "methods": [{"docstring": "校验手机号", "name": "clean_mobile_phone", "signature": "def clean_mobile_phone(self)"}, {"docstring": "校验验证码", "name": "clean_code", "signature": "def clean_code(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_016728", "prompt": "Implement the Python class `LoginSMSForm` described below.\n\nClass description:\n添加手机号字段\n\nMethod signatures and docstrings:\n- def clean_mobile_phone(self): 校验手机号\n- def clean_code(self): 校验验证码", "prompted_full_text": "Implement the Python class `LoginSMSForm` described below.\n\nClass description:\n添加手机号字段\n\nMethod signatures and docstrings:\n- def clean_mobile_phone(self): 校验手机号\n- def clean_code(self): 校验验证码\n\n<|skeleton|>\nclass 
LoginSMSForm:\n \"\"\"添加手机号字段\"\"\"\n\n def clean_mobile_phone(self):\n \"\"\"校验手机号\"\"\"\n <|body_0|>\n\n def clean_code(self):\n \"\"\"校验验证码\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n mobile_phone = self.cleaned_data['mobile_phone']\n exists = models.UserInfo.objects.filter(mobile_phone=mobile_phone).exists()\n if not exists:\n raise ValidationError('手机号未注册,请先注册')\n return mobile_phone\n<|end_body_0|>\n\n<|body_start_1|>\n code = self.cleaned_data['code']\n mobile_phone = self.cleaned_data.get('mobile_phone')\n if not mobile_phone:\n return code\n pool = redis.ConnectionPool(host='127.0.0.1', port=6379, encoding='utf-8', max_connections=1000)\n conn = redis.Redis(connection_pool=pool)\n redis_code = conn.get(mobile_phone)\n if not redis_code:\n raise ValidationError('验证码失效或未发送,请重新发送')\n redis_str_code = redis_code.decode('utf-8')\n if code.strip() != redis_str_code:\n raise ValidationError('验证码错误,请重新输入')\n return code\n<|end_body_1|>\n", "revision_id": "0ed15cbcb903a9aefcf6038c71120c4cb43d63bc", "skeleton": "<|skeleton|>\nclass LoginSMSForm:\n \"\"\"添加手机号字段\"\"\"\n\n def clean_mobile_phone(self):\n \"\"\"校验手机号\"\"\"\n <|body_0|>\n\n def clean_code(self):\n \"\"\"校验验证码\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class LoginSMSForm:\n \"\"\"添加手机号字段\"\"\"\n\n def clean_mobile_phone(self):\n \"\"\"校验手机号\"\"\"\n mobile_phone = self.cleaned_data['mobile_phone']\n exists = models.UserInfo.objects.filter(mobile_phone=mobile_phone).exists()\n if not exists:\n raise ValidationError('手机号未注册,请先注册')\n return mobile_phone\n\n def clean_code(self):\n \"\"\"校验验证码\"\"\"\n code = self.cleaned_data['code']\n mobile_phone = self.cleaned_data.get('mobile_phone')\n if not mobile_phone:\n return code\n pool = redis.ConnectionPool(host='127.0.0.1', port=6379, encoding='utf-8', max_connections=1000)\n conn = redis.Redis(connection_pool=pool)\n redis_code = conn.get(mobile_phone)\n if not redis_code:\n raise ValidationError('验证码失效或未发送,请重新发送')\n redis_str_code = redis_code.decode('utf-8')\n if code.strip() != redis_str_code:\n raise ValidationError('验证码错误,请重新输入')\n return code\n", "source": "the_stack_v2_python_sparse", "source_path": "web/forms/account.py", "source_repo": "y297374507/saas", "split": "val", "star_events_count": 0}
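Editor's note on the LoginSMSForm record above, whose docstrings and messages are Chinese: the class "adds a mobile phone number field"; clean_mobile_phone "validates the phone number" (error: "phone number not registered, please register first"); clean_code "validates the verification code" (errors: "verification code expired or not sent, please resend" and "verification code incorrect, please re-enter"). The code check itself is a bytes comparison against a Redis cache; a sketch of that comparison with a plain dict standing in for the Redis connection (the record's conn.get returns bytes or None, hence the decode; the function name and data are illustrative):

def check_sms_code(submitted, phone, cache):
    # cache.get mirrors redis.Redis.get: bytes when set, None when expired/missing
    cached = cache.get(phone)
    if cached is None:
        raise ValueError('verification code expired or not sent')
    if submitted.strip() != cached.decode('utf-8'):
        raise ValueError('verification code incorrect')
    return submitted

cache = {'13800000000': b'4921'}   # illustrative phone number and code
assert check_sms_code(' 4921 ', '13800000000', cache) == ' 4921 '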
{"blob_id": "3e7258a6637a4d264b6cafabe9ffdd3eaef5b084", "bodies": ["table['UT1_UTC'] = np.where(table['UT1_UTC_B'].mask, table['UT1_UTC_A'].data, table['UT1_UTC_B'].data)\ntable['UT1_UTC'].unit = table['UT1_UTC_A'].unit\ntable['UT1Flag'] = np.where(table['UT1_UTC_B'].mask, table['UT1Flag_A'].data, 'B')\ntable['PM_x'] = np.where(table['PM_X_B'].mask, table['PM_x_A'].data, table['PM_X_B'].data)\ntable['PM_x'].unit = table['PM_x_A'].unit\ntable['PM_y'] = np.where(table['PM_Y_B'].mask, table['PM_y_A'].data, table['PM_Y_B'].data)\ntable['PM_y'].unit = table['PM_y_A'].unit\ntable['PolPMFlag'] = np.where(table['PM_X_B'].mask, table['PolPMFlag_A'].data, 'B')\nsuper(IERS_A, self).__init__(table.filled())", "if readme is None:\n readme = IERS_A_README\niers_a = Table.read(file, format='cds', readme=readme)\nreturn cls(iers_a[~iers_a['UT1_UTC_A'].mask & ~iers_a['PolPMFlag_A'].mask])", "ut1flag = self['UT1Flag'][i]\nsource = np.ones_like(i) * FROM_IERS_B\nsource[ut1flag == 'I'] = FROM_IERS_A\nsource[ut1flag == 'P'] = FROM_IERS_A_PREDICTION\nreturn source", "pmflag = self['PolPMFlag'][i]\nsource = np.ones_like(i) * FROM_IERS_B\nsource[pmflag == 'I'] = FROM_IERS_A\nsource[pmflag == 'P'] = FROM_IERS_A_PREDICTION\nreturn source"], "bodies_text": "<|body_start_0|>\n table['UT1_UTC'] = np.where(table['UT1_UTC_B'].mask, table['UT1_UTC_A'].data, table['UT1_UTC_B'].data)\n table['UT1_UTC'].unit = table['UT1_UTC_A'].unit\n table['UT1Flag'] = np.where(table['UT1_UTC_B'].mask, table['UT1Flag_A'].data, 'B')\n table['PM_x'] = np.where(table['PM_X_B'].mask, table['PM_x_A'].data, table['PM_X_B'].data)\n table['PM_x'].unit = table['PM_x_A'].unit\n table['PM_y'] = np.where(table['PM_Y_B'].mask, table['PM_y_A'].data, table['PM_Y_B'].data)\n table['PM_y'].unit = table['PM_y_A'].unit\n table['PolPMFlag'] = np.where(table['PM_X_B'].mask, table['PolPMFlag_A'].data, 'B')\n super(IERS_A, self).__init__(table.filled())\n<|end_body_0|>\n\n<|body_start_1|>\n if readme is None:\n readme = IERS_A_README\n iers_a = Table.read(file, format='cds', readme=readme)\n return cls(iers_a[~iers_a['UT1_UTC_A'].mask & ~iers_a['PolPMFlag_A'].mask])\n<|end_body_1|>\n\n<|body_start_2|>\n ut1flag = self['UT1Flag'][i]\n source = np.ones_like(i) * FROM_IERS_B\n source[ut1flag == 'I'] = FROM_IERS_A\n source[ut1flag == 'P'] = FROM_IERS_A_PREDICTION\n return source\n<|end_body_2|>\n\n<|body_start_3|>\n pmflag = self['PolPMFlag'][i]\n source = np.ones_like(i) * FROM_IERS_B\n source[pmflag == 'I'] = FROM_IERS_A\n source[pmflag == 'P'] = FROM_IERS_A_PREDICTION\n return source\n<|end_body_3|>\n", "class_docstring": "IERS Table class targeted to IERS A, provided by USNO. These include rapid turnaround and predicted times. See http://maia.usno.navy.mil/ Notes ----- The IERS A file is not part of astropy. It can be downloaded from `iers.IERS_A_URL`. See `iers.__doc__` for instructions on how to use it in `Time`, etc.", "class_name": "IERS_A", "detected_licenses": ["Python-2.0", "Apache-2.0", "BSD-3-Clause", "LicenseRef-scancode-unknown"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass IERS_A:\n \"\"\"IERS Table class targeted to IERS A, provided by USNO. These include rapid turnaround and predicted times. See http://maia.usno.navy.mil/ Notes ----- The IERS A file is not part of astropy. It can be downloaded from `iers.IERS_A_URL`. See `iers.__doc__` for instructions on how to use it in `Time`, etc.\"\"\"\n\n def __init__(self, table):\n \"\"\"Initialize an IERS-A table that is already read in. 
Use read or open class methods to read it from disk. Combines UT1-UTC values, taking UT1_UTC_B if available, else UT1_UTC_A\"\"\"\n <|body_0|>\n\n def read(cls, file=IERS_A_FILE, readme=None):\n \"\"\"Read IERS-A table from a finals2000a.* file provided by USNO. Parameters ---------- file : str full path to ascii file holding IERS-A data (default: `iers.IERS_A_FILE`) readme : str full path to ascii file holding CDS-style readme (default: package version, `iers.IERS_A_README`) Returns ------- `IERS_A` class instance\"\"\"\n <|body_1|>\n\n def ut1_utc_source(self, i):\n \"\"\"Set UT1-UTC source flag for entries in IERS table\"\"\"\n <|body_2|>\n\n def pm_source(self, i):\n \"\"\"Set polar motion source flag for entries in IERS table\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n table['UT1_UTC'] = np.where(table['UT1_UTC_B'].mask, table['UT1_UTC_A'].data, table['UT1_UTC_B'].data)\n table['UT1_UTC'].unit = table['UT1_UTC_A'].unit\n table['UT1Flag'] = np.where(table['UT1_UTC_B'].mask, table['UT1Flag_A'].data, 'B')\n table['PM_x'] = np.where(table['PM_X_B'].mask, table['PM_x_A'].data, table['PM_X_B'].data)\n table['PM_x'].unit = table['PM_x_A'].unit\n table['PM_y'] = np.where(table['PM_Y_B'].mask, table['PM_y_A'].data, table['PM_Y_B'].data)\n table['PM_y'].unit = table['PM_y_A'].unit\n table['PolPMFlag'] = np.where(table['PM_X_B'].mask, table['PolPMFlag_A'].data, 'B')\n super(IERS_A, self).__init__(table.filled())\n<|end_body_0|>\n\n<|body_start_1|>\n if readme is None:\n readme = IERS_A_README\n iers_a = Table.read(file, format='cds', readme=readme)\n return cls(iers_a[~iers_a['UT1_UTC_A'].mask & ~iers_a['PolPMFlag_A'].mask])\n<|end_body_1|>\n\n<|body_start_2|>\n ut1flag = self['UT1Flag'][i]\n source = np.ones_like(i) * FROM_IERS_B\n source[ut1flag == 'I'] = FROM_IERS_A\n source[ut1flag == 'P'] = FROM_IERS_A_PREDICTION\n return source\n<|end_body_2|>\n\n<|body_start_3|>\n pmflag = self['PolPMFlag'][i]\n source = np.ones_like(i) * FROM_IERS_B\n source[pmflag == 'I'] = FROM_IERS_A\n source[pmflag == 'P'] = FROM_IERS_A_PREDICTION\n return source\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000431", "length_bytes": 17233, "license_type": "permissive", "methods": [{"docstring": "Initialize an IERS-A table that is already read in. Use read or open class methods to read it from disk. Combines UT1-UTC values, taking UT1_UTC_B if available, else UT1_UTC_A", "name": "__init__", "signature": "def __init__(self, table)"}, {"docstring": "Read IERS-A table from a finals2000a.* file provided by USNO. Parameters ---------- file : str full path to ascii file holding IERS-A data (default: `iers.IERS_A_FILE`) readme : str full path to ascii file holding CDS-style readme (default: package version, `iers.IERS_A_README`) Returns ------- `IERS_A` class instance", "name": "read", "signature": "def read(cls, file=IERS_A_FILE, readme=None)"}, {"docstring": "Set UT1-UTC source flag for entries in IERS table", "name": "ut1_utc_source", "signature": "def ut1_utc_source(self, i)"}, {"docstring": "Set polar motion source flag for entries in IERS table", "name": "pm_source", "signature": "def pm_source(self, i)"}], "n_methods": 4, "prompt": "Implement the Python class `IERS_A` described below.\n\nClass description:\nIERS Table class targeted to IERS A, provided by USNO. These include rapid turnaround and predicted times. See http://maia.usno.navy.mil/ Notes ----- The IERS A file is not part of astropy. It can be downloaded from `iers.IERS_A_URL`. 
See `iers.__doc__` for instructions on how to use it in `Time`, etc.\n\nMethod signatures and docstrings:\n- def __init__(self, table): Initialize an IERS-A table that is already read in. Use read or open class methods to read it from disk. Combines UT1-UTC values, taking UT1_UTC_B if available, else UT1_UTC_A\n- def read(cls, file=IERS_A_FILE, readme=None): Read IERS-A table from a finals2000a.* file provided by USNO. Parameters ---------- file : str full path to ascii file holding IERS-A data (default: `iers.IERS_A_FILE`) readme : str full path to ascii file holding CDS-style readme (default: package version, `iers.IERS_A_README`) Returns ------- `IERS_A` class instance\n- def ut1_utc_source(self, i): Set UT1-UTC source flag for entries in IERS table\n- def pm_source(self, i): Set polar motion source flag for entries in IERS table", "prompted_full_text": "Implement the Python class `IERS_A` described below.\n\nClass description:\nIERS Table class targeted to IERS A, provided by USNO. These include rapid turnaround and predicted times. See http://maia.usno.navy.mil/ Notes ----- The IERS A file is not part of astropy. It can be downloaded from `iers.IERS_A_URL`. See `iers.__doc__` for instructions on how to use it in `Time`, etc.\n\nMethod signatures and docstrings:\n- def __init__(self, table): Initialize an IERS-A table that is already read in. Use read or open class methods to read it from disk. Combines UT1-UTC values, taking UT1_UTC_B if available, else UT1_UTC_A\n- def read(cls, file=IERS_A_FILE, readme=None): Read IERS-A table from a finals2000a.* file provided by USNO. Parameters ---------- file : str full path to ascii file holding IERS-A data (default: `iers.IERS_A_FILE`) readme : str full path to ascii file holding CDS-style readme (default: package version, `iers.IERS_A_README`) Returns ------- `IERS_A` class instance\n- def ut1_utc_source(self, i): Set UT1-UTC source flag for entries in IERS table\n- def pm_source(self, i): Set polar motion source flag for entries in IERS table\n\n<|skeleton|>\nclass IERS_A:\n \"\"\"IERS Table class targeted to IERS A, provided by USNO. These include rapid turnaround and predicted times. See http://maia.usno.navy.mil/ Notes ----- The IERS A file is not part of astropy. It can be downloaded from `iers.IERS_A_URL`. See `iers.__doc__` for instructions on how to use it in `Time`, etc.\"\"\"\n\n def __init__(self, table):\n \"\"\"Initialize an IERS-A table that is already read in. Use read or open class methods to read it from disk. Combines UT1-UTC values, taking UT1_UTC_B if available, else UT1_UTC_A\"\"\"\n <|body_0|>\n\n def read(cls, file=IERS_A_FILE, readme=None):\n \"\"\"Read IERS-A table from a finals2000a.* file provided by USNO. 
Parameters ---------- file : str full path to ascii file holding IERS-A data (default: `iers.IERS_A_FILE`) readme : str full path to ascii file holding CDS-style readme (default: package version, `iers.IERS_A_README`) Returns ------- `IERS_A` class instance\"\"\"\n <|body_1|>\n\n def ut1_utc_source(self, i):\n \"\"\"Set UT1-UTC source flag for entries in IERS table\"\"\"\n <|body_2|>\n\n def pm_source(self, i):\n \"\"\"Set polar motion source flag for entries in IERS table\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n table['UT1_UTC'] = np.where(table['UT1_UTC_B'].mask, table['UT1_UTC_A'].data, table['UT1_UTC_B'].data)\n table['UT1_UTC'].unit = table['UT1_UTC_A'].unit\n table['UT1Flag'] = np.where(table['UT1_UTC_B'].mask, table['UT1Flag_A'].data, 'B')\n table['PM_x'] = np.where(table['PM_X_B'].mask, table['PM_x_A'].data, table['PM_X_B'].data)\n table['PM_x'].unit = table['PM_x_A'].unit\n table['PM_y'] = np.where(table['PM_Y_B'].mask, table['PM_y_A'].data, table['PM_Y_B'].data)\n table['PM_y'].unit = table['PM_y_A'].unit\n table['PolPMFlag'] = np.where(table['PM_X_B'].mask, table['PolPMFlag_A'].data, 'B')\n super(IERS_A, self).__init__(table.filled())\n<|end_body_0|>\n\n<|body_start_1|>\n if readme is None:\n readme = IERS_A_README\n iers_a = Table.read(file, format='cds', readme=readme)\n return cls(iers_a[~iers_a['UT1_UTC_A'].mask & ~iers_a['PolPMFlag_A'].mask])\n<|end_body_1|>\n\n<|body_start_2|>\n ut1flag = self['UT1Flag'][i]\n source = np.ones_like(i) * FROM_IERS_B\n source[ut1flag == 'I'] = FROM_IERS_A\n source[ut1flag == 'P'] = FROM_IERS_A_PREDICTION\n return source\n<|end_body_2|>\n\n<|body_start_3|>\n pmflag = self['PolPMFlag'][i]\n source = np.ones_like(i) * FROM_IERS_B\n source[pmflag == 'I'] = FROM_IERS_A\n source[pmflag == 'P'] = FROM_IERS_A_PREDICTION\n return source\n<|end_body_3|>\n", "revision_id": "2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6", "skeleton": "<|skeleton|>\nclass IERS_A:\n \"\"\"IERS Table class targeted to IERS A, provided by USNO. These include rapid turnaround and predicted times. See http://maia.usno.navy.mil/ Notes ----- The IERS A file is not part of astropy. It can be downloaded from `iers.IERS_A_URL`. See `iers.__doc__` for instructions on how to use it in `Time`, etc.\"\"\"\n\n def __init__(self, table):\n \"\"\"Initialize an IERS-A table that is already read in. Use read or open class methods to read it from disk. Combines UT1-UTC values, taking UT1_UTC_B if available, else UT1_UTC_A\"\"\"\n <|body_0|>\n\n def read(cls, file=IERS_A_FILE, readme=None):\n \"\"\"Read IERS-A table from a finals2000a.* file provided by USNO. Parameters ---------- file : str full path to ascii file holding IERS-A data (default: `iers.IERS_A_FILE`) readme : str full path to ascii file holding CDS-style readme (default: package version, `iers.IERS_A_README`) Returns ------- `IERS_A` class instance\"\"\"\n <|body_1|>\n\n def ut1_utc_source(self, i):\n \"\"\"Set UT1-UTC source flag for entries in IERS table\"\"\"\n <|body_2|>\n\n def pm_source(self, i):\n \"\"\"Set polar motion source flag for entries in IERS table\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class IERS_A:\n \"\"\"IERS Table class targeted to IERS A, provided by USNO. These include rapid turnaround and predicted times. See http://maia.usno.navy.mil/ Notes ----- The IERS A file is not part of astropy. It can be downloaded from `iers.IERS_A_URL`. 
See `iers.__doc__` for instructions on how to use it in `Time`, etc.\"\"\"\n\n def __init__(self, table):\n \"\"\"Initialize an IERS-A table that is already read in. Use read or open class methods to read it from disk. Combines UT1-UTC values, taking UT1_UTC_B if available, else UT1_UTC_A\"\"\"\n table['UT1_UTC'] = np.where(table['UT1_UTC_B'].mask, table['UT1_UTC_A'].data, table['UT1_UTC_B'].data)\n table['UT1_UTC'].unit = table['UT1_UTC_A'].unit\n table['UT1Flag'] = np.where(table['UT1_UTC_B'].mask, table['UT1Flag_A'].data, 'B')\n table['PM_x'] = np.where(table['PM_X_B'].mask, table['PM_x_A'].data, table['PM_X_B'].data)\n table['PM_x'].unit = table['PM_x_A'].unit\n table['PM_y'] = np.where(table['PM_Y_B'].mask, table['PM_y_A'].data, table['PM_Y_B'].data)\n table['PM_y'].unit = table['PM_y_A'].unit\n table['PolPMFlag'] = np.where(table['PM_X_B'].mask, table['PolPMFlag_A'].data, 'B')\n super(IERS_A, self).__init__(table.filled())\n\n def read(cls, file=IERS_A_FILE, readme=None):\n \"\"\"Read IERS-A table from a finals2000a.* file provided by USNO. Parameters ---------- file : str full path to ascii file holding IERS-A data (default: `iers.IERS_A_FILE`) readme : str full path to ascii file holding CDS-style readme (default: package version, `iers.IERS_A_README`) Returns ------- `IERS_A` class instance\"\"\"\n if readme is None:\n readme = IERS_A_README\n iers_a = Table.read(file, format='cds', readme=readme)\n return cls(iers_a[~iers_a['UT1_UTC_A'].mask & ~iers_a['PolPMFlag_A'].mask])\n\n def ut1_utc_source(self, i):\n \"\"\"Set UT1-UTC source flag for entries in IERS table\"\"\"\n ut1flag = self['UT1Flag'][i]\n source = np.ones_like(i) * FROM_IERS_B\n source[ut1flag == 'I'] = FROM_IERS_A\n source[ut1flag == 'P'] = FROM_IERS_A_PREDICTION\n return source\n\n def pm_source(self, i):\n \"\"\"Set polar motion source flag for entries in IERS table\"\"\"\n pmflag = self['PolPMFlag'][i]\n source = np.ones_like(i) * FROM_IERS_B\n source[pmflag == 'I'] = FROM_IERS_A\n source[pmflag == 'P'] = FROM_IERS_A_PREDICTION\n return source\n", "source": "the_stack_v2_python_sparse", "source_path": "lib/python2.7/site-packages/astropy/utils/iers/iers.py", "source_repo": "wangyum/Anaconda", "split": "val", "star_events_count": 11}
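Editor's note on the IERS_A record above: __init__ merges the two bulletins column-wise with np.where on the Bulletin B masks (B wins where present, A fills the gaps), and the *_source methods map per-row flag characters to integer source constants by masked assignment. A sketch of that flag-to-source mapping; the constant values here are assumptions for illustration (astropy defines its own FROM_IERS_* constants, which are not shown in the record):

import numpy as np

FROM_IERS_B, FROM_IERS_A, FROM_IERS_A_PREDICTION = 0, 1, 2  # assumed values

i = np.arange(4)                           # row indices into the table
ut1flag = np.array(['B', 'I', 'P', 'B'])   # per-row UT1Flag characters
source = np.ones_like(i) * FROM_IERS_B     # default every row to Bulletin B
source[ut1flag == 'I'] = FROM_IERS_A             # 'I': measured IERS A value
source[ut1flag == 'P'] = FROM_IERS_A_PREDICTION  # 'P': IERS A prediction
print(source)                              # -> [0 1 2 0]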
{"blob_id": "942141b19900eda88e50fa78cdad2de0a173b99c", "bodies": ["self.client = xmlrpc.client.ServerProxy(server_url)\nself.user = user\nself.destination = destination\nself._running = False\nself._message_reader = threading.Thread(target=self._read_messages)\nself.lock = threading.Lock()\nself.logger = QChatLogger('QChatCLIRPCClient-{}'.format(user))", "print('Hello, this is {}'.format(self.user))\nself._running = True\nself._message_reader.start()\nwhile self._running:\n input_text = input('\\n[ {} ]: '.format(self.user))\n the_message = '{} @ {}'.format(input_text, time.time())\n with self.lock:\n self.client.send_message(self.user, self.destination, the_message)", "self.logger.info('Stopping the CLI RPC Client')\nself._running = False\nself._message_reader.join()", "while self._running:\n try:\n with self.lock:\n user_messages = self.client.get_messages(self.user)\n if user_messages:\n print('\\n')\n for sender, messages in user_messages.items():\n for message in messages:\n print('[ {} ]: {}\\n'.format(sender, message))\n print('[ {} ]: '.format(self.user), end='')\n except Exception:\n self.logger.exception('Failed getting messages for {}'.format(self.user))\n time.sleep(2)\n time.sleep(1)"], "bodies_text": "<|body_start_0|>\n self.client = xmlrpc.client.ServerProxy(server_url)\n self.user = user\n self.destination = destination\n self._running = False\n self._message_reader = threading.Thread(target=self._read_messages)\n self.lock = threading.Lock()\n self.logger = QChatLogger('QChatCLIRPCClient-{}'.format(user))\n<|end_body_0|>\n\n<|body_start_1|>\n print('Hello, this is {}'.format(self.user))\n self._running = True\n self._message_reader.start()\n while self._running:\n input_text = input('\\n[ {} ]: '.format(self.user))\n the_message = '{} @ {}'.format(input_text, time.time())\n with self.lock:\n self.client.send_message(self.user, self.destination, the_message)\n<|end_body_1|>\n\n<|body_start_2|>\n self.logger.info('Stopping the CLI RPC Client')\n self._running = False\n self._message_reader.join()\n<|end_body_2|>\n\n<|body_start_3|>\n while self._running:\n try:\n with self.lock:\n user_messages = self.client.get_messages(self.user)\n if user_messages:\n print('\\n')\n for sender, messages in user_messages.items():\n for message in messages:\n print('[ {} ]: {}\\n'.format(sender, message))\n print('[ {} ]: '.format(self.user), end='')\n except Exception:\n self.logger.exception('Failed getting messages for {}'.format(self.user))\n time.sleep(2)\n time.sleep(1)\n<|end_body_3|>\n", "class_docstring": "Simple RPC client that sends messages to an RPCServer", "class_name": "QChatCLIRPCClient", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass QChatCLIRPCClient:\n \"\"\"Simple RPC client that sends messages to an RPCServer\"\"\"\n\n def __init__(self, user, destination, server_url):\n \"\"\"Initializes the RPC client :param user: str The user we wish to send a message as :param destination: str The peer we wish to send a message to :param server_url: str The RPCServer url to connect to, eg. 
http://127.0.0.1:6666\"\"\"\n <|body_0|>\n\n def start(self):\n \"\"\"Starts the RPC client and sends messages based on user input :return: None\"\"\"\n <|body_1|>\n\n def stop(self):\n \"\"\"Stops the RPC Client :return: None\"\"\"\n <|body_2|>\n\n def _read_messages(self):\n \"\"\"Polls the RPCServer for messages that belong to the user running the RPC client :return: None\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.client = xmlrpc.client.ServerProxy(server_url)\n self.user = user\n self.destination = destination\n self._running = False\n self._message_reader = threading.Thread(target=self._read_messages)\n self.lock = threading.Lock()\n self.logger = QChatLogger('QChatCLIRPCClient-{}'.format(user))\n<|end_body_0|>\n\n<|body_start_1|>\n print('Hello, this is {}'.format(self.user))\n self._running = True\n self._message_reader.start()\n while self._running:\n input_text = input('\\n[ {} ]: '.format(self.user))\n the_message = '{} @ {}'.format(input_text, time.time())\n with self.lock:\n self.client.send_message(self.user, self.destination, the_message)\n<|end_body_1|>\n\n<|body_start_2|>\n self.logger.info('Stopping the CLI RPC Client')\n self._running = False\n self._message_reader.join()\n<|end_body_2|>\n\n<|body_start_3|>\n while self._running:\n try:\n with self.lock:\n user_messages = self.client.get_messages(self.user)\n if user_messages:\n print('\\n')\n for sender, messages in user_messages.items():\n for message in messages:\n print('[ {} ]: {}\\n'.format(sender, message))\n print('[ {} ]: '.format(self.user), end='')\n except Exception:\n self.logger.exception('Failed getting messages for {}'.format(self.user))\n time.sleep(2)\n time.sleep(1)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000432", "length_bytes": 5230, "license_type": "permissive", "methods": [{"docstring": "Initializes the RPC client :param user: str The user we wish to send a message as :param destination: str The peer we wish to send a message to :param server_url: str The RPCServer url to connect to, eg. http://127.0.0.1:6666", "name": "__init__", "signature": "def __init__(self, user, destination, server_url)"}, {"docstring": "Starts the RPC client and sends messages based on user input :return: None", "name": "start", "signature": "def start(self)"}, {"docstring": "Stops the RPC Client :return: None", "name": "stop", "signature": "def stop(self)"}, {"docstring": "Polls the RPCServer for messages that belong to the user running the RPC client :return: None", "name": "_read_messages", "signature": "def _read_messages(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_021632", "prompt": "Implement the Python class `QChatCLIRPCClient` described below.\n\nClass description:\nSimple RPC client that sends messages to an RPCServer\n\nMethod signatures and docstrings:\n- def __init__(self, user, destination, server_url): Initializes the RPC client :param user: str The user we wish to send a message as :param destination: str The peer we wish to send a message to :param server_url: str The RPCServer url to connect to, eg. 
http://127.0.0.1:6666\n- def start(self): Starts the RPC client and sends messages based on user input :return: None\n- def stop(self): Stops the RPC Client :return: None\n- def _read_messages(self): Polls the RPCServer for messages that belong to the user running the RPC client :return: None", "prompted_full_text": "Implement the Python class `QChatCLIRPCClient` described below.\n\nClass description:\nSimple RPC client that sends messages to an RPCServer\n\nMethod signatures and docstrings:\n- def __init__(self, user, destination, server_url): Initializes the RPC client :param user: str The user we wish to send a message as :param destination: str The peer we wish to send a message to :param server_url: str The RPCServer url to connect to, eg. http://127.0.0.1:6666\n- def start(self): Starts the RPC client and sends messages based on user input :return: None\n- def stop(self): Stops the RPC Client :return: None\n- def _read_messages(self): Polls the RPCServer for messages that belong to the user running the RPC client :return: None\n\n<|skeleton|>\nclass QChatCLIRPCClient:\n \"\"\"Simple RPC client that sends messages to an RPCServer\"\"\"\n\n def __init__(self, user, destination, server_url):\n \"\"\"Initializes the RPC client :param user: str The user we wish to send a message as :param destination: str The peer we wish to send a message to :param server_url: str The RPCServer url to connect to, eg. http://127.0.0.1:6666\"\"\"\n <|body_0|>\n\n def start(self):\n \"\"\"Starts the RPC client and sends messages based on user input :return: None\"\"\"\n <|body_1|>\n\n def stop(self):\n \"\"\"Stops the RPC Client :return: None\"\"\"\n <|body_2|>\n\n def _read_messages(self):\n \"\"\"Polls the RPCServer for messages that belong to the user running the RPC client :return: None\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.client = xmlrpc.client.ServerProxy(server_url)\n self.user = user\n self.destination = destination\n self._running = False\n self._message_reader = threading.Thread(target=self._read_messages)\n self.lock = threading.Lock()\n self.logger = QChatLogger('QChatCLIRPCClient-{}'.format(user))\n<|end_body_0|>\n\n<|body_start_1|>\n print('Hello, this is {}'.format(self.user))\n self._running = True\n self._message_reader.start()\n while self._running:\n input_text = input('\\n[ {} ]: '.format(self.user))\n the_message = '{} @ {}'.format(input_text, time.time())\n with self.lock:\n self.client.send_message(self.user, self.destination, the_message)\n<|end_body_1|>\n\n<|body_start_2|>\n self.logger.info('Stopping the CLI RPC Client')\n self._running = False\n self._message_reader.join()\n<|end_body_2|>\n\n<|body_start_3|>\n while self._running:\n try:\n with self.lock:\n user_messages = self.client.get_messages(self.user)\n if user_messages:\n print('\\n')\n for sender, messages in user_messages.items():\n for message in messages:\n print('[ {} ]: {}\\n'.format(sender, message))\n print('[ {} ]: '.format(self.user), end='')\n except Exception:\n self.logger.exception('Failed getting messages for {}'.format(self.user))\n time.sleep(2)\n time.sleep(1)\n<|end_body_3|>\n", "revision_id": "a393d530b9d289ba2a75682cd1d4a07d40776785", "skeleton": "<|skeleton|>\nclass QChatCLIRPCClient:\n \"\"\"Simple RPC client that sends messages to an RPCServer\"\"\"\n\n def __init__(self, user, destination, server_url):\n \"\"\"Initializes the RPC client :param user: str The user we wish to send a message as :param destination: str The peer we wish to send a message to :param server_url: 
str The RPCServer url to connect to, eg. http://127.0.0.1:6666\"\"\"\n <|body_0|>\n\n def start(self):\n \"\"\"Starts the RPC client and sends messages based on user input :return: None\"\"\"\n <|body_1|>\n\n def stop(self):\n \"\"\"Stops the RPC Client :return: None\"\"\"\n <|body_2|>\n\n def _read_messages(self):\n \"\"\"Polls the RPCServer for messages that belong to the user running the RPC client :return: None\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class QChatCLIRPCClient:\n \"\"\"Simple RPC client that sends messages to an RPCServer\"\"\"\n\n def __init__(self, user, destination, server_url):\n \"\"\"Initializes the RPC client :param user: str The user we wish to send a message as :param destination: str The peer we wish to send a message to :param server_url: str The RPCServer url to connect to, eg. http://127.0.0.1:6666\"\"\"\n self.client = xmlrpc.client.ServerProxy(server_url)\n self.user = user\n self.destination = destination\n self._running = False\n self._message_reader = threading.Thread(target=self._read_messages)\n self.lock = threading.Lock()\n self.logger = QChatLogger('QChatCLIRPCClient-{}'.format(user))\n\n def start(self):\n \"\"\"Starts the RPC client and sends messages based on user input :return: None\"\"\"\n print('Hello, this is {}'.format(self.user))\n self._running = True\n self._message_reader.start()\n while self._running:\n input_text = input('\\n[ {} ]: '.format(self.user))\n the_message = '{} @ {}'.format(input_text, time.time())\n with self.lock:\n self.client.send_message(self.user, self.destination, the_message)\n\n def stop(self):\n \"\"\"Stops the RPC Client :return: None\"\"\"\n self.logger.info('Stopping the CLI RPC Client')\n self._running = False\n self._message_reader.join()\n\n def _read_messages(self):\n \"\"\"Polls the RPCServer for messages that belong to the user running the RPC client :return: None\"\"\"\n while self._running:\n try:\n with self.lock:\n user_messages = self.client.get_messages(self.user)\n if user_messages:\n print('\\n')\n for sender, messages in user_messages.items():\n for message in messages:\n print('[ {} ]: {}\\n'.format(sender, message))\n print('[ {} ]: '.format(self.user), end='')\n except Exception:\n self.logger.exception('Failed getting messages for {}'.format(self.user))\n time.sleep(2)\n time.sleep(1)\n", "source": "the_stack_v2_python_sparse", "source_path": "qchat/rpc.py", "source_repo": "mdskrzypczyk/QChat", "split": "val", "star_events_count": 4}
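The QChatCLIRPCClient record above shows only the client side of the protocol: it calls send_message(user, destination, message) and get_messages(user) over XML-RPC and drains whatever the server returns. As a rough companion sketch, the stdlib SimpleXMLRPCServer below implements a toy server with those two method names inferred from the client's calls; the real QChat RPCServer is not part of the record and may behave differently.

from collections import defaultdict
from xmlrpc.server import SimpleXMLRPCServer
import threading

class ToyMessageServer:
    """In-memory message store exposing the two calls the client makes."""

    def __init__(self):
        self._lock = threading.Lock()
        self._inboxes = defaultdict(dict)  # destination -> {sender: [messages]}

    def send_message(self, sender, destination, message):
        with self._lock:
            self._inboxes[destination].setdefault(sender, []).append(message)
        return True  # XML-RPC methods must return a marshallable value

    def get_messages(self, user):
        # Pop the whole inbox so each poll only sees new messages,
        # matching how the client simply prints everything it receives.
        with self._lock:
            return self._inboxes.pop(user, {})

if __name__ == '__main__':
    server = SimpleXMLRPCServer(('127.0.0.1', 6666), logRequests=False)
    server.register_instance(ToyMessageServer())
    print('Toy server listening on http://127.0.0.1:6666')
    server.serve_forever()

The port matches the example URL in the record's docstring. Draining the inbox on each get_messages call is an assumption; it keeps the client's two-second polling loop from reprinting old messages.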
{"blob_id": "09ceeff88db61da4ecf6a84878bedc5302bdf39a", "bodies": ["if self.request.version == 'v6':\n return IngestStatusSerializerV6\nelif self.request.version == 'v7':\n return IngestStatusSerializerV6", "if request.version == 'v6' or request.version == 'v7':\n return self.list_impl(request)\nraise Http404()", "started = rest_util.parse_timestamp(request, 'started', required=False)\nended = rest_util.parse_timestamp(request, 'ended', required=False)\nrest_util.check_time_range(started, ended, max_duration=datetime.timedelta(days=31))\nuse_ingest_time = rest_util.parse_bool(request, 'use_ingest_time', default_value=False)\ningests = Ingest.objects.get_status(started, ended, use_ingest_time)\npage = self.paginate_queryset(ingests)\nserializer = self.get_serializer(page, many=True)\nreturn self.get_paginated_response(serializer.data)"], "bodies_text": "<|body_start_0|>\n if self.request.version == 'v6':\n return IngestStatusSerializerV6\n elif self.request.version == 'v7':\n return IngestStatusSerializerV6\n<|end_body_0|>\n\n<|body_start_1|>\n if request.version == 'v6' or request.version == 'v7':\n return self.list_impl(request)\n raise Http404()\n<|end_body_1|>\n\n<|body_start_2|>\n started = rest_util.parse_timestamp(request, 'started', required=False)\n ended = rest_util.parse_timestamp(request, 'ended', required=False)\n rest_util.check_time_range(started, ended, max_duration=datetime.timedelta(days=31))\n use_ingest_time = rest_util.parse_bool(request, 'use_ingest_time', default_value=False)\n ingests = Ingest.objects.get_status(started, ended, use_ingest_time)\n page = self.paginate_queryset(ingests)\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n<|end_body_2|>\n", "class_docstring": "This view is the endpoint for retrieving summarized ingest status.", "class_name": "IngestsStatusView", "detected_licenses": ["LicenseRef-scancode-free-unknown", "Apache-2.0", "LicenseRef-scancode-public-domain"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass IngestsStatusView:\n \"\"\"This view is the endpoint for retrieving summarized ingest status.\"\"\"\n\n def get_serializer_class(self):\n \"\"\"Returns the appropriate serializer based off the requests version of the REST API\"\"\"\n <|body_0|>\n\n def list(self, request):\n \"\"\"Determine api version and call specific method :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_1|>\n\n def list_impl(self, request):\n \"\"\"Retrieves the ingest status information and returns it in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.request.version == 'v6':\n return IngestStatusSerializerV6\n elif self.request.version == 'v7':\n return IngestStatusSerializerV6\n<|end_body_0|>\n\n<|body_start_1|>\n if request.version == 'v6' or request.version == 'v7':\n return self.list_impl(request)\n raise Http404()\n<|end_body_1|>\n\n<|body_start_2|>\n started = rest_util.parse_timestamp(request, 'started', required=False)\n ended = rest_util.parse_timestamp(request, 'ended', required=False)\n rest_util.check_time_range(started, ended, max_duration=datetime.timedelta(days=31))\n 
use_ingest_time = rest_util.parse_bool(request, 'use_ingest_time', default_value=False)\n ingests = Ingest.objects.get_status(started, ended, use_ingest_time)\n page = self.paginate_queryset(ingests)\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000433", "length_bytes": 30689, "license_type": "permissive", "methods": [{"docstring": "Returns the appropriate serializer based off the requests version of the REST API", "name": "get_serializer_class", "signature": "def get_serializer_class(self)"}, {"docstring": "Determine api version and call specific method :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user", "name": "list", "signature": "def list(self, request)"}, {"docstring": "Retrieves the ingest status information and returns it in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user", "name": "list_impl", "signature": "def list_impl(self, request)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_052994", "prompt": "Implement the Python class `IngestsStatusView` described below.\n\nClass description:\nThis view is the endpoint for retrieving summarized ingest status.\n\nMethod signatures and docstrings:\n- def get_serializer_class(self): Returns the appropriate serializer based off the requests version of the REST API\n- def list(self, request): Determine api version and call specific method :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\n- def list_impl(self, request): Retrieves the ingest status information and returns it in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user", "prompted_full_text": "Implement the Python class `IngestsStatusView` described below.\n\nClass description:\nThis view is the endpoint for retrieving summarized ingest status.\n\nMethod signatures and docstrings:\n- def get_serializer_class(self): Returns the appropriate serializer based off the requests version of the REST API\n- def list(self, request): Determine api version and call specific method :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\n- def list_impl(self, request): Retrieves the ingest status information and returns it in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\n\n<|skeleton|>\nclass IngestsStatusView:\n \"\"\"This view is the endpoint for retrieving summarized ingest status.\"\"\"\n\n def get_serializer_class(self):\n \"\"\"Returns the appropriate serializer based off the requests version of the REST API\"\"\"\n <|body_0|>\n\n def list(self, request):\n \"\"\"Determine api version and call specific method :param request: the HTTP POST request :type request: 
:class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_1|>\n\n def list_impl(self, request):\n \"\"\"Retrieves the ingest status information and returns it in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.request.version == 'v6':\n return IngestStatusSerializerV6\n elif self.request.version == 'v7':\n return IngestStatusSerializerV6\n<|end_body_0|>\n\n<|body_start_1|>\n if request.version == 'v6' or request.version == 'v7':\n return self.list_impl(request)\n raise Http404()\n<|end_body_1|>\n\n<|body_start_2|>\n started = rest_util.parse_timestamp(request, 'started', required=False)\n ended = rest_util.parse_timestamp(request, 'ended', required=False)\n rest_util.check_time_range(started, ended, max_duration=datetime.timedelta(days=31))\n use_ingest_time = rest_util.parse_bool(request, 'use_ingest_time', default_value=False)\n ingests = Ingest.objects.get_status(started, ended, use_ingest_time)\n page = self.paginate_queryset(ingests)\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n<|end_body_2|>\n", "revision_id": "28618aee07ceed9e4a6eb7b8d0e6f05b31d8fd6b", "skeleton": "<|skeleton|>\nclass IngestsStatusView:\n \"\"\"This view is the endpoint for retrieving summarized ingest status.\"\"\"\n\n def get_serializer_class(self):\n \"\"\"Returns the appropriate serializer based off the requests version of the REST API\"\"\"\n <|body_0|>\n\n def list(self, request):\n \"\"\"Determine api version and call specific method :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_1|>\n\n def list_impl(self, request):\n \"\"\"Retrieves the ingest status information and returns it in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class IngestsStatusView:\n \"\"\"This view is the endpoint for retrieving summarized ingest status.\"\"\"\n\n def get_serializer_class(self):\n \"\"\"Returns the appropriate serializer based off the requests version of the REST API\"\"\"\n if self.request.version == 'v6':\n return IngestStatusSerializerV6\n elif self.request.version == 'v7':\n return IngestStatusSerializerV6\n\n def list(self, request):\n \"\"\"Determine api version and call specific method :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n if request.version == 'v6' or request.version == 'v7':\n return self.list_impl(request)\n raise Http404()\n\n def list_impl(self, request):\n \"\"\"Retrieves the ingest status information and returns it in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: 
:class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n started = rest_util.parse_timestamp(request, 'started', required=False)\n ended = rest_util.parse_timestamp(request, 'ended', required=False)\n rest_util.check_time_range(started, ended, max_duration=datetime.timedelta(days=31))\n use_ingest_time = rest_util.parse_bool(request, 'use_ingest_time', default_value=False)\n ingests = Ingest.objects.get_status(started, ended, use_ingest_time)\n page = self.paginate_queryset(ingests)\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n", "source": "the_stack_v2_python_sparse", "source_path": "scale/ingest/views.py", "source_repo": "kfconsultant/scale", "split": "val", "star_events_count": 0}
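Both branches of get_serializer_class in the record above return the same IngestStatusSerializerV6, which is easy to miss in an if/elif chain. A hypothetical standalone refactor of that dispatch, with a stub standing in for the real Scale serializer class, maps versions to serializers instead; unlike the record, this sketch raises on unknown versions rather than silently returning None.

class IngestStatusSerializerV6:
    """Stub for illustration; the real serializer lives in the Scale codebase."""

SERIALIZER_BY_VERSION = {
    'v6': IngestStatusSerializerV6,
    'v7': IngestStatusSerializerV6,  # v7 deliberately reuses the v6 serializer
}

def get_serializer_class(version):
    try:
        return SERIALIZER_BY_VERSION[version]
    except KeyError:
        raise LookupError('unsupported API version: %s' % version)

assert get_serializer_class('v7') is IngestStatusSerializerV6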
{"blob_id": "bf352035591a769e7cfa4416d62b8036336da5c9", "bodies": ["Module.__init__(self, **kwargs)\nself._sources = [sources] if isinstance(sources, str) else sources\nself._publisher = self.add_child_object(publisher, Publisher)\nself._max_ellipticity = max_ellipticity\nself._correct_for_airmass = correct_for_airmass", "await Module.open(self)\nlog.info('Subscribing to new image events...')\nawait self.comm.register_event(NewImageEvent, self.process_new_image_event)", "if not isinstance(event, NewImageEvent):\n return False\nif self._sources is not None and sender not in self._sources:\n return False\nlog.info('Received new image event from %s.', sender)\ntry:\n log.info('Downloading file %s...', event.filename)\n image = await self.vfs.read_image(event.filename)\nexcept FileNotFoundError:\n log.error('Could not download image.')\n return False\ncat = image.catalog\nif cat is None:\n return False\ncat = cat[cat['ellipticity'] < self._max_ellipticity]\nwcs = WCS(image.header)\npix_size = abs(proj_plane_pixel_scales(wcs)[0] * 3600.0)\nseeing = np.mean(cat['fwhm']) * pix_size\nif self._correct_for_airmass:\n if 'AIRMASS' in image.header:\n seeing /= image.header['AIRMASS'] ** 0.6\n else:\n return False\nif self._publisher is not None:\n await self._publisher(time=Time.now().isot, seeing=seeing)\nreturn True"], "bodies_text": "<|body_start_0|>\n Module.__init__(self, **kwargs)\n self._sources = [sources] if isinstance(sources, str) else sources\n self._publisher = self.add_child_object(publisher, Publisher)\n self._max_ellipticity = max_ellipticity\n self._correct_for_airmass = correct_for_airmass\n<|end_body_0|>\n\n<|body_start_1|>\n await Module.open(self)\n log.info('Subscribing to new image events...')\n await self.comm.register_event(NewImageEvent, self.process_new_image_event)\n<|end_body_1|>\n\n<|body_start_2|>\n if not isinstance(event, NewImageEvent):\n return False\n if self._sources is not None and sender not in self._sources:\n return False\n log.info('Received new image event from %s.', sender)\n try:\n log.info('Downloading file %s...', event.filename)\n image = await self.vfs.read_image(event.filename)\n except FileNotFoundError:\n log.error('Could not download image.')\n return False\n cat = image.catalog\n if cat is None:\n return False\n cat = cat[cat['ellipticity'] < self._max_ellipticity]\n wcs = WCS(image.header)\n pix_size = abs(proj_plane_pixel_scales(wcs)[0] * 3600.0)\n seeing = np.mean(cat['fwhm']) * pix_size\n if self._correct_for_airmass:\n if 'AIRMASS' in image.header:\n seeing /= image.header['AIRMASS'] ** 0.6\n else:\n return False\n if self._publisher is not None:\n await self._publisher(time=Time.now().isot, seeing=seeing)\n return True\n<|end_body_2|>\n", "class_docstring": "Measures seeing on reduced images with a catalog.", "class_name": "Seeing", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Seeing:\n \"\"\"Measures seeing on reduced images with a catalog.\"\"\"\n\n def __init__(self, sources: Optional[Union[str, List[str]]]=None, publisher: Optional[Union[Publisher, Dict[str, Any]]]=None, max_ellipticity: float=0.2, correct_for_airmass: bool=True, **kwargs: Any):\n \"\"\"Creates a new seeing estimator. Args: sources: List of sources (e.g. cameras) to process images from or None for all. publisher: Publisher to publish results to. max_ellipticity: Maximum ellipticity for sources to consider. 
correct_for_zenith: Whether to correct seeing for airmass.\"\"\"\n <|body_0|>\n\n async def open(self) -> None:\n \"\"\"Open module.\"\"\"\n <|body_1|>\n\n async def process_new_image_event(self, event: Event, sender: str) -> bool:\n \"\"\"Puts a new images in the DB with the given ID. Args: event: New image event sender: Who sent the event? Returns: Success\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Module.__init__(self, **kwargs)\n self._sources = [sources] if isinstance(sources, str) else sources\n self._publisher = self.add_child_object(publisher, Publisher)\n self._max_ellipticity = max_ellipticity\n self._correct_for_airmass = correct_for_airmass\n<|end_body_0|>\n\n<|body_start_1|>\n await Module.open(self)\n log.info('Subscribing to new image events...')\n await self.comm.register_event(NewImageEvent, self.process_new_image_event)\n<|end_body_1|>\n\n<|body_start_2|>\n if not isinstance(event, NewImageEvent):\n return False\n if self._sources is not None and sender not in self._sources:\n return False\n log.info('Received new image event from %s.', sender)\n try:\n log.info('Downloading file %s...', event.filename)\n image = await self.vfs.read_image(event.filename)\n except FileNotFoundError:\n log.error('Could not download image.')\n return False\n cat = image.catalog\n if cat is None:\n return False\n cat = cat[cat['ellipticity'] < self._max_ellipticity]\n wcs = WCS(image.header)\n pix_size = abs(proj_plane_pixel_scales(wcs)[0] * 3600.0)\n seeing = np.mean(cat['fwhm']) * pix_size\n if self._correct_for_airmass:\n if 'AIRMASS' in image.header:\n seeing /= image.header['AIRMASS'] ** 0.6\n else:\n return False\n if self._publisher is not None:\n await self._publisher(time=Time.now().isot, seeing=seeing)\n return True\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000434", "length_bytes": 3630, "license_type": "permissive", "methods": [{"docstring": "Creates a new seeing estimator. Args: sources: List of sources (e.g. cameras) to process images from or None for all. publisher: Publisher to publish results to. max_ellipticity: Maximum ellipticity for sources to consider. correct_for_zenith: Whether to correct seeing for airmass.", "name": "__init__", "signature": "def __init__(self, sources: Optional[Union[str, List[str]]]=None, publisher: Optional[Union[Publisher, Dict[str, Any]]]=None, max_ellipticity: float=0.2, correct_for_airmass: bool=True, **kwargs: Any)"}, {"docstring": "Open module.", "name": "open", "signature": "async def open(self) -> None"}, {"docstring": "Puts a new images in the DB with the given ID. Args: event: New image event sender: Who sent the event? Returns: Success", "name": "process_new_image_event", "signature": "async def process_new_image_event(self, event: Event, sender: str) -> bool"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_044057", "prompt": "Implement the Python class `Seeing` described below.\n\nClass description:\nMeasures seeing on reduced images with a catalog.\n\nMethod signatures and docstrings:\n- def __init__(self, sources: Optional[Union[str, List[str]]]=None, publisher: Optional[Union[Publisher, Dict[str, Any]]]=None, max_ellipticity: float=0.2, correct_for_airmass: bool=True, **kwargs: Any): Creates a new seeing estimator. Args: sources: List of sources (e.g. cameras) to process images from or None for all. publisher: Publisher to publish results to. max_ellipticity: Maximum ellipticity for sources to consider. 
correct_for_zenith: Whether to correct seeing for airmass.\n- async def open(self) -> None: Open module.\n- async def process_new_image_event(self, event: Event, sender: str) -> bool: Puts a new images in the DB with the given ID. Args: event: New image event sender: Who sent the event? Returns: Success", "prompted_full_text": "Implement the Python class `Seeing` described below.\n\nClass description:\nMeasures seeing on reduced images with a catalog.\n\nMethod signatures and docstrings:\n- def __init__(self, sources: Optional[Union[str, List[str]]]=None, publisher: Optional[Union[Publisher, Dict[str, Any]]]=None, max_ellipticity: float=0.2, correct_for_airmass: bool=True, **kwargs: Any): Creates a new seeing estimator. Args: sources: List of sources (e.g. cameras) to process images from or None for all. publisher: Publisher to publish results to. max_ellipticity: Maximum ellipticity for sources to consider. correct_for_zenith: Whether to correct seeing for airmass.\n- async def open(self) -> None: Open module.\n- async def process_new_image_event(self, event: Event, sender: str) -> bool: Puts a new images in the DB with the given ID. Args: event: New image event sender: Who sent the event? Returns: Success\n\n<|skeleton|>\nclass Seeing:\n \"\"\"Measures seeing on reduced images with a catalog.\"\"\"\n\n def __init__(self, sources: Optional[Union[str, List[str]]]=None, publisher: Optional[Union[Publisher, Dict[str, Any]]]=None, max_ellipticity: float=0.2, correct_for_airmass: bool=True, **kwargs: Any):\n \"\"\"Creates a new seeing estimator. Args: sources: List of sources (e.g. cameras) to process images from or None for all. publisher: Publisher to publish results to. max_ellipticity: Maximum ellipticity for sources to consider. correct_for_zenith: Whether to correct seeing for airmass.\"\"\"\n <|body_0|>\n\n async def open(self) -> None:\n \"\"\"Open module.\"\"\"\n <|body_1|>\n\n async def process_new_image_event(self, event: Event, sender: str) -> bool:\n \"\"\"Puts a new images in the DB with the given ID. Args: event: New image event sender: Who sent the event? 
Returns: Success\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Module.__init__(self, **kwargs)\n self._sources = [sources] if isinstance(sources, str) else sources\n self._publisher = self.add_child_object(publisher, Publisher)\n self._max_ellipticity = max_ellipticity\n self._correct_for_airmass = correct_for_airmass\n<|end_body_0|>\n\n<|body_start_1|>\n await Module.open(self)\n log.info('Subscribing to new image events...')\n await self.comm.register_event(NewImageEvent, self.process_new_image_event)\n<|end_body_1|>\n\n<|body_start_2|>\n if not isinstance(event, NewImageEvent):\n return False\n if self._sources is not None and sender not in self._sources:\n return False\n log.info('Received new image event from %s.', sender)\n try:\n log.info('Downloading file %s...', event.filename)\n image = await self.vfs.read_image(event.filename)\n except FileNotFoundError:\n log.error('Could not download image.')\n return False\n cat = image.catalog\n if cat is None:\n return False\n cat = cat[cat['ellipticity'] < self._max_ellipticity]\n wcs = WCS(image.header)\n pix_size = abs(proj_plane_pixel_scales(wcs)[0] * 3600.0)\n seeing = np.mean(cat['fwhm']) * pix_size\n if self._correct_for_airmass:\n if 'AIRMASS' in image.header:\n seeing /= image.header['AIRMASS'] ** 0.6\n else:\n return False\n if self._publisher is not None:\n await self._publisher(time=Time.now().isot, seeing=seeing)\n return True\n<|end_body_2|>\n", "revision_id": "2d7a06e5485b61b6ca7e51d99b08651ea6021086", "skeleton": "<|skeleton|>\nclass Seeing:\n \"\"\"Measures seeing on reduced images with a catalog.\"\"\"\n\n def __init__(self, sources: Optional[Union[str, List[str]]]=None, publisher: Optional[Union[Publisher, Dict[str, Any]]]=None, max_ellipticity: float=0.2, correct_for_airmass: bool=True, **kwargs: Any):\n \"\"\"Creates a new seeing estimator. Args: sources: List of sources (e.g. cameras) to process images from or None for all. publisher: Publisher to publish results to. max_ellipticity: Maximum ellipticity for sources to consider. correct_for_zenith: Whether to correct seeing for airmass.\"\"\"\n <|body_0|>\n\n async def open(self) -> None:\n \"\"\"Open module.\"\"\"\n <|body_1|>\n\n async def process_new_image_event(self, event: Event, sender: str) -> bool:\n \"\"\"Puts a new images in the DB with the given ID. Args: event: New image event sender: Who sent the event? Returns: Success\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Seeing:\n \"\"\"Measures seeing on reduced images with a catalog.\"\"\"\n\n def __init__(self, sources: Optional[Union[str, List[str]]]=None, publisher: Optional[Union[Publisher, Dict[str, Any]]]=None, max_ellipticity: float=0.2, correct_for_airmass: bool=True, **kwargs: Any):\n \"\"\"Creates a new seeing estimator. Args: sources: List of sources (e.g. cameras) to process images from or None for all. publisher: Publisher to publish results to. max_ellipticity: Maximum ellipticity for sources to consider. 
correct_for_zenith: Whether to correct seeing for airmass.\"\"\"\n Module.__init__(self, **kwargs)\n self._sources = [sources] if isinstance(sources, str) else sources\n self._publisher = self.add_child_object(publisher, Publisher)\n self._max_ellipticity = max_ellipticity\n self._correct_for_airmass = correct_for_airmass\n\n async def open(self) -> None:\n \"\"\"Open module.\"\"\"\n await Module.open(self)\n log.info('Subscribing to new image events...')\n await self.comm.register_event(NewImageEvent, self.process_new_image_event)\n\n async def process_new_image_event(self, event: Event, sender: str) -> bool:\n \"\"\"Puts a new images in the DB with the given ID. Args: event: New image event sender: Who sent the event? Returns: Success\"\"\"\n if not isinstance(event, NewImageEvent):\n return False\n if self._sources is not None and sender not in self._sources:\n return False\n log.info('Received new image event from %s.', sender)\n try:\n log.info('Downloading file %s...', event.filename)\n image = await self.vfs.read_image(event.filename)\n except FileNotFoundError:\n log.error('Could not download image.')\n return False\n cat = image.catalog\n if cat is None:\n return False\n cat = cat[cat['ellipticity'] < self._max_ellipticity]\n wcs = WCS(image.header)\n pix_size = abs(proj_plane_pixel_scales(wcs)[0] * 3600.0)\n seeing = np.mean(cat['fwhm']) * pix_size\n if self._correct_for_airmass:\n if 'AIRMASS' in image.header:\n seeing /= image.header['AIRMASS'] ** 0.6\n else:\n return False\n if self._publisher is not None:\n await self._publisher(time=Time.now().isot, seeing=seeing)\n return True\n", "source": "the_stack_v2_python_sparse", "source_path": "pyobs/modules/image/seeing.py", "source_repo": "pyobs/pyobs-core", "split": "val", "star_events_count": 9}
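The core computation in process_new_image_event above is compact enough to check by hand: seeing is the mean catalog FWHM in pixels times the pixel scale in arcsec/pixel, optionally scaled toward zenith by the conventional airmass**0.6 factor. A standalone sketch of that arithmetic, with made-up numbers:

import numpy as np

fwhm_pix = np.array([3.1, 2.9, 3.4, 3.0])  # per-source FWHM from the catalog, pixels
pix_size = 0.45                            # arcsec/pixel, derived from the image WCS
airmass = 1.5                              # AIRMASS keyword from the FITS header

seeing = np.mean(fwhm_pix) * pix_size      # 3.1 px * 0.45 "/px = 1.395 arcsec
seeing_zenith = seeing / airmass ** 0.6    # ~1.09 arcsec corrected to zenith
print('seeing = %.2f", at zenith = %.2f"' % (seeing, seeing_zenith))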
{"blob_id": "427256978898c22ec7f70553bdf49741ace4e542", "bodies": ["self.uid = uid.encode()\nself.cb_obj = callback_obj\nself.cb_obj_gossip = callback_obj_gossip\nself.port = port\nself.chunks_size = chunks_size\nself.loop = asyncio.get_event_loop()\nself.udp_sock = UdpSender(self.loop, ip, int(port))\nself.token_size = 2 * struct.calcsize('i') + struct.calcsize('17s')\nself.tokens = {}\nself.tc_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nself.tc_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\nself.tc_sock.setblocking(False)\nself.tc_sock.bind((ip, int(port)))\nself.tc_sock.listen()", "print('Listening for tcp connections')\nwhile True:\n conn, addr = await self.loop.sock_accept(self.tc_sock)\n if __debug__:\n print('{} got tcp connection from {}'.format(self.port, addr))\n asyncio.ensure_future(self.tcp_response(conn))", "print('Listening for udp connections')\nwhile True:\n data, addr = await self.udp_sock.recvfrom(self.chunks_size)\n if __debug__:\n print('{} got udp request from {}'.format(self.port, addr))\n asyncio.ensure_future(self.udp_response(data, addr))", "response = await self.check_msg(data)\nif response:\n await self.udp_sock.sendto(response, addr)", "int_size = struct.calcsize('i')\nrecv_msg_size = await self.loop.sock_recv(conn, int_size)\ntry:\n msg_size = struct.unpack('i', recv_msg_size)[0]\nexcept Exception as e:\n conn.close()\n return\nres = b''\nwhile len(res) < msg_size:\n res += await self.loop.sock_recv(conn, self.chunks_size)\n await asyncio.sleep(0)\nresponse = await self.check_msg(res)\nresponse_stream = io.BytesIO(response)\nstream = True\nwhile stream:\n stream = response_stream.read(self.chunks_size)\n try:\n await self.loop.sock_sendall(conn, stream)\n except Exception as e:\n conn.close()\n return\nconn.close()\nif __debug__:\n print('Connection closed')", "token = res[:self.token_size]\npayload = res[self.token_size:]\nmsg_type, msg_cntr, sender = struct.unpack('ii17s', token)\nif sender not in self.tokens.keys():\n if __debug__:\n print('Add new token')\n self.tokens[sender] = 0\nif self.tokens[sender] != msg_cntr:\n self.tokens[sender] = msg_cntr\n token = struct.pack('ii17s', msg_type, self.tokens[sender], self.uid)\n if msg_type == 0:\n if payload:\n new_msg = await self.cb_obj.arrival(sender, payload)\n if new_msg:\n response = token + new_msg if new_msg else token\n else:\n response = token\n else:\n response = token\n elif msg_type == 1:\n await self.cb_obj_gossip.arrival(sender, payload)\n response = token\nelse:\n if __debug__:\n print('NO TOKEN ARRIVAL')\n token = struct.pack('ii17s', msg_type, self.tokens[sender], self.uid)\n response = token\nreturn response"], "bodies_text": "<|body_start_0|>\n self.uid = uid.encode()\n self.cb_obj = callback_obj\n self.cb_obj_gossip = callback_obj_gossip\n self.port = port\n self.chunks_size = chunks_size\n self.loop = asyncio.get_event_loop()\n self.udp_sock = UdpSender(self.loop, ip, int(port))\n self.token_size = 2 * struct.calcsize('i') + struct.calcsize('17s')\n self.tokens = {}\n self.tc_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.tc_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.tc_sock.setblocking(False)\n self.tc_sock.bind((ip, int(port)))\n self.tc_sock.listen()\n<|end_body_0|>\n\n<|body_start_1|>\n print('Listening for tcp connections')\n while True:\n conn, addr = await self.loop.sock_accept(self.tc_sock)\n if __debug__:\n print('{} got tcp connection from {}'.format(self.port, addr))\n 
asyncio.ensure_future(self.tcp_response(conn))\n<|end_body_1|>\n\n<|body_start_2|>\n print('Listening for udp connections')\n while True:\n data, addr = await self.udp_sock.recvfrom(self.chunks_size)\n if __debug__:\n print('{} got udp request from {}'.format(self.port, addr))\n asyncio.ensure_future(self.udp_response(data, addr))\n<|end_body_2|>\n\n<|body_start_3|>\n response = await self.check_msg(data)\n if response:\n await self.udp_sock.sendto(response, addr)\n<|end_body_3|>\n\n<|body_start_4|>\n int_size = struct.calcsize('i')\n recv_msg_size = await self.loop.sock_recv(conn, int_size)\n try:\n msg_size = struct.unpack('i', recv_msg_size)[0]\n except Exception as e:\n conn.close()\n return\n res = b''\n while len(res) < msg_size:\n res += await self.loop.sock_recv(conn, self.chunks_size)\n await asyncio.sleep(0)\n response = await self.check_msg(res)\n response_stream = io.BytesIO(response)\n stream = True\n while stream:\n stream = response_stream.read(self.chunks_size)\n try:\n await self.loop.sock_sendall(conn, stream)\n except Exception as e:\n conn.close()\n return\n conn.close()\n if __debug__:\n print('Connection closed')\n<|end_body_4|>\n\n<|body_start_5|>\n token = res[:self.token_size]\n payload = res[self.token_size:]\n msg_type, msg_cntr, sender = struct.unpack('ii17s', token)\n if sender not in self.tokens.keys():\n if __debug__:\n print('Add new token')\n self.tokens[sender] = 0\n if self.tokens[sender] != msg_cntr:\n self.tokens[sender] = msg_cntr\n token = struct.pack('ii17s', msg_type, self.tokens[sender], self.uid)\n if msg_type == 0:\n if payload:\n new_msg = await self.cb_obj.arrival(sender, payload)\n if new_msg:\n response = token + new_msg if new_msg else token\n else:\n response = token\n else:\n response = token\n elif msg_type == 1:\n await self.cb_obj_gossip.arrival(sender, payload)\n response = token\n else:\n if __debug__:\n print('NO TOKEN ARRIVAL')\n token = struct.pack('ii17s', msg_type, self.tokens[sender], self.uid)\n response = token\n return response\n<|end_body_5|>\n", "class_docstring": "Creates a server recv channel for pingpong and gossip", "class_name": "ServerRecvChannel", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ServerRecvChannel:\n \"\"\"Creates a server recv channel for pingpong and gossip\"\"\"\n\n def __init__(self, uid, callback_obj, callback_obj_gossip, port, ip, chunks_size=1024):\n \"\"\"Initialize callbacks, parameters and create tcp/udp sockets\"\"\"\n <|body_0|>\n\n async def tcp_listen(self):\n \"\"\"Wait for tcp connections to arrive\"\"\"\n <|body_1|>\n\n async def udp_listen(self):\n \"\"\"Wait until udp message arrives.\"\"\"\n <|body_2|>\n\n async def udp_response(self, data, addr):\n \"\"\"Create udp response and send it.\"\"\"\n <|body_3|>\n\n async def tcp_response(self, conn):\n \"\"\"Receive tcp stream, create response and send it\"\"\"\n <|body_4|>\n\n async def check_msg(self, res):\n \"\"\"Determine message type and create response message accordingly\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.uid = uid.encode()\n self.cb_obj = callback_obj\n self.cb_obj_gossip = callback_obj_gossip\n self.port = port\n self.chunks_size = chunks_size\n self.loop = asyncio.get_event_loop()\n self.udp_sock = UdpSender(self.loop, ip, int(port))\n self.token_size = 2 * struct.calcsize('i') + struct.calcsize('17s')\n self.tokens = {}\n self.tc_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n 
self.tc_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.tc_sock.setblocking(False)\n self.tc_sock.bind((ip, int(port)))\n self.tc_sock.listen()\n<|end_body_0|>\n\n<|body_start_1|>\n print('Listening for tcp connections')\n while True:\n conn, addr = await self.loop.sock_accept(self.tc_sock)\n if __debug__:\n print('{} got tcp connection from {}'.format(self.port, addr))\n asyncio.ensure_future(self.tcp_response(conn))\n<|end_body_1|>\n\n<|body_start_2|>\n print('Listening for udp connections')\n while True:\n data, addr = await self.udp_sock.recvfrom(self.chunks_size)\n if __debug__:\n print('{} got udp request from {}'.format(self.port, addr))\n asyncio.ensure_future(self.udp_response(data, addr))\n<|end_body_2|>\n\n<|body_start_3|>\n response = await self.check_msg(data)\n if response:\n await self.udp_sock.sendto(response, addr)\n<|end_body_3|>\n\n<|body_start_4|>\n int_size = struct.calcsize('i')\n recv_msg_size = await self.loop.sock_recv(conn, int_size)\n try:\n msg_size = struct.unpack('i', recv_msg_size)[0]\n except Exception as e:\n conn.close()\n return\n res = b''\n while len(res) < msg_size:\n res += await self.loop.sock_recv(conn, self.chunks_size)\n await asyncio.sleep(0)\n response = await self.check_msg(res)\n response_stream = io.BytesIO(response)\n stream = True\n while stream:\n stream = response_stream.read(self.chunks_size)\n try:\n await self.loop.sock_sendall(conn, stream)\n except Exception as e:\n conn.close()\n return\n conn.close()\n if __debug__:\n print('Connection closed')\n<|end_body_4|>\n\n<|body_start_5|>\n token = res[:self.token_size]\n payload = res[self.token_size:]\n msg_type, msg_cntr, sender = struct.unpack('ii17s', token)\n if sender not in self.tokens.keys():\n if __debug__:\n print('Add new token')\n self.tokens[sender] = 0\n if self.tokens[sender] != msg_cntr:\n self.tokens[sender] = msg_cntr\n token = struct.pack('ii17s', msg_type, self.tokens[sender], self.uid)\n if msg_type == 0:\n if payload:\n new_msg = await self.cb_obj.arrival(sender, payload)\n if new_msg:\n response = token + new_msg if new_msg else token\n else:\n response = token\n else:\n response = token\n elif msg_type == 1:\n await self.cb_obj_gossip.arrival(sender, payload)\n response = token\n else:\n if __debug__:\n print('NO TOKEN ARRIVAL')\n token = struct.pack('ii17s', msg_type, self.tokens[sender], self.uid)\n response = token\n return response\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000435", "length_bytes": 5713, "license_type": "permissive", "methods": [{"docstring": "Initialize callbacks, parameters and create tcp/udp sockets", "name": "__init__", "signature": "def __init__(self, uid, callback_obj, callback_obj_gossip, port, ip, chunks_size=1024)"}, {"docstring": "Wait for tcp connections to arrive", "name": "tcp_listen", "signature": "async def tcp_listen(self)"}, {"docstring": "Wait until udp message arrives.", "name": "udp_listen", "signature": "async def udp_listen(self)"}, {"docstring": "Create udp response and send it.", "name": "udp_response", "signature": "async def udp_response(self, data, addr)"}, {"docstring": "Receive tcp stream, create response and send it", "name": "tcp_response", "signature": "async def tcp_response(self, conn)"}, {"docstring": "Determine message type and create response message accordingly", "name": "check_msg", "signature": "async def check_msg(self, res)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_train_050968", "prompt": "Implement the Python class `ServerRecvChannel` 
described below.\n\nClass description:\nCreates a server recv channel for pingpong and gossip\n\nMethod signatures and docstrings:\n- def __init__(self, uid, callback_obj, callback_obj_gossip, port, ip, chunks_size=1024): Initialize callbacks, parameters and create tcp/udp sockets\n- async def tcp_listen(self): Wait for tcp connections to arrive\n- async def udp_listen(self): Wait until udp message arrives.\n- async def udp_response(self, data, addr): Create udp response and send it.\n- async def tcp_response(self, conn): Receive tcp stream, create response and send it\n- async def check_msg(self, res): Determine message type and create response message accordingly", "prompted_full_text": "Implement the Python class `ServerRecvChannel` described below.\n\nClass description:\nCreates a server recv channel for pingpong and gossip\n\nMethod signatures and docstrings:\n- def __init__(self, uid, callback_obj, callback_obj_gossip, port, ip, chunks_size=1024): Initialize callbacks, parameters and create tcp/udp sockets\n- async def tcp_listen(self): Wait for tcp connections to arrive\n- async def udp_listen(self): Wait until udp message arrives.\n- async def udp_response(self, data, addr): Create udp response and send it.\n- async def tcp_response(self, conn): Receive tcp stream, create response and send it\n- async def check_msg(self, res): Determine message type and create response message accordingly\n\n<|skeleton|>\nclass ServerRecvChannel:\n \"\"\"Creates a server recv channel for pingpong and gossip\"\"\"\n\n def __init__(self, uid, callback_obj, callback_obj_gossip, port, ip, chunks_size=1024):\n \"\"\"Initialize callbacks, parameters and create tcp/udp sockets\"\"\"\n <|body_0|>\n\n async def tcp_listen(self):\n \"\"\"Wait for tcp connections to arrive\"\"\"\n <|body_1|>\n\n async def udp_listen(self):\n \"\"\"Wait until udp message arrives.\"\"\"\n <|body_2|>\n\n async def udp_response(self, data, addr):\n \"\"\"Create udp response and send it.\"\"\"\n <|body_3|>\n\n async def tcp_response(self, conn):\n \"\"\"Receive tcp stream, create response and send it\"\"\"\n <|body_4|>\n\n async def check_msg(self, res):\n \"\"\"Determine message type and create response message accordingly\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.uid = uid.encode()\n self.cb_obj = callback_obj\n self.cb_obj_gossip = callback_obj_gossip\n self.port = port\n self.chunks_size = chunks_size\n self.loop = asyncio.get_event_loop()\n self.udp_sock = UdpSender(self.loop, ip, int(port))\n self.token_size = 2 * struct.calcsize('i') + struct.calcsize('17s')\n self.tokens = {}\n self.tc_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.tc_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.tc_sock.setblocking(False)\n self.tc_sock.bind((ip, int(port)))\n self.tc_sock.listen()\n<|end_body_0|>\n\n<|body_start_1|>\n print('Listening for tcp connections')\n while True:\n conn, addr = await self.loop.sock_accept(self.tc_sock)\n if __debug__:\n print('{} got tcp connection from {}'.format(self.port, addr))\n asyncio.ensure_future(self.tcp_response(conn))\n<|end_body_1|>\n\n<|body_start_2|>\n print('Listening for udp connections')\n while True:\n data, addr = await self.udp_sock.recvfrom(self.chunks_size)\n if __debug__:\n print('{} got udp request from {}'.format(self.port, addr))\n asyncio.ensure_future(self.udp_response(data, addr))\n<|end_body_2|>\n\n<|body_start_3|>\n response = await self.check_msg(data)\n if response:\n await self.udp_sock.sendto(response, 
addr)\n<|end_body_3|>\n\n<|body_start_4|>\n int_size = struct.calcsize('i')\n recv_msg_size = await self.loop.sock_recv(conn, int_size)\n try:\n msg_size = struct.unpack('i', recv_msg_size)[0]\n except Exception as e:\n conn.close()\n return\n res = b''\n while len(res) < msg_size:\n res += await self.loop.sock_recv(conn, self.chunks_size)\n await asyncio.sleep(0)\n response = await self.check_msg(res)\n response_stream = io.BytesIO(response)\n stream = True\n while stream:\n stream = response_stream.read(self.chunks_size)\n try:\n await self.loop.sock_sendall(conn, stream)\n except Exception as e:\n conn.close()\n return\n conn.close()\n if __debug__:\n print('Connection closed')\n<|end_body_4|>\n\n<|body_start_5|>\n token = res[:self.token_size]\n payload = res[self.token_size:]\n msg_type, msg_cntr, sender = struct.unpack('ii17s', token)\n if sender not in self.tokens.keys():\n if __debug__:\n print('Add new token')\n self.tokens[sender] = 0\n if self.tokens[sender] != msg_cntr:\n self.tokens[sender] = msg_cntr\n token = struct.pack('ii17s', msg_type, self.tokens[sender], self.uid)\n if msg_type == 0:\n if payload:\n new_msg = await self.cb_obj.arrival(sender, payload)\n if new_msg:\n response = token + new_msg if new_msg else token\n else:\n response = token\n else:\n response = token\n elif msg_type == 1:\n await self.cb_obj_gossip.arrival(sender, payload)\n response = token\n else:\n if __debug__:\n print('NO TOKEN ARRIVAL')\n token = struct.pack('ii17s', msg_type, self.tokens[sender], self.uid)\n response = token\n return response\n<|end_body_5|>\n", "revision_id": "c44b71b782afcae360fb3ed90b1d43da78eae338", "skeleton": "<|skeleton|>\nclass ServerRecvChannel:\n \"\"\"Creates a server recv channel for pingpong and gossip\"\"\"\n\n def __init__(self, uid, callback_obj, callback_obj_gossip, port, ip, chunks_size=1024):\n \"\"\"Initialize callbacks, parameters and create tcp/udp sockets\"\"\"\n <|body_0|>\n\n async def tcp_listen(self):\n \"\"\"Wait for tcp connections to arrive\"\"\"\n <|body_1|>\n\n async def udp_listen(self):\n \"\"\"Wait until udp message arrives.\"\"\"\n <|body_2|>\n\n async def udp_response(self, data, addr):\n \"\"\"Create udp response and send it.\"\"\"\n <|body_3|>\n\n async def tcp_response(self, conn):\n \"\"\"Receive tcp stream, create response and send it\"\"\"\n <|body_4|>\n\n async def check_msg(self, res):\n \"\"\"Determine message type and create response message accordingly\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ServerRecvChannel:\n \"\"\"Creates a server recv channel for pingpong and gossip\"\"\"\n\n def __init__(self, uid, callback_obj, callback_obj_gossip, port, ip, chunks_size=1024):\n \"\"\"Initialize callbacks, parameters and create tcp/udp sockets\"\"\"\n self.uid = uid.encode()\n self.cb_obj = callback_obj\n self.cb_obj_gossip = callback_obj_gossip\n self.port = port\n self.chunks_size = chunks_size\n self.loop = asyncio.get_event_loop()\n self.udp_sock = UdpSender(self.loop, ip, int(port))\n self.token_size = 2 * struct.calcsize('i') + struct.calcsize('17s')\n self.tokens = {}\n self.tc_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.tc_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.tc_sock.setblocking(False)\n self.tc_sock.bind((ip, int(port)))\n self.tc_sock.listen()\n\n async def tcp_listen(self):\n \"\"\"Wait for tcp connections to arrive\"\"\"\n 
print('Listening for tcp connections')\n while True:\n conn, addr = await self.loop.sock_accept(self.tc_sock)\n if __debug__:\n print('{} got tcp connection from {}'.format(self.port, addr))\n asyncio.ensure_future(self.tcp_response(conn))\n\n async def udp_listen(self):\n \"\"\"Wait until udp message arrives.\"\"\"\n print('Listening for udp connections')\n while True:\n data, addr = await self.udp_sock.recvfrom(self.chunks_size)\n if __debug__:\n print('{} got udp request from {}'.format(self.port, addr))\n asyncio.ensure_future(self.udp_response(data, addr))\n\n async def udp_response(self, data, addr):\n \"\"\"Create udp response and send it.\"\"\"\n response = await self.check_msg(data)\n if response:\n await self.udp_sock.sendto(response, addr)\n\n async def tcp_response(self, conn):\n \"\"\"Receive tcp stream, create response and send it\"\"\"\n int_size = struct.calcsize('i')\n recv_msg_size = await self.loop.sock_recv(conn, int_size)\n try:\n msg_size = struct.unpack('i', recv_msg_size)[0]\n except Exception as e:\n conn.close()\n return\n res = b''\n while len(res) < msg_size:\n res += await self.loop.sock_recv(conn, self.chunks_size)\n await asyncio.sleep(0)\n response = await self.check_msg(res)\n response_stream = io.BytesIO(response)\n stream = True\n while stream:\n stream = response_stream.read(self.chunks_size)\n try:\n await self.loop.sock_sendall(conn, stream)\n except Exception as e:\n conn.close()\n return\n conn.close()\n if __debug__:\n print('Connection closed')\n\n async def check_msg(self, res):\n \"\"\"Determine message type and create response message accordingly\"\"\"\n token = res[:self.token_size]\n payload = res[self.token_size:]\n msg_type, msg_cntr, sender = struct.unpack('ii17s', token)\n if sender not in self.tokens.keys():\n if __debug__:\n print('Add new token')\n self.tokens[sender] = 0\n if self.tokens[sender] != msg_cntr:\n self.tokens[sender] = msg_cntr\n token = struct.pack('ii17s', msg_type, self.tokens[sender], self.uid)\n if msg_type == 0:\n if payload:\n new_msg = await self.cb_obj.arrival(sender, payload)\n if new_msg:\n response = token + new_msg if new_msg else token\n else:\n response = token\n else:\n response = token\n elif msg_type == 1:\n await self.cb_obj_gossip.arrival(sender, payload)\n response = token\n else:\n if __debug__:\n print('NO TOKEN ARRIVAL')\n token = struct.pack('ii17s', msg_type, self.tokens[sender], self.uid)\n response = token\n return response\n", "source": "the_stack_v2_python_sparse", "source_path": "self-stabilizing-coded-atomic-storage/code/channel/serverRecvChannel.py", "source_repo": "eladschiller/self-stabilizing-cloud", "split": "val", "star_events_count": 0}
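All the framing in the ServerRecvChannel record above is driven by the struct format 'ii17s': a message-type int, a counter int, and a fixed 17-byte sender uid, followed by an arbitrary payload. A round-trip sketch of that token layout (the uid value here is invented):

import struct

TOKEN_FMT = 'ii17s'
TOKEN_SIZE = struct.calcsize(TOKEN_FMT)  # equals 2 * calcsize('i') + calcsize('17s')

uid = b'node-0123456789ab'               # exactly 17 bytes; shorter uids get zero-padded
wire = struct.pack(TOKEN_FMT, 1, 42, uid) + b'payload bytes'

msg_type, msg_cntr, sender = struct.unpack(TOKEN_FMT, wire[:TOKEN_SIZE])
payload = wire[TOKEN_SIZE:]
assert (msg_type, msg_cntr, sender, payload) == (1, 42, uid, b'payload bytes')

The counter is what check_msg uses for deduplication: a repeated counter from the same sender is treated as a retransmission, so the server re-acks the token without reprocessing the payload.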
{"blob_id": "0b87d515e5c127b6da2b79222f8828749bf8a029", "bodies": ["strip_dc = lambda name: ''.join([el for el in name if el in string.ascii_lowercase])\nmsg_rvd = cls.parse_flask_req(r, session)\nmsg_rvd.update({'dc': {'name': strip_dc(msg_rvd['name'])}})\nreturn Line(**msg_rvd)", "if row.get('dc_id'):\n row.setdefault('dc', {})\n row['dc']['id'] = row.get('dc_id')\n del row['dc_id']\nif row.get('dc_name'):\n row.setdefault('dc', {})\n row['dc']['name'] = row.get('dc_name')\n del row['dc_name']\nreturn Line(**row)"], "bodies_text": "<|body_start_0|>\n strip_dc = lambda name: ''.join([el for el in name if el in string.ascii_lowercase])\n msg_rvd = cls.parse_flask_req(r, session)\n msg_rvd.update({'dc': {'name': strip_dc(msg_rvd['name'])}})\n return Line(**msg_rvd)\n<|end_body_0|>\n\n<|body_start_1|>\n if row.get('dc_id'):\n row.setdefault('dc', {})\n row['dc']['id'] = row.get('dc_id')\n del row['dc_id']\n if row.get('dc_name'):\n row.setdefault('dc', {})\n row['dc']['name'] = row.get('dc_name')\n del row['dc_name']\n return Line(**row)\n<|end_body_1|>\n", "class_docstring": "Line instance static fabric.", "class_name": "LineBuilder", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LineBuilder:\n \"\"\"Line instance static fabric.\"\"\"\n\n def from_Flask_req(cls, r, session):\n \"\"\"Creates class instance from Flask request object. Args: r: Flask request object. session: Flask session object. Returns: Ammo class instance.\"\"\"\n <|body_0|>\n\n def from_row(cls, **row):\n \"\"\"Creates class instance from RDBMS returned row. Args: row: dict with table columns as keys. Returns: *Line* class instance.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n strip_dc = lambda name: ''.join([el for el in name if el in string.ascii_lowercase])\n msg_rvd = cls.parse_flask_req(r, session)\n msg_rvd.update({'dc': {'name': strip_dc(msg_rvd['name'])}})\n return Line(**msg_rvd)\n<|end_body_0|>\n\n<|body_start_1|>\n if row.get('dc_id'):\n row.setdefault('dc', {})\n row['dc']['id'] = row.get('dc_id')\n del row['dc_id']\n if row.get('dc_name'):\n row.setdefault('dc', {})\n row['dc']['name'] = row.get('dc_name')\n del row['dc_name']\n return Line(**row)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000436", "length_bytes": 1844, "license_type": "permissive", "methods": [{"docstring": "Creates class instance from Flask request object. Args: r: Flask request object. session: Flask session object. Returns: Ammo class instance.", "name": "from_Flask_req", "signature": "def from_Flask_req(cls, r, session)"}, {"docstring": "Creates class instance from RDBMS returned row. Args: row: dict with table columns as keys. Returns: *Line* class instance.", "name": "from_row", "signature": "def from_row(cls, **row)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003804", "prompt": "Implement the Python class `LineBuilder` described below.\n\nClass description:\nLine instance static fabric.\n\nMethod signatures and docstrings:\n- def from_Flask_req(cls, r, session): Creates class instance from Flask request object. Args: r: Flask request object. session: Flask session object. Returns: Ammo class instance.\n- def from_row(cls, **row): Creates class instance from RDBMS returned row. Args: row: dict with table columns as keys. 
Returns: *Line* class instance.", "prompted_full_text": "Implement the Python class `LineBuilder` described below.\n\nClass description:\nLine instance static fabric.\n\nMethod signatures and docstrings:\n- def from_Flask_req(cls, r, session): Creates class instance from Flask request object. Args: r: Flask request object. session: Flask session object. Returns: Ammo class instance.\n- def from_row(cls, **row): Creates class instance from RDBMS returned row. Args: row: dict with table columns as keys. Returns: *Line* class instance.\n\n<|skeleton|>\nclass LineBuilder:\n \"\"\"Line instance static fabric.\"\"\"\n\n def from_Flask_req(cls, r, session):\n \"\"\"Creates class instance from Flask request object. Args: r: Flask request object. session: Flask session object. Returns: Ammo class instance.\"\"\"\n <|body_0|>\n\n def from_row(cls, **row):\n \"\"\"Creates class instance from RDBMS returned row. Args: row: dict with table columns as keys. Returns: *Line* class instance.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n strip_dc = lambda name: ''.join([el for el in name if el in string.ascii_lowercase])\n msg_rvd = cls.parse_flask_req(r, session)\n msg_rvd.update({'dc': {'name': strip_dc(msg_rvd['name'])}})\n return Line(**msg_rvd)\n<|end_body_0|>\n\n<|body_start_1|>\n if row.get('dc_id'):\n row.setdefault('dc', {})\n row['dc']['id'] = row.get('dc_id')\n del row['dc_id']\n if row.get('dc_name'):\n row.setdefault('dc', {})\n row['dc']['name'] = row.get('dc_name')\n del row['dc_name']\n return Line(**row)\n<|end_body_1|>\n", "revision_id": "e898fae67236cfb64b73c964d5b3ade73c976de6", "skeleton": "<|skeleton|>\nclass LineBuilder:\n \"\"\"Line instance static fabric.\"\"\"\n\n def from_Flask_req(cls, r, session):\n \"\"\"Creates class instance from Flask request object. Args: r: Flask request object. session: Flask session object. Returns: Ammo class instance.\"\"\"\n <|body_0|>\n\n def from_row(cls, **row):\n \"\"\"Creates class instance from RDBMS returned row. Args: row: dict with table columns as keys. Returns: *Line* class instance.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class LineBuilder:\n \"\"\"Line instance static fabric.\"\"\"\n\n def from_Flask_req(cls, r, session):\n \"\"\"Creates class instance from Flask request object. Args: r: Flask request object. session: Flask session object. Returns: Ammo class instance.\"\"\"\n strip_dc = lambda name: ''.join([el for el in name if el in string.ascii_lowercase])\n msg_rvd = cls.parse_flask_req(r, session)\n msg_rvd.update({'dc': {'name': strip_dc(msg_rvd['name'])}})\n return Line(**msg_rvd)\n\n def from_row(cls, **row):\n \"\"\"Creates class instance from RDBMS returned row. Args: row: dict with table columns as keys. Returns: *Line* class instance.\"\"\"\n if row.get('dc_id'):\n row.setdefault('dc', {})\n row['dc']['id'] = row.get('dc_id')\n del row['dc_id']\n if row.get('dc_name'):\n row.setdefault('dc', {})\n row['dc']['name'] = row.get('dc_name')\n del row['dc_name']\n return Line(**row)\n", "source": "the_stack_v2_python_sparse", "source_path": "lunaport_server/domain/line.py", "source_repo": "maklaut/lunaport_server", "split": "val", "star_events_count": 0}
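The only non-obvious step in from_Flask_req above is the strip_dc lambda, which derives a datacenter name by keeping nothing but the lowercase ASCII letters of the line name. Rewritten as a named function with a couple of illustrative checks (the sample names are invented):

import string

def strip_dc(name):
    """Keep only lowercase ASCII letters, dropping digits, dashes, dots, etc."""
    return ''.join(ch for ch in name if ch in string.ascii_lowercase)

assert strip_dc('sas-42.prod') == 'sasprod'
assert strip_dc('IVA-7a') == 'a'  # uppercase letters are dropped too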
{"blob_id": "d3493d9c148c4907ecd185b7768243f36e4a57be", "bodies": ["self.cleaned_data = trim_fields_in_form(self)\nif not self.cleaned_data['billing_zip_postal'] and (not self.cleaned_data['billing_city']) and (not self.cleaned_data['billing_address2']) and (not self.cleaned_data['billing_address1']):\n self.cleaned_data['billing_state_province'] = ''\nreturn self.cleaned_data", "billing_address1 = self.cleaned_data['billing_address1']\nbilling_address2 = self.cleaned_data['billing_address2']\nbilling_city = self.cleaned_data['billing_city']\nbilling_state_province = self.cleaned_data['billing_state_province']\nbilling_zip_postal = self.cleaned_data['billing_zip_postal']\ntry:\n billing_record = BillingRecord.objects.get(id=billing_record_id, business__id=business_id)\n billing_record.billing_address1 = billing_address1\n billing_record.billing_address2 = billing_address2\n billing_record.billing_city = billing_city\n billing_record.billing_state_province = billing_state_province\n billing_record.billing_zip_postal = billing_zip_postal\nexcept BillingRecord.DoesNotExist:\n billing_record = BillingRecord(business_id=business_id, billing_address1=billing_address1, billing_address2=billing_address2, billing_city=billing_city, billing_state_province=billing_state_province, billing_zip_postal=billing_zip_postal)\nbilling_record.save()\nreturn billing_record"], "bodies_text": "<|body_start_0|>\n self.cleaned_data = trim_fields_in_form(self)\n if not self.cleaned_data['billing_zip_postal'] and (not self.cleaned_data['billing_city']) and (not self.cleaned_data['billing_address2']) and (not self.cleaned_data['billing_address1']):\n self.cleaned_data['billing_state_province'] = ''\n return self.cleaned_data\n<|end_body_0|>\n\n<|body_start_1|>\n billing_address1 = self.cleaned_data['billing_address1']\n billing_address2 = self.cleaned_data['billing_address2']\n billing_city = self.cleaned_data['billing_city']\n billing_state_province = self.cleaned_data['billing_state_province']\n billing_zip_postal = self.cleaned_data['billing_zip_postal']\n try:\n billing_record = BillingRecord.objects.get(id=billing_record_id, business__id=business_id)\n billing_record.billing_address1 = billing_address1\n billing_record.billing_address2 = billing_address2\n billing_record.billing_city = billing_city\n billing_record.billing_state_province = billing_state_province\n billing_record.billing_zip_postal = billing_zip_postal\n except BillingRecord.DoesNotExist:\n billing_record = BillingRecord(business_id=business_id, billing_address1=billing_address1, billing_address2=billing_address2, billing_city=billing_city, billing_state_province=billing_state_province, billing_zip_postal=billing_zip_postal)\n billing_record.save()\n return billing_record\n<|end_body_1|>\n", "class_docstring": "The billing record form during coupon purchase", "class_name": "CheckoutCouponBillingRecordForm", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CheckoutCouponBillingRecordForm:\n \"\"\"The billing record form during coupon purchase\"\"\"\n\n def clean(self):\n \"\"\"Clean all fields and de-populate default state if no other address info is present.\"\"\"\n <|body_0|>\n\n def create_or_update(self, business_id, billing_record_id=None):\n \"\"\"Retrieve billing record from previous payment submit or create new billing record. Return billing record.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.cleaned_data = trim_fields_in_form(self)\n if not self.cleaned_data['billing_zip_postal'] and (not self.cleaned_data['billing_city']) and (not self.cleaned_data['billing_address2']) and (not self.cleaned_data['billing_address1']):\n self.cleaned_data['billing_state_province'] = ''\n return self.cleaned_data\n<|end_body_0|>\n\n<|body_start_1|>\n billing_address1 = self.cleaned_data['billing_address1']\n billing_address2 = self.cleaned_data['billing_address2']\n billing_city = self.cleaned_data['billing_city']\n billing_state_province = self.cleaned_data['billing_state_province']\n billing_zip_postal = self.cleaned_data['billing_zip_postal']\n try:\n billing_record = BillingRecord.objects.get(id=billing_record_id, business__id=business_id)\n billing_record.billing_address1 = billing_address1\n billing_record.billing_address2 = billing_address2\n billing_record.billing_city = billing_city\n billing_record.billing_state_province = billing_state_province\n billing_record.billing_zip_postal = billing_zip_postal\n except BillingRecord.DoesNotExist:\n billing_record = BillingRecord(business_id=business_id, billing_address1=billing_address1, billing_address2=billing_address2, billing_city=billing_city, billing_state_province=billing_state_province, billing_zip_postal=billing_zip_postal)\n billing_record.save()\n return billing_record\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000437", "length_bytes": 9895, "license_type": "no_license", "methods": [{"docstring": "Clean all fields and de-populate default state if no other address info is present.", "name": "clean", "signature": "def clean(self)"}, {"docstring": "Retrieve billing record from previous payment submit or create new billing record. Return billing record.", "name": "create_or_update", "signature": "def create_or_update(self, business_id, billing_record_id=None)"}], "n_methods": 2, "prompt": "Implement the Python class `CheckoutCouponBillingRecordForm` described below.\n\nClass description:\nThe billing record form during coupon purchase\n\nMethod signatures and docstrings:\n- def clean(self): Clean all fields and de-populate default state if no other address info is present.\n- def create_or_update(self, business_id, billing_record_id=None): Retrieve billing record from previous payment submit or create new billing record. Return billing record.", "prompted_full_text": "Implement the Python class `CheckoutCouponBillingRecordForm` described below.\n\nClass description:\nThe billing record form during coupon purchase\n\nMethod signatures and docstrings:\n- def clean(self): Clean all fields and de-populate default state if no other address info is present.\n- def create_or_update(self, business_id, billing_record_id=None): Retrieve billing record from previous payment submit or create new billing record. Return billing record.\n\n<|skeleton|>\nclass CheckoutCouponBillingRecordForm:\n \"\"\"The billing record form during coupon purchase\"\"\"\n\n def clean(self):\n \"\"\"Clean all fields and de-populate default state if no other address info is present.\"\"\"\n <|body_0|>\n\n def create_or_update(self, business_id, billing_record_id=None):\n \"\"\"Retrieve billing record from previous payment submit or create new billing record. Return billing record.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.cleaned_data = trim_fields_in_form(self)\n if not self.cleaned_data['billing_zip_postal'] and (not self.cleaned_data['billing_city']) and (not self.cleaned_data['billing_address2']) and (not self.cleaned_data['billing_address1']):\n self.cleaned_data['billing_state_province'] = ''\n return self.cleaned_data\n<|end_body_0|>\n\n<|body_start_1|>\n billing_address1 = self.cleaned_data['billing_address1']\n billing_address2 = self.cleaned_data['billing_address2']\n billing_city = self.cleaned_data['billing_city']\n billing_state_province = self.cleaned_data['billing_state_province']\n billing_zip_postal = self.cleaned_data['billing_zip_postal']\n try:\n billing_record = BillingRecord.objects.get(id=billing_record_id, business__id=business_id)\n billing_record.billing_address1 = billing_address1\n billing_record.billing_address2 = billing_address2\n billing_record.billing_city = billing_city\n billing_record.billing_state_province = billing_state_province\n billing_record.billing_zip_postal = billing_zip_postal\n except BillingRecord.DoesNotExist:\n billing_record = BillingRecord(business_id=business_id, billing_address1=billing_address1, billing_address2=billing_address2, billing_city=billing_city, billing_state_province=billing_state_province, billing_zip_postal=billing_zip_postal)\n billing_record.save()\n return billing_record\n<|end_body_1|>\n", "revision_id": "a780ccdc3350d4b5c7990c65d1af8d71060c62cc", "skeleton": "<|skeleton|>\nclass CheckoutCouponBillingRecordForm:\n \"\"\"The billing record form during coupon purchase\"\"\"\n\n def clean(self):\n \"\"\"Clean all fields and de-populate default state if no other address info is present.\"\"\"\n <|body_0|>\n\n def create_or_update(self, business_id, billing_record_id=None):\n \"\"\"Retrieve billing record from previous payment submit or create new billing record. Return billing record.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CheckoutCouponBillingRecordForm:\n \"\"\"The billing record form during coupon purchase\"\"\"\n\n def clean(self):\n \"\"\"Clean all fields and de-populate default state if no other address info is present.\"\"\"\n self.cleaned_data = trim_fields_in_form(self)\n if not self.cleaned_data['billing_zip_postal'] and (not self.cleaned_data['billing_city']) and (not self.cleaned_data['billing_address2']) and (not self.cleaned_data['billing_address1']):\n self.cleaned_data['billing_state_province'] = ''\n return self.cleaned_data\n\n def create_or_update(self, business_id, billing_record_id=None):\n \"\"\"Retrieve billing record from previous payment submit or create new billing record. Return billing record.\"\"\"\n billing_address1 = self.cleaned_data['billing_address1']\n billing_address2 = self.cleaned_data['billing_address2']\n billing_city = self.cleaned_data['billing_city']\n billing_state_province = self.cleaned_data['billing_state_province']\n billing_zip_postal = self.cleaned_data['billing_zip_postal']\n try:\n billing_record = BillingRecord.objects.get(id=billing_record_id, business__id=business_id)\n billing_record.billing_address1 = billing_address1\n billing_record.billing_address2 = billing_address2\n billing_record.billing_city = billing_city\n billing_record.billing_state_province = billing_state_province\n billing_record.billing_zip_postal = billing_zip_postal\n except BillingRecord.DoesNotExist:\n billing_record = BillingRecord(business_id=business_id, billing_address1=billing_address1, billing_address2=billing_address2, billing_city=billing_city, billing_state_province=billing_state_province, billing_zip_postal=billing_zip_postal)\n billing_record.save()\n return billing_record\n", "source": "the_stack_v2_python_sparse", "source_path": "ecommerce/forms.py", "source_repo": "wcirillo/ten", "split": "val", "star_events_count": 0}
{"blob_id": "c651747cbea5fc8e3c760d64e20a847004586ec9", "bodies": ["ub = self.app.module_map.userbase\nevent = self.barcamp.get_event(eid)\nrooms = event.timetable.get('rooms', [])\ntimeslots = event.timetable.get('timeslots', [])\nsessions = event.timetable.get('sessions', {})\nif self.barcamp.ticketmode_enabled:\n tdb = self.config.dbs.tickets\n tickets = tdb.get_tickets(barcamp_id=self.barcamp._id)\n uids = set([t.user_id for t in tickets])\n participants = list(ub.get_users_by_ids(uids))\nelse:\n participants = list(ub.get_users_by_ids(event.participants))\nparticipants = [{'name': p.fullname, '_id': str(p._id)} for p in participants]\nproposals = []\nfor p in self.config.dbs.sessions.find({'barcamp_id': str(self.barcamp_id)}):\n proposals.append({'value': p.title, 'label': '%s (%s)' % (p.title, p.user.fullname), 'description': p.description, 'user_id': p.user_id, 'vote_count': p.vote_count})\nreturn {'rooms': rooms, 'timeslots': timeslots, 'event': event, 'eid': event._id, 'participants': participants, 'proposals': proposals, 'sessions': sessions}", "event = self.barcamp.get_event(eid)\ndata = json.loads(self.request.data)\nevent.timetable = {'rooms': data.get('rooms', []), 'timeslots': data.get('timeslots', []), 'sessions': data.get('sessions', {})}\nself.barcamp.events[eid] = event\nself.barcamp.save()\nreturn {'status': 'ok'}"], "bodies_text": "<|body_start_0|>\n ub = self.app.module_map.userbase\n event = self.barcamp.get_event(eid)\n rooms = event.timetable.get('rooms', [])\n timeslots = event.timetable.get('timeslots', [])\n sessions = event.timetable.get('sessions', {})\n if self.barcamp.ticketmode_enabled:\n tdb = self.config.dbs.tickets\n tickets = tdb.get_tickets(barcamp_id=self.barcamp._id)\n uids = set([t.user_id for t in tickets])\n participants = list(ub.get_users_by_ids(uids))\n else:\n participants = list(ub.get_users_by_ids(event.participants))\n participants = [{'name': p.fullname, '_id': str(p._id)} for p in participants]\n proposals = []\n for p in self.config.dbs.sessions.find({'barcamp_id': str(self.barcamp_id)}):\n proposals.append({'value': p.title, 'label': '%s (%s)' % (p.title, p.user.fullname), 'description': p.description, 'user_id': p.user_id, 'vote_count': p.vote_count})\n return {'rooms': rooms, 'timeslots': timeslots, 'event': event, 'eid': event._id, 'participants': participants, 'proposals': proposals, 'sessions': sessions}\n<|end_body_0|>\n\n<|body_start_1|>\n event = self.barcamp.get_event(eid)\n data = json.loads(self.request.data)\n event.timetable = {'rooms': data.get('rooms', []), 'timeslots': data.get('timeslots', []), 'sessions': data.get('sessions', {})}\n self.barcamp.events[eid] = event\n self.barcamp.save()\n return {'status': 'ok'}\n<|end_body_1|>\n", "class_docstring": "handles all AJAX related session board data", "class_name": "SessionBoardData", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SessionBoardData:\n \"\"\"handles all AJAX related session board data\"\"\"\n\n def get(self, slug=None, eid=None):\n \"\"\"return rooms and timeslots\"\"\"\n <|body_0|>\n\n def post(self, slug=None, eid=None):\n \"\"\"store room and timetable data\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ub = self.app.module_map.userbase\n event = self.barcamp.get_event(eid)\n rooms = event.timetable.get('rooms', [])\n timeslots = event.timetable.get('timeslots', [])\n sessions = event.timetable.get('sessions', {})\n if self.barcamp.ticketmode_enabled:\n tdb = self.config.dbs.tickets\n tickets = tdb.get_tickets(barcamp_id=self.barcamp._id)\n uids = set([t.user_id for t in tickets])\n participants = list(ub.get_users_by_ids(uids))\n else:\n participants = list(ub.get_users_by_ids(event.participants))\n participants = [{'name': p.fullname, '_id': str(p._id)} for p in participants]\n proposals = []\n for p in self.config.dbs.sessions.find({'barcamp_id': str(self.barcamp_id)}):\n proposals.append({'value': p.title, 'label': '%s (%s)' % (p.title, p.user.fullname), 'description': p.description, 'user_id': p.user_id, 'vote_count': p.vote_count})\n return {'rooms': rooms, 'timeslots': timeslots, 'event': event, 'eid': event._id, 'participants': participants, 'proposals': proposals, 'sessions': sessions}\n<|end_body_0|>\n\n<|body_start_1|>\n event = self.barcamp.get_event(eid)\n data = json.loads(self.request.data)\n event.timetable = {'rooms': data.get('rooms', []), 'timeslots': data.get('timeslots', []), 'sessions': data.get('sessions', {})}\n self.barcamp.events[eid] = event\n self.barcamp.save()\n return {'status': 'ok'}\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000438", "length_bytes": 5339, "license_type": "permissive", "methods": [{"docstring": "return rooms and timeslots", "name": "get", "signature": "def get(self, slug=None, eid=None)"}, {"docstring": "store room and timetable data", "name": "post", "signature": "def post(self, slug=None, eid=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_002361", "prompt": "Implement the Python class `SessionBoardData` described below.\n\nClass description:\nhandles all AJAX related session board data\n\nMethod signatures and docstrings:\n- def get(self, slug=None, eid=None): return rooms and timeslots\n- def post(self, slug=None, eid=None): store room and timetable data", "prompted_full_text": "Implement the Python class `SessionBoardData` described below.\n\nClass description:\nhandles all AJAX related session board data\n\nMethod signatures and docstrings:\n- def get(self, slug=None, eid=None): return rooms and timeslots\n- def post(self, slug=None, eid=None): store room and timetable data\n\n<|skeleton|>\nclass SessionBoardData:\n \"\"\"handles all AJAX related session board data\"\"\"\n\n def get(self, slug=None, eid=None):\n \"\"\"return rooms and timeslots\"\"\"\n <|body_0|>\n\n def post(self, slug=None, eid=None):\n \"\"\"store room and timetable data\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ub = self.app.module_map.userbase\n event = self.barcamp.get_event(eid)\n rooms = event.timetable.get('rooms', [])\n timeslots = event.timetable.get('timeslots', [])\n sessions = event.timetable.get('sessions', {})\n if self.barcamp.ticketmode_enabled:\n tdb = self.config.dbs.tickets\n tickets = tdb.get_tickets(barcamp_id=self.barcamp._id)\n uids = set([t.user_id for t in tickets])\n participants = list(ub.get_users_by_ids(uids))\n else:\n participants = list(ub.get_users_by_ids(event.participants))\n participants = [{'name': p.fullname, '_id': str(p._id)} for p in participants]\n proposals = []\n for p in self.config.dbs.sessions.find({'barcamp_id': str(self.barcamp_id)}):\n proposals.append({'value': p.title, 'label': '%s (%s)' % (p.title, p.user.fullname), 'description': p.description, 'user_id': p.user_id, 'vote_count': p.vote_count})\n return {'rooms': rooms, 'timeslots': timeslots, 'event': event, 'eid': event._id, 'participants': participants, 'proposals': proposals, 'sessions': sessions}\n<|end_body_0|>\n\n<|body_start_1|>\n event = self.barcamp.get_event(eid)\n data = json.loads(self.request.data)\n event.timetable = {'rooms': data.get('rooms', []), 'timeslots': data.get('timeslots', []), 'sessions': data.get('sessions', {})}\n self.barcamp.events[eid] = event\n self.barcamp.save()\n return {'status': 'ok'}\n<|end_body_1|>\n", "revision_id": "9b45664e46c451b2cbe00bb55583b043e769083d", "skeleton": "<|skeleton|>\nclass SessionBoardData:\n \"\"\"handles all AJAX related session board data\"\"\"\n\n def get(self, slug=None, eid=None):\n \"\"\"return rooms and timeslots\"\"\"\n <|body_0|>\n\n def post(self, slug=None, eid=None):\n \"\"\"store room and timetable data\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SessionBoardData:\n \"\"\"handles all AJAX related session board data\"\"\"\n\n def get(self, slug=None, eid=None):\n \"\"\"return rooms and timeslots\"\"\"\n ub = self.app.module_map.userbase\n event = self.barcamp.get_event(eid)\n rooms = event.timetable.get('rooms', [])\n timeslots = event.timetable.get('timeslots', [])\n sessions = event.timetable.get('sessions', {})\n if self.barcamp.ticketmode_enabled:\n tdb = self.config.dbs.tickets\n tickets = tdb.get_tickets(barcamp_id=self.barcamp._id)\n uids = set([t.user_id for t in tickets])\n participants = list(ub.get_users_by_ids(uids))\n else:\n participants = list(ub.get_users_by_ids(event.participants))\n participants = [{'name': p.fullname, '_id': str(p._id)} for p in participants]\n proposals = []\n for p in self.config.dbs.sessions.find({'barcamp_id': str(self.barcamp_id)}):\n proposals.append({'value': p.title, 'label': '%s (%s)' % (p.title, p.user.fullname), 'description': p.description, 'user_id': p.user_id, 'vote_count': p.vote_count})\n return {'rooms': rooms, 'timeslots': timeslots, 'event': event, 'eid': event._id, 'participants': participants, 'proposals': proposals, 'sessions': sessions}\n\n def post(self, slug=None, eid=None):\n \"\"\"store room and timetable data\"\"\"\n event = self.barcamp.get_event(eid)\n data = json.loads(self.request.data)\n event.timetable = {'rooms': data.get('rooms', []), 'timeslots': data.get('timeslots', []), 'sessions': data.get('sessions', {})}\n self.barcamp.events[eid] = event\n self.barcamp.save()\n return {'status': 'ok'}\n", "source": "the_stack_v2_python_sparse", "source_path": "camper/barcamps/sessionboard.py", "source_repo": "comlounge/camper", "split": "val", "star_events_count": 14}
{"blob_id": "c791596abbd38c58500e87328052720b387ac850", "bodies": ["self.inventory = []\nself.name = 'Mysterious denizen'\nLog.info('new player created')", "result = self.get(thing_name) is not None\nLog.info('thing: %s, result: %s' % (thing_name, result))\nreturn result", "Log.info('thing: %s' % thing_name)\nfor thing in self.inventory:\n if thing.name == thing_name:\n return thing\nreturn None", "Log.info('thing: %s' % thing_name)\nfor thing in self.inventory:\n if thing.name == thing_name:\n self.inventory.remove(thing)\n return", "Log.info('action: %s' % action)\nfor item in self.inventory:\n if item.can_be_source_of(action):\n return item\nreturn None"], "bodies_text": "<|body_start_0|>\n self.inventory = []\n self.name = 'Mysterious denizen'\n Log.info('new player created')\n<|end_body_0|>\n\n<|body_start_1|>\n result = self.get(thing_name) is not None\n Log.info('thing: %s, result: %s' % (thing_name, result))\n return result\n<|end_body_1|>\n\n<|body_start_2|>\n Log.info('thing: %s' % thing_name)\n for thing in self.inventory:\n if thing.name == thing_name:\n return thing\n return None\n<|end_body_2|>\n\n<|body_start_3|>\n Log.info('thing: %s' % thing_name)\n for thing in self.inventory:\n if thing.name == thing_name:\n self.inventory.remove(thing)\n return\n<|end_body_3|>\n\n<|body_start_4|>\n Log.info('action: %s' % action)\n for item in self.inventory:\n if item.can_be_source_of(action):\n return item\n return None\n<|end_body_4|>\n", "class_docstring": "models a player (user) in the game environment", "class_name": "Player", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Player:\n \"\"\"models a player (user) in the game environment\"\"\"\n\n def __init__(self):\n \"\"\"creates a new player\"\"\"\n <|body_0|>\n\n def has(self, thing_name):\n \"\"\"checks if this Player has an item with a given name in their inventory :param thing_name: the name to check :return: True if the item is present in the Player's inventory\"\"\"\n <|body_1|>\n\n def get(self, thing_name):\n \"\"\"gets a given item from a Player's inventory :param thing_name: the item's name :return: the item from the Player's inventory or None if it was not found\"\"\"\n <|body_2|>\n\n def remove_from_inventory(self, thing_name):\n \"\"\"removes the first item with a given name from the Player's inventory :param thing_name: the name of the item to remove :return: None\"\"\"\n <|body_3|>\n\n def item_that_can(self, action):\n \"\"\"finds the first item in a Player's inventory that can perform a given action :param action: the action to perform :return: the first item found that can perform the given action or None if not found\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.inventory = []\n self.name = 'Mysterious denizen'\n Log.info('new player created')\n<|end_body_0|>\n\n<|body_start_1|>\n result = self.get(thing_name) is not None\n Log.info('thing: %s, result: %s' % (thing_name, result))\n return result\n<|end_body_1|>\n\n<|body_start_2|>\n Log.info('thing: %s' % thing_name)\n for thing in self.inventory:\n if thing.name == thing_name:\n return thing\n return None\n<|end_body_2|>\n\n<|body_start_3|>\n Log.info('thing: %s' % thing_name)\n for thing in self.inventory:\n if thing.name == thing_name:\n self.inventory.remove(thing)\n return\n<|end_body_3|>\n\n<|body_start_4|>\n Log.info('action: %s' % action)\n for item in self.inventory:\n if item.can_be_source_of(action):\n return item\n return None\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000439", "length_bytes": 39311, "license_type": "no_license", "methods": [{"docstring": "creates a new player", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "checks if this Player has an item with a given name in their inventory :param thing_name: the name to check :return: True if the item is present in the Player's inventory", "name": "has", "signature": "def has(self, thing_name)"}, {"docstring": "gets a given item from a Player's inventory :param thing_name: the item's name :return: the item from the Player's inventory or None if it was not found", "name": "get", "signature": "def get(self, thing_name)"}, {"docstring": "removes the first item with a given name from the Player's inventory :param thing_name: the name of the item to remove :return: None", "name": "remove_from_inventory", "signature": "def remove_from_inventory(self, thing_name)"}, {"docstring": "finds the first item in a Player's inventory that can perform a given action :param action: the action to perform :return: the first item found that can perform the given action or None if not found", "name": "item_that_can", "signature": "def item_that_can(self, action)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_035774", "prompt": "Implement the Python class `Player` described below.\n\nClass description:\nmodels a player (user) in the game environment\n\nMethod signatures and docstrings:\n- def __init__(self): creates a new player\n- def has(self, thing_name): checks if this Player has an item with a given name in their inventory :param thing_name: the name to check :return: True if the item is present in the Player's inventory\n- def get(self, thing_name): gets a given item from a Player's inventory :param thing_name: the item's name :return: the item from the Player's inventory or None if it was not found\n- def remove_from_inventory(self, thing_name): removes the first item with a given name from the Player's inventory :param thing_name: the name of the item to remove :return: None\n- def item_that_can(self, action): finds the first item in a Player's inventory that can perform a given action :param action: the action to perform :return: the first item found that can perform the given action or None if not found", "prompted_full_text": "Implement the Python class `Player` described below.\n\nClass description:\nmodels a player (user) in the game environment\n\nMethod signatures and docstrings:\n- def __init__(self): creates a new player\n- def has(self, thing_name): checks if this Player has an item with a given name in their inventory :param thing_name: the name to check :return: True if the item is present in the Player's inventory\n- def get(self, thing_name): gets a given item from a Player's inventory :param thing_name: the item's name :return: the item from the Player's inventory or None if it was not found\n- def remove_from_inventory(self, thing_name): removes the first item with a given name from the Player's inventory :param thing_name: the name of the item to remove :return: None\n- def item_that_can(self, action): finds the first item in a Player's inventory that can perform a given action :param action: the action to perform :return: the first item found that can perform the given action or None if not found\n\n<|skeleton|>\nclass Player:\n \"\"\"models a player (user) in the game environment\"\"\"\n\n def __init__(self):\n \"\"\"creates a new player\"\"\"\n <|body_0|>\n\n def has(self, thing_name):\n \"\"\"checks if this Player has an item with a given name in their inventory :param thing_name: the name to check :return: True if the item is present in the Player's inventory\"\"\"\n <|body_1|>\n\n def get(self, thing_name):\n \"\"\"gets a given item from a Player's inventory :param thing_name: the item's name :return: the item from the Player's inventory or None if it was not found\"\"\"\n <|body_2|>\n\n def remove_from_inventory(self, thing_name):\n \"\"\"removes the first item with a given name from the Player's inventory :param thing_name: the name of the item to remove :return: None\"\"\"\n <|body_3|>\n\n def item_that_can(self, action):\n \"\"\"finds the first item in a Player's inventory that can perform a given action :param action: the action to perform :return: the first item found that can perform the given action or None if not found\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.inventory = []\n self.name = 'Mysterious denizen'\n Log.info('new player created')\n<|end_body_0|>\n\n<|body_start_1|>\n result = self.get(thing_name) is not None\n Log.info('thing: %s, result: %s' % (thing_name, result))\n return result\n<|end_body_1|>\n\n<|body_start_2|>\n Log.info('thing: %s' % thing_name)\n for thing in self.inventory:\n if thing.name == thing_name:\n return thing\n return None\n<|end_body_2|>\n\n<|body_start_3|>\n Log.info('thing: %s' % thing_name)\n for thing in self.inventory:\n if thing.name == thing_name:\n self.inventory.remove(thing)\n return\n<|end_body_3|>\n\n<|body_start_4|>\n Log.info('action: %s' % action)\n for item in self.inventory:\n if item.can_be_source_of(action):\n return item\n return None\n<|end_body_4|>\n", "revision_id": "ad9ac095c335fedba5ae294331e1846c036d560a", "skeleton": "<|skeleton|>\nclass Player:\n \"\"\"models a player (user) in the game environment\"\"\"\n\n def __init__(self):\n \"\"\"creates a new player\"\"\"\n <|body_0|>\n\n def has(self, thing_name):\n \"\"\"checks if this Player has an item with a given name in their inventory :param thing_name: the name to check :return: True if the item is present in the Player's inventory\"\"\"\n <|body_1|>\n\n def get(self, thing_name):\n \"\"\"gets a given item from a Player's inventory :param thing_name: the item's name :return: the item from the Player's inventory or None if it was not found\"\"\"\n <|body_2|>\n\n def remove_from_inventory(self, thing_name):\n \"\"\"removes the first item with a given name from the Player's inventory :param thing_name: the name of the item to remove :return: None\"\"\"\n <|body_3|>\n\n def item_that_can(self, action):\n \"\"\"finds the first item in a Player's inventory that can perform a given action :param action: the action to perform :return: the first item found that can perform the given action or None if not found\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Player:\n \"\"\"models a player (user) in the game environment\"\"\"\n\n def __init__(self):\n \"\"\"creates a new player\"\"\"\n self.inventory = []\n self.name = 'Mysterious denizen'\n Log.info('new player created')\n\n def has(self, thing_name):\n \"\"\"checks if this Player has an item with a given name in their inventory :param thing_name: the name to check :return: True if the item is present in the Player's inventory\"\"\"\n result = self.get(thing_name) is not None\n Log.info('thing: %s, result: %s' % (thing_name, result))\n return result\n\n def get(self, thing_name):\n \"\"\"gets a given item from a Player's inventory :param thing_name: the item's name :return: the item from the Player's inventory or None if it was not found\"\"\"\n Log.info('thing: %s' % thing_name)\n for thing in self.inventory:\n if thing.name == thing_name:\n return thing\n return None\n\n def remove_from_inventory(self, thing_name):\n \"\"\"removes the first item with a given name from the Player's inventory :param thing_name: the name of the item to remove :return: None\"\"\"\n Log.info('thing: %s' % thing_name)\n for thing in self.inventory:\n if thing.name == thing_name:\n self.inventory.remove(thing)\n return\n\n def item_that_can(self, action):\n \"\"\"finds the first item in a Player's inventory that can perform a given action :param action: the action to perform :return: the first item found that can perform the given action or None if not found\"\"\"\n Log.info('action: %s' % action)\n for item in self.inventory:\n if item.can_be_source_of(action):\n return item\n return None\n", "source": "the_stack_v2_python_sparse", "source_path": "lab/Lab12.py", "source_repo": "csumb-serious-business/18-FallB-205-Lab11", "split": "val", "star_events_count": 0}
{"blob_id": "43fe3211ab8df19c10531980f8434b516fec1a4e", "bodies": ["super().__init__()\nself.n_heads = n_heads\nself.down_kv = down_kv\nw_norm = w_norm_dispatch(w_norm)\nself.q_proj = w_norm(nn.Conv1d(C_in_q, C_qk, 1))\nself.k_proj = w_norm(nn.Conv1d(C_in_kv, C_qk, 1))\nself.v_proj = w_norm(nn.Conv1d(C_in_kv, C_v, 1))\nself.out = w_norm(nn.Conv2d(C_v, C_v, 1))\nif scale:\n self.scale = 1.0 / C_qk ** 0.5\nif rel_pos_size:\n C_h_qk = C_qk // n_heads\n self.rel_pos = RelativePositionalEmbedding2d(C_h_qk, rel_pos_size, rel_pos_size, down_kv=down_kv)", "B, C, H, W = x.shape\nflat_x = x.flatten(start_dim=2)\nif not self.down_kv:\n flat_y = y.flatten(start_dim=2)\nelse:\n y_down = F.avg_pool2d(y, 2)\n flat_y = y_down.flatten(2)\nquery = self.q_proj(flat_x)\nkey = self.k_proj(flat_y)\nvalue = self.v_proj(flat_y)\nquery = split_dim(query, 1, self.n_heads)\nkey = split_dim(key, 1, self.n_heads)\nvalue = split_dim(value, 1, self.n_heads)\nattn_score = torch.einsum('bhcq,bhck->bhqk', query, key)\nif hasattr(self, 'rel_pos'):\n attn_score += self.rel_pos(query)\nif hasattr(self, 'scale'):\n attn_score *= self.scale\nattn_w = F.softmax(attn_score, dim=-1)\nattn_out = torch.einsum('bhqk,bhck->bhcq', attn_w, value).reshape(B, C, H, W)\nout = self.out(attn_out)\nreturn out"], "bodies_text": "<|body_start_0|>\n super().__init__()\n self.n_heads = n_heads\n self.down_kv = down_kv\n w_norm = w_norm_dispatch(w_norm)\n self.q_proj = w_norm(nn.Conv1d(C_in_q, C_qk, 1))\n self.k_proj = w_norm(nn.Conv1d(C_in_kv, C_qk, 1))\n self.v_proj = w_norm(nn.Conv1d(C_in_kv, C_v, 1))\n self.out = w_norm(nn.Conv2d(C_v, C_v, 1))\n if scale:\n self.scale = 1.0 / C_qk ** 0.5\n if rel_pos_size:\n C_h_qk = C_qk // n_heads\n self.rel_pos = RelativePositionalEmbedding2d(C_h_qk, rel_pos_size, rel_pos_size, down_kv=down_kv)\n<|end_body_0|>\n\n<|body_start_1|>\n B, C, H, W = x.shape\n flat_x = x.flatten(start_dim=2)\n if not self.down_kv:\n flat_y = y.flatten(start_dim=2)\n else:\n y_down = F.avg_pool2d(y, 2)\n flat_y = y_down.flatten(2)\n query = self.q_proj(flat_x)\n key = self.k_proj(flat_y)\n value = self.v_proj(flat_y)\n query = split_dim(query, 1, self.n_heads)\n key = split_dim(key, 1, self.n_heads)\n value = split_dim(value, 1, self.n_heads)\n attn_score = torch.einsum('bhcq,bhck->bhqk', query, key)\n if hasattr(self, 'rel_pos'):\n attn_score += self.rel_pos(query)\n if hasattr(self, 'scale'):\n attn_score *= self.scale\n attn_w = F.softmax(attn_score, dim=-1)\n attn_out = torch.einsum('bhqk,bhck->bhcq', attn_w, value).reshape(B, C, H, W)\n out = self.out(attn_out)\n return out\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Attention", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Attention:\n\n def __init__(self, C_in_q, C_in_kv, C_qk, C_v, w_norm='none', scale=False, n_heads=1, down_kv=False, rel_pos_size=None):\n \"\"\"Args: C_in_q: query source (encoder feature x) C_in_kv: key/value source (decoder feature y) C_qk: inner query/key dim, which should be same C_v: inner value dim, which same as output dim down_kv: Area attention for lightweight self-attention w/ mean pooling. rel_pos_size: height & width for relative positional embedding. If None or 0 is given, do not use relative positional embedding.\"\"\"\n <|body_0|>\n\n def forward(self, x, y):\n \"\"\"Attend from x (decoder) to y (encoder) Args: x: decoder feature y: encoder feature\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.n_heads = n_heads\n self.down_kv = down_kv\n w_norm = w_norm_dispatch(w_norm)\n self.q_proj = w_norm(nn.Conv1d(C_in_q, C_qk, 1))\n self.k_proj = w_norm(nn.Conv1d(C_in_kv, C_qk, 1))\n self.v_proj = w_norm(nn.Conv1d(C_in_kv, C_v, 1))\n self.out = w_norm(nn.Conv2d(C_v, C_v, 1))\n if scale:\n self.scale = 1.0 / C_qk ** 0.5\n if rel_pos_size:\n C_h_qk = C_qk // n_heads\n self.rel_pos = RelativePositionalEmbedding2d(C_h_qk, rel_pos_size, rel_pos_size, down_kv=down_kv)\n<|end_body_0|>\n\n<|body_start_1|>\n B, C, H, W = x.shape\n flat_x = x.flatten(start_dim=2)\n if not self.down_kv:\n flat_y = y.flatten(start_dim=2)\n else:\n y_down = F.avg_pool2d(y, 2)\n flat_y = y_down.flatten(2)\n query = self.q_proj(flat_x)\n key = self.k_proj(flat_y)\n value = self.v_proj(flat_y)\n query = split_dim(query, 1, self.n_heads)\n key = split_dim(key, 1, self.n_heads)\n value = split_dim(value, 1, self.n_heads)\n attn_score = torch.einsum('bhcq,bhck->bhqk', query, key)\n if hasattr(self, 'rel_pos'):\n attn_score += self.rel_pos(query)\n if hasattr(self, 'scale'):\n attn_score *= self.scale\n attn_w = F.softmax(attn_score, dim=-1)\n attn_out = torch.einsum('bhqk,bhck->bhcq', attn_w, value).reshape(B, C, H, W)\n out = self.out(attn_out)\n return out\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000440", "length_bytes": 37491, "license_type": "no_license", "methods": [{"docstring": "Args: C_in_q: query source (encoder feature x) C_in_kv: key/value source (decoder feature y) C_qk: inner query/key dim, which should be same C_v: inner value dim, which same as output dim down_kv: Area attention for lightweight self-attention w/ mean pooling. rel_pos_size: height & width for relative positional embedding. If None or 0 is given, do not use relative positional embedding.", "name": "__init__", "signature": "def __init__(self, C_in_q, C_in_kv, C_qk, C_v, w_norm='none', scale=False, n_heads=1, down_kv=False, rel_pos_size=None)"}, {"docstring": "Attend from x (decoder) to y (encoder) Args: x: decoder feature y: encoder feature", "name": "forward", "signature": "def forward(self, x, y)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_028760", "prompt": "Implement the Python class `Attention` described below.\n\nClass description:\nImplement the Attention class.\n\nMethod signatures and docstrings:\n- def __init__(self, C_in_q, C_in_kv, C_qk, C_v, w_norm='none', scale=False, n_heads=1, down_kv=False, rel_pos_size=None): Args: C_in_q: query source (encoder feature x) C_in_kv: key/value source (decoder feature y) C_qk: inner query/key dim, which should be same C_v: inner value dim, which same as output dim down_kv: Area attention for lightweight self-attention w/ mean pooling. rel_pos_size: height & width for relative positional embedding. If None or 0 is given, do not use relative positional embedding.\n- def forward(self, x, y): Attend from x (decoder) to y (encoder) Args: x: decoder feature y: encoder feature", "prompted_full_text": "Implement the Python class `Attention` described below.\n\nClass description:\nImplement the Attention class.\n\nMethod signatures and docstrings:\n- def __init__(self, C_in_q, C_in_kv, C_qk, C_v, w_norm='none', scale=False, n_heads=1, down_kv=False, rel_pos_size=None): Args: C_in_q: query source (encoder feature x) C_in_kv: key/value source (decoder feature y) C_qk: inner query/key dim, which should be same C_v: inner value dim, which same as output dim down_kv: Area attention for lightweight self-attention w/ mean pooling. rel_pos_size: height & width for relative positional embedding. If None or 0 is given, do not use relative positional embedding.\n- def forward(self, x, y): Attend from x (decoder) to y (encoder) Args: x: decoder feature y: encoder feature\n\n<|skeleton|>\nclass Attention:\n\n def __init__(self, C_in_q, C_in_kv, C_qk, C_v, w_norm='none', scale=False, n_heads=1, down_kv=False, rel_pos_size=None):\n \"\"\"Args: C_in_q: query source (encoder feature x) C_in_kv: key/value source (decoder feature y) C_qk: inner query/key dim, which should be same C_v: inner value dim, which same as output dim down_kv: Area attention for lightweight self-attention w/ mean pooling. rel_pos_size: height & width for relative positional embedding. If None or 0 is given, do not use relative positional embedding.\"\"\"\n <|body_0|>\n\n def forward(self, x, y):\n \"\"\"Attend from x (decoder) to y (encoder) Args: x: decoder feature y: encoder feature\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.n_heads = n_heads\n self.down_kv = down_kv\n w_norm = w_norm_dispatch(w_norm)\n self.q_proj = w_norm(nn.Conv1d(C_in_q, C_qk, 1))\n self.k_proj = w_norm(nn.Conv1d(C_in_kv, C_qk, 1))\n self.v_proj = w_norm(nn.Conv1d(C_in_kv, C_v, 1))\n self.out = w_norm(nn.Conv2d(C_v, C_v, 1))\n if scale:\n self.scale = 1.0 / C_qk ** 0.5\n if rel_pos_size:\n C_h_qk = C_qk // n_heads\n self.rel_pos = RelativePositionalEmbedding2d(C_h_qk, rel_pos_size, rel_pos_size, down_kv=down_kv)\n<|end_body_0|>\n\n<|body_start_1|>\n B, C, H, W = x.shape\n flat_x = x.flatten(start_dim=2)\n if not self.down_kv:\n flat_y = y.flatten(start_dim=2)\n else:\n y_down = F.avg_pool2d(y, 2)\n flat_y = y_down.flatten(2)\n query = self.q_proj(flat_x)\n key = self.k_proj(flat_y)\n value = self.v_proj(flat_y)\n query = split_dim(query, 1, self.n_heads)\n key = split_dim(key, 1, self.n_heads)\n value = split_dim(value, 1, self.n_heads)\n attn_score = torch.einsum('bhcq,bhck->bhqk', query, key)\n if hasattr(self, 'rel_pos'):\n attn_score += self.rel_pos(query)\n if hasattr(self, 'scale'):\n attn_score *= self.scale\n attn_w = F.softmax(attn_score, dim=-1)\n attn_out = torch.einsum('bhqk,bhck->bhcq', attn_w, value).reshape(B, C, H, W)\n out = self.out(attn_out)\n return out\n<|end_body_1|>\n", "revision_id": "7e55a422588c1d1e00f35a3d3a3ff896cce59e18", "skeleton": "<|skeleton|>\nclass Attention:\n\n def __init__(self, C_in_q, C_in_kv, C_qk, C_v, w_norm='none', scale=False, n_heads=1, down_kv=False, rel_pos_size=None):\n \"\"\"Args: C_in_q: query source (encoder feature x) C_in_kv: key/value source (decoder feature y) C_qk: inner query/key dim, which should be same C_v: inner value dim, which same as output dim down_kv: Area attention for lightweight self-attention w/ mean pooling. rel_pos_size: height & width for relative positional embedding. If None or 0 is given, do not use relative positional embedding.\"\"\"\n <|body_0|>\n\n def forward(self, x, y):\n \"\"\"Attend from x (decoder) to y (encoder) Args: x: decoder feature y: encoder feature\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Attention:\n def __init__(self, C_in_q, C_in_kv, C_qk, C_v, w_norm='none', scale=False, n_heads=1, down_kv=False, rel_pos_size=None):\n \"\"\"Args: C_in_q: query source (encoder feature x) C_in_kv: key/value source (decoder feature y) C_qk: inner query/key dim, which should be same C_v: inner value dim, which same as output dim down_kv: Area attention for lightweight self-attention w/ mean pooling. rel_pos_size: height & width for relative positional embedding. If None or 0 is given, do not use relative positional embedding.\"\"\"\n super().__init__()\n self.n_heads = n_heads\n self.down_kv = down_kv\n w_norm = w_norm_dispatch(w_norm)\n self.q_proj = w_norm(nn.Conv1d(C_in_q, C_qk, 1))\n self.k_proj = w_norm(nn.Conv1d(C_in_kv, C_qk, 1))\n self.v_proj = w_norm(nn.Conv1d(C_in_kv, C_v, 1))\n self.out = w_norm(nn.Conv2d(C_v, C_v, 1))\n if scale:\n self.scale = 1.0 / C_qk ** 0.5\n if rel_pos_size:\n C_h_qk = C_qk // n_heads\n self.rel_pos = RelativePositionalEmbedding2d(C_h_qk, rel_pos_size, rel_pos_size, down_kv=down_kv)\n\n def forward(self, x, y):\n \"\"\"Attend from x (decoder) to y (encoder) Args: x: decoder feature y: encoder feature\"\"\"\n B, C, H, W = x.shape\n flat_x = x.flatten(start_dim=2)\n if not self.down_kv:\n flat_y = y.flatten(start_dim=2)\n else:\n y_down = F.avg_pool2d(y, 2)\n flat_y = y_down.flatten(2)\n query = self.q_proj(flat_x)\n key = self.k_proj(flat_y)\n value = self.v_proj(flat_y)\n query = split_dim(query, 1, self.n_heads)\n key = split_dim(key, 1, self.n_heads)\n value = split_dim(value, 1, self.n_heads)\n attn_score = torch.einsum('bhcq,bhck->bhqk', query, key)\n if hasattr(self, 'rel_pos'):\n attn_score += self.rel_pos(query)\n if hasattr(self, 'scale'):\n attn_score *= self.scale\n attn_w = F.softmax(attn_score, dim=-1)\n attn_out = torch.einsum('bhqk,bhck->bhcq', attn_w, value).reshape(B, C, H, W)\n out = self.out(attn_out)\n return out\n", "source": "the_stack_v2_python_sparse", "source_path": "generated/test_clovaai_dmfont.py", "source_repo": "jansel/pytorch-jit-paritybench", "split": "val", "star_events_count": 35}
{"blob_id": "d5a85c5d6a922538b80f0018b250c4bf2267f7d5", "bodies": ["counts = [0] * len(nums)\n\ndef mergesort(pairs):\n if len(pairs) < 2:\n return pairs\n mid = len(pairs) // 2\n left = mergesort(pairs[:mid])\n right = mergesort(pairs[mid:])\n i = j = 0\n results = []\n while i < len(left) or j < len(right):\n if i >= len(left) or (j < len(right) and right[j][1] < left[i][1]):\n results.append(right[j])\n j += 1\n else:\n results.append(left[i])\n counts[left[i][0]] += j\n i += 1\n return results\nmergesort(list(enumerate(nums)))\nreturn counts", "def mergeSort(enum):\n half = len(enum) / 2\n if half:\n left, right = (mergeSort(enum[:half]), mergeSort(enum[half:]))\n for i in range(len(enum))[::-1]:\n if not right or (left and left[-1][1] > right[-1][1]):\n smaller[left[-1][0]] += len(right)\n enum[i] = left.pop()\n else:\n enum[i] = right.pop()\n return enum\nsmaller = [0] * len(nums)\nmergeSort(list(enumerate(nums)))\nreturn smaller"], "bodies_text": "<|body_start_0|>\n counts = [0] * len(nums)\n\n def mergesort(pairs):\n if len(pairs) < 2:\n return pairs\n mid = len(pairs) // 2\n left = mergesort(pairs[:mid])\n right = mergesort(pairs[mid:])\n i = j = 0\n results = []\n while i < len(left) or j < len(right):\n if i >= len(left) or (j < len(right) and right[j][1] < left[i][1]):\n results.append(right[j])\n j += 1\n else:\n results.append(left[i])\n counts[left[i][0]] += j\n i += 1\n return results\n mergesort(list(enumerate(nums)))\n return counts\n<|end_body_0|>\n\n<|body_start_1|>\n def mergeSort(enum):\n half = len(enum) / 2\n if half:\n left, right = (mergeSort(enum[:half]), mergeSort(enum[half:]))\n for i in range(len(enum))[::-1]:\n if not right or (left and left[-1][1] > right[-1][1]):\n smaller[left[-1][0]] += len(right)\n enum[i] = left.pop()\n else:\n enum[i] = right.pop()\n return enum\n smaller = [0] * len(nums)\n mergeSort(list(enumerate(nums)))\n return smaller\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def countSmaller_1(self, nums):\n \"\"\":type nums: List[int] :rtype: List[int]\"\"\"\n <|body_0|>\n\n def countSmaller_2(self, nums):\n \"\"\":type nums: List[int] :rtype: List[int]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n counts = [0] * len(nums)\n\n def mergesort(pairs):\n if len(pairs) < 2:\n return pairs\n mid = len(pairs) // 2\n left = mergesort(pairs[:mid])\n right = mergesort(pairs[mid:])\n i = j = 0\n results = []\n while i < len(left) or j < len(right):\n if i >= len(left) or (j < len(right) and right[j][1] < left[i][1]):\n results.append(right[j])\n j += 1\n else:\n results.append(left[i])\n counts[left[i][0]] += j\n i += 1\n return results\n mergesort(list(enumerate(nums)))\n return counts\n<|end_body_0|>\n\n<|body_start_1|>\n def mergeSort(enum):\n half = len(enum) / 2\n if half:\n left, right = (mergeSort(enum[:half]), mergeSort(enum[half:]))\n for i in range(len(enum))[::-1]:\n if not right or (left and left[-1][1] > right[-1][1]):\n smaller[left[-1][0]] += len(right)\n enum[i] = left.pop()\n else:\n enum[i] = right.pop()\n return enum\n smaller = [0] * len(nums)\n mergeSort(list(enumerate(nums)))\n return smaller\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000441", "length_bytes": 3601, "license_type": "no_license", "methods": [{"docstring": ":type nums: List[int] :rtype: List[int]", "name": "countSmaller_1", "signature": "def countSmaller_1(self, nums)"}, {"docstring": ":type nums: List[int] :rtype: List[int]", "name": "countSmaller_2", "signature": "def countSmaller_2(self, nums)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def countSmaller_1(self, nums): :type nums: List[int] :rtype: List[int]\n- def countSmaller_2(self, nums): :type nums: List[int] :rtype: List[int]", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def countSmaller_1(self, nums): :type nums: List[int] :rtype: List[int]\n- def countSmaller_2(self, nums): :type nums: List[int] :rtype: List[int]\n\n<|skeleton|>\nclass Solution:\n\n def countSmaller_1(self, nums):\n \"\"\":type nums: List[int] :rtype: List[int]\"\"\"\n <|body_0|>\n\n def countSmaller_2(self, nums):\n \"\"\":type nums: List[int] :rtype: List[int]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n counts = [0] * len(nums)\n\n def mergesort(pairs):\n if len(pairs) < 2:\n return pairs\n mid = len(pairs) // 2\n left = mergesort(pairs[:mid])\n right = mergesort(pairs[mid:])\n i = j = 0\n results = []\n while i < len(left) or j < len(right):\n if i >= len(left) or (j < len(right) and right[j][1] < left[i][1]):\n results.append(right[j])\n j += 1\n else:\n results.append(left[i])\n counts[left[i][0]] += j\n i += 1\n return results\n mergesort(list(enumerate(nums)))\n return counts\n<|end_body_0|>\n\n<|body_start_1|>\n def mergeSort(enum):\n half = len(enum) / 2\n if half:\n left, right = (mergeSort(enum[:half]), mergeSort(enum[half:]))\n for i in range(len(enum))[::-1]:\n if not right or (left and left[-1][1] > right[-1][1]):\n smaller[left[-1][0]] += len(right)\n enum[i] = left.pop()\n else:\n enum[i] = right.pop()\n return enum\n smaller = [0] * len(nums)\n mergeSort(list(enumerate(nums)))\n return smaller\n<|end_body_1|>\n", "revision_id": "bf98c8fa31043a45b3d21cfe78d4e08f9cac9de6", "skeleton": "<|skeleton|>\nclass Solution:\n\n def countSmaller_1(self, nums):\n \"\"\":type nums: List[int] :rtype: List[int]\"\"\"\n <|body_0|>\n\n def countSmaller_2(self, nums):\n \"\"\":type nums: List[int] :rtype: List[int]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def countSmaller_1(self, nums):\n \"\"\":type nums: List[int] :rtype: List[int]\"\"\"\n counts = [0] * len(nums)\n\n def mergesort(pairs):\n if len(pairs) < 2:\n return pairs\n mid = len(pairs) // 2\n left = mergesort(pairs[:mid])\n right = mergesort(pairs[mid:])\n i = j = 0\n results = []\n while i < len(left) or j < len(right):\n if i >= len(left) or (j < len(right) and right[j][1] < left[i][1]):\n results.append(right[j])\n j += 1\n else:\n results.append(left[i])\n counts[left[i][0]] += j\n i += 1\n return results\n mergesort(list(enumerate(nums)))\n return counts\n\n def countSmaller_2(self, nums):\n \"\"\":type nums: List[int] :rtype: List[int]\"\"\"\n def mergeSort(enum):\n half = len(enum) / 2\n if half:\n left, right = (mergeSort(enum[:half]), mergeSort(enum[half:]))\n for i in range(len(enum))[::-1]:\n if not right or (left and left[-1][1] > right[-1][1]):\n smaller[left[-1][0]] += len(right)\n enum[i] = left.pop()\n else:\n enum[i] = right.pop()\n return enum\n smaller = [0] * len(nums)\n mergeSort(list(enumerate(nums)))\n return smaller\n", "source": "the_stack_v2_python_sparse", "source_path": "binary_search_tree/315_count_of_smaller_numbers_after_self.py", "source_repo": "mistrydarshan99/Leetcode-3", "split": "val", "star_events_count": 0}
{"blob_id": "809dbdaf35c9f1d9c579f7c054c3957ee204aa1e", "bodies": ["output = [0] * len(nums)\nfor i in range(len(nums)):\n j = 1\n news = nums[:i] + nums[i + 1:]\n for k in news:\n j *= k\n output[i] = j\nreturn output", "length = len(nums)\nL, R, answer = ([0] * length, [0] * length, [0] * length)\nL[0] = 1\nfor i in range(1, length):\n L[i] = nums[i - 1] * L[i - 1]\nR[length - 1] = 1\nfor i in reversed(range(length - 1)):\n R[i] = nums[i + 1] * R[i + 1]\nfor i in range(length):\n answer[i] = L[i] * R[i]\nreturn answer", "length = len(nums)\nanswer = [0] * length\nanswer[0] = 1\nfor i in range(1, length):\n answer[i] = nums[i - 1] * answer[i - 1]\nR = 1\nfor i in reversed(range(length)):\n answer[i] = answer[i] * R\n R *= nums[i]\nreturn answer", "left = 1\nright = 1\nresult = [1] * len(nums)\nfor i in range(len(nums)):\n result[i] *= left\n left *= nums[i]\nfor i in range(len(nums) - 1, -1, -1):\n result[i] *= right\n right *= nums[i]\nreturn result"], "bodies_text": "<|body_start_0|>\n output = [0] * len(nums)\n for i in range(len(nums)):\n j = 1\n news = nums[:i] + nums[i + 1:]\n for k in news:\n j *= k\n output[i] = j\n return output\n<|end_body_0|>\n\n<|body_start_1|>\n length = len(nums)\n L, R, answer = ([0] * length, [0] * length, [0] * length)\n L[0] = 1\n for i in range(1, length):\n L[i] = nums[i - 1] * L[i - 1]\n R[length - 1] = 1\n for i in reversed(range(length - 1)):\n R[i] = nums[i + 1] * R[i + 1]\n for i in range(length):\n answer[i] = L[i] * R[i]\n return answer\n<|end_body_1|>\n\n<|body_start_2|>\n length = len(nums)\n answer = [0] * length\n answer[0] = 1\n for i in range(1, length):\n answer[i] = nums[i - 1] * answer[i - 1]\n R = 1\n for i in reversed(range(length)):\n answer[i] = answer[i] * R\n R *= nums[i]\n return answer\n<|end_body_2|>\n\n<|body_start_3|>\n left = 1\n right = 1\n result = [1] * len(nums)\n for i in range(len(nums)):\n result[i] *= left\n left *= nums[i]\n for i in range(len(nums) - 1, -1, -1):\n result[i] *= right\n right *= nums[i]\n return result\n<|end_body_3|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def productExceptSelf_0(self, nums):\n \"\"\":type nums: List[int] :rtype: List[int]\"\"\"\n <|body_0|>\n\n def productExceptSelf_1(self, nums):\n \"\"\":type nums: List[int] :rtype: List[int]\"\"\"\n <|body_1|>\n\n def productExceptSelf_2(self, nums):\n \"\"\":type nums: List[int] :rtype: List[int]\"\"\"\n <|body_2|>\n\n def productExceptSelf(self, nums):\n \"\"\":type nums: List[int] :rtype: List[int]\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n output = [0] * len(nums)\n for i in range(len(nums)):\n j = 1\n news = nums[:i] + nums[i + 1:]\n for k in news:\n j *= k\n output[i] = j\n return output\n<|end_body_0|>\n\n<|body_start_1|>\n length = len(nums)\n L, R, answer = ([0] * length, [0] * length, [0] * length)\n L[0] = 1\n for i in range(1, length):\n L[i] = nums[i - 1] * L[i - 1]\n R[length - 1] = 1\n for i in reversed(range(length - 1)):\n R[i] = nums[i + 1] * R[i + 1]\n for i in range(length):\n answer[i] = L[i] * R[i]\n return answer\n<|end_body_1|>\n\n<|body_start_2|>\n length = len(nums)\n answer = [0] * length\n answer[0] = 1\n for i in range(1, length):\n answer[i] = nums[i - 1] * answer[i - 1]\n R = 1\n for i in reversed(range(length)):\n answer[i] = answer[i] * R\n R *= nums[i]\n return answer\n<|end_body_2|>\n\n<|body_start_3|>\n left = 1\n right = 1\n result = [1] * len(nums)\n 
for i in range(len(nums)):\n result[i] *= left\n left *= nums[i]\n for i in range(len(nums) - 1, -1, -1):\n result[i] *= right\n right *= nums[i]\n return result\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000442", "length_bytes": 3634, "license_type": "no_license", "methods": [{"docstring": ":type nums: List[int] :rtype: List[int]", "name": "productExceptSelf_0", "signature": "def productExceptSelf_0(self, nums)"}, {"docstring": ":type nums: List[int] :rtype: List[int]", "name": "productExceptSelf_1", "signature": "def productExceptSelf_1(self, nums)"}, {"docstring": ":type nums: List[int] :rtype: List[int]", "name": "productExceptSelf_2", "signature": "def productExceptSelf_2(self, nums)"}, {"docstring": ":type nums: List[int] :rtype: List[int]", "name": "productExceptSelf", "signature": "def productExceptSelf(self, nums)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_028906", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def productExceptSelf_0(self, nums): :type nums: List[int] :rtype: List[int]\n- def productExceptSelf_1(self, nums): :type nums: List[int] :rtype: List[int]\n- def productExceptSelf_2(self, nums): :type nums: List[int] :rtype: List[int]\n- def productExceptSelf(self, nums): :type nums: List[int] :rtype: List[int]", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def productExceptSelf_0(self, nums): :type nums: List[int] :rtype: List[int]\n- def productExceptSelf_1(self, nums): :type nums: List[int] :rtype: List[int]\n- def productExceptSelf_2(self, nums): :type nums: List[int] :rtype: List[int]\n- def productExceptSelf(self, nums): :type nums: List[int] :rtype: List[int]\n\n<|skeleton|>\nclass Solution:\n\n def productExceptSelf_0(self, nums):\n \"\"\":type nums: List[int] :rtype: List[int]\"\"\"\n <|body_0|>\n\n def productExceptSelf_1(self, nums):\n \"\"\":type nums: List[int] :rtype: List[int]\"\"\"\n <|body_1|>\n\n def productExceptSelf_2(self, nums):\n \"\"\":type nums: List[int] :rtype: List[int]\"\"\"\n <|body_2|>\n\n def productExceptSelf(self, nums):\n \"\"\":type nums: List[int] :rtype: List[int]\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n output = [0] * len(nums)\n for i in range(len(nums)):\n j = 1\n news = nums[:i] + nums[i + 1:]\n for k in news:\n j *= k\n output[i] = j\n return output\n<|end_body_0|>\n\n<|body_start_1|>\n length = len(nums)\n L, R, answer = ([0] * length, [0] * length, [0] * length)\n L[0] = 1\n for i in range(1, length):\n L[i] = nums[i - 1] * L[i - 1]\n R[length - 1] = 1\n for i in reversed(range(length - 1)):\n R[i] = nums[i + 1] * R[i + 1]\n for i in range(length):\n answer[i] = L[i] * R[i]\n return answer\n<|end_body_1|>\n\n<|body_start_2|>\n length = len(nums)\n answer = [0] * length\n answer[0] = 1\n for i in range(1, length):\n answer[i] = nums[i - 1] * answer[i - 1]\n R = 1\n for i in reversed(range(length)):\n answer[i] = answer[i] * R\n R *= nums[i]\n return answer\n<|end_body_2|>\n\n<|body_start_3|>\n left = 1\n right = 1\n result = [1] * len(nums)\n for i in range(len(nums)):\n result[i] *= left\n left *= nums[i]\n for i in range(len(nums) - 1, -1, -1):\n result[i] *= right\n right *= nums[i]\n return result\n<|end_body_3|>\n", "revision_id": "3f7b2ea959308eb80f4c65be35aaeed666570f80", "skeleton": "<|skeleton|>\nclass Solution:\n\n def 
productExceptSelf_0(self, nums):\n \"\"\":type nums: List[int] :rtype: List[int]\"\"\"\n <|body_0|>\n\n def productExceptSelf_1(self, nums):\n \"\"\":type nums: List[int] :rtype: List[int]\"\"\"\n <|body_1|>\n\n def productExceptSelf_2(self, nums):\n \"\"\":type nums: List[int] :rtype: List[int]\"\"\"\n <|body_2|>\n\n def productExceptSelf(self, nums):\n \"\"\":type nums: List[int] :rtype: List[int]\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def productExceptSelf_0(self, nums):\n \"\"\":type nums: List[int] :rtype: List[int]\"\"\"\n output = [0] * len(nums)\n for i in range(len(nums)):\n j = 1\n news = nums[:i] + nums[i + 1:]\n for k in news:\n j *= k\n output[i] = j\n return output\n\n def productExceptSelf_1(self, nums):\n \"\"\":type nums: List[int] :rtype: List[int]\"\"\"\n length = len(nums)\n L, R, answer = ([0] * length, [0] * length, [0] * length)\n L[0] = 1\n for i in range(1, length):\n L[i] = nums[i - 1] * L[i - 1]\n R[length - 1] = 1\n for i in reversed(range(length - 1)):\n R[i] = nums[i + 1] * R[i + 1]\n for i in range(length):\n answer[i] = L[i] * R[i]\n return answer\n\n def productExceptSelf_2(self, nums):\n \"\"\":type nums: List[int] :rtype: List[int]\"\"\"\n length = len(nums)\n answer = [0] * length\n answer[0] = 1\n for i in range(1, length):\n answer[i] = nums[i - 1] * answer[i - 1]\n R = 1\n for i in reversed(range(length)):\n answer[i] = answer[i] * R\n R *= nums[i]\n return answer\n\n def productExceptSelf(self, nums):\n \"\"\":type nums: List[int] :rtype: List[int]\"\"\"\n left = 1\n right = 1\n result = [1] * len(nums)\n for i in range(len(nums)):\n result[i] *= left\n left *= nums[i]\n for i in range(len(nums) - 1, -1, -1):\n result[i] *= right\n right *= nums[i]\n return result\n", "source": "the_stack_v2_python_sparse", "source_path": "238. 除自身以外数组的乘积.py", "source_repo": "dxc19951001/Everyday_LeetCode", "split": "val", "star_events_count": 1}
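The record's final `productExceptSelf` body computes each output in two passes with O(1) extra bookkeeping: a left-to-right pass folds in the product of everything before index i, then a right-to-left pass folds in everything after it. A minimal standalone sketch of that technique (the function name and test values here are illustrative, not taken from the record):

```python
from typing import List

def product_except_self(nums: List[int]) -> List[int]:
    # result[i] first accumulates the product of everything left of i;
    # a second, right-to-left pass multiplies in everything right of i.
    result = [1] * len(nums)
    left = 1
    for i in range(len(nums)):
        result[i] *= left
        left *= nums[i]
    right = 1
    for i in range(len(nums) - 1, -1, -1):
        result[i] *= right
        right *= nums[i]
    return result

if __name__ == '__main__':
    assert product_except_self([1, 2, 3, 4]) == [24, 12, 8, 6]
    assert product_except_self([2, 0, 5]) == [0, 10, 0]  # zeros handled without division
```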
{"blob_id": "ccf4e66485ac0bfb74c1573c0f5409d687eb1c2b", "bodies": ["obstacles = []\nobstacles.append(Obstacle(Position(400, 400)))\nobstacles.append(Obstacle(Position(690, 380)))\nobstacles.append(Obstacle(Position(490, 500)))\nobstacles.append(Obstacle(Position(300, 200)))\nreturn obstacles", "enemies = []\nenemies.append(CreationFactory().create_dummy_enemy(Position(420, 400)))\nenemies.append(CreationFactory().create_dummy_enemy(Position(700, 380)))\nenemies.append(CreationFactory().create_dummy_enemy(Position(320, 750)))\nenemies.append(CreationFactory().create_dummy_enemy(Position(560, 150)))\nenemies.append(CreationFactory().create_dummy_enemy(Position(750, 220)))\nenemies.append(CreationFactory().create_dummy_enemy(Position(750, 700)))\nenemies.append(CreationFactory().create_dummy_enemy(Position(500, 500)))\nenemies.append(CreationFactory().create_dummy_enemy(Position(820, 300)))\nreturn enemies", "root_dir = Config()['root_dir']\nfile = os.path.join(root_dir, 'resources', 'sounds', 'song1.mp3')\npygame.mixer.music.load(file)\npygame.mixer.music.play()"], "bodies_text": "<|body_start_0|>\n obstacles = []\n obstacles.append(Obstacle(Position(400, 400)))\n obstacles.append(Obstacle(Position(690, 380)))\n obstacles.append(Obstacle(Position(490, 500)))\n obstacles.append(Obstacle(Position(300, 200)))\n return obstacles\n<|end_body_0|>\n\n<|body_start_1|>\n enemies = []\n enemies.append(CreationFactory().create_dummy_enemy(Position(420, 400)))\n enemies.append(CreationFactory().create_dummy_enemy(Position(700, 380)))\n enemies.append(CreationFactory().create_dummy_enemy(Position(320, 750)))\n enemies.append(CreationFactory().create_dummy_enemy(Position(560, 150)))\n enemies.append(CreationFactory().create_dummy_enemy(Position(750, 220)))\n enemies.append(CreationFactory().create_dummy_enemy(Position(750, 700)))\n enemies.append(CreationFactory().create_dummy_enemy(Position(500, 500)))\n enemies.append(CreationFactory().create_dummy_enemy(Position(820, 300)))\n return enemies\n<|end_body_1|>\n\n<|body_start_2|>\n root_dir = Config()['root_dir']\n file = os.path.join(root_dir, 'resources', 'sounds', 'song1.mp3')\n pygame.mixer.music.load(file)\n pygame.mixer.music.play()\n<|end_body_2|>\n", "class_docstring": "Level creator for level 1", "class_name": "Level1Creator", "detected_licenses": ["CC0-1.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Level1Creator:\n \"\"\"Level creator for level 1\"\"\"\n\n def create_obstacles(self) -> list:\n \"\"\"Create all obstacles for level 1 :return: list of obstacles\"\"\"\n <|body_0|>\n\n def create_enemies(self) -> list:\n \"\"\"Create all enemies for level 1 :return: list of enemies\"\"\"\n <|body_1|>\n\n def music(self) -> None:\n \"\"\"Linux music for level 1\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n obstacles = []\n obstacles.append(Obstacle(Position(400, 400)))\n obstacles.append(Obstacle(Position(690, 380)))\n obstacles.append(Obstacle(Position(490, 500)))\n obstacles.append(Obstacle(Position(300, 200)))\n return obstacles\n<|end_body_0|>\n\n<|body_start_1|>\n enemies = []\n enemies.append(CreationFactory().create_dummy_enemy(Position(420, 400)))\n enemies.append(CreationFactory().create_dummy_enemy(Position(700, 380)))\n enemies.append(CreationFactory().create_dummy_enemy(Position(320, 750)))\n enemies.append(CreationFactory().create_dummy_enemy(Position(560, 150)))\n enemies.append(CreationFactory().create_dummy_enemy(Position(750, 220)))\n 
enemies.append(CreationFactory().create_dummy_enemy(Position(750, 700)))\n enemies.append(CreationFactory().create_dummy_enemy(Position(500, 500)))\n enemies.append(CreationFactory().create_dummy_enemy(Position(820, 300)))\n return enemies\n<|end_body_1|>\n\n<|body_start_2|>\n root_dir = Config()['root_dir']\n file = os.path.join(root_dir, 'resources', 'sounds', 'song1.mp3')\n pygame.mixer.music.load(file)\n pygame.mixer.music.play()\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000443", "length_bytes": 1919, "license_type": "permissive", "methods": [{"docstring": "Create all obstacles for level 1 :return: list of obstacles", "name": "create_obstacles", "signature": "def create_obstacles(self) -> list"}, {"docstring": "Create all enemies for level 1 :return: list of enemies", "name": "create_enemies", "signature": "def create_enemies(self) -> list"}, {"docstring": "Linux music for level 1", "name": "music", "signature": "def music(self) -> None"}], "n_methods": 3, "prompt": "Implement the Python class `Level1Creator` described below.\n\nClass description:\nLevel creator for level 1\n\nMethod signatures and docstrings:\n- def create_obstacles(self) -> list: Create all obstacles for level 1 :return: list of obstacles\n- def create_enemies(self) -> list: Create all enemies for level 1 :return: list of enemies\n- def music(self) -> None: Linux music for level 1", "prompted_full_text": "Implement the Python class `Level1Creator` described below.\n\nClass description:\nLevel creator for level 1\n\nMethod signatures and docstrings:\n- def create_obstacles(self) -> list: Create all obstacles for level 1 :return: list of obstacles\n- def create_enemies(self) -> list: Create all enemies for level 1 :return: list of enemies\n- def music(self) -> None: Linux music for level 1\n\n<|skeleton|>\nclass Level1Creator:\n \"\"\"Level creator for level 1\"\"\"\n\n def create_obstacles(self) -> list:\n \"\"\"Create all obstacles for level 1 :return: list of obstacles\"\"\"\n <|body_0|>\n\n def create_enemies(self) -> list:\n \"\"\"Create all enemies for level 1 :return: list of enemies\"\"\"\n <|body_1|>\n\n def music(self) -> None:\n \"\"\"Linux music for level 1\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n obstacles = []\n obstacles.append(Obstacle(Position(400, 400)))\n obstacles.append(Obstacle(Position(690, 380)))\n obstacles.append(Obstacle(Position(490, 500)))\n obstacles.append(Obstacle(Position(300, 200)))\n return obstacles\n<|end_body_0|>\n\n<|body_start_1|>\n enemies = []\n enemies.append(CreationFactory().create_dummy_enemy(Position(420, 400)))\n enemies.append(CreationFactory().create_dummy_enemy(Position(700, 380)))\n enemies.append(CreationFactory().create_dummy_enemy(Position(320, 750)))\n enemies.append(CreationFactory().create_dummy_enemy(Position(560, 150)))\n enemies.append(CreationFactory().create_dummy_enemy(Position(750, 220)))\n enemies.append(CreationFactory().create_dummy_enemy(Position(750, 700)))\n enemies.append(CreationFactory().create_dummy_enemy(Position(500, 500)))\n enemies.append(CreationFactory().create_dummy_enemy(Position(820, 300)))\n return enemies\n<|end_body_1|>\n\n<|body_start_2|>\n root_dir = Config()['root_dir']\n file = os.path.join(root_dir, 'resources', 'sounds', 'song1.mp3')\n pygame.mixer.music.load(file)\n pygame.mixer.music.play()\n<|end_body_2|>\n", "revision_id": "62b7457948675071fe328b69ba5d85aab6b39ed1", "skeleton": "<|skeleton|>\nclass Level1Creator:\n \"\"\"Level creator for level 1\"\"\"\n\n def create_obstacles(self) 
-> list:\n \"\"\"Create all obstacles for level 1 :return: list of obstacles\"\"\"\n <|body_0|>\n\n def create_enemies(self) -> list:\n \"\"\"Create all enemies for level 1 :return: list of enemies\"\"\"\n <|body_1|>\n\n def music(self) -> None:\n \"\"\"Linux music for level 1\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Level1Creator:\n \"\"\"Level creator for level 1\"\"\"\n\n def create_obstacles(self) -> list:\n \"\"\"Create all obstacles for level 1 :return: list of obstacles\"\"\"\n obstacles = []\n obstacles.append(Obstacle(Position(400, 400)))\n obstacles.append(Obstacle(Position(690, 380)))\n obstacles.append(Obstacle(Position(490, 500)))\n obstacles.append(Obstacle(Position(300, 200)))\n return obstacles\n\n def create_enemies(self) -> list:\n \"\"\"Create all enemies for level 1 :return: list of enemies\"\"\"\n enemies = []\n enemies.append(CreationFactory().create_dummy_enemy(Position(420, 400)))\n enemies.append(CreationFactory().create_dummy_enemy(Position(700, 380)))\n enemies.append(CreationFactory().create_dummy_enemy(Position(320, 750)))\n enemies.append(CreationFactory().create_dummy_enemy(Position(560, 150)))\n enemies.append(CreationFactory().create_dummy_enemy(Position(750, 220)))\n enemies.append(CreationFactory().create_dummy_enemy(Position(750, 700)))\n enemies.append(CreationFactory().create_dummy_enemy(Position(500, 500)))\n enemies.append(CreationFactory().create_dummy_enemy(Position(820, 300)))\n return enemies\n\n def music(self) -> None:\n \"\"\"Linux music for level 1\"\"\"\n root_dir = Config()['root_dir']\n file = os.path.join(root_dir, 'resources', 'sounds', 'song1.mp3')\n pygame.mixer.music.load(file)\n pygame.mixer.music.play()\n", "source": "the_stack_v2_python_sparse", "source_path": "angrytux/model/Levels/Level1Creator.py", "source_repo": "Wilson194/Angry-tux", "split": "val", "star_events_count": 0}
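The `music` body in this record follows the usual pygame background-music pattern: load a file into the streaming `pygame.mixer.music` channel, then start playback. A minimal sketch of that pattern, assuming the mixer may still need initialising; the directory layout and loop count are placeholders rather than values from the record:

```python
import os
import pygame

def play_background_music(root_dir: str, loops: int = -1) -> None:
    # The mixer must be initialised before the music module is used;
    # the record presumably relies on this happening elsewhere in the game.
    if not pygame.mixer.get_init():
        pygame.mixer.init()
    path = os.path.join(root_dir, 'resources', 'sounds', 'song1.mp3')
    pygame.mixer.music.load(path)   # streams from disk rather than decoding fully
    pygame.mixer.music.play(loops)  # loops=-1 repeats indefinitely
```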
{"blob_id": "e8afd773a8de62d47811142a65d905381153d1c3", "bodies": ["cats = ['annuals', 'perennials', 'grasses', 'shrubs']\nif not hasattr(self, 'category'):\n self.category = 'perennials'\nif self.category not in cats:\n raise ValueError('Invalid category! Choose from: {}'.format(', '.join(cats)))\nurl = '{}/plants/search/{}'.format(self.ROOT_URL, self.category)\nyield scrapy.Request(url)", "for plant in response.css('.view-content .item-list li'):\n top = plant.css('.info-grid-title')\n if not top:\n continue\n url = top.css('a::attr(href)').extract_first()\n yield response.follow(url, callback=self.parse_plant_details)", "title = response.css('.page-title h1 .plant-name')\nfeatures = response.css('.fieldgroup.group-features .content .field')\nchars_groups = response.css('.fieldgroup.group-characteristics .content .field')\nneeds_group = response.css('.fieldgroup.group-needs .content .field')\n\ndef extract_group(container):\n vals = {}\n for field in container:\n item = ' '.join(field.css('.field-item::text').extract())\n label = field.css('.field-label-inline-first::text').extract_first()\n if item is None or label is None:\n continue\n item, label = (item.strip(), label.strip())\n item = item.replace('\\n', '').replace(' ', ' ')\n label = labelize(label)\n if label == 'light_requirement':\n item = item.split(' to ')\n if label in ['soil_fertility_requirement', 'soil_ph_category', 'bloom_time']:\n item = item.split(' ')\n if label in ['foliage_colors', 'hardiness_zones', 'uses']:\n item = [x.lower() for x in item.split(' ')]\n if isinstance(item, list):\n item = [token.strip() for token in item if token.strip()]\n vals[label] = item\n return vals\nyield {'url': response.url, 'genus': maybe_lower(title.css('.genus-species .genus em::text').extract_first()), 'species': maybe_lower(title.css('.genus-species .species em::text').extract_first()), 'variety': ' '.join(title.css('.series-variety span strong::text').extract() + title.css('.series-variety .variety::text').extract()), 'common_name': title.css('.common-name::text').extract_first(), 'features': extract_group(features), 'characteristics': extract_group(chars_groups), 'needs': extract_group(needs_group)}", "last_page = response.css('.item-list ul li.pager-item a::text').extract()\nlast_page = last_page[-1]\nlast_page = int(last_page.strip())\nreturn (0, last_page)", "pages = response.css('.paging .item-list ul.pages')\nrel_url = pages.css('li a::attr(href)').extract_first().split('?')[0:-1]\nrel_url = ''.join(rel_url)\nfirst, last = self.pagination(response)\nfor page_num in range(first, last):\n yield response.follow('{}?page={}'.format(rel_url, page_num), callback=self.parse_grid)"], "bodies_text": "<|body_start_0|>\n cats = ['annuals', 'perennials', 'grasses', 'shrubs']\n if not hasattr(self, 'category'):\n self.category = 'perennials'\n if self.category not in cats:\n raise ValueError('Invalid category! 
Choose from: {}'.format(', '.join(cats)))\n url = '{}/plants/search/{}'.format(self.ROOT_URL, self.category)\n yield scrapy.Request(url)\n<|end_body_0|>\n\n<|body_start_1|>\n for plant in response.css('.view-content .item-list li'):\n top = plant.css('.info-grid-title')\n if not top:\n continue\n url = top.css('a::attr(href)').extract_first()\n yield response.follow(url, callback=self.parse_plant_details)\n<|end_body_1|>\n\n<|body_start_2|>\n title = response.css('.page-title h1 .plant-name')\n features = response.css('.fieldgroup.group-features .content .field')\n chars_groups = response.css('.fieldgroup.group-characteristics .content .field')\n needs_group = response.css('.fieldgroup.group-needs .content .field')\n\n def extract_group(container):\n vals = {}\n for field in container:\n item = ' '.join(field.css('.field-item::text').extract())\n label = field.css('.field-label-inline-first::text').extract_first()\n if item is None or label is None:\n continue\n item, label = (item.strip(), label.strip())\n item = item.replace('\\n', '').replace(' ', ' ')\n label = labelize(label)\n if label == 'light_requirement':\n item = item.split(' to ')\n if label in ['soil_fertility_requirement', 'soil_ph_category', 'bloom_time']:\n item = item.split(' ')\n if label in ['foliage_colors', 'hardiness_zones', 'uses']:\n item = [x.lower() for x in item.split(' ')]\n if isinstance(item, list):\n item = [token.strip() for token in item if token.strip()]\n vals[label] = item\n return vals\n yield {'url': response.url, 'genus': maybe_lower(title.css('.genus-species .genus em::text').extract_first()), 'species': maybe_lower(title.css('.genus-species .species em::text').extract_first()), 'variety': ' '.join(title.css('.series-variety span strong::text').extract() + title.css('.series-variety .variety::text').extract()), 'common_name': title.css('.common-name::text').extract_first(), 'features': extract_group(features), 'characteristics': extract_group(chars_groups), 'needs': extract_group(needs_group)}\n<|end_body_2|>\n\n<|body_start_3|>\n last_page = response.css('.item-list ul li.pager-item a::text').extract()\n last_page = last_page[-1]\n last_page = int(last_page.strip())\n return (0, last_page)\n<|end_body_3|>\n\n<|body_start_4|>\n pages = response.css('.paging .item-list ul.pages')\n rel_url = pages.css('li a::attr(href)').extract_first().split('?')[0:-1]\n rel_url = ''.join(rel_url)\n first, last = self.pagination(response)\n for page_num in range(first, last):\n yield response.follow('{}?page={}'.format(rel_url, page_num), callback=self.parse_grid)\n<|end_body_4|>\n", "class_docstring": "", "class_name": "ProvenWinnersPlants", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ProvenWinnersPlants:\n\n def start_requests(self):\n \"\"\"Determine which category starting url to run.\"\"\"\n <|body_0|>\n\n def parse_grid(self, response):\n \"\"\"Parse cols and rows for grid of results.\"\"\"\n <|body_1|>\n\n def parse_plant_details(self, response):\n \"\"\"Parse cols and rows for grid of results.\"\"\"\n <|body_2|>\n\n def pagination(self, response):\n \"\"\"Extract the pagination range for a given category.\"\"\"\n <|body_3|>\n\n def parse(self, response):\n \"\"\"Parse the response.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cats = ['annuals', 'perennials', 'grasses', 'shrubs']\n if not hasattr(self, 'category'):\n self.category = 'perennials'\n if self.category not in cats:\n raise ValueError('Invalid category! 
Choose from: {}'.format(', '.join(cats)))\n url = '{}/plants/search/{}'.format(self.ROOT_URL, self.category)\n yield scrapy.Request(url)\n<|end_body_0|>\n\n<|body_start_1|>\n for plant in response.css('.view-content .item-list li'):\n top = plant.css('.info-grid-title')\n if not top:\n continue\n url = top.css('a::attr(href)').extract_first()\n yield response.follow(url, callback=self.parse_plant_details)\n<|end_body_1|>\n\n<|body_start_2|>\n title = response.css('.page-title h1 .plant-name')\n features = response.css('.fieldgroup.group-features .content .field')\n chars_groups = response.css('.fieldgroup.group-characteristics .content .field')\n needs_group = response.css('.fieldgroup.group-needs .content .field')\n\n def extract_group(container):\n vals = {}\n for field in container:\n item = ' '.join(field.css('.field-item::text').extract())\n label = field.css('.field-label-inline-first::text').extract_first()\n if item is None or label is None:\n continue\n item, label = (item.strip(), label.strip())\n item = item.replace('\\n', '').replace(' ', ' ')\n label = labelize(label)\n if label == 'light_requirement':\n item = item.split(' to ')\n if label in ['soil_fertility_requirement', 'soil_ph_category', 'bloom_time']:\n item = item.split(' ')\n if label in ['foliage_colors', 'hardiness_zones', 'uses']:\n item = [x.lower() for x in item.split(' ')]\n if isinstance(item, list):\n item = [token.strip() for token in item if token.strip()]\n vals[label] = item\n return vals\n yield {'url': response.url, 'genus': maybe_lower(title.css('.genus-species .genus em::text').extract_first()), 'species': maybe_lower(title.css('.genus-species .species em::text').extract_first()), 'variety': ' '.join(title.css('.series-variety span strong::text').extract() + title.css('.series-variety .variety::text').extract()), 'common_name': title.css('.common-name::text').extract_first(), 'features': extract_group(features), 'characteristics': extract_group(chars_groups), 'needs': extract_group(needs_group)}\n<|end_body_2|>\n\n<|body_start_3|>\n last_page = response.css('.item-list ul li.pager-item a::text').extract()\n last_page = last_page[-1]\n last_page = int(last_page.strip())\n return (0, last_page)\n<|end_body_3|>\n\n<|body_start_4|>\n pages = response.css('.paging .item-list ul.pages')\n rel_url = pages.css('li a::attr(href)').extract_first().split('?')[0:-1]\n rel_url = ''.join(rel_url)\n first, last = self.pagination(response)\n for page_num in range(first, last):\n yield response.follow('{}?page={}'.format(rel_url, page_num), callback=self.parse_grid)\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000444", "length_bytes": 4466, "license_type": "permissive", "methods": [{"docstring": "Determine which category starting url to run.", "name": "start_requests", "signature": "def start_requests(self)"}, {"docstring": "Parse cols and rows for grid of results.", "name": "parse_grid", "signature": "def parse_grid(self, response)"}, {"docstring": "Parse cols and rows for grid of results.", "name": "parse_plant_details", "signature": "def parse_plant_details(self, response)"}, {"docstring": "Extract the pagination range for a given category.", "name": "pagination", "signature": "def pagination(self, response)"}, {"docstring": "Parse the response.", "name": "parse", "signature": "def parse(self, response)"}], "n_methods": 5, "prompt": "Implement the Python class `ProvenWinnersPlants` described below.\n\nClass description:\nImplement the ProvenWinnersPlants class.\n\nMethod signatures and 
docstrings:\n- def start_requests(self): Determine which category starting url to run.\n- def parse_grid(self, response): Parse cols and rows for grid of results.\n- def parse_plant_details(self, response): Parse cols and rows for grid of results.\n- def pagination(self, response): Extract the pagination range for a given category.\n- def parse(self, response): Parse the response.", "prompted_full_text": "Implement the Python class `ProvenWinnersPlants` described below.\n\nClass description:\nImplement the ProvenWinnersPlants class.\n\nMethod signatures and docstrings:\n- def start_requests(self): Determine which category starting url to run.\n- def parse_grid(self, response): Parse cols and rows for grid of results.\n- def parse_plant_details(self, response): Parse cols and rows for grid of results.\n- def pagination(self, response): Extract the pagination range for a given category.\n- def parse(self, response): Parse the response.\n\n<|skeleton|>\nclass ProvenWinnersPlants:\n\n def start_requests(self):\n \"\"\"Determine which category starting url to run.\"\"\"\n <|body_0|>\n\n def parse_grid(self, response):\n \"\"\"Parse cols and rows for grid of results.\"\"\"\n <|body_1|>\n\n def parse_plant_details(self, response):\n \"\"\"Parse cols and rows for grid of results.\"\"\"\n <|body_2|>\n\n def pagination(self, response):\n \"\"\"Extract the pagination range for a given category.\"\"\"\n <|body_3|>\n\n def parse(self, response):\n \"\"\"Parse the response.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cats = ['annuals', 'perennials', 'grasses', 'shrubs']\n if not hasattr(self, 'category'):\n self.category = 'perennials'\n if self.category not in cats:\n raise ValueError('Invalid category! Choose from: {}'.format(', '.join(cats)))\n url = '{}/plants/search/{}'.format(self.ROOT_URL, self.category)\n yield scrapy.Request(url)\n<|end_body_0|>\n\n<|body_start_1|>\n for plant in response.css('.view-content .item-list li'):\n top = plant.css('.info-grid-title')\n if not top:\n continue\n url = top.css('a::attr(href)').extract_first()\n yield response.follow(url, callback=self.parse_plant_details)\n<|end_body_1|>\n\n<|body_start_2|>\n title = response.css('.page-title h1 .plant-name')\n features = response.css('.fieldgroup.group-features .content .field')\n chars_groups = response.css('.fieldgroup.group-characteristics .content .field')\n needs_group = response.css('.fieldgroup.group-needs .content .field')\n\n def extract_group(container):\n vals = {}\n for field in container:\n item = ' '.join(field.css('.field-item::text').extract())\n label = field.css('.field-label-inline-first::text').extract_first()\n if item is None or label is None:\n continue\n item, label = (item.strip(), label.strip())\n item = item.replace('\\n', '').replace(' ', ' ')\n label = labelize(label)\n if label == 'light_requirement':\n item = item.split(' to ')\n if label in ['soil_fertility_requirement', 'soil_ph_category', 'bloom_time']:\n item = item.split(' ')\n if label in ['foliage_colors', 'hardiness_zones', 'uses']:\n item = [x.lower() for x in item.split(' ')]\n if isinstance(item, list):\n item = [token.strip() for token in item if token.strip()]\n vals[label] = item\n return vals\n yield {'url': response.url, 'genus': maybe_lower(title.css('.genus-species .genus em::text').extract_first()), 'species': maybe_lower(title.css('.genus-species .species em::text').extract_first()), 'variety': ' '.join(title.css('.series-variety span strong::text').extract() + title.css('.series-variety 
.variety::text').extract()), 'common_name': title.css('.common-name::text').extract_first(), 'features': extract_group(features), 'characteristics': extract_group(chars_groups), 'needs': extract_group(needs_group)}\n<|end_body_2|>\n\n<|body_start_3|>\n last_page = response.css('.item-list ul li.pager-item a::text').extract()\n last_page = last_page[-1]\n last_page = int(last_page.strip())\n return (0, last_page)\n<|end_body_3|>\n\n<|body_start_4|>\n pages = response.css('.paging .item-list ul.pages')\n rel_url = pages.css('li a::attr(href)').extract_first().split('?')[0:-1]\n rel_url = ''.join(rel_url)\n first, last = self.pagination(response)\n for page_num in range(first, last):\n yield response.follow('{}?page={}'.format(rel_url, page_num), callback=self.parse_grid)\n<|end_body_4|>\n", "revision_id": "8515fcc4c86ef0a96f34278d90419e5fad2b48d3", "skeleton": "<|skeleton|>\nclass ProvenWinnersPlants:\n\n def start_requests(self):\n \"\"\"Determine which category starting url to run.\"\"\"\n <|body_0|>\n\n def parse_grid(self, response):\n \"\"\"Parse cols and rows for grid of results.\"\"\"\n <|body_1|>\n\n def parse_plant_details(self, response):\n \"\"\"Parse cols and rows for grid of results.\"\"\"\n <|body_2|>\n\n def pagination(self, response):\n \"\"\"Extract the pagination range for a given category.\"\"\"\n <|body_3|>\n\n def parse(self, response):\n \"\"\"Parse the response.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ProvenWinnersPlants:\n def start_requests(self):\n \"\"\"Determine which category starting url to run.\"\"\"\n cats = ['annuals', 'perennials', 'grasses', 'shrubs']\n if not hasattr(self, 'category'):\n self.category = 'perennials'\n if self.category not in cats:\n raise ValueError('Invalid category! 
Choose from: {}'.format(', '.join(cats)))\n url = '{}/plants/search/{}'.format(self.ROOT_URL, self.category)\n yield scrapy.Request(url)\n\n def parse_grid(self, response):\n \"\"\"Parse cols and rows for grid of results.\"\"\"\n for plant in response.css('.view-content .item-list li'):\n top = plant.css('.info-grid-title')\n if not top:\n continue\n url = top.css('a::attr(href)').extract_first()\n yield response.follow(url, callback=self.parse_plant_details)\n\n def parse_plant_details(self, response):\n \"\"\"Parse cols and rows for grid of results.\"\"\"\n title = response.css('.page-title h1 .plant-name')\n features = response.css('.fieldgroup.group-features .content .field')\n chars_groups = response.css('.fieldgroup.group-characteristics .content .field')\n needs_group = response.css('.fieldgroup.group-needs .content .field')\n\n def extract_group(container):\n vals = {}\n for field in container:\n item = ' '.join(field.css('.field-item::text').extract())\n label = field.css('.field-label-inline-first::text').extract_first()\n if item is None or label is None:\n continue\n item, label = (item.strip(), label.strip())\n item = item.replace('\\n', '').replace(' ', ' ')\n label = labelize(label)\n if label == 'light_requirement':\n item = item.split(' to ')\n if label in ['soil_fertility_requirement', 'soil_ph_category', 'bloom_time']:\n item = item.split(' ')\n if label in ['foliage_colors', 'hardiness_zones', 'uses']:\n item = [x.lower() for x in item.split(' ')]\n if isinstance(item, list):\n item = [token.strip() for token in item if token.strip()]\n vals[label] = item\n return vals\n yield {'url': response.url, 'genus': maybe_lower(title.css('.genus-species .genus em::text').extract_first()), 'species': maybe_lower(title.css('.genus-species .species em::text').extract_first()), 'variety': ' '.join(title.css('.series-variety span strong::text').extract() + title.css('.series-variety .variety::text').extract()), 'common_name': title.css('.common-name::text').extract_first(), 'features': extract_group(features), 'characteristics': extract_group(chars_groups), 'needs': extract_group(needs_group)}\n\n def pagination(self, response):\n \"\"\"Extract the pagination range for a given category.\"\"\"\n last_page = response.css('.item-list ul li.pager-item a::text').extract()\n last_page = last_page[-1]\n last_page = int(last_page.strip())\n return (0, last_page)\n\n def parse(self, response):\n \"\"\"Parse the response.\"\"\"\n pages = response.css('.paging .item-list ul.pages')\n rel_url = pages.css('li a::attr(href)').extract_first().split('?')[0:-1]\n rel_url = ''.join(rel_url)\n first, last = self.pagination(response)\n for page_num in range(first, last):\n yield response.follow('{}?page={}'.format(rel_url, page_num), callback=self.parse_grid)\n", "source": "the_stack_v2_python_sparse", "source_path": "plantstuff/scraping/scrapers/spiders/proven_winners.py", "source_repo": "christabor/plantstuff", "split": "val", "star_events_count": 8}
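The spider in this record fans out in two stages: `parse` reads the last page number from the pager and schedules every numbered page, and `parse_grid` follows each grid item through to a detail page. A condensed sketch of that crawl shape using Scrapy's standard `response.follow` idiom; the start URL and CSS selectors below are placeholders, not the record's:

```python
import scrapy

class PaginatedSpider(scrapy.Spider):
    name = 'paginated_sketch'
    start_urls = ['https://example.com/plants/search/perennials']

    def parse(self, response):
        # Read the highest page number from the pager, then schedule every
        # page; relative '?page=N' URLs are resolved against response.url.
        last = int(response.css('li.pager-item a::text').getall()[-1].strip())
        for page in range(last):
            yield response.follow(f'?page={page}', callback=self.parse_grid)

    def parse_grid(self, response):
        # Each grid item links to a detail page parsed separately.
        for href in response.css('.item-list li a::attr(href)').getall():
            yield response.follow(href, callback=self.parse_detail)

    def parse_detail(self, response):
        yield {'url': response.url, 'title': response.css('h1::text').get()}
```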
{"blob_id": "92687d2f28d953d290a8156f7cfbd01f7bd9a79c", "bodies": ["result = super().allow(comment, content_object, request)\nif not result:\n return False\nelif content_object.comments_allowed:\n return True\nelse:\n return False", "result = super().moderate(comment, content_object, request)\nif result is True:\n message_content = 'Thanks for your comment. All comments must be checked before publishing, so it should appear soon.'\n add_comment_message(request, messages.SUCCESS, message_content)\nreturn result"], "bodies_text": "<|body_start_0|>\n result = super().allow(comment, content_object, request)\n if not result:\n return False\n elif content_object.comments_allowed:\n return True\n else:\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n result = super().moderate(comment, content_object, request)\n if result is True:\n message_content = 'Thanks for your comment. All comments must be checked before publishing, so it should appear soon.'\n add_comment_message(request, messages.SUCCESS, message_content)\n return result\n<|end_body_1|>\n", "class_docstring": "In addition to what we do in Post.comments_allowed, this should also ensure that: * We could enable email_notifications * If something automatedly submits a Comment on a Post that's older than COMMENTS_CLOSE_AFTER_DAYS, it will just be discarded. https://django-contrib-comments.readthedocs.io/en/latest/moderation.html", "class_name": "PostCommentModerator", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PostCommentModerator:\n \"\"\"In addition to what we do in Post.comments_allowed, this should also ensure that: * We could enable email_notifications * If something automatedly submits a Comment on a Post that's older than COMMENTS_CLOSE_AFTER_DAYS, it will just be discarded. https://django-contrib-comments.readthedocs.io/en/latest/moderation.html\"\"\"\n\n def allow(self, comment, content_object, request):\n \"\"\"While this slightly duplicates Post.comments_allowed() it ensures that our custom things (settings.HINES_COMMENTS_ALLOWED and Blog.allow_comments) are taken into account in this moderator. If this returns False, then a submitted comment is just disappeared.\"\"\"\n <|body_0|>\n\n def moderate(self, comment, content_object, request):\n \"\"\"All we do here is: * Get the result from the parent moderate() method. * If the message is to be moderated, add a flash message explaining this. * Return the result.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n result = super().allow(comment, content_object, request)\n if not result:\n return False\n elif content_object.comments_allowed:\n return True\n else:\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n result = super().moderate(comment, content_object, request)\n if result is True:\n message_content = 'Thanks for your comment. All comments must be checked before publishing, so it should appear soon.'\n add_comment_message(request, messages.SUCCESS, message_content)\n return result\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000445", "length_bytes": 22986, "license_type": "no_license", "methods": [{"docstring": "While this slightly duplicates Post.comments_allowed() it ensures that our custom things (settings.HINES_COMMENTS_ALLOWED and Blog.allow_comments) are taken into account in this moderator. 
If this returns False, then a submitted comment is just disappeared.", "name": "allow", "signature": "def allow(self, comment, content_object, request)"}, {"docstring": "All we do here is: * Get the result from the parent moderate() method. * If the message is to be moderated, add a flash message explaining this. * Return the result.", "name": "moderate", "signature": "def moderate(self, comment, content_object, request)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_000157", "prompt": "Implement the Python class `PostCommentModerator` described below.\n\nClass description:\nIn addition to what we do in Post.comments_allowed, this should also ensure that: * We could enable email_notifications * If something automatedly submits a Comment on a Post that's older than COMMENTS_CLOSE_AFTER_DAYS, it will just be discarded. https://django-contrib-comments.readthedocs.io/en/latest/moderation.html\n\nMethod signatures and docstrings:\n- def allow(self, comment, content_object, request): While this slightly duplicates Post.comments_allowed() it ensures that our custom things (settings.HINES_COMMENTS_ALLOWED and Blog.allow_comments) are taken into account in this moderator. If this returns False, then a submitted comment is just disappeared.\n- def moderate(self, comment, content_object, request): All we do here is: * Get the result from the parent moderate() method. * If the message is to be moderated, add a flash message explaining this. * Return the result.", "prompted_full_text": "Implement the Python class `PostCommentModerator` described below.\n\nClass description:\nIn addition to what we do in Post.comments_allowed, this should also ensure that: * We could enable email_notifications * If something automatedly submits a Comment on a Post that's older than COMMENTS_CLOSE_AFTER_DAYS, it will just be discarded. https://django-contrib-comments.readthedocs.io/en/latest/moderation.html\n\nMethod signatures and docstrings:\n- def allow(self, comment, content_object, request): While this slightly duplicates Post.comments_allowed() it ensures that our custom things (settings.HINES_COMMENTS_ALLOWED and Blog.allow_comments) are taken into account in this moderator. If this returns False, then a submitted comment is just disappeared.\n- def moderate(self, comment, content_object, request): All we do here is: * Get the result from the parent moderate() method. * If the message is to be moderated, add a flash message explaining this. * Return the result.\n\n<|skeleton|>\nclass PostCommentModerator:\n \"\"\"In addition to what we do in Post.comments_allowed, this should also ensure that: * We could enable email_notifications * If something automatedly submits a Comment on a Post that's older than COMMENTS_CLOSE_AFTER_DAYS, it will just be discarded. https://django-contrib-comments.readthedocs.io/en/latest/moderation.html\"\"\"\n\n def allow(self, comment, content_object, request):\n \"\"\"While this slightly duplicates Post.comments_allowed() it ensures that our custom things (settings.HINES_COMMENTS_ALLOWED and Blog.allow_comments) are taken into account in this moderator. If this returns False, then a submitted comment is just disappeared.\"\"\"\n <|body_0|>\n\n def moderate(self, comment, content_object, request):\n \"\"\"All we do here is: * Get the result from the parent moderate() method. * If the message is to be moderated, add a flash message explaining this. 
* Return the result.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n result = super().allow(comment, content_object, request)\n if not result:\n return False\n elif content_object.comments_allowed:\n return True\n else:\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n result = super().moderate(comment, content_object, request)\n if result is True:\n message_content = 'Thanks for your comment. All comments must be checked before publishing, so it should appear soon.'\n add_comment_message(request, messages.SUCCESS, message_content)\n return result\n<|end_body_1|>\n", "revision_id": "af5ab91deae688ba67d1561cee31359b67b0d582", "skeleton": "<|skeleton|>\nclass PostCommentModerator:\n \"\"\"In addition to what we do in Post.comments_allowed, this should also ensure that: * We could enable email_notifications * If something automatedly submits a Comment on a Post that's older than COMMENTS_CLOSE_AFTER_DAYS, it will just be discarded. https://django-contrib-comments.readthedocs.io/en/latest/moderation.html\"\"\"\n\n def allow(self, comment, content_object, request):\n \"\"\"While this slightly duplicates Post.comments_allowed() it ensures that our custom things (settings.HINES_COMMENTS_ALLOWED and Blog.allow_comments) are taken into account in this moderator. If this returns False, then a submitted comment is just disappeared.\"\"\"\n <|body_0|>\n\n def moderate(self, comment, content_object, request):\n \"\"\"All we do here is: * Get the result from the parent moderate() method. * If the message is to be moderated, add a flash message explaining this. * Return the result.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class PostCommentModerator:\n \"\"\"In addition to what we do in Post.comments_allowed, this should also ensure that: * We could enable email_notifications * If something automatedly submits a Comment on a Post that's older than COMMENTS_CLOSE_AFTER_DAYS, it will just be discarded. https://django-contrib-comments.readthedocs.io/en/latest/moderation.html\"\"\"\n\n def allow(self, comment, content_object, request):\n \"\"\"While this slightly duplicates Post.comments_allowed() it ensures that our custom things (settings.HINES_COMMENTS_ALLOWED and Blog.allow_comments) are taken into account in this moderator. If this returns False, then a submitted comment is just disappeared.\"\"\"\n result = super().allow(comment, content_object, request)\n if not result:\n return False\n elif content_object.comments_allowed:\n return True\n else:\n return False\n\n def moderate(self, comment, content_object, request):\n \"\"\"All we do here is: * Get the result from the parent moderate() method. * If the message is to be moderated, add a flash message explaining this. * Return the result.\"\"\"\n result = super().moderate(comment, content_object, request)\n if result is True:\n message_content = 'Thanks for your comment. All comments must be checked before publishing, so it should appear soon.'\n add_comment_message(request, messages.SUCCESS, message_content)\n return result\n", "source": "the_stack_v2_python_sparse", "source_path": "hines/weblogs/models.py", "source_repo": "philgyford/django-hines", "split": "val", "star_events_count": 14}
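The moderator in this record plugs into django-contrib-comments' moderation registry, which calls `allow` to silently drop comments and `moderate` to flag them for review before publication. A sketch of how such a moderator is typically declared and registered; the `Post` import, field names, and 30-day window are assumptions for illustration:

```python
from django_comments.moderation import CommentModerator, moderator

from myblog.models import Post  # hypothetical app and model

class PostCommentModerator(CommentModerator):
    email_notification = True        # mail site staff about new comments
    auto_close_field = 'time_published'
    close_after = 30                 # discard comments on posts older than this

    def allow(self, comment, content_object, request):
        # Layer a per-object switch on top of the date-based rules that
        # the parent class already enforces.
        if not super().allow(comment, content_object, request):
            return False
        return bool(content_object.comments_allowed)

moderator.register(Post, PostCommentModerator)
```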
{"blob_id": "4430d37c105bb1addabb420b86692b016d3262d6", "bodies": ["def dfs(root, Sum, path, ans):\n if not root.left and (not root.right) and (Sum == root.val):\n path.append(root.val)\n ans.append(path)\n if root.left:\n dfs(root.left, Sum - root.val, path + [root.val], ans)\n if root.right:\n dfs(root.right, Sum - root.val, path + [root.val], ans)\nif not root:\n return []\nans = []\ndfs(root, Sum, [], ans)\nreturn ans", "if not root:\n return []\nres = []\nstack = [(root, Sum - root.val, [root.val])]\nwhile stack:\n curr, val, ls = stack.pop()\n if not curr.left and (not curr.right) and (val == 0):\n res.append(ls)\n if curr.right:\n stack.append((curr.right, val - curr.right.val, ls + [curr.right.val]))\n if curr.left:\n stack.append((curr.left, val - curr.left.val, ls + [curr.left.val]))\nreturn res", "if not root:\n return []\nans = []\nstack = [(root, [root.val])]\nwhile stack:\n curr, path = stack.pop()\n if not curr.left and (not curr.right) and (sum(path) == Sum):\n ans.append(path)\n if curr.right:\n stack.append((curr.right, path + [curr.right.val]))\n if curr.left:\n stack.append((curr.left, path + [curr.left.val]))\nreturn ans", "if not root:\n return []\nqueue = [(root, Sum - root.val, [root.val])]\nans = []\nwhile queue:\n curr, val, path = queue.pop(0)\n if not curr.left and (not curr.right) and (val == 0):\n ans.append(path)\n if curr.left:\n queue.append((curr.left, val - curr.left.val, path + [curr.left.val]))\n if curr.right:\n queue.append((curr.right, val - curr.right.val, path + [curr.right.val]))\nreturn ans"], "bodies_text": "<|body_start_0|>\n def dfs(root, Sum, path, ans):\n if not root.left and (not root.right) and (Sum == root.val):\n path.append(root.val)\n ans.append(path)\n if root.left:\n dfs(root.left, Sum - root.val, path + [root.val], ans)\n if root.right:\n dfs(root.right, Sum - root.val, path + [root.val], ans)\n if not root:\n return []\n ans = []\n dfs(root, Sum, [], ans)\n return ans\n<|end_body_0|>\n\n<|body_start_1|>\n if not root:\n return []\n res = []\n stack = [(root, Sum - root.val, [root.val])]\n while stack:\n curr, val, ls = stack.pop()\n if not curr.left and (not curr.right) and (val == 0):\n res.append(ls)\n if curr.right:\n stack.append((curr.right, val - curr.right.val, ls + [curr.right.val]))\n if curr.left:\n stack.append((curr.left, val - curr.left.val, ls + [curr.left.val]))\n return res\n<|end_body_1|>\n\n<|body_start_2|>\n if not root:\n return []\n ans = []\n stack = [(root, [root.val])]\n while stack:\n curr, path = stack.pop()\n if not curr.left and (not curr.right) and (sum(path) == Sum):\n ans.append(path)\n if curr.right:\n stack.append((curr.right, path + [curr.right.val]))\n if curr.left:\n stack.append((curr.left, path + [curr.left.val]))\n return ans\n<|end_body_2|>\n\n<|body_start_3|>\n if not root:\n return []\n queue = [(root, Sum - root.val, [root.val])]\n ans = []\n while queue:\n curr, val, path = queue.pop(0)\n if not curr.left and (not curr.right) and (val == 0):\n ans.append(path)\n if curr.left:\n queue.append((curr.left, val - curr.left.val, path + [curr.left.val]))\n if curr.right:\n queue.append((curr.right, val - curr.right.val, path + [curr.right.val]))\n return ans\n<|end_body_3|>\n", "class_docstring": "5 / 4 8 / / 11 13 4 / \\\\ / 7 2 5 1 sum = 22 [ [5,4,11,2], [5,8,4,5] ] Summary: 这类题考得就是二叉树的遍历,只不过在遍历的同时需要保存每个节点的和,可以用,因此自然而然的可以想用用dfs来做 需要注意的是: 遍历的时候使用 ``` while root: stack.append(...) 
root = root.left ``` 这种遍历方法并不太好,因为无法获得上一次压栈的值,也就无法很好的保存已访问过的节点以及目前节点的和", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n \"\"\"5 / 4 8 / / 11 13 4 / \\\\ / 7 2 5 1 sum = 22 [ [5,4,11,2], [5,8,4,5] ] Summary: 这类题考得就是二叉树的遍历,只不过在遍历的同时需要保存每个节点的和,可以用,因此自然而然的可以想用用dfs来做 需要注意的是: 遍历的时候使用 ``` while root: stack.append(...) root = root.left ``` 这种遍历方法并不太好,因为无法获得上一次压栈的值,也就无法很好的保存已访问过的节点以及目前节点的和\"\"\"\n\n def pathSum1(self, root: TreeNode, Sum: int) -> List[List[int]]:\n \"\"\"dfs-recursive :param root: :param Sum: :return:\"\"\"\n <|body_0|>\n\n def pathSum2(self, root: TreeNode, Sum: int) -> List[List[int]]:\n \"\"\"dfs+stack iterative 迭代版本其实就是自己用一个`stack`实现递归中的栈帧 :param root: :param Sum: :return:\"\"\"\n <|body_1|>\n\n def pathSum3(self, root: TreeNode, Sum: int) -> List[List[int]]:\n \"\"\"dfs+stack iterative :param root: :param Sum: :return:\"\"\"\n <|body_2|>\n\n def pathSum4(self, root: TreeNode, Sum: int) -> List[List[int]]:\n \"\"\"bfs+queue iterative :param root: :param Sum: :return:\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def dfs(root, Sum, path, ans):\n if not root.left and (not root.right) and (Sum == root.val):\n path.append(root.val)\n ans.append(path)\n if root.left:\n dfs(root.left, Sum - root.val, path + [root.val], ans)\n if root.right:\n dfs(root.right, Sum - root.val, path + [root.val], ans)\n if not root:\n return []\n ans = []\n dfs(root, Sum, [], ans)\n return ans\n<|end_body_0|>\n\n<|body_start_1|>\n if not root:\n return []\n res = []\n stack = [(root, Sum - root.val, [root.val])]\n while stack:\n curr, val, ls = stack.pop()\n if not curr.left and (not curr.right) and (val == 0):\n res.append(ls)\n if curr.right:\n stack.append((curr.right, val - curr.right.val, ls + [curr.right.val]))\n if curr.left:\n stack.append((curr.left, val - curr.left.val, ls + [curr.left.val]))\n return res\n<|end_body_1|>\n\n<|body_start_2|>\n if not root:\n return []\n ans = []\n stack = [(root, [root.val])]\n while stack:\n curr, path = stack.pop()\n if not curr.left and (not curr.right) and (sum(path) == Sum):\n ans.append(path)\n if curr.right:\n stack.append((curr.right, path + [curr.right.val]))\n if curr.left:\n stack.append((curr.left, path + [curr.left.val]))\n return ans\n<|end_body_2|>\n\n<|body_start_3|>\n if not root:\n return []\n queue = [(root, Sum - root.val, [root.val])]\n ans = []\n while queue:\n curr, val, path = queue.pop(0)\n if not curr.left and (not curr.right) and (val == 0):\n ans.append(path)\n if curr.left:\n queue.append((curr.left, val - curr.left.val, path + [curr.left.val]))\n if curr.right:\n queue.append((curr.right, val - curr.right.val, path + [curr.right.val]))\n return ans\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000446", "length_bytes": 3838, "license_type": "no_license", "methods": [{"docstring": "dfs-recursive :param root: :param Sum: :return:", "name": "pathSum1", "signature": "def pathSum1(self, root: TreeNode, Sum: int) -> List[List[int]]"}, {"docstring": "dfs+stack iterative 迭代版本其实就是自己用一个`stack`实现递归中的栈帧 :param root: :param Sum: :return:", "name": "pathSum2", "signature": "def pathSum2(self, root: TreeNode, Sum: int) -> List[List[int]]"}, {"docstring": "dfs+stack iterative :param root: :param Sum: :return:", "name": "pathSum3", "signature": "def pathSum3(self, root: TreeNode, Sum: int) -> List[List[int]]"}, {"docstring": "bfs+queue iterative :param root: :param Sum: :return:", "name": "pathSum4", "signature": 
"def pathSum4(self, root: TreeNode, Sum: int) -> List[List[int]]"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_051116", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\n5 / 4 8 / / 11 13 4 / \\\\ / 7 2 5 1 sum = 22 [ [5,4,11,2], [5,8,4,5] ] Summary: 这类题考得就是二叉树的遍历,只不过在遍历的同时需要保存每个节点的和,可以用,因此自然而然的可以想用用dfs来做 需要注意的是: 遍历的时候使用 ``` while root: stack.append(...) root = root.left ``` 这种遍历方法并不太好,因为无法获得上一次压栈的值,也就无法很好的保存已访问过的节点以及目前节点的和\n\nMethod signatures and docstrings:\n- def pathSum1(self, root: TreeNode, Sum: int) -> List[List[int]]: dfs-recursive :param root: :param Sum: :return:\n- def pathSum2(self, root: TreeNode, Sum: int) -> List[List[int]]: dfs+stack iterative 迭代版本其实就是自己用一个`stack`实现递归中的栈帧 :param root: :param Sum: :return:\n- def pathSum3(self, root: TreeNode, Sum: int) -> List[List[int]]: dfs+stack iterative :param root: :param Sum: :return:\n- def pathSum4(self, root: TreeNode, Sum: int) -> List[List[int]]: bfs+queue iterative :param root: :param Sum: :return:", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\n5 / 4 8 / / 11 13 4 / \\\\ / 7 2 5 1 sum = 22 [ [5,4,11,2], [5,8,4,5] ] Summary: 这类题考得就是二叉树的遍历,只不过在遍历的同时需要保存每个节点的和,可以用,因此自然而然的可以想用用dfs来做 需要注意的是: 遍历的时候使用 ``` while root: stack.append(...) root = root.left ``` 这种遍历方法并不太好,因为无法获得上一次压栈的值,也就无法很好的保存已访问过的节点以及目前节点的和\n\nMethod signatures and docstrings:\n- def pathSum1(self, root: TreeNode, Sum: int) -> List[List[int]]: dfs-recursive :param root: :param Sum: :return:\n- def pathSum2(self, root: TreeNode, Sum: int) -> List[List[int]]: dfs+stack iterative 迭代版本其实就是自己用一个`stack`实现递归中的栈帧 :param root: :param Sum: :return:\n- def pathSum3(self, root: TreeNode, Sum: int) -> List[List[int]]: dfs+stack iterative :param root: :param Sum: :return:\n- def pathSum4(self, root: TreeNode, Sum: int) -> List[List[int]]: bfs+queue iterative :param root: :param Sum: :return:\n\n<|skeleton|>\nclass Solution:\n \"\"\"5 / 4 8 / / 11 13 4 / \\\\ / 7 2 5 1 sum = 22 [ [5,4,11,2], [5,8,4,5] ] Summary: 这类题考得就是二叉树的遍历,只不过在遍历的同时需要保存每个节点的和,可以用,因此自然而然的可以想用用dfs来做 需要注意的是: 遍历的时候使用 ``` while root: stack.append(...) 
root = root.left ``` 这种遍历方法并不太好,因为无法获得上一次压栈的值,也就无法很好的保存已访问过的节点以及目前节点的和\"\"\"\n\n def pathSum1(self, root: TreeNode, Sum: int) -> List[List[int]]:\n \"\"\"dfs-recursive :param root: :param Sum: :return:\"\"\"\n <|body_0|>\n\n def pathSum2(self, root: TreeNode, Sum: int) -> List[List[int]]:\n \"\"\"dfs+stack iterative 迭代版本其实就是自己用一个`stack`实现递归中的栈帧 :param root: :param Sum: :return:\"\"\"\n <|body_1|>\n\n def pathSum3(self, root: TreeNode, Sum: int) -> List[List[int]]:\n \"\"\"dfs+stack iterative :param root: :param Sum: :return:\"\"\"\n <|body_2|>\n\n def pathSum4(self, root: TreeNode, Sum: int) -> List[List[int]]:\n \"\"\"bfs+queue iterative :param root: :param Sum: :return:\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def dfs(root, Sum, path, ans):\n if not root.left and (not root.right) and (Sum == root.val):\n path.append(root.val)\n ans.append(path)\n if root.left:\n dfs(root.left, Sum - root.val, path + [root.val], ans)\n if root.right:\n dfs(root.right, Sum - root.val, path + [root.val], ans)\n if not root:\n return []\n ans = []\n dfs(root, Sum, [], ans)\n return ans\n<|end_body_0|>\n\n<|body_start_1|>\n if not root:\n return []\n res = []\n stack = [(root, Sum - root.val, [root.val])]\n while stack:\n curr, val, ls = stack.pop()\n if not curr.left and (not curr.right) and (val == 0):\n res.append(ls)\n if curr.right:\n stack.append((curr.right, val - curr.right.val, ls + [curr.right.val]))\n if curr.left:\n stack.append((curr.left, val - curr.left.val, ls + [curr.left.val]))\n return res\n<|end_body_1|>\n\n<|body_start_2|>\n if not root:\n return []\n ans = []\n stack = [(root, [root.val])]\n while stack:\n curr, path = stack.pop()\n if not curr.left and (not curr.right) and (sum(path) == Sum):\n ans.append(path)\n if curr.right:\n stack.append((curr.right, path + [curr.right.val]))\n if curr.left:\n stack.append((curr.left, path + [curr.left.val]))\n return ans\n<|end_body_2|>\n\n<|body_start_3|>\n if not root:\n return []\n queue = [(root, Sum - root.val, [root.val])]\n ans = []\n while queue:\n curr, val, path = queue.pop(0)\n if not curr.left and (not curr.right) and (val == 0):\n ans.append(path)\n if curr.left:\n queue.append((curr.left, val - curr.left.val, path + [curr.left.val]))\n if curr.right:\n queue.append((curr.right, val - curr.right.val, path + [curr.right.val]))\n return ans\n<|end_body_3|>\n", "revision_id": "25f2795b6e7f9f68833f2fddc6cc4f4d977121a6", "skeleton": "<|skeleton|>\nclass Solution:\n \"\"\"5 / 4 8 / / 11 13 4 / \\\\ / 7 2 5 1 sum = 22 [ [5,4,11,2], [5,8,4,5] ] Summary: 这类题考得就是二叉树的遍历,只不过在遍历的同时需要保存每个节点的和,可以用,因此自然而然的可以想用用dfs来做 需要注意的是: 遍历的时候使用 ``` while root: stack.append(...) 
root = root.left ``` 这种遍历方法并不太好,因为无法获得上一次压栈的值,也就无法很好的保存已访问过的节点以及目前节点的和\"\"\"\n\n def pathSum1(self, root: TreeNode, Sum: int) -> List[List[int]]:\n \"\"\"dfs-recursive :param root: :param Sum: :return:\"\"\"\n <|body_0|>\n\n def pathSum2(self, root: TreeNode, Sum: int) -> List[List[int]]:\n \"\"\"dfs+stack iterative 迭代版本其实就是自己用一个`stack`实现递归中的栈帧 :param root: :param Sum: :return:\"\"\"\n <|body_1|>\n\n def pathSum3(self, root: TreeNode, Sum: int) -> List[List[int]]:\n \"\"\"dfs+stack iterative :param root: :param Sum: :return:\"\"\"\n <|body_2|>\n\n def pathSum4(self, root: TreeNode, Sum: int) -> List[List[int]]:\n \"\"\"bfs+queue iterative :param root: :param Sum: :return:\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n \"\"\"5 / 4 8 / / 11 13 4 / \\\\ / 7 2 5 1 sum = 22 [ [5,4,11,2], [5,8,4,5] ] Summary: 这类题考得就是二叉树的遍历,只不过在遍历的同时需要保存每个节点的和,可以用,因此自然而然的可以想用用dfs来做 需要注意的是: 遍历的时候使用 ``` while root: stack.append(...) root = root.left ``` 这种遍历方法并不太好,因为无法获得上一次压栈的值,也就无法很好的保存已访问过的节点以及目前节点的和\"\"\"\n\n def pathSum1(self, root: TreeNode, Sum: int) -> List[List[int]]:\n \"\"\"dfs-recursive :param root: :param Sum: :return:\"\"\"\n def dfs(root, Sum, path, ans):\n if not root.left and (not root.right) and (Sum == root.val):\n path.append(root.val)\n ans.append(path)\n if root.left:\n dfs(root.left, Sum - root.val, path + [root.val], ans)\n if root.right:\n dfs(root.right, Sum - root.val, path + [root.val], ans)\n if not root:\n return []\n ans = []\n dfs(root, Sum, [], ans)\n return ans\n\n def pathSum2(self, root: TreeNode, Sum: int) -> List[List[int]]:\n \"\"\"dfs+stack iterative 迭代版本其实就是自己用一个`stack`实现递归中的栈帧 :param root: :param Sum: :return:\"\"\"\n if not root:\n return []\n res = []\n stack = [(root, Sum - root.val, [root.val])]\n while stack:\n curr, val, ls = stack.pop()\n if not curr.left and (not curr.right) and (val == 0):\n res.append(ls)\n if curr.right:\n stack.append((curr.right, val - curr.right.val, ls + [curr.right.val]))\n if curr.left:\n stack.append((curr.left, val - curr.left.val, ls + [curr.left.val]))\n return res\n\n def pathSum3(self, root: TreeNode, Sum: int) -> List[List[int]]:\n \"\"\"dfs+stack iterative :param root: :param Sum: :return:\"\"\"\n if not root:\n return []\n ans = []\n stack = [(root, [root.val])]\n while stack:\n curr, path = stack.pop()\n if not curr.left and (not curr.right) and (sum(path) == Sum):\n ans.append(path)\n if curr.right:\n stack.append((curr.right, path + [curr.right.val]))\n if curr.left:\n stack.append((curr.left, path + [curr.left.val]))\n return ans\n\n def pathSum4(self, root: TreeNode, Sum: int) -> List[List[int]]:\n \"\"\"bfs+queue iterative :param root: :param Sum: :return:\"\"\"\n if not root:\n return []\n queue = [(root, Sum - root.val, [root.val])]\n ans = []\n while queue:\n curr, val, path = queue.pop(0)\n if not curr.left and (not curr.right) and (val == 0):\n ans.append(path)\n if curr.left:\n queue.append((curr.left, val - curr.left.val, path + [curr.left.val]))\n if curr.right:\n queue.append((curr.right, val - curr.right.val, path + [curr.right.val]))\n return ans\n", "source": "the_stack_v2_python_sparse", "source_path": "113.py", "source_repo": "Darkxiete/leetcode_python", "split": "val", "star_events_count": 0}
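The four bodies in this record are variations on one root-to-leaf traversal that threads the remaining target sum through each step. The recursive variant, made self-contained and checked against the example tree from the record's docstring (target 22 → [[5,4,11,2], [5,8,4,5]]); the `TreeNode` definition and test below are illustrative scaffolding:

```python
from typing import List, Optional

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

def path_sum(root: Optional[TreeNode], target: int) -> List[List[int]]:
    ans: List[List[int]] = []

    def dfs(node: TreeNode, remaining: int, path: List[int]) -> None:
        path = path + [node.val]  # copy, so sibling branches stay independent
        if not node.left and not node.right and remaining == node.val:
            ans.append(path)
            return
        for child in (node.left, node.right):
            if child:
                dfs(child, remaining - node.val, path)

    if root:
        dfs(root, target, [])
    return ans

if __name__ == '__main__':
    root = TreeNode(5,
                    TreeNode(4, TreeNode(11, TreeNode(7), TreeNode(2))),
                    TreeNode(8, TreeNode(13), TreeNode(4, TreeNode(5), TreeNode(1))))
    assert path_sum(root, 22) == [[5, 4, 11, 2], [5, 8, 4, 5]]
```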
{"blob_id": "ab8b6124dbe4f5cd901e6522fde423c3c9068e56", "bodies": ["if 'count' not in df or 'feature' not in df:\n raise Exception('No \"count\" or \"feature\" found in data.')\nif len(df.columns) < 3:\n raise Exception('Need at least one metrics column.')\nif len(df) == 0:\n raise Exception('Data is empty')\ndata = []\nfor _, row in df.iterrows():\n metric_values = dict(row)\n feature = metric_values.pop('feature')\n data.append({'feature': feature, 'metricValues': metric_values})\nreturn data", "import IPython\nif sys.version_info.major > 2 and isinstance(data, str) or (sys.version_info.major <= 2 and isinstance(data, basestring)):\n data = bq.Query(data)\nif isinstance(data, bq.Query):\n df = data.execute().result().to_dataframe()\n data = self._get_lantern_format(df)\nelif isinstance(data, pd.core.frame.DataFrame):\n data = self._get_lantern_format(data)\nelse:\n raise Exception('data needs to be a sql query, or a pandas DataFrame.')\nHTML_TEMPLATE = '\\n \\n '\nmetrics_str = str(map(str, data[0]['metricValues'].keys()))\ndata_str = str([{str(k): json.dumps(v) for k, v in elem.iteritems()} for elem in data])\nhtml_id = 'l' + datalab.utils.commands.Html.next_id()\nhtml = HTML_TEMPLATE.format(html_id=html_id, metrics=metrics_str, data=data_str)\nIPython.display.display(IPython.display.HTML(html))"], "bodies_text": "<|body_start_0|>\n if 'count' not in df or 'feature' not in df:\n raise Exception('No \"count\" or \"feature\" found in data.')\n if len(df.columns) < 3:\n raise Exception('Need at least one metrics column.')\n if len(df) == 0:\n raise Exception('Data is empty')\n data = []\n for _, row in df.iterrows():\n metric_values = dict(row)\n feature = metric_values.pop('feature')\n data.append({'feature': feature, 'metricValues': metric_values})\n return data\n<|end_body_0|>\n\n<|body_start_1|>\n import IPython\n if sys.version_info.major > 2 and isinstance(data, str) or (sys.version_info.major <= 2 and isinstance(data, basestring)):\n data = bq.Query(data)\n if isinstance(data, bq.Query):\n df = data.execute().result().to_dataframe()\n data = self._get_lantern_format(df)\n elif isinstance(data, pd.core.frame.DataFrame):\n data = self._get_lantern_format(data)\n else:\n raise Exception('data needs to be a sql query, or a pandas DataFrame.')\n HTML_TEMPLATE = '\\n \\n '\n metrics_str = str(map(str, data[0]['metricValues'].keys()))\n data_str = str([{str(k): json.dumps(v) for k, v in elem.iteritems()} for elem in data])\n html_id = 'l' + datalab.utils.commands.Html.next_id()\n html = HTML_TEMPLATE.format(html_id=html_id, metrics=metrics_str, data=data_str)\n IPython.display.display(IPython.display.HTML(html))\n<|end_body_1|>\n", "class_docstring": "Represents A feature slice view.", "class_name": "FeatureSliceView", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FeatureSliceView:\n \"\"\"Represents A feature slice view.\"\"\"\n\n def _get_lantern_format(self, df):\n \"\"\"Feature slice view browser expects data in the format of: {\"metricValues\": {\"count\": 12, \"accuracy\": 1.0}, \"feature\": \"species:Iris-setosa\"} {\"metricValues\": {\"count\": 11, \"accuracy\": 0.72}, \"feature\": \"species:Iris-versicolor\"} ... This function converts a DataFrame to such format.\"\"\"\n <|body_0|>\n\n def plot(self, data):\n \"\"\"Plots a featire slice view on given data. Args: data: Can be one of: A string of sql query. A sql query module defined by \"%%sql --module module_name\". A pandas DataFrame. 
Regardless of data type, it must include the following columns: \"feature\": identifies a slice of features. For example: \"petal_length:4.0-4.2\". \"count\": number of instances in that slice of features. All other columns are viewed as metrics for its feature slice. At least one is required.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if 'count' not in df or 'feature' not in df:\n raise Exception('No \"count\" or \"feature\" found in data.')\n if len(df.columns) < 3:\n raise Exception('Need at least one metrics column.')\n if len(df) == 0:\n raise Exception('Data is empty')\n data = []\n for _, row in df.iterrows():\n metric_values = dict(row)\n feature = metric_values.pop('feature')\n data.append({'feature': feature, 'metricValues': metric_values})\n return data\n<|end_body_0|>\n\n<|body_start_1|>\n import IPython\n if sys.version_info.major > 2 and isinstance(data, str) or (sys.version_info.major <= 2 and isinstance(data, basestring)):\n data = bq.Query(data)\n if isinstance(data, bq.Query):\n df = data.execute().result().to_dataframe()\n data = self._get_lantern_format(df)\n elif isinstance(data, pd.core.frame.DataFrame):\n data = self._get_lantern_format(data)\n else:\n raise Exception('data needs to be a sql query, or a pandas DataFrame.')\n HTML_TEMPLATE = '\\n \\n '\n metrics_str = str(map(str, data[0]['metricValues'].keys()))\n data_str = str([{str(k): json.dumps(v) for k, v in elem.iteritems()} for elem in data])\n html_id = 'l' + datalab.utils.commands.Html.next_id()\n html = HTML_TEMPLATE.format(html_id=html_id, metrics=metrics_str, data=data_str)\n IPython.display.display(IPython.display.HTML(html))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000447", "length_bytes": 3621, "license_type": "permissive", "methods": [{"docstring": "Feature slice view browser expects data in the format of: {\"metricValues\": {\"count\": 12, \"accuracy\": 1.0}, \"feature\": \"species:Iris-setosa\"} {\"metricValues\": {\"count\": 11, \"accuracy\": 0.72}, \"feature\": \"species:Iris-versicolor\"} ... This function converts a DataFrame to such format.", "name": "_get_lantern_format", "signature": "def _get_lantern_format(self, df)"}, {"docstring": "Plots a featire slice view on given data. Args: data: Can be one of: A string of sql query. A sql query module defined by \"%%sql --module module_name\". A pandas DataFrame. Regardless of data type, it must include the following columns: \"feature\": identifies a slice of features. For example: \"petal_length:4.0-4.2\". \"count\": number of instances in that slice of features. All other columns are viewed as metrics for its feature slice. At least one is required.", "name": "plot", "signature": "def plot(self, data)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_039139", "prompt": "Implement the Python class `FeatureSliceView` described below.\n\nClass description:\nRepresents A feature slice view.\n\nMethod signatures and docstrings:\n- def _get_lantern_format(self, df): Feature slice view browser expects data in the format of: {\"metricValues\": {\"count\": 12, \"accuracy\": 1.0}, \"feature\": \"species:Iris-setosa\"} {\"metricValues\": {\"count\": 11, \"accuracy\": 0.72}, \"feature\": \"species:Iris-versicolor\"} ... This function converts a DataFrame to such format.\n- def plot(self, data): Plots a featire slice view on given data. Args: data: Can be one of: A string of sql query. A sql query module defined by \"%%sql --module module_name\". A pandas DataFrame. 
Regardless of data type, it must include the following columns: \"feature\": identifies a slice of features. For example: \"petal_length:4.0-4.2\". \"count\": number of instances in that slice of features. All other columns are viewed as metrics for its feature slice. At least one is required.", "prompted_full_text": "Implement the Python class `FeatureSliceView` described below.\n\nClass description:\nRepresents A feature slice view.\n\nMethod signatures and docstrings:\n- def _get_lantern_format(self, df): Feature slice view browser expects data in the format of: {\"metricValues\": {\"count\": 12, \"accuracy\": 1.0}, \"feature\": \"species:Iris-setosa\"} {\"metricValues\": {\"count\": 11, \"accuracy\": 0.72}, \"feature\": \"species:Iris-versicolor\"} ... This function converts a DataFrame to such format.\n- def plot(self, data): Plots a featire slice view on given data. Args: data: Can be one of: A string of sql query. A sql query module defined by \"%%sql --module module_name\". A pandas DataFrame. Regardless of data type, it must include the following columns: \"feature\": identifies a slice of features. For example: \"petal_length:4.0-4.2\". \"count\": number of instances in that slice of features. All other columns are viewed as metrics for its feature slice. At least one is required.\n\n<|skeleton|>\nclass FeatureSliceView:\n \"\"\"Represents A feature slice view.\"\"\"\n\n def _get_lantern_format(self, df):\n \"\"\"Feature slice view browser expects data in the format of: {\"metricValues\": {\"count\": 12, \"accuracy\": 1.0}, \"feature\": \"species:Iris-setosa\"} {\"metricValues\": {\"count\": 11, \"accuracy\": 0.72}, \"feature\": \"species:Iris-versicolor\"} ... This function converts a DataFrame to such format.\"\"\"\n <|body_0|>\n\n def plot(self, data):\n \"\"\"Plots a featire slice view on given data. Args: data: Can be one of: A string of sql query. A sql query module defined by \"%%sql --module module_name\". A pandas DataFrame. Regardless of data type, it must include the following columns: \"feature\": identifies a slice of features. For example: \"petal_length:4.0-4.2\". \"count\": number of instances in that slice of features. All other columns are viewed as metrics for its feature slice. 
At least one is required.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if 'count' not in df or 'feature' not in df:\n raise Exception('No \"count\" or \"feature\" found in data.')\n if len(df.columns) < 3:\n raise Exception('Need at least one metrics column.')\n if len(df) == 0:\n raise Exception('Data is empty')\n data = []\n for _, row in df.iterrows():\n metric_values = dict(row)\n feature = metric_values.pop('feature')\n data.append({'feature': feature, 'metricValues': metric_values})\n return data\n<|end_body_0|>\n\n<|body_start_1|>\n import IPython\n if sys.version_info.major > 2 and isinstance(data, str) or (sys.version_info.major <= 2 and isinstance(data, basestring)):\n data = bq.Query(data)\n if isinstance(data, bq.Query):\n df = data.execute().result().to_dataframe()\n data = self._get_lantern_format(df)\n elif isinstance(data, pd.core.frame.DataFrame):\n data = self._get_lantern_format(data)\n else:\n raise Exception('data needs to be a sql query, or a pandas DataFrame.')\n HTML_TEMPLATE = '\\n \\n '\n metrics_str = str(map(str, data[0]['metricValues'].keys()))\n data_str = str([{str(k): json.dumps(v) for k, v in elem.iteritems()} for elem in data])\n html_id = 'l' + datalab.utils.commands.Html.next_id()\n html = HTML_TEMPLATE.format(html_id=html_id, metrics=metrics_str, data=data_str)\n IPython.display.display(IPython.display.HTML(html))\n<|end_body_1|>\n", "revision_id": "8bf007da3e43096aa3a3dca158fc56b286ba6f5c", "skeleton": "<|skeleton|>\nclass FeatureSliceView:\n \"\"\"Represents A feature slice view.\"\"\"\n\n def _get_lantern_format(self, df):\n \"\"\"Feature slice view browser expects data in the format of: {\"metricValues\": {\"count\": 12, \"accuracy\": 1.0}, \"feature\": \"species:Iris-setosa\"} {\"metricValues\": {\"count\": 11, \"accuracy\": 0.72}, \"feature\": \"species:Iris-versicolor\"} ... This function converts a DataFrame to such format.\"\"\"\n <|body_0|>\n\n def plot(self, data):\n \"\"\"Plots a featire slice view on given data. Args: data: Can be one of: A string of sql query. A sql query module defined by \"%%sql --module module_name\". A pandas DataFrame. Regardless of data type, it must include the following columns: \"feature\": identifies a slice of features. For example: \"petal_length:4.0-4.2\". \"count\": number of instances in that slice of features. All other columns are viewed as metrics for its feature slice. At least one is required.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class FeatureSliceView:\n \"\"\"Represents A feature slice view.\"\"\"\n\n def _get_lantern_format(self, df):\n \"\"\"Feature slice view browser expects data in the format of: {\"metricValues\": {\"count\": 12, \"accuracy\": 1.0}, \"feature\": \"species:Iris-setosa\"} {\"metricValues\": {\"count\": 11, \"accuracy\": 0.72}, \"feature\": \"species:Iris-versicolor\"} ... 
This function converts a DataFrame to such format.\"\"\"\n if 'count' not in df or 'feature' not in df:\n raise Exception('No \"count\" or \"feature\" found in data.')\n if len(df.columns) < 3:\n raise Exception('Need at least one metrics column.')\n if len(df) == 0:\n raise Exception('Data is empty')\n data = []\n for _, row in df.iterrows():\n metric_values = dict(row)\n feature = metric_values.pop('feature')\n data.append({'feature': feature, 'metricValues': metric_values})\n return data\n\n def plot(self, data):\n \"\"\"Plots a featire slice view on given data. Args: data: Can be one of: A string of sql query. A sql query module defined by \"%%sql --module module_name\". A pandas DataFrame. Regardless of data type, it must include the following columns: \"feature\": identifies a slice of features. For example: \"petal_length:4.0-4.2\". \"count\": number of instances in that slice of features. All other columns are viewed as metrics for its feature slice. At least one is required.\"\"\"\n import IPython\n if sys.version_info.major > 2 and isinstance(data, str) or (sys.version_info.major <= 2 and isinstance(data, basestring)):\n data = bq.Query(data)\n if isinstance(data, bq.Query):\n df = data.execute().result().to_dataframe()\n data = self._get_lantern_format(df)\n elif isinstance(data, pd.core.frame.DataFrame):\n data = self._get_lantern_format(data)\n else:\n raise Exception('data needs to be a sql query, or a pandas DataFrame.')\n HTML_TEMPLATE = '\\n \\n '\n metrics_str = str(map(str, data[0]['metricValues'].keys()))\n data_str = str([{str(k): json.dumps(v) for k, v in elem.iteritems()} for elem in data])\n html_id = 'l' + datalab.utils.commands.Html.next_id()\n html = HTML_TEMPLATE.format(html_id=html_id, metrics=metrics_str, data=data_str)\n IPython.display.display(IPython.display.HTML(html))\n", "source": "the_stack_v2_python_sparse", "source_path": "google/datalab/ml/_feature_slice_view.py", "source_repo": "googledatalab/pydatalab", "split": "val", "star_events_count": 200}
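The `_get_lantern_format` conversion is the one part of this record that survives outside the Python-2-era datalab stack; `plot` leans on `basestring`, `elem.iteritems()`, and a string-ified `map`, all of which assume Python 2. A standalone sketch of the conversion with plain pandas, on the iris-style example from the docstring (`ValueError` stands in for the record's bare `Exception`):

```python
# Standalone sketch of the DataFrame -> lantern-format conversion;
# only pandas is required.
import pandas as pd


def to_lantern_format(df: pd.DataFrame) -> list:
    if 'count' not in df or 'feature' not in df:
        raise ValueError('No "count" or "feature" found in data.')
    if len(df.columns) < 3:
        raise ValueError('Need at least one metrics column.')
    if len(df) == 0:
        raise ValueError('Data is empty')
    data = []
    for _, row in df.iterrows():
        metric_values = dict(row)           # column name -> value for this row
        feature = metric_values.pop('feature')
        data.append({'feature': feature, 'metricValues': metric_values})
    return data


df = pd.DataFrame({
    'feature': ['species:Iris-setosa', 'species:Iris-versicolor'],
    'count': [12, 11],
    'accuracy': [1.0, 0.72],
})
print(to_lantern_format(df))
# -> one dict per row, e.g.
# {'feature': 'species:Iris-setosa', 'metricValues': {'count': 12, 'accuracy': 1.0}}
```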
{"blob_id": "d58c3d0ba58abc3161954ae2dd6df303f0f3b343", "bodies": ["print(matrix_name, ':', file=file)\nmatr_print(self.repres_matr, file=file)\nprint(vector_name, ':', file=file)\nprint(self.repres_vect[0], file=file)\nprint('Average intensity:', self.avg_intensity, file=file)\nprint('Variation coefficient:', self.c_var, file=file)\nprint('=======END=======', '\\n', file=file)", "self.repres_vect = np.array(repres_vect, dtype=float)\nself.repres_matr = np.array(repres_matr, dtype=float)\nself.repres_matr_0 = -r_multiply_e(self.repres_matr)\nself.avg_intensity = -la.inv(r_multiply_e(np.dot(self.repres_vect, la.inv(self.repres_matr))))[0, 0]\nself.dim = self.repres_matr.shape[0]\nself.dim_ = self.dim + 1\nb1 = r_multiply_e(np.dot(self.repres_vect, la.inv(-self.repres_matr)))[0]\nb2 = 2 * r_multiply_e(np.dot(self.repres_vect, np.linalg.matrix_power(-self.repres_matr, -2)))[0]\nc_var2 = (b2 - b1 ** 2) / b1 ** 2\nself.c_var = sqrt(c_var2)"], "bodies_text": "<|body_start_0|>\n print(matrix_name, ':', file=file)\n matr_print(self.repres_matr, file=file)\n print(vector_name, ':', file=file)\n print(self.repres_vect[0], file=file)\n print('Average intensity:', self.avg_intensity, file=file)\n print('Variation coefficient:', self.c_var, file=file)\n print('=======END=======', '\\n', file=file)\n<|end_body_0|>\n\n<|body_start_1|>\n self.repres_vect = np.array(repres_vect, dtype=float)\n self.repres_matr = np.array(repres_matr, dtype=float)\n self.repres_matr_0 = -r_multiply_e(self.repres_matr)\n self.avg_intensity = -la.inv(r_multiply_e(np.dot(self.repres_vect, la.inv(self.repres_matr))))[0, 0]\n self.dim = self.repres_matr.shape[0]\n self.dim_ = self.dim + 1\n b1 = r_multiply_e(np.dot(self.repres_vect, la.inv(-self.repres_matr)))[0]\n b2 = 2 * r_multiply_e(np.dot(self.repres_vect, np.linalg.matrix_power(-self.repres_matr, -2)))[0]\n c_var2 = (b2 - b1 ** 2) / b1 ** 2\n self.c_var = sqrt(c_var2)\n<|end_body_1|>\n", "class_docstring": "PH stream class. Contains representation vector, representation matrix, representation matrix_0, stream control Markov chain dimensions, stream intensity, variation coefficient and correlation coefficient.", "class_name": "PHStream", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PHStream:\n \"\"\"PH stream class. 
Contains representation vector, representation matrix, representation matrix_0, stream control Markov chain dimensions, stream intensity, variation coefficient and correlation coefficient.\"\"\"\n\n def print_characteristics(self, matrix_name, vector_name, file=sys.stdout):\n \"\"\"Prints characteristics of PH stream: Matrix Vector Average intensity Variation coefficient Correlation coefficient :return: None\"\"\"\n <|body_0|>\n\n def __init__(self, repres_vect, repres_matr):\n \"\"\"Constructor for PHStream :param repres_vect: np.array or list with representation vector :param repres_matr: np.array or list with representation matrix\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n print(matrix_name, ':', file=file)\n matr_print(self.repres_matr, file=file)\n print(vector_name, ':', file=file)\n print(self.repres_vect[0], file=file)\n print('Average intensity:', self.avg_intensity, file=file)\n print('Variation coefficient:', self.c_var, file=file)\n print('=======END=======', '\\n', file=file)\n<|end_body_0|>\n\n<|body_start_1|>\n self.repres_vect = np.array(repres_vect, dtype=float)\n self.repres_matr = np.array(repres_matr, dtype=float)\n self.repres_matr_0 = -r_multiply_e(self.repres_matr)\n self.avg_intensity = -la.inv(r_multiply_e(np.dot(self.repres_vect, la.inv(self.repres_matr))))[0, 0]\n self.dim = self.repres_matr.shape[0]\n self.dim_ = self.dim + 1\n b1 = r_multiply_e(np.dot(self.repres_vect, la.inv(-self.repres_matr)))[0]\n b2 = 2 * r_multiply_e(np.dot(self.repres_vect, np.linalg.matrix_power(-self.repres_matr, -2)))[0]\n c_var2 = (b2 - b1 ** 2) / b1 ** 2\n self.c_var = sqrt(c_var2)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000448", "length_bytes": 15627, "license_type": "no_license", "methods": [{"docstring": "Prints characteristics of PH stream: Matrix Vector Average intensity Variation coefficient Correlation coefficient :return: None", "name": "print_characteristics", "signature": "def print_characteristics(self, matrix_name, vector_name, file=sys.stdout)"}, {"docstring": "Constructor for PHStream :param repres_vect: np.array or list with representation vector :param repres_matr: np.array or list with representation matrix", "name": "__init__", "signature": "def __init__(self, repres_vect, repres_matr)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_046105", "prompt": "Implement the Python class `PHStream` described below.\n\nClass description:\nPH stream class. Contains representation vector, representation matrix, representation matrix_0, stream control Markov chain dimensions, stream intensity, variation coefficient and correlation coefficient.\n\nMethod signatures and docstrings:\n- def print_characteristics(self, matrix_name, vector_name, file=sys.stdout): Prints characteristics of PH stream: Matrix Vector Average intensity Variation coefficient Correlation coefficient :return: None\n- def __init__(self, repres_vect, repres_matr): Constructor for PHStream :param repres_vect: np.array or list with representation vector :param repres_matr: np.array or list with representation matrix", "prompted_full_text": "Implement the Python class `PHStream` described below.\n\nClass description:\nPH stream class. 
Contains representation vector, representation matrix, representation matrix_0, stream control Markov chain dimensions, stream intensity, variation coefficient and correlation coefficient.\n\nMethod signatures and docstrings:\n- def print_characteristics(self, matrix_name, vector_name, file=sys.stdout): Prints characteristics of PH stream: Matrix Vector Average intensity Variation coefficient Correlation coefficient :return: None\n- def __init__(self, repres_vect, repres_matr): Constructor for PHStream :param repres_vect: np.array or list with representation vector :param repres_matr: np.array or list with representation matrix\n\n<|skeleton|>\nclass PHStream:\n \"\"\"PH stream class. Contains representation vector, representation matrix, representation matrix_0, stream control Markov chain dimensions, stream intensity, variation coefficient and correlation coefficient.\"\"\"\n\n def print_characteristics(self, matrix_name, vector_name, file=sys.stdout):\n \"\"\"Prints characteristics of PH stream: Matrix Vector Average intensity Variation coefficient Correlation coefficient :return: None\"\"\"\n <|body_0|>\n\n def __init__(self, repres_vect, repres_matr):\n \"\"\"Constructor for PHStream :param repres_vect: np.array or list with representation vector :param repres_matr: np.array or list with representation matrix\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n print(matrix_name, ':', file=file)\n matr_print(self.repres_matr, file=file)\n print(vector_name, ':', file=file)\n print(self.repres_vect[0], file=file)\n print('Average intensity:', self.avg_intensity, file=file)\n print('Variation coefficient:', self.c_var, file=file)\n print('=======END=======', '\\n', file=file)\n<|end_body_0|>\n\n<|body_start_1|>\n self.repres_vect = np.array(repres_vect, dtype=float)\n self.repres_matr = np.array(repres_matr, dtype=float)\n self.repres_matr_0 = -r_multiply_e(self.repres_matr)\n self.avg_intensity = -la.inv(r_multiply_e(np.dot(self.repres_vect, la.inv(self.repres_matr))))[0, 0]\n self.dim = self.repres_matr.shape[0]\n self.dim_ = self.dim + 1\n b1 = r_multiply_e(np.dot(self.repres_vect, la.inv(-self.repres_matr)))[0]\n b2 = 2 * r_multiply_e(np.dot(self.repres_vect, np.linalg.matrix_power(-self.repres_matr, -2)))[0]\n c_var2 = (b2 - b1 ** 2) / b1 ** 2\n self.c_var = sqrt(c_var2)\n<|end_body_1|>\n", "revision_id": "6173e0d279893f0da4f8ad09b824cd5897c4e5e7", "skeleton": "<|skeleton|>\nclass PHStream:\n \"\"\"PH stream class. Contains representation vector, representation matrix, representation matrix_0, stream control Markov chain dimensions, stream intensity, variation coefficient and correlation coefficient.\"\"\"\n\n def print_characteristics(self, matrix_name, vector_name, file=sys.stdout):\n \"\"\"Prints characteristics of PH stream: Matrix Vector Average intensity Variation coefficient Correlation coefficient :return: None\"\"\"\n <|body_0|>\n\n def __init__(self, repres_vect, repres_matr):\n \"\"\"Constructor for PHStream :param repres_vect: np.array or list with representation vector :param repres_matr: np.array or list with representation matrix\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class PHStream:\n \"\"\"PH stream class. 
Contains representation vector, representation matrix, representation matrix_0, stream control Markov chain dimensions, stream intensity, variation coefficient and correlation coefficient.\"\"\"\n\n def print_characteristics(self, matrix_name, vector_name, file=sys.stdout):\n \"\"\"Prints characteristics of PH stream: Matrix Vector Average intensity Variation coefficient Correlation coefficient :return: None\"\"\"\n print(matrix_name, ':', file=file)\n matr_print(self.repres_matr, file=file)\n print(vector_name, ':', file=file)\n print(self.repres_vect[0], file=file)\n print('Average intensity:', self.avg_intensity, file=file)\n print('Variation coefficient:', self.c_var, file=file)\n print('=======END=======', '\\n', file=file)\n\n def __init__(self, repres_vect, repres_matr):\n \"\"\"Constructor for PHStream :param repres_vect: np.array or list with representation vector :param repres_matr: np.array or list with representation matrix\"\"\"\n self.repres_vect = np.array(repres_vect, dtype=float)\n self.repres_matr = np.array(repres_matr, dtype=float)\n self.repres_matr_0 = -r_multiply_e(self.repres_matr)\n self.avg_intensity = -la.inv(r_multiply_e(np.dot(self.repres_vect, la.inv(self.repres_matr))))[0, 0]\n self.dim = self.repres_matr.shape[0]\n self.dim_ = self.dim + 1\n b1 = r_multiply_e(np.dot(self.repres_vect, la.inv(-self.repres_matr)))[0]\n b2 = 2 * r_multiply_e(np.dot(self.repres_vect, np.linalg.matrix_power(-self.repres_matr, -2)))[0]\n c_var2 = (b2 - b1 ** 2) / b1 ** 2\n self.c_var = sqrt(c_var2)\n", "source": "the_stack_v2_python_sparse", "source_path": "streams.py", "source_repo": "pishchynski/magister_work", "split": "val", "star_events_count": 0}
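The `PHStream` constructor packs the standard phase-type moment formulas into one-liners: with representation `(beta, S)` and `e` a column of ones (which is what `r_multiply_e` right-multiplies by), the first two moments are `b1 = beta (-S)^-1 e` and `b2 = 2 beta (-S)^-2 e`, the average intensity is `1/b1`, and the variation coefficient is `sqrt((b2 - b1**2) / b1**2)`. A numpy sketch with an arbitrary illustrative 2-phase representation (the values are not from the record):

```python
# Moment formulas behind PHStream.__init__, with r_multiply_e inlined as
# multiplication by a column of ones. beta/S form an arbitrary valid
# 2-phase PH representation chosen purely for illustration.
from math import sqrt

import numpy as np

beta = np.array([[0.6, 0.4]])        # representation (row) vector
S = np.array([[-2.0, 1.0],
              [0.0, -3.0]])          # representation matrix (subgenerator)

e = np.ones((S.shape[0], 1))
b1 = (beta @ np.linalg.matrix_power(-S, -1) @ e).item()      # E[T]
b2 = (2 * beta @ np.linalg.matrix_power(-S, -2) @ e).item()  # E[T^2]

# Equivalent to the record's -inv(r_multiply_e(beta @ inv(S)))[0, 0]:
avg_intensity = 1 / b1
c_var = sqrt((b2 - b1 ** 2) / b1 ** 2)
print(avg_intensity, c_var)
```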
{"blob_id": "abc0d0118e15c1e19275c4740415de0abf51e791", "bodies": ["self.driver = driver\nself.ProjectFilePath = GetProjectFilePath()\nself.Page_object_data_file = open(self.ProjectFilePath + '\\\\Page_object\\\\Data\\\\DistributionlineFlowDayCalc.yaml')\nself.Page_Data = yaml.load(self.Page_object_data_file)\nself.Page_object_data_file.close()\nself.Data = self.Page_Data['DistributionlineFlowDayCalc']\nself.TheoreticalLineLoss_ID = self.Data['TheoreticalLineLoss_ID']\nself.TheoreticalLineLossManualcal_Xpath = self.Data['TheoreticalLineLossManualcal_Xpath']\nself.DistributionlineFlowDayCalc_Frame = self.Data['DistributionlineFlowDayCalc_Frame']\nself.DistributionlineFlowDayCalc_id = self.Data['DistributionlineFlowDayCalc_id']\nself.DistributionlineFlowDayCalcCompany_Xpath = self.Data['DistributionlineFlowDayCalcCompany_Xpath']\nself.DistributionlineFlowDayCalcSubstation_Xpath = self.Data['DistributionlineFlowDayCalcSubstation_Xpath']\nself.DistributionlineFlowDayCalcDistributionline_Xpath = self.Data['DistributionlineFlowDayCalcDistributionline_Xpath']\nself.Algorithm_ID = self.Data['Algorithm_ID']\nself.Calculate_ID = self.Data['Calculate_ID']\nself.AlertMsg_ID = self.Data['AlertMsg_ID']\nself.AlertConfirm_ID = self.Data['AlertConfirm_ID']", "self.driver.find_element_by_id(self.TheoreticalLineLoss_ID).click()\ntime.sleep(2)\nself.driver.find_element_by_xpath(self.TheoreticalLineLossManualcal_Xpath).click()\ntime.sleep(2)\nself.driver.switch_to_frame(self.DistributionlineFlowDayCalc_Frame)\ntime.sleep(2)\nself.driver.find_element_by_id(self.DistributionlineFlowDayCalc_id).click()\ntime.sleep(2)\nself.driver.find_element_by_xpath(self.DistributionlineFlowDayCalcCompany_Xpath).click()\ntime.sleep(2)\nself.driver.find_element_by_xpath(self.DistributionlineFlowDayCalcSubstation_Xpath).click()\ntime.sleep(2)\nself.driver.find_element_by_xpath(self.DistributionlineFlowDayCalcDistributionline_Xpath).click()\ntime.sleep(2)\nself.driver.find_element_by_id(self.Algorithm_ID).click()\ntime.sleep(2)\nself.driver.find_element_by_id(self.Calculate_ID).click()\nself.driver.implicitly_wait(45)\nalerttext = self.driver.find_element_by_id(self.AlertMsg_ID).text\ntime.sleep(2)\nself.driver.find_element_by_id(self.AlertConfirm_ID).click()\ntime.sleep(1)\nreturn alerttext"], "bodies_text": "<|body_start_0|>\n self.driver = driver\n self.ProjectFilePath = GetProjectFilePath()\n self.Page_object_data_file = open(self.ProjectFilePath + '\\\\Page_object\\\\Data\\\\DistributionlineFlowDayCalc.yaml')\n self.Page_Data = yaml.load(self.Page_object_data_file)\n self.Page_object_data_file.close()\n self.Data = self.Page_Data['DistributionlineFlowDayCalc']\n self.TheoreticalLineLoss_ID = self.Data['TheoreticalLineLoss_ID']\n self.TheoreticalLineLossManualcal_Xpath = self.Data['TheoreticalLineLossManualcal_Xpath']\n self.DistributionlineFlowDayCalc_Frame = self.Data['DistributionlineFlowDayCalc_Frame']\n self.DistributionlineFlowDayCalc_id = self.Data['DistributionlineFlowDayCalc_id']\n self.DistributionlineFlowDayCalcCompany_Xpath = self.Data['DistributionlineFlowDayCalcCompany_Xpath']\n self.DistributionlineFlowDayCalcSubstation_Xpath = self.Data['DistributionlineFlowDayCalcSubstation_Xpath']\n self.DistributionlineFlowDayCalcDistributionline_Xpath = self.Data['DistributionlineFlowDayCalcDistributionline_Xpath']\n self.Algorithm_ID = self.Data['Algorithm_ID']\n self.Calculate_ID = self.Data['Calculate_ID']\n self.AlertMsg_ID = self.Data['AlertMsg_ID']\n self.AlertConfirm_ID = 
self.Data['AlertConfirm_ID']\n<|end_body_0|>\n\n<|body_start_1|>\n self.driver.find_element_by_id(self.TheoreticalLineLoss_ID).click()\n time.sleep(2)\n self.driver.find_element_by_xpath(self.TheoreticalLineLossManualcal_Xpath).click()\n time.sleep(2)\n self.driver.switch_to_frame(self.DistributionlineFlowDayCalc_Frame)\n time.sleep(2)\n self.driver.find_element_by_id(self.DistributionlineFlowDayCalc_id).click()\n time.sleep(2)\n self.driver.find_element_by_xpath(self.DistributionlineFlowDayCalcCompany_Xpath).click()\n time.sleep(2)\n self.driver.find_element_by_xpath(self.DistributionlineFlowDayCalcSubstation_Xpath).click()\n time.sleep(2)\n self.driver.find_element_by_xpath(self.DistributionlineFlowDayCalcDistributionline_Xpath).click()\n time.sleep(2)\n self.driver.find_element_by_id(self.Algorithm_ID).click()\n time.sleep(2)\n self.driver.find_element_by_id(self.Calculate_ID).click()\n self.driver.implicitly_wait(45)\n alerttext = self.driver.find_element_by_id(self.AlertMsg_ID).text\n time.sleep(2)\n self.driver.find_element_by_id(self.AlertConfirm_ID).click()\n time.sleep(1)\n return alerttext\n<|end_body_1|>\n", "class_docstring": "", "class_name": "DistributionlineFlowDayCalc", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DistributionlineFlowDayCalc:\n\n def __init__(self, driver):\n \"\"\"配电线路潮流精确算法计算\"\"\"\n <|body_0|>\n\n def DistributionlineFlowDayCalc_Fun(self):\n \"\"\"配电线路潮流精确算法计算\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.driver = driver\n self.ProjectFilePath = GetProjectFilePath()\n self.Page_object_data_file = open(self.ProjectFilePath + '\\\\Page_object\\\\Data\\\\DistributionlineFlowDayCalc.yaml')\n self.Page_Data = yaml.load(self.Page_object_data_file)\n self.Page_object_data_file.close()\n self.Data = self.Page_Data['DistributionlineFlowDayCalc']\n self.TheoreticalLineLoss_ID = self.Data['TheoreticalLineLoss_ID']\n self.TheoreticalLineLossManualcal_Xpath = self.Data['TheoreticalLineLossManualcal_Xpath']\n self.DistributionlineFlowDayCalc_Frame = self.Data['DistributionlineFlowDayCalc_Frame']\n self.DistributionlineFlowDayCalc_id = self.Data['DistributionlineFlowDayCalc_id']\n self.DistributionlineFlowDayCalcCompany_Xpath = self.Data['DistributionlineFlowDayCalcCompany_Xpath']\n self.DistributionlineFlowDayCalcSubstation_Xpath = self.Data['DistributionlineFlowDayCalcSubstation_Xpath']\n self.DistributionlineFlowDayCalcDistributionline_Xpath = self.Data['DistributionlineFlowDayCalcDistributionline_Xpath']\n self.Algorithm_ID = self.Data['Algorithm_ID']\n self.Calculate_ID = self.Data['Calculate_ID']\n self.AlertMsg_ID = self.Data['AlertMsg_ID']\n self.AlertConfirm_ID = self.Data['AlertConfirm_ID']\n<|end_body_0|>\n\n<|body_start_1|>\n self.driver.find_element_by_id(self.TheoreticalLineLoss_ID).click()\n time.sleep(2)\n self.driver.find_element_by_xpath(self.TheoreticalLineLossManualcal_Xpath).click()\n time.sleep(2)\n self.driver.switch_to_frame(self.DistributionlineFlowDayCalc_Frame)\n time.sleep(2)\n self.driver.find_element_by_id(self.DistributionlineFlowDayCalc_id).click()\n time.sleep(2)\n self.driver.find_element_by_xpath(self.DistributionlineFlowDayCalcCompany_Xpath).click()\n time.sleep(2)\n self.driver.find_element_by_xpath(self.DistributionlineFlowDayCalcSubstation_Xpath).click()\n time.sleep(2)\n self.driver.find_element_by_xpath(self.DistributionlineFlowDayCalcDistributionline_Xpath).click()\n time.sleep(2)\n 
self.driver.find_element_by_id(self.Algorithm_ID).click()\n time.sleep(2)\n self.driver.find_element_by_id(self.Calculate_ID).click()\n self.driver.implicitly_wait(45)\n alerttext = self.driver.find_element_by_id(self.AlertMsg_ID).text\n time.sleep(2)\n self.driver.find_element_by_id(self.AlertConfirm_ID).click()\n time.sleep(1)\n return alerttext\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000449", "length_bytes": 4131, "license_type": "no_license", "methods": [{"docstring": "配电线路潮流精确算法计算", "name": "__init__", "signature": "def __init__(self, driver)"}, {"docstring": "配电线路潮流精确算法计算", "name": "DistributionlineFlowDayCalc_Fun", "signature": "def DistributionlineFlowDayCalc_Fun(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000772", "prompt": "Implement the Python class `DistributionlineFlowDayCalc` described below.\n\nClass description:\nImplement the DistributionlineFlowDayCalc class.\n\nMethod signatures and docstrings:\n- def __init__(self, driver): 配电线路潮流精确算法计算\n- def DistributionlineFlowDayCalc_Fun(self): 配电线路潮流精确算法计算", "prompted_full_text": "Implement the Python class `DistributionlineFlowDayCalc` described below.\n\nClass description:\nImplement the DistributionlineFlowDayCalc class.\n\nMethod signatures and docstrings:\n- def __init__(self, driver): 配电线路潮流精确算法计算\n- def DistributionlineFlowDayCalc_Fun(self): 配电线路潮流精确算法计算\n\n<|skeleton|>\nclass DistributionlineFlowDayCalc:\n\n def __init__(self, driver):\n \"\"\"配电线路潮流精确算法计算\"\"\"\n <|body_0|>\n\n def DistributionlineFlowDayCalc_Fun(self):\n \"\"\"配电线路潮流精确算法计算\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.driver = driver\n self.ProjectFilePath = GetProjectFilePath()\n self.Page_object_data_file = open(self.ProjectFilePath + '\\\\Page_object\\\\Data\\\\DistributionlineFlowDayCalc.yaml')\n self.Page_Data = yaml.load(self.Page_object_data_file)\n self.Page_object_data_file.close()\n self.Data = self.Page_Data['DistributionlineFlowDayCalc']\n self.TheoreticalLineLoss_ID = self.Data['TheoreticalLineLoss_ID']\n self.TheoreticalLineLossManualcal_Xpath = self.Data['TheoreticalLineLossManualcal_Xpath']\n self.DistributionlineFlowDayCalc_Frame = self.Data['DistributionlineFlowDayCalc_Frame']\n self.DistributionlineFlowDayCalc_id = self.Data['DistributionlineFlowDayCalc_id']\n self.DistributionlineFlowDayCalcCompany_Xpath = self.Data['DistributionlineFlowDayCalcCompany_Xpath']\n self.DistributionlineFlowDayCalcSubstation_Xpath = self.Data['DistributionlineFlowDayCalcSubstation_Xpath']\n self.DistributionlineFlowDayCalcDistributionline_Xpath = self.Data['DistributionlineFlowDayCalcDistributionline_Xpath']\n self.Algorithm_ID = self.Data['Algorithm_ID']\n self.Calculate_ID = self.Data['Calculate_ID']\n self.AlertMsg_ID = self.Data['AlertMsg_ID']\n self.AlertConfirm_ID = self.Data['AlertConfirm_ID']\n<|end_body_0|>\n\n<|body_start_1|>\n self.driver.find_element_by_id(self.TheoreticalLineLoss_ID).click()\n time.sleep(2)\n self.driver.find_element_by_xpath(self.TheoreticalLineLossManualcal_Xpath).click()\n time.sleep(2)\n self.driver.switch_to_frame(self.DistributionlineFlowDayCalc_Frame)\n time.sleep(2)\n self.driver.find_element_by_id(self.DistributionlineFlowDayCalc_id).click()\n time.sleep(2)\n self.driver.find_element_by_xpath(self.DistributionlineFlowDayCalcCompany_Xpath).click()\n time.sleep(2)\n self.driver.find_element_by_xpath(self.DistributionlineFlowDayCalcSubstation_Xpath).click()\n time.sleep(2)\n 
self.driver.find_element_by_xpath(self.DistributionlineFlowDayCalcDistributionline_Xpath).click()\n time.sleep(2)\n self.driver.find_element_by_id(self.Algorithm_ID).click()\n time.sleep(2)\n self.driver.find_element_by_id(self.Calculate_ID).click()\n self.driver.implicitly_wait(45)\n alerttext = self.driver.find_element_by_id(self.AlertMsg_ID).text\n time.sleep(2)\n self.driver.find_element_by_id(self.AlertConfirm_ID).click()\n time.sleep(1)\n return alerttext\n<|end_body_1|>\n", "revision_id": "190796e380df1e28770f73a392ac92f482eb9809", "skeleton": "<|skeleton|>\nclass DistributionlineFlowDayCalc:\n\n def __init__(self, driver):\n \"\"\"配电线路潮流精确算法计算\"\"\"\n <|body_0|>\n\n def DistributionlineFlowDayCalc_Fun(self):\n \"\"\"配电线路潮流精确算法计算\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DistributionlineFlowDayCalc:\n def __init__(self, driver):\n \"\"\"配电线路潮流精确算法计算\"\"\"\n self.driver = driver\n self.ProjectFilePath = GetProjectFilePath()\n self.Page_object_data_file = open(self.ProjectFilePath + '\\\\Page_object\\\\Data\\\\DistributionlineFlowDayCalc.yaml')\n self.Page_Data = yaml.load(self.Page_object_data_file)\n self.Page_object_data_file.close()\n self.Data = self.Page_Data['DistributionlineFlowDayCalc']\n self.TheoreticalLineLoss_ID = self.Data['TheoreticalLineLoss_ID']\n self.TheoreticalLineLossManualcal_Xpath = self.Data['TheoreticalLineLossManualcal_Xpath']\n self.DistributionlineFlowDayCalc_Frame = self.Data['DistributionlineFlowDayCalc_Frame']\n self.DistributionlineFlowDayCalc_id = self.Data['DistributionlineFlowDayCalc_id']\n self.DistributionlineFlowDayCalcCompany_Xpath = self.Data['DistributionlineFlowDayCalcCompany_Xpath']\n self.DistributionlineFlowDayCalcSubstation_Xpath = self.Data['DistributionlineFlowDayCalcSubstation_Xpath']\n self.DistributionlineFlowDayCalcDistributionline_Xpath = self.Data['DistributionlineFlowDayCalcDistributionline_Xpath']\n self.Algorithm_ID = self.Data['Algorithm_ID']\n self.Calculate_ID = self.Data['Calculate_ID']\n self.AlertMsg_ID = self.Data['AlertMsg_ID']\n self.AlertConfirm_ID = self.Data['AlertConfirm_ID']\n\n def DistributionlineFlowDayCalc_Fun(self):\n \"\"\"配电线路潮流精确算法计算\"\"\"\n self.driver.find_element_by_id(self.TheoreticalLineLoss_ID).click()\n time.sleep(2)\n self.driver.find_element_by_xpath(self.TheoreticalLineLossManualcal_Xpath).click()\n time.sleep(2)\n self.driver.switch_to_frame(self.DistributionlineFlowDayCalc_Frame)\n time.sleep(2)\n self.driver.find_element_by_id(self.DistributionlineFlowDayCalc_id).click()\n time.sleep(2)\n self.driver.find_element_by_xpath(self.DistributionlineFlowDayCalcCompany_Xpath).click()\n time.sleep(2)\n self.driver.find_element_by_xpath(self.DistributionlineFlowDayCalcSubstation_Xpath).click()\n time.sleep(2)\n self.driver.find_element_by_xpath(self.DistributionlineFlowDayCalcDistributionline_Xpath).click()\n time.sleep(2)\n self.driver.find_element_by_id(self.Algorithm_ID).click()\n time.sleep(2)\n self.driver.find_element_by_id(self.Calculate_ID).click()\n self.driver.implicitly_wait(45)\n alerttext = self.driver.find_element_by_id(self.AlertMsg_ID).text\n time.sleep(2)\n self.driver.find_element_by_id(self.AlertConfirm_ID).click()\n time.sleep(1)\n return alerttext\n", "source": "the_stack_v2_python_sparse", "source_path": "Project/Page_object/Page_object/DistributionlineFlowDayCalc.py", "source_repo": "RainsWang/Python2.7-Selenium", "split": 
"val", "star_events_count": 1}
{"blob_id": "9e6d290b90f244d3fbe15f8970240204f82ca49f", "bodies": ["if not functions_dict:\n functions_dict = {}\nsuper(Filter, self).__init__(functions_dict)\nself.Name = Name", "for field in self:\n for function in self[field]:\n try:\n if field is None:\n if not function(item):\n return False\n elif not function(getattr(item, field)):\n return False\n except AttributeError:\n return False\nreturn True"], "bodies_text": "<|body_start_0|>\n if not functions_dict:\n functions_dict = {}\n super(Filter, self).__init__(functions_dict)\n self.Name = Name\n<|end_body_0|>\n\n<|body_start_1|>\n for field in self:\n for function in self[field]:\n try:\n if field is None:\n if not function(item):\n return False\n elif not function(getattr(item, field)):\n return False\n except AttributeError:\n return False\n return True\n<|end_body_1|>\n", "class_docstring": "Dictionary of functions, i.e. selection criteria", "class_name": "Filter", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Filter:\n \"\"\"Dictionary of functions, i.e. selection criteria\"\"\"\n\n def __init__(self, Name, functions_dict=None):\n \"\"\"Returns a new Filter object with given name and funtions\"\"\"\n <|body_0|>\n\n def __call__(self, item):\n \"\"\"Returns True if the item satisfies all criteria\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not functions_dict:\n functions_dict = {}\n super(Filter, self).__init__(functions_dict)\n self.Name = Name\n<|end_body_0|>\n\n<|body_start_1|>\n for field in self:\n for function in self[field]:\n try:\n if field is None:\n if not function(item):\n return False\n elif not function(getattr(item, field)):\n return False\n except AttributeError:\n return False\n return True\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000450", "length_bytes": 3804, "license_type": "permissive", "methods": [{"docstring": "Returns a new Filter object with given name and funtions", "name": "__init__", "signature": "def __init__(self, Name, functions_dict=None)"}, {"docstring": "Returns True if the item satisfies all criteria", "name": "__call__", "signature": "def __call__(self, item)"}], "n_methods": 2, "prompt": "Implement the Python class `Filter` described below.\n\nClass description:\nDictionary of functions, i.e. selection criteria\n\nMethod signatures and docstrings:\n- def __init__(self, Name, functions_dict=None): Returns a new Filter object with given name and funtions\n- def __call__(self, item): Returns True if the item satisfies all criteria", "prompted_full_text": "Implement the Python class `Filter` described below.\n\nClass description:\nDictionary of functions, i.e. selection criteria\n\nMethod signatures and docstrings:\n- def __init__(self, Name, functions_dict=None): Returns a new Filter object with given name and funtions\n- def __call__(self, item): Returns True if the item satisfies all criteria\n\n<|skeleton|>\nclass Filter:\n \"\"\"Dictionary of functions, i.e. 
selection criteria\"\"\"\n\n def __init__(self, Name, functions_dict=None):\n \"\"\"Returns a new Filter object with given name and funtions\"\"\"\n <|body_0|>\n\n def __call__(self, item):\n \"\"\"Returns True if the item satisfies all criteria\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not functions_dict:\n functions_dict = {}\n super(Filter, self).__init__(functions_dict)\n self.Name = Name\n<|end_body_0|>\n\n<|body_start_1|>\n for field in self:\n for function in self[field]:\n try:\n if field is None:\n if not function(item):\n return False\n elif not function(getattr(item, field)):\n return False\n except AttributeError:\n return False\n return True\n<|end_body_1|>\n", "revision_id": "fe6f8c8dfed86d39c80f2804a753c05bb2e485b4", "skeleton": "<|skeleton|>\nclass Filter:\n \"\"\"Dictionary of functions, i.e. selection criteria\"\"\"\n\n def __init__(self, Name, functions_dict=None):\n \"\"\"Returns a new Filter object with given name and funtions\"\"\"\n <|body_0|>\n\n def __call__(self, item):\n \"\"\"Returns True if the item satisfies all criteria\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Filter:\n \"\"\"Dictionary of functions, i.e. selection criteria\"\"\"\n\n def __init__(self, Name, functions_dict=None):\n \"\"\"Returns a new Filter object with given name and funtions\"\"\"\n if not functions_dict:\n functions_dict = {}\n super(Filter, self).__init__(functions_dict)\n self.Name = Name\n\n def __call__(self, item):\n \"\"\"Returns True if the item satisfies all criteria\"\"\"\n for field in self:\n for function in self[field]:\n try:\n if field is None:\n if not function(item):\n return False\n elif not function(getattr(item, field)):\n return False\n except AttributeError:\n return False\n return True\n", "source": "the_stack_v2_python_sparse", "source_path": "scripts/venv/lib/python2.7/site-packages/cogent/util/organizer.py", "source_repo": "sauloal/cnidaria", "split": "val", "star_events_count": 3}
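The `super(Filter, self).__init__(functions_dict)` call in this record only does something useful if `Filter` subclasses `dict`, as it presumably does in the PyCogent module the `source_path` points at; the skeleton's bare `class Filter:` drops that base. A sketch with the base class made explicit, plus a usage example:

```python
# Filter as it must be shaped for super().__init__(functions_dict) to work:
# a dict mapping attribute name -> list of predicates, where the key None
# applies its predicates to the item itself.
class Filter(dict):
    """Dictionary of functions, i.e. selection criteria."""

    def __init__(self, name, functions_dict=None):
        super().__init__(functions_dict or {})
        self.Name = name

    def __call__(self, item):
        for field, functions in self.items():
            for function in functions:
                try:
                    target = item if field is None else getattr(item, field)
                    if not function(target):
                        return False
                except AttributeError:
                    return False
        return True


class Sequence:
    def __init__(self, length):
        self.length = length


long_enough = Filter('LongEnough', {'length': [lambda n: n >= 10]})
print([long_enough(Sequence(n)) for n in (5, 12)])  # [False, True]
```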
{"blob_id": "d5740db1797235047736880d74e4c394c2510870", "bodies": ["args = {'post_month': 1, 'post_day': 1, 'post_year': 2015, 'post_slug': 'non-existent-post'}\nresponse = self.client.get(reverse('blog:post_detail', kwargs=args))\nself.assertEqual(response.status_code, 404)", "tags = ['tag1', 'tag2', 'tag3']\npost = add_post(title='Title 1', text='Text 1', tags=tags)\npost.publish()\nargs = {'post_month': post.published_date.month, 'post_day': post.published_date.day, 'post_year': post.published_date.year, 'post_slug': post.slug}\nresponse = self.client.get(reverse('blog:post_detail', kwargs=args))\nself.assertEqual(response.status_code, 200)\nself.assertContains(response, post.title)\nself.assertContains(response, post.text)\nfor tag in tags:\n self.assertContains(response, tag)", "post = add_post(title='Title 1', text='Text 1')\npost.publish()\npost.published_date += timedelta(days=1)\npost.save()\nkwargs = {'post_month': post.published_date.month, 'post_day': post.published_date.day, 'post_year': post.published_date.year, 'post_slug': post.slug}\nresponse = self.client.get(reverse('blog:post_detail', kwargs=kwargs))\nself.assertEqual(response.status_code, 404)"], "bodies_text": "<|body_start_0|>\n args = {'post_month': 1, 'post_day': 1, 'post_year': 2015, 'post_slug': 'non-existent-post'}\n response = self.client.get(reverse('blog:post_detail', kwargs=args))\n self.assertEqual(response.status_code, 404)\n<|end_body_0|>\n\n<|body_start_1|>\n tags = ['tag1', 'tag2', 'tag3']\n post = add_post(title='Title 1', text='Text 1', tags=tags)\n post.publish()\n args = {'post_month': post.published_date.month, 'post_day': post.published_date.day, 'post_year': post.published_date.year, 'post_slug': post.slug}\n response = self.client.get(reverse('blog:post_detail', kwargs=args))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, post.title)\n self.assertContains(response, post.text)\n for tag in tags:\n self.assertContains(response, tag)\n<|end_body_1|>\n\n<|body_start_2|>\n post = add_post(title='Title 1', text='Text 1')\n post.publish()\n post.published_date += timedelta(days=1)\n post.save()\n kwargs = {'post_month': post.published_date.month, 'post_day': post.published_date.day, 'post_year': post.published_date.year, 'post_slug': post.slug}\n response = self.client.get(reverse('blog:post_detail', kwargs=kwargs))\n self.assertEqual(response.status_code, 404)\n<|end_body_2|>\n", "class_docstring": "Tests post detail page of blog app.", "class_name": "PostDetailTests", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PostDetailTests:\n \"\"\"Tests post detail page of blog app.\"\"\"\n\n def test_post_detail_for_nonexistent_post(self):\n \"\"\"post_detail should show 404 page for non-existent post.\"\"\"\n <|body_0|>\n\n def test_post_detail_for_published_post(self):\n \"\"\"post_detail should display published post content.\"\"\"\n <|body_1|>\n\n def test_post_published_in_future(self):\n \"\"\"Make sure post with future published date not accessible.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n args = {'post_month': 1, 'post_day': 1, 'post_year': 2015, 'post_slug': 'non-existent-post'}\n response = self.client.get(reverse('blog:post_detail', kwargs=args))\n self.assertEqual(response.status_code, 404)\n<|end_body_0|>\n\n<|body_start_1|>\n tags = ['tag1', 'tag2', 'tag3']\n post = add_post(title='Title 1', text='Text 1', tags=tags)\n post.publish()\n args = {'post_month': 
post.published_date.month, 'post_day': post.published_date.day, 'post_year': post.published_date.year, 'post_slug': post.slug}\n response = self.client.get(reverse('blog:post_detail', kwargs=args))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, post.title)\n self.assertContains(response, post.text)\n for tag in tags:\n self.assertContains(response, tag)\n<|end_body_1|>\n\n<|body_start_2|>\n post = add_post(title='Title 1', text='Text 1')\n post.publish()\n post.published_date += timedelta(days=1)\n post.save()\n kwargs = {'post_month': post.published_date.month, 'post_day': post.published_date.day, 'post_year': post.published_date.year, 'post_slug': post.slug}\n response = self.client.get(reverse('blog:post_detail', kwargs=kwargs))\n self.assertEqual(response.status_code, 404)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000451", "length_bytes": 12627, "license_type": "no_license", "methods": [{"docstring": "post_detail should show 404 page for non-existent post.", "name": "test_post_detail_for_nonexistent_post", "signature": "def test_post_detail_for_nonexistent_post(self)"}, {"docstring": "post_detail should display published post content.", "name": "test_post_detail_for_published_post", "signature": "def test_post_detail_for_published_post(self)"}, {"docstring": "Make sure post with future published date not accessible.", "name": "test_post_published_in_future", "signature": "def test_post_published_in_future(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_049857", "prompt": "Implement the Python class `PostDetailTests` described below.\n\nClass description:\nTests post detail page of blog app.\n\nMethod signatures and docstrings:\n- def test_post_detail_for_nonexistent_post(self): post_detail should show 404 page for non-existent post.\n- def test_post_detail_for_published_post(self): post_detail should display published post content.\n- def test_post_published_in_future(self): Make sure post with future published date not accessible.", "prompted_full_text": "Implement the Python class `PostDetailTests` described below.\n\nClass description:\nTests post detail page of blog app.\n\nMethod signatures and docstrings:\n- def test_post_detail_for_nonexistent_post(self): post_detail should show 404 page for non-existent post.\n- def test_post_detail_for_published_post(self): post_detail should display published post content.\n- def test_post_published_in_future(self): Make sure post with future published date not accessible.\n\n<|skeleton|>\nclass PostDetailTests:\n \"\"\"Tests post detail page of blog app.\"\"\"\n\n def test_post_detail_for_nonexistent_post(self):\n \"\"\"post_detail should show 404 page for non-existent post.\"\"\"\n <|body_0|>\n\n def test_post_detail_for_published_post(self):\n \"\"\"post_detail should display published post content.\"\"\"\n <|body_1|>\n\n def test_post_published_in_future(self):\n \"\"\"Make sure post with future published date not accessible.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n args = {'post_month': 1, 'post_day': 1, 'post_year': 2015, 'post_slug': 'non-existent-post'}\n response = self.client.get(reverse('blog:post_detail', kwargs=args))\n self.assertEqual(response.status_code, 404)\n<|end_body_0|>\n\n<|body_start_1|>\n tags = ['tag1', 'tag2', 'tag3']\n post = add_post(title='Title 1', text='Text 1', tags=tags)\n post.publish()\n args = {'post_month': post.published_date.month, 'post_day': post.published_date.day, 'post_year': 
post.published_date.year, 'post_slug': post.slug}\n response = self.client.get(reverse('blog:post_detail', kwargs=args))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, post.title)\n self.assertContains(response, post.text)\n for tag in tags:\n self.assertContains(response, tag)\n<|end_body_1|>\n\n<|body_start_2|>\n post = add_post(title='Title 1', text='Text 1')\n post.publish()\n post.published_date += timedelta(days=1)\n post.save()\n kwargs = {'post_month': post.published_date.month, 'post_day': post.published_date.day, 'post_year': post.published_date.year, 'post_slug': post.slug}\n response = self.client.get(reverse('blog:post_detail', kwargs=kwargs))\n self.assertEqual(response.status_code, 404)\n<|end_body_2|>\n", "revision_id": "609d02846e7b0a0d67e7317a5491032f582d7a19", "skeleton": "<|skeleton|>\nclass PostDetailTests:\n \"\"\"Tests post detail page of blog app.\"\"\"\n\n def test_post_detail_for_nonexistent_post(self):\n \"\"\"post_detail should show 404 page for non-existent post.\"\"\"\n <|body_0|>\n\n def test_post_detail_for_published_post(self):\n \"\"\"post_detail should display published post content.\"\"\"\n <|body_1|>\n\n def test_post_published_in_future(self):\n \"\"\"Make sure post with future published date not accessible.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class PostDetailTests:\n \"\"\"Tests post detail page of blog app.\"\"\"\n\n def test_post_detail_for_nonexistent_post(self):\n \"\"\"post_detail should show 404 page for non-existent post.\"\"\"\n args = {'post_month': 1, 'post_day': 1, 'post_year': 2015, 'post_slug': 'non-existent-post'}\n response = self.client.get(reverse('blog:post_detail', kwargs=args))\n self.assertEqual(response.status_code, 404)\n\n def test_post_detail_for_published_post(self):\n \"\"\"post_detail should display published post content.\"\"\"\n tags = ['tag1', 'tag2', 'tag3']\n post = add_post(title='Title 1', text='Text 1', tags=tags)\n post.publish()\n args = {'post_month': post.published_date.month, 'post_day': post.published_date.day, 'post_year': post.published_date.year, 'post_slug': post.slug}\n response = self.client.get(reverse('blog:post_detail', kwargs=args))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, post.title)\n self.assertContains(response, post.text)\n for tag in tags:\n self.assertContains(response, tag)\n\n def test_post_published_in_future(self):\n \"\"\"Make sure post with future published date not accessible.\"\"\"\n post = add_post(title='Title 1', text='Text 1')\n post.publish()\n post.published_date += timedelta(days=1)\n post.save()\n kwargs = {'post_month': post.published_date.month, 'post_day': post.published_date.day, 'post_year': post.published_date.year, 'post_slug': post.slug}\n response = self.client.get(reverse('blog:post_detail', kwargs=kwargs))\n self.assertEqual(response.status_code, 404)\n", "source": "the_stack_v2_python_sparse", "source_path": "robcutmore-com/apps/blog/tests.py", "source_repo": "rcutmore/robcutmore.com", "split": "val", "star_events_count": 0}
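`test_post_published_in_future` in this record only passes if the view under test filters on `published_date <= now`. The view itself is not included, but a hypothetical sketch of the lookup the test implies would look like the following, where the `Post` model, the `blog.models` path, and the template name are all assumptions, not code from the record:

```python
# Hypothetical view consistent with the tests above: the published_date__lte
# filter is what turns a future-dated post into a 404.
from django.shortcuts import get_object_or_404, render
from django.utils import timezone

from blog.models import Post  # assumed app/model, implied by add_post()


def post_detail(request, post_year, post_month, post_day, post_slug):
    post = get_object_or_404(
        Post,
        slug=post_slug,
        published_date__year=post_year,
        published_date__month=post_month,
        published_date__day=post_day,
        published_date__lte=timezone.now(),  # hides posts published in the future
    )
    return render(request, 'blog/post_detail.html', {'post': post})
```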
{"blob_id": "ab1c02a75856d7dec84ab38f854f9d3ef21778ea", "bodies": ["self.timeData: TimeData = timeData\nself.sampleFreq: float = timeData.sampleFreq * 1.0\nself.chans: List = timeData.chans\nself.numSamples: int = timeData.numSamples\nself.decParams: DecimationParameters = decParams\nconfig = loadConfig()\nself.minSamples: int = config['Decimation']['minsamples']\nself.level: int = -1\nself.maxDownsampleFactor: int = 8", "self.level = self.level + 1\ndownsampleFactor = self.decParams.getIncrementalFactor(self.level)\nnumDownsamples = 1\ndownsampleList = [downsampleFactor]\nif downsampleFactor > self.maxDownsampleFactor:\n numDownsamples = intdiv(downsampleFactor, self.maxDownsampleFactor)\n downsampleList = [self.maxDownsampleFactor, numDownsamples]\n self.printText('Downsample factor of {:d} greater than max decimation factor {:d}.'.format(downsampleFactor, self.maxDownsampleFactor))\n self.printText('Downsampling in multiple decimations given by factors: {}'.format(arrayToStringInt(downsampleList)))\nfor iDS in range(0, numDownsamples):\n check = self.downsample(downsampleList[iDS])\n if not check:\n return False\nreturn True", "if self.level >= self.decParams.numLevels:\n self.printWarning('Error, number of decimation levels exceeded, returning no data')\n return False\nif downsampleFactor == 1:\n return True\nif self.numSamples / downsampleFactor < self.minSamples:\n self.printWarning('Next decimation level has less than {} samples. Decimation is exiting.\\nSet minimum of samples required using decimator.setMinSamples().'.format(self.minSamples))\n return False\nself.timeData = downsample(self.timeData, downsampleFactor)\nself.sampleFreq = self.timeData.sampleFreq\nself.numSamples = self.timeData.numSamples\nreturn True", "textLst = []\ntextLst.append('Current level = {:d}'.format(self.level))\nif self.level == -1:\n textLst.append('This is the initial level - no decimation has occured')\ntextLst.append('Current sample freq. 
[Hz] = {:.6f}'.format(self.sampleFreq))\ntextLst.append('Current sample rate [s] = {:.6f}'.format(1.0 / self.sampleFreq))\ntextLst.append('Current number of samples = {:d}'.format(self.numSamples))\nreturn textLst"], "bodies_text": "<|body_start_0|>\n self.timeData: TimeData = timeData\n self.sampleFreq: float = timeData.sampleFreq * 1.0\n self.chans: List = timeData.chans\n self.numSamples: int = timeData.numSamples\n self.decParams: DecimationParameters = decParams\n config = loadConfig()\n self.minSamples: int = config['Decimation']['minsamples']\n self.level: int = -1\n self.maxDownsampleFactor: int = 8\n<|end_body_0|>\n\n<|body_start_1|>\n self.level = self.level + 1\n downsampleFactor = self.decParams.getIncrementalFactor(self.level)\n numDownsamples = 1\n downsampleList = [downsampleFactor]\n if downsampleFactor > self.maxDownsampleFactor:\n numDownsamples = intdiv(downsampleFactor, self.maxDownsampleFactor)\n downsampleList = [self.maxDownsampleFactor, numDownsamples]\n self.printText('Downsample factor of {:d} greater than max decimation factor {:d}.'.format(downsampleFactor, self.maxDownsampleFactor))\n self.printText('Downsampling in multiple decimations given by factors: {}'.format(arrayToStringInt(downsampleList)))\n for iDS in range(0, numDownsamples):\n check = self.downsample(downsampleList[iDS])\n if not check:\n return False\n return True\n<|end_body_1|>\n\n<|body_start_2|>\n if self.level >= self.decParams.numLevels:\n self.printWarning('Error, number of decimation levels exceeded, returning no data')\n return False\n if downsampleFactor == 1:\n return True\n if self.numSamples / downsampleFactor < self.minSamples:\n self.printWarning('Next decimation level has less than {} samples. Decimation is exiting.\\nSet minimum of samples required using decimator.setMinSamples().'.format(self.minSamples))\n return False\n self.timeData = downsample(self.timeData, downsampleFactor)\n self.sampleFreq = self.timeData.sampleFreq\n self.numSamples = self.timeData.numSamples\n return True\n<|end_body_2|>\n\n<|body_start_3|>\n textLst = []\n textLst.append('Current level = {:d}'.format(self.level))\n if self.level == -1:\n textLst.append('This is the initial level - no decimation has occured')\n textLst.append('Current sample freq. [Hz] = {:.6f}'.format(self.sampleFreq))\n textLst.append('Current sample rate [s] = {:.6f}'.format(1.0 / self.sampleFreq))\n textLst.append('Current number of samples = {:d}'.format(self.numSamples))\n return textLst\n<|end_body_3|>\n", "class_docstring": "Decimate time data Decimates time data by factors until the minimum number of required samples is reached. When a downsample factor is too large, downsampling is performed in multiple steps to maintain accuracy of result. Attributes ---------- timeData : TimeData timeData object to decimate sampleFreq : float Sampling frequency of time data in Hz chans : List[str] Channels in time data numSamples : int Number of samples in timeData decParams : DecimationParams A DecimationParams object holding decimation information minSamples : int Minimum required samples to decimate level : int Current decimation level maxDownSampleFactor : int Max allowable downsampling in one go. Downsampling becomes le", "class_name": "Decimator", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Decimator:\n \"\"\"Decimate time data Decimates time data by factors until the minimum number of required samples is reached. 
When a downsample factor is too large, downsampling is performed in multiple steps to maintain accuracy of result. Attributes ---------- timeData : TimeData timeData object to decimate sampleFreq : float Sampling frequency of time data in Hz chans : List[str] Channels in time data numSamples : int Number of samples in timeData decParams : DecimationParams A DecimationParams object holding decimation information minSamples : int Minimum required samples to decimate level : int Current decimation level maxDownSampleFactor : int Max allowable downsampling in one go. Downsampling becomes le\"\"\"\n\n def __init__(self, timeData: TimeData, decParams: DecimationParameters) -> None:\n \"\"\"Initialise with timeData and decimation parameters Parameters ---------- timeData : TimeData The time data to decimate decParams : DecimationParams Decimation parameters for performing the decimation\"\"\"\n <|body_0|>\n\n def incrementLevel(self) -> bool:\n \"\"\"Downsample to the next decimation level Returns ------- out : bool True if downsampling completed successfully. False otherwise Notes ----- When the downsampling factor is too large, downsampling is performed in multiple steps. Downsampling will become increasingly inaccurate using the scipy routine when factor is too large\"\"\"\n <|body_1|>\n\n def downsample(self, downsampleFactor: int) -> bool:\n \"\"\"Downsample time data Parameters ---------- downsampleFactor : int Downsampling factor Returns ------- bool True if downsampling completed successfully. False otherwise Notes ----- When the downsampling causes number of samples to fall below minSamples, downsampling is not performed. The function returns False in this situation\"\"\"\n <|body_2|>\n\n def printList(self) -> List[str]:\n \"\"\"Class information as a list of strings Returns ------- out : list List of strings with information\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.timeData: TimeData = timeData\n self.sampleFreq: float = timeData.sampleFreq * 1.0\n self.chans: List = timeData.chans\n self.numSamples: int = timeData.numSamples\n self.decParams: DecimationParameters = decParams\n config = loadConfig()\n self.minSamples: int = config['Decimation']['minsamples']\n self.level: int = -1\n self.maxDownsampleFactor: int = 8\n<|end_body_0|>\n\n<|body_start_1|>\n self.level = self.level + 1\n downsampleFactor = self.decParams.getIncrementalFactor(self.level)\n numDownsamples = 1\n downsampleList = [downsampleFactor]\n if downsampleFactor > self.maxDownsampleFactor:\n numDownsamples = intdiv(downsampleFactor, self.maxDownsampleFactor)\n downsampleList = [self.maxDownsampleFactor, numDownsamples]\n self.printText('Downsample factor of {:d} greater than max decimation factor {:d}.'.format(downsampleFactor, self.maxDownsampleFactor))\n self.printText('Downsampling in multiple decimations given by factors: {}'.format(arrayToStringInt(downsampleList)))\n for iDS in range(0, numDownsamples):\n check = self.downsample(downsampleList[iDS])\n if not check:\n return False\n return True\n<|end_body_1|>\n\n<|body_start_2|>\n if self.level >= self.decParams.numLevels:\n self.printWarning('Error, number of decimation levels exceeded, returning no data')\n return False\n if downsampleFactor == 1:\n return True\n if self.numSamples / downsampleFactor < self.minSamples:\n self.printWarning('Next decimation level has less than {} samples. 
Decimation is exiting.\\nSet minimum of samples required using decimator.setMinSamples().'.format(self.minSamples))\n return False\n self.timeData = downsample(self.timeData, downsampleFactor)\n self.sampleFreq = self.timeData.sampleFreq\n self.numSamples = self.timeData.numSamples\n return True\n<|end_body_2|>\n\n<|body_start_3|>\n textLst = []\n textLst.append('Current level = {:d}'.format(self.level))\n if self.level == -1:\n textLst.append('This is the initial level - no decimation has occurred')\n textLst.append('Current sample freq. [Hz] = {:.6f}'.format(self.sampleFreq))\n textLst.append('Current sample rate [s] = {:.6f}'.format(1.0 / self.sampleFreq))\n textLst.append('Current number of samples = {:d}'.format(self.numSamples))\n return textLst\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000452", "length_bytes": 6514, "license_type": "permissive", "methods": [{"docstring": "Initialise with timeData and decimation parameters Parameters ---------- timeData : TimeData The time data to decimate decParams : DecimationParams Decimation parameters for performing the decimation", "name": "__init__", "signature": "def __init__(self, timeData: TimeData, decParams: DecimationParameters) -> None"}, {"docstring": "Downsample to the next decimation level Returns ------- out : bool True if downsampling completed successfully. False otherwise Notes ----- When the downsampling factor is too large, downsampling is performed in multiple steps. Downsampling will become increasingly inaccurate using the scipy routine when factor is too large", "name": "incrementLevel", "signature": "def incrementLevel(self) -> bool"}, {"docstring": "Downsample time data Parameters ---------- downsampleFactor : int Downsampling factor Returns ------- bool True if downsampling completed successfully. False otherwise Notes ----- When the downsampling causes number of samples to fall below minSamples, downsampling is not performed. The function returns False in this situation", "name": "downsample", "signature": "def downsample(self, downsampleFactor: int) -> bool"}, {"docstring": "Class information as a list of strings Returns ------- out : list List of strings with information", "name": "printList", "signature": "def printList(self) -> List[str]"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_002560", "prompt": "Implement the Python class `Decimator` described below.\n\nClass description:\nDecimate time data Decimates time data by factors until the minimum number of required samples is reached. When a downsample factor is too large, downsampling is performed in multiple steps to maintain accuracy of result. Attributes ---------- timeData : TimeData timeData object to decimate sampleFreq : float Sampling frequency of time data in Hz chans : List[str] Channels in time data numSamples : int Number of samples in timeData decParams : DecimationParams A DecimationParams object holding decimation information minSamples : int Minimum required samples to decimate level : int Current decimation level maxDownSampleFactor : int Max allowable downsampling in one go. 
Downsampling becomes le\n\nMethod signatures and docstrings:\n- def __init__(self, timeData: TimeData, decParams: DecimationParameters) -> None: Initialise with timeData and decimation parameters Parameters ---------- timeData : TimeData The time data to decimate decParams : DecimationParams Decimation parameters for performing the decimation\n- def incrementLevel(self) -> bool: Downsample to the next decimation level Returns ------- out : bool True if downsampling completed successfully. False otherwise Notes ----- When the downsampling factor is too large, downsampling is performed in multiple steps. Downsampling will become increasingly inaccurate using the scipy routine when factor is too large\n- def downsample(self, downsampleFactor: int) -> bool: Downsample time data Parameters ---------- downsampleFactor : int Downsampling factor Returns ------- bool True if downsampling completed successfully. False otherwise Notes ----- When the downsampling causes number of samples to fall below minSamples, downsampling is not performed. The function returns False in this situation\n- def printList(self) -> List[str]: Class information as a list of strings Returns ------- out : list List of strings with information", "prompted_full_text": "Implement the Python class `Decimator` described below.\n\nClass description:\nDecimate time data Decimates time data by factors until the minimum number of required samples is reached. When a downsample factor is too large, downsampling is performed in multiple steps to maintain accuracy of result. Attributes ---------- timeData : TimeData timeData object to decimate sampleFreq : float Sampling frequency of time data in Hz chans : List[str] Channels in time data numSamples : int Number of samples in timeData decParams : DecimationParams A DecimationParams object holding decimation information minSamples : int Minimum required samples to decimate level : int Current decimation level maxDownSampleFactor : int Max allowable downsampling in one go. Downsampling becomes le\n\nMethod signatures and docstrings:\n- def __init__(self, timeData: TimeData, decParams: DecimationParameters) -> None: Initialise with timeData and decimation parameters Parameters ---------- timeData : TimeData The time data to decimate decParams : DecimationParams Decimation parameters for performing the decimation\n- def incrementLevel(self) -> bool: Downsample to the next decimation level Returns ------- out : bool True if downsampling completed successfully. False otherwise Notes ----- When the downsampling factor is too large, downsampling is performed in multiple steps. Downsampling will become increasingly inaccurate using the scipy routine when factor is too large\n- def downsample(self, downsampleFactor: int) -> bool: Downsample time data Parameters ---------- downsampleFactor : int Downsampling factor Returns ------- bool True if downsampling completed successfully. False otherwise Notes ----- When the downsampling causes number of samples to fall below minSamples, downsampling is not performed. The function returns False in this situation\n- def printList(self) -> List[str]: Class information as a list of strings Returns ------- out : list List of strings with information\n\n<|skeleton|>\nclass Decimator:\n \"\"\"Decimate time data Decimates time data by factors until the minimum number of required samples is reached. When a downsample factor is too large, downsampling is performed in multiple steps to maintain accuracy of result. 
Attributes ---------- timeData : TimeData timeData object to decimate sampleFreq : float Sampling frequency of time data in Hz chans : List[str] Channels in time data numSamples : int Number of samples in timeData decParams : DecimationParams A DecimationParams object holding decimation information minSamples : int Minimum required samples to decimate level : int Current decimation level maxDownSampleFactor : int Max allowable downsampling in one go. Downsampling becomes le\"\"\"\n\n def __init__(self, timeData: TimeData, decParams: DecimationParameters) -> None:\n \"\"\"Initialise with timeData and decimation parameters Parameters ---------- timeData : TimeData The time data to decimate decParams : DecimationParams Decimation parameters for performing the decimation\"\"\"\n <|body_0|>\n\n def incrementLevel(self) -> bool:\n \"\"\"Downsample to the next decimation level Returns ------- out : bool True if downsampling completed successfully. False otherwise Notes ----- When the downsampling factor is too large, downsampling is performed in multiple steps. Downsampling will become increasingly inaccurate using the scipy routine when factor is too large\"\"\"\n <|body_1|>\n\n def downsample(self, downsampleFactor: int) -> bool:\n \"\"\"Downsample time data Parameters ---------- downsampleFactor : int Downsampling factor Returns ------- bool True if downsampling completed successfully. False otherwise Notes ----- When the downsampling causes number of samples to fall below minSamples, downsampling is not performed. The function returns False in this situation\"\"\"\n <|body_2|>\n\n def printList(self) -> List[str]:\n \"\"\"Class information as a list of strings Returns ------- out : list List of strings with information\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.timeData: TimeData = timeData\n self.sampleFreq: float = timeData.sampleFreq * 1.0\n self.chans: List = timeData.chans\n self.numSamples: int = timeData.numSamples\n self.decParams: DecimationParameters = decParams\n config = loadConfig()\n self.minSamples: int = config['Decimation']['minsamples']\n self.level: int = -1\n self.maxDownsampleFactor: int = 8\n<|end_body_0|>\n\n<|body_start_1|>\n self.level = self.level + 1\n downsampleFactor = self.decParams.getIncrementalFactor(self.level)\n numDownsamples = 1\n downsampleList = [downsampleFactor]\n if downsampleFactor > self.maxDownsampleFactor:\n numDownsamples = intdiv(downsampleFactor, self.maxDownsampleFactor)\n downsampleList = [self.maxDownsampleFactor, numDownsamples]\n self.printText('Downsample factor of {:d} greater than max decimation factor {:d}.'.format(downsampleFactor, self.maxDownsampleFactor))\n self.printText('Downsampling in multiple decimations given by factors: {}'.format(arrayToStringInt(downsampleList)))\n for iDS in range(0, numDownsamples):\n check = self.downsample(downsampleList[iDS])\n if not check:\n return False\n return True\n<|end_body_1|>\n\n<|body_start_2|>\n if self.level >= self.decParams.numLevels:\n self.printWarning('Error, number of decimation levels exceeded, returning no data')\n return False\n if downsampleFactor == 1:\n return True\n if self.numSamples / downsampleFactor < self.minSamples:\n self.printWarning('Next decimation level has less than {} samples. 
Decimation is exiting.\\nSet minimum of samples required using decimator.setMinSamples().'.format(self.minSamples))\n return False\n self.timeData = downsample(self.timeData, downsampleFactor)\n self.sampleFreq = self.timeData.sampleFreq\n self.numSamples = self.timeData.numSamples\n return True\n<|end_body_2|>\n\n<|body_start_3|>\n textLst = []\n textLst.append('Current level = {:d}'.format(self.level))\n if self.level == -1:\n textLst.append('This is the initial level - no decimation has occurred')\n textLst.append('Current sample freq. [Hz] = {:.6f}'.format(self.sampleFreq))\n textLst.append('Current sample rate [s] = {:.6f}'.format(1.0 / self.sampleFreq))\n textLst.append('Current number of samples = {:d}'.format(self.numSamples))\n return textLst\n<|end_body_3|>\n", "revision_id": "a93040521fd6506929a59c363ee58b7ca073bac1", "skeleton": "<|skeleton|>\nclass Decimator:\n \"\"\"Decimate time data Decimates time data by factors until the minimum number of required samples is reached. When a downsample factor is too large, downsampling is performed in multiple steps to maintain accuracy of result. Attributes ---------- timeData : TimeData timeData object to decimate sampleFreq : float Sampling frequency of time data in Hz chans : List[str] Channels in time data numSamples : int Number of samples in timeData decParams : DecimationParams A DecimationParams object holding decimation information minSamples : int Minimum required samples to decimate level : int Current decimation level maxDownSampleFactor : int Max allowable downsampling in one go. Downsampling becomes le\"\"\"\n\n def __init__(self, timeData: TimeData, decParams: DecimationParameters) -> None:\n \"\"\"Initialise with timeData and decimation parameters Parameters ---------- timeData : TimeData The time data to decimate decParams : DecimationParams Decimation parameters for performing the decimation\"\"\"\n <|body_0|>\n\n def incrementLevel(self) -> bool:\n \"\"\"Downsample to the next decimation level Returns ------- out : bool True if downsampling completed successfully. False otherwise Notes ----- When the downsampling factor is too large, downsampling is performed in multiple steps. Downsampling will become increasingly inaccurate using the scipy routine when factor is too large\"\"\"\n <|body_1|>\n\n def downsample(self, downsampleFactor: int) -> bool:\n \"\"\"Downsample time data Parameters ---------- downsampleFactor : int Downsampling factor Returns ------- bool True if downsampling completed successfully. False otherwise Notes ----- When the downsampling causes number of samples to fall below minSamples, downsampling is not performed. The function returns False in this situation\"\"\"\n <|body_2|>\n\n def printList(self) -> List[str]:\n \"\"\"Class information as a list of strings Returns ------- out : list List of strings with information\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Decimator:\n \"\"\"Decimate time data Decimates time data by factors until the minimum number of required samples is reached. 
Attributes ---------- timeData : TimeData timeData object to decimate sampleFreq : float Sampling frequency of time data in Hz chans : List[str] Channels in time data numSamples : int Number of samples in timeData decParams : DecimationParams A DecimationParams object holding decimation information minSamples : int Minimum required samples to decimate level : int Current decimation level maxDownSampleFactor : int Max allowable downsampling in one go. Downsampling becomes le\"\"\"\n\n def __init__(self, timeData: TimeData, decParams: DecimationParameters) -> None:\n \"\"\"Initialise with timeData and decimation parameters Parameters ---------- timeData : TimeData The time data to decimate decParams : DecimationParams Decimation parameters for performing the decimation\"\"\"\n self.timeData: TimeData = timeData\n self.sampleFreq: float = timeData.sampleFreq * 1.0\n self.chans: List = timeData.chans\n self.numSamples: int = timeData.numSamples\n self.decParams: DecimationParameters = decParams\n config = loadConfig()\n self.minSamples: int = config['Decimation']['minsamples']\n self.level: int = -1\n self.maxDownsampleFactor: int = 8\n\n def incrementLevel(self) -> bool:\n \"\"\"Downsample to the next decimation level Returns ------- out : bool True if downsampling completed successfully. False otherwise Notes ----- When the downsampling factor is too large, downsampling is performed in multiple steps. Downsampling will become increasingly inaccurate using the scipy routine when factor is too large\"\"\"\n self.level = self.level + 1\n downsampleFactor = self.decParams.getIncrementalFactor(self.level)\n numDownsamples = 1\n downsampleList = [downsampleFactor]\n if downsampleFactor > self.maxDownsampleFactor:\n numDownsamples = intdiv(downsampleFactor, self.maxDownsampleFactor)\n downsampleList = [self.maxDownsampleFactor, numDownsamples]\n self.printText('Downsample factor of {:d} greater than max decimation factor {:d}.'.format(downsampleFactor, self.maxDownsampleFactor))\n self.printText('Downsampling in multiple decimations given by factors: {}'.format(arrayToStringInt(downsampleList)))\n for iDS in range(0, numDownsamples):\n check = self.downsample(downsampleList[iDS])\n if not check:\n return False\n return True\n\n def downsample(self, downsampleFactor: int) -> bool:\n \"\"\"Downsample time data Parameters ---------- downsampleFactor : int Downsampling factor Returns ------- bool True if downsampling completed successfully. False otherwise Notes ----- When the downsampling causes number of samples to fall below minSamples, downsampling is not performed. The function returns False in this situation\"\"\"\n if self.level >= self.decParams.numLevels:\n self.printWarning('Error, number of decimation levels exceeded, returning no data')\n return False\n if downsampleFactor == 1:\n return True\n if self.numSamples / downsampleFactor < self.minSamples:\n self.printWarning('Next decimation level has less than {} samples. 
Decimation is exiting.\\nSet minimum of samples required using decimator.setMinSamples().'.format(self.minSamples))\n return False\n self.timeData = downsample(self.timeData, downsampleFactor)\n self.sampleFreq = self.timeData.sampleFreq\n self.numSamples = self.timeData.numSamples\n return True\n\n def printList(self) -> List[str]:\n \"\"\"Class information as a list of strings Returns ------- out : list List of strings with information\"\"\"\n textLst = []\n textLst.append('Current level = {:d}'.format(self.level))\n if self.level == -1:\n textLst.append('This is the initial level - no decimation has occurred')\n textLst.append('Current sample freq. [Hz] = {:.6f}'.format(self.sampleFreq))\n textLst.append('Current sample rate [s] = {:.6f}'.format(1.0 / self.sampleFreq))\n textLst.append('Current number of samples = {:d}'.format(self.numSamples))\n return textLst\n", "source": "the_stack_v2_python_sparse", "source_path": "resistics/decimate/decimator.py", "source_repo": "Nishikinor/resistics", "split": "val", "star_events_count": 0}
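A note on the Decimator record above: incrementLevel splits an oversized factor into downsampleList = [maxDownsampleFactor, factor // maxDownsampleFactor], yet then loops downsampleList[iDS] for iDS in range(numDownsamples), so the recorded code appears to assume at most two steps; a factor above twice the maximum (say 64 with a maximum of 8) would index past the two-item list and raise IndexError. Below is a minimal, self-contained sketch of the same splitting idea that keeps appending capped steps instead. It is an illustration under that reading, not resistics code, and split_factor is a hypothetical name.

import math

# Hedged sketch (not the resistics implementation): break a large downsample
# factor into steps no bigger than max_factor whose product is the factor.
def split_factor(factor: int, max_factor: int = 8) -> list:
    steps = []
    while factor > max_factor:
        if factor % max_factor != 0:
            raise ValueError('factor must split into whole steps')
        steps.append(max_factor)
        factor //= max_factor
    steps.append(factor)
    return steps

for f in (4, 8, 16, 64, 128):
    steps = split_factor(f)
    assert math.prod(steps) == f  # each step list reproduces the full factor
    print(f, '->', steps)         # e.g. 64 -> [8, 8], 128 -> [8, 8, 2]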
{"blob_id": "05c3f4ce92dd0f6f9403bcad31ca149d5642373e", "bodies": ["task_configs = find_all_files(path, extension=name_task_config)\nassert len(task_configs) == 1\nwith open(task_configs[0]) as config_file:\n config = json.load(config_file)\n gene = config.get('{cls_network}.gene')\n gene = split(gene, int)\n data_set = get_dataset_from_json(task_configs[0], fake=True)\n data_set_name = data_set.__class__.__name__\ntb_files = find_tb_files(path)\nassert len(tb_files) > 0\nevents = read_event_files(tb_files)\nloss_train = events.get('train/loss', None)\nloss_test = events.get('test/loss', None)\nassert loss_train is not None and loss_test is not None\naccuracy_train = events.get('train/accuracy/1', None)\naccuracy_test = events.get('test/accuracy/1', None)\nassert accuracy_train is not None and accuracy_test is not None\nnet_config_path = Builder.find_net_config_path(path)\nnetwork = get_network(net_config_path, data_set.get_data_shape(), data_set.get_label_shape())\npass\naverage_last = 5\nreturn MiniResult(arch_index=arch_index, arch_str='%s(%s)' % (space_name, ', '.join([str(g) for g in gene])), arch_tuple=tuple(gene), params={data_set_name: network.get_num_parameters()}, flops={data_set_name: network.profile_macs()}, latency={data_set_name: -1}, loss={data_set_name: {'train': np.mean([v.value for v in loss_train[-average_last:]]), 'test': np.mean([v.value for v in loss_test[-average_last:]])}}, acc1={data_set_name: {'train': np.mean([v.value for v in accuracy_train[-average_last:]]), 'test': np.mean([v.value for v in accuracy_test[-average_last:]])}})", "results = []\nmerged_results = {}\narch_to_idx = {}\ntuple_to_str = {}\ntuple_to_idx = {}\ntask_configs = find_all_files(path, extension=name_task_config)\nassert len(task_configs) > 0\nfor i, cfg_path in enumerate(sorted(task_configs)):\n dir_name = os.path.dirname(cfg_path)\n r = cls.make_from_single_dir(dir_name, space_name, arch_index=i)\n if r is None:\n continue\n assert tuple_to_str.get(r.arch_tuple) is None, 'can not yet merge duplicate architecture results: %s, in %s' % (r.arch_tuple, dir_name)\n results.append(r)\nlen_before = len(results)\nresults = MiniResult.merge_result_list(results)\nprint('merging: had %d before, merged down to %d' % (len_before, len(results)))\nfor i, r in enumerate(results):\n r.arch_index = i\n merged_results[i] = r\n arch_to_idx[r.arch_str] = i\n tuple_to_idx[r.arch_tuple] = i\n tuple_to_str[r.arch_tuple] = r.arch_str\ndata_sets = list(merged_results.get(0).params.keys())\nreturn MiniNASParsedTabularBenchmark(default_data_set=data_sets[0], default_result_type='test', bench_name='%s on %s' % (space_name, data_sets[0]), bench_description='parsed empirical results', value_space=value_space, results=merged_results, arch_to_idx=arch_to_idx, tuple_to_str=tuple_to_str, tuple_to_idx=tuple_to_idx)"], "bodies_text": "<|body_start_0|>\n task_configs = find_all_files(path, extension=name_task_config)\n assert len(task_configs) == 1\n with open(task_configs[0]) as config_file:\n config = json.load(config_file)\n gene = config.get('{cls_network}.gene')\n gene = split(gene, int)\n data_set = get_dataset_from_json(task_configs[0], fake=True)\n data_set_name = data_set.__class__.__name__\n tb_files = find_tb_files(path)\n assert len(tb_files) > 0\n events = read_event_files(tb_files)\n loss_train = events.get('train/loss', None)\n loss_test = events.get('test/loss', None)\n assert loss_train is not None and loss_test is not None\n accuracy_train = events.get('train/accuracy/1', None)\n accuracy_test = 
events.get('test/accuracy/1', None)\n assert accuracy_train is not None and accuracy_test is not None\n net_config_path = Builder.find_net_config_path(path)\n network = get_network(net_config_path, data_set.get_data_shape(), data_set.get_label_shape())\n pass\n average_last = 5\n return MiniResult(arch_index=arch_index, arch_str='%s(%s)' % (space_name, ', '.join([str(g) for g in gene])), arch_tuple=tuple(gene), params={data_set_name: network.get_num_parameters()}, flops={data_set_name: network.profile_macs()}, latency={data_set_name: -1}, loss={data_set_name: {'train': np.mean([v.value for v in loss_train[-average_last:]]), 'test': np.mean([v.value for v in loss_test[-average_last:]])}}, acc1={data_set_name: {'train': np.mean([v.value for v in accuracy_train[-average_last:]]), 'test': np.mean([v.value for v in accuracy_test[-average_last:]])}})\n<|end_body_0|>\n\n<|body_start_1|>\n results = []\n merged_results = {}\n arch_to_idx = {}\n tuple_to_str = {}\n tuple_to_idx = {}\n task_configs = find_all_files(path, extension=name_task_config)\n assert len(task_configs) > 0\n for i, cfg_path in enumerate(sorted(task_configs)):\n dir_name = os.path.dirname(cfg_path)\n r = cls.make_from_single_dir(dir_name, space_name, arch_index=i)\n if r is None:\n continue\n assert tuple_to_str.get(r.arch_tuple) is None, 'can not yet merge duplicate architecture results: %s, in %s' % (r.arch_tuple, dir_name)\n results.append(r)\n len_before = len(results)\n results = MiniResult.merge_result_list(results)\n print('merging: had %d before, merged down to %d' % (len_before, len(results)))\n for i, r in enumerate(results):\n r.arch_index = i\n merged_results[i] = r\n arch_to_idx[r.arch_str] = i\n tuple_to_idx[r.arch_tuple] = i\n tuple_to_str[r.arch_tuple] = r.arch_str\n data_sets = list(merged_results.get(0).params.keys())\n return MiniNASParsedTabularBenchmark(default_data_set=data_sets[0], default_result_type='test', bench_name='%s on %s' % (space_name, data_sets[0]), bench_description='parsed empirical results', value_space=value_space, results=merged_results, arch_to_idx=arch_to_idx, tuple_to_str=tuple_to_str, tuple_to_idx=tuple_to_idx)\n<|end_body_1|>\n", "class_docstring": "go through a directory of training save-dirs, parsing the results of each single training run and its used architecture assumptions: - all networks are created as RetrainFromSearchUninasNetwork, so that their genes can be easily read", "class_name": "MiniNASParsedTabularBenchmark", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MiniNASParsedTabularBenchmark:\n \"\"\"go through a directory of training save-dirs, parsing the results of each single training run and its used architecture assumptions: - all networks are created as RetrainFromSearchUninasNetwork, so that their genes can be easily read\"\"\"\n\n def make_from_single_dir(cls, path: str, space_name: str, arch_index: int) -> MiniResult:\n \"\"\"creating a mini result by parsing a training process\"\"\"\n <|body_0|>\n\n def make_from_dirs(cls, path: str, space_name: str, value_space: ValueSpace):\n \"\"\"creating a mini bench dataset by parsing multiple training processes\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n task_configs = find_all_files(path, extension=name_task_config)\n assert len(task_configs) == 1\n with open(task_configs[0]) as config_file:\n config = json.load(config_file)\n gene = config.get('{cls_network}.gene')\n gene = split(gene, int)\n data_set = 
get_dataset_from_json(task_configs[0], fake=True)\n data_set_name = data_set.__class__.__name__\n tb_files = find_tb_files(path)\n assert len(tb_files) > 0\n events = read_event_files(tb_files)\n loss_train = events.get('train/loss', None)\n loss_test = events.get('test/loss', None)\n assert loss_train is not None and loss_test is not None\n accuracy_train = events.get('train/accuracy/1', None)\n accuracy_test = events.get('test/accuracy/1', None)\n assert accuracy_train is not None and accuracy_test is not None\n net_config_path = Builder.find_net_config_path(path)\n network = get_network(net_config_path, data_set.get_data_shape(), data_set.get_label_shape())\n pass\n average_last = 5\n return MiniResult(arch_index=arch_index, arch_str='%s(%s)' % (space_name, ', '.join([str(g) for g in gene])), arch_tuple=tuple(gene), params={data_set_name: network.get_num_parameters()}, flops={data_set_name: network.profile_macs()}, latency={data_set_name: -1}, loss={data_set_name: {'train': np.mean([v.value for v in loss_train[-average_last:]]), 'test': np.mean([v.value for v in loss_test[-average_last:]])}}, acc1={data_set_name: {'train': np.mean([v.value for v in accuracy_train[-average_last:]]), 'test': np.mean([v.value for v in accuracy_test[-average_last:]])}})\n<|end_body_0|>\n\n<|body_start_1|>\n results = []\n merged_results = {}\n arch_to_idx = {}\n tuple_to_str = {}\n tuple_to_idx = {}\n task_configs = find_all_files(path, extension=name_task_config)\n assert len(task_configs) > 0\n for i, cfg_path in enumerate(sorted(task_configs)):\n dir_name = os.path.dirname(cfg_path)\n r = cls.make_from_single_dir(dir_name, space_name, arch_index=i)\n if r is None:\n continue\n assert tuple_to_str.get(r.arch_tuple) is None, 'can not yet merge duplicate architecture results: %s, in %s' % (r.arch_tuple, dir_name)\n results.append(r)\n len_before = len(results)\n results = MiniResult.merge_result_list(results)\n print('merging: had %d before, merged down to %d' % (len_before, len(results)))\n for i, r in enumerate(results):\n r.arch_index = i\n merged_results[i] = r\n arch_to_idx[r.arch_str] = i\n tuple_to_idx[r.arch_tuple] = i\n tuple_to_str[r.arch_tuple] = r.arch_str\n data_sets = list(merged_results.get(0).params.keys())\n return MiniNASParsedTabularBenchmark(default_data_set=data_sets[0], default_result_type='test', bench_name='%s on %s' % (space_name, data_sets[0]), bench_description='parsed empirical results', value_space=value_space, results=merged_results, arch_to_idx=arch_to_idx, tuple_to_str=tuple_to_str, tuple_to_idx=tuple_to_idx)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000453", "length_bytes": 7134, "license_type": "permissive", "methods": [{"docstring": "creating a mini result by parsing a training process", "name": "make_from_single_dir", "signature": "def make_from_single_dir(cls, path: str, space_name: str, arch_index: int) -> MiniResult"}, {"docstring": "creating a mini bench dataset by parsing multiple training processes", "name": "make_from_dirs", "signature": "def make_from_dirs(cls, path: str, space_name: str, value_space: ValueSpace)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_051049", "prompt": "Implement the Python class `MiniNASParsedTabularBenchmark` described below.\n\nClass description:\ngo through a directory of training save-dirs, parsing the results of each single training run and its used architecture assumptions: - all networks are created as RetrainFromSearchUninasNetwork, so that their genes can be easily read\n\nMethod 
signatures and docstrings:\n- def make_from_single_dir(cls, path: str, space_name: str, arch_index: int) -> MiniResult: creating a mini result by parsing a training process\n- def make_from_dirs(cls, path: str, space_name: str, value_space: ValueSpace): creating a mini bench dataset by parsing multiple training processes", "prompted_full_text": "Implement the Python class `MiniNASParsedTabularBenchmark` described below.\n\nClass description:\ngo through a directory of training save-dirs, parsing the results of each single training run and its used architecture assumptions: - all networks are created as RetrainFromSearchUninasNetwork, so that their genes can be easily read\n\nMethod signatures and docstrings:\n- def make_from_single_dir(cls, path: str, space_name: str, arch_index: int) -> MiniResult: creating a mini result by parsing a training process\n- def make_from_dirs(cls, path: str, space_name: str, value_space: ValueSpace): creating a mini bench dataset by parsing multiple training processes\n\n<|skeleton|>\nclass MiniNASParsedTabularBenchmark:\n \"\"\"go through a directory of training save-dirs, parsing the results of each single training run and its used architecture assumptions: - all networks are created as RetrainFromSearchUninasNetwork, so that their genes can be easily read\"\"\"\n\n def make_from_single_dir(cls, path: str, space_name: str, arch_index: int) -> MiniResult:\n \"\"\"creating a mini result by parsing a training process\"\"\"\n <|body_0|>\n\n def make_from_dirs(cls, path: str, space_name: str, value_space: ValueSpace):\n \"\"\"creating a mini bench dataset by parsing multiple training processes\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n task_configs = find_all_files(path, extension=name_task_config)\n assert len(task_configs) == 1\n with open(task_configs[0]) as config_file:\n config = json.load(config_file)\n gene = config.get('{cls_network}.gene')\n gene = split(gene, int)\n data_set = get_dataset_from_json(task_configs[0], fake=True)\n data_set_name = data_set.__class__.__name__\n tb_files = find_tb_files(path)\n assert len(tb_files) > 0\n events = read_event_files(tb_files)\n loss_train = events.get('train/loss', None)\n loss_test = events.get('test/loss', None)\n assert loss_train is not None and loss_test is not None\n accuracy_train = events.get('train/accuracy/1', None)\n accuracy_test = events.get('test/accuracy/1', None)\n assert accuracy_train is not None and accuracy_test is not None\n net_config_path = Builder.find_net_config_path(path)\n network = get_network(net_config_path, data_set.get_data_shape(), data_set.get_label_shape())\n pass\n average_last = 5\n return MiniResult(arch_index=arch_index, arch_str='%s(%s)' % (space_name, ', '.join([str(g) for g in gene])), arch_tuple=tuple(gene), params={data_set_name: network.get_num_parameters()}, flops={data_set_name: network.profile_macs()}, latency={data_set_name: -1}, loss={data_set_name: {'train': np.mean([v.value for v in loss_train[-average_last:]]), 'test': np.mean([v.value for v in loss_test[-average_last:]])}}, acc1={data_set_name: {'train': np.mean([v.value for v in accuracy_train[-average_last:]]), 'test': np.mean([v.value for v in accuracy_test[-average_last:]])}})\n<|end_body_0|>\n\n<|body_start_1|>\n results = []\n merged_results = {}\n arch_to_idx = {}\n tuple_to_str = {}\n tuple_to_idx = {}\n task_configs = find_all_files(path, extension=name_task_config)\n assert len(task_configs) > 0\n for i, cfg_path in enumerate(sorted(task_configs)):\n dir_name = 
os.path.dirname(cfg_path)\n r = cls.make_from_single_dir(dir_name, space_name, arch_index=i)\n if r is None:\n continue\n assert tuple_to_str.get(r.arch_tuple) is None, 'can not yet merge duplicate architecture results: %s, in %s' % (r.arch_tuple, dir_name)\n results.append(r)\n len_before = len(results)\n results = MiniResult.merge_result_list(results)\n print('merging: had %d before, merged down to %d' % (len_before, len(results)))\n for i, r in enumerate(results):\n r.arch_index = i\n merged_results[i] = r\n arch_to_idx[r.arch_str] = i\n tuple_to_idx[r.arch_tuple] = i\n tuple_to_str[r.arch_tuple] = r.arch_str\n data_sets = list(merged_results.get(0).params.keys())\n return MiniNASParsedTabularBenchmark(default_data_set=data_sets[0], default_result_type='test', bench_name='%s on %s' % (space_name, data_sets[0]), bench_description='parsed empirical results', value_space=value_space, results=merged_results, arch_to_idx=arch_to_idx, tuple_to_str=tuple_to_str, tuple_to_idx=tuple_to_idx)\n<|end_body_1|>\n", "revision_id": "06729b9cf517ec416fb798ae387c5bd9c3a278ac", "skeleton": "<|skeleton|>\nclass MiniNASParsedTabularBenchmark:\n \"\"\"go through a directory of training save-dirs, parsing the results of each single training run and its used architecture assumptions: - all networks are created as RetrainFromSearchUninasNetwork, so that their genes can be easily read\"\"\"\n\n def make_from_single_dir(cls, path: str, space_name: str, arch_index: int) -> MiniResult:\n \"\"\"creating a mini result by parsing a training process\"\"\"\n <|body_0|>\n\n def make_from_dirs(cls, path: str, space_name: str, value_space: ValueSpace):\n \"\"\"creating a mini bench dataset by parsing multiple training processes\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class MiniNASParsedTabularBenchmark:\n \"\"\"go through a directory of training save-dirs, parsing the results of each single training run and its used architecture assumptions: - all networks are created as RetrainFromSearchUninasNetwork, so that their genes can be easily read\"\"\"\n\n def make_from_single_dir(cls, path: str, space_name: str, arch_index: int) -> MiniResult:\n \"\"\"creating a mini result by parsing a training process\"\"\"\n task_configs = find_all_files(path, extension=name_task_config)\n assert len(task_configs) == 1\n with open(task_configs[0]) as config_file:\n config = json.load(config_file)\n gene = config.get('{cls_network}.gene')\n gene = split(gene, int)\n data_set = get_dataset_from_json(task_configs[0], fake=True)\n data_set_name = data_set.__class__.__name__\n tb_files = find_tb_files(path)\n assert len(tb_files) > 0\n events = read_event_files(tb_files)\n loss_train = events.get('train/loss', None)\n loss_test = events.get('test/loss', None)\n assert loss_train is not None and loss_test is not None\n accuracy_train = events.get('train/accuracy/1', None)\n accuracy_test = events.get('test/accuracy/1', None)\n assert accuracy_train is not None and accuracy_test is not None\n net_config_path = Builder.find_net_config_path(path)\n network = get_network(net_config_path, data_set.get_data_shape(), data_set.get_label_shape())\n pass\n average_last = 5\n return MiniResult(arch_index=arch_index, arch_str='%s(%s)' % (space_name, ', '.join([str(g) for g in gene])), arch_tuple=tuple(gene), params={data_set_name: network.get_num_parameters()}, flops={data_set_name: network.profile_macs()}, 
latency={data_set_name: -1}, loss={data_set_name: {'train': np.mean([v.value for v in loss_train[-average_last:]]), 'test': np.mean([v.value for v in loss_test[-average_last:]])}}, acc1={data_set_name: {'train': np.mean([v.value for v in accuracy_train[-average_last:]]), 'test': np.mean([v.value for v in accuracy_test[-average_last:]])}})\n\n def make_from_dirs(cls, path: str, space_name: str, value_space: ValueSpace):\n \"\"\"creating a mini bench dataset by parsing multiple training processes\"\"\"\n results = []\n merged_results = {}\n arch_to_idx = {}\n tuple_to_str = {}\n tuple_to_idx = {}\n task_configs = find_all_files(path, extension=name_task_config)\n assert len(task_configs) > 0\n for i, cfg_path in enumerate(sorted(task_configs)):\n dir_name = os.path.dirname(cfg_path)\n r = cls.make_from_single_dir(dir_name, space_name, arch_index=i)\n if r is None:\n continue\n assert tuple_to_str.get(r.arch_tuple) is None, 'can not yet merge duplicate architecture results: %s, in %s' % (r.arch_tuple, dir_name)\n results.append(r)\n len_before = len(results)\n results = MiniResult.merge_result_list(results)\n print('merging: had %d before, merged down to %d' % (len_before, len(results)))\n for i, r in enumerate(results):\n r.arch_index = i\n merged_results[i] = r\n arch_to_idx[r.arch_str] = i\n tuple_to_idx[r.arch_tuple] = i\n tuple_to_str[r.arch_tuple] = r.arch_str\n data_sets = list(merged_results.get(0).params.keys())\n return MiniNASParsedTabularBenchmark(default_data_set=data_sets[0], default_result_type='test', bench_name='%s on %s' % (space_name, data_sets[0]), bench_description='parsed empirical results', value_space=value_space, results=merged_results, arch_to_idx=arch_to_idx, tuple_to_str=tuple_to_str, tuple_to_idx=tuple_to_idx)\n", "source": "the_stack_v2_python_sparse", "source_path": "uninas/optimization/benchmarks/mini/tabular_parsed.py", "source_repo": "MLDL/uninas", "split": "val", "star_events_count": 0}
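Two details of the make_from_dirs body above are worth seeing in isolation. First, the duplicate-architecture assert consults tuple_to_str before anything has been inserted into it (the map is only filled in the second loop), so as recorded that check can never fire. Second, after merging, the results are re-indexed into three parallel lookup tables (string, tuple, and index views of the same architecture). A runnable reduction of that bookkeeping, with MiniResultStub as a hypothetical stand-in for the repo's MiniResult:

from dataclasses import dataclass

@dataclass
class MiniResultStub:  # stand-in only; the real MiniResult lives in uninas
    arch_index: int
    arch_str: str
    arch_tuple: tuple

results = [MiniResultStub(-1, 'space(1, 2)', (1, 2)),
           MiniResultStub(-1, 'space(3, 4)', (3, 4))]
merged_results, arch_to_idx, tuple_to_idx, tuple_to_str = {}, {}, {}, {}
for i, r in enumerate(results):
    r.arch_index = i  # indices are reassigned after duplicates are merged
    merged_results[i] = r
    arch_to_idx[r.arch_str] = i
    tuple_to_idx[r.arch_tuple] = i
    tuple_to_str[r.arch_tuple] = r.arch_str
print(arch_to_idx)   # {'space(1, 2)': 0, 'space(3, 4)': 1}
print(tuple_to_idx)  # {(1, 2): 0, (3, 4): 1}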
{"blob_id": "65be1fcf50a650c9e5286a0c1028136ca431d64b", "bodies": ["if not head or not head.next:\n return head\nlength, cur = (1, head)\nwhile cur.next:\n cur = cur.next\n length += 1\ncur.next = head\ncur, tail = (head, cur)\nfor _ in xrange(length - k % length):\n tail = cur\n cur = cur.next\ntail.next = None\nreturn cur", "if not head or not head.next:\n return head\nslow, fast = (head, head)\nfor i in range(k):\n if not fast.next:\n fast = head\n else:\n fast = fast.next\nwhile fast.next:\n slow, fast = (slow.next, fast.next)\nfast.next = head\nans = slow.next\nslow.next = None\nreturn ans"], "bodies_text": "<|body_start_0|>\n if not head or not head.next:\n return head\n length, cur = (1, head)\n while cur.next:\n cur = cur.next\n length += 1\n cur.next = head\n cur, tail = (head, cur)\n for _ in xrange(length - k % length):\n tail = cur\n cur = cur.next\n tail.next = None\n return cur\n<|end_body_0|>\n\n<|body_start_1|>\n if not head or not head.next:\n return head\n slow, fast = (head, head)\n for i in range(k):\n if not fast.next:\n fast = head\n else:\n fast = fast.next\n while fast.next:\n slow, fast = (slow.next, fast.next)\n fast.next = head\n ans = slow.next\n slow.next = None\n return ans\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def rotateRight(self, head, k):\n \"\"\":type head: ListNode :type k: int :rtype: ListNode\"\"\"\n <|body_0|>\n\n def rotate_without_count(self, head, k):\n \"\"\"不需要计算链表的长度, 但是当K很大时, 会遍历多次, 浪费时间 :type head: ListNode :type k: int :rtype: ListNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not head or not head.next:\n return head\n length, cur = (1, head)\n while cur.next:\n cur = cur.next\n length += 1\n cur.next = head\n cur, tail = (head, cur)\n for _ in xrange(length - k % length):\n tail = cur\n cur = cur.next\n tail.next = None\n return cur\n<|end_body_0|>\n\n<|body_start_1|>\n if not head or not head.next:\n return head\n slow, fast = (head, head)\n for i in range(k):\n if not fast.next:\n fast = head\n else:\n fast = fast.next\n while fast.next:\n slow, fast = (slow.next, fast.next)\n fast.next = head\n ans = slow.next\n slow.next = None\n return ans\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000454", "length_bytes": 1740, "license_type": "no_license", "methods": [{"docstring": ":type head: ListNode :type k: int :rtype: ListNode", "name": "rotateRight", "signature": "def rotateRight(self, head, k)"}, {"docstring": "不需要计算链表的长度, 但是当K很大时, 会遍历多次, 浪费时间 :type head: ListNode :type k: int :rtype: ListNode", "name": "rotate_without_count", "signature": "def rotate_without_count(self, head, k)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_000305", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def rotateRight(self, head, k): :type head: ListNode :type k: int :rtype: ListNode\n- def rotate_without_count(self, head, k): 不需要计算链表的长度, 但是当K很大时, 会遍历多次, 浪费时间 :type head: ListNode :type k: int :rtype: ListNode", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def rotateRight(self, head, k): :type head: ListNode :type k: int :rtype: ListNode\n- def rotate_without_count(self, head, k): 不需要计算链表的长度, 但是当K很大时, 会遍历多次, 浪费时间 
:type head: ListNode :type k: int :rtype: ListNode\n\n<|skeleton|>\nclass Solution:\n\n def rotateRight(self, head, k):\n \"\"\":type head: ListNode :type k: int :rtype: ListNode\"\"\"\n <|body_0|>\n\n def rotate_without_count(self, head, k):\n \"\"\"No need to compute the length of the linked list, but when K is large it traverses the list multiple times, wasting time :type head: ListNode :type k: int :rtype: ListNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not head or not head.next:\n return head\n length, cur = (1, head)\n while cur.next:\n cur = cur.next\n length += 1\n cur.next = head\n cur, tail = (head, cur)\n for _ in range(length - k % length):\n tail = cur\n cur = cur.next\n tail.next = None\n return cur\n<|end_body_0|>\n\n<|body_start_1|>\n if not head or not head.next:\n return head\n slow, fast = (head, head)\n for i in range(k):\n if not fast.next:\n fast = head\n else:\n fast = fast.next\n while fast.next:\n slow, fast = (slow.next, fast.next)\n fast.next = head\n ans = slow.next\n slow.next = None\n return ans\n<|end_body_1|>\n", "revision_id": "215d513b3564a7a76db3d2b29e4acc341a68e8ee", "skeleton": "<|skeleton|>\nclass Solution:\n\n def rotateRight(self, head, k):\n \"\"\":type head: ListNode :type k: int :rtype: ListNode\"\"\"\n <|body_0|>\n\n def rotate_without_count(self, head, k):\n \"\"\"No need to compute the length of the linked list, but when K is large it traverses the list multiple times, wasting time :type head: ListNode :type k: int :rtype: ListNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def rotateRight(self, head, k):\n \"\"\":type head: ListNode :type k: int :rtype: ListNode\"\"\"\n if not head or not head.next:\n return head\n length, cur = (1, head)\n while cur.next:\n cur = cur.next\n length += 1\n cur.next = head\n cur, tail = (head, cur)\n for _ in range(length - k % length):\n tail = cur\n cur = cur.next\n tail.next = None\n return cur\n\n def rotate_without_count(self, head, k):\n \"\"\"No need to compute the length of the linked list, but when K is large it traverses the list multiple times, wasting time :type head: ListNode :type k: int :rtype: ListNode\"\"\"\n if not head or not head.next:\n return head\n slow, fast = (head, head)\n for i in range(k):\n if not fast.next:\n fast = head\n else:\n fast = fast.next\n while fast.next:\n slow, fast = (slow.next, fast.next)\n fast.next = head\n ans = slow.next\n slow.next = None\n return ans\n", "source": "the_stack_v2_python_sparse", "source_path": "python/two-pointer/rotate-list.py", "source_repo": "euxuoh/leetcode", "split": "val", "star_events_count": 0}
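To see the ring-based rotation from rotateRight end to end, here is a self-contained harness; the ListNode class is a local stand-in, since the record does not ship the repo's definition:

class ListNode:
    def __init__(self, val=0, nxt=None):
        self.val, self.next = val, nxt

def rotate_right(head, k):
    if not head or not head.next:
        return head
    length, cur = 1, head
    while cur.next:        # measure the list and find the current tail
        cur = cur.next
        length += 1
    cur.next = head        # close the list into a ring
    cur, tail = head, cur
    for _ in range(length - k % length):
        tail, cur = cur, cur.next  # walk to the node before the new head
    tail.next = None       # cut the ring open again
    return cur

head = ListNode(1, ListNode(2, ListNode(3, ListNode(4, ListNode(5)))))
node, out = rotate_right(head, 2), []
while node:
    out.append(node.val)
    node = node.next
print(out)  # [4, 5, 1, 2, 3]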
{"blob_id": "aea4cf7daaee5433a61bd6b29ba0726dba6e2905", "bodies": ["required = [{'short_name': 'fgco2', 'mip': 'Omon'}, {'short_name': 'areacello', 'mip': 'fx'}]\nif project == 'CMIP6':\n required = [{'short_name': 'fgco2', 'mip': 'Omon'}, {'short_name': 'areacello', 'mip': 'Ofx'}]\nreturn required", "fgco2_cube = cubes.extract_strict(iris.Constraint(name='surface_downward_mass_flux_of_carbon_dioxide_expressed_as_carbon'))\ntry:\n cube_area = cubes.extract_strict(iris.Constraint(name='cell_area'))\nexcept iris.exceptions.ConstraintMismatchError:\n pass\ntotal_flux = calculate_total_flux(fgco2_cube, cube_area)\nresult = fgco2_cube.collapsed(['latitude', 'longitude'], iris.analysis.MEAN)\nresult.units = fgco2_cube.units * cube_area.units\nresult.data = total_flux\nreturn result"], "bodies_text": "<|body_start_0|>\n required = [{'short_name': 'fgco2', 'mip': 'Omon'}, {'short_name': 'areacello', 'mip': 'fx'}]\n if project == 'CMIP6':\n required = [{'short_name': 'fgco2', 'mip': 'Omon'}, {'short_name': 'areacello', 'mip': 'Ofx'}]\n return required\n<|end_body_0|>\n\n<|body_start_1|>\n fgco2_cube = cubes.extract_strict(iris.Constraint(name='surface_downward_mass_flux_of_carbon_dioxide_expressed_as_carbon'))\n try:\n cube_area = cubes.extract_strict(iris.Constraint(name='cell_area'))\n except iris.exceptions.ConstraintMismatchError:\n pass\n total_flux = calculate_total_flux(fgco2_cube, cube_area)\n result = fgco2_cube.collapsed(['latitude', 'longitude'], iris.analysis.MEAN)\n result.units = fgco2_cube.units * cube_area.units\n result.data = total_flux\n return result\n<|end_body_1|>\n", "class_docstring": "Derivation of variable `gtfgco2`.", "class_name": "DerivedVariable", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DerivedVariable:\n \"\"\"Derivation of variable `gtfgco2`.\"\"\"\n\n def required(project):\n \"\"\"Declare the variables needed for derivation.\"\"\"\n <|body_0|>\n\n def calculate(cubes):\n \"\"\"Compute longwave cloud radiative effect.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n required = [{'short_name': 'fgco2', 'mip': 'Omon'}, {'short_name': 'areacello', 'mip': 'fx'}]\n if project == 'CMIP6':\n required = [{'short_name': 'fgco2', 'mip': 'Omon'}, {'short_name': 'areacello', 'mip': 'Ofx'}]\n return required\n<|end_body_0|>\n\n<|body_start_1|>\n fgco2_cube = cubes.extract_strict(iris.Constraint(name='surface_downward_mass_flux_of_carbon_dioxide_expressed_as_carbon'))\n try:\n cube_area = cubes.extract_strict(iris.Constraint(name='cell_area'))\n except iris.exceptions.ConstraintMismatchError:\n pass\n total_flux = calculate_total_flux(fgco2_cube, cube_area)\n result = fgco2_cube.collapsed(['latitude', 'longitude'], iris.analysis.MEAN)\n result.units = fgco2_cube.units * cube_area.units\n result.data = total_flux\n return result\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000455", "length_bytes": 2526, "license_type": "permissive", "methods": [{"docstring": "Declare the variables needed for derivation.", "name": "required", "signature": "def required(project)"}, {"docstring": "Compute longwave cloud radiative effect.", "name": "calculate", "signature": "def calculate(cubes)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_027110", "prompt": "Implement the Python class `DerivedVariable` described below.\n\nClass description:\nDerivation of variable `gtfgco2`.\n\nMethod signatures and docstrings:\n- def required(project): Declare the variables 
needed for derivation.\n- def calculate(cubes): Compute longwave cloud radiative effect.", "prompted_full_text": "Implement the Python class `DerivedVariable` described below.\n\nClass description:\nDerivation of variable `gtfgco2`.\n\nMethod signatures and docstrings:\n- def required(project): Declare the variables needed for derivation.\n- def calculate(cubes): Compute longwave cloud radiative effect.\n\n<|skeleton|>\nclass DerivedVariable:\n \"\"\"Derivation of variable `gtfgco2`.\"\"\"\n\n def required(project):\n \"\"\"Declare the variables needed for derivation.\"\"\"\n <|body_0|>\n\n def calculate(cubes):\n \"\"\"Compute longwave cloud radiative effect.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n required = [{'short_name': 'fgco2', 'mip': 'Omon'}, {'short_name': 'areacello', 'mip': 'fx'}]\n if project == 'CMIP6':\n required = [{'short_name': 'fgco2', 'mip': 'Omon'}, {'short_name': 'areacello', 'mip': 'Ofx'}]\n return required\n<|end_body_0|>\n\n<|body_start_1|>\n fgco2_cube = cubes.extract_strict(iris.Constraint(name='surface_downward_mass_flux_of_carbon_dioxide_expressed_as_carbon'))\n try:\n cube_area = cubes.extract_strict(iris.Constraint(name='cell_area'))\n except iris.exceptions.ConstraintMismatchError:\n pass\n total_flux = calculate_total_flux(fgco2_cube, cube_area)\n result = fgco2_cube.collapsed(['latitude', 'longitude'], iris.analysis.MEAN)\n result.units = fgco2_cube.units * cube_area.units\n result.data = total_flux\n return result\n<|end_body_1|>\n", "revision_id": "d5bf3f459ff3a43e780d75d57b63b88b6cc8c4f2", "skeleton": "<|skeleton|>\nclass DerivedVariable:\n \"\"\"Derivation of variable `gtfgco2`.\"\"\"\n\n def required(project):\n \"\"\"Declare the variables needed for derivation.\"\"\"\n <|body_0|>\n\n def calculate(cubes):\n \"\"\"Compute longwave cloud radiative effect.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DerivedVariable:\n \"\"\"Derivation of variable `gtfgco2`.\"\"\"\n\n def required(project):\n \"\"\"Declare the variables needed for derivation.\"\"\"\n required = [{'short_name': 'fgco2', 'mip': 'Omon'}, {'short_name': 'areacello', 'mip': 'fx'}]\n if project == 'CMIP6':\n required = [{'short_name': 'fgco2', 'mip': 'Omon'}, {'short_name': 'areacello', 'mip': 'Ofx'}]\n return required\n\n def calculate(cubes):\n \"\"\"Compute longwave cloud radiative effect.\"\"\"\n fgco2_cube = cubes.extract_strict(iris.Constraint(name='surface_downward_mass_flux_of_carbon_dioxide_expressed_as_carbon'))\n try:\n cube_area = cubes.extract_strict(iris.Constraint(name='cell_area'))\n except iris.exceptions.ConstraintMismatchError:\n pass\n total_flux = calculate_total_flux(fgco2_cube, cube_area)\n result = fgco2_cube.collapsed(['latitude', 'longitude'], iris.analysis.MEAN)\n result.units = fgco2_cube.units * cube_area.units\n result.data = total_flux\n return result\n", "source": "the_stack_v2_python_sparse", "source_path": "esmvalcore/preprocessor/_derive/gtfgco2.py", "source_repo": "aperezpredictia/ESMValCore", "split": "val", "star_events_count": 1}
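Two things are worth flagging in the gtfgco2 record above: the calculate docstring ("Compute longwave cloud radiative effect.") appears copy-pasted from another derivation, since the body actually integrates an air-sea carbon flux; and if the cell_area extraction raises ConstraintMismatchError, the bare pass leaves cube_area unbound, so the following calculate_total_flux call would fail with NameError. The derived quantity itself reduces to an area-weighted sum; a numpy-only sketch with illustrative values (the iris cubes and the calculate_total_flux helper are stubbed away):

import numpy as np

fgco2 = np.array([[1.0e-9, 2.0e-9],
                  [0.5e-9, 1.5e-9]])      # flux per cell, kg m-2 s-1
areacello = np.array([[1.0e10, 1.2e10],
                      [0.9e10, 1.1e10]])  # cell areas, m2

gtfgco2 = (fgco2 * areacello).sum()       # globally integrated flux, kg s-1
print(gtfgco2)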
{"blob_id": "e7f9b6fdd51e765eeb5ba8668c9e4ef0f2a44ee0", "bodies": ["super(GANLoss, self).__init__()\nself.register_buffer('real_label', torch.tensor(target_real_label, device=device))\nself.register_buffer('fake_label', torch.tensor(target_fake_label, device=device))\nself.gan_mode = gan_mode\nif gan_mode == 'lsgan':\n self.loss = nn.MSELoss()\nelif gan_mode == 'vanilla':\n self.loss = nn.BCEWithLogitsLoss()\nelif gan_mode in ['wgangp']:\n self.loss = None\nelse:\n raise NotImplementedError('gan mode %s not implemented' % gan_mode)", "if target_is_real:\n target_tensor = self.real_label\nelse:\n target_tensor = self.fake_label\nreturn target_tensor.expand_as(prediction)", "if self.gan_mode in ['lsgan', 'vanilla']:\n target_tensor = self.get_target_tensor(prediction, target_is_real)\n loss = self.loss(prediction, target_tensor)\nelif self.gan_mode == 'wgangp':\n if target_is_real:\n loss = -prediction.mean()\n else:\n loss = prediction.mean()\nreturn loss"], "bodies_text": "<|body_start_0|>\n super(GANLoss, self).__init__()\n self.register_buffer('real_label', torch.tensor(target_real_label, device=device))\n self.register_buffer('fake_label', torch.tensor(target_fake_label, device=device))\n self.gan_mode = gan_mode\n if gan_mode == 'lsgan':\n self.loss = nn.MSELoss()\n elif gan_mode == 'vanilla':\n self.loss = nn.BCEWithLogitsLoss()\n elif gan_mode in ['wgangp']:\n self.loss = None\n else:\n raise NotImplementedError('gan mode %s not implemented' % gan_mode)\n<|end_body_0|>\n\n<|body_start_1|>\n if target_is_real:\n target_tensor = self.real_label\n else:\n target_tensor = self.fake_label\n return target_tensor.expand_as(prediction)\n<|end_body_1|>\n\n<|body_start_2|>\n if self.gan_mode in ['lsgan', 'vanilla']:\n target_tensor = self.get_target_tensor(prediction, target_is_real)\n loss = self.loss(prediction, target_tensor)\n elif self.gan_mode == 'wgangp':\n if target_is_real:\n loss = -prediction.mean()\n else:\n loss = prediction.mean()\n return loss\n<|end_body_2|>\n", "class_docstring": "Define different GAN objectives. The GANLoss class abstracts away the need to create the target label tensor that has the same size as the input.", "class_name": "GANLoss", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GANLoss:\n \"\"\"Define different GAN objectives. The GANLoss class abstracts away the need to create the target label tensor that has the same size as the input.\"\"\"\n\n def __init__(self, gan_mode, device, target_real_label=1.0, target_fake_label=0.0):\n \"\"\"Initialize the GANLoss class. Parameters: gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp. target_real_label (bool) - - label for a real image target_fake_label (bool) - - label of a fake image Note: Do not use sigmoid as the last layer of Discriminator. LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.\"\"\"\n <|body_0|>\n\n def get_target_tensor(self, prediction, target_is_real):\n \"\"\"Create label tensors with the same size as the input. Parameters: prediction (tensor) - - tpyically the prediction from a discriminator target_is_real (bool) - - if the ground truth label is for real images or fake images Returns: A label tensor filled with ground truth label, and with the size of the input\"\"\"\n <|body_1|>\n\n def __call__(self, prediction, target_is_real):\n \"\"\"Calculate loss given Discriminator's output and grount truth labels. 
Parameters: prediction (tensor) - - typically the prediction output from a discriminator target_is_real (bool) - - if the ground truth label is for real images or fake images Returns: the calculated loss.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(GANLoss, self).__init__()\n self.register_buffer('real_label', torch.tensor(target_real_label, device=device))\n self.register_buffer('fake_label', torch.tensor(target_fake_label, device=device))\n self.gan_mode = gan_mode\n if gan_mode == 'lsgan':\n self.loss = nn.MSELoss()\n elif gan_mode == 'vanilla':\n self.loss = nn.BCEWithLogitsLoss()\n elif gan_mode in ['wgangp']:\n self.loss = None\n else:\n raise NotImplementedError('gan mode %s not implemented' % gan_mode)\n<|end_body_0|>\n\n<|body_start_1|>\n if target_is_real:\n target_tensor = self.real_label\n else:\n target_tensor = self.fake_label\n return target_tensor.expand_as(prediction)\n<|end_body_1|>\n\n<|body_start_2|>\n if self.gan_mode in ['lsgan', 'vanilla']:\n target_tensor = self.get_target_tensor(prediction, target_is_real)\n loss = self.loss(prediction, target_tensor)\n elif self.gan_mode == 'wgangp':\n if target_is_real:\n loss = -prediction.mean()\n else:\n loss = prediction.mean()\n return loss\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000456", "length_bytes": 18046, "license_type": "no_license", "methods": [{"docstring": "Initialize the GANLoss class. Parameters: gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp. target_real_label (bool) - - label for a real image target_fake_label (bool) - - label of a fake image Note: Do not use sigmoid as the last layer of Discriminator. LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.", "name": "__init__", "signature": "def __init__(self, gan_mode, device, target_real_label=1.0, target_fake_label=0.0)"}, {"docstring": "Create label tensors with the same size as the input. Parameters: prediction (tensor) - - typically the prediction from a discriminator target_is_real (bool) - - if the ground truth label is for real images or fake images Returns: A label tensor filled with ground truth label, and with the size of the input", "name": "get_target_tensor", "signature": "def get_target_tensor(self, prediction, target_is_real)"}, {"docstring": "Calculate loss given Discriminator's output and ground truth labels. Parameters: prediction (tensor) - - typically the prediction output from a discriminator target_is_real (bool) - - if the ground truth label is for real images or fake images Returns: the calculated loss.", "name": "__call__", "signature": "def __call__(self, prediction, target_is_real)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_008161", "prompt": "Implement the Python class `GANLoss` described below.\n\nClass description:\nDefine different GAN objectives. The GANLoss class abstracts away the need to create the target label tensor that has the same size as the input.\n\nMethod signatures and docstrings:\n- def __init__(self, gan_mode, device, target_real_label=1.0, target_fake_label=0.0): Initialize the GANLoss class. Parameters: gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp. target_real_label (bool) - - label for a real image target_fake_label (bool) - - label of a fake image Note: Do not use sigmoid as the last layer of Discriminator. LSGAN needs no sigmoid. 
vanilla GANs will handle it with BCEWithLogitsLoss.\n- def get_target_tensor(self, prediction, target_is_real): Create label tensors with the same size as the input. Parameters: prediction (tensor) - - typically the prediction from a discriminator target_is_real (bool) - - if the ground truth label is for real images or fake images Returns: A label tensor filled with ground truth label, and with the size of the input\n- def __call__(self, prediction, target_is_real): Calculate loss given Discriminator's output and ground truth labels. Parameters: prediction (tensor) - - typically the prediction output from a discriminator target_is_real (bool) - - if the ground truth label is for real images or fake images Returns: the calculated loss.", "prompted_full_text": "Implement the Python class `GANLoss` described below.\n\nClass description:\nDefine different GAN objectives. The GANLoss class abstracts away the need to create the target label tensor that has the same size as the input.\n\nMethod signatures and docstrings:\n- def __init__(self, gan_mode, device, target_real_label=1.0, target_fake_label=0.0): Initialize the GANLoss class. Parameters: gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp. target_real_label (bool) - - label for a real image target_fake_label (bool) - - label of a fake image Note: Do not use sigmoid as the last layer of Discriminator. LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.\n- def get_target_tensor(self, prediction, target_is_real): Create label tensors with the same size as the input. Parameters: prediction (tensor) - - typically the prediction from a discriminator target_is_real (bool) - - if the ground truth label is for real images or fake images Returns: A label tensor filled with ground truth label, and with the size of the input\n- def __call__(self, prediction, target_is_real): Calculate loss given Discriminator's output and ground truth labels. Parameters: prediction (tensor) - - typically the prediction output from a discriminator target_is_real (bool) - - if the ground truth label is for real images or fake images Returns: the calculated loss.\n\n<|skeleton|>\nclass GANLoss:\n \"\"\"Define different GAN objectives. The GANLoss class abstracts away the need to create the target label tensor that has the same size as the input.\"\"\"\n\n def __init__(self, gan_mode, device, target_real_label=1.0, target_fake_label=0.0):\n \"\"\"Initialize the GANLoss class. Parameters: gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp. target_real_label (bool) - - label for a real image target_fake_label (bool) - - label of a fake image Note: Do not use sigmoid as the last layer of Discriminator. LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.\"\"\"\n <|body_0|>\n\n def get_target_tensor(self, prediction, target_is_real):\n \"\"\"Create label tensors with the same size as the input. Parameters: prediction (tensor) - - typically the prediction from a discriminator target_is_real (bool) - - if the ground truth label is for real images or fake images Returns: A label tensor filled with ground truth label, and with the size of the input\"\"\"\n <|body_1|>\n\n def __call__(self, prediction, target_is_real):\n \"\"\"Calculate loss given Discriminator's output and ground truth labels. 
Parameters: prediction (tensor) - - tpyically the prediction output from a discriminator target_is_real (bool) - - if the ground truth label is for real images or fake images Returns: the calculated loss.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(GANLoss, self).__init__()\n self.register_buffer('real_label', torch.tensor(target_real_label, device=device))\n self.register_buffer('fake_label', torch.tensor(target_fake_label, device=device))\n self.gan_mode = gan_mode\n if gan_mode == 'lsgan':\n self.loss = nn.MSELoss()\n elif gan_mode == 'vanilla':\n self.loss = nn.BCEWithLogitsLoss()\n elif gan_mode in ['wgangp']:\n self.loss = None\n else:\n raise NotImplementedError('gan mode %s not implemented' % gan_mode)\n<|end_body_0|>\n\n<|body_start_1|>\n if target_is_real:\n target_tensor = self.real_label\n else:\n target_tensor = self.fake_label\n return target_tensor.expand_as(prediction)\n<|end_body_1|>\n\n<|body_start_2|>\n if self.gan_mode in ['lsgan', 'vanilla']:\n target_tensor = self.get_target_tensor(prediction, target_is_real)\n loss = self.loss(prediction, target_tensor)\n elif self.gan_mode == 'wgangp':\n if target_is_real:\n loss = -prediction.mean()\n else:\n loss = prediction.mean()\n return loss\n<|end_body_2|>\n", "revision_id": "c50c81c1e5166f84b44ce4087c04d98a4c9bd0e3", "skeleton": "<|skeleton|>\nclass GANLoss:\n \"\"\"Define different GAN objectives. The GANLoss class abstracts away the need to create the target label tensor that has the same size as the input.\"\"\"\n\n def __init__(self, gan_mode, device, target_real_label=1.0, target_fake_label=0.0):\n \"\"\"Initialize the GANLoss class. Parameters: gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp. target_real_label (bool) - - label for a real image target_fake_label (bool) - - label of a fake image Note: Do not use sigmoid as the last layer of Discriminator. LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.\"\"\"\n <|body_0|>\n\n def get_target_tensor(self, prediction, target_is_real):\n \"\"\"Create label tensors with the same size as the input. Parameters: prediction (tensor) - - tpyically the prediction from a discriminator target_is_real (bool) - - if the ground truth label is for real images or fake images Returns: A label tensor filled with ground truth label, and with the size of the input\"\"\"\n <|body_1|>\n\n def __call__(self, prediction, target_is_real):\n \"\"\"Calculate loss given Discriminator's output and grount truth labels. Parameters: prediction (tensor) - - tpyically the prediction output from a discriminator target_is_real (bool) - - if the ground truth label is for real images or fake images Returns: the calculated loss.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class GANLoss:\n \"\"\"Define different GAN objectives. The GANLoss class abstracts away the need to create the target label tensor that has the same size as the input.\"\"\"\n\n def __init__(self, gan_mode, device, target_real_label=1.0, target_fake_label=0.0):\n \"\"\"Initialize the GANLoss class. Parameters: gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp. target_real_label (bool) - - label for a real image target_fake_label (bool) - - label of a fake image Note: Do not use sigmoid as the last layer of Discriminator. LSGAN needs no sigmoid. 
vanilla GANs will handle it with BCEWithLogitsLoss.\"\"\"\n super(GANLoss, self).__init__()\n self.register_buffer('real_label', torch.tensor(target_real_label, device=device))\n self.register_buffer('fake_label', torch.tensor(target_fake_label, device=device))\n self.gan_mode = gan_mode\n if gan_mode == 'lsgan':\n self.loss = nn.MSELoss()\n elif gan_mode == 'vanilla':\n self.loss = nn.BCEWithLogitsLoss()\n elif gan_mode in ['wgangp']:\n self.loss = None\n else:\n raise NotImplementedError('gan mode %s not implemented' % gan_mode)\n\n def get_target_tensor(self, prediction, target_is_real):\n \"\"\"Create label tensors with the same size as the input. Parameters: prediction (tensor) - - tpyically the prediction from a discriminator target_is_real (bool) - - if the ground truth label is for real images or fake images Returns: A label tensor filled with ground truth label, and with the size of the input\"\"\"\n if target_is_real:\n target_tensor = self.real_label\n else:\n target_tensor = self.fake_label\n return target_tensor.expand_as(prediction)\n\n def __call__(self, prediction, target_is_real):\n \"\"\"Calculate loss given Discriminator's output and grount truth labels. Parameters: prediction (tensor) - - tpyically the prediction output from a discriminator target_is_real (bool) - - if the ground truth label is for real images or fake images Returns: the calculated loss.\"\"\"\n if self.gan_mode in ['lsgan', 'vanilla']:\n target_tensor = self.get_target_tensor(prediction, target_is_real)\n loss = self.loss(prediction, target_tensor)\n elif self.gan_mode == 'wgangp':\n if target_is_real:\n loss = -prediction.mean()\n else:\n loss = prediction.mean()\n return loss\n", "source": "the_stack_v2_python_sparse", "source_path": "training/loss.py", "source_repo": "dhbloo/ChunkGAN-pytorch", "split": "val", "star_events_count": 0}
{"blob_id": "bd8f123d8b33f42fb1aaa42e777d78eccb642692", "bodies": ["WrappingFactory.__init__(self, wrappedFactory)\nif isClient:\n creatorInterface = IOpenSSLClientConnectionCreator\nelse:\n creatorInterface = IOpenSSLServerConnectionCreator\nself._creatorInterface = creatorInterface\nif not creatorInterface.providedBy(contextFactory):\n contextFactory = _ContextFactoryToConnectionFactory(contextFactory)\nself._connectionCreator = contextFactory", "if ILoggingContext.providedBy(self.wrappedFactory):\n logPrefix = self.wrappedFactory.logPrefix()\nelse:\n logPrefix = self.wrappedFactory.__class__.__name__\nreturn '{} (TLS)'.format(logPrefix)", "if IProtocolNegotiationFactory.providedBy(self.wrappedFactory):\n protocols = self.wrappedFactory.acceptableProtocols()\n context = connection.get_context()\n _setAcceptableProtocols(context, protocols)\nreturn", "connectionCreator = self._connectionCreator\nif self._creatorInterface is IOpenSSLClientConnectionCreator:\n connection = connectionCreator.clientConnectionForTLS(tlsProtocol)\n self._applyProtocolNegotiation(connection)\n connection.set_connect_state()\nelse:\n connection = connectionCreator.serverConnectionForTLS(tlsProtocol)\n self._applyProtocolNegotiation(connection)\n connection.set_accept_state()\nreturn connection"], "bodies_text": "<|body_start_0|>\n WrappingFactory.__init__(self, wrappedFactory)\n if isClient:\n creatorInterface = IOpenSSLClientConnectionCreator\n else:\n creatorInterface = IOpenSSLServerConnectionCreator\n self._creatorInterface = creatorInterface\n if not creatorInterface.providedBy(contextFactory):\n contextFactory = _ContextFactoryToConnectionFactory(contextFactory)\n self._connectionCreator = contextFactory\n<|end_body_0|>\n\n<|body_start_1|>\n if ILoggingContext.providedBy(self.wrappedFactory):\n logPrefix = self.wrappedFactory.logPrefix()\n else:\n logPrefix = self.wrappedFactory.__class__.__name__\n return '{} (TLS)'.format(logPrefix)\n<|end_body_1|>\n\n<|body_start_2|>\n if IProtocolNegotiationFactory.providedBy(self.wrappedFactory):\n protocols = self.wrappedFactory.acceptableProtocols()\n context = connection.get_context()\n _setAcceptableProtocols(context, protocols)\n return\n<|end_body_2|>\n\n<|body_start_3|>\n connectionCreator = self._connectionCreator\n if self._creatorInterface is IOpenSSLClientConnectionCreator:\n connection = connectionCreator.clientConnectionForTLS(tlsProtocol)\n self._applyProtocolNegotiation(connection)\n connection.set_connect_state()\n else:\n connection = connectionCreator.serverConnectionForTLS(tlsProtocol)\n self._applyProtocolNegotiation(connection)\n connection.set_accept_state()\n return connection\n<|end_body_3|>\n", "class_docstring": "L{TLSMemoryBIOFactory} adds TLS to connections. @ivar _creatorInterface: the interface which L{_connectionCreator} is expected to implement. @type _creatorInterface: L{zope.interface.interfaces.IInterface} @ivar _connectionCreator: a callable which creates an OpenSSL Connection object. @type _connectionCreator: 1-argument callable taking L{TLSMemoryBIOProtocol} and returning L{OpenSSL.SSL.Connection}.", "class_name": "TLSMemoryBIOFactory", "detected_licenses": ["MIT", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TLSMemoryBIOFactory:\n \"\"\"L{TLSMemoryBIOFactory} adds TLS to connections. @ivar _creatorInterface: the interface which L{_connectionCreator} is expected to implement. 
@type _creatorInterface: L{zope.interface.interfaces.IInterface} @ivar _connectionCreator: a callable which creates an OpenSSL Connection object. @type _connectionCreator: 1-argument callable taking L{TLSMemoryBIOProtocol} and returning L{OpenSSL.SSL.Connection}.\"\"\"\n\n def __init__(self, contextFactory, isClient, wrappedFactory):\n \"\"\"Create a L{TLSMemoryBIOFactory}. @param contextFactory: Configuration parameters used to create an OpenSSL connection. In order of preference, what you should pass here should be: 1. L{twisted.internet.ssl.CertificateOptions} (if you're writing a server) or the result of L{twisted.internet.ssl.optionsForClientTLS} (if you're writing a client). If you want security you should really use one of these. 2. If you really want to implement something yourself, supply a provider of L{IOpenSSLClientConnectionCreator} or L{IOpenSSLServerConnectionCreator}. 3. If you really have to, supply a L{twisted.internet.ssl.ContextFactory}. This will likely be deprecated at some point so please upgrade to the ne\"\"\"\n <|body_0|>\n\n def logPrefix(self):\n \"\"\"Annotate the wrapped factory's log prefix with some text indicating TLS is in use. @rtype: C{str}\"\"\"\n <|body_1|>\n\n def _applyProtocolNegotiation(self, connection):\n \"\"\"Applies ALPN/NPN protocol negotiation to the connection, if the factory supports it. @param connection: The OpenSSL connection object to have ALPN/NPN added to it. @type connection: L{OpenSSL.SSL.Connection} @return: Nothing @rtype: L{None}\"\"\"\n <|body_2|>\n\n def _createConnection(self, tlsProtocol):\n \"\"\"Create an OpenSSL connection and set it up good. @param tlsProtocol: The protocol which is establishing the connection. @type tlsProtocol: L{TLSMemoryBIOProtocol} @return: an OpenSSL connection object for C{tlsProtocol} to use @rtype: L{OpenSSL.SSL.Connection}\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n WrappingFactory.__init__(self, wrappedFactory)\n if isClient:\n creatorInterface = IOpenSSLClientConnectionCreator\n else:\n creatorInterface = IOpenSSLServerConnectionCreator\n self._creatorInterface = creatorInterface\n if not creatorInterface.providedBy(contextFactory):\n contextFactory = _ContextFactoryToConnectionFactory(contextFactory)\n self._connectionCreator = contextFactory\n<|end_body_0|>\n\n<|body_start_1|>\n if ILoggingContext.providedBy(self.wrappedFactory):\n logPrefix = self.wrappedFactory.logPrefix()\n else:\n logPrefix = self.wrappedFactory.__class__.__name__\n return '{} (TLS)'.format(logPrefix)\n<|end_body_1|>\n\n<|body_start_2|>\n if IProtocolNegotiationFactory.providedBy(self.wrappedFactory):\n protocols = self.wrappedFactory.acceptableProtocols()\n context = connection.get_context()\n _setAcceptableProtocols(context, protocols)\n return\n<|end_body_2|>\n\n<|body_start_3|>\n connectionCreator = self._connectionCreator\n if self._creatorInterface is IOpenSSLClientConnectionCreator:\n connection = connectionCreator.clientConnectionForTLS(tlsProtocol)\n self._applyProtocolNegotiation(connection)\n connection.set_connect_state()\n else:\n connection = connectionCreator.serverConnectionForTLS(tlsProtocol)\n self._applyProtocolNegotiation(connection)\n connection.set_accept_state()\n return connection\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000457", "length_bytes": 32500, "license_type": "permissive", "methods": [{"docstring": "Create a L{TLSMemoryBIOFactory}. @param contextFactory: Configuration parameters used to create an OpenSSL connection. 
In order of preference, what you should pass here should be: 1. L{twisted.internet.ssl.CertificateOptions} (if you're writing a server) or the result of L{twisted.internet.ssl.optionsForClientTLS} (if you're writing a client). If you want security you should really use one of these. 2. If you really want to implement something yourself, supply a provider of L{IOpenSSLClientConnectionCreator} or L{IOpenSSLServerConnectionCreator}. 3. If you really have to, supply a L{twisted.internet.ssl.ContextFactory}. This will likely be deprecated at some point so please upgrade to the ne", "name": "__init__", "signature": "def __init__(self, contextFactory, isClient, wrappedFactory)"}, {"docstring": "Annotate the wrapped factory's log prefix with some text indicating TLS is in use. @rtype: C{str}", "name": "logPrefix", "signature": "def logPrefix(self)"}, {"docstring": "Applies ALPN/NPN protocol negotiation to the connection, if the factory supports it. @param connection: The OpenSSL connection object to have ALPN/NPN added to it. @type connection: L{OpenSSL.SSL.Connection} @return: Nothing @rtype: L{None}", "name": "_applyProtocolNegotiation", "signature": "def _applyProtocolNegotiation(self, connection)"}, {"docstring": "Create an OpenSSL connection and set it up good. @param tlsProtocol: The protocol which is establishing the connection. @type tlsProtocol: L{TLSMemoryBIOProtocol} @return: an OpenSSL connection object for C{tlsProtocol} to use @rtype: L{OpenSSL.SSL.Connection}", "name": "_createConnection", "signature": "def _createConnection(self, tlsProtocol)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_054496", "prompt": "Implement the Python class `TLSMemoryBIOFactory` described below.\n\nClass description:\nL{TLSMemoryBIOFactory} adds TLS to connections. @ivar _creatorInterface: the interface which L{_connectionCreator} is expected to implement. @type _creatorInterface: L{zope.interface.interfaces.IInterface} @ivar _connectionCreator: a callable which creates an OpenSSL Connection object. @type _connectionCreator: 1-argument callable taking L{TLSMemoryBIOProtocol} and returning L{OpenSSL.SSL.Connection}.\n\nMethod signatures and docstrings:\n- def __init__(self, contextFactory, isClient, wrappedFactory): Create a L{TLSMemoryBIOFactory}. @param contextFactory: Configuration parameters used to create an OpenSSL connection. In order of preference, what you should pass here should be: 1. L{twisted.internet.ssl.CertificateOptions} (if you're writing a server) or the result of L{twisted.internet.ssl.optionsForClientTLS} (if you're writing a client). If you want security you should really use one of these. 2. If you really want to implement something yourself, supply a provider of L{IOpenSSLClientConnectionCreator} or L{IOpenSSLServerConnectionCreator}. 3. If you really have to, supply a L{twisted.internet.ssl.ContextFactory}. This will likely be deprecated at some point so please upgrade to the ne\n- def logPrefix(self): Annotate the wrapped factory's log prefix with some text indicating TLS is in use. @rtype: C{str}\n- def _applyProtocolNegotiation(self, connection): Applies ALPN/NPN protocol negotiation to the connection, if the factory supports it. @param connection: The OpenSSL connection object to have ALPN/NPN added to it. @type connection: L{OpenSSL.SSL.Connection} @return: Nothing @rtype: L{None}\n- def _createConnection(self, tlsProtocol): Create an OpenSSL connection and set it up good. @param tlsProtocol: The protocol which is establishing the connection. 
@type tlsProtocol: L{TLSMemoryBIOProtocol} @return: an OpenSSL connection object for C{tlsProtocol} to use @rtype: L{OpenSSL.SSL.Connection}", "prompted_full_text": "Implement the Python class `TLSMemoryBIOFactory` described below.\n\nClass description:\nL{TLSMemoryBIOFactory} adds TLS to connections. @ivar _creatorInterface: the interface which L{_connectionCreator} is expected to implement. @type _creatorInterface: L{zope.interface.interfaces.IInterface} @ivar _connectionCreator: a callable which creates an OpenSSL Connection object. @type _connectionCreator: 1-argument callable taking L{TLSMemoryBIOProtocol} and returning L{OpenSSL.SSL.Connection}.\n\nMethod signatures and docstrings:\n- def __init__(self, contextFactory, isClient, wrappedFactory): Create a L{TLSMemoryBIOFactory}. @param contextFactory: Configuration parameters used to create an OpenSSL connection. In order of preference, what you should pass here should be: 1. L{twisted.internet.ssl.CertificateOptions} (if you're writing a server) or the result of L{twisted.internet.ssl.optionsForClientTLS} (if you're writing a client). If you want security you should really use one of these. 2. If you really want to implement something yourself, supply a provider of L{IOpenSSLClientConnectionCreator} or L{IOpenSSLServerConnectionCreator}. 3. If you really have to, supply a L{twisted.internet.ssl.ContextFactory}. This will likely be deprecated at some point so please upgrade to the ne\n- def logPrefix(self): Annotate the wrapped factory's log prefix with some text indicating TLS is in use. @rtype: C{str}\n- def _applyProtocolNegotiation(self, connection): Applies ALPN/NPN protocol negotiation to the connection, if the factory supports it. @param connection: The OpenSSL connection object to have ALPN/NPN added to it. @type connection: L{OpenSSL.SSL.Connection} @return: Nothing @rtype: L{None}\n- def _createConnection(self, tlsProtocol): Create an OpenSSL connection and set it up good. @param tlsProtocol: The protocol which is establishing the connection. @type tlsProtocol: L{TLSMemoryBIOProtocol} @return: an OpenSSL connection object for C{tlsProtocol} to use @rtype: L{OpenSSL.SSL.Connection}\n\n<|skeleton|>\nclass TLSMemoryBIOFactory:\n \"\"\"L{TLSMemoryBIOFactory} adds TLS to connections. @ivar _creatorInterface: the interface which L{_connectionCreator} is expected to implement. @type _creatorInterface: L{zope.interface.interfaces.IInterface} @ivar _connectionCreator: a callable which creates an OpenSSL Connection object. @type _connectionCreator: 1-argument callable taking L{TLSMemoryBIOProtocol} and returning L{OpenSSL.SSL.Connection}.\"\"\"\n\n def __init__(self, contextFactory, isClient, wrappedFactory):\n \"\"\"Create a L{TLSMemoryBIOFactory}. @param contextFactory: Configuration parameters used to create an OpenSSL connection. In order of preference, what you should pass here should be: 1. L{twisted.internet.ssl.CertificateOptions} (if you're writing a server) or the result of L{twisted.internet.ssl.optionsForClientTLS} (if you're writing a client). If you want security you should really use one of these. 2. If you really want to implement something yourself, supply a provider of L{IOpenSSLClientConnectionCreator} or L{IOpenSSLServerConnectionCreator}. 3. If you really have to, supply a L{twisted.internet.ssl.ContextFactory}. 
This will likely be deprecated at some point so please upgrade to the ne\"\"\"\n <|body_0|>\n\n def logPrefix(self):\n \"\"\"Annotate the wrapped factory's log prefix with some text indicating TLS is in use. @rtype: C{str}\"\"\"\n <|body_1|>\n\n def _applyProtocolNegotiation(self, connection):\n \"\"\"Applies ALPN/NPN protocol negotiation to the connection, if the factory supports it. @param connection: The OpenSSL connection object to have ALPN/NPN added to it. @type connection: L{OpenSSL.SSL.Connection} @return: Nothing @rtype: L{None}\"\"\"\n <|body_2|>\n\n def _createConnection(self, tlsProtocol):\n \"\"\"Create an OpenSSL connection and set it up good. @param tlsProtocol: The protocol which is establishing the connection. @type tlsProtocol: L{TLSMemoryBIOProtocol} @return: an OpenSSL connection object for C{tlsProtocol} to use @rtype: L{OpenSSL.SSL.Connection}\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n WrappingFactory.__init__(self, wrappedFactory)\n if isClient:\n creatorInterface = IOpenSSLClientConnectionCreator\n else:\n creatorInterface = IOpenSSLServerConnectionCreator\n self._creatorInterface = creatorInterface\n if not creatorInterface.providedBy(contextFactory):\n contextFactory = _ContextFactoryToConnectionFactory(contextFactory)\n self._connectionCreator = contextFactory\n<|end_body_0|>\n\n<|body_start_1|>\n if ILoggingContext.providedBy(self.wrappedFactory):\n logPrefix = self.wrappedFactory.logPrefix()\n else:\n logPrefix = self.wrappedFactory.__class__.__name__\n return '{} (TLS)'.format(logPrefix)\n<|end_body_1|>\n\n<|body_start_2|>\n if IProtocolNegotiationFactory.providedBy(self.wrappedFactory):\n protocols = self.wrappedFactory.acceptableProtocols()\n context = connection.get_context()\n _setAcceptableProtocols(context, protocols)\n return\n<|end_body_2|>\n\n<|body_start_3|>\n connectionCreator = self._connectionCreator\n if self._creatorInterface is IOpenSSLClientConnectionCreator:\n connection = connectionCreator.clientConnectionForTLS(tlsProtocol)\n self._applyProtocolNegotiation(connection)\n connection.set_connect_state()\n else:\n connection = connectionCreator.serverConnectionForTLS(tlsProtocol)\n self._applyProtocolNegotiation(connection)\n connection.set_accept_state()\n return connection\n<|end_body_3|>\n", "revision_id": "5cee0a8c4180a3108538b4e4ce945a18726595a6", "skeleton": "<|skeleton|>\nclass TLSMemoryBIOFactory:\n \"\"\"L{TLSMemoryBIOFactory} adds TLS to connections. @ivar _creatorInterface: the interface which L{_connectionCreator} is expected to implement. @type _creatorInterface: L{zope.interface.interfaces.IInterface} @ivar _connectionCreator: a callable which creates an OpenSSL Connection object. @type _connectionCreator: 1-argument callable taking L{TLSMemoryBIOProtocol} and returning L{OpenSSL.SSL.Connection}.\"\"\"\n\n def __init__(self, contextFactory, isClient, wrappedFactory):\n \"\"\"Create a L{TLSMemoryBIOFactory}. @param contextFactory: Configuration parameters used to create an OpenSSL connection. In order of preference, what you should pass here should be: 1. L{twisted.internet.ssl.CertificateOptions} (if you're writing a server) or the result of L{twisted.internet.ssl.optionsForClientTLS} (if you're writing a client). If you want security you should really use one of these. 2. If you really want to implement something yourself, supply a provider of L{IOpenSSLClientConnectionCreator} or L{IOpenSSLServerConnectionCreator}. 3. If you really have to, supply a L{twisted.internet.ssl.ContextFactory}. 
This will likely be deprecated at some point so please upgrade to the ne\"\"\"\n <|body_0|>\n\n def logPrefix(self):\n \"\"\"Annotate the wrapped factory's log prefix with some text indicating TLS is in use. @rtype: C{str}\"\"\"\n <|body_1|>\n\n def _applyProtocolNegotiation(self, connection):\n \"\"\"Applies ALPN/NPN protocol negotiation to the connection, if the factory supports it. @param connection: The OpenSSL connection object to have ALPN/NPN added to it. @type connection: L{OpenSSL.SSL.Connection} @return: Nothing @rtype: L{None}\"\"\"\n <|body_2|>\n\n def _createConnection(self, tlsProtocol):\n \"\"\"Create an OpenSSL connection and set it up good. @param tlsProtocol: The protocol which is establishing the connection. @type tlsProtocol: L{TLSMemoryBIOProtocol} @return: an OpenSSL connection object for C{tlsProtocol} to use @rtype: L{OpenSSL.SSL.Connection}\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TLSMemoryBIOFactory:\n \"\"\"L{TLSMemoryBIOFactory} adds TLS to connections. @ivar _creatorInterface: the interface which L{_connectionCreator} is expected to implement. @type _creatorInterface: L{zope.interface.interfaces.IInterface} @ivar _connectionCreator: a callable which creates an OpenSSL Connection object. @type _connectionCreator: 1-argument callable taking L{TLSMemoryBIOProtocol} and returning L{OpenSSL.SSL.Connection}.\"\"\"\n\n def __init__(self, contextFactory, isClient, wrappedFactory):\n \"\"\"Create a L{TLSMemoryBIOFactory}. @param contextFactory: Configuration parameters used to create an OpenSSL connection. In order of preference, what you should pass here should be: 1. L{twisted.internet.ssl.CertificateOptions} (if you're writing a server) or the result of L{twisted.internet.ssl.optionsForClientTLS} (if you're writing a client). If you want security you should really use one of these. 2. If you really want to implement something yourself, supply a provider of L{IOpenSSLClientConnectionCreator} or L{IOpenSSLServerConnectionCreator}. 3. If you really have to, supply a L{twisted.internet.ssl.ContextFactory}. This will likely be deprecated at some point so please upgrade to the ne\"\"\"\n WrappingFactory.__init__(self, wrappedFactory)\n if isClient:\n creatorInterface = IOpenSSLClientConnectionCreator\n else:\n creatorInterface = IOpenSSLServerConnectionCreator\n self._creatorInterface = creatorInterface\n if not creatorInterface.providedBy(contextFactory):\n contextFactory = _ContextFactoryToConnectionFactory(contextFactory)\n self._connectionCreator = contextFactory\n\n def logPrefix(self):\n \"\"\"Annotate the wrapped factory's log prefix with some text indicating TLS is in use. @rtype: C{str}\"\"\"\n if ILoggingContext.providedBy(self.wrappedFactory):\n logPrefix = self.wrappedFactory.logPrefix()\n else:\n logPrefix = self.wrappedFactory.__class__.__name__\n return '{} (TLS)'.format(logPrefix)\n\n def _applyProtocolNegotiation(self, connection):\n \"\"\"Applies ALPN/NPN protocol negotiation to the connection, if the factory supports it. @param connection: The OpenSSL connection object to have ALPN/NPN added to it. 
@type connection: L{OpenSSL.SSL.Connection} @return: Nothing @rtype: L{None}\"\"\"\n if IProtocolNegotiationFactory.providedBy(self.wrappedFactory):\n protocols = self.wrappedFactory.acceptableProtocols()\n context = connection.get_context()\n _setAcceptableProtocols(context, protocols)\n return\n\n def _createConnection(self, tlsProtocol):\n \"\"\"Create an OpenSSL connection and set it up good. @param tlsProtocol: The protocol which is establishing the connection. @type tlsProtocol: L{TLSMemoryBIOProtocol} @return: an OpenSSL connection object for C{tlsProtocol} to use @rtype: L{OpenSSL.SSL.Connection}\"\"\"\n connectionCreator = self._connectionCreator\n if self._creatorInterface is IOpenSSLClientConnectionCreator:\n connection = connectionCreator.clientConnectionForTLS(tlsProtocol)\n self._applyProtocolNegotiation(connection)\n connection.set_connect_state()\n else:\n connection = connectionCreator.serverConnectionForTLS(tlsProtocol)\n self._applyProtocolNegotiation(connection)\n connection.set_accept_state()\n return connection\n", "source": "the_stack_v2_python_sparse", "source_path": "venv/Lib/site-packages/twisted/protocols/tls.py", "source_repo": "zoelesv/Smathchat", "split": "val", "star_events_count": 9}
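Since the TLSMemoryBIOFactory record leans on several Twisted interfaces, a short instantiation sketch may help readers place it. It follows the docstring's first-preference option for the client side; the Echo protocol and hostname are illustrative stand-ins, not taken from the record.

from twisted.internet import protocol, ssl
from twisted.protocols.tls import TLSMemoryBIOFactory

class Echo(protocol.Protocol):
    # hypothetical application-level protocol to be wrapped in TLS
    def dataReceived(self, data):
        self.transport.write(data)

plainFactory = protocol.Factory.forProtocol(Echo)
contextFactory = ssl.optionsForClientTLS(u'example.com')  # option 1 from the docstring
tlsFactory = TLSMemoryBIOFactory(contextFactory, isClient=True,
                                 wrappedFactory=plainFactory)
print(tlsFactory.logPrefix())  # annotated prefix, e.g. 'Factory (TLS)'

Anything that is not already a connection-creator provider, such as an old-style ContextFactory, gets adapted by _ContextFactoryToConnectionFactory in body_0.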
{"blob_id": "88e1f01bd2d7ab8ce73200f52accefaf782a35fb", "bodies": ["self._input_names = [ids_name, mask_name, segment_name]\nself._ordered_input_names = writer_utils.get_input_tensor_names(model_buffer)\nif collections.Counter(self._ordered_input_names) != collections.Counter(self._input_names):\n raise ValueError(f'The input tensor names ({self._ordered_input_names}) do not match the tensor names read from the model ({self._input_names}).')\nif ids_md is None:\n ids_md = TensorMd(name=self._IDS_NAME, description=self._IDS_DESCRIPTION)\nif mask_md is None:\n mask_md = TensorMd(name=self._MASK_NAME, description=self._MASK_DESCRIPTION)\nif segment_ids_md is None:\n segment_ids_md = TensorMd(name=self._SEGMENT_IDS_NAME, description=self._SEGMENT_IDS_DESCRIPTION)\nself._input_md = [ids_md, mask_md, segment_ids_md]\nif not isinstance(tokenizer_md, (type(None), BertTokenizerMd, SentencePieceTokenizerMd)):\n raise ValueError(f'The type of tokenizer_options, {type(tokenizer_md)}, is unsupported')\nself._tokenizer_md = tokenizer_md", "ordered_metadata = []\nname_md_dict = dict(zip(self._input_names, self._input_md))\nfor name in self._ordered_input_names:\n ordered_metadata.append(name_md_dict[name].create_metadata())\nreturn ordered_metadata", "if self._tokenizer_md:\n return [self._tokenizer_md.create_metadata()]\nelse:\n return []", "if self._tokenizer_md:\n return writer_utils.get_tokenizer_associated_files(self._tokenizer_md.create_metadata().options)\nelse:\n return []"], "bodies_text": "<|body_start_0|>\n self._input_names = [ids_name, mask_name, segment_name]\n self._ordered_input_names = writer_utils.get_input_tensor_names(model_buffer)\n if collections.Counter(self._ordered_input_names) != collections.Counter(self._input_names):\n raise ValueError(f'The input tensor names ({self._ordered_input_names}) do not match the tensor names read from the model ({self._input_names}).')\n if ids_md is None:\n ids_md = TensorMd(name=self._IDS_NAME, description=self._IDS_DESCRIPTION)\n if mask_md is None:\n mask_md = TensorMd(name=self._MASK_NAME, description=self._MASK_DESCRIPTION)\n if segment_ids_md is None:\n segment_ids_md = TensorMd(name=self._SEGMENT_IDS_NAME, description=self._SEGMENT_IDS_DESCRIPTION)\n self._input_md = [ids_md, mask_md, segment_ids_md]\n if not isinstance(tokenizer_md, (type(None), BertTokenizerMd, SentencePieceTokenizerMd)):\n raise ValueError(f'The type of tokenizer_options, {type(tokenizer_md)}, is unsupported')\n self._tokenizer_md = tokenizer_md\n<|end_body_0|>\n\n<|body_start_1|>\n ordered_metadata = []\n name_md_dict = dict(zip(self._input_names, self._input_md))\n for name in self._ordered_input_names:\n ordered_metadata.append(name_md_dict[name].create_metadata())\n return ordered_metadata\n<|end_body_1|>\n\n<|body_start_2|>\n if self._tokenizer_md:\n return [self._tokenizer_md.create_metadata()]\n else:\n return []\n<|end_body_2|>\n\n<|body_start_3|>\n if self._tokenizer_md:\n return writer_utils.get_tokenizer_associated_files(self._tokenizer_md.create_metadata().options)\n else:\n return []\n<|end_body_3|>\n", "class_docstring": "A container for the input tensor metadata information of Bert models.", "class_name": "BertInputTensorsMd", "detected_licenses": ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference", "GPL-1.0-or-later", "MIT", "LGPL-2.0-or-later", "BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BertInputTensorsMd:\n \"\"\"A container for the input tensor metadata information of Bert 
models.\"\"\"\n\n def __init__(self, model_buffer: bytearray, ids_name: str, mask_name: str, segment_name: str, ids_md: Optional[TensorMd]=None, mask_md: Optional[TensorMd]=None, segment_ids_md: Optional[TensorMd]=None, tokenizer_md: Union[None, BertTokenizerMd, SentencePieceTokenizerMd]=None):\n \"\"\"Initializes a BertInputTensorsMd object. `ids_name`, `mask_name`, and `segment_name` correspond to the `Tensor.name` in the TFLite schema, which help to determine the tensor order when populating metadata. Args: model_buffer: valid buffer of the model file. ids_name: name of the ids tensor, which represents the tokenized ids of the input text. mask_name: name of the mask tensor, which represents the mask with 1 for real tokens and 0 for padding tokens. segment_name: name of the segment ids tensor, where `0` stands for the first sequence, and `1` stands for the second sequence if exists. ids_md: input ids tensor informaton. mask_md: input mask tensor informaton. segment_ids_md: input segment\"\"\"\n <|body_0|>\n\n def create_input_tesnor_metadata(self) -> List[_metadata_fb.TensorMetadataT]:\n \"\"\"Creates the input metadata for the three input tesnors.\"\"\"\n <|body_1|>\n\n def create_input_process_unit_metadata(self) -> List[_metadata_fb.ProcessUnitT]:\n \"\"\"Creates the input process unit metadata.\"\"\"\n <|body_2|>\n\n def get_tokenizer_associated_files(self) -> List[str]:\n \"\"\"Gets the associated files that are packed in the tokenizer.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._input_names = [ids_name, mask_name, segment_name]\n self._ordered_input_names = writer_utils.get_input_tensor_names(model_buffer)\n if collections.Counter(self._ordered_input_names) != collections.Counter(self._input_names):\n raise ValueError(f'The input tensor names ({self._ordered_input_names}) do not match the tensor names read from the model ({self._input_names}).')\n if ids_md is None:\n ids_md = TensorMd(name=self._IDS_NAME, description=self._IDS_DESCRIPTION)\n if mask_md is None:\n mask_md = TensorMd(name=self._MASK_NAME, description=self._MASK_DESCRIPTION)\n if segment_ids_md is None:\n segment_ids_md = TensorMd(name=self._SEGMENT_IDS_NAME, description=self._SEGMENT_IDS_DESCRIPTION)\n self._input_md = [ids_md, mask_md, segment_ids_md]\n if not isinstance(tokenizer_md, (type(None), BertTokenizerMd, SentencePieceTokenizerMd)):\n raise ValueError(f'The type of tokenizer_options, {type(tokenizer_md)}, is unsupported')\n self._tokenizer_md = tokenizer_md\n<|end_body_0|>\n\n<|body_start_1|>\n ordered_metadata = []\n name_md_dict = dict(zip(self._input_names, self._input_md))\n for name in self._ordered_input_names:\n ordered_metadata.append(name_md_dict[name].create_metadata())\n return ordered_metadata\n<|end_body_1|>\n\n<|body_start_2|>\n if self._tokenizer_md:\n return [self._tokenizer_md.create_metadata()]\n else:\n return []\n<|end_body_2|>\n\n<|body_start_3|>\n if self._tokenizer_md:\n return writer_utils.get_tokenizer_associated_files(self._tokenizer_md.create_metadata().options)\n else:\n return []\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000458", "length_bytes": 32760, "license_type": "permissive", "methods": [{"docstring": "Initializes a BertInputTensorsMd object. `ids_name`, `mask_name`, and `segment_name` correspond to the `Tensor.name` in the TFLite schema, which help to determine the tensor order when populating metadata. Args: model_buffer: valid buffer of the model file. 
ids_name: name of the ids tensor, which represents the tokenized ids of the input text. mask_name: name of the mask tensor, which represents the mask with 1 for real tokens and 0 for padding tokens. segment_name: name of the segment ids tensor, where `0` stands for the first sequence, and `1` stands for the second sequence if exists. ids_md: input ids tensor informaton. mask_md: input mask tensor informaton. segment_ids_md: input segment", "name": "__init__", "signature": "def __init__(self, model_buffer: bytearray, ids_name: str, mask_name: str, segment_name: str, ids_md: Optional[TensorMd]=None, mask_md: Optional[TensorMd]=None, segment_ids_md: Optional[TensorMd]=None, tokenizer_md: Union[None, BertTokenizerMd, SentencePieceTokenizerMd]=None)"}, {"docstring": "Creates the input metadata for the three input tesnors.", "name": "create_input_tesnor_metadata", "signature": "def create_input_tesnor_metadata(self) -> List[_metadata_fb.TensorMetadataT]"}, {"docstring": "Creates the input process unit metadata.", "name": "create_input_process_unit_metadata", "signature": "def create_input_process_unit_metadata(self) -> List[_metadata_fb.ProcessUnitT]"}, {"docstring": "Gets the associated files that are packed in the tokenizer.", "name": "get_tokenizer_associated_files", "signature": "def get_tokenizer_associated_files(self) -> List[str]"}], "n_methods": 4, "prompt": "Implement the Python class `BertInputTensorsMd` described below.\n\nClass description:\nA container for the input tensor metadata information of Bert models.\n\nMethod signatures and docstrings:\n- def __init__(self, model_buffer: bytearray, ids_name: str, mask_name: str, segment_name: str, ids_md: Optional[TensorMd]=None, mask_md: Optional[TensorMd]=None, segment_ids_md: Optional[TensorMd]=None, tokenizer_md: Union[None, BertTokenizerMd, SentencePieceTokenizerMd]=None): Initializes a BertInputTensorsMd object. `ids_name`, `mask_name`, and `segment_name` correspond to the `Tensor.name` in the TFLite schema, which help to determine the tensor order when populating metadata. Args: model_buffer: valid buffer of the model file. ids_name: name of the ids tensor, which represents the tokenized ids of the input text. mask_name: name of the mask tensor, which represents the mask with 1 for real tokens and 0 for padding tokens. segment_name: name of the segment ids tensor, where `0` stands for the first sequence, and `1` stands for the second sequence if exists. ids_md: input ids tensor informaton. mask_md: input mask tensor informaton. segment_ids_md: input segment\n- def create_input_tesnor_metadata(self) -> List[_metadata_fb.TensorMetadataT]: Creates the input metadata for the three input tesnors.\n- def create_input_process_unit_metadata(self) -> List[_metadata_fb.ProcessUnitT]: Creates the input process unit metadata.\n- def get_tokenizer_associated_files(self) -> List[str]: Gets the associated files that are packed in the tokenizer.", "prompted_full_text": "Implement the Python class `BertInputTensorsMd` described below.\n\nClass description:\nA container for the input tensor metadata information of Bert models.\n\nMethod signatures and docstrings:\n- def __init__(self, model_buffer: bytearray, ids_name: str, mask_name: str, segment_name: str, ids_md: Optional[TensorMd]=None, mask_md: Optional[TensorMd]=None, segment_ids_md: Optional[TensorMd]=None, tokenizer_md: Union[None, BertTokenizerMd, SentencePieceTokenizerMd]=None): Initializes a BertInputTensorsMd object. 
`ids_name`, `mask_name`, and `segment_name` correspond to the `Tensor.name` in the TFLite schema, which help to determine the tensor order when populating metadata. Args: model_buffer: valid buffer of the model file. ids_name: name of the ids tensor, which represents the tokenized ids of the input text. mask_name: name of the mask tensor, which represents the mask with 1 for real tokens and 0 for padding tokens. segment_name: name of the segment ids tensor, where `0` stands for the first sequence, and `1` stands for the second sequence if exists. ids_md: input ids tensor informaton. mask_md: input mask tensor informaton. segment_ids_md: input segment\n- def create_input_tesnor_metadata(self) -> List[_metadata_fb.TensorMetadataT]: Creates the input metadata for the three input tesnors.\n- def create_input_process_unit_metadata(self) -> List[_metadata_fb.ProcessUnitT]: Creates the input process unit metadata.\n- def get_tokenizer_associated_files(self) -> List[str]: Gets the associated files that are packed in the tokenizer.\n\n<|skeleton|>\nclass BertInputTensorsMd:\n \"\"\"A container for the input tensor metadata information of Bert models.\"\"\"\n\n def __init__(self, model_buffer: bytearray, ids_name: str, mask_name: str, segment_name: str, ids_md: Optional[TensorMd]=None, mask_md: Optional[TensorMd]=None, segment_ids_md: Optional[TensorMd]=None, tokenizer_md: Union[None, BertTokenizerMd, SentencePieceTokenizerMd]=None):\n \"\"\"Initializes a BertInputTensorsMd object. `ids_name`, `mask_name`, and `segment_name` correspond to the `Tensor.name` in the TFLite schema, which help to determine the tensor order when populating metadata. Args: model_buffer: valid buffer of the model file. ids_name: name of the ids tensor, which represents the tokenized ids of the input text. mask_name: name of the mask tensor, which represents the mask with 1 for real tokens and 0 for padding tokens. segment_name: name of the segment ids tensor, where `0` stands for the first sequence, and `1` stands for the second sequence if exists. ids_md: input ids tensor informaton. mask_md: input mask tensor informaton. 
segment_ids_md: input segment\"\"\"\n <|body_0|>\n\n def create_input_tesnor_metadata(self) -> List[_metadata_fb.TensorMetadataT]:\n \"\"\"Creates the input metadata for the three input tesnors.\"\"\"\n <|body_1|>\n\n def create_input_process_unit_metadata(self) -> List[_metadata_fb.ProcessUnitT]:\n \"\"\"Creates the input process unit metadata.\"\"\"\n <|body_2|>\n\n def get_tokenizer_associated_files(self) -> List[str]:\n \"\"\"Gets the associated files that are packed in the tokenizer.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._input_names = [ids_name, mask_name, segment_name]\n self._ordered_input_names = writer_utils.get_input_tensor_names(model_buffer)\n if collections.Counter(self._ordered_input_names) != collections.Counter(self._input_names):\n raise ValueError(f'The input tensor names ({self._ordered_input_names}) do not match the tensor names read from the model ({self._input_names}).')\n if ids_md is None:\n ids_md = TensorMd(name=self._IDS_NAME, description=self._IDS_DESCRIPTION)\n if mask_md is None:\n mask_md = TensorMd(name=self._MASK_NAME, description=self._MASK_DESCRIPTION)\n if segment_ids_md is None:\n segment_ids_md = TensorMd(name=self._SEGMENT_IDS_NAME, description=self._SEGMENT_IDS_DESCRIPTION)\n self._input_md = [ids_md, mask_md, segment_ids_md]\n if not isinstance(tokenizer_md, (type(None), BertTokenizerMd, SentencePieceTokenizerMd)):\n raise ValueError(f'The type of tokenizer_options, {type(tokenizer_md)}, is unsupported')\n self._tokenizer_md = tokenizer_md\n<|end_body_0|>\n\n<|body_start_1|>\n ordered_metadata = []\n name_md_dict = dict(zip(self._input_names, self._input_md))\n for name in self._ordered_input_names:\n ordered_metadata.append(name_md_dict[name].create_metadata())\n return ordered_metadata\n<|end_body_1|>\n\n<|body_start_2|>\n if self._tokenizer_md:\n return [self._tokenizer_md.create_metadata()]\n else:\n return []\n<|end_body_2|>\n\n<|body_start_3|>\n if self._tokenizer_md:\n return writer_utils.get_tokenizer_associated_files(self._tokenizer_md.create_metadata().options)\n else:\n return []\n<|end_body_3|>\n", "revision_id": "a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c", "skeleton": "<|skeleton|>\nclass BertInputTensorsMd:\n \"\"\"A container for the input tensor metadata information of Bert models.\"\"\"\n\n def __init__(self, model_buffer: bytearray, ids_name: str, mask_name: str, segment_name: str, ids_md: Optional[TensorMd]=None, mask_md: Optional[TensorMd]=None, segment_ids_md: Optional[TensorMd]=None, tokenizer_md: Union[None, BertTokenizerMd, SentencePieceTokenizerMd]=None):\n \"\"\"Initializes a BertInputTensorsMd object. `ids_name`, `mask_name`, and `segment_name` correspond to the `Tensor.name` in the TFLite schema, which help to determine the tensor order when populating metadata. Args: model_buffer: valid buffer of the model file. ids_name: name of the ids tensor, which represents the tokenized ids of the input text. mask_name: name of the mask tensor, which represents the mask with 1 for real tokens and 0 for padding tokens. segment_name: name of the segment ids tensor, where `0` stands for the first sequence, and `1` stands for the second sequence if exists. ids_md: input ids tensor informaton. mask_md: input mask tensor informaton. 
segment_ids_md: input segment\"\"\"\n <|body_0|>\n\n def create_input_tesnor_metadata(self) -> List[_metadata_fb.TensorMetadataT]:\n \"\"\"Creates the input metadata for the three input tesnors.\"\"\"\n <|body_1|>\n\n def create_input_process_unit_metadata(self) -> List[_metadata_fb.ProcessUnitT]:\n \"\"\"Creates the input process unit metadata.\"\"\"\n <|body_2|>\n\n def get_tokenizer_associated_files(self) -> List[str]:\n \"\"\"Gets the associated files that are packed in the tokenizer.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class BertInputTensorsMd:\n \"\"\"A container for the input tensor metadata information of Bert models.\"\"\"\n\n def __init__(self, model_buffer: bytearray, ids_name: str, mask_name: str, segment_name: str, ids_md: Optional[TensorMd]=None, mask_md: Optional[TensorMd]=None, segment_ids_md: Optional[TensorMd]=None, tokenizer_md: Union[None, BertTokenizerMd, SentencePieceTokenizerMd]=None):\n \"\"\"Initializes a BertInputTensorsMd object. `ids_name`, `mask_name`, and `segment_name` correspond to the `Tensor.name` in the TFLite schema, which help to determine the tensor order when populating metadata. Args: model_buffer: valid buffer of the model file. ids_name: name of the ids tensor, which represents the tokenized ids of the input text. mask_name: name of the mask tensor, which represents the mask with 1 for real tokens and 0 for padding tokens. segment_name: name of the segment ids tensor, where `0` stands for the first sequence, and `1` stands for the second sequence if exists. ids_md: input ids tensor informaton. mask_md: input mask tensor informaton. segment_ids_md: input segment\"\"\"\n self._input_names = [ids_name, mask_name, segment_name]\n self._ordered_input_names = writer_utils.get_input_tensor_names(model_buffer)\n if collections.Counter(self._ordered_input_names) != collections.Counter(self._input_names):\n raise ValueError(f'The input tensor names ({self._ordered_input_names}) do not match the tensor names read from the model ({self._input_names}).')\n if ids_md is None:\n ids_md = TensorMd(name=self._IDS_NAME, description=self._IDS_DESCRIPTION)\n if mask_md is None:\n mask_md = TensorMd(name=self._MASK_NAME, description=self._MASK_DESCRIPTION)\n if segment_ids_md is None:\n segment_ids_md = TensorMd(name=self._SEGMENT_IDS_NAME, description=self._SEGMENT_IDS_DESCRIPTION)\n self._input_md = [ids_md, mask_md, segment_ids_md]\n if not isinstance(tokenizer_md, (type(None), BertTokenizerMd, SentencePieceTokenizerMd)):\n raise ValueError(f'The type of tokenizer_options, {type(tokenizer_md)}, is unsupported')\n self._tokenizer_md = tokenizer_md\n\n def create_input_tesnor_metadata(self) -> List[_metadata_fb.TensorMetadataT]:\n \"\"\"Creates the input metadata for the three input tesnors.\"\"\"\n ordered_metadata = []\n name_md_dict = dict(zip(self._input_names, self._input_md))\n for name in self._ordered_input_names:\n ordered_metadata.append(name_md_dict[name].create_metadata())\n return ordered_metadata\n\n def create_input_process_unit_metadata(self) -> List[_metadata_fb.ProcessUnitT]:\n \"\"\"Creates the input process unit metadata.\"\"\"\n if self._tokenizer_md:\n return [self._tokenizer_md.create_metadata()]\n else:\n return []\n\n def get_tokenizer_associated_files(self) -> List[str]:\n \"\"\"Gets the associated files that are packed in the tokenizer.\"\"\"\n if self._tokenizer_md:\n return 
writer_utils.get_tokenizer_associated_files(self._tokenizer_md.create_metadata().options)\n else:\n return []\n", "source": "the_stack_v2_python_sparse", "source_path": "third_party/tflite_support/src/tensorflow_lite_support/metadata/python/metadata_writers/metadata_info.py", "source_repo": "chromium/chromium", "split": "val", "star_events_count": 17408}
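For the BertInputTensorsMd record, the practical gotcha sits in body_0: the three names you pass must match, as a multiset, the input tensor names read out of the model, or __init__ raises ValueError. A hedged usage sketch follows, assuming the import path implied by the record's source_path; the model filename and tensor names below are placeholders, not values from the record.

from tensorflow_lite_support.metadata.python.metadata_writers import metadata_info

with open('bert_classifier.tflite', 'rb') as f:  # hypothetical model file
    model_buffer = bytearray(f.read())

# The names must match the model's input tensors exactly; their order does
# not matter because __init__ recovers the real order from the model buffer.
input_md = metadata_info.BertInputTensorsMd(
    model_buffer,
    ids_name='input_ids',        # placeholder tensor names
    mask_name='input_mask',
    segment_name='segment_ids')

tensor_md = input_md.create_input_tesnor_metadata()  # ordered per the model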
{"blob_id": "e5e8c2b80377e1b76e4d46a5fdcf73aa81ca8218", "bodies": ["try:\n account_request_list = account_request_api.get_all()\n serializer = AccountRequestSerializer(account_request_list, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\nexcept Exception as api_exception:\n content = {'message': str(api_exception)}\n return Response(content, status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "try:\n serializer = UserSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n account_request = serializer.save()\n account_request_serializer = AccountRequestSerializer(account_request)\n return Response(account_request_serializer.data, status=status.HTTP_201_CREATED)\nexcept ValidationError as validation_exception:\n content = {'message': validation_exception.detail}\n return Response(content, status=status.HTTP_400_BAD_REQUEST)\nexcept Exception as api_exception:\n content = {'message': str(api_exception)}\n return Response(content, status=status.HTTP_500_INTERNAL_SERVER_ERROR)"], "bodies_text": "<|body_start_0|>\n try:\n account_request_list = account_request_api.get_all()\n serializer = AccountRequestSerializer(account_request_list, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n except Exception as api_exception:\n content = {'message': str(api_exception)}\n return Response(content, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n serializer = UserSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n account_request = serializer.save()\n account_request_serializer = AccountRequestSerializer(account_request)\n return Response(account_request_serializer.data, status=status.HTTP_201_CREATED)\n except ValidationError as validation_exception:\n content = {'message': validation_exception.detail}\n return Response(content, status=status.HTTP_400_BAD_REQUEST)\n except Exception as api_exception:\n content = {'message': str(api_exception)}\n return Response(content, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n<|end_body_1|>\n", "class_docstring": "Create or get all Account Request", "class_name": "AccountRequestList", "detected_licenses": ["NIST-Software", "BSD-3-Clause", "MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AccountRequestList:\n \"\"\"Create or get all Account Request\"\"\"\n\n def get(self, request):\n \"\"\"Get all account requests Args: request: HTTP request Returns: - code: 200 content: List of account requests - code: 400 content: Validation error\"\"\"\n <|body_0|>\n\n def post(self, request):\n \"\"\"Create a new account request Parameters: { \"username\": \"username\", \"first_name\": \"first_name\", \"last_name\": \"last_name\", \"password\": \"password\", \"email\": \"email\" } Args: request: HTTP request Returns: - code: 200 content: Account Request - code: 400 content: Validation error / missing parameters\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n account_request_list = account_request_api.get_all()\n serializer = AccountRequestSerializer(account_request_list, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n except Exception as api_exception:\n content = {'message': str(api_exception)}\n return Response(content, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n serializer = UserSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n account_request = serializer.save()\n 
account_request_serializer = AccountRequestSerializer(account_request)\n return Response(account_request_serializer.data, status=status.HTTP_201_CREATED)\n except ValidationError as validation_exception:\n content = {'message': validation_exception.detail}\n return Response(content, status=status.HTTP_400_BAD_REQUEST)\n except Exception as api_exception:\n content = {'message': str(api_exception)}\n return Response(content, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000459", "length_bytes": 5445, "license_type": "permissive", "methods": [{"docstring": "Get all account requests Args: request: HTTP request Returns: - code: 200 content: List of account requests - code: 400 content: Validation error", "name": "get", "signature": "def get(self, request)"}, {"docstring": "Create a new account request Parameters: { \"username\": \"username\", \"first_name\": \"first_name\", \"last_name\": \"last_name\", \"password\": \"password\", \"email\": \"email\" } Args: request: HTTP request Returns: - code: 200 content: Account Request - code: 400 content: Validation error / missing parameters", "name": "post", "signature": "def post(self, request)"}], "n_methods": 2, "prompt": "Implement the Python class `AccountRequestList` described below.\n\nClass description:\nCreate or get all Account Request\n\nMethod signatures and docstrings:\n- def get(self, request): Get all account requests Args: request: HTTP request Returns: - code: 200 content: List of account requests - code: 400 content: Validation error\n- def post(self, request): Create a new account request Parameters: { \"username\": \"username\", \"first_name\": \"first_name\", \"last_name\": \"last_name\", \"password\": \"password\", \"email\": \"email\" } Args: request: HTTP request Returns: - code: 200 content: Account Request - code: 400 content: Validation error / missing parameters", "prompted_full_text": "Implement the Python class `AccountRequestList` described below.\n\nClass description:\nCreate or get all Account Request\n\nMethod signatures and docstrings:\n- def get(self, request): Get all account requests Args: request: HTTP request Returns: - code: 200 content: List of account requests - code: 400 content: Validation error\n- def post(self, request): Create a new account request Parameters: { \"username\": \"username\", \"first_name\": \"first_name\", \"last_name\": \"last_name\", \"password\": \"password\", \"email\": \"email\" } Args: request: HTTP request Returns: - code: 200 content: Account Request - code: 400 content: Validation error / missing parameters\n\n<|skeleton|>\nclass AccountRequestList:\n \"\"\"Create or get all Account Request\"\"\"\n\n def get(self, request):\n \"\"\"Get all account requests Args: request: HTTP request Returns: - code: 200 content: List of account requests - code: 400 content: Validation error\"\"\"\n <|body_0|>\n\n def post(self, request):\n \"\"\"Create a new account request Parameters: { \"username\": \"username\", \"first_name\": \"first_name\", \"last_name\": \"last_name\", \"password\": \"password\", \"email\": \"email\" } Args: request: HTTP request Returns: - code: 200 content: Account Request - code: 400 content: Validation error / missing parameters\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n account_request_list = account_request_api.get_all()\n serializer = AccountRequestSerializer(account_request_list, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n except Exception as 
api_exception:\n content = {'message': str(api_exception)}\n return Response(content, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n serializer = UserSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n account_request = serializer.save()\n account_request_serializer = AccountRequestSerializer(account_request)\n return Response(account_request_serializer.data, status=status.HTTP_201_CREATED)\n except ValidationError as validation_exception:\n content = {'message': validation_exception.detail}\n return Response(content, status=status.HTTP_400_BAD_REQUEST)\n except Exception as api_exception:\n content = {'message': str(api_exception)}\n return Response(content, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n<|end_body_1|>\n", "revision_id": "c96ca2563a748497f8490d6a79d6a316e39c6f04", "skeleton": "<|skeleton|>\nclass AccountRequestList:\n \"\"\"Create or get all Account Request\"\"\"\n\n def get(self, request):\n \"\"\"Get all account requests Args: request: HTTP request Returns: - code: 200 content: List of account requests - code: 400 content: Validation error\"\"\"\n <|body_0|>\n\n def post(self, request):\n \"\"\"Create a new account request Parameters: { \"username\": \"username\", \"first_name\": \"first_name\", \"last_name\": \"last_name\", \"password\": \"password\", \"email\": \"email\" } Args: request: HTTP request Returns: - code: 200 content: Account Request - code: 400 content: Validation error / missing parameters\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class AccountRequestList:\n \"\"\"Create or get all Account Request\"\"\"\n\n def get(self, request):\n \"\"\"Get all account requests Args: request: HTTP request Returns: - code: 200 content: List of account requests - code: 400 content: Validation error\"\"\"\n try:\n account_request_list = account_request_api.get_all()\n serializer = AccountRequestSerializer(account_request_list, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n except Exception as api_exception:\n content = {'message': str(api_exception)}\n return Response(content, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n def post(self, request):\n \"\"\"Create a new account request Parameters: { \"username\": \"username\", \"first_name\": \"first_name\", \"last_name\": \"last_name\", \"password\": \"password\", \"email\": \"email\" } Args: request: HTTP request Returns: - code: 200 content: Account Request - code: 400 content: Validation error / missing parameters\"\"\"\n try:\n serializer = UserSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n account_request = serializer.save()\n account_request_serializer = AccountRequestSerializer(account_request)\n return Response(account_request_serializer.data, status=status.HTTP_201_CREATED)\n except ValidationError as validation_exception:\n content = {'message': validation_exception.detail}\n return Response(content, status=status.HTTP_400_BAD_REQUEST)\n except Exception as api_exception:\n content = {'message': str(api_exception)}\n return Response(content, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n", "source": "the_stack_v2_python_sparse", "source_path": "core_website_app/rest/account_request/views.py", "source_repo": "usnistgov/core_website_app", "split": "val", "star_events_count": 0}
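The AccountRequestList record reads as a Django REST Framework view whose base class the skeleton omits. A sketch of how such a view is commonly routed, assuming it subclasses rest_framework.views.APIView in the source project; the URL path is illustrative.

from django.urls import path
from rest_framework.views import APIView

class AccountRequestListView(AccountRequestList, APIView):
    """Mixes DRF's dispatch machinery into the record's plain class."""

urlpatterns = [
    # GET lists all account requests; POST creates one from the JSON
    # body documented in the post() docstring above.
    path('account-requests/', AccountRequestListView.as_view()),
]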
{"blob_id": "04d93ad992021dfb967a4fd441a52b042f48fbe8", "bodies": ["if weak:\n self._cache = weakref.WeakValueDictionary()\nelse:\n self._cache = {}", "try:\n value = self._cache[key]\n logger.debug('Got cached result for %r', key)\nexcept KeyError:\n logger.debug(\"Didn't get cached result for %r\", key)\n logger.debug('Computing operation: %r', operation)\n start = time.time()\n value = operation()\n value = self.convert_to_weakref_compat(value)\n end = time.time()\n logger.debug('Computation completed in %s seconds, storing into cache', end - start)\n self._cache[key] = value\nreturn value", "if obj.__class__ is dict:\n return Dict(obj)\nelif obj.__class__ is tuple or obj.__class__ is list:\n return List(obj)\nelse:\n return obj"], "bodies_text": "<|body_start_0|>\n if weak:\n self._cache = weakref.WeakValueDictionary()\n else:\n self._cache = {}\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n value = self._cache[key]\n logger.debug('Got cached result for %r', key)\n except KeyError:\n logger.debug(\"Didn't get cached result for %r\", key)\n logger.debug('Computing operation: %r', operation)\n start = time.time()\n value = operation()\n value = self.convert_to_weakref_compat(value)\n end = time.time()\n logger.debug('Computation completed in %s seconds, storing into cache', end - start)\n self._cache[key] = value\n return value\n<|end_body_1|>\n\n<|body_start_2|>\n if obj.__class__ is dict:\n return Dict(obj)\n elif obj.__class__ is tuple or obj.__class__ is list:\n return List(obj)\n else:\n return obj\n<|end_body_2|>\n", "class_docstring": "Cache for expensive operations. If your test needs to compute something (slowly) and reuse it in various different test\\\\_ methods then this will save time.", "class_name": "ResourceCache", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ResourceCache:\n \"\"\"Cache for expensive operations. If your test needs to compute something (slowly) and reuse it in various different test\\\\_ methods then this will save time.\"\"\"\n\n def __init__(self, weak=True):\n \"\"\"Initialize a new ResourceCache object\"\"\"\n <|body_0|>\n\n def get(self, key, operation):\n \"\"\"Get a value from the cache, falling back to computing it if needed Gets something from the cache dictionary, referenced by the key. 
If the value is missing it is computed, by calling the operation, and stored in the cache.\"\"\"\n <|body_1|>\n\n def convert_to_weakref_compat(obj):\n \"\"\"Convert the passed object to something that can be weakly reachable\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if weak:\n self._cache = weakref.WeakValueDictionary()\n else:\n self._cache = {}\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n value = self._cache[key]\n logger.debug('Got cached result for %r', key)\n except KeyError:\n logger.debug(\"Didn't get cached result for %r\", key)\n logger.debug('Computing operation: %r', operation)\n start = time.time()\n value = operation()\n value = self.convert_to_weakref_compat(value)\n end = time.time()\n logger.debug('Computation completed in %s seconds, storing into cache', end - start)\n self._cache[key] = value\n return value\n<|end_body_1|>\n\n<|body_start_2|>\n if obj.__class__ is dict:\n return Dict(obj)\n elif obj.__class__ is tuple or obj.__class__ is list:\n return List(obj)\n else:\n return obj\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000460", "length_bytes": 3154, "license_type": "no_license", "methods": [{"docstring": "Initialize a new ResourceCache object", "name": "__init__", "signature": "def __init__(self, weak=True)"}, {"docstring": "Get a value from the cache, falling back to computing it if needed Gets something from the cache dictionary, referenced by the key. If the value is missing it is computed, by calling the operation, and stored in the cache.", "name": "get", "signature": "def get(self, key, operation)"}, {"docstring": "Convert the passed object to something that can be weakly reachable", "name": "convert_to_weakref_compat", "signature": "def convert_to_weakref_compat(obj)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_043369", "prompt": "Implement the Python class `ResourceCache` described below.\n\nClass description:\nCache for expensive operations. If your test needs to compute something (slowly) and reuse it in various different test\\\\_ methods then this will save time.\n\nMethod signatures and docstrings:\n- def __init__(self, weak=True): Initialize a new ResourceCache object\n- def get(self, key, operation): Get a value from the cache, falling back to computing it if needed Gets something from the cache dictionary, referenced by the key. If the value is missing it is computed, by calling the operation, and stored in the cache.\n- def convert_to_weakref_compat(obj): Convert the passed object to something that can be weakly reachable", "prompted_full_text": "Implement the Python class `ResourceCache` described below.\n\nClass description:\nCache for expensive operations. If your test needs to compute something (slowly) and reuse it in various different test\\\\_ methods then this will save time.\n\nMethod signatures and docstrings:\n- def __init__(self, weak=True): Initialize a new ResourceCache object\n- def get(self, key, operation): Get a value from the cache, falling back to computing it if needed Gets something from the cache dictionary, referenced by the key. If the value is missing it is computed, by calling the operation, and stored in the cache.\n- def convert_to_weakref_compat(obj): Convert the passed object to something that can be weakly reachable\n\n<|skeleton|>\nclass ResourceCache:\n \"\"\"Cache for expensive operations. 
If your test needs to compute something (slowly) and reuse it in various different test\\\\_ methods then this will save time.\"\"\"\n\n def __init__(self, weak=True):\n \"\"\"Initialize a new ResourceCache object\"\"\"\n <|body_0|>\n\n def get(self, key, operation):\n \"\"\"Get a value from the cache, falling back to computing it if needed Gets something from the cache dictionary, referenced by the key. If the value is missing it is computed, by calling the operation, and stored in the cache.\"\"\"\n <|body_1|>\n\n def convert_to_weakref_compat(obj):\n \"\"\"Convert the passed object to something that can be weakly reachable\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if weak:\n self._cache = weakref.WeakValueDictionary()\n else:\n self._cache = {}\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n value = self._cache[key]\n logger.debug('Got cached result for %r', key)\n except KeyError:\n logger.debug(\"Didn't get cached result for %r\", key)\n logger.debug('Computing operation: %r', operation)\n start = time.time()\n value = operation()\n value = self.convert_to_weakref_compat(value)\n end = time.time()\n logger.debug('Computation completed in %s seconds, storing into cache', end - start)\n self._cache[key] = value\n return value\n<|end_body_1|>\n\n<|body_start_2|>\n if obj.__class__ is dict:\n return Dict(obj)\n elif obj.__class__ is tuple or obj.__class__ is list:\n return List(obj)\n else:\n return obj\n<|end_body_2|>\n", "revision_id": "78aa82cdb35808988214329b3b1aabcc2d1a5e01", "skeleton": "<|skeleton|>\nclass ResourceCache:\n \"\"\"Cache for expensive operations. If your test needs to compute something (slowly) and reuse it in various different test\\\\_ methods then this will save time.\"\"\"\n\n def __init__(self, weak=True):\n \"\"\"Initialize a new ResourceCache object\"\"\"\n <|body_0|>\n\n def get(self, key, operation):\n \"\"\"Get a value from the cache, falling back to computing it if needed Gets something from the cache dictionary, referenced by the key. If the value is missing it is computed, by calling the operation, and stored in the cache.\"\"\"\n <|body_1|>\n\n def convert_to_weakref_compat(obj):\n \"\"\"Convert the passed object to something that can be weakly reachable\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ResourceCache:\n \"\"\"Cache for expensive operations. If your test needs to compute something (slowly) and reuse it in various different test\\\\_ methods then this will save time.\"\"\"\n\n def __init__(self, weak=True):\n \"\"\"Initialize a new ResourceCache object\"\"\"\n if weak:\n self._cache = weakref.WeakValueDictionary()\n else:\n self._cache = {}\n\n def get(self, key, operation):\n \"\"\"Get a value from the cache, falling back to computing it if needed Gets something from the cache dictionary, referenced by the key. 
If the value is missing it is computed, by calling the operation, and stored in the cache.\"\"\"\n try:\n value = self._cache[key]\n logger.debug('Got cached result for %r', key)\n except KeyError:\n logger.debug(\"Didn't get cached result for %r\", key)\n logger.debug('Computing operation: %r', operation)\n start = time.time()\n value = operation()\n value = self.convert_to_weakref_compat(value)\n end = time.time()\n logger.debug('Computation completed in %s seconds, storing into cache', end - start)\n self._cache[key] = value\n return value\n\n def convert_to_weakref_compat(obj):\n \"\"\"Convert the passed object to something that can be weakly reachable\"\"\"\n if obj.__class__ is dict:\n return Dict(obj)\n elif obj.__class__ is tuple or obj.__class__ is list:\n return List(obj)\n else:\n return obj\n", "source": "the_stack_v2_python_sparse", "source_path": "venv/lib/python3.6/site-packages/plainbox/testing_utils/resource.py", "source_repo": "utkarshyadavin/CloudMarks", "split": "val", "star_events_count": 0}
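Two things are worth flagging in the ResourceCache record above. First, plain dict/list/tuple objects cannot be weakly referenced, which is why convert_to_weakref_compat wraps them in Dict/List subclasses before they go into the WeakValueDictionary. Second, as recorded, convert_to_weakref_compat is defined without self yet invoked as self.convert_to_weakref_compat(value), so the bound call receives two arguments and would raise TypeError at runtime. A minimal runnable variant with that method made a staticmethod (my fix, not the source's):

import time
import weakref


class Dict(dict):
    """dict itself has no __weakref__ slot; a trivial subclass does."""


class ResourceCache:
    def __init__(self, weak=True):
        self._cache = weakref.WeakValueDictionary() if weak else {}

    def get(self, key, operation):
        try:
            return self._cache[key]
        except KeyError:
            value = self.convert_to_weakref_compat(operation())
            self._cache[key] = value
            return value

    @staticmethod
    def convert_to_weakref_compat(obj):
        # Wrap plain dicts so the weak-value cache can hold them.
        return Dict(obj) if obj.__class__ is dict else obj


cache = ResourceCache()
expensive = lambda: time.sleep(0.1) or {'answer': 42}
first = cache.get('result', expensive)           # computed once, then stored
assert cache.get('result', expensive) is first   # cache hit while `first` is alive

Because the cache holds only weak references, dropping the last strong reference (del first) lets the entry vanish and forces recomputation on the next get.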
{"blob_id": "e289205113301f5ec8e762154fa23b908b845812", "bodies": ["if serializer_class is None:\n if 'context' in kwargs.keys():\n kwargs.pop('context')\n return self.get_serializer(queryset, *args, **kwargs)\nreturn serializer_class(queryset, *args, context=self.get_serializer_context(), **kwargs)", "if plan_pk is None:\n queryset = self.get_queryset()\nelse:\n queryset = self.get_queryset().filter(plan_id=int(plan_pk)).all()\nqueryset = self.filter_queryset(queryset)\nif request.version == 'v2':\n page = self.paginate_queryset(queryset)\n if page is not None:\n serializer = self._serialize(serializer_class, page, many=True)\n return self.get_paginated_response(serializer.data)\nserializer = self._serialize(serializer_class, queryset, many=True)\nreturn Response(serializer.data)"], "bodies_text": "<|body_start_0|>\n if serializer_class is None:\n if 'context' in kwargs.keys():\n kwargs.pop('context')\n return self.get_serializer(queryset, *args, **kwargs)\n return serializer_class(queryset, *args, context=self.get_serializer_context(), **kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n if plan_pk is None:\n queryset = self.get_queryset()\n else:\n queryset = self.get_queryset().filter(plan_id=int(plan_pk)).all()\n queryset = self.filter_queryset(queryset)\n if request.version == 'v2':\n page = self.paginate_queryset(queryset)\n if page is not None:\n serializer = self._serialize(serializer_class, page, many=True)\n return self.get_paginated_response(serializer.data)\n serializer = self._serialize(serializer_class, queryset, many=True)\n return Response(serializer.data)\n<|end_body_1|>\n", "class_docstring": "/plans//favs/ のようなネストされた要素に対してリストを返す時のmixin", "class_name": "PlanNestedListMixin", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PlanNestedListMixin:\n \"\"\"/plans//favs/ のようなネストされた要素に対してリストを返す時のmixin\"\"\"\n\n def _serialize(self, serializer_class, queryset, *args, **kwargs):\n \"\"\"Serializerの指定があればそれで返す.無ければself.get_serializerする. :param serializer_class: 使用するSerializerクラスを指定する :param args: Serializerをインスタンス化する際の位置引数 :param kwargs: Serializerをインスタンス化する際のオプション引数 :return: インスタンス化されたSerializer\"\"\"\n <|body_0|>\n\n def list(self, request, plan_pk=None, serializer_class=None, **kwargs):\n \"\"\"plan_pkでフィルタリングしてレスポンスを返す :param request: ユーザのリクエストオブジェクト :param plan_pk: フィルタ対象のPlanのPrimary Key :param serializer_class: 使用するSerializer.デフォルトはself.serializerになる. :param kwargs: その他オプション :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if serializer_class is None:\n if 'context' in kwargs.keys():\n kwargs.pop('context')\n return self.get_serializer(queryset, *args, **kwargs)\n return serializer_class(queryset, *args, context=self.get_serializer_context(), **kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n if plan_pk is None:\n queryset = self.get_queryset()\n else:\n queryset = self.get_queryset().filter(plan_id=int(plan_pk)).all()\n queryset = self.filter_queryset(queryset)\n if request.version == 'v2':\n page = self.paginate_queryset(queryset)\n if page is not None:\n serializer = self._serialize(serializer_class, page, many=True)\n return self.get_paginated_response(serializer.data)\n serializer = self._serialize(serializer_class, queryset, many=True)\n return Response(serializer.data)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000461", "length_bytes": 5541, "license_type": "no_license", "methods": [{"docstring": "Serializerの指定があればそれで返す.無ければself.get_serializerする. 
:param serializer_class: 使用するSerializerクラスを指定する :param args: Serializerをインスタンス化する際の位置引数 :param kwargs: Serializerをインスタンス化する際のオプション引数 :return: インスタンス化されたSerializer", "name": "_serialize", "signature": "def _serialize(self, serializer_class, queryset, *args, **kwargs)"}, {"docstring": "plan_pkでフィルタリングしてレスポンスを返す :param request: ユーザのリクエストオブジェクト :param plan_pk: フィルタ対象のPlanのPrimary Key :param serializer_class: 使用するSerializer.デフォルトはself.serializerになる. :param kwargs: その他オプション :return:", "name": "list", "signature": "def list(self, request, plan_pk=None, serializer_class=None, **kwargs)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_053424", "prompt": "Implement the Python class `PlanNestedListMixin` described below.\n\nClass description:\n/plans//favs/ のようなネストされた要素に対してリストを返す時のmixin\n\nMethod signatures and docstrings:\n- def _serialize(self, serializer_class, queryset, *args, **kwargs): Serializerの指定があればそれで返す.無ければself.get_serializerする. :param serializer_class: 使用するSerializerクラスを指定する :param args: Serializerをインスタンス化する際の位置引数 :param kwargs: Serializerをインスタンス化する際のオプション引数 :return: インスタンス化されたSerializer\n- def list(self, request, plan_pk=None, serializer_class=None, **kwargs): plan_pkでフィルタリングしてレスポンスを返す :param request: ユーザのリクエストオブジェクト :param plan_pk: フィルタ対象のPlanのPrimary Key :param serializer_class: 使用するSerializer.デフォルトはself.serializerになる. :param kwargs: その他オプション :return:", "prompted_full_text": "Implement the Python class `PlanNestedListMixin` described below.\n\nClass description:\n/plans//favs/ のようなネストされた要素に対してリストを返す時のmixin\n\nMethod signatures and docstrings:\n- def _serialize(self, serializer_class, queryset, *args, **kwargs): Serializerの指定があればそれで返す.無ければself.get_serializerする. :param serializer_class: 使用するSerializerクラスを指定する :param args: Serializerをインスタンス化する際の位置引数 :param kwargs: Serializerをインスタンス化する際のオプション引数 :return: インスタンス化されたSerializer\n- def list(self, request, plan_pk=None, serializer_class=None, **kwargs): plan_pkでフィルタリングしてレスポンスを返す :param request: ユーザのリクエストオブジェクト :param plan_pk: フィルタ対象のPlanのPrimary Key :param serializer_class: 使用するSerializer.デフォルトはself.serializerになる. :param kwargs: その他オプション :return:\n\n<|skeleton|>\nclass PlanNestedListMixin:\n \"\"\"/plans//favs/ のようなネストされた要素に対してリストを返す時のmixin\"\"\"\n\n def _serialize(self, serializer_class, queryset, *args, **kwargs):\n \"\"\"Serializerの指定があればそれで返す.無ければself.get_serializerする. :param serializer_class: 使用するSerializerクラスを指定する :param args: Serializerをインスタンス化する際の位置引数 :param kwargs: Serializerをインスタンス化する際のオプション引数 :return: インスタンス化されたSerializer\"\"\"\n <|body_0|>\n\n def list(self, request, plan_pk=None, serializer_class=None, **kwargs):\n \"\"\"plan_pkでフィルタリングしてレスポンスを返す :param request: ユーザのリクエストオブジェクト :param plan_pk: フィルタ対象のPlanのPrimary Key :param serializer_class: 使用するSerializer.デフォルトはself.serializerになる. 
:param kwargs: その他オプション :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if serializer_class is None:\n if 'context' in kwargs.keys():\n kwargs.pop('context')\n return self.get_serializer(queryset, *args, **kwargs)\n return serializer_class(queryset, *args, context=self.get_serializer_context(), **kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n if plan_pk is None:\n queryset = self.get_queryset()\n else:\n queryset = self.get_queryset().filter(plan_id=int(plan_pk)).all()\n queryset = self.filter_queryset(queryset)\n if request.version == 'v2':\n page = self.paginate_queryset(queryset)\n if page is not None:\n serializer = self._serialize(serializer_class, page, many=True)\n return self.get_paginated_response(serializer.data)\n serializer = self._serialize(serializer_class, queryset, many=True)\n return Response(serializer.data)\n<|end_body_1|>\n", "revision_id": "6f9487dcfc13c706d312be6586159c7d3a25c6aa", "skeleton": "<|skeleton|>\nclass PlanNestedListMixin:\n \"\"\"/plans//favs/ のようなネストされた要素に対してリストを返す時のmixin\"\"\"\n\n def _serialize(self, serializer_class, queryset, *args, **kwargs):\n \"\"\"Serializerの指定があればそれで返す.無ければself.get_serializerする. :param serializer_class: 使用するSerializerクラスを指定する :param args: Serializerをインスタンス化する際の位置引数 :param kwargs: Serializerをインスタンス化する際のオプション引数 :return: インスタンス化されたSerializer\"\"\"\n <|body_0|>\n\n def list(self, request, plan_pk=None, serializer_class=None, **kwargs):\n \"\"\"plan_pkでフィルタリングしてレスポンスを返す :param request: ユーザのリクエストオブジェクト :param plan_pk: フィルタ対象のPlanのPrimary Key :param serializer_class: 使用するSerializer.デフォルトはself.serializerになる. :param kwargs: その他オプション :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class PlanNestedListMixin:\n \"\"\"/plans//favs/ のようなネストされた要素に対してリストを返す時のmixin\"\"\"\n\n def _serialize(self, serializer_class, queryset, *args, **kwargs):\n \"\"\"Serializerの指定があればそれで返す.無ければself.get_serializerする. :param serializer_class: 使用するSerializerクラスを指定する :param args: Serializerをインスタンス化する際の位置引数 :param kwargs: Serializerをインスタンス化する際のオプション引数 :return: インスタンス化されたSerializer\"\"\"\n if serializer_class is None:\n if 'context' in kwargs.keys():\n kwargs.pop('context')\n return self.get_serializer(queryset, *args, **kwargs)\n return serializer_class(queryset, *args, context=self.get_serializer_context(), **kwargs)\n\n def list(self, request, plan_pk=None, serializer_class=None, **kwargs):\n \"\"\"plan_pkでフィルタリングしてレスポンスを返す :param request: ユーザのリクエストオブジェクト :param plan_pk: フィルタ対象のPlanのPrimary Key :param serializer_class: 使用するSerializer.デフォルトはself.serializerになる. :param kwargs: その他オプション :return:\"\"\"\n if plan_pk is None:\n queryset = self.get_queryset()\n else:\n queryset = self.get_queryset().filter(plan_id=int(plan_pk)).all()\n queryset = self.filter_queryset(queryset)\n if request.version == 'v2':\n page = self.paginate_queryset(queryset)\n if page is not None:\n serializer = self._serialize(serializer_class, page, many=True)\n return self.get_paginated_response(serializer.data)\n serializer = self._serialize(serializer_class, queryset, many=True)\n return Response(serializer.data)\n", "source": "the_stack_v2_python_sparse", "source_path": "src/plan/mixins.py", "source_repo": "jphacks/KB_1809_2", "split": "val", "star_events_count": 3}
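The PlanNestedListMixin record above hinges on one dispatch decision in _serialize: use the explicitly passed serializer class (supplying the context itself) or fall back to the view's get_serializer default; list() then layers optional plan_pk filtering and, on API version 'v2', pagination on top of that. A framework-free sketch of the dispatch, with DefaultSerializer and CompactSerializer as invented placeholders rather than names from the source:

class DefaultSerializer:
    def __init__(self, data, **kwargs):
        self.data = {'kind': 'default', 'items': list(data)}


class CompactSerializer:
    def __init__(self, data, **kwargs):
        self.data = {'kind': 'compact', 'count': len(data)}


class ListMixin:
    serializer_class = DefaultSerializer

    def get_serializer(self, data, **kwargs):
        # Fallback path: the view supplies its own serializer and context.
        return self.serializer_class(data, **kwargs)

    def _serialize(self, serializer_class, queryset, **kwargs):
        if serializer_class is None:
            kwargs.pop('context', None)  # get_serializer builds its own context
            return self.get_serializer(queryset, **kwargs)
        return serializer_class(queryset, context={'view': self}, **kwargs)


view = ListMixin()
print(view._serialize(None, [1, 2, 3]).data)               # default serializer
print(view._serialize(CompactSerializer, [1, 2, 3]).data)  # explicit override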
{"blob_id": "eea8de42a84217c39b5874ee4708cb5ecb435be6", "bodies": ["\"\"\"\n 大致步骤:\n 1-获取传入date以及code\n 2-根据code以及date获取geostationtidedata中的一个 StationTideData(model)\n 3-从realdata中获取过程中的极值出现时间以及值\n \"\"\"\ncode = request.GET.get('code', settings.DEFAULT_TYPHOON_CODE_BYSTATION)\nself.code = code\ndate_str = request.GET.get('date', settings.DEFAULT_TYPHOON_DATE)\ntargetdate = dateutil.parser.parse(date_str)\nfilter_list = self.getStationTargetRealData(targetdate, code)\njson_data = StationTideIncludeAllMidModelSerializer(filter_list, many=True).data\nreturn Response(json_data)", "targetdate = date(targetdatetime.year, targetdatetime.month, targetdatetime.day)\nlist = StationTideData.objects(typhoonnum=code)\n\ndef getTargetMoment(moment: datetime, realdate: StationTideData) -> StationTideAllDataMidModel:\n \"\"\"\n 根据指定时刻,从当前的测站数据中找到对应时刻的观测值\n :param moment:\n :param realdate:\n :return:\n \"\"\"\n '\\n 思路:\\n \\n '\n date_moment = date(moment.year, moment.month, moment.day)\n hour_moment = moment.hour\n datetime_moment = datetime(moment.year, moment.month, moment.day, moment.hour, 0).replace(tzinfo=TZ_UTC_0)\n '\\n s1:现传入的moment是一个utc的datetime(前台传递过来的是utc时间,后端不再做时区的修改)\\n 1: 对于测站数据的查询需要根据一个datetime去获取realtidedata数组中的每一个targetdate\\n 2: 而其中的forecastdata->forecast_arr 是根据北京时间来保存的(我日你妈!)\\n ---\\n 1: 创建两个datetime变量,一个用来存储utc时间,一个用来存储beijing时间\\n 2: utc时间用来找到targetdate\\n 3: beijing时间用来获取每个小时的测值\\n '\n moment_utc = moment\n moment_bj = moment + timedelta(hours=8)\n datetime_utc_start = datetime(year=moment_bj.year, month=moment_bj.month, day=moment_bj.day) + timedelta(hours=-8)\n temp_realtidedata = [temp for temp in realdate.realtidedata if temp.targetdate == datetime_utc_start]\n if len(temp_realtidedata) > 0:\n return StationTideAllDataMidModel(temp_realtidedata[0].realdata.realdata_arr[moment_bj.hour], temp_realtidedata[0].forecastdata.forecast_arr[moment_bj.hour], moment_utc)\n else:\n return None\nlist_StationForecast = []\nif len(list) > 0:\n for temp in list:\n list_StationForecast.append(StationTideIncludeForecastMidModel(temp, getTargetMoment(targetdatetime, temp)))\nreturn list_StationForecast", "list = self.getTargetDateRealData(data, date)\nmax_data = max(list, key=lambda x: x.val)\nreturn max_data", "days = 0\nlist_tidedata = []\nif 'days' in kwargs:\n days = int(kwargs.get('days'))\nfor temp_realtidedata in data.realtidedata:\n for index, temp_realdata in enumerate(temp_realtidedata.realdata.realdata_arr):\n temp_datetime = datetime.datetime(temp_realtidedata.targetdate.year, temp_realtidedata.targetdate.month, temp_realtidedata.targetdate.day, 0, 0) + datetime.timedelta(hours=index)\n list_tidedata.append(TideRealMidModel(temp_realdata, temp_datetime))\nreturn list_tidedata"], "bodies_text": "<|body_start_0|>\n \"\"\"\n 大致步骤:\n 1-获取传入date以及code\n 2-根据code以及date获取geostationtidedata中的一个 StationTideData(model)\n 3-从realdata中获取过程中的极值出现时间以及值\n \"\"\"\n code = request.GET.get('code', settings.DEFAULT_TYPHOON_CODE_BYSTATION)\n self.code = code\n date_str = request.GET.get('date', settings.DEFAULT_TYPHOON_DATE)\n targetdate = dateutil.parser.parse(date_str)\n filter_list = self.getStationTargetRealData(targetdate, code)\n json_data = StationTideIncludeAllMidModelSerializer(filter_list, many=True).data\n return Response(json_data)\n<|end_body_0|>\n\n<|body_start_1|>\n targetdate = date(targetdatetime.year, targetdatetime.month, targetdatetime.day)\n list = StationTideData.objects(typhoonnum=code)\n\n def getTargetMoment(moment: datetime, realdate: StationTideData) -> 
StationTideAllDataMidModel:\n \"\"\"\n 根据指定时刻,从当前的测站数据中找到对应时刻的观测值\n :param moment:\n :param realdate:\n :return:\n \"\"\"\n '\\n 思路:\\n \\n '\n date_moment = date(moment.year, moment.month, moment.day)\n hour_moment = moment.hour\n datetime_moment = datetime(moment.year, moment.month, moment.day, moment.hour, 0).replace(tzinfo=TZ_UTC_0)\n '\\n s1:现传入的moment是一个utc的datetime(前台传递过来的是utc时间,后端不再做时区的修改)\\n 1: 对于测站数据的查询需要根据一个datetime去获取realtidedata数组中的每一个targetdate\\n 2: 而其中的forecastdata->forecast_arr 是根据北京时间来保存的(我日你妈!)\\n ---\\n 1: 创建两个datetime变量,一个用来存储utc时间,一个用来存储beijing时间\\n 2: utc时间用来找到targetdate\\n 3: beijing时间用来获取每个小时的测值\\n '\n moment_utc = moment\n moment_bj = moment + timedelta(hours=8)\n datetime_utc_start = datetime(year=moment_bj.year, month=moment_bj.month, day=moment_bj.day) + timedelta(hours=-8)\n temp_realtidedata = [temp for temp in realdate.realtidedata if temp.targetdate == datetime_utc_start]\n if len(temp_realtidedata) > 0:\n return StationTideAllDataMidModel(temp_realtidedata[0].realdata.realdata_arr[moment_bj.hour], temp_realtidedata[0].forecastdata.forecast_arr[moment_bj.hour], moment_utc)\n else:\n return None\n list_StationForecast = []\n if len(list) > 0:\n for temp in list:\n list_StationForecast.append(StationTideIncludeForecastMidModel(temp, getTargetMoment(targetdatetime, temp)))\n return list_StationForecast\n<|end_body_1|>\n\n<|body_start_2|>\n list = self.getTargetDateRealData(data, date)\n max_data = max(list, key=lambda x: x.val)\n return max_data\n<|end_body_2|>\n\n<|body_start_3|>\n days = 0\n list_tidedata = []\n if 'days' in kwargs:\n days = int(kwargs.get('days'))\n for temp_realtidedata in data.realtidedata:\n for index, temp_realdata in enumerate(temp_realtidedata.realdata.realdata_arr):\n temp_datetime = datetime.datetime(temp_realtidedata.targetdate.year, temp_realtidedata.targetdate.month, temp_realtidedata.targetdate.day, 0, 0) + datetime.timedelta(hours=index)\n list_tidedata.append(TideRealMidModel(temp_realdata, temp_datetime))\n return list_tidedata\n<|end_body_3|>\n", "class_docstring": "", "class_name": "StationTideDataListView", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass StationTideDataListView:\n\n def get(self, request):\n \"\"\":param request: :return:\"\"\"\n <|body_0|>\n\n def getStationTargetRealData(self, targetdatetime: datetime, code: str) -> []:\n \"\"\"根据时间获取该时间该台风的测站数据 :param date: :return:\"\"\"\n <|body_1|>\n\n def dataListMax(self, data: StationTideData, date: datetime.date) -> TideRealMidModel:\n \"\"\"找到传入的站点的极值(极大值) :param data: :param date: :return: 极大值(TideRealMidModel)\"\"\"\n <|body_2|>\n\n def getTargetDateRealData(self, data: StationTideData, date: datetime.date, **kwargs) -> []:\n \"\"\"根据时间获取该时刻的观测值list :param date: :return:\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n \"\"\"\n 大致步骤:\n 1-获取传入date以及code\n 2-根据code以及date获取geostationtidedata中的一个 StationTideData(model)\n 3-从realdata中获取过程中的极值出现时间以及值\n \"\"\"\n code = request.GET.get('code', settings.DEFAULT_TYPHOON_CODE_BYSTATION)\n self.code = code\n date_str = request.GET.get('date', settings.DEFAULT_TYPHOON_DATE)\n targetdate = dateutil.parser.parse(date_str)\n filter_list = self.getStationTargetRealData(targetdate, code)\n json_data = StationTideIncludeAllMidModelSerializer(filter_list, many=True).data\n return Response(json_data)\n<|end_body_0|>\n\n<|body_start_1|>\n targetdate = date(targetdatetime.year, targetdatetime.month, targetdatetime.day)\n list = 
StationTideData.objects(typhoonnum=code)\n\n def getTargetMoment(moment: datetime, realdate: StationTideData) -> StationTideAllDataMidModel:\n \"\"\"\n 根据指定时刻,从当前的测站数据中找到对应时刻的观测值\n :param moment:\n :param realdate:\n :return:\n \"\"\"\n '\\n 思路:\\n \\n '\n date_moment = date(moment.year, moment.month, moment.day)\n hour_moment = moment.hour\n datetime_moment = datetime(moment.year, moment.month, moment.day, moment.hour, 0).replace(tzinfo=TZ_UTC_0)\n '\\n s1:现传入的moment是一个utc的datetime(前台传递过来的是utc时间,后端不再做时区的修改)\\n 1: 对于测站数据的查询需要根据一个datetime去获取realtidedata数组中的每一个targetdate\\n 2: 而其中的forecastdata->forecast_arr 是根据北京时间来保存的(我日你妈!)\\n ---\\n 1: 创建两个datetime变量,一个用来存储utc时间,一个用来存储beijing时间\\n 2: utc时间用来找到targetdate\\n 3: beijing时间用来获取每个小时的测值\\n '\n moment_utc = moment\n moment_bj = moment + timedelta(hours=8)\n datetime_utc_start = datetime(year=moment_bj.year, month=moment_bj.month, day=moment_bj.day) + timedelta(hours=-8)\n temp_realtidedata = [temp for temp in realdate.realtidedata if temp.targetdate == datetime_utc_start]\n if len(temp_realtidedata) > 0:\n return StationTideAllDataMidModel(temp_realtidedata[0].realdata.realdata_arr[moment_bj.hour], temp_realtidedata[0].forecastdata.forecast_arr[moment_bj.hour], moment_utc)\n else:\n return None\n list_StationForecast = []\n if len(list) > 0:\n for temp in list:\n list_StationForecast.append(StationTideIncludeForecastMidModel(temp, getTargetMoment(targetdatetime, temp)))\n return list_StationForecast\n<|end_body_1|>\n\n<|body_start_2|>\n list = self.getTargetDateRealData(data, date)\n max_data = max(list, key=lambda x: x.val)\n return max_data\n<|end_body_2|>\n\n<|body_start_3|>\n days = 0\n list_tidedata = []\n if 'days' in kwargs:\n days = int(kwargs.get('days'))\n for temp_realtidedata in data.realtidedata:\n for index, temp_realdata in enumerate(temp_realtidedata.realdata.realdata_arr):\n temp_datetime = datetime.datetime(temp_realtidedata.targetdate.year, temp_realtidedata.targetdate.month, temp_realtidedata.targetdate.day, 0, 0) + datetime.timedelta(hours=index)\n list_tidedata.append(TideRealMidModel(temp_realdata, temp_datetime))\n return list_tidedata\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000462", "length_bytes": 46325, "license_type": "no_license", "methods": [{"docstring": ":param request: :return:", "name": "get", "signature": "def get(self, request)"}, {"docstring": "根据时间获取该时间该台风的测站数据 :param date: :return:", "name": "getStationTargetRealData", "signature": "def getStationTargetRealData(self, targetdatetime: datetime, code: str) -> []"}, {"docstring": "找到传入的站点的极值(极大值) :param data: :param date: :return: 极大值(TideRealMidModel)", "name": "dataListMax", "signature": "def dataListMax(self, data: StationTideData, date: datetime.date) -> TideRealMidModel"}, {"docstring": "根据时间获取该时刻的观测值list :param date: :return:", "name": "getTargetDateRealData", "signature": "def getTargetDateRealData(self, data: StationTideData, date: datetime.date, **kwargs) -> []"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_054377", "prompt": "Implement the Python class `StationTideDataListView` described below.\n\nClass description:\nImplement the StationTideDataListView class.\n\nMethod signatures and docstrings:\n- def get(self, request): :param request: :return:\n- def getStationTargetRealData(self, targetdatetime: datetime, code: str) -> []: 根据时间获取该时间该台风的测站数据 :param date: :return:\n- def dataListMax(self, data: StationTideData, date: datetime.date) -> TideRealMidModel: 找到传入的站点的极值(极大值) :param data: :param date: :return: 
极大值(TideRealMidModel)\n- def getTargetDateRealData(self, data: StationTideData, date: datetime.date, **kwargs) -> []: 根据时间获取该时刻的观测值list :param date: :return:", "prompted_full_text": "Implement the Python class `StationTideDataListView` described below.\n\nClass description:\nImplement the StationTideDataListView class.\n\nMethod signatures and docstrings:\n- def get(self, request): :param request: :return:\n- def getStationTargetRealData(self, targetdatetime: datetime, code: str) -> []: 根据时间获取该时间该台风的测站数据 :param date: :return:\n- def dataListMax(self, data: StationTideData, date: datetime.date) -> TideRealMidModel: 找到传入的站点的极值(极大值) :param data: :param date: :return: 极大值(TideRealMidModel)\n- def getTargetDateRealData(self, data: StationTideData, date: datetime.date, **kwargs) -> []: 根据时间获取该时刻的观测值list :param date: :return:\n\n<|skeleton|>\nclass StationTideDataListView:\n\n def get(self, request):\n \"\"\":param request: :return:\"\"\"\n <|body_0|>\n\n def getStationTargetRealData(self, targetdatetime: datetime, code: str) -> []:\n \"\"\"根据时间获取该时间该台风的测站数据 :param date: :return:\"\"\"\n <|body_1|>\n\n def dataListMax(self, data: StationTideData, date: datetime.date) -> TideRealMidModel:\n \"\"\"找到传入的站点的极值(极大值) :param data: :param date: :return: 极大值(TideRealMidModel)\"\"\"\n <|body_2|>\n\n def getTargetDateRealData(self, data: StationTideData, date: datetime.date, **kwargs) -> []:\n \"\"\"根据时间获取该时刻的观测值list :param date: :return:\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n \"\"\"\n 大致步骤:\n 1-获取传入date以及code\n 2-根据code以及date获取geostationtidedata中的一个 StationTideData(model)\n 3-从realdata中获取过程中的极值出现时间以及值\n \"\"\"\n code = request.GET.get('code', settings.DEFAULT_TYPHOON_CODE_BYSTATION)\n self.code = code\n date_str = request.GET.get('date', settings.DEFAULT_TYPHOON_DATE)\n targetdate = dateutil.parser.parse(date_str)\n filter_list = self.getStationTargetRealData(targetdate, code)\n json_data = StationTideIncludeAllMidModelSerializer(filter_list, many=True).data\n return Response(json_data)\n<|end_body_0|>\n\n<|body_start_1|>\n targetdate = date(targetdatetime.year, targetdatetime.month, targetdatetime.day)\n list = StationTideData.objects(typhoonnum=code)\n\n def getTargetMoment(moment: datetime, realdate: StationTideData) -> StationTideAllDataMidModel:\n \"\"\"\n 根据指定时刻,从当前的测站数据中找到对应时刻的观测值\n :param moment:\n :param realdate:\n :return:\n \"\"\"\n '\\n 思路:\\n \\n '\n date_moment = date(moment.year, moment.month, moment.day)\n hour_moment = moment.hour\n datetime_moment = datetime(moment.year, moment.month, moment.day, moment.hour, 0).replace(tzinfo=TZ_UTC_0)\n '\\n s1:现传入的moment是一个utc的datetime(前台传递过来的是utc时间,后端不再做时区的修改)\\n 1: 对于测站数据的查询需要根据一个datetime去获取realtidedata数组中的每一个targetdate\\n 2: 而其中的forecastdata->forecast_arr 是根据北京时间来保存的(我日你妈!)\\n ---\\n 1: 创建两个datetime变量,一个用来存储utc时间,一个用来存储beijing时间\\n 2: utc时间用来找到targetdate\\n 3: beijing时间用来获取每个小时的测值\\n '\n moment_utc = moment\n moment_bj = moment + timedelta(hours=8)\n datetime_utc_start = datetime(year=moment_bj.year, month=moment_bj.month, day=moment_bj.day) + timedelta(hours=-8)\n temp_realtidedata = [temp for temp in realdate.realtidedata if temp.targetdate == datetime_utc_start]\n if len(temp_realtidedata) > 0:\n return StationTideAllDataMidModel(temp_realtidedata[0].realdata.realdata_arr[moment_bj.hour], temp_realtidedata[0].forecastdata.forecast_arr[moment_bj.hour], moment_utc)\n else:\n return None\n list_StationForecast = []\n if len(list) > 0:\n for temp in list:\n list_StationForecast.append(StationTideIncludeForecastMidModel(temp, 
getTargetMoment(targetdatetime, temp)))\n return list_StationForecast\n<|end_body_1|>\n\n<|body_start_2|>\n list = self.getTargetDateRealData(data, date)\n max_data = max(list, key=lambda x: x.val)\n return max_data\n<|end_body_2|>\n\n<|body_start_3|>\n days = 0\n list_tidedata = []\n if 'days' in kwargs:\n days = int(kwargs.get('days'))\n for temp_realtidedata in data.realtidedata:\n for index, temp_realdata in enumerate(temp_realtidedata.realdata.realdata_arr):\n temp_datetime = datetime.datetime(temp_realtidedata.targetdate.year, temp_realtidedata.targetdate.month, temp_realtidedata.targetdate.day, 0, 0) + datetime.timedelta(hours=index)\n list_tidedata.append(TideRealMidModel(temp_realdata, temp_datetime))\n return list_tidedata\n<|end_body_3|>\n", "revision_id": "53289e9583e52531346031921fb8f8b7026e399d", "skeleton": "<|skeleton|>\nclass StationTideDataListView:\n\n def get(self, request):\n \"\"\":param request: :return:\"\"\"\n <|body_0|>\n\n def getStationTargetRealData(self, targetdatetime: datetime, code: str) -> []:\n \"\"\"根据时间获取该时间该台风的测站数据 :param date: :return:\"\"\"\n <|body_1|>\n\n def dataListMax(self, data: StationTideData, date: datetime.date) -> TideRealMidModel:\n \"\"\"找到传入的站点的极值(极大值) :param data: :param date: :return: 极大值(TideRealMidModel)\"\"\"\n <|body_2|>\n\n def getTargetDateRealData(self, data: StationTideData, date: datetime.date, **kwargs) -> []:\n \"\"\"根据时间获取该时刻的观测值list :param date: :return:\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class StationTideDataListView:\n def get(self, request):\n \"\"\":param request: :return:\"\"\"\n \"\"\"\n 大致步骤:\n 1-获取传入date以及code\n 2-根据code以及date获取geostationtidedata中的一个 StationTideData(model)\n 3-从realdata中获取过程中的极值出现时间以及值\n \"\"\"\n code = request.GET.get('code', settings.DEFAULT_TYPHOON_CODE_BYSTATION)\n self.code = code\n date_str = request.GET.get('date', settings.DEFAULT_TYPHOON_DATE)\n targetdate = dateutil.parser.parse(date_str)\n filter_list = self.getStationTargetRealData(targetdate, code)\n json_data = StationTideIncludeAllMidModelSerializer(filter_list, many=True).data\n return Response(json_data)\n\n def getStationTargetRealData(self, targetdatetime: datetime, code: str) -> []:\n \"\"\"根据时间获取该时间该台风的测站数据 :param date: :return:\"\"\"\n targetdate = date(targetdatetime.year, targetdatetime.month, targetdatetime.day)\n list = StationTideData.objects(typhoonnum=code)\n\n def getTargetMoment(moment: datetime, realdate: StationTideData) -> StationTideAllDataMidModel:\n \"\"\"\n 根据指定时刻,从当前的测站数据中找到对应时刻的观测值\n :param moment:\n :param realdate:\n :return:\n \"\"\"\n '\\n 思路:\\n \\n '\n date_moment = date(moment.year, moment.month, moment.day)\n hour_moment = moment.hour\n datetime_moment = datetime(moment.year, moment.month, moment.day, moment.hour, 0).replace(tzinfo=TZ_UTC_0)\n '\\n s1:现传入的moment是一个utc的datetime(前台传递过来的是utc时间,后端不再做时区的修改)\\n 1: 对于测站数据的查询需要根据一个datetime去获取realtidedata数组中的每一个targetdate\\n 2: 而其中的forecastdata->forecast_arr 是根据北京时间来保存的(我日你妈!)\\n ---\\n 1: 创建两个datetime变量,一个用来存储utc时间,一个用来存储beijing时间\\n 2: utc时间用来找到targetdate\\n 3: beijing时间用来获取每个小时的测值\\n '\n moment_utc = moment\n moment_bj = moment + timedelta(hours=8)\n datetime_utc_start = datetime(year=moment_bj.year, month=moment_bj.month, day=moment_bj.day) + timedelta(hours=-8)\n temp_realtidedata = [temp for temp in realdate.realtidedata if temp.targetdate == datetime_utc_start]\n if len(temp_realtidedata) > 0:\n 
return StationTideAllDataMidModel(temp_realtidedata[0].realdata.realdata_arr[moment_bj.hour], temp_realtidedata[0].forecastdata.forecast_arr[moment_bj.hour], moment_utc)\n else:\n return None\n list_StationForecast = []\n if len(list) > 0:\n for temp in list:\n list_StationForecast.append(StationTideIncludeForecastMidModel(temp, getTargetMoment(targetdatetime, temp)))\n return list_StationForecast\n\n def dataListMax(self, data: StationTideData, date: datetime.date) -> TideRealMidModel:\n \"\"\"找到传入的站点的极值(极大值) :param data: :param date: :return: 极大值(TideRealMidModel)\"\"\"\n list = self.getTargetDateRealData(data, date)\n max_data = max(list, key=lambda x: x.val)\n return max_data\n\n def getTargetDateRealData(self, data: StationTideData, date: datetime.date, **kwargs) -> []:\n \"\"\"根据时间获取该时刻的观测值list :param date: :return:\"\"\"\n days = 0\n list_tidedata = []\n if 'days' in kwargs:\n days = int(kwargs.get('days'))\n for temp_realtidedata in data.realtidedata:\n for index, temp_realdata in enumerate(temp_realtidedata.realdata.realdata_arr):\n temp_datetime = datetime.datetime(temp_realtidedata.targetdate.year, temp_realtidedata.targetdate.month, temp_realtidedata.targetdate.day, 0, 0) + datetime.timedelta(hours=index)\n list_tidedata.append(TideRealMidModel(temp_realdata, temp_datetime))\n return list_tidedata\n", "source": "the_stack_v2_python_sparse", "source_path": "docker/pull-code/210724/code/apps/Typhoon/views.py", "source_repo": "evaseemefly/TyphoonSearchSys", "split": "val", "star_events_count": 20}
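The trickiest logic in the StationTideDataListView record above is the timezone bookkeeping its embedded comments describe: hourly tide values are stored in per-day arrays indexed by Beijing-time hour, while each day's record is keyed by the UTC datetime corresponding to Beijing midnight (UTC+8, hence the -8h offset). A small sketch isolating just that index math; locate is an invented helper name, not from the source:

from datetime import datetime, timedelta


def locate(moment_utc):
    """Map a UTC moment to (UTC day key, Beijing-hour array index)."""
    moment_bj = moment_utc + timedelta(hours=8)          # Beijing wall-clock time
    day_key_utc = datetime(moment_bj.year, moment_bj.month,
                           moment_bj.day) - timedelta(hours=8)
    return day_key_utc, moment_bj.hour


# 18:00 UTC is already 02:00 the *next* day in Beijing:
key, hour = locate(datetime(2020, 8, 3, 18, 0))
print(key, hour)  # 2020-08-03 16:00:00  2

The record's getTargetMoment then matches that key against each realtidedata.targetdate and indexes realdata_arr and forecast_arr with the Beijing hour.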
{"blob_id": "7092c532c79feb18b6296cd3c9a95c8c33cbb0df", "bodies": ["if value is None:\n return default_value\nif not isinstance(value, int) and (not isinstance(value, float)):\n raise ValidationException(f'Parameter {name} must be a number')\nif value < 0:\n raise ValidationException(f'Parameter {name} cannot be lower than 0')\nreturn value", "value = self.validate_number(value, default_value, name)\nif value == 0:\n raise ValidationException(f'Parameter {name} must be bigger than 0')\nreturn value", "if value is None:\n return default_value\nif not isinstance(value, bool):\n raise ValidationException(f'Parameter {name} must be a boolean')\nreturn value"], "bodies_text": "<|body_start_0|>\n if value is None:\n return default_value\n if not isinstance(value, int) and (not isinstance(value, float)):\n raise ValidationException(f'Parameter {name} must be a number')\n if value < 0:\n raise ValidationException(f'Parameter {name} cannot be lower than 0')\n return value\n<|end_body_0|>\n\n<|body_start_1|>\n value = self.validate_number(value, default_value, name)\n if value == 0:\n raise ValidationException(f'Parameter {name} must be bigger than 0')\n return value\n<|end_body_1|>\n\n<|body_start_2|>\n if value is None:\n return default_value\n if not isinstance(value, bool):\n raise ValidationException(f'Parameter {name} must be a boolean')\n return value\n<|end_body_2|>\n", "class_docstring": "Class for validating API options.", "class_name": "OptionsValidator", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass OptionsValidator:\n \"\"\"Class for validating API options.\"\"\"\n\n def validate_number(self, value: int or float or None, default_value: int or float, name: str):\n \"\"\"Validates a number parameter. Args: value: Value to validate. default_value: Default value for an option. name: Option name. Returns: Validated value. Raises: ValidationException: If value is invalid.\"\"\"\n <|body_0|>\n\n def validate_non_zero(self, value: int or float or None, default_value: int or float, name: str):\n \"\"\"Validates a number parameter to be above zero. Args: value: Value to validate. default_value: Default value for an option. name: Option name. Returns: Validated value. Raises: ValidationException: If value is invalid.\"\"\"\n <|body_1|>\n\n def validate_boolean(self, value: bool or None, default_value: bool, name: str):\n \"\"\"Validates a number parameter. Args: value: Value to validate. default_value: Default value for an option. name: Option name. Returns: Validated value. Raises: ValidationException: If value is invalid.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if value is None:\n return default_value\n if not isinstance(value, int) and (not isinstance(value, float)):\n raise ValidationException(f'Parameter {name} must be a number')\n if value < 0:\n raise ValidationException(f'Parameter {name} cannot be lower than 0')\n return value\n<|end_body_0|>\n\n<|body_start_1|>\n value = self.validate_number(value, default_value, name)\n if value == 0:\n raise ValidationException(f'Parameter {name} must be bigger than 0')\n return value\n<|end_body_1|>\n\n<|body_start_2|>\n if value is None:\n return default_value\n if not isinstance(value, bool):\n raise ValidationException(f'Parameter {name} must be a boolean')\n return value\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000463", "length_bytes": 2079, "license_type": "no_license", "methods": [{"docstring": "Validates a number parameter. 
Args: value: Value to validate. default_value: Default value for an option. name: Option name. Returns: Validated value. Raises: ValidationException: If value is invalid.", "name": "validate_number", "signature": "def validate_number(self, value: int or float or None, default_value: int or float, name: str)"}, {"docstring": "Validates a number parameter to be above zero. Args: value: Value to validate. default_value: Default value for an option. name: Option name. Returns: Validated value. Raises: ValidationException: If value is invalid.", "name": "validate_non_zero", "signature": "def validate_non_zero(self, value: int or float or None, default_value: int or float, name: str)"}, {"docstring": "Validates a number parameter. Args: value: Value to validate. default_value: Default value for an option. name: Option name. Returns: Validated value. Raises: ValidationException: If value is invalid.", "name": "validate_boolean", "signature": "def validate_boolean(self, value: bool or None, default_value: bool, name: str)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_008418", "prompt": "Implement the Python class `OptionsValidator` described below.\n\nClass description:\nClass for validating API options.\n\nMethod signatures and docstrings:\n- def validate_number(self, value: int or float or None, default_value: int or float, name: str): Validates a number parameter. Args: value: Value to validate. default_value: Default value for an option. name: Option name. Returns: Validated value. Raises: ValidationException: If value is invalid.\n- def validate_non_zero(self, value: int or float or None, default_value: int or float, name: str): Validates a number parameter to be above zero. Args: value: Value to validate. default_value: Default value for an option. name: Option name. Returns: Validated value. Raises: ValidationException: If value is invalid.\n- def validate_boolean(self, value: bool or None, default_value: bool, name: str): Validates a number parameter. Args: value: Value to validate. default_value: Default value for an option. name: Option name. Returns: Validated value. Raises: ValidationException: If value is invalid.", "prompted_full_text": "Implement the Python class `OptionsValidator` described below.\n\nClass description:\nClass for validating API options.\n\nMethod signatures and docstrings:\n- def validate_number(self, value: int or float or None, default_value: int or float, name: str): Validates a number parameter. Args: value: Value to validate. default_value: Default value for an option. name: Option name. Returns: Validated value. Raises: ValidationException: If value is invalid.\n- def validate_non_zero(self, value: int or float or None, default_value: int or float, name: str): Validates a number parameter to be above zero. Args: value: Value to validate. default_value: Default value for an option. name: Option name. Returns: Validated value. Raises: ValidationException: If value is invalid.\n- def validate_boolean(self, value: bool or None, default_value: bool, name: str): Validates a number parameter. Args: value: Value to validate. default_value: Default value for an option. name: Option name. Returns: Validated value. Raises: ValidationException: If value is invalid.\n\n<|skeleton|>\nclass OptionsValidator:\n \"\"\"Class for validating API options.\"\"\"\n\n def validate_number(self, value: int or float or None, default_value: int or float, name: str):\n \"\"\"Validates a number parameter. Args: value: Value to validate. 
default_value: Default value for an option. name: Option name. Returns: Validated value. Raises: ValidationException: If value is invalid.\"\"\"\n <|body_0|>\n\n def validate_non_zero(self, value: int or float or None, default_value: int or float, name: str):\n \"\"\"Validates a number parameter to be above zero. Args: value: Value to validate. default_value: Default value for an option. name: Option name. Returns: Validated value. Raises: ValidationException: If value is invalid.\"\"\"\n <|body_1|>\n\n def validate_boolean(self, value: bool or None, default_value: bool, name: str):\n \"\"\"Validates a number parameter. Args: value: Value to validate. default_value: Default value for an option. name: Option name. Returns: Validated value. Raises: ValidationException: If value is invalid.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if value is None:\n return default_value\n if not isinstance(value, int) and (not isinstance(value, float)):\n raise ValidationException(f'Parameter {name} must be a number')\n if value < 0:\n raise ValidationException(f'Parameter {name} cannot be lower than 0')\n return value\n<|end_body_0|>\n\n<|body_start_1|>\n value = self.validate_number(value, default_value, name)\n if value == 0:\n raise ValidationException(f'Parameter {name} must be bigger than 0')\n return value\n<|end_body_1|>\n\n<|body_start_2|>\n if value is None:\n return default_value\n if not isinstance(value, bool):\n raise ValidationException(f'Parameter {name} must be a boolean')\n return value\n<|end_body_2|>\n", "revision_id": "b410e4c6bc4b11fc6ed85c91aca43e07fcd5fd2c", "skeleton": "<|skeleton|>\nclass OptionsValidator:\n \"\"\"Class for validating API options.\"\"\"\n\n def validate_number(self, value: int or float or None, default_value: int or float, name: str):\n \"\"\"Validates a number parameter. Args: value: Value to validate. default_value: Default value for an option. name: Option name. Returns: Validated value. Raises: ValidationException: If value is invalid.\"\"\"\n <|body_0|>\n\n def validate_non_zero(self, value: int or float or None, default_value: int or float, name: str):\n \"\"\"Validates a number parameter to be above zero. Args: value: Value to validate. default_value: Default value for an option. name: Option name. Returns: Validated value. Raises: ValidationException: If value is invalid.\"\"\"\n <|body_1|>\n\n def validate_boolean(self, value: bool or None, default_value: bool, name: str):\n \"\"\"Validates a number parameter. Args: value: Value to validate. default_value: Default value for an option. name: Option name. Returns: Validated value. Raises: ValidationException: If value is invalid.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class OptionsValidator:\n \"\"\"Class for validating API options.\"\"\"\n\n def validate_number(self, value: int or float or None, default_value: int or float, name: str):\n \"\"\"Validates a number parameter. Args: value: Value to validate. default_value: Default value for an option. name: Option name. Returns: Validated value. 
Raises: ValidationException: If value is invalid.\"\"\"\n if value is None:\n return default_value\n if not isinstance(value, int) and (not isinstance(value, float)):\n raise ValidationException(f'Parameter {name} must be a number')\n if value < 0:\n raise ValidationException(f'Parameter {name} cannot be lower than 0')\n return value\n\n def validate_non_zero(self, value: int or float or None, default_value: int or float, name: str):\n \"\"\"Validates a number parameter to be above zero. Args: value: Value to validate. default_value: Default value for an option. name: Option name. Returns: Validated value. Raises: ValidationException: If value is invalid.\"\"\"\n value = self.validate_number(value, default_value, name)\n if value == 0:\n raise ValidationException(f'Parameter {name} must be bigger than 0')\n return value\n\n def validate_boolean(self, value: bool or None, default_value: bool, name: str):\n \"\"\"Validates a number parameter. Args: value: Value to validate. default_value: Default value for an option. name: Option name. Returns: Validated value. Raises: ValidationException: If value is invalid.\"\"\"\n if value is None:\n return default_value\n if not isinstance(value, bool):\n raise ValidationException(f'Parameter {name} must be a boolean')\n return value\n", "source": "the_stack_v2_python_sparse", "source_path": "lib/clients/optionsValidator.py", "source_repo": "alading241/metaapi-python-sdk", "split": "val", "star_events_count": 0}
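Two Python subtleties lurk in the OptionsValidator record above. The annotation `int or float or None` evaluates left-to-right to just `int`, so those signatures do not express the union they intend; and `isinstance(value, int)` also accepts bool, since bool subclasses int. The sketch below keeps the record's behaviour but writes the annotations as real unions and excludes bool explicitly (both adjustments are mine, not the source's):

from typing import Optional, Union

Number = Union[int, float]


class ValidationException(Exception):
    pass


class OptionsValidator:
    def validate_number(self, value: Optional[Number], default_value: Number,
                        name: str) -> Number:
        if value is None:
            return default_value
        if isinstance(value, bool) or not isinstance(value, (int, float)):
            raise ValidationException(f'Parameter {name} must be a number')
        if value < 0:
            raise ValidationException(f'Parameter {name} cannot be lower than 0')
        return value

    def validate_non_zero(self, value: Optional[Number], default_value: Number,
                          name: str) -> Number:
        value = self.validate_number(value, default_value, name)
        if value == 0:
            raise ValidationException(f'Parameter {name} must be bigger than 0')
        return value

    def validate_boolean(self, value: Optional[bool], default_value: bool,
                         name: str) -> bool:
        if value is None:
            return default_value
        if not isinstance(value, bool):
            raise ValidationException(f'Parameter {name} must be a boolean')
        return value


v = OptionsValidator()
print(v.validate_number(None, 30, 'timeout'))   # 30 -- default applied
print(v.validate_non_zero(0.5, 1, 'interval'))  # 0.5
try:
    v.validate_non_zero(0, 1, 'interval')
except ValidationException as e:
    print(e)  # Parameter interval must be bigger than 0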
{"blob_id": "e65fae160ea63fb8b362ccd5704e5203c4465e1f", "bodies": ["req_parser = RequestParser()\nreq_parser.add_argument('image', type=parser.image_file, required=True, location='files')\nfile = req_parser.parse_args()\nuser_id = g.user_id\ntry:\n image_key = upload_image(file['image'].read())\nexcept Exception as e:\n current_app.logger.error('upload failed {}'.format(e))\n return ({'message': 'Uploading profile photo image failed.'}, 507)\nquery = insert(Material).values(user_id=user_id, type=Material.TYPE.IMAGE, hash=image_key, url=image_key, status=Material.STATUS.APPROVED).on_duplicate_key_update(status=Material.STATUS.APPROVED)\ndb.session.execute(query)\ndb.session.commit()\nmaterial = Material.query.options(load_only(Material.id, Material.url)).filter_by(user_id=user_id, hash=image_key).first()\nreturn ({'id': material.id, 'url': current_app.config['QINIU_DOMAIN'] + material.url}, 201)", "req_parser = RequestParser()\nreq_parser.add_argument('collect', type=inputs.boolean, required=False, location='args')\nreq_parser.add_argument('page', type=inputs.positive, required=False, location='args')\nreq_parser.add_argument('per_page', type=inputs.int_range(1, constants.DEFAULT_IMAGE_PER_PAGE_MAX, 'per_page'), required=False, location='args')\nargs = req_parser.parse_args()\ncollect = args['collect']\npage = 1 if args['page'] is None else args['page']\nper_page = args.per_page if args.per_page else constants.DEFAULT_IMAGE_PER_PAGE\nresp = {'total_count': 0, 'page': page, 'per_page': per_page, 'results': []}\ntotal_query = db.session.query(func.count(Material.id)).filter(Material.user_id == g.user_id, Material.status != Material.STATUS.DELETED)\nif collect:\n total_query = total_query.filter_by(is_collected=True)\nret = total_query.first()\ntotal_count = ret[0]\nif total_count == 0 or page > math.ceil(total_count / per_page):\n return resp\nquery = Material.query.options(load_only(Material.id, Material.url, Material.is_collected)).filter(Material.user_id == g.user_id, Material.status != Material.STATUS.DELETED)\nif collect:\n query = query.filter_by(is_collected=True)\nmaterials = query.order_by(Material.is_collected.desc(), Material.ctime.desc()).offset((page - 1) * per_page).limit(per_page).all()\nresults = []\nfor material in materials:\n results.append(dict(id=material.id, url=current_app.config['QINIU_DOMAIN'] + material.url, is_collected=material.is_collected))\nresp['total_count'] = total_count\nresp['results'] = results\nreturn resp"], "bodies_text": "<|body_start_0|>\n req_parser = RequestParser()\n req_parser.add_argument('image', type=parser.image_file, required=True, location='files')\n file = req_parser.parse_args()\n user_id = g.user_id\n try:\n image_key = upload_image(file['image'].read())\n except Exception as e:\n current_app.logger.error('upload failed {}'.format(e))\n return ({'message': 'Uploading profile photo image failed.'}, 507)\n query = insert(Material).values(user_id=user_id, type=Material.TYPE.IMAGE, hash=image_key, url=image_key, status=Material.STATUS.APPROVED).on_duplicate_key_update(status=Material.STATUS.APPROVED)\n db.session.execute(query)\n db.session.commit()\n material = Material.query.options(load_only(Material.id, Material.url)).filter_by(user_id=user_id, hash=image_key).first()\n return ({'id': material.id, 'url': current_app.config['QINIU_DOMAIN'] + material.url}, 201)\n<|end_body_0|>\n\n<|body_start_1|>\n req_parser = RequestParser()\n req_parser.add_argument('collect', type=inputs.boolean, required=False, location='args')\n 
req_parser.add_argument('page', type=inputs.positive, required=False, location='args')\n req_parser.add_argument('per_page', type=inputs.int_range(1, constants.DEFAULT_IMAGE_PER_PAGE_MAX, 'per_page'), required=False, location='args')\n args = req_parser.parse_args()\n collect = args['collect']\n page = 1 if args['page'] is None else args['page']\n per_page = args.per_page if args.per_page else constants.DEFAULT_IMAGE_PER_PAGE\n resp = {'total_count': 0, 'page': page, 'per_page': per_page, 'results': []}\n total_query = db.session.query(func.count(Material.id)).filter(Material.user_id == g.user_id, Material.status != Material.STATUS.DELETED)\n if collect:\n total_query = total_query.filter_by(is_collected=True)\n ret = total_query.first()\n total_count = ret[0]\n if total_count == 0 or page > math.ceil(total_count / per_page):\n return resp\n query = Material.query.options(load_only(Material.id, Material.url, Material.is_collected)).filter(Material.user_id == g.user_id, Material.status != Material.STATUS.DELETED)\n if collect:\n query = query.filter_by(is_collected=True)\n materials = query.order_by(Material.is_collected.desc(), Material.ctime.desc()).offset((page - 1) * per_page).limit(per_page).all()\n results = []\n for material in materials:\n results.append(dict(id=material.id, url=current_app.config['QINIU_DOMAIN'] + material.url, is_collected=material.is_collected))\n resp['total_count'] = total_count\n resp['results'] = results\n return resp\n<|end_body_1|>\n", "class_docstring": "图片资源", "class_name": "ImageListResource", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ImageListResource:\n \"\"\"图片资源\"\"\"\n\n def post(self):\n \"\"\"上传图片文件\"\"\"\n <|body_0|>\n\n def get(self):\n \"\"\"查询图片\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n req_parser = RequestParser()\n req_parser.add_argument('image', type=parser.image_file, required=True, location='files')\n file = req_parser.parse_args()\n user_id = g.user_id\n try:\n image_key = upload_image(file['image'].read())\n except Exception as e:\n current_app.logger.error('upload failed {}'.format(e))\n return ({'message': 'Uploading profile photo image failed.'}, 507)\n query = insert(Material).values(user_id=user_id, type=Material.TYPE.IMAGE, hash=image_key, url=image_key, status=Material.STATUS.APPROVED).on_duplicate_key_update(status=Material.STATUS.APPROVED)\n db.session.execute(query)\n db.session.commit()\n material = Material.query.options(load_only(Material.id, Material.url)).filter_by(user_id=user_id, hash=image_key).first()\n return ({'id': material.id, 'url': current_app.config['QINIU_DOMAIN'] + material.url}, 201)\n<|end_body_0|>\n\n<|body_start_1|>\n req_parser = RequestParser()\n req_parser.add_argument('collect', type=inputs.boolean, required=False, location='args')\n req_parser.add_argument('page', type=inputs.positive, required=False, location='args')\n req_parser.add_argument('per_page', type=inputs.int_range(1, constants.DEFAULT_IMAGE_PER_PAGE_MAX, 'per_page'), required=False, location='args')\n args = req_parser.parse_args()\n collect = args['collect']\n page = 1 if args['page'] is None else args['page']\n per_page = args.per_page if args.per_page else constants.DEFAULT_IMAGE_PER_PAGE\n resp = {'total_count': 0, 'page': page, 'per_page': per_page, 'results': []}\n total_query = db.session.query(func.count(Material.id)).filter(Material.user_id == g.user_id, Material.status != Material.STATUS.DELETED)\n if collect:\n total_query = 
total_query.filter_by(is_collected=True)\n ret = total_query.first()\n total_count = ret[0]\n if total_count == 0 or page > math.ceil(total_count / per_page):\n return resp\n query = Material.query.options(load_only(Material.id, Material.url, Material.is_collected)).filter(Material.user_id == g.user_id, Material.status != Material.STATUS.DELETED)\n if collect:\n query = query.filter_by(is_collected=True)\n materials = query.order_by(Material.is_collected.desc(), Material.ctime.desc()).offset((page - 1) * per_page).limit(per_page).all()\n results = []\n for material in materials:\n results.append(dict(id=material.id, url=current_app.config['QINIU_DOMAIN'] + material.url, is_collected=material.is_collected))\n resp['total_count'] = total_count\n resp['results'] = results\n return resp\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000464", "length_bytes": 4926, "license_type": "no_license", "methods": [{"docstring": "上传图片文件", "name": "post", "signature": "def post(self)"}, {"docstring": "查询图片", "name": "get", "signature": "def get(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_028043", "prompt": "Implement the Python class `ImageListResource` described below.\n\nClass description:\n图片资源\n\nMethod signatures and docstrings:\n- def post(self): 上传图片文件\n- def get(self): 查询图片", "prompted_full_text": "Implement the Python class `ImageListResource` described below.\n\nClass description:\n图片资源\n\nMethod signatures and docstrings:\n- def post(self): 上传图片文件\n- def get(self): 查询图片\n\n<|skeleton|>\nclass ImageListResource:\n \"\"\"图片资源\"\"\"\n\n def post(self):\n \"\"\"上传图片文件\"\"\"\n <|body_0|>\n\n def get(self):\n \"\"\"查询图片\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n req_parser = RequestParser()\n req_parser.add_argument('image', type=parser.image_file, required=True, location='files')\n file = req_parser.parse_args()\n user_id = g.user_id\n try:\n image_key = upload_image(file['image'].read())\n except Exception as e:\n current_app.logger.error('upload failed {}'.format(e))\n return ({'message': 'Uploading profile photo image failed.'}, 507)\n query = insert(Material).values(user_id=user_id, type=Material.TYPE.IMAGE, hash=image_key, url=image_key, status=Material.STATUS.APPROVED).on_duplicate_key_update(status=Material.STATUS.APPROVED)\n db.session.execute(query)\n db.session.commit()\n material = Material.query.options(load_only(Material.id, Material.url)).filter_by(user_id=user_id, hash=image_key).first()\n return ({'id': material.id, 'url': current_app.config['QINIU_DOMAIN'] + material.url}, 201)\n<|end_body_0|>\n\n<|body_start_1|>\n req_parser = RequestParser()\n req_parser.add_argument('collect', type=inputs.boolean, required=False, location='args')\n req_parser.add_argument('page', type=inputs.positive, required=False, location='args')\n req_parser.add_argument('per_page', type=inputs.int_range(1, constants.DEFAULT_IMAGE_PER_PAGE_MAX, 'per_page'), required=False, location='args')\n args = req_parser.parse_args()\n collect = args['collect']\n page = 1 if args['page'] is None else args['page']\n per_page = args.per_page if args.per_page else constants.DEFAULT_IMAGE_PER_PAGE\n resp = {'total_count': 0, 'page': page, 'per_page': per_page, 'results': []}\n total_query = db.session.query(func.count(Material.id)).filter(Material.user_id == g.user_id, Material.status != Material.STATUS.DELETED)\n if collect:\n total_query = total_query.filter_by(is_collected=True)\n ret = total_query.first()\n total_count = ret[0]\n if total_count == 0 or 
page > math.ceil(total_count / per_page):\n return resp\n query = Material.query.options(load_only(Material.id, Material.url, Material.is_collected)).filter(Material.user_id == g.user_id, Material.status != Material.STATUS.DELETED)\n if collect:\n query = query.filter_by(is_collected=True)\n materials = query.order_by(Material.is_collected.desc(), Material.ctime.desc()).offset((page - 1) * per_page).limit(per_page).all()\n results = []\n for material in materials:\n results.append(dict(id=material.id, url=current_app.config['QINIU_DOMAIN'] + material.url, is_collected=material.is_collected))\n resp['total_count'] = total_count\n resp['results'] = results\n return resp\n<|end_body_1|>\n", "revision_id": "c9703a9c57a98babf8d1e41b227aada9ef4bfe15", "skeleton": "<|skeleton|>\nclass ImageListResource:\n \"\"\"图片资源\"\"\"\n\n def post(self):\n \"\"\"上传图片文件\"\"\"\n <|body_0|>\n\n def get(self):\n \"\"\"查询图片\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ImageListResource:\n \"\"\"图片资源\"\"\"\n\n def post(self):\n \"\"\"上传图片文件\"\"\"\n req_parser = RequestParser()\n req_parser.add_argument('image', type=parser.image_file, required=True, location='files')\n file = req_parser.parse_args()\n user_id = g.user_id\n try:\n image_key = upload_image(file['image'].read())\n except Exception as e:\n current_app.logger.error('upload failed {}'.format(e))\n return ({'message': 'Uploading profile photo image failed.'}, 507)\n query = insert(Material).values(user_id=user_id, type=Material.TYPE.IMAGE, hash=image_key, url=image_key, status=Material.STATUS.APPROVED).on_duplicate_key_update(status=Material.STATUS.APPROVED)\n db.session.execute(query)\n db.session.commit()\n material = Material.query.options(load_only(Material.id, Material.url)).filter_by(user_id=user_id, hash=image_key).first()\n return ({'id': material.id, 'url': current_app.config['QINIU_DOMAIN'] + material.url}, 201)\n\n def get(self):\n \"\"\"查询图片\"\"\"\n req_parser = RequestParser()\n req_parser.add_argument('collect', type=inputs.boolean, required=False, location='args')\n req_parser.add_argument('page', type=inputs.positive, required=False, location='args')\n req_parser.add_argument('per_page', type=inputs.int_range(1, constants.DEFAULT_IMAGE_PER_PAGE_MAX, 'per_page'), required=False, location='args')\n args = req_parser.parse_args()\n collect = args['collect']\n page = 1 if args['page'] is None else args['page']\n per_page = args.per_page if args.per_page else constants.DEFAULT_IMAGE_PER_PAGE\n resp = {'total_count': 0, 'page': page, 'per_page': per_page, 'results': []}\n total_query = db.session.query(func.count(Material.id)).filter(Material.user_id == g.user_id, Material.status != Material.STATUS.DELETED)\n if collect:\n total_query = total_query.filter_by(is_collected=True)\n ret = total_query.first()\n total_count = ret[0]\n if total_count == 0 or page > math.ceil(total_count / per_page):\n return resp\n query = Material.query.options(load_only(Material.id, Material.url, Material.is_collected)).filter(Material.user_id == g.user_id, Material.status != Material.STATUS.DELETED)\n if collect:\n query = query.filter_by(is_collected=True)\n materials = query.order_by(Material.is_collected.desc(), Material.ctime.desc()).offset((page - 1) * per_page).limit(per_page).all()\n results = []\n for material in materials:\n results.append(dict(id=material.id, url=current_app.config['QINIU_DOMAIN'] + 
material.url, is_collected=material.is_collected))\n resp['total_count'] = total_count\n resp['results'] = results\n return resp\n", "source": "the_stack_v2_python_sparse", "source_path": "mp/resources/news/material.py", "source_repo": "Yaooooooooooooo/toutiao-backend", "split": "val", "star_events_count": 0}
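The ImageListResource.get body in the record above packs several easy-to-miss pieces of pagination arithmetic into one method: 1-based pages, a per_page clamp, and a math.ceil guard that short-circuits out-of-range pages before the second query runs. The sketch below isolates that arithmetic on a plain list, assuming hypothetical stand-ins (paginate, DEFAULT_PER_PAGE, PER_PAGE_MAX) for the record's constants module; the SQL offset/limit pair becomes a slice.

import math

DEFAULT_PER_PAGE = 20   # assumed stand-in for constants.DEFAULT_IMAGE_PER_PAGE
PER_PAGE_MAX = 100      # assumed stand-in for constants.DEFAULT_IMAGE_PER_PAGE_MAX

def paginate(items, page=None, per_page=None):
    # Same defaults as the record: page falls back to 1, per_page to the constant.
    page = 1 if page is None else page
    per_page = min(per_page or DEFAULT_PER_PAGE, PER_PAGE_MAX)
    total = len(items)
    resp = {'total_count': 0, 'page': page, 'per_page': per_page, 'results': []}
    # Same guard: empty data or an out-of-range page returns the empty envelope
    # without running the second (materials) query.
    if total == 0 or page > math.ceil(total / per_page):
        return resp
    start = (page - 1) * per_page                      # mirrors .offset((page - 1) * per_page)
    resp['total_count'] = total
    resp['results'] = items[start:start + per_page]    # mirrors .limit(per_page)
    return resp

assert paginate(list(range(7)), page=2, per_page=3)['results'] == [3, 4, 5]
assert paginate([], page=5)['results'] == []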
{"blob_id": "eb640963f9ed25f83d8df1698d6f0f5713419449", "bodies": ["if 'modifier' in kwargs:\n self.modifier = kwargs['modifier']\nelif len(args) > 2:\n self.modifier = args[2]\n args = args[:2]\nelse:\n self.modifier = lambda x: x\nif not six.callable(self.modifier):\n raise TypeError('itermod(o, modifier): modifier must be callable')\nsuper(itermod, self).__init__(*args)", "if not n:\n n = 1\ntry:\n while len(self._cache) < n:\n self._cache.append(self.modifier(next(self._iterable)))\nexcept StopIteration:\n while len(self._cache) < n:\n self._cache.append(self.sentinel)"], "bodies_text": "<|body_start_0|>\n if 'modifier' in kwargs:\n self.modifier = kwargs['modifier']\n elif len(args) > 2:\n self.modifier = args[2]\n args = args[:2]\n else:\n self.modifier = lambda x: x\n if not six.callable(self.modifier):\n raise TypeError('itermod(o, modifier): modifier must be callable')\n super(itermod, self).__init__(*args)\n<|end_body_0|>\n\n<|body_start_1|>\n if not n:\n n = 1\n try:\n while len(self._cache) < n:\n self._cache.append(self.modifier(next(self._iterable)))\n except StopIteration:\n while len(self._cache) < n:\n self._cache.append(self.sentinel)\n<|end_body_1|>\n", "class_docstring": "An iterator object that supports modifying items as they are returned. >>> a = [\" A list \", ... \" of strings \", ... \" with \", ... \" extra \", ... \" whitespace. \"] >>> modifier = lambda s: s.strip().replace('with', 'without') >>> for s in itermod(a, modifier=modifier): ... print('\"%s\"' % s) \"A list\" \"of strings\" \"without\" \"extra\" \"whitespace.\" Args: o (iterable or callable): `o` is interpreted very differently depending on the presence of `sentinel`. If `sentinel` is not given, then `o` must be a collection object which supports either the iteration protocol or the sequence protocol. If `sentinel` is given, then `o` must be a callable object. sentinel (any value, optional): If given, the itera", "class_name": "itermod", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass itermod:\n \"\"\"An iterator object that supports modifying items as they are returned. >>> a = [\" A list \", ... \" of strings \", ... \" with \", ... \" extra \", ... \" whitespace. \"] >>> modifier = lambda s: s.strip().replace('with', 'without') >>> for s in itermod(a, modifier=modifier): ... print('\"%s\"' % s) \"A list\" \"of strings\" \"without\" \"extra\" \"whitespace.\" Args: o (iterable or callable): `o` is interpreted very differently depending on the presence of `sentinel`. If `sentinel` is not given, then `o` must be a collection object which supports either the iteration protocol or the sequence protocol. If `sentinel` is given, then `o` must be a callable object. sentinel (any value, optional): If given, the itera\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"__init__(o, sentinel=None, modifier=lambda x: x)\"\"\"\n <|body_0|>\n\n def _fillcache(self, n):\n \"\"\"Cache `n` modified items. If `n` is 0 or None, 1 item is cached. 
Each item returned by the iterator is passed through the `itermod.modified` function before being cached.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if 'modifier' in kwargs:\n self.modifier = kwargs['modifier']\n elif len(args) > 2:\n self.modifier = args[2]\n args = args[:2]\n else:\n self.modifier = lambda x: x\n if not six.callable(self.modifier):\n raise TypeError('itermod(o, modifier): modifier must be callable')\n super(itermod, self).__init__(*args)\n<|end_body_0|>\n\n<|body_start_1|>\n if not n:\n n = 1\n try:\n while len(self._cache) < n:\n self._cache.append(self.modifier(next(self._iterable)))\n except StopIteration:\n while len(self._cache) < n:\n self._cache.append(self.sentinel)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000465", "length_bytes": 8468, "license_type": "permissive", "methods": [{"docstring": "__init__(o, sentinel=None, modifier=lambda x: x)", "name": "__init__", "signature": "def __init__(self, *args, **kwargs)"}, {"docstring": "Cache `n` modified items. If `n` is 0 or None, 1 item is cached. Each item returned by the iterator is passed through the `itermod.modified` function before being cached.", "name": "_fillcache", "signature": "def _fillcache(self, n)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_020028", "prompt": "Implement the Python class `itermod` described below.\n\nClass description:\nAn iterator object that supports modifying items as they are returned. >>> a = [\" A list \", ... \" of strings \", ... \" with \", ... \" extra \", ... \" whitespace. \"] >>> modifier = lambda s: s.strip().replace('with', 'without') >>> for s in itermod(a, modifier=modifier): ... print('\"%s\"' % s) \"A list\" \"of strings\" \"without\" \"extra\" \"whitespace.\" Args: o (iterable or callable): `o` is interpreted very differently depending on the presence of `sentinel`. If `sentinel` is not given, then `o` must be a collection object which supports either the iteration protocol or the sequence protocol. If `sentinel` is given, then `o` must be a callable object. sentinel (any value, optional): If given, the itera\n\nMethod signatures and docstrings:\n- def __init__(self, *args, **kwargs): __init__(o, sentinel=None, modifier=lambda x: x)\n- def _fillcache(self, n): Cache `n` modified items. If `n` is 0 or None, 1 item is cached. Each item returned by the iterator is passed through the `itermod.modified` function before being cached.", "prompted_full_text": "Implement the Python class `itermod` described below.\n\nClass description:\nAn iterator object that supports modifying items as they are returned. >>> a = [\" A list \", ... \" of strings \", ... \" with \", ... \" extra \", ... \" whitespace. \"] >>> modifier = lambda s: s.strip().replace('with', 'without') >>> for s in itermod(a, modifier=modifier): ... print('\"%s\"' % s) \"A list\" \"of strings\" \"without\" \"extra\" \"whitespace.\" Args: o (iterable or callable): `o` is interpreted very differently depending on the presence of `sentinel`. If `sentinel` is not given, then `o` must be a collection object which supports either the iteration protocol or the sequence protocol. If `sentinel` is given, then `o` must be a callable object. sentinel (any value, optional): If given, the itera\n\nMethod signatures and docstrings:\n- def __init__(self, *args, **kwargs): __init__(o, sentinel=None, modifier=lambda x: x)\n- def _fillcache(self, n): Cache `n` modified items. If `n` is 0 or None, 1 item is cached. 
Each item returned by the iterator is passed through the `itermod.modified` function before being cached.\n\n<|skeleton|>\nclass itermod:\n \"\"\"An iterator object that supports modifying items as they are returned. >>> a = [\" A list \", ... \" of strings \", ... \" with \", ... \" extra \", ... \" whitespace. \"] >>> modifier = lambda s: s.strip().replace('with', 'without') >>> for s in itermod(a, modifier=modifier): ... print('\"%s\"' % s) \"A list\" \"of strings\" \"without\" \"extra\" \"whitespace.\" Args: o (iterable or callable): `o` is interpreted very differently depending on the presence of `sentinel`. If `sentinel` is not given, then `o` must be a collection object which supports either the iteration protocol or the sequence protocol. If `sentinel` is given, then `o` must be a callable object. sentinel (any value, optional): If given, the itera\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"__init__(o, sentinel=None, modifier=lambda x: x)\"\"\"\n <|body_0|>\n\n def _fillcache(self, n):\n \"\"\"Cache `n` modified items. If `n` is 0 or None, 1 item is cached. Each item returned by the iterator is passed through the `itermod.modified` function before being cached.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if 'modifier' in kwargs:\n self.modifier = kwargs['modifier']\n elif len(args) > 2:\n self.modifier = args[2]\n args = args[:2]\n else:\n self.modifier = lambda x: x\n if not six.callable(self.modifier):\n raise TypeError('itermod(o, modifier): modifier must be callable')\n super(itermod, self).__init__(*args)\n<|end_body_0|>\n\n<|body_start_1|>\n if not n:\n n = 1\n try:\n while len(self._cache) < n:\n self._cache.append(self.modifier(next(self._iterable)))\n except StopIteration:\n while len(self._cache) < n:\n self._cache.append(self.sentinel)\n<|end_body_1|>\n", "revision_id": "05dbd4575d01a213f3f4d69aa4968473f2536142", "skeleton": "<|skeleton|>\nclass itermod:\n \"\"\"An iterator object that supports modifying items as they are returned. >>> a = [\" A list \", ... \" of strings \", ... \" with \", ... \" extra \", ... \" whitespace. \"] >>> modifier = lambda s: s.strip().replace('with', 'without') >>> for s in itermod(a, modifier=modifier): ... print('\"%s\"' % s) \"A list\" \"of strings\" \"without\" \"extra\" \"whitespace.\" Args: o (iterable or callable): `o` is interpreted very differently depending on the presence of `sentinel`. If `sentinel` is not given, then `o` must be a collection object which supports either the iteration protocol or the sequence protocol. If `sentinel` is given, then `o` must be a callable object. sentinel (any value, optional): If given, the itera\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"__init__(o, sentinel=None, modifier=lambda x: x)\"\"\"\n <|body_0|>\n\n def _fillcache(self, n):\n \"\"\"Cache `n` modified items. If `n` is 0 or None, 1 item is cached. Each item returned by the iterator is passed through the `itermod.modified` function before being cached.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class itermod:\n \"\"\"An iterator object that supports modifying items as they are returned. >>> a = [\" A list \", ... \" of strings \", ... \" with \", ... \" extra \", ... \" whitespace. \"] >>> modifier = lambda s: s.strip().replace('with', 'without') >>> for s in itermod(a, modifier=modifier): ... 
print('\"%s\"' % s) \"A list\" \"of strings\" \"without\" \"extra\" \"whitespace.\" Args: o (iterable or callable): `o` is interpreted very differently depending on the presence of `sentinel`. If `sentinel` is not given, then `o` must be a collection object which supports either the iteration protocol or the sequence protocol. If `sentinel` is given, then `o` must be a callable object. sentinel (any value, optional): If given, the itera\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"__init__(o, sentinel=None, modifier=lambda x: x)\"\"\"\n if 'modifier' in kwargs:\n self.modifier = kwargs['modifier']\n elif len(args) > 2:\n self.modifier = args[2]\n args = args[:2]\n else:\n self.modifier = lambda x: x\n if not six.callable(self.modifier):\n raise TypeError('itermod(o, modifier): modifier must be callable')\n super(itermod, self).__init__(*args)\n\n def _fillcache(self, n):\n \"\"\"Cache `n` modified items. If `n` is 0 or None, 1 item is cached. Each item returned by the iterator is passed through the `itermod.modified` function before being cached.\"\"\"\n if not n:\n n = 1\n try:\n while len(self._cache) < n:\n self._cache.append(self.modifier(next(self._iterable)))\n except StopIteration:\n while len(self._cache) < n:\n self._cache.append(self.sentinel)\n", "source": "the_stack_v2_python_sparse", "source_path": "python/helpers/pockets/iterators.py", "source_repo": "JetBrains/intellij-community", "split": "val", "star_events_count": 16288}
{"blob_id": "3dbe257b5126749aad80941db64961baf9fcfc29", "bodies": ["user = instance\nuser_id = user.id\nnew_groups = validated_data.pop('groups', None)\nif new_groups is not None:\n UserInGroups = User.groups.through\n group_qs = UserInGroups.objects.filter(user=user)\n group_qs.exclude(group_id__in=(gr.id for gr in new_groups)).delete()\n for gr in new_groups:\n UserInGroups.objects.update_or_create(user=user, group=gr)\nprofile_data = validated_data.pop('profile', None)\nif profile_data is not None:\n profile = Profile.objects.get(id=user_id)\n new_casestudies = profile_data.pop('casestudies', None)\n if new_casestudies is not None:\n casestudy_qs = UserInCasestudy.objects.filter(user=user_id)\n casestudy_qs.exclude(id__in=(cs.id for cs in new_casestudies)).delete()\n for cs in new_casestudies:\n UserInCasestudy.objects.update_or_create(user=profile, casestudy=cs)\n for attr, value in profile_data.items():\n setattr(profile, attr, value)\n profile.save()\nfor attr, value in validated_data.items():\n setattr(instance, attr, value)\nif 'password' in validated_data:\n instance.set_password(validated_data['password'])\ninstance.save()\nreturn instance", "username = validated_data.pop('username')\nemail = validated_data.pop('email')\npassword = validated_data.pop('password')\nuser = User.objects.create_user(username, email, password)\nself.update(instance=user, validated_data=validated_data)\nreturn user"], "bodies_text": "<|body_start_0|>\n user = instance\n user_id = user.id\n new_groups = validated_data.pop('groups', None)\n if new_groups is not None:\n UserInGroups = User.groups.through\n group_qs = UserInGroups.objects.filter(user=user)\n group_qs.exclude(group_id__in=(gr.id for gr in new_groups)).delete()\n for gr in new_groups:\n UserInGroups.objects.update_or_create(user=user, group=gr)\n profile_data = validated_data.pop('profile', None)\n if profile_data is not None:\n profile = Profile.objects.get(id=user_id)\n new_casestudies = profile_data.pop('casestudies', None)\n if new_casestudies is not None:\n casestudy_qs = UserInCasestudy.objects.filter(user=user_id)\n casestudy_qs.exclude(id__in=(cs.id for cs in new_casestudies)).delete()\n for cs in new_casestudies:\n UserInCasestudy.objects.update_or_create(user=profile, casestudy=cs)\n for attr, value in profile_data.items():\n setattr(profile, attr, value)\n profile.save()\n for attr, value in validated_data.items():\n setattr(instance, attr, value)\n if 'password' in validated_data:\n instance.set_password(validated_data['password'])\n instance.save()\n return instance\n<|end_body_0|>\n\n<|body_start_1|>\n username = validated_data.pop('username')\n email = validated_data.pop('email')\n password = validated_data.pop('password')\n user = User.objects.create_user(username, email, password)\n self.update(instance=user, validated_data=validated_data)\n return user\n<|end_body_1|>\n", "class_docstring": "Serializer for put and post requests", "class_name": "UserSerializer", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass UserSerializer:\n \"\"\"Serializer for put and post requests\"\"\"\n\n def update(self, instance, validated_data):\n \"\"\"update the user-attributes, including profile information\"\"\"\n <|body_0|>\n\n def create(self, validated_data):\n \"\"\"Create a new user and its profile\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n user = instance\n user_id = user.id\n new_groups = validated_data.pop('groups', None)\n if new_groups is not None:\n 
UserInGroups = User.groups.through\n group_qs = UserInGroups.objects.filter(user=user)\n group_qs.exclude(group_id__in=(gr.id for gr in new_groups)).delete()\n for gr in new_groups:\n UserInGroups.objects.update_or_create(user=user, group=gr)\n profile_data = validated_data.pop('profile', None)\n if profile_data is not None:\n profile = Profile.objects.get(id=user_id)\n new_casestudies = profile_data.pop('casestudies', None)\n if new_casestudies is not None:\n casestudy_qs = UserInCasestudy.objects.filter(user=user_id)\n casestudy_qs.exclude(id__in=(cs.id for cs in new_casestudies)).delete()\n for cs in new_casestudies:\n UserInCasestudy.objects.update_or_create(user=profile, casestudy=cs)\n for attr, value in profile_data.items():\n setattr(profile, attr, value)\n profile.save()\n for attr, value in validated_data.items():\n setattr(instance, attr, value)\n if 'password' in validated_data:\n instance.set_password(validated_data['password'])\n instance.save()\n return instance\n<|end_body_0|>\n\n<|body_start_1|>\n username = validated_data.pop('username')\n email = validated_data.pop('email')\n password = validated_data.pop('password')\n user = User.objects.create_user(username, email, password)\n self.update(instance=user, validated_data=validated_data)\n return user\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000466", "length_bytes": 7759, "license_type": "no_license", "methods": [{"docstring": "update the user-attributes, including profile information", "name": "update", "signature": "def update(self, instance, validated_data)"}, {"docstring": "Create a new user and its profile", "name": "create", "signature": "def create(self, validated_data)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_050205", "prompt": "Implement the Python class `UserSerializer` described below.\n\nClass description:\nSerializer for put and post requests\n\nMethod signatures and docstrings:\n- def update(self, instance, validated_data): update the user-attributes, including profile information\n- def create(self, validated_data): Create a new user and its profile", "prompted_full_text": "Implement the Python class `UserSerializer` described below.\n\nClass description:\nSerializer for put and post requests\n\nMethod signatures and docstrings:\n- def update(self, instance, validated_data): update the user-attributes, including profile information\n- def create(self, validated_data): Create a new user and its profile\n\n<|skeleton|>\nclass UserSerializer:\n \"\"\"Serializer for put and post requests\"\"\"\n\n def update(self, instance, validated_data):\n \"\"\"update the user-attributes, including profile information\"\"\"\n <|body_0|>\n\n def create(self, validated_data):\n \"\"\"Create a new user and its profile\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n user = instance\n user_id = user.id\n new_groups = validated_data.pop('groups', None)\n if new_groups is not None:\n UserInGroups = User.groups.through\n group_qs = UserInGroups.objects.filter(user=user)\n group_qs.exclude(group_id__in=(gr.id for gr in new_groups)).delete()\n for gr in new_groups:\n UserInGroups.objects.update_or_create(user=user, group=gr)\n profile_data = validated_data.pop('profile', None)\n if profile_data is not None:\n profile = Profile.objects.get(id=user_id)\n new_casestudies = profile_data.pop('casestudies', None)\n if new_casestudies is not None:\n casestudy_qs = UserInCasestudy.objects.filter(user=user_id)\n casestudy_qs.exclude(id__in=(cs.id for cs in 
new_casestudies)).delete()\n for cs in new_casestudies:\n UserInCasestudy.objects.update_or_create(user=profile, casestudy=cs)\n for attr, value in profile_data.items():\n setattr(profile, attr, value)\n profile.save()\n for attr, value in validated_data.items():\n setattr(instance, attr, value)\n if 'password' in validated_data:\n instance.set_password(validated_data['password'])\n instance.save()\n return instance\n<|end_body_0|>\n\n<|body_start_1|>\n username = validated_data.pop('username')\n email = validated_data.pop('email')\n password = validated_data.pop('password')\n user = User.objects.create_user(username, email, password)\n self.update(instance=user, validated_data=validated_data)\n return user\n<|end_body_1|>\n", "revision_id": "a5ba34f085f0d5af5ea3ded24706ea54ab39e7cb", "skeleton": "<|skeleton|>\nclass UserSerializer:\n \"\"\"Serializer for put and post requests\"\"\"\n\n def update(self, instance, validated_data):\n \"\"\"update the user-attributes, including profile information\"\"\"\n <|body_0|>\n\n def create(self, validated_data):\n \"\"\"Create a new user and its profile\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class UserSerializer:\n \"\"\"Serializer for put and post requests\"\"\"\n\n def update(self, instance, validated_data):\n \"\"\"update the user-attributes, including profile information\"\"\"\n user = instance\n user_id = user.id\n new_groups = validated_data.pop('groups', None)\n if new_groups is not None:\n UserInGroups = User.groups.through\n group_qs = UserInGroups.objects.filter(user=user)\n group_qs.exclude(group_id__in=(gr.id for gr in new_groups)).delete()\n for gr in new_groups:\n UserInGroups.objects.update_or_create(user=user, group=gr)\n profile_data = validated_data.pop('profile', None)\n if profile_data is not None:\n profile = Profile.objects.get(id=user_id)\n new_casestudies = profile_data.pop('casestudies', None)\n if new_casestudies is not None:\n casestudy_qs = UserInCasestudy.objects.filter(user=user_id)\n casestudy_qs.exclude(id__in=(cs.id for cs in new_casestudies)).delete()\n for cs in new_casestudies:\n UserInCasestudy.objects.update_or_create(user=profile, casestudy=cs)\n for attr, value in profile_data.items():\n setattr(profile, attr, value)\n profile.save()\n for attr, value in validated_data.items():\n setattr(instance, attr, value)\n if 'password' in validated_data:\n instance.set_password(validated_data['password'])\n instance.save()\n return instance\n\n def create(self, validated_data):\n \"\"\"Create a new user and its profile\"\"\"\n username = validated_data.pop('username')\n email = validated_data.pop('email')\n password = validated_data.pop('password')\n user = User.objects.create_user(username, email, password)\n self.update(instance=user, validated_data=validated_data)\n return user\n", "source": "the_stack_v2_python_sparse", "source_path": "repair/apps/login/serializers/users.py", "source_repo": "MaxBo/REPAiR-Web", "split": "val", "star_events_count": 9}
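Both reconciliation branches in UserSerializer.update above follow the same two-step shape: delete link rows whose target is no longer in the submitted list, then update_or_create one row per target that should remain. Reduced to set arithmetic, with a hypothetical name (diff_links) and the ORM calls stripped out, the diff looks like this:

def diff_links(current, desired):
    # mirrors group_qs.exclude(group_id__in=...).delete() ...
    to_delete = current - desired
    # ... and the update_or_create loop over the submitted targets
    to_upsert = desired
    return to_delete, to_upsert

to_delete, to_upsert = diff_links({1, 2, 3}, {2, 3, 4})
assert to_delete == {1} and to_upsert == {2, 3, 4}

update_or_create makes the second step idempotent, which is why the record can loop over the full submitted list instead of computing desired minus current.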
{"blob_id": "60ec0c5870def3695556dcc29fd5d3026eacb8a5", "bodies": ["if nums == []:\n return []\nquadruplets = list()\nnums = sorted(nums)\nfor i in range(len(nums) - 3):\n if i > 0 and nums[i] == nums[i - 1]:\n continue\n for j in range(i + 1, len(nums) - 2):\n hash_ = []\n for k in range(j + 1, len(nums)):\n complement = target - (nums[i] + nums[j] + nums[k])\n if complement in hash_:\n if [nums[i], nums[j], nums[k], complement] not in quadruplets:\n quadruplets.append([nums[i], nums[j], nums[k], complement])\n hash_.append(nums[k])\nreturn quadruplets", "def inner_sort(arr):\n return sorted([sorted(x) for x in arr])\nassert inner_sort(self.fourSum([1, 0, -1, 0, -2, 2], 0)) == inner_sort([[-2, -1, 1, 2], [-2, 0, 0, 2], [-1, 0, 0, 1]])\nassert self.fourSum([], 0) == []\nassert inner_sort(self.fourSum([-2, -1, -1, 1, 1, 2, 2], 0)) == inner_sort([[-2, -1, 1, 2], [-1, -1, 1, 1]])\nassert self.fourSum([0, 0, 0, 0], 0) == [[0, 0, 0, 0]]\nassert inner_sort(self.fourSum([-5, 5, 4, -3, 0, 0, 4, -2], 4)) == inner_sort([[-5, 0, 4, 5], [-3, -2, 4, 5]])\nprint('All test cases ran successfully')"], "bodies_text": "<|body_start_0|>\n if nums == []:\n return []\n quadruplets = list()\n nums = sorted(nums)\n for i in range(len(nums) - 3):\n if i > 0 and nums[i] == nums[i - 1]:\n continue\n for j in range(i + 1, len(nums) - 2):\n hash_ = []\n for k in range(j + 1, len(nums)):\n complement = target - (nums[i] + nums[j] + nums[k])\n if complement in hash_:\n if [nums[i], nums[j], nums[k], complement] not in quadruplets:\n quadruplets.append([nums[i], nums[j], nums[k], complement])\n hash_.append(nums[k])\n return quadruplets\n<|end_body_0|>\n\n<|body_start_1|>\n def inner_sort(arr):\n return sorted([sorted(x) for x in arr])\n assert inner_sort(self.fourSum([1, 0, -1, 0, -2, 2], 0)) == inner_sort([[-2, -1, 1, 2], [-2, 0, 0, 2], [-1, 0, 0, 1]])\n assert self.fourSum([], 0) == []\n assert inner_sort(self.fourSum([-2, -1, -1, 1, 1, 2, 2], 0)) == inner_sort([[-2, -1, 1, 2], [-1, -1, 1, 1]])\n assert self.fourSum([0, 0, 0, 0], 0) == [[0, 0, 0, 0]]\n assert inner_sort(self.fourSum([-5, 5, 4, -3, 0, 0, 4, -2], 4)) == inner_sort([[-5, 0, 4, 5], [-3, -2, 4, 5]])\n print('All test cases ran successfully')\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def fourSum(self, nums: List[int], target: int) -> List[List[int]]:\n \"\"\":param nums: A list of integers :param target: The target to sum up to\"\"\"\n <|body_0|>\n\n def test_fourSum(self):\n \"\"\"Method to test the code with a few sample cases\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if nums == []:\n return []\n quadruplets = list()\n nums = sorted(nums)\n for i in range(len(nums) - 3):\n if i > 0 and nums[i] == nums[i - 1]:\n continue\n for j in range(i + 1, len(nums) - 2):\n hash_ = []\n for k in range(j + 1, len(nums)):\n complement = target - (nums[i] + nums[j] + nums[k])\n if complement in hash_:\n if [nums[i], nums[j], nums[k], complement] not in quadruplets:\n quadruplets.append([nums[i], nums[j], nums[k], complement])\n hash_.append(nums[k])\n return quadruplets\n<|end_body_0|>\n\n<|body_start_1|>\n def inner_sort(arr):\n return sorted([sorted(x) for x in arr])\n assert inner_sort(self.fourSum([1, 0, -1, 0, -2, 2], 0)) == inner_sort([[-2, -1, 1, 2], [-2, 0, 0, 2], [-1, 0, 0, 1]])\n assert self.fourSum([], 0) == []\n assert inner_sort(self.fourSum([-2, -1, -1, 1, 1, 2, 2], 0)) == 
inner_sort([[-2, -1, 1, 2], [-1, -1, 1, 1]])\n assert self.fourSum([0, 0, 0, 0], 0) == [[0, 0, 0, 0]]\n assert inner_sort(self.fourSum([-5, 5, 4, -3, 0, 0, 4, -2], 4)) == inner_sort([[-5, 0, 4, 5], [-3, -2, 4, 5]])\n print('All test cases ran successfully')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000467", "length_bytes": 1858, "license_type": "no_license", "methods": [{"docstring": ":param nums: A list of integers :param target: The target to sum up to", "name": "fourSum", "signature": "def fourSum(self, nums: List[int], target: int) -> List[List[int]]"}, {"docstring": "Method to test the code with a few sample cases", "name": "test_fourSum", "signature": "def test_fourSum(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_030311", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def fourSum(self, nums: List[int], target: int) -> List[List[int]]: :param nums: A list of integers :param target: The target to sum up to\n- def test_fourSum(self): Method to test the code with a few sample cases", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def fourSum(self, nums: List[int], target: int) -> List[List[int]]: :param nums: A list of integers :param target: The target to sum up to\n- def test_fourSum(self): Method to test the code with a few sample cases\n\n<|skeleton|>\nclass Solution:\n\n def fourSum(self, nums: List[int], target: int) -> List[List[int]]:\n \"\"\":param nums: A list of integers :param target: The target to sum up to\"\"\"\n <|body_0|>\n\n def test_fourSum(self):\n \"\"\"Method to test the code with a few sample cases\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if nums == []:\n return []\n quadruplets = list()\n nums = sorted(nums)\n for i in range(len(nums) - 3):\n if i > 0 and nums[i] == nums[i - 1]:\n continue\n for j in range(i + 1, len(nums) - 2):\n hash_ = []\n for k in range(j + 1, len(nums)):\n complement = target - (nums[i] + nums[j] + nums[k])\n if complement in hash_:\n if [nums[i], nums[j], nums[k], complement] not in quadruplets:\n quadruplets.append([nums[i], nums[j], nums[k], complement])\n hash_.append(nums[k])\n return quadruplets\n<|end_body_0|>\n\n<|body_start_1|>\n def inner_sort(arr):\n return sorted([sorted(x) for x in arr])\n assert inner_sort(self.fourSum([1, 0, -1, 0, -2, 2], 0)) == inner_sort([[-2, -1, 1, 2], [-2, 0, 0, 2], [-1, 0, 0, 1]])\n assert self.fourSum([], 0) == []\n assert inner_sort(self.fourSum([-2, -1, -1, 1, 1, 2, 2], 0)) == inner_sort([[-2, -1, 1, 2], [-1, -1, 1, 1]])\n assert self.fourSum([0, 0, 0, 0], 0) == [[0, 0, 0, 0]]\n assert inner_sort(self.fourSum([-5, 5, 4, -3, 0, 0, 4, -2], 4)) == inner_sort([[-5, 0, 4, 5], [-3, -2, 4, 5]])\n print('All test cases ran successfully')\n<|end_body_1|>\n", "revision_id": "575fa25c4586fa41b3d45d95dca6eff9584c3a4a", "skeleton": "<|skeleton|>\nclass Solution:\n\n def fourSum(self, nums: List[int], target: int) -> List[List[int]]:\n \"\"\":param nums: A list of integers :param target: The target to sum up to\"\"\"\n <|body_0|>\n\n def test_fourSum(self):\n \"\"\"Method to test the code with a few sample cases\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class 
Solution:\n def fourSum(self, nums: List[int], target: int) -> List[List[int]]:\n \"\"\":param nums: A list of integers :param target: The target to sum up to\"\"\"\n if nums == []:\n return []\n quadruplets = list()\n nums = sorted(nums)\n for i in range(len(nums) - 3):\n if i > 0 and nums[i] == nums[i - 1]:\n continue\n for j in range(i + 1, len(nums) - 2):\n hash_ = []\n for k in range(j + 1, len(nums)):\n complement = target - (nums[i] + nums[j] + nums[k])\n if complement in hash_:\n if [nums[i], nums[j], nums[k], complement] not in quadruplets:\n quadruplets.append([nums[i], nums[j], nums[k], complement])\n hash_.append(nums[k])\n return quadruplets\n\n def test_fourSum(self):\n \"\"\"Method to test the code with a few sample cases\"\"\"\n def inner_sort(arr):\n return sorted([sorted(x) for x in arr])\n assert inner_sort(self.fourSum([1, 0, -1, 0, -2, 2], 0)) == inner_sort([[-2, -1, 1, 2], [-2, 0, 0, 2], [-1, 0, 0, 1]])\n assert self.fourSum([], 0) == []\n assert inner_sort(self.fourSum([-2, -1, -1, 1, 1, 2, 2], 0)) == inner_sort([[-2, -1, 1, 2], [-1, -1, 1, 1]])\n assert self.fourSum([0, 0, 0, 0], 0) == [[0, 0, 0, 0]]\n assert inner_sort(self.fourSum([-5, 5, 4, -3, 0, 0, 4, -2], 4)) == inner_sort([[-5, 0, 4, 5], [-3, -2, 4, 5]])\n print('All test cases ran successfully')\n", "source": "the_stack_v2_python_sparse", "source_path": "leetcode/4sum.py", "source_repo": "aaakashkumar/competitive_programming", "split": "val", "star_events_count": 0}
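The fourSum body in the record above is quadratic in its bookkeeping as well as cubic in its loops: complement in hash_ scans a list, and [...] not in quadruplets rescans every quadruplet found so far. The variant below keeps the same i/j pair loop but swaps both lists for sets; because nums is sorted and the complement was seen at an earlier index, every emitted tuple is already in nondecreasing order, so a set of tuples deduplicates for free. The function name and the (arbitrary) output order are mine, not the record's.

from typing import List

def four_sum_sets(nums: List[int], target: int) -> List[List[int]]:
    nums = sorted(nums)
    quads = set()                   # O(1) dedupe instead of 'not in quadruplets'
    for i in range(len(nums) - 3):
        for j in range(i + 1, len(nums) - 2):
            seen = set()            # O(1) lookup instead of 'complement in hash_'
            for k in range(j + 1, len(nums)):
                complement = target - (nums[i] + nums[j] + nums[k])
                if complement in seen:
                    # sorted input keeps nums[i] <= nums[j] <= complement <= nums[k]
                    quads.add((nums[i], nums[j], complement, nums[k]))
                seen.add(nums[k])
    return [list(q) for q in quads]

assert sorted(four_sum_sets([1, 0, -1, 0, -2, 2], 0)) == sorted(
    [[-2, -1, 1, 2], [-2, 0, 0, 2], [-1, 0, 0, 1]])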
{"blob_id": "b66e30b3b887047fd2b1691977f8fe1c9b8a6fe9", "bodies": ["self.max_epochs = max_epochs\nself.init_lr = init_lr\nself.power = power", "decay = (1 - epoch / float(self.max_epochs)) ** self.power\nalpha = self.init_lr * decay\nreturn float(alpha)"], "bodies_text": "<|body_start_0|>\n self.max_epochs = max_epochs\n self.init_lr = init_lr\n self.power = power\n<|end_body_0|>\n\n<|body_start_1|>\n decay = (1 - epoch / float(self.max_epochs)) ** self.power\n alpha = self.init_lr * decay\n return float(alpha)\n<|end_body_1|>\n", "class_docstring": "A callable that implements polynomial decay. Used as a callback in keras.", "class_name": "PolynomialDecay", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PolynomialDecay:\n \"\"\"A callable that implements polynomial decay. Used as a callback in keras.\"\"\"\n\n def __init__(self, max_epochs, init_lr, power=1.0):\n \"\"\"Creates a new PolynomialDecay Args: max_epochs: int, maximum number of epochs init_lr: float, initial learning rate which will decay power: float, the power of the decay function\"\"\"\n <|body_0|>\n\n def __call__(self, epoch):\n \"\"\"Calculates the new (smaller) learning rate for the current epoch Args: epoch: int, the epoch for which we need to calculate the LR Returns: float, the new learning rate\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.max_epochs = max_epochs\n self.init_lr = init_lr\n self.power = power\n<|end_body_0|>\n\n<|body_start_1|>\n decay = (1 - epoch / float(self.max_epochs)) ** self.power\n alpha = self.init_lr * decay\n return float(alpha)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000468", "length_bytes": 5135, "license_type": "permissive", "methods": [{"docstring": "Creates a new PolynomialDecay Args: max_epochs: int, maximum number of epochs init_lr: float, initial learning rate which will decay power: float, the power of the decay function", "name": "__init__", "signature": "def __init__(self, max_epochs, init_lr, power=1.0)"}, {"docstring": "Calculates the new (smaller) learning rate for the current epoch Args: epoch: int, the epoch for which we need to calculate the LR Returns: float, the new learning rate", "name": "__call__", "signature": "def __call__(self, epoch)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_019769", "prompt": "Implement the Python class `PolynomialDecay` described below.\n\nClass description:\nA callable that implements polynomial decay. Used as a callback in keras.\n\nMethod signatures and docstrings:\n- def __init__(self, max_epochs, init_lr, power=1.0): Creates a new PolynomialDecay Args: max_epochs: int, maximum number of epochs init_lr: float, initial learning rate which will decay power: float, the power of the decay function\n- def __call__(self, epoch): Calculates the new (smaller) learning rate for the current epoch Args: epoch: int, the epoch for which we need to calculate the LR Returns: float, the new learning rate", "prompted_full_text": "Implement the Python class `PolynomialDecay` described below.\n\nClass description:\nA callable that implements polynomial decay. 
Used as a callback in keras.\n\nMethod signatures and docstrings:\n- def __init__(self, max_epochs, init_lr, power=1.0): Creates a new PolynomialDecay Args: max_epochs: int, maximum number of epochs init_lr: float, initial learning rate which will decay power: float, the power of the decay function\n- def __call__(self, epoch): Calculates the new (smaller) learning rate for the current epoch Args: epoch: int, the epoch for which we need to calculate the LR Returns: float, the new learning rate\n\n<|skeleton|>\nclass PolynomialDecay:\n \"\"\"A callable that implements polynomial decay. Used as a callback in keras.\"\"\"\n\n def __init__(self, max_epochs, init_lr, power=1.0):\n \"\"\"Creates a new PolynomialDecay Args: max_epochs: int, maximum number of epochs init_lr: float, initial learning rate which will decay power: float, the power of the decay function\"\"\"\n <|body_0|>\n\n def __call__(self, epoch):\n \"\"\"Calculates the new (smaller) learning rate for the current epoch Args: epoch: int, the epoch for which we need to calculate the LR Returns: float, the new learning rate\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.max_epochs = max_epochs\n self.init_lr = init_lr\n self.power = power\n<|end_body_0|>\n\n<|body_start_1|>\n decay = (1 - epoch / float(self.max_epochs)) ** self.power\n alpha = self.init_lr * decay\n return float(alpha)\n<|end_body_1|>\n", "revision_id": "20cc6ff59396a2884a748509526a022347a7340c", "skeleton": "<|skeleton|>\nclass PolynomialDecay:\n \"\"\"A callable that implements polynomial decay. Used as a callback in keras.\"\"\"\n\n def __init__(self, max_epochs, init_lr, power=1.0):\n \"\"\"Creates a new PolynomialDecay Args: max_epochs: int, maximum number of epochs init_lr: float, initial learning rate which will decay power: float, the power of the decay function\"\"\"\n <|body_0|>\n\n def __call__(self, epoch):\n \"\"\"Calculates the new (smaller) learning rate for the current epoch Args: epoch: int, the epoch for which we need to calculate the LR Returns: float, the new learning rate\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class PolynomialDecay:\n \"\"\"A callable that implements polynomial decay. Used as a callback in keras.\"\"\"\n\n def __init__(self, max_epochs, init_lr, power=1.0):\n \"\"\"Creates a new PolynomialDecay Args: max_epochs: int, maximum number of epochs init_lr: float, initial learning rate which will decay power: float, the power of the decay function\"\"\"\n self.max_epochs = max_epochs\n self.init_lr = init_lr\n self.power = power\n\n def __call__(self, epoch):\n \"\"\"Calculates the new (smaller) learning rate for the current epoch Args: epoch: int, the epoch for which we need to calculate the LR Returns: float, the new learning rate\"\"\"\n decay = (1 - epoch / float(self.max_epochs)) ** self.power\n alpha = self.init_lr * decay\n return float(alpha)\n", "source": "the_stack_v2_python_sparse", "source_path": "intent_detection/classifier.py", "source_repo": "EdwardBurgin/polyai-models", "split": "val", "star_events_count": 2}
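The two bodies in the PolynomialDecay record assemble into a complete, dependency-free schedule, so the formula alpha(epoch) = init_lr * (1 - epoch / max_epochs) ** power can be sanity-checked numerically. The class below is put together verbatim from the record's bodies; the asserts are mine and use power=1.0, where the decay is linear and reaches zero at epoch == max_epochs. As the class docstring suggests, such a callable would typically be handed to a keras callback such as tf.keras.callbacks.LearningRateScheduler.

class PolynomialDecay:
    def __init__(self, max_epochs, init_lr, power=1.0):
        self.max_epochs = max_epochs
        self.init_lr = init_lr
        self.power = power

    def __call__(self, epoch):
        decay = (1 - epoch / float(self.max_epochs)) ** self.power
        return float(self.init_lr * decay)

sched = PolynomialDecay(max_epochs=100, init_lr=0.01, power=1.0)
assert sched(0) == 0.01                   # full rate at epoch 0
assert abs(sched(50) - 0.005) < 1e-12     # halved midway through a linear schedule
assert sched(100) == 0.0                  # fully decayed at the final epoch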
{"blob_id": "597761546c5737d39f63051a53a4d00b3e2ab8c5", "bodies": ["total_line = 0\nfor line in file:\n total_line += 1\nreturn total_line", "empty_line = 0\nfor line in file:\n if len(line) == 1:\n empty_line += 1\nreturn empty_line", "z_line = 0\nfor line in file:\n if 'z' in line:\n z_line += 1\nreturn z_line", "sum_z = 0\nfor line in file:\n for letter in line:\n if letter == 'z':\n sum_z += 1\nreturn sum_z", "and_string = 0\nfor line in file:\n if 'and' in line:\n and_string += 1\nreturn and_string"], "bodies_text": "<|body_start_0|>\n total_line = 0\n for line in file:\n total_line += 1\n return total_line\n<|end_body_0|>\n\n<|body_start_1|>\n empty_line = 0\n for line in file:\n if len(line) == 1:\n empty_line += 1\n return empty_line\n<|end_body_1|>\n\n<|body_start_2|>\n z_line = 0\n for line in file:\n if 'z' in line:\n z_line += 1\n return z_line\n<|end_body_2|>\n\n<|body_start_3|>\n sum_z = 0\n for line in file:\n for letter in line:\n if letter == 'z':\n sum_z += 1\n return sum_z\n<|end_body_3|>\n\n<|body_start_4|>\n and_string = 0\n for line in file:\n if 'and' in line:\n and_string += 1\n return and_string\n<|end_body_4|>\n", "class_docstring": "", "class_name": "FileStatistic", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FileStatistic:\n\n def total_lines(file):\n \"\"\"Counting the number of all lines in a file.\"\"\"\n <|body_0|>\n\n def number_of_empty_lines(file):\n \"\"\"Counting empty lines.\"\"\"\n <|body_1|>\n\n def lines_with_z(file):\n \"\"\"Output the number of lines with 'z'.\"\"\"\n <|body_2|>\n\n def z_count(file):\n \"\"\"Counting the number of all occurrences of 'z'.\"\"\"\n <|body_3|>\n\n def lines_with_and(file):\n \"\"\"Counting the number of lines containing 'and'.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n total_line = 0\n for line in file:\n total_line += 1\n return total_line\n<|end_body_0|>\n\n<|body_start_1|>\n empty_line = 0\n for line in file:\n if len(line) == 1:\n empty_line += 1\n return empty_line\n<|end_body_1|>\n\n<|body_start_2|>\n z_line = 0\n for line in file:\n if 'z' in line:\n z_line += 1\n return z_line\n<|end_body_2|>\n\n<|body_start_3|>\n sum_z = 0\n for line in file:\n for letter in line:\n if letter == 'z':\n sum_z += 1\n return sum_z\n<|end_body_3|>\n\n<|body_start_4|>\n and_string = 0\n for line in file:\n if 'and' in line:\n and_string += 1\n return and_string\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000469", "length_bytes": 2018, "license_type": "no_license", "methods": [{"docstring": "Counting the number of all lines in a file.", "name": "total_lines", "signature": "def total_lines(file)"}, {"docstring": "Counting empty lines.", "name": "number_of_empty_lines", "signature": "def number_of_empty_lines(file)"}, {"docstring": "Output the number of lines with 'z'.", "name": "lines_with_z", "signature": "def lines_with_z(file)"}, {"docstring": "Counting the number of all occurrences of 'z'.", "name": "z_count", "signature": "def z_count(file)"}, {"docstring": "Counting the number of lines containing 'and'.", "name": "lines_with_and", "signature": "def lines_with_and(file)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_018136", "prompt": "Implement the Python class `FileStatistic` described below.\n\nClass description:\nImplement the FileStatistic class.\n\nMethod signatures and docstrings:\n- def total_lines(file): Counting the number of all lines in a file.\n- def number_of_empty_lines(file): 
Counting empty lines.\n- def lines_with_z(file): Output the number of lines with 'z'.\n- def z_count(file): Counting the number of all occurrences of 'z'.\n- def lines_with_and(file): Counting the number of lines containing 'and'.", "prompted_full_text": "Implement the Python class `FileStatistic` described below.\n\nClass description:\nImplement the FileStatistic class.\n\nMethod signatures and docstrings:\n- def total_lines(file): Counting the number of all lines in a file.\n- def number_of_empty_lines(file): Counting empty lines.\n- def lines_with_z(file): Output the number of lines with 'z'.\n- def z_count(file): Counting the number of all occurrences of 'z'.\n- def lines_with_and(file): Counting the number of lines containing 'and'.\n\n<|skeleton|>\nclass FileStatistic:\n\n def total_lines(file):\n \"\"\"Counting the number of all lines in a file.\"\"\"\n <|body_0|>\n\n def number_of_empty_lines(file):\n \"\"\"Counting empty lines.\"\"\"\n <|body_1|>\n\n def lines_with_z(file):\n \"\"\"Output the number of lines with 'z'.\"\"\"\n <|body_2|>\n\n def z_count(file):\n \"\"\"Counting the number of all occurrences of 'z'.\"\"\"\n <|body_3|>\n\n def lines_with_and(file):\n \"\"\"Counting the number of lines containing 'and'.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n total_line = 0\n for line in file:\n total_line += 1\n return total_line\n<|end_body_0|>\n\n<|body_start_1|>\n empty_line = 0\n for line in file:\n if len(line) == 1:\n empty_line += 1\n return empty_line\n<|end_body_1|>\n\n<|body_start_2|>\n z_line = 0\n for line in file:\n if 'z' in line:\n z_line += 1\n return z_line\n<|end_body_2|>\n\n<|body_start_3|>\n sum_z = 0\n for line in file:\n for letter in line:\n if letter == 'z':\n sum_z += 1\n return sum_z\n<|end_body_3|>\n\n<|body_start_4|>\n and_string = 0\n for line in file:\n if 'and' in line:\n and_string += 1\n return and_string\n<|end_body_4|>\n", "revision_id": "3a500c9d55fecf4032b5faf59a1cbecf64592e9a", "skeleton": "<|skeleton|>\nclass FileStatistic:\n\n def total_lines(file):\n \"\"\"Counting the number of all lines in a file.\"\"\"\n <|body_0|>\n\n def number_of_empty_lines(file):\n \"\"\"Counting empty lines.\"\"\"\n <|body_1|>\n\n def lines_with_z(file):\n \"\"\"Output the number of lines with 'z'.\"\"\"\n <|body_2|>\n\n def z_count(file):\n \"\"\"Counting the number of all occurrences of 'z'.\"\"\"\n <|body_3|>\n\n def lines_with_and(file):\n \"\"\"Counting the number of lines containing 'and'.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class FileStatistic:\n def total_lines(file):\n \"\"\"Counting the number of all lines in a file.\"\"\"\n total_line = 0\n for line in file:\n total_line += 1\n return total_line\n\n def number_of_empty_lines(file):\n \"\"\"Counting empty lines.\"\"\"\n empty_line = 0\n for line in file:\n if len(line) == 1:\n empty_line += 1\n return empty_line\n\n def lines_with_z(file):\n \"\"\"Output the number of lines with 'z'.\"\"\"\n z_line = 0\n for line in file:\n if 'z' in line:\n z_line += 1\n return z_line\n\n def z_count(file):\n \"\"\"Counting the number of all occurrences of 'z'.\"\"\"\n sum_z = 0\n for line in file:\n for letter in line:\n if letter == 'z':\n sum_z += 1\n return sum_z\n\n def lines_with_and(file):\n \"\"\"Counting the number of lines containing 'and'.\"\"\"\n and_string = 0\n for line in file:\n if 'and' in line:\n and_string += 1\n return 
and_string\n", "source": "the_stack_v2_python_sparse", "source_path": "python02/python02.py", "source_repo": "maksimok93/Dp-189", "split": "val", "star_events_count": 0}
{"blob_id": "0582fe1d0c3100afd8d4baa29f0fbca1dbf47097", "bodies": ["super(PreAggGraphConv, self).__init__(activity_regularizer=activity_regularizer, **kwargs)\nself.units = int(units)\n\ndef normalize(x):\n \"\"\"Row-normalization of the matrix.\"\"\"\n rowsum = tf.math.reduce_sum(x, axis=1)\n rowsum = tf.math.maximum(rowsum, 1e-12)\n rowinv = rowsum ** (-1)\n return tf.matmul(tf.linalg.diag(rowinv), x)\nself.affinity_matrix = tf.convert_to_tensor(affinity_matrix, dtype=tf.dtypes.float32)\nself.affinity_matrix = normalize(self.affinity_matrix)\nself.activation = tf.keras.activations.get(activation)\nself.use_bias = use_bias\nself.kernel_initializer = tf.keras.initializers.get(kernel_initializer)\nself.bias_initializer = tf.keras.initializers.get(bias_initializer)\nself.kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)\nself.bias_regularizer = tf.keras.regularizers.get(bias_regularizer)\nself.kernel_constraint = tf.keras.constraints.get(kernel_constraint)\nself.bias_constraint = tf.keras.constraints.get(bias_constraint)", "last_dim = input_shape[-1]\nself.kernel = self.add_weight('kernel', shape=(2, self.affinity_matrix.shape[0], last_dim, self.units), initializer=self.kernel_initializer, constraint=self.kernel_constraint, trainable=True)\nif self.use_bias:\n self.bias = self.add_weight('bias', shape=(self.units,), initializer=self.bias_initializer, constraint=self.bias_constraint, trainable=True)\nelse:\n self.bias = None\nself.input_spec = layers.InputSpec(min_ndim=3, axes={-1: last_dim})\nself.built = True", "affinity_matrix = tf.expand_dims(self.affinity_matrix, axis=1)\neye_matrix = tf.eye(affinity_matrix.shape[0], dtype=tf.dtypes.float32)\neye_matrix = tf.expand_dims(eye_matrix, axis=1)\nx = tf.expand_dims(inputs, axis=-3)\neye_outputs = tf.matmul(x, self.kernel[0])\neye_outputs = tf.matmul(affinity_matrix * eye_matrix, eye_outputs)\neye_outputs = tf.squeeze(eye_outputs, axis=-2)\nnoneye_outputs = tf.matmul(x, self.kernel[1])\nnoneye_outputs = tf.matmul(affinity_matrix * (1.0 - eye_matrix), noneye_outputs)\nnoneye_outputs = tf.squeeze(noneye_outputs, axis=-2)\noutputs = eye_outputs + noneye_outputs\nif self.use_bias:\n outputs = tf.nn.bias_add(outputs, self.bias)\nif self.activation:\n outputs = self.activation(outputs)\nreturn outputs"], "bodies_text": "<|body_start_0|>\n super(PreAggGraphConv, self).__init__(activity_regularizer=activity_regularizer, **kwargs)\n self.units = int(units)\n\n def normalize(x):\n \"\"\"Row-normalization of the matrix.\"\"\"\n rowsum = tf.math.reduce_sum(x, axis=1)\n rowsum = tf.math.maximum(rowsum, 1e-12)\n rowinv = rowsum ** (-1)\n return tf.matmul(tf.linalg.diag(rowinv), x)\n self.affinity_matrix = tf.convert_to_tensor(affinity_matrix, dtype=tf.dtypes.float32)\n self.affinity_matrix = normalize(self.affinity_matrix)\n self.activation = tf.keras.activations.get(activation)\n self.use_bias = use_bias\n self.kernel_initializer = tf.keras.initializers.get(kernel_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)\n self.bias_regularizer = tf.keras.regularizers.get(bias_regularizer)\n self.kernel_constraint = tf.keras.constraints.get(kernel_constraint)\n self.bias_constraint = tf.keras.constraints.get(bias_constraint)\n<|end_body_0|>\n\n<|body_start_1|>\n last_dim = input_shape[-1]\n self.kernel = self.add_weight('kernel', shape=(2, self.affinity_matrix.shape[0], last_dim, self.units), initializer=self.kernel_initializer, 
constraint=self.kernel_constraint, trainable=True)\n if self.use_bias:\n self.bias = self.add_weight('bias', shape=(self.units,), initializer=self.bias_initializer, constraint=self.bias_constraint, trainable=True)\n else:\n self.bias = None\n self.input_spec = layers.InputSpec(min_ndim=3, axes={-1: last_dim})\n self.built = True\n<|end_body_1|>\n\n<|body_start_2|>\n affinity_matrix = tf.expand_dims(self.affinity_matrix, axis=1)\n eye_matrix = tf.eye(affinity_matrix.shape[0], dtype=tf.dtypes.float32)\n eye_matrix = tf.expand_dims(eye_matrix, axis=1)\n x = tf.expand_dims(inputs, axis=-3)\n eye_outputs = tf.matmul(x, self.kernel[0])\n eye_outputs = tf.matmul(affinity_matrix * eye_matrix, eye_outputs)\n eye_outputs = tf.squeeze(eye_outputs, axis=-2)\n noneye_outputs = tf.matmul(x, self.kernel[1])\n noneye_outputs = tf.matmul(affinity_matrix * (1.0 - eye_matrix), noneye_outputs)\n noneye_outputs = tf.squeeze(noneye_outputs, axis=-2)\n outputs = eye_outputs + noneye_outputs\n if self.use_bias:\n outputs = tf.nn.bias_add(outputs, self.bias)\n if self.activation:\n outputs = self.activation(outputs)\n return outputs\n<|end_body_2|>\n", "class_docstring": "Implements Pre-Aggregation Graph Convolution. Reference: Liu et al. A Comprehensive Study of Weight Sharing in Graph Networks for 3D Human Pose Estimation. http://www.ecva.net/papers/eccv_2020/papers_ECCV/papers/123550324.pdf.", "class_name": "PreAggGraphConv", "detected_licenses": ["Apache-2.0", "CC-BY-4.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PreAggGraphConv:\n \"\"\"Implements Pre-Aggregation Graph Convolution. Reference: Liu et al. A Comprehensive Study of Weight Sharing in Graph Networks for 3D Human Pose Estimation. http://www.ecva.net/papers/eccv_2020/papers_ECCV/papers/123550324.pdf.\"\"\"\n\n def __init__(self, units, affinity_matrix, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs):\n \"\"\"Initializer. Args: units: An integer for the output dimension of the layer. affinity_matrix: A tensor for the keypoint affinity matrix. activation: Activation function to use. use_bias: A boolean for whether the layer uses a bias vector. kernel_initializer: Initializer for the kernel weights matrix. bias_initializer: Initializer for the bias vector. kernel_regularizer: Regularizer function for the kernel weights matrix. bias_regularizer: Regularizer function for the bias vector. activity_regularizer: Regularizer function for the output of the layer. kernel_constraint: Constraint function for the kernel weights matrix. bias_constraint: Constraint function for the bias vector. **kwargs: A dict\"\"\"\n <|body_0|>\n\n def build(self, input_shape):\n \"\"\"Builds the layer. Args: input_shape: A TensorShape for the shape of the input tensor.\"\"\"\n <|body_1|>\n\n def call(self, inputs, training=False):\n \"\"\"Computes a forward pass. Args: inputs: An input tensor. training: A boolean indicating whether the call is for training or not. 
Returns: An output tensor.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(PreAggGraphConv, self).__init__(activity_regularizer=activity_regularizer, **kwargs)\n self.units = int(units)\n\n def normalize(x):\n \"\"\"Row-normalization of the matrix.\"\"\"\n rowsum = tf.math.reduce_sum(x, axis=1)\n rowsum = tf.math.maximum(rowsum, 1e-12)\n rowinv = rowsum ** (-1)\n return tf.matmul(tf.linalg.diag(rowinv), x)\n self.affinity_matrix = tf.convert_to_tensor(affinity_matrix, dtype=tf.dtypes.float32)\n self.affinity_matrix = normalize(self.affinity_matrix)\n self.activation = tf.keras.activations.get(activation)\n self.use_bias = use_bias\n self.kernel_initializer = tf.keras.initializers.get(kernel_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)\n self.bias_regularizer = tf.keras.regularizers.get(bias_regularizer)\n self.kernel_constraint = tf.keras.constraints.get(kernel_constraint)\n self.bias_constraint = tf.keras.constraints.get(bias_constraint)\n<|end_body_0|>\n\n<|body_start_1|>\n last_dim = input_shape[-1]\n self.kernel = self.add_weight('kernel', shape=(2, self.affinity_matrix.shape[0], last_dim, self.units), initializer=self.kernel_initializer, constraint=self.kernel_constraint, trainable=True)\n if self.use_bias:\n self.bias = self.add_weight('bias', shape=(self.units,), initializer=self.bias_initializer, constraint=self.bias_constraint, trainable=True)\n else:\n self.bias = None\n self.input_spec = layers.InputSpec(min_ndim=3, axes={-1: last_dim})\n self.built = True\n<|end_body_1|>\n\n<|body_start_2|>\n affinity_matrix = tf.expand_dims(self.affinity_matrix, axis=1)\n eye_matrix = tf.eye(affinity_matrix.shape[0], dtype=tf.dtypes.float32)\n eye_matrix = tf.expand_dims(eye_matrix, axis=1)\n x = tf.expand_dims(inputs, axis=-3)\n eye_outputs = tf.matmul(x, self.kernel[0])\n eye_outputs = tf.matmul(affinity_matrix * eye_matrix, eye_outputs)\n eye_outputs = tf.squeeze(eye_outputs, axis=-2)\n noneye_outputs = tf.matmul(x, self.kernel[1])\n noneye_outputs = tf.matmul(affinity_matrix * (1.0 - eye_matrix), noneye_outputs)\n noneye_outputs = tf.squeeze(noneye_outputs, axis=-2)\n outputs = eye_outputs + noneye_outputs\n if self.use_bias:\n outputs = tf.nn.bias_add(outputs, self.bias)\n if self.activation:\n outputs = self.activation(outputs)\n return outputs\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000470", "length_bytes": 30548, "license_type": "permissive", "methods": [{"docstring": "Initializer. Args: units: An integer for the output dimension of the layer. affinity_matrix: A tensor for the keypoint affinity matrix. activation: Activation function to use. use_bias: A boolean for whether the layer uses a bias vector. kernel_initializer: Initializer for the kernel weights matrix. bias_initializer: Initializer for the bias vector. kernel_regularizer: Regularizer function for the kernel weights matrix. bias_regularizer: Regularizer function for the bias vector. activity_regularizer: Regularizer function for the output of the layer. kernel_constraint: Constraint function for the kernel weights matrix. bias_constraint: Constraint function for the bias vector. 
**kwargs: A dict", "name": "__init__", "signature": "def __init__(self, units, affinity_matrix, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs)"}, {"docstring": "Builds the layer. Args: input_shape: A TensorShape for the shape of the input tensor.", "name": "build", "signature": "def build(self, input_shape)"}, {"docstring": "Computes a forward pass. Args: inputs: An input tensor. training: A boolean indicating whether the call is for training or not. Returns: An output tensor.", "name": "call", "signature": "def call(self, inputs, training=False)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_010561", "prompt": "Implement the Python class `PreAggGraphConv` described below.\n\nClass description:\nImplements Pre-Aggregation Graph Convolution. Reference: Liu et al. A Comprehensive Study of Weight Sharing in Graph Networks for 3D Human Pose Estimation. http://www.ecva.net/papers/eccv_2020/papers_ECCV/papers/123550324.pdf.\n\nMethod signatures and docstrings:\n- def __init__(self, units, affinity_matrix, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs): Initializer. Args: units: An integer for the output dimension of the layer. affinity_matrix: A tensor for the keypoint affinity matrix. activation: Activation function to use. use_bias: A boolean for whether the layer uses a bias vector. kernel_initializer: Initializer for the kernel weights matrix. bias_initializer: Initializer for the bias vector. kernel_regularizer: Regularizer function for the kernel weights matrix. bias_regularizer: Regularizer function for the bias vector. activity_regularizer: Regularizer function for the output of the layer. kernel_constraint: Constraint function for the kernel weights matrix. bias_constraint: Constraint function for the bias vector. **kwargs: A dict\n- def build(self, input_shape): Builds the layer. Args: input_shape: A TensorShape for the shape of the input tensor.\n- def call(self, inputs, training=False): Computes a forward pass. Args: inputs: An input tensor. training: A boolean indicating whether the call is for training or not. Returns: An output tensor.", "prompted_full_text": "Implement the Python class `PreAggGraphConv` described below.\n\nClass description:\nImplements Pre-Aggregation Graph Convolution. Reference: Liu et al. A Comprehensive Study of Weight Sharing in Graph Networks for 3D Human Pose Estimation. http://www.ecva.net/papers/eccv_2020/papers_ECCV/papers/123550324.pdf.\n\nMethod signatures and docstrings:\n- def __init__(self, units, affinity_matrix, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs): Initializer. Args: units: An integer for the output dimension of the layer. affinity_matrix: A tensor for the keypoint affinity matrix. activation: Activation function to use. use_bias: A boolean for whether the layer uses a bias vector. kernel_initializer: Initializer for the kernel weights matrix. bias_initializer: Initializer for the bias vector. kernel_regularizer: Regularizer function for the kernel weights matrix. 
bias_regularizer: Regularizer function for the bias vector. activity_regularizer: Regularizer function for the output of the layer. kernel_constraint: Constraint function for the kernel weights matrix. bias_constraint: Constraint function for the bias vector. **kwargs: A dict\n- def build(self, input_shape): Builds the layer. Args: input_shape: A TensorShape for the shape of the input tensor.\n- def call(self, inputs, training=False): Computes a forward pass. Args: inputs: An input tensor. training: A boolean indicating whether the call is for training or not. Returns: An output tensor.\n\n<|skeleton|>\nclass PreAggGraphConv:\n \"\"\"Implements Pre-Aggregation Graph Convolution. Reference: Liu et al. A Comprehensive Study of Weight Sharing in Graph Networks for 3D Human Pose Estimation. http://www.ecva.net/papers/eccv_2020/papers_ECCV/papers/123550324.pdf.\"\"\"\n\n def __init__(self, units, affinity_matrix, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs):\n \"\"\"Initializer. Args: units: An integer for the output dimension of the layer. affinity_matrix: A tensor for the keypoint affinity matrix. activation: Activation function to use. use_bias: A boolean for whether the layer uses a bias vector. kernel_initializer: Initializer for the kernel weights matrix. bias_initializer: Initializer for the bias vector. kernel_regularizer: Regularizer function for the kernel weights matrix. bias_regularizer: Regularizer function for the bias vector. activity_regularizer: Regularizer function for the output of the layer. kernel_constraint: Constraint function for the kernel weights matrix. bias_constraint: Constraint function for the bias vector. **kwargs: A dict\"\"\"\n <|body_0|>\n\n def build(self, input_shape):\n \"\"\"Builds the layer. Args: input_shape: A TensorShape for the shape of the input tensor.\"\"\"\n <|body_1|>\n\n def call(self, inputs, training=False):\n \"\"\"Computes a forward pass. Args: inputs: An input tensor. training: A boolean indicating whether the call is for training or not. 
Returns: An output tensor.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(PreAggGraphConv, self).__init__(activity_regularizer=activity_regularizer, **kwargs)\n self.units = int(units)\n\n def normalize(x):\n \"\"\"Row-normalization of the matrix.\"\"\"\n rowsum = tf.math.reduce_sum(x, axis=1)\n rowsum = tf.math.maximum(rowsum, 1e-12)\n rowinv = rowsum ** (-1)\n return tf.matmul(tf.linalg.diag(rowinv), x)\n self.affinity_matrix = tf.convert_to_tensor(affinity_matrix, dtype=tf.dtypes.float32)\n self.affinity_matrix = normalize(self.affinity_matrix)\n self.activation = tf.keras.activations.get(activation)\n self.use_bias = use_bias\n self.kernel_initializer = tf.keras.initializers.get(kernel_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)\n self.bias_regularizer = tf.keras.regularizers.get(bias_regularizer)\n self.kernel_constraint = tf.keras.constraints.get(kernel_constraint)\n self.bias_constraint = tf.keras.constraints.get(bias_constraint)\n<|end_body_0|>\n\n<|body_start_1|>\n last_dim = input_shape[-1]\n self.kernel = self.add_weight('kernel', shape=(2, self.affinity_matrix.shape[0], last_dim, self.units), initializer=self.kernel_initializer, constraint=self.kernel_constraint, trainable=True)\n if self.use_bias:\n self.bias = self.add_weight('bias', shape=(self.units,), initializer=self.bias_initializer, constraint=self.bias_constraint, trainable=True)\n else:\n self.bias = None\n self.input_spec = layers.InputSpec(min_ndim=3, axes={-1: last_dim})\n self.built = True\n<|end_body_1|>\n\n<|body_start_2|>\n affinity_matrix = tf.expand_dims(self.affinity_matrix, axis=1)\n eye_matrix = tf.eye(affinity_matrix.shape[0], dtype=tf.dtypes.float32)\n eye_matrix = tf.expand_dims(eye_matrix, axis=1)\n x = tf.expand_dims(inputs, axis=-3)\n eye_outputs = tf.matmul(x, self.kernel[0])\n eye_outputs = tf.matmul(affinity_matrix * eye_matrix, eye_outputs)\n eye_outputs = tf.squeeze(eye_outputs, axis=-2)\n noneye_outputs = tf.matmul(x, self.kernel[1])\n noneye_outputs = tf.matmul(affinity_matrix * (1.0 - eye_matrix), noneye_outputs)\n noneye_outputs = tf.squeeze(noneye_outputs, axis=-2)\n outputs = eye_outputs + noneye_outputs\n if self.use_bias:\n outputs = tf.nn.bias_add(outputs, self.bias)\n if self.activation:\n outputs = self.activation(outputs)\n return outputs\n<|end_body_2|>\n", "revision_id": "5573d9c5822f4e866b6692769963ae819cb3f10d", "skeleton": "<|skeleton|>\nclass PreAggGraphConv:\n \"\"\"Implements Pre-Aggregation Graph Convolution. Reference: Liu et al. A Comprehensive Study of Weight Sharing in Graph Networks for 3D Human Pose Estimation. http://www.ecva.net/papers/eccv_2020/papers_ECCV/papers/123550324.pdf.\"\"\"\n\n def __init__(self, units, affinity_matrix, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs):\n \"\"\"Initializer. Args: units: An integer for the output dimension of the layer. affinity_matrix: A tensor for the keypoint affinity matrix. activation: Activation function to use. use_bias: A boolean for whether the layer uses a bias vector. kernel_initializer: Initializer for the kernel weights matrix. bias_initializer: Initializer for the bias vector. kernel_regularizer: Regularizer function for the kernel weights matrix. bias_regularizer: Regularizer function for the bias vector. 
activity_regularizer: Regularizer function for the output of the layer. kernel_constraint: Constraint function for the kernel weights matrix. bias_constraint: Constraint function for the bias vector. **kwargs: A dict\"\"\"\n <|body_0|>\n\n def build(self, input_shape):\n \"\"\"Builds the layer. Args: input_shape: A TensorShape for the shape of the input tensor.\"\"\"\n <|body_1|>\n\n def call(self, inputs, training=False):\n \"\"\"Computes a forward pass. Args: inputs: An input tensor. training: A boolean indicating whether the call is for training or not. Returns: An output tensor.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class PreAggGraphConv:\n \"\"\"Implements Pre-Aggregation Graph Convolution. Reference: Liu et al. A Comprehensive Study of Weight Sharing in Graph Networks for 3D Human Pose Estimation. http://www.ecva.net/papers/eccv_2020/papers_ECCV/papers/123550324.pdf.\"\"\"\n\n def __init__(self, units, affinity_matrix, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs):\n \"\"\"Initializer. Args: units: An integer for the output dimension of the layer. affinity_matrix: A tensor for the keypoint affinity matrix. activation: Activation function to use. use_bias: A boolean for whether the layer uses a bias vector. kernel_initializer: Initializer for the kernel weights matrix. bias_initializer: Initializer for the bias vector. kernel_regularizer: Regularizer function for the kernel weights matrix. bias_regularizer: Regularizer function for the bias vector. activity_regularizer: Regularizer function for the output of the layer. kernel_constraint: Constraint function for the kernel weights matrix. bias_constraint: Constraint function for the bias vector. **kwargs: A dict\"\"\"\n super(PreAggGraphConv, self).__init__(activity_regularizer=activity_regularizer, **kwargs)\n self.units = int(units)\n\n def normalize(x):\n \"\"\"Row-normalization of the matrix.\"\"\"\n rowsum = tf.math.reduce_sum(x, axis=1)\n rowsum = tf.math.maximum(rowsum, 1e-12)\n rowinv = rowsum ** (-1)\n return tf.matmul(tf.linalg.diag(rowinv), x)\n self.affinity_matrix = tf.convert_to_tensor(affinity_matrix, dtype=tf.dtypes.float32)\n self.affinity_matrix = normalize(self.affinity_matrix)\n self.activation = tf.keras.activations.get(activation)\n self.use_bias = use_bias\n self.kernel_initializer = tf.keras.initializers.get(kernel_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n self.kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)\n self.bias_regularizer = tf.keras.regularizers.get(bias_regularizer)\n self.kernel_constraint = tf.keras.constraints.get(kernel_constraint)\n self.bias_constraint = tf.keras.constraints.get(bias_constraint)\n\n def build(self, input_shape):\n \"\"\"Builds the layer. 
Args: input_shape: A TensorShape for the shape of the input tensor.\"\"\"\n last_dim = input_shape[-1]\n self.kernel = self.add_weight('kernel', shape=(2, self.affinity_matrix.shape[0], last_dim, self.units), initializer=self.kernel_initializer, constraint=self.kernel_constraint, trainable=True)\n if self.use_bias:\n self.bias = self.add_weight('bias', shape=(self.units,), initializer=self.bias_initializer, constraint=self.bias_constraint, trainable=True)\n else:\n self.bias = None\n self.input_spec = layers.InputSpec(min_ndim=3, axes={-1: last_dim})\n self.built = True\n\n def call(self, inputs, training=False):\n \"\"\"Computes a forward pass. Args: inputs: An input tensor. training: A boolean indicating whether the call is for training or not. Returns: An output tensor.\"\"\"\n affinity_matrix = tf.expand_dims(self.affinity_matrix, axis=1)\n eye_matrix = tf.eye(affinity_matrix.shape[0], dtype=tf.dtypes.float32)\n eye_matrix = tf.expand_dims(eye_matrix, axis=1)\n x = tf.expand_dims(inputs, axis=-3)\n eye_outputs = tf.matmul(x, self.kernel[0])\n eye_outputs = tf.matmul(affinity_matrix * eye_matrix, eye_outputs)\n eye_outputs = tf.squeeze(eye_outputs, axis=-2)\n noneye_outputs = tf.matmul(x, self.kernel[1])\n noneye_outputs = tf.matmul(affinity_matrix * (1.0 - eye_matrix), noneye_outputs)\n noneye_outputs = tf.squeeze(noneye_outputs, axis=-2)\n outputs = eye_outputs + noneye_outputs\n if self.use_bias:\n outputs = tf.nn.bias_add(outputs, self.bias)\n if self.activation:\n outputs = self.activation(outputs)\n return outputs\n", "source": "the_stack_v2_python_sparse", "source_path": "poem/cv_mim/models.py", "source_repo": "Jimmy-INL/google-research", "split": "val", "star_events_count": 1}
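The PreAggGraphConv record above buries its one small algorithmic kernel, the row-normalization of the affinity matrix, inside escaped JSON strings. The sketch below restates that helper as self-contained, runnable Python so it can be read directly; the toy 3x3 affinity matrix is illustrative only and is not taken from the record.

import tensorflow as tf

def normalize(x):
    # Row-normalize: divide each row by its sum, clamping the sum at 1e-12
    # so an all-zero row does not divide by zero (mirrors the record's helper).
    rowsum = tf.math.maximum(tf.math.reduce_sum(x, axis=1), 1e-12)
    return tf.matmul(tf.linalg.diag(1.0 / rowsum), x)

affinity = tf.constant([[1.0, 1.0, 0.0],
                        [1.0, 1.0, 1.0],
                        [0.0, 1.0, 1.0]])
print(normalize(affinity))  # each row now sums to 1

Note that the record's skeleton declares class PreAggGraphConv: with no base class, yet its bodies call self.add_weight and layers.InputSpec, so the code implicitly assumes a tf.keras.layers.Layer subclass with layers imported from tf.keras.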
{"blob_id": "5b037718d032f9744c42456006077254aba709d9", "bodies": ["cases = [{'expect': 'PositiveSmallIntegerField', 'value': '0'}, {'expect': 'SmallIntegerField', 'value': '-2'}, {'expect': 'PositiveIntegerField', 'value': '1000000'}, {'expect': 'IntegerField', 'value': '-1000000'}, {'expect': 'DecimalField', 'value': '1.1'}]\nfor case in cases:\n factory = DataTypeFactory(case['value']).produce()\n self.assertEqual(factory, case['expect'])", "cases = [{'expect': 'SmallIntegerField', 'prev': 'PositiveSmallIntegerField', 'value': '-1'}, {'expect': 'IntegerField', 'prev': 'PositiveIntegerField', 'value': '-1'}]\nfor case in cases:\n factory = DataTypeFactory(case['value'], case['prev']).produce()\n self.assertEqual(factory, case['expect'])"], "bodies_text": "<|body_start_0|>\n cases = [{'expect': 'PositiveSmallIntegerField', 'value': '0'}, {'expect': 'SmallIntegerField', 'value': '-2'}, {'expect': 'PositiveIntegerField', 'value': '1000000'}, {'expect': 'IntegerField', 'value': '-1000000'}, {'expect': 'DecimalField', 'value': '1.1'}]\n for case in cases:\n factory = DataTypeFactory(case['value']).produce()\n self.assertEqual(factory, case['expect'])\n<|end_body_0|>\n\n<|body_start_1|>\n cases = [{'expect': 'SmallIntegerField', 'prev': 'PositiveSmallIntegerField', 'value': '-1'}, {'expect': 'IntegerField', 'prev': 'PositiveIntegerField', 'value': '-1'}]\n for case in cases:\n factory = DataTypeFactory(case['value'], case['prev']).produce()\n self.assertEqual(factory, case['expect'])\n<|end_body_1|>\n", "class_docstring": "", "class_name": "DataTypeFactoryTest", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DataTypeFactoryTest:\n\n def test_return_valid_class(self):\n \"\"\"Do we set datatype\"\"\"\n <|body_0|>\n\n def test_return_valid_class_for_prev(self):\n \"\"\"Do we set datatype if there is previous datatype\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cases = [{'expect': 'PositiveSmallIntegerField', 'value': '0'}, {'expect': 'SmallIntegerField', 'value': '-2'}, {'expect': 'PositiveIntegerField', 'value': '1000000'}, {'expect': 'IntegerField', 'value': '-1000000'}, {'expect': 'DecimalField', 'value': '1.1'}]\n for case in cases:\n factory = DataTypeFactory(case['value']).produce()\n self.assertEqual(factory, case['expect'])\n<|end_body_0|>\n\n<|body_start_1|>\n cases = [{'expect': 'SmallIntegerField', 'prev': 'PositiveSmallIntegerField', 'value': '-1'}, {'expect': 'IntegerField', 'prev': 'PositiveIntegerField', 'value': '-1'}]\n for case in cases:\n factory = DataTypeFactory(case['value'], case['prev']).produce()\n self.assertEqual(factory, case['expect'])\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000471", "length_bytes": 1217, "license_type": "no_license", "methods": [{"docstring": "Do we set datatype", "name": "test_return_valid_class", "signature": "def test_return_valid_class(self)"}, {"docstring": "Do we set datatype if there is previous datatype", "name": "test_return_valid_class_for_prev", "signature": "def test_return_valid_class_for_prev(self)"}], "n_methods": 2, "prompt": "Implement the Python class `DataTypeFactoryTest` described below.\n\nClass description:\nImplement the DataTypeFactoryTest class.\n\nMethod signatures and docstrings:\n- def test_return_valid_class(self): Do we set datatype\n- def test_return_valid_class_for_prev(self): Do we set datatype if there is previous datatype", "prompted_full_text": "Implement the Python class `DataTypeFactoryTest` described 
below.\n\nClass description:\nImplement the DataTypeFactoryTest class.\n\nMethod signatures and docstrings:\n- def test_return_valid_class(self): Do we set datatype\n- def test_return_valid_class_for_prev(self): Do we set datatype if there is previous datatype\n\n<|skeleton|>\nclass DataTypeFactoryTest:\n\n def test_return_valid_class(self):\n \"\"\"Do we set datatype\"\"\"\n <|body_0|>\n\n def test_return_valid_class_for_prev(self):\n \"\"\"Do we set datatype if there is previous datatype\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cases = [{'expect': 'PositiveSmallIntegerField', 'value': '0'}, {'expect': 'SmallIntegerField', 'value': '-2'}, {'expect': 'PositiveIntegerField', 'value': '1000000'}, {'expect': 'IntegerField', 'value': '-1000000'}, {'expect': 'DecimalField', 'value': '1.1'}]\n for case in cases:\n factory = DataTypeFactory(case['value']).produce()\n self.assertEqual(factory, case['expect'])\n<|end_body_0|>\n\n<|body_start_1|>\n cases = [{'expect': 'SmallIntegerField', 'prev': 'PositiveSmallIntegerField', 'value': '-1'}, {'expect': 'IntegerField', 'prev': 'PositiveIntegerField', 'value': '-1'}]\n for case in cases:\n factory = DataTypeFactory(case['value'], case['prev']).produce()\n self.assertEqual(factory, case['expect'])\n<|end_body_1|>\n", "revision_id": "f59a5b3e70162a272d6d74e2a61f26f482571503", "skeleton": "<|skeleton|>\nclass DataTypeFactoryTest:\n\n def test_return_valid_class(self):\n \"\"\"Do we set datatype\"\"\"\n <|body_0|>\n\n def test_return_valid_class_for_prev(self):\n \"\"\"Do we set datatype if there is previous datatype\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DataTypeFactoryTest:\n def test_return_valid_class(self):\n \"\"\"Do we set datatype\"\"\"\n cases = [{'expect': 'PositiveSmallIntegerField', 'value': '0'}, {'expect': 'SmallIntegerField', 'value': '-2'}, {'expect': 'PositiveIntegerField', 'value': '1000000'}, {'expect': 'IntegerField', 'value': '-1000000'}, {'expect': 'DecimalField', 'value': '1.1'}]\n for case in cases:\n factory = DataTypeFactory(case['value']).produce()\n self.assertEqual(factory, case['expect'])\n\n def test_return_valid_class_for_prev(self):\n \"\"\"Do we set datatype if there is previous datatype\"\"\"\n cases = [{'expect': 'SmallIntegerField', 'prev': 'PositiveSmallIntegerField', 'value': '-1'}, {'expect': 'IntegerField', 'prev': 'PositiveIntegerField', 'value': '-1'}]\n for case in cases:\n factory = DataTypeFactory(case['value'], case['prev']).produce()\n self.assertEqual(factory, case['expect'])\n", "source": "the_stack_v2_python_sparse", "source_path": "datatype/field/tests.py", "source_repo": "kiote/housing_survey", "split": "val", "star_events_count": 0}
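The DataTypeFactoryTest record exercises a DataTypeFactory(value, prev=None).produce() that the record itself does not contain. The following is a hypothetical sketch that satisfies every assertion in both test methods; the 32767 small-integer boundary and all internal names besides DataTypeFactory and produce are assumptions, and the real implementation in kiote/housing_survey may differ.

class DataTypeFactory:
    SMALL_MAX = 32767  # assumed smallint boundary

    def __init__(self, value, prev=None):
        self.value = value
        self.prev = prev

    def produce(self):
        try:
            number = int(self.value)
        except ValueError:
            return 'DecimalField'  # non-integer strings such as '1.1'
        if self.prev and number < 0:
            # Widening rule from the second test: once a negative value
            # appears, a Positive* type falls back to its signed variant.
            return self.prev.replace('Positive', '')
        small = abs(number) <= self.SMALL_MAX
        if number >= 0:
            return 'PositiveSmallIntegerField' if small else 'PositiveIntegerField'
        return 'SmallIntegerField' if small else 'IntegerField'

For example, DataTypeFactory('-1', 'PositiveIntegerField').produce() returns 'IntegerField', matching the second test's expectation.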
{"blob_id": "6029ae44d34399aa8dfaf94a1b43b23884eafce7", "bodies": ["group_form = GroupInscriptionForm(request.POST)\naddress_form = AddressForm(request.POST)\nreturn render(request, 'group/group_inscription.html', {'group_form': group_form, 'address_form': address_form})", "if request.method == 'POST':\n group_form = GroupInscriptionForm(request.POST, request.FILES)\n address_form = AddressForm(request.POST)\n if group_form.is_valid() and address_form.is_valid():\n group_form.save()\n address = address_form.save()\n group = Group.objects.get(name=request.POST['name'])\n group.address = address\n group.save()\n GroupMember.objects.create(user=request.user, group=group)\n messages.success(request, 'Vous avez ajouté une nouvelle communauté !')\n return redirect('group:community', group.pk)\n else:\n messages.error(request, 'Une erreur est survenue, réessayez de remplirle formulaire ou contactez un administrateur')\n return render(request, 'group/group_inscription.html', {'product_form': group_form, 'address_form': address_form})"], "bodies_text": "<|body_start_0|>\n group_form = GroupInscriptionForm(request.POST)\n address_form = AddressForm(request.POST)\n return render(request, 'group/group_inscription.html', {'group_form': group_form, 'address_form': address_form})\n<|end_body_0|>\n\n<|body_start_1|>\n if request.method == 'POST':\n group_form = GroupInscriptionForm(request.POST, request.FILES)\n address_form = AddressForm(request.POST)\n if group_form.is_valid() and address_form.is_valid():\n group_form.save()\n address = address_form.save()\n group = Group.objects.get(name=request.POST['name'])\n group.address = address\n group.save()\n GroupMember.objects.create(user=request.user, group=group)\n messages.success(request, 'Vous avez ajouté une nouvelle communauté !')\n return redirect('group:community', group.pk)\n else:\n messages.error(request, 'Une erreur est survenue, réessayez de remplirle formulaire ou contactez un administrateur')\n return render(request, 'group/group_inscription.html', {'product_form': group_form, 'address_form': address_form})\n<|end_body_1|>\n", "class_docstring": "Generic class-based view that permit to user to add a community (Group object)", "class_name": "GroupInscriptionView", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GroupInscriptionView:\n \"\"\"Generic class-based view that permit to user to add a community (Group object)\"\"\"\n\n def get(self, request):\n \"\"\"Method GET to print all inputs that user must fill to add a community\"\"\"\n <|body_0|>\n\n def post(self, request):\n \"\"\"Method POST to send datas input by user and register a Group object (community)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n group_form = GroupInscriptionForm(request.POST)\n address_form = AddressForm(request.POST)\n return render(request, 'group/group_inscription.html', {'group_form': group_form, 'address_form': address_form})\n<|end_body_0|>\n\n<|body_start_1|>\n if request.method == 'POST':\n group_form = GroupInscriptionForm(request.POST, request.FILES)\n address_form = AddressForm(request.POST)\n if group_form.is_valid() and address_form.is_valid():\n group_form.save()\n address = address_form.save()\n group = Group.objects.get(name=request.POST['name'])\n group.address = address\n group.save()\n GroupMember.objects.create(user=request.user, group=group)\n messages.success(request, 'Vous avez ajouté une nouvelle communauté !')\n return redirect('group:community', group.pk)\n 
else:\n messages.error(request, 'Une erreur est survenue, réessayez de remplirle formulaire ou contactez un administrateur')\n return render(request, 'group/group_inscription.html', {'product_form': group_form, 'address_form': address_form})\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000472", "length_bytes": 7810, "license_type": "no_license", "methods": [{"docstring": "Method GET to print all inputs that user must fill to add a community", "name": "get", "signature": "def get(self, request)"}, {"docstring": "Method POST to send datas input by user and register a Group object (community)", "name": "post", "signature": "def post(self, request)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_041904", "prompt": "Implement the Python class `GroupInscriptionView` described below.\n\nClass description:\nGeneric class-based view that permit to user to add a community (Group object)\n\nMethod signatures and docstrings:\n- def get(self, request): Method GET to print all inputs that user must fill to add a community\n- def post(self, request): Method POST to send datas input by user and register a Group object (community)", "prompted_full_text": "Implement the Python class `GroupInscriptionView` described below.\n\nClass description:\nGeneric class-based view that permit to user to add a community (Group object)\n\nMethod signatures and docstrings:\n- def get(self, request): Method GET to print all inputs that user must fill to add a community\n- def post(self, request): Method POST to send datas input by user and register a Group object (community)\n\n<|skeleton|>\nclass GroupInscriptionView:\n \"\"\"Generic class-based view that permit to user to add a community (Group object)\"\"\"\n\n def get(self, request):\n \"\"\"Method GET to print all inputs that user must fill to add a community\"\"\"\n <|body_0|>\n\n def post(self, request):\n \"\"\"Method POST to send datas input by user and register a Group object (community)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n group_form = GroupInscriptionForm(request.POST)\n address_form = AddressForm(request.POST)\n return render(request, 'group/group_inscription.html', {'group_form': group_form, 'address_form': address_form})\n<|end_body_0|>\n\n<|body_start_1|>\n if request.method == 'POST':\n group_form = GroupInscriptionForm(request.POST, request.FILES)\n address_form = AddressForm(request.POST)\n if group_form.is_valid() and address_form.is_valid():\n group_form.save()\n address = address_form.save()\n group = Group.objects.get(name=request.POST['name'])\n group.address = address\n group.save()\n GroupMember.objects.create(user=request.user, group=group)\n messages.success(request, 'Vous avez ajouté une nouvelle communauté !')\n return redirect('group:community', group.pk)\n else:\n messages.error(request, 'Une erreur est survenue, réessayez de remplirle formulaire ou contactez un administrateur')\n return render(request, 'group/group_inscription.html', {'product_form': group_form, 'address_form': address_form})\n<|end_body_1|>\n", "revision_id": "cf0b982a6df2b8b4318d12d344ef0827394eedfd", "skeleton": "<|skeleton|>\nclass GroupInscriptionView:\n \"\"\"Generic class-based view that permit to user to add a community (Group object)\"\"\"\n\n def get(self, request):\n \"\"\"Method GET to print all inputs that user must fill to add a community\"\"\"\n <|body_0|>\n\n def post(self, request):\n \"\"\"Method POST to send datas input by user and register a Group object (community)\"\"\"\n 
<|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class GroupInscriptionView:\n \"\"\"Generic class-based view that permit to user to add a community (Group object)\"\"\"\n\n def get(self, request):\n \"\"\"Method GET to print all inputs that user must fill to add a community\"\"\"\n group_form = GroupInscriptionForm(request.POST)\n address_form = AddressForm(request.POST)\n return render(request, 'group/group_inscription.html', {'group_form': group_form, 'address_form': address_form})\n\n def post(self, request):\n \"\"\"Method POST to send datas input by user and register a Group object (community)\"\"\"\n if request.method == 'POST':\n group_form = GroupInscriptionForm(request.POST, request.FILES)\n address_form = AddressForm(request.POST)\n if group_form.is_valid() and address_form.is_valid():\n group_form.save()\n address = address_form.save()\n group = Group.objects.get(name=request.POST['name'])\n group.address = address\n group.save()\n GroupMember.objects.create(user=request.user, group=group)\n messages.success(request, 'Vous avez ajouté une nouvelle communauté !')\n return redirect('group:community', group.pk)\n else:\n messages.error(request, 'Une erreur est survenue, réessayez de remplirle formulaire ou contactez un administrateur')\n return render(request, 'group/group_inscription.html', {'product_form': group_form, 'address_form': address_form})\n", "source": "the_stack_v2_python_sparse", "source_path": "group/views.py", "source_repo": "cleliofavoccia/Share", "split": "val", "star_events_count": 0}
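Two defects in the GroupInscriptionView record are worth flagging before reusing its solution verbatim: the POST error branch renders the form under the key 'product_form' while the GET branch uses 'group_form', and its error message runs 'remplirle' together. A corrected post(), assuming the template expects the same 'group_form' key as the GET branch and the record's form and model imports, might read:

from django.contrib import messages
from django.shortcuts import redirect, render

def post(self, request):
    group_form = GroupInscriptionForm(request.POST, request.FILES)
    address_form = AddressForm(request.POST)
    if group_form.is_valid() and address_form.is_valid():
        # Unchanged from the record: save both forms, link the address,
        # register the creator as a member, then redirect.
        group_form.save()
        address = address_form.save()
        group = Group.objects.get(name=request.POST['name'])
        group.address = address
        group.save()
        GroupMember.objects.create(user=request.user, group=group)
        messages.success(request, 'Vous avez ajouté une nouvelle communauté !')
        return redirect('group:community', group.pk)
    messages.error(request, 'Une erreur est survenue, réessayez de remplir le '
                            'formulaire ou contactez un administrateur')
    return render(request, 'group/group_inscription.html',
                  {'group_form': group_form, 'address_form': address_form})

The if request.method == 'POST' guard from the record is dropped here because Django only dispatches to post() for POST requests.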
{"blob_id": "af9a7d269cd5f5586ca02f0f256eb254c56bfce7", "bodies": ["if not self.is_visible(source, overrides):\n return\ntag = self.get_property('tag', source, overrides)\nshow_line = self.get_property('show_line', source, overrides)\nshow_labels = self.get_property('show_labels', source, overrides)\nshow_major_ticks = self.get_property('show_major_ticks', source, overrides)\nshow_minor_ticks = self.get_property('show_minor_ticks', source, overrides)\ncanvas.group(tag, 'axis')\nif show_minor_ticks:\n canvas.group(None, 'minor_ticks')\n self._draw_minor_ticks(canvas, source, overrides)\n canvas.ungroup()\nif show_major_ticks:\n canvas.group(None, 'major_ticks')\n self._draw_major_ticks(canvas, source, overrides)\n canvas.ungroup()\nif show_labels:\n canvas.group(None, 'labels')\n self._draw_labels(canvas, source, overrides)\n canvas.ungroup()\nif show_line:\n self._draw_line(canvas, source, overrides)\ncanvas.ungroup()", "ticks = self.get_property('major_ticks', source, overrides)\nsize = self.get_property('major_tick_size', source, overrides)\noffset = self.get_property('major_tick_offset', source, overrides)\nflip = self.get_property('major_tick_flip', source, overrides)\ncanvas.set_pen_by(self, prefix='major_tick_', source=source, overrides=overrides)\nself._draw_ticks(canvas, source, overrides, ticks, size, offset, flip)", "ticks = self.get_property('minor_ticks', source, overrides)\nsize = self.get_property('minor_tick_size', source, overrides)\noffset = self.get_property('minor_tick_offset', source, overrides)\nflip = self.get_property('minor_tick_flip', source, overrides)\ncanvas.set_pen_by(self, prefix='minor_tick_', source=source, overrides=overrides)\nself._draw_ticks(canvas, source, overrides, ticks, size, offset, flip)", "if not ticks:\n return\nx = self.get_property('x', source, overrides)\ny = self.get_property('y', source, overrides)\nradius = self.get_property('radius', source, overrides)\nunits = self.get_property('units', source, overrides)\nif units == ANGLE_DEG:\n ticks = tuple(map(math.radians, ticks))\nif tick_flip:\n inner_radius = radius - tick_offset - tick_size\n outer_radius = inner_radius + tick_size\nelse:\n inner_radius = radius + tick_offset\n outer_radius = inner_radius + tick_size\nfor angle in ticks:\n cos = math.cos(angle)\n sin = math.sin(angle)\n x1 = x + inner_radius * cos\n y1 = y + inner_radius * sin\n x2 = x + outer_radius * cos\n y2 = y + outer_radius * sin\n canvas.draw_line(x1, y1, x2, y2)", "x = self.get_property('x', source, overrides)\ny = self.get_property('y', source, overrides)\nradius = self.get_property('radius', source, overrides)\nflip = self.get_property('label_flip', source, overrides)\nlabels = self.get_property('labels', source, overrides)\nlabel_offset = self.get_property('label_offset', source, overrides)\nlabel_rotation = self.get_property('label_rotation', source, overrides)\nticks = self.get_property('major_ticks', source, overrides)\nunits = self.get_property('units', source, overrides)\nif not labels:\n return\ncanvas.set_text_by(self, prefix='label_', source=source, overrides=overrides)\nif units == ANGLE_DEG:\n ticks = tuple(map(math.radians, ticks))\nposition = POS_INSIDE if flip else POS_OUTSIDE\nradius += -label_offset if flip else label_offset\nfor i in range(min(len(labels), len(ticks))):\n label = labels[i]\n angle = ticks[i]\n if not label:\n continue\n canvas.draw_text_polar(label, x, y, radius, angle, position, label_rotation)", "x = self.get_property('x', source, overrides)\ny = self.get_property('y', source, 
overrides)\nradius = self.get_property('radius', source, overrides)\nclockwise = self.get_property('clockwise', source, overrides)\nstart_angle = AngleProperties.get_angle(self, 'start_', ANGLE_RAD, source, overrides)\nend_angle = AngleProperties.get_angle(self, 'end_', ANGLE_RAD, source, overrides)\ncanvas.set_pen_by(self, source=source, overrides=overrides)\ncanvas.fill_color = None\nif abs(start_angle - end_angle) >= 2 * math.pi:\n canvas.draw_circle(x, y, radius)\nelse:\n canvas.draw_arc(x, y, radius, start_angle, end_angle, clockwise)"], "bodies_text": "<|body_start_0|>\n if not self.is_visible(source, overrides):\n return\n tag = self.get_property('tag', source, overrides)\n show_line = self.get_property('show_line', source, overrides)\n show_labels = self.get_property('show_labels', source, overrides)\n show_major_ticks = self.get_property('show_major_ticks', source, overrides)\n show_minor_ticks = self.get_property('show_minor_ticks', source, overrides)\n canvas.group(tag, 'axis')\n if show_minor_ticks:\n canvas.group(None, 'minor_ticks')\n self._draw_minor_ticks(canvas, source, overrides)\n canvas.ungroup()\n if show_major_ticks:\n canvas.group(None, 'major_ticks')\n self._draw_major_ticks(canvas, source, overrides)\n canvas.ungroup()\n if show_labels:\n canvas.group(None, 'labels')\n self._draw_labels(canvas, source, overrides)\n canvas.ungroup()\n if show_line:\n self._draw_line(canvas, source, overrides)\n canvas.ungroup()\n<|end_body_0|>\n\n<|body_start_1|>\n ticks = self.get_property('major_ticks', source, overrides)\n size = self.get_property('major_tick_size', source, overrides)\n offset = self.get_property('major_tick_offset', source, overrides)\n flip = self.get_property('major_tick_flip', source, overrides)\n canvas.set_pen_by(self, prefix='major_tick_', source=source, overrides=overrides)\n self._draw_ticks(canvas, source, overrides, ticks, size, offset, flip)\n<|end_body_1|>\n\n<|body_start_2|>\n ticks = self.get_property('minor_ticks', source, overrides)\n size = self.get_property('minor_tick_size', source, overrides)\n offset = self.get_property('minor_tick_offset', source, overrides)\n flip = self.get_property('minor_tick_flip', source, overrides)\n canvas.set_pen_by(self, prefix='minor_tick_', source=source, overrides=overrides)\n self._draw_ticks(canvas, source, overrides, ticks, size, offset, flip)\n<|end_body_2|>\n\n<|body_start_3|>\n if not ticks:\n return\n x = self.get_property('x', source, overrides)\n y = self.get_property('y', source, overrides)\n radius = self.get_property('radius', source, overrides)\n units = self.get_property('units', source, overrides)\n if units == ANGLE_DEG:\n ticks = tuple(map(math.radians, ticks))\n if tick_flip:\n inner_radius = radius - tick_offset - tick_size\n outer_radius = inner_radius + tick_size\n else:\n inner_radius = radius + tick_offset\n outer_radius = inner_radius + tick_size\n for angle in ticks:\n cos = math.cos(angle)\n sin = math.sin(angle)\n x1 = x + inner_radius * cos\n y1 = y + inner_radius * sin\n x2 = x + outer_radius * cos\n y2 = y + outer_radius * sin\n canvas.draw_line(x1, y1, x2, y2)\n<|end_body_3|>\n\n<|body_start_4|>\n x = self.get_property('x', source, overrides)\n y = self.get_property('y', source, overrides)\n radius = self.get_property('radius', source, overrides)\n flip = self.get_property('label_flip', source, overrides)\n labels = self.get_property('labels', source, overrides)\n label_offset = self.get_property('label_offset', source, overrides)\n label_rotation = 
self.get_property('label_rotation', source, overrides)\n ticks = self.get_property('major_ticks', source, overrides)\n units = self.get_property('units', source, overrides)\n if not labels:\n return\n canvas.set_text_by(self, prefix='label_', source=source, overrides=overrides)\n if units == ANGLE_DEG:\n ticks = tuple(map(math.radians, ticks))\n position = POS_INSIDE if flip else POS_OUTSIDE\n radius += -label_offset if flip else label_offset\n for i in range(min(len(labels), len(ticks))):\n label = labels[i]\n angle = ticks[i]\n if not label:\n continue\n canvas.draw_text_polar(label, x, y, radius, angle, position, label_rotation)\n<|end_body_4|>\n\n<|body_start_5|>\n x = self.get_property('x', source, overrides)\n y = self.get_property('y', source, overrides)\n radius = self.get_property('radius', source, overrides)\n clockwise = self.get_property('clockwise', source, overrides)\n start_angle = AngleProperties.get_angle(self, 'start_', ANGLE_RAD, source, overrides)\n end_angle = AngleProperties.get_angle(self, 'end_', ANGLE_RAD, source, overrides)\n canvas.set_pen_by(self, source=source, overrides=overrides)\n canvas.fill_color = None\n if abs(start_angle - end_angle) >= 2 * math.pi:\n canvas.draw_circle(x, y, radius)\n else:\n canvas.draw_arc(x, y, radius, start_angle, end_angle, clockwise)\n<|end_body_5|>\n", "class_docstring": "Radial axis is a standard type of axis used for polar plots. By default the axis is drawn as a circle or arc line with ticks and labels facing out. The ticks are expected to be provided as absolute angle values in the units specified by the 'units' property. Properties: radius: int, float or callable Specifies the axis radius. units: str or callable Specifies the angle units for the ticks as any item from the pero.ANGLE enum. start_angle properties: Includes pero.AngleProperties to specify the start angle. end_angle properties: Includes pero.AngleProperties to specify the end angle. clockwise: bool or callable Specifies the drawing direction. If set to True the axis is drawn clockwise, other", "class_name": "RadialAxis", "detected_licenses": ["LicenseRef-scancode-philippe-de-muyter", "LicenseRef-scancode-commercial-license", "AGPL-3.0-or-later", "MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RadialAxis:\n \"\"\"Radial axis is a standard type of axis used for polar plots. By default the axis is drawn as a circle or arc line with ticks and labels facing out. The ticks are expected to be provided as absolute angle values in the units specified by the 'units' property. Properties: radius: int, float or callable Specifies the axis radius. units: str or callable Specifies the angle units for the ticks as any item from the pero.ANGLE enum. start_angle properties: Includes pero.AngleProperties to specify the start angle. end_angle properties: Includes pero.AngleProperties to specify the end angle. clockwise: bool or callable Specifies the drawing direction. 
If set to True the axis is drawn clockwise, other\"\"\"\n\n def draw(self, canvas, source=UNDEF, **overrides):\n \"\"\"Uses given canvas to draw the axis.\"\"\"\n <|body_0|>\n\n def _draw_major_ticks(self, canvas, source, overrides):\n \"\"\"Draws axis major ticks.\"\"\"\n <|body_1|>\n\n def _draw_minor_ticks(self, canvas, source, overrides):\n \"\"\"Draws axis minor ticks.\"\"\"\n <|body_2|>\n\n def _draw_ticks(self, canvas, source, overrides, ticks, tick_size, tick_offset, tick_flip):\n \"\"\"Draws axis ticks.\"\"\"\n <|body_3|>\n\n def _draw_labels(self, canvas, source, overrides):\n \"\"\"Draws axis labels.\"\"\"\n <|body_4|>\n\n def _draw_line(self, canvas, source, overrides):\n \"\"\"Draws axis line.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not self.is_visible(source, overrides):\n return\n tag = self.get_property('tag', source, overrides)\n show_line = self.get_property('show_line', source, overrides)\n show_labels = self.get_property('show_labels', source, overrides)\n show_major_ticks = self.get_property('show_major_ticks', source, overrides)\n show_minor_ticks = self.get_property('show_minor_ticks', source, overrides)\n canvas.group(tag, 'axis')\n if show_minor_ticks:\n canvas.group(None, 'minor_ticks')\n self._draw_minor_ticks(canvas, source, overrides)\n canvas.ungroup()\n if show_major_ticks:\n canvas.group(None, 'major_ticks')\n self._draw_major_ticks(canvas, source, overrides)\n canvas.ungroup()\n if show_labels:\n canvas.group(None, 'labels')\n self._draw_labels(canvas, source, overrides)\n canvas.ungroup()\n if show_line:\n self._draw_line(canvas, source, overrides)\n canvas.ungroup()\n<|end_body_0|>\n\n<|body_start_1|>\n ticks = self.get_property('major_ticks', source, overrides)\n size = self.get_property('major_tick_size', source, overrides)\n offset = self.get_property('major_tick_offset', source, overrides)\n flip = self.get_property('major_tick_flip', source, overrides)\n canvas.set_pen_by(self, prefix='major_tick_', source=source, overrides=overrides)\n self._draw_ticks(canvas, source, overrides, ticks, size, offset, flip)\n<|end_body_1|>\n\n<|body_start_2|>\n ticks = self.get_property('minor_ticks', source, overrides)\n size = self.get_property('minor_tick_size', source, overrides)\n offset = self.get_property('minor_tick_offset', source, overrides)\n flip = self.get_property('minor_tick_flip', source, overrides)\n canvas.set_pen_by(self, prefix='minor_tick_', source=source, overrides=overrides)\n self._draw_ticks(canvas, source, overrides, ticks, size, offset, flip)\n<|end_body_2|>\n\n<|body_start_3|>\n if not ticks:\n return\n x = self.get_property('x', source, overrides)\n y = self.get_property('y', source, overrides)\n radius = self.get_property('radius', source, overrides)\n units = self.get_property('units', source, overrides)\n if units == ANGLE_DEG:\n ticks = tuple(map(math.radians, ticks))\n if tick_flip:\n inner_radius = radius - tick_offset - tick_size\n outer_radius = inner_radius + tick_size\n else:\n inner_radius = radius + tick_offset\n outer_radius = inner_radius + tick_size\n for angle in ticks:\n cos = math.cos(angle)\n sin = math.sin(angle)\n x1 = x + inner_radius * cos\n y1 = y + inner_radius * sin\n x2 = x + outer_radius * cos\n y2 = y + outer_radius * sin\n canvas.draw_line(x1, y1, x2, y2)\n<|end_body_3|>\n\n<|body_start_4|>\n x = self.get_property('x', source, overrides)\n y = self.get_property('y', source, overrides)\n radius = self.get_property('radius', source, overrides)\n flip = self.get_property('label_flip', 
source, overrides)\n labels = self.get_property('labels', source, overrides)\n label_offset = self.get_property('label_offset', source, overrides)\n label_rotation = self.get_property('label_rotation', source, overrides)\n ticks = self.get_property('major_ticks', source, overrides)\n units = self.get_property('units', source, overrides)\n if not labels:\n return\n canvas.set_text_by(self, prefix='label_', source=source, overrides=overrides)\n if units == ANGLE_DEG:\n ticks = tuple(map(math.radians, ticks))\n position = POS_INSIDE if flip else POS_OUTSIDE\n radius += -label_offset if flip else label_offset\n for i in range(min(len(labels), len(ticks))):\n label = labels[i]\n angle = ticks[i]\n if not label:\n continue\n canvas.draw_text_polar(label, x, y, radius, angle, position, label_rotation)\n<|end_body_4|>\n\n<|body_start_5|>\n x = self.get_property('x', source, overrides)\n y = self.get_property('y', source, overrides)\n radius = self.get_property('radius', source, overrides)\n clockwise = self.get_property('clockwise', source, overrides)\n start_angle = AngleProperties.get_angle(self, 'start_', ANGLE_RAD, source, overrides)\n end_angle = AngleProperties.get_angle(self, 'end_', ANGLE_RAD, source, overrides)\n canvas.set_pen_by(self, source=source, overrides=overrides)\n canvas.fill_color = None\n if abs(start_angle - end_angle) >= 2 * math.pi:\n canvas.draw_circle(x, y, radius)\n else:\n canvas.draw_arc(x, y, radius, start_angle, end_angle, clockwise)\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000473", "length_bytes": 29078, "license_type": "permissive", "methods": [{"docstring": "Uses given canvas to draw the axis.", "name": "draw", "signature": "def draw(self, canvas, source=UNDEF, **overrides)"}, {"docstring": "Draws axis major ticks.", "name": "_draw_major_ticks", "signature": "def _draw_major_ticks(self, canvas, source, overrides)"}, {"docstring": "Draws axis minor ticks.", "name": "_draw_minor_ticks", "signature": "def _draw_minor_ticks(self, canvas, source, overrides)"}, {"docstring": "Draws axis ticks.", "name": "_draw_ticks", "signature": "def _draw_ticks(self, canvas, source, overrides, ticks, tick_size, tick_offset, tick_flip)"}, {"docstring": "Draws axis labels.", "name": "_draw_labels", "signature": "def _draw_labels(self, canvas, source, overrides)"}, {"docstring": "Draws axis line.", "name": "_draw_line", "signature": "def _draw_line(self, canvas, source, overrides)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_train_044020", "prompt": "Implement the Python class `RadialAxis` described below.\n\nClass description:\nRadial axis is a standard type of axis used for polar plots. By default the axis is drawn as a circle or arc line with ticks and labels facing out. The ticks are expected to be provided as absolute angle values in the units specified by the 'units' property. Properties: radius: int, float or callable Specifies the axis radius. units: str or callable Specifies the angle units for the ticks as any item from the pero.ANGLE enum. start_angle properties: Includes pero.AngleProperties to specify the start angle. end_angle properties: Includes pero.AngleProperties to specify the end angle. clockwise: bool or callable Specifies the drawing direction. 
If set to True the axis is drawn clockwise, other\n\nMethod signatures and docstrings:\n- def draw(self, canvas, source=UNDEF, **overrides): Uses given canvas to draw the axis.\n- def _draw_major_ticks(self, canvas, source, overrides): Draws axis major ticks.\n- def _draw_minor_ticks(self, canvas, source, overrides): Draws axis minor ticks.\n- def _draw_ticks(self, canvas, source, overrides, ticks, tick_size, tick_offset, tick_flip): Draws axis ticks.\n- def _draw_labels(self, canvas, source, overrides): Draws axis labels.\n- def _draw_line(self, canvas, source, overrides): Draws axis line.", "prompted_full_text": "Implement the Python class `RadialAxis` described below.\n\nClass description:\nRadial axis is a standard type of axis used for polar plots. By default the axis is drawn as a circle or arc line with ticks and labels facing out. The ticks are expected to be provided as absolute angle values in the units specified by the 'units' property. Properties: radius: int, float or callable Specifies the axis radius. units: str or callable Specifies the angle units for the ticks as any item from the pero.ANGLE enum. start_angle properties: Includes pero.AngleProperties to specify the start angle. end_angle properties: Includes pero.AngleProperties to specify the end angle. clockwise: bool or callable Specifies the drawing direction. If set to True the axis is drawn clockwise, other\n\nMethod signatures and docstrings:\n- def draw(self, canvas, source=UNDEF, **overrides): Uses given canvas to draw the axis.\n- def _draw_major_ticks(self, canvas, source, overrides): Draws axis major ticks.\n- def _draw_minor_ticks(self, canvas, source, overrides): Draws axis minor ticks.\n- def _draw_ticks(self, canvas, source, overrides, ticks, tick_size, tick_offset, tick_flip): Draws axis ticks.\n- def _draw_labels(self, canvas, source, overrides): Draws axis labels.\n- def _draw_line(self, canvas, source, overrides): Draws axis line.\n\n<|skeleton|>\nclass RadialAxis:\n \"\"\"Radial axis is a standard type of axis used for polar plots. By default the axis is drawn as a circle or arc line with ticks and labels facing out. The ticks are expected to be provided as absolute angle values in the units specified by the 'units' property. Properties: radius: int, float or callable Specifies the axis radius. units: str or callable Specifies the angle units for the ticks as any item from the pero.ANGLE enum. start_angle properties: Includes pero.AngleProperties to specify the start angle. end_angle properties: Includes pero.AngleProperties to specify the end angle. clockwise: bool or callable Specifies the drawing direction. 
If set to True the axis is drawn clockwise, other\"\"\"\n\n def draw(self, canvas, source=UNDEF, **overrides):\n \"\"\"Uses given canvas to draw the axis.\"\"\"\n <|body_0|>\n\n def _draw_major_ticks(self, canvas, source, overrides):\n \"\"\"Draws axis major ticks.\"\"\"\n <|body_1|>\n\n def _draw_minor_ticks(self, canvas, source, overrides):\n \"\"\"Draws axis minor ticks.\"\"\"\n <|body_2|>\n\n def _draw_ticks(self, canvas, source, overrides, ticks, tick_size, tick_offset, tick_flip):\n \"\"\"Draws axis ticks.\"\"\"\n <|body_3|>\n\n def _draw_labels(self, canvas, source, overrides):\n \"\"\"Draws axis labels.\"\"\"\n <|body_4|>\n\n def _draw_line(self, canvas, source, overrides):\n \"\"\"Draws axis line.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not self.is_visible(source, overrides):\n return\n tag = self.get_property('tag', source, overrides)\n show_line = self.get_property('show_line', source, overrides)\n show_labels = self.get_property('show_labels', source, overrides)\n show_major_ticks = self.get_property('show_major_ticks', source, overrides)\n show_minor_ticks = self.get_property('show_minor_ticks', source, overrides)\n canvas.group(tag, 'axis')\n if show_minor_ticks:\n canvas.group(None, 'minor_ticks')\n self._draw_minor_ticks(canvas, source, overrides)\n canvas.ungroup()\n if show_major_ticks:\n canvas.group(None, 'major_ticks')\n self._draw_major_ticks(canvas, source, overrides)\n canvas.ungroup()\n if show_labels:\n canvas.group(None, 'labels')\n self._draw_labels(canvas, source, overrides)\n canvas.ungroup()\n if show_line:\n self._draw_line(canvas, source, overrides)\n canvas.ungroup()\n<|end_body_0|>\n\n<|body_start_1|>\n ticks = self.get_property('major_ticks', source, overrides)\n size = self.get_property('major_tick_size', source, overrides)\n offset = self.get_property('major_tick_offset', source, overrides)\n flip = self.get_property('major_tick_flip', source, overrides)\n canvas.set_pen_by(self, prefix='major_tick_', source=source, overrides=overrides)\n self._draw_ticks(canvas, source, overrides, ticks, size, offset, flip)\n<|end_body_1|>\n\n<|body_start_2|>\n ticks = self.get_property('minor_ticks', source, overrides)\n size = self.get_property('minor_tick_size', source, overrides)\n offset = self.get_property('minor_tick_offset', source, overrides)\n flip = self.get_property('minor_tick_flip', source, overrides)\n canvas.set_pen_by(self, prefix='minor_tick_', source=source, overrides=overrides)\n self._draw_ticks(canvas, source, overrides, ticks, size, offset, flip)\n<|end_body_2|>\n\n<|body_start_3|>\n if not ticks:\n return\n x = self.get_property('x', source, overrides)\n y = self.get_property('y', source, overrides)\n radius = self.get_property('radius', source, overrides)\n units = self.get_property('units', source, overrides)\n if units == ANGLE_DEG:\n ticks = tuple(map(math.radians, ticks))\n if tick_flip:\n inner_radius = radius - tick_offset - tick_size\n outer_radius = inner_radius + tick_size\n else:\n inner_radius = radius + tick_offset\n outer_radius = inner_radius + tick_size\n for angle in ticks:\n cos = math.cos(angle)\n sin = math.sin(angle)\n x1 = x + inner_radius * cos\n y1 = y + inner_radius * sin\n x2 = x + outer_radius * cos\n y2 = y + outer_radius * sin\n canvas.draw_line(x1, y1, x2, y2)\n<|end_body_3|>\n\n<|body_start_4|>\n x = self.get_property('x', source, overrides)\n y = self.get_property('y', source, overrides)\n radius = self.get_property('radius', source, overrides)\n flip = self.get_property('label_flip', 
source, overrides)\n labels = self.get_property('labels', source, overrides)\n label_offset = self.get_property('label_offset', source, overrides)\n label_rotation = self.get_property('label_rotation', source, overrides)\n ticks = self.get_property('major_ticks', source, overrides)\n units = self.get_property('units', source, overrides)\n if not labels:\n return\n canvas.set_text_by(self, prefix='label_', source=source, overrides=overrides)\n if units == ANGLE_DEG:\n ticks = tuple(map(math.radians, ticks))\n position = POS_INSIDE if flip else POS_OUTSIDE\n radius += -label_offset if flip else label_offset\n for i in range(min(len(labels), len(ticks))):\n label = labels[i]\n angle = ticks[i]\n if not label:\n continue\n canvas.draw_text_polar(label, x, y, radius, angle, position, label_rotation)\n<|end_body_4|>\n\n<|body_start_5|>\n x = self.get_property('x', source, overrides)\n y = self.get_property('y', source, overrides)\n radius = self.get_property('radius', source, overrides)\n clockwise = self.get_property('clockwise', source, overrides)\n start_angle = AngleProperties.get_angle(self, 'start_', ANGLE_RAD, source, overrides)\n end_angle = AngleProperties.get_angle(self, 'end_', ANGLE_RAD, source, overrides)\n canvas.set_pen_by(self, source=source, overrides=overrides)\n canvas.fill_color = None\n if abs(start_angle - end_angle) >= 2 * math.pi:\n canvas.draw_circle(x, y, radius)\n else:\n canvas.draw_arc(x, y, radius, start_angle, end_angle, clockwise)\n<|end_body_5|>\n", "revision_id": "d59b1bc056f3037b7b7ab635b6deb41120612965", "skeleton": "<|skeleton|>\nclass RadialAxis:\n \"\"\"Radial axis is a standard type of axis used for polar plots. By default the axis is drawn as a circle or arc line with ticks and labels facing out. The ticks are expected to be provided as absolute angle values in the units specified by the 'units' property. Properties: radius: int, float or callable Specifies the axis radius. units: str or callable Specifies the angle units for the ticks as any item from the pero.ANGLE enum. start_angle properties: Includes pero.AngleProperties to specify the start angle. end_angle properties: Includes pero.AngleProperties to specify the end angle. clockwise: bool or callable Specifies the drawing direction. If set to True the axis is drawn clockwise, other\"\"\"\n\n def draw(self, canvas, source=UNDEF, **overrides):\n \"\"\"Uses given canvas to draw the axis.\"\"\"\n <|body_0|>\n\n def _draw_major_ticks(self, canvas, source, overrides):\n \"\"\"Draws axis major ticks.\"\"\"\n <|body_1|>\n\n def _draw_minor_ticks(self, canvas, source, overrides):\n \"\"\"Draws axis minor ticks.\"\"\"\n <|body_2|>\n\n def _draw_ticks(self, canvas, source, overrides, ticks, tick_size, tick_offset, tick_flip):\n \"\"\"Draws axis ticks.\"\"\"\n <|body_3|>\n\n def _draw_labels(self, canvas, source, overrides):\n \"\"\"Draws axis labels.\"\"\"\n <|body_4|>\n\n def _draw_line(self, canvas, source, overrides):\n \"\"\"Draws axis line.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class RadialAxis:\n \"\"\"Radial axis is a standard type of axis used for polar plots. By default the axis is drawn as a circle or arc line with ticks and labels facing out. The ticks are expected to be provided as absolute angle values in the units specified by the 'units' property. Properties: radius: int, float or callable Specifies the axis radius. 
units: str or callable Specifies the angle units for the ticks as any item from the pero.ANGLE enum. start_angle properties: Includes pero.AngleProperties to specify the start angle. end_angle properties: Includes pero.AngleProperties to specify the end angle. clockwise: bool or callable Specifies the drawing direction. If set to True the axis is drawn clockwise, other\"\"\"\n\n def draw(self, canvas, source=UNDEF, **overrides):\n \"\"\"Uses given canvas to draw the axis.\"\"\"\n if not self.is_visible(source, overrides):\n return\n tag = self.get_property('tag', source, overrides)\n show_line = self.get_property('show_line', source, overrides)\n show_labels = self.get_property('show_labels', source, overrides)\n show_major_ticks = self.get_property('show_major_ticks', source, overrides)\n show_minor_ticks = self.get_property('show_minor_ticks', source, overrides)\n canvas.group(tag, 'axis')\n if show_minor_ticks:\n canvas.group(None, 'minor_ticks')\n self._draw_minor_ticks(canvas, source, overrides)\n canvas.ungroup()\n if show_major_ticks:\n canvas.group(None, 'major_ticks')\n self._draw_major_ticks(canvas, source, overrides)\n canvas.ungroup()\n if show_labels:\n canvas.group(None, 'labels')\n self._draw_labels(canvas, source, overrides)\n canvas.ungroup()\n if show_line:\n self._draw_line(canvas, source, overrides)\n canvas.ungroup()\n\n def _draw_major_ticks(self, canvas, source, overrides):\n \"\"\"Draws axis major ticks.\"\"\"\n ticks = self.get_property('major_ticks', source, overrides)\n size = self.get_property('major_tick_size', source, overrides)\n offset = self.get_property('major_tick_offset', source, overrides)\n flip = self.get_property('major_tick_flip', source, overrides)\n canvas.set_pen_by(self, prefix='major_tick_', source=source, overrides=overrides)\n self._draw_ticks(canvas, source, overrides, ticks, size, offset, flip)\n\n def _draw_minor_ticks(self, canvas, source, overrides):\n \"\"\"Draws axis minor ticks.\"\"\"\n ticks = self.get_property('minor_ticks', source, overrides)\n size = self.get_property('minor_tick_size', source, overrides)\n offset = self.get_property('minor_tick_offset', source, overrides)\n flip = self.get_property('minor_tick_flip', source, overrides)\n canvas.set_pen_by(self, prefix='minor_tick_', source=source, overrides=overrides)\n self._draw_ticks(canvas, source, overrides, ticks, size, offset, flip)\n\n def _draw_ticks(self, canvas, source, overrides, ticks, tick_size, tick_offset, tick_flip):\n \"\"\"Draws axis ticks.\"\"\"\n if not ticks:\n return\n x = self.get_property('x', source, overrides)\n y = self.get_property('y', source, overrides)\n radius = self.get_property('radius', source, overrides)\n units = self.get_property('units', source, overrides)\n if units == ANGLE_DEG:\n ticks = tuple(map(math.radians, ticks))\n if tick_flip:\n inner_radius = radius - tick_offset - tick_size\n outer_radius = inner_radius + tick_size\n else:\n inner_radius = radius + tick_offset\n outer_radius = inner_radius + tick_size\n for angle in ticks:\n cos = math.cos(angle)\n sin = math.sin(angle)\n x1 = x + inner_radius * cos\n y1 = y + inner_radius * sin\n x2 = x + outer_radius * cos\n y2 = y + outer_radius * sin\n canvas.draw_line(x1, y1, x2, y2)\n\n def _draw_labels(self, canvas, source, overrides):\n \"\"\"Draws axis labels.\"\"\"\n x = self.get_property('x', source, overrides)\n y = self.get_property('y', source, overrides)\n radius = self.get_property('radius', source, overrides)\n flip = self.get_property('label_flip', source, overrides)\n labels 
= self.get_property('labels', source, overrides)\n label_offset = self.get_property('label_offset', source, overrides)\n label_rotation = self.get_property('label_rotation', source, overrides)\n ticks = self.get_property('major_ticks', source, overrides)\n units = self.get_property('units', source, overrides)\n if not labels:\n return\n canvas.set_text_by(self, prefix='label_', source=source, overrides=overrides)\n if units == ANGLE_DEG:\n ticks = tuple(map(math.radians, ticks))\n position = POS_INSIDE if flip else POS_OUTSIDE\n radius += -label_offset if flip else label_offset\n for i in range(min(len(labels), len(ticks))):\n label = labels[i]\n angle = ticks[i]\n if not label:\n continue\n canvas.draw_text_polar(label, x, y, radius, angle, position, label_rotation)\n\n def _draw_line(self, canvas, source, overrides):\n \"\"\"Draws axis line.\"\"\"\n x = self.get_property('x', source, overrides)\n y = self.get_property('y', source, overrides)\n radius = self.get_property('radius', source, overrides)\n clockwise = self.get_property('clockwise', source, overrides)\n start_angle = AngleProperties.get_angle(self, 'start_', ANGLE_RAD, source, overrides)\n end_angle = AngleProperties.get_angle(self, 'end_', ANGLE_RAD, source, overrides)\n canvas.set_pen_by(self, source=source, overrides=overrides)\n canvas.fill_color = None\n if abs(start_angle - end_angle) >= 2 * math.pi:\n canvas.draw_circle(x, y, radius)\n else:\n canvas.draw_arc(x, y, radius, start_angle, end_angle, clockwise)\n", "source": "the_stack_v2_python_sparse", "source_path": "pero/glyphs/axes.py", "source_repo": "xxao/pero", "split": "val", "star_events_count": 31}
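The RadialAxis record above derives each tick segment from plain polar geometry: choose an inner radius (inset when the ticks flip inward, offset outward otherwise), extend by the tick size, and convert each angle to x/y endpoints. A minimal dependency-free sketch of that same math (pure Python; the function name and default sizes here are illustrative, not part of the record):

import math

def tick_endpoints(x, y, radius, angles_deg, size=6.0, offset=2.0, flip=False):
    """Return (x1, y1, x2, y2) tick segments on a circle of the given radius."""
    if flip:
        inner = radius - offset - size   # ticks drawn inward from the axis line
    else:
        inner = radius + offset          # ticks drawn outward from the axis line
    outer = inner + size
    segments = []
    for angle in map(math.radians, angles_deg):
        c, s = math.cos(angle), math.sin(angle)
        segments.append((x + inner * c, y + inner * s, x + outer * c, y + outer * s))
    return segments

# Four major ticks on a circle of radius 100 centred at the origin.
for seg in tick_endpoints(0, 0, 100, [0, 90, 180, 270]):
    print(tuple(round(v, 1) for v in seg))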
{"blob_id": "0a0e40e25ea7963d4e7a3cb5452f909a3a3a38bc", "bodies": ["dic = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}\nnum_list = ['IV', 'IX', 'XL', 'XC', 'CD', 'CM']\nres, i, lens = (0, 0, len(s))\nwhile i < lens:\n if i == lens - 1:\n res += dic.get(s[i])\n break\n if s[i:i + 2] in num_list:\n temp = dic.get(s[i + 1]) - dic.get(s[i])\n res += temp\n i += 2\n else:\n res += dic.get(s[i])\n i += 1\nreturn res", "dic = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}\ns = s.replace('IV', 'IIII').replace('IX', 'VIIII').replace('XL', 'XXXX').replace('XC', 'LXXXX').replace('CD', 'CCCC').replace('CM', 'DCCCC')\nres = 0\nfor c in s:\n res += dic[c]\nreturn res"], "bodies_text": "<|body_start_0|>\n dic = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}\n num_list = ['IV', 'IX', 'XL', 'XC', 'CD', 'CM']\n res, i, lens = (0, 0, len(s))\n while i < lens:\n if i == lens - 1:\n res += dic.get(s[i])\n break\n if s[i:i + 2] in num_list:\n temp = dic.get(s[i + 1]) - dic.get(s[i])\n res += temp\n i += 2\n else:\n res += dic.get(s[i])\n i += 1\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n dic = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}\n s = s.replace('IV', 'IIII').replace('IX', 'VIIII').replace('XL', 'XXXX').replace('XC', 'LXXXX').replace('CD', 'CCCC').replace('CM', 'DCCCC')\n res = 0\n for c in s:\n res += dic[c]\n return res\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def romanToInt1(self, s: str) -> int:\n \"\"\"思想:只有六种情况是两数相减,其他都是加上value即可。 注:字符串s[i:i+1]只取一个字符,s[i:i+2]才是取两个字符。\"\"\"\n <|body_0|>\n\n def romanToInt2(self, s: str) -> int:\n \"\"\"取巧法:将减法场景全部替换为加法场景。\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n dic = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}\n num_list = ['IV', 'IX', 'XL', 'XC', 'CD', 'CM']\n res, i, lens = (0, 0, len(s))\n while i < lens:\n if i == lens - 1:\n res += dic.get(s[i])\n break\n if s[i:i + 2] in num_list:\n temp = dic.get(s[i + 1]) - dic.get(s[i])\n res += temp\n i += 2\n else:\n res += dic.get(s[i])\n i += 1\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n dic = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}\n s = s.replace('IV', 'IIII').replace('IX', 'VIIII').replace('XL', 'XXXX').replace('XC', 'LXXXX').replace('CD', 'CCCC').replace('CM', 'DCCCC')\n res = 0\n for c in s:\n res += dic[c]\n return res\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000474", "length_bytes": 1786, "license_type": "no_license", "methods": [{"docstring": "思想:只有六种情况是两数相减,其他都是加上value即可。 注:字符串s[i:i+1]只取一个字符,s[i:i+2]才是取两个字符。", "name": "romanToInt1", "signature": "def romanToInt1(self, s: str) -> int"}, {"docstring": "取巧法:将减法场景全部替换为加法场景。", "name": "romanToInt2", "signature": "def romanToInt2(self, s: str) -> int"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_035614", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def romanToInt1(self, s: str) -> int: 思想:只有六种情况是两数相减,其他都是加上value即可。 注:字符串s[i:i+1]只取一个字符,s[i:i+2]才是取两个字符。\n- def romanToInt2(self, s: str) -> int: 取巧法:将减法场景全部替换为加法场景。", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def 
romanToInt1(self, s: str) -> int: 思想:只有六种情况是两数相减,其他都是加上value即可。 注:字符串s[i:i+1]只取一个字符,s[i:i+2]才是取两个字符。\n- def romanToInt2(self, s: str) -> int: 取巧法:将减法场景全部替换为加法场景。\n\n<|skeleton|>\nclass Solution:\n\n def romanToInt1(self, s: str) -> int:\n \"\"\"思想:只有六种情况是两数相减,其他都是加上value即可。 注:字符串s[i:i+1]只取一个字符,s[i:i+2]才是取两个字符。\"\"\"\n <|body_0|>\n\n def romanToInt2(self, s: str) -> int:\n \"\"\"取巧法:将减法场景全部替换为加法场景。\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n dic = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}\n num_list = ['IV', 'IX', 'XL', 'XC', 'CD', 'CM']\n res, i, lens = (0, 0, len(s))\n while i < lens:\n if i == lens - 1:\n res += dic.get(s[i])\n break\n if s[i:i + 2] in num_list:\n temp = dic.get(s[i + 1]) - dic.get(s[i])\n res += temp\n i += 2\n else:\n res += dic.get(s[i])\n i += 1\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n dic = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}\n s = s.replace('IV', 'IIII').replace('IX', 'VIIII').replace('XL', 'XXXX').replace('XC', 'LXXXX').replace('CD', 'CCCC').replace('CM', 'DCCCC')\n res = 0\n for c in s:\n res += dic[c]\n return res\n<|end_body_1|>\n", "revision_id": "2bbb1640589aab34f2bc42489283033cc11fb885", "skeleton": "<|skeleton|>\nclass Solution:\n\n def romanToInt1(self, s: str) -> int:\n \"\"\"思想:只有六种情况是两数相减,其他都是加上value即可。 注:字符串s[i:i+1]只取一个字符,s[i:i+2]才是取两个字符。\"\"\"\n <|body_0|>\n\n def romanToInt2(self, s: str) -> int:\n \"\"\"取巧法:将减法场景全部替换为加法场景。\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def romanToInt1(self, s: str) -> int:\n \"\"\"思想:只有六种情况是两数相减,其他都是加上value即可。 注:字符串s[i:i+1]只取一个字符,s[i:i+2]才是取两个字符。\"\"\"\n dic = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}\n num_list = ['IV', 'IX', 'XL', 'XC', 'CD', 'CM']\n res, i, lens = (0, 0, len(s))\n while i < lens:\n if i == lens - 1:\n res += dic.get(s[i])\n break\n if s[i:i + 2] in num_list:\n temp = dic.get(s[i + 1]) - dic.get(s[i])\n res += temp\n i += 2\n else:\n res += dic.get(s[i])\n i += 1\n return res\n\n def romanToInt2(self, s: str) -> int:\n \"\"\"取巧法:将减法场景全部替换为加法场景。\"\"\"\n dic = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}\n s = s.replace('IV', 'IIII').replace('IX', 'VIIII').replace('XL', 'XXXX').replace('XC', 'LXXXX').replace('CD', 'CCCC').replace('CM', 'DCCCC')\n res = 0\n for c in s:\n res += dic[c]\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "013_roman-to-integer.py", "source_repo": "helloocc/algorithm", "split": "val", "star_events_count": 1}
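The two romanToInt bodies in the record above take complementary routes: the first scans for the six subtractive pairs, the second rewrites them into purely additive runs before summing. A small self-contained cross-check that the two strategies agree (the helpers below restate the record's logic in standalone form):

def roman_subtractive(s):
    values = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
    pairs = {'IV', 'IX', 'XL', 'XC', 'CD', 'CM'}
    res, i = 0, 0
    while i < len(s):
        if s[i:i + 2] in pairs:           # subtractive pair, consume two symbols
            res += values[s[i + 1]] - values[s[i]]
            i += 2
        else:                             # plain symbol (also covers the last char)
            res += values[s[i]]
            i += 1
    return res

def roman_additive(s):
    values = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
    for pair, run in [('IV', 'IIII'), ('IX', 'VIIII'), ('XL', 'XXXX'),
                      ('XC', 'LXXXX'), ('CD', 'CCCC'), ('CM', 'DCCCC')]:
        s = s.replace(pair, run)          # rewrite subtraction as pure addition
    return sum(values[c] for c in s)

for case, want in [('III', 3), ('LVIII', 58), ('MCMXCIV', 1994)]:
    assert roman_subtractive(case) == roman_additive(case) == want
print('both strategies agree')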
{"blob_id": "3f001039742ae63e79e25bbb0459c1be586de6ba", "bodies": ["self.state_size = state_size\nself.action_size = action_size\nself.action_high = action_high\nself.action_low = action_low\nself.layer_sizes = layer_sizes\nself.batch_norm_options = batch_norm_options\nself.dropout_options = dropout_options\nself.learning_rate = learning_rate\nself.logger = logger\nself.build_model()", "states = K.layers.Input(shape=(self.state_size,), name='states')\nnet = states\nfor layer_count in range(len(self.layer_sizes)):\n net = K.layers.Dense(units=self.layer_sizes[layer_count])(net)\n net = K.layers.Activation('relu')(net)\n if self.batch_norm_options[layer_count]:\n net = K.layers.BatchNormalization()(net)\n net = K.layers.Dropout(self.dropout_options[layer_count])(net)\nactions = K.layers.Dense(units=self.action_size, activation='linear', name='raw_actions')(net)\nself.model = K.models.Model(inputs=states, outputs=actions)\nself.logger.debug('Model Summery:')\nself.model.summary(print_fn=self.logger.debug)\nself.optimizer = K.optimizers.Adam(lr=self.learning_rate)\nself.model.compile(loss='mse', optimizer=self.optimizer)"], "bodies_text": "<|body_start_0|>\n self.state_size = state_size\n self.action_size = action_size\n self.action_high = action_high\n self.action_low = action_low\n self.layer_sizes = layer_sizes\n self.batch_norm_options = batch_norm_options\n self.dropout_options = dropout_options\n self.learning_rate = learning_rate\n self.logger = logger\n self.build_model()\n<|end_body_0|>\n\n<|body_start_1|>\n states = K.layers.Input(shape=(self.state_size,), name='states')\n net = states\n for layer_count in range(len(self.layer_sizes)):\n net = K.layers.Dense(units=self.layer_sizes[layer_count])(net)\n net = K.layers.Activation('relu')(net)\n if self.batch_norm_options[layer_count]:\n net = K.layers.BatchNormalization()(net)\n net = K.layers.Dropout(self.dropout_options[layer_count])(net)\n actions = K.layers.Dense(units=self.action_size, activation='linear', name='raw_actions')(net)\n self.model = K.models.Model(inputs=states, outputs=actions)\n self.logger.debug('Model Summery:')\n self.model.summary(print_fn=self.logger.debug)\n self.optimizer = K.optimizers.Adam(lr=self.learning_rate)\n self.model.compile(loss='mse', optimizer=self.optimizer)\n<|end_body_1|>\n", "class_docstring": "Standard QNetwork implementation : Actor(Policy) Model", "class_name": "DQNetwork", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DQNetwork:\n \"\"\"Standard QNetwork implementation : Actor(Policy) Model\"\"\"\n\n def __init__(self, state_size, action_size, action_high=1.0, action_low=0.0, layer_sizes=(64, 64), batch_norm_options=(True, True), dropout_options=(0, 0), learning_rate=0.0001, logger=None):\n \"\"\"Initialise the Network Model with given number of layes defined with given size. Parameters ========== :param state_size : size of the state space. :type state_size : int :param action_size : size of the action space. :type action_size : int :param action_high : Upper bound of the action space. :type action_high : float :param action_low : Lower bound of the action space. :type action_low : float :param layer_sizes : list of ints defining the size of each layer used in the model :type layer_sizes : list :param batch_norm_options : list of bool defining whether to use Batch Normalisation in layers used in the model. Index of element corresponds to number of layer to set. 
:type batch_norm_opti\"\"\"\n <|body_0|>\n\n def build_model(self):\n \"\"\"Build an actor (policy) network that maps states -> actions.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.state_size = state_size\n self.action_size = action_size\n self.action_high = action_high\n self.action_low = action_low\n self.layer_sizes = layer_sizes\n self.batch_norm_options = batch_norm_options\n self.dropout_options = dropout_options\n self.learning_rate = learning_rate\n self.logger = logger\n self.build_model()\n<|end_body_0|>\n\n<|body_start_1|>\n states = K.layers.Input(shape=(self.state_size,), name='states')\n net = states\n for layer_count in range(len(self.layer_sizes)):\n net = K.layers.Dense(units=self.layer_sizes[layer_count])(net)\n net = K.layers.Activation('relu')(net)\n if self.batch_norm_options[layer_count]:\n net = K.layers.BatchNormalization()(net)\n net = K.layers.Dropout(self.dropout_options[layer_count])(net)\n actions = K.layers.Dense(units=self.action_size, activation='linear', name='raw_actions')(net)\n self.model = K.models.Model(inputs=states, outputs=actions)\n self.logger.debug('Model Summery:')\n self.model.summary(print_fn=self.logger.debug)\n self.optimizer = K.optimizers.Adam(lr=self.learning_rate)\n self.model.compile(loss='mse', optimizer=self.optimizer)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000475", "length_bytes": 3237, "license_type": "no_license", "methods": [{"docstring": "Initialise the Network Model with given number of layes defined with given size. Parameters ========== :param state_size : size of the state space. :type state_size : int :param action_size : size of the action space. :type action_size : int :param action_high : Upper bound of the action space. :type action_high : float :param action_low : Lower bound of the action space. :type action_low : float :param layer_sizes : list of ints defining the size of each layer used in the model :type layer_sizes : list :param batch_norm_options : list of bool defining whether to use Batch Normalisation in layers used in the model. Index of element corresponds to number of layer to set. :type batch_norm_opti", "name": "__init__", "signature": "def __init__(self, state_size, action_size, action_high=1.0, action_low=0.0, layer_sizes=(64, 64), batch_norm_options=(True, True), dropout_options=(0, 0), learning_rate=0.0001, logger=None)"}, {"docstring": "Build an actor (policy) network that maps states -> actions.", "name": "build_model", "signature": "def build_model(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_030735", "prompt": "Implement the Python class `DQNetwork` described below.\n\nClass description:\nStandard QNetwork implementation : Actor(Policy) Model\n\nMethod signatures and docstrings:\n- def __init__(self, state_size, action_size, action_high=1.0, action_low=0.0, layer_sizes=(64, 64), batch_norm_options=(True, True), dropout_options=(0, 0), learning_rate=0.0001, logger=None): Initialise the Network Model with given number of layes defined with given size. Parameters ========== :param state_size : size of the state space. :type state_size : int :param action_size : size of the action space. :type action_size : int :param action_high : Upper bound of the action space. :type action_high : float :param action_low : Lower bound of the action space. 
:type action_low : float :param layer_sizes : list of ints defining the size of each layer used in the model :type layer_sizes : list :param batch_norm_options : list of bool defining whether to use Batch Normalisation in layers used in the model. Index of element corresponds to number of layer to set. :type batch_norm_opti\n- def build_model(self): Build an actor (policy) network that maps states -> actions.", "prompted_full_text": "Implement the Python class `DQNetwork` described below.\n\nClass description:\nStandard QNetwork implementation : Actor(Policy) Model\n\nMethod signatures and docstrings:\n- def __init__(self, state_size, action_size, action_high=1.0, action_low=0.0, layer_sizes=(64, 64), batch_norm_options=(True, True), dropout_options=(0, 0), learning_rate=0.0001, logger=None): Initialise the Network Model with given number of layes defined with given size. Parameters ========== :param state_size : size of the state space. :type state_size : int :param action_size : size of the action space. :type action_size : int :param action_high : Upper bound of the action space. :type action_high : float :param action_low : Lower bound of the action space. :type action_low : float :param layer_sizes : list of ints defining the size of each layer used in the model :type layer_sizes : list :param batch_norm_options : list of bool defining whether to use Batch Normalisation in layers used in the model. Index of element corresponds to number of layer to set. :type batch_norm_opti\n- def build_model(self): Build an actor (policy) network that maps states -> actions.\n\n<|skeleton|>\nclass DQNetwork:\n \"\"\"Standard QNetwork implementation : Actor(Policy) Model\"\"\"\n\n def __init__(self, state_size, action_size, action_high=1.0, action_low=0.0, layer_sizes=(64, 64), batch_norm_options=(True, True), dropout_options=(0, 0), learning_rate=0.0001, logger=None):\n \"\"\"Initialise the Network Model with given number of layes defined with given size. Parameters ========== :param state_size : size of the state space. :type state_size : int :param action_size : size of the action space. :type action_size : int :param action_high : Upper bound of the action space. :type action_high : float :param action_low : Lower bound of the action space. :type action_low : float :param layer_sizes : list of ints defining the size of each layer used in the model :type layer_sizes : list :param batch_norm_options : list of bool defining whether to use Batch Normalisation in layers used in the model. Index of element corresponds to number of layer to set. 
:type batch_norm_opti\"\"\"\n <|body_0|>\n\n def build_model(self):\n \"\"\"Build an actor (policy) network that maps states -> actions.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.state_size = state_size\n self.action_size = action_size\n self.action_high = action_high\n self.action_low = action_low\n self.layer_sizes = layer_sizes\n self.batch_norm_options = batch_norm_options\n self.dropout_options = dropout_options\n self.learning_rate = learning_rate\n self.logger = logger\n self.build_model()\n<|end_body_0|>\n\n<|body_start_1|>\n states = K.layers.Input(shape=(self.state_size,), name='states')\n net = states\n for layer_count in range(len(self.layer_sizes)):\n net = K.layers.Dense(units=self.layer_sizes[layer_count])(net)\n net = K.layers.Activation('relu')(net)\n if self.batch_norm_options[layer_count]:\n net = K.layers.BatchNormalization()(net)\n net = K.layers.Dropout(self.dropout_options[layer_count])(net)\n actions = K.layers.Dense(units=self.action_size, activation='linear', name='raw_actions')(net)\n self.model = K.models.Model(inputs=states, outputs=actions)\n self.logger.debug('Model Summery:')\n self.model.summary(print_fn=self.logger.debug)\n self.optimizer = K.optimizers.Adam(lr=self.learning_rate)\n self.model.compile(loss='mse', optimizer=self.optimizer)\n<|end_body_1|>\n", "revision_id": "4f16a275121fc631420eb2a9708eafec5b91d494", "skeleton": "<|skeleton|>\nclass DQNetwork:\n \"\"\"Standard QNetwork implementation : Actor(Policy) Model\"\"\"\n\n def __init__(self, state_size, action_size, action_high=1.0, action_low=0.0, layer_sizes=(64, 64), batch_norm_options=(True, True), dropout_options=(0, 0), learning_rate=0.0001, logger=None):\n \"\"\"Initialise the Network Model with given number of layes defined with given size. Parameters ========== :param state_size : size of the state space. :type state_size : int :param action_size : size of the action space. :type action_size : int :param action_high : Upper bound of the action space. :type action_high : float :param action_low : Lower bound of the action space. :type action_low : float :param layer_sizes : list of ints defining the size of each layer used in the model :type layer_sizes : list :param batch_norm_options : list of bool defining whether to use Batch Normalisation in layers used in the model. Index of element corresponds to number of layer to set. :type batch_norm_opti\"\"\"\n <|body_0|>\n\n def build_model(self):\n \"\"\"Build an actor (policy) network that maps states -> actions.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DQNetwork:\n \"\"\"Standard QNetwork implementation : Actor(Policy) Model\"\"\"\n\n def __init__(self, state_size, action_size, action_high=1.0, action_low=0.0, layer_sizes=(64, 64), batch_norm_options=(True, True), dropout_options=(0, 0), learning_rate=0.0001, logger=None):\n \"\"\"Initialise the Network Model with given number of layes defined with given size. Parameters ========== :param state_size : size of the state space. :type state_size : int :param action_size : size of the action space. :type action_size : int :param action_high : Upper bound of the action space. :type action_high : float :param action_low : Lower bound of the action space. 
:type action_low : float :param layer_sizes : list of ints defining the size of each layer used in the model :type layer_sizes : list :param batch_norm_options : list of bool defining whether to use Batch Normalisation in layers used in the model. Index of element corresponds to number of layer to set. :type batch_norm_opti\"\"\"\n self.state_size = state_size\n self.action_size = action_size\n self.action_high = action_high\n self.action_low = action_low\n self.layer_sizes = layer_sizes\n self.batch_norm_options = batch_norm_options\n self.dropout_options = dropout_options\n self.learning_rate = learning_rate\n self.logger = logger\n self.build_model()\n\n def build_model(self):\n \"\"\"Build an actor (policy) network that maps states -> actions.\"\"\"\n states = K.layers.Input(shape=(self.state_size,), name='states')\n net = states\n for layer_count in range(len(self.layer_sizes)):\n net = K.layers.Dense(units=self.layer_sizes[layer_count])(net)\n net = K.layers.Activation('relu')(net)\n if self.batch_norm_options[layer_count]:\n net = K.layers.BatchNormalization()(net)\n net = K.layers.Dropout(self.dropout_options[layer_count])(net)\n actions = K.layers.Dense(units=self.action_size, activation='linear', name='raw_actions')(net)\n self.model = K.models.Model(inputs=states, outputs=actions)\n self.logger.debug('Model Summery:')\n self.model.summary(print_fn=self.logger.debug)\n self.optimizer = K.optimizers.Adam(lr=self.learning_rate)\n self.model.compile(loss='mse', optimizer=self.optimizer)\n", "source": "the_stack_v2_python_sparse", "source_path": "DQN-DDQN/Deep-Q-Network/model/DQNetwork.py", "source_repo": "YikangGui/Reinforcement-Learning-Algorithms-Zoo", "split": "val", "star_events_count": 0}
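One portability note on the DQNetwork record above: it calls K.optimizers.Adam(lr=...), the legacy Keras spelling that newer releases renamed to learning_rate=. A minimal sketch of the same dense state-to-action head in current tf.keras (assumes TensorFlow is installed; the builder name and toy sizes below are illustrative, not from the record):

import tensorflow as tf

def build_q_model(state_size, action_size, layer_sizes=(64, 64),
                  batch_norm=(True, True), dropout=(0.0, 0.0), learning_rate=1e-4):
    """Dense states -> raw action values head, compiled with MSE loss."""
    states = tf.keras.Input(shape=(state_size,), name='states')
    net = states
    for units, bn, rate in zip(layer_sizes, batch_norm, dropout):
        net = tf.keras.layers.Dense(units, activation='relu')(net)
        if bn:
            net = tf.keras.layers.BatchNormalization()(net)
        net = tf.keras.layers.Dropout(rate)(net)
    actions = tf.keras.layers.Dense(action_size, activation='linear',
                                    name='raw_actions')(net)
    model = tf.keras.Model(inputs=states, outputs=actions)
    model.compile(loss='mse',
                  optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate))
    return model

model = build_q_model(state_size=8, action_size=4)
model.summary()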
{"blob_id": "709b7a281bac1f3d1f7541e5bddaa9a7e7cf4786", "bodies": ["super(FourierFineCoattention, self).__init__()\nwith self.init_scope():\n self.energy_layer = links.Bilinear(hidden_dim, hidden_dim, 1)\n self.attention_layer_1 = GraphLinear(head, 1, nobias=True)\n self.attention_layer_2 = GraphLinear(head, 1, nobias=True)\n self.lt_layer_1 = GraphLinear(hidden_dim, head, nobias=True)\n self.lt_layer_2 = GraphLinear(hidden_dim, head, nobias=True)\n self.j_layer = GraphLinear(hidden_dim, out_dim)\nself.hidden_dim = hidden_dim\nself.out_dim = out_dim\nself.head = head\nself.activation = activation", "C = self.compute_attention(query=atoms_2, key=atoms_1)\nL_2 = functions.softmax(C, axis=1)\nL_1 = functions.softmax(functions.transpose(C, (0, 2, 1)), axis=1)\nlt_atoms_1 = self.lt_layer_1(atoms_1)\nlt_atoms_2 = self.lt_layer_2(atoms_2)\nlt_atoms_2_C = functions.matmul(L_1, lt_atoms_2)\nH_1 = functions.tanh(functions.add(lt_atoms_1, lt_atoms_2_C))\nlt_atoms_1_C = functions.matmul(L_2, lt_atoms_1)\nH_2 = functions.tanh(functions.add(lt_atoms_2, lt_atoms_1_C))\nattn_1 = functions.softmax(self.attention_layer_1(H_1))\nattn_2 = functions.softmax(self.attention_layer_2(H_2))\ncompact_1 = functions.sum(functions.tile(attn_1, reps=(1, 1, self.out_dim)) * self.j_layer(atoms_1), axis=1)\ncompact_2 = functions.sum(functions.tile(attn_2, reps=(1, 1, self.out_dim)) * self.j_layer(atoms_2), axis=1)\nreturn (compact_1, compact_2)", "energy_layer = self.energy_layer\nmb, N_1, hidden_dim = query.shape\nN_2 = key.shape[1]\nquery_real, query_imag = self.fourier_transform(query)\nkey_real, key_imag = self.fourier_transform(key)\nquery_real = functions.reshape(functions.tile(functions.expand_dims(query_real, axis=2), reps=(1, 1, N_2, 1)), shape=(mb * N_1 * N_2, hidden_dim))\nquery_imag = functions.reshape(functions.tile(functions.expand_dims(query_imag, axis=2), reps=(1, 1, N_2, 1)), shape=(mb * N_1 * N_2, hidden_dim))\nkey_real = functions.reshape(functions.tile(functions.expand_dims(key_real, axis=1), reps=(1, N_1, 1, 1)), shape=(mb * N_1 * N_2, hidden_dim))\nkey_imag = functions.reshape(functions.tile(functions.expand_dims(key_imag, axis=1), reps=(1, N_1, 1, 1)), shape=(mb * N_1 * N_2, hidden_dim))\nenergy = self.activation(energy_layer(key_real, query_real) + energy_layer(key_imag, query_imag))\nenergy = functions.reshape(energy, (mb, N_1, N_2))\nreturn energy", "x_real = x\nx_imag = chainer.as_variable(self.xp.zeros_like(x_real, dtype=self.xp.float32))\nx_fft_real, x_fft_imag = functions.fft((x_real, x_imag))\nreturn (x_fft_real, x_fft_imag)"], "bodies_text": "<|body_start_0|>\n super(FourierFineCoattention, self).__init__()\n with self.init_scope():\n self.energy_layer = links.Bilinear(hidden_dim, hidden_dim, 1)\n self.attention_layer_1 = GraphLinear(head, 1, nobias=True)\n self.attention_layer_2 = GraphLinear(head, 1, nobias=True)\n self.lt_layer_1 = GraphLinear(hidden_dim, head, nobias=True)\n self.lt_layer_2 = GraphLinear(hidden_dim, head, nobias=True)\n self.j_layer = GraphLinear(hidden_dim, out_dim)\n self.hidden_dim = hidden_dim\n self.out_dim = out_dim\n self.head = head\n self.activation = activation\n<|end_body_0|>\n\n<|body_start_1|>\n C = self.compute_attention(query=atoms_2, key=atoms_1)\n L_2 = functions.softmax(C, axis=1)\n L_1 = functions.softmax(functions.transpose(C, (0, 2, 1)), axis=1)\n lt_atoms_1 = self.lt_layer_1(atoms_1)\n lt_atoms_2 = self.lt_layer_2(atoms_2)\n lt_atoms_2_C = functions.matmul(L_1, lt_atoms_2)\n H_1 = functions.tanh(functions.add(lt_atoms_1, lt_atoms_2_C))\n 
lt_atoms_1_C = functions.matmul(L_2, lt_atoms_1)\n H_2 = functions.tanh(functions.add(lt_atoms_2, lt_atoms_1_C))\n attn_1 = functions.softmax(self.attention_layer_1(H_1))\n attn_2 = functions.softmax(self.attention_layer_2(H_2))\n compact_1 = functions.sum(functions.tile(attn_1, reps=(1, 1, self.out_dim)) * self.j_layer(atoms_1), axis=1)\n compact_2 = functions.sum(functions.tile(attn_2, reps=(1, 1, self.out_dim)) * self.j_layer(atoms_2), axis=1)\n return (compact_1, compact_2)\n<|end_body_1|>\n\n<|body_start_2|>\n energy_layer = self.energy_layer\n mb, N_1, hidden_dim = query.shape\n N_2 = key.shape[1]\n query_real, query_imag = self.fourier_transform(query)\n key_real, key_imag = self.fourier_transform(key)\n query_real = functions.reshape(functions.tile(functions.expand_dims(query_real, axis=2), reps=(1, 1, N_2, 1)), shape=(mb * N_1 * N_2, hidden_dim))\n query_imag = functions.reshape(functions.tile(functions.expand_dims(query_imag, axis=2), reps=(1, 1, N_2, 1)), shape=(mb * N_1 * N_2, hidden_dim))\n key_real = functions.reshape(functions.tile(functions.expand_dims(key_real, axis=1), reps=(1, N_1, 1, 1)), shape=(mb * N_1 * N_2, hidden_dim))\n key_imag = functions.reshape(functions.tile(functions.expand_dims(key_imag, axis=1), reps=(1, N_1, 1, 1)), shape=(mb * N_1 * N_2, hidden_dim))\n energy = self.activation(energy_layer(key_real, query_real) + energy_layer(key_imag, query_imag))\n energy = functions.reshape(energy, (mb, N_1, N_2))\n return energy\n<|end_body_2|>\n\n<|body_start_3|>\n x_real = x\n x_imag = chainer.as_variable(self.xp.zeros_like(x_real, dtype=self.xp.float32))\n x_fft_real, x_fft_imag = functions.fft((x_real, x_imag))\n return (x_fft_real, x_fft_imag)\n<|end_body_3|>\n", "class_docstring": "TODO", "class_name": "FourierFineCoattention", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FourierFineCoattention:\n \"\"\"TODO\"\"\"\n\n def __init__(self, hidden_dim, out_dim, head, activation=functions.identity):\n \"\"\":param hidden_dim: dimension of atom representation :param out_dim: dimension of molecular representation :param head: number of heads in attention mechanism\"\"\"\n <|body_0|>\n\n def __call__(self, atoms_1, g_1, atoms_2, g_2):\n \"\"\":param atoms_1: atomic representation of molecule 1, with shape of (mb, N_1, hidden_dim) :param g_1: molecular representation of molecule 1, with shape of (mb, out_dim) :param atoms_2: atomic representation of molecule 2, with shape of (mb, N_2, hidden_dim) :param g_2: molecular representation of molecule 2, with shape of (mb, out_dim) :return:\"\"\"\n <|body_1|>\n\n def compute_attention(self, query, key):\n \"\"\":param query: with shape of (mb, N_1, hidden_dim) :param key: with shape of (mb, N_2, hidden_dim) :return: attn: attention weights (mb, N_1, N_2)\"\"\"\n <|body_2|>\n\n def fourier_transform(self, x):\n \"\"\":param x: (mb, N, hidden_dim) :return: tuple of x_fft_real and x_fft_imag, (mb, N, hidden_dim)\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(FourierFineCoattention, self).__init__()\n with self.init_scope():\n self.energy_layer = links.Bilinear(hidden_dim, hidden_dim, 1)\n self.attention_layer_1 = GraphLinear(head, 1, nobias=True)\n self.attention_layer_2 = GraphLinear(head, 1, nobias=True)\n self.lt_layer_1 = GraphLinear(hidden_dim, head, nobias=True)\n self.lt_layer_2 = GraphLinear(hidden_dim, head, nobias=True)\n self.j_layer = GraphLinear(hidden_dim, out_dim)\n self.hidden_dim = hidden_dim\n self.out_dim = out_dim\n 
self.head = head\n self.activation = activation\n<|end_body_0|>\n\n<|body_start_1|>\n C = self.compute_attention(query=atoms_2, key=atoms_1)\n L_2 = functions.softmax(C, axis=1)\n L_1 = functions.softmax(functions.transpose(C, (0, 2, 1)), axis=1)\n lt_atoms_1 = self.lt_layer_1(atoms_1)\n lt_atoms_2 = self.lt_layer_2(atoms_2)\n lt_atoms_2_C = functions.matmul(L_1, lt_atoms_2)\n H_1 = functions.tanh(functions.add(lt_atoms_1, lt_atoms_2_C))\n lt_atoms_1_C = functions.matmul(L_2, lt_atoms_1)\n H_2 = functions.tanh(functions.add(lt_atoms_2, lt_atoms_1_C))\n attn_1 = functions.softmax(self.attention_layer_1(H_1))\n attn_2 = functions.softmax(self.attention_layer_2(H_2))\n compact_1 = functions.sum(functions.tile(attn_1, reps=(1, 1, self.out_dim)) * self.j_layer(atoms_1), axis=1)\n compact_2 = functions.sum(functions.tile(attn_2, reps=(1, 1, self.out_dim)) * self.j_layer(atoms_2), axis=1)\n return (compact_1, compact_2)\n<|end_body_1|>\n\n<|body_start_2|>\n energy_layer = self.energy_layer\n mb, N_1, hidden_dim = query.shape\n N_2 = key.shape[1]\n query_real, query_imag = self.fourier_transform(query)\n key_real, key_imag = self.fourier_transform(key)\n query_real = functions.reshape(functions.tile(functions.expand_dims(query_real, axis=2), reps=(1, 1, N_2, 1)), shape=(mb * N_1 * N_2, hidden_dim))\n query_imag = functions.reshape(functions.tile(functions.expand_dims(query_imag, axis=2), reps=(1, 1, N_2, 1)), shape=(mb * N_1 * N_2, hidden_dim))\n key_real = functions.reshape(functions.tile(functions.expand_dims(key_real, axis=1), reps=(1, N_1, 1, 1)), shape=(mb * N_1 * N_2, hidden_dim))\n key_imag = functions.reshape(functions.tile(functions.expand_dims(key_imag, axis=1), reps=(1, N_1, 1, 1)), shape=(mb * N_1 * N_2, hidden_dim))\n energy = self.activation(energy_layer(key_real, query_real) + energy_layer(key_imag, query_imag))\n energy = functions.reshape(energy, (mb, N_1, N_2))\n return energy\n<|end_body_2|>\n\n<|body_start_3|>\n x_real = x\n x_imag = chainer.as_variable(self.xp.zeros_like(x_real, dtype=self.xp.float32))\n x_fft_real, x_fft_imag = functions.fft((x_real, x_imag))\n return (x_fft_real, x_fft_imag)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000476", "length_bytes": 25561, "license_type": "permissive", "methods": [{"docstring": ":param hidden_dim: dimension of atom representation :param out_dim: dimension of molecular representation :param head: number of heads in attention mechanism", "name": "__init__", "signature": "def __init__(self, hidden_dim, out_dim, head, activation=functions.identity)"}, {"docstring": ":param atoms_1: atomic representation of molecule 1, with shape of (mb, N_1, hidden_dim) :param g_1: molecular representation of molecule 1, with shape of (mb, out_dim) :param atoms_2: atomic representation of molecule 2, with shape of (mb, N_2, hidden_dim) :param g_2: molecular representation of molecule 2, with shape of (mb, out_dim) :return:", "name": "__call__", "signature": "def __call__(self, atoms_1, g_1, atoms_2, g_2)"}, {"docstring": ":param query: with shape of (mb, N_1, hidden_dim) :param key: with shape of (mb, N_2, hidden_dim) :return: attn: attention weights (mb, N_1, N_2)", "name": "compute_attention", "signature": "def compute_attention(self, query, key)"}, {"docstring": ":param x: (mb, N, hidden_dim) :return: tuple of x_fft_real and x_fft_imag, (mb, N, hidden_dim)", "name": "fourier_transform", "signature": "def fourier_transform(self, x)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_037450", "prompt": "Implement 
the Python class `FourierFineCoattention` described below.\n\nClass description:\nTODO\n\nMethod signatures and docstrings:\n- def __init__(self, hidden_dim, out_dim, head, activation=functions.identity): :param hidden_dim: dimension of atom representation :param out_dim: dimension of molecular representation :param head: number of heads in attention mechanism\n- def __call__(self, atoms_1, g_1, atoms_2, g_2): :param atoms_1: atomic representation of molecule 1, with shape of (mb, N_1, hidden_dim) :param g_1: molecular representation of molecule 1, with shape of (mb, out_dim) :param atoms_2: atomic representation of molecule 2, with shape of (mb, N_2, hidden_dim) :param g_2: molecular representation of molecule 2, with shape of (mb, out_dim) :return:\n- def compute_attention(self, query, key): :param query: with shape of (mb, N_1, hidden_dim) :param key: with shape of (mb, N_2, hidden_dim) :return: attn: attention weights (mb, N_1, N_2)\n- def fourier_transform(self, x): :param x: (mb, N, hidden_dim) :return: tuple of x_fft_real and x_fft_imag, (mb, N, hidden_dim)", "prompted_full_text": "Implement the Python class `FourierFineCoattention` described below.\n\nClass description:\nTODO\n\nMethod signatures and docstrings:\n- def __init__(self, hidden_dim, out_dim, head, activation=functions.identity): :param hidden_dim: dimension of atom representation :param out_dim: dimension of molecular representation :param head: number of heads in attention mechanism\n- def __call__(self, atoms_1, g_1, atoms_2, g_2): :param atoms_1: atomic representation of molecule 1, with shape of (mb, N_1, hidden_dim) :param g_1: molecular representation of molecule 1, with shape of (mb, out_dim) :param atoms_2: atomic representation of molecule 2, with shape of (mb, N_2, hidden_dim) :param g_2: molecular representation of molecule 2, with shape of (mb, out_dim) :return:\n- def compute_attention(self, query, key): :param query: with shape of (mb, N_1, hidden_dim) :param key: with shape of (mb, N_2, hidden_dim) :return: attn: attention weights (mb, N_1, N_2)\n- def fourier_transform(self, x): :param x: (mb, N, hidden_dim) :return: tuple of x_fft_real and x_fft_imag, (mb, N, hidden_dim)\n\n<|skeleton|>\nclass FourierFineCoattention:\n \"\"\"TODO\"\"\"\n\n def __init__(self, hidden_dim, out_dim, head, activation=functions.identity):\n \"\"\":param hidden_dim: dimension of atom representation :param out_dim: dimension of molecular representation :param head: number of heads in attention mechanism\"\"\"\n <|body_0|>\n\n def __call__(self, atoms_1, g_1, atoms_2, g_2):\n \"\"\":param atoms_1: atomic representation of molecule 1, with shape of (mb, N_1, hidden_dim) :param g_1: molecular representation of molecule 1, with shape of (mb, out_dim) :param atoms_2: atomic representation of molecule 2, with shape of (mb, N_2, hidden_dim) :param g_2: molecular representation of molecule 2, with shape of (mb, out_dim) :return:\"\"\"\n <|body_1|>\n\n def compute_attention(self, query, key):\n \"\"\":param query: with shape of (mb, N_1, hidden_dim) :param key: with shape of (mb, N_2, hidden_dim) :return: attn: attention weights (mb, N_1, N_2)\"\"\"\n <|body_2|>\n\n def fourier_transform(self, x):\n \"\"\":param x: (mb, N, hidden_dim) :return: tuple of x_fft_real and x_fft_imag, (mb, N, hidden_dim)\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(FourierFineCoattention, self).__init__()\n with self.init_scope():\n self.energy_layer = links.Bilinear(hidden_dim, hidden_dim, 1)\n self.attention_layer_1 = 
GraphLinear(head, 1, nobias=True)\n self.attention_layer_2 = GraphLinear(head, 1, nobias=True)\n self.lt_layer_1 = GraphLinear(hidden_dim, head, nobias=True)\n self.lt_layer_2 = GraphLinear(hidden_dim, head, nobias=True)\n self.j_layer = GraphLinear(hidden_dim, out_dim)\n self.hidden_dim = hidden_dim\n self.out_dim = out_dim\n self.head = head\n self.activation = activation\n<|end_body_0|>\n\n<|body_start_1|>\n C = self.compute_attention(query=atoms_2, key=atoms_1)\n L_2 = functions.softmax(C, axis=1)\n L_1 = functions.softmax(functions.transpose(C, (0, 2, 1)), axis=1)\n lt_atoms_1 = self.lt_layer_1(atoms_1)\n lt_atoms_2 = self.lt_layer_2(atoms_2)\n lt_atoms_2_C = functions.matmul(L_1, lt_atoms_2)\n H_1 = functions.tanh(functions.add(lt_atoms_1, lt_atoms_2_C))\n lt_atoms_1_C = functions.matmul(L_2, lt_atoms_1)\n H_2 = functions.tanh(functions.add(lt_atoms_2, lt_atoms_1_C))\n attn_1 = functions.softmax(self.attention_layer_1(H_1))\n attn_2 = functions.softmax(self.attention_layer_2(H_2))\n compact_1 = functions.sum(functions.tile(attn_1, reps=(1, 1, self.out_dim)) * self.j_layer(atoms_1), axis=1)\n compact_2 = functions.sum(functions.tile(attn_2, reps=(1, 1, self.out_dim)) * self.j_layer(atoms_2), axis=1)\n return (compact_1, compact_2)\n<|end_body_1|>\n\n<|body_start_2|>\n energy_layer = self.energy_layer\n mb, N_1, hidden_dim = query.shape\n N_2 = key.shape[1]\n query_real, query_imag = self.fourier_transform(query)\n key_real, key_imag = self.fourier_transform(key)\n query_real = functions.reshape(functions.tile(functions.expand_dims(query_real, axis=2), reps=(1, 1, N_2, 1)), shape=(mb * N_1 * N_2, hidden_dim))\n query_imag = functions.reshape(functions.tile(functions.expand_dims(query_imag, axis=2), reps=(1, 1, N_2, 1)), shape=(mb * N_1 * N_2, hidden_dim))\n key_real = functions.reshape(functions.tile(functions.expand_dims(key_real, axis=1), reps=(1, N_1, 1, 1)), shape=(mb * N_1 * N_2, hidden_dim))\n key_imag = functions.reshape(functions.tile(functions.expand_dims(key_imag, axis=1), reps=(1, N_1, 1, 1)), shape=(mb * N_1 * N_2, hidden_dim))\n energy = self.activation(energy_layer(key_real, query_real) + energy_layer(key_imag, query_imag))\n energy = functions.reshape(energy, (mb, N_1, N_2))\n return energy\n<|end_body_2|>\n\n<|body_start_3|>\n x_real = x\n x_imag = chainer.as_variable(self.xp.zeros_like(x_real, dtype=self.xp.float32))\n x_fft_real, x_fft_imag = functions.fft((x_real, x_imag))\n return (x_fft_real, x_fft_imag)\n<|end_body_3|>\n", "revision_id": "21b64a3c8cc9bc33718ae09c65aa917e575132eb", "skeleton": "<|skeleton|>\nclass FourierFineCoattention:\n \"\"\"TODO\"\"\"\n\n def __init__(self, hidden_dim, out_dim, head, activation=functions.identity):\n \"\"\":param hidden_dim: dimension of atom representation :param out_dim: dimension of molecular representation :param head: number of heads in attention mechanism\"\"\"\n <|body_0|>\n\n def __call__(self, atoms_1, g_1, atoms_2, g_2):\n \"\"\":param atoms_1: atomic representation of molecule 1, with shape of (mb, N_1, hidden_dim) :param g_1: molecular representation of molecule 1, with shape of (mb, out_dim) :param atoms_2: atomic representation of molecule 2, with shape of (mb, N_2, hidden_dim) :param g_2: molecular representation of molecule 2, with shape of (mb, out_dim) :return:\"\"\"\n <|body_1|>\n\n def compute_attention(self, query, key):\n \"\"\":param query: with shape of (mb, N_1, hidden_dim) :param key: with shape of (mb, N_2, hidden_dim) :return: attn: attention weights (mb, N_1, N_2)\"\"\"\n <|body_2|>\n\n def 
fourier_transform(self, x):\n \"\"\":param x: (mb, N, hidden_dim) :return: tuple of x_fft_real and x_fft_imag, (mb, N, hidden_dim)\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class FourierFineCoattention:\n \"\"\"TODO\"\"\"\n\n def __init__(self, hidden_dim, out_dim, head, activation=functions.identity):\n \"\"\":param hidden_dim: dimension of atom representation :param out_dim: dimension of molecular representation :param head: number of heads in attention mechanism\"\"\"\n super(FourierFineCoattention, self).__init__()\n with self.init_scope():\n self.energy_layer = links.Bilinear(hidden_dim, hidden_dim, 1)\n self.attention_layer_1 = GraphLinear(head, 1, nobias=True)\n self.attention_layer_2 = GraphLinear(head, 1, nobias=True)\n self.lt_layer_1 = GraphLinear(hidden_dim, head, nobias=True)\n self.lt_layer_2 = GraphLinear(hidden_dim, head, nobias=True)\n self.j_layer = GraphLinear(hidden_dim, out_dim)\n self.hidden_dim = hidden_dim\n self.out_dim = out_dim\n self.head = head\n self.activation = activation\n\n def __call__(self, atoms_1, g_1, atoms_2, g_2):\n \"\"\":param atoms_1: atomic representation of molecule 1, with shape of (mb, N_1, hidden_dim) :param g_1: molecular representation of molecule 1, with shape of (mb, out_dim) :param atoms_2: atomic representation of molecule 2, with shape of (mb, N_2, hidden_dim) :param g_2: molecular representation of molecule 2, with shape of (mb, out_dim) :return:\"\"\"\n C = self.compute_attention(query=atoms_2, key=atoms_1)\n L_2 = functions.softmax(C, axis=1)\n L_1 = functions.softmax(functions.transpose(C, (0, 2, 1)), axis=1)\n lt_atoms_1 = self.lt_layer_1(atoms_1)\n lt_atoms_2 = self.lt_layer_2(atoms_2)\n lt_atoms_2_C = functions.matmul(L_1, lt_atoms_2)\n H_1 = functions.tanh(functions.add(lt_atoms_1, lt_atoms_2_C))\n lt_atoms_1_C = functions.matmul(L_2, lt_atoms_1)\n H_2 = functions.tanh(functions.add(lt_atoms_2, lt_atoms_1_C))\n attn_1 = functions.softmax(self.attention_layer_1(H_1))\n attn_2 = functions.softmax(self.attention_layer_2(H_2))\n compact_1 = functions.sum(functions.tile(attn_1, reps=(1, 1, self.out_dim)) * self.j_layer(atoms_1), axis=1)\n compact_2 = functions.sum(functions.tile(attn_2, reps=(1, 1, self.out_dim)) * self.j_layer(atoms_2), axis=1)\n return (compact_1, compact_2)\n\n def compute_attention(self, query, key):\n \"\"\":param query: with shape of (mb, N_1, hidden_dim) :param key: with shape of (mb, N_2, hidden_dim) :return: attn: attention weights (mb, N_1, N_2)\"\"\"\n energy_layer = self.energy_layer\n mb, N_1, hidden_dim = query.shape\n N_2 = key.shape[1]\n query_real, query_imag = self.fourier_transform(query)\n key_real, key_imag = self.fourier_transform(key)\n query_real = functions.reshape(functions.tile(functions.expand_dims(query_real, axis=2), reps=(1, 1, N_2, 1)), shape=(mb * N_1 * N_2, hidden_dim))\n query_imag = functions.reshape(functions.tile(functions.expand_dims(query_imag, axis=2), reps=(1, 1, N_2, 1)), shape=(mb * N_1 * N_2, hidden_dim))\n key_real = functions.reshape(functions.tile(functions.expand_dims(key_real, axis=1), reps=(1, N_1, 1, 1)), shape=(mb * N_1 * N_2, hidden_dim))\n key_imag = functions.reshape(functions.tile(functions.expand_dims(key_imag, axis=1), reps=(1, N_1, 1, 1)), shape=(mb * N_1 * N_2, hidden_dim))\n energy = self.activation(energy_layer(key_real, query_real) + energy_layer(key_imag, query_imag))\n energy = 
functions.reshape(energy, (mb, N_1, N_2))\n return energy\n\n def fourier_transform(self, x):\n \"\"\":param x: (mb, N, hidden_dim) :return: tuple of x_fft_real and x_fft_imag, (mb, N, hidden_dim)\"\"\"\n x_real = x\n x_imag = chainer.as_variable(self.xp.zeros_like(x_real, dtype=self.xp.float32))\n x_fft_real, x_fft_imag = functions.fft((x_real, x_imag))\n return (x_fft_real, x_fft_imag)\n", "source": "the_stack_v2_python_sparse", "source_path": "models/coattention/nie_coattention.py", "source_repo": "Minys233/GCN-BMP", "split": "val", "star_events_count": 1}
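The co-attention record above scores each query/key pair by applying a learned bilinear form separately to the real and imaginary parts of an FFT over the feature axis, then summing the two scores. A dependency-free NumPy sketch of just that energy computation (W stands in for the learned Bilinear weight; the bias term and activation are omitted for brevity, and the function name is illustrative):

import numpy as np

def fourier_energy(query, key, W):
    """query: (mb, N1, d); key: (mb, N2, d); W: (d, d) -> energy (mb, N1, N2)."""
    q_fft = np.fft.fft(query, axis=-1)
    k_fft = np.fft.fft(key, axis=-1)
    # Bilinear score on the real parts plus the same score on the imaginary
    # parts, mirroring energy(key_real, query_real) + energy(key_imag, query_imag).
    real = np.einsum('bme,ef,bnf->bnm', k_fft.real, W, q_fft.real)
    imag = np.einsum('bme,ef,bnf->bnm', k_fft.imag, W, q_fft.imag)
    return real + imag

rng = np.random.default_rng(0)
q = rng.normal(size=(2, 5, 8))    # mb=2, N1=5, hidden_dim=8
k = rng.normal(size=(2, 7, 8))    # N2=7
W = rng.normal(size=(8, 8))
print(fourier_energy(q, k, W).shape)   # -> (2, 5, 7)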
{"blob_id": "c5990096f3b4ed3aabb1da476f06adc2f1488199", "bodies": ["self.filename = filename\nself.imaname = os.path.basename(filename[:filename.rfind('.')])\nself.imgHDU = self._makeImgHDU(self.filename, self.imaname)", "imgHDU = None\nfitsHDU = pyfits.open(filename, 'update')\nindex = 0\nfor HDU in fitsHDU:\n if HDU.data != None:\n HDU.header['IMANAME'] = imaname\n normData = self._norm_data(HDU.data)\n imgHDU = pyfits.ImageHDU(normData, header=HDU.header, name=imaname)\n print('File %s, ext: %s loaded' % (filename, str(index)))\n break\nfitsHDU.close()\nreturn imgHDU", "cts_sum = Imgdata.sum()\nnormData = Imgdata / cts_sum\nreturn normData"], "bodies_text": "<|body_start_0|>\n self.filename = filename\n self.imaname = os.path.basename(filename[:filename.rfind('.')])\n self.imgHDU = self._makeImgHDU(self.filename, self.imaname)\n<|end_body_0|>\n\n<|body_start_1|>\n imgHDU = None\n fitsHDU = pyfits.open(filename, 'update')\n index = 0\n for HDU in fitsHDU:\n if HDU.data != None:\n HDU.header['IMANAME'] = imaname\n normData = self._norm_data(HDU.data)\n imgHDU = pyfits.ImageHDU(normData, header=HDU.header, name=imaname)\n print('File %s, ext: %s loaded' % (filename, str(index)))\n break\n fitsHDU.close()\n return imgHDU\n<|end_body_1|>\n\n<|body_start_2|>\n cts_sum = Imgdata.sum()\n normData = Imgdata / cts_sum\n return normData\n<|end_body_2|>\n", "class_docstring": "Class for one image template", "class_name": "ArtImage", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ArtImage:\n \"\"\"Class for one image template\"\"\"\n\n def __init__(self, filename):\n \"\"\"Initializer for the class @param filename: name of the spectrum @type filename: string\"\"\"\n <|body_0|>\n\n def _makeImgHDU(self, filename, imaname):\n \"\"\"Extract and return the first non-empty image HDU from the fits The method opens a fits image and goes along its extension. The first extension with a non-zero data part is returned after updating the header with a given image name. @param filename: name of the fits file @type filename: string @param imaname: name of the image @type imaname: string @return: the image hdu @rtype: hdu()\"\"\"\n <|body_1|>\n\n def _norm_data(self, Imgdata):\n \"\"\"Normalize the image data The method normalizes the image data. It uses methods of the data class (numpy or numarray). 
@param Imgdata: the image data @type Imgdata: @return: the normalized image data @rtype: \"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.filename = filename\n self.imaname = os.path.basename(filename[:filename.rfind('.')])\n self.imgHDU = self._makeImgHDU(self.filename, self.imaname)\n<|end_body_0|>\n\n<|body_start_1|>\n imgHDU = None\n fitsHDU = pyfits.open(filename, 'update')\n index = 0\n for HDU in fitsHDU:\n if HDU.data != None:\n HDU.header['IMANAME'] = imaname\n normData = self._norm_data(HDU.data)\n imgHDU = pyfits.ImageHDU(normData, header=HDU.header, name=imaname)\n print('File %s, ext: %s loaded' % (filename, str(index)))\n break\n fitsHDU.close()\n return imgHDU\n<|end_body_1|>\n\n<|body_start_2|>\n cts_sum = Imgdata.sum()\n normData = Imgdata / cts_sum\n return normData\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000477", "length_bytes": 6339, "license_type": "permissive", "methods": [{"docstring": "Initializer for the class @param filename: name of the spectrum @type filename: string", "name": "__init__", "signature": "def __init__(self, filename)"}, {"docstring": "Extract and return the first non-empty image HDU from the fits The method opens a fits image and goes along its extension. The first extension with a non-zero data part is returned after updating the header with a given image name. @param filename: name of the fits file @type filename: string @param imaname: name of the image @type imaname: string @return: the image hdu @rtype: hdu()", "name": "_makeImgHDU", "signature": "def _makeImgHDU(self, filename, imaname)"}, {"docstring": "Normalize the image data The method normalizes the image data. It uses methods of the data class (numpy or numarray). @param Imgdata: the image data @type Imgdata: @return: the normalized image data @rtype: ", "name": "_norm_data", "signature": "def _norm_data(self, Imgdata)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_010155", "prompt": "Implement the Python class `ArtImage` described below.\n\nClass description:\nClass for one image template\n\nMethod signatures and docstrings:\n- def __init__(self, filename): Initializer for the class @param filename: name of the spectrum @type filename: string\n- def _makeImgHDU(self, filename, imaname): Extract and return the first non-empty image HDU from the fits The method opens a fits image and goes along its extension. The first extension with a non-zero data part is returned after updating the header with a given image name. @param filename: name of the fits file @type filename: string @param imaname: name of the image @type imaname: string @return: the image hdu @rtype: hdu()\n- def _norm_data(self, Imgdata): Normalize the image data The method normalizes the image data. It uses methods of the data class (numpy or numarray). @param Imgdata: the image data @type Imgdata: @return: the normalized image data @rtype: ", "prompted_full_text": "Implement the Python class `ArtImage` described below.\n\nClass description:\nClass for one image template\n\nMethod signatures and docstrings:\n- def __init__(self, filename): Initializer for the class @param filename: name of the spectrum @type filename: string\n- def _makeImgHDU(self, filename, imaname): Extract and return the first non-empty image HDU from the fits The method opens a fits image and goes along its extension. The first extension with a non-zero data part is returned after updating the header with a given image name. 
@param filename: name of the fits file @type filename: string @param imaname: name of the image @type imaname: string @return: the image hdu @rtype: hdu()\n- def _norm_data(self, Imgdata): Normalize the image data The method normalizes the image data. It uses methods of the data class (numpy or numarray). @param Imgdata: the image data @type Imgdata: @return: the normalized image data @rtype: \n\n<|skeleton|>\nclass ArtImage:\n \"\"\"Class for one image template\"\"\"\n\n def __init__(self, filename):\n \"\"\"Initializer for the class @param filename: name of the spectrum @type filename: string\"\"\"\n <|body_0|>\n\n def _makeImgHDU(self, filename, imaname):\n \"\"\"Extract and return the first non-empty image HDU from the fits The method opens a fits image and goes along its extension. The first extension with a non-zero data part is returned after updating the header with a given image name. @param filename: name of the fits file @type filename: string @param imaname: name of the image @type imaname: string @return: the image hdu @rtype: hdu()\"\"\"\n <|body_1|>\n\n def _norm_data(self, Imgdata):\n \"\"\"Normalize the image data The method normalizes the image data. It uses methods of the data class (numpy or numarray). @param Imgdata: the image data @type Imgdata: @return: the normalized image data @rtype: \"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.filename = filename\n self.imaname = os.path.basename(filename[:filename.rfind('.')])\n self.imgHDU = self._makeImgHDU(self.filename, self.imaname)\n<|end_body_0|>\n\n<|body_start_1|>\n imgHDU = None\n fitsHDU = pyfits.open(filename, 'update')\n index = 0\n for HDU in fitsHDU:\n if HDU.data != None:\n HDU.header['IMANAME'] = imaname\n normData = self._norm_data(HDU.data)\n imgHDU = pyfits.ImageHDU(normData, header=HDU.header, name=imaname)\n print('File %s, ext: %s loaded' % (filename, str(index)))\n break\n fitsHDU.close()\n return imgHDU\n<|end_body_1|>\n\n<|body_start_2|>\n cts_sum = Imgdata.sum()\n normData = Imgdata / cts_sum\n return normData\n<|end_body_2|>\n", "revision_id": "043c173fd5497c18c2b1bfe8bcff65180bca3996", "skeleton": "<|skeleton|>\nclass ArtImage:\n \"\"\"Class for one image template\"\"\"\n\n def __init__(self, filename):\n \"\"\"Initializer for the class @param filename: name of the spectrum @type filename: string\"\"\"\n <|body_0|>\n\n def _makeImgHDU(self, filename, imaname):\n \"\"\"Extract and return the first non-empty image HDU from the fits The method opens a fits image and goes along its extension. The first extension with a non-zero data part is returned after updating the header with a given image name. @param filename: name of the fits file @type filename: string @param imaname: name of the image @type imaname: string @return: the image hdu @rtype: hdu()\"\"\"\n <|body_1|>\n\n def _norm_data(self, Imgdata):\n \"\"\"Normalize the image data The method normalizes the image data. It uses methods of the data class (numpy or numarray). 
@param Imgdata: the image data @type Imgdata: @return: the normalized image data @rtype: \"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ArtImage:\n \"\"\"Class for one image template\"\"\"\n\n def __init__(self, filename):\n \"\"\"Initializer for the class @param filename: name of the spectrum @type filename: string\"\"\"\n self.filename = filename\n self.imaname = os.path.basename(filename[:filename.rfind('.')])\n self.imgHDU = self._makeImgHDU(self.filename, self.imaname)\n\n def _makeImgHDU(self, filename, imaname):\n \"\"\"Extract and return the first non-empty image HDU from the fits The method opens a fits image and goes along its extension. The first extension with a non-zero data part is returned after updating the header with a given image name. @param filename: name of the fits file @type filename: string @param imaname: name of the image @type imaname: string @return: the image hdu @rtype: hdu()\"\"\"\n imgHDU = None\n fitsHDU = pyfits.open(filename, 'update')\n index = 0\n for HDU in fitsHDU:\n if HDU.data != None:\n HDU.header['IMANAME'] = imaname\n normData = self._norm_data(HDU.data)\n imgHDU = pyfits.ImageHDU(normData, header=HDU.header, name=imaname)\n print('File %s, ext: %s loaded' % (filename, str(index)))\n break\n fitsHDU.close()\n return imgHDU\n\n def _norm_data(self, Imgdata):\n \"\"\"Normalize the image data The method normalizes the image data. It uses methods of the data class (numpy or numarray). @param Imgdata: the image data @type Imgdata: @return: the normalized image data @rtype: \"\"\"\n cts_sum = Imgdata.sum()\n normData = Imgdata / cts_sum\n return normData\n", "source": "the_stack_v2_python_sparse", "source_path": "stsdas/pkg/analysis/slitless/axe/axesrc/templateimages.py", "source_repo": "spacetelescope/stsdas_stripped", "split": "val", "star_events_count": 1}
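A caution on the ArtImage record above: the test "if HDU.data != None:" relies on legacy array-comparison semantics. Under modern NumPy, comparing an array against None with != is elementwise, and the resulting boolean array makes the if statement raise a "truth value is ambiguous" error; the identity test "is not None" avoids this. A minimal sketch of the same extract-and-normalize step using astropy.io.fits, the maintained successor to pyfits (the demo filename is illustrative):

import numpy as np
from astropy.io import fits

def first_image_hdu(filename, imaname):
    """Return a unit-sum-normalized ImageHDU from the first HDU carrying data."""
    with fits.open(filename) as hdul:
        for hdu in hdul:
            if hdu.data is not None:              # identity check, never != None
                header = hdu.header.copy()
                header['IMANAME'] = imaname
                norm = hdu.data / hdu.data.sum()  # normalize counts to unit sum
                return fits.ImageHDU(norm, header=header, name=imaname)
    return None

# Round-trip demo against a file written on the spot.
fits.PrimaryHDU(np.ones((4, 4))).writeto('demo.fits', overwrite=True)
print(first_image_hdu('demo.fits', 'demo').data.sum())   # -> 1.0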
{"blob_id": "fbfa9b9d67280b16f0fb2f9c791ed05f867d9a2e", "bodies": ["super().__init__(n_in=7, n_out=2)\nself._separate_cls = separate_cls\nself._n_heads = n_heads\nself._dropout = dropout\nself._mode = mode", "location_bias, context_bias, pos_emb, q, k, v, mask = inputs\nd_feature = q.shape[-1]\nn_heads = self._n_heads\nif d_feature % n_heads != 0:\n raise ValueError(f'Dimensionality of feature embedding ({d_feature}) is not a multiple of the requested number of attention heads ({n_heads}).')\nper_head_results, dots = DotProductAttention(SplitIntoHeads(n_heads, merged_batch_and_head=False).forward(q), SplitIntoHeads(n_heads, merged_batch_and_head=False).forward(k), SplitIntoHeads(n_heads, merged_batch_and_head=False).forward(v), pos_emb.reshape((-1, n_heads, d_feature // n_heads)), context_bias, location_bias, mask, separate_cls=self._separate_cls, dropout=self._dropout, mode=self._mode, rng=self.rng)\nif self._mode == 'viz':\n self.state = dots\nmerged_results = MergeHeads(n_heads, merged_batch_and_head=False).forward(per_head_results)\nreturn (merged_results, mask)"], "bodies_text": "<|body_start_0|>\n super().__init__(n_in=7, n_out=2)\n self._separate_cls = separate_cls\n self._n_heads = n_heads\n self._dropout = dropout\n self._mode = mode\n<|end_body_0|>\n\n<|body_start_1|>\n location_bias, context_bias, pos_emb, q, k, v, mask = inputs\n d_feature = q.shape[-1]\n n_heads = self._n_heads\n if d_feature % n_heads != 0:\n raise ValueError(f'Dimensionality of feature embedding ({d_feature}) is not a multiple of the requested number of attention heads ({n_heads}).')\n per_head_results, dots = DotProductAttention(SplitIntoHeads(n_heads, merged_batch_and_head=False).forward(q), SplitIntoHeads(n_heads, merged_batch_and_head=False).forward(k), SplitIntoHeads(n_heads, merged_batch_and_head=False).forward(v), pos_emb.reshape((-1, n_heads, d_feature // n_heads)), context_bias, location_bias, mask, separate_cls=self._separate_cls, dropout=self._dropout, mode=self._mode, rng=self.rng)\n if self._mode == 'viz':\n self.state = dots\n merged_results = MergeHeads(n_heads, merged_batch_and_head=False).forward(per_head_results)\n return (merged_results, mask)\n<|end_body_1|>\n", "class_docstring": "Relative attention layer. Layer that maps (location_bias, context_bias, pos_emb, q, k, v, mask) to (activations, mask). This layer type performs the inner workings of one pass of multi-head self-attention. It: - splits queries, keys, and values into multiple 'heads', - splits positional embeddings into multiple 'heads', - computes per-head attention weights from per-head (queries, keys), - applies mask to screen out positions that come from padding tokens, - [in `'train'` mode] applies dropout to attention weights, - uses attention weights to combine per-head values vectors, and - merges per-head results into outgoing activations matching original input activation vector shapes.", "class_name": "RelativeAttention", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RelativeAttention:\n \"\"\"Relative attention layer. Layer that maps (location_bias, context_bias, pos_emb, q, k, v, mask) to (activations, mask). This layer type performs the inner workings of one pass of multi-head self-attention. 
It: - splits queries, keys, and values into multiple 'heads', - splits positional embeddings into multiple 'heads', - computes per-head attention weights from per-head (queries, keys), - applies mask to screen out positions that come from padding tokens, - [in `'train'` mode] applies dropout to attention weights, - uses attention weights to combine per-head values vectors, and - merges per-head results into outgoing activations matching original input activation vector shapes.\"\"\"\n\n def __init__(self, separate_cls, n_heads=1, dropout=0.0, mode='train'):\n \"\"\"Returns a new PureAttention instance. Args: separate_cls: True/False if we separate_cls in calculations. n_heads: Number of attention heads. dropout: Probabilistic rate for dropout applied to attention strengths (based on query-key pairs) before applying them to values. mode: One of `'train'`, `'eval'`, or `'predict'`.\"\"\"\n <|body_0|>\n\n def forward(self, inputs):\n \"\"\"Returns attention-computed activations and unmodified mask. Args: inputs: A (location_bias, context_bias, pos_emb, q, k, v, mask) tuple.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(n_in=7, n_out=2)\n self._separate_cls = separate_cls\n self._n_heads = n_heads\n self._dropout = dropout\n self._mode = mode\n<|end_body_0|>\n\n<|body_start_1|>\n location_bias, context_bias, pos_emb, q, k, v, mask = inputs\n d_feature = q.shape[-1]\n n_heads = self._n_heads\n if d_feature % n_heads != 0:\n raise ValueError(f'Dimensionality of feature embedding ({d_feature}) is not a multiple of the requested number of attention heads ({n_heads}).')\n per_head_results, dots = DotProductAttention(SplitIntoHeads(n_heads, merged_batch_and_head=False).forward(q), SplitIntoHeads(n_heads, merged_batch_and_head=False).forward(k), SplitIntoHeads(n_heads, merged_batch_and_head=False).forward(v), pos_emb.reshape((-1, n_heads, d_feature // n_heads)), context_bias, location_bias, mask, separate_cls=self._separate_cls, dropout=self._dropout, mode=self._mode, rng=self.rng)\n if self._mode == 'viz':\n self.state = dots\n merged_results = MergeHeads(n_heads, merged_batch_and_head=False).forward(per_head_results)\n return (merged_results, mask)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000478", "length_bytes": 18993, "license_type": "permissive", "methods": [{"docstring": "Returns a new PureAttention instance. Args: separate_cls: True/False if we separate_cls in calculations. n_heads: Number of attention heads. dropout: Probabilistic rate for dropout applied to attention strengths (based on query-key pairs) before applying them to values. mode: One of `'train'`, `'eval'`, or `'predict'`.", "name": "__init__", "signature": "def __init__(self, separate_cls, n_heads=1, dropout=0.0, mode='train')"}, {"docstring": "Returns attention-computed activations and unmodified mask. Args: inputs: A (location_bias, context_bias, pos_emb, q, k, v, mask) tuple.", "name": "forward", "signature": "def forward(self, inputs)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_002890", "prompt": "Implement the Python class `RelativeAttention` described below.\n\nClass description:\nRelative attention layer. Layer that maps (location_bias, context_bias, pos_emb, q, k, v, mask) to (activations, mask). This layer type performs the inner workings of one pass of multi-head self-attention. 
It: - splits queries, keys, and values into multiple 'heads', - splits positional embeddings into multiple 'heads', - computes per-head attention weights from per-head (queries, keys), - applies mask to screen out positions that come from padding tokens, - [in `'train'` mode] applies dropout to attention weights, - uses attention weights to combine per-head values vectors, and - merges per-head results into outgoing activations matching original input activation vector shapes.\n\nMethod signatures and docstrings:\n- def __init__(self, separate_cls, n_heads=1, dropout=0.0, mode='train'): Returns a new PureAttention instance. Args: separate_cls: True/False if we separate_cls in calculations. n_heads: Number of attention heads. dropout: Probabilistic rate for dropout applied to attention strengths (based on query-key pairs) before applying them to values. mode: One of `'train'`, `'eval'`, or `'predict'`.\n- def forward(self, inputs): Returns attention-computed activations and unmodified mask. Args: inputs: A (location_bias, context_bias, pos_emb, q, k, v, mask) tuple.", "prompted_full_text": "Implement the Python class `RelativeAttention` described below.\n\nClass description:\nRelative attention layer. Layer that maps (location_bias, context_bias, pos_emb, q, k, v, mask) to (activations, mask). This layer type performs the inner workings of one pass of multi-head self-attention. It: - splits queries, keys, and values into multiple 'heads', - splits positional embeddings into multiple 'heads', - computes per-head attention weights from per-head (queries, keys), - applies mask to screen out positions that come from padding tokens, - [in `'train'` mode] applies dropout to attention weights, - uses attention weights to combine per-head values vectors, and - merges per-head results into outgoing activations matching original input activation vector shapes.\n\nMethod signatures and docstrings:\n- def __init__(self, separate_cls, n_heads=1, dropout=0.0, mode='train'): Returns a new PureAttention instance. Args: separate_cls: True/False if we separate_cls in calculations. n_heads: Number of attention heads. dropout: Probabilistic rate for dropout applied to attention strengths (based on query-key pairs) before applying them to values. mode: One of `'train'`, `'eval'`, or `'predict'`.\n- def forward(self, inputs): Returns attention-computed activations and unmodified mask. Args: inputs: A (location_bias, context_bias, pos_emb, q, k, v, mask) tuple.\n\n<|skeleton|>\nclass RelativeAttention:\n \"\"\"Relative attention layer. Layer that maps (location_bias, context_bias, pos_emb, q, k, v, mask) to (activations, mask). This layer type performs the inner workings of one pass of multi-head self-attention. It: - splits queries, keys, and values into multiple 'heads', - splits positional embeddings into multiple 'heads', - computes per-head attention weights from per-head (queries, keys), - applies mask to screen out positions that come from padding tokens, - [in `'train'` mode] applies dropout to attention weights, - uses attention weights to combine per-head values vectors, and - merges per-head results into outgoing activations matching original input activation vector shapes.\"\"\"\n\n def __init__(self, separate_cls, n_heads=1, dropout=0.0, mode='train'):\n \"\"\"Returns a new PureAttention instance. Args: separate_cls: True/False if we separate_cls in calculations. n_heads: Number of attention heads. 
dropout: Probabilistic rate for dropout applied to attention strengths (based on query-key pairs) before applying them to values. mode: One of `'train'`, `'eval'`, or `'predict'`.\"\"\"\n <|body_0|>\n\n def forward(self, inputs):\n \"\"\"Returns attention-computed activations and unmodified mask. Args: inputs: A (location_bias, context_bias, pos_emb, q, k, v, mask) tuple.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(n_in=7, n_out=2)\n self._separate_cls = separate_cls\n self._n_heads = n_heads\n self._dropout = dropout\n self._mode = mode\n<|end_body_0|>\n\n<|body_start_1|>\n location_bias, context_bias, pos_emb, q, k, v, mask = inputs\n d_feature = q.shape[-1]\n n_heads = self._n_heads\n if d_feature % n_heads != 0:\n raise ValueError(f'Dimensionality of feature embedding ({d_feature}) is not a multiple of the requested number of attention heads ({n_heads}).')\n per_head_results, dots = DotProductAttention(SplitIntoHeads(n_heads, merged_batch_and_head=False).forward(q), SplitIntoHeads(n_heads, merged_batch_and_head=False).forward(k), SplitIntoHeads(n_heads, merged_batch_and_head=False).forward(v), pos_emb.reshape((-1, n_heads, d_feature // n_heads)), context_bias, location_bias, mask, separate_cls=self._separate_cls, dropout=self._dropout, mode=self._mode, rng=self.rng)\n if self._mode == 'viz':\n self.state = dots\n merged_results = MergeHeads(n_heads, merged_batch_and_head=False).forward(per_head_results)\n return (merged_results, mask)\n<|end_body_1|>\n", "revision_id": "1bb3b89427f669f2f0ec84633952e21b68964a23", "skeleton": "<|skeleton|>\nclass RelativeAttention:\n \"\"\"Relative attention layer. Layer that maps (location_bias, context_bias, pos_emb, q, k, v, mask) to (activations, mask). This layer type performs the inner workings of one pass of multi-head self-attention. It: - splits queries, keys, and values into multiple 'heads', - splits positional embeddings into multiple 'heads', - computes per-head attention weights from per-head (queries, keys), - applies mask to screen out positions that come from padding tokens, - [in `'train'` mode] applies dropout to attention weights, - uses attention weights to combine per-head values vectors, and - merges per-head results into outgoing activations matching original input activation vector shapes.\"\"\"\n\n def __init__(self, separate_cls, n_heads=1, dropout=0.0, mode='train'):\n \"\"\"Returns a new PureAttention instance. Args: separate_cls: True/False if we separate_cls in calculations. n_heads: Number of attention heads. dropout: Probabilistic rate for dropout applied to attention strengths (based on query-key pairs) before applying them to values. mode: One of `'train'`, `'eval'`, or `'predict'`.\"\"\"\n <|body_0|>\n\n def forward(self, inputs):\n \"\"\"Returns attention-computed activations and unmodified mask. Args: inputs: A (location_bias, context_bias, pos_emb, q, k, v, mask) tuple.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class RelativeAttention:\n \"\"\"Relative attention layer. Layer that maps (location_bias, context_bias, pos_emb, q, k, v, mask) to (activations, mask). This layer type performs the inner workings of one pass of multi-head self-attention. 
It: - splits queries, keys, and values into multiple 'heads', - splits positional embeddings into multiple 'heads', - computes per-head attention weights from per-head (queries, keys), - applies mask to screen out positions that come from padding tokens, - [in `'train'` mode] applies dropout to attention weights, - uses attention weights to combine per-head values vectors, and - merges per-head results into outgoing activations matching original input activation vector shapes.\"\"\"\n\n def __init__(self, separate_cls, n_heads=1, dropout=0.0, mode='train'):\n \"\"\"Returns a new PureAttention instance. Args: separate_cls: True/False if we separate_cls in calculations. n_heads: Number of attention heads. dropout: Probabilistic rate for dropout applied to attention strengths (based on query-key pairs) before applying them to values. mode: One of `'train'`, `'eval'`, or `'predict'`.\"\"\"\n super().__init__(n_in=7, n_out=2)\n self._separate_cls = separate_cls\n self._n_heads = n_heads\n self._dropout = dropout\n self._mode = mode\n\n def forward(self, inputs):\n \"\"\"Returns attention-computed activations and unmodified mask. Args: inputs: A (location_bias, context_bias, pos_emb, q, k, v, mask) tuple.\"\"\"\n location_bias, context_bias, pos_emb, q, k, v, mask = inputs\n d_feature = q.shape[-1]\n n_heads = self._n_heads\n if d_feature % n_heads != 0:\n raise ValueError(f'Dimensionality of feature embedding ({d_feature}) is not a multiple of the requested number of attention heads ({n_heads}).')\n per_head_results, dots = DotProductAttention(SplitIntoHeads(n_heads, merged_batch_and_head=False).forward(q), SplitIntoHeads(n_heads, merged_batch_and_head=False).forward(k), SplitIntoHeads(n_heads, merged_batch_and_head=False).forward(v), pos_emb.reshape((-1, n_heads, d_feature // n_heads)), context_bias, location_bias, mask, separate_cls=self._separate_cls, dropout=self._dropout, mode=self._mode, rng=self.rng)\n if self._mode == 'viz':\n self.state = dots\n merged_results = MergeHeads(n_heads, merged_batch_and_head=False).forward(per_head_results)\n return (merged_results, mask)\n", "source": "the_stack_v2_python_sparse", "source_path": "trax/layers/research/rel_attention.py", "source_repo": "google/trax", "split": "val", "star_events_count": 8180}
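The divisibility guard in `RelativeAttention.forward` exists because the head split is just a reshape: `d_feature` must factor into `n_heads * d_head`. A standalone numpy sketch of that reshape (trax's `SplitIntoHeads` layer is assumed to behave roughly like this; the helper below is illustrative, not trax code):

import numpy as np

def split_into_heads(x, n_heads):
    batch, seq_len, d_feature = x.shape
    if d_feature % n_heads != 0:  # the same check forward() raises ValueError on
        raise ValueError(f'{d_feature} is not divisible by {n_heads} heads')
    d_head = d_feature // n_heads
    # (batch, seq, heads, d_head) -> (batch, heads, seq, d_head)
    return x.reshape(batch, seq_len, n_heads, d_head).transpose(0, 2, 1, 3)

q = np.zeros((2, 16, 64))
assert split_into_heads(q, n_heads=8).shape == (2, 8, 16, 8)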
{"blob_id": "e4bfe853ccc103948b8889c8037e0efe07706e26", "bodies": ["bd = MySQLConnector.MySQLConnector()\nconsulta = ('SELECT IdTag FROM Tag_Script WHERE IdScript=%s', (p_id_script,))\nrespuesta_bd = bd.execute(consulta)\nreturn respuesta_bd", "bd = MySQLConnector.MySQLConnector()\nconsulta = ('SELECT IdScript FROM Tag_Script WHERE IdTag=%s', (p_id_tag,))\nrespuesta_bd = bd.execute(consulta)\nreturn respuesta_bd", "bd = MySQLConnector.MySQLConnector()\nconsulta = ('DELETE FROM Tag WHERE IdTag=%s', (p_id_tag,))\nrespuesta_bd = bd.execute(consulta)\nreturn respuesta_bd"], "bodies_text": "<|body_start_0|>\n bd = MySQLConnector.MySQLConnector()\n consulta = ('SELECT IdTag FROM Tag_Script WHERE IdScript=%s', (p_id_script,))\n respuesta_bd = bd.execute(consulta)\n return respuesta_bd\n<|end_body_0|>\n\n<|body_start_1|>\n bd = MySQLConnector.MySQLConnector()\n consulta = ('SELECT IdScript FROM Tag_Script WHERE IdTag=%s', (p_id_tag,))\n respuesta_bd = bd.execute(consulta)\n return respuesta_bd\n<|end_body_1|>\n\n<|body_start_2|>\n bd = MySQLConnector.MySQLConnector()\n consulta = ('DELETE FROM Tag WHERE IdTag=%s', (p_id_tag,))\n respuesta_bd = bd.execute(consulta)\n return respuesta_bd\n<|end_body_2|>\n", "class_docstring": "", "class_name": "GestorTag", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GestorTag:\n\n def obtener_tags_script(self, p_id_script):\n \"\"\"Dado el identificador de un Script. Obtenemos los Tags asociados al mismo :param p_id_script: El identificador del script :return:\"\"\"\n <|body_0|>\n\n def obtener_scripts_del_tag(self, p_id_tag):\n \"\"\"Dado el identificador de un Tag. Obtenemos la lista de Scripts asociada al mismo :param p_id_tag: :return:\"\"\"\n <|body_1|>\n\n def borrar_tag(self, p_id_tag):\n \"\"\"Dado el identificador de un TAg. Borra del sistema el TAg :param p_id_tag: El identificador de un TAg :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n bd = MySQLConnector.MySQLConnector()\n consulta = ('SELECT IdTag FROM Tag_Script WHERE IdScript=%s', (p_id_script,))\n respuesta_bd = bd.execute(consulta)\n return respuesta_bd\n<|end_body_0|>\n\n<|body_start_1|>\n bd = MySQLConnector.MySQLConnector()\n consulta = ('SELECT IdScript FROM Tag_Script WHERE IdTag=%s', (p_id_tag,))\n respuesta_bd = bd.execute(consulta)\n return respuesta_bd\n<|end_body_1|>\n\n<|body_start_2|>\n bd = MySQLConnector.MySQLConnector()\n consulta = ('DELETE FROM Tag WHERE IdTag=%s', (p_id_tag,))\n respuesta_bd = bd.execute(consulta)\n return respuesta_bd\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000479", "length_bytes": 1795, "license_type": "no_license", "methods": [{"docstring": "Dado el identificador de un Script. Obtenemos los Tags asociados al mismo :param p_id_script: El identificador del script :return:", "name": "obtener_tags_script", "signature": "def obtener_tags_script(self, p_id_script)"}, {"docstring": "Dado el identificador de un Tag. Obtenemos la lista de Scripts asociada al mismo :param p_id_tag: :return:", "name": "obtener_scripts_del_tag", "signature": "def obtener_scripts_del_tag(self, p_id_tag)"}, {"docstring": "Dado el identificador de un TAg. 
Borra del sistema el TAg :param p_id_tag: El identificador de un TAg :return:", "name": "borrar_tag", "signature": "def borrar_tag(self, p_id_tag)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_035992", "prompt": "Implement the Python class `GestorTag` described below.\n\nClass description:\nImplement the GestorTag class.\n\nMethod signatures and docstrings:\n- def obtener_tags_script(self, p_id_script): Dado el identificador de un Script. Obtenemos los Tags asociados al mismo :param p_id_script: El identificador del script :return:\n- def obtener_scripts_del_tag(self, p_id_tag): Dado el identificador de un Tag. Obtenemos la lista de Scripts asociada al mismo :param p_id_tag: :return:\n- def borrar_tag(self, p_id_tag): Dado el identificador de un TAg. Borra del sistema el TAg :param p_id_tag: El identificador de un TAg :return:", "prompted_full_text": "Implement the Python class `GestorTag` described below.\n\nClass description:\nImplement the GestorTag class.\n\nMethod signatures and docstrings:\n- def obtener_tags_script(self, p_id_script): Dado el identificador de un Script. Obtenemos los Tags asociados al mismo :param p_id_script: El identificador del script :return:\n- def obtener_scripts_del_tag(self, p_id_tag): Dado el identificador de un Tag. Obtenemos la lista de Scripts asociada al mismo :param p_id_tag: :return:\n- def borrar_tag(self, p_id_tag): Dado el identificador de un TAg. Borra del sistema el TAg :param p_id_tag: El identificador de un TAg :return:\n\n<|skeleton|>\nclass GestorTag:\n\n def obtener_tags_script(self, p_id_script):\n \"\"\"Dado el identificador de un Script. Obtenemos los Tags asociados al mismo :param p_id_script: El identificador del script :return:\"\"\"\n <|body_0|>\n\n def obtener_scripts_del_tag(self, p_id_tag):\n \"\"\"Dado el identificador de un Tag. Obtenemos la lista de Scripts asociada al mismo :param p_id_tag: :return:\"\"\"\n <|body_1|>\n\n def borrar_tag(self, p_id_tag):\n \"\"\"Dado el identificador de un TAg. Borra del sistema el TAg :param p_id_tag: El identificador de un TAg :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n bd = MySQLConnector.MySQLConnector()\n consulta = ('SELECT IdTag FROM Tag_Script WHERE IdScript=%s', (p_id_script,))\n respuesta_bd = bd.execute(consulta)\n return respuesta_bd\n<|end_body_0|>\n\n<|body_start_1|>\n bd = MySQLConnector.MySQLConnector()\n consulta = ('SELECT IdScript FROM Tag_Script WHERE IdTag=%s', (p_id_tag,))\n respuesta_bd = bd.execute(consulta)\n return respuesta_bd\n<|end_body_1|>\n\n<|body_start_2|>\n bd = MySQLConnector.MySQLConnector()\n consulta = ('DELETE FROM Tag WHERE IdTag=%s', (p_id_tag,))\n respuesta_bd = bd.execute(consulta)\n return respuesta_bd\n<|end_body_2|>\n", "revision_id": "7fa252a193b934fd192763b6168bb48eb4542aed", "skeleton": "<|skeleton|>\nclass GestorTag:\n\n def obtener_tags_script(self, p_id_script):\n \"\"\"Dado el identificador de un Script. Obtenemos los Tags asociados al mismo :param p_id_script: El identificador del script :return:\"\"\"\n <|body_0|>\n\n def obtener_scripts_del_tag(self, p_id_tag):\n \"\"\"Dado el identificador de un Tag. Obtenemos la lista de Scripts asociada al mismo :param p_id_tag: :return:\"\"\"\n <|body_1|>\n\n def borrar_tag(self, p_id_tag):\n \"\"\"Dado el identificador de un TAg. 
Borra del sistema el TAg :param p_id_tag: El identificador de un TAg :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class GestorTag:\n def obtener_tags_script(self, p_id_script):\n \"\"\"Dado el identificador de un Script. Obtenemos los Tags asociados al mismo :param p_id_script: El identificador del script :return:\"\"\"\n bd = MySQLConnector.MySQLConnector()\n consulta = ('SELECT IdTag FROM Tag_Script WHERE IdScript=%s', (p_id_script,))\n respuesta_bd = bd.execute(consulta)\n return respuesta_bd\n\n def obtener_scripts_del_tag(self, p_id_tag):\n \"\"\"Dado el identificador de un Tag. Obtenemos la lista de Scripts asociada al mismo :param p_id_tag: :return:\"\"\"\n bd = MySQLConnector.MySQLConnector()\n consulta = ('SELECT IdScript FROM Tag_Script WHERE IdTag=%s', (p_id_tag,))\n respuesta_bd = bd.execute(consulta)\n return respuesta_bd\n\n def borrar_tag(self, p_id_tag):\n \"\"\"Dado el identificador de un TAg. Borra del sistema el TAg :param p_id_tag: El identificador de un TAg :return:\"\"\"\n bd = MySQLConnector.MySQLConnector()\n consulta = ('DELETE FROM Tag WHERE IdTag=%s', (p_id_tag,))\n respuesta_bd = bd.execute(consulta)\n return respuesta_bd\n", "source": "the_stack_v2_python_sparse", "source_path": "src/packControladoras/GestorTag.py", "source_repo": "rubenmulero/Akeko_Admin", "split": "val", "star_events_count": 0}
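All three `GestorTag` bodies follow one pattern: open a `MySQLConnector`, pass a `(sql, params)` tuple to `execute`, and return the result (the Spanish docstrings translate as: fetch the tags of a script, fetch the scripts of a tag, delete a tag). A hedged sketch of that shared pattern, assuming the same tuple-accepting `execute` contract; `MySQLConnector` is an external module the record relies on but does not show:

import MySQLConnector

def _run(sql, params):
    # One connection per call, mirroring the record's bodies.
    bd = MySQLConnector.MySQLConnector()
    return bd.execute((sql, params))

def obtener_tags_script(p_id_script):
    return _run('SELECT IdTag FROM Tag_Script WHERE IdScript=%s', (p_id_script,))

def borrar_tag(p_id_tag):
    return _run('DELETE FROM Tag WHERE IdTag=%s', (p_id_tag,))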
{"blob_id": "934cc4adf4784cdbedeaa5bc022b4ec99423d927", "bodies": ["if project == 'CMIP6':\n required = [{'short_name': 'o3'}, {'short_name': 'ps'}]\nelse:\n required = [{'short_name': 'tro3'}, {'short_name': 'ps'}]\nreturn required", "tro3_cube = cubes.extract_cube(iris.Constraint(name='mole_fraction_of_ozone_in_air'))\nps_cube = cubes.extract_cube(iris.Constraint(name='surface_air_pressure'))\np_layer_widths = pressure_level_widths(tro3_cube, ps_cube, top_limit=0.0)\ntoz_cube = tro3_cube * p_layer_widths / STANDARD_GRAVITY * MW_O3 / MW_AIR\ntoz_cube = toz_cube.collapsed('air_pressure', iris.analysis.SUM)\ntoz_cube.units = tro3_cube.units * p_layer_widths.units / STANDARD_GRAVITY_UNIT * MW_O3_UNIT / MW_AIR_UNIT\ntoz_cube = toz_cube / MW_O3 * AVOGADRO_CONST\ntoz_cube.units = toz_cube.units / MW_O3_UNIT * AVOGADRO_CONST_UNIT\ntoz_cube.convert_units(DOBSON_UNIT)\ntoz_cube.units = 'DU'\nreturn toz_cube"], "bodies_text": "<|body_start_0|>\n if project == 'CMIP6':\n required = [{'short_name': 'o3'}, {'short_name': 'ps'}]\n else:\n required = [{'short_name': 'tro3'}, {'short_name': 'ps'}]\n return required\n<|end_body_0|>\n\n<|body_start_1|>\n tro3_cube = cubes.extract_cube(iris.Constraint(name='mole_fraction_of_ozone_in_air'))\n ps_cube = cubes.extract_cube(iris.Constraint(name='surface_air_pressure'))\n p_layer_widths = pressure_level_widths(tro3_cube, ps_cube, top_limit=0.0)\n toz_cube = tro3_cube * p_layer_widths / STANDARD_GRAVITY * MW_O3 / MW_AIR\n toz_cube = toz_cube.collapsed('air_pressure', iris.analysis.SUM)\n toz_cube.units = tro3_cube.units * p_layer_widths.units / STANDARD_GRAVITY_UNIT * MW_O3_UNIT / MW_AIR_UNIT\n toz_cube = toz_cube / MW_O3 * AVOGADRO_CONST\n toz_cube.units = toz_cube.units / MW_O3_UNIT * AVOGADRO_CONST_UNIT\n toz_cube.convert_units(DOBSON_UNIT)\n toz_cube.units = 'DU'\n return toz_cube\n<|end_body_1|>\n", "class_docstring": "Derivation of variable `toz`.", "class_name": "DerivedVariable", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DerivedVariable:\n \"\"\"Derivation of variable `toz`.\"\"\"\n\n def required(project):\n \"\"\"Declare the variables needed for derivation.\"\"\"\n <|body_0|>\n\n def calculate(cubes):\n \"\"\"Compute total column ozone. Note ---- The surface pressure is used as a lower integration bound. 
A fixed upper integration bound of 0 Pa is used.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if project == 'CMIP6':\n required = [{'short_name': 'o3'}, {'short_name': 'ps'}]\n else:\n required = [{'short_name': 'tro3'}, {'short_name': 'ps'}]\n return required\n<|end_body_0|>\n\n<|body_start_1|>\n tro3_cube = cubes.extract_cube(iris.Constraint(name='mole_fraction_of_ozone_in_air'))\n ps_cube = cubes.extract_cube(iris.Constraint(name='surface_air_pressure'))\n p_layer_widths = pressure_level_widths(tro3_cube, ps_cube, top_limit=0.0)\n toz_cube = tro3_cube * p_layer_widths / STANDARD_GRAVITY * MW_O3 / MW_AIR\n toz_cube = toz_cube.collapsed('air_pressure', iris.analysis.SUM)\n toz_cube.units = tro3_cube.units * p_layer_widths.units / STANDARD_GRAVITY_UNIT * MW_O3_UNIT / MW_AIR_UNIT\n toz_cube = toz_cube / MW_O3 * AVOGADRO_CONST\n toz_cube.units = toz_cube.units / MW_O3_UNIT * AVOGADRO_CONST_UNIT\n toz_cube.convert_units(DOBSON_UNIT)\n toz_cube.units = 'DU'\n return toz_cube\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000480", "length_bytes": 2234, "license_type": "permissive", "methods": [{"docstring": "Declare the variables needed for derivation.", "name": "required", "signature": "def required(project)"}, {"docstring": "Compute total column ozone. Note ---- The surface pressure is used as a lower integration bound. A fixed upper integration bound of 0 Pa is used.", "name": "calculate", "signature": "def calculate(cubes)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_031358", "prompt": "Implement the Python class `DerivedVariable` described below.\n\nClass description:\nDerivation of variable `toz`.\n\nMethod signatures and docstrings:\n- def required(project): Declare the variables needed for derivation.\n- def calculate(cubes): Compute total column ozone. Note ---- The surface pressure is used as a lower integration bound. A fixed upper integration bound of 0 Pa is used.", "prompted_full_text": "Implement the Python class `DerivedVariable` described below.\n\nClass description:\nDerivation of variable `toz`.\n\nMethod signatures and docstrings:\n- def required(project): Declare the variables needed for derivation.\n- def calculate(cubes): Compute total column ozone. Note ---- The surface pressure is used as a lower integration bound. A fixed upper integration bound of 0 Pa is used.\n\n<|skeleton|>\nclass DerivedVariable:\n \"\"\"Derivation of variable `toz`.\"\"\"\n\n def required(project):\n \"\"\"Declare the variables needed for derivation.\"\"\"\n <|body_0|>\n\n def calculate(cubes):\n \"\"\"Compute total column ozone. Note ---- The surface pressure is used as a lower integration bound. 
A fixed upper integration bound of 0 Pa is used.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if project == 'CMIP6':\n required = [{'short_name': 'o3'}, {'short_name': 'ps'}]\n else:\n required = [{'short_name': 'tro3'}, {'short_name': 'ps'}]\n return required\n<|end_body_0|>\n\n<|body_start_1|>\n tro3_cube = cubes.extract_cube(iris.Constraint(name='mole_fraction_of_ozone_in_air'))\n ps_cube = cubes.extract_cube(iris.Constraint(name='surface_air_pressure'))\n p_layer_widths = pressure_level_widths(tro3_cube, ps_cube, top_limit=0.0)\n toz_cube = tro3_cube * p_layer_widths / STANDARD_GRAVITY * MW_O3 / MW_AIR\n toz_cube = toz_cube.collapsed('air_pressure', iris.analysis.SUM)\n toz_cube.units = tro3_cube.units * p_layer_widths.units / STANDARD_GRAVITY_UNIT * MW_O3_UNIT / MW_AIR_UNIT\n toz_cube = toz_cube / MW_O3 * AVOGADRO_CONST\n toz_cube.units = toz_cube.units / MW_O3_UNIT * AVOGADRO_CONST_UNIT\n toz_cube.convert_units(DOBSON_UNIT)\n toz_cube.units = 'DU'\n return toz_cube\n<|end_body_1|>\n", "revision_id": "d5187438fea2928644cb53ecb26c6adb1e4cc947", "skeleton": "<|skeleton|>\nclass DerivedVariable:\n \"\"\"Derivation of variable `toz`.\"\"\"\n\n def required(project):\n \"\"\"Declare the variables needed for derivation.\"\"\"\n <|body_0|>\n\n def calculate(cubes):\n \"\"\"Compute total column ozone. Note ---- The surface pressure is used as a lower integration bound. A fixed upper integration bound of 0 Pa is used.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DerivedVariable:\n \"\"\"Derivation of variable `toz`.\"\"\"\n\n def required(project):\n \"\"\"Declare the variables needed for derivation.\"\"\"\n if project == 'CMIP6':\n required = [{'short_name': 'o3'}, {'short_name': 'ps'}]\n else:\n required = [{'short_name': 'tro3'}, {'short_name': 'ps'}]\n return required\n\n def calculate(cubes):\n \"\"\"Compute total column ozone. Note ---- The surface pressure is used as a lower integration bound. A fixed upper integration bound of 0 Pa is used.\"\"\"\n tro3_cube = cubes.extract_cube(iris.Constraint(name='mole_fraction_of_ozone_in_air'))\n ps_cube = cubes.extract_cube(iris.Constraint(name='surface_air_pressure'))\n p_layer_widths = pressure_level_widths(tro3_cube, ps_cube, top_limit=0.0)\n toz_cube = tro3_cube * p_layer_widths / STANDARD_GRAVITY * MW_O3 / MW_AIR\n toz_cube = toz_cube.collapsed('air_pressure', iris.analysis.SUM)\n toz_cube.units = tro3_cube.units * p_layer_widths.units / STANDARD_GRAVITY_UNIT * MW_O3_UNIT / MW_AIR_UNIT\n toz_cube = toz_cube / MW_O3 * AVOGADRO_CONST\n toz_cube.units = toz_cube.units / MW_O3_UNIT * AVOGADRO_CONST_UNIT\n toz_cube.convert_units(DOBSON_UNIT)\n toz_cube.units = 'DU'\n return toz_cube\n", "source": "the_stack_v2_python_sparse", "source_path": "esmvalcore/preprocessor/_derive/toz.py", "source_repo": "ESMValGroup/ESMValCore", "split": "val", "star_events_count": 41}
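Reading the unit algebra out of `calculate`: each pressure layer contributes the ozone mole fraction times the layer's mass per unit area, the molar-mass ratio converts air to ozone, and the final `/ MW_O3 * AVOGADRO_CONST` turns mass per area into molecules per area before the Dobson-unit conversion. Written out as a formula (a reconstruction from the code, not quoted from ESMValCore documentation):

% Total column ozone in molecules per unit area, before conversion to DU:
\mathrm{TOZ}
  = \frac{N_A}{M_{\mathrm{O_3}}}
    \sum_k x_{\mathrm{O_3},k}\,\frac{\Delta p_k}{g}\,
    \frac{M_{\mathrm{O_3}}}{M_{\mathrm{air}}}
  = \frac{N_A}{g\,M_{\mathrm{air}}} \sum_k x_{\mathrm{O_3},k}\,\Delta p_k

with $x_{\mathrm{O_3},k}$ the ozone mole fraction and $\Delta p_k$ the pressure width of level $k$, integrated from the surface pressure up to the fixed 0 Pa bound.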
{"blob_id": "433bda39a1ee43e889f54d5ac8a1c57d5dc1abe2", "bodies": ["super(WinRssi, self).__init__()\nself.connection = connection\nself._expression = None\nself.key = 'rssi'\nreturn", "if self._expression is None:\n self._expression = re.compile(oatbran.NAMED(n=self.key, e='-' + oatbran.INTEGER))\nreturn self._expression", "self.logger.debug('Validating: {0}'.format(line))\nif 'Unable to find the wireless interface.' in line:\n raise WinRssiError(line)\nif 'The group or resource is not in the correct state' in line:\n raise WinRssiError(line)\nreturn", "output = self.connection.wifi('rssi')\nfor line in output.output:\n match = self.expression.search(line)\n if match:\n self.logger.debug('Matched: {0}'.format(line))\n return match.groupdict()[self.key]\n self.validate(line)\nreturn NA"], "bodies_text": "<|body_start_0|>\n super(WinRssi, self).__init__()\n self.connection = connection\n self._expression = None\n self.key = 'rssi'\n return\n<|end_body_0|>\n\n<|body_start_1|>\n if self._expression is None:\n self._expression = re.compile(oatbran.NAMED(n=self.key, e='-' + oatbran.INTEGER))\n return self._expression\n<|end_body_1|>\n\n<|body_start_2|>\n self.logger.debug('Validating: {0}'.format(line))\n if 'Unable to find the wireless interface.' in line:\n raise WinRssiError(line)\n if 'The group or resource is not in the correct state' in line:\n raise WinRssiError(line)\n return\n<|end_body_2|>\n\n<|body_start_3|>\n output = self.connection.wifi('rssi')\n for line in output.output:\n match = self.expression.search(line)\n if match:\n self.logger.debug('Matched: {0}'.format(line))\n return match.groupdict()[self.key]\n self.validate(line)\n return NA\n<|end_body_3|>\n", "class_docstring": "A class to get the rssi via an installed version of miller's rssi puller.", "class_name": "WinRssi", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass WinRssi:\n \"\"\"A class to get the rssi via an installed version of miller's rssi puller.\"\"\"\n\n def __init__(self, connection):\n \"\"\":param: - `connection`: a connection to the windows device\"\"\"\n <|body_0|>\n\n def expression(self):\n \"\"\":return: compiled regular expression to match valid rssi output\"\"\"\n <|body_1|>\n\n def validate(self, line):\n \"\"\":parameter: - `line`: a line of output\"\"\"\n <|body_2|>\n\n def __call__(self):\n \"\"\":return: the rssi value :raises: CommandError if the rssi can't be retrieved.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(WinRssi, self).__init__()\n self.connection = connection\n self._expression = None\n self.key = 'rssi'\n return\n<|end_body_0|>\n\n<|body_start_1|>\n if self._expression is None:\n self._expression = re.compile(oatbran.NAMED(n=self.key, e='-' + oatbran.INTEGER))\n return self._expression\n<|end_body_1|>\n\n<|body_start_2|>\n self.logger.debug('Validating: {0}'.format(line))\n if 'Unable to find the wireless interface.' 
in line:\n raise WinRssiError(line)\n if 'The group or resource is not in the correct state' in line:\n raise WinRssiError(line)\n return\n<|end_body_2|>\n\n<|body_start_3|>\n output = self.connection.wifi('rssi')\n for line in output.output:\n match = self.expression.search(line)\n if match:\n self.logger.debug('Matched: {0}'.format(line))\n return match.groupdict()[self.key]\n self.validate(line)\n return NA\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000481", "length_bytes": 1925, "license_type": "permissive", "methods": [{"docstring": ":param: - `connection`: a connection to the windows device", "name": "__init__", "signature": "def __init__(self, connection)"}, {"docstring": ":return: compiled regular expression to match valid rssi output", "name": "expression", "signature": "def expression(self)"}, {"docstring": ":parameter: - `line`: a line of output", "name": "validate", "signature": "def validate(self, line)"}, {"docstring": ":return: the rssi value :raises: CommandError if the rssi can't be retrieved.", "name": "__call__", "signature": "def __call__(self)"}], "n_methods": 4, "prompt": "Implement the Python class `WinRssi` described below.\n\nClass description:\nA class to get the rssi via an installed version of miller's rssi puller.\n\nMethod signatures and docstrings:\n- def __init__(self, connection): :param: - `connection`: a connection to the windows device\n- def expression(self): :return: compiled regular expression to match valid rssi output\n- def validate(self, line): :parameter: - `line`: a line of output\n- def __call__(self): :return: the rssi value :raises: CommandError if the rssi can't be retrieved.", "prompted_full_text": "Implement the Python class `WinRssi` described below.\n\nClass description:\nA class to get the rssi via an installed version of miller's rssi puller.\n\nMethod signatures and docstrings:\n- def __init__(self, connection): :param: - `connection`: a connection to the windows device\n- def expression(self): :return: compiled regular expression to match valid rssi output\n- def validate(self, line): :parameter: - `line`: a line of output\n- def __call__(self): :return: the rssi value :raises: CommandError if the rssi can't be retrieved.\n\n<|skeleton|>\nclass WinRssi:\n \"\"\"A class to get the rssi via an installed version of miller's rssi puller.\"\"\"\n\n def __init__(self, connection):\n \"\"\":param: - `connection`: a connection to the windows device\"\"\"\n <|body_0|>\n\n def expression(self):\n \"\"\":return: compiled regular expression to match valid rssi output\"\"\"\n <|body_1|>\n\n def validate(self, line):\n \"\"\":parameter: - `line`: a line of output\"\"\"\n <|body_2|>\n\n def __call__(self):\n \"\"\":return: the rssi value :raises: CommandError if the rssi can't be retrieved.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(WinRssi, self).__init__()\n self.connection = connection\n self._expression = None\n self.key = 'rssi'\n return\n<|end_body_0|>\n\n<|body_start_1|>\n if self._expression is None:\n self._expression = re.compile(oatbran.NAMED(n=self.key, e='-' + oatbran.INTEGER))\n return self._expression\n<|end_body_1|>\n\n<|body_start_2|>\n self.logger.debug('Validating: {0}'.format(line))\n if 'Unable to find the wireless interface.' 
in line:\n raise WinRssiError(line)\n if 'The group or resource is not in the correct state' in line:\n raise WinRssiError(line)\n return\n<|end_body_2|>\n\n<|body_start_3|>\n output = self.connection.wifi('rssi')\n for line in output.output:\n match = self.expression.search(line)\n if match:\n self.logger.debug('Matched: {0}'.format(line))\n return match.groupdict()[self.key]\n self.validate(line)\n return NA\n<|end_body_3|>\n", "revision_id": "b4d1c77e1d611fe2b30768b42bdc7493afb0ea95", "skeleton": "<|skeleton|>\nclass WinRssi:\n \"\"\"A class to get the rssi via an installed version of miller's rssi puller.\"\"\"\n\n def __init__(self, connection):\n \"\"\":param: - `connection`: a connection to the windows device\"\"\"\n <|body_0|>\n\n def expression(self):\n \"\"\":return: compiled regular expression to match valid rssi output\"\"\"\n <|body_1|>\n\n def validate(self, line):\n \"\"\":parameter: - `line`: a line of output\"\"\"\n <|body_2|>\n\n def __call__(self):\n \"\"\":return: the rssi value :raises: CommandError if the rssi can't be retrieved.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class WinRssi:\n \"\"\"A class to get the rssi via an installed version of miller's rssi puller.\"\"\"\n\n def __init__(self, connection):\n \"\"\":param: - `connection`: a connection to the windows device\"\"\"\n super(WinRssi, self).__init__()\n self.connection = connection\n self._expression = None\n self.key = 'rssi'\n return\n\n def expression(self):\n \"\"\":return: compiled regular expression to match valid rssi output\"\"\"\n if self._expression is None:\n self._expression = re.compile(oatbran.NAMED(n=self.key, e='-' + oatbran.INTEGER))\n return self._expression\n\n def validate(self, line):\n \"\"\":parameter: - `line`: a line of output\"\"\"\n self.logger.debug('Validating: {0}'.format(line))\n if 'Unable to find the wireless interface.' in line:\n raise WinRssiError(line)\n if 'The group or resource is not in the correct state' in line:\n raise WinRssiError(line)\n return\n\n def __call__(self):\n \"\"\":return: the rssi value :raises: CommandError if the rssi can't be retrieved.\"\"\"\n output = self.connection.wifi('rssi')\n for line in output.output:\n match = self.expression.search(line)\n if match:\n self.logger.debug('Matched: {0}'.format(line))\n return match.groupdict()[self.key]\n self.validate(line)\n return NA\n", "source": "the_stack_v2_python_sparse", "source_path": "apetools/commands/winrssi.py", "source_repo": "russell-n/oldape", "split": "val", "star_events_count": 0}
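The `expression` property builds its pattern through the `oatbran` helpers; assuming `oatbran.NAMED(n, e)` expands to a named group and `oatbran.INTEGER` to a digit run (an assumption about oatbran, not its verified output), the compiled result reduces to the plain regex below:

import re

# Named group 'rssi' capturing a negative integer, as WinRssi.expression builds it.
expression = re.compile(r'(?P<rssi>-\d+)')

match = expression.search('rssi for wlan0: -57 dBm')
assert match is not None and match.groupdict()['rssi'] == '-57'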
{"blob_id": "b32b542e96b92f6210d5721f7677c942eaa1f2bc", "bodies": ["super().__init__()\nself.conv1 = nn.Conv2d(1, 64, kernel_size=(2, 2), padding=1)\nself.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)\nself.dropout1 = nn.Dropout(0.3)\nself.conv2 = nn.Conv2d(64, 32, kernel_size=(2, 2), padding=1)\nself.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)\nself.dropout2 = nn.Dropout(0.3)\nself.flatten = nn.Flatten()\nself.fc1 = nn.Linear(1568, 256)\nself.fc2 = nn.Linear(256, output_dim)\nself.to(self.device)", "x = self.dropout1(self.maxpool1(F.relu(self.conv1(x))))\nx = self.dropout2(self.maxpool2(F.relu(self.conv2(x))))\nx = self.flatten(x)\nx = F.relu(self.fc1(x))\nx = self.fc2(x)\nreturn x"], "bodies_text": "<|body_start_0|>\n super().__init__()\n self.conv1 = nn.Conv2d(1, 64, kernel_size=(2, 2), padding=1)\n self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)\n self.dropout1 = nn.Dropout(0.3)\n self.conv2 = nn.Conv2d(64, 32, kernel_size=(2, 2), padding=1)\n self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)\n self.dropout2 = nn.Dropout(0.3)\n self.flatten = nn.Flatten()\n self.fc1 = nn.Linear(1568, 256)\n self.fc2 = nn.Linear(256, output_dim)\n self.to(self.device)\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.dropout1(self.maxpool1(F.relu(self.conv1(x))))\n x = self.dropout2(self.maxpool2(F.relu(self.conv2(x))))\n x = self.flatten(x)\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return x\n<|end_body_1|>\n", "class_docstring": "MNIST classifier used in the experiments for Counterfactual with Reinforcement Learning. The model consists of two convolutional layers having 64 and 32 channels and a kernel size of 2 with ReLU nonlinearities, followed by maxpooling of size 2 and dropout of 0.3. The convolutional block is followed by a fully connected layer of 256 with ReLU nonlinearity, and finally a fully connected layer is used to predict the class logits (10 in MNIST case).", "class_name": "MNISTClassifier", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MNISTClassifier:\n \"\"\"MNIST classifier used in the experiments for Counterfactual with Reinforcement Learning. The model consists of two convolutional layers having 64 and 32 channels and a kernel size of 2 with ReLU nonlinearities, followed by maxpooling of size 2 and dropout of 0.3. The convolutional block is followed by a fully connected layer of 256 with ReLU nonlinearity, and finally a fully connected layer is used to predict the class logits (10 in MNIST case).\"\"\"\n\n def __init__(self, output_dim: int) -> None:\n \"\"\"Constructor. Parameters ---------- output_dim Output dimension.\"\"\"\n <|body_0|>\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward pass. Parameters ---------- x Input tensor. 
Returns ------- Classification logits.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.conv1 = nn.Conv2d(1, 64, kernel_size=(2, 2), padding=1)\n self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)\n self.dropout1 = nn.Dropout(0.3)\n self.conv2 = nn.Conv2d(64, 32, kernel_size=(2, 2), padding=1)\n self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)\n self.dropout2 = nn.Dropout(0.3)\n self.flatten = nn.Flatten()\n self.fc1 = nn.Linear(1568, 256)\n self.fc2 = nn.Linear(256, output_dim)\n self.to(self.device)\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.dropout1(self.maxpool1(F.relu(self.conv1(x))))\n x = self.dropout2(self.maxpool2(F.relu(self.conv2(x))))\n x = self.flatten(x)\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return x\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000482", "length_bytes": 8134, "license_type": "permissive", "methods": [{"docstring": "Constructor. Parameters ---------- output_dim Output dimension.", "name": "__init__", "signature": "def __init__(self, output_dim: int) -> None"}, {"docstring": "Forward pass. Parameters ---------- x Input tensor. Returns ------- Classification logits.", "name": "forward", "signature": "def forward(self, x: torch.Tensor) -> torch.Tensor"}], "n_methods": 2, "prompt": "Implement the Python class `MNISTClassifier` described below.\n\nClass description:\nMNIST classifier used in the experiments for Counterfactual with Reinforcement Learning. The model consists of two convolutional layers having 64 and 32 channels and a kernel size of 2 with ReLU nonlinearities, followed by maxpooling of size 2 and dropout of 0.3. The convolutional block is followed by a fully connected layer of 256 with ReLU nonlinearity, and finally a fully connected layer is used to predict the class logits (10 in MNIST case).\n\nMethod signatures and docstrings:\n- def __init__(self, output_dim: int) -> None: Constructor. Parameters ---------- output_dim Output dimension.\n- def forward(self, x: torch.Tensor) -> torch.Tensor: Forward pass. Parameters ---------- x Input tensor. Returns ------- Classification logits.", "prompted_full_text": "Implement the Python class `MNISTClassifier` described below.\n\nClass description:\nMNIST classifier used in the experiments for Counterfactual with Reinforcement Learning. The model consists of two convolutional layers having 64 and 32 channels and a kernel size of 2 with ReLU nonlinearities, followed by maxpooling of size 2 and dropout of 0.3. The convolutional block is followed by a fully connected layer of 256 with ReLU nonlinearity, and finally a fully connected layer is used to predict the class logits (10 in MNIST case).\n\nMethod signatures and docstrings:\n- def __init__(self, output_dim: int) -> None: Constructor. Parameters ---------- output_dim Output dimension.\n- def forward(self, x: torch.Tensor) -> torch.Tensor: Forward pass. Parameters ---------- x Input tensor. Returns ------- Classification logits.\n\n<|skeleton|>\nclass MNISTClassifier:\n \"\"\"MNIST classifier used in the experiments for Counterfactual with Reinforcement Learning. The model consists of two convolutional layers having 64 and 32 channels and a kernel size of 2 with ReLU nonlinearities, followed by maxpooling of size 2 and dropout of 0.3. 
The convolutional block is followed by a fully connected layer of 256 with ReLU nonlinearity, and finally a fully connected layer is used to predict the class logits (10 in MNIST case).\"\"\"\n\n def __init__(self, output_dim: int) -> None:\n \"\"\"Constructor. Parameters ---------- output_dim Output dimension.\"\"\"\n <|body_0|>\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward pass. Parameters ---------- x Input tensor. Returns ------- Classification logits.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.conv1 = nn.Conv2d(1, 64, kernel_size=(2, 2), padding=1)\n self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)\n self.dropout1 = nn.Dropout(0.3)\n self.conv2 = nn.Conv2d(64, 32, kernel_size=(2, 2), padding=1)\n self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)\n self.dropout2 = nn.Dropout(0.3)\n self.flatten = nn.Flatten()\n self.fc1 = nn.Linear(1568, 256)\n self.fc2 = nn.Linear(256, output_dim)\n self.to(self.device)\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.dropout1(self.maxpool1(F.relu(self.conv1(x))))\n x = self.dropout2(self.maxpool2(F.relu(self.conv2(x))))\n x = self.flatten(x)\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return x\n<|end_body_1|>\n", "revision_id": "54d0c957fb01c7ebba4e2a0d28fcbde52d9c6718", "skeleton": "<|skeleton|>\nclass MNISTClassifier:\n \"\"\"MNIST classifier used in the experiments for Counterfactual with Reinforcement Learning. The model consists of two convolutional layers having 64 and 32 channels and a kernel size of 2 with ReLU nonlinearities, followed by maxpooling of size 2 and dropout of 0.3. The convolutional block is followed by a fully connected layer of 256 with ReLU nonlinearity, and finally a fully connected layer is used to predict the class logits (10 in MNIST case).\"\"\"\n\n def __init__(self, output_dim: int) -> None:\n \"\"\"Constructor. Parameters ---------- output_dim Output dimension.\"\"\"\n <|body_0|>\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward pass. Parameters ---------- x Input tensor. Returns ------- Classification logits.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class MNISTClassifier:\n \"\"\"MNIST classifier used in the experiments for Counterfactual with Reinforcement Learning. The model consists of two convolutional layers having 64 and 32 channels and a kernel size of 2 with ReLU nonlinearities, followed by maxpooling of size 2 and dropout of 0.3. The convolutional block is followed by a fully connected layer of 256 with ReLU nonlinearity, and finally a fully connected layer is used to predict the class logits (10 in MNIST case).\"\"\"\n\n def __init__(self, output_dim: int) -> None:\n \"\"\"Constructor. Parameters ---------- output_dim Output dimension.\"\"\"\n super().__init__()\n self.conv1 = nn.Conv2d(1, 64, kernel_size=(2, 2), padding=1)\n self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)\n self.dropout1 = nn.Dropout(0.3)\n self.conv2 = nn.Conv2d(64, 32, kernel_size=(2, 2), padding=1)\n self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)\n self.dropout2 = nn.Dropout(0.3)\n self.flatten = nn.Flatten()\n self.fc1 = nn.Linear(1568, 256)\n self.fc2 = nn.Linear(256, output_dim)\n self.to(self.device)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward pass. Parameters ---------- x Input tensor. 
Returns ------- Classification logits.\"\"\"\n x = self.dropout1(self.maxpool1(F.relu(self.conv1(x))))\n x = self.dropout2(self.maxpool2(F.relu(self.conv2(x))))\n x = self.flatten(x)\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return x\n", "source": "the_stack_v2_python_sparse", "source_path": "alibi/models/pytorch/cfrl_models.py", "source_repo": "SeldonIO/alibi", "split": "val", "star_events_count": 2143}
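The hard-coded `nn.Linear(1568, 256)` only works because of exact shape arithmetic: a 28x28 MNIST input grows to 29x29 under the first 2x2 convolution with padding 1, pools to 14x14, grows to 15x15 under the second, pools to 7x7, giving 32*7*7 = 1568 flattened features. A quick shape check (note the record's skeleton also relies on a `self.device` attribute from an nn.Module-style base class that is not shown):

import torch
import torch.nn as nn

x = torch.zeros(1, 1, 28, 28)
x = nn.MaxPool2d(2, 2)(nn.Conv2d(1, 64, kernel_size=(2, 2), padding=1)(x))   # (1, 64, 14, 14)
x = nn.MaxPool2d(2, 2)(nn.Conv2d(64, 32, kernel_size=(2, 2), padding=1)(x))  # (1, 32, 7, 7)
assert nn.Flatten()(x).shape[-1] == 32 * 7 * 7 == 1568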
{"blob_id": "05cb529d49bae0a53ce68bf302aa96207362aaca", "bodies": ["fast, slow = (head, head)\nwhile fast and fast.next:\n fast = fast.next.next\n slow = slow.next\n if fast == slow:\n start = head\n while start.next:\n if slow == start:\n return start\n start = start.next\n slow = slow.next\nreturn None", "nodeList = set()\nwhile head:\n if head in nodeList:\n return head\n nodeList.add(head)\n head = head.next\nreturn None"], "bodies_text": "<|body_start_0|>\n fast, slow = (head, head)\n while fast and fast.next:\n fast = fast.next.next\n slow = slow.next\n if fast == slow:\n start = head\n while start.next:\n if slow == start:\n return start\n start = start.next\n slow = slow.next\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n nodeList = set()\n while head:\n if head in nodeList:\n return head\n nodeList.add(head)\n head = head.next\n return None\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def detectCycle(self, head: ListNode) -> ListNode:\n \"\"\"1. 快慢指针: 入环点的位置为 r,fast、slow 两个指针在环 a 处相遇,环长度 a+b slow 移动长度 l1=r+a,则 fast 移动长度 l2=r+a+b+a,l2=2*l1-->r=b\"\"\"\n <|body_0|>\n\n def detectCycle2(self, head: ListNode) -> ListNode:\n \"\"\"2. 集合保存访问过的节点,额外的空间 O(N)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n fast, slow = (head, head)\n while fast and fast.next:\n fast = fast.next.next\n slow = slow.next\n if fast == slow:\n start = head\n while start.next:\n if slow == start:\n return start\n start = start.next\n slow = slow.next\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n nodeList = set()\n while head:\n if head in nodeList:\n return head\n nodeList.add(head)\n head = head.next\n return None\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000483", "length_bytes": 2631, "license_type": "no_license", "methods": [{"docstring": "1. 快慢指针: 入环点的位置为 r,fast、slow 两个指针在环 a 处相遇,环长度 a+b slow 移动长度 l1=r+a,则 fast 移动长度 l2=r+a+b+a,l2=2*l1-->r=b", "name": "detectCycle", "signature": "def detectCycle(self, head: ListNode) -> ListNode"}, {"docstring": "2. 集合保存访问过的节点,额外的空间 O(N)", "name": "detectCycle2", "signature": "def detectCycle2(self, head: ListNode) -> ListNode"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_036347", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def detectCycle(self, head: ListNode) -> ListNode: 1. 快慢指针: 入环点的位置为 r,fast、slow 两个指针在环 a 处相遇,环长度 a+b slow 移动长度 l1=r+a,则 fast 移动长度 l2=r+a+b+a,l2=2*l1-->r=b\n- def detectCycle2(self, head: ListNode) -> ListNode: 2. 集合保存访问过的节点,额外的空间 O(N)", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def detectCycle(self, head: ListNode) -> ListNode: 1. 快慢指针: 入环点的位置为 r,fast、slow 两个指针在环 a 处相遇,环长度 a+b slow 移动长度 l1=r+a,则 fast 移动长度 l2=r+a+b+a,l2=2*l1-->r=b\n- def detectCycle2(self, head: ListNode) -> ListNode: 2. 集合保存访问过的节点,额外的空间 O(N)\n\n<|skeleton|>\nclass Solution:\n\n def detectCycle(self, head: ListNode) -> ListNode:\n \"\"\"1. 快慢指针: 入环点的位置为 r,fast、slow 两个指针在环 a 处相遇,环长度 a+b slow 移动长度 l1=r+a,则 fast 移动长度 l2=r+a+b+a,l2=2*l1-->r=b\"\"\"\n <|body_0|>\n\n def detectCycle2(self, head: ListNode) -> ListNode:\n \"\"\"2. 
集合保存访问过的节点,额外的空间 O(N)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n fast, slow = (head, head)\n while fast and fast.next:\n fast = fast.next.next\n slow = slow.next\n if fast == slow:\n start = head\n while start.next:\n if slow == start:\n return start\n start = start.next\n slow = slow.next\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n nodeList = set()\n while head:\n if head in nodeList:\n return head\n nodeList.add(head)\n head = head.next\n return None\n<|end_body_1|>\n", "revision_id": "4732fb80710a08a715c3e7080c394f5298b8326d", "skeleton": "<|skeleton|>\nclass Solution:\n\n def detectCycle(self, head: ListNode) -> ListNode:\n \"\"\"1. 快慢指针: 入环点的位置为 r,fast、slow 两个指针在环 a 处相遇,环长度 a+b slow 移动长度 l1=r+a,则 fast 移动长度 l2=r+a+b+a,l2=2*l1-->r=b\"\"\"\n <|body_0|>\n\n def detectCycle2(self, head: ListNode) -> ListNode:\n \"\"\"2. 集合保存访问过的节点,额外的空间 O(N)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def detectCycle(self, head: ListNode) -> ListNode:\n \"\"\"1. 快慢指针: 入环点的位置为 r,fast、slow 两个指针在环 a 处相遇,环长度 a+b slow 移动长度 l1=r+a,则 fast 移动长度 l2=r+a+b+a,l2=2*l1-->r=b\"\"\"\n fast, slow = (head, head)\n while fast and fast.next:\n fast = fast.next.next\n slow = slow.next\n if fast == slow:\n start = head\n while start.next:\n if slow == start:\n return start\n start = start.next\n slow = slow.next\n return None\n\n def detectCycle2(self, head: ListNode) -> ListNode:\n \"\"\"2. 集合保存访问过的节点,额外的空间 O(N)\"\"\"\n nodeList = set()\n while head:\n if head in nodeList:\n return head\n nodeList.add(head)\n head = head.next\n return None\n", "source": "the_stack_v2_python_sparse", "source_path": "02-linkedlist/142.环形链表-ii.py", "source_repo": "xiaoruijiang/algorithm", "split": "val", "star_events_count": 0}
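The Chinese docstrings in this record translate roughly as: (1) fast/slow pointers: with the cycle entry at distance r and the meeting point a steps into a cycle of length a+b, slow travels l1 = r+a and fast l2 = r+a+b+a; l2 = 2*l1 forces r = b (for a single fast lap; in general r equals b modulo the cycle length), so a walker restarted from head meets slow exactly at the entry; (2) a set of visited nodes finds the entry with O(N) extra space. A standalone check of the pointer argument (minimal `ListNode` and a local re-derivation, not the judge's class):

class ListNode:
    def __init__(self, val):
        self.val, self.next = val, None

def detect_cycle(head):
    fast = slow = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
        if fast is slow:                 # met a steps into the cycle
            start = head
            while start is not slow:     # r remaining for start == b for slow
                start, slow = start.next, slow.next
            return start
    return None

nodes = [ListNode(i) for i in range(6)]
for a, b in zip(nodes, nodes[1:]):
    a.next = b
nodes[-1].next = nodes[2]                # close a cycle whose entry is nodes[2]
assert detect_cycle(nodes[0]) is nodes[2]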
{"blob_id": "7dd174889e80af09bcfe84ad8e73b194da0444e6", "bodies": ["self.root = root\nself.checksum = checksum\nif download:\n self._download()\nif not self._check_integrity():\n raise RuntimeError('Dataset not found or corrupted. ' + 'You can use download=True to download it')\nsuper().__init__(root, crs, res, transforms)", "for prov_terr, md5 in zip(self.provinces_territories, self.md5s):\n filepath = os.path.join(self.root, prov_terr + '.zip')\n if not check_integrity(filepath, md5 if self.checksum else None):\n return False\nreturn True", "if self._check_integrity():\n print('Files already downloaded and verified')\n return\nfor prov_terr, md5 in zip(self.provinces_territories, self.md5s):\n download_and_extract_archive(self.url + prov_terr + '.zip', self.root, md5=md5 if self.checksum else None)", "image = sample['mask'].squeeze(0)\nncols = 1\nshowing_prediction = 'prediction' in sample\nif showing_prediction:\n pred = sample['prediction'].squeeze(0)\n ncols = 2\nfig, axs = plt.subplots(nrows=1, ncols=ncols, figsize=(4, 4))\nif showing_prediction:\n axs[0].imshow(image)\n axs[0].axis('off')\n axs[1].imshow(pred)\n axs[1].axis('off')\n if show_titles:\n axs[0].set_title('Mask')\n axs[1].set_title('Prediction')\nelse:\n axs.imshow(image)\n axs.axis('off')\n if show_titles:\n axs.set_title('Mask')\nif suptitle is not None:\n plt.suptitle(suptitle)\nreturn fig"], "bodies_text": "<|body_start_0|>\n self.root = root\n self.checksum = checksum\n if download:\n self._download()\n if not self._check_integrity():\n raise RuntimeError('Dataset not found or corrupted. ' + 'You can use download=True to download it')\n super().__init__(root, crs, res, transforms)\n<|end_body_0|>\n\n<|body_start_1|>\n for prov_terr, md5 in zip(self.provinces_territories, self.md5s):\n filepath = os.path.join(self.root, prov_terr + '.zip')\n if not check_integrity(filepath, md5 if self.checksum else None):\n return False\n return True\n<|end_body_1|>\n\n<|body_start_2|>\n if self._check_integrity():\n print('Files already downloaded and verified')\n return\n for prov_terr, md5 in zip(self.provinces_territories, self.md5s):\n download_and_extract_archive(self.url + prov_terr + '.zip', self.root, md5=md5 if self.checksum else None)\n<|end_body_2|>\n\n<|body_start_3|>\n image = sample['mask'].squeeze(0)\n ncols = 1\n showing_prediction = 'prediction' in sample\n if showing_prediction:\n pred = sample['prediction'].squeeze(0)\n ncols = 2\n fig, axs = plt.subplots(nrows=1, ncols=ncols, figsize=(4, 4))\n if showing_prediction:\n axs[0].imshow(image)\n axs[0].axis('off')\n axs[1].imshow(pred)\n axs[1].axis('off')\n if show_titles:\n axs[0].set_title('Mask')\n axs[1].set_title('Prediction')\n else:\n axs.imshow(image)\n axs.axis('off')\n if show_titles:\n axs.set_title('Mask')\n if suptitle is not None:\n plt.suptitle(suptitle)\n return fig\n<|end_body_3|>\n", "class_docstring": "Canadian Building Footprints dataset. The `Canadian Building Footprints `__ dataset contains 11,842,186 computer generated building footprints in all Canadian provinces and territories in GeoJSON format. This data is freely available for download and use.", "class_name": "CanadianBuildingFootprints", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CanadianBuildingFootprints:\n \"\"\"Canadian Building Footprints dataset. 
The `Canadian Building Footprints `__ dataset contains 11,842,186 computer generated building footprints in all Canadian provinces and territories in GeoJSON format. This data is freely available for download and use.\"\"\"\n\n def __init__(self, root: str='data', crs: Optional[CRS]=None, res: float=1e-05, transforms: Optional[Callable[[dict[str, Any]], dict[str, Any]]]=None, download: bool=False, checksum: bool=False) -> None:\n \"\"\"Initialize a new Dataset instance. Args: root: root directory where dataset can be found crs: :term:`coordinate reference system (CRS)` to warp to (defaults to the CRS of the first file found) res: resolution of the dataset in units of CRS transforms: a function/transform that takes an input sample and returns a transformed version download: if True, download dataset and store it in the root directory checksum: if True, check the MD5 of the downloaded files (may be slow) Raises: FileNotFoundError: if no files are found in ``root`` RuntimeError: if ``download=False`` and data is not found, or ``checksum=True`` and checksums don't match\"\"\"\n <|body_0|>\n\n def _check_integrity(self) -> bool:\n \"\"\"Check integrity of dataset. Returns: True if dataset files are found and/or MD5s match, else False\"\"\"\n <|body_1|>\n\n def _download(self) -> None:\n \"\"\"Download the dataset and extract it.\"\"\"\n <|body_2|>\n\n def plot(self, sample: dict[str, Any], show_titles: bool=True, suptitle: Optional[str]=None) -> plt.Figure:\n \"\"\"Plot a sample from the dataset. Args: sample: a sample returned by :meth:`VectorDataset.__getitem__` show_titles: flag indicating whether to show titles above each panel suptitle: optional string to use as a suptitle Returns: a matplotlib Figure with the rendered sample .. versionchanged:: 0.3 Method now takes a sample dict, not a Tensor. Additionally, it is possible to show subplot titles and/or use a custom suptitle.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.root = root\n self.checksum = checksum\n if download:\n self._download()\n if not self._check_integrity():\n raise RuntimeError('Dataset not found or corrupted. 
' + 'You can use download=True to download it')\n super().__init__(root, crs, res, transforms)\n<|end_body_0|>\n\n<|body_start_1|>\n for prov_terr, md5 in zip(self.provinces_territories, self.md5s):\n filepath = os.path.join(self.root, prov_terr + '.zip')\n if not check_integrity(filepath, md5 if self.checksum else None):\n return False\n return True\n<|end_body_1|>\n\n<|body_start_2|>\n if self._check_integrity():\n print('Files already downloaded and verified')\n return\n for prov_terr, md5 in zip(self.provinces_territories, self.md5s):\n download_and_extract_archive(self.url + prov_terr + '.zip', self.root, md5=md5 if self.checksum else None)\n<|end_body_2|>\n\n<|body_start_3|>\n image = sample['mask'].squeeze(0)\n ncols = 1\n showing_prediction = 'prediction' in sample\n if showing_prediction:\n pred = sample['prediction'].squeeze(0)\n ncols = 2\n fig, axs = plt.subplots(nrows=1, ncols=ncols, figsize=(4, 4))\n if showing_prediction:\n axs[0].imshow(image)\n axs[0].axis('off')\n axs[1].imshow(pred)\n axs[1].axis('off')\n if show_titles:\n axs[0].set_title('Mask')\n axs[1].set_title('Prediction')\n else:\n axs.imshow(image)\n axs.axis('off')\n if show_titles:\n axs.set_title('Mask')\n if suptitle is not None:\n plt.suptitle(suptitle)\n return fig\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000484", "length_bytes": 5763, "license_type": "permissive", "methods": [{"docstring": "Initialize a new Dataset instance. Args: root: root directory where dataset can be found crs: :term:`coordinate reference system (CRS)` to warp to (defaults to the CRS of the first file found) res: resolution of the dataset in units of CRS transforms: a function/transform that takes an input sample and returns a transformed version download: if True, download dataset and store it in the root directory checksum: if True, check the MD5 of the downloaded files (may be slow) Raises: FileNotFoundError: if no files are found in ``root`` RuntimeError: if ``download=False`` and data is not found, or ``checksum=True`` and checksums don't match", "name": "__init__", "signature": "def __init__(self, root: str='data', crs: Optional[CRS]=None, res: float=1e-05, transforms: Optional[Callable[[dict[str, Any]], dict[str, Any]]]=None, download: bool=False, checksum: bool=False) -> None"}, {"docstring": "Check integrity of dataset. Returns: True if dataset files are found and/or MD5s match, else False", "name": "_check_integrity", "signature": "def _check_integrity(self) -> bool"}, {"docstring": "Download the dataset and extract it.", "name": "_download", "signature": "def _download(self) -> None"}, {"docstring": "Plot a sample from the dataset. Args: sample: a sample returned by :meth:`VectorDataset.__getitem__` show_titles: flag indicating whether to show titles above each panel suptitle: optional string to use as a suptitle Returns: a matplotlib Figure with the rendered sample .. versionchanged:: 0.3 Method now takes a sample dict, not a Tensor. Additionally, it is possible to show subplot titles and/or use a custom suptitle.", "name": "plot", "signature": "def plot(self, sample: dict[str, Any], show_titles: bool=True, suptitle: Optional[str]=None) -> plt.Figure"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_032406", "prompt": "Implement the Python class `CanadianBuildingFootprints` described below.\n\nClass description:\nCanadian Building Footprints dataset. 
The `Canadian Building Footprints `__ dataset contains 11,842,186 computer generated building footprints in all Canadian provinces and territories in GeoJSON format. This data is freely available for download and use.\n\nMethod signatures and docstrings:\n- def __init__(self, root: str='data', crs: Optional[CRS]=None, res: float=1e-05, transforms: Optional[Callable[[dict[str, Any]], dict[str, Any]]]=None, download: bool=False, checksum: bool=False) -> None: Initialize a new Dataset instance. Args: root: root directory where dataset can be found crs: :term:`coordinate reference system (CRS)` to warp to (defaults to the CRS of the first file found) res: resolution of the dataset in units of CRS transforms: a function/transform that takes an input sample and returns a transformed version download: if True, download dataset and store it in the root directory checksum: if True, check the MD5 of the downloaded files (may be slow) Raises: FileNotFoundError: if no files are found in ``root`` RuntimeError: if ``download=False`` and data is not found, or ``checksum=True`` and checksums don't match\n- def _check_integrity(self) -> bool: Check integrity of dataset. Returns: True if dataset files are found and/or MD5s match, else False\n- def _download(self) -> None: Download the dataset and extract it.\n- def plot(self, sample: dict[str, Any], show_titles: bool=True, suptitle: Optional[str]=None) -> plt.Figure: Plot a sample from the dataset. Args: sample: a sample returned by :meth:`VectorDataset.__getitem__` show_titles: flag indicating whether to show titles above each panel suptitle: optional string to use as a suptitle Returns: a matplotlib Figure with the rendered sample .. versionchanged:: 0.3 Method now takes a sample dict, not a Tensor. Additionally, it is possible to show subplot titles and/or use a custom suptitle.", "prompted_full_text": "Implement the Python class `CanadianBuildingFootprints` described below.\n\nClass description:\nCanadian Building Footprints dataset. The `Canadian Building Footprints `__ dataset contains 11,842,186 computer generated building footprints in all Canadian provinces and territories in GeoJSON format. This data is freely available for download and use.\n\nMethod signatures and docstrings:\n- def __init__(self, root: str='data', crs: Optional[CRS]=None, res: float=1e-05, transforms: Optional[Callable[[dict[str, Any]], dict[str, Any]]]=None, download: bool=False, checksum: bool=False) -> None: Initialize a new Dataset instance. Args: root: root directory where dataset can be found crs: :term:`coordinate reference system (CRS)` to warp to (defaults to the CRS of the first file found) res: resolution of the dataset in units of CRS transforms: a function/transform that takes an input sample and returns a transformed version download: if True, download dataset and store it in the root directory checksum: if True, check the MD5 of the downloaded files (may be slow) Raises: FileNotFoundError: if no files are found in ``root`` RuntimeError: if ``download=False`` and data is not found, or ``checksum=True`` and checksums don't match\n- def _check_integrity(self) -> bool: Check integrity of dataset. Returns: True if dataset files are found and/or MD5s match, else False\n- def _download(self) -> None: Download the dataset and extract it.\n- def plot(self, sample: dict[str, Any], show_titles: bool=True, suptitle: Optional[str]=None) -> plt.Figure: Plot a sample from the dataset. 
Args: sample: a sample returned by :meth:`VectorDataset.__getitem__` show_titles: flag indicating whether to show titles above each panel suptitle: optional string to use as a suptitle Returns: a matplotlib Figure with the rendered sample .. versionchanged:: 0.3 Method now takes a sample dict, not a Tensor. Additionally, it is possible to show subplot titles and/or use a custom suptitle.\n\n<|skeleton|>\nclass CanadianBuildingFootprints:\n \"\"\"Canadian Building Footprints dataset. The `Canadian Building Footprints `__ dataset contains 11,842,186 computer generated building footprints in all Canadian provinces and territories in GeoJSON format. This data is freely available for download and use.\"\"\"\n\n def __init__(self, root: str='data', crs: Optional[CRS]=None, res: float=1e-05, transforms: Optional[Callable[[dict[str, Any]], dict[str, Any]]]=None, download: bool=False, checksum: bool=False) -> None:\n \"\"\"Initialize a new Dataset instance. Args: root: root directory where dataset can be found crs: :term:`coordinate reference system (CRS)` to warp to (defaults to the CRS of the first file found) res: resolution of the dataset in units of CRS transforms: a function/transform that takes an input sample and returns a transformed version download: if True, download dataset and store it in the root directory checksum: if True, check the MD5 of the downloaded files (may be slow) Raises: FileNotFoundError: if no files are found in ``root`` RuntimeError: if ``download=False`` and data is not found, or ``checksum=True`` and checksums don't match\"\"\"\n <|body_0|>\n\n def _check_integrity(self) -> bool:\n \"\"\"Check integrity of dataset. Returns: True if dataset files are found and/or MD5s match, else False\"\"\"\n <|body_1|>\n\n def _download(self) -> None:\n \"\"\"Download the dataset and extract it.\"\"\"\n <|body_2|>\n\n def plot(self, sample: dict[str, Any], show_titles: bool=True, suptitle: Optional[str]=None) -> plt.Figure:\n \"\"\"Plot a sample from the dataset. Args: sample: a sample returned by :meth:`VectorDataset.__getitem__` show_titles: flag indicating whether to show titles above each panel suptitle: optional string to use as a suptitle Returns: a matplotlib Figure with the rendered sample .. versionchanged:: 0.3 Method now takes a sample dict, not a Tensor. Additionally, it is possible to show subplot titles and/or use a custom suptitle.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.root = root\n self.checksum = checksum\n if download:\n self._download()\n if not self._check_integrity():\n raise RuntimeError('Dataset not found or corrupted. 
' + 'You can use download=True to download it')\n super().__init__(root, crs, res, transforms)\n<|end_body_0|>\n\n<|body_start_1|>\n for prov_terr, md5 in zip(self.provinces_territories, self.md5s):\n filepath = os.path.join(self.root, prov_terr + '.zip')\n if not check_integrity(filepath, md5 if self.checksum else None):\n return False\n return True\n<|end_body_1|>\n\n<|body_start_2|>\n if self._check_integrity():\n print('Files already downloaded and verified')\n return\n for prov_terr, md5 in zip(self.provinces_territories, self.md5s):\n download_and_extract_archive(self.url + prov_terr + '.zip', self.root, md5=md5 if self.checksum else None)\n<|end_body_2|>\n\n<|body_start_3|>\n image = sample['mask'].squeeze(0)\n ncols = 1\n showing_prediction = 'prediction' in sample\n if showing_prediction:\n pred = sample['prediction'].squeeze(0)\n ncols = 2\n fig, axs = plt.subplots(nrows=1, ncols=ncols, figsize=(4, 4))\n if showing_prediction:\n axs[0].imshow(image)\n axs[0].axis('off')\n axs[1].imshow(pred)\n axs[1].axis('off')\n if show_titles:\n axs[0].set_title('Mask')\n axs[1].set_title('Prediction')\n else:\n axs.imshow(image)\n axs.axis('off')\n if show_titles:\n axs.set_title('Mask')\n if suptitle is not None:\n plt.suptitle(suptitle)\n return fig\n<|end_body_3|>\n", "revision_id": "29985861614b3b93f9ef5389469ebb98570de7dd", "skeleton": "<|skeleton|>\nclass CanadianBuildingFootprints:\n \"\"\"Canadian Building Footprints dataset. The `Canadian Building Footprints `__ dataset contains 11,842,186 computer generated building footprints in all Canadian provinces and territories in GeoJSON format. This data is freely available for download and use.\"\"\"\n\n def __init__(self, root: str='data', crs: Optional[CRS]=None, res: float=1e-05, transforms: Optional[Callable[[dict[str, Any]], dict[str, Any]]]=None, download: bool=False, checksum: bool=False) -> None:\n \"\"\"Initialize a new Dataset instance. Args: root: root directory where dataset can be found crs: :term:`coordinate reference system (CRS)` to warp to (defaults to the CRS of the first file found) res: resolution of the dataset in units of CRS transforms: a function/transform that takes an input sample and returns a transformed version download: if True, download dataset and store it in the root directory checksum: if True, check the MD5 of the downloaded files (may be slow) Raises: FileNotFoundError: if no files are found in ``root`` RuntimeError: if ``download=False`` and data is not found, or ``checksum=True`` and checksums don't match\"\"\"\n <|body_0|>\n\n def _check_integrity(self) -> bool:\n \"\"\"Check integrity of dataset. Returns: True if dataset files are found and/or MD5s match, else False\"\"\"\n <|body_1|>\n\n def _download(self) -> None:\n \"\"\"Download the dataset and extract it.\"\"\"\n <|body_2|>\n\n def plot(self, sample: dict[str, Any], show_titles: bool=True, suptitle: Optional[str]=None) -> plt.Figure:\n \"\"\"Plot a sample from the dataset. Args: sample: a sample returned by :meth:`VectorDataset.__getitem__` show_titles: flag indicating whether to show titles above each panel suptitle: optional string to use as a suptitle Returns: a matplotlib Figure with the rendered sample .. versionchanged:: 0.3 Method now takes a sample dict, not a Tensor. 
Additionally, it is possible to show subplot titles and/or use a custom suptitle.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CanadianBuildingFootprints:\n \"\"\"Canadian Building Footprints dataset. The `Canadian Building Footprints `__ dataset contains 11,842,186 computer generated building footprints in all Canadian provinces and territories in GeoJSON format. This data is freely available for download and use.\"\"\"\n\n def __init__(self, root: str='data', crs: Optional[CRS]=None, res: float=1e-05, transforms: Optional[Callable[[dict[str, Any]], dict[str, Any]]]=None, download: bool=False, checksum: bool=False) -> None:\n \"\"\"Initialize a new Dataset instance. Args: root: root directory where dataset can be found crs: :term:`coordinate reference system (CRS)` to warp to (defaults to the CRS of the first file found) res: resolution of the dataset in units of CRS transforms: a function/transform that takes an input sample and returns a transformed version download: if True, download dataset and store it in the root directory checksum: if True, check the MD5 of the downloaded files (may be slow) Raises: FileNotFoundError: if no files are found in ``root`` RuntimeError: if ``download=False`` and data is not found, or ``checksum=True`` and checksums don't match\"\"\"\n self.root = root\n self.checksum = checksum\n if download:\n self._download()\n if not self._check_integrity():\n raise RuntimeError('Dataset not found or corrupted. ' + 'You can use download=True to download it')\n super().__init__(root, crs, res, transforms)\n\n def _check_integrity(self) -> bool:\n \"\"\"Check integrity of dataset. Returns: True if dataset files are found and/or MD5s match, else False\"\"\"\n for prov_terr, md5 in zip(self.provinces_territories, self.md5s):\n filepath = os.path.join(self.root, prov_terr + '.zip')\n if not check_integrity(filepath, md5 if self.checksum else None):\n return False\n return True\n\n def _download(self) -> None:\n \"\"\"Download the dataset and extract it.\"\"\"\n if self._check_integrity():\n print('Files already downloaded and verified')\n return\n for prov_terr, md5 in zip(self.provinces_territories, self.md5s):\n download_and_extract_archive(self.url + prov_terr + '.zip', self.root, md5=md5 if self.checksum else None)\n\n def plot(self, sample: dict[str, Any], show_titles: bool=True, suptitle: Optional[str]=None) -> plt.Figure:\n \"\"\"Plot a sample from the dataset. Args: sample: a sample returned by :meth:`VectorDataset.__getitem__` show_titles: flag indicating whether to show titles above each panel suptitle: optional string to use as a suptitle Returns: a matplotlib Figure with the rendered sample .. versionchanged:: 0.3 Method now takes a sample dict, not a Tensor. 
Additionally, it is possible to show subplot titles and/or use a custom suptitle.\"\"\"\n image = sample['mask'].squeeze(0)\n ncols = 1\n showing_prediction = 'prediction' in sample\n if showing_prediction:\n pred = sample['prediction'].squeeze(0)\n ncols = 2\n fig, axs = plt.subplots(nrows=1, ncols=ncols, figsize=(4, 4))\n if showing_prediction:\n axs[0].imshow(image)\n axs[0].axis('off')\n axs[1].imshow(pred)\n axs[1].axis('off')\n if show_titles:\n axs[0].set_title('Mask')\n axs[1].set_title('Prediction')\n else:\n axs.imshow(image)\n axs.axis('off')\n if show_titles:\n axs.set_title('Mask')\n if suptitle is not None:\n plt.suptitle(suptitle)\n return fig\n", "source": "the_stack_v2_python_sparse", "source_path": "torchgeo/datasets/cbf.py", "source_repo": "microsoft/torchgeo", "split": "val", "star_events_count": 1724}
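Both _check_integrity and _download in this record reduce to one idea: an archive passes if it exists and, when checksumming is enabled, its MD5 matches. Here is a stdlib-only sketch of that check; the archive name below is illustrative, not necessarily one of the dataset's real provinces_territories entries.

import hashlib
import os

def md5_ok(path, expected_md5=None):
    # Missing file -> fail; no expected digest -> existence is enough,
    # mirroring check_integrity(filepath, None) in the record above.
    if not os.path.isfile(path):
        return False
    if expected_md5 is None:
        return True
    digest = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):
            digest.update(chunk)
    return digest.hexdigest() == expected_md5

print(md5_ok(os.path.join('data', 'Alberta.zip')))  # hypothetical archive name

The download loop then simply runs this check per archive and re-downloads on the first failure.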
{"blob_id": "513a297965d3db04bdbaf65093dd086512482eb4", "bodies": ["if head is None:\n return None\nslow = head.next\nif slow is None:\n return None\nfast = slow.next\nwhile slow is not None and fast is not None:\n if slow == fast:\n return fast\n slow = slow.next\n fast = fast.next\n if fast is not None:\n fast = fast.next\nreturn None", "meet_node = self.meeting_node(head)\nif meet_node is None:\n return None\nloop_nodes = 1\ntemp_node = meet_node\nwhile temp_node.next != meet_node:\n temp_node = temp_node.next\n loop_nodes += 1\ntemp_node = head\nfor i in range(loop_nodes):\n temp_node = temp_node.next\ntemp_node2 = head\nwhile temp_node2 != temp_node:\n temp_node = temp_node.next\n temp_node2 = temp_node2.next\nreturn temp_node"], "bodies_text": "<|body_start_0|>\n if head is None:\n return None\n slow = head.next\n if slow is None:\n return None\n fast = slow.next\n while slow is not None and fast is not None:\n if slow == fast:\n return fast\n slow = slow.next\n fast = fast.next\n if fast is not None:\n fast = fast.next\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n meet_node = self.meeting_node(head)\n if meet_node is None:\n return None\n loop_nodes = 1\n temp_node = meet_node\n while temp_node.next != meet_node:\n temp_node = temp_node.next\n loop_nodes += 1\n temp_node = head\n for i in range(loop_nodes):\n temp_node = temp_node.next\n temp_node2 = head\n while temp_node2 != temp_node:\n temp_node = temp_node.next\n temp_node2 = temp_node2.next\n return temp_node\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def meeting_node(self, head):\n \"\"\"Assuming the linked list contains a cycle, find the node where a fast and a slow pointer meet. :param head: head node of the linked list :return:\"\"\"\n <|body_0|>\n\n def entry_node_of_loop(self, head):\n \"\"\"Find the entry node of the cycle. :param head: head node of the linked list :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if head is None:\n return None\n slow = head.next\n if slow is None:\n return None\n fast = slow.next\n while slow is not None and fast is not None:\n if slow == fast:\n return fast\n slow = slow.next\n fast = fast.next\n if fast is not None:\n fast = fast.next\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n meet_node = self.meeting_node(head)\n if meet_node is None:\n return None\n loop_nodes = 1\n temp_node = meet_node\n while temp_node.next != meet_node:\n temp_node = temp_node.next\n loop_nodes += 1\n temp_node = head\n for i in range(loop_nodes):\n temp_node = temp_node.next\n temp_node2 = head\n while temp_node2 != temp_node:\n temp_node = temp_node.next\n temp_node2 = temp_node2.next\n return temp_node\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000485", "length_bytes": 1932, "license_type": "no_license", "methods": [{"docstring": "Assuming the linked list contains a cycle, find the node where a fast and a slow pointer meet. :param head: head node of the linked list :return:", "name": "meeting_node", "signature": "def meeting_node(self, head)"}, {"docstring": "Find the entry node of the cycle. :param head: head node of the linked list :return:", "name": "entry_node_of_loop", "signature": "def entry_node_of_loop(self, head)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_033972", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def meeting_node(self, head): Assuming the linked list contains a cycle, find the node where a fast and a slow pointer meet. :param head: head node of the linked list :return:\n- def entry_node_of_loop(self, head): Find the entry node of the cycle. :param head: head node of the linked list :return:", "prompted_full_text": "Implement the Python class `Solution` 
described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def meeting_node(self, head): Assuming the linked list contains a cycle, find the node where a fast and a slow pointer meet. :param head: head node of the linked list :return:\n- def entry_node_of_loop(self, head): Find the entry node of the cycle. :param head: head node of the linked list :return:\n\n<|skeleton|>\nclass Solution:\n\n def meeting_node(self, head):\n \"\"\"Assuming the linked list contains a cycle, find the node where a fast and a slow pointer meet. :param head: head node of the linked list :return:\"\"\"\n <|body_0|>\n\n def entry_node_of_loop(self, head):\n \"\"\"Find the entry node of the cycle. :param head: head node of the linked list :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if head is None:\n return None\n slow = head.next\n if slow is None:\n return None\n fast = slow.next\n while slow is not None and fast is not None:\n if slow == fast:\n return fast\n slow = slow.next\n fast = fast.next\n if fast is not None:\n fast = fast.next\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n meet_node = self.meeting_node(head)\n if meet_node is None:\n return None\n loop_nodes = 1\n temp_node = meet_node\n while temp_node.next != meet_node:\n temp_node = temp_node.next\n loop_nodes += 1\n temp_node = head\n for i in range(loop_nodes):\n temp_node = temp_node.next\n temp_node2 = head\n while temp_node2 != temp_node:\n temp_node = temp_node.next\n temp_node2 = temp_node2.next\n return temp_node\n<|end_body_1|>\n", "revision_id": "51e6d72bfc631fa96e5a8ed6e4e55cd240ad47d9", "skeleton": "<|skeleton|>\nclass Solution:\n\n def meeting_node(self, head):\n \"\"\"Assuming the linked list contains a cycle, find the node where a fast and a slow pointer meet. :param head: head node of the linked list :return:\"\"\"\n <|body_0|>\n\n def entry_node_of_loop(self, head):\n \"\"\"Find the entry node of the cycle. :param head: head node of the linked list :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def meeting_node(self, head):\n \"\"\"Assuming the linked list contains a cycle, find the node where a fast and a slow pointer meet. :param head: head node of the linked list :return:\"\"\"\n if head is None:\n return None\n slow = head.next\n if slow is None:\n return None\n fast = slow.next\n while slow is not None and fast is not None:\n if slow == fast:\n return fast\n slow = slow.next\n fast = fast.next\n if fast is not None:\n fast = fast.next\n return None\n\n def entry_node_of_loop(self, head):\n \"\"\"Find the entry node of the cycle. :param head: head node of the linked list :return:\"\"\"\n meet_node = self.meeting_node(head)\n if meet_node is None:\n return None\n loop_nodes = 1\n temp_node = meet_node\n while temp_node.next != meet_node:\n temp_node = temp_node.next\n loop_nodes += 1\n temp_node = head\n for i in range(loop_nodes):\n temp_node = temp_node.next\n temp_node2 = head\n while temp_node2 != temp_node:\n temp_node = temp_node.next\n temp_node2 = temp_node2.next\n return temp_node\n", "source": "the_stack_v2_python_sparse", "source_path": "剑指Offer/23.链表中环的入口结点.py", "source_repo": "CodingBuye/PythonForNowcoder", "split": "val", "star_events_count": 2}
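Unlike the previous record, this variant first measures the cycle length from the meeting node, then advances one pointer exactly that many steps from the head before the lockstep walk. A short self-contained check follows; the node wiring is ad hoc test scaffolding, not part of the source file.

class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None

a, b, c, d = ListNode('a'), ListNode('b'), ListNode('c'), ListNode('d')
a.next, b.next, c.next, d.next = b, c, d, b  # cycle: b -> c -> d -> b

s = Solution()
assert s.meeting_node(a) is not None        # pointers meet inside the cycle
assert s.entry_node_of_loop(a) is b         # entry is the second node
assert s.entry_node_of_loop(ListNode('x')) is None  # acyclic list -> None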
{"blob_id": "d17b0ac3ac62cf0d442ec7efab84c5548c61e747", "bodies": ["wallet_query = wallets.select().where(wallets.c.id == wallet_id)\nwallet = await self._db.fetch_one(wallet_query)\nif wallet:\n return WalletEntity(**wallet)\nreturn None", "wallet_query = wallets.select().where(wallets.c.user_id == user_id)\nwallet = await self._db.fetch_one(wallet_query)\nif wallet:\n return WalletEntity(**wallet)\nreturn None", "wallet_query = wallets.insert().values({'user_id': user_id})\nwallet_id: int = await self._db.execute(wallet_query)\nreturn wallet_id", "query = wallets.update().where(wallets.c.id == wallet_id).values({'balance': wallets.c.balance + amount}).returning(wallets.c.id)\nw_id: int = await self._db.execute(query)\nreturn w_id", "source_query = wallets.update().where(wallets.c.id == source_wallet_id).values({'balance': wallets.c.balance - amount}).returning(wallets.c.id)\ntarget_query = wallets.update().where(wallets.c.id == destination_wallet_id).values({'balance': wallets.c.balance + amount})\nwallet_id: int = await self._db.execute(source_query)\nif not wallet_id:\n raise ValueError('Source wallet id does not exist')\nawait self._db.execute(target_query)\nreturn wallet_id"], "bodies_text": "<|body_start_0|>\n wallet_query = wallets.select().where(wallets.c.id == wallet_id)\n wallet = await self._db.fetch_one(wallet_query)\n if wallet:\n return WalletEntity(**wallet)\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n wallet_query = wallets.select().where(wallets.c.user_id == user_id)\n wallet = await self._db.fetch_one(wallet_query)\n if wallet:\n return WalletEntity(**wallet)\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n wallet_query = wallets.insert().values({'user_id': user_id})\n wallet_id: int = await self._db.execute(wallet_query)\n return wallet_id\n<|end_body_2|>\n\n<|body_start_3|>\n query = wallets.update().where(wallets.c.id == wallet_id).values({'balance': wallets.c.balance + amount}).returning(wallets.c.id)\n w_id: int = await self._db.execute(query)\n return w_id\n<|end_body_3|>\n\n<|body_start_4|>\n source_query = wallets.update().where(wallets.c.id == source_wallet_id).values({'balance': wallets.c.balance - amount}).returning(wallets.c.id)\n target_query = wallets.update().where(wallets.c.id == destination_wallet_id).values({'balance': wallets.c.balance + amount})\n wallet_id: int = await self._db.execute(source_query)\n if not wallet_id:\n raise ValueError('Source wallet id does not exist')\n await self._db.execute(target_query)\n return wallet_id\n<|end_body_4|>\n", "class_docstring": "Implementation of wallet repository.", "class_name": "WalletRepository", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass WalletRepository:\n \"\"\"Implementation of wallet repository.\"\"\"\n\n async def get_by_id(self, wallet_id: int) -> Optional[WalletEntity]:\n \"\"\"Retrieve wallet record by wallet id. :param wallet_id: ID of wallet :returns: Wallet schema\"\"\"\n <|body_0|>\n\n async def get_by_user_id(self, user_id: int) -> Optional[WalletEntity]:\n \"\"\"Retrieve wallet record by user id. 
:param user_id: ID of user :returns: Wallet schema\"\"\"\n <|body_1|>\n\n async def create(self, user_id: int) -> int:\n \"\"\"Create new wallet with user ID :param user_id: ID of user :returns: ID of new wallet\"\"\"\n <|body_2|>\n\n async def enroll(self, wallet_id: int, amount: Decimal) -> int:\n \"\"\"Put given funds on wallet :param wallet_id: ID of wallet :param amount: Initial funds :returns: ID of updated wallet\"\"\"\n <|body_3|>\n\n async def transfer(self, source_wallet_id: int, destination_wallet_id: int, amount: Decimal) -> int:\n \"\"\"Transfer amount of currency between wallets. :param source_wallet_id: ID of source wallet :param destination_wallet_id: ID of wallet recipients :param amount: Amount of currency :returns: Source wallet id\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n wallet_query = wallets.select().where(wallets.c.id == wallet_id)\n wallet = await self._db.fetch_one(wallet_query)\n if wallet:\n return WalletEntity(**wallet)\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n wallet_query = wallets.select().where(wallets.c.user_id == user_id)\n wallet = await self._db.fetch_one(wallet_query)\n if wallet:\n return WalletEntity(**wallet)\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n wallet_query = wallets.insert().values({'user_id': user_id})\n wallet_id: int = await self._db.execute(wallet_query)\n return wallet_id\n<|end_body_2|>\n\n<|body_start_3|>\n query = wallets.update().where(wallets.c.id == wallet_id).values({'balance': wallets.c.balance + amount}).returning(wallets.c.id)\n w_id: int = await self._db.execute(query)\n return w_id\n<|end_body_3|>\n\n<|body_start_4|>\n source_query = wallets.update().where(wallets.c.id == source_wallet_id).values({'balance': wallets.c.balance - amount}).returning(wallets.c.id)\n target_query = wallets.update().where(wallets.c.id == destination_wallet_id).values({'balance': wallets.c.balance + amount})\n wallet_id: int = await self._db.execute(source_query)\n if not wallet_id:\n raise ValueError('Source wallet id does not exist')\n await self._db.execute(target_query)\n return wallet_id\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000486", "length_bytes": 4539, "license_type": "no_license", "methods": [{"docstring": "Retrieve wallet record by wallet id. :param wallet_id: ID of wallet :returns: Wallet schema", "name": "get_by_id", "signature": "async def get_by_id(self, wallet_id: int) -> Optional[WalletEntity]"}, {"docstring": "Retrieve wallet record by user id. :param user_id: ID of user :returns: Wallet schema", "name": "get_by_user_id", "signature": "async def get_by_user_id(self, user_id: int) -> Optional[WalletEntity]"}, {"docstring": "Create new wallet with user ID :param user_id: ID of user :returns: ID of new wallet", "name": "create", "signature": "async def create(self, user_id: int) -> int"}, {"docstring": "Put given funds on wallet :param wallet_id: ID of wallet :param amount: Initial funds :returns: ID of updated wallet", "name": "enroll", "signature": "async def enroll(self, wallet_id: int, amount: Decimal) -> int"}, {"docstring": "Transfer amount of currency between wallets. 
:param source_wallet_id: ID of source wallet :param destination_wallet_id: ID of wallet recipients :param amount: Amount of currency :returns: Source wallet id", "name": "transfer", "signature": "async def transfer(self, source_wallet_id: int, destination_wallet_id: int, amount: Decimal) -> int"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_test_001185", "prompt": "Implement the Python class `WalletRepository` described below.\n\nClass description:\nImplementation of wallet repository.\n\nMethod signatures and docstrings:\n- async def get_by_id(self, wallet_id: int) -> Optional[WalletEntity]: Retrieve wallet record by wallet id. :param wallet_id: ID of wallet :returns: Wallet schema\n- async def get_by_user_id(self, user_id: int) -> Optional[WalletEntity]: Retrieve wallet record by user id. :param user_id: ID of user :returns: Wallet schema\n- async def create(self, user_id: int) -> int: Create new wallet with user ID :param user_id: ID of user :returns: ID of new wallet\n- async def enroll(self, wallet_id: int, amount: Decimal) -> int: Put given funds on wallet :param wallet_id: ID of wallet :param amount: Initial funds :returns: ID of updated wallet\n- async def transfer(self, source_wallet_id: int, destination_wallet_id: int, amount: Decimal) -> int: Transfer amount of currency between wallets. :param source_wallet_id: ID of source wallet :param destination_wallet_id: ID of wallet recipients :param amount: Amount of currency :returns: Source wallet id", "prompted_full_text": "Implement the Python class `WalletRepository` described below.\n\nClass description:\nImplementation of wallet repository.\n\nMethod signatures and docstrings:\n- async def get_by_id(self, wallet_id: int) -> Optional[WalletEntity]: Retrieve wallet record by wallet id. :param wallet_id: ID of wallet :returns: Wallet schema\n- async def get_by_user_id(self, user_id: int) -> Optional[WalletEntity]: Retrieve wallet record by user id. :param user_id: ID of user :returns: Wallet schema\n- async def create(self, user_id: int) -> int: Create new wallet with user ID :param user_id: ID of user :returns: ID of new wallet\n- async def enroll(self, wallet_id: int, amount: Decimal) -> int: Put given funds on wallet :param wallet_id: ID of wallet :param amount: Initial funds :returns: ID of updated wallet\n- async def transfer(self, source_wallet_id: int, destination_wallet_id: int, amount: Decimal) -> int: Transfer amount of currency between wallets. :param source_wallet_id: ID of source wallet :param destination_wallet_id: ID of wallet recipients :param amount: Amount of currency :returns: Source wallet id\n\n<|skeleton|>\nclass WalletRepository:\n \"\"\"Implementation of wallet repository.\"\"\"\n\n async def get_by_id(self, wallet_id: int) -> Optional[WalletEntity]:\n \"\"\"Retrieve wallet record by wallet id. :param wallet_id: ID of wallet :returns: Wallet schema\"\"\"\n <|body_0|>\n\n async def get_by_user_id(self, user_id: int) -> Optional[WalletEntity]:\n \"\"\"Retrieve wallet record by user id. 
:param user_id: ID of user :returns: Wallet schema\"\"\"\n <|body_1|>\n\n async def create(self, user_id: int) -> int:\n \"\"\"Create new wallet with user ID :param user_id: ID of user :returns: ID of new wallet\"\"\"\n <|body_2|>\n\n async def enroll(self, wallet_id: int, amount: Decimal) -> int:\n \"\"\"Put given funds on wallet :param wallet_id: ID of wallet :param amount: Initial funds :returns: ID of updated wallet\"\"\"\n <|body_3|>\n\n async def transfer(self, source_wallet_id: int, destination_wallet_id: int, amount: Decimal) -> int:\n \"\"\"Transfer amount of currency between wallets. :param source_wallet_id: ID of source wallet :param destination_wallet_id: ID of wallet recipients :param amount: Amount of currency :returns: Source wallet id\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n wallet_query = wallets.select().where(wallets.c.id == wallet_id)\n wallet = await self._db.fetch_one(wallet_query)\n if wallet:\n return WalletEntity(**wallet)\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n wallet_query = wallets.select().where(wallets.c.user_id == user_id)\n wallet = await self._db.fetch_one(wallet_query)\n if wallet:\n return WalletEntity(**wallet)\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n wallet_query = wallets.insert().values({'user_id': user_id})\n wallet_id: int = await self._db.execute(wallet_query)\n return wallet_id\n<|end_body_2|>\n\n<|body_start_3|>\n query = wallets.update().where(wallets.c.id == wallet_id).values({'balance': wallets.c.balance + amount}).returning(wallets.c.id)\n w_id: int = await self._db.execute(query)\n return w_id\n<|end_body_3|>\n\n<|body_start_4|>\n source_query = wallets.update().where(wallets.c.id == source_wallet_id).values({'balance': wallets.c.balance - amount}).returning(wallets.c.id)\n target_query = wallets.update().where(wallets.c.id == destination_wallet_id).values({'balance': wallets.c.balance + amount})\n wallet_id: int = await self._db.execute(source_query)\n if not wallet_id:\n raise ValueError('Source wallet id does not exist')\n await self._db.execute(target_query)\n return wallet_id\n<|end_body_4|>\n", "revision_id": "4cd339bdbe9ca1ac9ab01849dcd43c81e068488d", "skeleton": "<|skeleton|>\nclass WalletRepository:\n \"\"\"Implementation of wallet repository.\"\"\"\n\n async def get_by_id(self, wallet_id: int) -> Optional[WalletEntity]:\n \"\"\"Retrieve wallet record by wallet id. :param wallet_id: ID of wallet :returns: Wallet schema\"\"\"\n <|body_0|>\n\n async def get_by_user_id(self, user_id: int) -> Optional[WalletEntity]:\n \"\"\"Retrieve wallet record by user id. :param user_id: ID of user :returns: Wallet schema\"\"\"\n <|body_1|>\n\n async def create(self, user_id: int) -> int:\n \"\"\"Create new wallet with user ID :param user_id: ID of user :returns: ID of new wallet\"\"\"\n <|body_2|>\n\n async def enroll(self, wallet_id: int, amount: Decimal) -> int:\n \"\"\"Put given funds on wallet :param wallet_id: ID of wallet :param amount: Initial funds :returns: ID of updated wallet\"\"\"\n <|body_3|>\n\n async def transfer(self, source_wallet_id: int, destination_wallet_id: int, amount: Decimal) -> int:\n \"\"\"Transfer amount of currency between wallets. 
:param source_wallet_id: ID of source wallet :param destination_wallet_id: ID of wallet recipients :param amount: Amount of currency :returns: Source wallet id\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class WalletRepository:\n \"\"\"Implementation of wallet repository.\"\"\"\n\n async def get_by_id(self, wallet_id: int) -> Optional[WalletEntity]:\n \"\"\"Retrieve wallet record by wallet id. :param wallet_id: ID of wallet :returns: Wallet schema\"\"\"\n wallet_query = wallets.select().where(wallets.c.id == wallet_id)\n wallet = await self._db.fetch_one(wallet_query)\n if wallet:\n return WalletEntity(**wallet)\n return None\n\n async def get_by_user_id(self, user_id: int) -> Optional[WalletEntity]:\n \"\"\"Retrieve wallet record by user id. :param user_id: ID of user :returns: Wallet schema\"\"\"\n wallet_query = wallets.select().where(wallets.c.user_id == user_id)\n wallet = await self._db.fetch_one(wallet_query)\n if wallet:\n return WalletEntity(**wallet)\n return None\n\n async def create(self, user_id: int) -> int:\n \"\"\"Create new wallet with user ID :param user_id: ID of user :returns: ID of new wallet\"\"\"\n wallet_query = wallets.insert().values({'user_id': user_id})\n wallet_id: int = await self._db.execute(wallet_query)\n return wallet_id\n\n async def enroll(self, wallet_id: int, amount: Decimal) -> int:\n \"\"\"Put given funds on wallet :param wallet_id: ID of wallet :param amount: Initial funds :returns: ID of updated wallet\"\"\"\n query = wallets.update().where(wallets.c.id == wallet_id).values({'balance': wallets.c.balance + amount}).returning(wallets.c.id)\n w_id: int = await self._db.execute(query)\n return w_id\n\n async def transfer(self, source_wallet_id: int, destination_wallet_id: int, amount: Decimal) -> int:\n \"\"\"Transfer amount of currency between wallets. :param source_wallet_id: ID of source wallet :param destination_wallet_id: ID of wallet recipients :param amount: Amount of currency :returns: Source wallet id\"\"\"\n source_query = wallets.update().where(wallets.c.id == source_wallet_id).values({'balance': wallets.c.balance - amount}).returning(wallets.c.id)\n target_query = wallets.update().where(wallets.c.id == destination_wallet_id).values({'balance': wallets.c.balance + amount})\n wallet_id: int = await self._db.execute(source_query)\n if not wallet_id:\n raise ValueError('Source wallet id does not exist')\n await self._db.execute(target_query)\n return wallet_id\n", "source": "the_stack_v2_python_sparse", "source_path": "app/repositories/wallet.py", "source_repo": "vsokoltsov/billing_system_test_task", "split": "val", "star_events_count": 0}
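One caveat worth flagging in transfer above: the debit and the credit are two separate execute calls, so a crash between them loses money. If self._db is an encode/databases-style Database (an assumption; the record never shows how it is constructed), the sketch below wraps both updates in one transaction so a failed credit rolls the debit back.

async def transfer(self, source_wallet_id, destination_wallet_id, amount):
    # Hypothetical hardening of the method above; Database.transaction()
    # is the encode/databases async context manager, assumed available here,
    # and wallets/Decimal come from the same module context as the record.
    async with self._db.transaction():
        debit = (wallets.update()
                 .where(wallets.c.id == source_wallet_id)
                 .values({'balance': wallets.c.balance - amount})
                 .returning(wallets.c.id))
        credit = (wallets.update()
                  .where(wallets.c.id == destination_wallet_id)
                  .values({'balance': wallets.c.balance + amount}))
        wallet_id = await self._db.execute(debit)
        if not wallet_id:
            raise ValueError('Source wallet id does not exist')  # rolls back
        await self._db.execute(credit)
        return wallet_id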
{"blob_id": "71ac27520f9eb9f9c75373ee4240fe2461b494fa", "bodies": ["self.manager.get_policy_content(resource)\nfor s in resource['PolicyDocument']['statement']:\n if 'condition' not in s and s['effect'] == 'allow':\n if 'action' in s and '*:*' in s['action'] and ('resource' in s) and (isinstance(s['resource'], str) and s['resource'] == '*' or (isinstance(s['resource'], list) and '*' in s['resource'])):\n return True\nreturn False", "results = [r for r in resources if self.has_allow_all_policy(r)]\nself.log.info('%d of %d cam policies have allow all.', len(results), len(resources))\nreturn results"], "bodies_text": "<|body_start_0|>\n self.manager.get_policy_content(resource)\n for s in resource['PolicyDocument']['statement']:\n if 'condition' not in s and s['effect'] == 'allow':\n if 'action' in s and '*:*' in s['action'] and ('resource' in s) and (isinstance(s['resource'], str) and s['resource'] == '*' or (isinstance(s['resource'], list) and '*' in s['resource'])):\n return True\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n results = [r for r in resources if self.has_allow_all_policy(r)]\n self.log.info('%d of %d cam policies have allow all.', len(results), len(resources))\n return results\n<|end_body_1|>\n", "class_docstring": "AllowAllIamPolicies :example: .. code-block:: yaml policies: - name: cam-policy-overly-permissive resource: tencentcloud.cam-policy filters: - type: has-allow-all", "class_name": "AllowAllIamPolicies", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AllowAllIamPolicies:\n \"\"\"AllowAllIamPolicies :example: .. code-block:: yaml policies: - name: cam-policy-overly-permissive resource: tencentcloud.cam-policy filters: - type: has-allow-all\"\"\"\n\n def has_allow_all_policy(self, resource):\n \"\"\"has_allow_all_policy\"\"\"\n <|body_0|>\n\n def process(self, resources, event=None):\n \"\"\"process\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.manager.get_policy_content(resource)\n for s in resource['PolicyDocument']['statement']:\n if 'condition' not in s and s['effect'] == 'allow':\n if 'action' in s and '*:*' in s['action'] and ('resource' in s) and (isinstance(s['resource'], str) and s['resource'] == '*' or (isinstance(s['resource'], list) and '*' in s['resource'])):\n return True\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n results = [r for r in resources if self.has_allow_all_policy(r)]\n self.log.info('%d of %d cam policies have allow all.', len(results), len(resources))\n return results\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000487", "length_bytes": 20157, "license_type": "permissive", "methods": [{"docstring": "has_allow_all_policy", "name": "has_allow_all_policy", "signature": "def has_allow_all_policy(self, resource)"}, {"docstring": "process", "name": "process", "signature": "def process(self, resources, event=None)"}], "n_methods": 2, "prompt": "Implement the Python class `AllowAllIamPolicies` described below.\n\nClass description:\nAllowAllIamPolicies :example: .. code-block:: yaml policies: - name: cam-policy-overly-permissive resource: tencentcloud.cam-policy filters: - type: has-allow-all\n\nMethod signatures and docstrings:\n- def has_allow_all_policy(self, resource): has_allow_all_policy\n- def process(self, resources, event=None): process", "prompted_full_text": "Implement the Python class `AllowAllIamPolicies` described below.\n\nClass description:\nAllowAllIamPolicies :example: .. 
code-block:: yaml policies: - name: cam-policy-overly-permissive resource: tencentcloud.cam-policy filters: - type: has-allow-all\n\nMethod signatures and docstrings:\n- def has_allow_all_policy(self, resource): has_allow_all_policy\n- def process(self, resources, event=None): process\n\n<|skeleton|>\nclass AllowAllIamPolicies:\n \"\"\"AllowAllIamPolicies :example: .. code-block:: yaml policies: - name: cam-policy-overly-permissive resource: tencentcloud.cam-policy filters: - type: has-allow-all\"\"\"\n\n def has_allow_all_policy(self, resource):\n \"\"\"has_allow_all_policy\"\"\"\n <|body_0|>\n\n def process(self, resources, event=None):\n \"\"\"process\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.manager.get_policy_content(resource)\n for s in resource['PolicyDocument']['statement']:\n if 'condition' not in s and s['effect'] == 'allow':\n if 'action' in s and '*:*' in s['action'] and ('resource' in s) and (isinstance(s['resource'], str) and s['resource'] == '*' or (isinstance(s['resource'], list) and '*' in s['resource'])):\n return True\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n results = [r for r in resources if self.has_allow_all_policy(r)]\n self.log.info('%d of %d cam policies have allow all.', len(results), len(resources))\n return results\n<|end_body_1|>\n", "revision_id": "27563cf4571040f923124e1acb2463f11e372225", "skeleton": "<|skeleton|>\nclass AllowAllIamPolicies:\n \"\"\"AllowAllIamPolicies :example: .. code-block:: yaml policies: - name: cam-policy-overly-permissive resource: tencentcloud.cam-policy filters: - type: has-allow-all\"\"\"\n\n def has_allow_all_policy(self, resource):\n \"\"\"has_allow_all_policy\"\"\"\n <|body_0|>\n\n def process(self, resources, event=None):\n \"\"\"process\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class AllowAllIamPolicies:\n \"\"\"AllowAllIamPolicies :example: .. code-block:: yaml policies: - name: cam-policy-overly-permissive resource: tencentcloud.cam-policy filters: - type: has-allow-all\"\"\"\n\n def has_allow_all_policy(self, resource):\n \"\"\"has_allow_all_policy\"\"\"\n self.manager.get_policy_content(resource)\n for s in resource['PolicyDocument']['statement']:\n if 'condition' not in s and s['effect'] == 'allow':\n if 'action' in s and '*:*' in s['action'] and ('resource' in s) and (isinstance(s['resource'], str) and s['resource'] == '*' or (isinstance(s['resource'], list) and '*' in s['resource'])):\n return True\n return False\n\n def process(self, resources, event=None):\n \"\"\"process\"\"\"\n results = [r for r in resources if self.has_allow_all_policy(r)]\n self.log.info('%d of %d cam policies have allow all.', len(results), len(resources))\n return results\n", "source": "the_stack_v2_python_sparse", "source_path": "tools/c7n_tencentcloud/c7n_tencentcloud/resources/cam.py", "source_repo": "cloud-custodian/cloud-custodian", "split": "val", "star_events_count": 3327}
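The filter's core test is easy to exercise in isolation: a statement is "allow all" when it carries no condition, its effect is allow, its action contains '*:*', and its resource is a wildcard (either the string '*' or a list containing it). Below, that predicate is pulled out of the class for a standalone check; the sample statements are made up rather than real Tencent Cloud CAM output.

def is_allow_all(statement):
    # Mirrors the per-statement logic in has_allow_all_policy above.
    if 'condition' in statement or statement.get('effect') != 'allow':
        return False
    action = statement.get('action', [])
    resource = statement.get('resource')
    wildcard = (resource == '*' if isinstance(resource, str)
                else isinstance(resource, list) and '*' in resource)
    return '*:*' in action and wildcard

assert is_allow_all({'effect': 'allow', 'action': ['*:*'], 'resource': '*'})
assert not is_allow_all({'effect': 'allow', 'action': ['*:*'],
                         'resource': ['*'], 'condition': {'ip': '10.0.0.0/8'}})
assert not is_allow_all({'effect': 'deny', 'action': ['*:*'], 'resource': '*'})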
{"blob_id": "b60734479f1c0cb2ccbcb23571f67dd1c408a627", "bodies": ["if hasattr(self.survey_record, 'grade'):\n grade = self.survey_record.grade\nvals_grade = {True: 'pass', False: 'fail'}\nself.data['grade'] = vals_grade.get(grade, None) or grade\nreturn super(GradeSurveyRecordForm, self).getFields(*args)", "fields = super(GradeSurveyRecordForm, self).insertFields()\ngrade_choices = (('', 'Choose a Grade'),) + tuple(DEF_GRADE_CHOICES)\ngradeField = forms.fields.CharField(widget=custom_widgets.PlainTextWidget, initial=self.data.get('grade'))\nfields.insert(len(fields) + 1, 'grade', gradeField)\nreturn fields"], "bodies_text": "<|body_start_0|>\n if hasattr(self.survey_record, 'grade'):\n grade = self.survey_record.grade\n vals_grade = {True: 'pass', False: 'fail'}\n self.data['grade'] = vals_grade.get(grade, None) or grade\n return super(GradeSurveyRecordForm, self).getFields(*args)\n<|end_body_0|>\n\n<|body_start_1|>\n fields = super(GradeSurveyRecordForm, self).insertFields()\n grade_choices = (('', 'Choose a Grade'),) + tuple(DEF_GRADE_CHOICES)\n gradeField = forms.fields.CharField(widget=custom_widgets.PlainTextWidget, initial=self.data.get('grade'))\n fields.insert(len(fields) + 1, 'grade', gradeField)\n return fields\n<|end_body_1|>\n", "class_docstring": "RecordForm for the GradeSurveyTakeForm.", "class_name": "GradeSurveyRecordForm", "detected_licenses": ["Apache-2.0", "BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GradeSurveyRecordForm:\n \"\"\"RecordForm for the GradeSurveyTakeForm.\"\"\"\n\n def getFields(self, *args):\n \"\"\"Add the extra grade field's value from survey_record.\"\"\"\n <|body_0|>\n\n def insertFields(self):\n \"\"\"Add ordered fields to self.fields, add grade field with grade choices.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if hasattr(self.survey_record, 'grade'):\n grade = self.survey_record.grade\n vals_grade = {True: 'pass', False: 'fail'}\n self.data['grade'] = vals_grade.get(grade, None) or grade\n return super(GradeSurveyRecordForm, self).getFields(*args)\n<|end_body_0|>\n\n<|body_start_1|>\n fields = super(GradeSurveyRecordForm, self).insertFields()\n grade_choices = (('', 'Choose a Grade'),) + tuple(DEF_GRADE_CHOICES)\n gradeField = forms.fields.CharField(widget=custom_widgets.PlainTextWidget, initial=self.data.get('grade'))\n fields.insert(len(fields) + 1, 'grade', gradeField)\n return fields\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000488", "length_bytes": 9757, "license_type": "permissive", "methods": [{"docstring": "Add the extra grade field's value from survey_record.", "name": "getFields", "signature": "def getFields(self, *args)"}, {"docstring": "Add ordered fields to self.fields, add grade field with grade choices.", "name": "insertFields", "signature": "def insertFields(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_020861", "prompt": "Implement the Python class `GradeSurveyRecordForm` described below.\n\nClass description:\nRecordForm for the GradeSurveyTakeForm.\n\nMethod signatures and docstrings:\n- def getFields(self, *args): Add the extra grade field's value from survey_record.\n- def insertFields(self): Add ordered fields to self.fields, add grade field with grade choices.", "prompted_full_text": "Implement the Python class `GradeSurveyRecordForm` described below.\n\nClass description:\nRecordForm for the GradeSurveyTakeForm.\n\nMethod signatures and docstrings:\n- def getFields(self, *args): Add the extra 
grade field's value from survey_record.\n- def insertFields(self): Add ordered fields to self.fields, add grade field with grade choices.\n\n<|skeleton|>\nclass GradeSurveyRecordForm:\n \"\"\"RecordForm for the GradeSurveyTakeForm.\"\"\"\n\n def getFields(self, *args):\n \"\"\"Add the extra grade field's value from survey_record.\"\"\"\n <|body_0|>\n\n def insertFields(self):\n \"\"\"Add ordered fields to self.fields, add grade field with grade choices.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if hasattr(self.survey_record, 'grade'):\n grade = self.survey_record.grade\n vals_grade = {True: 'pass', False: 'fail'}\n self.data['grade'] = vals_grade.get(grade, None) or grade\n return super(GradeSurveyRecordForm, self).getFields(*args)\n<|end_body_0|>\n\n<|body_start_1|>\n fields = super(GradeSurveyRecordForm, self).insertFields()\n grade_choices = (('', 'Choose a Grade'),) + tuple(DEF_GRADE_CHOICES)\n gradeField = forms.fields.CharField(widget=custom_widgets.PlainTextWidget, initial=self.data.get('grade'))\n fields.insert(len(fields) + 1, 'grade', gradeField)\n return fields\n<|end_body_1|>\n", "revision_id": "9bd45c168f8ddb5c0e6c04eacdcaeafd61908be7", "skeleton": "<|skeleton|>\nclass GradeSurveyRecordForm:\n \"\"\"RecordForm for the GradeSurveyTakeForm.\"\"\"\n\n def getFields(self, *args):\n \"\"\"Add the extra grade field's value from survey_record.\"\"\"\n <|body_0|>\n\n def insertFields(self):\n \"\"\"Add ordered fields to self.fields, add grade field with grade choices.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class GradeSurveyRecordForm:\n \"\"\"RecordForm for the GradeSurveyTakeForm.\"\"\"\n\n def getFields(self, *args):\n \"\"\"Add the extra grade field's value from survey_record.\"\"\"\n if hasattr(self.survey_record, 'grade'):\n grade = self.survey_record.grade\n vals_grade = {True: 'pass', False: 'fail'}\n self.data['grade'] = vals_grade.get(grade, None) or grade\n return super(GradeSurveyRecordForm, self).getFields(*args)\n\n def insertFields(self):\n \"\"\"Add ordered fields to self.fields, add grade field with grade choices.\"\"\"\n fields = super(GradeSurveyRecordForm, self).insertFields()\n grade_choices = (('', 'Choose a Grade'),) + tuple(DEF_GRADE_CHOICES)\n gradeField = forms.fields.CharField(widget=custom_widgets.PlainTextWidget, initial=self.data.get('grade'))\n fields.insert(len(fields) + 1, 'grade', gradeField)\n return fields\n", "source": "the_stack_v2_python_sparse", "source_path": "app/soc/modules/gsoc/views/models/grading_project_survey.py", "source_repo": "pombredanne/Melange-1", "split": "val", "star_events_count": 0}
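The only subtle line in getFields is the display mapping: grades are stored as booleans but shown as text, and vals_grade.get(grade, None) or grade falls through to the raw stored value for anything unmapped. A standalone trace of that expression (sample values are illustrative):

vals_grade = {True: 'pass', False: 'fail'}

for stored in (True, False, 'pass', None):
    shown = vals_grade.get(stored, None) or stored
    print(repr(stored), '->', repr(shown))
# True -> 'pass', False -> 'fail', 'pass' -> 'pass', None -> None
# Caveat: bool subclasses int, so stored values 0/1 would also hit these keys.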
{"blob_id": "7770a3fda906a7853a106d9d64197b9f44f58c07", "bodies": ["log.debug('Metaclass __new__ constructor called for %r' % cls)\nif cls._find_agent():\n attrs['init'] = cls.__init__\n attrs['_remove_agent'] = True\nreturn super(GPGMeta, cls).__new__(cls, name, bases, attrs)", "if not psutil:\n return False\nthis_process = psutil.Process(os.getpid())\nownership_match = False\nif _util._running_windows:\n identity = this_process.username()\nelse:\n identity = this_process.uids\nfor proc in psutil.process_iter():\n if proc.name == 'gpg-agent' and proc.is_running:\n log.debug('Found gpg-agent process with pid %d' % proc.pid)\n if _util._running_windows:\n if proc.username() == identity:\n ownership_match = True\n elif proc.uids == identity:\n ownership_match = True\nif ownership_match:\n log.debug('Effective UIDs of this process and gpg-agent match')\n setattr(cls, '_agent_proc', proc)\n return True\nreturn False"], "bodies_text": "<|body_start_0|>\n log.debug('Metaclass __new__ constructor called for %r' % cls)\n if cls._find_agent():\n attrs['init'] = cls.__init__\n attrs['_remove_agent'] = True\n return super(GPGMeta, cls).__new__(cls, name, bases, attrs)\n<|end_body_0|>\n\n<|body_start_1|>\n if not psutil:\n return False\n this_process = psutil.Process(os.getpid())\n ownership_match = False\n if _util._running_windows:\n identity = this_process.username()\n else:\n identity = this_process.uids\n for proc in psutil.process_iter():\n if proc.name == 'gpg-agent' and proc.is_running:\n log.debug('Found gpg-agent process with pid %d' % proc.pid)\n if _util._running_windows:\n if proc.username() == identity:\n ownership_match = True\n elif proc.uids == identity:\n ownership_match = True\n if ownership_match:\n log.debug('Effective UIDs of this process and gpg-agent match')\n setattr(cls, '_agent_proc', proc)\n return True\n return False\n<|end_body_1|>\n", "class_docstring": "Metaclass for changing the :meth:GPG.__init__ initialiser. Detects running gpg-agent processes and the presence of a pinentry program, and disables pinentry so that python-gnupg can write the passphrase to the controlled GnuPG process without killing the agent. :attr _agent_proc: If a :program:`gpg-agent` process is currently running for the effective userid, then **_agent_proc** will be set to a ``psutil.Process`` for that process.", "class_name": "GPGMeta", "detected_licenses": ["LicenseRef-scancode-philippe-de-muyter", "MIT", "GPL-3.0-only", "LicenseRef-scancode-unknown-license-reference", "GPL-3.0-or-later", "Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GPGMeta:\n \"\"\"Metaclass for changing the :meth:GPG.__init__ initialiser. Detects running gpg-agent processes and the presence of a pinentry program, and disables pinentry so that python-gnupg can write the passphrase to the controlled GnuPG process without killing the agent. :attr _agent_proc: If a :program:`gpg-agent` process is currently running for the effective userid, then **_agent_proc** will be set to a ``psutil.Process`` for that process.\"\"\"\n\n def __new__(cls, name, bases, attrs):\n \"\"\"Construct the initialiser for GPG\"\"\"\n <|body_0|>\n\n def _find_agent(cls):\n \"\"\"Discover if a gpg-agent process for the current euid is running. If there is a matching gpg-agent process, set a :class:`psutil.Process` instance containing the gpg-agent process' information to ``cls._agent_proc``. 
For Unix systems, we check that the effective UID of this ``python-gnupg`` process is also the owner of the gpg-agent process. For Windows, we check that the usernames of the owners are the same. (Sorry Windows users; maybe you should switch to anything else.) .. note: This function will only run if the psutil_ Python extension is installed. Because psutil won't run with the PyPy interpreter, use of it is optional (although highly recommended). .. _psutil: https://pypi.python.org\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n log.debug('Metaclass __new__ constructor called for %r' % cls)\n if cls._find_agent():\n attrs['init'] = cls.__init__\n attrs['_remove_agent'] = True\n return super(GPGMeta, cls).__new__(cls, name, bases, attrs)\n<|end_body_0|>\n\n<|body_start_1|>\n if not psutil:\n return False\n this_process = psutil.Process(os.getpid())\n ownership_match = False\n if _util._running_windows:\n identity = this_process.username()\n else:\n identity = this_process.uids\n for proc in psutil.process_iter():\n if proc.name == 'gpg-agent' and proc.is_running:\n log.debug('Found gpg-agent process with pid %d' % proc.pid)\n if _util._running_windows:\n if proc.username() == identity:\n ownership_match = True\n elif proc.uids == identity:\n ownership_match = True\n if ownership_match:\n log.debug('Effective UIDs of this process and gpg-agent match')\n setattr(cls, '_agent_proc', proc)\n return True\n return False\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000489", "length_bytes": 45539, "license_type": "permissive", "methods": [{"docstring": "Construct the initialiser for GPG", "name": "__new__", "signature": "def __new__(cls, name, bases, attrs)"}, {"docstring": "Discover if a gpg-agent process for the current euid is running. If there is a matching gpg-agent process, set a :class:`psutil.Process` instance containing the gpg-agent process' information to ``cls._agent_proc``. For Unix systems, we check that the effective UID of this ``python-gnupg`` process is also the owner of the gpg-agent process. For Windows, we check that the usernames of the owners are the same. (Sorry Windows users; maybe you should switch to anything else.) .. note: This function will only run if the psutil_ Python extension is installed. Because psutil won't run with the PyPy interpreter, use of it is optional (although highly recommended). .. _psutil: https://pypi.python.org", "name": "_find_agent", "signature": "def _find_agent(cls)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_test_000640", "prompt": "Implement the Python class `GPGMeta` described below.\n\nClass description:\nMetaclass for changing the :meth:GPG.__init__ initialiser. Detects running gpg-agent processes and the presence of a pinentry program, and disables pinentry so that python-gnupg can write the passphrase to the controlled GnuPG process without killing the agent. :attr _agent_proc: If a :program:`gpg-agent` process is currently running for the effective userid, then **_agent_proc** will be set to a ``psutil.Process`` for that process.\n\nMethod signatures and docstrings:\n- def __new__(cls, name, bases, attrs): Construct the initialiser for GPG\n- def _find_agent(cls): Discover if a gpg-agent process for the current euid is running. If there is a matching gpg-agent process, set a :class:`psutil.Process` instance containing the gpg-agent process' information to ``cls._agent_proc``. 
For Unix systems, we check that the effective UID of this ``python-gnupg`` process is also the owner of the gpg-agent process. For Windows, we check that the usernames of the owners are the same. (Sorry Windows users; maybe you should switch to anything else.) .. note: This function will only run if the psutil_ Python extension is installed. Because psutil won't run with the PyPy interpreter, use of it is optional (although highly recommended). .. _psutil: https://pypi.python.org", "prompted_full_text": "Implement the Python class `GPGMeta` described below.\n\nClass description:\nMetaclass for changing the :meth:GPG.__init__ initialiser. Detects running gpg-agent processes and the presence of a pinentry program, and disables pinentry so that python-gnupg can write the passphrase to the controlled GnuPG process without killing the agent. :attr _agent_proc: If a :program:`gpg-agent` process is currently running for the effective userid, then **_agent_proc** will be set to a ``psutil.Process`` for that process.\n\nMethod signatures and docstrings:\n- def __new__(cls, name, bases, attrs): Construct the initialiser for GPG\n- def _find_agent(cls): Discover if a gpg-agent process for the current euid is running. If there is a matching gpg-agent process, set a :class:`psutil.Process` instance containing the gpg-agent process' information to ``cls._agent_proc``. For Unix systems, we check that the effective UID of this ``python-gnupg`` process is also the owner of the gpg-agent process. For Windows, we check that the usernames of the owners are the same. (Sorry Windows users; maybe you should switch to anything else.) .. note: This function will only run if the psutil_ Python extension is installed. Because psutil won't run with the PyPy interpreter, use of it is optional (although highly recommended). .. _psutil: https://pypi.python.org\n\n<|skeleton|>\nclass GPGMeta:\n \"\"\"Metaclass for changing the :meth:GPG.__init__ initialiser. Detects running gpg-agent processes and the presence of a pinentry program, and disables pinentry so that python-gnupg can write the passphrase to the controlled GnuPG process without killing the agent. :attr _agent_proc: If a :program:`gpg-agent` process is currently running for the effective userid, then **_agent_proc** will be set to a ``psutil.Process`` for that process.\"\"\"\n\n def __new__(cls, name, bases, attrs):\n \"\"\"Construct the initialiser for GPG\"\"\"\n <|body_0|>\n\n def _find_agent(cls):\n \"\"\"Discover if a gpg-agent process for the current euid is running. If there is a matching gpg-agent process, set a :class:`psutil.Process` instance containing the gpg-agent process' information to ``cls._agent_proc``. For Unix systems, we check that the effective UID of this ``python-gnupg`` process is also the owner of the gpg-agent process. For Windows, we check that the usernames of the owners are the same. (Sorry Windows users; maybe you should switch to anything else.) .. note: This function will only run if the psutil_ Python extension is installed. Because psutil won't run with the PyPy interpreter, use of it is optional (although highly recommended). .. 
_psutil: https://pypi.python.org\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n log.debug('Metaclass __new__ constructor called for %r' % cls)\n if cls._find_agent():\n attrs['init'] = cls.__init__\n attrs['_remove_agent'] = True\n return super(GPGMeta, cls).__new__(cls, name, bases, attrs)\n<|end_body_0|>\n\n<|body_start_1|>\n if not psutil:\n return False\n this_process = psutil.Process(os.getpid())\n ownership_match = False\n if _util._running_windows:\n identity = this_process.username()\n else:\n identity = this_process.uids\n for proc in psutil.process_iter():\n if proc.name == 'gpg-agent' and proc.is_running:\n log.debug('Found gpg-agent process with pid %d' % proc.pid)\n if _util._running_windows:\n if proc.username() == identity:\n ownership_match = True\n elif proc.uids == identity:\n ownership_match = True\n if ownership_match:\n log.debug('Effective UIDs of this process and gpg-agent match')\n setattr(cls, '_agent_proc', proc)\n return True\n return False\n<|end_body_1|>\n", "revision_id": "cd6975e14ad78450c3cd58b415d8e074ed201cf3", "skeleton": "<|skeleton|>\nclass GPGMeta:\n \"\"\"Metaclass for changing the :meth:GPG.__init__ initialiser. Detects running gpg-agent processes and the presence of a pinentry program, and disables pinentry so that python-gnupg can write the passphrase to the controlled GnuPG process without killing the agent. :attr _agent_proc: If a :program:`gpg-agent` process is currently running for the effective userid, then **_agent_proc** will be set to a ``psutil.Process`` for that process.\"\"\"\n\n def __new__(cls, name, bases, attrs):\n \"\"\"Construct the initialiser for GPG\"\"\"\n <|body_0|>\n\n def _find_agent(cls):\n \"\"\"Discover if a gpg-agent process for the current euid is running. If there is a matching gpg-agent process, set a :class:`psutil.Process` instance containing the gpg-agent process' information to ``cls._agent_proc``. For Unix systems, we check that the effective UID of this ``python-gnupg`` process is also the owner of the gpg-agent process. For Windows, we check that the usernames of the owners are the same. (Sorry Windows users; maybe you should switch to anything else.) .. note: This function will only run if the psutil_ Python extension is installed. Because psutil won't run with the PyPy interpreter, use of it is optional (although highly recommended). .. _psutil: https://pypi.python.org\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class GPGMeta:\n \"\"\"Metaclass for changing the :meth:GPG.__init__ initialiser. Detects running gpg-agent processes and the presence of a pinentry program, and disables pinentry so that python-gnupg can write the passphrase to the controlled GnuPG process without killing the agent. :attr _agent_proc: If a :program:`gpg-agent` process is currently running for the effective userid, then **_agent_proc** will be set to a ``psutil.Process`` for that process.\"\"\"\n\n def __new__(cls, name, bases, attrs):\n \"\"\"Construct the initialiser for GPG\"\"\"\n log.debug('Metaclass __new__ constructor called for %r' % cls)\n if cls._find_agent():\n attrs['init'] = cls.__init__\n attrs['_remove_agent'] = True\n return super(GPGMeta, cls).__new__(cls, name, bases, attrs)\n\n def _find_agent(cls):\n \"\"\"Discover if a gpg-agent process for the current euid is running. 
If there is a matching gpg-agent process, set a :class:`psutil.Process` instance containing the gpg-agent process' information to ``cls._agent_proc``. For Unix systems, we check that the effective UID of this ``python-gnupg`` process is also the owner of the gpg-agent process. For Windows, we check that the usernames of the owners are the same. (Sorry Windows users; maybe you should switch to anything else.) .. note: This function will only run if the psutil_ Python extension is installed. Because psutil won't run with the PyPy interpreter, use of it is optional (although highly recommended). .. _psutil: https://pypi.python.org\"\"\"\n if not psutil:\n return False\n this_process = psutil.Process(os.getpid())\n ownership_match = False\n if _util._running_windows:\n identity = this_process.username()\n else:\n identity = this_process.uids\n for proc in psutil.process_iter():\n if proc.name == 'gpg-agent' and proc.is_running:\n log.debug('Found gpg-agent process with pid %d' % proc.pid)\n if _util._running_windows:\n if proc.username() == identity:\n ownership_match = True\n elif proc.uids == identity:\n ownership_match = True\n if ownership_match:\n log.debug('Effective UIDs of this process and gpg-agent match')\n setattr(cls, '_agent_proc', proc)\n return True\n return False\n", "source": "the_stack_v2_python_sparse", "source_path": "gnupg/_meta.py", "source_repo": "iMeiji/shadowsocksR", "split": "val", "star_events_count": 15}
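Editor's note on the `GPGMeta` record above: its `_find_agent` body appears to target the pre-2.0 psutil API, where `name`, `uids`, and `is_running` were attributes; on modern psutil those are methods, so comparisons like `proc.name == 'gpg-agent'` compare a bound method to a string and never match. A minimal sketch of the same effective-UID-matching scan against the current psutil API (`find_agent_process` is a hypothetical helper, not part of python-gnupg):

```python
import os
import logging

try:
    import psutil  # optional dependency, exactly as the record's docstring says
except ImportError:
    psutil = None

log = logging.getLogger(__name__)

def find_agent_process(name='gpg-agent'):
    """Return a psutil.Process for a gpg-agent owned by the current
    effective UID, or None. Unix-only sketch; Windows would compare
    Process.username() instead, as the record's code does."""
    if psutil is None:
        return None
    euid = os.geteuid()
    for proc in psutil.process_iter(['name']):
        try:
            if proc.info['name'] == name and proc.uids().effective == euid:
                log.debug('Found %s process with pid %d', name, proc.pid)
                return proc
        except (psutil.NoSuchProcess, psutil.AccessDenied):
            continue  # process exited or is not inspectable; keep scanning
    return None
```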
{"blob_id": "ef5d4a323d956c305c261f9e75aabc80c068d2a4", "bodies": ["if root == None:\n return []\nqueue = [root]\ndata = []\nwhile queue:\n node = queue.pop(0)\n if node != None:\n data.append(node.val)\n queue.append(node.left)\n queue.append(node.right)\n else:\n data.append(None)\nreturn data", "if not data:\n return None\nroot = TreeNode(data.pop(0))\nqueue = [root]\nindex = 0\nwhile queue:\n node = queue.pop(0)\n left = data[index]\n index += 1\n right = data[index]\n index += 1\n if left != None:\n lNode = TreeNode(left)\n queue.append(lNode)\n node.left = lNode\n else:\n node.left = None\n if right != None:\n rNode = TreeNode(right)\n queue.append(rNode)\n node.right = rNode\n else:\n node.right = None\nreturn root"], "bodies_text": "<|body_start_0|>\n if root == None:\n return []\n queue = [root]\n data = []\n while queue:\n node = queue.pop(0)\n if node != None:\n data.append(node.val)\n queue.append(node.left)\n queue.append(node.right)\n else:\n data.append(None)\n return data\n<|end_body_0|>\n\n<|body_start_1|>\n if not data:\n return None\n root = TreeNode(data.pop(0))\n queue = [root]\n index = 0\n while queue:\n node = queue.pop(0)\n left = data[index]\n index += 1\n right = data[index]\n index += 1\n if left != None:\n lNode = TreeNode(left)\n queue.append(lNode)\n node.left = lNode\n else:\n node.left = None\n if right != None:\n rNode = TreeNode(right)\n queue.append(rNode)\n node.right = rNode\n else:\n node.right = None\n return root\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Codec", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: List[TreeNode]\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: List[TreeNode] :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if root == None:\n return []\n queue = [root]\n data = []\n while queue:\n node = queue.pop(0)\n if node != None:\n data.append(node.val)\n queue.append(node.left)\n queue.append(node.right)\n else:\n data.append(None)\n return data\n<|end_body_0|>\n\n<|body_start_1|>\n if not data:\n return None\n root = TreeNode(data.pop(0))\n queue = [root]\n index = 0\n while queue:\n node = queue.pop(0)\n left = data[index]\n index += 1\n right = data[index]\n index += 1\n if left != None:\n lNode = TreeNode(left)\n queue.append(lNode)\n node.left = lNode\n else:\n node.left = None\n if right != None:\n rNode = TreeNode(right)\n queue.append(rNode)\n node.right = rNode\n else:\n node.right = None\n return root\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000490", "length_bytes": 2484, "license_type": "no_license", "methods": [{"docstring": "Encodes a tree to a single string. :type root: TreeNode :rtype: List[TreeNode]", "name": "serialize", "signature": "def serialize(self, root)"}, {"docstring": "Decodes your encoded data to tree. :type data: List[TreeNode] :rtype: TreeNode", "name": "deserialize", "signature": "def deserialize(self, data)"}], "n_methods": 2, "prompt": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: TreeNode :rtype: List[TreeNode]\n- def deserialize(self, data): Decodes your encoded data to tree. 
:type data: List[TreeNode] :rtype: TreeNode", "prompted_full_text": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: TreeNode :rtype: List[TreeNode]\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: List[TreeNode] :rtype: TreeNode\n\n<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: List[TreeNode]\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: List[TreeNode] :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if root == None:\n return []\n queue = [root]\n data = []\n while queue:\n node = queue.pop(0)\n if node != None:\n data.append(node.val)\n queue.append(node.left)\n queue.append(node.right)\n else:\n data.append(None)\n return data\n<|end_body_0|>\n\n<|body_start_1|>\n if not data:\n return None\n root = TreeNode(data.pop(0))\n queue = [root]\n index = 0\n while queue:\n node = queue.pop(0)\n left = data[index]\n index += 1\n right = data[index]\n index += 1\n if left != None:\n lNode = TreeNode(left)\n queue.append(lNode)\n node.left = lNode\n else:\n node.left = None\n if right != None:\n rNode = TreeNode(right)\n queue.append(rNode)\n node.right = rNode\n else:\n node.right = None\n return root\n<|end_body_1|>\n", "revision_id": "1cb50b2250792d78e1d215ecbccc0ee5df123d67", "skeleton": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: List[TreeNode]\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: List[TreeNode] :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Codec:\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: List[TreeNode]\"\"\"\n if root == None:\n return []\n queue = [root]\n data = []\n while queue:\n node = queue.pop(0)\n if node != None:\n data.append(node.val)\n queue.append(node.left)\n queue.append(node.right)\n else:\n data.append(None)\n return data\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: List[TreeNode] :rtype: TreeNode\"\"\"\n if not data:\n return None\n root = TreeNode(data.pop(0))\n queue = [root]\n index = 0\n while queue:\n node = queue.pop(0)\n left = data[index]\n index += 1\n right = data[index]\n index += 1\n if left != None:\n lNode = TreeNode(left)\n queue.append(lNode)\n node.left = lNode\n else:\n node.left = None\n if right != None:\n rNode = TreeNode(right)\n queue.append(rNode)\n node.right = rNode\n else:\n node.right = None\n return root\n", "source": "the_stack_v2_python_sparse", "source_path": "297_Serialize_and_Deserialize_Binary_Tree/solution.py", "source_repo": "dwangproof/1337c0d3", "split": "val", "star_events_count": 0}
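Editor's note on the `Codec` record above: despite the docstring's "Encodes a tree to a single string", `serialize` returns a list of values, and `deserialize` consumes that same list. Both methods also use `list.pop(0)`, which is O(n) per pop; `collections.deque` gives the identical level-order traversal with O(1) pops. A self-contained sketch of the serialize side under that substitution (`TreeNode` defined inline for the example):

```python
from collections import deque

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

def serialize(root):
    """Level-order encoding with None placeholders, as in the record,
    but popping from a deque instead of list.pop(0)."""
    if root is None:
        return []
    out, queue = [], deque([root])
    while queue:
        node = queue.popleft()
        if node is not None:
            out.append(node.val)
            queue.append(node.left)   # children of None entries are never enqueued
            queue.append(node.right)
        else:
            out.append(None)
    return out

# The tree 1 -> (2, 3) encodes with one None pair per leaf's children:
assert serialize(TreeNode(1, TreeNode(2), TreeNode(3))) == [1, 2, 3, None, None, None, None]
```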
{"blob_id": "b8775b29273eca0b685a32effc33ad564e59101c", "bodies": ["super(Chef, self).__init__(image=Chef.image, x=games.screen.width / 2, y=y, dx=speed)\nself.odds_change = odds_change\nself.time_til_drop = 0", "if self.left < 0 or self.right > games.screen.width:\n self.dx = -self.dx\nelif random.randrange(self.odds_change) == 0:\n self.dx = -self.dx\nself.check_drop()", "if self.time_til_drop > 0:\n self.time_til_drop -= 1\nelse:\n new_pizza = Pizza(x=self.x)\n games.screen.add(new_pizza)\n self.time_til_drop = int(new_pizza.height * 1.3 / Pizza.speed) + 1"], "bodies_text": "<|body_start_0|>\n super(Chef, self).__init__(image=Chef.image, x=games.screen.width / 2, y=y, dx=speed)\n self.odds_change = odds_change\n self.time_til_drop = 0\n<|end_body_0|>\n\n<|body_start_1|>\n if self.left < 0 or self.right > games.screen.width:\n self.dx = -self.dx\n elif random.randrange(self.odds_change) == 0:\n self.dx = -self.dx\n self.check_drop()\n<|end_body_1|>\n\n<|body_start_2|>\n if self.time_til_drop > 0:\n self.time_til_drop -= 1\n else:\n new_pizza = Pizza(x=self.x)\n games.screen.add(new_pizza)\n self.time_til_drop = int(new_pizza.height * 1.3 / Pizza.speed) + 1\n<|end_body_2|>\n", "class_docstring": "A chef which moves left and right, dropping pizzas.", "class_name": "Chef", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Chef:\n \"\"\"A chef which moves left and right, dropping pizzas.\"\"\"\n\n def __init__(self, y=55, speed=2, odds_change=200):\n \"\"\"Initialize the Chef object.\"\"\"\n <|body_0|>\n\n def update(self):\n \"\"\"Determine if direction needs to be reversed.\"\"\"\n <|body_1|>\n\n def check_drop(self):\n \"\"\"Decrease countdown or drop pizza and reset countdown.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Chef, self).__init__(image=Chef.image, x=games.screen.width / 2, y=y, dx=speed)\n self.odds_change = odds_change\n self.time_til_drop = 0\n<|end_body_0|>\n\n<|body_start_1|>\n if self.left < 0 or self.right > games.screen.width:\n self.dx = -self.dx\n elif random.randrange(self.odds_change) == 0:\n self.dx = -self.dx\n self.check_drop()\n<|end_body_1|>\n\n<|body_start_2|>\n if self.time_til_drop > 0:\n self.time_til_drop -= 1\n else:\n new_pizza = Pizza(x=self.x)\n games.screen.add(new_pizza)\n self.time_til_drop = int(new_pizza.height * 1.3 / Pizza.speed) + 1\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000491", "length_bytes": 4247, "license_type": "no_license", "methods": [{"docstring": "Initialize the Chef object.", "name": "__init__", "signature": "def __init__(self, y=55, speed=2, odds_change=200)"}, {"docstring": "Determine if direction needs to be reversed.", "name": "update", "signature": "def update(self)"}, {"docstring": "Decrease countdown or drop pizza and reset countdown.", "name": "check_drop", "signature": "def check_drop(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_val_001169", "prompt": "Implement the Python class `Chef` described below.\n\nClass description:\nA chef which moves left and right, dropping pizzas.\n\nMethod signatures and docstrings:\n- def __init__(self, y=55, speed=2, odds_change=200): Initialize the Chef object.\n- def update(self): Determine if direction needs to be reversed.\n- def check_drop(self): Decrease countdown or drop pizza and reset countdown.", "prompted_full_text": "Implement the Python class `Chef` described below.\n\nClass description:\nA chef which moves left and right, dropping 
pizzas.\n\nMethod signatures and docstrings:\n- def __init__(self, y=55, speed=2, odds_change=200): Initialize the Chef object.\n- def update(self): Determine if direction needs to be reversed.\n- def check_drop(self): Decrease countdown or drop pizza and reset countdown.\n\n<|skeleton|>\nclass Chef:\n \"\"\"A chef which moves left and right, dropping pizzas.\"\"\"\n\n def __init__(self, y=55, speed=2, odds_change=200):\n \"\"\"Initialize the Chef object.\"\"\"\n <|body_0|>\n\n def update(self):\n \"\"\"Determine if direction needs to be reversed.\"\"\"\n <|body_1|>\n\n def check_drop(self):\n \"\"\"Decrease countdown or drop pizza and reset countdown.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Chef, self).__init__(image=Chef.image, x=games.screen.width / 2, y=y, dx=speed)\n self.odds_change = odds_change\n self.time_til_drop = 0\n<|end_body_0|>\n\n<|body_start_1|>\n if self.left < 0 or self.right > games.screen.width:\n self.dx = -self.dx\n elif random.randrange(self.odds_change) == 0:\n self.dx = -self.dx\n self.check_drop()\n<|end_body_1|>\n\n<|body_start_2|>\n if self.time_til_drop > 0:\n self.time_til_drop -= 1\n else:\n new_pizza = Pizza(x=self.x)\n games.screen.add(new_pizza)\n self.time_til_drop = int(new_pizza.height * 1.3 / Pizza.speed) + 1\n<|end_body_2|>\n", "revision_id": "c1e12491a4998c35e86e46010adf9a14e735d667", "skeleton": "<|skeleton|>\nclass Chef:\n \"\"\"A chef which moves left and right, dropping pizzas.\"\"\"\n\n def __init__(self, y=55, speed=2, odds_change=200):\n \"\"\"Initialize the Chef object.\"\"\"\n <|body_0|>\n\n def update(self):\n \"\"\"Determine if direction needs to be reversed.\"\"\"\n <|body_1|>\n\n def check_drop(self):\n \"\"\"Decrease countdown or drop pizza and reset countdown.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Chef:\n \"\"\"A chef which moves left and right, dropping pizzas.\"\"\"\n\n def __init__(self, y=55, speed=2, odds_change=200):\n \"\"\"Initialize the Chef object.\"\"\"\n super(Chef, self).__init__(image=Chef.image, x=games.screen.width / 2, y=y, dx=speed)\n self.odds_change = odds_change\n self.time_til_drop = 0\n\n def update(self):\n \"\"\"Determine if direction needs to be reversed.\"\"\"\n if self.left < 0 or self.right > games.screen.width:\n self.dx = -self.dx\n elif random.randrange(self.odds_change) == 0:\n self.dx = -self.dx\n self.check_drop()\n\n def check_drop(self):\n \"\"\"Decrease countdown or drop pizza and reset countdown.\"\"\"\n if self.time_til_drop > 0:\n self.time_til_drop -= 1\n else:\n new_pizza = Pizza(x=self.x)\n games.screen.add(new_pizza)\n self.time_til_drop = int(new_pizza.height * 1.3 / Pizza.speed) + 1\n", "source": "the_stack_v2_python_sparse", "source_path": "Ch11E1_PizzaPanicV110/pizza_panic.py", "source_repo": "malmhaug/Py_AbsBegin", "split": "val", "star_events_count": 7}
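Editor's note on the `Chef` record above: the cooldown `int(new_pizza.height * 1.3 / Pizza.speed) + 1` counts the frames a falling pizza needs to clear its own height plus a 30% margin, so consecutive drops never overlap vertically. A tiny sketch of that arithmetic with assumed numbers (the livewires `games` module, the sprite images, and the `Pizza` class are not included in the record):

```python
def frames_between_drops(pizza_height_px: float, fall_speed_px_per_frame: float) -> int:
    # One pizza height plus a 30% margin must scroll past before
    # the next drop, so consecutive pizzas never overlap vertically.
    return int(pizza_height_px * 1.3 / fall_speed_px_per_frame) + 1

# With a 40 px pizza falling 1 px per frame, the chef waits 53 frames.
assert frames_between_drops(40, 1) == 53
```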
{"blob_id": "0a373442aa931bf0a0830edb65272122a15323ca", "bodies": ["Nodes.__init__(self, is_cuda).__init__()\nself.num = num\nself.is_cuda = is_cuda\nself.child_edges = []\nself.parent_edges = []\nself.val = None", "batch = self.child_edges[0].child.val.size()[0]\nself.val = self.var(torch.zeros((batch, self.num)))\nfor e in self.child_edges:\n self.val += torch.mm(e.child.val, e.connections)\nreturn self.val"], "bodies_text": "<|body_start_0|>\n Nodes.__init__(self, is_cuda).__init__()\n self.num = num\n self.is_cuda = is_cuda\n self.child_edges = []\n self.parent_edges = []\n self.val = None\n<|end_body_0|>\n\n<|body_start_1|>\n batch = self.child_edges[0].child.val.size()[0]\n self.val = self.var(torch.zeros((batch, self.num)))\n for e in self.child_edges:\n self.val += torch.mm(e.child.val, e.connections)\n return self.val\n<|end_body_1|>\n", "class_docstring": "", "class_name": "ProductNodes", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ProductNodes:\n\n def __init__(self, is_cuda, num=1):\n \"\"\":param is_cuda: determines if the node is stored on the gpu or cpu :param num: The number of nodes being created\"\"\"\n <|body_0|>\n\n def forward(self):\n \"\"\"Overriding torch's forward pass for Product nodes.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Nodes.__init__(self, is_cuda).__init__()\n self.num = num\n self.is_cuda = is_cuda\n self.child_edges = []\n self.parent_edges = []\n self.val = None\n<|end_body_0|>\n\n<|body_start_1|>\n batch = self.child_edges[0].child.val.size()[0]\n self.val = self.var(torch.zeros((batch, self.num)))\n for e in self.child_edges:\n self.val += torch.mm(e.child.val, e.connections)\n return self.val\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000492", "length_bytes": 5420, "license_type": "permissive", "methods": [{"docstring": ":param is_cuda: determines if the node is stored on the gpu or cpu :param num: The number of nodes being created", "name": "__init__", "signature": "def __init__(self, is_cuda, num=1)"}, {"docstring": "Overriding torch's forward pass for Product nodes.", "name": "forward", "signature": "def forward(self)"}], "n_methods": 2, "prompt": "Implement the Python class `ProductNodes` described below.\n\nClass description:\nImplement the ProductNodes class.\n\nMethod signatures and docstrings:\n- def __init__(self, is_cuda, num=1): :param is_cuda: determines if the node is stored on the gpu or cpu :param num: The number of nodes being created\n- def forward(self): Overriding torch's forward pass for Product nodes.", "prompted_full_text": "Implement the Python class `ProductNodes` described below.\n\nClass description:\nImplement the ProductNodes class.\n\nMethod signatures and docstrings:\n- def __init__(self, is_cuda, num=1): :param is_cuda: determines if the node is stored on the gpu or cpu :param num: The number of nodes being created\n- def forward(self): Overriding torch's forward pass for Product nodes.\n\n<|skeleton|>\nclass ProductNodes:\n\n def __init__(self, is_cuda, num=1):\n \"\"\":param is_cuda: determines if the node is stored on the gpu or cpu :param num: The number of nodes being created\"\"\"\n <|body_0|>\n\n def forward(self):\n \"\"\"Overriding torch's forward pass for Product nodes.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Nodes.__init__(self, is_cuda).__init__()\n self.num = num\n self.is_cuda = is_cuda\n self.child_edges = []\n self.parent_edges = []\n self.val = 
None\n<|end_body_0|>\n\n<|body_start_1|>\n batch = self.child_edges[0].child.val.size()[0]\n self.val = self.var(torch.zeros((batch, self.num)))\n for e in self.child_edges:\n self.val += torch.mm(e.child.val, e.connections)\n return self.val\n<|end_body_1|>\n", "revision_id": "4ba05aef644b66fc8621991c78e426cef408b985", "skeleton": "<|skeleton|>\nclass ProductNodes:\n\n def __init__(self, is_cuda, num=1):\n \"\"\":param is_cuda: determines if the node is stored on the gpu or cpu :param num: The number of nodes being created\"\"\"\n <|body_0|>\n\n def forward(self):\n \"\"\"Overriding torch's forward pass for Product nodes.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ProductNodes:\n def __init__(self, is_cuda, num=1):\n \"\"\":param is_cuda: determines if the node is stored on the gpu or cpu :param num: The number of nodes being created\"\"\"\n Nodes.__init__(self, is_cuda).__init__()\n self.num = num\n self.is_cuda = is_cuda\n self.child_edges = []\n self.parent_edges = []\n self.val = None\n\n def forward(self):\n \"\"\"Overriding torch's forward pass for Product nodes.\"\"\"\n batch = self.child_edges[0].child.val.size()[0]\n self.val = self.var(torch.zeros((batch, self.num)))\n for e in self.child_edges:\n self.val += torch.mm(e.child.val, e.connections)\n return self.val\n", "source": "the_stack_v2_python_sparse", "source_path": "src/spn/gpu/PyTorch/Nodes.py", "source_repo": "c0derzer0/SPFlow", "split": "val", "star_events_count": 2}
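Editor's note on the `ProductNodes` record above: `forward()` accumulates `torch.mm(e.child.val, e.connections)` over all child edges into a `(batch, num)` tensor, which is how product nodes are evaluated when an SPN's node values are (plausibly, in this codebase) kept in log-space, where products become sums. The `Nodes.__init__(self, is_cuda).__init__()` call in the record is a harmless oddity: `Nodes.__init__` returns None, so the chained `.__init__()` is a no-op. A shape-level sketch of the accumulation with plain tensors (names and shapes are assumptions, since the `Nodes` base class and edge objects are not in the record):

```python
import torch

def product_forward(child_vals, connection_mats, num_nodes):
    """Accumulate child_val @ connections over child edges into a
    (batch, num_nodes) tensor, mirroring the record's forward()."""
    batch = child_vals[0].size(0)
    val = torch.zeros(batch, num_nodes)
    for child_val, conn in zip(child_vals, connection_mats):
        val += child_val @ conn   # (batch, k_i) @ (k_i, num_nodes)
    return val

# Two children with 3 and 2 units feeding 4 product nodes, batch of 5:
vals = [torch.randn(5, 3), torch.randn(5, 2)]
conns = [torch.randn(3, 4), torch.randn(2, 4)]
assert product_forward(vals, conns, 4).shape == (5, 4)
```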
{"blob_id": "b2d63af76783b118e984f650550ea23fcc066fd4", "bodies": ["ex_dir = './Example1/{}'\nex_files = ['courses.csv', 'students.csv', 'tests.csv', 'marks.csv', 'output_test.json']\nsol = Solution([ex_dir.format(s) for s in ex_files])\nsol.writing()\nwith open(ex_dir.format(ex_files[-1])) as f:\n res = json.load(f)\nwith open(ex_dir.format('output.json')) as f:\n expected_res = json.load(f)\nself.assertEqual(res['students'][1]['totalAverage'], expected_res['students'][1]['totalAverage'])\nself.assertEqual(res['students'][1]['courses'], expected_res['students'][1]['courses'])", "ex_dir = './Example2/{}'\nex_files = ['courses.csv', 'students.csv', 'tests.csv', 'marks.csv', 'output_test.json']\nsol = Solution([ex_dir.format(s) for s in ex_files])\nsol.writing()\nwith open(ex_dir.format(ex_files[-1])) as f:\n res = json.load(f)\nwith open(ex_dir.format('output.json')) as f:\n expected_res = json.load(f)\nself.assertEqual(res, expected_res)", "ex_dir = './Example3/{}'\nex_files = ['courses.csv', 'students.csv', 'tests.csv', 'marks.csv', 'output_test.json']\nsol = Solution([ex_dir.format(s) for s in ex_files])\nsol.writing()\nwith open(ex_dir.format(ex_files[-1])) as f:\n res = json.load(f)\nself.assertEqual(res, {'error': 'Invalid course weights'})"], "bodies_text": "<|body_start_0|>\n ex_dir = './Example1/{}'\n ex_files = ['courses.csv', 'students.csv', 'tests.csv', 'marks.csv', 'output_test.json']\n sol = Solution([ex_dir.format(s) for s in ex_files])\n sol.writing()\n with open(ex_dir.format(ex_files[-1])) as f:\n res = json.load(f)\n with open(ex_dir.format('output.json')) as f:\n expected_res = json.load(f)\n self.assertEqual(res['students'][1]['totalAverage'], expected_res['students'][1]['totalAverage'])\n self.assertEqual(res['students'][1]['courses'], expected_res['students'][1]['courses'])\n<|end_body_0|>\n\n<|body_start_1|>\n ex_dir = './Example2/{}'\n ex_files = ['courses.csv', 'students.csv', 'tests.csv', 'marks.csv', 'output_test.json']\n sol = Solution([ex_dir.format(s) for s in ex_files])\n sol.writing()\n with open(ex_dir.format(ex_files[-1])) as f:\n res = json.load(f)\n with open(ex_dir.format('output.json')) as f:\n expected_res = json.load(f)\n self.assertEqual(res, expected_res)\n<|end_body_1|>\n\n<|body_start_2|>\n ex_dir = './Example3/{}'\n ex_files = ['courses.csv', 'students.csv', 'tests.csv', 'marks.csv', 'output_test.json']\n sol = Solution([ex_dir.format(s) for s in ex_files])\n sol.writing()\n with open(ex_dir.format(ex_files[-1])) as f:\n res = json.load(f)\n self.assertEqual(res, {'error': 'Invalid course weights'})\n<|end_body_2|>\n", "class_docstring": "", "class_name": "TestExamples", "detected_licenses": ["BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestExamples:\n\n def test_eg1(self):\n \"\"\"Test specific fields\"\"\"\n <|body_0|>\n\n def test_eg2(self):\n \"\"\"Test entire example 2\"\"\"\n <|body_1|>\n\n def test_eg3(self):\n \"\"\"Test outputing error when weights are incorrect\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ex_dir = './Example1/{}'\n ex_files = ['courses.csv', 'students.csv', 'tests.csv', 'marks.csv', 'output_test.json']\n sol = Solution([ex_dir.format(s) for s in ex_files])\n sol.writing()\n with open(ex_dir.format(ex_files[-1])) as f:\n res = json.load(f)\n with open(ex_dir.format('output.json')) as f:\n expected_res = json.load(f)\n self.assertEqual(res['students'][1]['totalAverage'], expected_res['students'][1]['totalAverage'])\n 
self.assertEqual(res['students'][1]['courses'], expected_res['students'][1]['courses'])\n<|end_body_0|>\n\n<|body_start_1|>\n ex_dir = './Example2/{}'\n ex_files = ['courses.csv', 'students.csv', 'tests.csv', 'marks.csv', 'output_test.json']\n sol = Solution([ex_dir.format(s) for s in ex_files])\n sol.writing()\n with open(ex_dir.format(ex_files[-1])) as f:\n res = json.load(f)\n with open(ex_dir.format('output.json')) as f:\n expected_res = json.load(f)\n self.assertEqual(res, expected_res)\n<|end_body_1|>\n\n<|body_start_2|>\n ex_dir = './Example3/{}'\n ex_files = ['courses.csv', 'students.csv', 'tests.csv', 'marks.csv', 'output_test.json']\n sol = Solution([ex_dir.format(s) for s in ex_files])\n sol.writing()\n with open(ex_dir.format(ex_files[-1])) as f:\n res = json.load(f)\n self.assertEqual(res, {'error': 'Invalid course weights'})\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000493", "length_bytes": 1962, "license_type": "permissive", "methods": [{"docstring": "Test specific fields", "name": "test_eg1", "signature": "def test_eg1(self)"}, {"docstring": "Test entire example 2", "name": "test_eg2", "signature": "def test_eg2(self)"}, {"docstring": "Test outputing error when weights are incorrect", "name": "test_eg3", "signature": "def test_eg3(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_test_000202", "prompt": "Implement the Python class `TestExamples` described below.\n\nClass description:\nImplement the TestExamples class.\n\nMethod signatures and docstrings:\n- def test_eg1(self): Test specific fields\n- def test_eg2(self): Test entire example 2\n- def test_eg3(self): Test outputing error when weights are incorrect", "prompted_full_text": "Implement the Python class `TestExamples` described below.\n\nClass description:\nImplement the TestExamples class.\n\nMethod signatures and docstrings:\n- def test_eg1(self): Test specific fields\n- def test_eg2(self): Test entire example 2\n- def test_eg3(self): Test outputing error when weights are incorrect\n\n<|skeleton|>\nclass TestExamples:\n\n def test_eg1(self):\n \"\"\"Test specific fields\"\"\"\n <|body_0|>\n\n def test_eg2(self):\n \"\"\"Test entire example 2\"\"\"\n <|body_1|>\n\n def test_eg3(self):\n \"\"\"Test outputing error when weights are incorrect\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ex_dir = './Example1/{}'\n ex_files = ['courses.csv', 'students.csv', 'tests.csv', 'marks.csv', 'output_test.json']\n sol = Solution([ex_dir.format(s) for s in ex_files])\n sol.writing()\n with open(ex_dir.format(ex_files[-1])) as f:\n res = json.load(f)\n with open(ex_dir.format('output.json')) as f:\n expected_res = json.load(f)\n self.assertEqual(res['students'][1]['totalAverage'], expected_res['students'][1]['totalAverage'])\n self.assertEqual(res['students'][1]['courses'], expected_res['students'][1]['courses'])\n<|end_body_0|>\n\n<|body_start_1|>\n ex_dir = './Example2/{}'\n ex_files = ['courses.csv', 'students.csv', 'tests.csv', 'marks.csv', 'output_test.json']\n sol = Solution([ex_dir.format(s) for s in ex_files])\n sol.writing()\n with open(ex_dir.format(ex_files[-1])) as f:\n res = json.load(f)\n with open(ex_dir.format('output.json')) as f:\n expected_res = json.load(f)\n self.assertEqual(res, expected_res)\n<|end_body_1|>\n\n<|body_start_2|>\n ex_dir = './Example3/{}'\n ex_files = ['courses.csv', 'students.csv', 'tests.csv', 'marks.csv', 'output_test.json']\n sol = Solution([ex_dir.format(s) for s in ex_files])\n sol.writing()\n with open(ex_dir.format(ex_files[-1])) 
as f:\n res = json.load(f)\n self.assertEqual(res, {'error': 'Invalid course weights'})\n<|end_body_2|>\n", "revision_id": "49a0b03c55d8a702785888d473ef96539265ce9c", "skeleton": "<|skeleton|>\nclass TestExamples:\n\n def test_eg1(self):\n \"\"\"Test specific fields\"\"\"\n <|body_0|>\n\n def test_eg2(self):\n \"\"\"Test entire example 2\"\"\"\n <|body_1|>\n\n def test_eg3(self):\n \"\"\"Test outputing error when weights are incorrect\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TestExamples:\n def test_eg1(self):\n \"\"\"Test specific fields\"\"\"\n ex_dir = './Example1/{}'\n ex_files = ['courses.csv', 'students.csv', 'tests.csv', 'marks.csv', 'output_test.json']\n sol = Solution([ex_dir.format(s) for s in ex_files])\n sol.writing()\n with open(ex_dir.format(ex_files[-1])) as f:\n res = json.load(f)\n with open(ex_dir.format('output.json')) as f:\n expected_res = json.load(f)\n self.assertEqual(res['students'][1]['totalAverage'], expected_res['students'][1]['totalAverage'])\n self.assertEqual(res['students'][1]['courses'], expected_res['students'][1]['courses'])\n\n def test_eg2(self):\n \"\"\"Test entire example 2\"\"\"\n ex_dir = './Example2/{}'\n ex_files = ['courses.csv', 'students.csv', 'tests.csv', 'marks.csv', 'output_test.json']\n sol = Solution([ex_dir.format(s) for s in ex_files])\n sol.writing()\n with open(ex_dir.format(ex_files[-1])) as f:\n res = json.load(f)\n with open(ex_dir.format('output.json')) as f:\n expected_res = json.load(f)\n self.assertEqual(res, expected_res)\n\n def test_eg3(self):\n \"\"\"Test outputing error when weights are incorrect\"\"\"\n ex_dir = './Example3/{}'\n ex_files = ['courses.csv', 'students.csv', 'tests.csv', 'marks.csv', 'output_test.json']\n sol = Solution([ex_dir.format(s) for s in ex_files])\n sol.writing()\n with open(ex_dir.format(ex_files[-1])) as f:\n res = json.load(f)\n self.assertEqual(res, {'error': 'Invalid course weights'})\n", "source": "the_stack_v2_python_sparse", "source_path": "online_testing_202006/test_with_examples.py", "source_repo": "chaosWsF/Python-Practice", "split": "val", "star_events_count": 1}
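Editor's note on the `TestExamples` record above: all three tests follow a golden-file pattern, running the pipeline and then comparing `json.load`-parsed output against a checked-in expected file. A compact sketch of that comparison as a reusable helper (hypothetical names; the record's `Solution` class is not shown):

```python
import json
import unittest
from pathlib import Path

class GoldenJSONTest(unittest.TestCase):
    """Hypothetical helper distilling the record's pattern: diff a
    produced JSON file against a checked-in expected ("golden") file."""

    def assert_matches_golden(self, produced: str, golden: str):
        with Path(produced).open() as f:
            result = json.load(f)
        with Path(golden).open() as f:
            expected = json.load(f)
        # Comparing parsed objects ignores key order and whitespace,
        # unlike a byte-for-byte file comparison.
        self.assertEqual(result, expected)
```

With this helper, the record's `test_eg2` would reduce to a single call such as `self.assert_matches_golden('./Example2/output_test.json', './Example2/output.json')`.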
{"blob_id": "09ceeff88db61da4ecf6a84878bedc5302bdf39a", "bodies": ["if self.request.version == 'v6':\n return ScanSerializerV6\nelif self.request.version == 'v7':\n return ScanSerializerV6", "if request.version == 'v6':\n return self._list_v6(request)\nelif request.version == 'v7':\n return self._list_v6(request)\nraise Http404()", "started = rest_util.parse_timestamp(request, 'started', required=False)\nended = rest_util.parse_timestamp(request, 'ended', required=False)\nrest_util.check_time_range(started, ended)\nnames = rest_util.parse_string_list(request, 'name', required=False)\norder = rest_util.parse_string_list(request, 'order', required=False)\nscans = Scan.objects.get_scans(started, ended, names, order)\npage = self.paginate_queryset(scans)\nserializer = self.get_serializer(page, many=True)\nreturn self.get_paginated_response(serializer.data)", "if request.version == 'v6':\n return self._create_v6(request)\nelif request.version == 'v7':\n return self._create_v6(request)\nraise Http404()", "title = rest_util.parse_string(request, 'title', required=True)\nname = title_to_name(self.queryset, title)\ndescription = rest_util.parse_string(request, 'description', required=False)\nconfiguration = rest_util.parse_dict(request, 'configuration')\ntry:\n config = ScanConfigurationV6(configuration, do_validate=True).get_configuration()\nexcept InvalidScanConfiguration as ex:\n raise BadParameter('Scan configuration invalid: %s' % unicode(ex))\ntry:\n scan = Scan.objects.create_scan(name, title, description, config)\nexcept InvalidScanConfiguration as ex:\n raise BadParameter('Scan configuration invalid: %s' % unicode(ex))\nserializer = ScanDetailsSerializerV6(scan)\nscan_url = reverse('scans_details_view', args=[scan.id], request=request)\nreturn Response(serializer.data, status=status.HTTP_201_CREATED, headers=dict(location=scan_url))"], "bodies_text": "<|body_start_0|>\n if self.request.version == 'v6':\n return ScanSerializerV6\n elif self.request.version == 'v7':\n return ScanSerializerV6\n<|end_body_0|>\n\n<|body_start_1|>\n if request.version == 'v6':\n return self._list_v6(request)\n elif request.version == 'v7':\n return self._list_v6(request)\n raise Http404()\n<|end_body_1|>\n\n<|body_start_2|>\n started = rest_util.parse_timestamp(request, 'started', required=False)\n ended = rest_util.parse_timestamp(request, 'ended', required=False)\n rest_util.check_time_range(started, ended)\n names = rest_util.parse_string_list(request, 'name', required=False)\n order = rest_util.parse_string_list(request, 'order', required=False)\n scans = Scan.objects.get_scans(started, ended, names, order)\n page = self.paginate_queryset(scans)\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n<|end_body_2|>\n\n<|body_start_3|>\n if request.version == 'v6':\n return self._create_v6(request)\n elif request.version == 'v7':\n return self._create_v6(request)\n raise Http404()\n<|end_body_3|>\n\n<|body_start_4|>\n title = rest_util.parse_string(request, 'title', required=True)\n name = title_to_name(self.queryset, title)\n description = rest_util.parse_string(request, 'description', required=False)\n configuration = rest_util.parse_dict(request, 'configuration')\n try:\n config = ScanConfigurationV6(configuration, do_validate=True).get_configuration()\n except InvalidScanConfiguration as ex:\n raise BadParameter('Scan configuration invalid: %s' % unicode(ex))\n try:\n scan = Scan.objects.create_scan(name, title, description, config)\n except 
InvalidScanConfiguration as ex:\n raise BadParameter('Scan configuration invalid: %s' % unicode(ex))\n serializer = ScanDetailsSerializerV6(scan)\n scan_url = reverse('scans_details_view', args=[scan.id], request=request)\n return Response(serializer.data, status=status.HTTP_201_CREATED, headers=dict(location=scan_url))\n<|end_body_4|>\n", "class_docstring": "This view is the endpoint for retrieving the list of all Scan process.", "class_name": "ScansView", "detected_licenses": ["LicenseRef-scancode-free-unknown", "Apache-2.0", "LicenseRef-scancode-public-domain"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ScansView:\n \"\"\"This view is the endpoint for retrieving the list of all Scan process.\"\"\"\n\n def get_serializer_class(self):\n \"\"\"Returns the appropriate serializer based off the requests version of the REST API.\"\"\"\n <|body_0|>\n\n def list(self, request):\n \"\"\"Retrieves the list of all Scan process and returns it in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_1|>\n\n def _list_v6(self, request):\n \"\"\"Retrieves the list of all Scan process and returns it in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_2|>\n\n def create(self, request):\n \"\"\"Creates a new Scan process and returns a link to the detail URL :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_3|>\n\n def _create_v6(self, request):\n \"\"\"Creates a new Scan process and returns a link to the detail URL :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.request.version == 'v6':\n return ScanSerializerV6\n elif self.request.version == 'v7':\n return ScanSerializerV6\n<|end_body_0|>\n\n<|body_start_1|>\n if request.version == 'v6':\n return self._list_v6(request)\n elif request.version == 'v7':\n return self._list_v6(request)\n raise Http404()\n<|end_body_1|>\n\n<|body_start_2|>\n started = rest_util.parse_timestamp(request, 'started', required=False)\n ended = rest_util.parse_timestamp(request, 'ended', required=False)\n rest_util.check_time_range(started, ended)\n names = rest_util.parse_string_list(request, 'name', required=False)\n order = rest_util.parse_string_list(request, 'order', required=False)\n scans = Scan.objects.get_scans(started, ended, names, order)\n page = self.paginate_queryset(scans)\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n<|end_body_2|>\n\n<|body_start_3|>\n if request.version == 'v6':\n return self._create_v6(request)\n elif request.version == 'v7':\n return self._create_v6(request)\n raise Http404()\n<|end_body_3|>\n\n<|body_start_4|>\n title = rest_util.parse_string(request, 'title', required=True)\n name = title_to_name(self.queryset, title)\n description = rest_util.parse_string(request, 'description', required=False)\n configuration = 
rest_util.parse_dict(request, 'configuration')\n try:\n config = ScanConfigurationV6(configuration, do_validate=True).get_configuration()\n except InvalidScanConfiguration as ex:\n raise BadParameter('Scan configuration invalid: %s' % unicode(ex))\n try:\n scan = Scan.objects.create_scan(name, title, description, config)\n except InvalidScanConfiguration as ex:\n raise BadParameter('Scan configuration invalid: %s' % unicode(ex))\n serializer = ScanDetailsSerializerV6(scan)\n scan_url = reverse('scans_details_view', args=[scan.id], request=request)\n return Response(serializer.data, status=status.HTTP_201_CREATED, headers=dict(location=scan_url))\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000494", "length_bytes": 30689, "license_type": "permissive", "methods": [{"docstring": "Returns the appropriate serializer based off the requests version of the REST API.", "name": "get_serializer_class", "signature": "def get_serializer_class(self)"}, {"docstring": "Retrieves the list of all Scan process and returns it in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user", "name": "list", "signature": "def list(self, request)"}, {"docstring": "Retrieves the list of all Scan process and returns it in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user", "name": "_list_v6", "signature": "def _list_v6(self, request)"}, {"docstring": "Creates a new Scan process and returns a link to the detail URL :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user", "name": "create", "signature": "def create(self, request)"}, {"docstring": "Creates a new Scan process and returns a link to the detail URL :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user", "name": "_create_v6", "signature": "def _create_v6(self, request)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_041588", "prompt": "Implement the Python class `ScansView` described below.\n\nClass description:\nThis view is the endpoint for retrieving the list of all Scan process.\n\nMethod signatures and docstrings:\n- def get_serializer_class(self): Returns the appropriate serializer based off the requests version of the REST API.\n- def list(self, request): Retrieves the list of all Scan process and returns it in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\n- def _list_v6(self, request): Retrieves the list of all Scan process and returns it in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\n- def create(self, request): Creates a new Scan process and returns a link to the detail URL :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` 
:returns: the HTTP response to send back to the user\n- def _create_v6(self, request): Creates a new Scan process and returns a link to the detail URL :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user", "prompted_full_text": "Implement the Python class `ScansView` described below.\n\nClass description:\nThis view is the endpoint for retrieving the list of all Scan process.\n\nMethod signatures and docstrings:\n- def get_serializer_class(self): Returns the appropriate serializer based off the requests version of the REST API.\n- def list(self, request): Retrieves the list of all Scan process and returns it in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\n- def _list_v6(self, request): Retrieves the list of all Scan process and returns it in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\n- def create(self, request): Creates a new Scan process and returns a link to the detail URL :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\n- def _create_v6(self, request): Creates a new Scan process and returns a link to the detail URL :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\n\n<|skeleton|>\nclass ScansView:\n \"\"\"This view is the endpoint for retrieving the list of all Scan process.\"\"\"\n\n def get_serializer_class(self):\n \"\"\"Returns the appropriate serializer based off the requests version of the REST API.\"\"\"\n <|body_0|>\n\n def list(self, request):\n \"\"\"Retrieves the list of all Scan process and returns it in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_1|>\n\n def _list_v6(self, request):\n \"\"\"Retrieves the list of all Scan process and returns it in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_2|>\n\n def create(self, request):\n \"\"\"Creates a new Scan process and returns a link to the detail URL :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_3|>\n\n def _create_v6(self, request):\n \"\"\"Creates a new Scan process and returns a link to the detail URL :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.request.version == 'v6':\n return ScanSerializerV6\n elif self.request.version == 'v7':\n return 
ScanSerializerV6\n<|end_body_0|>\n\n<|body_start_1|>\n if request.version == 'v6':\n return self._list_v6(request)\n elif request.version == 'v7':\n return self._list_v6(request)\n raise Http404()\n<|end_body_1|>\n\n<|body_start_2|>\n started = rest_util.parse_timestamp(request, 'started', required=False)\n ended = rest_util.parse_timestamp(request, 'ended', required=False)\n rest_util.check_time_range(started, ended)\n names = rest_util.parse_string_list(request, 'name', required=False)\n order = rest_util.parse_string_list(request, 'order', required=False)\n scans = Scan.objects.get_scans(started, ended, names, order)\n page = self.paginate_queryset(scans)\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n<|end_body_2|>\n\n<|body_start_3|>\n if request.version == 'v6':\n return self._create_v6(request)\n elif request.version == 'v7':\n return self._create_v6(request)\n raise Http404()\n<|end_body_3|>\n\n<|body_start_4|>\n title = rest_util.parse_string(request, 'title', required=True)\n name = title_to_name(self.queryset, title)\n description = rest_util.parse_string(request, 'description', required=False)\n configuration = rest_util.parse_dict(request, 'configuration')\n try:\n config = ScanConfigurationV6(configuration, do_validate=True).get_configuration()\n except InvalidScanConfiguration as ex:\n raise BadParameter('Scan configuration invalid: %s' % unicode(ex))\n try:\n scan = Scan.objects.create_scan(name, title, description, config)\n except InvalidScanConfiguration as ex:\n raise BadParameter('Scan configuration invalid: %s' % unicode(ex))\n serializer = ScanDetailsSerializerV6(scan)\n scan_url = reverse('scans_details_view', args=[scan.id], request=request)\n return Response(serializer.data, status=status.HTTP_201_CREATED, headers=dict(location=scan_url))\n<|end_body_4|>\n", "revision_id": "28618aee07ceed9e4a6eb7b8d0e6f05b31d8fd6b", "skeleton": "<|skeleton|>\nclass ScansView:\n \"\"\"This view is the endpoint for retrieving the list of all Scan process.\"\"\"\n\n def get_serializer_class(self):\n \"\"\"Returns the appropriate serializer based off the requests version of the REST API.\"\"\"\n <|body_0|>\n\n def list(self, request):\n \"\"\"Retrieves the list of all Scan process and returns it in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_1|>\n\n def _list_v6(self, request):\n \"\"\"Retrieves the list of all Scan process and returns it in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_2|>\n\n def create(self, request):\n \"\"\"Creates a new Scan process and returns a link to the detail URL :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_3|>\n\n def _create_v6(self, request):\n \"\"\"Creates a new Scan process and returns a link to the detail URL :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": 
"stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ScansView:\n \"\"\"This view is the endpoint for retrieving the list of all Scan process.\"\"\"\n\n def get_serializer_class(self):\n \"\"\"Returns the appropriate serializer based off the requests version of the REST API.\"\"\"\n if self.request.version == 'v6':\n return ScanSerializerV6\n elif self.request.version == 'v7':\n return ScanSerializerV6\n\n def list(self, request):\n \"\"\"Retrieves the list of all Scan process and returns it in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n if request.version == 'v6':\n return self._list_v6(request)\n elif request.version == 'v7':\n return self._list_v6(request)\n raise Http404()\n\n def _list_v6(self, request):\n \"\"\"Retrieves the list of all Scan process and returns it in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n started = rest_util.parse_timestamp(request, 'started', required=False)\n ended = rest_util.parse_timestamp(request, 'ended', required=False)\n rest_util.check_time_range(started, ended)\n names = rest_util.parse_string_list(request, 'name', required=False)\n order = rest_util.parse_string_list(request, 'order', required=False)\n scans = Scan.objects.get_scans(started, ended, names, order)\n page = self.paginate_queryset(scans)\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n\n def create(self, request):\n \"\"\"Creates a new Scan process and returns a link to the detail URL :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n if request.version == 'v6':\n return self._create_v6(request)\n elif request.version == 'v7':\n return self._create_v6(request)\n raise Http404()\n\n def _create_v6(self, request):\n \"\"\"Creates a new Scan process and returns a link to the detail URL :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n title = rest_util.parse_string(request, 'title', required=True)\n name = title_to_name(self.queryset, title)\n description = rest_util.parse_string(request, 'description', required=False)\n configuration = rest_util.parse_dict(request, 'configuration')\n try:\n config = ScanConfigurationV6(configuration, do_validate=True).get_configuration()\n except InvalidScanConfiguration as ex:\n raise BadParameter('Scan configuration invalid: %s' % unicode(ex))\n try:\n scan = Scan.objects.create_scan(name, title, description, config)\n except InvalidScanConfiguration as ex:\n raise BadParameter('Scan configuration invalid: %s' % unicode(ex))\n serializer = ScanDetailsSerializerV6(scan)\n scan_url = reverse('scans_details_view', args=[scan.id], request=request)\n return Response(serializer.data, status=status.HTTP_201_CREATED, headers=dict(location=scan_url))\n", "source": "the_stack_v2_python_sparse", "source_path": "scale/ingest/views.py", "source_repo": "kfconsultant/scale", "split": "val", 
"star_events_count": 0}
{"blob_id": "cc3c50369875b3626126b1e1354934afdd55c8e4", "bodies": ["section = self.cookbook.cook(section_name, config=self.config)\nself.sections[section_name] = section\nself.add_widget(SeparatorWithHeading(heading=section_name))\nself.add_widget(section)", "for section_name in self.config:\n if section_name in self.cookbook.get_recipe_names():\n self.add_section(section_name)"], "bodies_text": "<|body_start_0|>\n section = self.cookbook.cook(section_name, config=self.config)\n self.sections[section_name] = section\n self.add_widget(SeparatorWithHeading(heading=section_name))\n self.add_widget(section)\n<|end_body_0|>\n\n<|body_start_1|>\n for section_name in self.config:\n if section_name in self.cookbook.get_recipe_names():\n self.add_section(section_name)\n<|end_body_1|>\n", "class_docstring": "Base-class for SettingsRoot screen.", "class_name": "SettingsWidget", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SettingsWidget:\n \"\"\"Base-class for SettingsRoot screen.\"\"\"\n\n def add_section(self, section_name: str):\n \"\"\"Add section by name. Must be contained in :attr:`cookbook`.\"\"\"\n <|body_0|>\n\n def on_config(self, *_):\n \"\"\"Add add sections from :attr:`config`.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n section = self.cookbook.cook(section_name, config=self.config)\n self.sections[section_name] = section\n self.add_widget(SeparatorWithHeading(heading=section_name))\n self.add_widget(section)\n<|end_body_0|>\n\n<|body_start_1|>\n for section_name in self.config:\n if section_name in self.cookbook.get_recipe_names():\n self.add_section(section_name)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000495", "length_bytes": 4889, "license_type": "permissive", "methods": [{"docstring": "Add section by name. Must be contained in :attr:`cookbook`.", "name": "add_section", "signature": "def add_section(self, section_name: str)"}, {"docstring": "Add add sections from :attr:`config`.", "name": "on_config", "signature": "def on_config(self, *_)"}], "n_methods": 2, "prompt": "Implement the Python class `SettingsWidget` described below.\n\nClass description:\nBase-class for SettingsRoot screen.\n\nMethod signatures and docstrings:\n- def add_section(self, section_name: str): Add section by name. Must be contained in :attr:`cookbook`.\n- def on_config(self, *_): Add add sections from :attr:`config`.", "prompted_full_text": "Implement the Python class `SettingsWidget` described below.\n\nClass description:\nBase-class for SettingsRoot screen.\n\nMethod signatures and docstrings:\n- def add_section(self, section_name: str): Add section by name. Must be contained in :attr:`cookbook`.\n- def on_config(self, *_): Add add sections from :attr:`config`.\n\n<|skeleton|>\nclass SettingsWidget:\n \"\"\"Base-class for SettingsRoot screen.\"\"\"\n\n def add_section(self, section_name: str):\n \"\"\"Add section by name. 
Must be contained in :attr:`cookbook`.\"\"\"\n <|body_0|>\n\n def on_config(self, *_):\n \"\"\"Add add sections from :attr:`config`.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n section = self.cookbook.cook(section_name, config=self.config)\n self.sections[section_name] = section\n self.add_widget(SeparatorWithHeading(heading=section_name))\n self.add_widget(section)\n<|end_body_0|>\n\n<|body_start_1|>\n for section_name in self.config:\n if section_name in self.cookbook.get_recipe_names():\n self.add_section(section_name)\n<|end_body_1|>\n", "revision_id": "909d088ed4e98b97f65a2c896dc607941b00e4da", "skeleton": "<|skeleton|>\nclass SettingsWidget:\n \"\"\"Base-class for SettingsRoot screen.\"\"\"\n\n def add_section(self, section_name: str):\n \"\"\"Add section by name. Must be contained in :attr:`cookbook`.\"\"\"\n <|body_0|>\n\n def on_config(self, *_):\n \"\"\"Add add sections from :attr:`config`.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SettingsWidget:\n \"\"\"Base-class for SettingsRoot screen.\"\"\"\n\n def add_section(self, section_name: str):\n \"\"\"Add section by name. Must be contained in :attr:`cookbook`.\"\"\"\n section = self.cookbook.cook(section_name, config=self.config)\n self.sections[section_name] = section\n self.add_widget(SeparatorWithHeading(heading=section_name))\n self.add_widget(section)\n\n def on_config(self, *_):\n \"\"\"Add add sections from :attr:`config`.\"\"\"\n for section_name in self.config:\n if section_name in self.cookbook.get_recipe_names():\n self.add_section(section_name)\n", "source": "the_stack_v2_python_sparse", "source_path": "acg/custom_widgets/settings.py", "source_repo": "david-fischer/Anki_CardGen", "split": "val", "star_events_count": 5}
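The `SettingsWidget` record above builds UI sections by looking each config key up in a "cookbook" of recipes. A Kivy-free sketch of that flow, under the assumption that the cookbook maps names to factory callables; `Cookbook`, the string heading, and the `children` list stand in for the original Kivy widgets such as `SeparatorWithHeading`.

```python
# Plain-Python sketch of the cookbook-driven section building above.

class Cookbook:
    def __init__(self, recipes):
        self._recipes = recipes  # name -> factory callable

    def get_recipe_names(self):
        return self._recipes.keys()

    def cook(self, name, config):
        return self._recipes[name](config)

class SettingsWidget:
    def __init__(self, cookbook, config):
        self.cookbook = cookbook
        self.config = config
        self.sections = {}
        self.children = []  # stands in for Kivy's widget tree

    def add_widget(self, widget):
        self.children.append(widget)

    def add_section(self, section_name):
        section = self.cookbook.cook(section_name, config=self.config)
        self.sections[section_name] = section
        self.add_widget('heading:%s' % section_name)
        self.add_widget(section)

    def on_config(self, *_):
        # Only config keys that have a recipe become sections.
        for section_name in self.config:
            if section_name in self.cookbook.get_recipe_names():
                self.add_section(section_name)

cookbook = Cookbook({'audio': lambda cfg: 'audio-section'})
widget = SettingsWidget(cookbook, {'audio': {}, 'unknown': {}})
widget.on_config()
print(widget.children)  # ['heading:audio', 'audio-section']
```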
{"blob_id": "ec8060172f6b4fc01a76c2aa21dde6b35dfd3c07", "bodies": ["self.helimited_company_submitting_information()\nactual = self.driver.find_element_by_class_name('el-input__inner').is_enabled()\nself.assertTrue(actual)\nlogger.info('断言')", "self.helimited_partnership_submission()\nactual = self.driver.find_element_by_class_name('el-input__inner').is_enabled()\nself.assertTrue(actual)\nlogger.info('断言')", "self.hesole_proprietorship_submission()\nactual = self.driver.find_element_by_class_name('el-input__inner').is_enabled()\nself.assertTrue(actual)\nlogger.info('断言')"], "bodies_text": "<|body_start_0|>\n self.helimited_company_submitting_information()\n actual = self.driver.find_element_by_class_name('el-input__inner').is_enabled()\n self.assertTrue(actual)\n logger.info('断言')\n<|end_body_0|>\n\n<|body_start_1|>\n self.helimited_partnership_submission()\n actual = self.driver.find_element_by_class_name('el-input__inner').is_enabled()\n self.assertTrue(actual)\n logger.info('断言')\n<|end_body_1|>\n\n<|body_start_2|>\n self.hesole_proprietorship_submission()\n actual = self.driver.find_element_by_class_name('el-input__inner').is_enabled()\n self.assertTrue(actual)\n logger.info('断言')\n<|end_body_2|>\n", "class_docstring": "", "class_name": "HeRegister", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass HeRegister:\n\n def test_case01(self):\n \"\"\"合作商购买有限注册提交资料\"\"\"\n <|body_0|>\n\n def test_case02(self):\n \"\"\"合作商购买有限合伙注册提交资料\"\"\"\n <|body_1|>\n\n def test_case03(self):\n \"\"\"合作商购买个人独资注册提交资料\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.helimited_company_submitting_information()\n actual = self.driver.find_element_by_class_name('el-input__inner').is_enabled()\n self.assertTrue(actual)\n logger.info('断言')\n<|end_body_0|>\n\n<|body_start_1|>\n self.helimited_partnership_submission()\n actual = self.driver.find_element_by_class_name('el-input__inner').is_enabled()\n self.assertTrue(actual)\n logger.info('断言')\n<|end_body_1|>\n\n<|body_start_2|>\n self.hesole_proprietorship_submission()\n actual = self.driver.find_element_by_class_name('el-input__inner').is_enabled()\n self.assertTrue(actual)\n logger.info('断言')\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000496", "length_bytes": 1169, "license_type": "no_license", "methods": [{"docstring": "合作商购买有限注册提交资料", "name": "test_case01", "signature": "def test_case01(self)"}, {"docstring": "合作商购买有限合伙注册提交资料", "name": "test_case02", "signature": "def test_case02(self)"}, {"docstring": "合作商购买个人独资注册提交资料", "name": "test_case03", "signature": "def test_case03(self)"}], "n_methods": 3, "prompt": "Implement the Python class `HeRegister` described below.\n\nClass description:\nImplement the HeRegister class.\n\nMethod signatures and docstrings:\n- def test_case01(self): 合作商购买有限注册提交资料\n- def test_case02(self): 合作商购买有限合伙注册提交资料\n- def test_case03(self): 合作商购买个人独资注册提交资料", "prompted_full_text": "Implement the Python class `HeRegister` described below.\n\nClass description:\nImplement the HeRegister class.\n\nMethod signatures and docstrings:\n- def test_case01(self): 合作商购买有限注册提交资料\n- def test_case02(self): 合作商购买有限合伙注册提交资料\n- def test_case03(self): 合作商购买个人独资注册提交资料\n\n<|skeleton|>\nclass HeRegister:\n\n def test_case01(self):\n \"\"\"合作商购买有限注册提交资料\"\"\"\n <|body_0|>\n\n def test_case02(self):\n \"\"\"合作商购买有限合伙注册提交资料\"\"\"\n <|body_1|>\n\n def test_case03(self):\n \"\"\"合作商购买个人独资注册提交资料\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n 
self.helimited_company_submitting_information()\n actual = self.driver.find_element_by_class_name('el-input__inner').is_enabled()\n self.assertTrue(actual)\n logger.info('断言')\n<|end_body_0|>\n\n<|body_start_1|>\n self.helimited_partnership_submission()\n actual = self.driver.find_element_by_class_name('el-input__inner').is_enabled()\n self.assertTrue(actual)\n logger.info('断言')\n<|end_body_1|>\n\n<|body_start_2|>\n self.hesole_proprietorship_submission()\n actual = self.driver.find_element_by_class_name('el-input__inner').is_enabled()\n self.assertTrue(actual)\n logger.info('断言')\n<|end_body_2|>\n", "revision_id": "cf92e8e81ceb5cb67217bf36993cf94fe470fd0b", "skeleton": "<|skeleton|>\nclass HeRegister:\n\n def test_case01(self):\n \"\"\"合作商购买有限注册提交资料\"\"\"\n <|body_0|>\n\n def test_case02(self):\n \"\"\"合作商购买有限合伙注册提交资料\"\"\"\n <|body_1|>\n\n def test_case03(self):\n \"\"\"合作商购买个人独资注册提交资料\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class HeRegister:\n def test_case01(self):\n \"\"\"合作商购买有限注册提交资料\"\"\"\n self.helimited_company_submitting_information()\n actual = self.driver.find_element_by_class_name('el-input__inner').is_enabled()\n self.assertTrue(actual)\n logger.info('断言')\n\n def test_case02(self):\n \"\"\"合作商购买有限合伙注册提交资料\"\"\"\n self.helimited_partnership_submission()\n actual = self.driver.find_element_by_class_name('el-input__inner').is_enabled()\n self.assertTrue(actual)\n logger.info('断言')\n\n def test_case03(self):\n \"\"\"合作商购买个人独资注册提交资料\"\"\"\n self.hesole_proprietorship_submission()\n actual = self.driver.find_element_by_class_name('el-input__inner').is_enabled()\n self.assertTrue(actual)\n logger.info('断言')\n", "source": "the_stack_v2_python_sparse", "source_path": "hhr/case/qiantai/test_heregister.py", "source_repo": "aixin2000/Test_Scripts", "split": "val", "star_events_count": 0}
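All three `HeRegister` tests above repeat the same assertion via `find_element_by_class_name`, which Selenium deprecated in 4.0 and removed in 4.3. A sketch of that shared check using the current locator API; the helper name is an illustrative assumption, and the CSS class comes from the record.

```python
# Assumes a Selenium 4 WebDriver instance and a unittest-style testcase.
from selenium.webdriver.common.by import By

def assert_input_enabled(testcase, driver, class_name='el-input__inner'):
    """Locate the input by class name and assert it is enabled,
    mirroring the body repeated in test_case01..test_case03 above."""
    element = driver.find_element(By.CLASS_NAME, class_name)
    testcase.assertTrue(element.is_enabled())
```

Factoring the repeated four-line body into one helper would also shrink each test case to a single setup call plus this assertion.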
{"blob_id": "76faa298c4611e45da7a34b4b547b3a2821ac633", "bodies": ["self.url = url\nself.username = username\nself.password = password\nif session is None:\n session = Session()\nself._session = session\nself._token = None\nself.verify = verify", "if self._token is None and (not _skip):\n self._token = self._request('core.getSecurityToken')\nreturn self._token", "kwargs.setdefault('auth', (self.username, self.password))\nparams = kwargs.setdefault('params', {})\nis_json = params.setdefault(':output', 'json') == 'json'\nparams.setdefault('orion.user.security.token', self._get_token(_skip=name == 'core.getSecurityToken'))\nurl = urljoin(self.url, 'remote/{}'.format(name))\nkwargs['verify'] = self.verify\nif any((kwargs.get(key) for key in ('data', 'json', 'files'))):\n r = self._session.post(url, **kwargs)\nelse:\n r = self._session.get(url, **kwargs)\nr.raise_for_status()\ntext = r.text\nif not text.startswith('OK:'):\n raise APIError(text)\nreturn json.loads(text[3:]) if is_json else text[3:]", "params = kwargs.pop('params', {})\nfiles = kwargs.pop('files', {})\nfor i, item in enumerate(args, start=1):\n params['param{}'.format(i)] = item\nparams.update(kwargs)\nreturn self._request(name, params=params, files=files)"], "bodies_text": "<|body_start_0|>\n self.url = url\n self.username = username\n self.password = password\n if session is None:\n session = Session()\n self._session = session\n self._token = None\n self.verify = verify\n<|end_body_0|>\n\n<|body_start_1|>\n if self._token is None and (not _skip):\n self._token = self._request('core.getSecurityToken')\n return self._token\n<|end_body_1|>\n\n<|body_start_2|>\n kwargs.setdefault('auth', (self.username, self.password))\n params = kwargs.setdefault('params', {})\n is_json = params.setdefault(':output', 'json') == 'json'\n params.setdefault('orion.user.security.token', self._get_token(_skip=name == 'core.getSecurityToken'))\n url = urljoin(self.url, 'remote/{}'.format(name))\n kwargs['verify'] = self.verify\n if any((kwargs.get(key) for key in ('data', 'json', 'files'))):\n r = self._session.post(url, **kwargs)\n else:\n r = self._session.get(url, **kwargs)\n r.raise_for_status()\n text = r.text\n if not text.startswith('OK:'):\n raise APIError(text)\n return json.loads(text[3:]) if is_json else text[3:]\n<|end_body_2|>\n\n<|body_start_3|>\n params = kwargs.pop('params', {})\n files = kwargs.pop('files', {})\n for i, item in enumerate(args, start=1):\n params['param{}'.format(i)] = item\n params.update(kwargs)\n return self._request(name, params=params, files=files)\n<|end_body_3|>\n", "class_docstring": "Communicate with an ePO server. Instances are callable, pass a command name and parameters to make API calls.", "class_name": "Client", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Client:\n \"\"\"Communicate with an ePO server. Instances are callable, pass a command name and parameters to make API calls.\"\"\"\n\n def __init__(self, url, username, password, session=None, verify=None):\n \"\"\"Create a client for the given ePO server. :param url: Location of ePO server. :param username: Username to authenticate. :param password: Password to authenticate. :param session: Custom instance of :class:`requests.Session`, useful for configuring server verification.\"\"\"\n <|body_0|>\n\n def _get_token(self, _skip=False):\n \"\"\"Get the security token if it's not already cached. 
:param bool _skip: Used internally when making the initial request to get the token.\"\"\"\n <|body_1|>\n\n def _request(self, name, **kwargs):\n \"\"\"Format the request and interpret the response. Usually you want to use :meth:`__call__` instead. :param name: ePO command name to call. :param kwargs: Arguments passed to :meth:`requests.request`. :return: Deserialized JSON data.\"\"\"\n <|body_2|>\n\n def __call__(self, name, *args, **kwargs):\n \"\"\"Make an API call by calling this instance. Collects arguments and calls :meth:`_request`. ePO commands take positional and named arguments. Positional arguments are internally numbered \"param#\" and passed as named arguments. Files can be passed to some commands. Pass a dictionary of ``'filename': file-like objects``, or other formats accepted by :meth:`requests.request`. This command will not open files, as it is better to manage that in a ``with`` block from the calling code. :param name: ePO command name to call. :param args: Positional arguments to the command. :param kwargs: Named arguments to the command. :param dict params: Named arguments that are not valid Python names can be provide\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.url = url\n self.username = username\n self.password = password\n if session is None:\n session = Session()\n self._session = session\n self._token = None\n self.verify = verify\n<|end_body_0|>\n\n<|body_start_1|>\n if self._token is None and (not _skip):\n self._token = self._request('core.getSecurityToken')\n return self._token\n<|end_body_1|>\n\n<|body_start_2|>\n kwargs.setdefault('auth', (self.username, self.password))\n params = kwargs.setdefault('params', {})\n is_json = params.setdefault(':output', 'json') == 'json'\n params.setdefault('orion.user.security.token', self._get_token(_skip=name == 'core.getSecurityToken'))\n url = urljoin(self.url, 'remote/{}'.format(name))\n kwargs['verify'] = self.verify\n if any((kwargs.get(key) for key in ('data', 'json', 'files'))):\n r = self._session.post(url, **kwargs)\n else:\n r = self._session.get(url, **kwargs)\n r.raise_for_status()\n text = r.text\n if not text.startswith('OK:'):\n raise APIError(text)\n return json.loads(text[3:]) if is_json else text[3:]\n<|end_body_2|>\n\n<|body_start_3|>\n params = kwargs.pop('params', {})\n files = kwargs.pop('files', {})\n for i, item in enumerate(args, start=1):\n params['param{}'.format(i)] = item\n params.update(kwargs)\n return self._request(name, params=params, files=files)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000497", "length_bytes": 4141, "license_type": "permissive", "methods": [{"docstring": "Create a client for the given ePO server. :param url: Location of ePO server. :param username: Username to authenticate. :param password: Password to authenticate. :param session: Custom instance of :class:`requests.Session`, useful for configuring server verification.", "name": "__init__", "signature": "def __init__(self, url, username, password, session=None, verify=None)"}, {"docstring": "Get the security token if it's not already cached. :param bool _skip: Used internally when making the initial request to get the token.", "name": "_get_token", "signature": "def _get_token(self, _skip=False)"}, {"docstring": "Format the request and interpret the response. Usually you want to use :meth:`__call__` instead. :param name: ePO command name to call. :param kwargs: Arguments passed to :meth:`requests.request`. 
:return: Deserialized JSON data.", "name": "_request", "signature": "def _request(self, name, **kwargs)"}, {"docstring": "Make an API call by calling this instance. Collects arguments and calls :meth:`_request`. ePO commands take positional and named arguments. Positional arguments are internally numbered \"param#\" and passed as named arguments. Files can be passed to some commands. Pass a dictionary of ``'filename': file-like objects``, or other formats accepted by :meth:`requests.request`. This command will not open files, as it is better to manage that in a ``with`` block from the calling code. :param name: ePO command name to call. :param args: Positional arguments to the command. :param kwargs: Named arguments to the command. :param dict params: Named arguments that are not valid Python names can be provide", "name": "__call__", "signature": "def __call__(self, name, *args, **kwargs)"}], "n_methods": 4, "prompt": "Implement the Python class `Client` described below.\n\nClass description:\nCommunicate with an ePO server. Instances are callable, pass a command name and parameters to make API calls.\n\nMethod signatures and docstrings:\n- def __init__(self, url, username, password, session=None, verify=None): Create a client for the given ePO server. :param url: Location of ePO server. :param username: Username to authenticate. :param password: Password to authenticate. :param session: Custom instance of :class:`requests.Session`, useful for configuring server verification.\n- def _get_token(self, _skip=False): Get the security token if it's not already cached. :param bool _skip: Used internally when making the initial request to get the token.\n- def _request(self, name, **kwargs): Format the request and interpret the response. Usually you want to use :meth:`__call__` instead. :param name: ePO command name to call. :param kwargs: Arguments passed to :meth:`requests.request`. :return: Deserialized JSON data.\n- def __call__(self, name, *args, **kwargs): Make an API call by calling this instance. Collects arguments and calls :meth:`_request`. ePO commands take positional and named arguments. Positional arguments are internally numbered \"param#\" and passed as named arguments. Files can be passed to some commands. Pass a dictionary of ``'filename': file-like objects``, or other formats accepted by :meth:`requests.request`. This command will not open files, as it is better to manage that in a ``with`` block from the calling code. :param name: ePO command name to call. :param args: Positional arguments to the command. :param kwargs: Named arguments to the command. :param dict params: Named arguments that are not valid Python names can be provide", "prompted_full_text": "Implement the Python class `Client` described below.\n\nClass description:\nCommunicate with an ePO server. Instances are callable, pass a command name and parameters to make API calls.\n\nMethod signatures and docstrings:\n- def __init__(self, url, username, password, session=None, verify=None): Create a client for the given ePO server. :param url: Location of ePO server. :param username: Username to authenticate. :param password: Password to authenticate. :param session: Custom instance of :class:`requests.Session`, useful for configuring server verification.\n- def _get_token(self, _skip=False): Get the security token if it's not already cached. :param bool _skip: Used internally when making the initial request to get the token.\n- def _request(self, name, **kwargs): Format the request and interpret the response. 
Usually you want to use :meth:`__call__` instead. :param name: ePO command name to call. :param kwargs: Arguments passed to :meth:`requests.request`. :return: Deserialized JSON data.\n- def __call__(self, name, *args, **kwargs): Make an API call by calling this instance. Collects arguments and calls :meth:`_request`. ePO commands take positional and named arguments. Positional arguments are internally numbered \"param#\" and passed as named arguments. Files can be passed to some commands. Pass a dictionary of ``'filename': file-like objects``, or other formats accepted by :meth:`requests.request`. This command will not open files, as it is better to manage that in a ``with`` block from the calling code. :param name: ePO command name to call. :param args: Positional arguments to the command. :param kwargs: Named arguments to the command. :param dict params: Named arguments that are not valid Python names can be provide\n\n<|skeleton|>\nclass Client:\n \"\"\"Communicate with an ePO server. Instances are callable, pass a command name and parameters to make API calls.\"\"\"\n\n def __init__(self, url, username, password, session=None, verify=None):\n \"\"\"Create a client for the given ePO server. :param url: Location of ePO server. :param username: Username to authenticate. :param password: Password to authenticate. :param session: Custom instance of :class:`requests.Session`, useful for configuring server verification.\"\"\"\n <|body_0|>\n\n def _get_token(self, _skip=False):\n \"\"\"Get the security token if it's not already cached. :param bool _skip: Used internally when making the initial request to get the token.\"\"\"\n <|body_1|>\n\n def _request(self, name, **kwargs):\n \"\"\"Format the request and interpret the response. Usually you want to use :meth:`__call__` instead. :param name: ePO command name to call. :param kwargs: Arguments passed to :meth:`requests.request`. :return: Deserialized JSON data.\"\"\"\n <|body_2|>\n\n def __call__(self, name, *args, **kwargs):\n \"\"\"Make an API call by calling this instance. Collects arguments and calls :meth:`_request`. ePO commands take positional and named arguments. Positional arguments are internally numbered \"param#\" and passed as named arguments. Files can be passed to some commands. Pass a dictionary of ``'filename': file-like objects``, or other formats accepted by :meth:`requests.request`. This command will not open files, as it is better to manage that in a ``with`` block from the calling code. :param name: ePO command name to call. :param args: Positional arguments to the command. :param kwargs: Named arguments to the command. 
:param dict params: Named arguments that are not valid Python names can be provide\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.url = url\n self.username = username\n self.password = password\n if session is None:\n session = Session()\n self._session = session\n self._token = None\n self.verify = verify\n<|end_body_0|>\n\n<|body_start_1|>\n if self._token is None and (not _skip):\n self._token = self._request('core.getSecurityToken')\n return self._token\n<|end_body_1|>\n\n<|body_start_2|>\n kwargs.setdefault('auth', (self.username, self.password))\n params = kwargs.setdefault('params', {})\n is_json = params.setdefault(':output', 'json') == 'json'\n params.setdefault('orion.user.security.token', self._get_token(_skip=name == 'core.getSecurityToken'))\n url = urljoin(self.url, 'remote/{}'.format(name))\n kwargs['verify'] = self.verify\n if any((kwargs.get(key) for key in ('data', 'json', 'files'))):\n r = self._session.post(url, **kwargs)\n else:\n r = self._session.get(url, **kwargs)\n r.raise_for_status()\n text = r.text\n if not text.startswith('OK:'):\n raise APIError(text)\n return json.loads(text[3:]) if is_json else text[3:]\n<|end_body_2|>\n\n<|body_start_3|>\n params = kwargs.pop('params', {})\n files = kwargs.pop('files', {})\n for i, item in enumerate(args, start=1):\n params['param{}'.format(i)] = item\n params.update(kwargs)\n return self._request(name, params=params, files=files)\n<|end_body_3|>\n", "revision_id": "718d15ca36c57231bb89df0aebc53d0210db400c", "skeleton": "<|skeleton|>\nclass Client:\n \"\"\"Communicate with an ePO server. Instances are callable, pass a command name and parameters to make API calls.\"\"\"\n\n def __init__(self, url, username, password, session=None, verify=None):\n \"\"\"Create a client for the given ePO server. :param url: Location of ePO server. :param username: Username to authenticate. :param password: Password to authenticate. :param session: Custom instance of :class:`requests.Session`, useful for configuring server verification.\"\"\"\n <|body_0|>\n\n def _get_token(self, _skip=False):\n \"\"\"Get the security token if it's not already cached. :param bool _skip: Used internally when making the initial request to get the token.\"\"\"\n <|body_1|>\n\n def _request(self, name, **kwargs):\n \"\"\"Format the request and interpret the response. Usually you want to use :meth:`__call__` instead. :param name: ePO command name to call. :param kwargs: Arguments passed to :meth:`requests.request`. :return: Deserialized JSON data.\"\"\"\n <|body_2|>\n\n def __call__(self, name, *args, **kwargs):\n \"\"\"Make an API call by calling this instance. Collects arguments and calls :meth:`_request`. ePO commands take positional and named arguments. Positional arguments are internally numbered \"param#\" and passed as named arguments. Files can be passed to some commands. Pass a dictionary of ``'filename': file-like objects``, or other formats accepted by :meth:`requests.request`. This command will not open files, as it is better to manage that in a ``with`` block from the calling code. :param name: ePO command name to call. :param args: Positional arguments to the command. :param kwargs: Named arguments to the command. 
:param dict params: Named arguments that are not valid Python names can be provide\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Client:\n \"\"\"Communicate with an ePO server. Instances are callable, pass a command name and parameters to make API calls.\"\"\"\n\n def __init__(self, url, username, password, session=None, verify=None):\n \"\"\"Create a client for the given ePO server. :param url: Location of ePO server. :param username: Username to authenticate. :param password: Password to authenticate. :param session: Custom instance of :class:`requests.Session`, useful for configuring server verification.\"\"\"\n self.url = url\n self.username = username\n self.password = password\n if session is None:\n session = Session()\n self._session = session\n self._token = None\n self.verify = verify\n\n def _get_token(self, _skip=False):\n \"\"\"Get the security token if it's not already cached. :param bool _skip: Used internally when making the initial request to get the token.\"\"\"\n if self._token is None and (not _skip):\n self._token = self._request('core.getSecurityToken')\n return self._token\n\n def _request(self, name, **kwargs):\n \"\"\"Format the request and interpret the response. Usually you want to use :meth:`__call__` instead. :param name: ePO command name to call. :param kwargs: Arguments passed to :meth:`requests.request`. :return: Deserialized JSON data.\"\"\"\n kwargs.setdefault('auth', (self.username, self.password))\n params = kwargs.setdefault('params', {})\n is_json = params.setdefault(':output', 'json') == 'json'\n params.setdefault('orion.user.security.token', self._get_token(_skip=name == 'core.getSecurityToken'))\n url = urljoin(self.url, 'remote/{}'.format(name))\n kwargs['verify'] = self.verify\n if any((kwargs.get(key) for key in ('data', 'json', 'files'))):\n r = self._session.post(url, **kwargs)\n else:\n r = self._session.get(url, **kwargs)\n r.raise_for_status()\n text = r.text\n if not text.startswith('OK:'):\n raise APIError(text)\n return json.loads(text[3:]) if is_json else text[3:]\n\n def __call__(self, name, *args, **kwargs):\n \"\"\"Make an API call by calling this instance. Collects arguments and calls :meth:`_request`. ePO commands take positional and named arguments. Positional arguments are internally numbered \"param#\" and passed as named arguments. Files can be passed to some commands. Pass a dictionary of ``'filename': file-like objects``, or other formats accepted by :meth:`requests.request`. This command will not open files, as it is better to manage that in a ``with`` block from the calling code. :param name: ePO command name to call. :param args: Positional arguments to the command. :param kwargs: Named arguments to the command. :param dict params: Named arguments that are not valid Python names can be provide\"\"\"\n params = kwargs.pop('params', {})\n files = kwargs.pop('files', {})\n for i, item in enumerate(args, start=1):\n params['param{}'.format(i)] = item\n params.update(kwargs)\n return self._request(name, params=params, files=files)\n", "source": "the_stack_v2_python_sparse", "source_path": "plugins/mcafee_epo/komand_mcafee_epo/util/mcafee.py", "source_repo": "rapid7/insightconnect-plugins", "split": "val", "star_events_count": 61}
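The heart of `Client._request` above is the ePO response envelope: successful payloads arrive prefixed with `OK:`, and anything else is an error. A standalone, runnable sketch of just that parsing step; `APIError` here is a local stand-in for the exception the original module imports.

```python
import json

class APIError(Exception):
    """Stand-in for the APIError used by the original Client."""

def parse_epo_response(text, is_json=True):
    """Strip the 'OK:' success prefix and optionally deserialize JSON,
    mirroring the tail of Client._request above."""
    if not text.startswith('OK:'):
        raise APIError(text)
    body = text[3:]
    return json.loads(body) if is_json else body

print(parse_epo_response('OK:{"count": 2}'))               # {'count': 2}
print(parse_epo_response('OK:epo 5.10.0', is_json=False))  # 'epo 5.10.0'
```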
{"blob_id": "d95b10af1d87947d430005c2e09c0d51cb431e7f", "bodies": ["month_bounds_error = gettext('Check the expiry date or use a new card')\nself.fields = [forms.IntegerField(min_value=1, max_value=12, error_messages={'min_value': month_bounds_error, 'max_value': month_bounds_error, 'invalid': gettext('Check the expiry date or use a new card')}), YearField()]\nsuper().__init__(self.fields, *args, **kwargs)", "if data_list:\n try:\n if any((item in self.empty_values for item in data_list)):\n raise ValueError\n return (data_list[1], data_list[0])\n except ValueError:\n raise forms.ValidationError(self.error_messages['invalid'], code='invalid')\nreturn None", "attrs = super().widget_attrs(widget)\nif not isinstance(widget, ExpirySplitDateWidget):\n return attrs\nfor subfield, subwidget in zip(self.fields, widget.widgets):\n if subfield.min_value is not None:\n subwidget.attrs['min'] = subfield.min_value\n if subfield.max_value is not None:\n subwidget.attrs['max'] = subfield.max_value\nreturn attrs"], "bodies_text": "<|body_start_0|>\n month_bounds_error = gettext('Check the expiry date or use a new card')\n self.fields = [forms.IntegerField(min_value=1, max_value=12, error_messages={'min_value': month_bounds_error, 'max_value': month_bounds_error, 'invalid': gettext('Check the expiry date or use a new card')}), YearField()]\n super().__init__(self.fields, *args, **kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n if data_list:\n try:\n if any((item in self.empty_values for item in data_list)):\n raise ValueError\n return (data_list[1], data_list[0])\n except ValueError:\n raise forms.ValidationError(self.error_messages['invalid'], code='invalid')\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n attrs = super().widget_attrs(widget)\n if not isinstance(widget, ExpirySplitDateWidget):\n return attrs\n for subfield, subwidget in zip(self.fields, widget.widgets):\n if subfield.min_value is not None:\n subwidget.attrs['min'] = subfield.min_value\n if subfield.max_value is not None:\n subwidget.attrs['max'] = subfield.max_value\n return attrs\n<|end_body_2|>\n", "class_docstring": "This class defines the validation for the month field and also the overall ordering and organisation for the two fields", "class_name": "ExpirySplitDateField", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ExpirySplitDateField:\n \"\"\"This class defines the validation for the month field and also the overall ordering and organisation for the two fields\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Standard constructor that defines what the month field should do, and which errors should be raised should certain events occur :param args: Standard arguments parameter :param kwargs: Standard key word arguments parameter\"\"\"\n <|body_0|>\n\n def compress(self, data_list):\n \"\"\"Uses compress as there are multiple values (compress is a replacement for clean in these cases :param data_list: The object containing each of the values :return: Returns the cleaned value object\"\"\"\n <|body_1|>\n\n def widget_attrs(self, widget):\n \"\"\"Populates the attributes of the widget with the values defined in the original widget creation :param widget: The widget to have its parameters populated :return: returns the attributes\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n month_bounds_error = gettext('Check the expiry date or use a new card')\n self.fields = [forms.IntegerField(min_value=1, max_value=12, error_messages={'min_value': 
month_bounds_error, 'max_value': month_bounds_error, 'invalid': gettext('Check the expiry date or use a new card')}), YearField()]\n super().__init__(self.fields, *args, **kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n if data_list:\n try:\n if any((item in self.empty_values for item in data_list)):\n raise ValueError\n return (data_list[1], data_list[0])\n except ValueError:\n raise forms.ValidationError(self.error_messages['invalid'], code='invalid')\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n attrs = super().widget_attrs(widget)\n if not isinstance(widget, ExpirySplitDateWidget):\n return attrs\n for subfield, subwidget in zip(self.fields, widget.widgets):\n if subfield.min_value is not None:\n subwidget.attrs['min'] = subfield.min_value\n if subfield.max_value is not None:\n subwidget.attrs['max'] = subfield.max_value\n return attrs\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000498", "length_bytes": 21840, "license_type": "no_license", "methods": [{"docstring": "Standard constructor that defines what the month field should do, and which errors should be raised should certain events occur :param args: Standard arguments parameter :param kwargs: Standard key word arguments parameter", "name": "__init__", "signature": "def __init__(self, *args, **kwargs)"}, {"docstring": "Uses compress as there are multiple values (compress is a replacement for clean in these cases :param data_list: The object containing each of the values :return: Returns the cleaned value object", "name": "compress", "signature": "def compress(self, data_list)"}, {"docstring": "Populates the attributes of the widget with the values defined in the original widget creation :param widget: The widget to have its parameters populated :return: returns the attributes", "name": "widget_attrs", "signature": "def widget_attrs(self, widget)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_017477", "prompt": "Implement the Python class `ExpirySplitDateField` described below.\n\nClass description:\nThis class defines the validation for the month field and also the overall ordering and organisation for the two fields\n\nMethod signatures and docstrings:\n- def __init__(self, *args, **kwargs): Standard constructor that defines what the month field should do, and which errors should be raised should certain events occur :param args: Standard arguments parameter :param kwargs: Standard key word arguments parameter\n- def compress(self, data_list): Uses compress as there are multiple values (compress is a replacement for clean in these cases :param data_list: The object containing each of the values :return: Returns the cleaned value object\n- def widget_attrs(self, widget): Populates the attributes of the widget with the values defined in the original widget creation :param widget: The widget to have its parameters populated :return: returns the attributes", "prompted_full_text": "Implement the Python class `ExpirySplitDateField` described below.\n\nClass description:\nThis class defines the validation for the month field and also the overall ordering and organisation for the two fields\n\nMethod signatures and docstrings:\n- def __init__(self, *args, **kwargs): Standard constructor that defines what the month field should do, and which errors should be raised should certain events occur :param args: Standard arguments parameter :param kwargs: Standard key word arguments parameter\n- def compress(self, data_list): Uses compress as there are multiple values (compress is a replacement for 
clean in these cases :param data_list: The object containing each of the values :return: Returns the cleaned value object\n- def widget_attrs(self, widget): Populates the attributes of the widget with the values defined in the original widget creation :param widget: The widget to have its parameters populated :return: returns the attributes\n\n<|skeleton|>\nclass ExpirySplitDateField:\n \"\"\"This class defines the validation for the month field and also the overall ordering and organisation for the two fields\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Standard constructor that defines what the month field should do, and which errors should be raised should certain events occur :param args: Standard arguments parameter :param kwargs: Standard key word arguments parameter\"\"\"\n <|body_0|>\n\n def compress(self, data_list):\n \"\"\"Uses compress as there are multiple values (compress is a replacement for clean in these cases :param data_list: The object containing each of the values :return: Returns the cleaned value object\"\"\"\n <|body_1|>\n\n def widget_attrs(self, widget):\n \"\"\"Populates the attributes of the widget with the values defined in the original widget creation :param widget: The widget to have its parameters populated :return: returns the attributes\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n month_bounds_error = gettext('Check the expiry date or use a new card')\n self.fields = [forms.IntegerField(min_value=1, max_value=12, error_messages={'min_value': month_bounds_error, 'max_value': month_bounds_error, 'invalid': gettext('Check the expiry date or use a new card')}), YearField()]\n super().__init__(self.fields, *args, **kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n if data_list:\n try:\n if any((item in self.empty_values for item in data_list)):\n raise ValueError\n return (data_list[1], data_list[0])\n except ValueError:\n raise forms.ValidationError(self.error_messages['invalid'], code='invalid')\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n attrs = super().widget_attrs(widget)\n if not isinstance(widget, ExpirySplitDateWidget):\n return attrs\n for subfield, subwidget in zip(self.fields, widget.widgets):\n if subfield.min_value is not None:\n subwidget.attrs['min'] = subfield.min_value\n if subfield.max_value is not None:\n subwidget.attrs['max'] = subfield.max_value\n return attrs\n<|end_body_2|>\n", "revision_id": "fa6ca6a8164763e1dfe1581702ca5d36e44859de", "skeleton": "<|skeleton|>\nclass ExpirySplitDateField:\n \"\"\"This class defines the validation for the month field and also the overall ordering and organisation for the two fields\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Standard constructor that defines what the month field should do, and which errors should be raised should certain events occur :param args: Standard arguments parameter :param kwargs: Standard key word arguments parameter\"\"\"\n <|body_0|>\n\n def compress(self, data_list):\n \"\"\"Uses compress as there are multiple values (compress is a replacement for clean in these cases :param data_list: The object containing each of the values :return: Returns the cleaned value object\"\"\"\n <|body_1|>\n\n def widget_attrs(self, widget):\n \"\"\"Populates the attributes of the widget with the values defined in the original widget creation :param widget: The widget to have its parameters populated :return: returns the attributes\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": 
"data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ExpirySplitDateField:\n \"\"\"This class defines the validation for the month field and also the overall ordering and organisation for the two fields\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Standard constructor that defines what the month field should do, and which errors should be raised should certain events occur :param args: Standard arguments parameter :param kwargs: Standard key word arguments parameter\"\"\"\n month_bounds_error = gettext('Check the expiry date or use a new card')\n self.fields = [forms.IntegerField(min_value=1, max_value=12, error_messages={'min_value': month_bounds_error, 'max_value': month_bounds_error, 'invalid': gettext('Check the expiry date or use a new card')}), YearField()]\n super().__init__(self.fields, *args, **kwargs)\n\n def compress(self, data_list):\n \"\"\"Uses compress as there are multiple values (compress is a replacement for clean in these cases :param data_list: The object containing each of the values :return: Returns the cleaned value object\"\"\"\n if data_list:\n try:\n if any((item in self.empty_values for item in data_list)):\n raise ValueError\n return (data_list[1], data_list[0])\n except ValueError:\n raise forms.ValidationError(self.error_messages['invalid'], code='invalid')\n return None\n\n def widget_attrs(self, widget):\n \"\"\"Populates the attributes of the widget with the values defined in the original widget creation :param widget: The widget to have its parameters populated :return: returns the attributes\"\"\"\n attrs = super().widget_attrs(widget)\n if not isinstance(widget, ExpirySplitDateWidget):\n return attrs\n for subfield, subwidget in zip(self.fields, widget.widgets):\n if subfield.min_value is not None:\n subwidget.attrs['min'] = subfield.min_value\n if subfield.max_value is not None:\n subwidget.attrs['max'] = subfield.max_value\n return attrs\n", "source": "the_stack_v2_python_sparse", "source_path": "application/customfields.py", "source_repo": "IS-JAQU-CAZ/OFS-MORE-Childminder-Website", "split": "val", "star_events_count": 0}
{"blob_id": "560e3be22f2abfe0cd7d0a85cf58448e325a5980", "bodies": ["doc = SimpleDocTemplate(filename, pagesize=A3)\nelements = []\ndata = []\ndata.append(['EnrollmentNumber', 'FirstName', 'LastName', 'DOB', 'Faculty', 'Email'])\nmapper = StudentListMapper()\nfor student in students:\n data.append(mapper.map_to_list(student))\nwidth = 6\nheight = len(data)\nt = Table(data, width * [2 * inch], height * [0.5 * inch])\nt.setStyle(TableStyle([('ALIGN', (0, 0), (-1, -1), 'CENTER'), ('VALIGN', (0, 0), (-1, -1), 'MIDDLE'), ('TEXTCOLOR', (0, 0), (-1, -1), colors.black), ('TEXTCOLOR', (0, 0), (-1, 0), colors.green), ('INNERGRID', (0, 0), (-1, -1), 0.5, colors.black), ('BOX', (0, 0), (-1, -1), 0.5, colors.black)]))\nelements.append(t)\ndoc.build(elements)", "doc = SimpleDocTemplate(filename)\nstyles = getSampleStyleSheet()\nStory = [Spacer(1, 2 * inch)]\nstyle = styles['Normal']\nfor student in students:\n bogustext = str(student)\n p = Paragraph(bogustext, style)\n Story.append(p)\n Story.append(Spacer(1, 0.2 * inch))\ndoc.build(Story)"], "bodies_text": "<|body_start_0|>\n doc = SimpleDocTemplate(filename, pagesize=A3)\n elements = []\n data = []\n data.append(['EnrollmentNumber', 'FirstName', 'LastName', 'DOB', 'Faculty', 'Email'])\n mapper = StudentListMapper()\n for student in students:\n data.append(mapper.map_to_list(student))\n width = 6\n height = len(data)\n t = Table(data, width * [2 * inch], height * [0.5 * inch])\n t.setStyle(TableStyle([('ALIGN', (0, 0), (-1, -1), 'CENTER'), ('VALIGN', (0, 0), (-1, -1), 'MIDDLE'), ('TEXTCOLOR', (0, 0), (-1, -1), colors.black), ('TEXTCOLOR', (0, 0), (-1, 0), colors.green), ('INNERGRID', (0, 0), (-1, -1), 0.5, colors.black), ('BOX', (0, 0), (-1, -1), 0.5, colors.black)]))\n elements.append(t)\n doc.build(elements)\n<|end_body_0|>\n\n<|body_start_1|>\n doc = SimpleDocTemplate(filename)\n styles = getSampleStyleSheet()\n Story = [Spacer(1, 2 * inch)]\n style = styles['Normal']\n for student in students:\n bogustext = str(student)\n p = Paragraph(bogustext, style)\n Story.append(p)\n Story.append(Spacer(1, 0.2 * inch))\n doc.build(Story)\n<|end_body_1|>\n", "class_docstring": "This class is used for exporting students to PDF files", "class_name": "StudentPDFSerializer", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass StudentPDFSerializer:\n \"\"\"This class is used for exporting students to PDF files\"\"\"\n\n def exportAsPDFToFile(self, students, filename='../files/students.pdf'):\n \"\"\"Exports students to the PDF file with the given filename. :param students: list of model.Student.Student-s :param filename: string :return:\"\"\"\n <|body_0|>\n\n def exportAsPDFToFile_2(self, students, filename='../files/students.pdf'):\n \"\"\"Exports students to the PDF file with the given filename. 
:param students: list of model.Student.Student-s :param filename: string :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n doc = SimpleDocTemplate(filename, pagesize=A3)\n elements = []\n data = []\n data.append(['EnrollmentNumber', 'FirstName', 'LastName', 'DOB', 'Faculty', 'Email'])\n mapper = StudentListMapper()\n for student in students:\n data.append(mapper.map_to_list(student))\n width = 6\n height = len(data)\n t = Table(data, width * [2 * inch], height * [0.5 * inch])\n t.setStyle(TableStyle([('ALIGN', (0, 0), (-1, -1), 'CENTER'), ('VALIGN', (0, 0), (-1, -1), 'MIDDLE'), ('TEXTCOLOR', (0, 0), (-1, -1), colors.black), ('TEXTCOLOR', (0, 0), (-1, 0), colors.green), ('INNERGRID', (0, 0), (-1, -1), 0.5, colors.black), ('BOX', (0, 0), (-1, -1), 0.5, colors.black)]))\n elements.append(t)\n doc.build(elements)\n<|end_body_0|>\n\n<|body_start_1|>\n doc = SimpleDocTemplate(filename)\n styles = getSampleStyleSheet()\n Story = [Spacer(1, 2 * inch)]\n style = styles['Normal']\n for student in students:\n bogustext = str(student)\n p = Paragraph(bogustext, style)\n Story.append(p)\n Story.append(Spacer(1, 0.2 * inch))\n doc.build(Story)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_val_000499", "length_bytes": 2594, "license_type": "no_license", "methods": [{"docstring": "Exports students to the PDF file with the given filename. :param students: list of model.Student.Student-s :param filename: string :return:", "name": "exportAsPDFToFile", "signature": "def exportAsPDFToFile(self, students, filename='../files/students.pdf')"}, {"docstring": "Exports students to the PDF file with the given filename. :param students: list of model.Student.Student-s :param filename: string :return:", "name": "exportAsPDFToFile_2", "signature": "def exportAsPDFToFile_2(self, students, filename='../files/students.pdf')"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_test_002023", "prompt": "Implement the Python class `StudentPDFSerializer` described below.\n\nClass description:\nThis class is used for exporting students to PDF files\n\nMethod signatures and docstrings:\n- def exportAsPDFToFile(self, students, filename='../files/students.pdf'): Exports students to the PDF file with the given filename. :param students: list of model.Student.Student-s :param filename: string :return:\n- def exportAsPDFToFile_2(self, students, filename='../files/students.pdf'): Exports students to the PDF file with the given filename. :param students: list of model.Student.Student-s :param filename: string :return:", "prompted_full_text": "Implement the Python class `StudentPDFSerializer` described below.\n\nClass description:\nThis class is used for exporting students to PDF files\n\nMethod signatures and docstrings:\n- def exportAsPDFToFile(self, students, filename='../files/students.pdf'): Exports students to the PDF file with the given filename. :param students: list of model.Student.Student-s :param filename: string :return:\n- def exportAsPDFToFile_2(self, students, filename='../files/students.pdf'): Exports students to the PDF file with the given filename. :param students: list of model.Student.Student-s :param filename: string :return:\n\n<|skeleton|>\nclass StudentPDFSerializer:\n \"\"\"This class is used for exporting students to PDF files\"\"\"\n\n def exportAsPDFToFile(self, students, filename='../files/students.pdf'):\n \"\"\"Exports students to the PDF file with the given filename. 
:param students: list of model.Student.Student-s :param filename: string :return:\"\"\"\n <|body_0|>\n\n def exportAsPDFToFile_2(self, students, filename='../files/students.pdf'):\n \"\"\"Exports students to the PDF file with the given filename. :param students: list of model.Student.Student-s :param filename: string :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n doc = SimpleDocTemplate(filename, pagesize=A3)\n elements = []\n data = []\n data.append(['EnrollmentNumber', 'FirstName', 'LastName', 'DOB', 'Faculty', 'Email'])\n mapper = StudentListMapper()\n for student in students:\n data.append(mapper.map_to_list(student))\n width = 6\n height = len(data)\n t = Table(data, width * [2 * inch], height * [0.5 * inch])\n t.setStyle(TableStyle([('ALIGN', (0, 0), (-1, -1), 'CENTER'), ('VALIGN', (0, 0), (-1, -1), 'MIDDLE'), ('TEXTCOLOR', (0, 0), (-1, -1), colors.black), ('TEXTCOLOR', (0, 0), (-1, 0), colors.green), ('INNERGRID', (0, 0), (-1, -1), 0.5, colors.black), ('BOX', (0, 0), (-1, -1), 0.5, colors.black)]))\n elements.append(t)\n doc.build(elements)\n<|end_body_0|>\n\n<|body_start_1|>\n doc = SimpleDocTemplate(filename)\n styles = getSampleStyleSheet()\n Story = [Spacer(1, 2 * inch)]\n style = styles['Normal']\n for student in students:\n bogustext = str(student)\n p = Paragraph(bogustext, style)\n Story.append(p)\n Story.append(Spacer(1, 0.2 * inch))\n doc.build(Story)\n<|end_body_1|>\n", "revision_id": "a30389aa4542a23011a955ac61bf5b853c3e7854", "skeleton": "<|skeleton|>\nclass StudentPDFSerializer:\n \"\"\"This class is used for exporting students to PDF files\"\"\"\n\n def exportAsPDFToFile(self, students, filename='../files/students.pdf'):\n \"\"\"Exports students to the PDF file with the given filename. :param students: list of model.Student.Student-s :param filename: string :return:\"\"\"\n <|body_0|>\n\n def exportAsPDFToFile_2(self, students, filename='../files/students.pdf'):\n \"\"\"Exports students to the PDF file with the given filename. :param students: list of model.Student.Student-s :param filename: string :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class StudentPDFSerializer:\n \"\"\"This class is used for exporting students to PDF files\"\"\"\n\n def exportAsPDFToFile(self, students, filename='../files/students.pdf'):\n \"\"\"Exports students to the PDF file with the given filename. :param students: list of model.Student.Student-s :param filename: string :return:\"\"\"\n doc = SimpleDocTemplate(filename, pagesize=A3)\n elements = []\n data = []\n data.append(['EnrollmentNumber', 'FirstName', 'LastName', 'DOB', 'Faculty', 'Email'])\n mapper = StudentListMapper()\n for student in students:\n data.append(mapper.map_to_list(student))\n width = 6\n height = len(data)\n t = Table(data, width * [2 * inch], height * [0.5 * inch])\n t.setStyle(TableStyle([('ALIGN', (0, 0), (-1, -1), 'CENTER'), ('VALIGN', (0, 0), (-1, -1), 'MIDDLE'), ('TEXTCOLOR', (0, 0), (-1, -1), colors.black), ('TEXTCOLOR', (0, 0), (-1, 0), colors.green), ('INNERGRID', (0, 0), (-1, -1), 0.5, colors.black), ('BOX', (0, 0), (-1, -1), 0.5, colors.black)]))\n elements.append(t)\n doc.build(elements)\n\n def exportAsPDFToFile_2(self, students, filename='../files/students.pdf'):\n \"\"\"Exports students to the PDF file with the given filename. 
:param students: list of model.Student.Student-s :param filename: string :return:\"\"\"\n doc = SimpleDocTemplate(filename)\n styles = getSampleStyleSheet()\n Story = [Spacer(1, 2 * inch)]\n style = styles['Normal']\n for student in students:\n bogustext = str(student)\n p = Paragraph(bogustext, style)\n Story.append(p)\n Story.append(Spacer(1, 0.2 * inch))\n doc.build(Story)\n", "source": "the_stack_v2_python_sparse", "source_path": "serializer/StudentPDFSerializer.py", "source_repo": "edutilos6666/PythonSciStudentProject", "split": "val", "star_events_count": 0}
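The `StudentPDFSerializer` record above renders a student table with reportlab's platypus layer. A minimal runnable version of the `exportAsPDFToFile` approach, using the same pieces (`SimpleDocTemplate`, `Table`, `TableStyle`); the rows and output filename are illustrative, since the original builds rows from `Student` objects via `StudentListMapper`.

```python
from reportlab.lib import colors
from reportlab.lib.pagesizes import A3
from reportlab.lib.units import inch
from reportlab.platypus import SimpleDocTemplate, Table, TableStyle

rows = [
    ['EnrollmentNumber', 'FirstName', 'LastName'],
    ['1001', 'Ada', 'Lovelace'],
]
doc = SimpleDocTemplate('students_demo.pdf', pagesize=A3)  # illustrative path
# Column widths and row heights are given per column/row, as in the record.
table = Table(rows, len(rows[0]) * [2 * inch], len(rows) * [0.5 * inch])
table.setStyle(TableStyle([
    ('ALIGN', (0, 0), (-1, -1), 'CENTER'),
    ('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
    ('TEXTCOLOR', (0, 0), (-1, 0), colors.green),       # header row
    ('INNERGRID', (0, 0), (-1, -1), 0.5, colors.black),
    ('BOX', (0, 0), (-1, -1), 0.5, colors.black),
]))
doc.build([table])  # writes students_demo.pdf
```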