| query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | negatives (list, lengths 19–20) | metadata (dict) |
|---|---|---|---|
Regrid the field to a new Cartesian grid. {{regridding overview}} Between one and three axes may be simultaneously regridded in Cartesian space. Coordinates The source and destination grids of the regridding must both be defined by equivalent coordinates, which must be 1d dimension coordinates. These are automatically ... | def regridc(
self,
dst,
axes=None,
method=None,
use_src_mask=True,
use_dst_mask=False,
fracfield=False,
axis_order=None,
ignore_degenerate=True,
return_operator=False,
check_coordinates=False,
min_weight=None,
weight... | [
"def regrid(self, new_size, input_lower_lon, input_upper_lon, input_lower_lat, input_upper_lat):\n# Get grid size in meters\n old_size = self.find_base_size()\n\n# Scaling factor is the ratio between the old size and the new size. If the\n# ratio is 4, than 16 times as many squares will be added to the new g... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate the derivative along the specified axis. The derivative is calculated using centred finite differences apart from at the boundaries (see the one_sided_at_boundary parameter). If missing values are present then missing values will be returned at all points where a centred finite difference could not be calcula... | def derivative(
self,
axis,
wrap=None,
one_sided_at_boundary=False,
inplace=False,
i=False,
cyclic=None,
):
if cyclic:
_DEPRECATION_ERROR_KWARGS(
self,
"derivative",
{"cyclic": cyclic},
... | [
"def _dnedx(self, x, dx=0.01):\n assert len(x) == self._plasma.grid.dimension\n\n x = np.array(x, dtype=float)\n dx = np.array(dx, dtype=float)\n if (dx.ndim == 0):\n assert dx > 0\n dx = np.zeros_like(x) + dx\n else:\n assert dx.ndims == self._pla... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return domain axis constructs. Deprecated at version 3.0.0. Use `domain_axes` method instead. | def _Axes(self):
raise DeprecationError(
f"{self.__class__.__name__} attribute '_Axes' has been deprecated "
"at version 3.0.0 and is no longer available and will be removed"
"at v4.0.0"
"Use 'domain_axes' instead."
) | [
"def newPanelAxis(self, **attrlinks):\n return PanelAxis(self, **attrlinks)",
"def data_axes(self):\n _DEPRECATION_ERROR_METHOD(\n self,\n \"data_axes\",\n \"Use 'get_data_axes' method instead.\",\n version=\"3.0.0\",\n removed_at=\"4.0.0\",\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return cell method constructs. Deprecated at version 3.0.0. Use `cell_methods` method instead. | def CellMethods(self):
raise DeprecationError(
f"{self.__class__.__name__} attribute 'CellMethods' has been "
"deprecated at version 3.0.0 and is no longer available "
"and will be removed at v4.0.0. "
"Use 'cell_methods' instead."
) | [
"def insert_cell_methods(self, item):\n _DEPRECATION_ERROR_METHOD(\n self,\n \"insert_cell_methods\",\n \"Use method 'set_construct' instead.\",\n version=\"3.0.0\",\n removed_at=\"4.0.0\",\n ) # pragma: no cover",
"def _update_cell_methods(\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the canonical name for an axis. Deprecated at version 3.0.0. Use `domain_axis_identity` method instead. | def axis_name(self, *args, **kwargs):
_DEPRECATION_ERROR_METHOD(
self,
"axis_name",
"Use 'domain_axis_identity' method instead.",
version="3.0.0",
removed_at="4.0.0",
) # pragma: no cover | [
"def x_name(self):\n if self.get_attrs(\"x_name\") not in self._obj.dims:\n self.set_spatial_dims()\n if \"x_name\" in self.attrs:\n return self.attrs[\"x_name\"]",
"def get_axis_names(axes_metadata):\n\n def leaf_rewrite(x):\n return None if x is None else jax.sharding.Par... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the domain axes for the data array dimensions. Deprecated at version 3.0.0. Use `get_data_axes` method instead. | def data_axes(self):
_DEPRECATION_ERROR_METHOD(
self,
"data_axes",
"Use 'get_data_axes' method instead.",
version="3.0.0",
removed_at="4.0.0",
) # pragma: no cover | [
"def _Axes(self):\n raise DeprecationError(\n f\"{self.__class__.__name__} attribute '_Axes' has been deprecated \"\n \"at version 3.0.0 and is no longer available and will be removed\"\n \"at v4.0.0\"\n \"Use 'domain_axes' instead.\"\n )",
"def getAxes(se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return an example field construct. Deprecated at version 3.0.5. Use function `cf.example_field` instead. | def example_field(cls, n):
_DEPRECATION_ERROR_METHOD(
cls,
"example_field",
"Use function 'cf.example_field' instead.",
version="3.0.5",
removed_at="4.0.0",
) # pragma: no cover | [
"def gen_fake(self, field_name, fake):\r\n ...",
"def get_cc_field(self, cc_field, **kwargs):\n return cc_field",
"def makeFieldInfo(column):\n\treturn adql.FieldInfo(column.type,\n\t\tcolumn.unit, column.ucd, (column,), stc=column.stc)",
"def get_example_value_for_field(self, field_name):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Deprecated at version 3.0.0. Use methods 'Data.nc_hdf5_chunksizes', 'Data.nc_set_hdf5_chunksizes', 'Data.nc_clear_hdf5_chunksizes' instead. | def HDF_chunks(self, *chunksizes):
_DEPRECATION_ERROR_METHOD(
self,
"HDF_chunks",
"Use methods 'Data.nc_hdf5_chunksizes', "
"'Data.nc_set_hdf5_chunksizes', "
"'Data.nc_clear_hdf5_chunksizes' instead.",
version="3.0.0",
removed_a... | [
"def set_performance(chunksize=None, free_memory_factor=None):\n old = _cf_chunksize(), _cf_free_memory_factor()\n if free_memory_factor is None:\n if chunksize is not None:\n _cf_chunksize(chunksize)\n else:\n _cf_free_memory_factor(free_memory_factor)\n try:\n _... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Insert a cell measure object into the field. Deprecated at version 3.0.0. Use method 'set_construct' instead. | def insert_measure(
self, item, key=None, axes=None, copy=True, replace=True
):
_DEPRECATION_ERROR_METHOD(
self,
"insert_measure",
"Use method 'set_construct' instead.",
version="3.0.0",
removed_at="4.0.0",
) # pragma: no cover | [
"def insert_cell_methods(self, item):\n _DEPRECATION_ERROR_METHOD(\n self,\n \"insert_cell_methods\",\n \"Use method 'set_construct' instead.\",\n version=\"3.0.0\",\n removed_at=\"4.0.0\",\n ) # pragma: no cover",
"def add_cell(self, cell: Cel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Insert a dimension coordinate object into the field. Deprecated at version 3.0.0. Use method 'set_construct' instead. | def insert_dim(self, item, key=None, axes=None, copy=True, replace=True):
_DEPRECATION_ERROR_METHOD(
self,
"insert_dim",
"Use method 'set_construct' instead.",
version="3.0.0",
removed_at="4.0.0",
) # pragma: no cover | [
"def __create_coord(self, position, representation, args):\n if len(position) < 2:\n raise CoordinateError(\"You need at least two coordinates\")\n if representation == 'unitspherical':\n return self.__create_unitspherical_coord(position, args)\n elif representation == 'sp... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Insert a domain axis into the field. Deprecated at version 3.0.0. Use method 'set_construct' instead. | def insert_axis(self, axis, key=None, replace=True):
_DEPRECATION_ERROR_METHOD(
self,
"insert_axis",
"Use method 'set_construct' instead.",
version="3.0.0",
removed_at="4.0.0",
) # pragma: no cover | [
"def insert_domain_anc(\n self, item, key=None, axes=None, copy=True, replace=True\n ):\n _DEPRECATION_ERROR_METHOD(\n self,\n \"insert_domain_anc\",\n \"Use method 'set_construct' instead.\",\n version=\"3.0.0\",\n removed_at=\"4.0.0\",\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Insert an item into the field. Deprecated at version 3.0.0. Use method 'set_construct' instead. | def insert_item(
self, role, item, key=None, axes=None, copy=True, replace=True
):
_DEPRECATION_ERROR_METHOD(
self,
"insert_item",
"Use method 'set_construct' instead.",
version="3.0.0",
removed_at="4.0.0",
) # pragma: no cover | [
"def insert_field_anc(\n self, item, key=None, axes=None, copy=True, replace=True\n ):\n _DEPRECATION_ERROR_METHOD(\n self,\n \"insert_field_anc\",\n \"Use method 'set_construct' instead.\",\n version=\"3.0.0\",\n removed_at=\"4.0.0\",\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Insert one or more cell method objects into the field. Deprecated at version 3.0.0. Use method 'set_construct' instead. | def insert_cell_methods(self, item):
_DEPRECATION_ERROR_METHOD(
self,
"insert_cell_methods",
"Use method 'set_construct' instead.",
version="3.0.0",
removed_at="4.0.0",
) # pragma: no cover | [
"def CellMethods(self):\n raise DeprecationError(\n f\"{self.__class__.__name__} attribute 'CellMethods' has been \"\n \"deprecated at version 3.0.0 and is no longer available \"\n \"and will be removed at v4.0.0. \"\n \"Use 'cell_methods' instead.\"\n )",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Insert a domain ancillary object into the field. Deprecated at version 3.0.0. Use method 'set_construct' instead. | def insert_domain_anc(
self, item, key=None, axes=None, copy=True, replace=True
):
_DEPRECATION_ERROR_METHOD(
self,
"insert_domain_anc",
"Use method 'set_construct' instead.",
version="3.0.0",
removed_at="4.0.0",
) # pragma: no cov... | [
"def insert_field_anc(\n self, item, key=None, axes=None, copy=True, replace=True\n ):\n _DEPRECATION_ERROR_METHOD(\n self,\n \"insert_field_anc\",\n \"Use method 'set_construct' instead.\",\n version=\"3.0.0\",\n removed_at=\"4.0.0\",\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Insert a data array into the field. Deprecated at version 3.0.0. Use method 'set_data' instead. | def insert_data(self, data, axes=None, copy=True, replace=True):
_DEPRECATION_ERROR_METHOD(
self,
"insert_data",
"Use method 'set_data' instead.",
version="3.0.0",
removed_at="4.0.0",
) # pragma: no cover | [
"def add_data(self, data: bytes):\n self._data += data",
"def add_data(self, data: []) -> []:\n if not data:\n return [{'data': 'Empty data set', }]\n errors = self.check_data(data)\n if errors:\n return errors\n self.status = self.INSERTING_STATUS\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Insert a field ancillary object into the field. Deprecated at version 3.0.0. Use method 'set_construct' instead. | def insert_field_anc(
self, item, key=None, axes=None, copy=True, replace=True
):
_DEPRECATION_ERROR_METHOD(
self,
"insert_field_anc",
"Use method 'set_construct' instead.",
version="3.0.0",
removed_at="4.0.0",
) # pragma: no cover | [
"def __init__(self, *args):\n this = _Field.new_FieldZoneMat(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, *args):\n this = _Field.new_FieldZoneDouble(*args)\n try: self.this.append(this)\n except: self.this = this",
"def _create_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Insert a coordinate reference object into the field. Deprecated at version 3.0.0. Use method 'set_construct' or 'set_coordinate_reference' instead. | def insert_ref(self, item, key=None, axes=None, copy=True, replace=True):
_DEPRECATION_ERROR_METHOD(
self,
"insert_ref",
"Use method 'set_construct' or 'set_coordinate_reference' "
"instead.",
version="3.0.0",
removed_at="4.0.0",
) ... | [
"def set_ref_pos(self, x, y):\n self.ref_pos = [x, y]",
"def _add_coord_object(self, coord: Union[CORD1R, CORD1C, CORD1S,\n CORD2R, CORD2C, CORD2S], # CORD3G\n allow_overwrites: bool=False) -> None:\n key = coord.cid\n asser... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a new, unused construct key. Deprecated at version 3.0.0. Use 'new_identifier' method of 'constructs' attribute instead. | def new_identifier(self, item_type):
_DEPRECATION_ERROR_METHOD(
self,
" new_identifier",
"Use 'new_identifier' method of 'constructs' attribute instead.",
version="3.0.0",
removed_at="4.0.0",
) # pragma: no cover | [
"def _create_key(self):\n return uuid.uuid4().hex",
"def make_key(self, key, version=None):\n if version is None:\n version = self.version\n\n new_key = self.key_func(self.key_prefix, key, version)\n return new_key",
"def constructor(key_type: str):\n return KeyType... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove and return axes from the field. Deprecated at version 3.0.0. Use method 'del_construct' instead. | def remove_axes(self, axes=None, **kwargs):
_DEPRECATION_ERROR_METHOD(
self,
"remove_axes",
"Use method 'del_construct' instead.",
version="3.0.0",
removed_at="4.0.0",
) # pragma: no cover | [
"def remove_axis(self, axes=None, size=None, **kwargs):\n _DEPRECATION_ERROR_METHOD(\n self,\n \"remove_axis\",\n \"Use method 'del_construct' instead.\",\n version=\"3.0.0\",\n removed_at=\"4.0.0\",\n ) # pragma: no cover",
"def remove(self):\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove and return a unique axis from the field. Deprecated at version 3.0.0. Use method 'del_construct' instead. | def remove_axis(self, axes=None, size=None, **kwargs):
_DEPRECATION_ERROR_METHOD(
self,
"remove_axis",
"Use method 'del_construct' instead.",
version="3.0.0",
removed_at="4.0.0",
) # pragma: no cover | [
"def remove_axes(self, axes=None, **kwargs):\n _DEPRECATION_ERROR_METHOD(\n self,\n \"remove_axes\",\n \"Use method 'del_construct' instead.\",\n version=\"3.0.0\",\n removed_at=\"4.0.0\",\n ) # pragma: no cover",
"def dropaxis(self, dropax):\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Permute the axes of a field item data array. Deprecated at version 3.0.0. Use method 'transpose_construct' instead. | def transpose_item(self, description=None, iaxes=None, **kwargs):
_DEPRECATION_ERROR_METHOD(
self,
"transpose_item",
"Use method 'transpose_construct' instead.",
version="3.0.0",
removed_at="4.0.0",
) # pragma: no cover | [
"def transpose(\n self,\n axes=None,\n constructs=False,\n inplace=False,\n items=True,\n i=False,\n **kwargs,\n ):\n if not items:\n _DEPRECATION_ERROR_KWARGS(\n self,\n \"transpose\",\n {\"items\": i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Deprecated at version 3.0.0. Use methods `DomainAxis.nc_is_unlimited`, and `DomainAxis.nc_set_unlimited` instead. | def unlimited(self, *args):
_DEPRECATION_ERROR_METHOD(
self,
"unlimited",
"Use methods 'DomainAxis.nc_is_unlimited', and "
"'DomainAxis.nc_set_unlimited' instead.",
version="3.0.0",
removed_at="4.0.0",
) # pragma: no cover | [
"def _Axes(self):\n raise DeprecationError(\n f\"{self.__class__.__name__} attribute '_Axes' has been deprecated \"\n \"at version 3.0.0 and is no longer available and will be removed\"\n \"at v4.0.0\"\n \"Use 'domain_axes' instead.\"\n )",
"def no_axis():... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The manifest should parse as JSON | def test_manifest_parses(self):
self.assertIsInstance(self.json, dict) | [
"def get_manifest(self):\n logger.debug(\"Getting manifest {}\".format(self))\n text = self.get_text(self.get_manifest_key())\n return json.loads(text)",
"def test_plugin_manifest(self):\n \n # Get the plugin version\n with open(self.version_path,'r') as file:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The manifest has expected keys | def test_manifest_has_keys(self):
keys = ["description", "manifest_version", "version", "background",
"icons", "browser_action", "web_accessible_resources",
"permissions"]
for key in keys:
self.assertIn(key, self.json) | [
"def test_validate_mandatory_keys2(self):\n data = json.load(self.valid_manifest)\n new_data = modify_manifest(data, to_dict=True)\n with tempfile.NamedTemporaryFile(mode='w+', prefix='invalid_otsu-') as fd:\n json.dump(new_data, fd, indent=4)\n manifest_loader = fpgaotsu.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that the extension indexfile exists, and is a string | def test_indexfile(self):
indexfile = self.json.get("background").get("page")
self.assertIsNotNone(indexfile)
self.assertIsInstance(indexfile, basestring) | [
"def test__make_filename__index() -> None:\n int_filename = make_filename(index=1)\n\n assert '1' in int_filename\n assert '1.0' not in int_filename",
"def has_index(self) -> bool:\n try:\n self.files.get(path=\"index.html\")\n return True\n except File.DoesNotExist:\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that default CSP policy is in place | def test_csp(self):
csp = self.json.get("content_security_policy")
self.assertEqual(csp, "script-src \'self\' \'unsafe-eval\'; object-src \'unsafe-eval\';") | [
"def test_get_default_policy__strict(self):\n policy = csp.get_default_policy(nonce='12345')\n self.assertCountEqual(list(csp.DEFAULT_POLICY.keys()), list(policy.keys()))\n self.assertIn('\\'strict-dynamic\\'', policy['script-src'])\n self.assertIn(\"'nonce-12345'\", policy['script-src'])",
"def test_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the list of instrument classes given by PrettyMIDI for the MSD id. | def get_instrument_classes(msd_id) -> Optional[list]:
midi_md5 = get_matched_midi_md5(msd_id, MSD_SCORE_MATCHES)
midi_path = get_midi_path(msd_id, midi_md5, args.path_dataset_dir)
pm = PrettyMIDI(midi_path)
classes = [program_to_instrument_class(instrument.program)
for instrument in pm.instruments
... | [
"def _supported_imts(self):\n imt_list = []\n for key in self.imls:\n if \"SA\" in key:\n imt_list.append(imt_module.SA)\n elif key == \"T\":\n continue\n else:\n try:\n imt_val = imt_module.from_string(ke... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Processes the given MSD id and increments the counter. The method will call the get_instrument_classes method. | def process(msd_id: str, counter: AtomicCounter) -> Optional[dict]:
try:
with tables.open_file(msd_id_to_h5(msd_id, args.path_dataset_dir)) as h5:
classes = get_instrument_classes(msd_id)
return {"msd_id": msd_id, "classes": classes}
except Exception as e:
print(f"Exception during processing of ... | [
"def process(msd_id: str, counter: AtomicCounter) -> Optional[dict]:\n try:\n with tables.open_file(msd_id_to_h5(msd_id, args.path_dataset_dir)) as h5:\n tags = get_tags(h5)\n matching_tags = [tag for tag in tags if tag in TAGS]\n if not matching_tags:\n return\n pm_drums = extract_dr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The name of the active aligner. | def aligner(self) -> str:
return self.__alinger_name | [
"def get_name(self):\n return self.__name_army",
"def _get_name(self) -> \"std::string\" :\n return _core.ToolbarPanel__get_name(self)",
"def target_arch_name(self):\n\n return self._target.name",
"def _get_name(self) -> \"std::string\" :\n return _core.Appearance__get_name(self)",... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
We turn our array into integers based on the precision given by digits and then put them in a hashable format. | def hashable_rows(data, digits=None):
# if there is no data return immediately
if len(data) == 0:
return np.array([])
# get array as integer to precision we care about
as_int = float_to_int(data, digits=digits)
# if it is flat integers already, return
if len(as_int.shape) == 1:
... | [
"def small_array(arr, least_significant_digit):\n assert np.all(np.isfinite(arr))\n data = np.round(arr * np.power(10, least_significant_digit))\n data = data.astype(arr.dtype)\n return {'packed_array': zlib.compress(data.tostring(), 9),\n 'dtype': arr.dtype,\n 'least_significant_d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
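The `float_to_int` call in the row above is a helper defined outside the excerpt. A minimal sketch of the round-to-integer-then-view-as-void idea, assuming a hypothetical `digits` default of 8:

```python
import numpy as np

def float_to_int(data, digits=8):
    # Hypothetical stand-in: scale by 10**digits and round, so floats that
    # agree to the requested precision map to the same integer.
    return np.round(np.asanyarray(data) * (10 ** digits)).astype(np.int64)

rows = np.array([[0.1, 0.2], [0.1, 0.2], [0.3, 0.4]])
as_int = float_to_int(rows)
# View each row as one opaque void value so whole rows hash and compare
# as single elements.
hashable = as_int.view(np.dtype((np.void, as_int.dtype.itemsize * as_int.shape[1])))
print(len(np.unique(hashable)))  # 2 unique rows
```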
For arrays of integers find unique values using bin counting. Roughly 10x faster for correct input than np.unique | def unique_bincount(values,
minlength=0,
return_inverse=False,
return_counts=False):
values = np.asanyarray(values)
if len(values.shape) != 1 or values.dtype.kind != 'i':
raise ValueError('input must be 1D integers!')
try:
# count ... | [
"def unique_bincount(values, minlength=0):\n values = np.asanyarray(values)\n if len(values.shape) != 1 or values.dtype.kind != \"i\": # pragma: no cover\n raise ValueError(\"input must be 1D integers!\")\n\n # count the number of occurrences of each value\n counts = np.bincount(values, minlengt... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
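The `unique_bincount` body above is cut off inside its `try` block. A completed sketch of the bin-counting approach, omitting the `return_inverse` branch (note that `np.bincount` raises on negative inputs, which the original's `try`/`except` presumably guards against):

```python
import numpy as np

def unique_bincount(values, minlength=0, return_counts=False):
    values = np.asanyarray(values)
    if len(values.shape) != 1 or values.dtype.kind != 'i':
        raise ValueError('input must be 1D integers!')
    # Count occurrences of each value; indices with nonzero counts are
    # exactly the unique values.
    counts = np.bincount(values, minlength=minlength)
    unique = np.nonzero(counts)[0]
    if return_counts:
        return unique, counts[unique]
    return unique

print(unique_bincount(np.array([0, 2, 2, 5]), return_counts=True))
# (array([0, 2, 5]), array([1, 2, 1]))
```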
Identical to the numpy.unique command, except it evaluates floating point numbers using a specified number of digits. If digits isn't specified, the library default TOL_MERGE will be used. | def unique_float(data,
return_index=False,
return_inverse=False,
digits=None):
data = np.asanyarray(data)
as_int = float_to_int(data, digits)
_junk, unique, inverse = np.unique(as_int,
return_index=True,
... | [
"def test_univGroupsFromNPFloats(self):\n self.setAs(float64)\n self._tester()",
"def _round_digits(digits):\n if len(digits) > float_info.dig:\n # we need to add some rounding to remove spurious digits\n pos = len(digits) - 1\n while pos >= 0:\n if pos > float_inf... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Group vectors based on an angle tolerance, with the option to include negative vectors. | def group_vectors(vectors,
angle=1e-4,
include_negative=False):
vectors = np.asanyarray(vectors, dtype=np.float64)
angle = float(angle)
if include_negative:
vectors = util.vector_hemisphere(vectors)
spherical = util.vector_to_spherical(vectors)
angles, ... | [
"def collect_by_angle(obj):\n out = XRadVolume()\n angles = [ds.fixed_angle for ds in obj]\n unique_angles = list(set(angles))\n if len(unique_angles) == len(obj):\n out.extend(obj)\n else:\n for a in unique_angles:\n idx = np.argwhere(angles == a).flatten()\n merg... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a list of groups find the minimum element of data within each group | def group_min(groups, data):
# sort with major key groups, minor key data
order = np.lexsort((data, groups))
groups = groups[order] # this is only needed if groups is unsorted
data = data[order]
# construct an index which marks borders between groups
index = np.empty(len(groups), 'bool')
in... | [
"def find_minimum(data):\n minimum_of_set = min(data)\n return minimum_of_set",
"def min_by(collection, transform_function):\n if len(collection) == 0:\n return None\n\n min_value = transform_function(collection[0])\n min_item = collection[0]\n\n for item in collection[1:]:\n this_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
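The `group_min` row is truncated just after the border-marking step. A completed sketch of the lexsort technique (sort by group as the major key and value as the minor key, then keep the first entry of each group run):

```python
import numpy as np

def group_min(groups, data):
    # Sort with groups as the major key and data as the minor key, so the
    # first element of each group run is that group's minimum.
    order = np.lexsort((data, groups))
    groups = groups[order]
    data = data[order]
    # Mark the positions where a new group starts.
    index = np.empty(len(groups), 'bool')
    index[0] = True
    index[1:] = groups[1:] != groups[:-1]
    return data[index]

print(group_min(np.array([1, 0, 1, 0]), np.array([5.0, 2.0, 3.0, 7.0])))
# [2. 3.]  -> minima of group 0 and group 1, in sorted group order
```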
Unit test for the stripKeys helper function | def testStripKeys(self):
skeys = ['_id']
expect = {'pileupId': 1}
pdict = {'pileupId': 1, '_id': 1}
pdict = stripKeys(pdict, skeys)
self.assertDictEqual(pdict, expect)
pdict = {'pileupId': 1, '_id': 1}
results = [pdict]
results = stripKeys(results, skeys)... | [
"def test_key_deletion(self):\n pass",
"def test__strip_sort_key():\n input = {\n \"Editor's Circle\": [\n \"Alinger Account:Mark Alinger\",\n \"Blinger Account:Mark Blinger\",\n \"Zlinger Account:Mark Zlinger\",\n ],\n \"Chairman's Circle\": [\"Bar\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Unit test for serialization of timestamps | def testTimestampsSerialization(self):
tkeys = ['insertTime', 'lastUpdateTime']
doc = {'pileupId': 1}
now = int(time.time())
gnow = time.gmtime(now)
expect = time.strftime("%Y-%m-%dT%H:%M:%SZ", gnow)
for key in tkeys:
doc.update({key: now})
# encode ti... | [
"def test_timestamp(self):\n for year, month, day, hour, minute, line in (\n (2020, 2, 11, 0, 0, \"KMCO GFS MOS GUIDANCE 2/11/2020 0000 UTC\"),\n (2020, 12, 3, 12, 0, \"KMCO GFSX MOS GUIDANCE 12/03/2020 1200 UTC\"),\n ):\n time = gfs._timestamp(line)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test the getNewTimestamp function | def testGetNewTimestamp(self):
timeNow = gmtimeSeconds()
resp = getNewTimestamp({})
self.assertEqual(len(resp), 1)
self.assertTrue(resp['lastUpdateTime'] >= timeNow)
resp = getNewTimestamp({'lastUpdateTime': 1})
self.assertEqual(len(resp), 1)
self.assertTrue(resp... | [
"def test_timestamp(data, logging_file_name):\n current_time = datetime.now()\n time_str = current_time.strftime('%d-%m-%y %H:%M')\n create_instance(data, logging_file_name)\n log_file_name = create_file_path(logging_file_name)\n log_file = read_file(log_file_name)\n timestamp = log_file[len(log_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a reference, return all the serials that have a ``has_chain`` capability | async def tile_serials_from_reference(target, reference, afr):
serials = []
async for pkt, _, _ in target.script(DeviceMessages.GetVersion()).run_with(reference, afr):
if pkt | DeviceMessages.StateVersion:
cap = capability_for_ids(pkt.product, pkt.vendor)
if cap.has_chain:
... | [
"def get_chain_bundles(self, name):\n res = self.get_via_condition(lambda x: x.get_chain_name() == name)\n error(f\"Multi-bundle chain output found: {name}!\", len(res) > 1)\n return res[0]",
"def find_by_transaction_reference(self,\n reference: UUID) -> I... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate a unique (not strictly guaranteed) key based on ``something``. | def gen_key(something: AnyStr) -> str:
if isinstance(something, six.binary_type):
return sha1(something).hexdigest()
return sha1(something.encode('UTF-8')).hexdigest() | [
"def generate_keyname():\n return str(uuid.uuid1())",
"def generate_key(self):\n try:\n return self.proto.genuid()\n except ValueError:\n return uuid.uuid4()",
"def _create_key(self):\n return uuid.uuid4().hex",
"def _get_unique_key():\n key = key_generator()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The entity ID of the service provider as a string. | def sp_entity_id(self) -> str:
_res = self.raw_sp_entity_id.text
if not isinstance(_res, str):
raise ValueError(f'Unknown SP entity id type ({type(_res)})')
return _res | [
"def entity_id(self):\n return f\"sensor.{self._entity_id}\"",
"def entity_id(self) -> Optional[Text]:\n return self._entity_id",
"def unique_id(self):\n return f\"{self.entity_id}\"",
"def get_entity_id(self):\n\n\t\treturn self.__entity_id",
"def get_entity_id(domain, name):\n retu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the entity attributes for the SP that made the request from the metadata. | def sp_entity_attributes(self) -> Mapping[str, Any]:
res: Dict[str, Any] = {}
try:
_attrs = self._idp.metadata.entity_attributes(self.sp_entity_id)
for k, v in _attrs.items():
if not isinstance(k, str):
raise ValueError(f'Unknown entity attribu... | [
"def get_entities(self):\n\n\t\tself.entity_key_values = self.req_dict[\"result\"].get(\"parameters\")\n\t\treturn self.entity_key_values",
"def get_attrs(self):\n return self.ms.get_attrs()",
"def getAttrs(self):\n\t\treturn self._attributes",
"def metadata(self):\n return self.Model.metadata",... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that the top luigi package can be imported and contains the usual suspects. | def import_luigi_test(self):
import luigi
# These should exist (if not, this will cause AttributeErrors)
expected = [
luigi.Event,
luigi.Config,
luigi.Task, luigi.ExternalTask, luigi.WrapperTask,
luigi.Target, luigi.LocalTarget,
luigi.... | [
"def test_best_practices_imported():\n assert \"best_practices\" in sys.modules",
"def testImport(self):\n success = False\n try:\n from cutlass import Cytokine\n success = True\n except Exception:\n pass\n\n self.failUnless(success)\n self.fa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Encode a speech waveform. The encoding framers (frames and pitch) pad the frames so that the first frame is centered on sample zero. This is consistent with STRAIGHT and SPTK (I hope!). At least, it means the pitch can have longer frame lengths and still align with the OLA'd frames. | def encode(a, pcm):
if opt.ola:
frameSize = pcm.seconds_to_period(0.025, 'atleast') # 25ms frame size
else:
frameSize = framePeriod
pitchSize = pcm.seconds_to_period(0.1, 'atmost')
print("Encoding with period", framePeriod, "size", frameSize,
"and pitch window", pitchSize)
... | [
"def samples_to_wav(samples):\n buf = io.BytesIO()\n w = wave.open(buf, 'w')\n w.setnchannels(1)\n w.setframerate(fs)\n w.setsampwidth(2)\n \n b = bytearray()\n for x in samples:\n i = int(x * (1 << 16))\n i = max(-(1 << 15), min((1 << 15) - 1, i)) # clipping\n packed = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Switch to a new environment. The new environment must have the same spaces as the old one. | def switch_env(self, new_env):
self.env = new_env | [
"def switchEnvironment(self, name):\n# self.removeGameMode()\n \n if name in globals():\n for eObject in self.envMap.values():\n eObject.unload()\n self.envMap.clear()\n\n self.clearMessageBoxList()\n\n self.envMap[name] = globals()... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Validate that the TEAMS trainer can be serialized and deserialized. | def test_serialize_deserialize(self):
vocab_size = 100
test_generator_network = self._get_network(vocab_size)
test_discriminator_network = self._get_network(vocab_size)
# Create a TEAMS trainer with the created network. (Note that all the args
# are different, so we can catch any serialization mism... | [
"def _validate_transformer(\n self,\n ) -> None:\n if not (\n hasattr(self.transformer, \"fit\") # noqa: WPS421\n and hasattr(self.transformer, \"transform\") # noqa: WPS421\n and hasattr(self.transformer, \"fit_transform\") # noqa: WPS421\n ):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate a sample of N source flux densities, at nu. | def sample_source_counts(self,N,ret_nu_array=False):
beta = self.params['beta']
smx = (self.Smax0/un.Jy) ** (1 - beta)
smn = (self.Smin0/un.Jy) ** (1 - beta)
nu0_sample =((smx - smn)*np.random.uniform(size=N) + smn) ** (1./(1 - beta))
if ret_nu_array:
return np.outer... | [
"def sample_source_counts(self, N, ret_nu_array=False):\n\n exp_num = self._get_mu_in_sections(0)[0]\n tot_num = np.sum(exp_num)\n exp_frac = exp_num/tot_num\n\n nsplit = np.unique(np.random.choice(len(exp_frac), size=N, p=exp_frac), return_counts=True)[-1]\n beta = self.params['b... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
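The draw in `sample_source_counts` above is a standard inverse-transform sample from a power-law density dN/dS ∝ S^-beta: the CDF is linear in S^(1-beta), so a uniform variate can be mapped through its inverse. Pulled out on its own, with the astropy-style unit handling dropped:

```python
import numpy as np

def sample_power_law(smin, smax, beta, n):
    # Inverse-transform sampling for dN/dS proportional to S**(-beta).
    smx = smax ** (1 - beta)
    smn = smin ** (1 - beta)
    u = np.random.uniform(size=n)
    return ((smx - smn) * u + smn) ** (1.0 / (1 - beta))

print(sample_power_law(1e-3, 1.0, 1.8, 5))
```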
Generate a sample of source flux densities, over given area, at nu. | def sample_source_counts(self, N, ret_nu_array=False):
exp_num = self._get_mu_in_sections(0)[0]
tot_num = np.sum(exp_num)
exp_frac = exp_num/tot_num
nsplit = np.unique(np.random.choice(len(exp_frac), size=N, p=exp_frac), return_counts=True)[-1]
beta = self.params['beta']
... | [
"def sample_source_counts(self,N,ret_nu_array=False):\n beta = self.params['beta']\n smx = (self.Smax0/un.Jy) ** (1 - beta)\n smn = (self.Smin0/un.Jy) ** (1 - beta)\n nu0_sample =((smx - smn)*np.random.uniform(size=N) + smn) ** (1./(1 - beta))\n\n if ret_nu_array:\n ret... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get_crypto_key returns the right key matched by name. | def test_get_crypto_key(self):
crypto_key = kms.get_crypto_key(DUMMY_ENABLED_CRYPTO_KEY_NAME)
assert crypto_key.name == DUMMY_ENABLED_CRYPTO_KEY_NAME
crypto_key = kms.get_crypto_key(DUMMY_DISABLED_CRYPTO_KEY_NAME)
assert crypto_key.name == DUMMY_DISABLED_CRYPTO_KEY_NAME | [
"def get_key_pair(self, name):\r\n raise NotImplementedError(\r\n 'get_key_pair not implemented for this driver')",
"def get_key(name, key=None):\n if key is None:\n fname = '.{}'.format(name)\n if exists(fname):\n with open(fname, 'rt') as f:\n key = f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a new instance for a given new provider info value. | def __init__(self, new_provider_infos):
self.new_provider_infos = new_provider_infos | [
"def provider_build(**kwargs):\n return ProviderFactory.build(**kwargs)",
"def get_info(self) -> ProviderInfo:",
"def create_and_update_provider(cls, rowdict, provider_repo):\n deficiency = cls(rowdict)\n cls.update_provider()\n return deficiency",
"def create_and_update_provider(cls, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Request materials from the wrapped CMM, and then change the provider info on each EDK. | def get_encryption_materials(self, request):
result = self.wrapped_cmm.get_encryption_materials(request)
for encrypted_data_key in result.encrypted_data_keys:
encrypted_data_key.key_provider.key_info = self.new_provider_info
return result | [
"def __init__(self, master_key_provider):\n self.wrapped_default_cmm = DefaultCryptoMaterialsManager(master_key_provider)",
"def test_get_ifc_materials(self):\n pass",
"def get_materials(self):\n with open(\"materials.json\", \"r\") as read_file:\n self.materials = json.load(read... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Flip only the given bit in the given ciphertext | def flip_bit(cls, ciphertext, bit):
byte_index, bit_index = divmod(bit, BITS_PER_BYTE)
result = bytearray(ciphertext)
result[byte_index] ^= 1 << (BITS_PER_BYTE - bit_index - 1)
return bytes(result) | [
"def cbc_xor_bitflip(ciphertext, pos, plaintext, desired_string):\n ciphertext = [c for c in ciphertext]\n new_ct = []\n\n for pt, ct, desired in zip(plaintext, ciphertext[pos:], desired_string):\n new_ct.append(chr((ord(pt) ^ ord(ct)) ^ ord(desired)))\n\n ciphertext[pos:pos+len(new_ct)] = new_c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
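`BITS_PER_BYTE` in `flip_bit` above is defined outside the excerpt; assuming the conventional value of 8, a self-contained usage sketch:

```python
BITS_PER_BYTE = 8  # assumed; defined elsewhere in the original module

def flip_bit(ciphertext, bit):
    byte_index, bit_index = divmod(bit, BITS_PER_BYTE)
    result = bytearray(ciphertext)
    # XOR toggles exactly the addressed bit, counted from the MSB.
    result[byte_index] ^= 1 << (BITS_PER_BYTE - bit_index - 1)
    return bytes(result)

print(flip_bit(b"\x00\x00", 0))  # b'\x80\x00' -- MSB of the first byte
```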
Create a new CMM that wraps a new DefaultCryptoMaterialsManager based on the given master key provider. | def __init__(self, master_key_provider):
self.wrapped_default_cmm = DefaultCryptoMaterialsManager(master_key_provider) | [
"def _kms_master_key_from_spec(self, keys):\n # type: (KeysManifest) -> KMSMasterKey\n if not self.type_name == \"aws-kms\":\n raise TypeError(\"This is not an AWS KMS master key\")\n\n key_spec = keys.key(self.key_name)\n return KMS_MASTER_KEY_PROVIDER.master_key(key_id=key_s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate halfsigning materials by requesting signing materials from the wrapped default CMM, and then changing the algorithm suite and removing the signing key from the result. | def get_encryption_materials(self, request):
if request.algorithm == AlgorithmSuite.AES_256_GCM_HKDF_SHA512_COMMIT_KEY:
signing_request = copy(request)
signing_request.algorithm = AlgorithmSuite.AES_256_GCM_HKDF_SHA512_COMMIT_KEY_ECDSA_P384
result = self.wrapped_default_cmm.... | [
"def generate_mkek(hsm):\n BarbicanCharm.singleton.action_generate_mkek(hsm)",
"def test_write_kmip_2_0(self):\n payload = payloads.DeriveKeyRequestPayload(\n object_type=enums.ObjectType.SYMMETRIC_KEY,\n unique_identifiers=[\n \"fb4b5b9c-6188-4c63-8142-fe9c328129fc\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a new (name, decryption scenario) pair | def decryption_test_scenario_pair(self, ciphertext_writer, ciphertext_to_decrypt, expected_result):
ciphertext_name = str(uuid.uuid4())
ciphertext_uri = ciphertext_writer(ciphertext_name, ciphertext_to_decrypt)
return (
ciphertext_name,
MessageDecryptionTestScenario(
... | [
"def create_fw_policy(self,name):",
"def __init__(self, mode, text):\r\n self.mode = mode\r\n self.name = \"Alberti\"\r\n if mode == \"Encrypt\":\r\n self.plaintext = text\r\n self.ciphertext = \"\"\r\n else:\r\n self.ciphertext = text\r\n se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate required plaintext values. | def _generate_plaintexts(plaintexts_specs):
# type: (PLAINTEXTS_SPEC) -> Dict[str, bytes]
return {name: os.urandom(size) for name, size in plaintexts_specs.items()} | [
"def gen(self, kgram, T):\n\n text = kgram\n for i in range(T - self._k, 0, -1):\n text += self.rand(kgram)\n kgram = text[len(text) - self._k:]\n return text",
"def get_prime_text(self):\n return \"\".join(\n [self.format_example(ex) for ex in self.exa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load from a file containing a full message encrypt manifest. | def from_file(cls, input_file):
# type: (IO) -> MessageDecryptionGenerationManifest
raw_manifest = json.load(input_file)
validate_manifest_type(
type_name=cls.type_name, manifest_version=raw_manifest["manifest"], supported_versions=SUPPORTED_VERSIONS
)
parent_dir = o... | [
"def read_manifest_file(self, filename):\n if not os.path.isfile(filename):\n raise KeyError(\"No file found for manifest at {0}\".format(filename))\n\n with open(filename, \"r\") as manifest_file:\n self._manifest = json.load(manifest_file)",
"def add_manifest_from_file(self, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
calculate the distance between two lists using a custom metric | def custom_distance(a, b):
return sum([abs(i - j) ** 2 for (i, j) in zip(a, b)]) / len(a) | [
"def distances(a, b):\n mem = [[]]\n ca = [0]\n mem.append(ca)\n ans = editDist(a, b, mem)\n return ans",
"def calculate_distance(self, point1, point2, list_salience = \"empty\" ):\r\n distance = 0\r\n if list_salience == \"empty\":\r\n list_salience = [1] * len(point1)\r\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
calculate the cosine distance between two lists | def cosine_distance(a, b):
return pdist([a, b], 'cosine') | [
"def cosine_dist(l1: Union[list, np.ndarray], \n l2: Union[list, np.ndarray]):\n return distance.cosine(l1, l2)",
"def cosine_sim(l1: Union[list, np.ndarray], \n l2: Union[list, np.ndarray]):\n return (1. - cosine_dist(l1, l2))",
"def cosine_distance(vec1, vec2):\n return 1... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
calculate the Canberra distance between two lists | def canberra_distance(a, b):
return pdist([a, b], 'canberra') | [
"def basic(r1, r2):\n temp = 0\n n = 0\n for key in r1:\n if key in r2:\n temp += abs(r1[key] - r2[key])\n n += 1\n if n != 0:\n distance = float(temp)/float(n)\n else:\n distance = sys.float_info.max\n return distance",
"def custom_distance(a, b):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
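The `pdist` used by the two rows above is presumably `scipy.spatial.distance.pdist`. Note that it returns a condensed distance array (length 1 for two inputs), so callers wanting a scalar need to index it out:

```python
from scipy.spatial.distance import pdist

a = [1.0, 0.0, 2.0]
b = [0.5, 1.0, 2.0]
# pdist on a 2-row matrix yields a length-1 condensed distance array.
print(pdist([a, b], 'cosine')[0])
print(pdist([a, b], 'canberra')[0])
```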
calculate the similarity of a candidate based on ratings | def similarity(candidate, user):
candidate_rating_vector = []
user_rating_vector = []
for i in candidate:
if i in user:
candidate_rating_vector.append(candidate[i])
user_rating_vector.append(user[i])
ratio = math.log(30 + len(user_rating_vector), 64)
return [candidat... | [
"def get_ratings_similarity(self):\n\n # Get average rating of the target movie\n query_1 = \"SELECT AVG(rating) FROM ratings WHERE movie_id=%i\" % self.target_movie.movie_id\n res = self.db.execute(query_1).fetchall()\n target_movie_average_rating = res[0][0]\n\n pmids = []\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
hybrid algorithm based on average rating, nearest neighbour and slope one. I don't use slope one because it's hard to find similar movies, so its performance is poor. | def hybrid_algorithm(avg, nearest, slope, silence=False):
sign = (nearest - avg) / abs(nearest - avg)
ratio = 0.2
predict_value = nearest + sign * abs(nearest - avg) * ratio
if not silence:
print(' Hybrid Algorithm '.center(80, '#'))
print(round(predict_value), '(', predict_value, ... | [
"def predict(user_id, movie_id):\n print_user_info(user_id)\n print_movie_info(movie_id)\n print_actual_rating(user_id, movie_id)\n avg = average_rating(movie_id)\n nearest = nearest_neighbour(user_id, movie_id)\n slope = slope_one(user_id, movie_id)\n hybrid_algorithm(avg, nearest, slope)",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
use different algorithms to predict a movie rating | def predict(user_id, movie_id):
print_user_info(user_id)
print_movie_info(movie_id)
print_actual_rating(user_id, movie_id)
avg = average_rating(movie_id)
nearest = nearest_neighbour(user_id, movie_id)
slope = slope_one(user_id, movie_id)
hybrid_algorithm(avg, nearest, slope) | [
"def predict_rating(self, movie):\n\n other_ratings = movie.ratings\n\n similarities = [\n (self.similarity(r.user), r)\n for r in other_ratings\n ]\n\n similarities.sort(reverse=True)\n\n similarities = [(sim, r) for sim, r in similarities if sim > 0]\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add `restart` option to argument parser. | def add_restart_arg(parser):
parser.add_argument(
"--restart",
type=_arg_non_neg_int,
default=None,
help=_help_cli,
) | [
"def do_restart(self, args):\n clean = False\n if args.startswith(\"--clean\"):\n args = args[7:]\n clean = True\n\n results = self.zeekctl.restart(clean=clean, node_list=args)\n return results.ok",
"def set_restart_mode(restart_file, flag=\"reload\"):\r\n with... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove calculation folders after (and including) a given number. | def remove_folders_after_number(run_dir, num):
num = _arg_non_neg_int(num)
previous = get_module_steps_folders(run_dir.resolve())
for folder in previous[num:]:
remove_folder(Path(run_dir, folder))
return | [
"def remove_metfile(self, num=0, rall=False):\n if rall:\n self.num_met = 0\n self.metfiles = []\n self.metdirs = [] \n else:\n self.metfiles.pop(num)\n self.metdirs.pop(num) \n self.num_met += -1",
"def delete():\n new_number = number.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that _construct_process_message does not append empty stdout and stderr to the message. | def test_construct_process_message_no_output(self):
process = subprocess.run('exit 0',
shell=True,
text=True,
capture_output=True)
message = import_executor._construct_process_message('message', process)
... | [
"def testFormattedEmptyMessage(self):\n\t\tnotNeeded=self.fixture.read(1) # Empty the port.\n\t\tself.assertEqual(self.fixture.read(1),b'',\n\t\t\t\t\t\tmsg='Need an empty buffer before running this test case.')\n\t\t# port.inWaiting will be 0, so grabPortOutput will just proceed to return\n\t\t# the input outputBu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a list of trackpoints. | def add_trackpoints(self, trackpoints: List) -> None:
self.trackpoints.extend(trackpoints)
self.graph = None | [
"def insert_trackpoints(self):\n users_ids = self.fs_helper.get_all_ids()\n for user_id in users_ids:\n self.insert_trackpoint_by_user_id(user_id)",
"def add_track(self):\n self.tracks.append(Track(self))",
"def add_points():\n client = get_client()\n client['volue'].dataPo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a list of waypoints. | def add_waypoints(self, waypoints: List) -> None:
self.waypoints.extend(waypoints)
self.graph = None | [
"def add_trackpoints(self, trackpoints: List) -> None:\n self.trackpoints.extend(trackpoints)\n self.graph = None",
"def _add_trips(self, trips):\n for trip in trips:\n self.trips.append(self.__get_trip(trip))",
"def append_points(self, points):\n tags = self.AcDbPolyline\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a GPX XML file content. | def add_gpx(self, track_xml: str) -> None:
trackpoints, waypoints = extract_gpx(track_xml)
if trackpoints:
self.add_trackpoints(trackpoints)
if waypoints:
self.add_waypoints(waypoints) | [
"def main_gpx(input_file):\n\thandler = GPSDataHandler(input_file)\n\n\tprint(\"Create GPX file {} from CSV data in {}\".format(handler.output_file, handler.input_file))\n\n\thandler.create_gpx_from_csv()\n\n\tprint(\"GPX file successfully created!\")\n\n\treturn",
"def read_gpx(filename):\n tree = ET.parse(fi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find the shortest path between src and dst with quantile probability. | def fastest_path(self, src: str, dst: str, quantile: float = 0.8) -> nx.Graph:
self._ensure_graph()
path = nx.path_graph(
nx.dijkstra_path(
self.graph, src, dst, lambda u, v, a: np.quantile(a["secs"], quantile)
)
)
return path | [
"def minimumCostPath(paths, destStation):\n path = djk.pathTo(paths, destStation)\n return path",
"def find_fastest_path(file_name,src,dest): \n #initilized parameters\n visited=[]\n distances={}\n predecessors={}\n\n #create dic that represent the graph edges for each vertex\n graph = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
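`fastest_path` above weighs each edge by a quantile of its observed traversal times, so higher quantiles plan against pessimistic timings. A sketch against a hypothetical graph in the shape the method expects (each edge carrying a `secs` list of observations):

```python
import networkx as nx
import numpy as np

g = nx.Graph()
g.add_edge("a", "b", secs=[10, 12, 30])
g.add_edge("b", "c", secs=[5, 6, 7])
g.add_edge("a", "c", secs=[40, 45])

quantile = 0.8
path = nx.dijkstra_path(
    g, "a", "c", lambda u, v, attrs: np.quantile(attrs["secs"], quantile)
)
print(path)  # ['a', 'b', 'c'] -- two hops beat the slow direct edge
```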
Allow the use of Django forms outside Django. By default, at rendering, Django routes all form error messages through internal encoding machinery that attempts to load its settings file to check for internationalisation. To allow the use of these forms outside of Django, this function should be called befo... | def allow_forms_outside_django():
from django.conf import settings
if (not settings._target):
settings.configure (USE_I18N=False) | [
"def test_form_template_i18n():",
"def test_i18n_language_non_english_fallback(self):\n with self.settings(LANGUAGE_CODE=\"fr\"), override(\"none\"):\n response = self.client.get(\"/jsi18n/\")\n self.assertContains(response, \"Choisir une heure\")",
"def test_i18n_language_non_engli... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The standard table styling, with the description put on the left. | def form_as_leftdesc_table (frm):
# TODO: if field.required need
# '<span class='fieldRequired' title='Required'>(Required)</span>'
frm.label_suffix = None
frm.error_class = PlainErrorList
text = frm._html_output (
u'<tr><td><label>%(label)s</label>' \
'<p class="discreet">%(help_text)s</p>%(errors)s</td>' \
... | [
"def printTableTitle(self):\n print(\"%12s %8s %8s %12s %12s %8s %8s %12s %12s %8s %12s\" %('Time[Myr]','id1','id2','semi[R*]','ecc','kw1(i)','kw2(i)','m1[M*](i)','m2[M*](i)','kw(f)','m[M*](f)'))",
"def format_medical_table(self):\n self.format_medical_table_headers()\n self.format_medical_ta... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A convenience function to enclose form html in styled table tags. | def _enclose_in_table (text):
return tag_with_contents (
'table',
text,
class_='revi_formtable',
) | [
"def form_as_table_rows(form):\r\n return {\"form\": form}",
"def render_html(table, data):\n return render(renderers.HtmlRenderer, table, data)",
"def form_as_leftdesc_table (frm):\n\t# TODO: if field.required need\n\t# '<span class='fieldRequired' title='Required'>(Required)</span>'\n\tfrm.label_suffix ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Like fetch_labelindices(), but fetches and converts format in parallel. | def fetch_labelindices_parallel(server, uuid, instance, labels, *, format='single-dataframe', processes=16):
assert format in ('list-of-protobuf', 'pandas', 'single-dataframe')
fmt = format
if fmt == 'single-dataframe':
fmt = 'pandas'
_fetch = partial(fetch_labelindex, server, uuid, instance, f... | [
"def fetch_labelindices(server, uuid, instance, labels, *, format='protobuf', session=None):\n assert format in ('raw', 'protobuf', 'list-of-protobuf', 'pandas', 'single-dataframe')\n if isinstance(labels, (np.ndarray, pd.Series)):\n labels = labels.tolist()\n elif not isinstance(labels, list):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fetch a batch of label indexes via a single call to dvid. | def fetch_labelindices(server, uuid, instance, labels, *, format='protobuf', session=None):
assert format in ('raw', 'protobuf', 'list-of-protobuf', 'pandas', 'single-dataframe')
if isinstance(labels, (np.ndarray, pd.Series)):
labels = labels.tolist()
elif not isinstance(labels, list):
label... | [
"def fetch_labelindices_parallel(server, uuid, instance, labels, *, format='single-dataframe', processes=16):\n assert format in ('list-of-protobuf', 'pandas', 'single-dataframe')\n fmt = format\n if fmt == 'single-dataframe':\n fmt = 'pandas'\n\n _fetch = partial(fetch_labelindex, server, uuid, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Post a protobuf LabelIndex object for the given label to the specified DVID labelmap instance. | def post_labelindex(server, uuid, instance, label, proto_index, *, session=None):
payload = None
assert isinstance(proto_index, (bytes, LabelIndex))
if isinstance(proto_index, LabelIndex):
assert proto_index.label == label
payload = proto_index.SerializeToString()
elif isinstance(proto_i... | [
"def update_dict(label):\n if update:\n nonlocal index\n if label not in labels_dict:\n labels_dict[label] = index\n index += 1",
"def add(self,label):\n\t\tif label not in self._label_to_index:\n\t\t\tself._label_to_index[label] = self.num_labels\n\t\t\t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete the label indexes for a list of bodies. DVID supports deletion en masse via POST of empty label index protobuf structures. (See the DVID docs for POST .../index and POST .../indices) | def delete_labelindices(server, uuid, instance, bodies, *, session=None):
index_list = []
for body in bodies:
li = LabelIndex()
li.label = int(body)
index_list.append(li)
indices = LabelIndices()
indices.indices.extend(index_list)
payload = indices.SerializeToString()
e... | [
"def test_bulk_delete(self):\n\n se = SearchEngineFactory().create()\n # se.create_index(index='test')\n\n for i in range(10):\n x = {\n 'id': i,\n 'type': 'prefLabel',\n 'value': 'test pref label',\n }\n se.index_dat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Copy many labelindexes from one dvid repo to another, in batches. | def copy_labelindices(src_triple, dest_triple, labels, *, batch_size=10_000, threads=None, processes=None):
labels = np.asarray(labels)
label_batches = []
for batch_start in range(0, len(labels), batch_size):
batch = labels[batch_start:batch_start+batch_size]
label_batches.append(batch)
... | [
"def _copy_labelindex_batch(src_triple, dest_triple, labels_batch):\n indexes_batch = fetch_labelindices(*src_triple, labels_batch)\n post_labelindices(*dest_triple, indexes_batch)",
"def split_dataset_by_indices():",
"def merge_indexes(index_files):\n index = {}\n for f in index_files:\n pri... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Helper for copy_labelindices(), above. Defined here at the module top level to allow it to be pickled when using multiprocessing. | def _copy_labelindex_batch(src_triple, dest_triple, labels_batch):
indexes_batch = fetch_labelindices(*src_triple, labels_batch)
post_labelindices(*dest_triple, indexes_batch) | [
"def copy_labelindices(src_triple, dest_triple, labels, *, batch_size=10_000, threads=None, processes=None):\n labels = np.asarray(labels)\n label_batches = []\n for batch_start in range(0, len(labels), batch_size):\n batch = labels[batch_start:batch_start+batch_size]\n label_batches.append(b... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
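The module-level placement noted above matters because `multiprocessing` pickles the callable it ships to worker processes. A generic illustration of the constraint (unrelated to DVID):

```python
from functools import partial
from multiprocessing import Pool

def _work(prefix, item):
    # Module-level, so it can be pickled and sent to workers; a lambda or
    # nested function in its place would raise a pickling error.
    return f"{prefix}-{item}"

if __name__ == "__main__":
    with Pool(2) as pool:
        print(pool.map(partial(_work, "batch"), range(4)))
        # ['batch-0', 'batch-1', 'batch-2', 'batch-3']
```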
Convert a protobuf LabelIndex object into a PandasLabelIndex tuple, which returns supervoxel counts for all blocks in one big pd.DataFrame. | def _convert_labelindex_to_pandas(labelindex):
encoded_block_coords = np.fromiter(labelindex.blocks.keys(), np.uint64, len(labelindex.blocks))
coords_zyx = decode_labelindex_blocks(encoded_block_coords)
block_svs = []
block_counts = []
block_coords = []
# Convert each block's data into arrays
... | [
"def create_labelindex(pandas_labelindex):\n pli = pandas_labelindex\n assert isinstance(pli, PandasLabelIndex)\n labelindex = LabelIndex()\n labelindex.label = pli.label\n labelindex.last_mutid = pli.last_mutid\n labelindex.last_mod_time = pli.last_mod_time\n labelindex.last_mod_user = pli.las... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a protobuf LabelIndex structure from a PandasLabelIndex tuple. In the PandasLabelIndex tuple, the ``blocks`` member is a pd.DataFrame | def create_labelindex(pandas_labelindex):
pli = pandas_labelindex
assert isinstance(pli, PandasLabelIndex)
labelindex = LabelIndex()
labelindex.label = pli.label
labelindex.last_mutid = pli.last_mutid
labelindex.last_mod_time = pli.last_mod_time
labelindex.last_mod_user = pli.last_mod_user
... | [
"def _convert_labelindex_to_pandas(labelindex):\n encoded_block_coords = np.fromiter(labelindex.blocks.keys(), np.uint64, len(labelindex.blocks))\n coords_zyx = decode_labelindex_blocks(encoded_block_coords)\n\n block_svs = []\n block_counts = []\n block_coords = []\n\n # Convert each block's data... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Equivalent to fetch_sizes(), but uses the raw /labelindex endpoint to obtain the sizes, rather than requesting the sizes from dvid. In a single thread, this will be slower than simply callying fetch_sizes(), but if you have more CPU cores than DVID does (or you want to save DVID a little bit of CPU load), then you can ... | def fetch_sizes_via_labelindex(server, uuid, instance, labels, supervoxels=False, *, batch_size=None, threads=None, processes=None, session=None):
if batch_size is None:
assert threads is None and processes is None, \
"Specify a batch size or don't use multithreading"
sizes = _fetch_size... | [
"def list_sizes(location=None):",
"def _get_sizes(self) -> int:\n pass",
"def list_sizes(self, location=None):\r\n raise NotImplementedError(\r\n 'list_sizes not implemented for this driver')",
"def fetch_sparsevol_coarse_via_labelindex(server, uuid, instance, labels, supervoxels=Fals... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Equivalent to fetch_sparsevol_coarse, but uses the raw /labelindex endpoint to obtain the coordinate list, rather than requesting sparsevol RLEs from dvid. You can provide a list of labels to this function, and the output will include all blocks that any of the given labels intersect. When fetching coarse sparsevols fo... | def fetch_sparsevol_coarse_via_labelindex(server, uuid, instance, labels, supervoxels=False, *, method='pandas', session=None):
assert method in ('pandas', 'protobuf')
if np.issubdtype(type(labels), np.integer):
labels = np.asarray([labels], np.uint64)
else:
assert isinstance(labels, Iterabl... | [
"def fetch_labelindices_parallel(server, uuid, instance, labels, *, format='single-dataframe', processes=16):\n assert format in ('list-of-protobuf', 'pandas', 'single-dataframe')\n fmt = format\n if fmt == 'single-dataframe':\n fmt = 'pandas'\n\n _fetch = partial(fetch_labelindex, server, uuid, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
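A hedged usage sketch for the function above. The server details are hypothetical, and the factor of 64 assumes the default labelmap block width, which this entry does not state.

coords_zyx = fetch_sparsevol_coarse_via_labelindex('emdata:8000', 'abc123',
                                                   'segmentation', [1001, 1002],
                                                   method='pandas')
# Coarse sparsevols are block coordinates; scale by the block width
# (assumed 64 here) to obtain voxel-space corners.
corners_zyx = 64 * coords_zyx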
Calls decode_labelindex_block() on a 1D array of encoded coordinates. | def decode_labelindex_blocks(encoded_blocks):
decoded_blocks = np.zeros((len(encoded_blocks), 3), dtype=np.int32)
for i in range(len(encoded_blocks)):
encoded = encoded_blocks[i]
decoded_blocks[i,:] = decode_labelindex_block(encoded)
return decoded_blocks | [
"def decode_labelindex_block(encoded_block):\n z = np.int32((encoded_block >> 2*21) & 0x1F_FFFF) # 21 bits\n y = np.int32((encoded_block >> 21) & 0x1F_FFFF) # 21 bits\n x = np.int32((encoded_block >> 0) & 0x1F_FFFF) # 21 bits\n \n # Check sign bits and extend if necessary\n if encoded_block &... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
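The per-element loop above is easy to read but slow for large indexes. A vectorized sketch, assuming the same 21-bit zyx packing described in the next entry (the function name is hypothetical):

import numpy as np

def decode_labelindex_blocks_vectorized(encoded_blocks):
    encoded_blocks = np.asarray(encoded_blocks, dtype=np.uint64)
    decoded = np.zeros((len(encoded_blocks), 3), dtype=np.int32)
    for axis, shift in enumerate((42, 21, 0)):  # z, y, x
        field = ((encoded_blocks >> np.uint64(shift)) & np.uint64(0x1F_FFFF)).astype(np.int32)
        field -= (field >> 20) << 21  # sign-extend the 21-bit field
        decoded[:, axis] = field
    return decoded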
Helper function. Decodes a block coordinate from a LabelIndex entry. DVID encodes the block coordinates into a single uint64, as three signed 21bit integers, in zyx order (leaving the top bit of the uint64 set to 0). | def decode_labelindex_block(encoded_block):
z = np.int32((encoded_block >> 2*21) & 0x1F_FFFF) # 21 bits
y = np.int32((encoded_block >> 21) & 0x1F_FFFF) # 21 bits
x = np.int32((encoded_block >> 0) & 0x1F_FFFF) # 21 bits
# Check sign bits and extend if necessary
if encoded_block & (1 << (3*2... | [
"def decode_labelindex_blocks(encoded_blocks):\n decoded_blocks = np.zeros((len(encoded_blocks), 3), dtype=np.int32)\n for i in range(len(encoded_blocks)):\n encoded = encoded_blocks[i]\n decoded_blocks[i,:] = decode_labelindex_block(encoded)\n return decoded_blocks",
"def decodevid(vid):\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
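An inverse sketch for completeness; encode_labelindex_block is a hypothetical name, assuming the same layout described above (three signed 21-bit integers in zyx order, top bit zero).

import numpy as np

def encode_labelindex_block(z, y, x):
    # Truncate each two's-complement coordinate to its low 21 bits, then pack.
    z, y, x = (int(c) & 0x1F_FFFF for c in (z, y, x))
    return np.uint64((z << 42) | (y << 21) | x)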
Parses a single tf.Example into image and label tensors. | def example_parser(serialized_example):
features = tf.parse_single_example(
serialized_example,
features={
'image_raw': tf.FixedLenFeature([], tf.string),
'label': tf.FixedLenFeature([], tf.int64),
})
image = tf.decode_raw(features['image_raw'], tf.uint8)
... | [
"def _parse_record(example_proto):\n\n example = tf.parse_single_example(example_proto, feature)\n im = tf.decode_raw(example['image'], tf.float32)\n im = tf.reshape(im, (img_rows, img_cols, 1))\n\n label = tf.decode_raw(example['label'], tf.int32)\n label = tf.reshape(label, (4, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
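A usage sketch for the parser above, using the TF1-era tf.data API to match tf.parse_single_example; the filename is hypothetical.

import tensorflow as tf

dataset = tf.data.TFRecordDataset('mnist_train.tfrecords')
dataset = dataset.map(example_parser).shuffle(10_000).batch(32)
images, labels = dataset.make_one_shot_iterator().get_next()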
Takes the MNIST inputs and mode and outputs a tensor of logits. | def mnist_model(inputs, mode):
# Input Layer
# Reshape X to 4-D tensor: [batch_size, width, height, channels]
# MNIST images are 28x28 pixels, and have one color channel
inputs = tf.reshape(inputs, [-1, 28, 28, 1])
data_format = 'channels_last'
if tf.test.is_built_with_cuda():
# When running on... | [
"def logits(self, x):",
"def transform_logits(self, logits):",
"def mlp_mnist():\n from tensorflow.examples.tutorials.mnist import input_data\n mnist = input_data.read_data_sets('/tmp/data', one_hot=True)\n training_data = np.array([image.flatten() for image in mnist.train.images])\n training_label ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test estimation correctness with image. | def test_correctness_with_image(self):
expectedAgs = 0.96425
imageWithFaceDetection = ImageWithFaceDetection(self.image1, self.detection1.boundingBox)
singleValue = self.estimator.estimate(imageWithFaceDetection=imageWithFaceDetection)
batchValue = self.estimator.estimateBatch([imageWit... | [
"def test_image(self, image):\n if type(image) == str:\n image = cv2.imread(image)\n x0, y0, x1, y1 = self.image(image)\n\n cv2.rectangle(image, (x0, y0), (x1, y1), (0, 0, 255), 1)\n cv2.imshow(\"image\", image)\n cv2.waitKey(0)\n cv2.destroyAllWindows()",
"def... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test estimation correctness with detections. | def test_correctness_with_detections(self):
expectedAgs = 0.96425
singleValue = self.estimator.estimate(detection=self.detection1)
batchValue = self.estimator.estimateBatch(detections=[self.detection1])[0]
assert type(singleValue) == type(batchValue)
assert isinstance(singleValue... | [
"def test_correctness_with_image(self):\n expectedAgs = 0.96425\n imageWithFaceDetection = ImageWithFaceDetection(self.image1, self.detection1.boundingBox)\n\n singleValue = self.estimator.estimate(imageWithFaceDetection=imageWithFaceDetection)\n batchValue = self.estimator.estimateBatch... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test batch estimation correctness with images. | def test_batch_with_images(self):
expectedAgsList = [0.96425, 1.00085]
result = self.estimator.estimateBatch(
[
ImageWithFaceDetection(self.image1, self.detection1.boundingBox),
ImageWithFaceDetection(self.image2, self.detection2.boundingBox),
]
... | [
"def test_batch_with_detections(self):\n expectedAgsList = [0.96425, 1.00086]\n result = self.estimator.estimateBatch(detections=[self.detection1, self.detection2])\n assert isinstance(result, list)\n for idx, row in enumerate(result):\n assert isinstance(row, float)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test batch estimation correctness with detections. | def test_batch_with_detections(self):
expectedAgsList = [0.96425, 1.00086]
result = self.estimator.estimateBatch(detections=[self.detection1, self.detection2])
assert isinstance(result, list)
for idx, row in enumerate(result):
assert isinstance(row, float)
assert ... | [
"def test_correctness_with_detections(self):\n expectedAgs = 0.96425\n singleValue = self.estimator.estimate(detection=self.detection1)\n batchValue = self.estimator.estimateBatch(detections=[self.detection1])[0]\n assert type(singleValue) == type(batchValue)\n assert isinstance(s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test batch estimation with invalid input. | def test_batch_with_detections_bad_input(self):
with pytest.raises(LunaSDKException) as exceptionInfo:
self.estimator.estimateBatch([])
self.assertLunaVlError(exceptionInfo, LunaVLError.InvalidSpanSize.format("Invalid span size")) | [
"def test_estimate_background_batch_invalid_input(self):\n with pytest.raises(LunaSDKException) as e:\n self.backgroundEstimator.estimateBatch([], [])\n assert e.value.error.errorCode == LunaVLError.InvalidSpanSize.errorCode",
"def test_invalid_input_batch_size(self):\n msg1 = (\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a Vertex instance named 'v1'. | def vertex():
return Vertex('v1') | [
"def get_vertex(self, v_id):\n pass",
"def get_or_create_vertex(self, label=None, **kwargs):",
"def get_vertex(self, id_num):",
"def create_vertex(self, key):\n new_vertex = SpVertex(key)\n self._vertex_list[key] = new_vertex\n return new_vertex",
"def vertexId(*args, **kwargs):\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adds a custodyId to the set of custodyIds that this ACS covers. | def add(self, custodyId):
fillsAsBlocks = lengthBlocksToBlocks(self.fills)
fillsAsBlocks = mergeBlocks(fillsAsBlocks + [(custodyId, custodyId)])
self.fills = blocksToLengthBlocks(fillsAsBlocks) | [
"def onchange_custody_ids(self, cr, uid, ids, custody_ids,context=None):\n vals={}\n custody_pool = self.pool.get('vehicle.custody.move')\n value = {}\n if custody_ids:\n rec=self.browse(cr, uid, ids, context=context)\n custodys=[]\n for custody in custod... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
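The mergeBlocks helper used above is not shown in this entry; a plausible sketch, assuming blocks are inclusive (start, end) pairs and that adjacent runs should coalesce:

def merge_blocks(blocks):
    blocks = sorted(blocks)
    merged = [blocks[0]]
    for start, end in blocks[1:]:
        if start <= merged[-1][1] + 1:  # overlapping or adjacent: extend the run
            merged[-1] = (merged[-1][0], max(merged[-1][1], end))
        else:
            merged.append((start, end))
    return merged

For example, merge_blocks([(3, 5), (1, 2), (6, 6)]) collapses to [(1, 6)].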
Serializes this ACS into a string of bytes that constitute the payload of an aggregate custody signal. This serialization does not include the bundle primary block, or the payload block header; it is only the payload of the payload block. | def serialize(self):
# Encode Administrative Record header byte
toReturn = "\x40" # Aggregate Custody Signal, not for a fragment.
# Encode status byte
toReturn += struct.pack('!B', 128*self.succeeded)
# Encode the array of fills.
for (start, length) in... | [
"def serialize(self):\n return bytes(BlockchainEncoder().encode(self.chain), \"utf-8\")",
"def payload(self):\n if not hasattr(self, '_payload'):\n if self.shouldCompress():\n self._payload = compressString(self.data())\n else:\n self._payload = se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
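The fills encoding in the truncated body above presumably uses SDNVs, as RFC 5050 custody signals do; that is an assumption here. A minimal SDNV encoder sketch:

def sdnv_encode(value):
    # Self-Delimiting Numeric Value: 7 data bits per byte, MSB-first,
    # with the high bit set on every byte except the last.
    chunks = [value & 0x7F]
    value >>= 7
    while value:
        chunks.append(0x80 | (value & 0x7F))
        value >>= 7
    return bytes(reversed(chunks))

For example, sdnv_encode(300) yields b'\x82\x2c'.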
Counterpart to BpAcs.serialize(); takes a string of bytes that are the payload of an aggregate custody signal and turns them into an instance of the BpAcs class. acs_string must be the payload of the payload block of an aggregate custody signal bundle (i.e. acs_string must not include a bundle primary block, or the pay... | def unserialize_acs(acs_string):
toReturn = BpAcs()
(adminrecordheader, status, ) = struct.unpack("!BB", acs_string[0:2])
acs_string = acs_string[2:]
# Parse the administrative record header byte.
if (adminrecordheader & 0xF0) != 0x40:
# Not an aggregate custody signal.
ret... | [
"def hex_string_to_binary_string(self,hex_string):\n\n # remove newlines and spaces\n hex_string = hex_string.replace(\" \",\"\")\n hex_string = hex_string.replace(\"\\n\",\"\")\n\n if self.debug:\n print(\"CCSDS packet: \",hex_string)\n print()\n\n # convert... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
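A hypothetical round-trip check, assuming a default-constructed BpAcs is serializable and that fills compare equal after a serialize/unserialize cycle:

acs = BpAcs()
acs.add(42)
acs.add(43)
assert unserialize_acs(acs.serialize()).fills == acs.fills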
Time complexity O(n^2). Idea: use two loops, the outer from i = 0 to n and the inner from j = i+1 to n. For each current element, keep checking for a greater element; if one is found, print(array[j], end=" ") and break. | def naive_next_greater_element(array):
n = len(array)
i = 0
while i < n:
j = i + 1
while j < n:
if array[j] > array[i]:
print(array[j], end=" ")
break
j += 1
else:
print(-1, end=" ")
i += 1 | [
"def imprimer_taquin(taq):\n n=len(taq)\n for decor in range(n):\n print(' ___',end=\"\")\n print('_')\n\n for lignes in taq:\n for cases in lignes:\n if cases-10<0:\n print(\"| \",cases,end=\"\")\n else:\n print(\"|\",cases,end=\"\")\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
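The quadratic scan above can be replaced with a monotonic stack so that each index is pushed and popped at most once, giving O(n); a sketch (the function name is new here):

def next_greater_element(array):
    n = len(array)
    result = [-1] * n
    stack = []  # indices still awaiting a greater element
    for i, value in enumerate(array):
        while stack and array[stack[-1]] < value:
            result[stack.pop()] = value
        stack.append(i)
    print(*result)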
Open the FITS file. Central 10x10 pixels are inspected for NaNs or all 0s (in case of flagging). If the selected channel is bad, the channel 10 steps further along is selected instead. | def fitsopen(filename, chan, verbose=True):
if verbose:
print('Opening', filename)
hdulist = fits.open(filename, memmap=True, mode='denywrite')
hdu = hdulist[0]
head = hdu.header
size1 = head['NAXIS1']
size2 = head['NAXIS2']
size3 = head['NAXIS3']
if chan > size3:
raise ... | [
"def view_fits(infile):\n pf = pyfits.open(infile) # Read-only\n\n # Look at available extensions.\n # This is slightly different than IRAF catfits.\n pf.info()\n\n for ext in range(4):\n # Look at all the headers\n print\n print repr(pf[ext].header)\n print\n\n if... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
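A sketch of the central-patch test the docstring describes; it assumes the data axes are ordered (chan, y, x) after loading, which this truncated entry does not confirm:

import numpy as np

def channel_is_bad(data, chan):
    cy, cx = data.shape[-2] // 2, data.shape[-1] // 2
    patch = data[chan, cy - 5:cy + 5, cx - 5:cx + 5]
    return np.all(np.isnan(patch)) or np.all(patch == 0)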
Test that the decorated autoray function stack can handle all inputs | def test_multi_dispatch_stack(x):
stack = fn.multi_dispatch(argnum=0, tensor_list=0)(autoray.numpy.stack)
res = stack(x)
assert fn.allequal(res, [[1.0, 0.0], [2.0, 3.0]]) | [
"def test_callability():\n the_list = l.function_builder(4)\n for func in the_list:\n assert hasattr(func, '__call__')",
"def test_variable_arguments(self):\n def foo(*args):\n return tuple(args)\n provider = FunctionProvider(foo)\n wrapped_function = provider()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adds a provided change calculation to the database | def add_change_calculation(self, user_id: str, calculation: ChangeCalculation):
receipt = self.__generate_receipt()
with pymysql.connect(host=self.rdb_host, user=self.user_name, passwd=self.password, db=self.db_name) as conn:
with conn.cursor() as cursor:
sql = "insert into m... | [
"def test_calculation_new(self):\n args = 'TestCalc', '12*34', 'integer', 'measure', 'quantitative', 'False'\n original_len = len(self.tds.calculations)\n self.tds.add_calculation(*args)\n self.assertEqual(len(self.tds.calculations), original_len + 1)",
"def test_calculation_change(sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
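The SQL in the body above is truncated; a parameterized-insert sketch in the same pymysql style, with hypothetical table and column names:

sql = "insert into money_due (receipt, user_id, amount) values (%s, %s, %s)"
cursor.execute(sql, (receipt, user_id, calculation.amount))
conn.commit()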
Retrieves the change calculation for the provided receipt | def get_change_from_receipt(self, receipt: str, user_name: str):
# Not a fan that I'm doing an ambiguous (Any) return, but the idea is that
# this is going to be printed to Slack as a message
with pymysql.connect(host=self.rdb_host, user=self.user_name, passwd=self.password, db=self.db_name) as ... | [
"def calculate_change(total, received, denoms):\n change = received - total\n return _calculate_denoms(change, denoms)",
"def change_rate_extractor(change_rates, initial_currency, final_currency):\r\n ACR_1 = '%s/%s'%(\r\n initial_currency, final_currency\r\n )\r\n ACR_2 = '%s/%s'%(\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |