| query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | negatives (list, 19–20 items) | metadata (dict) |
|---|---|---|---|
Create a flexible volume that is a clone of a "backing" or "parent" flexible volume by spawning a background job. The jobid will be returned. The progress of the job can be tracked using the job APIs. This command fails if the chosen parent volume is currently involved in a split operation. This command also fails if t... | def volume_clone_create_async(self, parent_volume, volume, use_snaprestore_license=None, junction_active=None, space_reserve=None, junction_path=None, parent_snapshot=None):
return self.request( "volume-clone-create-async", {
'use_snaprestore_license': [ use_snaprestore_license, 'use-snaprestore-lic... | [
"def volume_clone_create(self, parent_volume, volume, use_snaprestore_license=None, force_worm_clone=None, junction_active=None, qos_policy_group_name=None, space_reserve=None, junction_path=None, parent_snapshot=None, volume_type=None):\n return self.request( \"volume-clone-create\", {\n 'use_sna... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Scans aggregates and returns a list of compatible target aggregates for the given volume move operation. | def volume_move_target_aggr_get_iter(self, vserver, volume_name, max_records=None, desired_attributes=None, tag=None, query=None):
return self.request( "volume-move-target-aggr-get-iter", {
'max_records': max_records,
'desired_attributes': [ desired_attributes, 'desired-attributes', [ Vo... | [
"def get_mutations_for_target(target: LocIndex) -> Set[Any]:\n search_space: List[Set[Any]] = [m.operations for m in get_compatible_operation_sets()]\n mutation_ops: Set[Any] = set()\n\n for potential_ops in search_space:\n if target.op_type in potential_ops:\n LOGGER.debug(\"Potential mu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given the name of a flexible volume, either return its current size or set the volume's size to the stated amount. This API is not supported for Infinite Volumes. Also, this API does not allow setting the volume's size from a vFiler context. | def volume_size(self, volume, new_size=None):
return self.request( "volume-size", {
'volume': [ volume, 'volume', [ basestring, 'None' ], False ],
'new_size': [ new_size, 'new-size', [ basestring, 'None' ], False ],
}, {
'is-fixed-size-flex-volume': [ bool, False ],
... | [
"def volume_size_async(self, volume_name, new_size=None):\n return self.request( \"volume-size-async\", {\n 'new_size': [ new_size, 'new-size', [ basestring, 'None' ], False ],\n 'volume_name': [ volume_name, 'volume-name', [ basestring, 'None' ], False ],\n }, {\n 're... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Restrict the specified volume, making it unavailable for user-level data access but leaving it (or its containing aggregate, if it's a flexible volume) available to internal ONTAP RAID-level access. This API is not supported for Infinite Volumes. This API is not supported on Infinite Volume constituents. | def volume_restrict(self, name, cifs_delay=None):
return self.request( "volume-restrict", {
'name': [ name, 'name', [ basestring, 'None' ], False ],
'cifs_delay': [ cifs_delay, 'cifs-delay', [ int, 'None' ], False ],
}, {
} ) | [
"def volume_restrict_async(self, volume_name):\n return self.request( \"volume-restrict-async\", {\n 'volume_name': [ volume_name, 'volume-name', [ basestring, 'None' ], False ],\n }, {\n 'result-error-message': [ basestring, False ],\n 'result-jobid': [ int, False ],\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Destroy the specified volume or plex. If a flexible volume is specified, all of its blocks are freed and returned to its containing aggregate; no other flexible volumes in the same containing aggregate (if any) are affected. If a traditional volume is specified, all of its plexes are destroyed, and its disks are return... | def volume_destroy(self, name, force=None, unmount_and_offline=None):
return self.request( "volume-destroy", {
'force': [ force, 'force', [ bool, 'None' ], False ],
'name': [ name, 'name', [ basestring, 'None' ], False ],
'unmount_and_offline': [ unmount_and_offline, 'unmount... | [
"def destroy_volume(self, volume):\r\n url = REST_BASE + '/storage/%s' % (volume.id)\r\n status = int(self.connection.request(action=url,\r\n method='DELETE').status)\r\n return status == httplib.OK",
"def delPhysicalVolume(self, pv):\n self.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Pauses the volume move operation of the specified source volume. This is a synchronous API. | def volume_move_pause(self, source_volume):
return self.request( "volume-move-pause", {
'source_volume': [ source_volume, 'source-volume', [ basestring, 'None' ], False ],
}, {
} ) | [
"def volume_move_abort(self, source_volume):\n return self.request( \"volume-move-abort\", {\n 'source_volume': [ source_volume, 'source-volume', [ basestring, 'None' ], False ],\n }, {\n } )",
"def volume_move_resume(self, source_volume, cutover_window=None, is_manual_cutover=None... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Trigger cutover of a move job | def volume_move_trigger_cutover(self, source_volume, vserver=None, force=None):
return self.request( "volume-move-trigger-cutover", {
'vserver': [ vserver, 'vserver', [ basestring, 'None' ], False ],
'source_volume': [ source_volume, 'source-volume', [ basestring, 'None' ], False ],
... | [
"def cancelMove(self) -> None:\n frames_already_done = self._totalFrameNeeded - self._frameNeeded\n for _ in range(frames_already_done):\n self.unit.moveTo(self.sourceTile.graphics.center)\n self.isPerformed = True",
"def at_after_move(self, source_location):\r\n pass",
"d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Renames the specified volume to a new name specified by "new-volume-name". If the volume is referenced in the /etc/exports file, remember to make the name change in /etc/exports also so that the affected file system can be exported by the filer after the filer reboots. The "volume-rename" command does not automatically up... | def volume_rename(self, volume, new_volume_name):
return self.request( "volume-rename", {
'volume': [ volume, 'volume', [ basestring, 'None' ], False ],
'new_volume_name': [ new_volume_name, 'new-volume-name', [ basestring, 'None' ], False ],
}, {
} ) | [
"def rename(self, new_name):\n\n if not new_name:\n raise LvmVolumeError(_(\"No new name for logical volume given.\"))\n\n new_name = str(new_name).strip()\n if new_name == '':\n raise LvmVolumeError(_(\"Empty name for logical volume given.\"))\n\n if new_name == se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a flexible volume that is a clone of a "backing" or "parent" flexible volume. A clone is a volume that is a writable snapshot of another volume. Initially, the clone and its parent share the same storage; more storage space is consumed only as one volume or the other changes. If a specific snapshot name within t... | def volume_clone_create(self, parent_volume, volume, use_snaprestore_license=None, force_worm_clone=None, junction_active=None, qos_policy_group_name=None, space_reserve=None, junction_path=None, parent_snapshot=None, volume_type=None):
return self.request( "volume-clone-create", {
'use_snaprestore_... | [
"def create_clone(\n self,\n client,\n vol_name,\n subvol_name,\n snap_name,\n target_subvol_name,\n validate=True,\n **kwargs,\n ):\n clone_cmd = f\"ceph fs subvolume snapshot clone {vol_name} {subvol_name} {snap_name} {target_subvol_name}\"\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the overridden methods in an object. | def get_overridden_methods(cls: type, obj: Type['spines.base.BaseObject']):
common = cls.__dict__.keys() & obj.__class__.__dict__.keys()
return [
m for m in common if cls.__dict__[m] != obj.__class__.__dict__[m]
and callable(cls.__dict__[m])
] | [
"def get_object_methods(obj):\n import utool as ut\n attr_list = (getattr(obj, attrname) for attrname in dir(obj))\n methods = [attr for attr in attr_list if ut.is_method(attr)]\n return methods",
"def is_overridden(obj):\n return getattr(obj, \"__is_overriden__\", True)",
"def get_original_metho... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the full subcategory of the objects of ``self`` having no nonzero zero divisors. A zero divisor in a ring `R` is an element `x \in R` such that there exists a nonzero element `y \in R` such that `x \cdot y = 0` or `y \cdot x = 0` | def NoZeroDivisors(self):
return self._with_axiom('NoZeroDivisors') | [
"def clustersWithout0(self):\n clusters = [] # liste de clusters (individu)\n temp_list = [] # liste temporaire contenant un seul cluster\n\n for i in self.individual: # pour chaque élément dans l'individu\n if i != 0: # si l'élément est différent de 0\n temp_list.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the full subcategory of the division objects of ``self``. A ring satisfies the division axiom if all nonzero elements have multiplicative inverses. | def Division(self):
return self._with_axiom('Division') | [
"def barycentric_subdivision(self):\n return self.face_poset().order_complex()",
"def divide(self, frac):\n # by default, the element is indivisible\n return [self]",
"def getDivisors(self):\n return self.__divisors",
"def __div__(self, other):\n tccd = []\n if isinstance(o... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return ``True`` if this is the zero ring. | def is_zero(self):
return self.one() == self.zero() | [
"def is_zero(self):\n return self == self.number_field().ideal(0)",
"def is_zero(self):\n if not self.monomials and not self.coeffs:\n return True\n else:\n return False",
"def __bool__(self):\n return self.zero.defined and self.zero.value == 0.0",
"def iszero... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the Lie bracket `[x, y] = x y - y x` of `x` and `y`. | def bracket(self, x, y):
return x*y - y*x | [
"def _coords(self, x, y):\n return y, x * 2",
"def x_and_y_to_xy(x, y):\n return flatten(zip(x,y))",
"def polygon_under_graph(x, y):\n return [(x[0], 0.), *zip(x, y), (x[-1], 0.)]",
"def stack_coordinates(\n x: np.ndarray,\n y: np.ndarray\n) -> np.ndarray:\n return np.vstack((x, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
r""" Returns the homset from ``self`` to ``Y`` in the category ``category`` | def _Hom_(self, Y, category):
if category is not None and not category.is_subcategory(Rings()):
raise TypeError("%s is not a subcategory of Rings()"%category)
if Y not in Rings():
raise TypeError("%s is not a ring"%Y)
from sage.rings.homset import Ring... | [
"def head_category_set(self) -> FrozenSet[Category]:\n return self._subcategory_sets[self._head_index]",
"def subSetClasse(labeledSet):\n res_plus = ls.LabeledSet(labeledSet.getInputDimension())\n res_moins = ls.LabeledSet(labeledSet.getInputDimension())\n for i in range(labeledSet.size()):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The monoid of the ideals of this ring. | def ideal_monoid(self):
try:
from sage.rings.ideal_monoid import IdealMonoid
return IdealMonoid(self)
except TypeError:
from sage.rings.noncommutative_ideals import IdealMonoid_nc
return IdealMonoid_nc(self) | [
"def identity_morphism(self):\n return KenzoChainComplexMorphism(__idnt_mrph__(self._kenzo))",
"def one_from_one_basis(self):\n return self.monomial(self.one_basis()) #.",
"def binomial(self):\n return self._binomial",
"def identity_morphism(self):\n from sage.schemes.gener... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the characteristic of this ring. | def characteristic(self):
from sage.rings.infinity import infinity
from sage.rings.integer_ring import ZZ
order_1 = self.one().additive_order()
return ZZ.zero() if order_1 is infinity else order_1 | [
"def characteristic_polynomial(self):\n # TODO\n pass",
"def risk_characteristic(self):\n return self._risk_characteristic",
"def get_characteristic(self, uuid):\r\n for char in self.characteristics:\r\n if char.get_uuid() == uuid:\r\n return char\r\n\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Quotient of a ring by a two-sided ideal. | def quotient_ring(self, I, names=None):
return self.quotient(I,names=names) | [
"def discount(t,r):\r\n return (1+r)**(-t)",
"def _radius_eq23(th, th1):\n return np.sin(th1)/np.sin(th+th1)",
"def quotient_by_principal_ideal(self, f, names=None):\n from sage.rings.ideal import Ideal\n I = Ideal(f)\n if I.is_zero():\n return self\n f = I.gen()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extend this ring by one or several elements to create a polynomial ring, a power series ring, or an algebraic extension. This is a convenience method intended primarily for interactive use. | def __getitem__(self, arg):
def normalize_arg(arg):
if isinstance(arg, (tuple, list)):
# Allowing arbitrary iterables would create confusion, but we
# may want to support a few more.
return tuple(arg)
elif isinstance... | [
"def base_extend(self, R):\n from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing\n\n if R.has_coerce_map_from(self.base_ring()):\n return PolynomialRing(R, names=self.variable_name(), sparse=self.is_sparse())\n else:\n raise TypeError(\"no such bas... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
r""" Return whether this element is a unit in the ring. | def is_unit(self):
if self.is_one() or (-self).is_one():
return True
if self.is_zero(): # now 0 != 1
return False
raise NotImplementedError | [
"def has_unit(obj):\n return hasattr(obj, UNIT_ATTR_NAME)",
"def has_units(obj):\n return hasattr(obj, 'units')",
"def is_unit(ustr):\n ustr = backwards.bytes2unicode(ustr)\n if is_null_unit(ustr):\n return True\n try:\n as_unit(ustr)\n except ValueError:\n return False\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Method called when pressing the button to convert to .CSV and TFRecord format | def convertFiles(self):
try:
if self.ui.lineE4.text() != '' and self.ui.lineE5.text() != '':
if self._toCSV():
if(self._generarTFRecord()): #crear TENSORFLOW RECORD
print('TFRecord creados con exito')
else:
... | [
"def _saveCSV( self ):",
"def test_export_csv_to_file(self):\n pass",
"def _write_tfrecords_file(self, annotations, path_to_tfrecords):",
"def _generarTFRecord(self):\n try:\n argslist = []\n mydir = str(os.path.join(os.getcwd(), 'tools'))\n dirTF = str(os.path.d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Method called when pressing the 'Generar TFRecord' button | def _generarTFRecord(self):
try:
argslist = []
mydir = str(os.path.join(os.getcwd(), 'tools'))
dirTF = str(os.path.dirname(self.ui.lineE4.text()))
for set in self.sets:
#arg1 = str(os.environ['ENV1'])
arg1 = 'python'
... | [
"def make_TF_instance(TF_rec):\n tf = TFInstance(protein_accession=TF_rec.name,\n name=TF_rec.name,\n description=TF_rec.description)\n tf.save()",
"def create_tfrecord(task_name, split, processor, tokenizer, pad_for_eval=False):\n if task_name != FLAGS.task_name and t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
OK button (default) loads the specified file from the folder ~/Documentos/LKE_framework/object_detection/samples/configs/ | def cargarConfigs(self):
try:
self.dirModelConfig = os.path.join(OBJECTDETECTIONPATH, "samples/configs/{}".format(str(self.ui.lineE9.text())))
print("Modelo NUEVO seleccionado: {}".format(str(self.dirModelConfig)))
file = open(self.dirModelConfig, 'r')
with fil... | [
"def test_object_detection(init_env, config):\n config_file = generate(config)\n run_all_steps(init_env, config_file)",
"def ensure_config_path() -> str:\n home = os.path.expanduser('~')\n mce_config_dir = os.path.join(home, '.mce')\n if not os.path.exists(mce_config_dir):\n print(f'creating... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Download the model: look up in the file '/src/model4download.csv' the download path specified for the model according to the preloaded configuration file | def downloadModel(self):
self.ui.downModel.setEnabled(0)
m4d = os.path.join(os.getcwd(), "src/models4download.csv")
flag = False
url = None
try:
# abrir modelos para descarga
with open(m4d, 'r') as csvFile:
reader = csv.reader(csvFile)
... | [
"def downloadModel(self):\n \"\"\" If user select multi row, only data from currentRow is downloaded and loaded into moose \"\"\"\n selectedRow = self.resultsPanel.currentRow()\n modelId = self.resultsPanel.item(selectedRow, 0).text()\n modelSBML = str(self.client.service.getModelSBMLByI... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Cancel the download of the model selected in the ~/models folder | def cancelDonwload(self):
if self.thread3.isRunning():
try:
print("Hilo activado y listo para detener")
self.ui.downModel.setEnabled(1)
self.ui.progressBar.setValue(0)
modelsDir = str(os.path.join(os.getcwd(), "models")) # se guarda e... | [
"def cancel(self):\n url = self._path.format(self.custom_model_id, self.custom_model_version_id)\n self._client.delete(url)",
"def test_model_select(self, modelo):\n self.assertNotEqual(modelo, '', note=\"El modelo no debe estar vacío.\")",
"def reset_context():\n global _model\n _mod... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Allows checking whether the configuration file is loaded | def _checkModelConfig(self):
if (self.modelConfig.__eq__('')):
print('Debe cargar primero el archivo de configuración')
self.statusBar().showMessage('Debe cargar primero el archivo de configuración')
return False
else:
return True #true porque no esta vaci... | [
"def check_configuration(file):\n return os.path.isfile(file)",
"def _check_file(self):\n if not os.path.exists(self.file_path):\n return False\n self._migrate()\n config = configparser.RawConfigParser()\n config.read(self.file_path)\n try:\n config.get(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Export the computational graph so it can be used with our data; the specific training number can be passed as a parameter, or by default the last correctly saved one is taken. To export the computational graph after training it is necessary to run the following code From tensorflow/mode... | def exportGraph(self):
try:
if(self._checkModelConfig()):
num_check = ''
b = 0 #si b = 1 los datos son validos y se puede exportar
#cambiar el checkpoint segun sea el radioButton
if(self.ui.rb_lastCheck.isChecked()):
... | [
"def export_frozen_inference_graph(\n checkpoint_path, pipeline_config_path, output_dir\n):\n # Import here because they are sooooo slow\n sys.path.append(etac.TF_OBJECT_DETECTION_DIR)\n from google.protobuf import text_format\n from object_detection import exporter # pylint: disable=import-error\n\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Validate whether the checkpoint number exists | def _validarCheckpoint(self, num_check):
dirCheckpoint = os.path.join(os.getcwd(), 'projects/{}/training/'.format(self.nameProject))
for root, dirs, files in os.walk(dirCheckpoint):
for file_name in files:
indexstr = file_name.find('model.ckpt-{}.meta'.format(num_check))
... | [
"def test_invalid_input_checkpoint_step(self):\n msg1 = (\n 'Must raise `TypeError` or `ValueError` when input '\n '`checkpoint_step` is invalid.'\n )\n msg2 = 'Inconsistent error message.'\n examples = (\n False, 0, -1, 0.0, 1.0, math.nan, -math.nan, mat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given nonsequential nodes, elements, boundary elements containing homogeneous displacements in [1 .. n_space_dimensions], and update_ratio between (0, 1), returns the nodes in updated positions. | def smooth_neighbor_nonweighted(*, nodes, elements, boundary, update_ratio):
assert update_ratio > 0.0 and update_ratio < 1.0
displacements = dict() # empty prior to update
boundary_keys = boundary.keys()
elements_wo_element_number = tuple([x[1:] for x in elements])
adj = adjacencies_upper_diago... | [
"def _divideElement(self, elemID, nPerElement, maxElemId, keysNotToCopy=[]): \n if len(self.Modes)>0:\n raise Exception('Cannot divide graph when mode data is present')\n if len(self.Motions)>0:\n raise Exception('Cannot divide graph when motion data is present')\n\n\n max... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load all required Cisco SNMP tables. | def _load_snmp_tables(self):
self._logger.info("Start loading MIB tables:")
self._if_table = self._snmp.get_table("IF-MIB", "ifDescr")
self._logger.info("ifIndex table loaded")
self._logger.info("MIB Tables loaded successfully") | [
"def readAllTables():\n pidMap = SCOS.MIB.readTable(\"pid.dat\")\n picMap = SCOS.MIB.readTable(\"pic.dat\")\n tpcfMap = SCOS.MIB.readTable(\"tpcf.dat\")\n pcfMap = SCOS.MIB.readTable(\"pcf.dat\")\n plfMap = SCOS.MIB.readTable(\"plf.dat\", uniqueKeys=False)\n ccfMap = SCOS.MIB.readTable(\"ccf.dat\")\n cpcMap ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that update_if_dirty works. We use the last_modified field as a proxy for knowing whether or not we actually did a save. | def test_update_if_dirty(self):
create_tables(TestModel)
x = TestModel()
x.my_int = 3
x.my_string = "foo"
x.save()
last_mod = x.last_modified
django_util.update_if_dirty(x, my_int=3, my_string="bar")
assert_not_equal(x.last_modified, last_mod)
last_mod = x.last_modified
django_... | [
"def test_save(self):\n instance1 = BaseModel()\n attr_updated_before_save = instance1.updated_at\n instance1.save()\n attr_updated_after_save = instance1.updated_at\n self.assertNotEqual(attr_updated_before_save, attr_updated_after_save)",
"def test_that_save_func_update_update... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute and return the Armstrong number | def armstrong_number(number):
total = 0
num_len = len(number)
for i in range(len(number)):
total += int(number[i]) ** num_len
return total | [
"def get_armstrong_value(num):\n num = str(num)\n length = len(num)\n armstrong_value = 0\n for char in num:\n armstrong_value += int(char)**length\n return armstrong_value",
"def final_amt(p, r, n, t):\r\n a = p*(1+r/n)**(n*t)\r\n return a",
"def avhrr( redchan, nirchan ):\n\tif( ni... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Runs the component importer. Project should not be in a transaction. acm_files is a list of acm or xml AVM component model file, which will be imported into the project. | def run_importer(project, lib_root, acm_file):
print 'Running Component Importer'
importer = win32com.client.DispatchEx("MGA.Interpreter.CyPhyComponentImporter")
importer.Initialize(project) # initialize GMEConsole and MGAGateway
project.BeginTransactionInNewTerr() # transaction is required by the... | [
"def import_jobs(rd_client, projects, files):\n for file_path in files:\n job = open(file_path, 'r').read()\n print(\"Importing %s\" % file_path)\n response = rd_client.import_job(\n job, fmt=\"yaml\",project=projects,dupeOption=\"update\"\n )\n if response['failed'] is not None:\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calls the set_focus method once after a 0.5 second interval. The focus must be returned after a short interval, so this method is necessary. | def set_focus_real(self):
Clock.schedule_once(self.set_focus, 0.5) | [
"def setFocus():\n pass",
"def _on_focus_changed(self, old, new):\n self._update_focus_widget()",
"def focus_change(self, func):\r\n return self._subscribe(\"focus_change\", func)",
"def set_focused(self):\n self.has_keyboard_focus = True",
"def set_focus(self):\n self.logger.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
closes the GUI window | def close_window(self):
Window.close() | [
"def close_window(_):\n root.destroy()",
"def close_window(window):\r\n window.destroy()",
"def close(self):\n\n Dialog.close(self)\n gui.no_modal_dialog=True",
"def doQuit(self):\n\n self.mainWin2.destroy()",
"def shutdown_gui(self):\n Gtk.main_quit()",
"def closeWin... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
draw graph of test result when a device button is clicked | def draw_graph(self, dev, txt):
"""x-axis reps bias_volt and y-axis reps cont_curr."""
if txt != '':
self.firstbox.device.text = "Summary of: " + dev
f = open(tst.get_path(), 'r')
s = f.read()
bias_v = []
cont_i = []
if l... | [
"def drawTestOutput(p_test, Y_test, weight_test, xmin, xmax, num_bins, node, class_tag): #class_tag = ['tth', 'ttb', 'ttc']\n plt.figure()\n\n\n d_bins = (xmax-xmin) / float(num_bins)\n\n nNode=flavorTransfor(node)\n x_l=0.\n x_h=1.\n # the histogram of the data\n #test_sig\n evt_tot, cat = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
take tester's choice of device and put the test result into the | def test_device(self, txt):
"""appropriate button."""
try:
# Manual testing mode
if not tst.get_auto():
if ((txt == '') | (len(txt) != 2)):
popup.give_warning("please insert correct information.")
elif (not((ord(txt[0]) in ra... | [
"def create_testbed_device_instance(self, dev_name_info, hint):\n testbed_dev = None\n if hint == \"AP\":\n testbed_dev = TestBedAP(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"AP\"\n if hint == \"STA\":\n testbed_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets up logging for a test of a chip. | def setup_logging():
"""it is called by start_test() and enables creation of separate logs for consecutive testing."""
if tst.get_log() != "":
tst.get_log().removeHandler(tst.get_hdlr1())
tst.get_data().removeHandler(tst.get_hdlr2())
log_fn = "{}{}_{}_{}_{}_log.txt".format(logs_folde... | [
"def startTest(self, event):\r\n self._setupLoghandler()",
"def setup_logging():\n client = logging.Client()\n client.get_default_handler()\n client.setup_logging()",
"def setup_logging(self):\n logfile = self.configuration['options'].get('logfile', None)\n if logfile and isinstanc... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Display the bullets to the screen. | def display_bullets(self):
pygame.draw.rect(self.screen, self.settings.bullet_color, self.bullet) | [
"def _draw_bullets(self, window):\n for bullet in self._bullets:\n if not self._is_visible(bullet['rect']):\n bullet['visible'] = False\n if bullet['visible']:\n pygame.draw.rect(window, bullet['color'], bullet['rect'])\n self._bullets = [\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test for install-hook subcommand | def test_install_hook(self, _, install_hook):
result = self.cli.invoke(cli.cli, ["install-hook"])
expected_path = os.path.join(u"/hür", u"dur", hooks.COMMIT_MSG_HOOK_DST_PATH)
expected = u"Successfully installed gitlint commit-msg hook in {0}\n".format(expected_path)
self.assertEqual(res... | [
"def test_install_hook_negative(self, install_hook):\n result = self.cli.invoke(cli.cli, [\"install-hook\"])\n self.assertEqual(result.exit_code, self.GIT_CONTEXT_ERROR_CODE)\n self.assertEqual(result.output, u\"tëst\\n\")\n expected_config = config.LintConfig()\n expected_config.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test for install-hook subcommand with a specific target option specified | def test_install_hook_target(self, _, install_hook):
# Specified target
result = self.cli.invoke(cli.cli, ["--target", self.SAMPLES_DIR, "install-hook"])
expected_path = os.path.join(u"/hür", u"dur", hooks.COMMIT_MSG_HOOK_DST_PATH)
expected = "Successfully installed gitlint commit-msg ho... | [
"def test_install_hook_negative(self, install_hook):\n result = self.cli.invoke(cli.cli, [\"install-hook\"])\n self.assertEqual(result.exit_code, self.GIT_CONTEXT_ERROR_CODE)\n self.assertEqual(result.output, u\"tëst\\n\")\n expected_config = config.LintConfig()\n expected_config.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Negative test for install-hook subcommand | def test_install_hook_negative(self, install_hook):
result = self.cli.invoke(cli.cli, ["install-hook"])
self.assertEqual(result.exit_code, self.GIT_CONTEXT_ERROR_CODE)
self.assertEqual(result.output, u"tëst\n")
expected_config = config.LintConfig()
expected_config.target = os.pat... | [
"def test_uninstall_hook_negative(self, uninstall_hook):\n result = self.cli.invoke(cli.cli, [\"uninstall-hook\"])\n self.assertEqual(result.exit_code, self.GIT_CONTEXT_ERROR_CODE)\n self.assertEqual(result.output, u\"tëst\\n\")\n expected_config = config.LintConfig()\n expected_c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Negative test for uninstall-hook subcommand | def test_uninstall_hook_negative(self, uninstall_hook):
result = self.cli.invoke(cli.cli, ["uninstall-hook"])
self.assertEqual(result.exit_code, self.GIT_CONTEXT_ERROR_CODE)
self.assertEqual(result.output, u"tëst\n")
expected_config = config.LintConfig()
expected_config.target = ... | [
"def on_uninstall(self):\n pass",
"def uninstall(runner, hook_type='pre-commit'):\n hook_path = runner.get_hook_path(hook_type)\n legacy_path = hook_path + '.legacy'\n # If our file doesn't exist or it isn't ours, gtfo.\n if not os.path.exists(hook_path) or not is_our_script(hook_path):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Examine the current room based on the light level and list contents. | def look_around(self):
if self.current_room.light is True:
print(self.current_room.description + '\n')
if len(self.current_room.items) == 1:
print(f'You can see a {self.current_room.items[0]}.\n')
elif len(self.current_room.items) > 1:
print('Y... | [
"def explore_room(room):\n time_check()\n items = [i[\"name\"] for i in object_relations[room[\"name\"]]]\n print(\"You explore the room. This is \" + room[\"name\"] + \". You find \" + \", \".join(items))",
"def roomInfo():\n # global LOC, ROOMS_VISITED, DEAD_GUARD_HAS_UNIFORM\n if LOC not in ROOM... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate a SHA signature using the current time, the database secret, and the record object or the res_model and res_id parameters. Return the SHA signature and the time of generation in a tuple | def object_shasign(record=False, res_model='', res_id=None, **kw):
secret = request.env['ir.config_parameter'].sudo().get_param('database.secret')
shasign = False
timestamp = int(time())
if record:
shasign = sha1('%s%s%s%s' % (record._model, record.id, secret, timestamp)).hexdigest()
elif re... | [
"def get_sign(self) -> Tuple[str, int]:\n md5 = hashlib.md5()\n timestamp = int(datetime.datetime.now().timestamp())\n md5.update(\n (self.auth[\"user_key\"] + str(timestamp) + self.auth[\"user_secret\"]).encode(\n \"utf-8\"\n )\n )\n sign = md... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Build an Elasticsearch document from the person instance. | def get_es_document_for_person(cls, person, index=None, action="index"):
index = index or cls.index_name
# Get published titles
titles = {
t.language: t.title
for t in Title.objects.filter(page=person.extended_object, published=True)
}
# Prepare portrait... | [
"def from_elasticsearch(cls, document):\n return cls(**document['_source'])",
"def to_document(self):\n try:\n return search.Document(\n doc_id=str(self.key.urlsafe()),\n fields=self._get_document_fields())\n\n except (TypeError, ValueError) as e:\n raise DocumentCreationE... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Format a person stored in ES into a consistent and easy-to-consume record for API consumers | def format_es_object_for_api(es_person, best_language):
return {
"id": es_person["_id"],
"portrait": get_best_field_language(
es_person["_source"]["portrait"], best_language
),
"title": get_best_field_language(
es_person["_source"][... | [
"def elasticsearch_format(self, entry):\n date_obj = self.parse_date(entry[\"reg_date\"])\n entry[\"reg_date\"] = datetime.strftime(date_obj, \"%Y-%m-%dT%H:%M:%S.000Z\")\n # all bulk data need meta data describing the data\n meta_dict = {\n \"index\": {\n \"_ind... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Format a document stored in ES into an easy-to-consume record for autocomplete consumers. This method differs from the regular one as objects retrieved from query vs. complete queries can be formatted differently; and consumers of autocomplete do not need full objects. | def format_es_document_for_autocomplete(es_document, language=None):
return {
"id": es_document["_id"],
"kind": "persons",
"title": get_best_field_language(es_document["_source"]["title"], language),
} | [
"def elasticsearch_format(self, entry):\n date_obj = self.parse_date(entry[\"reg_date\"])\n entry[\"reg_date\"] = datetime.strftime(date_obj, \"%Y-%m-%dT%H:%M:%S.000Z\")\n # all bulk data need meta data describing the data\n meta_dict = {\n \"index\": {\n \"_ind... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns an initialised ci.CompassInterface object. Note `Depends` adds the OAuth2 integration with OpenAPI. | async def ci_user(request: requests.Request, token: str = Depends(oauth2_scheme)) -> ci.CompassInterface:
return await get_current_user(request, token) | [
"def overlayCompassRose(self, compassRose=None):\r\n SlTrace.lg(\"overlayCompassRose\", \"compass_rose\")\r\n canvas = self.get_canvas()\r\n if canvas is None:\r\n return\r\n sc = self.sc\r\n gmi = self.get_gmi()\r\n if compassRose is not None:\r\n sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Loads a text file | def load_text_file(self):
with open(self.file_name, "r") as filino:
data = filino.readlines()
return data | [
"def _load_text_from_file(self, file_path):\n try:\n return open(file_path, 'r').read()\n except IOError:\n print \"Warning: File name not found.\"\n print \"No text loaded.\"\n return str()",
"def load_file(self, file_path):\n ...",
"def open_fil... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the tokens .by_resource_server, ensure that only one token was retrieved, and return that token. If the token_data includes a "refresh_token" field, update self.refresh_token to that value. | def _extract_token_data(self, res: OAuthTokenResponse) -> dict[str, t.Any]:
token_data_list = list(res.by_resource_server.values())
if len(token_data_list) != 1:
raise ValueError(
"Attempting refresh for refresh token authorizer "
"didn't return exactly one to... | [
"def refresh_tokens(self) -> Dict[str, Union[str, int]]:\n LOGGER.info(\"Refreshing tokens ...\")\n token = self._oauth.refresh_token(f\"{self.host}{ENDPOINT_TOKEN}\")\n\n if self.token_updater is not None:\n self.token_updater(token)\n\n return token",
"def refresh_token(se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Prepare a line for recognition; this inverts it, transposes it, and pads it. | def prepare_line(line,pad=16):
line = line * 1.0/np.amax(line)
line = np.amax(line)-line
line = line.T
if pad>0:
w = line.shape[1]
line = np.vstack([np.zeros((pad,w)),line,np.zeros((pad,w))])
return line | [
"def _fill_line(line):\n # Length must be 164: TID, FGCID, IFX, IFY, 4 * 10 * (PFiPX, PFjPY, occupancy, speed)\n while len(line) < 164:\n line.append('')\n line.append('')\n line.append('')\n line.append('')\n return line",
"def _prepare_line(self, line):\r\n return lin... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute a vector consisting of the Euclidean norm of the rows of the 2D array. | def rownorm(a):
return np.sum(np.array(a)**2,axis=1)**.5 | [
"def norm(self) -> float:\n return np.sqrt(self.inner_product(self).real)",
"def euclidean_norm(self) -> float:\n return self._euclidean_norm",
"def l2_norm(x):\n return np.sqrt(np.dot(x.T, x))",
"def norm(self) -> ScalarFunction:\n a = sympy.Integer(0)\n for i in self._vec:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sum the outer products of the `us` and `vs`. Values are clipped into the range `[lo,hi]`. This is mainly used for computing weight updates in logistic regression layers. | def sumouter(us,vs,lo=-1.0,hi=1.0,out=None):
result = out or np.zeros((len(us[0]),len(vs[0])))
for u,v in zip(us,vs):
result += np.outer(np.clip(u,lo,hi),v)
return result | [
"def outerprod(u,v):\n\tW = torch.einsum('...i,...j->...ij',u,v)\n\treturn W",
"def __mul__(self, *args):\n return _vnl_vectorPython.vnl_vectorUS___mul__(self, *args)",
"def uAvProductErrorProp(u, v, S):\n u = np.matrix(u).reshape(1,3)\n v = np.matrix(v).reshape(1,3)\n rows = S.shape[0]\n col... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return all weights as a single vector. This is mainly a convenience function for plotting. | def allweights(self):
aw = list(self.weights())
weights,derivs,names = list(zip(*aw))
weights = [w.ravel() for w in weights]
derivs = [d.ravel() for d in derivs]
return np.concatenate(weights),np.concatenate(derivs) | [
"def get_weights(self, ):\n return [w for l in self.weights for w in l.flat]",
"def weights ( self ) :\n N = len ( self ) \n return array ( 'd' , ( self.weight ( i ) for i in range ( N ) ) )",
"def get_weights(self):\r\n return self.weights ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update the weights using the deltas computed in the last forward/backward pass. Subclasses need not implement this, they should implement the `weights` method. | def update(self):
if not hasattr(self,"verbose"):
self.verbose = 0
if not hasattr(self,"deltas") or self.deltas is None:
self.deltas = [np.zeros(dw.shape) for w,dw,n in self.weights()]
for ds,(w,dw,n) in zip(self.deltas,self.weights()):
ds.ravel()[:] = self.mo... | [
"def update_weights(self):\n for layer in xrange(len(self.weights)):\n self.update_weights_layer(layer)",
"def _update_weights(self, _batch_weight_gradients):\n for _weight_gradient in _batch_weight_gradients:\n _weight_gradient = list(reversed(_weight_gradient))\n f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the internal state array for the last forward propagation. This is mostly used for visualizations. | def states(self):
return np.array(self.state[:self.last_n]) | [
"def get_state(self) -> np.ndarray:\n return np.copy(self.state)",
"def get_state(self) -> numpy.ndarray:\n env_data = [\n bool(self.gym_env.lander.awake),\n bool(self.gym_env.game_over),\n copy.copy(self.gym_env.prev_shaping),\n copy.copy(self.gym_env.pre... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Allocate space for the internal state variables. `n` is the maximum sequence length that can be processed. | def allocate(self,n):
ni,ns,na = self.dims
vars = "cix ci gix gi gox go gfx gf"
vars += " state output gierr gferr goerr cierr stateerr outerr"
for v in vars.split():
setattr(self,v,np.nan*np.ones((n,ns)))
self.source = np.nan*np.ones((n,na))
self.sourceerr = ... | [
"def alloc(n):\n if Coefficients._nvx:\n nvx = Coefficients._nvx\n else:\n nvx = n\n Coefficients._aP = np.zeros(nvx)\n Coefficients._aE = np.zeros(nvx)\n Coefficients._aW = np.zeros(nvx)\n Coefficients._Su = np.zeros(nvx)\n Coefficients._Sp = n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Perform forward propagation of activations and update the internal state for a subsequent call to `backward`. Since this performs sequence classification, `xs` is a 2D array, with rows representing input vectors at each time step. Returns a 2D array whose rows represent output vectors for each input vector. | def forward(self,xs):
ni,ns,na = self.dims
assert len(xs[0])==ni
n = len(xs)
self.last_n = n
N = len(self.gi)
if n>N: raise RecognitionError("input too large for LSTM model")
self.reset(n)
forward_py(n,N,ni,ns,na,xs,
self.source,
... | [
"def forward_states(X, wx, wRec):\n # Initialise the matrix that holds all states for all input sequences.\n # The initial state s0 is set to 0.\n S = np.zeros((X.shape[0], X.shape[1]+1))\n # Use the recurrence relation defined by update_state to update the \n # states trough time.\n for k in ran... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
An MLP implementation by stacking two `Logreg` networks on top of each other. | def MLP1(Ni,Ns,No):
lr1 = Logreg(Ni,Ns)
lr2 = Logreg(Ns,No)
stacked = Stacked([lr1,lr2])
return stacked | [
"def compute_log_reg(self):\n \n self.X = self.data.iloc[:,:-1].values\n self.X = sm.add_constant(self.X)\n self.y = self.data.iloc[:,-1]\n self.model = sm.Logit(self.y, self.X).fit(disp=False)",
"def stack_models_and_evaluate_accuracy(train_probas, val_probas, test_probas, y_tr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
An LSTM layer with a `Logreg` layer for the output. | def LSTM1(Ni,Ns,No):
lstm = LSTM(Ni,Ns)
if No==1:
logreg = Logreg(Ns,No)
else:
logreg = Softmax(Ns,No)
stacked = Stacked([lstm,logreg])
return stacked | [
"def lstm_layer(return_sequences=True):\n return LSTM(240, dropout= 0.1, recurrent_dropout= 0.1,\n return_sequences=return_sequences)",
"def compute_log_reg(self):\n \n self.X = self.data.iloc[:,:-1].values\n self.X = sm.add_constant(self.X)\n self.y = self.data.iloc[... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a list of target classes `cs` and a total maximum number of classes, compute an array that has a `1` in each column and time step corresponding to the target class. | def make_target(cs,nc):
result = np.zeros((2*len(cs)+1,nc))
try:
for i,j in enumerate(cs):
result[2*i,0] = 1.0
result[2*i+1,j] = 1.0
result[-1,0] = 1.0
except(IndexError):
LOG.critical('Cannot index target class. Did you load a model that was trained on less c... | [
"def fast_cm(tru, pred, num_classes):\r\n bin = tru * num_classes + pred\r\n h = np.bincount(bin, minlength=num_classes*num_classes)\r\n return h.reshape((num_classes, num_classes))",
"def competitive_learning(x, classes, c = 1):\n a = -5\n b = 5\n w = (b - a)*np.random.random_sample((x.shape[1]... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Perform alignment between the `outputs` of a neural network classifier and some targets. The targets themselves are a time sequence of vectors, usually a unary representation of each target class (but possibly sequences of arbitrary posterior probability distributions represented as vectors). | def ctc_align_targets(outputs,targets,threshold=100.0,verbose=0,debug=0,lo=1e-5):
outputs = np.maximum(lo,outputs)
outputs = outputs * 1.0/np.sum(outputs,axis=1)[:,np.newaxis]
# first, we compute the match between the outputs and the targets
# and put the result in the log domain
match = np.dot(ou... | [
"def process_outputs(self, outputs, image_size):\n boxes = []\n box_class = []\n box_confidences = []\n i = 0\n for output in outputs:\n boxes.append(output[:, :, :, 0:4])\n box_class.append(self.sigmoid(output[:, :, :, 5:]))\n box_confidences.appe... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a codec containing ASCII characters plus the default character set from ocrolib. | def ocropus_codec():
import ocrolib
base = [c for c in ascii_labels]
base_set = set(base)
extra = [c for c in ocrolib.chars.default if c not in base_set]
return Codec().init(base+extra) | [
"def test_default(self):\n self.assertEqual(Codec.default(), Latin1Codec())",
"def get_data_encoding():",
"def getdefaultencoding():\n\tpass",
"def setdefaultencoding(name):\n\tpass",
"def register_codec():\n def inner_register(encoding):\n if encoding != 'cly':\n return None\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get internal states of an LSTM network for making nice state plots. This only works on a few types of LSTM. | def getstates_for_display(net):
if isinstance(net,LSTM):
return net.state[:net.last_n]
if isinstance(net,Stacked) and isinstance(net.nets[0],LSTM):
return net.nets[0].state[:net.nets[0].last_n]
return None | [
"def getStates():",
"def __getstate__(self):\n W_list = []\n bhid_list = []\n bvis_list = []\n for layer in self.dA_layers:\n W, bhid, bvis = layer.get_params()\n W_list.append(W.get_value(borrow=True))\n bhid_list.append(bhid.get_value(borrow=True))\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get or set the SEM status | def sem_status(self, voltage=-1, turn_off=False, turn_on=False):
if voltage > -1:
self.comm('SEM ' + str(voltage))
ret_string = self.status('RDE', 4)
else: #NOT IMPLEMENTED
ret_string = self.status('RDE', 4)
sem_voltage = int(ret_string)
if turn_off ... | [
"def advapi32_QueryServiceLockStatus(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"hSCManager\", \"lpLockStatus\", \"cbBufSize\", \"pcbBytesNeeded\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def read_sem_voltage(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get or set the emission status. | def emission_status(self, current=-1, turn_off=False, turn_on=False):
emission_current = -1
if turn_off ^ turn_on:
if turn_off:
self.comm('EMI 0')
if turn_on:
self.comm('EMI 1')
ret_string = self.status('ROP', 3)
filament_on = ret_... | [
"def get_status (self):\n return self.__status",
"def status(self) -> Optional['outputs.EntityStatus']:\n return pulumi.get(self, \"status\")",
"def set_power_outage_event_status(self, status, timeout=RESPONSE_DELAY):\n\n command.create_set_command(\n command.PROTOCOL_COMMAND_SET_P... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read the voltages on the lens system | def read_voltages(self):
print 'Not possible on this QMG model' | [
"def read_voltage(self):\n return self.read_raw() * self._scale_factor",
"def get_voltage(self):\n pass",
"def Read_Voltage(self):\n self.voltage = -999.0\n self.current = -999.0\n try:\n if self.ser.isOpen():\n self.ser.flushInput()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read the selected SEM voltage | def read_sem_voltage(self):
sem_voltage = self.status('RDE', 4)
return sem_voltage | [
"def read_voltages(self):\n print 'Not possible on this QMG model'",
"def Read_Voltage(self):\n self.voltage = -999.0\n self.current = -999.0\n try:\n if self.ser.isOpen():\n self.ser.flushInput()\n time.sleep(0.1)\n self.ser.writ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read the preamp range | def read_preamp_range(self):
preamp_index = self.status('RDE', 1)
preamp_range = self.ranges(index=preamp_index)
return preamp_range | [
"def test_get_range(self):\n loader = Loader('./tests/example.npz')\n loader.load_file()\n data_range = loader.get_range()\n self.assertEqual(np.float16(2.156), data_range[0])\n self.assertEqual(np.float16(21.94), data_range[1])",
"def read_range(read):\n range_pb = range_pb2.R... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return true if measurement is running | def measurement_running(self):
running = self.comm('STW')[6] == '0'
return running | [
"def running(self):\n return self.status == \"STARTED\"",
"def is_running(self):\n return self.motors.are_running()",
"def isRunning (self):\n\t\tjobid = self.job.id ()\n\t\tif not jobid:\n\t\t\treturn False\n\t\treturn Popen (['qstat', '-j', jobid], stdout=open(devnull, 'w'), stderr=open(devnull,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read exactly `remaining` bytes from the socket. Blocks until the required bytes are available and return the data read as raw bytes. Call to this function blocks until required bytes are available in the socket. Arguments | def read_k_bytes(sock, remaining=0):
ret = b"" # Return byte buffer
while remaining > 0:
d = sock.recv(remaining)
ret += d
remaining -= len(d)
return ret | [
"def read_socket(self):\n data = b''\n part = self.s.recv(4096)\n data += part\n while len(part) == 4096: # QUESTION: what if it's *exactly* 4096?\n part = self.s.recv(4096)\n data += part\n return data",
"def recv_bytes(self, amount=1024):\n return ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Send a frame to remote socket. We first send the size of frame in bytes followed by the actual frame. frame is serialized using cPickle module. Arguments | def send_frame(sock, frm):
if frm is None or (sock is None or type(sock) != socket.socket):
return # Nothing to send
frm_raw_bytes = pickle.dumps(frm)
dsize = len(frm_raw_bytes)
sock.sendall(dsize.to_bytes(4, byteorder="big"))
sock.sendall(frm_raw_bytes)
return True | [
"def send_frame(self, frame):\n self.transport.write(frame.pack())",
"def send(self, frame):\n self._logger.debug('frame=\"%s\"', frame)\n content = \"\\n\".join([self._auth, str(frame)])\n return self.interface.sendto(content, self._addr)",
"def sendFrame(self, img):\n try:\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Receive a frame from the socket. Reads the size of frame first followed by the actual data. frame is then deserialized and returned as an instance of class frame. Arguments | def recv_frame(sock, timeout=None):
if sock is None or type(sock) != socket.socket:
raise TypeError("Socket expected!")
# Read the size from the channel first
if timeout is not None:
# Do not wait for more that `timeout` seconds
sock.settimeout(timeout)
try:
frm_len = in... | [
"def receive_next_frame_view(self):\n frame_length = int.from_bytes(self.socket.recv(4), self.endianness) # first we read the length of the frame\n frame_bytes = self.receive_bytes(frame_length)\n\n if self.gzip:\n try:\n return gzip.decompress(frame_bytes), frame_len... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method gets all the details of a specific artist using their login token. | def get_artist(self, request):
user = request.user
result = ArtistProfile.call(user=user)
if result.failed:
return Response(
errors=dict(errors=result.error.value),
status=status.HTTP_400_BAD_REQUEST
)
return Response(data=result.v... | [
"def get_artist(self):\n self.artist = self.spotify_client.get_artist(self.artist_name)",
"def get_artists(self, search, start=0, max_items=100):\r\n return self.get_music_service_information('artists', search, start,\r\n max_items)",
"def get_artis... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method gets all the details of a specific artist using their ID. | def get_artist_via_id(self, request, artist_id):
result = ArtistDetail.call(artist_id=artist_id)
if result.failed:
return Response(errors=dict(errors=result.error.value), status=status.HTTP_400_BAD_REQUEST)
return Response(data=result.value, status=status.HTTP_200_OK) | [
"def artist(self, artist_id):\n\n trid = self._get_id(\"artist\", artist_id)\n return self._get(\"artists/\" + trid)",
"def get_artist(self, artist_id):\n response = self.__get_data(self.url.artists_url().format(id=str(artist_id)))\n return Artist(artist_id=artist_id, name=response['na... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method gets the data analytics of a specific artist using their ID. | def get_artist_analytics_via_id(self, request, artist_id):
info = ArtistSongPerMonth.call(artist_id=artist_id)
if info.failed:
return Response(errors=dict(errors=info.error.value), status=status.HTTP_400_BAD_REQUEST)
return Response(data=info.value, status=status.HTTP_200_OK) | [
"def get_artist(id_artist: int) -> dict:\n sql_request = sql_request_artist(id_artist)\n sql_data = get_data_from_db(sql_request)\n artist = create_artist(sql_data)\n return artist",
"def artist(self, artist_id):\n\n trid = self._get_id(\"artist\", artist_id)\n return self._get(\"artists... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Base64 encodes the file 'f' | def b64(f):
with open(f, 'r') as infile:
return base64.b64encode(infile.read()) | [
"def _encode_file_base64_(self, file_path):\n encoded_file = base64.b64encode(open(file_path, 'rb').read())\n return self._base64_to_str(encoded_file)\n # return str(encoded_file)[2:-1]",
"def fio_to_b64s(fio: fileIO) -> str:\n fio.seek(0)\n b64s = base64.b64encode(fio.read()).decode('u... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
CSRF route. Set the CSRF cookie and return a `JSONResponse` with the token. We need this REST endpoint to protect against CSRF because all GraphQL queries use the POST method, so they are not safe to transmit the token. | async def csrf(request): # pylint: disable=unused-argument
token = get_new_token()
response = JSONResponse({"csrftoken": token})
response.set_cookie(
settings.CSRF_COOKIE_NAME,
token,
httponly=settings.CSRF_COOKIE_HTTPONLY,
secure=settings.CSRF_COOKIE_SECURE,
)
retur... | [
"def csrf_token():\n token = '123' # must use isdangerous module generate a csrf token\n return {'csrf_token': token}",
"async def add_csrf_token_cookie(request, response):\n token = await generate_token()\n\n # Set secure httponly csrf token\n response.cookies['t'] = token\n response.cookies['t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extract the version from the package. | def extract_version():
# Regular expression for the version
_version_re = re.compile(r"__version__\s+=\s+(.*)")
with open("pdftools/__init__.py", "r") as f:
content = f.read()
version_match = _version_re.search(content)
version = str(ast.literal_eval(version_match.group(1)))
return vers... | [
"def get_version_from_package() -> str:\n\n path = os.path.join(os.path.dirname(__file__), \"pdchaoskit/__init__.py\")\n path = os.path.normpath(os.path.abspath(path))\n with open(path) as f:\n for line in f:\n if line.startswith(\"__version__\"):\n token, version = line.sp... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This API is used to create an adaptive bitrate streaming template. Up to 100 templates can be created. | def CreateAdaptiveDynamicStreamingTemplate(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("CreateAdaptiveDynamicStreamingTemplate", params, headers=headers)
response = json.loads(body)
model = models.Creat... | [
"def create(self, template):\n raise NotImplementedError('Create Template not implemented')",
"def _create_template(self):\n Template.objects.create(\n resume=\"a test\",\n shortcut='atest',\n subject=\"a subject\",\n body=\"A body {{ testme }}\"\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This API is used to create a custom animated image generating template. Up to 16 templates can be created. | def CreateAnimatedGraphicsTemplate(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("CreateAnimatedGraphicsTemplate", params, headers=headers)
response = json.loads(body)
model = models.CreateAnimatedGraphic... | [
"def generate_image(self) -> None:",
"def _create_template(self, num_instances, num_replace=0):\r\n instance_definition = self._get_instance_definition()\r\n old_resources = self._get_instance_templates()\r\n templates = template.resource_templates(\r\n old_resources, instance_defi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This API is used to create a custom image processing template. A template can include at most 10 operations, for example, crop-scale-crop-blur-scale-crop-scale-crop-blur-scale. You can have up to 16 image processing templates. | def CreateImageProcessingTemplate(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("CreateImageProcessingTemplate", params, headers=headers)
response = json.loads(body)
model = models.CreateImageProcessingTe... | [
"def create(self, template):\n raise NotImplementedError('Create Template not implemented')",
"def _create_template(self, num_instances, num_replace=0):\r\n instance_definition = self._get_instance_definition()\r\n old_resources = self._get_instance_templates()\r\n templates = template... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This API is used to create a custom image sprite generating template. Up to 16 templates can be created. | def CreateImageSpriteTemplate(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("CreateImageSpriteTemplate", params, headers=headers)
response = json.loads(body)
model = models.CreateImageSpriteTemplateRespon... | [
"def _create_template(self, num_instances, num_replace=0):\r\n instance_definition = self._get_instance_definition()\r\n old_resources = self._get_instance_templates()\r\n templates = template.resource_templates(\r\n old_resources, instance_definition, num_instances, num_replace)\r\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This API is used to create a custom time point screencapturing template. Up to 16 templates can be created. | def CreateSnapshotByTimeOffsetTemplate(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("CreateSnapshotByTimeOffsetTemplate", params, headers=headers)
response = json.loads(body)
model = models.CreateSnapsho... | [
"def create(self, template):\n raise NotImplementedError('Create Template not implemented')",
"def _create_template(self):\n Template.objects.create(\n resume=\"a test\",\n shortcut='atest',\n subject=\"a subject\",\n body=\"A body {{ testme }}\"\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This API is used to create a VOD subapplication. | def CreateSubAppId(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("CreateSubAppId", params, headers=headers)
response = json.loads(body)
model = models.CreateSubAppIdResponse()
model._deseriali... | [
"def create_app(instanceAddress, appName, description, permission=[\n 'read:account',\n 'write:account',\n 'read:blocks',\n 'write:blocks',\n 'read:drive',\n 'write:drive',\n 'read:favorites',\n 'write:favorites',\n 'read:following',\n 'write:following',\n 'read:messaging',\n 'wr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This API is used to create a custom transcoding template. Up to 100 templates can be created. | def CreateTranscodeTemplate(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("CreateTranscodeTemplate", params, headers=headers)
response = json.loads(body)
model = models.CreateTranscodeTemplateResponse()
... | [
"def create(self, template):\n raise NotImplementedError('Create Template not implemented')",
"def create_service_template(self, payload):\n return self._request('post', path='/templates', data=json.dumps(payload), value_only=True)",
"def _create_template(self):\n Template.objects.create(\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This API is used to create a custom watermarking template. Up to 1,000 templates can be created. | def CreateWatermarkTemplate(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("CreateWatermarkTemplate", params, headers=headers)
response = json.loads(body)
model = models.CreateWatermarkTemplateResponse()
... | [
"def create(self, template):\n raise NotImplementedError('Create Template not implemented')",
"def _create_template(self, num_instances, num_replace=0):\r\n instance_definition = self._get_instance_definition()\r\n old_resources = self._get_instance_templates()\r\n templates = template... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This API is used to delete a custom video content recognition template. | def DeleteAIRecognitionTemplate(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("DeleteAIRecognitionTemplate", params, headers=headers)
response = json.loads(body)
model = models.DeleteAIRecognitionTemplate... | [
"def DeleteTagTemplate(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")",
"def DeleteTagTemplateField(self, request, context):\n context.set_code(grpc.S... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This API is used to delete an adaptive bitrate streaming template. | def DeleteAdaptiveDynamicStreamingTemplate(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("DeleteAdaptiveDynamicStreamingTemplate", params, headers=headers)
response = json.loads(body)
model = models.Delet... | [
"def destroy_cluster_template(self, cluster_template_id):",
"def delete_fcp_template(self, template_id):\n return self.db.delete_fcp_template(template_id)",
"def delete_email_template(self, template_id):\n call = \"rest/asset/v1/emailTemplate/\"+template_id+\"/delete.json\"\n method = \"POS... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This API is used to delete a custom animated image generating template. | def DeleteAnimatedGraphicsTemplate(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("DeleteAnimatedGraphicsTemplate", params, headers=headers)
response = json.loads(body)
model = models.DeleteAnimatedGraphic... | [
"def DeleteTagTemplate(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")",
"def DeleteImageProcessingTemplate(self, request):\n try:\n params ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This API is used to delete an image processing template. | def DeleteImageProcessingTemplate(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("DeleteImageProcessingTemplate", params, headers=headers)
response = json.loads(body)
model = models.DeleteImageProcessingTe... | [
"def delete_fcp_template(self, template_id):\n return self.db.delete_fcp_template(template_id)",
"def delete_email_template(self, template_id):\n call = \"rest/asset/v1/emailTemplate/\"+template_id+\"/delete.json\"\n method = \"POST\"\n return self.__generic_api_call(call, method)",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This API is used to delete an image sprite generating template. | def DeleteImageSpriteTemplate(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("DeleteImageSpriteTemplate", params, headers=headers)
response = json.loads(body)
model = models.DeleteImageSpriteTemplateRespon... | [
"def delete_sprite(self, delete):\n if delete:\n self.__inactive_sprite = True\n else:\n self.__inactive_sprite = False",
"def DeleteImageProcessingTemplate(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This API is used to delete a custom sampled screencapturing template. | def DeleteSampleSnapshotTemplate(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("DeleteSampleSnapshotTemplate", params, headers=headers)
response = json.loads(body)
model = models.DeleteSampleSnapshotTempl... | [
"def delete(service_template_name, model_storage, resource_storage, plugin_manager, logger):\n logger.info('Deleting service template {0}...'.format(service_template_name))\n service_template = model_storage.service_template.get_by_name(service_template_name)\n core = Core(model_storage, resource_storage, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This API is used to delete a custom time point screencapturing template. | def DeleteSnapshotByTimeOffsetTemplate(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("DeleteSnapshotByTimeOffsetTemplate", params, headers=headers)
response = json.loads(body)
model = models.DeleteSnapsho... | [
"def destroy_cluster_template(self, cluster_template_id):",
"def delete(service_template_name, model_storage, resource_storage, plugin_manager, logger):\n logger.info('Deleting service template {0}...'.format(service_template_name))\n service_template = model_storage.service_template.get_by_name(service_tem... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This API is used to delete a custom transcoding template. | def DeleteTranscodeTemplate(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("DeleteTranscodeTemplate", params, headers=headers)
response = json.loads(body)
model = models.DeleteTranscodeTemplateResponse()
... | [
"def DeleteTagTemplate(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")",
"def delete_email_template(self, template_id):\n call = \"rest/asset/v1/emailT... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This API is used to delete a custom watermarking template. | def DeleteWatermarkTemplate(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("DeleteWatermarkTemplate", params, headers=headers)
response = json.loads(body)
model = models.DeleteWatermarkTemplateResponse()
... | [
"def DeleteTagTemplate(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")",
"def delete_template(request):\n if request.method == 'POST':\n tid = int(r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This API is used to delete keyword samples in batches. | def DeleteWordSamples(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("DeleteWordSamples", params, headers=headers)
response = json.loads(body)
model = models.DeleteWordSamplesResponse()
model._... | [
"def delete_documents(self, content_source_key, ids):\n endpoint = \"sources/{}/documents/bulk_destroy\".format(content_source_key)\n return self.session.request(\"post\", endpoint, json=ids)",
"def delete_message_batch(self, **kwargs):\n for entry in kwargs['Entries']:\n self.queu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This API is used to query the download links of CDN access logs of a VOD domain name. 1. Only download links of CDN logs for the last 30 days can be queried. 2. By default, CDN generates a log file every hour. If there is no CDN access for a certain hour, no log file will be generated for the hour. 3. A CDN log downloa... | def DescribeCdnLogs(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("DescribeCdnLogs", params, headers=headers)
response = json.loads(body)
model = models.DescribeCdnLogsResponse()
model._deseri... | [
"def downloadList():\n # quicky function to grab the filenames from the download log\n config = getConfig()\n filenames = []\n guids = []\n logdict = {}\n if os.path.exists( os.path.sep.join( (config.options[\"homedir\"], \"download.log\") )):\n log = open( os.path.sep.join( (config.options... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This API is used to get file attributes asynchronously. Currently, this API can only get the MD5 hash of a file. If the file queried is in HLS or DASH format, the attributes of the index file will be returned. | def DescribeFileAttributes(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("DescribeFileAttributes", params, headers=headers)
response = json.loads(body)
model = models.DescribeFileAttributesResponse()
... | [
"def file_attributes(self):\n ret = self._get_attr(\"fileAttributes\")\n return ret",
"def get_file_info(self, file_id):\n return self.__request(\"GET\", \"files/%s\" % (file_id, ))",
"def get_metadata(self, filename):\n f, metadata = self.api_client.get_file_and_metadata(\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |