query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | negatives (list, lengths 19–20) | metadata (dict)
|---|---|---|---|
Extracts indices from the quad tensor | def quad2ind(T):
return poly2ind(T) | [
"def _get_update_indices(self, B: int, S: int, Q: int, q: int) -> TensorType:\n\n idxB = tf.tile(tf.range(B, dtype=tf.int32)[:, None, None], (1, S, 1))\n idxS = tf.tile(tf.range(S, dtype=tf.int32)[None, :, None], (B, 1, 1))\n idxQ = tf.tile(tf.convert_to_tensor(q)[None, None, None], (B, S, 1))\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
With correct values, are valid distribution centers returned? | def test_get_distribution_centers_success(self):
# Get distribution centers
distribution_centers = distribution_center_service.get_distribution_centers(self.loopback_token)
# TODO: Update to use assertIsInstance(a,b)
# Check all expected object values are present
distribution_c... | [
"def find_center(self):\n st = np.argsort(self.cluster.neighbors.pmem)[::-1]\n\n pdf = self.cluster.neighbors.pmem[st]\n pdf /= np.sum(pdf)\n cdf = np.cumsum(pdf, dtype=np.float64)\n cdfi = (cdf * st.size).astype(np.int32)\n\n rand = (np.random.uniform(size=1) * st.size).as... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the heading from the compass in the range [0, 2π). | def get_heading(self) -> float:
x, _, z = self._compass.getValues()
heading = atan2(x, z) % tau
return add_independent_jitter(heading, 0, tau, std_dev_percent=0.4, can_wrap=True) | [
"def get_heading(self):\n [_, _, _, _, _, _, mx, my, _] = self.get_axis()\n heading = 180 * math.atan2(my, mx) / math.pi\n if heading < 0:\n heading += 360\n return float(\"{0:.2f}\".format(heading))",
"def heading_corrector(self, heading):\n\t\theading = heading % (2 * pi)\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
REQUIRED FOR ALL VERSIONS. Indexes the contents of a file. To tokenize a news article, "self.tokenize" must be called. Depending on the values of "self.multifield" and "self.positional", the indexing must be extended. In those cases, creating new methods is recommended to keep the implementation simple. | def index_file(self, filename):
# A file is made up of news articles, each article of five fields, and each field of tokens
with open(filename) as fh:
jlist = json.load(fh)
self.docs[self.doc_cont] = filename
# Counter for the position of a news article within a file... | [
"def index_file(self, filename):\n\n with open(filename) as fh:\n if self.multifield:\n self.sections = ['title', 'keywords', \"article\", 'summary'] # si es multifield se actualizan las secciones a indexar\n\n self.doc_id += 1 # id del filename (clave)\n self.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
REQUIRED FOR THE PERMUTERM EXTENSION. Creates the permuterm index (self.ptindex) for the terms of all the indices. | def make_permuterm(self):
# If the multifield option is enabled
if self.multifield:
multifield = ['title', 'date', 'keywords', 'article', 'summary']
else:
multifield = ['article']
for field in multifield:
# Build the list of permuterms for a token
... | [
"def make_permuterm(self):\n ####################################################\n ## COMPLETAR PARA FUNCIONALIDAD EXTRA DE STEMMING ##\n ####################################################\n\n if self.multifield:\n # Searching for the different fields\n f = []\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update x1 and x2. | def update_x1_x2(self, x1, x2, fn, ut, delta_t):
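# Semi-implicit Euler step: integrate the acceleration (fn + ut) into x2 first, then advance x1 with the updated x2.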
dx2 = fn + ut
x2 = x2 + dx2 * delta_t
x1 = x1 + x2 * delta_t
return x1, x2 | [
"def updatePoints(self, x, y):",
"def increase_coordinates(self, x, y):\n if x:\n self.x += x\n if y:\n self.y += y",
"def update_value(self, node1, label, node2):\n values = self.get_values(node1, label)\n if len(values) != 1:\n raise KGTKException('... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Patches `pip install` to provide default certificate with the lowest priority. This ensures that the bundled certificates are used unless the user specifies a custom cert via any of pip's option passing mechanisms (config, envvar, CLI). A monkeypatch is the easiest way to achieve this, without messing too much with the... | def monkeypatch_for_cert(tmpdir):
from pip._internal.commands.install import InstallCommand
# We want to be using the internal certificates.
cert_path = os.path.join(tmpdir, "cacert.pem")
with open(cert_path, "wb") as cert:
cert.write(pkgutil.get_data("pip._vendor.certifi", "cacert.pem"))... | [
"def allow_self_signed_certificate():\n\n try:\n _create_unverified_https_context = ssl._create_unverified_context\n ssl._create_default_https_context = _create_unverified_https_context\n except AttributeError:\n # legacy Python that doesn't verify HTTPS certificates by default\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Takes in X_train, X_validate, and X_test dfs with numeric values only. Returns scaler, X_train_scaled, X_validate_scaled, and X_test_scaled dfs. | def Min_Max_Scaler(X_train, X_validate, X_test):
scaler = MinMaxScaler().fit(X_train)
X_train_scaled = pd.DataFrame(scaler.transform(X_train), index = X_train.index, columns = X_train.columns)
X_validate_scaled = pd.DataFrame(scaler.transform(X_validate), index = X_validate.index, columns = X_validate.colum... | [
"def generate_scaled_splits(train, validate, test, scaler=MinMaxScaler()):\n scaler.fit(train)\n\n train_scaled = pd.DataFrame(scaler.transform(train), columns=train.columns)\n validate_scaled = pd.DataFrame(scaler.transform(validate), columns=validate.columns)\n test_scaled = pd.DataFrame(scaler.transf... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
r""" Return an example of a complex reflection group. | def example(self):
from sage.combinat.root_system.reflection_group_real import ReflectionGroup
return ReflectionGroup((1,1,3), (2,1,2)) | [
"def example(self):\n from sage.combinat.root_system.reflection_group_real import ReflectionGroup\n return ReflectionGroup((1,1,3), (3,1,2))",
"def example(self, G = None):\n from sage.groups.perm_gps.permgroup_named import DihedralGroup\n if G is None:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
r""" Return the degrees of ``self``. | def degrees(self): | [
"def degrees(self):\n return int(self)",
"def _get_degree(self) -> \"int\" :\n return _core.NurbsCurve2D__get_degree(self)",
"def _get_degree(self) -> \"int\" :\n return _core.NurbsCurve3D__get_degree(self)",
"def toDegree(self):\n self.x = self.x * 180 / pi\n self.y = self.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
r""" Return the number of reflection hyperplanes of ``self``. This is also the number of distinguished reflections. For real groups, this coincides with the number of reflections. This implementation uses that it is given by the sum of the codegrees of ``self`` plus its rank. | def number_of_reflection_hyperplanes(self):
from sage.rings.all import ZZ
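# Sum of the codegrees of self plus its rank: each of the rank-many codegrees contributes codeg + 1.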
return ZZ.sum(codeg+1 for codeg in self.codegrees()) | [
"def coxeter_number(self):\n return (self.number_of_reflection_hyperplanes()\n + self.number_of_reflections()) // self.rank()",
"def number_of_reflections_of_full_support(self):\n n = self.rank()\n h = self.coxeter_number()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
r""" Return the cardinality of ``self``. It is given by the product of the degrees of ``self``. | def cardinality(self):
from sage.rings.all import ZZ
return ZZ.prod(self.degrees()) | [
"def cardinality(self, m):\n if self.tripleBuilder:\n self.tripleBuilder.card(m, \"cardinality\")\n return self",
"def get_cardinality(self):\r\n return len(self.dict_idx2elem)",
"def degree_on_basis(self, I):\n return I.size()",
"def getCapacityFactor(self): \n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
r""" Return whether ``self`` is wellgenerated. A finite complex reflection group is well generated if the number of its simple reflections coincides with its rank. | def is_well_generated(self):
return self.number_of_simple_reflections() == self.rank() | [
"def is_well_generated(self):\n return True",
"def is_real(self):\n return self.degrees().count(2) == self.number_of_irreducible_components()",
"def is_injective(self):\n # Some matrix representation is picked at random:\n matrix_rep = self._matrices.values()[0]\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
r""" Return whether ``self`` is real. A complex reflection group is real if it is isomorphic to a reflection group in `GL(V)` over a real vector space `V`. Equivalently its character table has real entries. | def is_real(self):
return self.degrees().count(2) == self.number_of_irreducible_components() | [
"def isreal(self):\n return np.all(np.isreal(self.data))\n # return np.isrealobj(self._data)",
"def iscomplex(self):\n return np.any(np.iscomplex(self.data))\n # return np.iscomplexobj(self._data)",
"def is_real(dtype: DTypeLike) -> bool:\n dtype = _normalize_type(dtype)\n retu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return ``self`` as a matrix. | def _matrix_(self):
return self.to_matrix() | [
"def tomatrix(self):\n from sympy.matrices import Matrix\n\n if self.rank() != 2:\n raise ValueError('Dimensions must be of size of 2')\n\n return Matrix(self.shape[0], self.shape[1], self._array)",
"def getMatrix(self) -> \"SbMatrix &\":\n return _coin.SoGetMatrixAction_get... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
r""" Return the Coxeter number of an irreducible reflection group. This is defined as `\frac{N + N^}{n}` where `N` is the number of reflections, `N^` is the number of reflection hyperplanes, and `n` is the rank of ``self``. | def coxeter_number(self):
return (self.number_of_reflection_hyperplanes()
+ self.number_of_reflections()) // self.rank() | [
"def number_of_reflection_hyperplanes(self):\n from sage.rings.all import ZZ\n return ZZ.sum(codeg+1 for codeg in self.codegrees())",
"def number_of_reflections_of_full_support(self):\n n = self.rank()\n h = self.coxeter_number()\n l =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
r""" Return an example of a wellgenerated complex reflection group. | def example(self):
from sage.combinat.root_system.reflection_group_real import ReflectionGroup
return ReflectionGroup((1,1,3), (3,1,2)) | [
"def example(self):\n from sage.combinat.root_system.reflection_group_real import ReflectionGroup\n return ReflectionGroup((1,1,3), (2,1,2))",
"def example(self, G = None):\n from sage.groups.perm_gps.permgroup_named import DihedralGroup\n if G is None:\n G = Dih... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
r""" Return ``True`` as ``self`` is wellgenerated. | def is_well_generated(self):
return True | [
"def is_well_generated(self):\n return self.number_of_simple_reflections() == self.rank()",
"def Explicit(self) -> bool:",
"def __bool__(self):\n return not hasattr(self, 'missing')",
"def hasManual(self) -> bool:\n ...",
"def justEvaluated(self) -> bool:\r\n return False",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
r""" Return the (unique) conjugacy class in ``self`` containing all Coxeter elements. | def coxeter_elements(self):
return self.coxeter_element().conjugacy_class() | [
"def conjugacy_class(self):\n return self.parent().conjugacy_class(self)",
"def conjugacy_class(self, g):\n from sage.groups.conjugacy_classes import ConjugacyClass\n return ConjugacyClass(self, g)",
"def unique_classes(self):\n return self.__unique_classes",
"def GetCo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
r""" Return the number of reflections with full support. | def number_of_reflections_of_full_support(self):
n = self.rank()
h = self.coxeter_number()
l = self.cardinality()
codegrees = self.codegrees()[:-1]
return (n * h * prod(codegrees)) // l | [
"def number_of_reflection_hyperplanes(self):\n from sage.rings.all import ZZ\n return ZZ.sum(codeg+1 for codeg in self.codegrees())",
"def coxeter_number(self):\n return (self.number_of_reflection_hyperplanes()\n + self.number_of_reflections()) // self.r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
r""" Return the ``p``th rational Catalan number associated to ``self``. It is defined by | def rational_catalan_number(self, p, polynomial=False):
from sage.arith.all import gcd
from sage.combinat.q_analogues import q_int
h = self.coxeter_number()
if not gcd(h,p) == 1:
raise ValueError("parameter p = %s i... | [
"def c_residue(a, p):\n if p == 3:\n if a % p == 0:\n return 0\n else:\n return 1\n elif p % 3 == 1:\n b1, b2 = decomposite_p(p)\n return c_symbol(a, 0, b1, b2)\n else:\n return c_symbol(a, 0, p, 0)",
"def getDigit(self, x, p):\n return x //... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
r""" Return the Catalan number associated to ``self``. It is defined by | def catalan_number(self, positive=False, polynomial=False):
return self.fuss_catalan_number(1, positive=positive,
polynomial=polynomial) | [
"def catalan(num):\r\n if num == 1:\r\n return 1\r\n else:\r\n return int((4*(num-1)+2)/(num+1)*catalan(num-1))",
"def c1(self):\n return self.counter_get('C1')",
"def number(self):\n if hasattr(self, 'number'):\n return self.number\n else:\n return... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete an active login token from the activeTokens list. | def deleteToken(self, token):
taskMgr.remove(token.getDeleteTask())
print 'Deactivated token: %s' % token
print 'Token: %s, IP: %s' % (token.getToken(), token.getIP())
token.cleanup()
self.activeTokens.remove(token)
print 'Tokens: %s' % self.activeTokens | [
"def delete_auth_token(self):\n if not self.token:\n return Exception('No token loaded, unable to delete.')\n response = self.api_request(method='DELETE', path='auth/%s/' %self.token)\n return response",
"def delete_token(self, token):\n raise NotImplementedError",
"def co... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Conjugate multiple verbs using multiprocessing. | def conjugate(self, verbs, subject='abbrev'):
if isinstance(verbs, str):
# If only a single verb is passed, call the _conjugate method directly
return self._conjugate(verbs, subject)
else:
with ProcessPoolExecutor() as executor:
results = list(executor... | [
"def conjugate(x):\n\n pass",
"def conjugate(self):\n self.imaginary *= -1",
"def test_conjugation_optimization(simple_drudge):\n\n dr = simple_drudge\n\n a, b, c, d = dr.ds[:4]\n\n p = IndexedBase('p')\n x = IndexedBase('x')\n y = IndexedBase('y')\n z = IndexedBase('z')\n\n targe... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This is the main method of this class. It first checks to see if the verb is in Verbiste. If it is not, and a pretrained scikit-learn pipeline has been supplied, the method then calls the pipeline to predict the conjugation class of the provided verb. Returns a Verb object or None. | def _conjugate(self, verb, subject='abbrev'):
verb = verb.lower()
prediction_score = 0
if not self.conjug_manager.is_valid_verb(verb):
logger.warning(
_('The supplied word: {0} is not a valid verb in {1}.').format(verb, LANGUAGE_FULL[self.language]))
retur... | [
"def get_text_predict_pipeline(self,choice,inputCol):\n c = Components()\n allStages = [c.getDocumentAssembler(inputCol,\"document\"),c.getTokenizer(\"document\",\"tokens\"), \n c.getNormalizer(\"tokens\",\"normalized\"),c.getStopWordCleaner(\"normalized\",\"cleaned\"), \n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the path to a dataset directory of given name, possibly create it if it doesn't exist. Arguments | def get_dataset_dir(dataset_name, datasets_base_dir=None, create=True):
base_dir = find_datasets_base_dir(datasets_base_dir)
full_path = os.path.join(base_dir, dataset_name)
if os.path.exists(full_path):
return full_path
elif create:
os.makedirs(full_path)
return full_path
e... | [
"def _get_dataset_dir(dataset_name, data_dir=None):\n if not data_dir:\n data_dir = os.path.join(os.getcwd(), 'Data')\n data_dir = os.path.join(data_dir, dataset_name)\n return data_dir",
"def get_dataset_dir():\n # current_dir = get_project_dir()\n # return os.path.join(current_dir, 'data')... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Helper function which prepares input data for custom myrange2 and myrange3 functions. It makes sure that at least one but no more than three integers were supplied. Returns tuple of three integers, where the last number is never zero. Can raise TypeError or ValueError exceptions in case of wrong input data. | def _get_arguments(*args):
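# One small helper per supported argument count; the 'selector' below picks the matching one.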
def no_arguments():
raise TypeError('range expected at least 1 arguments, got 0')
def one_argument():
return 0, args[0], 1
def two_arguments():
return args[0], args[1], 1
def three_arguments():
return args[0], args[1], args[2]
selector ... | [
"def case_three_negative_ints():\n return -1, -2, -6",
"def _valid_range(self, spec):\n valid = None\n for key, dim in spec.items():\n assert key in self.data\n v = self.data[key].valid_range(dim[-3:])\n if v is None:\n raise Dataset.OutOfRangeError... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Converts a function to a batched version (maps over multiple inputs). This takes a function that returns a scalar (such as a loss function) and returns a new function that maps the function over multiple arguments (such as over multiple random seeds) and returns the average of the results. It is useful for generating a... | def batch_mean(fun, in_axes):
mapped_fun = jax.vmap(fun, in_axes=in_axes)
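# vmap maps fun over the axes given by in_axes; the mean then collapses the batch to a scalar.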
def batch_fun(*args):
return jnp.mean(mapped_fun(*args))
return batch_fun | [
"def sum_over_batch_size(loss_function):\n def wrapper(*args, **kwargs):\n batch_loss = fn.sum(loss_function(*args, **kwargs))\n batch_loss.data /= np.size(args[0].data)\n return batch_loss\n return wrapper",
"def make_average(fn, num_samples=100):\n def avg_fn(*args):\n i = 0... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Builds a function that generates a binary mask. For example, `f = build_mask(5)` returns a function that generates masks of total length 5. Calling this function with an array of integers, e.g. | def build_mask(max_length: int):
def mask_fun(index: jnp.array) -> jnp.array:
"""Builds a binary mask."""
return jnp.where(
jnp.arange(max_length) < index, jnp.ones(max_length),
jnp.zeros(max_length))
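# vmap lifts the single-index mask builder to operate on a batch of indices.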
return jax.vmap(mask_fun) | [
"def make_mask(size, idx_true=None):\r\n\r\n # TODO: make work for n dimensional? is this something the np.ma module could do better?\r\n\r\n if idx_true is None:\r\n idx_true = list(range(size))\r\n\r\n mask = []\r\n for i in range(size):\r\n if i in idx_true:\r\n mask += [True... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given an array of shape (number_of_sequences, sequence_length, element_dimension), and a 1D array specifying which indices of each sequence to select, return a (number_of_sequences, element_dimension)-shaped array with the selected elements. | def select(sequences, indices):
assert len(indices) == sequences.shape[0]
# shape indices properly
indices_shaped = indices[:, jnp.newaxis, jnp.newaxis]
# select element
selected_elements = jnp.take_along_axis(sequences, indices_shaped, axis=1)
# remove sequence dimension
selected_elements = jnp.squee... | [
"def search_sequence_numpy(arr):\n seq = np.array([1,1,1])\n # Store sizes of input array and sequence\n Na, Nseq = arr.size, seq.size\n\n # Range of sequence\n r_seq = np.arange(Nseq)\n\n # Create a 2D array of sliding indices across the entire length of input array.\n # Match up with the inpu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given the network function, the basic loss function, and a regularization function, return a loss function which maps a tuple of network parameters and a training batch to a loss value. | def make_loss_function(network_apply_fun, basic_loss_fun, regularization_fun):
def total_loss_fun(params, batch):
"""
Maps network parameters and training batch to a loss value.
Args:
batch: a dictionary with keys ['inputs', 'index', 'labels']
'inputs': sequence of inputs with shape (batch... | [
"def calculate_loss(self, activations, labels):\n\n # get the regularisation for each layer in the model\n regularisation = 0.0\n for layer in self.layers:\n regularisation += layer.get_regularisation()\n\n loss, gradients = self.loss_function(activations, labels)\n ret... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a network function and number of outputs, returns an accuracy function | def make_acc_fun(network_apply_fun, num_outputs=1):
if num_outputs == 1:
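# Binary case: threshold the single logit at zero (equivalent to sigmoid >= 0.5).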
prediction_function = lambda x: (x >= 0.).astype(jnp.int32)
else:
prediction_function = lambda x: x.argmax(axis=-1).astype(jnp.int32)
@jax.jit
def accuracy_fun(params, batch):
all_time_logits = network_apply_fun(params, batch... | [
"def accuracy(outputs, targets) -> float:\n\n preds = outputs.reshape(-1, outputs.shape[2]).argmax(dim=1)\n targets = targets.reshape(-1) \n\n return (torch.sum(preds == targets).float() / len(targets)).item()",
"def accuracy(neural_network, test_data, n_classes):\n n_correct = 0\n #feed each train... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Used to insert BranchHash, along with Branch contents and Branch list within this hash | def dbsApiImplInsertBranchInfo(self, branchInfo):
funcInfo = inspect.getframeinfo(inspect.currentframe())
xmlinput = "<?xml version='1.0' standalone='yes'?>"
xmlinput += "<dbs>"
xmlinput += "<branch_info branch_hash='"+branchInfo.get('Hash', '')+"'"
xmlinput += " content='"+base64.binascii.b2a_ba... | [
"def add_branch(self, tree):\n# print 'ADDING BRANCH', tree\n self._insert_branch(None, tree)\n self._cleanup_branches()",
"def insert(self, top, geo_hash, payload):\n current = top\n self.geo_queue.put(payload)\n for digit in geo_hash:\n current = current.make... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Abstract method for writing a particular row for the table | def _write_row(self, row):
return | [
"def write_row(self, row):\n r = [to_text(s) for s in row]\n self.writer.writerow(r)\n self.row_count += 1",
"def output_row(output_db, table_name, row):\n row = clean_output_row(row, table_name)\n insert_row(output_db, table_name, row)",
"def handle_row(self, row):\n pass",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets whether or not we are at the end row of a project group | def _set_end_of_project(self, end_of_project):
self.end_of_project = end_of_project | [
"def endRow(self):\n\t\tnumWidgets = len(self.widgets)\n\t\t# Find number of GUI objects since last divie call\n\t\tsize = numWidgets - self.divieSize\n\t\tself.divies.append(JPLDivies('endRow',size))\n\t\tself.divieSize = numWidgets",
"def _isEndOfRow(self):\r\n\t\tinfo=self.copy()\r\n\t\tinfo.expand(textInfos.U... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the particular index from the headers given the title. | def _get_header_index(self, title):
return self._get_header_list().index(title) | [
"def get_column_index(self, title, row=1):\r\n try:\r\n columns = self.get_column_titles_with_index(row)\r\n return columns[title]\r\n except Exception as traceback_error:\r\n statement = \"Problem finding column titled {}\".format(title)\r\n error_logger.lo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given the project number and result object, creates a list of relevant information to print per row. | def _get_row_list(self, project, result):
row_list = [
project,
result.get_sample_name()
]
if self.include_reportable_status:
row_list.append(result.get_reportable_serovar_status())
row_list.extend([
result.get_qc_status(),
re... | [
"def results_strings_list(query_result):\n return [\"\"\"*{title} {section}. Taught by {instructor} on {time} at {location}.* \\n\\tRecommend rating: {recommend}.\\n\\tHours per week: {hours}. \\n\\tInteresting rating: {interesting}.\n \"\"\".format(title=x[0].title(),\n section=... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets information about the run of this irida-sistr-results script. | def _get_irida_sistr_run_info(self):
info = OrderedDict()
info['appname'] = self.appname
info['version'] = __version__
info['command_line'] = self.command_line
info['irida_url'] = self.irida_url
info['username'] = self.username
info['app_run_date'] = datetime.now(... | [
"def print_activity_run_details(activity_run):\r\n print(\"\\n\\tActivity run details\\n\")\r\n print(\"\\tActivity run status: {}\".format(activity_run.status))\r\n if activity_run.status == 'Succeeded':\r\n print(\"\\tNumber of bytes read: {}\".format(activity_run.output['dataRead']))\r\n p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the particular column number from the headers given the title. | def _get_header_column_number(self, title):
return self._get_header_index(title) + 1 | [
"def get_column_index(self, title, row=1):\r\n try:\r\n columns = self.get_column_titles_with_index(row)\r\n return columns[title]\r\n except Exception as traceback_error:\r\n statement = \"Problem finding column titled {}\".format(title)\r\n error_logger.lo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the particular column letter from the headers given the title. | def _get_header_column_letter(self, title):
return self._to_letter(self._get_header_index(title)) | [
"def get_column_index(self, title, row=1):\r\n try:\r\n columns = self.get_column_titles_with_index(row)\r\n return columns[title]\r\n except Exception as traceback_error:\r\n statement = \"Problem finding column titled {}\".format(title)\r\n error_logger.lo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
First off, this function gets information from resources/database-config.json in order to connect to a database. Then the function saves a model into the database. | def save(model: Article):
with open("resources/database-config.json") as f:
db_info = json.load(f)
try:
connection = psycopg2.connect(user=db_info["user"],
password=db_info["password"],
host=db_info["host"],
... | [
"def __call__(self):\n self.create_database()",
"def register_models() -> None:\n db.create_all()\n db.session.commit()",
"def dbSave(self, env):\n\t\traise NotImplementedError, 'Flat File Saving Not Implemented'",
"def save_database_name(database_name):\n database = {\"DATABASE\": databas... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Defines a route on the scheems API based on the provided `table`. | def scheems_route(
table: Union[DeclarativeMeta, Table],
*,
identified_by: Optional[str] = "pkey",
methods: Optional[Sequence[Methods]] = ("GET",),
allow_bulk: bool = False,
allowed_filters: Sequence[str] = None,
) -> Route:
if isinstance(table, DeclarativeMeta):
table = table.__tabl... | [
"def associate_route_table(DryRun=None, SubnetId=None, RouteTableId=None):\n pass",
"def route( self, routing_table ):\n routing_index = ((self.routing % len(routing_table) ) - 1)\n return routing_table[ routing_index ]",
"def set_route_table(self, route_table):\n # get configmap... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generic callback function for the `/model/{identifier}` route. To be used along with `functools.partial` to provide the model and identifier. | async def _get_identifier_callback(
request: Request, identified_by: str, model: Type[BaseModel]
) -> Response:
try:
identifier_value = request.path_params[identified_by]
except KeyError:
raise MissingRequiredParameter(
f"Required parameter {identified_by} was not provided."
... | [
"def _get_identifier(model):\n pass",
"def natural_getsert(model) -> Callable: # takes a django model\n def handler(identifier: Optional[str]): # returns an entity\n if identifier is None:\n return None\n\n # prepare the natural key into a dict that can be passed to __init__\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Merge CLI params with configuration file params. Note that the configuration params will overwrite the CLI params. | def _merge_params(cli, config):
# update CLI params with configuration; overwrites
params = dict(list(cli.items()) + list(config.items()))
return params | [
"def config(self, config_data, preserve=True):\n if isinstance(config_data, dict):\n if preserve:\n # on env server core doesn't send all required values on cli. inputs that\n # come in via secureParams needs to be updated, but not all of them (e.g. log_path).\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Solve pose from image points. Return (rotation_vector, translation_vector) as pose. | def solve_pose(self, image_points):
assert image_points.shape[0] == self.model_points_68.shape[0], "3D points and 2D points should be of same number."
(_, rotation_vector, translation_vector) = cv2.solvePnP(
self.model_points, image_points, self.camera_matrix, self.dist_coefs)
... | [
"def compute_pose(view, completed_views, K, img_matches):\r\n points_2d = np.empty((0, 2))\r\n points_3d = np.empty((0, 3))\r\n\r\n for view_n in completed_views:\r\n pts_found = 0\r\n match = img_matches[(view.name, view_n.name)]\r\n if match is not None:\r\n logging.info(f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get marks ready for pose estimation from 68 marks | def get_pose_marks(self, marks):
pose_marks = []
pose_marks.append(marks[30]) # Nose tip
pose_marks.append(marks[8]) # Chin
pose_marks.append(marks[36]) # Left eye left corner
pose_marks.append(marks[45]) # Right eye right corner
pose_marks.append(marks... | [
"def detect_marks_keras(self, image_np):\n predictions = self.sess.predict_on_batch(image_np)\n\n # Convert predictions to landmarks.\n marks = np.array(predictions[0]).flatten()\n marks = np.reshape(marks, (-1, 2))\n\n return marks",
"def citation_marks(self) -> Iterator[Dict[s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that recognizers loaded from `_recognizers.py` files can be overridden by user-provided recognizers. | def test_imported_recognizers_override():
called = [False, False]
def numeric_id(input, pos):
called[0] = True
def fqn(input, pos):
called[0] = True
recognizers = {
'base.COMMA': comma_recognizer,
'base.NUMERIC_ID': numeric_id,
'base.FQN': fqn
}
g = G... | [
"def test_imported_recognizers_override_by_importing_grammar_file():\n\n g = Grammar.from_file(os.path.join(this_folder, 'model_override.pg'))\n assert g\n\n t = g.get_terminal('base.NUMERIC_ID')\n assert t is not None\n\n assert t.recognizer.__doc__ == 'Check override'",
"def get_recognizers(self,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that recognizers loaded from `_recognizers.py` files can be overridden in the importing grammar's `_recognizers.py` file by providing the FQN of the imported terminal relative to the importing grammar file. | def test_imported_recognizers_override_by_importing_grammar_file():
g = Grammar.from_file(os.path.join(this_folder, 'model_override.pg'))
assert g
t = g.get_terminal('base.NUMERIC_ID')
assert t is not None
assert t.recognizer.__doc__ == 'Check override' | [
"def test_imported_recognizers_override():\n\n called = [False, False]\n\n def numeric_id(input, pos):\n called[0] = True\n\n def fqn(input, pos):\n called[0] = True\n\n recognizers = {\n 'base.COMMA': comma_recognizer,\n 'base.NUMERIC_ID': numeric_id,\n 'base.FQN': fq... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Places tensor on CUDA device. | def cuda(tensor):
return tensor.to(args.device) | [
"def place(tensor, device=-1):\n\n if device < 0:\n return tensor.cpu()\n else:\n return tensor.cuda(device)",
"def cuda(self: T, device: Optional[int] = None) -> T:\n return self.to(torch.device(f\"cuda:{device}\" if device is not None else \"cuda\"))",
"def _set_var2cuda(self, tenso... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Encodes pair inputs for pretrained models using the template [CLS] sentence1 [SEP] sentence2 [SEP]. Used for SNLI, MNLI, QQP, and TwitterPPDB. Returns input_ids, segment_ids, and attention_mask. | def encode_pair_inputs(sentence1, sentence2):
inputs = tokenizer.encode_plus(
sentence1, sentence2, add_special_tokens=True, max_length=args.max_seq_length
)
input_ids = inputs['input_ids']
segment_ids = inputs['token_type_ids']
attention_mask = inputs['attention_mask']
padding_length =... | [
"def prepare_for_model(\n self,\n ids: List[int],\n pair_ids: Optional[List[int]] = None,\n entity_ids: Optional[List[int]] = None,\n pair_entity_ids: Optional[List[int]] = None,\n entity_token_spans: Optional[List[Tuple[int, int]]] = None,\n pair_entity_token_spans:... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Encodes multiple choice inputs for pretrained models using the template [CLS] context [SEP] ending_i [SEP] where 0 <= i < len(endings). Used for SWAG and HellaSWAG. Returns input_ids, segment_ids, and attention_masks. | def encode_mc_inputs(context, start_ending, endings):
context_tokens = tokenizer.tokenize(context)
start_ending_tokens = tokenizer.tokenize(start_ending)
all_input_ids = []
all_segment_ids = []
all_attention_masks = []
for ending in endings:
ending_tokens = start_ending_tokens + tokeniz... | [
"def encode_pair_inputs(sentence1, sentence2):\n\n inputs = tokenizer.encode_plus(\n sentence1, sentence2, add_special_tokens=True, max_length=args.max_seq_length\n )\n input_ids = inputs['input_ids']\n segment_ids = inputs['token_type_ids']\n attention_mask = inputs['attention_mask']\n pad... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Wraps label in tensor. | def encode_label(label):
return cuda(torch.tensor(label)).long() | [
"def axis_label(label):\n\n def result(func):\n func.__axis_label__ = label\n return func\n\n return result",
"def to_real_label(template_label):\n qlabels = [oplabel.qubits[i] for i in template_label.qubits]\n return _Label(template_label.name, qlabels)",
"def augment_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Selects data processor using task name. | def select_processor():
return globals()[f'{args.task}Processor']() | [
"def get_processor(self, name):\n if name is None:\n name = 'processor0'\n return self.processors.get(name, None)",
"def get_task(self, task_name):",
"def get_a_task(self, task_name):\n raise NotImplementedError",
"def get_input_task(self, name=''):\n port = self.get_inp... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
retrieve and sign in the user corresponding to provider and validated access token | def _auth_oauth_signin(self, cr, uid, provider, validation, params, context=None):
oauth_uid = validation['user_id']
user_ids = self.search(cr, uid, [("oauth_uid", "=", oauth_uid), ('oauth_provider_id', '=', provider)])
if not user_ids:
raise openerp.exceptions.AccessDenied()
... | [
"def post(self, request):\n serializer = self.serializer_class(data=request.data)\n serializer.is_valid(raise_exception=True)\n provider = serializer.data.get(\"provider\", None)\n strategy = load_strategy(request)\n\n try:\n backend = load_backend(strategy=strategy, na... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
[Faint internal] Creates a new prompt (>>>) in the interpreter, and resets command states _g_printed and _g_silent | def interpreter_new_prompt():
global _g_printed
global _g_silent
_g_printed = False
_g_silent = False
int_ran_command() | [
"def prompt(self):\n\t\t_globals._console.write(f'{self.prompt_str} ')",
"def before_prompt():\n sys.stdout.write(BEFORE_PROMPT)\n # Flushing is important as the command timing feature is based on\n # BEFORE_OUTPUT and BEFORE_PROMPT\n sys.stdout.flush()",
"def after_prompt():\n sys.stdout.write(A... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reloads the user's ini file | def reload_ini():
__inifile__(ifaint.inipath) | [
"def reload_():\n load_conf(True)",
"def reload_settings():\n refresh_config()",
"def load_user():\n old_dir = os.getcwd()\n os.chdir(os.path.dirname(os.path.abspath(__file__)))\n with open(\"../config/user.yaml\", \"r\") as stream:\n SETTINGS[\"user\"] = yaml.safe_load(stream)\n os... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Connect a function to the key specified by keycode. For interactive binding, try bind( function ) instead. | def bindk( keycode, function, modifiers=0 ):
ifaint._binds[ (keycode,modifiers) ] = function
int_bind_key( keycode, modifiers ) | [
"def bind_key(self, key, callback):\n\n from kivy.core.window import Window\n\n def _on_keyboard(window, keycode, *args):\n if key == keycode:\n return callback()\n\n Window.bind(on_keyboard=_on_keyboard)",
"def keybind(self, name, command):\n self.command('ke... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Removes all keybinds to the specified function. | def unbindf( function ):
d = ifaint._binds
for key in d.keys():
if d[key].__name__ == function:
del d[key] | [
"def remove(self, *args: Keys | str | KeyHandlerCallable) -> None:\n found = False\n\n if callable(args[0]):\n assert len(args) == 1\n function = args[0]\n\n # Remove the given function.\n for b in self.bindings:\n if b.handler == function:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
List what keyboard shortcuts are connected to Python functions. With verbose=True, doc strings are included. With numeric=True, key code values are shown instead of characters or key names. See also "bind" for help on binding. | def binds( verbose=False, numeric=False ):
# Dict to keep track of printed doc-strings under verbose,
# to avoid repeating them for multiple binds
printed = {}
for key, modifiers in sorted(ifaint._binds, key=lambda bind : bind[0] ):
func = ifaint._binds[(key, modifiers)]
print " " + key... | [
"def show_shortcuts():\n key_map = {\"mod1\": \"alt\", \"mod4\": \"mod\"}\n shortcuts_path = os.path.join(home_path, \"qtile_shortcuts\")\n shortcuts = open(shortcuts_path, 'w')\n shortcuts.write(\"{0:25}| {1:25}\\n\".format(\"KEYS COMBINATION\", \"COMMAND\"))\n shortcuts.write(\"{0:50}\\n\".format(\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Unit tests for the PartCategoryParameterTemplate model | def test_part_category_parameter_templates(self):
electronics = PartCategory(self.api, pk=3)
# Ensure there are some parameter templates associated with this category
templates = electronics.getCategoryParameterTemplates(fetch_parent=False)
if len(templates) == 0:
for name... | [
"def create_category_parms(self, node):\n\n #parm_group\n parm_group = node.parmTemplateGroup()\n\n #fldr\n fldr = parm_group.containingFolder('categories')\n \n #lightcategories\n hou_parm_template = hou.StringParmTemplate(\"lightcategories\", \"Light Selection\", ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test various functions of Part class, mostly starting with get... These are wrappers for other functions, so the testing of details of the function should be done elsewhere. | def test_part_get_functions(self):
# Get list of parts
parts = Part.list(self.api)
# For each part in list, test some functions
for p in parts:
functions = {
'getSupplierParts': SupplierPart,
'getBomItems': BomItem,
'isUsedIn'... | [
"def test_access_erors(self):\n\n with self.assertRaises(TypeError):\n Part(self.api, 'hello')\n \n with self.assertRaises(ValueError):\n Part(self.api, -1)\n\n # Try to access a Part which does not exist\n with self.assertRaises(requests.exceptions.HTTPError... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that errors are flagged when we try to access an invalid part | def test_access_erors(self):
with self.assertRaises(TypeError):
Part(self.api, 'hello')
with self.assertRaises(ValueError):
Part(self.api, -1)
# Try to access a Part which does not exist
with self.assertRaises(requests.exceptions.HTTPError):
... | [
"def _check_partno(self, ctx=None):\n if hasattr(self, \"Item\") and self.teilenummer and not self.Item:\n raise ue.Exception(\"part_number\", self.teilenummer, self.t_index)",
"def test_add_ifc_errors(self):\n pass",
"def test_error_fields(self):\n self._d.log_error('abcd', 'som... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check that we can list Part objects, and apply certain filters | def test_part_list(self):
parts = Part.list(self.api)
self.assertTrue(len(parts) >= 19)
parts = Part.list(self.api, category=5)
n = len(parts)
for i in range(5):
prt = Part.create(self.api, {
"category": 5,
"name": f"Special Part {n... | [
"def test_part_get_functions(self):\n\n # Get list of parts\n parts = Part.list(self.api)\n\n # For each part in list, test some functions\n for p in parts:\n functions = {\n 'getSupplierParts': SupplierPart,\n 'getBomItems': BomItem,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that we can edit a part | def test_part_edit(self):
# Select a part
p = Part.list(self.api)[-1]
name = p.name
# Adjust the name
if len(name) < 40:
name += '_append'
else:
name = name[:-10]
p.save(
data={
'name': name,
... | [
"def test_command_edit(self):\n pass",
"def test_cet_line(self):\n test_data = \"Here is some text\"\n self.edit.set_edit_text(test_data)\n\n self.assertEqual(self.edit.get_line(0), test_data)",
"def test_edit_view(self):\n c = self.c\n response = c.get(reve... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test we can create and delete a Part instance via the API | def test_part_delete(self):
n = len(Part.list(self.api))
# Create a new part
# We do not specify 'active' value so it will default to True
p = Part.create(
self.api,
{
'name': 'Delete Me',
'description': 'Not long for this... | [
"def test_part_api(self):\n url = reverse('api-part-list')\n\n # Check JSON response\n response = self.client.get(url, HTTP_ACCEPT='application/json')\n self.assertEqual(response.status_code, 200)",
"def test_part_detail(self):\n\n pk = 1\n\n response = self.client.get(re... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test image upload functionality for Part model | def test_image_upload(self):
# Grab the first part
p = Part.list(self.api)[0]
# Ensure the part does *not* have an image associated with it
p.save(data={'image': None})
# Create a dummy file (not an image)
with open('dummy_image.jpg', 'w') as dummy_file:
du... | [
"def test_upload_image(self):\n\n url = image_upload_url(self.recipe.id)\n\n with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:\n \"\"\"Creating a temporary file in the system with .jpg extension\"\"\"\n\n img = Image.new('RGB', (10, 10)) # Creating a random image 10x10\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that an internal price can be set for a part | def test_set_price(self):
test_price = 100.0
test_quantity = 1
# Grab the first part
p = Part.list(self.api)[0]
# Grab all internal prices for the part
ip = InternalPrice.list(self.api, part=p.pk)
# Delete any existing prices
for price in ip:
... | [
"def test_get_price(self):\n self.assertEqual(get_price('unused_string'), 24)",
"def _check_price(self, cr, uid, ids, context=None):\n for quote in self.browse(cr, uid, ids, context=context):\n for line in quote.quotes_products_ids:\n if line.price_unit < 0.0 :\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test Part instance metadata | def test_metadata(self):
# Grab the first available part
part = Part.list(self.api, limit=1)[0]
part.setMetadata(
{
"foo": "bar",
},
overwrite=True,
)
metadata = part.getMetadata()
# Check that the metadata has been ... | [
"def test_metadata_definition(self):\n\n self.assertEqual(DummyFieldModifier.META_NAME, \"Test Name\")\n self.assertEqual(DummyFieldModifier.META_DESCRIPTION, \"Test Description\")\n self.assertIsInstance(DummyFieldModifier.META_ARGS, dict)\n self.assertIsInstance(DummyFieldModifier.META_OPERANDS, dict)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test getRequirements function for parts | def test_get_requirements(self):
# Get first part
prt = Part.list(self.api, limit=1)[0]
# Get requirements list
req = prt.getRequirements()
# Check for expected content
self.assertIsInstance(req, dict)
self.assertIn('available_stock', req)
self.assertIn... | [
"def test_requirements_single_item(self):\n pass",
"def test_getinvestmentrequirements(self):\n pass",
"def get_requirements(self):\n pass",
"def test_requirement_in_ha_core():\n request = requests.get(\n \"https://raw.githubusercontent.com/home-assistant/home-assistant/dev/setu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests for generating a key for a PartTestTemplate | def test_generateKey(self):
self.assertEqual(PartTestTemplate.generateTestKey('bob'), 'bob')
self.assertEqual(PartTestTemplate.generateTestKey('bob%35'), 'bob35')
self.assertEqual(PartTestTemplate.generateTestKey('bo b%35'), 'bob35')
self.assertEqual(PartTestTemplate.generateTestKey('BO... | [
"def test_azure_service_api_keypair_generate_post(self):\n pass",
"def test_create_service_key(self):\n pass",
"def test_create_key(self):\n self.assert_requires_auth(self.instance.create_key, \"title\", \"key\")",
"def test_create_entitlement_template(self):\n pass",
"def test_a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Prints the description of every parameter of CHILD. | def print_parameter_descriptions(self):
length = 0
for parameter in self.parameter_descriptions:
if len(parameter) > length:
length = len(parameter)
for parameter in self.parameter_descriptions:
print(parameter,
(length -... | [
"def show_parameters(self):\n for p in self.parameters:\n print p",
"def print_desc(self):\n print(self.description)\n return",
"def _show_desc(self, depth=0, symbol_table={}):\n block_indent = \"\\t\"*depth\n\n # show node id\n symbol = self._get_symbol(symb... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the parameters defining the mesh. | def set_mesh(self,
OPTREADINPUT=10,
OPTINITMESHDENS=0,
X_GRID_SIZE=10000.,
Y_GRID_SIZE=10000.,
OPT_PT_PLACE=1,
GRID_SPACING=200.,
NUM_PTS='n/a',
INPUTDATAFILE='n/a',
I... | [
"def set_parameters(self, params, **kargs):\n self._solver.set_parameters(params, **kargs)",
"def set_surface_pars(self):\n\n self.surface_pars = imexam_defpars.surface_pars",
"def set_params(self, **values):\n pc, pe = {}, {}\n for k, v in values.items():\n if k.startswit... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the parameters defining the bedrock. | def set_bedrock(self,
BEDROCKDEPTH=1e10,
REGINIT=0.,
MAXREGDEPTH=100.):
self.parameters['BEDROCKDEPTH'] = BEDROCKDEPTH
self.parameters['REGINIT'] = REGINIT
self.parameters['MAXREGDEPTH'] = MAXREGDEPTH | [
"def set_params(self, params):\n super().set_params(params)\n\n params = dict_to_namespace(params)\n self.params.name = getattr(params, 'name', 'BaxAcqFunction')\n self.params.acq_str = getattr(params, \"acq_str\", \"exe\")\n self.params.min_neighbors = getattr(params, \"min_neigh... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the parameters defining the regular stratigraphic grid. | def set_stratigraphic_grid(self,
OPTSTRATGRID=0,
XCORNER=0.,
YCORNER=0.,
GRIDDX=200.,
GR_WIDTH=10000.,
GR_LENGTH=10000.,
... | [
"def generateGridSettings(self):\n tiles = TiledRenderer(self.window, self.map)\n mw = tiles.tmx_data.width\n mh = tiles.tmx_data.height\n self.grid_dim = (mw, mh)\n w, h, gm = self.window.width, self.window.height, 0\n self.grid_margin = gm\n ssize = tiles.tmx_data.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the parameters defining a uniform uplift. | def set_uniform_uplift(self,
UPRATE=0.001,
UPDUR=1e10):
self.parameters['OPTNOUPLIFT'] = 0
self.parameters['UPTYPE'] = 1
self.parameters['UPRATE'] = UPRATE
self.parameters['UPDUR'] = UPDUR | [
"def set_block_uplift(self,\n FAULTPOS,\n UPRATE=0.001,\n SUBSRATE=0.,\n UPDUR=1e10):\n self.parameters['OPTNOUPLIFT'] = 0\n self.parameters['UPTYPE'] = 2\n self.parameters['FAULTPOS'] = FAULTPOS\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the parameters defining a block uplift. | def set_block_uplift(self,
FAULTPOS,
UPRATE=0.001,
SUBSRATE=0.,
UPDUR=1e10):
self.parameters['OPTNOUPLIFT'] = 0
self.parameters['UPTYPE'] = 2
self.parameters['FAULTPOS'] = FAULTPOS
self.pa... | [
"def set_uniform_uplift(self,\n UPRATE=0.001,\n UPDUR=1e10):\n self.parameters['OPTNOUPLIFT'] = 0\n self.parameters['UPTYPE'] = 1\n self.parameters['UPRATE'] = UPRATE\n self.parameters['UPDUR'] = UPDUR",
"def set_uplift_maps(self,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the parameters defining uplift maps. | def set_uplift_maps(self,
NUMUPLIFTMAPS,
UPMAPFILENAME,
UPTIMEFILENAME='n/a',
UPDUR=1e10):
self.parameters['OPTNOUPLIFT'] = 0
self.parameters['UPTYPE'] = 12
self.parameters['UPRATE'] = 0
self.... | [
"def updateParams(self,mapName):\n pass",
"def set_params(self, **kwargs):\n _api.warn_external(\n \"'set_params()' not defined for locator of type \" +\n str(type(self)))",
"def set_uniform_uplift(self,\n UPRATE=0.001,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the hydraulic geometry. | def set_hydraulic_geometry(self,
CHAN_GEOM_MODEL=1,
HYDR_WID_COEFF_DS=10.,
HYDR_WID_EXP_DS=0.5,
HYDR_WID_EXP_STN=0.5,
HYDR_DEP_COEFF_DS=1.,
... | [
"def geometry(self, geometry):\n self._geometry = geometry",
"def geometry_change():\n register.geometry(str(length_scale.get()) + \"x\" + str(height_scale.get()))",
"def setGeometryData(self, path, hitArea):\n # type: (QPainterPath, QPainterPath) -> None\n self.__bou... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the detachment law 'Power law, form 1' for fluvial erosion and deposition. | def set_detachment_power_law_form_1(self,
KB=0.0005,
KR=0.0005,
KT=1000.,
MB=0.66667,
NB=0.66667,
... | [
"def set_detachment_dummy_law(self):\n self.parameters['OPTNOFLUVIAL'] = 0\n self.parameters['DETACHMENT_LAW'] = 4",
"def set_detachment_power_law_form_2(self,\n KB=0.0005,\n KR=0.0005,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the detachment law 'Power law, form 2' for fluvial erosion and deposition. | def set_detachment_power_law_form_2(self,
KB=0.0005,
KR=0.0005,
KT=1000.,
MB=0.66667,
NB=0.66667,
... | [
"def set_detachment_dummy_law(self):\n self.parameters['OPTNOFLUVIAL'] = 0\n self.parameters['DETACHMENT_LAW'] = 4",
"def set_detachment_power_law_form_1(self,\n KB=0.0005,\n KR=0.0005,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the detachment law 'Almost parabolic law' for fluvial erosion and deposition. | def set_detachment_almost_parabolic_law(self,
KB=1e-4,
KR=1e-4,
MB=0.5,
NB=1,
BETA=1):
self.... | [
"def set_detachment_dummy_law(self):\n self.parameters['OPTNOFLUVIAL'] = 0\n self.parameters['DETACHMENT_LAW'] = 4",
"def set_detachment_generalized_fqs_law(self,\n KB=1e-4,\n KR=1e-4,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the detachment law 'Generalized f(Qs) detachment-rule' for fluvial erosion and deposition. | def set_detachment_generalized_fqs_law(self,
KB=1e-4,
KR=1e-4,
MB=0.5,
NB=1,
BETA=1):
self.parame... | [
"def set_detachment_dummy_law(self):\n self.parameters['OPTNOFLUVIAL'] = 0\n self.parameters['DETACHMENT_LAW'] = 4",
"def set_detachment_power_law_form_2(self,\n KB=0.0005,\n KR=0.0005,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the detachment law 'Dummy law for no fluvial erosion' for fluvial erosion and deposition. | def set_detachment_dummy_law(self):
self.parameters['OPTNOFLUVIAL'] = 0
self.parameters['DETACHMENT_LAW'] = 4 | [
"def set_detachment_power_law_form_1(self,\n KB=0.0005,\n KR=0.0005,\n KT=1000.,\n MB=0.66667,\n NB=0.66667,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the transport law for fluvial erosion and deposition. | def set_transport_law(self,
OPTDETACHLIM=0,
TRANSPORT_LAW=1,
KF=617.,
MF=0.66667,
NF=0.66667,
PF=1.5):
self.parameters['OPTNOFLUVIAL'] = 0
self.para... | [
"def set_fluid_props(self):\n \n self.nu = self.mu / self.rho",
"def set_sensor(self):\n self.sensor = Feel(self, self.world,\n # 1 short feeler at the mouth that can detect Plasmoids\n [(0, 15)], ['soft'],\n positi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the parameters for landsliding. Landsliding also requires `ROCKDENSITYINIT` and `WOODDENSITY` (see `material_parameters`). | def set_landsliding(self,
OPT_LANDSLIDES=0,
OPT_3D_LANDSLIDES=0,
FRICSLOPE=1.,
DF_RUNOUT_RULE=0,
DF_SCOUR_RULE=0,
DF_DEPOSITION_RULE=0):
self.parameters['OPT_LANDSLIDES... | [
"def set_landscape_parameters(landscape, params):\n if landscape == \"L\":\n Lowland.set_parameters(params)\n elif landscape == \"H\":\n Highland.set_parameters(params)\n else:\n raise ValueError('Lowland and Highland are the'\n 'only... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the parameters for eolian deposition. | def set_eolian_deposition(self,
OPTLOESSDEP=0,
LOESS_DEP_RATE=0.):
self.parameters['OPTLOESSDEP'] = OPTLOESSDEP
self.parameters['LOESS_DEP_RATE'] = LOESS_DEP_RATE | [
"def set_params(self, **values):\n pc, pe = {}, {}\n for k, v in values.items():\n if k.startswith('e_'):\n pe[k[2:]] = v\n elif k.startswith('c_'):\n pc[k[2:]] = v\n else:\n raise ValueError( # pragma: no cover\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets weathering. Weathering also requires `ROCKDENSITYINIT` and `SOILBULKDENSITY` (see `material_parameters`). | def set_weathering(self,
CHEM_WEATHERING_LAW=0,
MAXDISSOLUTIONRATE=0.099,
CHEMDEPTH=0.18,
PRODUCTION_LAW=0,
SOILPRODRATE=0.00055,
SOILPRODRATEINTERCEPT=0.00055,
... | [
"def update_weather(self):\r\n global weather\r\n global draw_wthr\r\n weather = getWeather();\r\n draw_wthr = True;\r\n self._weather.config(text = weather)",
"def set_weather(self, weather: Union[carla.WeatherParameters, List[carla.WeatherParameters]]):\n if isinstance(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets vegetation. Vegetation also requires `OPTFOREST` (see `forest_parameters`) and `OPTFIRE` (see `fire_parameters`). | def set_vegetation(self,
OPTVEG=0,
OPTGRASS_SIMPLE=1,
VEG_KVD=0.,
VEG_TV=1.,
TAUC=0.,
VEG_TAUCVEG=0.):
self.parameters['OPTVEG'] = OPTVEG
self.parameters['OPTGRASS_SI... | [
"def set_forest(self,\n OPTFOREST=0,\n ROOTDECAY_K=0.,\n ROOTDECAY_N=0.,\n ROOTGROWTH_A=1.,\n ROOTGROWTH_B=1.,\n ROOTGROWTH_C=1.,\n ROOTGROWTH_F=1.,\n ROOTSTRENGTH_J=0.,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets forest. Forest also requires `WOODDENSITY` (see `material_parameters`) and `FSEED` (see `run_control_parameters`). | def set_forest(self,
OPTFOREST=0,
ROOTDECAY_K=0.,
ROOTDECAY_N=0.,
ROOTGROWTH_A=1.,
ROOTGROWTH_B=1.,
ROOTGROWTH_C=1.,
ROOTGROWTH_F=1.,
ROOTSTRENGTH_J=0.,
... | [
"def setup_classification_forest(\n max_predictors_per_split, num_trees=100, min_examples_at_split=30,\n min_examples_at_leaf=30):\n\n return RandomForestClassifier(\n n_estimators=num_trees, min_samples_split=min_examples_at_split,\n min_samples_leaf=min_examples_at_leaf,\n ma... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Writes a parameter value into a CHILD input file. | def write_parameter(self, input_file, parameter, value, parameter_name=None):
if parameter_name is None:
parameter_name = parameter
if parameter in self.parameter_descriptions:
parameter_name += ': ' + self.parameter_descriptions[parameter] + '\n'
else:
parame... | [
"def do_write_params(self, arg):\n # Store original output channel\n old_stdout = sys.stdout\n # Open file and write\n with open(arg, \"w\") as f:\n sys.stdout = f\n self.parameters.pretty_print()\n # Restore original output channel\n sys.stdout = old_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Locates the CHILD input file. | def locate_input_file(self, realization=0):
return os.path.join(self.base_directory,
self.parameter_values[realization]['OUTFILENAME'] + '.in') | [
"def test_read_input_does_not_crash_same_dir(self):\n ugen.read_input(\"./file1\")",
"def test_read_input_does_not_crash_same_dir_filename(self):\n ugen.read_input(\"file1\")",
"def test_read_input_does_not_crash_diff_dir(self):\n ugen.read_input(\"../input/file1\")",
"def StartingOpen():... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Deletes the CHILD input file. | def delete_input_file(self, realization=0):
if os.path.isfile(self.locate_input_file(realization)) == True:
subprocess.call('rm ' + self.locate_input_file(realization), shell=True) | [
"def delete_file(self):\n os.remove(self.full_path())\n self.size = 0",
"def remove(self, file):\n pass",
"def delete_resource_file(\r\n self, resource: GenomicResource, filename: str) -> None:",
"def remove(self, fd):",
"def deleted(self, src, path):",
"def delete(file=Non... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extracts the parameters from a CHILD input file. | def extract_input_file_parameters(self,
parameters,
realization=None,
input_file_path=None):
file_suffix = ''
if realization is not None:
file_suffix = '_' + str(realization) ... | [
"def parse(input_file):\n # TODO: is json or xml more suitable for the input file format?\n parameters = dict()\n\n try:\n # open and parse the file\n pass\n except FileNotFoundError:\n print(\"Input file '%s' not found\" % input_file)\n sys.ex... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Translates a square name 'b3' into indices (1, 2) | def get_square_index(square):
assert(len(square) == 2)
square_index = (int(square[1]) - 1, ord(square[0].lower()) - 97)
assert(0 <= square_index[0] < 8 and 0 <= square_index[1] < 8)
return square_index | [
"def _cord_to_index(cord):\n\t\tletnum = {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4, 'f': 5, 'g': 6, 'h': 7, 'i': 8, 'j': 9}\n\t\tx = letnum[cord[0].lower()]\n\t\ty = int(cord[1])\n\t\treturn (y * 10) + x",
"def clust_index(i, j):\n return 3 * (i // 3) + j // 3",
"def test_bracket_to_index(self):\n true ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a generator of all moves that the current player could take from this position | def get_all_next_moves(self, player=None, check=True):
if player is None:
player = self.current_player
for i in range(8):
for j in range(8):
if self.get_square(i, j)[0] == player:
moves = self.get_valid_moves(i, j, check)
fo... | [
"def moves(self):\n for i in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\n m = State(self.x + i[0], self.y + i[1], self.distance + 1)\n if m.valid:\n yield m",
"def get_possible_moves(self) -> list:\n raise NotImplementedError(\"Override this!\")",
"def get_possible_mo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Retrieve EventInfo object from SG | def getEventInfo (aKey):
return PyK.retrieve('xAOD::EventInfo',aKey) | [
"def get_sg_event(event_id):\n\n params = {'client_id': CLIENT_ID,\n 'client_secret': CLIENT_SECRET,\n 'id': event_id}\n\n response = requests.get(SG_URL + 'events', params=params)\n\n return response.json()",
"def getEvent(self) -> \"SoEvent const *\":\n return _coin... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that edge mutation can add an edge between nodes | def test_edge_mutation_for_graph():
graph_without_edge = \
OptGraph(OptNode({'name': 'logit'}, [OptNode({'name': 'one_hot_encoding'}, [OptNode({'name': 'scaling'})])]))
primary = OptNode({'name': 'scaling'})
graph_with_edge = \
OptGraph(OptNode({'name': 'logit'}, [OptNode({'name': 'one_hot_... | [
"def test_add_node(nodes_to_add):\n from graph import Graph\n graph = Graph()\n\n for node in nodes_to_add:\n graph.add_node(node)\n\n assert graph.nodes() == nodes_to_add",
"def test_edge_match(self):\n e1 = ed.Edge(\"O\",\"B\")\n e2 = ed.Edge(\"O\",\"T\")\n self.assertTru... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that single_drop mutation can remove a node | def test_drop_mutation_for_linear_graph():
linear_two_nodes = OptGraph(OptNode({'name': 'logit'}, [OptNode({'name': 'scaling'})]))
linear_one_node = OptGraph(OptNode({'name': 'logit'}))
composer_requirements = GPComposerRequirements(primary=['scaling'],
... | [
"def test_delete_node_using_delete(self):\n pass",
"def test_delete_decision_tree_using_delete(self):\n pass",
"def test_drop_single_label(self):\n self.stack.drop(\"lsat7_2002_70@PERMANENT\", in_place=True)\n self.assertListEqual(self.stack.names, self.predictors[0:5])",
"def test... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that boosting mutation can add a correct boosting cascade | def test_boosting_mutation_for_linear_graph():
linear_one_node = OptGraph(OptNode({'name': 'knn'}, [OptNode({'name': 'scaling'})]))
init_node = OptNode({'name': 'scaling'})
model_node = OptNode({'name': 'knn'}, [init_node])
boosting_graph = \
OptGraph(
OptNode({'name': 'logit'},
... | [
"def test_breed(self):\n\t\tpass",
"def test_mutate(self):\n\n\t\tpass",
"def test_gradient_boosting(n_samples=1000, distance = 0.6):\n # Generating some samples correlated with first variable\n testX, testY = generate_sample(n_samples, 10, distance)\n trainX, trainY = generate_sample(n_samples, 10, di... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks the correct conversion of hyperparameters in nodes when nodes pass through the adapter | def test_pipeline_adapters_params_correct():
init_alpha = 12.1
pipeline = pipeline_with_custom_parameters(init_alpha)
# Convert into OptGraph object
adapter = PipelineAdapter()
opt_graph = adapter.adapt(pipeline)
# Get Pipeline object back
restored_pipeline = adapter.restore(opt_graph)
... | [
"def convertToTweakNodePlug(*args, **kwargs):\n \n pass",
"def _check_model_params(self):",
"def node_features(self):",
"def process_epidemic_parameters(self):",
"def ClaimClassifierHyperParameterSearch():\n\n pass",
"def is_node_with_weight(node: NNCFNode) -> bool:",
"def _check_parameters... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |