| query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | negatives (list, lengths 19–20) | metadata (dict) |
|---|---|---|---|
Tests that custom error is raised in the default eigenvalue representation. | def test_eigvals_undefined(self):
with pytest.raises(qml.operation.EigvalsUndefinedError):
MyOp.compute_eigvals()
with pytest.raises(qml.operation.EigvalsUndefinedError):
op.eigvals() | [
"def test_error_deterministic_model_with_realizations(ensemble_cube, interpreter):\n ensemble_cube.attributes[\"mosg__model_configuration\"] = \"uk_det\"\n ensemble_cube.attributes[\"title\"] = \"UKV Model on UK 2 km Standard Grid\"\n msg = \"Deterministic model should not have . realizations\"\n with p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that custom error is raised in the default diagonalizing gates representation. | def test_diaggates_undefined(self):
with pytest.raises(qml.operation.DiagGatesUndefinedError):
MyOp.compute_diagonalizing_gates(wires=[1])
with pytest.raises(qml.operation.DiagGatesUndefinedError):
op.diagonalizing_gates() | [
"def test_diagonalizing_gates_overlapping(self):\n diag_op = ValidOp(qml.S(0), qml.PauliX(0))\n diagonalizing_gates = diag_op.diagonalizing_gates()\n\n assert len(diagonalizing_gates) == 1\n diagonalizing_mat = diagonalizing_gates[0].matrix()\n\n true_mat = np.eye(2)\n\n as... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that custom error is raised in the default generator representation. | def test_generator_undefined(self):
with pytest.raises(qml.operation.GeneratorUndefinedError):
gate.generator() | [
"def test_custom_formatting():\r\n \r\n try: SampleAPI.execute('custom_err.fail')\r\n except Exception, e:\r\n assert e.data['error'] == True\r\n assert 'desc' in e.data\r\n assert e.data['num'] == 99\r\n # hook can modified the error instance directly\r\n assert e.http_s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that the default of an operation raised to a zero power is an empty array. | def test_pow_zero(self):
assert len(gate.pow(0)) == 0 | [
"def test_11_empty_input(self):\n out, err = self._iquery(\n 'create temp array empty<val:double>[k=0:39:4:20]',\n quiet=False)\n assert not err, err\n self._array_cleanups.append(\"empty\")\n out, err = self._iquery('redimension(empty, <val:double>[k=0:39:3])',\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that the default of an operation raised to the power of one is a copy. | def test_pow_one(self):
pow_gate = gate.pow(1)
assert len(pow_gate) == 1
assert pow_gate[0].__class__ is gate.__class__ | [
"def testPower(self):\n f8 = self.f8\n self.assertTrue(f8(1, 1, 1) ** 2 == f8(1, 1, 0))",
"def test_pow_method_with_non_numeric_power_raises_error(self):\n\n class DummyOp(qml.operation.Operation):\n r\"\"\"Dummy custom operator\"\"\"\n num_wires = 1\n\n with pyte... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that custom error is raised in the default pow decomposition. | def test_pow_undefined(self):
with pytest.raises(qml.operation.PowUndefinedError):
gate.pow(1.234) | [
"def test_pow_method_with_non_numeric_power_raises_error(self):\n\n class DummyOp(qml.operation.Operation):\n r\"\"\"Dummy custom operator\"\"\"\n num_wires = 1\n\n with pytest.raises(ValueError, match=\"Cannot raise an Operator\"):\n _ = DummyOp(wires=[0]) ** DummyOp(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
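The three `pow` rows above all exercise one default contract: raising to the zeroth power decomposes to nothing, raising to the first power yields a copy, and any other power raises `PowUndefinedError`. A minimal plain-Python sketch of that contract, with `DummyGate` and a local `PowUndefinedError` as stand-ins for the real PennyLane classes:

```python
import copy

class PowUndefinedError(Exception):
    """Local stand-in for qml.operation.PowUndefinedError."""

class DummyGate:
    """Illustrative operator implementing the default pow contract."""

    def pow(self, z):
        if z == 0:
            return []                 # op ** 0 decomposes to nothing
        if z == 1:
            return [copy.copy(self)]  # op ** 1 is a fresh copy of the operator
        raise PowUndefinedError(f"pow({z}) is not defined")

gate = DummyGate()
assert len(gate.pow(0)) == 0
assert gate.pow(1)[0].__class__ is gate.__class__
```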
Tests that the function raises an exception if the input operation has no generator | def test_no_generator_raise(self):
class CustomOp(qml.operation.Operation):
num_wires = 1
num_params = 1
op = CustomOp(0.5, wires=0)
with pytest.raises(
qml.operation.GeneratorUndefinedError,
match="Operation CustomOp does not have a generator",... | [
"def test_generator(self):\n def generate(a: int):\n assert check_argument_types()\n yield a\n yield a + 1\n\n gen = generate(1)\n next(gen)",
"def ensure_empty(gen):\n try:\n next(gen)\n return False\n except StopIteration:\n return... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test if the function correctly returns the derivative of RX | def test_rx(self):
p = 0.3
op = qml.RX(p, wires=0)
derivative = operation_derivative(op)
expected_derivative = 0.5 * np.array(
[[-np.sin(p / 2), -1j * np.cos(p / 2)], [-1j * np.cos(p / 2), -np.sin(p / 2)]]
)
assert np.allclose(derivative, expected_derivativ... | [
"def test_derivative():\n\t# Testing for derivative of exponential at points x=0,1,2\n\tactual = np.array([(np.exp(1)-np.exp(0)), (np.exp(2)-np.exp(0))/2, (np.exp(2)-np.exp(1))])\n\t# Testing implementation\n\tdef exponential():\n\t\tt = np.linspace(0,2,3)\n\t\tex = np.vectorize(np.exp)\n\t\tex = ex(t)\n\t\treturn ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
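The closed form asserted above can be checked without PennyLane: since RX(p) = exp(-i p X / 2), its derivative is (-i X / 2) · RX(p), which is exactly the 0.5-scaled matrix in the row. A quick NumPy finite-difference sanity check (a sketch; `rx` below is written out from the standard matrix, not taken from the source):

```python
import numpy as np

def rx(p):
    # standard matrix of RX(p) = exp(-i p X / 2)
    return np.array([[np.cos(p / 2), -1j * np.sin(p / 2)],
                     [-1j * np.sin(p / 2), np.cos(p / 2)]])

p, eps = 0.3, 1e-6
numeric = (rx(p + eps) - rx(p - eps)) / (2 * eps)   # central difference
analytic = 0.5 * np.array([[-np.sin(p / 2), -1j * np.cos(p / 2)],
                           [-1j * np.cos(p / 2), -np.sin(p / 2)]])
assert np.allclose(numeric, analytic, atol=1e-8)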
Test if the function correctly returns the derivative of PhaseShift | def test_phase(self):
p = 0.3
op = qml.PhaseShift(p, wires=0)
derivative = operation_derivative(op)
expected_derivative = np.array([[0, 0], [0, 1j * np.exp(1j * p)]])
assert np.allclose(derivative, expected_derivative) | [
"def phase_shift(H, op):\n return -2*np.pi*np.matmul(H, op.tran) / op.DEN",
"def phase_shift(phase):\n\n # sort data\n y = np.argsort(phase)\n data = phase[y]\n\n # double phase data so it goes from 0 to 2\n ph2 = data.copy()\n ph2 = ph2+1.0\n datan = np.concatenate((data,ph2))\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test if the function correctly returns the derivative of CRY | def test_cry(self):
p = 0.3
op = qml.CRY(p, wires=[0, 1])
derivative = operation_derivative(op)
expected_derivative = 0.5 * np.array(
[
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, -np.sin(p / 2), -np.cos(p / 2)],
[0, 0, ... | [
"def test_coherent_state_deriv():\n t = symbols('t', is_positive=True)\n alpha = Function('alpha')\n expr = CoherentStateKet(alpha(t), hs=1)\n assert not expr.diff(t).is_zero",
"def test_derivative():\n\t# Testing for derivative of exponential at points x=0,1,2\n\tactual = np.array([(np.exp(1)-np.exp(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test if the function correctly returns the derivative of CRY if the wires are not consecutive. This is expected behaviour, since without any other context, the operation derivative should make no assumption about the wire ordering. | def test_cry_non_consecutive(self):
p = 0.3
op = qml.CRY(p, wires=[1, 0])
derivative = operation_derivative(op)
expected_derivative = 0.5 * np.array(
[
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, -np.sin(p / 2), -np.cos(p / 2)],
... | [
"def test_cnot_cz(self, wires, res):\n commutation = qml.is_commuting(qml.CNOT(wires=wires[0]), qml.CZ(wires=wires[1]))\n assert commutation == res",
"def test_cnot_x(self, wires, res):\n commutation = qml.is_commuting(qml.CNOT(wires=wires[1]), qml.PauliX(wires=wires[0]))\n assert comm... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
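The same finite-difference check works for the two CRY rows; note that both wire orderings [0, 1] and [1, 0] assert the identical canonical matrix, which is exactly the point the second row's docstring makes. A NumPy sketch using the standard CRY matrix:

```python
import numpy as np

def cry(p):
    # standard CRY matrix: RY(p) acts on the block where the control is |1>
    m = np.eye(4, dtype=complex)
    m[2:, 2:] = [[np.cos(p / 2), -np.sin(p / 2)],
                 [np.sin(p / 2),  np.cos(p / 2)]]
    return m

p, eps = 0.3, 1e-6
numeric = (cry(p + eps) - cry(p - eps)) / (2 * eps)
expected = np.zeros((4, 4))
expected[2:, 2:] = 0.5 * np.array([[-np.sin(p / 2), -np.cos(p / 2)],
                                   [ np.cos(p / 2), -np.sin(p / 2)]])
assert np.allclose(numeric, expected, atol=1e-8)
```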
Make sure that the size of the input for the `heisenberg_expand` method is validated | def test_input_validation(self):
class DummyOp(qml.operation.CVOperation):
num_wires = 1
op = DummyOp(wires=1)
with pytest.raises(ValueError, match="Heisenberg matrix is the wrong size"):
U_wrong_size = np.eye(1)
op.heisenberg_expand(U_wrong_size, op.wires) | [
"def gaussian_expansion(input_array, n_grid_points, d_max):\n\n return expanded_array",
"def small_input_problem(\n problem: DerivativesTestProblem, max_input_numel: int = 100\n) -> DerivativesTestProblem:\n if problem.input.numel() > max_input_numel:\n skip(\"Input is too large:\" + f\" {problem.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Ensure that `heisenberg_expand` raises an exception if it receives an array with order > 2 | def test_wrong_input_shape(self):
class DummyOp(qml.operation.CVOperation):
num_wires = 1
op = DummyOp(wires=1)
with pytest.raises(ValueError, match="Only order-1 and order-2 arrays supported"):
U_high_order = np.array([np.eye(3)] * 3)
op.heisenberg_expand(... | [
"def test_series_empty():\n with pytest.raises(ValueError):\n expand_grid(others={\"x\": pd.Series([], dtype=int)})",
"def test_unflatten_error_too_many_elements(self):\n\n reshaped = np.reshape(flat_dummy_array, (16, 2, 2))\n\n with pytest.raises(ValueError, match=\"Flattened iterable has... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that when the wire_order is None, the original matrix is returned | def test_no_wire_order_returns_base_matrix(self):
res = qml.operation.expand_matrix(self.base_matrix_2, wires=[0, 2])
assert np.allclose(self.base_matrix_2, res) | [
"def test_sparse_matrix_extra_wire(self):\n\n t = qml.PauliX(0) @ qml.PauliZ(1)\n s = t.sparse_matrix(wires=[0, 1, 2])\n\n assert s.shape == (8, 8)\n assert np.allclose(s.data, [1.0, 1.0, -1.0, -1.0, 1.0, 1.0, -1.0, -1.0])\n assert np.allclose(s.indices, [4, 5, 6, 7, 0, 1, 2, 3])\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests the case where the broadcasted original matrix is not changed | def test_no_expansion_broadcasted(self):
res = qml.operation.expand_matrix(
self.base_matrix_2_broadcasted, wires=[0, 2], wire_order=[0, 2]
)
assert np.allclose(self.base_matrix_2_broadcasted, res) | [
"def test_permutation_broadcasted(self):\n res = qml.operation.expand_matrix(\n self.base_matrix_2_broadcasted, wires=[0, 2], wire_order=[2, 0]\n )\n\n perm = [0, 2, 1, 3]\n expected = self.base_matrix_2_broadcasted[:, perm][:, :, perm]\n assert np.allclose(expected, re... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests the case where the broadcasted original matrix is permuted | def test_permutation_broadcasted(self):
res = qml.operation.expand_matrix(
self.base_matrix_2_broadcasted, wires=[0, 2], wire_order=[2, 0]
)
perm = [0, 2, 1, 3]
expected = self.base_matrix_2_broadcasted[:, perm][:, :, perm]
assert np.allclose(expected, res) | [
"def test_permutation_operator_dim_2_2_perm_1_2():\n res = permutation_operator([2, 2], [1, 2])\n expected_res = np.identity(4)\n bool_mat = np.isclose(res, expected_res)\n np.testing.assert_equal(np.all(bool_mat), True)",
"def test_permutation_operator_dim_3_perm_1_2():\n res = permutation_operato... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
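The `perm = [0, 2, 1, 3]` fancy-indexing in the row above is the index-space version of conjugating by a SWAP: exchanging two qubits trades basis states |01⟩ and |10⟩, so rows/columns 1 and 2 of each 4×4 matrix swap places. A NumPy check that the two formulations agree on a batch of broadcasted matrices:

```python
import numpy as np

SWAP = np.array([[1, 0, 0, 0],
                 [0, 0, 1, 0],
                 [0, 1, 0, 0],
                 [0, 0, 0, 1]])

rng = np.random.default_rng(0)
base = rng.normal(size=(3, 4, 4))        # batch of three 4x4 matrices

perm = [0, 2, 1, 3]                      # |01> <-> |10> under the wire swap
by_indexing = base[:, perm][:, :, perm]  # the test's fancy-indexing form
by_conjugation = np.einsum('ij,bjk,kl->bil', SWAP, base, SWAP)

assert np.allclose(by_indexing, by_conjugation)
```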
Tests the case where the broadcasted original matrix is expanded | def test_expansion_broadcasted(self):
res = qml.operation.expand_matrix(
self.base_matrix_1_broadcasted, wires=[2], wire_order=[0, 2]
)
expected = np.array(
[
[
[1, 2, 0, 0],
[3, 4, 0, 0],
[0, 0, ... | [
"def test_no_expansion_broadcasted(self):\n res = qml.operation.expand_matrix(\n self.base_matrix_2_broadcasted, wires=[0, 2], wire_order=[0, 2]\n )\n assert np.allclose(self.base_matrix_2_broadcasted, res)",
"def test_expand_matrix_usage_in_operator_class_broadcasted(self, tol):\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests differentiation in autograd by computing the Jacobian of the expanded matrix with respect to the canonical matrix. | def test_autograd(self, i, base_matrix, tol):
base_matrix = pnp.array(base_matrix, requires_grad=True)
jac_fn = qml.jacobian(self.func_for_autodiff)
jac = jac_fn(base_matrix)
assert np.allclose(jac, self.expected_autodiff[i], atol=tol) | [
"def test_autograd(self, tol, batch_dim):\n dev = qml.device(\"default.qubit.autograd\", wires=2)\n params = np.array([0.543, -0.654], requires_grad=True)\n if batch_dim is not None:\n params = np.outer(np.arange(1, 1 + batch_dim), params, requires_grad=True)\n tangent = np.ar... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests differentiation in torch by computing the Jacobian of the expanded matrix with respect to the canonical matrix. | def test_torch(self, i, base_matrix, tol):
import torch
base_matrix = torch.tensor(base_matrix, requires_grad=True)
jac = torch.autograd.functional.jacobian(self.func_for_autodiff, base_matrix)
assert np.allclose(jac, self.expected_autodiff[i], atol=tol) | [
"def jacobian(Q, d):\n return zeros([n, n])",
"def J_dense(x): # dense Jacobian\n return np.array([[1.004, -1e3*x[2], -1e3*x[1]],\n [-0.004, 1.0 + 1e3*x[2] + 60.0*x[1], 1e3*x[1]],\n [0.0, -60.0*x[1], 1.0]])",
"def test_jacobian_variable_multiply(self, torch_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests differentiation in jax by computing the Jacobian of the expanded matrix with respect to the canonical matrix. | def test_jax(self, i, base_matrix, tol):
import jax
base_matrix = jax.numpy.array(base_matrix)
jac_fn = jax.jacobian(self.func_for_autodiff)
jac = jac_fn(base_matrix)
assert np.allclose(jac, self.expected_autodiff[i], atol=tol) | [
"def J_dense(x): # dense Jacobian\n return np.array([[1.004, -1e3*x[2], -1e3*x[1]],\n [-0.004, 1.0 + 1e3*x[2] + 60.0*x[1], 1e3*x[1]],\n [0.0, -60.0*x[1], 1.0]])",
"def jacobian(function, x):\n x = np.asarray(x)\n assert x.ndim == 1, \"x must be a vector\"\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests differentiation in TensorFlow by computing the Jacobian of the expanded matrix with respect to the canonical matrix. | def test_tf(self, i, base_matrix, tol):
import tensorflow as tf
base_matrix = tf.Variable(base_matrix)
with tf.GradientTape() as tape:
res = self.func_for_autodiff(base_matrix)
jac = tape.jacobian(res, base_matrix)
assert np.allclose(jac, self.expected_autodiff[i], ... | [
"def J_dense(x): # dense Jacobian\n return np.array([[1.004, -1e3*x[2], -1e3*x[1]],\n [-0.004, 1.0 + 1e3*x[2] + 60.0*x[1], 1e3*x[1]],\n [0.0, -60.0*x[1], 1.0]])",
"def test_FullOrderRecovery(self):\n Q = tf.linalg.diag([1.0, 2.0, 3.0])\n def Qv(v):\n r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
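These four rows differentiate the same `func_for_autodiff` through autograd, torch, JAX and TensorFlow; the function itself is not shown. A self-contained JAX miniature of the pattern, with kron-by-identity standing in for the matrix expansion (an illustrative stand-in, not the source's function):

```python
import jax
import jax.numpy as jnp

def expand_to_two_qubits(m):
    # toy analogue of expand_matrix(m, wires=[1], wire_order=[0, 1])
    return jnp.kron(jnp.eye(2), m)

jac = jax.jacobian(expand_to_two_qubits)(jnp.eye(2))
assert jac.shape == (4, 4, 2, 2)  # d(expanded)_ij / d(base)_kl
```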
Test that a broadcasted 2 qubit gate on consecutive wires correctly expands to 4 qubits. | def test_expand_two_consecutive_wires_broadcasted(self, tol):
U2 = np.array([[0, 1, 1, 1], [1, 0, 1, -1], [1, -1, 0, 1], [1, 1, -1, 0]]) / np.sqrt(3)
U2 = np.tensordot([2.31, 1.53, 0.7 - 1.9j], U2, axes=0)
# test applied to wire 0+1
res = qml.operation.expand_matrix(U2, [0, 1], [0, 1, 2... | [
"def test_6q_circuit_20q_coupling(self):\n # ┌───┐┌───┐┌───┐┌───┐┌───┐\n # q0_0: ┤ X ├┤ X ├┤ X ├┤ X ├┤ X ├\n # └─┬─┘└─┬─┘└─┬─┘└─┬─┘└─┬─┘\n # q0_1: ──┼────■────┼────┼────┼──\n # │ ┌───┐ │ │ │\n # q0_2: ──┼──┤ X ├──┼────■────┼──\n # ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that a broadcasted 3 qubit gate on nonconsecutive nonascending wires correctly expands to 4 qubits | def test_expand_three_nonconsecutive_nonascending_wires_broadcasted(self, tol):
# test applied to wire 3, 1, 2
res = qml.operation.expand_matrix(Toffoli_broadcasted, [3, 1, 2], [0, 1, 2, 3])
# change the control qubit on the Toffoli gate
rows = [0, 4, 1, 5, 2, 6, 3, 7]
Toffoli_br... | [
"def test_5q_circuit_20q_coupling(self):\n # ┌───┐\n # q_0: ──■───────┤ X ├───────────────\n # │ └─┬─┘┌───┐\n # q_1: ──┼────■────┼──┤ X ├───────■──\n # ┌─┴─┐ │ │ ├───┤┌───┐┌─┴─┐\n # q_2: ┤ X ├──┼────┼──┤ X ├┤ X ├┤ X ├\n # └─... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that the method is used correctly with a broadcasted matrix by defining a dummy operator and checking the permutation/expansion. | def test_expand_matrix_usage_in_operator_class_broadcasted(self, tol):
perm = [0, 2, 1, 3]
permuted_matrix = self.base_matrix_2_broadcasted[:, perm][:, :, perm]
expanded_matrix = np.tensordot(
np.tensordot(
np.kron(SWAP, I),
np.kron(I_broadcasted, se... | [
"def test_no_expansion_broadcasted(self):\n res = qml.operation.expand_matrix(\n self.base_matrix_2_broadcasted, wires=[0, 2], wire_order=[0, 2]\n )\n assert np.allclose(self.base_matrix_2_broadcasted, res)",
"def test_permutation_broadcasted(self):\n res = qml.operation.exp... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that `args_to_gan_model` produces correctly named functions. | def testargs_to_gan_model_name(self):
def loss_fn(x):
return x
new_loss_fn = args_to_gan_model(loss_fn)
self.assertEqual('loss_fn', new_loss_fn.__name__)
self.assertTrue('The gan_model version of' in new_loss_fn.__docstring__) | [
"def _args_to_gan_model(loss_fn):\n # Match arguments in `loss_fn` to elements of `namedtuple`.\n # TODO(joelshor): Properly handle `varargs` and `keywords`.\n argspec = tf_inspect.getargspec(loss_fn)\n defaults = argspec.defaults or []\n\n required_args = set(argspec.args[:-len(defaults)])\n args... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that optional args can be changed with tuple losses. | def test_tuple_respects_optional_args(self):
tuple_type = collections.namedtuple('fake_type', ['arg1', 'arg2'])
def args_loss(arg1, arg2, arg3=3):
return arg1 + 2 * arg2 + 3 * arg3
loss_fn = args_to_gan_model(args_loss)
loss = loss_fn(tuple_type(arg1=-1, arg2=2), arg3=4)
# If `arg3` were no... | [
"def test_HoLE_args():\n testing_function_with_args('hole')",
"def test_ignorearg(self):\n self.assertEqual(check_args(self.ignorearg), {})",
"def test_check_args_correct_args(self):\n\n retval = check_args([1, 2, 3, 4, 5, 6])\n self.assertEqual(0, retval)",
"def test_error_ignored_arg... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test the input model type for `cycle_consistency_loss`. | def test_model_type(self):
with self.assertRaises(ValueError):
tfgan.losses.cycle_consistency_loss(self._model_x2y) | [
"def test_correct_loss(self):\n loss = tfgan.losses.cycle_consistency_loss(\n tfgan.CycleGANModel(\n model_x2y=self._model_x2y,\n model_y2x=self._model_y2x,\n reconstructed_x=tf.constant([9, 8], dtype=tf.float32),\n reconstructed_y=tf.constant([7, 2], dtype=tf.f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test the output of `cycle_consistency_loss`. | def test_correct_loss(self):
loss = tfgan.losses.cycle_consistency_loss(
tfgan.CycleGANModel(
model_x2y=self._model_x2y,
model_y2x=self._model_y2x,
reconstructed_x=tf.constant([9, 8], dtype=tf.float32),
reconstructed_y=tf.constant([7, 2], dtype=tf.float32)))
... | [
"def test_model_type(self):\n with self.assertRaises(ValueError):\n tfgan.losses.cycle_consistency_loss(self._model_x2y)",
"def testConvergence(self):\n synthetic_test = Synthetic()\n # Silence output of fit\n save_stdout = sys.stdout\n sys.stdout = open( os.devnull, 'w' )\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test StarGAN generator loss wrapper. | def test_stargan_generator_loss_wrapper(self):
loss_fn = tfgan.losses.wargs.wasserstein_generator_loss
wrapped_loss_fn = tfgan.losses.stargan_generator_loss_wrapper(loss_fn)
loss_result_tensor = loss_fn(
self.discriminator_generated_data_source_predication)
wrapped_loss_result_tensor = wrapped_... | [
"def generator_loss(discriminator_gen_outputs):\n loss = tf.losses.sigmoid_cross_entropy(\n tf.zeros_like(discriminator_gen_outputs), discriminator_gen_outputs)\n return loss",
"def test_stargan_discriminator_loss_wrapper(self):\n loss_fn = tfgan.losses.wargs.wasserstein_discriminator_loss\n w... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test StarGAN discriminator loss wrapper. | def test_stargan_discriminator_loss_wrapper(self):
loss_fn = tfgan.losses.wargs.wasserstein_discriminator_loss
wrapped_loss_fn = tfgan.losses.stargan_discriminator_loss_wrapper(loss_fn)
loss_result_tensor = loss_fn(
self.discriminator_generated_data_source_predication,
self.discriminator_ge... | [
"def loss(alpha_star, alpha, mu_star, mu, l, r):\n d_real = discriminator_expectation(alpha, mu_star, l, r)\n d_fake = 1 - discriminator_expectation(alpha, mu, l, r)\n return d_real + d_fake",
"def ls_discriminator_loss(scores_real, scores_fake):\n \n loss = None\n \n ########################... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test StarGAN gradient penalty wrapper. | def test_stargan_gradient_penalty_wrapper(self):
if tf.executing_eagerly():
# Can't use `tf.gradient` when executing eagerly
return
loss_fn = tfgan.losses.wargs.wasserstein_gradient_penalty
tfgan.losses.stargan_gradient_penalty_wrapper(loss_fn)
wrapped_loss_fn = tfgan.losses.stargan_gradient... | [
"def test_policy_gradient(self):\n model = VanillaPolicyGradient(self.hparams.env)\n self.trainer.fit(model)",
"def gradient(self, var, bayesianOptimizer):\n pass",
"def test_fast_gradient_method():\n context.set_context(mode=context.GRAPH_MODE, device_target=\"Ascend\")\n input_np = np.a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates a learning rate decay operation in TensorFlow using inverse time decay | def learning_rate_decay(alpha, decay_rate, global_step, decay_step):
train = tf.train.inverse_time_decay(alpha, global_step, decay_step,
decay_rate, staircase=True)
return train | [
"def _decay(self):\n costs = []\n for var in tf.trainable_variables():\n if var.op.name.find(r'DW') > 0:\n costs.append(tf.nn.l2_loss(var))\n # tf.summary.histogram(var.op.name, var)\n\n self.wdec = tf.add_n(costs)\n return tf.multiply(self.args.weight_decay, self.wdec)",
"def learn... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
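`tf.train.inverse_time_decay` computes `alpha / (1 + decay_rate * global_step / decay_step)`, flooring the division when `staircase=True`. A pure-Python sketch of the schedule the row creates:

```python
import math

def inverse_time_decay(alpha, decay_rate, global_step, decay_step, staircase=True):
    t = global_step / decay_step
    if staircase:
        t = math.floor(t)  # decay in discrete jumps, as in the row above
    return alpha / (1 + decay_rate * t)

print([round(inverse_time_decay(0.1, 1.0, step, 10), 4) for step in range(0, 40, 10)])
# [0.1, 0.05, 0.0333, 0.025]
```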
Fetches Distinct locations for selected biomimic type, country and state_province | def fetch_distinct_locations(self, query_dict):
cursor = self.connection.cursor()
query = """SELECT DISTINCT geo.location
FROM `cnx_logger` log
INNER JOIN `cnx_logger_biomimic_type` biotype
ON biotype.`biomimic_id`=log.`biomimic_id`
... | [
"def get_locations_choices():\n locations = [(loc.id, '%s, %s' % (loc.country.title(), loc.city.title()))\n for loc in db_queries.get_all_locations()]\n return locations",
"def all_locations(self) -> List[Tuple[str, str, str]]:\n query = \"\"\"\n SELECT\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fetches Distinct Subzones for selected biomimic type, country, state_province, location and zones | def fetch_distinct_sub_zones(self, query_dict):
cursor = self.connection.cursor()
query = """SELECT DISTINCT prop.sub_zone
FROM `cnx_logger` log
INNER JOIN `cnx_logger_biomimic_type` biotype
ON biotype.`biomimic_id`=log.`biomimic_id`
... | [
"def _fetch_all_zones(self):\n query = tables.zones.select()\n return self.storage.session.execute(query).fetchall()",
"def allCountries():",
"def subdivisions(self, *args, search_term=None, ordering='name'):\n return CountrySubdivision.list_for_country(\n country_code=self.alpha... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fetches Distinct Wave Exp for selected biomimic type, country, state_province, location, zones and sub_zones | def fetch_distinct_wave_exposures(self, query_dict):
cursor = self.connection.cursor()
query = """SELECT DISTINCT prop.wave_exp FROM `cnx_logger` log
INNER JOIN `cnx_logger_biomimic_type` biotype
ON biotype.`biomimic_id`=log.`biomimic_id`
INNER JO... | [
"def get_observations(self):\n self.config.validate()\n log.info(\"Fetching observations.\")\n datastore_path = make_path(self.settings[\"observations\"][\"datastore\"])\n if datastore_path.is_file():\n datastore = DataStore().from_file(datastore_path)\n elif datastore_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Builds the where_condition for the Select Query | def build_where_condition(self, query_dict):
analysis_type = query_dict.get("analysis_type")
where = (" WHERE biotype.`biomimic_type`=\'%s\'") % query_dict['biomimic_type']
if query_dict.get('country') is not None:
where += " AND geo.`country`=\'%s\'" % (query_dict['country'])
... | [
"def _where(self):\n result = []\n result.extend(self._partition_selector())\n result.extend(self._job_and_fuzzer_selector())\n\n result = ' AND '.join(result)\n if result:\n return 'WHERE ' + result\n\n return ''",
"def BuildSubQuery(self):\n MultipleValuePairs = ''\n #This i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Insert new Biomimic Type Data in DB | def insert_biomimic_type_data(self, cursor, biomimic_type):
corrupt = False
query = """ INSERT INTO `cnx_logger_biomimic_type` (`biomimic_type`)
VALUES (%s)"""
try:
res = cursor.execute(query, (biomimic_type,))
except MySQLdb.Error:
res = 0
... | [
"def insert(self, sql):",
"def create_data_type():\n logger.info('Creating Data Types..')\n\n data_codes = ['DAILY', 'INTRADAY']\n data_description = ['Data for a 24 period', 'Data for a 1 minute perioo']\n\n for code, description in zip(data_codes, data_description):\n DataType.objects.update_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Insert new Properties Data in DB | def insert_properties_data(self, cursor, record):
corrupt = False
values = " VALUES (\'%s\'" % record.get("zone")
if record.get('sub_zone') is None:
values += ", NULL"
else:
values += ", \'%s\'" % record.get("sub_zone")
if record.get('wave_exp') is None:
... | [
"def insert_properties(self, properties: list) -> None:\n self.properties.insert_many(properties)",
"def insert_data(self, data:dict,):\n \n assert(isinstance(data, dict))\n field, value = \"\", \"\"\n for key in data.keys():\n field += key + ', '\n value +... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check for Duplicate Microsite Id in DB | def check_for_duplicate(self, cursor, microsite_id):
query = """SELECT `logger_id`
FROM `cnx_logger`
WHERE `microsite_id`=%s"""
cursor.execute(query, (microsite_id,))
results = cursor.fetchall()
results = list(results)
return len(results) > 0 | [
"def flag_duplicate_in_whole_db():\n pubs = Publication.objects.all()\n duplicates = get_duplicate_pubs(pubs)\n for pub in pubs:\n if pub in duplicates:\n with transaction.atomic():\n pub.status = Publication.STATUS_DUPLICATE\n pub.checked_for_duplicates = Fa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse new Logger Temperature Data | def parse_logger_temp(self, data_list):
parsed_record = dict()
error = False
if (len(data_list) != 2) or \
(self.is_not_float(data_list[1])) or \
(data_list[0] == "None") or \
(data_list[0] == ""):
error = True
else:
# handle datet... | [
"def process_temperature():\n \n \"\"\"for mutliple Sensors\"\"\"\n\n for SENSOR in W1ThermSensor.get_available_sensors():\n\tlogging.info(\"Sensor %s has temperature %.2f\" % (SENSOR.id, SENSOR.get_temperature()))\n \tG.labels(\"%s\" % SENSOR.id).set(\"%.2f\" % SENSOR.get_temperature())",
"def read_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
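`is_not_float` is called by the row above but never shown. A common implementation under that assumption (hypothetical, not the source's):

```python
def is_not_float(value):
    """True when value cannot be parsed as a float."""
    try:
        float(value)
        return False
    except (TypeError, ValueError):
        return True

assert is_not_float("abc") and not is_not_float("21.5")
```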
Inserts new Logger Temperature in DB | def insert_logger_temp(self, records, logger_id):
# records is a list of dict
cursor = self.connection.cursor()
proper_counter = 0
query = """INSERT IGNORE INTO `cnx_logger_temperature` (`logger_id`, `Time_GMT`, `Temp_C`)
VALUES (%s, %s, %s)"""
values = [(logge... | [
"def log_temperature(temp):\n \n conn = sqlite3.connect(dbname)\n curs = conn.cursor()\n\n curs.execute(\"INSERT INTO temps values(datetime('now', 'localtime'), '{0}', '{1}' )\".format(temp['temperature'], temp['id']))\n\n conn.commit()\n conn.close()",
"def insert():\n\t#get values sent from se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
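The truncated tail of the row presumably builds one `(logger_id, Time_GMT, Temp_C)` tuple per record and hands them to the DB-API bulk insert. A hedged sketch of that pattern (the field names mirror the visible query placeholders; the actual tail is cut off):

```python
records = [{"Time_GMT": "2021-07-01 12:00:00", "Temp_C": 21.5}]  # illustrative
logger_id = 42                                                   # illustrative
values = [(logger_id, r["Time_GMT"], r["Temp_C"]) for r in records]
# cursor.executemany(query, values)  # one parametrized INSERT IGNORE per tuple
```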
Fetch logger_id according to microsite_id | def find_microsite_id(self, microsite_id):
cursor = self.connection.cursor()
query = """SELECT `logger_id` as 'logger_id' FROM `cnx_logger` WHERE microsite_id=%s"""
cursor.execute(query, (microsite_id,))
results = cursor.fetchone()
cursor.close()
if results is None:
... | [
"def check_for_duplicate(self, cursor, microsite_id):\n query = \"\"\"SELECT `logger_id`\n FROM `cnx_logger`\n WHERE `microsite_id`=%s\"\"\"\n cursor.execute(query, (microsite_id,))\n results = cursor.fetchall()\n results = list(results)\n retur... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Updates the summary table with the count and the min/max dates of the temperature records inserted for the given microsite id | def update_summary_table(self, logger_id):
cursor = self.connection.cursor()
select_query = ("""SELECT COUNT(*), MIN(Time_GMT), MAX(Time_GMT)
FROM cnx_logger_temperature WHERE logger_id=%s""")
cursor.execute(select_query, (logger_id,))
select_results = cursor.... | [
"def update_all():\r\n \r\n # Delete everything in summary table\r\n q_string = \"\"\"\r\n\tTRUNCATE summary;\r\n \"\"\"\r\n try:\r\n cursor.execute(q_string)\r\n except:\r\n print(\"ERROR: Could not delete summary table data\")\r\n sys.exit()\r\n print(\"Summary table trun... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the underlying neural network. | def neural_network(self):
return self._neural_network | [
"def inference_network(self):\n return self.DNN",
"def get_network(self) -> nx.Graph:\n return self.graph",
"def train_network(self):\n return self._train_network",
"def build(self) -> NeuralNetworkModel:\n return NeuralNetworkModel(\n self.node_counts, self.activation_f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the warm start flag. | def warm_start(self) -> bool:
return self._warm_start | [
"def warm_mist_enabled(self):\n if self.warm_mist_feature:\n return self.details['warm_mist_enabled']\n return False",
"def start_training(self):\n if self.minibatch_method == 'random' or self.minibatch_method == 'prioritized':\n start = False if len(self.experience_repl... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the warm start flag. | def warm_start(self, warm_start: bool) -> None:
self._warm_start = warm_start | [
"def startOnly(self):\n self.begin=time.time()",
"def _starting(self):\n \n self.__state = runlevel.STATE_STARTING",
"def start_new_minibatch(self):\n\n return True",
"def set_train(self):\n BaseModule.train_flag = True",
"def startAt(self, startkey):\n if startkey:... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns current initial point | def initial_point(self) -> np.ndarray:
return self._initial_point | [
"def _choose_initial_point(self) -> np.ndarray:\n if self._warm_start and self._fit_result is not None:\n self._initial_point = self._fit_result.x\n elif self._initial_point is None:\n self._initial_point = algorithm_globals.random.random(self._neural_network.num_weights)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the initial point | def initial_point(self, initial_point: np.ndarray) -> None:
self._initial_point = initial_point | [
"def set_zero_point(self):\n self.current_position = 0.0\n self.goal_position = 0.0",
"def setStartingPoint(self, *args) -> \"void\":\n return _coin.SoDragger_setStartingPoint(self, *args)",
"def _choose_initial_point(self) -> np.ndarray:\n if self._warm_start and self._fit_result is not Non... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
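The getter/setter pairs in the last few rows (`warm_start`, `initial_point`) are `@property` accessors lifted out of their class. A minimal sketch of the pattern they come from; the class name here is illustrative, not the source's:

```python
import numpy as np

class TrainableModel:
    def __init__(self):
        self._warm_start = False
        self._initial_point = None

    @property
    def initial_point(self) -> np.ndarray:
        """Returns current initial point"""
        return self._initial_point

    @initial_point.setter
    def initial_point(self, initial_point: np.ndarray) -> None:
        """Sets the initial point"""
        self._initial_point = initial_point

model = TrainableModel()
model.initial_point = np.zeros(3)  # attribute assignment routes through the setter
assert model.initial_point.shape == (3,)
```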
Returns trained weights as a numpy array. The weights can also be queried by calling `model.fit_result.x`, but in this case their representation depends on the optimizer used. | def weights(self) -> np.ndarray:
self._check_fitted()
return np.asarray(self._fit_result.x) | [
"def get_weights(self):\r\n return self.weights # returning the weight matrix\r",
"def model_weights_as_vector(model):\r\n weights_vector = []\r\n\r\n for layer in model.layers: # model.get_weights():\r\n if layer.trainable:\r\n la... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Choose an initial point for the optimizer. If warm start is set and the model is already trained then use a fit result as an initial point. If initial point is passed, then use this value, otherwise pick a random location. | def _choose_initial_point(self) -> np.ndarray:
if self._warm_start and self._fit_result is not None:
self._initial_point = self._fit_result.x
elif self._initial_point is None:
self._initial_point = algorithm_globals.random.random(self._neural_network.num_weights)
return s... | [
"def set_initial_location(self):\n self.changed = True\n self.new_location = (np.random.rand(self.num_dimensions) * (self.mx-self.mn)) + self.mn\n # random initial velocities of swarm\n self.velocities[0, :] = (np.random.rand(self.num_dimensions) * (self.mx-self.mn)) + self.mn",
"def _... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Wraps the given `ObjectiveFunction` to add callback calls, if `callback` is not None, along with evaluating the objective value. The returned objective function is passed to `Optimizer.minimize()`. | def _get_objective(
self,
function: ObjectiveFunction,
) -> Callable:
if self._callback is None:
return function.objective
def objective(objective_weights):
objective_value = function.objective(objective_weights)
self._callback(objective_weights, ... | [
"def minimize(self, cost_function, initial_params=None, callback=None):\n\n if self.keep_value_history:\n cost_function = recorder(cost_function)\n\n jacobian = None\n if hasattr(cost_function, \"gradient\") and callable(\n getattr(cost_function, \"gradient\")\n ):\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
CNN model based on the paper. | def CNN_paper(input_shape):
inputs = Input(shape=input_shape)
x = Conv2D(96, kernel_size=(3,3), padding='same', strides=1, kernel_initializer='he_normal')(inputs)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Conv2D(96, kernel_size=(3,3), padding='same', strides=1, kernel_initiali... | [
"def construct_model():\n #Building the MLP\n \n model = Sequential()\n model.add(Flatten(input_shape=(64,64,3)))\n model.add(Dense(1000, activation=\"relu\"))\n model.add(Dropout(0.2))\n model.add(Dense(512, activation=\"relu\"))\n model.add(Dropout(0.2))\n model.add(Dense(3, activation=... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create new TXs at rate lambda and do PoW | def issue_txs(self, Time):
if MODE[self.NodeID]>0:
if MODE[self.NodeID]==2:
if self.BackOff:
self.LastIssueTime += TAU#BETA*REP[self.NodeID]/self.Lambda
while Time+STEP >= self.LastIssueTime + self.LastIssueWork/self.Lambda:
... | [
"def block(max_number_of_txns, exp_time):\n blk = {'transactions':[transaction(randrange(2, max_txt_length)) for i in range(randrange(1, max_number_of_txns))], 'time':exp_time}\n return blk",
"def handle(self, *args, **options):\n number_accounts_per_node = 150\n\n nodes_list = get_nodes()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Implements uniform random tip selection | def select_tips(self):
ValidTips = [tip for tip in self.TipsSet if self.ValidTips[tip.NodeID]]
if len(ValidTips)>1:
Selection = sample(ValidTips, 2)
elif len(self.Ledger)<2:
Selection = [self.Ledger[0]]
else:
Selection = self.Ledger[-2:-1]
... | [
"def random_subset(self, perc=0.5):",
"def sample_random_node(self):\n #Naive Approach \n return self.tree[int(self.rng.random()*len(self.tree))] # OUT OF BOUNDS ERRORS? Check this",
"def rand_temp():\n return BASE_T + random() * RAND_MULT",
"def random_states(T, n, cutoff=25, qutip=True):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Schedule txs from the inbox at a fixed deterministic rate NU | def schedule_txs(self, Time):
# sort inboxes by arrival time
self.Inbox.AllPackets.sort(key=lambda p: p.EndTime)
self.Inbox.SolidPackets.sort(key=lambda p: p.EndTime)
for NodeID in range(NUM_NODES):
self.Inbox.Packets[NodeID].sort(key=lambda p: p.Data.IssueTime)
... | [
"def schedule_safety_net_messages():\n\tsnc = SafetyNetCenter()\n\tnow = datetime.datetime.now()\n\tsnc.schedule_safety_net_messages(\n\t\twindow_start=now - snc.window, \n\t\twindow_finish=now, \n\t\tthreshold=snc.threshold, \n\t\ttimeout=snc.timeout)",
"def tick_scheduler():\n\n scheduler.tick()",
"def... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate the robot's inverse kinematics (IK) for a given frame. The IK for 6-axis industrial robots returns by default 8 possible solutions. These solutions have an order. That means that if you call IK on two subsequent frames and compare the 8 configurations of the first frame with the 8 configurations of the second f... | def inverse_kinematics(self, robot, frame_WCF, start_configuration=None, group=None, options=None):
options = options or {}
solver = options.get("solver")
if solver:
self.planner = PLANNER_BACKENDS[solver]()
elif not self.planner: # no solver, no planner
raise V... | [
"def inverse_kinematics_offset_wrist(frame, params, q6_des=0.0):\n\n ZERO_THRESH = 0.00000001\n d1, a2, a3, d4, d5, d6 = params\n\n solutions = []\n\n T02, T12, T22 = frame.xaxis\n T00, T10, T20 = frame.yaxis\n T01, T11, T21 = frame.zaxis\n T03, T13, T23 = frame.point\n T02 *= -1\n T03 *=... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Call kubernetes api container to retrieve the deployment id | def get_deployment_id():
try:
config.load_incluster_config()
nodes = client.CoreV1Api().list_node(watch=False)
if len(nodes.items) > 0:
return nodes.items[0].metadata.labels.get("hyperpilot/deployment", "")
except config.ConfigException:
print("Failed to load configur... | [
"def test_get_deployment_by_id(self):\n response = self.client.open(\n '/RadonCTT/deployment/{deploymentId}'.format(deployment_id=789),\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def get_id(dic):\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Call kubernetes api container to retrieve the deployment id | def deployment_id(self):
try:
config.load_incluster_config()
nodes = client.CoreV1Api().list_node(watch=False)
if len(nodes.items) > 0:
return nodes.items[0].metadata.labels.get("hyperpilot/deployment", "")
except config.ConfigException:
pr... | [
"def get_deployment_id():\n try:\n config.load_incluster_config()\n nodes = client.CoreV1Api().list_node(watch=False)\n if len(nodes.items) > 0:\n return nodes.items[0].metadata.labels.get(\"hyperpilot/deployment\", \"\")\n except config.ConfigException:\n print(\"Failed... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a deck of 108 Uno cards. Return a list of UnoCard namedtuples (for cards w/o suit use None in the namedtuple) | def create_uno_deck():
cards = [UnoCard(suit, face) for suit, face in product(SUITS, [0])]
two_of = chain(range(1, 10), 'Draw Two,Skip,Reverse'.split(','))
cards.extend(UnoCard(suit, face) for suit, face in product(SUITS, two_of) for _ in range(2))
four_of = 'Wild,Wild Draw Four'.split(',')
car... | [
"def create_deck():\n card_deck = []\n for x in range(6):\n for suit in ('H', 'S', 'C', 'D'):\n for rank in range(2, 11):\n card_deck.append((str(rank) + str(suit)))\n for face_cards in ('A', 'J', 'Q', 'K'):\n card_deck.append((str(face_cards) + str(s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
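The row is truncated before the wild cards, but the 108-card total can be checked by arithmetic: each of the four suits contributes one 0 and two copies each of 1–9, Draw Two, Skip and Reverse, and there are four Wild and four Wild Draw Four cards (suit `None`). A two-line sanity check:

```python
per_suit = 1 + 2 * (9 + 3)  # one 0, two each of 1-9, Draw Two, Skip, Reverse
assert 4 * per_suit + 4 + 4 == 108  # four suits, plus 8 wild cards
```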
Compute the squared HS distance between the autocovariance operators of two time series: || \\scov^{(y)}_{\\tau} - \\scov^{(x)}_{\\tau} ||_{HS}^2 = 1/T^2 ( Tr(K_1 K_1^\\tau) + Tr(K_2 K_2^\\tau) - 2 Tr(K_{1,2} K_{2,1}^\\tau) ) | def distance_hsac_truncated(K, T1, T2, tau=1):
assert tau <= min(T1 / 2.0, T2 / 2.0), "Too big tau"
# define the truncated matrices of the non-shifted series
K1 = K[:T1 - tau, :T1 - tau]
K2 = K[T1:T1 + T2 - tau, T1:T1 + T2 - tau]
K12 = K[:T1 - tau, T1:T1 + T2 - tau]
# define the truncated matric... | [
"def pseudochi2(ssys,sK):\n self.ssys = ssys\n self.sK = sK\n \n # Compute CLs value at the signal value that is supposed to be the observed and expected and limit\n # Should be as close to 0.05 \n expCL = self.getCL_direct(s=self.explim,method='simulate',N=100000,nobs=s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the components 1/T^2 (tr1, tr2, tr12) of HSAC. mode {"truncated"/"cyclic"} defines the way to compute HSAC | def distance_hsac_decomp(K, T1, T2, tau=1, mode="truncated"):
assert mode in ["truncated", "cyclic"], "Unknown HSAC mode (%s)" % mode
assert T1 == T2, "the series should be of same duration"
assert tau <= T1 / 2.0, "Too big tau"
T = T1
if mode == "truncated":
# define the truncated matrices ... | [
"def distance_hsac_truncated(K, T1, T2, tau=1):\n assert tau <= min(T1 / 2.0, T2 / 2.0), \"Too big tau\"\n # define the truncated matrices of the non-shifted series\n K1 = K[:T1 - tau, :T1 - tau]\n K2 = K[T1:T1 + T2 - tau, T1:T1 + T2 - tau]\n K12 = K[:T1 - tau, T1:T1 + T2 - tau]\n # define the tru... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
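The three trace terms in the two HSAC rows come from a general identity: the squared Hilbert-Schmidt (Frobenius) norm of a difference expands as ||A − B||²_HS = Tr(AAᵀ) + Tr(BBᵀ) − 2 Tr(ABᵀ). A NumPy check of that identity:

```python
import numpy as np

rng = np.random.default_rng(0)
A, B = rng.normal(size=(5, 5)), rng.normal(size=(5, 5))

lhs = np.linalg.norm(A - B, 'fro') ** 2
rhs = np.trace(A @ A.T) + np.trace(B @ B.T) - 2 * np.trace(A @ B.T)
assert np.isclose(lhs, rhs)
```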
Processes a column of the series, computing the values of all possible transformations for all time intervals. Returns the list of actions (dicts) to index in Elasticsearch | def process_column(col, index):
# Filter out leading/trailing null values
col = col[col.first_valid_index():col.last_valid_index()]
orig_freq = col.index.freq
series_id = col.name
actions = []
# List of pandas time intervals, IN ORDER
freqs = constants.PANDAS_FREQS
if orig... | [
"def map_func(row):\n return (row[\"device_id\"], row[\"indexed_ts\"]), row",
"def _extract(self, ix, column):\n _df = (\n self.df[ix][self.years + [column]]\n .set_index(column)\n .transpose()\n .applymap(convert)\n )\n _df.columns.name = \"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sum and product of two numbers | def sumproduct(x, y):
return (x+y, x*y) | [
"def product(num1, num2):\n\treturn num1 * num2",
"def calculate_product(val1, val2):\n return val1 * val2",
"def prod(numbers: Sequence[number_t]) -> number_t:\n return reduce(_operator.mul, numbers)",
"def product(numbers):\n return reduce(mul, numbers, 1)",
"def scalar_prod(a,b):\n res=0\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
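A one-line usage check of the row above:

```python
assert sumproduct(3, 4) == (7, 12)  # sum first, product second
```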
This function takes in the 3D matrix of the satellite data, as well as the minimum values of the latitude and longitude, the spacing and shape of the common grid. For each band it creates an empty array which has the size of the common grid. It creates indices for lat and lon from the original satellite image. Then it popu... | def regridding(image_data, min_lon, min_lat, spacing, shape_common_grid):
# create an empty m x n array for each channel
band_data = np.zeros((shape_common_grid)) ####define for each band
band_data = band_data[0,:,:]
band1_data = copy.copy(band_data) #band_data[0,:,:]
band2_data = copy.copy(band_data) #band_d... | [
"def interpolate(m: np.ndarray, n: np.ndarray, sgrid: np.ndarray, points_on_sphere: np.ndarray, radius: np.ndarray):\n #print(\"Interpolate\")\n\n\n # =========================\n center_grid = np.zeros((m.shape[0],3))\n east_grid = np.zeros((m.shape[0],3))\n south_grid = np.zeros((m.shape[0],3))\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a table name for a content type (remove periods). | def _table_name_for(content_type):
return content_type.replace('.', '') | [
"def __generate_table_name__(self, table, username, end_point_type):\n # If private endpoint\n if end_point_type == 'private':\n if username != \"\":\n return table + \"_\" + username\n else:\n return table\n else:\n return table",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
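A usage check for the row above, with an Office 365-style audit content type as the example input:

```python
assert _table_name_for('Audit.AzureActiveDirectory') == 'AuditAzureActiveDirectory'
```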
Audit logs tend to have periods (.) in their column names. Take those out. If a log column has the same name as an existing column in the database, but the capitalization doesn't match, rename the column to the existing one. Otherwise SQL will throw an error for duplicate column names. | def _validate_column_names(df):
to_rename = {}
for column in df:
if '.' in column:
to_rename[column] = column.replace('.', '')
return df.rename(columns=to_rename) | [
"def normalize_col_name(self, col_name, used_column_names, is_relation):\n field_params = {}\n field_notes = []\n\n new_name = clean_utf8(col_name)\n new_name = col_name.lower()\n if new_name != col_name:\n field_notes.append('Field name made lowercase.')\n\n if ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Not all audit logs have all available columns. The columns in the database might change as logs come in. Check whether all columns in a log already exist in the current table. | def _validate_existing_columns(self, df, content_type, engine):
if inspect(engine).has_table(self._table_name_for(content_type=content_type)):
new_cols = df.columns.tolist()
missing_cols = set(new_cols) - set(self._existing_columns_for(content_type, engine=engine))
return not... | [
"def checkColumns(self, row, columns, log):\n rescols = set(row.keys())\n cols = set(columns.values())\n if not rescols >= cols:\n log.error(\n \"result missing columns: '%s'\",\n \",\".join(cols.difference(rescols)),\n )\n return F... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Different logs sometimes have identical column names but with different capitalization (for some reason); merge these columns. | def _deduplicate_columns(df):
to_check = df.columns.tolist()
leading_columns = []
to_merge = collections.defaultdict(collections.deque)
for column in to_check:
for leading_column in leading_columns:
if column.lower() == leading_column.lower() and column != lea... | [
"def _clean_column_names(self):\n self.logger.info(\"Set up column name cleaning.\")\n self.pipeline.steps.append(\n (\"clean_column_names\", TransformerWrapper(CleanColumnNames()))\n )",
"def _joined_names_column(df):\n return df.apply(\n lambda row: ','.join(set([\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
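The row's `_deduplicate_columns` is cut off before the merge itself. One way such a merge could work, as a minimal pandas sketch (an assumption about the mechanics, not the source's implementation): keep the first spelling seen for each lowercased name and fill its gaps from later case-variant duplicates.

```python
import pandas as pd

def merge_case_duplicates(df):
    lead_for = {}  # lowercased name -> leading column name
    merged = {}
    for col in df.columns:
        key = col.lower()
        if key in lead_for:
            lead = lead_for[key]
            # fill gaps in the leading column from the case-variant duplicate
            merged[lead] = merged[lead].combine_first(df[col])
        else:
            lead_for[key] = col
            merged[col] = df[col]
    return pd.DataFrame(merged)

df = pd.DataFrame({"UserId": [1, None], "userid": [None, 2]})
assert list(merge_case_duplicates(df).columns) == ["UserId"]
```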
Write cached logs to database for a content type. | def _process_cache(self, content_type):
df = pandas.DataFrame(self.results_cache[content_type])
df = self._validate_column_names(df=df)
df = self._validate_column_value(df=df)
table_name = self._table_name_for(content_type=content_type)
engine = create_engine(self.connection_str... | [
"def insert_to_cache(params,content):\n\tdir_name = get_dir_name(params)\n\tf = open(cache_folder+dir_name+'/data.csv',\"w\")\n\tf.write(content)\n\tf.close()",
"def _writeTmpCacheToCache(self, tmpCache, type_):\n cursor = self._conn.cursor()\n for index in tmpCache:\n data = tmpCache[ind... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test the trained naive Bayes model (self.prior and self.likelihood) on the testing dataset by performing maximum a posteriori (MAP) classification. The accuracy is computed as the average correctness when comparing the predicted label against the true label. | def test(self, test_set, test_label):
# YOUR CODE HERE
accuracy = 0
pred_label = np.zeros((len(test_set)))
probs = np.zeros((len(test_set)))
# predict every sample X by likelihood
for X_idx, X in tqdm(enumerate(test_set), total=len(pred_label), desc='BAYES MODEL TEST'):
... | [
"def test(self, file_dir=\"training_data\"):\n print(\"loading testing data\")\n test_data = MNIST(file_dir)\n img, lbl = test_data.load_testing()\n\n correct = 0\n for i in range(0, len(img)):\n self.classify(img[i])\n b = np.where(self.activations[-1] == ma... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the feature likelihoods for high intensity pixels for each of the classes, by summing the probabilities of the top 128 intensities at each pixel location. | def intensity_feature_likelihoods(self, likelihood):
# YOUR CODE HERE
feature_likelihoods = np.sum(likelihood[:, 128:256, :], axis=1)
return feature_likelihoods | [
"def top2gating(logits: torch.Tensor, capacity_factor: float) ->Tuple[Tensor, Tensor, Tensor, Tensor]:\n gates = F.softmax(logits, dim=1)\n num_tokens = gates.shape[0]\n num_experts = gates.shape[1]\n capacity = math.ceil(2 * num_tokens / num_experts * capacity_factor)\n indices1_s = torch.argmax(gat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
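The slice in the row keeps intensity bins 128–255 (the "high" half of a 0–255 range) and sums over that axis. A shape-oriented example with illustrative dimensions (784 pixels as in 28×28 MNIST, 10 classes):

```python
import numpy as np

likelihood = np.random.default_rng(0).random((784, 256, 10))  # (pixel, intensity, class)
feature_likelihoods = np.sum(likelihood[:, 128:256, :], axis=1)
assert feature_likelihoods.shape == (784, 10)  # one summed value per pixel per class
```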
Validate both grant_type is a valid string and grant_type is allowed for current workflow | def validate_grant_type(self, client_id, grant_type, client, request, *args, **kwargs):
assert(grant_type in GRANT_TYPE_MAPPING) # mapping misconfiguration
return request.client.authorization_grant_type in GRANT_TYPE_MAPPING[grant_type] | [
"def validate_grant_type(self, client_id, grant_type, client, request,\r\n *args, **kwargs):\r\n if self._usergetter is None and grant_type == 'password':\r\n log.debug('Password credential authorization is disabled.')\r\n return False\r\n\r\n default_g... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Save access and refresh tokens. If a refresh token is issued, remove the old one. | def save_bearer_token(self, token, request, *args, **kwargs):
if request.refresh_token:
# remove used refresh token
try:
RefreshToken.objects.get(token=request.refresh_token).revoke()
except RefreshToken.DoesNotExist:
assert() # TODO though be... | [
"def refresh_access_token():\n client = Client(sm.access_token)\n auth_dict = client.refresh_access_token(\n client_id=sm.client_id,\n client_secret=sm.client_secret,\n refresh_token=sm.refresh_token)\n logger.debug('Auth Dict: %s', auth_dict)\n\n # Save the dict back to Secret Mana... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute dE/dX based on the cluster size. Results are expressed in MeV.cm2/g | def GetdEdX(self,clusterSize):
return self.GetEloss(clusterSize)/(self.pitch*100/self.rho) | [
"def occupancyEMD(swcList, voxelSize):\n\n occupancyDistributionDict = calcOccupancyDistribution(swcList, voxelSize)\n bins = np.arange(1, len(swcList) + 1)\n occupancyDistribution = [occupancyDistributionDict[x] for x in bins]\n perfectOverlapDist = np.zeros(bins.shape)\n perfectOverlapDist[-1] = 1\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns patient_x and patient_Y for patient_id | def get_features_for_patient(patient_id):
patient_id = str(patient_id)
patient_ema_features, patient_engagement = get_EMA_features_and_target_for_patient(
patient_id)
patient_module_features = get_module_features_for_patient(
patient_id).transpose().fillna(0)
patient_features = patient_e... | [
"def get_trajectory():\n return patient_id_trajectory",
"def _GetElementCoords(ID,array):\n coords = np.where(array==ID)\n return coords[0][0],coords[1][0]",
"def get_patient_by_id(self, index):\n return self.patient_dict[index]",
"def extract_xy_values(student_ids, var_names, variable... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Retrieves the overall feature-selection columns for all patients. | def get_FS_cols(list_patients_objects, max_features=10, technique='correlation'):
if technique == 'correlation':
features = get_patients_correlated_score(
list_patients_objects).index.to_series().tolist()
if technique == 'top_features':
features = get_top_features(list_patients_obj... | [
"def get_feature_columns(self):\n return self.feature_columns",
"def get_df_with_all_features(self):\n return self.df_with_all_extracted_features",
"def getAllColumns (self):\n\n return self.columns",
"def create_all_features(self):\n self.create_boatsize_sex_weight_feature()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns an object with all the performances of all MLmodels. | def extract_performances_from_models(ml_models, performance_metric='mae'):
results = []
for model in ml_models:
results.append({
'model_name': model['name'],
'model_'+performance_metric: model['score'][performance_metric]
})
return results | [
"def evaluate_models(num_splits=10):\n models = {\"Decision Tree\": tree.DecisionTreeClassifier(),\n \"Nearest Neighbor\": neighbors.KNeighborsClassifier(),\n \"Random Forest\": ensemble.RandomForestClassifier(),\n \"Linear SVM\": svm.SVC(kernel=\"linear\"), # the linear k... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns an average of all MAE scores for all patients. | def get_patients_mean_MAE_score(list_patients_objects):
return pd.DataFrame(list_patients_objects).mean()['MAE'] | [
"def _normal_scores_average(self):\n average_scores = npi.group_by(self.normal_score_matrix[:, 0], self.normal_score_matrix[:, 3], np.mean)\n\n return average_scores",
"def average(self):\n # Sum each data item across all Exons in the list\n summed_data = None\n for exon in self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Stores a patient object (containing the patient_id) in .npy format. | def save_patient_object(patient_object, prefix='', path_to_features=FEATURE_PATH):
np.save(path_to_features + str(prefix) + str(patient_object['patient_id'])+'_'+'top_features.npy', patient_object) | [
"def save_npy(object, file_name):\n\twith open(file_name, \"wb\") as fw:\n\t\tnp.save(fw, object)",
"def save_npy(self, filename):\n np.save(filename, self.data)",
"def save_object_npy(alfpath, dico, object):\n alfpath = Path(alfpath)\n status = check_dimensions(dico)\n if status != 0:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Helper to turn a string into a list of non-empty lines and return it. | def _non_empty_lines(output):
return [line for line in output.splitlines() if line.strip()] | [
"def split_and_strip_non_empty_lines(text):\r\n return [line.strip() for line in text.splitlines() if line.strip()]",
"def remove_empty_lines(strIn):\n return os.linesep.join([s for s in strIn.splitlines() if s.strip()])",
"def no_emptylines(array):\n new_array = []\n for a in array:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Transforms an EE image into a numpy array based on the aoi region | def get_array_from_image(self, image):
return geemap.ee_to_numpy(image, region=self.aoi) | [
"def fancyConvert(image):",
"def FI(image):\n a = iulib.floatarray()\n iulib.narray_of_numpy(a,transpose(image[::-1,...]))\n return a",
"def transform_image(self, inputImage: np.ndarray, imageColor: str) -> np.ndarray:\n pass",
"def register_component_images(fixed_array, moving_array, componen... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Save an image of the column with file name f_name; you can specify a title, for example: title='NDVI index on Nice'; get_data.output_images(df['NDVI'],'NDVI_Nice',title=title) or title='Normalized temperature in Nice'; get_data.output_images(df['Norm_Temp'],'Temp_Nice',title=title) | def output_images(self, col, f_name, cmap='RdYlGn', title='', band=10):
img = np.array(col)
img = img.reshape((self.shapes[band][0], self.shapes[band][1]))
plt.figure(figsize=(15, 10))
if title != '':
plt.title(title, fontsize=30)
plt.imshow(img, cmap=cmap)
... | [
"def get_image_filepath(data_dir, row):\n return os.path.join(data_dir, f\"{row.Species}___{row.Label}\", row.Filename)",
"def image_name(self, image_id: int):\n image_id_expanded = \"0\" * (12 - len(str(image_id))) + str(image_id)\n if self.mode == \"train\":\n return \"COCO_train2014... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Displays a folium map of temperature (Celsius) and NDVI with color scales. | def display_folium_map(self, min_temp=20, max_temp=40, minNDVI=-1, maxNDVI=1):
linearndvi = cmp.LinearColormap(
['#d73027', '#fc8d59', '#fee08b', '#d9ef8b', '#91cf60', '#1a9850'],
vmin=minNDVI,
vmax=maxNDVI,
caption='NDVI - Vegetation index' #Caption for Color sc... | [
"def display_basemap():\n world = gp.read_file(gp.datasets.get_path('naturalearth_lowres'))\n world.plot()",
"def plot_maps(df):\r\n\r\n df = pd.read_csv('Results/final_df.csv', index_col=0)\r\n\r\n fig = go.Figure(data=go.Scattergeo(\r\n lat=df.loc[:, 'Latitude'],\r\n lon=df.loc... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Removes sea pixels from a DataFrame, based on the sea_pixel_table output of the sea_pixel_of_Nice_ref_image class method. | def remove_sea(self, working_df):
    if not isinstance(self.sea_pixels, pd.DataFrame):
        print(
            'No criteria for exclusion of the sea, please provide a sea_pixels df when instantiating class Smartrees'
        )
        return working_df
output_df = workin... | [
"def seam_removal_mask(self, remove_pix, mask):\n m, n = mask.shape\n output = np.zeros((m, n - 1))\n for row in range(m):\n col = remove_pix[row]\n output[row, :] = np.delete(mask[row, :], [col])\n mask = np.copy(output)\n return mask",
"def remove_seam(im... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Save the learned policy to disk. Since we only learned the last step, we save the value of the last step along with the index of the homing policy for the previous step. | def _save_policy(learned_policy, policy_folder_name, horizon, policy_index):
if not os.path.exists(policy_folder_name):
os.makedirs(policy_folder_name)
learned_policy[horizon].save(folder_name=policy_folder_name, model_name="step_%d" % horizon)
with open(policy_folder_name + "prev_... | [
"def save_model(self):\n saver = PolicySaver(self.agent.policy)\n saver.save(self.model_dir)",
"def save_policy(self, save_dir) -> None:\n pickle.dump(self, open(f\"{save_dir}policy.p\", \"wb\"))",
"def _save_snapshot(self) -> None:\n for brain_name in self.current_policy_snapshot:\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read the policy from the disk | def read_policy(self, policy_folder_name, horizon, previous_step_homing_policy, delete=False):
homing_policy = dict()
with open(policy_folder_name + "prev_policy_index", 'rb') as fobj:
policy_index = pickle.load(fobj)
for j in range(1, horizon):
homing_policy[j] = previ... | [
"def read_policy_data(policy_id):\n if not os.path.exists(\"data/policy{}.txt\".format(policy_id)):\n pull_policy_data.write_data(policy_id)\n with open(\"data/policy{}.txt\".format(policy_id), \"rb\") as f:\n raw_policy_data = f.read()\n return json.loads(raw_policy_data.decode(\"utf-8\"))",... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method handles the post_save signal from Model3d and checks whether a reward must be granted to the owner of the Model3d. | def check_if_user_earned_collector_reward(sender, instance, created, **kwargs):
logger.debug("check_if_user_earned_collector_reward raised")
if created:
user = instance.user
sketchfab_services.check_collector_reward(user) | [
"def check_if_user_earned_star_reward(sender, instance, **kwargs):\n\n logger.debug(\"check_if_user_earned_star_reward raised\")\n\n hits = instance.hits\n content_object = instance.content_object\n if type(content_object) is Model3d:\n sketchfab_services.check_star_reward(content_object, hits)",... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method handles the post_save signal from HitCount and checks whether a reward must be granted to the owner of the Model3d. | def check_if_user_earned_star_reward(sender, instance, **kwargs):
logger.debug("check_if_user_earned_star_reward raised")
hits = instance.hits
content_object = instance.content_object
if type(content_object) is Model3d:
sketchfab_services.check_star_reward(content_object, hits) | [
"def check_if_user_earned_collector_reward(sender, instance, created, **kwargs):\n\n logger.debug(\"check_if_user_earned_collector_reward raised\")\n if created:\n user = instance.user\n sketchfab_services.check_collector_reward(user)",
"def observe(self, pre_observation, action, reward, post_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
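Both receivers above follow the standard Django signal pattern; a hypothetical wiring sketch (the Model3d and HitCount imports are assumed to exist in this app):

    from django.db.models.signals import post_save

    post_save.connect(check_if_user_earned_collector_reward, sender=Model3d)
    post_save.connect(check_if_user_earned_star_reward, sender=HitCount)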
Set the distance model. | def set_distance_model(
self, model: DistanceModelStr | QtSpatialAudio.QSpatialSound.DistanceModel
):
self.setDistanceModel(DISTANCE_MODEL.get_enum_value(model)) | [
"def setDistance(self, distance) -> None:\n ...",
"def set_arbitrary_distance(self):\n self.dist = np.min(self.mx-self.mn)",
"def set_distorion(self, distortion):\n self.dist = distortion",
"def set_velocity_dist(self, velocity_dist):\n self.erase_lookup_tables()\n if self.i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return current distance model. | def get_distance_model(self) -> DistanceModelStr:
return DISTANCE_MODEL.inverse[self.distanceModel()] | [
"def _get_distance(self):\n\n # implement here",
"def get_distance():\n return sensor.distance_cm()",
"def get_distance_vector(self):\r\n return self.routingTable[self.sourceRouter]",
"def get_distance(self):\n\n\t\tif self.tour_completed:\n\t\t\treturn self.distance_travelled\n\t\treturn Non... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
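The setter/getter pair above assumes DISTANCE_MODEL is a two-way string-to-enum mapping; a minimal sketch of that shape (the values and helper method are illustrative, not the real Qt objects):

    class TwoWayMap:
        def __init__(self, mapping):
            self._fwd = dict(mapping)
            self.inverse = {v: k for k, v in mapping.items()}

        def get_enum_value(self, key):
            # Strings are translated; enum values pass through unchanged.
            return self._fwd.get(key, key)

    DISTANCE_MODEL = TwoWayMap({'logarithmic': 0, 'linear': 1})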
This class's __init__ method builds a list with all disconnected clusters of a city. Each cluster is a list of the street ways connected among themselves. !!! ATTENTION !!! It treats the city street network as an UNDIRECTED GRAPH!!! | def __init__(self, city):
self.clusters = []
# We get all city street network nodes:
allNodes = city.getStreetNodes()
# When a node is added to a cluster, we eliminate it from the allNodes list.
# So we go adding nodes to a cluster while allNodes list is not empty.
while ... | [
"def __init__(self):\n\n self.clusters = [ ]",
"def cluster_conn(self):\n assert self.clusters\n cluster_conn = []\n for i, cluster_atoms in enumerate(self.clusters):\n this_cluster_conn = []\n ext_bond = []\n for ia in cluster_atoms:\n v... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
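The clustering described above is the classic connected-components computation on an undirected graph; a minimal BFS sketch, assuming a hypothetical adjacency(node) helper that yields a node's street-network neighbours:

    from collections import deque

    def connected_components(all_nodes, adjacency):
        remaining = set(all_nodes)
        clusters = []
        while remaining:
            seed = remaining.pop()
            cluster, queue = [seed], deque([seed])
            while queue:
                node = queue.popleft()
                for neighbour in adjacency(node):
                    if neighbour in remaining:
                        remaining.remove(neighbour)
                        cluster.append(neighbour)
                        queue.append(neighbour)
            clusters.append(cluster)
        return clusters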
This method returns the clusters found | def getClusters(self):
return self.clusters | [
"def get_cluster_list(self):\n LOG.info(\"Getting clusters\")\n return self.client.request(constants.GET,\n constants.GET_CLUSTER.format\n (self.server_ip), payload=None,\n querystring=constants.\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate XPaths based on a tag_name and attributes. | def generate_xpath(self, tag_name, attributes):
xpaths_plural = "<ul>"
for key in attributes.keys():
xpaths_plural += "<li>.//{}[contains(@{}, '{}')]</li>".format(
tag_name, key, attributes[key])
xpaths_plural += "</ul>"
return xpaths_plural | [
"def xpath(self, *args, **kw):\n nodes = super(_Tag, self).xpath(*args, **kw)\n\n renderer = self.renderer\n for node in nodes:\n node._renderer = renderer\n\n return nodes",
"def xpath_iter(xpath):\n for separator, token in re.compile('(|/|\\.\\.)/([^/]+)').findall(xpath... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
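Usage sketch for generate_xpath on a hypothetical instance called helper (dicts preserve insertion order in Python 3.7+, so the list items follow the attributes dict):

    helper.generate_xpath('input', {'id': 'email', 'name': 'user_email'})
    # -> "<ul><li>.//input[contains(@id, 'email')]</li>"
    #    "<li>.//input[contains(@name, 'user_email')]</li></ul>"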
Capture an image of the element that is pointed to. | def capture_element(self, element, name):
LOG.info(f'Performing element image capture {element}')
location = element.location
size = element.size
img = self.driver.get_screenshot_as_png()
img = Image.open(BytesIO(img))
left = location['x']
top = location['y']
... | [
"def capture(self):\n # insert the canvas\n self.fitsimage.add(self.canvas, tag='mycanvas')",
"def capture(self):\n self.camera = self.ids['camera']\n timestr = time.strftime(\"%Y%m%d_%H%M%S\")\n self.camera.export_to_png(\"IMG_{}.png\".format(timestr))\n print(\"Captured... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
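The capture_element body is truncated above; the typical PIL continuation crops the full-page screenshot down to the element box before saving, as in this hypothetical sketch:

    right = left + size['width']
    bottom = top + size['height']
    img = img.crop((left, top, right, bottom))  # PIL crop box: (left, top, right, bottom)
    img.save(f'{name}.png')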
Runs NEAT's genetic algorithm for at most n generations. If n is None, run until solution is found or extinction occurs. | def run(self, fitness_function, n=None):
if self.config.no_fitness_termination and (n is None):
raise RuntimeError("Cannot have no generational limit with no fitness termination")
k = 0
while n is None or k < n:
k += 1
self.reporters.start_generation(self.g... | [
"def evolve(self, n):\n assert n > 0, \"Cannot evolve 0 or less generations!\"\n\n # Prepare the genomes\n self.reset()\n \n # Whether the performance details should be printed\n verbose = False\n\n for i in range(n):\n\n # Print information every 50th tim... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
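In neat-python style, the fitness_function receives (genomes, config), where genomes is a list of (genome_id, genome) pairs; a hypothetical call sketch:

    def eval_genomes(genomes, config):
        for genome_id, genome in genomes:
            genome.fitness = 1.0  # placeholder: score the genome here

    winner = population.run(eval_genomes, 50)  # population: hypothetical instance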
Actions for returning all the S3 objects in a bucket. | def _get_s3_objects(self):
try:
s3_actions = S3Actions()
object_details_list = s3_actions.list_objects_in_buckets(self.bucket_name)
if not object_details_list:
return 'Objects not found',404
else:
return object_details_list,2... | [
"def list():\n return [b.name for b in s3.buckets.all()]",
"def bucket_listing(bucket):\n response = s3.list_objects(Bucket=bucket)\n\n file_listing = []\n for file_data in response[\"Contents\"]:\n data = {\"filename\": file_data[\"Key\"], \"size\": file_data[\"Size\"]}\n file_listing.a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert GC bert checkpoint to Google original checkpoint 1. combine `word_embeddings` if split 2. rename scope `bert/encoder/layer_x/attention/projection/` to `bert/encoder/layer_x/attention/output/` 3. add back attention_projection_bias. 4. split `qkv_weight` to query,key,value, and add relative bias. 5. rename `Group... | def convert_gc_ckpt_to_google(ckpt_file,
output_dir=None,
include_qkv_bias=False,
dtype=tf.float32):
graph = tf.Graph()
dir_name, ckpt_name = os.path.split(os.path.abspath(ckpt_file))
if not output_dir:
output_... | [
"def _get_assignment_map_from_checkpoint(tvars, init_checkpoint):\n assignment_map = {}\n initialized_variable_names = {}\n\n name_to_variable = collections.OrderedDict()\n for var in tvars:\n name = var.name\n m = re.match(\"^(.*):\\\\d+$\", name)\n if m is not None:\n n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test simple remote submission with one pilot. | def test__remote_simple_submission(self):
session = rp.Session()
c = rp.Context('ssh')
c.user_id = self.test_ssh_uid
c.user_key = self.test_ssh_key
session.add_context(c)
pm = rp.PilotManager(session=session)
cpd = rp.ComputePilotDescription()
cpd.reso... | [
"def test__remote_pilot_wait(self):\n session = rp.Session()\n c = rp.Context('ssh')\n c.user_id = self.test_ssh_uid\n c.user_key = self.test_ssh_key\n\n session.add_context(c)\n\n pm = rp.PilotManager(session=session)\n\n cpd = rp.ComputePilotDescription()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test if we can wait for different pilot states. | def test__remote_pilot_wait(self):
session = rp.Session()
c = rp.Context('ssh')
c.user_id = self.test_ssh_uid
c.user_key = self.test_ssh_key
session.add_context(c)
pm = rp.PilotManager(session=session)
cpd = rp.ComputePilotDescription()
cpd.resource ... | [
"def wait(self, state=None, timeout=None):\n\n if not state : states = rps.FINAL\n elif not isinstance(state, list): states = [state]\n else : states = state\n\n\n if self.state in rps.FINAL:\n # we will never see another state pro... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Clean the member and request index to match the database state. Use this function when your tests depend on having a clean index state. This is an "expensive" fixture to run, so only use it if your tests really don't work without it. | def clean_index(member_service, requests_service, db):
list(
current_search.delete(
index_list=[
Request.index._name,
Member.index._name,
ArchivedInvitation.index._name,
]
)
)
list(
current_search.create(
... | [
"def setUp(self):\n self.indices_client = IndicesClient(client=self.es)\n self.indices_client.delete(index='_all')",
"def clean(self):\n # Load elastic search class\n es_instance = es.ESIntegration()\n\n # Remove indice\n es_instance.indice = 'lbdf'\n es_instance.i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
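A hypothetical pytest usage sketch: declaring the fixture as a test argument makes pytest rebuild the indices before the test body runs:

    def test_member_search_is_fresh(clean_index, member_service):
        # indices now mirror the database; safe to assert on search results
        ...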