| query (string) | document (string) | negatives (list) | metadata (dict) |
|---|---|---|---|
Update satellite state obtained from orbit propagation. This method should be called before each attitude integration step! It updates internal variables needed for disturbance torque computation. | def update_satellite_state(self, integration_date):
self.in_date = integration_date
self.spacecraft_state = self.state_observer.spacecraftState
self.satPos_i = self.spacecraft_state.getPVCoordinates().getPosition()
self.satVel_i = self.spacecraft_state.getPVCoordinates().getVelocity() | [
"def updateOrbit(self):\n orbits.orbitParams(self)\n self.nu += self.nudot * self.dt",
"def update_satellite_state(self, current_date):\n self.in_date = current_date\n self.spacecraft_state = self.state_observer.spacecraftState\n\n self.satPos_i = self.spacecraft_state.getPVCoor... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute disturbance torques acting on the satellite. This method computes the disturbance torques, which are set to active in the satellite's settings file. | def compute_torques(self, rotation, omega, dt):
# shift time from integration start to time of attitude integration step
curr_date = self.in_date.shiftedBy(dt)
self.inertial2Sat = rotation
self.satPos_s = self.inertial2Sat.applyTo(self.satPos_i)
self.satPos_s = np.array([self.sa... | [
"def compute_torques(self, rotation, omega, dt):\n # shift time @ which attitude integration currently is\n try:\n curr_date = self.in_date.shiftedBy(dt)\n\n self.inertial2Sat = rotation\n self.satPos_s = self.inertial2Sat.applyTo(self.satPos_i)\n omega = Ve... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute gravity gradient torque if gravity model provided. This method computes the Newtonian attraction and the perturbing part of the gravity gradient for every cuboid defined in dictionary inCub at time curr_date (= time of current satellite position). The gravity torque is computed in the inertial frame in which th... | def _compute_gravity_torque(self, curr_date):
if self._to_add[0]:
# return gravity gradient torque in satellite frame
body2inertial = self.earth.getBodyFrame().getTransformTo(self.in_frame, curr_date)
body2sat = self.inertial2Sat.applyTo(body2inertial.getRotation())
... | [
"def _compute_gravity_torque(self, curr_date):\n if self._to_add[0]:\n body2inertial = self.earth.getBodyFrame().getTransformTo(self.in_frame, curr_date)\n body2sat = self.inertial2Sat.applyTo(body2inertial.getRotation())\n sat2body = body2sat.revert()\n\n satM = s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute magnetic torque if magnetic model provided. This method converts the satellite's position into Longitude, Latitude, Altitude representation to determine the geomagnetic field at that position and then computes the magnetic torque based on those values. | def _compute_magnetic_torque(self, curr_date):
if self._to_add[1]:
gP = self.earth.transform(self.satPos_i, self.in_frame, curr_date)
topoframe = TopocentricFrame(self.earth, gP, 'ENU')
topo2inertial = topoframe.getTransformTo(self.in_frame, curr_date)
lat = gP.... | [
"def get_motor_load_torque(self):\n # Start with the brake normal\n # change to 17deg (tan 17?)\n # change to torque using the pitch of the thread on the ball screw\n # (^ make sure to take friction into account)\n # That should give us the torque acting on the motor. If this torq... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initializes the dipole model. This method uses the simplified dipole model implemented in DipoleModel.py, which needs to initialize the induced magnetic density in the hysteresis rods. It also adds the hysteresis rods and bar magnets specified in the settings file to the satellite using the DipoleModel class. | def _initialize_dipole_model(self, model):
for key, hyst in model['Hysteresis'].items():
direction = np.array([float(x) for x in hyst['dir'].split(" ")])
self.dipoleM.addHysteresis(direction, hyst['vol'], hyst['Hc'], hyst['Bs'], hyst['Br'])
# initialize values for Hysteresis (ne... | [
"def initialize(self, grid=None, input_file=None, intensity=None, stormduration=None):\n self.grid = grid \n if self.grid==None:\n self.grid = create_and_initialize_grid(input_file) ##<- this is the same input file used for parameters. \n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update satellite state obtained from orbit propagation. This method should be called before each attitude integration step! It updates internal variables needed for disturbance torque computation. | def update_satellite_state(self, current_date):
self.in_date = current_date
self.spacecraft_state = self.state_observer.spacecraftState
self.satPos_i = self.spacecraft_state.getPVCoordinates().getPosition()
self.satVel_i = self.spacecraft_state.getPVCoordinates().getVelocity() | [
"def update_satellite_state(self, integration_date):\n self.in_date = integration_date\n self.spacecraft_state = self.state_observer.spacecraftState\n\n self.satPos_i = self.spacecraft_state.getPVCoordinates().getPosition()\n self.satVel_i = self.spacecraft_state.getPVCoordinates().getVe... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute disturbance torques acting on the satellite. This method computes the disturbance torques, which are set to active in the satellite's settings file. | def compute_torques(self, rotation, omega, dt):
# shift time @ which attitude integration currently is
try:
curr_date = self.in_date.shiftedBy(dt)
self.inertial2Sat = rotation
self.satPos_s = self.inertial2Sat.applyTo(self.satPos_i)
omega = Vector3D(float... | [
"def torque(self):\n pass",
"def compute_torques(self, rotation, omega, dt):\n # shift time from integration start to time of attitude integration step\n curr_date = self.in_date.shiftedBy(dt)\n\n self.inertial2Sat = rotation\n self.satPos_s = self.inertial2Sat.applyTo(self.satP... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute gravity gradient torque if gravity model provided. This method computes the Newtonian attraction and the perturbing part of the gravity gradient for every cuboid defined in dictionary inCub at time curr_date (= time of current satellite position). The gravity torque is computed in the inertial frame in which th... | def _compute_gravity_torque(self, curr_date):
if self._to_add[0]:
body2inertial = self.earth.getBodyFrame().getTransformTo(self.in_frame, curr_date)
body2sat = self.inertial2Sat.applyTo(body2inertial.getRotation())
sat2body = body2sat.revert()
satM = self.state_o... | [
"def _compute_gravity_torque(self, curr_date):\n if self._to_add[0]:\n # return gravity gradient torque in satellite frame\n body2inertial = self.earth.getBodyFrame().getTransformTo(self.in_frame, curr_date)\n body2sat = self.inertial2Sat.applyTo(body2inertial.getRotation())\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute magnetic torque if magnetic model provided. This method converts the satellite's position into Longitude, Latitude, Altitude representation to determine the geomagnetic field at that position and then computes the magnetic torque based on those values. | def _compute_magnetic_torque(self, curr_date):
if self._to_add[1]:
gP = self.earth.transform(self.satPos_i, self.in_frame, curr_date)
topoframe = TopocentricFrame(self.earth, gP, 'ENU')
topo2inertial = topoframe.getTransformTo(self.in_frame, curr_date)
lat = gP.... | [
"def get_motor_load_torque(self):\n # Start with the brake normal\n # change to 17deg (tan 17?)\n # change to torque using the pitch of the thread on the ball screw\n # (^ make sure to take friction into account)\n # That should give us the torque acting on the motor. If this torq... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute torque acting on satellite due to solar radiation pressure. This method uses the getLightingRatio() method defined in Orekit and copies parts of the acceleration() method of the SolarRadiationPressure and radiationPressureAcceleration() of the BoxAndSolarArraySpacecraft class to calculate the solar radiation... | def _compute_solar_torque(self, curr_date):
if self._to_add[2]:
inertial2Sat = self.spacecraft_state.getAttitude().getRotation()
ratio = self.SolarModel.getLightingRatio(self.satPos_i,
self.in_frame,
... | [
"def torque(self):\n T = np.zeros([self.Nparticles, 3])\n\n for i in range(self.Nparticles):\n p = np.dot(self.alpha_t[i],self.sol[i])\n a0_inverse = np.linalg.inv(self.alpha_0_t[i])\n T[i] = 0.5*np.real(\n np.cross(np.conj(p), np.dot(a0_inverse, p)))\n\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Wait for an async 'accepted' task to complete. Notes: Certain operations use an async task pattern, where a 202 response on the initial POST is returned along with a self link to query. The operation is complete when one of the following is true (depends on the REST API): the self link returns 200 (as opposed to 202) | def _wait_for_task(self, task_url):
response, status_code = self._client.make_request(
http_utils.parse_url(task_url)['path'],
advanced_return=True
)
# check for async task pattern success/failure
if status_code != constants.HTTP_STATUS_CODE['OK']:
r... | [
"def _wait_for_task(self, task_url):\n\n response, status_code = self._client.make_request(\n requests.utils.urlparse(task_url).path,\n advanced_return=True\n )\n\n if status_code != constants.HTTP_STATUS_CODE['OK']:\n raise Exception('Wait for async task timed ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test migration of field group references in field definitions | def test_field_fieldgroup_references(self):
# Create field group
self.test_group = RecordGroup_migration.create(
self.testcoll, test_group_id, test_group_create_values
)
# Create field definition referencing field group
self.test_field = RecordField.create(
... | [
"def test_update_group_custom_field(self):\n pass",
"def test_update_custom_field_definition(self):\n pass",
"def test_groups_group_ref_put(self):\n pass",
"def test_defining_only_or_defer_on_nonexistant_fields_fails(self):",
"def test_search_group_custom_field(self):\n pass",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test migration of field without tooltip | def test_field_comment_tooltip(self):
# Create field definition
self.test_field = RecordField.create(
self.testcoll, test_field_id, test_field_tooltip_create_values
)
# Apply migration to collection
migrate_coll_data(self.testcoll)
# Read field definition ... | [
"def test_help_text(self):\n post = PostModelTest.Post\n verbose = post._meta.get_field('text').help_text\n self.assertEquals(verbose, 'Введите текст')",
"def test_update_transaction_dispute_custom_field(self):\n pass",
"def test_validation_column(self):\n assert self.check.va... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test migration of view fields | def test_migrate_view_fields(self):
self.test_view = RecordView.create(
self.testcoll, test_view_id, test_view_create_values
)
migrate_coll_data(self.testcoll)
# Read field definition and check for inline field list
view_data = self.check_entity_values(
... | [
"def test_set_field(self):\n pass",
"def test_v1_migrate(self):\n pass",
"def test_Migration_columns(self):\n migration = self.DBSession.query(Migration).filter_by().first()\n if self.engine.dialect.name == 'sqlite': # pragma: no cover\n # pysqlite driver always convert t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test migration of list fields | def test_migrate_list_fields(self):
self.test_list = RecordList.create(
self.testcoll, test_list_id, test_list_create_values
)
migrate_coll_data(self.testcoll)
# Read field definition and check for inline field list
view_data = self.check_entity_values(
... | [
"def test_with_listfields_from_migration(self) -> None:\n data = self.create_request_data(\n {\n \"motion\": {\n \"5\": self.get_motion_data(\n 5,\n {\n \"title\": \"motion/5\",\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the reference_currency of this CreditSupportAnnex. | def reference_currency(self, reference_currency):
if self.local_vars_configuration.client_side_validation and reference_currency is None: # noqa: E501
raise ValueError("Invalid value for `reference_currency`, must not be `None`") # noqa: E501
self._reference_currency = reference_currency | [
"def reference_rate(self, reference_rate):\n\n self._reference_rate = reference_rate",
"def set_reference(self, reference):\n\n self.reference = reference",
"def support_credit(self, support_credit):\n\n self._support_credit = support_credit",
"def card_currency(self, card_currency):\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the collateral_currencies of this CreditSupportAnnex. | def collateral_currencies(self, collateral_currencies):
if self.local_vars_configuration.client_side_validation and collateral_currencies is None: # noqa: E501
raise ValueError("Invalid value for `collateral_currencies`, must not be `None`") # noqa: E501
self._collateral_currencies = coll... | [
"def applied_currencies(self, applied_currencies):\n\n self._applied_currencies = applied_currencies",
"def currencies(self, currencies):\n\n self._currencies = currencies",
"def ccbill_allowed_currencies(self, ccbill_allowed_currencies):\n\n self._ccbill_allowed_currencies = ccbill_allowed... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the isda_agreement_version of this CreditSupportAnnex. | def isda_agreement_version(self, isda_agreement_version):
if self.local_vars_configuration.client_side_validation and isda_agreement_version is None: # noqa: E501
raise ValueError("Invalid value for `isda_agreement_version`, must not be `None`") # noqa: E501
if (self.local_vars_configurati... | [
"def sepa_agreement_date(self, sepa_agreement_date):\n\n self._sepa_agreement_date = sepa_agreement_date",
"def agreement_date(self, agreement_date):\n\n self._agreement_date = agreement_date",
"def ata_version(self, ata_version: SmartSsdAtaVersion):\n\n self._ata_version = ata_version",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the margin_call_frequency of this CreditSupportAnnex. | def margin_call_frequency(self, margin_call_frequency):
if self.local_vars_configuration.client_side_validation and margin_call_frequency is None: # noqa: E501
raise ValueError("Invalid value for `margin_call_frequency`, must not be `None`") # noqa: E501
if (self.local_vars_configuration.c... | [
"def set_margin(self, margin):\n _pal.lib.geometry_set_margin(self._geometry, c.c_float(margin))",
"def exposureMargin(self, exposureMargin):\n\n self._exposureMargin = exposureMargin",
"def call_limits(self, call_limits):\n self._call_limits = call_limits",
"def set_clock_frequency(self,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the valuation_agent of this CreditSupportAnnex. | def valuation_agent(self, valuation_agent):
if self.local_vars_configuration.client_side_validation and valuation_agent is None: # noqa: E501
raise ValueError("Invalid value for `valuation_agent`, must not be `None`") # noqa: E501
if (self.local_vars_configuration.client_side_validation an... | [
"def ecsgeneration(self, ecsgeneration):\n self._ecsgeneration = ecsgeneration",
"def agent_requirement(self, agent_requirement):\n\n self._agent_requirement = agent_requirement",
"def support_agent_list(self, support_agent_list):\n self._support_agent_list = support_agent_list",
"def set... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the threshold_amount of this CreditSupportAnnex. | def threshold_amount(self, threshold_amount):
if self.local_vars_configuration.client_side_validation and threshold_amount is None: # noqa: E501
raise ValueError("Invalid value for `threshold_amount`, must not be `None`") # noqa: E501
self._threshold_amount = threshold_amount | [
"def set_threshold(self, threshold):\n self._threshold = check_value_positive('threshold', threshold)",
"def threshold(self, threshold):\n\n self._threshold = threshold",
"def setThreshold(self, threshold): # real signature unknown; restored from __doc__\n pass",
"def setThreshold(self, t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the rounding_decimal_places of this CreditSupportAnnex. | def rounding_decimal_places(self, rounding_decimal_places):
if self.local_vars_configuration.client_side_validation and rounding_decimal_places is None: # noqa: E501
raise ValueError("Invalid value for `rounding_decimal_places`, must not be `None`") # noqa: E501
self._rounding_decimal_pla... | [
"def rounding_decimal(self, rounding_decimal):\n\n self._rounding_decimal = rounding_decimal",
"def round(self, round):\n\n self._round = round",
"def round_num(self, round_num: int):\n\n self._round_num = round_num",
"def set_precision(prec = None):\n context = decimal.getcontext(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the initial_margin_amount of this CreditSupportAnnex. | def initial_margin_amount(self, initial_margin_amount):
if self.local_vars_configuration.client_side_validation and initial_margin_amount is None: # noqa: E501
raise ValueError("Invalid value for `initial_margin_amount`, must not be `None`") # noqa: E501
self._initial_margin_amount = init... | [
"def init_margin(self, init_margin):\n\n self._init_margin = init_margin",
"def set_margin(self, margin):\n _pal.lib.geometry_set_margin(self._geometry, c.c_float(margin))",
"def set_margin(self, value):\n value = u.decimal(value)\n if u.isempty(value):\n self.sale_price =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the minimum_transfer_amount of this CreditSupportAnnex. | def minimum_transfer_amount(self, minimum_transfer_amount):
if self.local_vars_configuration.client_side_validation and minimum_transfer_amount is None: # noqa: E501
raise ValueError("Invalid value for `minimum_transfer_amount`, must not be `None`") # noqa: E501
self._minimum_transfer_amo... | [
"def set_minimum(self, min_value):\n\n self._progress.setMinimum(min_value)",
"def buy_min_amount(self, buy_min_amount):\n\n self._buy_min_amount = buy_min_amount",
"def set_min(self, min_value):\n self._min = min_value",
"def minimum_value(self, minimum_value):\n self._minimum_val... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Plot the land uses of the resting points | def plotLandUse(layer, x):
# features of the layer
features = layer.getFeatures()
# Create empty list for landuses
list_lu = []
# Iterate over features and add to a list
for feature in features:
list_lu.append(feature['Landuse'])
list_lu.sort()
# bins of the landuse numbers
... | [
"def show_landmarks(image, landmarks):\n plt.imshow(image)\n plt.scatter(landmarks[:, 0], landmarks[:, 1], s=50, marker='.', c='r')\n plt.pause(0.001)",
"def show_landmarks(image, landmarks):\n plt.imshow(image)\n plt.scatter(landmarks[:, 0], landmarks[:, 1], s=10, marker='.', c='r')\n plt.pause... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Switches between two operations depending on a scalar value (int or bool). Note that both `then_expression` and `else_expression` should be symbolic tensors of the same shape. Arguments | def switch(condition, then_expression, else_expression):
x_shape = copy.copy(then_expression.get_shape())
x = tf.cond(tf.cast(condition, 'bool'),
lambda: then_expression,
lambda: else_expression)
x.set_shape(x_shape)
return x | [
"def symbolic_ifelse(cond, then, else_):\n def outfunc(df): \n cond_out = maybe_eval(df, cond)\n n = len(cond_out)\n if cond_out.all():\n return maybe_eval(df, then) >> maybe_tile(n)\n elif not cond_out.any():\n return maybe_eval(df, else_) >> maybe_tile(n)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a session that will use CPUs only | def make_session(num_cpu=None, make_default=False):
if num_cpu is None:
num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))
tf_config = tf.ConfigProto(
inter_op_parallelism_threads=num_cpu,
intra_op_parallelism_threads=num_cpu)
tf_config.gpu_options.allocator_type ... | [
"def single_threaded_session():\n return make_session(num_cpu=1)",
"def make_session(num_cpu=None, make_default=False, graph=None):\n if num_cpu is None:\n num_cpu = int(os.getenv(\"RCALL_NUM_CPU\", multiprocessing.cpu_count()))\n tf_config = tf.ConfigProto(\n inter_op_parallelism_threads=n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a session which will only use a single CPU | def single_threaded_session():
return make_session(num_cpu=1) | [
"def make_session(num_cpu=None, make_default=False, graph=None):\n if num_cpu is None:\n num_cpu = int(os.getenv(\"RCALL_NUM_CPU\", multiprocessing.cpu_count()))\n tf_config = tf.ConfigProto(\n inter_op_parallelism_threads=num_cpu,\n intra_op_parallelism_threads=num_cpu)\n tf_config.gp... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Linear interpolation between initial_p and final_p over schedule_timesteps. After this many timesteps pass final_p is returned. | def __init__(self, schedule_timesteps, final_p, initial_p=1.0):
self.schedule_timesteps = schedule_timesteps
self.final_p = final_p
self.initial_p = initial_p | [
"def __init__(self, schedule_timesteps, final_p, initial_p=1.0):\n self.schedule_timesteps = schedule_timesteps\n self.final_p = final_p\n self.initial_p = initial_p",
"def _interpolate(start, end, step, total_steps):\n diff = end - start\n progress = step / ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that syntax errors are caught when reading a mapping. | def test_read_mapping_errors(content):
with pytest.raises(IOError):
vermouth.map_input._read_mapping_partial(content.split('\n'), 1) | [
"def test_map_missing_key_encountered():\n with pytest.raises(KeyError):\n Map().read_key(10, b\"\")",
"def test_input_reader_errors():\n with pytest.raises(TypeError):\n load_input_reader(\"not_a_dictionary\")\n with pytest.raises(errors.MapcheteDriverError):\n load_input_reader({})... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that regular mapping files are read as expected. | def test_read_mapping_file(case):
reference = collections.defaultdict(lambda: collections.defaultdict(dict))
for from_ff, to_ff in itertools.product(case.from_ff, case.to_ff):
reference[from_ff][to_ff][case.name] = (
case.mapping, case.weights, case.extra
)
ffs = case_to_dummy_f... | [
"def test_fileMappings(self) -> None:\n fileMappings = loadFileMappings()\n self.assertTrue(fileMappings is not None)\n self.assertFalse(len(fileMappings) == 0)",
"def test_read_mapping_directory(ref_mapping_directory):\n dirpath, ref_mappings = ref_mapping_directory\n from_names = list... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that read_mapping_file can read more than one molecule. | def test_read_mapping_file_multiple(reference_multi):
content, reference = reference_multi
from_names = list(reference.keys())
to_names = []
block_names = []
for k in reference:
to_names.extend(reference[k].keys())
for to in reference[k]:
block_names.extend(reference[k][... | [
"def test_read_mapping_file(case):\n reference = collections.defaultdict(lambda: collections.defaultdict(dict))\n for from_ff, to_ff in itertools.product(case.from_ff, case.to_ff):\n reference[from_ff][to_ff][case.name] = (\n case.mapping, case.weights, case.extra\n )\n\n ffs = cas... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Build a file tree with mapping files. | def ref_mapping_directory(tmpdir_factory):
basedir = tmpdir_factory.mktemp('data')
mapdir = basedir.mkdir('mappings')
template = textwrap.dedent("""
[ molecule ]
dummy_{0}
[ from ]
{1}
[ to ]
{2}
[ atoms ]
0 X1{0} A{0} B{0}
1 X2{0} ... | [
"def _buildMappings(self):\n files = _findFiles()\n\n all_data = []\n\n # Read all the target files in reverse.\n for path in reversed(files):\n # Open the target file.\n with open(path) as handle:\n # Load the json data and convert the data from unic... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that mapping files from a directory are properly found and read. | def test_read_mapping_directory(ref_mapping_directory):
dirpath, ref_mappings = ref_mapping_directory
from_names = list(ref_mappings.keys())
to_names = []
block_names = []
mapping = {}
weights = {}
for k in ref_mappings:
to_names.extend(ref_mappings[k].keys())
for to in ref... | [
"def test_fileMappings(self) -> None:\n fileMappings = loadFileMappings()\n self.assertTrue(fileMappings is not None)\n self.assertFalse(len(fileMappings) == 0)",
"def test_map(self, audio_store_and_expected_files):\n audio_store = audio_store_and_expected_files[0]\n expected_fi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Plots the graph of the trained model against the trained feature and label | def plot_the_model(trained_weight, trained_bias, feature, label):
#Define the axes.
plt.xlabel("feature")
plt.ylabel("label")
#Plot the feature values and the label values.
plt.scatter(feature, label)
#Create a red line representing the model. The red line
#(x0, yo) coordina... | [
"def predict_only(self):",
"def gen_features(self, X):",
"def get_train_labels(self, window, scene):\n pass",
"def trainModel(self):\n\n # read file. prepare file lists.\n self.images, self.trainImageCount, self.names = self.file_helper.getFiles(self.train_path)\n # extract SIFT Fe... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the text of a Plone document if it exists and reindexes the document. The text comes from a browser view template tag | def setPageText(portal, page, viewName):
if page is None:
return
request = getattr(portal, 'REQUEST', None)
if request is not None:
view = queryMultiAdapter((portal, request), name=viewName)
if view is not None:
text = bodyfinder(view.index()).strip()
page.set... | [
"def change_document(self, new_text):\n self.text = new_text\n self.sample = new_text",
"def set_text(self, new_text):\n\n self.output['text'] = new_text",
"def updateText(self):\n self.errorFlag = False\n try:\n self.setText(self.field.editorText(self.node))\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a MaildropHost if Products.MaildropHost is available... If MaildropHost exists, PloneGazette will use it to send mails. This will avoid duplicate emails being sent, as reported by | def addMaildropHost(self):
portal = getToolByName(self, 'portal_url').getPortalObject()
if not hasattr(portal, "MaildropHost"):
try:
portal.manage_addProduct['MaildropHost'].manage_addMaildropHost('MaildropHost', title='MaildropHost')
except AttributeError:
# if MaildropH... | [
"def manage_addMailHost(id, title='', smtp_host=None,\n localhost='localhost', smtp_port=25,\n timeout=1.0):",
"def manage_addMailServer( self, id='MailServer', title='', host=None, port=None, REQUEST=None ):\n self._setObject( id, MailServer( id, title, host, port ) )\n\n if REQUEST is not No... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Method to add our wanted indexes to the portal_catalog. | def addCatalogIndexes(portal):
catalog = getToolByName(portal, 'portal_catalog')
indexes = catalog.indexes()
wanted = (('standardTags', 'KeywordIndex'),
('iamTags', 'KeywordIndex'),
('isearchTags', 'KeywordIndex'),
('hiddenTags', 'KeywordIndex'))
indexables = []... | [
"def add_catalog_indexes(context, logger):\n if logger is None:\n logger = logging.getLogger('bungenicms.membershipdirectory')\n \n # Run the catalog.xml step as that may have defined new metadata columns. \n # We could instead add <depends name=\"catalog\"/> to the registration of our \n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extracts and parses data from a sqlite3.Cursor object | def cursor_data(c):
# pull column description
d = []
for i in range(len(c.description)):
d.append(c.description[i][0])
# fetch column entries
c = c.fetchall()
# compile list
info = []
for i in range(len(c)):
# compile dictionary entry
entry = {}
... | [
"def construct_list(cursor):\n header = [h[0] for h in cursor.description]\n data = cursor.fetchall()\n return header, data",
"def _readData(self):\n self.conn = sqlite3.connect('Shipments.db')\n self.cur = self.conn.cursor()",
"def map_row_to_dict(cursor: sqlite3.Cursor, row_data):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Retrieves and parses current time stamp from the 'datetime' module. | def get_time():
dt = datetime.datetime.now()
dt_parsed = dt.strftime("%Y-%m-%d %H:%M:%S")
return dt_parsed | [
"def datetime(self):\r\n if 'observation_time_rfc822' in self.data \\\r\n and self.data['observation_time_rfc822']:\r\n tstr = self.data['observation_time_rfc822']\r\n tstr = ' '.join(tstr.split(' ')[:-2])\r\n return datetime.strptime(tstr, '%a, %d %b %Y %H:%M:%S')\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fetches the ten most recent news articles from an RSS feed. | def get_news(url):
# parse RSS feed into list of dictionaries
feed = feedparser.parse(url)
# no RSS feed articles for url
if len(feed['entries']) == 0:
return []
# get first ten articles from the RSS feed
news = []
i = 0
while True:
if i == len(fee... | [
"def get_top_articles(update=False):\n # use caching to avoid running unnecessary DB queries at each page load\n key = 'top_ten'\n articles = memcache.get(key)\n\n logging.warn('MEMCACHE | Wiki articles %s' % str(articles))\n\n if (articles is None) or (len(articles) == 0) or update:\n # neces... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calls the new_transformations.sql script on a date by date basis | def run_sql_transformations(self):
conn = pg2.connect(user='postgres', dbname='penny', host='localhost', port='5432', password='password')
for d in self.get_list_of_dates():
print(d)
df = pd.read_sql("Select count(*) as acount from auctions where auctiontime < '... | [
"def run_script():\n\n tables_dict = parse_table_data()\n source_dtypes = get_data_types(tables_dict)\n data_map = OrderedDict(zip(source_dtypes, POSTGRES_DTYPES_MAP))\n tables_dict = clean_data_types(tables_dict, data_map)\n\n create_tables = generate_table_scripts(tables_dict)\n create_pks = gen... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Players may join a game of Pig | def test_join(self):
pig = game.pig.Pig('PlayerA', 'PlayerB', 'PlayerC')
self.assertEqual(pig.get_players(), ('PlayerA', 'PlayerB', 'PlayerC')) | [
"def player_joined(self):\n\n self.in_game = True",
"def enough_players():\n return True",
"def join(self):\n # Check if there is at least one open game \n # Connect to game if exist \n if GameSessionMixin.waiter is not None:\n enemy = GameSessionMixin.waiter\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
db cursor as a "self closing" context manager | def cursor(cls) -> Iterator[sqlite3.Cursor]:
with closing(cls.db.cursor()) as cur:
yield cur | [
"def cursor():\n dbh = handle()\n return dbh.cursor()",
"def cursor(self):\n with self.conn as c:\n yield c.cursor()",
"def getCursor():\n return get_db().cursor()",
"def _close_cursor(self):\n self.cursor.close()\n self.cursor = None",
"def with_cursor(fn):\n @fu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the available pollutants per country from the summary. | def pollutants_per_country(cls) -> dict[str, dict[str, int]]:
with cls.cursor() as cur:
cur.execute(
"SELECT country_code, pollutant, pollutant_id FROM summary"
)
output: dict[str, dict[str, int]] = defaultdict(dict)
for country_code, pollutant, p... | [
"def pollutants_per_country(summary):\n output = dict()\n\n for d in summary.copy():\n country = d.pop(\"ct\")\n\n if country in output:\n output[country].append(d)\n else:\n output[country] = [d]\n\n return output",
"def country_resources(self, country):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Darkens the given color by multiplying luminosity by the given amount. Input can be matplotlib color string, hex string, or RGB tuple. | def darken_color(color, amount=0.5):
import matplotlib.colors as mc
import colorsys
try:
c = mc.cnames[color]
except:
c = color
c = colorsys.rgb_to_hls(*mc.to_rgb(c))
return colorsys.hls_to_rgb(c[0], amount * c[1], c[2]) | [
"def darken_color(color, amount):\r\n color = [int(col * (1 - amount)) for col in hex_to_rgb(color)]\r\n return rgb_to_hex(color)",
"def darken_color(color, factor=0.5):\n rgba = mpl.colors.to_rgba(color)\n return tuple(factor * c for c in rgba[:3]) + rgba[3:]",
"def saturate_color(color, amount):\r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function needed for backwards compatibility with the old "col" argument in plt functions. It returns the default color 'C0' if both arguments are None. If 'color' is not None, it always uses that. If 'color' is None and 'col' is an integer, it returns the corresponding 'CN' color. If 'col' is neither None nor integer, ... | def get_color(col, color):
if color is None and col is None:
return 'C0'
if col is None:
return color
if not isinstance(col, int):
raise ValueError("`col` must be an integer. Consider using `color` instead.")
return 'C{}'.format(col) | [
"def _color_for_labels(label_color, default_color, seq_index):\n if label_color is None:\n if hasattr(default_color, '__getitem__'):\n c = default_color[seq_index]\n else:\n c = default_color\n else:\n c = label_color\n\n return c or 'black'",
"def return_color(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Render suborganizations as originally implemented in collective.contact.core. | def render_original_suborgs(self):
original_suborgs_view = self.context.restrictedTraverse('@@original-suborganizations')
return original_suborgs_view() | [
"def organizations(self):\n self.elements('organizations')",
"def sub_organization(self) -> object:\n return self._sub_organization",
"def render_subgraph(self, ontol, nodes, **args):\n subont = ontol.subontology(nodes, **args)\n return self.render(subont, **args)",
"def organizati... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return group title. If self.short, turn "My config (My suffix)" into "My suffix". | def group_title(self, group):
group_title = group.getProperty('title')
if self.short:
splitted = group_title.split('(')
if len(splitted) > 1:
group_title = group_title.split('(')[-1][:-1]
return html.escape(group_title) | [
"def group_short_name(self) -> str:\n return pulumi.get(self, \"group_short_name\")",
"def short_title(self):\n if hasattr(self, \"title\"):\n return self.title\n else:\n return \"\"",
"def getShortName(self) -> str:\n return self.short_name",
"def title(self)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a list of the top 10 video IDs from the VIDEOS table, in descending order of view count. | def get_most_viewed(self): # WORKS
self.cur.execute("SELECT video_ID FROM videos ORDER BY CAST(view_count as decimal) DESC LIMIT 10")
most_viewed_video_IDs = []
for ID in self.cur.fetchall():
most_viewed_video_IDs.append(ID[0])
return most_viewed_video_IDs | [
"def get_most_popular_talks_by_views(videos):\n return sorted(videos , key= lambda x: int(x.metrics[\"viewCount\"]), reverse = True)",
"def popular():\r\n d = data_loader.vid_patient_tuples_dict\r\n most_popular_videos = []\r\n for k in sorted(d, key=lambda k: len(d[k]), reverse=True):\r\n most... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks if the entered username and password correspond to a valid user in the USERS and ADMINS tables. | def is_valid_user(self, username, password): # WORKS
done1 = self.cur.execute("SELECT password FROM users WHERE username=\"{}\"".format(username))
done2 = self.cur.execute("SELECT username FROM admins WHERE username=\"{}\"".format(username))
if done1 == 0 and done2 == 0: # If both queries are un... | [
"def validate_authentication(self, username, password):\n return self.user_table[username]['pwd'] == password",
"def validate_login(self, username, password):\n user = User(self).get(username)\n return user and user['Password'] == password",
"def is_logged_in_user_valid(user_name, password)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Updates the password of the user in the USERS table. | def update_password(self, username, password): #WORKS
password_hash = generate_password_hash(password)
try:
self.cur.execute("UPDATE users SET password = \"{}\" WHERE username = \"{}\"".format(password_hash, username))
self.db.commit()
except:
self.db.rollback... | [
"def update_password(\n self,\n user: User,\n password: str,\n ) -> None:\n user.password = hashers.make_password(password)",
"def update_password(self, user, password):\n user.password = hashers.make_password(password)",
"def update_password(username, new_password):\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Updates the VIDEOS table with the video ID, uploader username and video title. | def upload_video(self, video_ID, username, title): #WORKS
try:
view_count = 0
self.cur.execute("INSERT INTO videos VALUES(\"{}\", \"{}\", \"{}\", {}, NULL)".format(video_ID, title, username, view_count))
self.db.commit()
except:
self.db.rollback() | [
"def update_watched(self, username, video_ID): #WORKS\n try:\n done = self.cur.execute(\"SELECT * FROM watched WHERE username = \\\"{}\\\" AND video_ID = \\\"{}\\\"\".format(username, video_ID))\n if done == 1: # If the query was successful, one row exists\n self.cur.exec... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the title of the video with the corresponding video ID from the VIDEOS table. | def get_video_title(self, video_ID): #WORKS
try:
self.cur.execute("SELECT video_title FROM videos WHERE video_ID = \"{}\"".format(video_ID))
title = self.cur.fetchone()[0]
return title
except:
return "Error getting title" | [
"def video_title(self):\n # type: () -> string_types\n return self._video_title",
"def fetch_title(self, movie_id):\n movie = tmdbsimple.Movies(movie_id)\n request = movie.info()\n\n return movie.title",
"def get_title(self):\n self._clean_title()\n return self.v... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Updates the view count for the corresponding video ID in the VIDEOS table. | def update_view_count(self, video_ID): #WORKS
try:
self.cur.execute("UPDATE videos SET view_count = view_count + 1 WHERE video_ID = \"{}\"".format(video_ID)) # Adds 1 to the existing value.
self.db.commit()
except:
self.db.rollback() | [
"def put(self, url):\n # data = request.json\n video_url = pafy.new(url)\n update_url_views(url, video_url.viewcount)\n return {\"views\": video_url.viewcount}\n # return None, 204",
"def viewedVideo(videoId):\n\n if videoId in movieViewCounts:\n movieViewCounts['video... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adds the username and video ID to the WATCHED table. | def update_watched(self, username, video_ID): #WORKS
try:
done = self.cur.execute("SELECT * FROM watched WHERE username = \"{}\" AND video_ID = \"{}\"".format(username, video_ID))
if done == 1: # If the query was successful, one row exists
self.cur.execute("UPDATE watched... | [
"def upload_video(self, video_ID, username, title): #WORKS\n try:\n view_count = 0\n self.cur.execute(\"INSERT INTO videos VALUES(\\\"{}\\\", \\\"{}\\\", \\\"{}\\\", {}, NULL)\".format(video_ID, title, username, view_count))\n self.db.commit()\n except:\n se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the username of the user that uploaded the video with the corresponding video ID. | def get_video_uploader(self, video_ID): #WORKS
try:
done = self.cur.execute("SELECT uploader FROM videos WHERE video_ID = \"{}\"".format(video_ID))
uploader = self.cur.fetchone()[0]
return uploader
except:
return "Error getting username" | [
"def get_username(self):\n \n if self.livestream_user:\n return self.livestream_user\n \n if not self.original_url:\n return ''\n \n p = urlparse.urlparse(self.original_url)\n path_term = p.path.split('/')\n \n if len(path_term) == 3:\n if path_ter... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the upload date of the video from the VIDEOS table. | def get_upload_date(self, video_ID):
self.cur.execute("SELECT upload_date FROM videos WHERE video_ID = \"{}\"".format(video_ID))
return self.cur.fetchone()[0] | [
"def creation_date_video(path_to_file):\n print(\"Last modified: %s\" % time.ctime(os.path.getmtime(path_to_file)))\n print(\"Created: %s\" % time.ctime(os.path.getctime(path_to_file)))\n # return os.path.getctime(path_to_file)",
"def get_video_publishing_date(self, response):\n return response.cs... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a random video ID from the VIDEOS table. | def get_random_ID(self): # WORKS
self.cur.execute("SELECT video_ID FROM videos ORDER BY RAND() LIMIT 1") # Selects video_ID from 1 random row.
return self.cur.fetchone()[0] | [
"def get_video_id() -> str:\n return find_canvas_widget_by_name(\"video id\").get()",
"def play_random_video(self):\n allVideos = self._video_library.get_all_videos()\n videoPlay = []\n for video in allVideos:\n videoPlay.append(video.video_id)\n\n num = random.randin... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a list of video IDs watched by the user from the WATCHED table. | def get_watched(self, username): # WORKS
self.cur.execute("SELECT video_ID FROM watched WHERE username = \"{}\"".format(username))
watched_video_IDs = []
for ID in self.cur.fetchall():
watched_video_IDs.append(ID[0])
return watched_video_IDs | [
"def get_watchlists(user_id):\n # user = User.query.get(user_id)\n\n watchlists = Watchlist.query.filter(Watchlist.user_id == user_id).all()\n\n return watchlists",
"def view_watched_movies(username: str) -> list[tuple]:\n with connection:\n return connection.execute(VIEW_WATCHED_MOVIES, (usern... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the view count of the video with the corresponding video_ID. | def get_views(self, video_ID): # WORKS
self.cur.execute("SELECT view_count FROM videos WHERE video_ID = \"{}\"".format(video_ID))
return self.cur.fetchone()[0] | [
"def get_video_count(self):\n done = self.cur.execute(\"SELECT video_ID FROM videos\")\n return done",
"def get_num_videos():\n return Video.objects.count()",
"def count_videos(self):\n return len(self.videos)",
"def update_view_count(self, video_ID): #WORKS\n try:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Deletes the video from the database. | def delete_video(self, video_ID): # WORKS
try:
self.cur.execute("DELETE FROM videos WHERE video_ID = \"{}\"".format(video_ID))
self.db.commit()
os.remove('static/videos/' + str(video_ID) + '.mp4')
os.remove('static/images/' + str(video_ID) + '.jpg')
except... | [
"def delete_video(self):\n self.multipler_request('video/%s/delete' % self.file_id)",
"def delete_video(event_id, video_id):\n event = Event.query.get_or_404(event_id)\n if not current_user.is_organizer(event) and not current_user.is_administrator():\n return redirect(url_for(\"main.index\"))\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a list of all videos uploaded by the user with the corresponding username. | def get_uploaded(self, username): # WORKS
self.cur.execute("SELECT video_ID FROM videos WHERE uploader = \"{}\"".format(username))
uploaded_video_IDs = []
for ID in self.cur.fetchall():
uploaded_video_IDs.append(ID[0])
return uploaded_video_IDs | [
"def get_videos(self, user):\n raise NotImplementedError",
"def get_download_videos(self, username):\r\n for db_row in self.id_cursor.execute(\"SELECT * FROM users\"):\r\n if db_row[0] == username:\r\n return db_row[3]\r\n return \"\"",
"def get_videos(self, user):... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns True if the username is present in the USERS table. | def is_user_present(self, username): # WORKS
done = self.cur.execute("SELECT username FROM users WHERE username = \"{}\"".format(username))
if done == 1:
return True
else:
return False | [
"def has_user(self, username):\n return username in self.user_table",
"def _username_exists(self, username):\n\t\tencrypted_username = helper.encrypt_credentials(username, 'dummy')[0]\n\t\ttry:\n\t\t\tif c.execute('SELECT * FROM users WHERE username = ?', (encrypted_username, )).fetchone() is None:\n\t\t\t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a maximum of 5 random video IDs from the VIDEOS table. | def get_five_random_IDs(self):
self.cur.execute("SELECT video_ID FROM videos ORDER BY RAND() LIMIT 5")
IDs = []
for ID in self.cur.fetchall():
IDs.append(ID[0])
return IDs | [
"def get_random_ID(self): # WORKS\n self.cur.execute(\"SELECT video_ID FROM videos ORDER BY RAND() LIMIT 1\") # Selects video_ID from 1 random row.\n return self.cur.fetchone()[0]",
"def play_random_video(self):\n allVideos = self._video_library.get_all_videos()\n videoPlay = []\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adds the video ID to the FLAGS table. | def flag_ID(self, username, video_ID):
done = self.cur.execute("SELECT video_ID from flags WHERE video_ID = \"{}\"".format(video_ID))
if done == 0: # Not yet flagged by any user.
try:
self.cur.execute("INSERT INTO flags VALUES(\"{}\", \"{}\")".format(video_ID, username))
... | [
"def flag_video(self, video_id, flag_reason=\"Not supplied\"):\n video = self._video_library.get_video(video_id)\n if video == None:\n print(f\"Cannot flag video: Video does not exist\")\n return\n if video.flagged == True:\n print(f\"Cannot flag video: Video is... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the username of the user who flagged the video ID. | def get_flagger(self, video_ID):
self.cur.execute("SELECT username FROM flags WHERE video_ID = \"{}\"".format(video_ID))
return self.cur.fetchone()[0] | [
"def get_username(self):\n \n if self.livestream_user:\n return self.livestream_user\n \n if not self.original_url:\n return ''\n \n p = urlparse.urlparse(self.original_url)\n path_term = p.path.split('/')\n \n if len(path_term) == 3:\n if path_ter... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a list of flagged videos from FLAGS table. | def get_flagged(self):
self.cur.execute("SELECT video_ID FROM flags")
flagged_IDs = []
for ID in self.cur.fetchall():
flagged_IDs.append(ID[0])
return flagged_IDs | [
"def get_saved_videos_features():\n return db.get_all_videos()",
"def get_flag_count(self):\n done = self.cur.execute(\"SELECT video_ID FROM flags\")\n return done",
"def get_videos(self, binary_labels: bool = False) -> list(tuple((str, np.array))):\n dataset = []\n vids = [f for ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the number of the user's videos that have been flagged by other users. | def get_flagged_num(self, username):
done = self.cur.execute("SELECT flags.video_ID FROM videos,flags WHERE videos.video_ID = flags.video_ID AND videos.uploader = \"{}\"".format(username))
return done | [
"def get_user_video_count(self, username):\n done = self.cur.execute(\"SELECT video_ID FROM videos WHERE uploader = \\\"{}\\\"\".format(username))\n return done",
"def get_videos_count(self, user_settings=None, user_id=None, login=None, email=None):\n return objects_module.users.get_video... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns number of videos in the VIDEOS table. | def get_video_count(self):
done = self.cur.execute("SELECT video_ID FROM videos")
return done | [
"def get_num_videos():\n return Video.objects.count()",
"def count_videos(self):\n return len(self.videos)",
"def number_of_videos(self):\r\n\r\n num_videos = len(self._video_library.get_all_videos())\r\n print(f\"{num_videos} videos in the library\")",
"def number_of_videos(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the number of flagged videos from the FLAGS table. | def get_flag_count(self):
done = self.cur.execute("SELECT video_ID FROM flags")
return done | [
"def get_video_count(self):\n done = self.cur.execute(\"SELECT video_ID FROM videos\")\n return done",
"def get_num_videos():\n return Video.objects.count()",
"def count_videos(self):\n return len(self.videos)",
"def get_flagged_num(self, username):\n done = self.cur.execute(\"S... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns number of videos uploaded by the user from VIDEOS table. | def get_user_video_count(self, username):
done = self.cur.execute("SELECT video_ID FROM videos WHERE uploader = \"{}\"".format(username))
return done | [
"def get_num_videos():\n return Video.objects.count()",
"def get_video_count(self):\n done = self.cur.execute(\"SELECT video_ID FROM videos\")\n return done",
"def get_videos_count(self, user_settings=None, user_id=None, login=None, email=None):\n return objects_module.users.get_vide... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns number of views on all videos uploaded by the user from VIDEOS table. | def get_user_view_count(self, username):
self.cur.execute("SELECT CAST(SUM(view_count) AS DECIMAL(10, 0)) FROM videos WHERE uploader = \"{}\"".format(username))
return self.cur.fetchone()[0] | [
"def get_views(self, video_ID): # WORKS\n self.cur.execute(\"SELECT view_count FROM videos WHERE video_ID = \\\"{}\\\"\".format(video_ID))\n return self.cur.fetchone()[0]",
"def get_num_videos():\n return Video.objects.count()",
"def get_user_video_count(self, username):\n done = self.cu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the video ID of the video uploaded by the user with most views. | def get_best_video_ID(self, username):
self.cur.execute("SELECT video_ID FROM videos WHERE uploader = \"{}\" ORDER BY view_count DESC".format(username))
return self.cur.fetchone()[0] | [
"def get_most_viewed(self): # WORKS\n self.cur.execute(\"SELECT video_ID FROM videos ORDER BY CAST(view_count as decimal) DESC LIMIT 10\")\n most_viewed_video_IDs = []\n for ID in self.cur.fetchall():\n most_viewed_video_IDs.append(ID[0])\n return most_viewed_video_IDs",
"de... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the video ID of the user's favourite video. | def get_fav_video_ID(self, username):
self.cur.execute("SELECT video_ID FROM watched WHERE username = \"{}\" ORDER BY CAST(count as decimal) DESC".format(username))
return self.cur.fetchone()[0] | [
"def get_favourites(self, username):\n self.cur.execute(\"SELECT video_ID FROM favourites WHERE username = \\\"{}\\\"\".format(username))\n favourites = []\n for ID in self.cur.fetchall():\n favourites.append(ID[0])\n return favourites",
"def get_video_id(self, obj):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns list of videos favourited by the user from FAVOURITES table. | def get_favourites(self, username):
self.cur.execute("SELECT video_ID FROM favourites WHERE username = \"{}\"".format(username))
favourites = []
for ID in self.cur.fetchall():
favourites.append(ID[0])
return favourites | [
"def favorite_list(self, user_id=None):\n return self._get('favorites.json', {'user_id': user_id}, auth=True)",
"def favorites(self):\n path = self._get_path('favorites')\n \n response = self._GET(path)\n self._set_attrs_to_values(response)\n return self._clean_return(res... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Deletes the video from FLAGS table. | def delete_flag(self, video_ID):
try:
self.cur.execute("DELETE FROM flags WHERE video_ID = \"{}\"".format(video_ID))
self.db.commit()
except:
self.db.rollback() | [
"def delete_video(self):\n self.multipler_request('video/%s/delete' % self.file_id)",
"def do_delete(input_m3u, index):\n delete_video(input_m3u, index)",
"def delete_video(self, video_ID): # WORKS\n try:\n self.cur.execute(\"DELETE FROM videos WHERE video_ID = \\\"{}\\\"\".format(vi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a dictionary mapping video IDs to titles, and a list of video titles, for fuzzy search. | def video_dict(self):
self.cur.execute("SELECT video_ID, video_title FROM videos")
videos = {}
video_titles = []
for video in self.cur.fetchall():
video_titles.append(video[1])
videos.update({video[0] : video[1]})
return videos, video_titles | [
"def fuzzy(search_key, videos, video_titles):\n best_matches = process.extract(search_key, video_titles, limit=10)\n best_match_titles = []\n for match in best_matches:\n best_match_titles.append(match[0])\n best_match_IDs = []\n for title in best_match_titles:\n for ID in videos:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Drop any existing tables and create the SBML classes schema. URI is a string interpreted as an rfc1738 compatible database URI. | def init(db_uri, drop):
engine = create_engine(db_uri)
if drop.lower().startswith("y"):
Base.metadata.drop_all(bind=engine)
Base.metadata.create_all(bind=engine) | [
"def reset_db(self):\n try:\n with self.engine.begin() as connection:\n rows = connection.execute('SELECT schema_name FROM information_schema.schemata')\n schemas = {row['schema_name'] for row in rows} - self.INITIAL_SCHEMAS\n query = ';'.join('DROP SCH... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
switch from host view to services view and vice versa | def _controller_switch(self, widget, test, newpage):
if newpage == 0:
# switch to host view
self.on_services_view = False
try:
self.main.workspace.remove(self.services_view.notebook)
except:
# empty workspace
self.main.workspace.remove(self.main.welcome_note)
try:
self.main.works... | [
"def switch_view(self, view):\n self._mutex.acquire()\n\n if view != self._active_content:\n av = self._views.get(self._active_content)\n hav = self._views.get(self._active_content + '-head')\n\n bv = self._views.get(view)\n hbv = self._views.get(view + '-he... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add / remove host scope. | def _scope(self, widget, add, targets):
for host_obj in targets:
self.engine.database.switch_scope(add, host_obj)
self._sync() | [
"def test_get_hosts(self):\n new_scope = add_ip_scope('cyoung', 'test group', auth.creds, auth.url,\n network_address=term_access_ipam_network_scope)\n Scope1 = IPScope(term_access_ipam_network_scope, auth.creds, auth.url)\n Scope1.gethosts()\n self.assert... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Show / hide out-of-scope targets. | def _showhide_scope(self, widget):
self.host_list.toggle_scope()
self.services_list.toggle_scope()
self._sync()  # reset=True | [
"def ShowDockingGuides(guides, show):\r\n\r\n for target in guides:\r\n \r\n if show and not target.host.IsShown():\r\n target.host.Show()\r\n target.host.Update()\r\n \r\n elif not show and target.host.IsShown(): \r\n target.host.Hide()",
"de... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Show / hide the logs notebook. | def _showhide_logs(self, widget):
if self.main.view_logs.get_active():
self.logger.notebook.show()
else:
self.logger.notebook.hide() | [
"def hide_log(self):\n self.log_view.Hide()\n self.log_label.Hide()\n self.log_scheduler.pause_job(\"log_job\")\n self.SetSize((self.ui_width, self.ui_height_min))",
"def show_messages(self):\n self.masterlog.revealme()",
"def handle_logger(self):\n\n if self.console_dock... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
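The toggle pattern in the last record, reduced to a self-contained sketch (assumes PyGObject with GTK 3; the menu item and notebook names are illustrative):

import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk

def on_view_logs_toggled(menu_item, notebook):
    # Gtk.Widget.show()/hide() flips visibility without destroying the widget.
    if menu_item.get_active():
        notebook.show()
    else:
        notebook.hide()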
Services-list service click event; this will generate the scene in the scenes dictionary. | def services_row(self, listbox, cell, listboxrow):
self._clear_workspace()
if str(cell) in self.scenes["services_view"]:
# check if the scene was already loaded
self.services_view = self.scenes["services_view"][str(cell)]
else:
# get selected port
(model, pathlist) = self.services_list.servicestr... | [
"def on_select_scene(self, scene):\n pass",
"def services():\n\n return render_template('pages/services.html', \n title='Services')",
"def on_enable_scene(self):",
"def on_stage_clicked(e: Event, options: Dict[str, Any]) -> None:\r\n trace('Stage is clicked!')",
"def ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Specifies the type of threshold criteria. Expected value is 'DynamicThresholdCriterion'. | def criterion_type(self) -> str:
return pulumi.get(self, "criterion_type") | [
"def condition_threshold(self) -> Optional[pulumi.Input['MetricThresholdArgs']]:\n return pulumi.get(self, \"condition_threshold\")",
"def conditional_probability_threshold(self, threshold):\n self._conditional_probability_threshold = float(threshold)\n return self",
"def _estimate_threshol... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Name of the metric. | def metric_name(self) -> str:
return pulumi.get(self, "metric_name") | [
"def metric_name(self):\n pass",
"def metric_name(self) -> str:\n return self._metric_name",
"def metric_name(self):\n return self._metric_name",
"def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")",
"def metric_name(self):\n if self.cur... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The criteria time aggregation types. | def time_aggregation(self) -> str:
return pulumi.get(self, "time_aggregation") | [
"def types(self) -> Set[TimeType]:\n\n if self._types:\n return self._types\n\n month = self.date.month\n\n for month_range, time_type in self.__mapping__.items():\n if month in month_range:\n self._types.add(time_type)\n\n return self._types",
"def... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Allows creating an alert rule on a custom metric that isn't yet emitted, by causing the metric validation to be skipped. | def skip_metric_validation(self) -> Optional[bool]:
return pulumi.get(self, "skip_metric_validation") | [
"def new_metric_under_test(self):\n pass",
"def test_custom_metrics(self):\n pass",
"def on_metric_found(self, metric):",
"def create_metric(self) -> 'LossMetric':\n raise NotImplementedError()",
"def test_invalid_metric_name(mock_post):\n token = \"asdashdsauh_8aeraerf\"\n tags = {\"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The ID of the action group to use. | def action_group_id(self) -> Optional[str]:
return pulumi.get(self, "action_group_id") | [
"def action_group_id(self) -> str:\n return pulumi.get(self, \"action_group_id\")",
"def action_group_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"action_group_id\")",
"def group_act(group_id):\n return parser.OFPActionGroup(group_id)",
"def actionGroup( self ):\n return ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Specifies the metric alert criteria for multiple resource that has multiple metric criteria. | def __init__(__self__, *,
odata_type: str,
all_of: Optional[Sequence[Any]] = None):
pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Monitor.MultipleResourceMultipleMetricCriteria')
if all_of is not None:
pulumi.set(__self__, "all_of", all_of) | [
"def __init__(__self__, *,\n odata_type: str,\n all_of: Optional[Sequence['outputs.MetricCriteriaResponse']] = None):\n pulumi.set(__self__, \"odata_type\", 'Microsoft.Azure.Monitor.SingleResourceMultipleMetricCriteria')\n if all_of is not None:\n pulumi.set(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Specifies the metric alert criteria for a single resource that has multiple metric criteria. | def __init__(__self__, *,
odata_type: str,
all_of: Optional[Sequence['outputs.MetricCriteriaResponse']] = None):
pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Monitor.SingleResourceMultipleMetricCriteria')
if all_of is not None:
pulumi.set(__self__, "a... | [
"def __init__(__self__, *,\n odata_type: str,\n all_of: Optional[Sequence[Any]] = None):\n pulumi.set(__self__, \"odata_type\", 'Microsoft.Azure.Monitor.MultipleResourceMultipleMetricCriteria')\n if all_of is not None:\n pulumi.set(__self__, \"all_of\", all_o... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
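The two criteria records above differ only in the odata.type discriminator and in how tightly all_of is typed; a plain-Python sketch of that discriminator pattern (illustrative only, not the generated pulumi code):

def make_criteria(scope, all_of=None):
    # The discriminator tells Azure Monitor which criteria schema to parse.
    odata_types = {
        "single": "Microsoft.Azure.Monitor.SingleResourceMultipleMetricCriteria",
        "multiple": "Microsoft.Azure.Monitor.MultipleResourceMultipleMetricCriteria",
    }
    body = {"odata.type": odata_types[scope]}
    if all_of is not None:
        body["allOf"] = all_of
    return body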
The list of metric criteria for this 'all of' operation. | def all_of(self) -> Optional[Sequence['outputs.MetricCriteriaResponse']]:
return pulumi.get(self, "all_of") | [
"def to_criteria(self):\r\n c = []\r\n if self.minmax_criteria is not None:\r\n c.extend(self.minmax_criteria.values())\r\n\r\n return c",
"def GetCriteriaList(cls):\r\n\r\n if hasattr(cls, '_criteria_list'):\r\n return cls._criteria_list\r\n\r\n cls._criteria_list = [\r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The criteria threshold value that activates the alert. | def threshold(self) -> float:
return pulumi.get(self, "threshold") | [
"def getThreshold(self): # real signature unknown; restored from __doc__\n pass",
"def condition_threshold(self) -> Optional[pulumi.Input['MetricThresholdArgs']]:\n return pulumi.get(self, \"condition_threshold\")",
"def actualthreshold(self):\n return self._actualthreshold",
"def action_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The criteria time aggregation types. | def time_aggregation(self) -> str:
return pulumi.get(self, "time_aggregation") | [
"def types(self) -> Set[TimeType]:\n\n if self._types:\n return self._types\n\n month = self.date.month\n\n for month_range, time_type in self.__mapping__.items():\n if month in month_range:\n self._types.add(time_type)\n\n return self._types",
"def... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Allows creating an alert rule on a custom metric that isn't yet emitted, by causing the metric validation to be skipped. | def skip_metric_validation(self) -> Optional[bool]:
return pulumi.get(self, "skip_metric_validation") | [
"def new_metric_under_test(self):\n pass",
"def test_custom_metrics(self):\n pass",
"def on_metric_found(self, metric):",
"def create_metric(self) -> 'LossMetric':\n raise NotImplementedError()",
"def test_invalid_metric_name(mock_post):\n token = \"asdashdsauh_8aeraerf\"\n tags = {\"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The number of failed locations. | def failed_location_count(self) -> float:
return pulumi.get(self, "failed_location_count") | [
"def locations_count(self):\n return len(self.locations)",
"def num_failed(self):\n return self.test_results.num_failed",
"def failed_count(self):\n return self._failed_count",
"def num_locations(self):\n return len(self.locations)",
"def number_of_locations(self):\n retur... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The Application Insights web test Id. | def web_test_id(self) -> str:
return pulumi.get(self, "web_test_id") | [
"def get_test_ID(self):\n return self.test_id",
"def unique_id(self):\n # This is the automation unique_id\n return self._automation.automation_id",
"def application_insights_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_insights_id\")",
"def web_id(self):... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Pass in an index_name to be deleted. Raises an IndexNotFound exception if the index is missing on the node. | def delete_index(index_name):
try:
ES.indices.delete(index=[index_name])
except NotFoundError:
raise IndexNotFound(index_name) | [
"def delete_index(self, name):\n path = build_path(index_path,name)\n return self.request.delete(path,params=None)",
"def delete_index(self, index_name):\n return AlgoliaUtils_request(self.headers, self.write_hosts, \"DELETE\", \"/1/indexes/%s\" % quote(index_name.encode('utf8'), safe=''), se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
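Usage of delete_index above; a sketch assuming ES is a module-level elasticsearch-py client and IndexNotFound is the module's own exception type (both inferred from the record):

from elasticsearch import Elasticsearch, NotFoundError

ES = Elasticsearch()  # assumed module-level client

class IndexNotFound(Exception):
    """Raised when the named index does not exist on the node."""

try:
    delete_index("posts-2019")
except IndexNotFound as err:
    print("nothing to delete:", err)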
Create a document based on an instance of a model. Returns None if successful and an error string if not. | def create_document(obj):
index = obj.get_index_name()
doc_type = obj.get_document_type()
body = obj.get_document_body()
exists = ES.exists(index=index, doc_type=doc_type, id=obj.pk)
if not exists:
ES.create(index=index, doc_type=doc_type, body=body, id=obj.pk)
return None
retu... | [
"def create_object(obj):\n try:\n db.session.add(obj)\n db.session.commit()\n except SQLAlchemyError:\n sentry.captureException()\n db.session.rollback()\n current_app.logger.exception(\"Failed to CREATE {}\".format(obj))\n return None\n else:\n # create ela... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
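create_document above expects the model instance to describe its own index, doc type, and body; a minimal stub satisfying that contract (class and field names are illustrative; reuses the ES client from the previous sketch):

class Post:
    def __init__(self, pk, title):
        self.pk = pk
        self.title = title

    def get_index_name(self):
        return "posts"

    def get_document_type(self):
        return "post"

    def get_document_body(self):
        return {"title": self.title}

error = create_document(Post(pk=1, title="hello"))
if error is not None:
    print(error)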
Get a document based on the instance. Raises a DocumentNotFound exception if the document is not found on the index. | def get_document(obj):
try:
return ES.get(
index=obj.get_index_name(), doc_type=obj.get_document_type(), id=obj.pk)
except NotFoundError:
raise DocumentNotFound(obj.get_index_name(), obj.pk) | [
"def get_document(index, ID):\n try:\n global ES\n return ES.get(index = index, id = ID)\n\n except Exception as e:\n return e",
"def get_document(self, index_name: str, idx):\n\n return self.es.get(index = index_name, id = idx)",
"def get_document(self, a_document_id_or_instan... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
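And the matching read path, hedged the same way (DocumentNotFound assumed to take the index name and document id, as in the record; Post is the stub above):

try:
    doc = get_document(Post(pk=1, title="hello"))
    print(doc["_source"])
except DocumentNotFound:
    print("document 1 is not indexed yet")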