| query (string) | document (string) | negatives (list) | metadata (dict) |
|---|---|---|---|
Pulls the given repository on the given system | def pull(connection, rid=None, repo=None):
if repo is None:
repo = Repository(connection, rid)
return repo.pull() | [
"def pull_repository(repo: Repository):\n origin = Repo(repo.directory).remote() # todo: handle exceptions: can throw ValueError\n origin.pull()",
"def repo_pull(self):\n\n if self.clowder_repo is None:\n exit_clowder_not_found()\n\n if is_offline():\n print(fmt.offline_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
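Each row above pairs a natural-language `query` (typically a docstring) with a positive `document` (the implementing function) and a list of hard `negatives` (similar but non-matching functions); the `metadata.objective.triplet` entry marks the rows for (query, document, negatives) contrastive training. As a minimal sketch of how such rows might be consumed — assuming a hypothetical JSON-lines export named `triplets.jsonl`, which is not part of this dump — one could expand each row into training triplets:

```python
import json
from typing import Iterator, Tuple

def iter_triplets(path: str) -> Iterator[Tuple[str, str, str]]:
    """Yield (query, positive, negative) triplets from a JSON-lines dump of this dataset."""
    # path: hypothetical export with "query", "document", "negatives" fields per line
    with open(path, "r", encoding="utf-8") as handle:
        for line in handle:
            row = json.loads(line)
            # One positive document per query; pair it with every hard negative.
            for negative in row["negatives"]:
                yield row["query"], row["document"], negative
```

Each yielded triplet can then feed a standard triplet or InfoNCE loss for code-search retrieval training.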
Get the statistics for all builders. | def get_buildbot_stats(time_window: datetime.datetime) -> BuildStats:
print('getting list of builders...')
stats = BuildStats()
for builder in requests.get(BASE_URL).json().keys():
# TODO: maybe filter the builds to the ones we care about
        stats += get_builder_stats(builder, time_window)
... | [
"def get_builder_stats(builder: str, time_window: datetime.datetime) -> BuildStats:\n print('Gettings builds for {}...'.format(builder))\n # TODO: can we limit the data we're requesting?\n url = '{}/{}/builds/_all'.format(BASE_URL, builder)\n stats = BuildStats()\n for build, results in requests.get(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the statistics for one builder. | def get_builder_stats(builder: str, time_window: datetime.datetime) -> BuildStats:
    print('Getting builds for {}...'.format(builder))
# TODO: can we limit the data we're requesting?
url = '{}/{}/builds/_all'.format(BASE_URL, builder)
stats = BuildStats()
for build, results in requests.get(url).json(... | [
"def get_buildbot_stats(time_window : datetime.datetime) -> BuildStats:\n print('getting list of builders...')\n stats = BuildStats()\n for builder in requests.get(BASE_URL).json().keys():\n # TODO: maybe filter the builds to the ones we care about\n stats += get_builder_stats(builder, time_w... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create metric descriptors on Stackdriver. Recreating these with every call is fine. | def gcp_create_metric_descriptor(project_id: str):
client = monitoring_v3.MetricServiceClient()
project_name = client.project_path(project_id)
for desc_type, desc_desc in [
["buildbots_percent_failed", "Percentage of failed builds"],
["buildbots_builds_successful", "Number of successful bui... | [
"def _create_metrics(self):\n self.registry = prometheus_client.CollectorRegistry()\n self.quota_free_count = prometheus_client.Gauge(\n 'kuryr_quota_free_count', 'Amount of quota available'\n ' for the network resource', labelnames={'resource'},\n registry=self.regist... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
initialize a receptor library by setting the number of receptors, the number of substrates it can respond to, and optional additional parameters in the parameter dictionary | def __init__(self, num_substrates, num_receptors, parameters=None):
# the call to the inherited method also sets the default parameters from
# this class
super(LibraryBinaryNumeric, self).__init__(num_substrates,
num_receptors, parameters) ... | [
"def _get_receptor_params(self):\r\n if self._generic_numbers:\r\n self.starting_res_num = min(self._generic_numbers)\r\n self.ending_res_num = max(self._generic_numbers)\r\n self.seq_len = len(self._generic_numbers)\r\n else:\r\n self.starting_res_num = min... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
calculate the number of steps to do for `scheme` | def get_steps(self, scheme):
if scheme == 'monte_carlo':
# calculate the number of steps for a monte-carlo scheme
if self.parameters['monte_carlo_steps'] == 'auto':
steps_min = self.parameters['monte_carlo_steps_min']
steps_max = self.parameters['monte_car... | [
"def get_steps_num():\n return 0",
"def decode_step_count(self, board=None):\n # TODO decide which one is better.. not crucial\n # steps = 0\n # for key_pow, val_coor in self.read_bits.items():\n # steps += (self.matrix_board[val_coor] * 2) ** key_pow\n # return step... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the sorted `sensitivity_matrix` or sorts the internal sensitivity_matrix in place. This function rearranges receptors such that receptors reacting to an equal number of substrates and to similar substrates are close together. | def sort_sensitivity_matrix(self, sensitivity_matrix=None):
if sensitivity_matrix is None:
sens_mat = self.sens_mat
else:
sens_mat = sensitivity_matrix
data = [(sum(item), list(item)) for item in sens_mat]
sens_mat = np.array([item[1] for item in sorted(... | [
"def SortAndFilterSuspects(self, suspects):\n if not suspects or len(suspects) == 1:\n return suspects\n\n suspects.sort(key=lambda suspect: -suspect.confidence)\n max_score = suspects[0].confidence\n min_score = max(suspects[-1].confidence, 0.0)\n if max_score == min_score:\n return []\n\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
iterate over all mixtures and yield each mixture with its probability | def _iterate_mixtures(self):
if self._iterate_steps > self.parameters['max_steps']:
raise RuntimeError('The iteration would take more than %g steps'
% self.parameters['max_steps'])
hi = self.commonness
Jij = self.correlations
... | [
"def sampler(dataset):\n while True:\n indices = np.random.permutation(len(dataset))\n for idx in indices:\n yield dataset[idx]",
"def _sample_binary_mixtures(model, steps, dtype=np.uint):\n mixture_size = model.parameters['fixed_mixture_size']\n \n if not model.is_cor... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
calculates mixture statistics using a brute force algorithm | def mixture_statistics_brute_force(self):
Z = 0
hist1d = np.zeros(self.Ns)
hist2d = np.zeros((self.Ns, self.Ns))
# iterate over all mixtures
for c, weight_c in self._iterate_mixtures():
Z += weight_c
hist1d += c * weight_c
... | [
"def calculate_mixture_features(args):\n workspace = args.workspace\n speech_dir = args.speech_dir\n noise_dir = args.noise_dir\n data_type = args.data_type\n fs = cfg.sample_rate\n dir_name = args.dir_name\n\n fid_clean = open(speech_dir, 'r')\n lines_clean = fid_clean.readlines()\n fid_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
estimates the mixture statistics | def mixture_statistics_estimate(self):
ci_mean = self.substrate_probabilities
if self.is_correlated_mixture:
J_ij = self.correlations
pi_s = ci_mean
bar_pi_s = 1 - pi_s
ci_mean = pi_s * (1 + 2*bar_pi_s*np.dot(J_ij, pi_s))
... | [
"def mixture_statistics_brute_force(self):\n \n Z = 0\n hist1d = np.zeros(self.Ns)\n hist2d = np.zeros((self.Ns, self.Ns))\n \n # iterate over all mixtures\n for c, weight_c in self._iterate_mixtures():\n Z += weight_c \n hist1d += c * we... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
gets the entropy in the mixture distribution using brute force | def mixture_entropy_brute_force(self):
Z, sum_wlogw = 0, 0
# Naive implementation of measuring the entropy is
# p(c) = w(c) / Z with Z = sum_c w(c)
# H_c = -sum_c p(c) * log2(p(c))
# This can be transformed to a more stable implementation:
# H_c = log2(Z) - 1... | [
"def entropy(samples):\r\n counterList = classCounter(samples)\r\n probability = [x / sum(counterList) for x in counterList]\r\n return -sum([x * math.log2(x) for x in probability if x != 0])",
"def permutation_entropy(x, n, tau):",
"def entropy(self):\n Z = self.sum()\n assert (Z > 0), 'Non-norm... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
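The comment in `mixture_entropy_brute_force` above is cut off mid-identity. Spelling out the algebra it starts (a reconstruction from the visible comment, not the elided source): substituting p(c) = w(c)/Z with Z = Σ_c w(c) into the entropy definition gives

$$
H_c = -\sum_c \frac{w(c)}{Z}\left(\log_2 w(c) - \log_2 Z\right) = \log_2 Z - \frac{1}{Z}\sum_c w(c)\log_2 w(c),
$$

which needs only the two running accumulators the function initializes (`Z, sum_wlogw = 0, 0`) and avoids normalizing every weight before taking logarithms.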
estimates the average activity of the receptor as a response to single ligands. `ret_receptor_activity` determines whether the mean receptor activity will also be returned. `approx_prob` determines whether the probabilities of encountering ligands in mixtures are calculated exactly or only approximately, which should w... | def receptor_crosstalk_estimate(self, ret_receptor_activity=False,
approx_prob=False, clip=False,
ignore_correlations=False):
if not ignore_correlations and self.is_correlated_mixture:
r_n, r_nm = self.receptor_activity_estimate... | [
"def receptor_activity_estimate(self, ret_correlations=False,\n approx_prob=False, clip=False):\n S_ni = self.sens_mat\n p_i = self.substrate_probabilities\n\n # calculate receptor activity assuming uncorrelated mixtures \n if approx_prob:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
calculates the average activity of each receptor. `method` can be ['brute_force', 'monte_carlo', 'estimate', 'auto']. If it is 'auto' then the method is chosen automatically based on the problem size. | def receptor_activity(self, method='auto', ret_correlations=False, **kwargs):
if method == 'auto':
if self.Ns <= self.parameters['brute_force_threshold_Ns']:
method = 'brute_force'
else:
method = 'monte_carlo'
if method == 'brute_f... | [
"def receptor_score(self, method='auto', multiprocessing=False):\n init_arguments = self.init_arguments\n init_arguments['parameters']['initialize_state']['sensitivity'] = 'exact'\n init_arguments['parameters']['sensitivity_matrix'] = self.sens_mat\n joblist = [(copy.deepcopy(self.init_a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
estimates the average activity of each receptor. `ret_correlations` determines whether the correlations between receptors are returned in addition to the mean activations. `approx_prob` determines whether the probabilities of encountering substrates in mixtures are calculated exactly or only approximately, which should... | def receptor_activity_estimate(self, ret_correlations=False,
approx_prob=False, clip=False):
S_ni = self.sens_mat
p_i = self.substrate_probabilities
# calculate receptor activity assuming uncorrelated mixtures
if approx_prob:
# app... | [
"def receptor_crosstalk_estimate(self, ret_receptor_activity=False,\n approx_prob=False, clip=False,\n ignore_correlations=False):\n if not ignore_correlations and self.is_correlated_mixture:\n r_n, r_nm = self.receptor_activity... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
calculate the mutual information. `excitation_method` can be ['brute_force', 'monte_carlo', 'estimate', 'auto']. If it is 'auto' then the excitation_method is chosen automatically based on the problem size. `ret_prob_activity` determines whether the probabilities of the different outputs are returned or not | def mutual_information(self, excitation_method='auto', **kwargs):
if excitation_method == 'auto':
if self.Ns <= self.parameters['brute_force_threshold_Ns']:
excitation_method = 'brute_force'
else:
excitation_method = 'monte_carlo'
... | [
"def mutual_information_brute_force(self, ret_prob_activity=False):\n base = 2 ** np.arange(0, self.Nr)\n\n # prob_a contains the probability of finding activity a as an output.\n prob_a = np.zeros(2**self.Nr)\n for c, prob_c in self._iterate_mixtures():\n # get the associated... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
calculate the mutual information by constructing all possible mixtures | def mutual_information_brute_force(self, ret_prob_activity=False):
base = 2 ** np.arange(0, self.Nr)
# prob_a contains the probability of finding activity a as an output.
prob_a = np.zeros(2**self.Nr)
for c, prob_c in self._iterate_mixtures():
# get the associated output ...... | [
"def mixture_statistics_brute_force(self):\n \n Z = 0\n hist1d = np.zeros(self.Ns)\n hist2d = np.zeros((self.Ns, self.Ns))\n \n # iterate over all mixtures\n for c, weight_c in self._iterate_mixtures():\n Z += weight_c \n hist1d += c * we... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
returns a simple estimate of the mutual information. `approx_prob` determines whether the probabilities of encountering substrates in mixtures are calculated exactly or only approximately, which should work for small probabilities. | def mutual_information_estimate(self, approx_prob=False):
# this might be not the right approach
q_n = self.receptor_activity_estimate(approx_prob=approx_prob)
q_nm = self.receptor_crosstalk_estimate(approx_prob=approx_prob)
# calculate the approximate mutual info... | [
"def __estimate_mutual_information(priors, joints):\n # marginal factorizations\n mar_facts = np.zeros((priors.shape[0], priors.shape[0], 2, 2))\n # outers[i, j, 0, 0] := P(X_i=0) * P(X_j=0)\n mar_facts[:, :, 0, 0] = np.outer(priors[:, 0], priors[:, 0])\n # outers[i, j, 0, 1] := P... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
calculates the usefulness of each receptor, measured by how much information it adds to the total mutual information. `method` determines which method is used to determine the mutual information. `multiprocessing` determines whether multiprocessing is used for determining the mutual informations of all subsystems. | def receptor_score(self, method='auto', multiprocessing=False):
init_arguments = self.init_arguments
init_arguments['parameters']['initialize_state']['sensitivity'] = 'exact'
init_arguments['parameters']['sensitivity_matrix'] = self.sens_mat
joblist = [(copy.deepcopy(self.init_arguments)... | [
"def mutual_information(self, excitation_method='auto', **kwargs):\n if excitation_method == 'auto':\n if self.Ns <= self.parameters['brute_force_threshold_Ns']:\n excitation_method = 'brute_force'\n else:\n excitation_method = 'monte_carlo'\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
optimizes the current library to maximize the result of the target function using gradient descent. By default, the function returns the best value and the associated interaction matrix as result. `direction` is either 'min' or 'max' and determines whether a minimum or a maximum is sought. `steps` determines how many o... | def optimize_library_descent(self, target, direction='max', steps=100,
multiprocessing=False, ret_info=False,
args=None):
# get the target function to call
target_function = getattr(self, target)
if args is not None:
t... | [
"def optimize_library_anneal(self, target, direction='max', steps=100,\n ret_info=False, args=None):\n # lazy import\n from .optimizer import ReceptorOptimizerAnnealer # @UnresolvedImport\n \n # prepare the class that manages the simulated annealing\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
optimizes the current library to maximize the result of the target function using simulated annealing. By default, the function returns the best value and the associated interaction matrix as result. `direction` is either 'min' or 'max' and determines whether a minimum or a maximum is sought. `steps` determines how man... | def optimize_library_anneal(self, target, direction='max', steps=100,
ret_info=False, args=None):
# lazy import
from .optimizer import ReceptorOptimizerAnnealer # @UnresolvedImport
# prepare the class that manages the simulated annealing
annealer... | [
"def optimize_library_descent(self, target, direction='max', steps=100,\n multiprocessing=False, ret_info=False,\n args=None):\n # get the target function to call\n target_function = getattr(self, target)\n if args is not None:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
generator function that samples mixtures according to the `model`. `steps` determines how many mixtures are sampled `dtype` determines the dtype of the resulting concentration vector | def _sample_binary_mixtures(model, steps, dtype=np.uint):
mixture_size = model.parameters['fixed_mixture_size']
if not model.is_correlated_mixture and mixture_size is None:
# use simple monte carlo algorithm
prob_s = model.substrate_probabilities
for _ in range(int(... | [
"def _iterate_mixtures(self):\n \n if self._iterate_steps > self.parameters['max_steps']:\n raise RuntimeError('The iteration would take more than %g steps'\n % self.parameters['max_steps'])\n \n hi = self.commonness\n Jij = self.correlatio... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
test the performance of the brute force and the Monte Carlo method | def performance_test(Ns=15, Nr=3):
num = 2**Ns
hs = np.random.random(Ns)
model = LibraryBinaryNumeric(Ns, Nr, hs)
start = time.time()
model.mutual_information_brute_force()
time_brute_force = time.time() - start
print('Brute force: %g sec' % time_brute_force)
start = time.time(... | [
"def main():\n hash_str = '5411ba21c470e12d49f351a2d240e43618032950'\n salt = '0d71906d0f735e6196c80d0a7cb1748e'\n encrypted_code = 'Ul5SR0ISYFxUXl8OOxITFBFWVlIRQVtRXV4bHQs4ExQREhMUERJDRlhcRxwWZltdQhJaRxFTE0BUQUcUXF1XQV1XFB07EhMUE' \\\n 'RITFBFCQV1fRhsTZVdAQBFhRldSV0BHV0dfGhYbOQ=='\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a user-friendly string representation of the Classroom | def __str__(self):
return 'Classroom {} has a capacity of {} persons and ' \
'has the following equipment: {}.'.format(
self.number, str(self.capacity), ', '.join(self.equipment)) | [
"def __repr__(self):\n return f\"User('{self.username}', '{self.room.name}')\"",
"def create_classroom(course_code: str, course_name: str, period: int, teacher: str) -> Dict:\n classroom = {\"course_code\": course_code,\n \"course_name\": course_name,\n \"period\": period... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Classroom, Classroom -> bool. Returns True if the first room has a bigger capacity than the second room | def is_larger(self, room2):
return self.capacity > room2.capacity | [
"def is_larger(self, classroom):\n if self.capacity > classroom.capacity:\n return True\n return False",
"def is_larger(self, room):\n return True if self.capacity > room.capacity else False",
"def __gt__(self, other: Card) -> bool:\n return not self.__le__(other)",
"def... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Classroom, Classroom -> list. Returns the equipment in the first room that is missing in the second | def equipment_differences(self, room2):
return sorted(list(set(self.equipment).difference(room2.equipment))) | [
"def equipment_differences(self, classroom):\n differences = []\n for equipment in self.equipment:\n if equipment not in classroom.equipment:\n differences.append(equipment)\n return differences",
"def equipment_differences(self, room):\n return [i for i in se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Takes a datetime object and returns POSIX UTC in nanoseconds | def date_to_nano(ts):
    return calendar.timegm(ts.utctimetuple()) * int(1e9) | [
"def convert_datetime_to_nanoseconds(datetime_obj):\n\n jan1_2001 = datetime.strptime(\"01-01-2001, 00:00:00.000000\", \"%m-%d-%Y, %H:%M:%S.%f\")\n\n difference = datetime_obj - jan1_2001\n\n UTC_time_diff = 7*60*60 # 7 hours converted into seconds\n\n seconds_difference = (difference.days*24*60*60) + d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
crop a square from a random location in image | def crop_square(image, size):
width, height = image.size
top = random.randint(0, max(0, height-size))
left = random.randint(0, max(0, width-size))
bottom = min(top + size, height)
right = min(left + size, width)
return image.crop((left, top, right, bottom)) | [
"def __randomCrop(self, img):\n limit = self.PROCESSING_DIM - self.INPUT_DIM\n # pick 2 random integers less than this limit as the origin of the cropped image\n x_start = np.random.randint(limit)\n y_start = np.random.randint(limit)\n return img.crop((x_start, y_start, x_start + ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generates an SGF file with the game provided within the temp directory | def _get_input_filepath(self, game_id: int) -> str:
with self._db_connection as connection:
with connection.cursor() as cursor:
cursor.execute('SELECT sgf_content FROM games WHERE id=%s', (game_id,))
if cursor.rowcount == 0:
raise GameNotFoundError... | [
"def _generate_gas_file(dest_dir, d_hubble_0):\n template = templates.get(\"ahf.gas.param\")\n gas_file_path = os.path.join(dest_dir, \"gas.param\")\n\n with open(gas_file_path, \"w+\") as gas_file:\n gas_file.write(template.format(dHubble0=d_hubble_0))\n\n return gas_file_path",
"def saveGame(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
return a tuple of (isHit, hitResult). isHit is a Boolean that is True in case of a hit and False in case of a miss. In case of a hit, hitResult is the HPA if request.addr is cached in the TLB; otherwise hitResult is None | def lookup(self, request):
if request.addr in self._addressMap:
return (True, self._addressMap[request.addr])
else:
return (False, None) | [
"def is_hit(self, address):\n set_index, tag = self.get_index_tag(address)\n one_set = [x[\"Tag\"] for x in self.cache[set_index]]\n return one_set.index(tag) if tag in one_set else -1",
"def _may_cache(self, request, response=None):\n # any successful request may be cached\n re... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
update the tlb with the translation address from updateObj | def update(self, updateObj):
#if we've allocated all free entries in tlb
if len(self._allocatedQ) == self._maxSize:
#remove the old entries from the tlb (fifo order)
oldUpdateObj = self._allocatedQ.popleft()
del self._addressMap[oldUpdateObj.requestAddr]
reqA... | [
"def update_translation(self):\n pass",
"def _update_object(self, data_dict):\r\n pass",
"def rs_edit_upd(obj):\n verts = [x.co for x in obj.data.vertices]\n if verts[0] != Vector():\n fix = Vector(verts[0])\n for i in range(len(verts)):\n obj.data.vertices[i].co = o... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create asset; needs correct type, title, label and url | def create(self) -> requests.request:
# Check needed values
if None in [self.args.type, self.args.title, self.args.label, self.args.url]:
raise Exception('Provide all parameters for asset creation')
# Check type
if self.args.type not in ['photo', 'video']:
raise E... | [
"def create_assettype(self, name, label):\n\n url = \"/api/assettype/{0}\".format(name)\n params = {'label': label}\n response = self._query(\"put\", url, params)\n return response",
"def create_asset(asset_name, asset_path):\n\n raise RuntimeError('create_asset function not impleme... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update asset, needs ID, title, label and url | def update(self) -> requests.request:
# Check if id is set
if self.args.id is None:
raise Exception('Provide id of asset you want to update')
# Check URL validity
if self.args.url is not None and self.check_url_invalidity():
raise Exception('Provided URL is not v... | [
"def update_asset(cls, id, asset_data):\n\n return ph_base._update_record('asset', id, asset_data)",
"def put(asset, **kwargs):\n id_key = next(_parse_id(kwargs), None)\n\n if id_key is None:\n raise TypeError('unable to parse id key')\n\n data_key = next(_parse_id(kwargs, _not=True), None)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete asset, needs ID | def delete(self) -> requests.request:
# Check if id is set
if self.args.id is None:
raise Exception('Provide id of asset you want to delete')
# Send DELETE request
return requests.delete(self.REQUEST_URL + str(self.args.id)) | [
"def delete_asset(self, asset_id, asset_type):\n return self.asset(asset_id, asset_type=asset_type, action='DELETE')",
"def deleteById(self, asset_id):\n self.sql_execute(\n \"DELETE FROM %s \" % (self.__tablename__) +\n \"WHERE asset_id=?\",\n (asset_id)\n )"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns True if URL is invalid, False if it is not | def check_url_invalidity(self) -> bool:
validate = URLValidator()
try:
validate(self.args.url)
return False
except ValidationError:
return True | [
"def is_valid_url(url):\n validate = URLValidator()\n try:\n validate(url) # check if url format is valid\n except ValidationError:\n return False\n\n return True",
"def is_valid_url(self):\n url_obj = urlparse(self.url)\n if not url_obj: \n logger.warn('url_obj ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Decorator that lifts an unary predicate into a Predicate. | def predicate(f):
wrapper = Predicate(f)
update_wrapper(wrapper, f)
return wrapper | [
"def load_unary_predicate(node, symast):\n return load_unary(node, symast, \"Unary_Predicate\", {\"not\":ast.make_not})",
"def skipUnlessReturnsTrue(predicate):\n if not predicate():\n desc = getattr(predicate, '__doc__', None) or repr(predicate)\n return skip(\"predicate evaluated to false: %... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test the transaction_for_doi method | def test_get_transaction_for_doi(self):
# Submit a reserve, then use the assigned doi to get the transaction record
reserve_kwargs = {
"input": join(self.input_dir, "pds4_bundle_with_contributors.xml"),
"node": "img",
"submitter": "my_user@my_node.gov",
"f... | [
"def test_get_transaction_for_identifier(self):\n # Submit a reserve, then use the PDS identifier to get the transaction record\n reserve_kwargs = {\n \"input\": join(self.input_dir, \"pds4_bundle_with_contributors.xml\"),\n \"node\": \"img\",\n \"submitter\": \"my_use... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test the transaction_for_identifier method | def test_get_transaction_for_identifier(self):
# Submit a reserve, then use the PDS identifier to get the transaction record
reserve_kwargs = {
"input": join(self.input_dir, "pds4_bundle_with_contributors.xml"),
"node": "img",
"submitter": "my_user@my_node.gov",
... | [
"def test_retrieve_transaction(self):\n pass",
"def test_create_transaction(self):\n pass",
"def test_transaction_tid(self, transactions):\n for item in transactions:\n transaction = self.transaction()\n transaction.tid = item.get('tid')\n tid = transaction.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test the output_label_for_transaction method | def test_get_output_label_for_transaction(self):
# Submit a reserve, then use the PDS identifier to get the transaction record
reserve_kwargs = {
"input": join(self.input_dir, "pds4_bundle_with_contributors.xml"),
"node": "img",
"submitter": "my_user@my_node.gov",
... | [
"def test_label(self, op, decimals, expected):\n assert op.label(decimals=decimals) == expected",
"def test_labels(self):\n self.compliance_tester.test_labels(self.oi)",
"def test_label_model_logger(self):",
"def test_label(self):\n xs = t.Label(t.Exactly(\"x\"), 'CustomLabel')\n s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns count of open changes per reviewer per project. Fetches all open changes from Gerrit and returns a dictionary containing all projects with open changes, and for each project, all reviewers and the count of changes they are reviewing. e.g. { | def get_open_change_reviewers_per_project():
config = GerritFetchConfig()
open_changes = fetch.fetch_open_changes(
config.hostname(), config.username(), config.port())
open_change_reviewers_per_project = {}
for gerrit_change in open_changes:
project = gerrit_change.project
review... | [
"def _get_reviewer_change_count(reviewer, project_name, from_datetime):\n if project_name == PROJECT_ALL:\n # changes across all projects after from_datetime\n changes = reviewer.changes.filter(\n timestamp__gte=from_datetime).distinct()\n else:\n # changes in given project aft... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a UTC-aware datetime when USE_TZ=True. | def tz_aware(value: datetime) -> datetime:
if settings.USE_TZ:
value = value.replace(tzinfo=timezone.utc)
return value | [
"def utcnow_aware():\n return datetime.now(zulutime())",
"def utcdatetime(*args, **kwargs):\n from datetime import datetime\n from django.conf import settings\n\n if settings.USE_TZ:\n kwargs.update(tzinfo=utc)\n\n return datetime(*args, **kwargs)",
"def utcnow() -> datetime:\n return d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adds a step to the calculated metrics | def add_step(self):
assert self.y_real is not None and self.y_predicted is not None
# Calculates some metrics
rmse = Metrics.rmse_loss(self.y_real, self.y_predicted)
mse = Metrics.mse_loss(self.y_real, self.y_predicted)
cm = Metrics.confusion_matrix(self.y_real, self.y_predicted... | [
"def on_step_end(self, step, logs):\n self.metrics[logs['episode']].append(logs['metrics'])",
"def add_step(self, step):\n if not step:\n return\n temp = {Result.__STEP: step.get_name(),\n Result.__STATUS: step.get_status(),\n Result.__MESSAGE: step.ge... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get all Events by user_id | def get_event_by_user_id(user_id):
return Event.query.filter(Event.user_id == user_id).order_by(Event.created_at.desc()).all() | [
"def get_user_events(meeting_id, user_id):\n global collection\n meeting = collection.find_one({ '_id': meeting_id })\n for user in meeting['users']:\n if user['user_id'] == user_id:\n return user['events']",
"def event_get(tenant_id, user_id=None):",
"async def retrieve_user_events(s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create and return Job Details | def create_job_detail(company_name, job_title, application_deadline, job_listing_url, state, city, application_listed, salary):
job_detail = JobDetail(company_name = company_name, job_title = job_title, application_deadline = application_deadline, job_listing_url = job_listing_url, state = state , city = city, app... | [
"def _details(self) -> Mapping[str, Any]:\n return self._connection.request(\"GET\", f\"/jobs/{self.id}\").json()",
"def create_job():\n data = request.json\n job = {\n \"repository_url\": data.pop(\"repo_url\", None),\n \"commit_hash\": data.pop(\"commit_hash\", None),\n \"branc... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return all job details. | def get_job_detail():
return JobDetail.query.all() | [
"def _details(self) -> Mapping[str, Any]:\n return self._connection.request(\"GET\", f\"/jobs/{self.id}\").json()",
"def list():\n\treturn _jobs.all()",
"async def list_jobs(self) -> Dict[str, JobInfo]:\n return await self._job_info_client.get_all_jobs()",
"def get_all_tasks(self):\n if n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a job detail by primary key. | def get_job_detail_by_id(job_detail_id):
return JobDetail.query.get(job_detail_id) | [
"def get_item(self, job_id: int) -> DatabaseEntry:\n try:\n with Session(self.engine) as session:\n job_data = (\n session.query(\n Job.__table__, Project.location, JobStatus.status, JobType.type\n )\n .... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return all job applications. | def get_job_applied():
return JobCompletedApplication.query.all() | [
"def get_all_jobs():\n return base_jobs.JobMetaclass.get_all_jobs()",
"def get_jobs(self):\r\n\r\n # TODO: add jobs as well..\r\n return list(JOBS.keys())",
"def job_list(self):\n return self._job_list",
"def get_job_list(self):\n return self.job_list",
"def all_jobs(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the job applied id for a given job id. | def get_job_applied_by_job_id(job_id):
return JobCompletedApplication.query.filter(JobCompletedApplication.job_id == job_id).first().job_applied_id | [
"def get_job(self, job_id):\n return self.jobs[job_id]",
"def get_job(self, job_id):\n job = None\n for job_instance in self.running_jobs:\n if job_id == job_instance.id:\n job = job_instance\n break\n\n return job",
"def get_submitted_job(self, job_id: str) -> Job:\n r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return all notes created. | def get_note():
return Note.query.all() | [
"def notes(self):\r\n return self._request('notes', 'all')",
"def listNotes() -> list:\n list_of_notes = []\n for note in Note.objects.all():\n list_of_notes.append({\n 'uuid': note.uuid, 'title': note.title,\n 'author': note.author, 'body': note.body, 'created_at': local... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return all notes for job applied id. | def all_note_by_job_applied_id(job_applied_id):
return Note.query.filter(Note.job_applied_id == job_applied_id, Note.note_category == 'Note' ).all() | [
"def all_jd_by_job_applied_id(job_applied_id): \n return Note.query.filter(Note.job_applied_id == job_applied_id, Note.note_category == 'Job Description' ).order_by(Note.note_date_created.desc()).first()",
"def notes(self):\r\n return self._request('notes', 'all')",
"def all_recruiter_by_job_applied... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the most recent job description for job applied id. | def all_jd_by_job_applied_id(job_applied_id):
return Note.query.filter(Note.job_applied_id == job_applied_id, Note.note_category == 'Job Description' ).order_by(Note.note_date_created.desc()).first() | [
"def job_description(self):\n return self._job_description",
"def get_job_description(self, job, context=None):\n return self._client.call_method(\n 'UserAndJobState.get_job_description',\n [job], self._service_ver, context)",
"def read_job(self, id):\n uri = '/2012-09... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return all recruiter details for job applied id. | def all_recruiter_by_job_applied_id(job_applied_id):
return Note.query.filter(Note.job_applied_id == job_applied_id, Note.note_category == 'Recruiter Contact' ).all() | [
"def all_resume_by_job_applied_id(job_applied_id): \n return Note.query.filter(Note.job_applied_id == job_applied_id, Note.note_category == 'Resume' ).all()",
"def scrape_recruitment(self):\n d = self.driver\n recruitment_page = self.guildwork_url + '/recruitment'\n d.get(recruitment_pag... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return all Resume for job applied id. | def all_resume_by_job_applied_id(job_applied_id):
return Note.query.filter(Note.job_applied_id == job_applied_id, Note.note_category == 'Resume' ).all() | [
"def all_recruiter_by_job_applied_id(job_applied_id): \n return Note.query.filter(Note.job_applied_id == job_applied_id, Note.note_category == 'Recruiter Contact' ).all()",
"def get_job_applied():\n\n return JobCompletedApplication.query.all()",
"def iter_hpc_job_ids(self):\n assert self._job_sta... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return all Follow up Template for job applied id. | def all_followup_by_job_applied_id(job_applied_id):
return Note.query.filter(Note.job_applied_id == job_applied_id, Note.note_category == 'Follow-up').all() | [
"def prep_templates(self):\n with qdb.sql_connection.TRN:\n sql = \"\"\"SELECT prep_template_id\n FROM qiita.prep_template\n WHERE artifact_id IN (\n SELECT *\n FROM qiita.find_artifact_roots(%s))\"\"\"\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return all Interview questions by job applied id. | def all_interview_by_job_applied_id(job_applied_id):
return Note.query.filter(Note.job_applied_id == job_applied_id, ((Note.note_category == 'Interview Question Technical') | (Note.note_category == 'Interview Question Informational') | (Note.note_category == 'Interview Question Behavioral'))).order_by(Note.note_... | [
"def all_resume_by_job_applied_id(job_applied_id): \n return Note.query.filter(Note.job_applied_id == job_applied_id, Note.note_category == 'Resume' ).all()",
"def get_questions(self, obj):\n queryset = Question.objects.filter(sheet=obj)\n questions = []\n for q in queryset:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return all Interview questions by user id. | def all_interview_by_user_id(user_id):
return Note.query.filter(Note.user_id == user_id, ((Note.note_category == 'Interview Question Technical') | (Note.note_category == 'Interview Question Informational') | (Note.note_category == 'Interview Question Behavioral'))).order_by(Note.note_date_created.desc()).all() | [
"def all_interview_by_job_applied_id(job_applied_id): \n return Note.query.filter(Note.job_applied_id == job_applied_id, ((Note.note_category == 'Interview Question Technical') | (Note.note_category == 'Interview Question Informational') | (Note.note_category == 'Interview Question Behavioral'))).order_by(Note... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
create and return Application Progress | def create_application_progress(application_state, job_applied_id , created_at):
app_progress = ApplicationProgress(application_state = application_state, job_applied_id = job_applied_id, created_at = created_at)
db.session.add(app_progress)
db.session.commit()
return app_progress | [
"def getProgress(self):",
"def start_maint(self):\n\t\treturn self.write({'state': 'progress'})",
"def get_progress(self):\n raise NotImplementedError",
"def get_progress(self):\n\t\treturn call_sdk_function('PrlJob_GetProgress', self.handle)",
"def progress_bar_new() -> str:\n pb_id = int(request... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return all Application Progress created. | def get_application_progress():
return ApplicationProgress.query.all() | [
"def get_progress(self):\n # TODO: Cache progress or children array?\n children = self.get_children()\n progresses = [child.get_progress() for child in children]\n progress = reduce(Progress.add_counts, progresses, None)\n return progress",
"def getProgress(self):",
"def get_p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return an Application Progress by primary key. | def get_application_progress_by_id(app_progress_id):
return ApplicationProgress.query.get(app_progress_id) | [
"def get_application_progress():\n\n return ApplicationProgress.query.all()",
"def get_result_by_primary_key(self, pk):\n session = self.session_factory()\n result = session.query(PipelineRun).filter_by(id=pk).first()\n session.close()\n return result",
"def find(self, primary_key... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the last job_id record | def get_last_job_id():
return JobDetail.query.with_entities(JobDetail.job_id).order_by(JobDetail.job_id.desc()).first()[0] | [
"def last_job(self):\n self.ensure_logged_in()\n try:\n jobs = self.job_list(False, 0, 1)\n return jobs[0]\n except xmlrpc.client.Fault as e:\n raise BoaException(e).with_traceback(e.__traceback__)",
"def get_last_job_applied_id():\n\n return JobCompletedAp... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the last job applied id record | def get_last_job_applied_id():
return JobCompletedApplication.query.with_entities(JobCompletedApplication.job_applied_id).order_by(JobCompletedApplication.job_applied_id.desc()).first()[0] | [
"def get_last_job_id():\n\n return JobDetail.query.with_entities(JobDetail.job_id).order_by(JobDetail.job_id.desc()).first()[0]",
"def last_job_key(self):\n return self._last_job_key",
"def last_job(self): # TOFIX model the job and return an object instead of dictionary\n return self._data.get... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculates the cost given the target. This method must be called after `forward` has been called. | def cost(self, cost_object, target):
return cost_object.f(self.a[-1], target).mean(axis=0).sum() | [
"def cost(self) -> float:",
"def determine_cost(self):\n pass",
"def _cost_function(self):\n return np.sum((self._inp.dot(self._bias) - self._pred) ** 2) / (2 * self._data_set_len)",
"def calculate_total_cost(state):\r\n return state.cost()",
"def cost(self):\n if self.__cost is None... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get Enrollment Dataframe (enrollment_.csv) | def get_enrollment_df(ftype):
assert ftype=='train' or ftype=='test'
enroll_df = pd.read_csv('data/%s/enrollment_%s.csv' % (ftype, ftype))
return enroll_df | [
"def get_data() -> pd.DataFrame:\n print(\"Retrieving case study data from: {}\".format(CASE_STUDY_CSV_DIR))\n return pd.read_csv(CASE_STUDY_CSV_DIR, delimiter=',')",
"def read_data():\n df = pd.read_csv('faculty.csv')\n df.columns = df.columns.str.strip()\n df.degree = df.degree.str.strip(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get Log Dataframe (log_.csv) | def get_log_df(ftype):
assert ftype=='train' or ftype=='test'
log_df = pd.read_csv('data/%s/log_%s.csv' % (ftype, ftype))
log_df['time'] = pd.to_datetime(log_df['time'])
log_df['action_date'] = log_df.time.apply(lambda x: x.date())
log_df['action_dow'] = log_df['time'].apply(lambda x: x.weekday())
... | [
"def load_log(dir_):\n df = pandas.read_csv(os.path.join(dir_, 'log.csv'),\n error_bad_lines=False,\n warn_bad_lines=True)\n if not len(df):\n print(\"empty df at {}\".format(dir_))\n return\n df['model'] = dir_\n return df",
"def log_to_da... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get Training Labels Dataframe (truth_train.csv) | def get_labels_df():
labels_df = pd.read_csv('data/train/truth_train.csv', header=None)
return labels_df | [
"def __get_labels(self):\n\n uncertain_pairs_index = self.__query_pairs()\n\n to_label_raw = self.all_raw_data.loc[uncertain_pairs_index]\n to_label_features = self.all_features.loc[uncertain_pairs_index]\n\n # Remove uncertain pairs from the candidate pool\n self.all_features.dro... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get Object Dataframe (object.csv) | def get_obj_df():
obj_df = pd.read_csv('data/object.csv')
obj_df = obj_df.drop_duplicates()[['course_id', 'module_id', 'category', 'start']]
obj_df['start'] = pd.to_datetime(obj_df[obj_df['start'] != 'null']['start'])
return obj_df | [
"def get_obj_df(self) -> pd.DataFrame:\n df = pd.DataFrame(self.obj, columns=[\"x\", \"y\", \"m\", \"dx\", \"dy\"])\n df['iter'] = self.current_iteration\n return df",
"def cif_df(cif_object) -> DataFrame:\n if cif_object is None:\n return DataFrame()\n row_list = cif_object.row_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Replaces the given province's tradegood with the new one defined in the tradegoods.bmp map. | def replace_tradegood(prov_num, new_tradegood):
directory = os.getcwd()+"\\shatterednippon\\history\\provinces\\"
for file in os.listdir(directory):
if file.startswith(str(prov_num)):
old_tradegood = find_tradegood(directory+file)
if old_tradegood is None:
print("Province: %s has no \"trade_goods\" variab... | [
"def _map_bands_to_pcigale_old(band, verbose=False):\n\n # Format band ids to CIGALE names\n unknown_band = [band[i] for i in arange(0, len(band))]\n for i in arange(0, len(band), 1):\n band_str = band[i]\n if 'CFHT' in band_str:\n if 'u' in band_str:\n band[i] = 'CF... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Finds the given province file's tradegood and returns it, else returns None. | def find_tradegood(filepath):
with open(filepath) as f:
for line in f:
if "trade_good" in line:
return line.replace("trade_goods = ", "").strip()
return None | [
"def replace_tradegood(prov_num, new_tradegood):\n\tdirectory = os.getcwd()+\"\\\\shatterednippon\\\\history\\\\provinces\\\\\"\n\tfor file in os.listdir(directory):\n\t\tif file.startswith(str(prov_num)):\n\t\t\told_tradegood = find_tradegood(directory+file)\n\t\t\tif old_tradegood is None:\n\t\t\t\tprint(\"Provin... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks definition.csv to see if provinces.bmp's corresponding pixel's RGB value is in the definition list. Returns the province number if it finds the pixel in the list; returns None otherwise. | def get_province_number(corr_pixel):
corr_pixel = str(corr_pixel).strip("()").replace(", ", ";") #Reformats the pixel to ensure it can be compared.
with open(os.getcwd()+"\\shatterednippon\\map\\definition.csv", "r") as definitions:
prov_num = 1
for line in definitions:
if corr_pixel in line:
return prov_n... | [
"def region_of_province(province_in: str) -> str:\n region = None\n for r in ITALY_MAP:\n for p in ITALY_MAP[r]:\n if province_in == p:\n region = r\n return region",
"def identify_undefined_pixels(pdict, intervention_list):\n\n rau_raster = get_rau_raster(pdict)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the names of the tradegoods and the RGB color values for each defined tradegood in 00_tradegoods.txt as two separate lists. | def get_defined_tradegoods():
names = []
colors = []
with open(os.getcwd()+"\\shatterednippon\\common\\tradegoods\\00_tradegoods.txt", "r") as f:
for line in f:
if line[0].isalpha():
names.append(line.strip("={} \n"))
elif "color" in line:
numbers = tuple(map(int, re.sub("[^\d. ]\s*", "", line).split... | [
"def _rgb_txt_names_and_numbers(path_to_file):\n if not path_to_file or not os.path.isfile(path_to_file):\n return []\n if not hasattr(_rgb_txt_names_and_numbers, \"result\"):\n rgb_lines = [_rgb_txt_line(_) for _ in open(path_to_file)]\n _rgb_txt_names_and_numbers.result = [\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Loads an internal yaml node, defaulting to parsing a scalar value. | def _from_yaml(cls, loader: yaml.Loader, node: yaml.Node) -> "YamlModifier":
value = loader.construct_scalar(typing.cast(yaml.ScalarNode, node))
return cls(value) | [
"def import_(self, node):\n yamal_name = os.path.join(self._root, self.construct_scalar(node))\n\n with open(yamal_name, 'r') as yamal_file:\n return yaml.load(yamal_file, ImportLoader)",
"def load(self, value):\n if self.is_none(value):\n value = self.load_none\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse yaml node into this class object for Lobotomy processing. | def parse_yaml(cls, loader: yaml.Loader, node: yaml.Node) -> "YamlModifier":
return cls._from_yaml(loader, node) | [
"def test_parse_yaml(self) -> None:\n pass",
"def from_yaml(cls, y):\n return cls(yaml.load(y, AttrLoader))",
"def import_(self, node):\n yamal_name = os.path.join(self._root, self.construct_scalar(node))\n\n with open(yamal_name, 'r') as yamal_file:\n return yaml.load(yam... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Register the comparator with the PyYAML loader. | def register(cls):
yaml.add_constructor(cls.label(), cls.parse_yaml)
yaml.add_representer(cls, cls.dump_yaml) | [
"def setComparator(self, comparator: dict):\n self._comparator = comparator",
"def test_yaml_loader(self):\n self.loader_test('obo_sample.yaml', Package, yaml_loader)",
"def add_constructors(loader):\n loader.add_constructor('tag:yaml.org,2002:str',\n Loader._c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the closed form of a '_{side}_inline.nii.gz' mask as a numpy array, and also the clipped array | def close_mask_in(im_slice_2d, side):
new_slice = im_slice_2d.copy()
x_no_0, y_no_0 = np.nonzero(im_slice_2d)
if len(x_no_0) == 0: return new_slice, new_slice
#breakpoint()
x1 = x_no_0.min()
x2 = x_no_0.max()
if side == "l":
x_mid = x2; x_aux1 = x_mid - 9 + 1; x_aux2 = x2 + 1... | [
"def get_uniform_interpolated_ct_and_boolean_mask_cubic_volumes(\n ann,\n side_length = None, \n verbose = True ):\n bbox = ann.bbox(image_coords=True)\n bboxd = ann.bbox_dimensions(image_coords=True)\n rxy = ann.scan.pixel_spacing\n\n # { Begin input checks.\n if side_length is None:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method is used for both 'xcworkspace' and 'xcodeproj' classes. It returns a list of schemes that are labeled as 'user' or 'shared'. | def schemes(self):
schemes = [];
# shared schemes
if XCSchemeHasSharedSchemes(self.path.obj_path) == True:
shared_path = XCSchemeGetSharedPath(self.path.obj_path);
shared_schemes = XCSchemeParseDirectory(shared_path);
for scheme in shared_schemes:
... | [
"def schemes(self):\n return self.m_authschemes",
"def available_color_schemes() -> list[str]:\n return list(COLOR_SCHEMES.keys())",
"def list_schemes(schemes):\n for scheme in schemes:\n puts('{}:'.format(blue(scheme)))\n default_handler = LSCopyDefaultHandlerForURLScheme(scheme)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
returns x and y derivatives of a 2D Gaussian kernel array for convolutions | def gauss_derivative_kernels(size, size_y=None):
size = int(size)
if not size_y:
size_y = size
else:
size_y = int(size_y)
y, x = mgrid[-size: size + 1, -size_y: size_y + 1]
# x and y derivatives of a 2D gaussian with standard dev half of size
# (ignore scale factor)
gx = - x... | [
"def gradient(x, data, kernel, sigma):\n # the number of data points and the dimensionality\n n,d = data.shape \n\n # first, we form the u_i\n u = 1./(sigma**2)*(data - x)\n\n # now we form the c_i\n d_squared = np.sum((data - x)**2, axis=1)\n c = kernel(d_squared)\n\n return -1. * np.mean(c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
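The `gauss_derivative_kernels` document above is truncated right where `gx` is built. For reference, the x-derivative of an unnormalized 2D Gaussian with standard deviation σ — what the comment describes, with the scale factor it says it ignores dropped — is

$$
\frac{\partial}{\partial x}\, e^{-\frac{x^2+y^2}{2\sigma^2}} = -\frac{x}{\sigma^2}\, e^{-\frac{x^2+y^2}{2\sigma^2}},
$$

so, presumably, `gx` is `-x` times the Gaussian evaluated on the meshgrid (with σ = size/2 per the comment) and `gy` is the same with `-y`. This is a reconstruction from the visible comment, not the elided code.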
returns x and y derivatives of an image using gaussian derivative filters of size n. The optional argument ny allows for a different size in the y direction. | def gauss_derivatives(im, n, ny=None):
gx, gy = gauss_derivative_kernels(n, size_y=ny)
imx = signal.convolve(im, gx, mode='same')
imy = signal.convolve(im, gy, mode='same')
return imx, imy | [
"def dndxfi(x, n, param, h=1 / 100000):\n if (param[1] == 1 or param[1] == 2) and (n==1):\n he = param[2]\n if param[1]==1:\n if param[0]==0:\n return -1/he\n else:\n return 1/he\n elif param[1]==2:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Decorator that can be used to cache ReusableBytesIO objects intended for reading. The decorator makes sure the objects are immutable and reset to position 0. The decorated function can either return pure ReusableBytesIO objects or dicts. | def buffer_object_cacher(key=None, maxsize=None):
if not config.enable_caching:
return lambda x: x
def decorator(fun):
# Cache the results.
cached_fun = cachetools.cached(cachetools.LRUCache(maxsize=maxsize),
key=lambda *x,**y: cachetools.keys.hashkey(ke... | [
"def make_buffer():\n return BytesIO()",
"def cached_load(filepath: str) -> io.BytesIO:\n with open(filepath, 'rb') as f:\n return io.BytesIO(f.read())",
"def mutable_cache(maxsize=10):\n\n sentinel = object()\n make_key = functools._make_key\n\n def decorating_function(user_function):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
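The `buffer_object_cacher` row above describes a useful pattern: cache expensive-to-build byte buffers, but guarantee every consumer receives them rewound to position 0. The original body is truncated; the following hypothetical `buffer_cacher` helper is a simplified sketch of that pattern using the `cachetools` calls visible in the fragment (it does not reproduce the immutability enforcement or dict handling the docstring mentions):

```python
import functools
import cachetools
import cachetools.keys

def buffer_cacher(maxsize=32):
    """Cache a function's BytesIO-like results and hand them out rewound to 0."""
    # Simplified sketch; the original also enforces immutability and handles dicts.
    def decorator(fun):
        cached_fun = cachetools.cached(
            cachetools.LRUCache(maxsize=maxsize),
            key=cachetools.keys.hashkey,
        )(fun)

        @functools.wraps(fun)
        def wrapper(*args, **kwargs):
            buffer = cached_fun(*args, **kwargs)
            buffer.seek(0)  # every consumer starts reading from position 0
            return buffer

        return wrapper
    return decorator
```

Rewinding in the wrapper, rather than in the cached function, is what makes repeated cache hits safe: each caller gets the same buffer object but always positioned at the start.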
Returns a tuple representing the hardware specs. | def getHardware(self):
return (self.vendorId, self.deviceId, self.physicalMemory, self.osInfo, self.cpuSpeed[0]) | [
"def systemSpec(self):\n\n cinfo = cpuinfo.get_cpu_info()\n data = {}\n data[\"Processor\"] = cinfo['brand_raw']\n data[\"CPU\"] = cinfo['count']\n\n if os_info.platformName() == 'Linux':\n Hardware_info = self.getUnixSystemSpec(data)\n elif os_info.platformName(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns true if the other session or sample has the same hardware specs as this one, false otherwise. | def sameHardware(self, other):
return (self.vendorId == other.vendorId and \
self.deviceId == other.deviceId and \
self.physicalMemory == other.physicalMemory and \
self.osInfo == other.osInfo and \
self.cpuSpeed[0] == other.cpuSpeed[0]) | [
"def __eq__(self, other):\n return self.hw_driver == other.hw_driver and self.config == other.config",
"def _on_same_device(self, other: \"PArray\") -> bool:\n this_device = self._current_device_index\n return this_device in other._array",
"def match(uspec1, uspec2):\n \n if uspec1.is... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculates the average FPS for this player, over all of the player's different sessions. | def calcFrameRate(self):
tot = 0
count = 0
for session in self.sessions:
for sample in session.samples:
if not sample.isLoading:
tot += sample.fps
count += 1
if count:
self.avgFps = tot / count
s... | [
"def fps(self) -> float:\n try:\n return len(self._frame_times) / sum(self._frame_times)\n except ZeroDivisionError:\n return 0",
"def fps(self):\r\n fps = self.frames/(self.frameClock.getTime())\r\n self.frameClock.reset()\r\n self.frames = 0\r\n re... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reads the clientfps lines from the indicated logfile, and writes card_performance.csv, without building up large tables. | def quickAnalyzeCards(self, filename):
assert filename.endswith('.txt')
file = open(filename, 'r')
quickCards = {}
for line in file:
line = line.strip()
if not line:
continue
columns = line.split('|')
if columns[1] != 'cl... | [
"def performance_report(logfile, mode_52):\r\n\r\n code = test_encoding(logfile)\r\n\r\n rgx_start = re.compile(r\"^.*Change job number\")\r\n if not have_change_job_line(logfile, code):\r\n rgx_start = re.compile(r\"^.*SERVER_STATE\\: Receiving Ground Data\")\r\n\r\n if mode_52:\r\n rgx_s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Write the samples for all players with less than 10 fps average frame rate to the indicated text file. This generates a new log file that may be analyzed independently. | def writeLowPlayers(self, filename):
assert filename.endswith('.txt')
file = open(filename, 'w')
samples = []
for player in self.players:
if player.lowFps:
for session in player.sessions:
for sample in session.samples:
... | [
"def debug_file(self, pkt_count, attack_count, data_list, ds_calc_time, ds_vals, metric_means, distances):\n # Current frame no. //\n # Current frame metric data //\n # Current sliding window data\n # Distances for each metric\n # DS probabilities, BPA's, time to calculate\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns total number of players whose avg fps is less than 10, total number of players whose avg fps is between 10 and 25, and total number of players whose avg fps is more than 25. | def __countPlayers(self, players):
numLow = sum(map(lambda p: p.lowFps, players))
numHigh = sum(map(lambda p: p.highFps, players))
numMed = len(players) - numLow - numHigh
return '%s, %s, %s' % (numLow, numMed, numHigh) | [
"def stats(detections, faces):\n vp, fp, fn, vn = 0, 0, 0, 0\n max_label = np.max(faces[:, 0])\n for i in range(max_label + 1):\n detections_i = get_label_with_index(detections, i)\n faces_i = get_label_with_index(faces, i)\n local_vp = 0\n for face in faces_i:\n foun... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reads PCIList, which contains a list of the known PCI devices by vendor ID/device ID. See | def readPCIList(self):
self.vendors = {}
self.devices = {}
vendorId = None
vendorName = None
for line in PCIList.split('\n'):
stripped = line.lstrip()
if not stripped or stripped[0] == ';':
continue
if line[0] != '\t':
... | [
"def _get_pci_devices(self):\n\n system = self._get_host_details()\n if ('links' in system['Oem']['Hp'] and\n 'PCIDevices' in system['Oem']['Hp']['links']):\n # Get the PCI URI and Settings\n pci_uri = system['Oem']['Hp']['links']['PCIDevices']['href']\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
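The vendor/device parse in readPCIList above keys off leading tabs: a line with no tab starts a new vendor, and tab-indented lines below it list that vendor's devices. A minimal standalone sketch of the same convention — the sample data and variable names are made up for illustration:

SAMPLE = '1002  Advanced Micro Devices\n\t5653  Radeon Mobility X700\n10de  NVIDIA Corporation'

vendors, devices = {}, {}
vendor_id = None
for line in SAMPLE.split('\n'):
    stripped = line.lstrip()
    if not stripped or stripped[0] == ';':
        continue  # skip blank lines and ;-comments, as the original does
    if not line.startswith('\t'):
        vendor_id, vendor_name = stripped.split(None, 1)
        vendors[vendor_id] = vendor_name
    elif vendor_id is not None:
        device_id, device_name = stripped.split(None, 1)
        devices[(vendor_id, device_id)] = device_name

print(devices)  # -> {('1002', '5653'): 'Radeon Mobility X700'}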
Adds extra device names that we know explicitly from some external source. | def addExtraDevices(self):
# These tables were extracted from
# pirates/src/piratesgui/GameOptions.py.
ati_device_list = [
["ATI MOBILITY/RADEON X700", 0x5653],
[1, "Radeon X1950 XTX Uber - Limited Edition", 0x7248],
[1, "Radeon X1950 XTX Ub... | [
"def add_extra_args(self):\n self.parser.add_argument('--device', dest='device', type=str, help='Device ID, e.g. d--0001')",
"def ext_devices(self, ext_devices):\n self._ext_devices = ext_devices",
"def load_devices():",
"def addDeviceDescriptor(string: str, deviceDescriptor: cern.japc.core.Devi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Counts the frequencies of samples of given variables ``vars`` and calculates probabilities with additive smoothing. | def get_probs(self, *vars):
freqs = self.freq_counts([self.data.get_column_view(v)[0] for v in vars], [len(v.values) for v in vars])
k = np.prod([len(v.values) for v in vars])
return (freqs + self.alpha) / (np.sum(freqs) + self.alpha*k) | [
"def update_posterior_probs(vars_):\n vars_.weighted_sums += np.power(vars_.dprime_map[vars_.focus],2) * vars_.visual_field\n vars_.post_probs = np.exp(vars_.weighted_sums) * vars_.prior_prob\n vars_.post_probs /= np.sum(vars_.post_probs)",
"def count_smoothing(freq_dist, vocabulary, alpha=1):\n retur... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
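The additive (Laplace) smoothing in get_probs generalizes to any count vector: add a pseudo-count alpha to each of the k cells and renormalize. A small self-contained sketch — the function name and sample counts are illustrative, not part of the original class:

import numpy as np

def smoothed_probs(freqs, alpha=1.0):
    # Add alpha to every cell, and alpha*k to the total, so the result still sums to 1.
    freqs = np.asarray(freqs, dtype=float)
    k = freqs.size
    return (freqs + alpha) / (freqs.sum() + alpha * k)

print(smoothed_probs([3, 0, 1]))  # -> [4/7, 1/7, 2/7]; the zero count gets nonzero mass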
If total_rel_ig_ab is not given, computes the absolute total info gain for attributes a and b. Generates an Interaction object. | def attribute_interactions(self, a, b, total_rel_ig_ab=None):
var_a = self.data.domain.variables[a]
var_b = self.data.domain.variables[b]
ig_a = self.info_gains[var_a.name]
ig_b = self.info_gains[var_b.name]
if not total_rel_ig_ab:
ig_ab = ig_a + ig_b - (self.class_en... | [
"def information_gain(attribute_name_var, instances_var):\n global data_instance\n global_distribution_list = classify_list(data_instance.attr_list[-1], instances_var)\n entropy_value = entropy(global_distribution_list)\n\n information_value = information(attribute_name_var, instances_var)\n informat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Computes the Interaction objects for the n most informative pairs of attributes. For this to work, ``interaction_matrix`` must be called first. It uses a partial sort and then a full sort on those n elements to get the indices of the attributes. | def get_top_att(self, n):
if not self.int_M_called:
raise IndexError("Call interaction_matrix first!")
flat_indices = np.argpartition(np.tril(-self.int_matrix, -1).ravel(), n - 1)[:n]
# TODO: Consider using the partial sort from the bottleneck module for faster sorting
row_in... | [
"def get_object_intent_by_index(self, i):\n obj_row = self.np_table[i, :]\n att_inds = obj_row.nonzero()[0]\n atts = [self.attributes[j] for j in att_inds]\n return set(atts)",
"def top_n_combined(self, n):\n top = set()\n for feat_set in self.itervalues():\n t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
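The partial-sort idiom in get_top_att — np.argpartition to isolate the n best entries in O(len), then a full sort of only those n — works on any score array. A minimal sketch with made-up scores:

import numpy as np

scores = np.array([0.2, 0.9, 0.1, 0.7, 0.4])
n = 2
# argpartition leaves the n smallest values of -scores (i.e. the n largest scores)
# in the first n positions, in arbitrary order.
top_unordered = np.argpartition(-scores, n - 1)[:n]
# Fully sorting just those n indices is then cheap.
top = top_unordered[np.argsort(-scores[top_unordered])]
print(top)  # -> [1 3], the indices of 0.9 and 0.7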
Returns the list of names of args/kwargs without defaults from `fun` signature. | def get_required_kwargs(fun, skip_positional=0):
sig = inspect.signature(fun)
# the params from signature with up to skip_positional filtered out
        # (fewer only if there are not enough positional args)
params = [(name, param) for i, (name, param) in enumerate(sig.parameters.items())
if i >... | [
"def list_kwargs(func):\n \n details = inspect.getargspec(func)\n nopt = len(details.defaults)\n \n return details.args[-nopt:]",
"def get_func_arg_names(func):\n return get_func_code(func).co_varnames",
"def get_default_args(func):\n signature = inspect.signature(func)\n return {\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
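The core of get_required_kwargs is the inspect.Parameter.empty check: a parameter is required exactly when it has no default. A self-contained sketch of that check — the sample function f is made up:

import inspect

def f(a, b, c=3, *, d, e=5):
    pass

required = [name for name, p in inspect.signature(f).parameters.items()
            if p.default is inspect.Parameter.empty]
print(required)  # -> ['a', 'b', 'd']; positional and keyword-only params without defaults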
When a team is created, its survey is automatically created. | def test_create_team_creates_survey(self):
user = User.create(name='User Foo', email='user@foo.com')
user.put()
code = 'trout viper'
team_response = self.testapp.post_json(
'/api/teams',
{
'name': 'Team Foo',
'code': code,
... | [
"def test_teams_create(self):\n pass",
"def test_create_team(self):\n pass",
"def test_generate_survey(self):\n\n result = generate_survey.apply((self.user.id,\n self.report.get_daily().id)).get()\n self.assertTrue(result, \"should create a surv... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
You can get the survey for a team you own. | def test_get_for_team(self):
user, team_dict = self.test_create_team_creates_survey()
response = self.testapp.get(
'/api/teams/{}/survey'.format(team_dict['uid']),
headers=self.login_headers(user),
)
survey_dict = json.loads(response.body)
self.assertTrue(... | [
"def get_survey():\n # Display survey page\n return render_template(\"survey.html\")",
"def get_survey(self, survey_id):\n\n if self.has_survey(survey_id):\n return self.surveys[survey_id]\n else:\n self.logger.warning(f\"Could not find survey {survey_id}\")\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
You can't get a survey for someone else's team. | def test_get_for_other_forbidden(self):
user, team_dict = self.test_create_team_creates_survey()
other = User.create(name='Other', email='other@foo.com')
other.put()
self.testapp.get(
'/api/teams/{}/survey'.format(team_dict['uid']),
headers=self.login_headers(othe... | [
"def test_get_for_team(self):\n user, team_dict = self.test_create_team_creates_survey()\n response = self.testapp.get(\n '/api/teams/{}/survey'.format(team_dict['uid']),\n headers=self.login_headers(user),\n )\n survey_dict = json.loads(response.body)\n self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Client dict should have portal-friendly metric labels. | def test_metric_labels(self):
team_id = 'Team_foo'
m1 = Metric.create(name='Foo Condition', label='foo_condition')
m2 = Metric.create(name='Bar Condition', label='bar_condition')
Metric.put_multi([m1, m2])
survey = Survey.create(team_id=team_id, metrics=[m1.uid, m2.uid])
... | [
"def test_get_host_configuration_metrics1(self):\n pass",
"def test_get_host_configuration_metrics(self):\n pass",
"def test_label_metric(self):\n\n url = reverse(\"reports-openshift-costs\")\n client = APIClient()\n client.get(url, **self.headers)\n\n registry = save_r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Pause the pattern while self.pauseNow is True; return immediately if self.playStatus == False | def pauseCheck(self):
while (self.playStatus == False and self.pauseNow == True):
self.isPause = True
time.sleep(.25)
self.isPause = False
return self.playStatus | [
"def pause(self):\n while 1:\n if self.is_paused:\n time.sleep(1)\n else:\n break",
"def pause(self):\n if self.status()['state'] == \"playing\":\n self.toggle_pause()",
"def pausePlaying(self):\n # FIXME Perhaps pause-playing should actu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
For now, we are only returning the label for the first authorization. | def get_label(self):
auth = self.authorizations[0]
return auth.label | [
"def get_label(self) -> str:\n if self.found:\n return self.detail['label']\n else:\n logger.warning(\"Return empty for label as fail to find ontology on OLS for term \"+self.short_term)\n return \"\"",
"def get_label(self, label):\n return self.labels[label]",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Change the value of every pixel by following x_n = 0.5 * x_p^2, where x_n is the new value and x_p is the original value. | def change_value(image):
out = None
#####################################
# START YOUR CODE HERE #
#####################################
image = image / 255
out = np.empty_like(image)
height, width, _ = image.shape
for h in range(height):
for w in range(width):
... | [
"def set_pixel(self, x, y, value):\n if x < 0 or x > 7 or y < 0 or y > 15:\n # Ignore out of bounds pixels.\n return\n if y < 8:\n self.set_led( y * 16 + x, value)\n else:\n self.set_led((y-8) * 16 + (x+8), value)",
"def compute(self):\n for ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
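Because the transform in change_value is elementwise, the nested per-pixel loop can be replaced by one vectorized NumPy expression. A minimal sketch under the same assumption of an 8-bit input image:

import numpy as np

def change_value_vectorized(image):
    # Normalize to [0, 1], then apply x_n = 0.5 * x_p**2 to all pixels and channels at once.
    x = image.astype(np.float64) / 255.0
    return 0.5 * x ** 2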
Removes a value from the set. Returns true if the set contained the specified element. | def remove(self, val: int) -> bool:
if val in self.set:
            self.set.remove(val)
            self.nums.remove(val)
            return True
        return False | [
"def remove(self, val: int) -> bool:\n if val in self.set:\n self.set.remove(val)\n return True\n return False",
"def remove(self, val: int) -> bool:\n if val in self.sett:\n self.sett.remove(val)\n return True\n return False",
"def remove(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
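list.remove in the remove method above is O(n); the usual trick for O(1) removal keeps a value-to-index dict and swaps the removed value with the last list element. A hedged sketch of that variant — the class and attribute names are illustrative, not the original's:

class RandomizedSetSketch:
    def __init__(self):
        self.nums = []   # values in arbitrary order
        self.pos = {}    # value -> index into self.nums

    def add(self, val: int) -> bool:
        if val in self.pos:
            return False
        self.pos[val] = len(self.nums)
        self.nums.append(val)
        return True

    def remove(self, val: int) -> bool:
        if val not in self.pos:
            return False
        i, last = self.pos[val], self.nums[-1]
        # Move the last element into the freed slot, then drop the tail: O(1).
        self.nums[i] = last
        self.pos[last] = i
        self.nums.pop()
        del self.pos[val]
        return True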
Indicates whether the identifier provided is contained in this namespace. | def contains(self, identifier):
uri = identifier if isinstance(identifier, six.string_types) else (
identifier.uri if isinstance(identifier, Identifier) else None
)
return uri.startswith(self._uri) if uri else False | [
"def in_namespace(self, namespace: Sequence[str]):\n if self.namespace is None:\n return False\n\n for index, item in enumerate(namespace):\n try:\n if item != self.namespace[index]:\n return False\n except IndexError:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The pop function removes the top element of the stack and returns it. If the stack is empty, it raises StackException. | def pop(self) -> object:
        if self.is_empty():  # if size of array is 0, raise exception
raise StackException
else:
top_stack = self.da.get_at_index(self.size()-1) # initialize the top of the stack (last element)
self.da.remove_at_index(self.size()-1) # remove it
... | [
"def pop() -> Any:\n\tglobal stack\n\tif stack:\n\t\ta = stack[-1]\n\t\tstack.pop()\n\t\treturn a\n\telse:\n\t\treturn None",
"def pop(self):\n # If the stack is empty, then display an error and return a junk value.\n if self.is_empty():\n print(\"ERROR:: Stack is empty.\")\n r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Verify that the output table has the header items listed in field_names. | def assertTableHeaders(self, output_lines, field_names):
table = self.parser.table(output_lines)
headers = table['headers']
for field in field_names:
self.assertIn(field, headers) | [
"def assert_table_structure(self, items, field_names):\n for item in items:\n for field in field_names:\n self.assertIn(field, item)",
"def test_check_header_required_fields(self):\r\n\r\n # Default header, should not generate any errors/warnings\r\n header = [\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |