Dataset columns:
query - string (lengths 9 to 9.05k)
document - string (lengths 10 to 222k)
negatives - list (lengths 19 to 20)
metadata - dict
Constructs a new Course object from given JSON data
def from_json(cls, json_data: dict):
    return cls(
        json_data['course_id'],
        json_data['course_name']['pl'],
        json_data['course_name']['en'],
        json_data['term_id']
    )
[ "def parse_course_json(self, json):\n dept_short = json['subject']\n department = self.dept_keys[dept_short]\n number = json['catalogNbr']\n title = json ['titleLong']\n\n # Whether different enrollment patterns are truly divided into different\n # enrollGroups seems to var...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a generator (one-time iterator) of certificates in the batch
def get_certificate_generator(self):
    for cert in self.certificates_to_issue:
        data_to_issue = self.certificate_handler.get_byte_array_to_issue(cert)
        yield data_to_issue
[ "def get_certificate_generator(self):\n client = boto3.client('s3',\n aws_access_key_id='AKIAIPZZ2DOBQEVC6V6A',\n aws_secret_access_key='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'\n )\n resp = client.list_objects_v2(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a generator (one-time iterator) of certificates in the batch
def get_certificate_generator(self):
    client = boto3.client('s3',
                          aws_access_key_id='AKIAIPZZ2DOBQEVC6V6A',
                          aws_secret_access_key='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'
                          )
    resp = client.list_objects_v2(Bucket=BUC...
[ "def batcher(iterator, batchsize):\n it = iter(iterator)\n while True:\n batch = tuple(itertools.islice(it, batchsize))\n if not batch:\n return\n yield batch", "def channel_request_iterator(batch_size):\n\n print('Establishing connection to channels API')\n\n page = 1\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates the input weights for the reservoir. It does this by first generating either a random or a unitary array and then applying a mask to reduce the inputs down to that required by the input fraction. If a sequence of input gains is given, then the corresponding dimension's weights are adjusted by that ga...
def generate_input_weights(self):
    input_weights = np.random.uniform(self.input_weight_bounds[0], self.input_weight_bounds[1],
                                      size=(self.N, self.sequence_dimension + 2))  # 2 added for the distractor and cue
    if isinstance(self.input_gain, Sequence):
        for i in input_weig...
[ "def compute_sample_weight(class_weight, y, *, indices=...):\n ...", "def _sample_weights(self, batch_size, distrib_means, distrib_vars):\n\n distrib_cov = np.diag(np.sqrt(distrib_vars))\n return np.random.multivariate_normal(distrib_means, distrib_cov, batch_size)", "def generate_weight_mask(s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a set of unique task sequences for memorization
def generate_recall_task_sequences(self):
    self.multitrial_input_signal = np.zeros((self.num_trials, self.task.total_duration, self.task.input_dimension, 1))
    self.multitrial_target_signal = np.zeros((self.num_trials, self.sequence_length, self.sequence_dimension, 1))
    self.task_sequences = set([]...
[ "def get_task_ids(computation):\n return computation._tasks.keys()", "def unusedTasks(self):\n return functools.reduce(lambda x, y : x + y, [b.getTasks() for b in self._binqueue], []) + self._queue", "def _generate_tasks(review_milestone, reviewer, chunk_map, chunk_id_task_map=defaultdict(list), max_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns two arrays of size Q x T x K x 1 (num_trials, time, input dimension, 1 [for vector operations]); in the second array (the target array), K = output dimension.
def generate_input(self, trials=None):
    if self.distraction_range == None:
        if trials == None:
            trials = self.num_trials
        multitrial_input_signal = np.zeros((trials, self.task.total_duration, self.task.input_dimension, 1))
        multitrial_target_signal = np.zeros((t...
[ "def _init_trend_array(self):\n\t\tself.T = [sum([self.X[i + self.q] - self.X[i]\n\t\t for i in range(self.q)]) / (self.q ** 2)]", "def RungeKuttaArray(h, u0, A, B = None, printAll = False, returnT=False):\n try:\n # if A is a 3D array\n A0 = A.shape[0]\n AType = A.dtype\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Applies a fixed point analysis on the reservoir
def fixed_point_analysis(self):
    stable_state_list = []
    num_diverged = 0.0
    input_signal, _ = self.generate_recall_task_sequences()
    for i in range(self.num_trials):
        # Run reservoir
        self.esn.Reset()
        # Loop up to but don't include the que and recall peri...
[ "def fit_points_to_fp(self):\n #Cost/Gradient functions very similar to bgfs_cost/bgfs_gradient, but treat fixed points\n # as constants.\n #examine_results of the resulting points, use the points that fall within an\n # acceptable threshold.\n #TODO try out matching smaller set...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Passes input to each QueryLineHandler in use
def parse(self, input):
    query = None
    for handler in self._line_handlers:
        try:
            query = handler.handle(input)
        except Exception as e:
            query = None
        finally:
            if query is not None:
                return query
    retu...
[ "def __init__(self):\n super(LogParser, self).__init__([self.StandardQueryHandler(),\n self.CmdQueryHandler(),\n self.UpdateQueryHandler()])", "def _handleInput(self, paramInput):\n pass", "def read_input_queries(self):\n\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Declares the QueryLineHandlers to use
def __init__(self):
    super(LogParser, self).__init__([self.StandardQueryHandler(),
                                    self.CmdQueryHandler(),
                                    self.UpdateQueryHandler()])
[ "def add_shipping_query_handler(self, handler_dict):\n self.shipping_query_handlers.append(handler_dict)", "def start_handlers(self):\n self.__init_command_handlers()", "def add_pre_checkout_query_handler(self, handler_dict):\n self.pre_checkout_query_handlers.append(handler_dict)", "def ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a specified number of objects at random from a pickle file. No more than (batch_size + 1) objects are required to be in memory at once.
def load_batch(filepath, batch_size):
    # First we need to find how many pickled objects there are in the file
    # I don't know any more efficient way to do this than to just load and discard every object in the file
    _file = open(filepath, mode='rb')
    obj_count = 0
    while True:
        try:
            pi...
[ "def LoadSample(file_pattern, num):\n ret = []\n seen= 0\n for filename in glob.glob(file_pattern):\n seen += 1\n if len(ret) < num:\n r = record.Record.FromString(file(filename).read())\n if r: ret.append(r)\n else:\n n = random.randint(0, seen - 1)\n if n < num:\n r = record...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a tuple of array, index along axis. Copied from dask.array.chunk.argtopk_preprocess
def argreduce_preprocess(array, axis):
    import dask.array
    import numpy as np

    # TODO: arg reductions along multiple axes seems weird.
    assert len(axis) == 1
    axis = axis[0]

    idx = dask.array.arange(array.shape[axis], chunks=array.chunks[axis], dtype=np.intp)
    # broadcast (TODO: is this needed?)
    ...
[ "def _gather_for_multidim_indexing(args: GatherArgs):\n # Guess the axis.\n axis = args.dnums.collapsed_slice_dims[0]\n squeezed_indices = tf.squeeze(args.start_indices, -1)\n op_shape = jax2tf._eval_shape(args.op_shape)\n start_indices = _clip((op_shape[axis],), squeezed_indices, (1,))\n return tf.gather(arg...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for authentication_challenge_authenticate_post ChallengeAuthenticate
def test_authentication_challenge_authenticate_post(self):
    pass
[ "def test_authentication_challenge_get_post(self):\n pass", "def test_authentication_challenge_cancel_post(self):\n pass", "def test_api_v1_authenticate_post(self):\n pass", "def test_challenge_response(self):\n api = self.api\n session = {'expire':datetime.now()+timedelta(1...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for authentication_challenge_cancel_post ChallengeCancel
def test_authentication_challenge_cancel_post(self):
    pass
[ "def test_authentication_challenge_authenticate_post(self):\n pass", "def test_authentication_challenge_get_post(self):\n pass", "def test_challenge_response(self):\n api = self.api\n session = {'expire':datetime.now()+timedelta(1), 'challenge':None, 'auth':False}\n api.sessio...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for authentication_challenge_get_post ChallengeGet
def test_authentication_challenge_get_post(self):
    pass
[ "def test_authentication_challenge_authenticate_post(self):\n pass", "def test_authentication_challenge_cancel_post(self):\n pass", "def test_api_v1_authenticate_post(self):\n pass", "def test_challenge_response(self):\n api = self.api\n session = {'expire':datetime.now()+ti...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for authentication_challenge_refuse_sms_post ChallengeRefuseSms
def test_authentication_challenge_refuse_sms_post(self):
    pass
[ "def test_authentication_challenge_sms_post(self):\n pass", "def test_authentication_challenge_get_post(self):\n pass", "def test_authentication_challenge_cancel_post(self):\n pass", "def test_authentication_challenge_authenticate_post(self):\n pass", "def send_sms_with_callback_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for authentication_challenge_sms_post ChallengeSms
def test_authentication_challenge_sms_post(self):
    pass
[ "def test_authentication_challenge_refuse_sms_post(self):\n pass", "def test_authentication_challenge_get_post(self):\n pass", "def test_authentication_challenge_authenticate_post(self):\n pass", "def _sms_test(log, ads):\n for length in message_lengths:\n message_array = [rand_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Eigenvector centrality for nodes in the graph (like Google's PageRank). Eigenvector centrality is a measure of the importance of a node in a directed network. It rewards nodes with a high potential of (indirectly) connecting to high-scoring nodes. Nodes with no incoming connections have a score of zero. If you want to m...
def eigenvector_centrality(graph, max_iter=100, tolerance=1.0e-6, weight=None, start_value=None):
    if len(graph) == 0:
        raise GraphitAlgorithmError('Cannot compute centrality for graph without nodes')

    # If no initial vector is provided, start with the all-ones vector.
    if isinstance(start_value, dict...
[ "def eigenvector_centrality(G,max_iter=100,tol=1.0e-6,nstart=None):\n if type(G) == networkx.MultiGraph or type(G) == networkx.MultiDiGraph:\n raise Exception(\"eigenvector_centrality() not defined for graphs with multiedges.\")\n\n# if not G.weighted:\n# raise Exception(\"eigenvector_centrality...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prompt the user to choose an action to perform.
def prompt_for_action():
    while True:
        print()
        print("What would you like to do?")
        print()
        print("  A = add an item to the inventory.")
        print("  R = remove an item from the inventory.")
        print("  C = generate a report of the current inventory levels.")
        print("  O...
[ "def chooseAction(self, choice):\n\n option = 'undefinedMethod'\n\n #If the choice is a valid option, prepare to run the corresponding method\n try:\n option = self._options[choice]\n except Exception:\n pass\n\n #Run the corresponding method. Based on:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prompt the user to select a product. We return the code for the selected product, or None if the user cancelled.
def prompt_for_product():
    while True:
        print()
        print("Select a product:")
        print()
        n = 1
        for code, description, desired_number in datastorage.products():
            print("  {}. {} - {}".format(n, code, description))
            n = n + 1
        s = input("> ").strip()
        ...
[ "def products_menu(self, category_selection):\n\n show_products = self.database.get_products_from_category(category_selection)\n products_menu = Menu(\"PRODUCTS MENU\", self.about_products_display, show_products)\n products_menu.clear_screen()\n products_menu.display()\n product_s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prompt the user to select a location. We return the code for the selected location, or None if the user cancelled.
def prompt_for_location():
    while True:
        print()
        print("Select a location:")
        print()
        n = 1
        for code, description in datastorage.locations():
            print("  {}. {} - {}".format(n, code, description))
            n = n + 1
        s = input("> ").strip()
        if s == "":...
[ "def getLocation():\n location=input(\"please input the location you want to look at : \")\n if not location:\n location = LOCATION\n return location", "def location_lookup(self, req_location):\n location = False\n try:\n location = self.samecodes[req_location['code']]\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Display the given report to the user. 'report' is a list of strings containing the contents of the report.
def show_report(report):
    print()
    for line in report:
        print(line)
    print()
[ "def select_report(self, report=0):\n #reports = [\"Civil financial statement\",\n # \"Criminal financial statement\",\n # \"Family mediation financial statement\",\n # \"Financial statement summary\"]\n\n # Find the report name present on screen\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
check if an active account exists
def active_account_exists(public_address):
    return app.kin_sdk.check_account_exists(public_address) and app.kin_sdk.check_account_activated(public_address)
[ "def existsAccount(self, user: str) -> bool:\n pass", "async def is_account_exist(self,account):\r\n async with self._db.acquire() as conn:\r\n accounts= [dict(row.items()) async for row in await conn.execute(\r\n Account.select().where((Account.c.account == account)))\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
create an account for the given public address
def create_account(public_address, initial_xlm_amount):
    # TODO all repeating logic?
    print('creating account with balance:%s' % initial_xlm_amount)
    try:
        return app.kin_sdk.create_account(public_address, starting_balance=initial_xlm_amount, memo_text=TX_MEMO_PREFIX, activate=True)
    except Exception ...
[ "def create_address(self, address: Address):\n pass", "def create_account():\n eth_account = Account()\n account = eth_account.create(get_random_bytes(32))\n return (account.address, account.privateKey)", "def create_account(self):\n name = self.user.first_name + ' ' + self.user.last_name...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
send kins to an address
def send_kin(public_address, amount, memo=TX_MEMO_PREFIX):
    # sanity:
    if public_address in (None, ''):
        log.error('cant send kin to address: %s' % public_address)
        return False, None
    if amount is None or amount < 1:
        log.error('cant send kin amount: %s' % amount)
        return False,...
[ "async def _send_interests(self, prefix, send_time):\n\n # begin timing\n self._time['current'] = self._time['start'] = time.time()\n\n # send interests for a specified amount of time\n i = 0\n while time.time() - self._time['start'] < send_time:\n if self._send_latency...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
send kins to an address using the payment service
def send_kin_with_payment_service(public_address, amount, memo=TX_MEMO_PREFIX):
    # sanity:
    if public_address in (None, ''):
        log.error('cant send kin to address: %s' % public_address)
        return False, None
    if amount is None or amount < 1:
        log.error('cant send kin amount: %s' % amount)
        ...
[ "def send_kin(public_address, amount, memo=TX_MEMO_PREFIX):\n\n # sanity:\n if public_address in (None, ''):\n log.error('cant send kin to address: %s' % public_address)\n return False, None\n\n if amount is None or amount < 1:\n log.error('cant send kin amount: %s' % amount)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensures that the given tx_hash is a valid payment tx, and returns a dict with the memo, amount and to_address
def extract_tx_payment_data(tx_hash):
    if tx_hash is None:
        raise InvalidUsage('invalid params')
    # get the tx_hash data. this might take a second,
    # so retry while 'Resource Missing' is received
    count = 0
    tx_data = None
    while (count < config.STELLAR_TIMEOUT_SEC):
        try:
            t...
[ "def process_tx_hash(self, tx_hash: str) -> List[Any]:\n pass", "def get_transaction_details(tx_hash, coin_symbol='btc', limit=None, tx_input_offset=None, tx_output_offset=None,\n include_hex=False, show_confidence=False, confidence_only=False, api_key=None):\n\n assert is_valid_hash(tx_hash), tx...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
(int, str) -> number
Return the fees for a book that is days_late days late for a borrower in the age group age_group.
>>> overdue_fees(2, SENIOR)   # 2 days late, SENIOR borrower
0.5
>>> overdue_fees(5, ADULT)    # 5 days late, ADULT borrower
10
def overdue_fees(days_late, age_group):
    if days_late < 4:
        late_fees = days_late * 1
    elif days_late >= 4 and days_late <= 6:
        late_fees = days_late * 2
    elif days_late > 6:
        late_fees = days_late * 3
    if age_group == 'child':
        late_fees *= 0.5
    elif age_group =...
[ "def fee(self):\n\n fees = 10\n if self.balance > 10.0 and self.balance < 1000.0:\n self.balance -= fees\n print(\" Your balance now is $\", self.balance, \"due to having less than $1000, which initiates a fee of $10\")\n return self.balance\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function used to parse lines from pindel output. >>> line_del = \ '0\\tD 4\\tNT 0 ""\\tChrID chr1\\tBP 67443969\\t67443974\\t' + \ 'BP_range 67443969\\t67443980\\tSupports 5\\t1\\t+ 5\\t1\\t 0\\t0\\t' + \ 'S1 6\\tSUM_MS 300\\t1\\tNumSupSamples 1\\t1\\tSample_name 1 0 5 1 0 0' >>> line_ins = '0\\tI 51\\t' + \ 'NT 51 "CA...
def parse_pindel_line(line, report_min_depth=False):
    columns = line.rstrip('\r\n').split('\t')
    # column = 'ChrID chr1'
    chr = __extract_value(columns[_column_converter_pindel['chr']])
    # column = 'BP_range 67443969'
    start = int(__extract_value(columns[_column_converter_pindel['start']]))
    # column =...
[ "def parse_csv_line(csv_line):", "def extractRecordFromTrace(fn_in, fn_out):\n if not fn_in or not fn_out:\n return\n f_in = open(fn_in, \"r\")\n f_out = open(fn_out, \"a\")\n i,src, dst, octs =0, 0, 0, 0\n try:\n data = csv.reader(f_in, delimiter=\",\")\n for line in data:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates the MFCC features from the test files, saves them to disk, and returns the saved file name.
def create_ceps_test(fn):
    # print(fn)
    sample_rate, X = scipy.io.wavfile.read(fn)
    X[X == 0] = 1
    ##
    # np.nan_to_num(X)
    ceps, mspec, spec = mfcc(X, sample_rate)
    ##
    # ceps = whiten(ceps)
    base_fn, ext = os.path.splitext(fn)
    print(base_fn)
    # ceps = my_mfcc(fn)
    data_fn = base_fn + ".ceps"
    # final...
[ "def extract_and_save_mfcc(audio_path: str, output_path: str, name: str,\n duration: int, n_mfcc: int=20, verbose_level: int=0,\n **kwargs):\n if verbose_level > 1:\n print('[INFO] processing file {}'.format(audio_path))\n y, sr = librosa.load(audio_pat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create the initial state (velocity). The initial states are specified in a dict indexed by field. The format should be w_init_field[field] = 'df.Function(...)'. The work dicts w_ and w_1 are automatically initialized from these functions elsewhere in the code.
def initialize(H, Hmin, interface_thickness, solutes, restart_folder,
               field_to_subspace, inlet_velocityOil, inlet_velocity,
               concentration_left, enable_NS, enable_PF, enable_EC,
               **namespace):
    w_init_field = dict()
    if not restart_folder:
        ...
[ "def _create_state_init_parameters(self):\n self.init_ws, self.init_bs, self.init_norms = [], [], []\n # shallow copy of the state shapes:\n state_shapes = list(self.rnn_pre_attention.state_shape)\n if self.rnn_post_attention:\n state_shapes += self.rnn_post_attention.state_sh...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Phase field mobility function.
def pf_mobility(phi, gamma):
    # return gamma * (phi**2-1.)**2
    # func = 1.-phi**2 + 0.0001
    # return 0.75 * gamma * max_value(func, 0.)
    return gamma

# Function to control PF mobility over time.
[ "def phase_space_volume(self) -> float:", "def calc_motive(self):\n # For brevity, \"dimensionless\" prefix omitted from \"position\" and \"motive\" variable names.\n \n self[\"motive_data\"] = {}\n self[\"motive_data\"][\"dps\"] = DimensionlessLangmuirPoissonSoln()\n\n self[\"motive_data\"][\"spcl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
produce a list of the free positions.
def _freePos(self):
    res = []
    for i, row in enumerate(self.mazeTable):
        for j, p in enumerate(row):
            if p == False:
                res.append((i, j))
    return res
[ "def free_positions(self):\n # Get the list of all free positions.\n free_pos = [pos for pos, val in self.maze.items() if not val]\n\n # Here we use a generator on a dictionary to create the adjacency list.\n # However, for Python 3, we force evaluation on the legal_moves.values\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plot both confusion matrix and ROC curve on the same figure.
def plot_conf_matrix_and_roc(estimator, X, y, figure_size=(16, 6)):
    # Compute tpr, fpr, auc and confusion matrix
    fpr, tpr, thresholds = roc_curve(y, estimator.predict_proba(X)[:, 1])
    auc = roc_auc_score(y, estimator.predict_proba(X)[:, 1])
    conf_mat_rf = confusion_matrix(y, estimator.predict(X))
    ...
[ "def plot(self):\r\n tpr, fpr, thresholds = self.__calc_tpr_fpr()\r\n self.results = np.column_stack((tpr, fpr, thresholds))\r\n\r\n # %%% TODO START YOUR CODE HERE %%%\r\n\r\n fig = plt.figure()\r\n plt.plot(fpr, tpr)\r\n fig.suptitle('ROC Plot')\r\n plt.xlabel('Tru...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plot ROC and PR curves for all models. Arguments
def plot_roc_and_pr_curves(models, X_train, y_train, X_valid, y_valid, roc_title, pr_title, labels):
    fig, axes = plt.subplots(1, 2, figsize=(14, 6))
    if not isinstance(X_train, list):
        for i, model in enumerate(models):
            model_fit = model.fit(X_train, y_train)
            model_probs...
[ "def plot_pr_and_roc_curves(labels_and_model_outputs: LabelsAndPredictions) -> None:\n print_header(\"ROC and PR curves\", level=3)\n _, ax = plt.subplots(1, 2)\n\n fpr, tpr, thresholds = roc_curve(labels_and_model_outputs.labels, labels_and_model_outputs.model_outputs)\n\n plot_auc(fpr, tpr, \"ROC Curv...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Should error without relative_path argument.
def test_missing_relative_path(self):
    opened = self.post('/sync-file', {'chunk': 'abc'})
    response = opened.response
    self.assert_has_error_code(response, 'INVALID_ARGS')
[ "def test_relative_base_upload_path(self):\n base_path = \"tmp\"\n user_supplied_index_name = \"a89933473b2a48948beee2c7e870209f\"\n with self.assertRaises(ValueError):\n utils.format_upload_path(base_path, user_supplied_index_name)", "def test_init_get_abs_path_throws(self):\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Should synchronize file remotely to shared library location.
def test_valid_shared(self):
    support.create_project(self, 'peter-2')
    project = cauldron.project.get_internal_project()
    response = support.run_remote_command(
        'open "{}" --forget'.format(project.source_directory)
    )
    self.assert_no_errors(response)
    project = cau...
[ "def syncfile( src_path, tgt_path, tmpbase=None, keeptmp=False,\n synctimes=False, syncperms=False, syncowner=False, syncgroup=False,\n pre_checksums=False, post_checksums=True ):\n if tmpbase is None:\n #TODO - If tmpbase is None, create one at the mountpoint\n # tmpbase ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return all callbacks linked to this object.
def all(self):
    callbacks = {}
    handler = type(self).script
    if handler:
        dicts = handler.get_callbacks(self.obj)
        for callback_name, in_list in dicts.items():
            new_list = []
            for callback in in_list:
                callback = self.format_cal...
[ "def GetCallbacks(self):\n return {\"UpdatePos\": self._MarkDirty,\n \"DeleteObject\": self._DelObject,\n \"AddObject\": self._AddObject}", "def callback_iter(self) -> Iterable:\n return chain(\n self.callbacks.success,\n self.callbacks.retry,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete the specified callback bound to this object.
def remove(self, callback_name, number):
    handler = type(self).script
    if handler:
        handler.del_callback(self.obj, callback_name, number)
[ "def remove(self, callback):\n self._listeners.remove(callback)", "def remove_callback(self):\n\n\t\tself.callback = None\n\n\t\treturn", "def removeDeleteCallback(self, *args):\n return _coin.ScXMLStateMachine_removeDeleteCallback(self, *args)", "def removeDeleteCallback(self, *args) -> \"void\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract atoms from the SVG atom elements
def _parse_atoms_from_svg(atom_elements, mol: rdkit.Chem.Mol):
    result = []
    for atom_svg in atom_elements:
        try:
            atom_id_str = re.search(r"\d+", atom_svg.attrib.get("class")).group(0)
            atom_id = int(atom_id_str)
            if atom_id >= mol.GetNumAtoms():
                continue
            ...
[ "def _parse_labels_from_svg(path_elements, atoms):\n atom_id_re = r\"atom-\\d+\"\n for label_svg in path_elements:\n try:\n match = re.fullmatch(atom_id_re, label_svg.attrib[\"class\"])\n if not match:\n continue\n\n atom_id = int(match.group(0)[5:])\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse atom label information from the SVG.
def _parse_labels_from_svg(path_elements, atoms):
    atom_id_re = r"atom-\d+"
    for label_svg in path_elements:
        try:
            match = re.fullmatch(atom_id_re, label_svg.attrib["class"])
            if not match:
                continue
            atom_id = int(match.group(0)[5:])
            atoms[atom...
[ "def parse_label(self):\n name = self.consume(\"ID\")\n self.consume(\":\")\n statement = self.parse_statement()\n return self.semantics.on_label(name.val, statement, name.loc)", "def get_annotation(label):\n assert get_xsi_type(label) == 'saltCore:SAnnotation'\n return (label.at...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract bonding information from SVG elements
def _parse_bonds_from_svg(bond_elements, mol):
    result = []
    re_bond_regex = r"bond-\d+"
    for bond_svg in bond_elements:
        try:
            if not re.search(re_bond_regex, bond_svg.attrib["class"]):
                continue
            atoms = re.findall(r"atom-\d+", bond_svg.attrib["class"])
            ...
[ "def process_svg(svg_data):\n tree = ElTree.fromstring(svg_data)\n parent_map = {c: p for p in tree.iter() for c in p}\n point_annotations = tree.findall(\".//{}\".format(circle_el))\n point_names = tree.findall(\".//{}/../{}\".format(circle_el, text_el))\n circle_groups = tree.findall(\".//{}[{}]\"....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save image with the text 'No image available' as a png.
def _png_no_image(path_to_image, width):
    font = None
    font_path = _supply_font()
    if font_path:
        font = ImageFont.truetype(font_path, size=(int(width / 8)))
    else:
        font = ImageFont.load_default()
    white = (255, 255, 255)
    black = (0, 0, 0)
    img = Image.new("RGBA", (wi...
[ "def save_png(self, filename):\n if self.png:\n data = base64.decodebytes(bytes(self.png, 'ascii'))\n with open(filename, 'wb') as f:\n f.write(data)\n else:\n warnings.warn('No png image available! Try auto_save_png() instead?')", "def write_png(self,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Platform-nonspecific function to locate a sans-serif font in the environment.
def _supply_font():
    font = ""
    if platform == "linux" or platform == "linux2":
        font = "/usr/share/fonts/gnu-free/FreeSans.ttf"
    elif platform == "darwin":
        font = "/Library/Fonts/arial.ttf"
    elif platform == "win32":
        font = "c:\\windows\\font\\arial.ttf"
    if os.path.isfile(font):...
[ "def get_fontname(environment):\n if environment == EnvironmentType.DEV:\n if os.name == 'nt': # ultraman or 4KOFFICE\n return 'c:/Windows/Boot/Fonts/segmono_boot.ttf'\n return '/usr/share/fonts/truetype/ubuntu-font-family/UbuntuMono-B.ttf'\n\n if environment == EnvironmentType.PROD:\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Makes a thumbnail of the given size from the given image
def make_thumbnail(image, size=(100, 100)):
    im = Image.open(image)
    im = im.convert('RGB')  # convert mode
    im.thumbnail(size)  # resize image
    thumb_io = BytesIO()  # create a BytesIO object
    im.save(thumb_io, 'webp', quality=85)  # save image to BytesIO object
    thumbnail = ...
[ "def resize_image(image, size):\n image.thumbnail(size)\n return image", "def create_thumbnail( infile, outfile, size ):\n try:\n im = Image.open(infile)\n im.thumbnail(size,Image.ANTIALIAS)\n im.save(outfile,\"JPEG\")\n return im.size\n except IOError,e:\n sys.stderr.write(\"An error occur...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Setup arcpy and the list of tool parameters.
def __init__(self):
    self.params = arcpy.GetParameterInfo()
[ "def __init__( self, config_filename, tool_root_dir, app ):\n self.tools_by_id = {}\n self.workflows_by_id = {}\n self.tool_panel = odict()\n self.tool_root_dir = tool_root_dir\n self.app = app\n try:\n self.init_tools( config_filename )\n except:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test List[Optional[...]] vs Optional[List[...]]
def test_list_of_optional():
    @model
    class OptionalItem:
        names: List[Optional[str]]

    assert OptionalItem.from_server({'names': ['a', None]}) == OptionalItem(names=['a', None])

    with pytest.raises(ValidationError):
        assert OptionalItem.from_server({'names': None}) == OptionalItem(names=None...
[ "def tameNoneList(maybeList):\n\n ret = []\n\n if isinstance(maybeList, (list, tuple)):\n for item in flattenList(maybeList):\n if item is not None:\n ret.append(item)\n\n # If maybeList is not None\n elif maybeList:\n ret.append(maybeList)\n\n return ret", "...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test a dict of a primitive.
def test_dict_of_primitive():
    @model
    class Foo:
        names: Dict[str, bool]

    assert Foo.from_server({'names': {'a': True, 'b': False}}) == Foo(names={'a': True, 'b': False})
[ "def _is_primitive(self, valid_dict):\n if valid_dict['type'] in self.primitive_keys:\n return True\n return False", "def test_valchk_dict_value_type():\n\n allowed = {\"test\": str, \"test2\": int, \"test3\": bool}\n passed = badparams(allowed)\n ep = Endpoint()\n\n assert ep...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test custom marshal/unmarshal functions for a field.
def test_custom_marshal_functions():
    def dump(data, instance, field):
        # Instead of {v: 1}, output {1: v}
        data[getattr(instance, field.name)] = field.name
        return data

    def load(data, field):
        # Consume all other keys, sum length of all
        sum = 0
        for k, v in data.item...
[ "def register(self, field_name, func, fake=...):\r\n ...", "def test_serializer_field_values(self):\n pass", "def test_convert(schema, value, read_only):\n return_value = object_.convert(schema=schema, value=value, read_only=read_only)\n\n assert return_value == {\"key\": \"value\"}", "def...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the PolyField with primitives
def test_with_primitives(self):
    f = PolyField({
        str: fields.String(),
        int: fields.Integer()
    })
    assert f.serialize('num', {'num': 10}) == 10
    assert f.serialize('num', {'num': 'test'}) == 'test'
    with pytest.raises(ValidationError):
        assert f.ser...
[ "def polyPrimitive(sideLength=\"string\", axis=\"string\", radius=\"string\", polyType=int, constructionHistory=bool, name=\"string\"):\n pass", "def test_primitives(self):\n g = FHIRGraph()\n tests = [self.is_xsd_primitive(p, g) for p in g.subjects(RDFS.subClassOf, FHIR.Primitive)]\n self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
List all nondefault quotas.
def list_quotas(self, params=None):
    url = "v2/lbaas/quotas"
    if params:
        url = '{0}?{1}'.format(url, parse.urlencode(params))
    resp, body = self.get(url)
    body = jsonutils.loads(body)
    self.expected_success(200, resp.status)
    return rest_client.ResponseBodyList(resp...
[ "def list_rqss(self):\n return self.resource_quota_set_manager.list_objects()", "def get_all_quotas(cls, context, resources):\n default_quota = cls.get_default_quotas(context, resources)\n project_list = cls._get_vnc_conn().projects_list()['projects']\n ret_list = []\n for proje...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get Quotas for a project.
def get_quotas(self, project_id, params=None):
    url = self._QUOTAS_URL.format(project_id=project_id)
    if params:
        url = '{0}?{1}'.format(url, parse.urlencode(params))
    resp, body = self.get(url)
    body = jsonutils.loads(body)
    self.expected_success(200, resp.status)
    ...
[ "def _get_quotas(self, context, project_id, resources):\n # Grab and return the quotas (without usages)\n quotas = DbQuotaDriver.get_project_quotas(\n context, resources, project_id)\n\n return dict((k, v) for k, v in quotas.items())", "def get_projects(self):\n return ', '....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update the quotas for a project.
def update_quotas(self, project_id, **kwargs):
    url = self._QUOTAS_URL.format(project_id=project_id)
    put_body = jsonutils.dumps(kwargs)
    resp, body = self.put(url, put_body)
    body = jsonutils.loads(body)
    self.expected_success(202, resp.status)
    return rest_client.ResponseBody...
[ "def update(self, q={}, values={}):\n self.store.update(q, values, multi=True, upsert=True)", "def update_quest(unit_id, quest_id):\n update = flask.request.json\n\n if any(field not in models.Quest.editable_fields for\n field in update.iterkeys()):\n msg = MESSAGE_FOR_400 % ', '.jo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete the quotas for a project.
def delete_quotas(self, project_id):
    url = self._QUOTAS_URL.format(project_id=project_id)
    resp, body = self.delete(url)
    self.expected_success(202, resp.status)
    return rest_client.ResponseBody(resp, body)
[ "def delete_project(conn, id):\n sql = 'DELETE FROM projects WHERE id=?'\n cur = conn.cursor()\n cur.execute(sql, (id,))\n conn.commit()", "def deleteProject(path,ty=None):\n if ty:\n path = os.path.join(path,ty)\n try:\n shutil.rmtree(path)\n print(\"deleted\")\n except:...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the median, average, or minimum length of a FAST5 read.
def compute_on_length(fast5_file, stat):
    with h5py.File(fast5_file, "r") as hdf:
        hdf_path = "Analyses/RawGenomeCorrected_000/"
        hdf_events_path = '{hdf_path}BaseCalled_template/Events'.format(hdf_path=hdf_path)
        event_lengths = hdf[hdf_events_path]["length"]
        if stat == "median":
            avg...
[ "def median(data_set):\n data_set_length = len(data_set)\n sorted_data_set = sorted(data_set)\n midpoint = data_set_length // 2\n if data_set_length % 2:\n return sorted_data_set[midpoint]\n else:\n hi = sorted_data_set[midpoint]\n lo = sorted_data_set[midpoint - 1]\n retu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the key to be used in external maps for this object
def extern_key(self):
    return '%s %s' % (self.objtype.lower(), self.name)
[ "def extern_key(self):\n return self.name", "def get_key(self) -> str:\n\n raise RuntimeError('Synthetic get_key is missing on RuntimeMapSubclass!')", "def json_key(self):\n pass", "def key(self):\n return self.field_name", "def getCacheKey(self):\n\t\treturn self.cacheKey", "def k...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return SQL statement to create COMMENT on object
def comment(self):
    if hasattr(self, 'description'):
        descr = "'%s'" % self.description
    else:
        descr = 'NULL'
    return "COMMENT ON %s %s IS %s" % (
        self.objtype, self.identifier(), descr)
[ "def strip_comments_from_sql(statement: str) -> str:\n return ParsedQuery(statement).strip_comments() if \"--\" in statement else statement", "def genStatementDoc(self, statement, ctx):\n pass", "def __repr__(self):\n\n return '\\n'.join(textwrap.TextWrapper(\n width=FileComment.cols...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return SQL statement to DROP the object
def drop(self):
    return "DROP %s %s" % (self.objtype, self.identifier())
[ "def drop_table(table_name):\n\n drop_table = f\"DROP TABLE {table_name};\"\n return drop_table", "def delete(self, sql):", "def drop_table(self):\n self.connect()\n try:\n sql = \"drop table if exists {0}\".format(self.tablename)\n self.cursor.execute(sql)\n exc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return SQL statement to RENAME the object
def rename(self, newname):
    return "ALTER %s %s RENAME TO %s" % (self.objtype, self.name, newname)
[ "def get_rename_sql(old, new):\n return 'ALTER TABLE {} RENAME TO {};'.format(old, new)", "def visit_table(self, param):\n table, newname = param\n self.start_alter_table(table)\n self.append(\"RENAME TO %s\"%newname)\n self.execute()", "def gtable_rename(object_id, input_para...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a SQL ALTER statement to RENAME the schema object
def rename(self, newname):
    return "ALTER %s %s RENAME TO %s" % (self.objtype, self.qualname(), newname)
[ "def get_rename_sql(old, new):\n return 'ALTER TABLE {} RENAME TO {};'.format(old, new)", "def alter_table(redshift_schema, redshift_table, redshift_table_new):\n alter_command = \"ALTER TABLE {redshift_schema}.{redshift_table}\" \\\n \" RENAME TO {redshift_table_new};\"\\\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a SQL SET search_path if not in the 'public' schema
def set_search_path(self):
    stmt = ''
    if self.schema != 'public':
        stmt = "SET search_path TO %s, pg_catalog" % quote_id(self.schema)
    return stmt
[ "def find_schema_path():\n schema_files_path = DEFAULT_SCHEMA_FILES_PATH\n for path in schema_files_path:\n if os.path.exists(os.path.join(path, 'bootstrap.sql')):\n return path\n raise SchemaFilesNotFound('Searched ' + os.pathsep.join(schema_files_path))", "def public_filesystem_locati...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize the dictionary by querying the catalogs. This may be overridden by derived classes as needed.
def _from_catalog(self):
    for obj in self.fetch():
        self[obj.key()] = obj
[ "def __init__(self, **kwargs):\n self.catalog_items = {}\n\n for cls in self.__class__.__subclasses__():\n subclass = cls(**kwargs)\n namespace = subclass.namespace\n catalog_resources = subclass.catalog_resources\n\n for k, v in catalog_resources.items():\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fetch all objects from the catalogs using the class query
def fetch(self):
    if not self.dbconn.conn:
        self.dbconn.connect()
    data = self.dbconn.fetchall(self.query)
    return [self.cls(**dict(row)) for row in data]
[ "def _allInstances(cls):\n return pyalaocl.asSet(_theSession().findByClass(cls))", "def all(self, cls=None):\n if cls is None:\n objs = []\n for obj in self.__mdoels:\n objs.extend(self.__session.query(eval(obj)).all())\n else:\n if type(cls...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update descendants' `ancestor fee/size/sigops` for a transaction being removed from the mempool. On the first recursion we also remove ourselves from `depends` of first children.
def update_descendants(self, txid: str, fee: int, size: int, sigopscost: int, first: bool = False):
    # If we have no descendants, just return
    if not self[txid].spentby:
        logger.debug(f"no descendants to update for {txid}")
        return
    # Each tx in txid.spentby should have (this...
[ "def remove_transaction(self, txid: str):\n if txid not in self:\n logger.error(f\"not removed {txid} from mempool as not found\")\n return\n # Remove ancestor fee/size from descendants\n self.update_descendants(\n txid=txid,\n fee=int(self[txid].fees...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes a transaction from the mempool. Unlike Bitcoin Core, we are modifying the mempool in place so that we can create a second blocktemplate after the first.
def remove_transaction(self, txid: str):
    if txid not in self:
        logger.error(f"not removed {txid} from mempool as not found")
        return
    # Remove ancestor fee/size from descendants
    self.update_descendants(
        txid=txid,
        fee=int(self[txid].fees["base"] * COI...
[ "def remove_block(self, blocktemplate):\n logger.debug(f\"starting intersection of blocktemplate and mempool\")\n\n i = 0\n for transaction in blocktemplate.tx:\n self.remove_transaction(transaction[\"txid\"])\n i += 1\n logger.info(f\"deleted {i} transactions from ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Intersects transactions in a `blocktemplate` and `mempool`
def remove_block(self, blocktemplate):
    logger.debug(f"starting intersection of blocktemplate and mempool")

    i = 0
    for transaction in blocktemplate.tx:
        self.remove_transaction(transaction["txid"])
        i += 1
    logger.info(f"deleted {i} transactions from mempool after int...
[ "def __block_equal(self, a, b):\r\n \r\n return self.__block2pair(a) == self.__block2pair(b)", "def _within_TX(self, Tx):\n ## Tests whether a transaction is related to this node in \n ## any way. If not there is no case for processing it.\n return within_TX(Tx, self.shard[0], s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For each cluster in Cdb, run pairwise ANIn
def run_anin_on_clusters(Bdb, Cdb, data_folder, **kwargs):
    n_c = kwargs.get('n_c', 65)
    n_maxgap = kwargs.get('n_maxgap', 90)
    n_noextend = kwargs.get('n_noextend', False)
    n_method = kwargs.get('method', 'mum')
    p = kwargs.get('processors', 6)
    dry = kwargs.get('dry', False)
    overwrite = kwargs.g...
[ "def runCoClustering(self):\n return 0", "def main():\n\n centroids = cc.main()\n\n #convert to c array representation\n #c_centroids = c_array(centroids)\n #print(c_centroids['1'])\n\n test_data, target_values = mD.read_data('testData_small.txt') #to be filled in\n\n for i, instance in enume...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
genomes is a list of locations of genomes in .fasta files. This will do pairwise comparisons of those genomes using the nucmer settings given
def run_nucmer_genomeList(genomes, outf, b2s, c=65, maxgap=90, noextend=False, method='mum', dry=False):
    # Run commands on biotite
    cmds = []
    for g1 in genomes:
        for g2 in genomes:
            out = "{0}{1}_vs_{2}".format(outf, get_genome_name_from_fasta(g1), get_genome_name_from_fasta(g2))
            cmds.a...
[ "def compute_genome_distances(genomes):\n p = Pool(initializer=init_pool, initargs=(None, genomes))\n genome_edit_dists = p.starmap(genome_distance,\n list(itertools.product(range(len(genomes)), range(len(genomes))))[:0]) # TODO\n p.close()\n if genome_edit_dists: # ch...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run MASH pairwise within all samples in Bdb
def all_vs_all_MASH(Bdb, data_folder, **kwargs):
    MASH_s = kwargs.get('MASH_sketch', 1000)
    dry = kwargs.get('dry', False)
    overwrite = kwargs.get('overwrite', False)
    mash_exe = kwargs.get('mash_exe', 'mash')
    p = kwargs.get('processors', 6)

    # set up logdir
    if 'wd' in kwargs:
        logdir = kwa...
[ "def psd_pairwise_comparison():\n # Location of the data \n base_dir = '../example/' \n \n # Data resolution, in nanometers \n resolution = {'res_xy_nm': 100, 'res_z_nm': 70}\n \n # Threshold value for the probability maps. This value does not usually need to be changed. \n thresh = 0.9\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns (alignment length, similarity errors) tuple from the passed .delta file.
filename - path to the input .delta file
Extracts the aligned length and number of similarity errors for each aligned, uniquely-matched region, and returns the cumulative total for each as a tuple.
def parse_delta(filename):
    aln_length, sim_errors = 0, 0
    for line in [l.strip().split() for l in open(filename, 'rU').readlines()]:
        if line[0] == 'NUCMER' or line[0].startswith('>'):  # Skip headers
            continue
        # We only process lines with seven columns:
        if len(line) == 7:
            ...
[ "def parse_delta(filename):\n aln_length, sim_errors = 0, 0\n for line in [l.strip().split() for l in open(filename, \"r\").readlines()]:\n if line[0] == \"NUCMER\" or line[0].startswith(\">\"): # Skip headers\n continue\n # We only process lines with seven columns:\n if len(l...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a tuple of ANIm results for .deltas in the passed directory.
delta_dir - path to the directory containing .delta files
org_lengths - dictionary of total sequence lengths, keyed by sequence
Returns the following pandas dataframes in a tuple; query sequences are
def process_deltadir(deltafiles, org_lengths, logger=None):
    # Process directory to identify input files
    # deltafiles = glob.glob(delta_dir + '*.delta')
    Table = {'querry': [], 'reference': [], 'alignment_length': [], 'similarity_errors': [],
             'ref_coverage': [], 'querry_coverage': [], 'ani': [],
             'reference_le...
[ "def process_deltadir(delta_dir, org_lengths, logger=None):\n # Process directory to identify input files - as of v0.2.4 we use the\n # .filter files that result from delta-filter (1:1 alignments)\n deltafiles = pyani_files.get_input_files(delta_dir, \".filter\")\n\n # Hold data in ANIResults object\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a dictionary representing the board. The keys are (x, y) tuples and the values are '~' and '.' strings to represent waves.
def getNewBoard():
    board = {}
    for x in range(BOARD_WIDTH):
        for y in range(BOARD_HEIGHT):
            # Add wave characters to the board:
            if random.randint(0, 1) == 0:
                board[(x, y)] = '~'
            else:
                board[(x, y)] = '.'
    return board
[ "def get_board_dict(self):\n return {p: self.as_string(p) for p in board.positions()}", "def getAllBoardCoord(driver):\n board_list = ['hi', 'mid', 'lo']\n board_dict = {}\n for b in board_list: \n tmp = getTemplate(b)\n game_image = getGameImage(driver, 'layer2')\n board_coo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Change the board data structure with a sonar device character. Remove treasure chests from the chests list as they are found. Return False if this is an invalid move. Otherwise, return the string of the result of this move.
def makeMove(board, chests, x, y):
    smallestDistance = 100  # Any chest will be closer than 100.
    for cx, cy in chests:
        distance = math.sqrt((cx - x) * (cx - x) + (cy - y) * (cy - y))
        if distance < smallestDistance:  # Use the closest chest.
            smallestDistance = distance
    smallestDi...
[ "def makeMove(board, chests, x, y):\n if not isValidMove(x, y):\n return False\n \n smallestDistance = 100 # any chest will be closer than 100.\n for cx, cy in chests:\n if abs(cx - x) > abs(cy - y):\n distance = abs(cx - x)\n else:\n distance = abs(cy - y)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the reference to the loaded library object. For example, if `libtype` is
def lib(self):
    return self._lib
[ "def getLibrary(self):\n return self._library", "def load_library():\n #load library\n lib=ctypes.cdll.LoadLibrary(find_path())\n return lib", "def getSingleLibrary(self, context, id):\n libraries = [l for l in self.getLibraries(context) if l['id']==id]\n\n for l in libraries:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function calculates the average of the first m numbers in the k-th line of the data
def f(m, k):
    return list(map(lambda x: numpy.average(data[k][:x]), m))
[ "def find_averages_of_subarrays(k, nums):\n result = []\n window_sum, window_start = 0, 0\n # 1. For all possible window ends\n for window_end in range(len(nums)):\n window_sum += nums[window_end]\n # 2. Check if valid window\n if window_end >= (k - 1):\n # 3. Calculation...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the Chebyshev upper bound for the given epsilon
def chebyshev(m, eps):
    return 1 / (m * 4.0 * eps ** 2)
[ "def bin_probability(q_ranges, epsilon):\n return rp(q_ranges[0], epsilon)*rp(q_ranges[1], epsilon)/4", "def debye_fn_cheb(x):\n val_infinity = 19.4818182068004875;\n xcut = -log_eps\n \n assert(x > 0.0) #check for invalid x\n\n if x < 2.0*np.sqrt(2.0)*sqrt_eps:\n return 1.0 - 3.0*x/8.0 +...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function creates the plot for question number 29. It presents the mean of all tosses up to m for every m from 1 to 1000
def make_plot():
    t = numpy.arange(1, 1000, 1)
    plt.plot(f(t, 0), color="red", label="line number 1")
    plt.plot(f(t, 1), color="blue", label="line number 2")
    plt.plot(f(t, 2), color="green", label="line number 3")
    plt.plot(f(t, 3), color="orange", label="line number 4")
    plt.plot(f(t, 4), color="pur...
[ "def MSTD(X , m , M , step , n_runs , max_iter = 2000 , n_jobs = -1):\n fig, ax = plt.subplots(1 , 2 , figsize = (20 , 7))\n mean = []\n for i in tqdm(range(m , M+step , step)):\n #for i in range(m , M+step , step): #uncomment if you don't want to use tqdm (and comment the line above !)\n s = Sta...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function creates the plot that presents, for every epsilon (from the list above) and for every m, the upper bound of the probability to get a mean that is farther from the expected value than epsilon, by Hoeffding and Chebyshev. It also presents the percentage of sequences that satisfy the distance from eps...
def make_bound_plot():
    t = numpy.arange(1, 1000, 1)
    i = 0
    for e in epsilon:
        plt.figure(i)
        i += 1
        plt.plot(numpy.clip(chebyshev(t, e), 0, 1), color="red", label="Chebyshev")
        plt.plot(numpy.clip(hoeffding(t, e), 0, 1), color="blue", label="Hoeffding")
        plt.plot(g(e), col...
[ "def epsautoconfeval(epsilon):\n\n # # distribution of all distances in matrix\n # hstplt = SingleMessagePlotter(specimens, tokenizer+'-distance-distribution-histo', args.interactive)\n # hstplt.histogram(tril(sm.distances), bins=[x / 50 for x in range(50)])\n # plt.axvline(epsilon, label=\"manually det...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that searches all subfolders of the given folder. This function assumes that all files in that folder are image files. If this is not the case, errors will occur, as no check is carried out. For each file, it checks that both of its dimensions are bigger than min_pixels. If so, it will rescale and crop the image to m...
def find_all_files(min_pixels, origin_folder, target_folder):
    # count = 0
    for root, dirs, files in os.walk(origin_folder):
        vis_files = [f for f in files if not f[0] == '.']
        copy = True
        """
        copy = False
        if(root.endswith("indoor")):
            print("I am in...
[ "def resize_images():\n logger.info(\"Resizing images\")\n path = f\"{top_dir}/train/\"\n resize_path = f\"{top_dir}/resized/train/\"\n sep = '/'\n resizeSize = (500, 800)\n for folder in os.listdir(path):\n logger.info(folder)\n files = os.listdir(path + folder)\n for file in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read time/voltage data from a CSV file, assuming time (in seconds) in the first column and voltage (in mV) in the second column
def read_time_volts(fname='ziva.csv'):
    from numpy import genfromtxt
    data = genfromtxt(fname, delimiter=',', skip_header=10)
    time = [row[0] for row in data]
    volts = [row[1] for row in data]
    return time, volts
[ "def load_data(f):\n import csv\n with open(f, newline='') as csvfile:\n ecgreader = csv.reader(csvfile, delimiter=' ')\n time, voltage, high_voltages = organize_data(ecgreader, f)\n return time, voltage, high_voltages", "def read_wave_probe_csv(filename):\n time, eta = [], []\n data ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
find Nyquist frequency from time data
def find_nyq_freq(time):
    samp_time = time[1] - time[0]
    samp_rate = 1 / samp_time
    nyq_freq = 0.5 * samp_rate
    return nyq_freq
[ "def freq(n, dt):\n import numpy as np\n return 1.0*np.arange(n)/n/dt", "def fftfreq(n, dtype=torch.float, device=torch.device(\"cpu\")):\n return (torch.arange(n, dtype=dtype, device=device) + n // 2) % n - n // 2", "def freq_per_yearday(self):\n feat = [int(log.split('\\t')[11]) for log in sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
this finds the maximum of the peaks in the filtered signal and the timepoint at which it occurs
def find_max_peak(volt_minus_dc, time):
    from scipy import signal
    from numpy import arange, std
    import numpy as np

    # Find all peaks
    peak_ind = signal.find_peaks_cwt(volt_minus_dc, arange(1, 10))
    volt_peak = [volt_minus_dc[int(i)] for i in peak_ind]
    time_peak = [tim...
[ "def detect_peaks_1d(timeseries, delta_peak, threshold, peak_width=5):\n\n # Sort time series by magnitude.\n max_idx = np.squeeze(timeseries.argsort())[::-1]\n\n # Remove peaks within delta_peak to the array boundary\n max_idx = max_idx[max_idx > delta_peak]\n max_idx = max_idx[max_idx < np.size(tim...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Upload spark application parameter dictionary to S3 as JSON file
def _upload_parameter_json(self, output_dictionary, local_file_path, cluster_file_path):
    parameters_json_content = json.dumps(output_dictionary, sort_keys=True, default=lambda o: o.__dict__)
    logging.info("Uploading parameter JSON.\n Content:\n {}".format(parameters_json_content))
    # store dicti...
[ "def upload_dictionary(self, bucket_name, file_name, dictionary):\n s3_object = self.s3.Object(bucket_name, file_name)\n s3_object.put(Body=json.dumps(dictionary))", "def save_apps_s3(apps, bucket):\n local_path = LOCAL_DIR\n s3_path = 'apps/apps.json'\n if local_path.exists():\n fil...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Execute the spark-submit command for the algorithm to be executed
def _spark_submit(self, application_class):
    # get spark submit String
    spark_str = self._get_spark_submit_str()
    logging.info("Executing Spark: {}".format(spark_str))
    # Add step to EMR cluster.
    step_name = "EMR Step: Running Spark Application Class {}".format(application_class)
    ...
[ "def submit_pyspark_job(self, job_config):\n\n job_config = {\n \"args\": [\n \"gs://fynd-new-bucket/a/README.txt\"\n ],\n \"pythonFileUris\": [\n job_config.get('files.zip')\n ],\n \"mainPythonFileUri\": job_config.get('mai...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes the spark application parameter file from S3
def _remove_parameter_json(self, parameter_file_path):
    self._execution_system.s3_util.delete_object(parameter_file_path)
[ "def delete_file(key):\n try:\n s3_bucket.Object(key).delete()\n except Exception as e:\n print(e)", "def delete_file_from_bucket(self):\n self.s3_client.delete_object(Bucket=BUCKET_NAME, Key=FILENAME)\n print(\"File %s deleted from Bucket: %s\" % (FILENAME, BUCKET_NAME))", "de...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Uses put-call parity to calculate the put price given the stock price, call price, strike price, risk-free interest rate, and time to expiration
def put_price(s, c, x, r, t):
    return c - s + x * math.exp(-r * t)
[ "def put(stock_price, strike_price):\n return (strike_price - stock_price) * (strike_price > stock_price)", "def stockbot2(ticker, secs, rsi_per):\n #The net liquid value of all assets\n netl = 2000\n #Cash on hand after buying stock\n cash = 2000\n #amount of stock held\n amount = 0\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a place name or address, return the nearest MBTA stop and the distance from the given place to that stop.
def find_stop_near(place_name):
    latitude, longitude = get_lat_long(place_name)
    mbta_station_name, distance, stop_lat, stop_lon = get_nearest_station(latitude, longitude)
    return mbta_station_name, distance, stop_lat, stop_lon
[ "def getStopNear(place):\n\tlatlng = getLatLong(place)\n\tinfo = getClosestMBTA(latlng)\n\tprint 'Nearest MBTA stop: ' + info[0]\n\tprint 'Distance away: ' + info[1] + ' miles'", "def find_stop_near(place_name):\n try:\n lat_lng = get_lat_long(place_name)\n nearest_stop = get_nearest_station(lat_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extracts domain from url
def extract_domain(url):
    return urlparse(url).netloc
[ "def _get_domain_url(self, url):\n o = urlparse(url)\n return o.scheme + \"://\" + o.netloc", "def getBaseDomain(url):\n\treturn urlparse.urlparse(url).netloc", "def parse_domain(url):\n\n if not url.startswith('http://'):\n url = 'http://' + url\n top_level_domains = get_tlds()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Split list into n lists
def split_list(li, n):
    k, m = divmod(len(li), n)
    return [li[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(n)]
[ "def do_chunkify(lst,n):\n return [lst[i::n] for i in range(n)]", "def chunks(l, n):\n \n if n<1:\n n=1\n return [l[i:i+n] for i in range(0, len(l), n)]", "def chunks(l, n):\n o = int(np.round(len(l)/n))\n out = []\n # For item i in a range that is a length of l,\n for i in range(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
build a DB by reading all song info in the JSON file with the given db_name
def create_songs_db_from_json_file(json_file_name: str, db_name: str) -> None: with open(json_file_name, encoding="utf8") as reader: data = json.load(reader) if os.path.exists(db_name): raise Exception("param db_name:"+db_name+" already exists in path !") pass else: default_...
[ "def SQSDatabase(path, name_constraint=''):\n db = TinyDB(storage=MemoryStorage)\n dataset_filenames = recursive_glob(path, '*.json')\n dataset_filenames = [fname for fname in dataset_filenames if name_constraint.upper() in fname.upper()]\n for fname in dataset_filenames:\n with open(fname) as fi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds an item to the list. If the list is small enough, the item is added to the head; otherwise it goes to the tail (evicting the oldest stored tail item).
def append(self, item): # Keep count of all elements ever added (even though they may not be # actually stored). self.total_count += 1 # List is still short, grow head. if len(self.head) < self.head_size: self.head.append(item) else: # List is long enough to start using tail. Grow ta...
[ "def add(self, item):\n temp = Node(item)\n temp.set_next(self.head)\n self.head = temp", "def append(self, item):\n #create a new node\n newNode = Node(item)\n #find node at tail\n #point from that last node in tail to new node\n if self.tail == None :\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates a signature and extracts the exception, if any.
def _signature_from_message(message): assert isinstance(message, unicode), repr(message) lines = message.splitlines() if not lines: return '', None if _STACK_TRACE_MARKER not in lines: # Not an exception. Use the first line as the 'signature'. # Look for special messages to reduce. if lines[0]...
[ "def computeSignature(self, image, signature=...) -> signature:\n ...", "def computeSignatures(self, images, signatures) -> None:\n ...", "def signature_checking(self,meta):\n if self.vertification(meta):\n pass\n else:\n raise Exception('Incorrect Signature')",...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Yields _ErrorRecord objects from the logs.
def _extract_exceptions_from_logs(start_time, end_time, module_versions): if start_time and end_time and start_time >= end_time: raise webob.exc.HTTPBadRequest( 'Invalid range, start_time must be before end_time.') try: for entry in logservice.fetch( start_time=start_time or None, en...
[ "def _yield_logs(start_time, end_time):\n # If module_versions is not specified, it will default to the current version\n # on current module, which is not what we want.\n # TODO(maruel): Keep request.offset and use it to resume the query by using it\n # instead of using start_time/end_time.\n module_versions ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if an _ErrorCategory should be ignored.
def _should_ignore_error_category(monitoring, error_category): if not monitoring: return False if monitoring.silenced: return True if (monitoring.silenced_until and monitoring.silenced_until >= utils.utcnow()): return True if (monitoring.threshold and len(error_category.events) < monitor...
[ "def _category_exclude_func(self, test, result):\n if test.categories:\n category = self._callbacks.get_category()\n if category is None:\n self._callbacks.undefined_category(test, result)\n return True\n elif category not in test.categories:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a logservice.RequestLog for a request id or None if not found.
def _log_request_id(request_id):
    request = list(logservice.fetch(
        include_incomplete=True, include_app_logs=True,
        request_ids=[request_id]))
    if not request:
        logging.info('Dang, didn\'t find the request_id %s', request_id)
        return None
    assert len(request) == 1, request
    return request[0]
[ "def _get_request(self, request_id: str) -> Optional[Request]:\n with self._lock:\n try:\n return self._requests[request_id]['request']\n except KeyError:\n return None", "def getLogEntryById( self, id=None ):\n return self._getLogEntry( id )", "...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify the proof for Predicate implementation
def verifyPredicateProof(proof: PredicateProof, credDefPks, nonce, attrs: Dict[str, Dict[str, T]], revealedAttrs: Sequence[str], predicate: Dict[str, Sequence[str]]): Tau = [] subProofC, subProofPredicate, C, CList =...
[ "def proof(self):\n return self.__proof", "def is_valid_result_proof(result_proofs, cal_result):\n\n if len(result_proofs) < 1:\n logging.error(\"MISBEHAVIOUR: Empty Result Proof \",\n extra=logger.NODE_INFO)\n return False\n\n\n result = result_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to manage account modification
def accounts_modify(request): # Creating the default value account = None # If not declared in settings, configuring a default value # http://www.django-rest-framework.org/api-guide/exceptions/#exception-handling-in-rest-framework-views try: nfe = settings.NON_FIELD_ERRORS_KEY except At...
[ "def test01_edit_account(self):\n\n self.lg('Create new username, user:%s password:%s' % (self.username, self.password))\n self.Users.create_new_user(self.username, self.password, self.email, self.group)\n self.lg('create new account %s' % self.account)\n self.Accounts.create_new_account...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
function to return the mapping list for street names; this is the list of corrections/changes which frequently occur in the dataset
def getStreetMapping(): mapping = { "Rd" : "Road", "up" :"UP", "delhi":"Delhi", "Delhi.":"Delhi", "delhi": "Delhi", "noida":"Noida", "Noida," : "Noida", "NOIDA" : "Noida", "gurgaon" : "Gurgaon", "NAGAR" : "Nagar", "nagar" : "Na...
[ "def getPostCodeMapping():\n postcode_mapping = {\n \"110031v\" : \"110031\", #removed the extra v in the end\n \"2242\" : \"122001\", # manually scanned the OSM file for pincode for same place\n \"10089\" : \"110085\", #checked manually on internet\n \"1100002\" : \"110002\",\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
function to return the mapping list for postal codes; this is the list of corrections/changes which frequently occur in the dataset
def getPostCodeMapping(): postcode_mapping = { "110031v" : "110031", #removed the extra v in the end "2242" : "122001", # manually scanned the OSM file for pincode for same place "10089" : "110085", #checked manually on internet "1100002" : "110002", "1100049" : "110049", ...
[ "def extract_corrections(mapping):\n def is_correction(description):\n _correction = 'correction' in description\n _effect = 'effect' in description\n _bias = 'bias' in description\n\n return _correction or _effect or _bias\n\n corrections = tuple(\n entry['abbreviation']\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
checks whether the tag currently being checked while parsing is a postal code tag
def is_postal(elem):
    return elem.attrib['k'] == "addr:postcode"
[ "def is_zip(elem):\n return elem.attrib['k'] == \"addr:postcode\"", "def hasTagAtAddress(self,tag,addr):\n return HopperLowLevel.hasTagAtAddress(self.__internal_document_addr__,tag.__tag_internal__,addr)", "def is_postcode(query_string, tokens):\n\n if len(tokens) == 1:\n if len(tokens[0]) =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }