query: string (lengths 9 to 9.05k)
document: string (lengths 10 to 222k)
negatives: list (lengths 19 to 20)
metadata: dict
used to choose the manga from the list.
def choose_manga(index): while index == -1: # any message. print("\nSelect the manga to download\n") # This is to display the list of manga available to download. for i in range(1,len(anime_list)): print(str(i)+'. '+anime_list[i]) # This block is to check whether the input given is an integer or not. try: ...
[ "async def manga(self, ctx, *, title):\n cmd = \"manga\"\n await self.fetch_info(ctx, cmd, title)", "def phones_by_manufacturer(mnf,phoneList):\n selectedPhones = []\n for phone in phoneList:\n if phone['mnf'] == mnf:\n selectedPhones.append(phone)\n return selectedPhones"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get_trained_representations(method) > Array, int Gets fully trained representations for given method, cell type and chromosome. Obtains SNIPER and SCA representations from the respective methods. Should contain SNIPER and SCA positions and internal representations.
def get_trained_representations(self, method="hiclstm"): pred_data = pd.read_csv( self.cfg.output_directory + "%s_%s_predictions_chr%s.csv" % (method, self.cell, str(self.chr)), sep="\t") pred_data = pred_data.drop(['Unnamed: 0'], axis=1) representations, start, stop = s...
[ "def get_meth_codes(self):\n try:\n raw_codes = pd.io.json.read_json('https://api.earthref.org/MagIC/method_codes.json')\n except urllib2.URLError:\n return [], []\n except httplib.BadStatusLine:\n return [], []\n code_types = raw_codes.ix['label']\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get_tadbs() > Array Gets TAD boundaries to knock out.
def get_tadbs(self):
    dom_ob = Domains(cfg, chr, mode="ko")
    tf_ob = TFChip(cfg, chr)
    tadbs = dom_ob.get_tad_boundaries(tf_ob, ctcf="negative")
    cum_pos = get_cumpos(self.cfg, self.chr)
    tadbs = tadbs + cum_pos
    tadbs = np.array(tadbs)
    return tadbs
[ "def get_turbines(self):\r\n\r\n return self.turbines", "def list_tiddlers(self, bag):\n self._prepare_twp(bag)\n return self.build_non_js_version(bag)", "def bidders(self):\n return self._bidders", "def tribus(self):\n return self._tribus", "def bake_dynamics():\n obj = doc.Ge...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get_ctcf_indices() > Array Gets CTCF positions to knock out.
def get_ctcf_indices(self): "gets CTCF positions" ctcf_ob = TFChip(cfg, chr) data = ctcf_ob.get_ctcf_data() data = data.filter(['start'], axis=1) "converts to cumulative indices" cum_pos = get_cumpos(self.cfg, self.chr) data["start"] = data["start"] + cum_pos ...
[ "def get_chemical_indices(self):\n return self.indices", "def computeCindices(self):\n\n self.surf_index_C = PUBSlib.computesurfindices(self.nsurf, self.nedge, self.ngroup, self.surf_edge, self.edge_group, self.group_m)\n self.edge_index_C = PUBSlib.computeedgeindices(self.nedge, self.ngroup,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
normalize_embed(representations, zero_embed) > Array, Array Normalize each row separately.
def normalize_embed(self, representations, zero_embed): "normalize representations" for n in range(len(representations)): norm = np.linalg.norm(representations[n, :]) if norm == 0: continue else: representations[n, :] = representations...
[ "def normalize(self, embeddings):\n\n # Calculation is different for matrices vs vectors\n if len(embeddings.shape) > 1:\n return embeddings / np.linalg.norm(embeddings, axis=1).reshape(-1, 1)\n\n return embeddings / np.linalg.norm(embeddings)", "def normalize_embeddings(embeddings...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ko_representations(representations, start, indices, zero_embed, mode) > Array, Array Alter representations to feed to knockout.
def ko_representations(self, representations, start, indices, zero_embed, mode="average"): window = self.cfg.ko_window size = len(representations) if isinstance(indices, (int, np.integer)): indices = [indices] "alter according to mode in config" for ind in indices:...
[ "def _build_representations(self):\n \n N = self.order()\n\n # Build all the Irreducible Representations\n for k in range(0, int(N // 2) + 1):\n self.irrep(k)\n \n # Build all Representations\n\n # add all the irreps to the set of representations alrea...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
compute_kodiff(pred_data, ko_pred_df, indices) > Array Compute difference between predicted contacts after and before knockout.
def compute_kodiff(self, pred_data, ko_pred_df, ind): "initialize" ko_diffs = np.zeros((11,)) win = self.cfg.ko_increment diff = np.arange(0, 101, 10) "compute diff" for j, d in enumerate(diff): "take subset of knockout data in window" if j == 0:...
[ "def j_index(true_labels, predicts):\n if not hasattr(true_labels, 'shape'):\n true_labels = np.asarray(true_labels)\n if not hasattr(predicts, 'shape'):\n predicts = np.asarray(predicts)\n N, L = true_labels.shape\n s = 0.0\n for i in range(N):\n inter = sum((true_labels[i, :] *...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
perform_ko(model) > Array Loads data for chromosome. Loads representations. Alters representations. Gets padding representation. Runs through decoder. Computes mean diff between WT and KO. Saves predictions.
def perform_ko(self, model): cfg = self.cfg "load data" if cfg.run_tal and cfg.hnisz_region == "tal1": self.cfg.get_tal1_only = True data_loader = self.prepare_tal1_lmo2() elif cfg.run_tal and cfg.hnisz_region == "lmo2": self.cfg.get_lmo2_only = True...
[ "def perform_lmo2_ko(self, model):\n\n \"save representations\"\n self.chr = 11\n self.cfg.get_lmo2_only = True\n ko_ob.test_tal1_lmo2(model)\n\n \"perform ko\"\n self.cfg.hnisz_region = \"lmo2\"\n _, ko_pred_df, _ = self.perform_ko(model)\n\n return ko_pred_d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
change_index(list_split) > list, list get locations from index.
def change_index(self, list_split): "format index" temp = [k.split('|')[-1] for k in list_split] chr_list = [] index_list = [] for t in temp: index = t.split(':') chr_list.append(index[0]) index_list.append(index[1].split('-')) "prepa...
[ "def reindexObject(idxs=[]):", "def move_index(self):\n\n index = bpy.context.scene.list_index\n list_length = len (bpy.context.scene.my_list) - 1\n # (index starts at 0)\n new_index = index + (-1 if self.direction == 'UP' else 1)\n bpy.context.scene.list_index = max (0 , min (n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
convert_to_hic_format() > No return object. Assigns positions and chr. Converts 5C to Hi-C-like format.
def convert_to_hic_format(self): if self.cfg.tal_mode == "wt": hek_mat = pd.read_csv(self.hek_file, sep="\t") elif self.cfg.tal_mode == "tal1_ko": hek_mat = pd.read_csv(self.tal1ko_file, sep="\t") elif self.cfg.tal_mode == "lmo2_ko": hek_mat = pd.read_csv(sel...
[ "def load_hic(cfg, chr):\r\n try:\r\n data = pd.read_csv(\"%s%s/%s/hic_chr%s.txt\" % (cfg.hic_path, cfg.cell, chr, chr), sep=\"\\t\",\r\n names=['i', 'j', 'v'])\r\n data = data.dropna()\r\n data[['i', 'j']] = data[['i', 'j']] / cfg.resolution\r\n data[['i', '...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
prepare_tal1_lmo2(cfg) > DataLoader Prepares the dataloader for training.
def prepare_tal1_lmo2(self): "load Hi-C like data" tal_df = pd.read_csv(cfg.hic_path + cfg.cell + "/tal_df.txt", sep="\t") lmo2_df = pd.read_csv(cfg.hic_path + cfg.cell + "/lmo2_df.txt", sep="\t") "preprocess" tal_df = tal_df.drop(['Unnamed: 0'], axis=1) lmo2_df = lmo2_...
[ "def train_tal1_lmo2(self, model):\n\n \"summary writer\"\n timestr = time.strftime(\"%Y%m%d-%H%M%S\")\n writer = SummaryWriter('./tensorboard_logs/' + cfg.model_name + timestr)\n\n \"initialize optimizer and prepare dataloader\"\n self.cfg.get_tal1_only = False\n self.cfg....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
train_tal1_lmo2(model, cfg) > No return object Train model on 5C data from TAL1 and LMO2 regions.
def train_tal1_lmo2(self, model): "summary writer" timestr = time.strftime("%Y%m%d-%H%M%S") writer = SummaryWriter('./tensorboard_logs/' + cfg.model_name + timestr) "initialize optimizer and prepare dataloader" self.cfg.get_tal1_only = False self.cfg.get_lmo2_only = Fal...
[ "def test_tal1_lmo2(self, model):\n\n \"prepare dataloader\"\n data_loader = self.prepare_tal1_lmo2()\n\n \"test model\"\n self.cfg.full_test = True\n self.cfg.compute_pca = False\n self.cfg.get_zero_pred = False\n _, _, _, pred_df, _ = model.test(data_loader)\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
test_tal1_lmo2(model) > DataFrame Test model on 5C data from TAL1 and LMO2 regions.
def test_tal1_lmo2(self, model): "prepare dataloader" data_loader = self.prepare_tal1_lmo2() "test model" self.cfg.full_test = True self.cfg.compute_pca = False self.cfg.get_zero_pred = False _, _, _, pred_df, _ = model.test(data_loader) "save predictio...
[ "def perform_lmo2_ko(self, model):\n\n \"save representations\"\n self.chr = 11\n self.cfg.get_lmo2_only = True\n ko_ob.test_tal1_lmo2(model)\n\n \"perform ko\"\n self.cfg.hnisz_region = \"lmo2\"\n _, ko_pred_df, _ = self.perform_ko(model)\n\n return ko_pred_d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
perform_tal1_ko(model) > DataFrame Performs knockout of selected sites in TAL1 and LMO2 regions.
def perform_tal1_ko(self, model):
    "save representations"
    self.chr = 1
    self.cfg.get_tal1_only = True
    ko_ob.test_tal1_lmo2(model)
    "perform ko"
    self.cfg.hnisz_region = "tal1"
    _, ko_pred_df, _ = self.perform_ko(model)
    return ko_pred_df
[ "def perform_lmo2_ko(self, model):\n\n \"save representations\"\n self.chr = 11\n self.cfg.get_lmo2_only = True\n ko_ob.test_tal1_lmo2(model)\n\n \"perform ko\"\n self.cfg.hnisz_region = \"lmo2\"\n _, ko_pred_df, _ = self.perform_ko(model)\n\n return ko_pred_d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
perform_lmo2_ko(model) > DataFrame Performs knockout of selected sites in TAL1 and LMO2 regions.
def perform_lmo2_ko(self, model):
    "save representations"
    self.chr = 11
    self.cfg.get_lmo2_only = True
    ko_ob.test_tal1_lmo2(model)
    "perform ko"
    self.cfg.hnisz_region = "lmo2"
    _, ko_pred_df, _ = self.perform_ko(model)
    return ko_pred_df
[ "def perform_tal1_ko(self, model):\n\n \"save representations\"\n self.chr = 1\n self.cfg.get_tal1_only = True\n ko_ob.test_tal1_lmo2(model)\n\n \"perform ko\"\n self.cfg.hnisz_region = \"tal1\"\n _, ko_pred_df, _ = self.perform_ko(model)\n return ko_pred_df",...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This script takes in the congressional records CSV file and removes any rows with the year 2001. These crept in somewhere along the way even though our data should only contain years 2006 and on.
def main():
    cong = pd.read_csv(sys.argv[1], parse_dates=["date"])
    cong = cong[cong["date"].dt.year != 2001]  # Removes about 1400 instances
    cong.to_csv("congressionalRecords.csv")
[ "def createFileByYearIgnoreMissingColumn(year, destinationFolder='Formatted Files Without Missing'):\n\trows = []\n\tallRecords = 0\n\t\n\t# Setup database connection\n\tcur = db.cursor()\n\n\t\n\t# Create a header row\n\tindicatorList = ['Country']\n\tindicatorIDs = []\n\n\tcur.execute(\"SELECT DISTINCT indicator_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to handle user command. Receives parsed input and sends data to the Central Server (CS) according to the specified command.
def handleUserCommand(cmd): """ Receive and parse input. """ cmd = cmd.split() if cmd and cmd[0] in client.getCommands(): task = cmd[0] client.connect() # Exit command. if task == "exit": client.disconnect(...
[ "def handle_command(self, data):\n if data is not None:\n command, input = data\n if command == CommandTypes.GUI:\n self.exec_gui(input)\n elif command == CommandTypes.CONNECT:\n self.exec_connect(input)\n elif command == CommandTypes....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to handle Central Server responses. Handles answers received from the Central Server (CS) and displays them.
def handleCSResponses(filename, ptc): cmd = client.receiveData(3) # List command CS response. if cmd == "FPT": # Read space. client.receiveData(1) ptc_count = '' message = '' while True: char = client.receiveDa...
[ "def handle_server_response(my_socket, cmd):\r\n valid_msg, response = protocol.get_msg(my_socket)\r\n if valid_msg:\r\n list_cmd = cmd.split()\r\n if list_cmd[0] == 'DIR':\r\n print(\"The files in {} are: \".format(list_cmd[1]))\r\n print(response)\r\n if list_cmd[0...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the smallest number n that is a multiple of both a and b.
>>> multiple(3, 4)
12
>>> multiple(14, 21)
42
def multiple(a, b):
    import math
    return a * b // math.gcd(a, b)
[ "def least_common_multiple(a, b):\n a, b = min(a, b), max(a, b)\n\n if b % a == 0:\n return b\n\n found = False\n number_i = 1\n multiple_i = a\n while not found:\n if multiple_i % b == 0:\n return multiple_i\n number_i += 1\n multiple_i = a * number_i", "d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the number of unique digits in positive integer n
>>> unique_digits(8675309) # All are unique
7
>>> unique_digits(1313131) # 1 and 3
2
>>> unique_digits(13173131) # 1, 3, and 7
3
>>> unique_digits(10000) # 0 and 1
2
>>> unique_digits(101) # 0 and 1
2
>>> unique_digits(10) # 0 and 1
2
def unique_digits(n):
    uni = 0
    found = {i: False for i in range(10)}
    while n != 0:
        d = n % 10
        if not found[d]:
            found[d] = True
            uni += 1
        n = n // 10
    return uni
[ "def unique_digits(n):\n \"*** YOUR CODE HERE ***\"\n s = []\n while n>0:\n s.append(n%10)\n n=n//10\n set(s)\n return len(set(s))\n\n \"\"\"Bonus Code that counts how many repeating digits in a number\n ud=0\n while n>0:\n x=1\n #print (\"n:\",n)\n nc ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract data from the summarizer and dump it to the reporter
def dump(self, summarizer): pass
[ "def summariseSuiteResult(self, suite):", "def summaries(self, data):\n return data", "def summariseResult(self, test):", "def exportAnalysisDataAfterIteration(self):\n\n\n # Metrics output\n df = pd.DataFrame(self.data[\"Diversity\"])\n df.to_pickle(self.outfolder + \"/metrics ana...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract oblique slice from SimpleITK image. Efficient, because it rotates the grid and only samples the desired slice.
def extract_slice_from_sitk_image(sitk_image, point, Z, X, new_size, fill_value=0): num_dim = sitk_image.GetDimension() orig_pixelid = sitk_image.GetPixelIDValue() orig_direction = sitk_image.GetDirection() orig_spacing = np.array(sitk_image.GetSpacing()) new_size = [int(el) for el in new_size] #...
[ "def slice_image(image):\n rz_image = resize_image(image)\n\n # new image size\n (new_height, new_width) = rz_image.shape\n\n # 3% height\n startY = int(0.03 * new_height)\n\n # 60% height\n endY = int(20 * startY)\n\n # 10% width\n startX = int(0.05 * new_width)\n\n endX = int(new_wid...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a timer at the end of which a new vessel will be generated.
def _create_vessel_generation_timer(self):
    inter_arrival_time = self.inter_arrival_time_sampler()
    self.generation_timer = SimulationTimer(
        duration=inter_arrival_time,
        target_function=self.generate_vessel)
    TimerScheduler.get_instance().schedule(self.generation_timer)
[ "def handle_create_timer(self, message):\n if self.neon_in_request(message):\n content = self._extract_alert_params(message, AlertType.TIMER)\n content[\"kind\"] = int(AlertType.TIMER)\n LOG.info(content)\n self.confirm_alert(\"timer\", content, message)", "def _...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize the vessel and its components and add them to the world.
def generate_vessel(self): # FIXME: This should be dependent on the vessel type vessel_velocity = self.default_speed_knots vessel = self.world.create_entity() spawn_point = random_point_in_polygon(self.spawn_area) vessel_info = self.vessel_info_sampler() velocity = Vel...
[ "def place_vessel(type, x, y):\n vessel = scene.addObject(type, \"gameLogic\")\n vessel.worldPosition = (x, y, 0.0)\n return vessel", "def initGL(self):\t\t\n\n\t\tpass", "def init(self, state: 'SoState') -> \"void\":\n return _coin.SoGLVBOElement_init(self, state)", "def __init__(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes counts and proportions for assignments of tokens to topics.
def write_token_topic_freqs(fpath, topic_counts): topic_props = topic_counts / np.sum(topic_counts) with open(fpath, 'w', newline='') as outfile: fwriter = csv.writer(outfile) fwriter.writerow(['topic', 'token_count', 'token_proportion']) for (t_index, (t_count, t_prop)) in en...
[ "def _update_topic_size(self, documents: pd.DataFrame):\n self.topic_sizes_ = collections.Counter(documents.Topic.values.tolist())\n self.topics_ = documents.Topic.astype(int).tolist()", "def _write_topic(self, topic):\n index_of = self._index\n startElement, endElement, newline = self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get entropy of topics' spread over subreddits to see how subreddit-specific they are.
def get_subreddit_entropy(token_assignment_counts): sub_entropy_counts = [] sub_entropy_props = [] sub_list = [sub for sub in token_assignment_counts.keys()] k = len(token_assignment_counts[sub_list[0]]) for topic_index in range(k): topic_counts = [] topic_props = [] ...
[ "def corrected_discrete_entropy(topTopics):\n uniqueTopTopics = set()\n uniqueTopTopics.update(topTopics)\n sumVar = 0\n for topic in uniqueTopTopics:\n pTopic = topTopics.count(topic)/float(len(topTopics))\n sumVar += pTopic*np.log(pTopic)\n if len(topTopics) > 1: # handles edge case o...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For each topic, return which subreddit had the most tokens assigned to the topic. Do this based on count of tokens as well as on which subreddit had the highest proportion of its tokens assigned to the topic.
def get_subreddits_w_max_topics(token_assignment_counts): max_topic_counts = [] max_topic_props = [] sub_list = [sub for sub in token_assignment_counts.keys()] k = len(token_assignment_counts[sub_list[0]]) for topic_index in range(k): sub_topic_counts = [] sub_topic_prop...
[ "def most_frequent_words(subreddit):\n freq_dists = []\n names = []\n titles_all = []\n for name, data in subreddit.items()[-1:]:\n titles_subs = []\n all_words = ['']\n for sub_id, sub in data.items():\n all_words = \" \".join([fixer(comment, True, False) \n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates and returns a dictionary for each LDA model with relevant model data and initialized places to store counts for each model.
def initialize_model_counters(model_info, subreddit_list): model_counts_dict = {} for training_corpus_type, sample_name, corpus_name, k_list in model_info: for k in k_list: model_name = sample_name + '-' + str(k) model_dir = os.path.join(cons.lda_dir, training_corpus_type,...
[ "def load_models_and_predictors(self):\n self.models = {}\n self.predictors = {}\n model_paths = [path.join(self.data_dir, timepoint + self.embedding_suffix) for timepoint in self.timepoints]\n predictor_handles = [timepoint for timepoint in self.timepoints]\n loaded_models = Para...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function probabilistically assigns each token to a topic from specified models and writes assignment statistics to file.
def get_topic_distributions(model_info, corpus_name, subreddit_list): # initialize where topic counts will be stored for each model indicated in model_info model_dict = initialize_model_counters(model_info, subreddit_list) print() # iterate through each subreddit, each of its documents, and eac...
[ "def initialize_model_counters(model_info, subreddit_list):\r\n model_counts_dict = {}\r\n\r\n for training_corpus_type, sample_name, corpus_name, k_list in model_info:\r\n for k in k_list:\r\n model_name = sample_name + '-' + str(k)\r\n model_dir = os.path.join(cons.lda_dir, trai...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a units_dict from the environment instance if the numerical value of 'factor' is a match for a derived unit defined in the environment instance and the dimensions stored in the units_dict are equal to 'dims'. Returns an empty dict, otherwise.
def _get_units_by_factor( factor: float, dims: Dimensions, units_env: Callable, power: Union[int, float] ) -> dict: ## TODO Write a pow() to handle fractions and rationals new_factor = fraction_pow(factor, -Fraction(1 / power)) units_match = _match_factors(new_factor, units_env()) try: units...
[ "def _dims_quotient(dimensions: Dimensions, units_env: Callable) -> Optional[Dimensions]:\n derived = units_env()[\"derived\"]\n defined = units_env()[\"defined\"]\n all_units = ChainMap(defined, derived)\n potential_inv = None # A flag to catch a -1 value (an inversion)\n quotient = None\n quoti...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Part of the __str__ and __repr__ process. Returns a string representing the SI unit components of the Physical instance extracted from the list of tuples, 'unit_components', using 'repr_format' as given by the _repr_x_ function it was called by. If 'repr_format' is not given, then terminal output is assumed.
def _get_unit_string(unit_components: list, repr_format: str) -> str: dot_operator = "·" # new: · , # old: ⋅ pre_super = "" post_super = "" pre_symbol = "" post_symbol = "" if repr_format == "html": dot_operator = "&#8901;" pre_super = "<sup>" post_super = "</sup>" e...
[ "def __str__(self):\n return unit_format.Generic.to_string(self)", "def _repr_latex_(self):\n return unit_format.Latex.to_string(self)", "def __str__(self):\n return str(self.unitName + ' (' + self.hexLocation + ')')", "def __str__(self):\r\n return str(self.value) + ' ' + self.uni...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns 'symbol' formatted appropriately for the 'repr_format' output.
def _format_symbol(prefix: str, symbol: str, repr_format: str = "") -> str: # if r"\text" or "^" in symbol: # in case pre-formatted latex from unit_string # return symbol symbol_string_open = "" symbol_string_close = "" dot_operator = "·" ohm = "Ω" if repr_format == "html": dot_op...
[ "def __repr__(self):\r\n return \"<Symbol({0}, {1})>\".format(self.id, self.language.name)", "def symbol(self):\n\t\tif self.rot == \"x\":\n\t\t\treturn self.symbol_tx\n\t\telif self.rot == \"z\":\n\t\t\treturn self.symbol_tz\n\t\telse:\n\t\t\treturn self.symbol_ty", "def priceToString(price, symbol=\"sy...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the number in 'power' as a formatted exponent for text display.
def _format_exponent(power: Union[int, float], repr_format: str = "", eps=1e-7) -> str: if power == 1: return "" if abs((abs(power) - round(abs(power)))) <= eps: power = int(round(power)) exponent = str(power) if not repr_format: exponent = _get_superscript_string(exponent) ...
[ "def power(number, exp=2):\n return number ** exp", "def exponent(a, b):\n result_exp = round(a ** b, 4)\n print(\"The result of \" + str(a) + \" raised to the \" + str(b) + \" is \" + str(result_exp))\n return str(a) + \" ** \" + str(b) + \" = \" + str(result_exp)", "def pow_to_mul_string(expr):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns an integer value that represents the exponent of a unit if the dimensions array is a multiple of one of the defined derived units in dimension_keys. Returns None, otherwise. e.g. a force would have dimensions = [1,1,2,0,0,0,0] so a Physical object that had dimensions = [2,2,4,0,0,0,0] would really be a force to...
def _powers_of_derived(dims: Dimensions, units_env: Callable) -> Union[int, float]: quotient_1 = _dims_quotient(dims, units_env) quotient_2 = _dims_basis_multiple(dims) quotient_1_mean = None if quotient_1 is not None: quotient_1_mean = cache_vec_mean(quotient_1, ignore_empty=True) if quoti...
[ "def _dims_quotient(dimensions: Dimensions, units_env: Callable) -> Optional[Dimensions]:\n derived = units_env()[\"derived\"]\n defined = units_env()[\"defined\"]\n all_units = ChainMap(defined, derived)\n potential_inv = None # A flag to catch a -1 value (an inversion)\n quotient = None\n quoti...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a Dimensions object representing the elementwise quotient between 'dimensions' and a defined unit if 'dimensions' is a scalar multiple of a defined unit in the global environment variable. Returns None otherwise.
def _dims_quotient(dimensions: Dimensions, units_env: Callable) -> Optional[Dimensions]: derived = units_env()["derived"] defined = units_env()["defined"] all_units = ChainMap(defined, derived) potential_inv = None # A flag to catch a -1 value (an inversion) quotient = None quotient_result = No...
[ "def get_dimensions(self, units):\n return self.id.to(units), self.od.to(units)", "def _powers_of_derived(dims: Dimensions, units_env: Callable) -> Union[int, float]:\n quotient_1 = _dims_quotient(dims, units_env)\n quotient_2 = _dims_basis_multiple(dims)\n quotient_1_mean = None\n if quotient_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Wraps vec.divide with an lru_cache
@functools.lru_cache(maxsize=None)
def cache_vec_divide(tuple_a, tuple_b, ignore_zeros):
    # Cached wrapper around vec.divide, as the docstring describes
    return vec.divide(tuple_a, tuple_b, ignore_zeros)
[ "def divide(key, it):\n def accumulate(acc, el):\n if key(el):\n acc[0].append(el)\n else:\n acc[1].append(el)\n\n return acc\n\n\n return reduce(accumulate, it, ([], []))", "def __div__(self, *args):\n return _vnl_vectorPython.vnl_vectorUL___div__(self, *ar...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Wraps vec.mean with an lru_cache
@functools.lru_cache(maxsize=None)
def cache_vec_mean(tuple_a, ignore_empty):
    # Cached wrapper around vec.mean, as the docstring describes
    return vec.mean(tuple_a, ignore_empty)
[ "def vm_impl_reduce_mean(self):\n\n def vm_impl(x, axis):\n x = x.asnumpy()\n out = vm.mean(x, axis)\n return Tensor(out)\n\n return vm_impl", "def lru_cache(maxsize=100):\n \n def decorating_function(user_function):\n stats = [0, 0] # Hits, misses\n dat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if d1 and d2 are parallel vectors. False otherwise.
def _check_dims_parallel(d1: Dimensions, d2: Dimensions) -> bool:
    return vec.multiply(d1, vec.dot(d2, d2)) == vec.multiply(d2, vec.dot(d1, d2))
[ "def parallel(self, vector):\n if self.cross(vector) == 0:\n return True\n return False", "def non_parallel(self, vector):\n if (self.parallel(vector) is not True and\n self.perpendicular(vector) is not True):\n return True\n return False", "def _...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns `dims` if `dims` is a scalar multiple of one of the basis vectors. Returns None, otherwise. This is used as a check to see if `dims` contains only a single dimension, even if that single dimension is to a higher power. e.g. if `dims` equals Dimensions(2, 0, 0, 0, 0, 0, 0) then `dims` will be returned. if `dims`...
def _dims_basis_multiple(dims: Dimensions) -> Optional[Dimensions]:
    count = 0
    for dim in dims:
        if dim:
            count += 1
    if count > 1:
        return None
    return dims
[ "def only(self, dims: str or tuple or list or 'Shape'):\n if isinstance(dims, str):\n dims = parse_dim_order(dims)\n if isinstance(dims, (tuple, list)):\n return self[[i for i in range(self.rank) if self.names[i] in dims]]\n elif isinstance(dims, Shape):\n retur...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a string "prefix" of an appropriate value if self.value should be prefixed i.e. it is a big enough number (e.g. 5342 >= 1000; returns "k" for "kilo")
def _auto_prefix(value: float, power: Union[int, float], kg: bool = False) -> str: if value == 0: return "" kg_factor = 0 if kg: kg_factor = 3 prefixes = _prefixes abs_val = abs(value) value_power_of_ten = math.log10(abs_val) value_power_of_1000 = value_power_of_ten // (3 * p...
[ "def _auto_prefix_kg(value: float, power: Union[int, float]) -> str:\n prefixes = _prefixes\n if abs(value) >= 1:\n for prefix, power_of_ten in prefixes.items():\n if abs(value) >= (power_of_ten / 1000.) ** abs(power):\n return prefix\n else:\n reverse_prefixes = sor...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Just like _auto_prefix but handles the one special case for "kg" because it already has a prefix of "k" as an SI base unit. The difference is the comparison of 'power_of_ten'/1000 vs 'power_of_ten'.
def _auto_prefix_kg(value: float, power: Union[int, float]) -> str: prefixes = _prefixes if abs(value) >= 1: for prefix, power_of_ten in prefixes.items(): if abs(value) >= (power_of_ten / 1000.) ** abs(power): return prefix else: reverse_prefixes = sorted(prefixes...
[ "def _auto_prefix(value: float, power: Union[int, float], kg: bool = False) -> str:\n if value == 0:\n return \"\"\n kg_factor = 0\n if kg:\n kg_factor = 3\n prefixes = _prefixes\n abs_val = abs(value)\n value_power_of_ten = math.log10(abs_val)\n value_power_of_1000 = value_power_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if 'value' is some form of NaN, whether float('nan') or a numpy or pandas NaN.
def is_nan(value: Any) -> bool:
    # Test for numpy.nan and float('nan')
    if not value == value:
        return True
    else:
        return False
[ "def is_nan(value: Any) -> bool:\n import numpy as np\n\n try:\n return np.isnan(value)\n except TypeError:\n return True", "def is_nan(val):\n return isinstance(val, float) and isnan(val)", "def has_nan_values(self):\n import numpy as np\n return np.any(np.isnan(self.as_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Raises 'a' to the power of 'b' with the intention of returning a Fraction if the result can be expressed as a Fraction. Returns a float otherwise.
def fraction_pow(a: Fraction, b: Fraction) -> Union[Fraction, float]: if isinstance(b, int): return a**b else: c = a**b if isinstance(c, Fraction): return 1 / c x, y = c.as_integer_ratio() d = Decimal(str(x / y)) m, n = d.as_integer_ratio() ret...
[ "def rational_div(a,b):\r\n if isinstance(a,Rational) and isinstance(b,Rational):\r\n return Fraction(a,b)\r\n return div(a,b)", "def power(a, b):\n\n if b == 0:\n return 1\n\n return a * power(a, (b - 1))", "def log_frac(a, b):\n return safe_log(a) - safe_log(b)", "de...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialises an Agent object. _y, _x from web scraping are passed into the constructor. The agent gets a copy of the environment and a copy of the list of all other agents. Sets (y, x) randomly in [0, 300] if the (y, x) arguments are missing. The store attribute is set to 0.
def __init__(self,environment, agents,_y= None ,_x = None):#doesnt need _y and _x setting if generating random values below #self._x = random.randint(0,10) #changed from 300 to check share_with_neighbour #self._y = random.randint(0,10) if (_x == None): self._x = random.ra...
[ "def reset(self,**kwargs):\n self.rng = np.random.default_rng()\n try: \n # self.nb_agents = kwargs['nb_agents']\n self.nb_targets = kwargs['nb_targets']\n except:\n # self.nb_agents = np.random.random_integers(1, self.num_agents)\n self.nb_targets = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Defines move() behaviour of Agent. The agent's y and x each move randomly by ±1, with a torus solution to deal with boundary effects.
def move(self):
    if random.random() < 0.5:
        self._y = (self._y + 1) % 300
    else:
        self._y = (self._y - 1) % 300
    if random.random() < 0.5:
        self._x = (self._x + 1) % 300
    else:
        self._x = (self._x - 1) % 300
[ "def plane_move(self): \r\n \r\n #Move bacteria in xy plane \r\n # Generate random number from which xy movement will be decided\r\n randnum = random.random()\r\n # 5% chance of bacteria moving in -ve x direction\r\n if randnum <= self.prob_west:...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Agent eats 10 if the environment is >10 at (y, x), or the remainder if the environment is <=10. The amount eaten is added to the store.
def eat(self):
    if self.environment[self._y][self._x] > 10:
        self.environment[self._y][self._x] -= 10
        self.store += 10
    else:
        # take whatever remains and empty the cell
        self.store += self.environment[self._y][self._x]
        self.environment[self._y][self._x] = 0
[ "def eat(self, store_capacity, consumption_rate):\n \n if self.environment[self.y][self.x] > consumption_rate:\n self.environment[self.y][self.x] -= consumption_rate\n self.environment[self.y][self.x] = round(\n self.environment[self.y][...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks the distance to each other agent using distance_between. Takes a "neighbourhood" argument. Prevents the agent from checking against itself.
def share_with_neighbours(self, neighbourhood): #print(neighbourhood) #testing initial setup for agent in self.agents: if agent != self: distance = self.distance_between(agent) #print("distance between", self._x, self._y, " : ", # ...
[ "def share_with_neighbours(self, neighbourhood):\n for agent in self.agents:\n if agent == self:\n continue\n else:\n distance = self.distance_between(agent) \n if distance <= neighbourhood:\n sum = self.store + agent.store...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Overrides __str__ to return the agent's _x and _y coordinates and its store.
def __str__(self): return "agent-_x: {0}, agent-_y: {1}, store-agent: {2}".format(self._x, self._y, self.store)
[ "def __str__(self):\n return 'The agents (x,y) coordinates are (' + str(self.x) + ',' + \\\n str(self.y) + ') and the agent is storing ' + str(round(self.store,1))\\\n + ' units'", "def __str__(self) -> str:\n\n # Builds a string representation of the point and returns it\n retu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Wolf randomly traverses ±3 along the x axis.
def traverse(self):
    if random.random() < 0.5:
        self._x = (self._x + 3) % 300
    else:
        self._x = (self._x - 3) % 300
[ "def move(self):\r\n if random.random() < 0.5:\r\n self._y = (self._y + 1) % 300\r\n else:\r\n self._y = (self._y - 1) % 300\r\n \r\n if random.random() < 0.5:\r\n self._x = (self._x + 1) % 300\r\n else:\r\n self._x = (self._x - 1) % 300", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If the wolf is closer than 10 units in the x and y directions and has eaten fewer than 3 sheep in the 24-iteration period, the wolf moves to the agent's (y, x) position and deletes (eats) the agent. Prints the number of sheep left and the total sheep eaten.
def chase(self, num_of_sheep, sheep, total_sheep_eaten): for sheep_i in sheep: if abs(sheep_i._x - self._x)< 10 and abs(sheep_i._y - self._y) < 10: #print("before eating, total sheep eaten: ", total_sheep_eaten) if s...
[ "def healTeamates(self):\n x, y, z = es.getplayerlocation(self.userid)\n team = es.getplayerteam(self.userid)\n player = sourcerpg.players[self.userid]\n if team not in (2, 3):\n return\n if player is not None:\n level = player[skillName]\n i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Total products in the top 5 common brands for every retailer.
def runQueryatBrandLevel(): df = pd.DataFrame() query1 = "SELECT brand,count(id) AS totalProduct from productinfo where date=%s group by brand ORDER BY count(id) DESC " results1 = sql.read_sql(query1, con=conn, params=[date1]) results1['retailer']=retName[0] df = df.append(results1) ...
[ "def reduce_products(data, top_percent): \n # number of products\n n_of_products = data.product_id.nunique()\n\n # output\n print('Total Number of Products: {0}'.format(n_of_products))\n\n # 20% is the regular percentage of reducing the products\n top_20 = int(n_of_products * top_percent)\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Append visits and errors from a different response into this response.
def append(self, other): if not isinstance(other, SIRIResponse): raise TypeError("Expected a SIRIResponse object") self.errors += other.errors for stop_code, visits in other.visits.items(): if stop_code in self.visits: raise ValueError("Merging requests fo...
[ "def _adapt_response(self, response):\n errors, meta = super(ServerError, self)._adapt_response(response)\n return errors[0], meta # single error instead of array", "def add_error(self, error: any):\n if ERRORS_KEY in self.response:\n self.response[ERRORS_KEY].append(error)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
define_op is a callable that translates tokens into objects. bin_op and un_op provide functions for performing binary and unary operations.
def evaluate(self, define_op, bin_op=_BINARY_OPERATOR_MAP, un_op=_UNARY_OPERATOR_MAP): if self.right: # binary or implicit operator op_text = self.operator[1] if self.operator else '' if op_text not in bin_op: raise DefinitionSyntaxError('missing bina...
[ "def makeBinOp(opdict, next) :\n op = reduce(operator.or_, [\n a(Token('op', k)).expectsMsg(repr(k)) >> const(v)\n for k,v in opdict.iteritems()\n ])\n return (next + many((op + next) >> tuple)) \\\n >> unarg(eval)", "def op(operator):\n ret...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function creates input_files and input_classes for testing. The same tif file is used for all samples.
def create_sample_data(num_files): filename = "/home/timhu/test_tif/l8_median_india_vis_500x500_402382.0.tif" possible_classes = list(range(16)) input_files = np.empty((num_files,), dtype=object) input_labels = np.zeros((num_files,), dtype=np.int64) for f in range(num_files): input...
[ "def generate_testfiles(self):\n print(\"Opening files...\")\n data = self.open_test_files()\n print(\"Assemble and concat...\")\n testdata, labels = self.assemble_and_concat(**data)\n print(\"Removing nans and saving...\")\n self.remove_nans(testdata, labels)\n data...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the config based on values in 'config'
def set_config(self, config):
    for key in config.keys():
        self.config[key] = config[key]
[ "def set_config(**kwargs) -> None:\n _conf.update(kwargs)", "def apply_config(self, config):\n raise NotImplementedError", "def define_config(self, config: str) -> None:\n self.config = config", "def set_config():\n env = get_current_environment()\n app.config.from_object('server.config...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Downloads the blocklist specified by 'url' in the config
def download_list(self, url=None): def on_retrieve_data(data, current_length, total_length): if total_length: fp = float(current_length) / total_length if fp > 1.0: fp = 1.0 else: fp = 0.0 self.file_progress...
[ "def downloadChunks(url):\n global download_list\n baseFile = os.path.basename(url)\n\n \n #move the file to a more uniq path\n\n os.umask(0002)\n\n temp_path = os.getcwd()\n\n try:\n\n file = baseFile\n if os.path.exists(file):\n print baseFile, \"already exists\"\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Imports the downloaded blocklist into the session
def import_list(self, blocklist): def on_read_ip_range(start, end): """Add ip range to blocklist""" self.blocklist.add_rule(start, end, BLOCK_RANGE) self.num_blocked += 1 def on_finish_read(result): """Add blocklist to session""" self.core.ses...
[ "def load_block_table():\n global UCDBlocks\n f = open(os.path.join(os.path.dirname(__file__), BLOCK_FILE), 'rb')\n UCDBlocks = load(f)\n f.close()", "def download_list(self, url=None):\n def on_retrieve_data(data, current_length, total_length):\n if total_length:\n fp...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add ip range to blocklist
def on_read_ip_range(start, end):
    self.blocklist.add_rule(start, end, BLOCK_RANGE)
    self.num_blocked += 1
[ "def iprange(start_ip, end_ip):\n queue = Queue.Queue()\n ip_range = []\n start = list(map(int, start_ip.split(\".\")))\n end = list(map(int, end_ip.split(\".\")))\n tmp = start\n \n ip_range.append(start_ip)\n while tmp != end:\n start[3] += 1\n for i in (3, 2, 1):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tries to autodetect the blocklist type
def auto_detect(self, blocklist): self.config["list_compression"] = detect_compression(blocklist) self.config["list_type"] = detect_format(blocklist, self.config["list_compression"]) log.debug("Auto-detected type: %s compression: %s", self.config["list_type"], self.config["list_compression"]) ...
[ "def get_block_types(self) -> list:\n \n block_types = list(self._config_per_block_type().keys())\n if len(block_types) > 1 and 'generic' in block_types:\n block_types.remove('generic')\n return block_types", "def inspectblocktype(self, block_type):\n try:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse the given ical_file, and create objects in Guidebook via the API
def parse(self, ical_file): cal = self.get_ical_object(ical_file) # Determine what timezone these events should be interpreted as. self.x_wr_timezone = self.get_ical_timezone_info(cal) # Determine the date range of events we care about limit_start = self.today - timedelta(days=s...
[ "def parse_ics_file(filename):\n with open(filename, \"r\", encoding=\"utf-8\") as file:\n return VCalendar.from_ical(file.read())", "def get_ical_object(self, ical_file):\n # get a string representation of the ical_file if we don't already have one\n if not isinstance(ical_file, basestrin...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the timezone info of a calendar object parsed by Calendar.from_ical(). Return the 'X-WR-TIMEZONE' if present, None otherwise.
def get_ical_timezone_info(self, cal): ical_xwr_timezone = cal.get('X-WR-TIMEZONE', None) if ical_xwr_timezone: ical_xwr_timezone = pytz.timezone(ical_xwr_timezone.rstrip('/')) # remove trailing slashes return ical_xwr_timezone
[ "def get_timezone(self):\n try:\n return self.user_data['Bootstrap']['Timezone']\n except KeyError:\n return None", "def _get_adjtime_timezone():\n adjtime_file = \"/etc/adjtime\"\n if os.path.exists(adjtime_file):\n cmd = [\"tail\", \"-n\", \"1\", adjtime_file]\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return True if session_start_time and session_end_time are within limit_start and limit_end. False otherwise.
def is_within_time_and_date_limits(self, session_start_time, session_end_time, limit_start, limit_end): return session_start_time > limit_start and session_end_time < limit_end
[ "def is_current_time_between(start_hour, start_min, end_hour, end_min):\r\n now = datetime.now()\r\n start = datetime(year=now.year, month=now.month, day=now.day, hour=start_hour, minute=start_min, second=0)\r\n end = datetime(year=now.year, month=now.month, day=now.day, hour=end_hour, minute=end_min, seco...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a Calendar object from an ical_file. Return that parsed object. Raise InvalidiCalendarFile on bad iCal input.
def get_ical_object(self, ical_file): # get a string representation of the ical_file if we don't already have one if not isinstance(ical_file, basestring): ical_file.seek(0) ical_file_string = ical_file.read() else: ical_file_string = ical_file try: ...
[ "def parse_ics_file(filename):\n with open(filename, \"r\", encoding=\"utf-8\") as file:\n return VCalendar.from_ical(file.read())", "def get_calendar(self, calendar_id):\n return # osid.calendaring.Calendar", "def load_cal(entry: dict) -> Calendar:\n\n if \"cache\" in entry and entry[\"cac...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Triggers the pedestrian_tracking process on an RTSP link using a thread.
def trigger_process(cfg, args):
    try:
        t = Thread(target=pedestrian_tracking, args=(cfg, args))
        t.start()
        return jsonify({"message": "Pedestrian detection started successfully"})
    except Exception:
        return jsonify({'message': "Unexpected exception occurred in process"})
[ "def thread_handler():\n\n print(\"thread_handler_init\")\n while True:\n try:\n if listener.can_record and not listener.capture_frame:\n listener.recording = True\n time.sleep(.4)\n print(\"recording\")\n while listener.hand_vel < ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert a positive integer num into an 8-bit bit vector.
def bin_array(num):
    # source: https://stackoverflow.com/a/47521145/1103264
    return np.array(list(np.binary_repr(num).zfill(8))).astype(np.int8)
[ "def bit_vec_transform(num):\n vec = [0]*10\n vec[int(num)] = 1\n return vec", "def eightbits(number):\n # useful only so far in context of a forwardmask or any bitmask.\n prefix, value = bin(number).split('b')\n return '0b%0.8i' % (int(value),)", "def __to_bits(data_byte):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The edge sizes are the number of vertices within each edge.
def edge_sizes(self) -> typing.Tuple[int]: return copy.deepcopy(self._edge_sizes)
[ "def number_of_edges(self) -> int:\n count = 0\n for vertice in self.__graph:\n count += len(self.__graph[vertice])\n return count // 2", "def getNumEdges(self): \n return self.__E", "def get_edge_list_len(self):\n return self.edge_list_len", "def edge_sum(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a nonrandom multihypergraph with the given degree sequence. This instantiates a bipartite graph, using the degree sequence and edge sequence as the bipartite degree sequence. The result is a nonrandom multihypergraph. To sample a multihypergraph approximately uniformly at random, the switch chain can be ap...
def from_degree_sequence( degree_sequence: typing.Sequence[int], edge_sequence: typing.Sequence[int] ) -> "SwitchMultiHypergraph": # argument checks if not random_graph.utils.bipartite_degree_sequence_graphical(degree_sequence, edge_sequence): raise ValueError("Degree sequence is...
[ "def random(cls, maxA=20, maxB=40, prob=.05):\n assert prob >= 0 and prob <= 1\n G = nx.algorithms.bipartite.generators.random_graph(maxA, maxB, p=prob)\n G.remove_nodes_from([n for n in G if G.degree(n) == 0])\n G = max((G.subgraph(cc) for cc in nx.connected_components(G)), key=lambda c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts the current hypergraph object into a bipartite graph via canonical realisation. This chooses a labelling for the edges of the hypergraph, and uses this to create the associated bipartite graph. Note that because edges in the hypergraph are unlabelled (unlike the vertices in the bipartite graph), this can resul...
def to_bipartite_graph(self, shuffle_edges: bool = True) -> "SwitchBipartiteGraph": # get edges in desired order (this determines labelling) hyperedges = list(self.edges) if shuffle_edges: random.shuffle(hyperedges) else: hyperedges = sorted(tuple(sorted(edge)) fo...
[ "def make_false_label_edges(self, dict_class_label_edge):\n data_path = self.args.data_name + '_false_edges_balanced_{}.pickle'.format(self.args.false_per_true)\n if os.path.exists(os.path.join(self.args.data_name, data_path)):\n with open(os.path.join(self.args.data_name, data_path), 'rb')...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test whether the multihypergraph is simple. A multihypergraph is simple if no two edges are the same.
def simple(self) -> bool: return random_graph.utils.all_unique(tuple(sorted(neighbourhood)) for neighbourhood in self.edges)
[ "def is_multigraph(self):\n s = set()\n for (a1, s1), (a2, s2) in self.bonds:\n if (a1, a2) in s:\n return True\n else:\n s.add((a1, a2))\n return False", "def is_pseudomanifold(self):\n if not self.is_pure():\n return Fals...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Scan for existing Harmontown episodes, find the latest one by file name (not file date), and return it.
def last_episode(): highest_episode = 125 # The one before the first regular video episode available online highest_date = datetime.date(2014, 11, 3) for filename in os.listdir(HARMONTOWN_DIRECTORY): matches = re.match('Harmontown - S01E(\d+) - (\d+)-(\d+)-(\d+)\.mp4', filename) if matches...
[ "def determine_next_episode(\n\t\tself):\n\n\t\tresult = dict()\n\n\t\tsjmanager.log.log('Trying to determine next show to watch')\n\n\t\t# First up, check which season and which episode is in the watch cache.\n\t\trow = self.sql.execute(\"\"\"SELECT \n\t\t\tseason_title,\n\t\t\tepisode_title,\n\t\t\tfinished \n\t\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write invitation record to DynamoDB.
def write_inv_record(uid_1, uid_2):
    d = {}
    d2 = {}
    now = datetime.datetime.now().isoformat()
    d['uid'] = uid_1
    d['timestamp'] = now
    d['partner'] = uid_2
    d2['uid'] = uid_2
    d2['timestamp'] = now
    d2['partner'] = uid_1
    mytable.put_item(d, overwrite=True)
    mytable.put_item(d2, ov...
[ "def dynamo_put(data):\n logger.info('dynamo_put: uploading event data to DynamoDb...')\n\n # dynamodb\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table(settings.dynamodb_table)\n # create a new item (row)\n # source: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/dyn...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Append invitation records to DynamoDB.
def append_inv_records(uids):
    # Get current timestamp, in "YYYY-MM-DD HH:MM:SS" format.
    today = datetime.datetime.today()
    timestamp = str(today.year) + '-' + str(today.month) + '-' + str(today.day) \
        + ' ' + str(today.hour) + ':' + str(today.minute) + ':' + \
        str(today.second)
    ...
[ "def append_transaction_details(data):\n dynamodb = boto3.resource('dynamodb', region_name='us-east-1')\n table = dynamodb.Table(DYNAMO_DB_NAME)\n print(\"data to append\", data)\n result = table.update_item(\n Key={'username': str(data['username'])},\n UpdateExpression=\"SET statements = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
True if the Broadcast is to the node in any way (group or direct).
def broadcast_is_to_this_node(self, b: Broadcast):
    return (b.is_to_all() or
            b.to == self.network_addr or
            b.to_secure_group() in self.joined_secure_groups or
            b.to_gen_group() in self.joined_groups)
[ "def _is_broadcast(self):\n pass", "def _bcastIsOwn(self, host):\n netinfo = NetworkInfo()\n local_addresses = netinfo.get_local_addresses()\n return host in local_addresses", "def test_broadcast(self):\n if _debug: TestVLAN._debug(\"test_broadcast\")\n\n # three elemen...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a payload, the to, and the from; returns the decrypted and b64-decoded payload. Used to decrypt a payload to this node or a secure group it may be a part of.
def payload_decryptor(self, payload:bytes, to, frm): if to.startswith(b'*'): # no extra encryption, just b64 decode return base64_decode(payload) if len(to) <= 1: # at this point len(to) > 1 raise ExceptionWithResponse(RespCode.PRSER, "Invalid 'to' address.", back_to=frm) ...
[ "def decode_payload(cls, payload: bytes) -> MsgGenericPayload:\n pass", "def crds_decode(msg):\n if isinstance(msg, dict) and \"crds_encoded\" in msg:\n ascii = msg[\"crds_payload\"]\n b64 = ascii.encode(\"ascii\")\n compressed = base64.b64decode(b64)\n utf8 = gzip.decompress...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Encrypts and b64 encodes the constructed payload (pre_payload) given the broadcast information.
def payload_encryptor(self, b:Broadcast, pre_payload:bytes): if b.to_gen_group(): #includes 'all' (*) return base64_encode(pre_payload) if b.to_secure_group(): group_name = b.to_secure_group() if group_name in self.joined_secure_groups: group_key =...
[ "def encrypt_payload(self, payload):\n encrypter = AES.new(\n self.key,\n AES.MODE_CBC,\n self.encryption_meta.payload\n )\n payload = self._pad_payload(payload)\n return encrypter.encrypt(payload)", "def encrypt(self, payload):\n encrypted_data ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a Broadcast object and makes a TransmittableBroadcast object, which includes the broadcast encoded, encrypted, and ready to transmit.
def make_transmittable_broadcast(self, broadcast:Broadcast) -> TransmittableBroadcast: encrypted = self.crypto.sign_and_encrypt_with_network_key( broadcast.encode('0.1', self.payload_encryptor)) # x01x01 means: version 1, normal broadcast ret...
[ "def _make_broadcast_socket(self):\n self.broadcast_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.broadcast_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST,1)", "def send_broadcasts(self):\n\n @self.conflict_resolver.managed_transaction\n def get_ready_broad...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A decorator that makes a class inherit documentation from its parents.
def inherit_doc(cls): for name, func in vars(cls).items(): if name.startswith("_"): continue if not func.__doc__: for parent in cls.__bases__: parent_func = getattr(parent, name, None) if parent_func and getattr(parent_func, "__doc__", None): ...
[ "def inherit_function_doc(parent):\n def doc_wrapper(method):\n func_name = method.__name__\n assert (func_name in dir(\n parent)), '%s.%s is not a method! Cannot inherit documentation' % (\n parent.__name__, func_name)\n\n # Set the documentation. This only ever happen...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a start time in string or datetime.datetime format, use the spacepy library to download the solar wind data for the time range [t - lag_hr, t], where lag_hr is the integer or float hour lag. The dbase kwarg is passed directly into spacepy.omni.get_omni to get hourly solar wind values. to_df converts the solar wind da...
def get_solar_wind_data(t, lag_hr, dbase='QDhourly', to_df=True): if isinstance(t, str): t = dateutil.parser.parse(t) omni_times = pd.date_range(t-timedelta(hours=lag_hr), t, freq='h') try: data = spacepy.omni.get_omni(omni_times.to_pydatetime(), dbase=dbase) except ValueError as err: ...
[ "def import_forecast(t_start,t_end,hours = \"all\",info = (\"GHI\",),\\\n grid_list = \"all\",sub_h_freq = 'all',\\\n sub_D_freq = 'all'):\n root = return_to_root()\n #Sanitycheck for different input \n if \"Fortrolig_data\" not in os.listdir(root):\n raise(O...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Solves a system of linear equations
def solve_linear_equations(*args: List[RealNumber]) -> List[RealNumber]:
    # Check to see if solution is underdetermined (num_eq < num_var)
    if len(args) < len(args[0]) - 1:  # -1 because the RH side is not a variable
        raise UnderDeterminedError
    m = Matrix(list(args))
    # Put Matrix in Reduced-Row E...
[ "def _solve_system(self):\n result, residual = optimize.nnls(self._lgs_A.toarray(), np.asarray(self._lgs_b))\n\n self._lgs_sol = result", "def solve(self):\r\n\r\n # A pre-allocation for the matrix used to solve the system\r\n matrix = []\r\n\r\n # Each unknown must be put into ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that remapping doesn't affect all-qubit quantum errors.
def test_remap_all_qubit_quantum_errors(self):
    model = NoiseModel()
    error1 = depolarizing_error(0.5, 1)
    error2 = depolarizing_error(0.5, 2)
    model.add_all_qubit_quantum_error(error1, ['u3'], False)
    model.add_all_qubit_quantum_error(error2, ['cx'], False)
    remapped_model = ...
[ "def test_remap_quantum_errors(self):\n model = NoiseModel()\n error1 = depolarizing_error(0.5, 1)\n error2 = depolarizing_error(0.5, 2)\n model.add_quantum_error(error1, ['u3'], [0], False)\n model.add_quantum_error(error2, ['cx'], [1, 2], False)\n\n remapped_model = remap...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test remapping of quantum errors.
def test_remap_quantum_errors(self):
    model = NoiseModel()
    error1 = depolarizing_error(0.5, 1)
    error2 = depolarizing_error(0.5, 2)
    model.add_quantum_error(error1, ['u3'], [0], False)
    model.add_quantum_error(error2, ['cx'], [1, 2], False)
    remapped_model = remap_noise_model...
[ "def test_remap_all_qubit_quantum_errors(self):\n model = NoiseModel()\n error1 = depolarizing_error(0.5, 1)\n error2 = depolarizing_error(0.5, 2)\n model.add_all_qubit_quantum_error(error1, ['u3'], False)\n model.add_all_qubit_quantum_error(error2, ['cx'], False)\n\n remap...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test remapping of nonlocal quantum errors.
def test_remap_nonlocal_quantum_errors(self):
    model = NoiseModel()
    error1 = depolarizing_error(0.5, 1)
    error2 = depolarizing_error(0.5, 2)
    model.add_nonlocal_quantum_error(error1, ['u3'], [0], [1], False)
    model.add_nonlocal_quantum_error(error2, ['cx'], [1, 2], [3, 0], False)
    ...
[ "def test_remap_quantum_errors(self):\n model = NoiseModel()\n error1 = depolarizing_error(0.5, 1)\n error2 = depolarizing_error(0.5, 2)\n model.add_quantum_error(error1, ['u3'], [0], False)\n model.add_quantum_error(error2, ['cx'], [1, 2], False)\n\n remapped_model = remap...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test remapping of all-qubit readout errors.
def test_remap_all_qubit_readout_errors(self):
    model = NoiseModel()
    error1 = [[0.9, 0.1], [0.5, 0.5]]
    model.add_all_qubit_readout_error(error1, False)
    remapped_model = remap_noise_model(model, [[0, 1], [1, 2], [2, 0]], warnings=False)
    self.assertEqual(remapped_model, model)
[ "def test_remap_readout_errors(self):\n model = NoiseModel()\n error1 = [[0.9, 0.1], [0.5, 0.5]]\n error2 = [[0.8, 0.2, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0.1, 0.9]]\n model.add_readout_error(error1, [1], False)\n model.add_readout_error(error2, [0, 2], False)\n\n re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test remapping of readout errors.
def test_remap_readout_errors(self):
    model = NoiseModel()
    error1 = [[0.9, 0.1], [0.5, 0.5]]
    error2 = [[0.8, 0.2, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0.1, 0.9]]
    model.add_readout_error(error1, [1], False)
    model.add_readout_error(error2, [0, 2], False)
    remapped_model...
[ "def test_remap_all_qubit_readout_errors(self):\n model = NoiseModel()\n error1 = [[0.9, 0.1], [0.5, 0.5]]\n model.add_all_qubit_readout_error(error1, False)\n\n remapped_model = remap_noise_model(model, [[0, 1], [1, 2], [2, 0]], warnings=False)\n self.assertEqual(remapped_model, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test reduction mapping of noise model.
def test_reduce_noise_model(self):
    error1 = depolarizing_error(0.5, 1)
    error2 = depolarizing_error(0.5, 2)
    roerror1 = [[0.9, 0.1], [0.5, 0.5]]
    roerror2 = [[0.8, 0.2, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0.1, 0.9]]
    model = NoiseModel()
    model.add_all_qubit_quantum_err...
[ "def test_reduce_remapped_noise_model(self):\n error1 = depolarizing_error(0.5, 1)\n error2 = depolarizing_error(0.5, 2)\n roerror1 = [[0.9, 0.1], [0.5, 0.5]]\n roerror2 = [[0.8, 0.2, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0.1, 0.9]]\n\n model = NoiseModel()\n model.add_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test reduction and remapping of noise model.
def test_reduce_remapped_noise_model(self):
    error1 = depolarizing_error(0.5, 1)
    error2 = depolarizing_error(0.5, 2)
    roerror1 = [[0.9, 0.1], [0.5, 0.5]]
    roerror2 = [[0.8, 0.2, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0.1, 0.9]]
    model = NoiseModel()
    model.add_all_qubit_qu...
[ "def test_reduce_noise_model(self):\n error1 = depolarizing_error(0.5, 1)\n error2 = depolarizing_error(0.5, 2)\n roerror1 = [[0.9, 0.1], [0.5, 0.5]]\n roerror2 = [[0.8, 0.2, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0.1, 0.9]]\n\n model = NoiseModel()\n model.add_all_qubit...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return whether a name is valid as an entry name. Checks a name against an assortment of DOS-like filename rules.
def is_valid_entry_name(filename):
    allowed = string.ascii_letters + string.digits + "_^$~!#%&-{}@`'()"
    reserved = ['CON', 'PRN', 'AUX', 'CLOCK$', 'NUL',
                'COM0', 'COM1', 'COM2', 'COM3', 'COM4', 'COM5', 'COM6', 'COM7', 'COM8', 'COM9',
                'LPT0', 'LPT1', 'LPT2', 'LPT3', 'LPT4', 'LPT5',...
[ "def isValidName(self, filename):\n if filename in RESERVED_WORDS:\n return False\n tnam = filename[:].lower()\n return NAME_MATCH(tnam) is not None", "def is_valid_file_name_linux(name:str) -> bool:\r\n return not any( c in invalid_linux_char for c in name )", "def is_valid_f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a tuple of size information given a list of entries. Projects the metadata size and raw data size of a GOB if it were created with the given list of entries.
def get_gob_size(entries):
    # Header + Catalog Offset + Catalog
    meta_size = GOB_HEADER_SIZE + GOB_CATALOG_OFFSET_SIZE + (GOB_CATALOG_ENTRY_SIZE * len(entries))
    # Raw Data
    data_size = sum([len(entry[1]) for entry in entries])
    return (meta_size, data_size)
[ "def _get_sizes(self) -> int:\n pass", "def list_sizes(location=None):", "def _parse_sizes(self):\n with open(self._data_set, 'r') as f:\n # First line\n return tuple([int(v) for v in f.readline().split()])", "def sizes(self):\n return np.array([entry.data[\"size\"] ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes a GOB container given a path and a list of GOB entries.
def write(filename, entries):
    meta_size, data_size = get_gob_size(entries)
    if (meta_size + data_size) > GOB_MAX_SIZE:
        raise GOBException('Cannot create GOB because it would exceed maximum size.')
    for entry in entries:
        if not is_valid_entry_name(entry[0]):
            raise GOBException('"' ...
[ "def test_write_gro():\n B = Bead(1,'PHE','BB',1,np.array([0.,0.,0.]),np.array([0.,0.,0.]),'SC5')\n G = Gro('testing gro construction',1,[B],[30.3,30.3,30.3])\n G.write('test.gro')", "def node_json_making(path, output=\"sp_nodes.txt\"):\n print(\"making nodes file...\")\n with open(output, 'w') as ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function creates the GenBank file from the GFF, protein, and nucleotide sequences.
def writeGbkFile(output_file_name, contig_dict, sample_name, nucleotide_seq_dict, protein_seq_dict):
    date = genbankDate()
    output_file_name_tmp = output_file_name + ".tmp"
    outputfile = open(output_file_name_tmp, 'w')
    count = 0
    outputStr = ""
    for key in contig_dict:
        first = True
        i...
[ "def create_genbank(fasta_file, UPLOAD_FOLDER, phage_id, payload):\n headers = payload.get_json()\n gb_file = os.path.join(UPLOAD_FOLDER, phage_id + \".gb\")\n genome = SeqIO.read(fasta_file, \"fasta\").seq\n genome = Seq(str(genome), IUPAC.unambiguous_dna)\n record = SeqRecord(genome, id='', name=he...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calls Impala HS2 API's GetExecSummary method on the given query handle
def get_exec_summary(self, operation_handle, session_handle):
    req = ImpalaHiveServer2Service.TGetExecSummaryReq(operationHandle=operation_handle,
                                                      sessionHandle=session_handle)
    # GetExecSummary() only works for closed queries
    try:
        self.close_operation(operation_handle)
    except QueryServerException,...
[ "def test_execute_sum_query(self):\n url = \"?\"\n query_params = self.mocked_query_params(url, AzureInstanceTypeView)\n handler = AzureReportQueryHandler(query_params)\n\n filters = self.ten_day_filter\n for filt in handler._mapper.report_type_map.get(\"filter\"):\n qf...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calls Impala HS2 API's GetRuntimeProfile method on the given query handle
def get_runtime_profile(self, operation_handle, session_handle):
    req = ImpalaHiveServer2Service.TGetRuntimeProfileReq(operationHandle=operation_handle,
                                                          sessionHandle=session_handle)
    # GetRuntimeProfile() only works for closed queries
    try:
        self.close_operation(operation_handle)
    except QuerySer...
[ "def advapi32_GetCurrentHwProfile(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"lpHwProfileInfo\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "async def on_profile_query(self, args: JsonDict) -> JsonDict:\n\n if not self.hs...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks for the existence of a piece by name/type.
def exists_piece(conn, piecetype, piecename):
    return bool(
        conn.execute(
            """SELECT * FROM pieces WHERE piecetype=? AND piecename=?;""",
            (piecetype, piecename),
        ).fetchall()
    )
[ "def CheckType(player, type):\n print \"Checking\", player.name + \"'s hand:\"\n for elem in type:\n print \"Testing\", elem.tag\n if not elem.tag in TYPEELEMS.keys():\n print elem.tag, \"type subelement not recognized.\"\n continue\n elif not TYPEELEMS[elem.tag](pla...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a piece entry of the given type in the conn db. conn is a connection object from sqlite3.connect.
def create_piece(conn, piecetype, piecename, content, catchall=""):
    if not exists_piece(conn, piecetype, piecename):
        conn.execute(
            """INSERT INTO pieces VALUES (?,?,?,?)""",
            (piecetype, piecename, content, catchall),
        )
        conn.commit()
[ "def sqlite_tab(sqlite_conn):\n def create_tab(tab_name):\n cursor = sqlite_conn.cursor()\n cursor.execute('''CREATE TABLE aapl_1_day( \n time TEXT, \n open REAL, \n high REAL, \n low REAL, \n close REAL,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make sure the superuser can access the API
def test_superuser_access(self):
    self.client.force_authenticate(self.superuser)
    response = self.client.get("/api/robot/")
    assert response.status_code == 200
[ "def check_authorization(self):\n pass", "def test_get_as_suporte(self):\n self.client.force_login(self.user_suporte)\n self.response = self.client.get(r('category:sub_create'))\n self.assertEqual(403, self.response.status_code)", "def admin_require_permission():\n if not current_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make sure an authenticated user can access the API
def test_authenticated_user_access(self):
    self.client.force_authenticate(self.user)
    response = self.client.get("/api/robot.json/")
    assert response.status_code == 200
[ "def check_authorization(self):\n pass", "def authorize(self):\n return True", "def check_auth(self):\n\n if not self.authenticated:\n raise UserNotAuthenticatedError()", "def handle_missing_authorization(self, *args, **kwargs):\n return False", "def test_get_with_auth...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify we can get back CSV results
def test_result_csv_format(self):
    self.client.force_authenticate(self.superuser)
    response = self.client.get("/api/robot.csv")
    expected = [
        "id,outcome,date,duration,repo_name,branch_name,source_file,test_name,robot_tags,robot_keyword,message",
        f"2,Pass,{self.today},0.1,re...
[ "def test_export_csv_to_file(self):\n pass", "def test_export_csv_in_job(self):\n pass", "def test_remote_csv(self):\n with hxl.data(URL_CSV, InputOptions(timeout=10)) as source:\n self.compare_input(source)", "def test_CSV(self):\n CSVReporter(self.filename).write_repor...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify superuser sees all results
def test_testresult_filter__as_superuser(self):
    self.client.force_authenticate(self.superuser)
    response = self.client.get("/api/robot.json")
    data = response.json()
    assert data["count"] == 3
[ "def test_superuser_can_see_any_profile(self):\n SUPERUSER = 0\n self.client.login(\n username=self.users[SUPERUSER].get('username'),\n password=self.users[SUPERUSER].get('password')\n )\n for user in User.objects.all():\n response = self.client.get('/1.0...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load fam file (PLINK sample information file) into a df
def load_sample_info(fam_file, categorical_phenotype):
    df = pd.read_table(fam_file, header=None, sep=" ")
    df.columns = ["FID", "IID", "IID_father", "IID_mother", "sex", "phenotype"]
    # Update 'sex'
    df["sex"] = df["sex"].astype("category")
    df["sex"] = df["sex"].cat.rename_categories({1: "male", 2: "fe...
[ "def read_feat(file):\n df = pd.read_csv(file, sep=\" \", names=[\"node_id\"] + list(range(0, 1364)))\n return df", "def load_hep_data(self,variables2plot=[]):\n file = uproot.open(self.hep_data)\n data = file[self.treename]\n self.df = data.pandas.df( self.features+['target']+var...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load bim file (PLINK extended MAP file) into a list of variants
def load_variant_info(bim_file, max_variants):
    variant_info = pd.read_table(bim_file, header=None, sep="\t")
    # Note 'position' is in centimorgans, 'coordinate' is what pandas-genomics refers to as 'position' (in base-pairs)
    variant_info.columns = [
        "chromosome",
        "variant_id",
        "positi...
[ "def load_all_variants( vcf_file, variants ):\n\t\n\twith open( vcf_file, \"r\" ) as f:\n\t\tline = f.readline()\n\t\twhile line:\n\t\t\tif line[0] != '#':\n\t\t\t\tparts = line.strip().split('\\t')\n\t\t\t\ttry:\n\t\t\t\t\tvariants[ parts[0] + '_%_' + parts[1].zfill( 9 ) ]\n\t\t\t\texcept KeyError:\n\t\t\t\t\tvari...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }