query      string (lengths 9 to 9.05k)
document   string (lengths 10 to 222k)
negatives  list (lengths 19 to 20)
metadata   dict
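Each row pairs a natural-language query with a positive code document and a list of hard negatives. A minimal sketch of iterating the triplets, assuming the Hugging Face datasets library and a hypothetical local JSONL export of these rows:

    # Hedged sketch: "triplets.jsonl" is a hypothetical local export, not a real path.
    from datasets import load_dataset

    ds = load_dataset("json", data_files="triplets.jsonl", split="train")
    for row in ds:
        query, document = row["query"], row["document"]
        for negative in row["negatives"]:  # 19 to 20 negatives per row
            triplet = (query, document, negative)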
test Precision with name = Precision
def test_Precision1(): metric = fluid.metrics.Precision("Precision") # Generate predictions and labels preds = [[0.1], [0.7], [0.8], [0.9], [0.2], [0.2], [0.3], [0.5], [0.8], [0.6]] labels = [[0], [1], [1], [1], [1], [0], [0], [0], [0], [0]] preds = np.array(preds) labels = np.array(labels) metric.upd...
[ "def linearPrecision():\n pass", "def precision(key):\n return physical_constants[key][2] / physical_constants[key][0]", "def _get_precision(model, X_train, X_test, K=10):\n print(\"Fitting model...\")\n model.fit(X_train.T)\n test_precision = precision_at_k(model, X_train, X_test, K=K)\n prin...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
test Recall with name=Recall
def test_Recall1(): metric = fluid.metrics.Recall("Recall") # Generate predictions and labels preds = [[0.1], [0.7], [0.8], [0.9], [0.2], [0.2], [0.3], [0.5], [0.8], [0.6]] labels = [[0], [1], [1], [1], [1], [0], [0], [0], [0], [0]] preds = np.array(preds) labels = np.array(labels) metric.update(preds...
[ "def getRecall(label, confusionMatrix):\n ##### START OF YOUR CODE HERE ######\n return 0\n ##### END OF YOUR CODE HERE ######", "def test_retrain():\n resp = client.retrain(\"5909\")\n assert resp == True", "def recall(true, predicted):\n # Create variables to track true positive and false n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
test DetectionMAP overlap_threshold=0.8 ap_version='11point'
def test_DetectionMAP3(): train_program = fluid.Program() startup_program = fluid.Program() with fluid.unique_name.guard(): with fluid.program_guard(train_program, startup_program): detect_res = fluid.layers.data( name='detect_res', shape=[10, 6], ...
[ "def test_map_05(self):\n annotations = torch.Tensor([[250.8200, 168.2600, 320.9300, 233.1400, 0.0000],\n [435.3500, 294.2300, 448.8100, 302.0400, 2.0000],\n [447.4400, 293.9100, 459.6000, 301.5600, 2.0000],\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
switch two nodes, carrying edges along.
def switch_nodes(self,one,two): new = self.clone_node(one) self.del_node(one) self.clone_node(two,one) self.del_node(two) self.clone_node(new,two) self.del_node(new)
[ "def switch_nodes(self):\n\n\t\t# Get current info\n\t\tnode_A=self.node_A\n\t\tport_A=self.port_A\n\t\tapp_id_A=self.app_id_A\n\t\tnode_B=self.node_B\n\t\tport_B=self.port_B\n\t\tapp_id_B=self.app_id_B\n\t\tDF=self.DF\n\n\t\t# Update\n\t\tself.node_A=node_B\n\t\tself.port_A=port_B\n\t\tself.app_id_A=app_id_B\n\t\t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a subgraph of g which is a maximal tree.
def max_tree(g,rootvertex=0): vertexset = set(g.nodes()) tree = xgraph() while set(tree.nodes())!=vertexset: targets = vertexset - set(tree.nodes()) sources = tree.nodes()[:] done = False while not done: source = sources.pop() for edge in g.dict[source...
[ "def biggest_component(G):\n Gc = max(nx.connected_component_subgraphs(G), key=len)\n print(\"Larget component size: {}\".format(len(Gc.nodes)))\n return Gc", "def to_junction_tree(g: nx.Graph):\n return nx.maximum_spanning_tree(g, weight='weight', algorithm='kruskal')", "def min_maximal_matching(G)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Loads all files from a folder
def _load_folder(self, folder): for f in os.listdir(folder): self._load_file(os.path.join(folder, f))
[ "def process_folder(self, path):\n filenames = os.listdir(path)\n for filename in filenames:\n self.process_file(os.path.join(path, filename))", "def load_all_blocks(folder_path):\n\n import os\n import numpy as np\n gt_files = np.sort(os.listdir(folder_path))\n\n block_list=[...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Splits the single-block data (all zones in the same dataset) into a single file per zone
def split_zones(self, folder=None): if len(self.data): if folder is None: folder = "/".join(self.loaded_files[0].split("/")[:-1]) if not os.path.isdir(folder): os.makedirs(folder) grouped = self.data.groupby('Name...
[ "def subdivide(ctx, input, zones_f, outdir, prefix, zone_field, buffer_distance, verbose):\n if verbose:\n warnings.filterwarnings('default')\n else:\n warnings.filterwarnings('ignore')\n\n t0 = time.time()\n command = click.get_current_context().info_name\n\n click.echo((msg.STARTING)....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a SiteSWI list dict, translates the entries to WAN network names, and returns a list of unique entries.
def get_unique_wan_networks_from_swi_dict(swi_dict, swi_to_wn_dict, id_wan_network_name_dict): wan_network_name_list = [] # iterate to the SWI, map SWI to name, add to list. for siteid, swi_list in swi_dict.iteritems(): for site_wan_interface in swi_list: wan_network_id = swi_to_wn_dic...
[ "def sw_link_map(net):\n aps = net.aps\n # for ap in aps:\n # print 'net ap: ',ap\n links_obj=net.links\n link_objs_filtered={}\n for obj in links_obj:\n if \"sta\" not in str(obj):\n if \"wifi\" not in str(obj):\n intf1=int(str(obj.intf1).split('-')[0].replace('ap',''...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given two lists of sites, return a SiteSWI dict for each, containing only the SiteSWIs matching those lists.
def site_swi_dicts(siteid_list_a, siteid_list_b, site_swi_all_dict): a_dict = {} b_dict = {} for siteid in siteid_list_a: entry = site_swi_all_dict.get(siteid, None) if entry: a_dict[siteid] = entry for siteid in siteid_list_b: entry = site_swi_all_dict.get(siteid,...
[ "def update_calculations(site_a_wan_networks, site_b_wan_networks, site_a_swi_dict, site_b_swi_dict,\n wn_to_swi_dict, wan_network_name_id_dict):\n return_site_a_swi_dict = {}\n return_site_b_swi_dict = {}\n swi_lista = []\n swi_listb = []\n\n # Convert WN names to swi IDs\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to take site SWI dicts and current anynets, and calculate stats and the new anynets needed.
def calculate_vpn_links(site_a_swi_dict, site_b_swi_dict, all_anynets, swi_to_site_dict, site_id_to_role_dict): calculated_anynets = {} current_anynets = {} counted_sitea = {} counted_siteb = {} counted_swia = {} counted_swib = {} new_anynets = {} statistics = { 'current_anynet...
[ "def update_calculations(site_a_wan_networks, site_b_wan_networks, site_a_swi_dict, site_b_swi_dict,\n wn_to_swi_dict, wan_network_name_id_dict):\n return_site_a_swi_dict = {}\n return_site_b_swi_dict = {}\n swi_lista = []\n swi_listb = []\n\n # Convert WN names to swi IDs\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes updated WN name lists for List A/List B; returns updated SiteSWI dicts containing only the WAN networks in the given lists
def update_calculations(site_a_wan_networks, site_b_wan_networks, site_a_swi_dict, site_b_swi_dict, wn_to_swi_dict, wan_network_name_id_dict): return_site_a_swi_dict = {} return_site_b_swi_dict = {} swi_lista = [] swi_listb = [] # Convert WN names to swi IDs for wn_name_...
[ "def sw_link_map(net):\n aps = net.aps\n # for ap in aps:\n # print 'net ap: ',ap\n links_obj=net.links\n link_objs_filtered={}\n for obj in links_obj:\n if \"sta\" not in str(obj):\n if \"wifi\" not in str(obj):\n intf1=int(str(obj.intf1).split('-')[0].replace('ap',''...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
>>> from pathlib import Path >>> entry, = Processor.parse_entries([Path("doctor who 2005 s03e01 YTS.mkv")]) >>> entry['title'] 'doctor who' >>> entry['year'] 2005 >>> entry['season'] 3 >>> entry['episode'] 1 >>> entry['type'] 'episode'
def parse_entries(entries: List[Path]): return [guessit(entry.name) for entry in entries]
[ "def _ParseFileEntry(self, mediator, file_entry):", "def test_Dataheap_Video_002_parseFilename_02(self):\n files = [ # (path, result: (title, season, episode, subtitle ))\n (u\"A_Title/Season 1/02 Subtitle1.mpg\", (u\"A Title\", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
prodcons receives "prod" to create a producer, or "cons" to create a consumer; num is how many you want to create
def getinputs(prodcons, num): allowed_type = -1 for i in range(0, num): allowed_types = set() """ while len(allowed_types) != 5: #allowed_type = random.randint(0, 5) allowed_type = \ int(input('Enter the allowed_type (1 to 5) to add to the %s %...
[ "def produce_consume():\n logger = logging.getLogger(__name__)\n\n even_consumer = actors.Printer.start(\"Even Printer\")\n odd_consumer = actors.Printer.start(\"Odd Printer\")\n producer = NumberGenerator.start(\"RNG\")\n producer.proxy().register(even_consumer, 'even number')\n producer.proxy()....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
View for the home page; renders 'blog_index.html', which displays the blog posts in the database ordered by date
def blog_index(request): posts = Post.objects.all().order_by('-created_on') context = { "posts": posts, } return render(request, "blog/blog_index.html", context)
[ "def blog_index(request):\n posts = Post.objects.all().order_by('-created_on')\n context = {\n \"posts\": posts,\n }\n return render(request, \"blog_index.html\", context)", "def get(self):\r\n blogposts = BlogPosts.query().order(-BlogPosts.posted_on)\r\n self.render(\"blog.html\"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
View for the posts associated with the category passed in; renders 'blog_category.html'
def blog_category(request, category): posts = Post.objects.filter( categories__name__contains=category ).order_by( '-created_on' ) context = { "category": category, "posts": posts } return render(request, "blog/blog_category.html", context)
[ "def blog_posts_by_category(request, category_id):\n\n category = get_object_or_404(Category, pk=category_id)\n return blog_generic_view(request, list_detail.object_list,\n queryset=category.post_set.all())", "def posts_by_category(request, category_slug):\n list_of_posts = Pos...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
View for a single post accessed by its primary key; renders 'blog_detail.html'
def blog_detail(request, pk): post = Post.objects.get(pk=pk) context = { "post": post, } return render(request, "blog/blog_detail.html", context)
[ "def post_detail(request, blog_slug):\n blog_post = Post.objects.get(blog_slug=blog_slug)\n\n context = {\n 'blog_post':blog_post\n }\n\n return render(request=request, context=context, template_name=\"pages/blog_detail.html\")", "def get(self):\r\n blogposts = BlogPosts.query().order(-B...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Normalizes a (partial) credit card number to a common format. '1234' -> 'xxxxxxxxxxxx1234', '4277 19xx xxxx 1234' -> '427719xxxxxx1234', '42771234' -> '4277xxxxxxxx1234', '4277x' -> '4277xxxxxxxxxxxx'
def normalize(value): # Step 1: remove all whitespace value = "".join(value.split()) # Step 2: split by non-numbers parts = list( re.split(r'[^0-9]+', value) ) # Step 3: determine the number of padding chars to insert padding = max(0, 16 - len("".join(parts))) # Step 4: Assemble list of parts new_par...
[ "def normalize_phone(phone):\n raw_digits = re.sub('[^0-9]', '', phone)\n return \"%s-%s-%s\" % (raw_digits[0:3], raw_digits[3:6], raw_digits[6:10])", "def short_account_number(account_number: str) -> str:\n return f\"{account_number[:2]}...{account_number[-4:]}\"", "def normalize_postal_code(postal_co...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Embed a list of n documents/words into an n-dimensional matrix of embeddings
def embed_documents(self, documents: List[str], verbose: bool = False) -> np.ndarray: truncated_docs = [self._truncate_document(doc) for doc in documents] embeddings = self.embedding_model.encode(truncated_docs, show_progress_bar=verbose) return embeddings
[ "def embed_words(self, words: List[str], verbose: bool = False) -> np.ndarray:\n embeddings = self.embedding_model.encode(words, show_progress_bar=verbose) \n return embeddings", "def generate_embeddings(self):\n docs = self.DB_manger.get_all()\n if docs is None:\n return...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Embed a list of n words into an n-dimensional matrix of embeddings
def embed_words(self, words: List[str], verbose: bool = False) -> np.ndarray: embeddings = self.embedding_model.encode(words, show_progress_bar=verbose) return embeddings
[ "def make_embedding_matrix(texts, size):\n token_texts = list(map(tokenize, texts))\n model = gensim.models.word2vec.Word2Vec(sentences=token_texts, size=size, min_count=1)\n return model", "def words_embedding(words: list, glove):\n\n word_embeddings = map(partial(get_word_vec, glove=glove), words)\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validate an activation key which should allow the corresponding user to register if valid. Returns whether or not the key is valid.
def register_invite(self, activation_key): if SHA1_RE.search(activation_key): try: profile = self.get(activation_key=activation_key) except self.model.DoesNotExist: return False if not profile.activation_key_expired(): return T...
[ "def test_activation_invalid(self):\n # Wrong key\n self.failIf(SignupManager.activate_user('wrong_key'))\n\n # At least the right length\n invalid_key = 10 * 'a1b2'\n self.failIf(SignupManager.activate_user(invalid_key))", "def test_activation_valid(self):\n user = Signu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates and returns a new BetaInviteProfile with necessary activation key logic.
def create_invite_profile(self, email): salt = sha.new(str(random.random())).hexdigest()[:5] activation_key = sha.new(salt+email).hexdigest() profile = self.create(email=email, activation_key=activation_key) # Send the thank you email profile.thank_...
[ "def create_profile(self, user):\n salt = hashlib.sha1(six.text_type(random.random()).encode('ascii')).hexdigest()[:5]\n salt = salt.encode('ascii')\n email = user.email\n if isinstance(email, six.text_type):\n username = email.encode('utf-8')\n activation_key = hashlib...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Emails all persons who have signed up for the beta at least time_days days ago. Useful for bringing in users based on how early they signed up.
def invite_specific_age(self, time_days): pass
[ "def email_users_sched():\n with app.app_context():\n # get the dates of the beginning and end of the week\n monday_date = date.today() - timedelta(days=3)\n friday_date = date.today() + timedelta(days=1)\n # get a subquery of the userids for user's that have signed up for a test this...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Invites all the remaining people who have not yet been emailed. Useful for closing out the beta and allowing everyone to join.
def invite_all(self): pass
[ "async def _inv_list(self, ctx):\n invites = await self.bot.invites_from(ctx.message.server)\n if len(invites) == 0:\n await self.bot.say(\":warning: There currently no invites active.\")\n else:\n await self.bot.say(\"Invites: {0}\".format(\", \".join(map(str, invites))))", "def application_fo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
An activation key is expired only after it is used by the user, and will be reset to the string "ALREADY_ACTIVATED". Reactivating is not permitted, and so this method returns true in this case.
def activation_key_expired(self): return self.activation_key == self.ACTIVATED
[ "def key_expired(self, user):\n activated = RegistrationProfile.objects.get(user=user)\n date_joined = user.date_joined\n expiration_date = datetime.timedelta(days=self.days)\n if date_joined + expiration_date <= datetime_now():\n activated.activation_key = self.ACTIVATED\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Renders the stored activation key void by resetting it to the string "ALREADY_ACTIVATED".
def destroy_key(self): self.activation_key = self.ACTIVATED self.save() return True
[ "def activate(request, activation_key, template_name='registration/activate.html'):\n\tactivation_key = activation_key.lower() # Normalize before trying anything with it.\n\taccount = RegistrationProfile.objects.activate_user(activation_key)\n\n\tif (account):\n\t\tlogger.info(\"%s - account-activate: user %s\" %(r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Show the start splash screen for the given time. The regular UI is shown afterwards.
def show_start_screen(self, time_ms: int) -> None: image_widget = ImageWidget("images/splash_screen.png") v_layout = QVBoxLayout() h_layout = QHBoxLayout() h_layout.addWidget(image_widget) v_layout.addLayout(h_layout) widget = QWidget() widget.setLayout(v_layout) ...
[ "def splash_page(self) -> None:\r\n self.get(self.base_url + '/splash.html')", "def _show_start_screen(self):\n bg = self.model.get_level_start_image()\n self._display_game_image(bg)", "def showStartScreen(self):\n self.current_screen = self.start_screen\n self.game_over ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handle menu action for bell sound selection
def on_bell_select(self): print("on_bell_select was triggered")
[ "def menu_selected(self):\n if not self.muted:\n self.menu_selected_sound.play()", "def on_menuitem_select (self, id):\n\t\t\n\t\tpass", "def display_change_wav_menu():\r\n print(\"### Change Wave Menu ###\\n\"\r\n \"Welcome to change wave file menu! \\n\"\r\n \"1. reverse...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handle menu action for volume settings
def on_volume_setting(self): print("on_volume_setting was triggered")
[ "def volume_control(self):\n\n volume = self.volume_prompt.text()\n if self.PushBtn.isChecked():\n direction = \"D\"\n elif self.PullBtn.isChecked():\n direction = \"P\"\n else:\n raise Exception(\"Somethings wrong in the volume_control function\")\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all positional distances in an n-gram sequence
def get_ngram_distances(ngram): res = [] for item in set(ngram): # Get the position index for all occurrences of an n-gram pos_idx = [i for i,_ngram in enumerate(ngram) if _ngram==item] if len(pos_idx) > 1: res = res + get_distances(pos_idx) return res
[ "def _compute_distances(self, spacy_en_dir=\"en\"):\n nlp = spacy.load(spacy_en_dir)\n df = self._base.get_all_text()\n print(\"tokenizing\")\n tqdm.pandas()\n df[\"noun_tokens\"] = df.sentence.progress_apply(lambda text: ReviewApp._graph_tokenize(text, nlp))\n print(\"buil...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run inference on RGBD data to visualize and save point cloud.
def main(): parser = argparse.ArgumentParser(description="Get points clouds and save them.") parser.add_argument("save_dir", help="Save destination dir.") args = parser.parse_args() camera_data = CameraData() run_inference = RunInference() camera_visualizer = CameraVisualizer() camera_data...
[ "def run_inference(self, path):\n self.vgg_model.eval()\n if use_gpu:\n self.vgg_model = self.vgg_model.cuda()\n img = Image.open(path).convert('RGB').copy()\n # img = img.resize((900, 1200))\n img = np.asarray(img)\n shape = img.shape\n img = img[:, :, ::...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Apply the template to the given context data. Overrides the generate method to add support for subreports. Delegates to the standard generate method for everything else.
def generate(self, *args, **kwargs): def _subreport(field=None, filename=None, source=None, filepath=None, source_format=None, encoding=None, context=None): """ Method that can be referenced from the template to include subreports. When called it will process the file as a te...
[ "def _generate_context_data(self, context):\n raise NotImplementedError", "def render_template(self, template_name, output_name, context):\n raise NotImplementedError()", "def oo_render(self, context=None):\n if context is None: context = {}\n self.log(\"Generating report: step 1...\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method that can be referenced from the template to include subreports. When called it will process the file as a template, write the generated data to a temp file, and return a reference (filename) to this output file for later usage. The OOTemplate will use this data, after the main template is generated, to do a...
def _subreport(field=None, filename=None, source=None, filepath=None, source_format=None, encoding=None, context=None): # Field is a binary field with a base64 encoded file that we will # use as source if it is specified source = field and base64.decodestring(field) or source ...
[ "def generate(self, *args, **kwargs):\n def _subreport(field=None, filename=None, source=None, filepath=None, source_format=None, encoding=None, context=None):\n \"\"\"\n Method that can be referenced from the template to include subreports.\n When called it will process the ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Wrapper, around the render method of the original template, that adds support for subreports and format conversion. The wrapper is required as these operations need to be performed, using OpenOffice, after the document generation by the template.
def oo_render(self, context=None): if context is None: context = {} self.log("Generating report: step 1...") # Generate the stream from the template (Relatorio) data = self.generate(**context).render().getvalue() self.log("...step 1 done.") # # Next steps need ...
[ "def generate(self, *args, **kwargs):\n def _subreport(field=None, filename=None, source=None, filepath=None, source_format=None, encoding=None, context=None):\n \"\"\"\n Method that can be referenced from the template to include subreports.\n When called it will process the ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize the default values and try to connect to OpenOffice (or even start it).
def __init__(self, openoffice_port=8100, autostart_openoffice=True, logger=None): import uno self.logger = logger self.port = openoffice_port self.autostart = autostart_openoffice # # Try to connect with retries (to start OpenOffice if autostart_openoffice is enabled) ...
[ "def testStartTwoOpenOfficeWithTheSameAddress(self):\n second_openoffice = OpenOffice()\n second_openoffice.loadSettings(\"localhost\", 4090,\n self.working_path,\n self.office_binary_path,\n self.uno_path,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Opens a document with OpenOffice
def open_document(self, file_name): import uno file_url = uno.systemPathToFileUrl(abspath(file_name)) if os.environ.get('OSTYPE', False) == 'FreeBSD': # Workaround for problems with OpenOffice 3.1 on FreeBSD file_url = file_url.encode('UTF-8') load_properties = { ...
[ "def open_document(self, fileURL):\n self.interface.factory.not_implemented(\"DocumentApp.open_document()\")", "def openOffice(self, rtf_filename):\n try:\n # Connection with Word\n word_app = win32com.client.Dispatch('Word.Application')\n # Hide\n word_ap...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Saves an OpenOffice document to a file. The file format will be detected (based on the file extension) and the document will be converted to that format (see EXPORT_FILTER_MAPS).
def save_document(self, document, file_name, close_document=True): import uno file_url = uno.systemPathToFileUrl(abspath(file_name)) if os.environ.get('OSTYPE', False) == 'FreeBSD': # Workaround for problems with OpenOffice 3.1 on FreeBSD file_url = file_url.encode...
[ "def export_as():\n\tglobal export_path\n\tfiles = [(\"Text files\",\"*.docx\"),\n\t\t\t (\"PDF files\",\"*.pdf\"),\n\t\t\t (\"all files\",\"*.*\")] \n\ttry:\n\t\texport_path = asksaveasfile(filetypes = files, defaultextension = files).name \n\texcept:\n\t\treturn\n\t\n\tget_file(export_path)", "def DocSave(self,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Inserts the given file into the current document. The file contents will replace the placeholder text.
def replace_text_with_file_contents(self, document, placeholder_text, file_name): import uno file_url = uno.systemPathToFileUrl(abspath(file_name)) search = document.createSearchDescriptor() search.SearchString = placeholder_text found = document.findFirst( search ) whi...
[ "def _InsertText(self, key, text):\n with self._TempDirContext() as tempdir:\n file_path = os.path.join(tempdir, 'tempfile')\n osutils.WriteFile(file_path, text)\n self._Insert(key, file_path)", "def insert_file(self, pic_path, file_name):\n with open(pic_path, 'rb') as image:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper to create a tuple of PropertyValue items from a dictionary.
def make_properties(self, properties_dict): import uno props = [] for key in properties_dict: prop = uno.createUnoStruct("com.sun.star.beans.PropertyValue") prop.Name = key prop.Value = properties_dict[key] props.append(prop) return tuple(p...
[ "def get_hashable_value_tuple_from_dict(d):\n return tuple(map(\n lambda k: tuple(d[k]) if isinstance(d[k], list) else d[k],\n sorted(d.keys())))", "def _dict_to_tuples(dictionary: dict) -> tuple:\n\n return tuple(sorted(dictionary.items()))", "def _from_dict_to_pairs(d):\n result = [...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract a TensorMetadata NamedTuple describing `result`.
def _extract_tensor_metadata(result: torch.Tensor) -> TensorMetadata: shape = result.shape dtype = result.dtype requires_grad = result.requires_grad stride = result.stride() memory_formats = { torch.contiguous_format, torch.channels_last, torch.channels_last_3d, } m...
[ "def get_tensor_info(self, uid: str):\n value, order = self.tensors[uid]\n return (value.shape, order)", "def __extract_fn(self, tfrecord):\n feature_description = {\n 'image': tf.io.FixedLenFeature([], tf.string),\n 'label': tf.io.FixedLenFeature([], tf.int64),\n 'size': tf....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Wraps handle_response and initializes a NASAResponse object.
def handle_search_response(response): response = handle_response(response) return NASAResponse(response)
[ "def _handle_response(self,\n response_type: str,\n response: capnp._DynamicStructBuilder):\n self._RESPONSE_HANDLER.handle_response(response_type, response)", "def process_response(self, request, response):\n return self.__process_awesome_response(req...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the functional S for a given velocity U by combining metric and penalty terms
def calc_S(self, U): if not self.populated: self.populate_arrays(U) return self.metric() + self.penalty()
[ "def get_Sv(J, N1, N2, Ut):\n ##== Calculate Su ==##\n ## Express Ut in terms of Z_{t} and Z_{t+1}\n MKt, MHt, Kt, Ht, X1t, X2t, X2tL1 = symbols('MKt MHt Kt Ht X1t X2t X2tL1')\n MKt1, MHt1, Kt1, Ht1, X1t1, X2t1 = symbols('MKt1 MHt1 Kt1 Ht1 X1t1 X2t1')\n \n T,_ = linear_eq_to_matrix([Ut],\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The penalty term, or matching functional, of S.
def penalty(self): diff = self.Q[-1] - self.qB return 1/(2*self.sigma_sq)*assemble(inner(diff,diff)*dx)
[ "def penalty(x):\n\treturn \tx**4 - 4*x**2 - 2*x + 10", "def compute_penalty(self, batch, x):\n if self.decoder_l2 == 0.0:\n return 0\n\n if self.decoder_l2_type == 'schlichtkrull-l2':\n return self.scoring_function.s_penalty(batch, x)\n else:\n return self.sc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Solve q hat at each timestep
def calc_Qh(self): qh = self.qh_at_t1() # Find q hat at each time step by stepping backwards in time from qh1 p = TestFunction(self.V) qh_prev = TrialFunction(self.V) a = inner(p, qh_prev)*dx A = assemble(a) qh_prev = Function(self.V) # unknown at next...
[ "def psolver(ham,q=0.,T=arange(0,2,.02),dt0=.01,n=5,aa=1,init=0,talk='some',plt=False):\n\tN=2*n+1\t\t\t\t\t\t\t\t\t# Size of matrices\n\tc0 = zeros((len(T),N),dtype=complex)\t# Matrix of coefficients\n\t\n\tk = ham['k']; p_g = ham['p_g']; A = ham['A']; y = ham['y']; w = ham['w'];\n\t\n\tif init is None:\n\t\tc0[0,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
New figure with precalculated axis bounds and aspect 1
def new_figure(self): f = plt.figure() f.subplots_adjust(bottom=0.1,top=0.97,left=0.06,right=0.98) plt.axis(self.axis_bounds) ax = plt.gca() ax.set_aspect(1) plt.draw()
[ "def newfig(scale=1.0, ratio=0):\n\n #width in x*\\textwidth scale (0,1]\n fig = plt.figure(figsize=figsize(scale, ratio))\n ax = fig.add_subplot(111)\n return fig, ax", "def gearth_fig(self):\n\t\taspect = np.cos(np.mean([self.llcrnrlat, self.urcrnrlat]) * np.pi/180.0)\n\t\txsize = np.ptp([self.urcrn...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plot a single curve q (or anything else). Because this splits the curve into x and y and then does plot(x, y), it's probably only useful for curves.
def plot(self, Q): self.new_figure() plt.plot(*self.split_array(Q))
[ "def _plot_curve(x, y, title, x_lab, y_lab, save_path=False, show=False):\n plt.title(title)\n plt.plot(x, y, 'k')\n plt.plot([(0, 0), (1, 1)], 'r--')\n plt.xlim([-0.1, 1.1])\n plt.ylim([-0.1, 1.1])\n plt.ylabel(x_lab)\n plt.xlabel(y_lab)\n if save_path is not...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Split a numpy array, or UFL coefficient form into X, Y numpy vectors.
def split_array(self,q): if isinstance(q, np.ndarray): x = 1.0*q else: x = 1.0*q.vector().array() X = x[0:np.size(x)//2] Y = x[np.size(x)//2: np.size(x)] return X,Y
[ "def unpack(self):\n all_data = [vector.data for vector in self.X]\n arrays = zip(*all_data)\n\n return arrays", "def split_array(self, which):\n assert which in \"xrb\"\n # self.dummy.toarray(\"x\")\n # self.coarse.toarray(which)\n # x = self.dummy.x\n # xc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate S for a velocity U and Immersion class params. This is called by the BFGS optimiser
def S(U, *args): kwargs = args[0] #hack to get kwargs back out.. im = Immersion(**kwargs) return im.calc_S(U)
[ "def calc_S(self, U):\n if not self.populated:\n self.populate_arrays(U) \n\n return self.metric() + self.penalty()", "def sgd(self):\n import math\n for i, j, r in self.samples:\n # Computer prediction and error\n prediction = self.get_rating(i, j)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate dSdu for a velocity U and Immersion class params. This is called by the BFGS optimiser
def dS(U, *args): kwargs = args[0] #hack to get kwargs back out.. im = Immersion(**kwargs) return im.calc_dS(U)
[ "def _get_d_u_d_params(self, parameters):\n # Setup\n gradient = {param: 0 for param in parameters}\n as_dict = self.model.parameters.as_dict()\n\n # Get source location\n trajectory = self.model.get_trajectory(self.dataset.time)\n u_ = np.sqrt(trajectory.x**2 + trajectory....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find a piece in the board, and return the row and column indexes.
def where_is(piece, state): for row_index, row in enumerate(state): for col_index, current_piece in enumerate(row): if current_piece == piece: return row_index, col_index
[ "def get_pos(self, piece):\r\n if piece == \"K\":\r\n return (WhiteKing.row, WhiteKing.col)\r\n for i in range(8):\r\n if piece == \"P\" + str(i):\r\n return (WhitePawn.row[i], WhitePawn.col[i])", "def _look_for(board_state, needle='X'):\n\n # Horizontals\n for i in range(3):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a dict of riders, keyed by Bib number
def fetch_riders(race_id): uri = RIDERS_URI.format(race_id=race_id) r = requests.get(uri) data = r.json() riders = {} for item in data: for key in list(item.keys()): if key in unwanted_keys: del item[key] if key in ["FirstName", "LastName"]: ...
[ "def bibliographies(self) -> Dict[\"BibliographyKey\", \"BibliographyValue\"]:\n return self.data[\"bibliographies\"]", "def get_recids_from_bibkey_file(path: str, db) -> set():\n bibkey_regex = re.compile(r\"[a-zA-Z]{1,20}:[0-9]{4}[a-z]{0,10}\")\n with open(path, \"r\") as bibfile:\n bibkeys ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates several tabular CSV exports of riders and classements
def export_rider_csv(race_id, riders, num_stages): export_formats = ( ("general", "position"), ("general", "time_gap"), ("sprint", "position"), ("sprint", "points"), ("mountain", "position"), ("mountain", "points"), ("youth", "position"), ) for (class...
[ "def generate_csv():\n\tdata_frame = get_all_occupancy_data(False)\n\tdata_frame = resample_timestamp(data_frame)\n\tprint('Resample time stamp DONE')\n\tdata_frame = clean_data(data_frame)\n\tprint('Clean data DONE')\n\tdata_frame = add_public_holidays(data_frame)\n\tprint('Add holidays DONE')\n\tdata_frame = add_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the task parameters used for the HIT.
def get_task_params(self, datum): raise NotImplementedError() # return { # "Title": "Short title", # "Description": "Longer description", # "FrameHeight": "1200", # "AssignmentDurationInSeconds": "300", # "LifetimeInSeconds": "86400", # ...
[ "def taskParameters(self, task):\n return (p for n, p in self.named_parameters() if f'taskSpecificLayer.{task}' in n)", "def get_parameters_for_task(model, task_id):\n parameters = get_default_parameters()\n parameters['model'] = model\n parameters['task_id'] = task_id\n\n if task_id == 4:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Code to display HIT.
def view_task(self, request, hit): # NOTE: You should have your own implementation here. return render(request, 'task.html', {'input': hit})
[ "def get_hit(HITId=None):\n pass", "def display(self):\n\n print(self.grid[0], '\\n', self.grid[1], '\\n', self.grid[2], '\\n',\n self.grid[3], '\\n', f'Your score is {self.score}')\n # print(self.grid[1])\n # print(self.grid[2])\n # print(self.grid[3])\n # print...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert Options members to bits and pack into single byte.
def options_to_byte(*options): byte = 0 for op in options[0]: byte |= op.value return pack('>B', byte)
[ "def send_options(self):\n\t\t# maybe a bit overkill, as we're only sending 1 byte\n\t\twith BytesIO(self.options_to_byte(self.options)) as s:\n\t\t\tself.send_data(s, 1)", "def byte_to_options(byte):\n\t\t_byte = unpack('>B', byte)[0]\n\t\toptions = []\n\t\tfor i in range(8):\n\t\t\tif _byte & (1 << i):\n\t\t\t\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert single byte to list of Options members.
def byte_to_options(byte): _byte = unpack('>B', byte)[0] options = [] for i in range(8): if _byte & (1 << i): options.append(Options(1<<i)) return options
[ "def parse_options(data: bytearray) -> Generator[BaseOption, None, None]:\n while data:\n kind = data[0]\n opt = _PARSE_KIND_TBL.get(kind, SizedOption).from_bytes(data)\n yield opt\n\n if opt is end_of_options:\n return", "def options_from(self):\n return [(x.name,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
send_data(stream, length) -> hash of sent data as packed bytes. Send length amount of data in chunks of Transport.BUFFERSIZE from binary stream. Raises RuntimeError if connection is broken.
def send_data(self, stream, length): sha = hashlib.sha256() totalsent = 0 while totalsent < length: chunk = stream.read(self.BUFFERSIZE) sha.update(chunk) chunksent = 0 while chunksent < len(chunk): sent = self.sock.send(chunk[chunksent:]) if not sent: raise RuntimeError('connection broken') chu...
[ "def send_data_block(s, data):\n message = b''.join([struct.pack(\"!I\", len(data)), data])\n #log.debug(\"sending data block length {} ({})\".format(len(message), message[:64]))\n s.sendall(message)", "def recv_data(self, stream, size):\n\t\tsha = hashlib.sha256()\n\t\tbytesread = 0\n\t\twhile bytesread...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
recv_data(stream, size) -> hash of received data as packed bytes. Receive size amount of data in chunks of Transport.BUFFERSIZE from binary stream. Raises RuntimeError if connection is broken.
def recv_data(self, stream, size): sha = hashlib.sha256() bytesread = 0 while bytesread < size: chunk = self.sock.recv(self.BUFFERSIZE) if not chunk: raise RuntimeError('connection broken') stream.write(chunk) sha.update(chunk) bytesread += len(chunk) return sha.digest()
[ "def send_data(self, stream, length):\n\t\tsha = hashlib.sha256()\n\t\ttotalsent = 0\n\t\twhile totalsent < length:\n\t\t\tchunk = stream.read(self.BUFFERSIZE)\n\t\t\tsha.update(chunk)\n\n\t\t\tchunksent = 0\n\t\t\twhile chunksent < len(chunk):\n\t\t\t\tsent = self.sock.send(chunk)\n\t\t\t\tif not sent:\n\t\t\t\t\t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Send a list of Options members packed into a single byte.
def send_options(self): # maybe a bit overkill, as we're only sending 1 byte with BytesIO(self.options_to_byte(self.options)) as s: self.send_data(s, 1)
[ "def options_to_byte(*options):\n\t\tbyte = 0\n\t\tfor op in options[0]:\n\t\t\tbyte |= op.value\n\t\treturn pack('>B', byte)", "def options(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Option]:", "def byte_to_options(byte):\n\t\t_byte = unpack('>B', byte)[0]\n\t\toption...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Send size of file with filename packed into big-endian 8 bytes.
def send_file_size(self, filename): length = os.path.getsize(filename) length_bytes = pack('>Q', length) with BytesIO(length_bytes) as f: self.send_data(f, 8)
[ "def recv_file_size(self):\n\t\ttry:\n\t\t\twith BytesIO() as s:\n\t\t\t\tself.recv_data(s, 8)\n\t\t\t\ts.seek(0)\n\t\t\t\treturn unpack('>Q', s.read())[0]\n\t\texcept RuntimeError:\n\t\t\treturn 0", "def read_size(file: typing.IO[bytes]) -> int:\n return struct.unpack_from(\"<L\", file.read(4))[0]", "def wr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Send filename padded to 255 bytes.
def send_filename(self, fn): if len(fn) > 255: raise FilenameTooLongError('%s contains more than 255 characters' % fn) with BytesIO(bytes(fn.ljust(255, '\x00'), 'utf-8')) as f: self.send_data(f, 255)
[ "def create_file_name():\n # This generates a name that is between 3 to 63 chars long\n return str(uuid.uuid4())", "def write_name(file: typing.IO[bytes], name: str):\n while len(name) < 4:\n name += \" \"\n\n if len(name) > 4:\n raise ValueError(f\"'{name}' must be exactly 4 characters ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
call listen() on the underlying server socket. This also makes sure the socket is bound before calling listen().
def listen(self): try: self.s_sock.getsockname() # check if socket already bound except OSError: self.s_sock.bind(('', 0)) self.s_sock.listen(3)
[ "def _listen(self):\n try:\n self.serversocket.listen()\n # print server listening message\n print(\"Server listening at \" + self.ip_address + \"/\" + str(self.port))\n except:\n # handle exception\n print(\"Failed at server binding or listening:...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
recv_options() -> True/False. Receives a list of Options members as a packed byte, unpacks them to a list and assigns to self.options.
def recv_options(self): try: self.options = self.byte_to_options(self.sock.recv(1)) except struct.error: return False return True
[ "def options(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Option]:", "def _init_options(self):\n #: a dictionary of telnet option ``opt`` bytes that follow an\n # *IAC DO* or *DONT* command, and contains a value of ``True``\n # until an *IAC WILL* or *...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Receive size of file packed into big-endian 8 bytes and return it as an integer. Returns 0 on unsuccessful unpack.
def recv_file_size(self): try: with BytesIO() as s: self.recv_data(s, 8) s.seek(0) return unpack('>Q', s.read())[0] except RuntimeError: return 0
[ "def read_size(file: typing.IO[bytes]) -> int:\n return struct.unpack_from(\"<L\", file.read(4))[0]", "def next_bytes_as_int(file: BinaryIO, n_bytes=1) -> int:\n if n_bytes == 1:\n return struct.unpack(\"B\", file.read(n_bytes))[0]\n if n_bytes == 4:\n return struct.unpack(\"<i\", file.read...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add random mass values to a as an extra column
def add_random_mass_column(a): n_evt = len(a) m_min = min(resonant_signal_masses) m_max = max(resonant_signal_masses) masses = m_min + (m_max - m_min) * np.random.rand(n_evt, 1) a = np.hstack([a, masses]) return a
[ "def gen_random(self, field_name, random):\r\n ...", "def randomfill_mass(self, mass):\n for i in range(mass):\n flat_idx = np.random.randint(0, self.size-1)\n self.latt.flat[flat_idx] += 1\n\n return", "def mutation(self):\n \"\"\" Мутация бота - немножко меняю...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts the Goal object to a dictionary and adds the tasks if there are any
def to_dict(self): result = { "id": self.goal_id, "title": self.title } if len(self.tasks) > 0: result["tasks"] = [task.to_dict() for task in self.tasks] return result
[ "def get_required_tasks(self) -> Dict[str, Tuple[type]]:\n pass", "def calculate_task_metadata(cls, settings, trials):\n\n tasks_used = [trial['task_name'] for trial in trials if trial['task_name'] != 'none']\n occur = Counter(tasks_used)\n task_dict = dict(occur) # e.g. {'task_1': 6} ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve all results for a mnemonic in the requested time range.
def get_records( self, mnemonic, starttime, endtime, result_format=None, time_format=None ): if result_format is None: result_format = self.default_format if not isinstance(starttime, Time): starttime = ...
[ "def _fetch_by_query(self, query: str) -> Dict[str, Any]:\n resp = requests.get(\n self.url + \"/api/v1/query_range\",\n {\"query\": query, \"start\": self.start, \"end\": self.end, \"step\": 5},\n )\n resp.raise_for_status()\n return resp.json()", "def _listResul...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Main Smart Mirror Template.
def smartmirror(): top_banner_temp = source_template("top_banner", app.config) right_top_panel_temp = source_template("right_top_panel", app.config) right_bottom_panel_temp = source_template("right_bottom_panel", app.config) left_panel_temp = source_template("left_panel", app.config) bottom_banner_t...
[ "def setup_smartmirror():\n version = app.config.get(\"SM_VERSION\")\n plugin_lib = app.config.get(\"PLUGIN_LIB\")\n top_banner_plugins = plugin_lib.get(\"top_banner\")\n left_panel_plugins = plugin_lib.get(\"left_panel\")\n right_top_plugins = plugin_lib.get(\"right_top_panel\")\n right_bottom_pl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
configure your smartmirror from the frontend
def setup_smartmirror(): version = app.config.get("SM_VERSION") plugin_lib = app.config.get("PLUGIN_LIB") top_banner_plugins = plugin_lib.get("top_banner") left_panel_plugins = plugin_lib.get("left_panel") right_top_plugins = plugin_lib.get("right_top_panel") right_bottom_plugins = plugin_lib.ge...
[ "def smartmirror():\n top_banner_temp = source_template(\"top_banner\", app.config)\n right_top_panel_temp = source_template(\"right_top_panel\", app.config)\n right_bottom_panel_temp = source_template(\"right_bottom_panel\", app.config)\n left_panel_temp = source_template(\"left_panel\", app.config)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Route to upgrade the Pi to the latest version. WARNING: Only run this on the Pi; this could overwrite your local repo!
def upgrade_pi(): version = app.config.get("SM_VERSION") if request.method == "POST": if app.config.get("environment") == "testing": return jsonify({ "status": "Upgrades are not permitted in testing environments!" }) update = q.enqueue(upgrade_pi_process) ...
[ "def update_app():\n pull_project()\n restart_app()", "def update(self) -> None:\n import git\n git_dir = git.cmd.Git(get_path_to_pyflow())\n msg = git_dir.pull()\n print(msg)", "def upgrade_cmd(ctx, to_version):\n logger = logging.getLogger('populus.cli.upgrade')\n proje...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Route to reboot the Pi. WARNING: Only run this on the Pi; this will reboot the server.
def reboot_pi(): version = app.config.get("SM_VERSION") if request.method == "POST": if app.config.get("environment") == "testing": return jsonify({ "status": "Restarting is not permitted in testing environments!" }) reboot = q.enqueue(restart_pi_process) ...
[ "def reboot(self):\n self.send_command(api.reboot)", "def restart_route():\n # using run instead of sudo because sudo prompts for a password\n run('sudo /etc/init.d/mwana-route restart')\n # print out the top of the log file in case there are errors\n import time\n time.sleep(2)\n run('he...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Endpoint for the Top Banner.
def top_banner_endpoint(): tb_config = list(app.config.get("top_banner"))[0] if tb_config == "greetings": data = top_banner.GreetingPlugin(app.logger) return jsonify(data.greetings()) elif tb_config == "quotes": data = top_banner.QuotePlugin(app.logger) return jsonify(data....
[ "def top(self, category=0):\n return Top(self.base_url, category)", "def bottom_banner_endpoint():\n bb_config = app.config.get(\"bottom_banner\").keys()[0]\n if bb_config == \"us_holidays\":\n year = datetime.now().year\n data = bottom_banner.UsHolidays(year, app.logger)\n retur...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Route for the bottom banner.
def bottom_banner_endpoint(): bb_config = list(app.config.get("bottom_banner"))[0] if bb_config == "us_holidays": year = datetime.now().year data = bottom_banner.UsHolidays(year, app.logger) return jsonify(data.us_holidays()) elif bb_config == "chuck_norris": data = bottom_b...
[ "def top_banner_endpoint():\n tb_config = app.config.get(\"top_banner\").keys()[0]\n\n if tb_config == \"greetings\":\n data = top_banner.GreetingPlugin(app.logger)\n return jsonify(data.greetings())\n elif tb_config == \"quotes\":\n data = top_banner.QuotePlugin(app.logger)\n r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Endpoint for the reminders form.
def reminders_ui_endpoint(): # reminders_form = RemindersForm() form_validation = [ 'start_date', 'start_time', 'end_date', 'end_time', 'comment' ] if request.method == 'POST': status = 1 res = { "status": "", "data": {} ...
[ "async def reminders(self, ctx):\n user = ctx.message.author.id\n res = self.get_reminders(user)\n if not res:\n await ctx.send('No reminders saved for you!')\n return\n\n t = PrettyTable()\n t.left_padding_width = 1\n t.right_padding_width = 1\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete reminder from the database with its id.
def delete_reminders_endpoint(_id): try: reminder = ReminderModel.find_by_id(_id) reminder.delete_from_db() return jsonify({"status": "success"}) except Exception as e: app.logger.error(e) return jsonify({"status": "error"})
[ "def delete(reminder_id: int, app: Flask, db: SQLAlchemy) -> int:\n reminder: ReminderModel = ReminderModelService.retrieve_by_reminder_id(\n reminder_id, app\n )\n if reminder:\n\n RemindersTimeSlotModelService.delete_all_by_reminder_id(\n reminder_id, app,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get Redis cache data helper function.
def get_redis_cache(redis_conn, redis_key): cached_data = redis_conn.get(redis_key) if cached_data: data = json.loads(cached_data) else: data = False return data
[ "def _get_data_from_cache(self):\n logging.debug('Loading data from cache file: %s', self._cache_file)\n try:\n data = pickle.load(open(self._cache_file, \"rb\"))\n return data\n except EOFError:\n return {}", "def _load_from_cache(self):\n return cache...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper function to cache data in redis
def cache_data(data, redis_conn, redis_key, cache_timer): str_data = json.dumps(data) redis_conn.set(redis_key, str_data) redis_conn.expire(redis_key, cache_timer)
[ "def dynCache():\n pass", "def get_redis_cache(redis_conn, redis_key):\n cached_data = redis_conn.get(redis_key)\n if cached_data:\n data = json.loads(cached_data)\n else:\n data = False\n\n return data", "def cache(tag = \"*\", design = \"*\", store = \"*\"):\r\n\r\n job = {\r\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Defines and returns all the variables that need to be consistent between a question and an answer. Usually only names and variable/symbol names.
def init_consistent_qa_variables(self): if self.debug: m1, m2, g, a2 = symbols('m1 m2 g a2') else: m1, m2, g, a2 = symbols('m1 m2 g a2') return m1, m2, g, a2
[ "def init_consistent_qa_variables(self):\n if self.debug:\n U, V = symbols ('U V')\n else:\n U, V = self.get_symbols(2)\n return U, V", "def get_quiz_text_and_answer():\n \n # Code Review Comment: Based on the first code review suggesting, the quiz text\n # and ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read in .wav file, return the log power spectral density with the frequency axis determined by the constant freqs.
def wav_to_logPSD(infile): signal, sr = wavload(infile, sr = None) signal = resample(signal, sr, srfinal) signal = signal/(10*np.var(signal)) signal = signal - np.mean(signal) logflogpsd, logffreqs, times = interp_logpsd(signal, sr, int(window*sr), int(overlap*sr), freqs...
[ "def read_wav(filename):\n s,fs = load(filename) # scipy reads int\n s = np.array(s)/float(max(abs(s)))\n s = add_wgn(s) # Add jitter for numerical stability\n return fs,s", "def load_wavelen(wavelength_file: str):\n\n q = np.loadtxt(wavelength_file)\n if q.shape[1] > 2:\n q = q[:, 1:3]\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Do the whole preprocessing scheme at once, saving a pickled PCA object and a .npy array with the data in the reduced representation. Unreduced spectrograms are not saved. Since these are all stored at once and the covariance matrix for all of them is computed, this method requires a substantial amount of RAM (something...
def wav_to_PCA(infolder='../speech_corpora/', outfile='../Data/processedspeech12.npy', pcafilename = '../Data/spectropca12.pickle', testfile = 'test12.npy', ncomponents = 200, whiten = True, maxspectros=100000): infilelist = [] for pth, subd, files in os.walk(infolder): for fname in file...
[ "def do_PCA_and_save(activations_dir, save_dir):\n\n layers = ['layer_1','layer_2','layer_3','layer_4','layer_5','layer_6','layer_7','layer_8']\n n_components = 100\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n for layer in tqdm(layers):\n activations_file_list = glob.glob(a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the replace map for the given vault dir.
def vault_replace_map(vault_dir: Path) -> ReplaceMap: return _replace_map(_alias_map(vault_dir))
[ "def _cache_mappings(self):\n self._lang_to_code = {c.lang: c for c in self.codes}\n self._locale_to_info = {i.lang: i for i in self.info}", "def load_zip_map(dirs):\n zip_map = {d: [] for d in dirs}\n for d in tqdm(dirs):\n for fn in os.listdir(d):\n if fn.lower().endswith(\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
My own implementation of cv2.drawMatches as OpenCV 2.4.9 does not have this function available but it's supported in OpenCV 3.0.0 This function takes in two images with their associated keypoints, as well as a list of DMatch data structure (matches) that contains which keypoints matched in which images. An image will b...
def drawMatches(img1, kp1, img2, kp2, matches): # Create a new output image that concatenates the two images together # (a.k.a) a montage rows1 = img1.shape[0] cols1 = img1.shape[1] rows2 = img2.shape[0] cols2 = img2.shape[1] out = np.zeros((max([rows1, rows2]), cols1+cols2, 3), dtype='uin...
[ "def drawMatches(img1, kp1, img2, kp2, matches):\r\n\r\n # Create a new output image that concatenates the two images together\r\n # (a.k.a) a montage\r\n rows1 = img1.shape[0]\r\n cols1 = img1.shape[1]\r\n rows2 = img2.shape[0]\r\n cols2 = img2.shape[1]\r\n\r\n out = np.zeros((max([rows1,rows2...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given np.ndarrays of tls_sde and tls_period, return an array "limit", which represents the boundary in SDE versus Period space between "above threshold" and "below threshold" planets. Also, return the array "abovelimit", which is just the boolean array `tls_sde > limit`. The construction of the boundary is performed by...
def get_tls_sde_versus_period_detection_boundary(tls_sde, tls_period, make_plots=False): assert np.all(np.isfinite(tls_period)) assert np.all(np.isfinite(tls_sde)) assert len(tls_sde) == len(tls_period) N_lcs = len(tls_period) WRN_THRESHOLD = 5e3 ...
[ "def find_limits(pts):\n arr_pts = np.array(pts)\n return np.min(arr_pts, 0), np.max(arr_pts, 0)", "def _bound_pvals(pvals):\n return np.where(pvals < 2.2e-16, 2.2e-16, np.where(pvals > 1 - 2.2e-16, 1 - 2.2e-16, pvals))", "def maxLk_interval(self, z, zs):\n izmax = np.argmax(z)\n zmax...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
creates a boolean mask from a list of indices
def make_mask(size, idx_true=None): # TODO: make work for n dimensional? is this something the np.ma module could do better? if idx_true is None: idx_true = list(range(size)) mask = [] for i in range(size): if i in idx_true: mask += [True] else: ...
[ "def make_mask_from_index(data: np.ndarray, index_list: list[int]) -> np.ndarray:\n\n mask = np.zeros_like(data)\n for idx in index_list:\n mask += np.where(data == idx, 1, 0)\n mask = np.where(mask > 0, 1, 0)\n return mask", "def _get_label_mask_from_label_indices(self, label_indices):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
sort one list by another list
def sort_by(list_to_sort, list_to_sort_by, descend=True): sorted_lists = [(cid, did) for did, cid in sorted(zip(list_to_sort_by, list_to_sort))] if descend: sorted_lists = sorted_lists[::-1] ordered = np.array(sorted_lists)[:, 0] ordered_by = np.array(sorted_lists)[:, 1] return lis...
[ "def sort_ab(a, b):\n\n outlist = []\n\n ia = 0\n ib = 0\n\n while ia < len(a) and ib < len(b):\n\n if a[ia] < b[ib]:\n outlist.append(a[ia])\n ia += 1\n\n else:\n outlist.append(b[ib])\n ib += 1\n\n # Add any remaining items:\n outlist.ext...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[player name] Fetches leaderboard for active or previous tournament, optionally pass a name to find player in standings
def golf(self, irc, msg, args, options, search=None): options = dict(options) type_is_champions = options.get('champions') if type_is_champions: trn = self._fetchCurrent(type_='s') else: trn = self._fetchCurrent() if not trn[1]: irc.re...
[ "def search(name=None, score=None, country=None):\n try:\n leaders = None\n if name is not None:\n if score is not None:\n if country is not None:\n # find people locally by score and name\n country = country.capitalize()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Loads samples from a tsv file where the first column is the sentence and the second column is the integer label
def load_from_tsv(tsv_file): # Load data from files all_examples = list(open(tsv_file, "r", encoding='utf-8').readlines()) split_lines = [l.split('\t') for l in all_examples] sentences = [s[0].strip() for s in split_lines] label_integers = [int(s[1].strip()) for s in split_lines]...
[ "def LoadData(filename, mode='train', model='tweet'):\n ids, labels, sentences = [], [], []\n with gzip.open(filename, 'rt') as f:\n for line in f:\n tweetid, lang, tweet = line.split('\\t')\n\n idx = int(tweetid) % 10 # use this to partition data\n if mode == 'train' ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
In this function, you need to design your own algorithm or model to find the matches and generate a matches_test.csv in the current folder. You are given the locu_train and foursquare_train JSON file paths and the matches_train.csv path to train your model or algorithm. Then you should test your model or algorithm with locu_test a...
def get_matches(locu_train_path, foursquare_train_path, matches_train_path, locu_test_path, foursquare_test_path): pd.options.mode.chained_assignment = None fs = pd.read_json(foursquare_train_path) locu = pd.read_json(locu_train_path) truth = pd.read_csv(matches_train_path) fs_test = pd.read_json(fo...
[ "def create_pre_match_features(row):\n\n\tv=[] #vector to be populated\n\tv.append(row[\"tourney_date\"])\n\tv.append(row[\"tourney_name\"])\n\n\t#print(\"creating pre-match features for {} vs {}\".format(row[\"winner_name\"],row[\"loser_name\"]))\n\n\tdate=row[\"tourney_date\"]\n\tr=row[\"round\"]\n\tsur=row[\"sur...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return all processes seen in ``last_seen_seconds`` seconds.
def get_active_processes(self, last_seen_seconds=None): if last_seen_seconds is None: last_seen_seconds = self.heartbeat_interval seconds_ago = now() - datetime.timedelta(seconds=last_seen_seconds) return self.sa_session.query(WorkerProcess).filter(WorkerProcess.update_time > seconds...
[ "def get_active_processes(self, last_seen_seconds=None):\n if last_seen_seconds is None:\n last_seen_seconds = self.heartbeat_interval\n seconds_ago = now() - datetime.timedelta(seconds=last_seen_seconds)\n return self.sa_session.query(WorkerProcess).filter(WorkerProcess.table.c.upda...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts the given Joy message following the conversion rule
def convert(self, joy_msg): return Converter.convert_with_rules(joy_msg, self.rules)
[ "def get_message_converter(self, msg_type):", "def convert_msg(self, zmsg: TZMQMessage) -> Message:\n msg = self.message_factory(zmsg)\n msg.from_zmsg(zmsg)\n return msg", "def _telethon_msg_to_mymessage(msg, target_id, your_name, target_name):\n return MyMessage(msg.message + (msg.stick...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Evaluates a given expression using values from the joy message. For example, expression 'a1 + a3 if b0 else a5' evaluates to the result of 'a1 + a3' if b0 is high, otherwise to the value of a5. If expr is a list or tuple, a list is returned by recursively calling Converter.eval.
def eval(joy_msg, expr): if not isinstance(expr, six.string_types) and isinstance(expr, collections.Iterable): return [Converter.eval(joy_msg, _expr) for _expr in expr] # Modules available for mathematical computation global_vars = { 'm': np if np is not None else math, ...
[ "def evaluate(expr,**bindings):\n expr = expr.replace(\" \", \"\")\n paren, lst, lst_op = 0, -1, None\n #finds the last operator to be evaluated.\n for i in range(len(expr)):\n if expr[i] == \"(\":\n paren = paren + 1\n elif expr[i] == ')':\n paren = paren - 1\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the Fresnel reflectivity at the given Q/wavelength.
def reflectivity(self, Q, L=1): # If Q < 0, then we are going from substrate into incident medium. # In that case we must negate the change in scattering length density # and ignore the absorption. drho = self.rho-self.Vrho S = 4*pi*choose(Q<0,(-drho,drho)) \ + 2j*pi/...
[ "def spectral_fwhm(self):\n wave = self.central_wavelength\n return wave / self.info.instrument.spectral_resolution", "def estimate_fluctuations(self, Q_l, Q_r, s1, s2):\n\t\t# Estimate the value of the star region using the HLLE speeds\n\t\tQ_hat = (self.f(Q_r) - self.f(Q_l) - s2 * Q_r + s1 * Q_l) ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Take a logfile line and return a Row object with hostname and bytes transferred. Return None if regex doesn't match.
def line_to_row(line): m = line_re.match(line) if m: return Row(host_name=m.group(1), bytes=m.group(2)) else: #print("nothing") return None
[ "def parse_log_line(line: str) -> LogEntry:\n match = LOGPAT.match(line)\n if not match:\n # we could catch that error and skip the line\n raise ValueError(f'incorrect log format: {line}')\n\n entry = match.groups()\n parsed_time = parse(entry[3][:11] + ' ' + entry[3][12:])\n size = int...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given an instance of a Model class, query the largest existing value for the `order` field and add one. This is useful for prepopulating DB fields (e.g. goals.Category.order, survey.LikertQuestion.order, etc).
def get_max_order(model): result = model.objects.aggregate(Max('order')) current_num = result['order__max'] or 0 return current_num + 1
[ "def reorder_values(self, order):\n if isinstance(self, (LookupField, MultipleLookupField)):\n values_to_save = []\n for idx, value_id in enumerate(order):\n value = self.lookupvalues.get(pk=value_id)\n value.order = idx\n values_to_save.appe...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates metric `metric_fn` during the validation step.
def calc_val_metric(true, pred, metric_fn, pred_fn): true_np = to_np(true) pred_np = to_np(pred) metric_list = [] for t, p in zip(true_np, pred_np): metric_list.append(metric_fn(pred_fn(t), pred_fn(p))) return np.mean(metric_list)
[ "def test_metric_function(self):\n model = FakeSemanticSegmentationModel()\n batch, output, _ = get_fake_batch_output()\n batch_replicated, outputs_replicated = (jax_utils.replicate(batch),\n jax_utils.replicate(output))\n\n # Test metric function in the pmappe...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure workflow hashcodes are behaving as expected
def test_create_workflow_hashcode(): tmp_args1 = argparse.Namespace(epochs=100, expected_cells=1000, use_cuda=True) tmp_args2 = argparse.Namespace(epochs=200, expected_cells=1000, use_cuda=True) tmp_args3 = argparse.Namespace(epochs=100, expected_cells=500, use_cuda=True) hashcode1 = create_workflow_ha...
[ "def test_hash(self):\n value = divisors.PrimeFactorization({17: 3, 23: 1, 41: 2})\n self.assertEqual(hash(value), 17 ** 3 * 23 * 41 ** 2)", "def _hash_flow(self, flow: Flow) -> str:\n prefect_version = Version(prefect.__version__)\n\n if prefect_version < parse(\"0.15.0\"):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the setup of these randomstate fixtures. The state0 fixture is one set of params. The state fixture is a combinatorial set of params, only one of which matches the state0 setup. We want to make sure that when we have fixtures set up the same way, then randomness behaves the same (and different when set up differen...
def test_perturbedrandomstate_fixture_meets_expectations(perturbed_random_state0_dict, perturbed_random_state_dict): prs = perturbed_random_state_dict['state'] params = (perturbed_random_state_dict['seed'], perturbed_random_state_dict['n']) prs0 = per...
[ "def runTest(self):\n \n # Since we are creating new states for experiments from the first one \n # the test is going to create the first state from all the others by applying\n # first experiment changes and then check if it produces the same state\n \n state, expe...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For two lists of tensors, check that they are all close
def _check_all_close(tensors1, tensors2) -> bool: assert len(tensors1) == len(tensors2), \ 'Must pass in same number of tensors to check if they are equal' equal = True for t1, t2 in zip(tensors1, tensors2): equal = equal and torch.allclose(t1, t2) return equal
[ "def is_equal_tf(\n x: (tf.Tensor, np.ndarray, List),\n y: (tf.Tensor, np.ndarray, List),\n atol: float = 1.0e-7,\n) -> bool:\n x = tf.cast(x, dtype=tf.float32).numpy()\n y = tf.cast(y, dtype=tf.float32).numpy()\n return np.all(np.isclose(x, y, atol=atol))", "def equal_lists(list_a: Union[list, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the Toeplitz matrix for 2D conv with multiple in and out channels.
def toeplitz_mult_ch(kernel, input_size, padding=1): r, m, n = input_size kernel_size = kernel.shape output_size = (kernel_size[0], input_size[1] - (kernel_size[2] - 1) + 2 * padding, input_size[2] - (kernel_size[3] - 1) + 2 * padding) T = np.zeros((output_size[0], int(np.prod(output_size[1:]))...
[ "def toeplitz_filter_mat(filter, img_size, mode='valid'):\n\n if filter.ndim == 1:\n nx = img_size[0]\n\n if mode == 'valid':\n first_row = np.pad(filter, (0, nx - filter.size), mode='constant', constant_values=0)\n first_col = np.zeros(nx - filter.size + 1)\n first...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Search for a song, display results. out can be "m3u" or "null"; defaults to human-readable
def do_search(out=None, edit_mode=False): duration = 0 start_t = time() fields = list(valid_tags) fields.remove('filename') fields = tuple(fields) if callable(out): song_output = out elif out == 'm3u': print "#EXTM3U" def song_output(song): print u"#EXT...
[ "def search(self, item):\n try:\n songs = self._echofun(pyechonest.song.search, title=item.title,\n results=100, artist=item.artist,\n buckets=['id:musicbrainz', 'tracks'])\n pick = self._pick_song(songs, item)\n if pick is None:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that each Term synonym is stored as a Synonym object
def _check_synonyms(self, term): for s in term.synonyms: self.assertIsInstance(s, pronto.synonym.Synonym)
[ "def check_synonym(word, word2):\n l_syns = list()\n lmtzr = WordNetLemmatizer()\n word = lmtzr.lemmatize(word)\n synsets = wn.synsets(word2)\n for synset in synsets:\n for i in range(0,len(synset.lemma_names)):\n\t\t\tif word == synset.lemma_names[i] and similarity.semantic_match(word,word2) ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }