Dataset schema (recovered from the dataset-viewer header):
- query: string (lengths 9 to 9.05k)
- document: string (lengths 10 to 222k)
- negatives: list (19 to 20 items)
- metadata: dict
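The metadata column's "objective" field marks each row for triplet-style training: the anchor is the query, the positive is the document, and the negatives supply contrast. Below is a minimal consumption sketch under that assumption; the JSONL path is a placeholder, not part of the dump:

    import json

    with open("rows.jsonl") as f:  # hypothetical export of the rows below
        for line in f:
            row = json.loads(line)
            anchor = row["query"]       # natural-language description
            positive = row["document"]  # the matching code snippet
            # metadata: {"objective": {"triplet": [["query", "document", "negatives"]]}}
            triplets = [(anchor, positive, neg) for neg in row["negatives"]]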
Method that sets UI state to 'default'
def _set_default_state(self):
    self.state = 'default'
    self.title = ''
    self.cell_info_view.is_hidden = True
    self.log_view.is_hidden = False
    self.map_view.cam_offset = [0, 0]
[ "def set_default(self, default=None):\r\n self.default = default", "def reset_value(self):\n if not isinstance(self._default_value, _NoWidgetValue):\n self.set_value(self._default_value)", "def setWidgetsToDefaults(self):\n\n print(\"default controls\")\n for name, widget ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method handles player input in 'default' state
def _handle_input_default(self, val):
    player_input = val
    game = self.game
    player = game.player
    handled = False  # input handled flag
    if game.is_waiting_input:
        if player_input == terminal.TK_ESCAPE:
            # game quit on ESC
            text = _('Do you really want to qu...
[ "def __playHumanTurn__(self, choice):\n self.__inputChoice__(choice)", "def get_player_input(self, game_state):\n successful_input = False\n while not successful_input:\n scenario = game_state.scenario\n actions = scenario.actions\n print game_state\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method handles player input in 'closing_door' state
def _handle_input_closing_door(self, val):
    player_input = val
    handled = False  # input handled flag
    if player_input == terminal.TK_ESCAPE:
        # exit to default state
        self._set_default_state()
        handled = True
    elif player_input in (terminal.TK_KP_4, terminal.TK_LEFT):
        ...
[ "def close_door(self):\n if self.door_open:\n self.do(2, \"Closing door\")\n self.door_open = False", "def onExitVehicle(self, inEvent: RealTimeEvent):\n\n player_id = inEvent.parameters['player_id']\n player_location = inEvent.parameters['player_location']\n vehi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method handles player input in 'targeting' state
def _handle_input_targeting(self, val):
    player_input = val
    handled = False  # input handled flag
    if player_input == terminal.TK_ESCAPE:
        # exit to default state
        self.stop_targeting()
        handled = True
    elif player_input == terminal.TK_ENTER:
        # if player chooses the c...
[ "def updateTargetMode(self):\n\t\tplayer = scene.objects['Link']\n\t\tif (player.gamepad.isZPressed()):\n\t\t\tif (self.targetObject != None and self.canTargetCurrentObject()):\n\t\t\t\tcont = logic.getCurrentController()\n\t\t\t\tself.trackTargetObject(cont)\n\t\t\telse:\n\t\t\t\tplayer.camManager.cameraToBackPlay...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove highlighting from the string at the given index (by changing its colors)
def unhighlight(self, index):
    if index == self.selected:
        self.labels[index].color_fg = self.color_bg
        self.labels[index].color_bg = self.color_fg
    else:
        self.labels[index].color_fg = self.color_fg
        self.labels[index].color_bg = self.color_bg
    self.high...
[ "def unhighlight(self):\n return self.tag_remove(\"highlight\", \"1.0\", Tk.END)", "def stop_highlights():\r\n\r\n for v in sg.VEHICLES:\r\n v.stop_highlight()", "def _color_clear(self):\n self._color_text()", "def highlight(regexes, color_themes, text, print_output=False):\n #Build...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a Dash definition of an HTML table for a Pandas dataframe
def make_dash_table(df):
    table = []
    for index, row in df.iterrows():
        html_row = []
        for i in range(len(row)):
            html_row.append(html.Td([row[i]]))
        table.append(html.Tr(html_row))
    return table
[ "def generate_table(df):\n return dash_table.DataTable(\n id='table',\n columns=[\n {\"name\": i, \"id\": i, \"selectable\": True} for i in df.columns\n ],\n page_size=14,\n style_cell={'padding': '5px',#'textAlign': 'right',\n 'fontSize':12,'whiteS...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of nodes to execute. This method returns the minimal list of nodes that need to be executed in graph G in order to return the requested outputs. The ordering of the nodes is fixed.
def get_execution_order(cls, G):
    # Get the cache dict if it exists
    cache = G.graph.get('_executor_cache', {})
    output_nodes = G.graph['outputs']
    # Filter those output nodes who have an operation to run
    needed = tuple(sorted(node for node in output_nodes if 'operation' in G.nodes[...
[ "def _process_graph(graph: tf.Graph) -> List[str]:\n all_nodes = [x.name for x in graph.as_graph_def().node]\n print(\"############\")\n print(all_nodes)\n nodes = [x for x in all_nodes if x in POSSIBLE_OUTPUT_NODES | MODEL_CONSTANTS]\n print(\"List of nodes to export for brain TODO(oleguer put name ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Start a new list. 'todo new'
def cmd_new(self, event):
    usr = event["sender"]
    if not self.todos.has(usr):
        self.todos.set(usr, [])
        return "Created a new todo list, now you can add new items using todo add <item>"
    return "You have a previous todo list, you can type !todo list to view it or !todo reset ...
[ "def new_list(self):\r\n self.app.clear_data()\r\n self.populate_listbox(self.app.data)\r\n self.set_infobox_msg(\"New list created.\")", "def new_list(request):\n # create new list\n list_ = List.objects.create()\n # create new item from post request and associate it with the list\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add item to the todo list. 'todo add <item>'
def cmd_add(self, event, item):
    usr = event["sender"]
    if not self.todos.has(usr):
        return "You need to start a todo list first. type !todo new"
    user_list = self.todos.get(usr)
    user_list.append(item)
    self.todos.set(usr, user_list)
    return "item {} added".format(i...
[ "def add_todo_item(self, todo_list_id, item):\n todo_list = self.todo_lists[todo_list_id]\n assert isinstance(todo_list, TodoList)\n todo_list.add_item(item=item)\n todo_list.save()", "def add_item(self, item):", "def new_item(self, event) -> None:\n\n # Get the text from the ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove item from the todo list 'todo pop'
def cmd_pop(self, event):
    usr = event["sender"]
    if not self.todos.has(usr):
        return "You need to start a todo list first. type !todo new"
    user_list = self.todos.get(usr)
    item = user_list.pop()
    self.todos.set(usr, user_list)
    return "item {} removed".format(item)
[ "def remove_item(self, event) -> None:\n\n # Set the state to the list of todos, filtered for the one that should be deleted\n self.state[\"todolist\"] = list(\n filter(\n lambda item: item.id != event.target.getAttribute(\"index\"),\n self.state.get(\"todolist...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
List items from the todo list. 'todo list'
def cmd_list(self, event):
    usr = event["sender"]
    if not self.todos.has(usr):
        return "You need to start a todo list first. type !todo new"
    return "items: {}".format(self.todos.get(usr))
[ "def list(config):\n store = api_todo.Todo()\n #tasks = api_sort(store.ls())\n tasks = store.ls()\n headers = ['id', 'Priority', 'done', 'description']\n data = []\n for el in tasks:\n identifier, content, _, _, active, priority = el\n data.append([identifier, priority, \"\" if activ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reset items from the todo list. 'todo reset'
def cmd_reset(self, event):
    usr = event["sender"]
    if not self.todos.has(usr):
        return "You need to start a todo list first. type !todo new"
    self.todos.set(usr, [])
    return "Your todo list has been reset"
[ "def reset(self):\n self.lines = []\n self.total_todos = 0\n self.active_todos = []\n self.done_todos = []", "def do_clear():\n itemcnt = todolist.get_count()\n confirmwarn = 'This will clear all {} items from the list.'.format(\n itemcnt)\n confirmmsg = 'Clear the enti...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new DataConnectorInstance
def create(self, id, type, config, disabled=values.unset):
    data = None
    if type == DataConnectorInstance.ConnectorType.COUCHDB:
        data = values.of({
            'id': id,
            'type': type,
            'disabled': disabled,
            'server': config['server'],...
[ "def create_connector(self, connector_name, database_type, environment_id): \n\n self.obj = GenericModel({ x:None for x in self.swagger_map.values()}, self.swagger_types, self.swagger_map)\n self.connector_name = connector_name\n self.database_type = database_type\n self.environment_id ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fetch song details using Shazam API.
def fetch_song_details(mp3_file_content):
    shazam = Shazam(mp3_file_content)
    try:
        recognize_generator = shazam.recognizeSong()
        song_details = next(recognize_generator)
        if not song_details[1].get("track"):
            logger.info("Can't recognize song")
            return None
        retu...
[ "def ask_spotify(title):\n song = sp.search(q=title, limit=1, type='track')\n try:\n info = song['tracks']['items'][0]\n if info:\n artist = info['artists'][0]['name']\n song_name = info['name']\n song_url = info['external_urls']['spot...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add lyrics to MP3 file.
def add_lyrics(file_name, lyrics):
    try:
        tags = ID3(file_name)
        uslt_output = USLT(encoding=3, lang="eng", desc="desc", text=lyrics)
        tags["USLT::'eng'"] = uslt_output
        tags.save(file_name)
    except Exception as e:
        logger.error(f"Error adding lyrics: {e}")
[ "def add(self, song):\n try:\n f = open(self.filename, \"a\")\n f.write(song+\"\\n\")\n f.close()\n except FileNotFoundError:\n raise\n self._load_songs_from_file()", "def write_lyrics(song):\r\n\r\n # ./lyricDirectory/Chart/Artist/Song\r\n tr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add album art to MP3 file.
def add_album_art(file_name, image_url):
    try:
        img = requests.get(image_url, stream=True).raw
        audio = EasyMP3(file_name, ID3=ID3)
        audio.tags.add(
            APIC(
                encoding=3,
                mime="image/png",
                type=3,
                desc="Cover",
                ...
[ "def import_albumart(self, albumart):\n super(MP3AlbumArt, self).import_albumart(albumart)\n frame = APIC(0, albumart.mimetype, 0, '', albumart.dump())\n self.track.entry.tags.add(frame)\n self.track.modified = True", "def write_mp3_tags(artist, album):\n \n mp3_file_list = glob....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decorator which executes the given function and afterwards sets the session's notification to inactive.
def throw_notification_once(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if args == ():
            retval = func()
        else:
            retval = func(args)
        if type(retval).__name__ == "unicode":
            session['notification_active'] = False
            return retval
        ret...
[ "def no_authentication(func):\n func.no_authentication = True\n return func", "def unauthenticated(fnc):\r\n fnc.unauthenticated = True\r\n return fnc", "def make_session_run_hook():", "def session(f):\n\n def new_func(*args, **kwargs):\n ctx = click.get_current_context()\n if not...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function for deleting post with given post_id.
def delete_post(post_id):
    g.db.delete_post(post_id)
    return redirect(url_for('admin.show_admin_posts'))
[ "def delete_post(id):\n db = get_db()\n db.execute(\n 'DELETE FROM post WHERE id=?',\n (id,)\n )\n db.commit()", "def deletePostById(self, id):\n if not id:\n logger.error('Failed to delete beceuase Id was not provided')\n raise exceptions.IdNotSpecifiedError...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function for "host/admin/widgets". Shows all widgets.
def show_admin_widgets():
    widgets = g.db.get_widgets()
    for widget in widgets:
        if len(widget['body']) > 100:
            widget['body'] = widget['body'][:100] + "..."
    return render_template('admin/widgets.djhtml', widgets=widgets)
[ "def widgets(self):\n if self._widgets is None:\n ws = self._get_json(self.WIDGETS_PATH)['widgets']\n self._widgets = dict(([w['name'], w] for w in ws))\n return self._widgets", "def cmd_list_widgets(self):\r\n return self.widgetMap.keys()", "def get_widgets(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function for "host/admin/widgets/new". Creator of new widgets.
def show_new_widget_forms():
    if request.method == 'POST':
        g.db.add_widget(
            request.form['name'],
            request.form['body'])
        session['notification_active'] = True
        session['notification_title'] = "Widget created!"
        session['notification_description'] = "Widg...
[ "def create_widgetbar(self):\n \n self.plugin_manager.create_widgets()", "def create_widgets(self):\n self.make_button_grid()\n self.make_control_buttons()", "def _new_button_clicked(self):\n current_widget = self._get_selected_widget()\n current_widget.create_new()", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function for deleting widget with given widget_id.
def delete_widget(widget_id):
    g.db.delete_widget(widget_id)
    return redirect(url_for('admin.show_admin_widgets'))
[ "def delete_widget( self, widget_id ) :\n\n # don't delete anything from an active menu\n if self.is_posted :\n return 0\n\n # removing it from the list of dictionaries is sufficient\n for i in range( len( self._wdicts ) ) :\n if self._wdicts[ i ][ 'widget_id' ] == ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate n_cluster random clusters with random variance sigma distributed over an area of max_area^n_features.
def generate_clusters(max_area, n_clusters, sigma):
    # ----- Define gaussian distributions / clusters -----
    means = []
    for _ in range(n_clusters):
        means.append([np.random.randint(0, max_area) for _ in range(N_FEATURES)])
    covs = []
    for _ in range(n_clusters):
        cov = np.diag([(np.random....
[ "def gen_random_clusters(num_clusters):\r\n cluster_list = []\r\n \r\n for dummy_idx in range(num_clusters):\r\n cluster_list.append(alg_cluster.Cluster(set([]), random.uniform(-1, 1), \\\r\n random.uniform(-1, 1), 0, 0))\r\n\r\n return cluster_list"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Is the CFF subr/gsubr call depth > 10?
def com_adobe_fonts_check_cff_call_depth(cff_analysis):
    any_failures = False
    if cff_analysis.glyphs_exceed_max or cff_analysis.glyphs_recursion_errors:
        any_failures = True
        for gn in cff_analysis.glyphs_exceed_max:
            yield FAIL, \
                Message('max-depth', ...
[ "def com_adobe_fonts_check_cff2_call_depth(cff_analysis):\n\n any_failures = False\n\n if cff_analysis.glyphs_exceed_max or cff_analysis.glyphs_recursion_errors:\n any_failures = True\n for gn in cff_analysis.glyphs_exceed_max:\n yield FAIL, \\\n Message('max-depth',\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Is the CFF2 subr/gsubr call depth > 10?
def com_adobe_fonts_check_cff2_call_depth(cff_analysis):
    any_failures = False
    if cff_analysis.glyphs_exceed_max or cff_analysis.glyphs_recursion_errors:
        any_failures = True
        for gn in cff_analysis.glyphs_exceed_max:
            yield FAIL, \
                Message('max-depth', ...
[ "def com_adobe_fonts_check_cff_call_depth(cff_analysis):\n\n any_failures = False\n\n if cff_analysis.glyphs_exceed_max or cff_analysis.glyphs_recursion_errors:\n any_failures = True\n for gn in cff_analysis.glyphs_exceed_max:\n yield FAIL, \\\n Message('max-depth',\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Does the font use deprecated CFF operators or operations?
def com_adobe_fonts_check_cff_deprecated_operators(cff_analysis):
    any_failures = False
    if cff_analysis.glyphs_dotsection or cff_analysis.glyphs_endchar_seac:
        any_failures = True
        for gn in cff_analysis.glyphs_dotsection:
            yield WARN, \
                Message('deprecated-operator-do...
[ "def test_non_varying_glyphs_bug356():\n actual_path = get_temp_file_path()\n font_path = get_input_path('bug356.otf')\n stderr_path = runner(CMD + ['-s', '-e', '-a', '-o', 'cff',\n '-f', font_path, actual_path])\n expected_path = get_expected_path('bug356.txt')\n asser...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if the given text is written in the given language.
def is_correct_language(text, test_lang):
    detected_lang = langdetect.detect(text)
    return detected_lang == test_lang
[ "def is_english(text):\r\n try:\r\n detected_lang = langdetect.detect(text)\r\n return detected_lang == 'en'\r\n except:\r\n return False", "def isNotEnglishText(text):\n if isinstance(text, str):\n not_english = any([ord(c) > 128 for c in text])\n r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if the given text is written in english.
def is_english(text):
    try:
        detected_lang = langdetect.detect(text)
        return detected_lang == 'en'
    except:
        return False
[ "def isNotEnglishText(text):\n if isinstance(text, str):\n not_english = any([ord(c) > 128 for c in text])\n return not_english\n # Not string\n return False", "def isEnglishWord(self, word):\n if re.match('^[a-zA-Z]*$', word):\n return True\n else:\n ret...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Constructor. Does absolutely nothing
def __init__(self):
    None
[ "def __init__(self, *args):\n super(Base, self).__init__()", "def __init__(self):\n this = _coin.new_SoFile()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoByteStream()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Standard sigma clipping algorithm. Used specifically to create a better range when plotting the output spectrum. If range_only is true, only the sigmaclipped range is output.
def sig_clip(data, sigthresh=3., range_only=True, args=False):
    if args:
        range_only = False
    prevlen = 0
    g = np.where(np.fabs(data - np.median(data)) < np.std(data) * sigthresh)
    while ((len(g[0]) < prevlen) | (prevlen == 0)):
        prevlen = len(g[0])
        g = np.where(np.fabs(data - np.median(da...
[ "def sigma_clip(data,sig=3,iters=1,cenfunc='median',varfunc=np.var,maout=False):\n data = np.array(data,copy=False)\n oldshape = data.shape\n data = data.ravel()\n \n mask = np.ones(data.size,bool)\n if iters is None:\n lastrej = sum(mask)+1\n while(sum(mask)!=lastrej):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Enter an email and password then submit the credentials
def submit_new_account_credentials(self, email: str, password: str):
    BaseElement(self.driver, locators.TERMS_CHECKBOX_AGREEMENT_TEXT).wait_until_displayed()
    self.driver.get(construct_a_b_test_control_url(self.driver.current_url))
    TextElement(self.driver, locators.EMAIL_INPUT).set_text(email)
    ...
[ "def post_user():\n required_data = {\"email\", \"password\"}\n return post(cls, None, None, required_data)", "def test_begin_update_credentials_email_password(self):\n self.login()\n\n user = {\n \"current_password\": \"password\",\n \"email\": \"admin2@localhost\",\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Wait until the Personal Info page is displayed
def wait_until_personal_info_displayed(self):
    TextElement(self.driver, locators.DOB_INPUT).wait_until_displayed()
[ "def expose_profile_content():\n for i in range(1, 4):\n scroll_down()\n scroll_up()\n expand_all_sections()\n sleep(1)\n\n is_page_complete = find_elements_by_css(css_selector='span.artdeco-loader__bars') == list()\n if is_page_complete:\n break\n elif...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes in a df and constructs message adjacency list and message matrix
def create_matrix(im_df):
    im_columns = ['sender', 'sender_buddy', 'receiver', 'receiver_buddy', 'time_stamp', 'subject', 'content']
    im_df["sender_user"] = im_df["sender_buddy"].apply(lambda x: map_address_user(x))
    im_df["receiver_user"] = im_df["receiver_buddy"].apply(lambda x: map_address_user(x))
    ...
[ "def get_adjacency(dataframe):\n \n # Number of nodes in the graph\n n_nodes = dataframe.shape[0]\n\n # Calculate distances. Due to the high dimensional data (> 1300 dimensions) the cosine distance is chosen\n distances = np.zeros((n_nodes, n_nodes))\n \n for i, a in dataframe.iterrows():\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plot the kcore nodes of the graph by the date
def plot_kcore_networkx(message_adj_list, k):
    # for time, message_adj_list in message_adj_list_dict.items():
    G = nx.Graph()
    for src in range(len(message_adj_list)):
        for dest in message_adj_list[src]:
            G.add_edge(src, dest)
    G.remove_edges_from(nx.selfloop_edges(G))
    kcore_G = nx.k_c...
[ "def plot_nodes_over_time(self, counts, name):\n plt.plot(\n range(self.start, len(counts['nodes'])), \n counts['nodes'][self.start:])\n plt.xlabel(\"Time\")\n plt.ylabel(\"Number of Nodes\")\n plt.title(name)\n plt.savefig(join(self.plots_path, name, 'nodes....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Colors the graph based on core level
def color_kcore_networkx(message_adj_list):
    loop_count = 0
    # for time, message_adj_list in message_adj_list_dict.items():
    G = nx.Graph()
    for src in range(len(message_adj_list)):
        for dest in message_adj_list[src]:
            G.add_edge(src, dest)
    G.remove_edges_from(nx.selfloop_edges(G))
    ...
[ "def base_color(self):\n ...", "def set_color(self):\n nodes = cmds.ls(sl=True) or []\n if nodes:\n color = cmds.getAttr('{0}.overrideColorRGB'.format(nodes[0]))[0]\n color = QtGui.QColor(color[0]*255, color[1]*255, color[2]*255)\n color = QtWidgets.QColorDial...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a file, returns the number of lines it contains. The current file position should be preserved as long as the file supports tell() and seek().
def count_lines(file):
    old_position = file.tell()
    file.seek(0)
    count = 0
    while file.readline() != '':
        count += 1
    file.seek(old_position)
    return count
[ "def linecountinfile(file_or_filename):\n f = open_file_or_filename(file_or_filename)\n numlines = 0\n for line in f:\n numlines += 1\n f.close()\n return numlines", "def count_lines(file_path):\n # type: (pathlib.Path) -> int\n\n if file_path.suffixes[-1] == '.gz':\n with gzip....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a filename, returns a random line.
def random_line(filename):
    linecount = count_lines(open(filename))
    chosen_line_number = random.randrange(linecount)
    return linecache.getline(filename, chosen_line_number)
[ "def random_word():\n file_name = \"wordbank.txt\"\n number_of_lines = file_len(file_name) - 1\n target_line = randint(0, number_of_lines)\n target_word = linecache.getline(file_name, target_line)\n return target_word.replace(\"\\n\", \"\")", "def get_random_file(path):\n files = get_files(path)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tells whether two json strings, once decoded, are the same dictionary
def assertSameJSON(self, json1, json2):
    return self.assertDictEqual(json.loads(json1), json.loads(json2))
[ "def compare_jsons(json1, json2):\n return json.loads(json1) == json.loads(json2)", "def assert_json_equal(obj1, obj2):\n converted1 = json.loads(json.dumps(obj1))\n converted2 = json.loads(json.dumps(obj2))\n assert converted1 == converted2", "def is_subdict(json1, json2, desc1=\"json1\", desc2=\"j...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a string that merges the id and the description into a single string. This will also replace spaces with underscores.
def merge_id_desc(id: Union[str, int], description: Optional[str]) -> str:
    desc = description.strip().replace(" ", "_")
    aux = click.style(str(id), fg="yellow")
    if not desc == "":
        aux += f"_{desc}"
    return aux
[ "def _description_string(self) -> str:", "def createId(self, title):\n s1 = unicode(r\"'\\;/ &:ÀÁÂÄÇÈÊÉËÌÎÍÏÒÔÓÖÙÛÚÜÝŸàâáäçèêéëìîíïòôóöùûúüýÿÑñ\", 'utf-8')\n s2 = unicode(r'-------aaaaceeeeiiiioooouuuuyyaaaaceeeeiiiioooouuuuyyNn', 'utf-8')\n\n # XXX This is not efficient at all\n # but...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a string representing hours and minutes and returns the integers.
def parse_ore_minuti(s: str) -> Tuple[int, int]:
    # strip spaces
    s = s.strip()
    # detect which format is used
    if ":" in s:
        # hh:mm
        ss = s.split(":")
        h = int(ss[0])
        m = int(ss[1]) if len(ss) > 1 else 0
    else:
        # parse hour only (i.e. 3 or 3.5)
        fh = float(s)
        h = ...
[ "def get_hhmm(time_str): \n (hh, mmxx) = time_str.split(':')\n hh = int(hh)\n mm = int(mmxx[:2])\n xx = mmxx[2:]\n if xx == 'PM': \n hh += 12\n return [hh, mm]", "def get_minutes(t):\n time = [int(i) for i in t.split(':')]\n return time[0] + time[1] / 60", "def convert_start_to_mi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Yield the elements in Link instance s in reverse order.
>>> list(rev(Link(1, Link(2, Link(3)))))
[3, 2, 1]
>>> next(rev(Link(2, Link(3))))
3
def rev(s):
    if s is not Link.empty:
        yield from rev(s.rest)
        yield s.first
[ "def reverse(self):\n\n '''\n 1->2->3 .... 3->2->1\n '''\n\n # use deep copy because python is pass-by-assignment\n curr = copy.deepcopy(self.head)\n nextNode = None\n prevNode = None\n\n while(curr):\n nextNode = curr.next\n curr.next = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The reference quantity used to perform conversions
def _reference(self):
    rq = 1 * unit_registry['dimensionless']
    for u, d in self.dimensionality.items():
        rq = rq * u._reference**d
    return rq * self.magnitude
[ "def convert_to_reference(self):\n\n factor, units = self._convert_to_reference(self.units)\n\n return self.__class__(self.magnitude * factor, units)", "def reftemp(self) -> Quantity:\r\n return self._temperature", "def quantity(self):\n if self._quantity is None:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Performs the equivalent of ndarray.put() but enforces units: values must be a Quantity with the same units as self.
def put(self, indicies, values, mode='raise'):
    if not isinstance(values, Quantity):
        values = Quantity(values)
    if values._dimensionality != self._dimensionality:
        values = values.rescale(self.units)
    self.magnitude.put(indicies, values, mode)
[ "def _build_quantity_class(registry, force_ndarray):\n\n @total_ordering\n class _Quantity(object):\n \"\"\"Quantity object constituted by magnitude and units.\n\n :param value: value of the physical quantity to be created.\n :type value: str, Quantity or any numeric type.\n :param...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a tuple for pickling a Quantity.
def __reduce__(self):
    return (_reconstruct_quantity,
            (self.__class__, np.ndarray, (0, ), 'b', ),
            self.__getstate__())
[ "def to_tuple(self) -> Tuple:\n return self.symbol, tuple(self.dofs), self.factor, tuple(tuple(t) for t in self.qn_list)", "def multiple_quantity(self):\n return self._multiple_quantity", "def get_queue_tuple(self):\n return tuple(self.queue)", "def _build_quantity_class(registry, force_n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes as input a file as a list of lines. Returns a dict that maps filenames to MD5 hashes.
def dict_from_md5_file(md5list):
    file2hash = {}
    for line in md5list:
        line = line.split(' ')
        hash = line[0]
        file = line[-1].lstrip('./')
        file2hash[file] = hash
    return file2hash
[ "def hash_files(file_list):\n # Create a list of tuples for the original files\n hashes = []\n\n for filepath in file_list:\n md5_hash = get_md5(filepath)\n filename = filepath.split(\"/\")[-1]\n hashes.append((filename, md5_hash))\n\n return hashes", "def readsums(filepath):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the request_id of this HistoricalImportStatus. Request id of the historical import in the organization
def request_id(self, request_id):
    self._request_id = request_id
[ "def request_element_id(self, request_element_id):\n\n self._request_element_id = request_element_id", "def put_request_id(self, request_id):\n insert_query = \"insert or replace into %s (request_id, time_stamp) values('%s', %d)\" % (\n self.table_name, request_id, time.time()\n )\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the date_import_ended of this HistoricalImportStatus.
def date_import_ended(self):
    return self._date_import_ended
[ "def date_import_ended(self, date_import_ended):\n \n self._date_import_ended = date_import_ended", "def date_import_started(self):\n return self._date_import_started", "def end_date(self):\r\n return self._end_date", "def end_date(self):\n return self.end.date()", "def ge...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the date_import_ended of this HistoricalImportStatus.
def date_import_ended(self, date_import_ended):
    self._date_import_ended = date_import_ended
[ "def date_import_ended(self):\n return self._date_import_ended", "def date_import_started(self, date_import_started):\n \n self._date_import_started = date_import_started", "def end_date_before(self, end_date_before):\n\n self._end_date_before = end_date_before", "def setEndPeriodi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the date_import_started of this HistoricalImportStatus.
def date_import_started(self):
    return self._date_import_started
[ "def date_import_started(self, date_import_started):\n \n self._date_import_started = date_import_started", "def get_start_date(self):\n return date.fromordinal(self.start_date_ordinal)", "def start_date(self):\n return self._moment.get(\"startDate\")", "def _start_date(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the date_import_started of this HistoricalImportStatus.
def date_import_started(self, date_import_started):
    self._date_import_started = date_import_started
[ "def date_import_started(self):\n return self._date_import_started", "def start_datetime(self, start_datetime: datetime):\n\n self._start_datetime = start_datetime", "def date_import_ended(self, date_import_ended):\n \n self._date_import_ended = date_import_ended", "def started(sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the date_created of this HistoricalImportStatus.
def date_created(self):
    return self._date_created
[ "def created(self):\n return time.strftime('%Y-%m-%d %H:%M %Z', time.localtime(self.createdDate))", "def date_import_started(self):\n return self._date_import_started", "def get_creation_date_time(self):\n return self._root[\"CreationDateTime\"]", "def creation_datetime(self):\n re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the date_created of this HistoricalImportStatus.
def date_created(self, date_created):
    self._date_created = date_created
[ "def created_since(self, created_since):\n\n self._created_since = created_since", "def date_import_started(self, date_import_started):\n \n self._date_import_started = date_import_started", "def SetCommentCreatedOn(self, _date):\n self.comment_created_on = _date", "def create_date...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the date_modified of this HistoricalImportStatus.
def date_modified(self):
    return self._date_modified
[ "def last_modified(self) -> datetime:\n return self.workspace.get_last_modified(self.file)", "def last_modified_at(self) -> \"datetime\":\n return self._attrs.get(\"last_modified_at\")", "def last_modified(self):\n return self.metadata.last_modified", "def lastmodifieddate(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the date_modified of this HistoricalImportStatus.
def date_modified(self, date_modified):
    self._date_modified = date_modified
[ "def last_modified_at(self, last_modified_at: \"datetime\"):\n self._attrs[\"last_modified_at\"] = last_modified_at", "def setModified(self, modified=True):\n if modified != self.modified:\n self.modified = modified\n self.allActions['FileSave'].setEnabled(modified)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the active of this HistoricalImportStatus. Whether this historical import is active or not
def active(self, active):
    self._active = active
[ "def is_active(self, is_active):\n self._is_active = is_active", "def _set_active(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"active\", rest_name=\"active\", parent=self, path_helper=self._path_hel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the type of this HistoricalImportStatus. Whether this historical import is of type csv or json
def type(self, type):
    allowed_values = ["Csv", "Json"]
    if type.lower() not in map(str.lower, allowed_values):
        # print("Invalid value for type -> " + type)
        self._type = "outdated_sdk_version"
    else:
        self._type = type
[ "def set_line_type(self, line, type):\n self._set_line_type(line, type)", "def set_type(self, type):\n return _raw_util.raw_message_set_type(self, type)", "def set_content_type( self, type ):\n self.headers[ \"content-type\" ] = type", "def set_type(self, entry_type):\n if entry_ty...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Bench a log probability distribution.
def bench_log_probability( distribution, n=10000000, symbol=5 ):
    tic = time.time()
    for i in range(n):
        logp = distribution.log_probability( symbol )
    return time.time() - tic
[ "def benchmark_distribution_log_probabilities():\n\t\n\tdistributions = [ UniformDistribution( 0, 17 ),\n\t NormalDistribution( 7, 1 ),\n\t LogNormalDistribution( 7, 1 ),\n\t ExponentialDistribution( 7 ),\n\t GammaDistribution( 7, 3 ),\n\t ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run log probability benchmarks.
def benchmark_distribution_log_probabilities():
    distributions = [ UniformDistribution( 0, 17 ),
                      NormalDistribution( 7, 1 ),
                      LogNormalDistribution( 7, 1 ),
                      ExponentialDistribution( 7 ),
                      GammaDistribution( 7, 3 ),
                      GaussianKe...
[ "def bench_log_probability( distribution, n=10000000, symbol=5 ):\n\n\ttic = time.time()\n\tfor i in range(n):\n\t\tlogp = distribution.log_probability( symbol )\n\treturn time.time() - tic", "def log_bernoulli_with_logits(x, logits):\n\tlog_prob = -bce(input=logits, target=x).sum(-1)\n\treturn log_prob", "def ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The user workspace link repository.
def user_workspace_link_repository(self) -> UserWorkspaceLinkRepository:
[ "def current_user_repos_url():\n return _BASE_URL_V1 % 'user/repositories'", "def pointer(self):\n return self._repo", "def repository(self):\n return self._data['repository']", "def repo_dir(self):", "def link_workspace_document(workspace_doc_uid):", "def full_repository(self):\n ba...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The vacation collection repository.
def vacation_collection_repository(self) -> VacationCollectionRepository:
[ "def habit_collection_repository(self) -> HabitCollectionRepository:", "def project_collection_repository(self) -> ProjectCollectionRepository:", "def chore_collection_repository(self) -> ChoreCollectionRepository:", "def inbox_task_collection_repository(self) -> InboxTaskCollectionRepository:", "def big_pl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The project collection repository.
def project_collection_repository(self) -> ProjectCollectionRepository:
[ "def chore_collection_repository(self) -> ChoreCollectionRepository:", "def project_repository(self) -> ProjectRepository:", "def person_collection_repository(self) -> PersonCollectionRepository:", "def habit_collection_repository(self) -> HabitCollectionRepository:", "def big_plan_collection_repository(sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The project database repository.
def project_repository(self) -> ProjectRepository:
[ "def project_collection_repository(self) -> ProjectCollectionRepository:", "def cloud_db(self):\r\n return self._get_client(\"database\")", "def _get_database_directory():\n return get_database_directory()", "def _get_db_url(self):\n return os.path.join(self.groc_dir, self.db_name)", "d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The inbox task collection repository.
def inbox_task_collection_repository(self) -> InboxTaskCollectionRepository:
[ "def email_task_collection_repository(self) -> EmailTaskCollectionRepository:", "def inbox_task_repository(self) -> InboxTaskRepository:", "def slack_task_collection_repository(self) -> SlackTaskCollectionRepository:", "def email_task_repository(self) -> EmailTaskRepository:", "def get_inbox(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The inbox task repository.
def inbox_task_repository(self) -> InboxTaskRepository:
[ "def inbox_task_collection_repository(self) -> InboxTaskCollectionRepository:", "def email_task_repository(self) -> EmailTaskRepository:", "def email_task_collection_repository(self) -> EmailTaskCollectionRepository:", "def slack_task_repository(self) -> SlackTaskRepository:", "def slack_task_collection_rep...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The habit collection repository.
def habit_collection_repository(self) -> HabitCollectionRepository:
[ "def chore_collection_repository(self) -> ChoreCollectionRepository:", "def vacation_collection_repository(self) -> VacationCollectionRepository:", "def project_collection_repository(self) -> ProjectCollectionRepository:", "def person_collection_repository(self) -> PersonCollectionRepository:", "def email_t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The chore collection repository.
def chore_collection_repository(self) -> ChoreCollectionRepository:
[ "def habit_collection_repository(self) -> HabitCollectionRepository:", "def project_collection_repository(self) -> ProjectCollectionRepository:", "def smart_list_collection_repository(self) -> SmartListCollectionRepository:", "def person_collection_repository(self) -> PersonCollectionRepository:", "def vaca...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The big plan collection repository.
def big_plan_collection_repository(self) -> BigPlanCollectionRepository:
[ "def big_plan_repository(self) -> BigPlanRepository:", "def project_collection_repository(self) -> ProjectCollectionRepository:", "def chore_collection_repository(self) -> ChoreCollectionRepository:", "def habit_collection_repository(self) -> HabitCollectionRepository:", "def vacation_collection_repository(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The big plan repository.
def big_plan_repository(self) -> BigPlanRepository:
[ "def big_plan_collection_repository(self) -> BigPlanCollectionRepository:", "def plan_dir(self) -> Path:\n return Path(self.root_dir) / '.git' / 'plan'", "def project_repository(self) -> ProjectRepository:", "def project_collection_repository(self) -> ProjectCollectionRepository:", "def fast_into_rep...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The smart list collection repository.
def smart_list_collection_repository(self) -> SmartListCollectionRepository:
[ "def smart_list_repository(self) -> SmartListRepository:", "def smart_list_item_repository(self) -> SmartListItemRepository:", "def chore_collection_repository(self) -> ChoreCollectionRepository:", "def smart_list_tag_repository(self) -> SmartListTagRepository:", "def habit_collection_repository(self) -> Ha...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The smart list repository.
def smart_list_repository(self) -> SmartListRepository:
[ "def smart_list_item_repository(self) -> SmartListItemRepository:", "def smart_list_collection_repository(self) -> SmartListCollectionRepository:", "def smart_list_tag_repository(self) -> SmartListTagRepository:", "def list_manager(self):\n return ListManager(self)", "def listobject(self):\n r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The smart list tag repository.
def smart_list_tag_repository(self) -> SmartListTagRepository:
[ "def smart_list_repository(self) -> SmartListRepository:", "def smart_list_item_repository(self) -> SmartListItemRepository:", "def smart_list_collection_repository(self) -> SmartListCollectionRepository:", "def test_repo_list_tags(self):\n pass", "def tags(self) -> List:", "def list_tags():\n t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The smart list item repository.
def smart_list_item_repository(self) -> SmartListItemRepository:
[ "def smart_list_repository(self) -> SmartListRepository:", "def smart_list_collection_repository(self) -> SmartListCollectionRepository:", "def smart_list_tag_repository(self) -> SmartListTagRepository:", "def list_manager(self):\n return ListManager(self)", "def chore_collection_repository(self) -> ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The metric collection repository.
def metric_collection_repository(self) -> MetricCollectionRepository:
[ "def metric_entry_repository(self) -> MetricEntryRepository:", "def chore_collection_repository(self) -> ChoreCollectionRepository:", "def habit_collection_repository(self) -> HabitCollectionRepository:", "def big_plan_collection_repository(self) -> BigPlanCollectionRepository:", "def project_collection_rep...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The metric entry repository.
def metric_entry_repository(self) -> MetricEntryRepository:
[ "def metric_collection_repository(self) -> MetricCollectionRepository:", "def get_metric_descriptor(self): # noqa\n with self._md_cache_lock:\n if self._metric_descriptor is None:\n self._metric_descriptor = metric_descriptor.MetricDescriptor(\n self.name,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The person collection repository.
def person_collection_repository(self) -> PersonCollectionRepository:
[ "def chore_collection_repository(self) -> ChoreCollectionRepository:", "def project_collection_repository(self) -> ProjectCollectionRepository:", "def habit_collection_repository(self) -> HabitCollectionRepository:", "def people_by_jurisdiction(jurisdiction_id):\n return collection_by_jurisdiction(\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The push integration group repository.
def push_integration_group_repository(self) -> PushIntegrationGroupRepository:
[ "def project_repository(self) -> ProjectRepository:", "def push(self):\n command = \"hg push\"\n call(command, shell=True, stdout=PIPE, stderr=PIPE)\n hg_log.debug('push %s to central pacha' % self.path)", "def push():\n#\tupdate_local_repo()\n#\ttest()\n\tpush_repo_changes()\n\tupdate_remo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The Slack task collection repository.
def slack_task_collection_repository(self) -> SlackTaskCollectionRepository:
[ "def slack_task_repository(self) -> SlackTaskRepository:", "def inbox_task_collection_repository(self) -> InboxTaskCollectionRepository:", "def email_task_collection_repository(self) -> EmailTaskCollectionRepository:", "def inbox_task_repository(self) -> InboxTaskRepository:", "def project_collection_reposi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The Slack task repository.
def slack_task_repository(self) -> SlackTaskRepository:
[ "def slack_task_collection_repository(self) -> SlackTaskCollectionRepository:", "def inbox_task_repository(self) -> InboxTaskRepository:", "def email_task_repository(self) -> EmailTaskRepository:", "def project_repository(self) -> ProjectRepository:", "async def repo(self, ctx, repository):\n \n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The email task collection repository.
def email_task_collection_repository(self) -> EmailTaskCollectionRepository:
[ "def email_task_repository(self) -> EmailTaskRepository:", "def inbox_task_collection_repository(self) -> InboxTaskCollectionRepository:", "def slack_task_collection_repository(self) -> SlackTaskCollectionRepository:", "def inbox_task_repository(self) -> InboxTaskRepository:", "def project_collection_reposi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The email task repository.
def email_task_repository(self) -> EmailTaskRepository:
[ "def email_task_collection_repository(self) -> EmailTaskCollectionRepository:", "def inbox_task_repository(self) -> InboxTaskRepository:", "def inbox_task_collection_repository(self) -> InboxTaskCollectionRepository:", "def slack_task_repository(self) -> SlackTaskRepository:", "def slack_task_collection_rep...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The fast info repository.
def fast_into_repository(self) -> FastInfoRepository:
[ "def _get_repo_info(self):\n repo: git.Repo = git.Repo(h4dconfig.ROOT_DIR)\n info = GitInfo(\n repo.active_branch.name, repo.active_branch.commit.hexsha, repo.is_dirty()\n )\n if info.is_dirty:\n repo.git.reset()\n with open(self.diff_dir / \"working_dir....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handles a processingInstruction event. Differs from the overridden method by writing the tag with no "?" at the end.
def processing_instruction(self, target, data):
    if self._element_name:
        self.write_ascii('>')
        self._element_name = None
    self.write_ascii('<?')
    self.write_encode(target, 'processing-instruction target')
    if data:
        self.write_ascii(' ')
        self.wr...
[ "def handle_starttag(self, tag, attrs) -> None:\n if tag in self.keeptags:\n self.textdata += f'<{tag}>'", "def _process_event(self, operation, event):\n\n event_type, data, pos = event\n if event_type == START:\n tag, attrs = data\n\n # check how these tag sh...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Factory method to instantiate a NotCompetent_byRoles
def manage_addNotCompetent_byRoles(self, id, title='', REQUEST=None):
    obj = NotCompetent_byRoles(id, title=title)
    self._setObject(id, obj)
    if REQUEST is not None:
        qs = 'manage_tabs_message=NotCompetent_byRoles+added.'
        my_url = self.absolute_url()
        REQUEST['RESPONSE'].redirect(f'{my_u...
[ "def test_not_role(self):\n BonitaUser.find_by_role(role='coucou')", "def init_role(role): # -> None:\n ...", "def require_role(role):\n\n def make_wrapper(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n if not role in g.roles:\n raise Forbidden('RBAC For...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Replace unknown cells with an array of possible choices.
def populateUnknown(self):
    choices = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    for row in range(9):
        for column in range(9):
            if not self.isSet(row, column):
                self.grid[row][column] = list(choices)
                self.initial += 1
    return 0
[ "def call_select_on_initial_values(\n grid: np.ndarray, candidate_per_constraint, constraint_map_per_cell\n):\n for (row_index, col_index), cell_value in np.ndenumerate(grid):\n if cell_value == 0:\n continue\n select(\n candidate_per_constraint,\n constraint_map...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test full metaanalysis stream.
def test_meta_analysis(self):
    pass
[ "def test_musicals_get(self):\n pass", "def test_read_from_stream(self):\n aavf = parser.Reader(open(SAMPLE_FILE, \"r\")).read_records()\n record_list = [record for record in aavf]\n\n assert isinstance(aavf, AAVF)\n\n assert aavf.metadata.get(\"fileformat\") == \"AAVFv1.0\", \\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set a key in the database to value.
async def set(self, key: str, value: str) -> None:
    async with aiohttp.ClientSession() as session:
        async with session.post(self.db_url, data={key: value}) as response:
            response.raise_for_status()
[ "def direct_set(self, key: str, value):\n set_store_value(self.store, key, value)", "def __setitem__(self, key, value):\n query = self.store.update().where(self.store.c.key == key).values(value=value)\n result = self.conn.execute(query)\n if result.rowcount == 0:\n query = s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
load this history object from the model_dir
def load(cls, model_dir) -> 'History':
    path = os.path.join(model_dir, 'history.json')
    hist = load_json(path)
    return cls(*hist)
[ "def load_model(self):\n filename = filedialog.askopenfilename()\n if filename:\n self.model_path = filename\n self.reload()", "def load_model(self):\n raise NotImplementedError", "def load_model(self, path):\n with open(path, \"rb\") as in_file:\n se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
the last epoch in the complete history
def last_epoch(self) -> int:
    if len(self):
        return self[-1].epoch
    return -1
[ "def get_last_epoch(self):\n return self.buffer[:, -(self.EPOCH_LENGTH * self.fs):]", "def epoch(self):\n\n return self._last_epoch", "def get_current_epoch(exp_path):\n last_save = os.listdir(exp_path)\n last_save = [int(''.join(c for c in name if c.isdigit()))\n for name in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
save this history to a file history.json in ``model_dir``.
def save(self, model_dir: str):
    path = os.path.join(model_dir, 'history.json')
    save_json(self._history, path)
[ "def save_history_model(history, filename, storage_location):\n \n # Pickle the result\n location = os.path.join(storage_location, filename + '.pkl')\n \n with open(location, 'wb') as output_file:\n pickle.dump(history, output_file)", "def write_history(self, history):\n\n logging.deb...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets up the data matrix (number of samples x 6 columns) and the `analysis_settings` dictionary with algorithm parameters then (by default) starts the importance sampler routine. This is the driver routine that you will use to load your data matrix and also set parameters for the curvefitting procedure. Running the func...
def run_importance_sampler(analysis_settings=None, run_sampler=True):
    if analysis_settings is None:
        # Populating the analysis_settings struct with algorithm settings
        analysis_settings = {}
        analysis_settings['working_dir'] = '.'  # specifies the root subdirectory to find data and store resul...
[ "def setup(self):\n # Convert initial params into matlab array\n self.initial_params_mat = matlab.double(list(self.initial_params))\n self.eng.workspace['x_data'] = matlab.double(self.data_x.tolist())\n self.eng.workspace['y_data'] = matlab.double(self.data_y.tolist())\n\n self.en...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get data from textbox
def get_text(self):
    data = self.txtbox.get(1.0, END)
    print(data)
[ "def get_text(self):\n data = self.txtbox.get(1.0, END)\n test = self.txtbox.selection_get()", "def Get_ActiveControl_TextBox_Data(self,SheetName,TextboxName):\n ws = self.wb.Worksheets(SheetName)\n return ws.Shapes(TextboxName).OLEFormat.Object.Object.Value", "def fetch_user_input(s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save data to a file
def save_data(self, data):
    file = self.get_file()
    with open(file, "w") as f:
        f.write(data)
[ "def write_to_file(self, data):", "def save_data_to_file(file, data):\n msg = \"Saving data to file {}...\".format(file)\n LOGGER.info(msg)\n COLORED_PRINT(msg)\n os.makedirs(os.path.dirname(file), exist_ok=True)\n with open(file, \"a+\") as f:\n f.write(data)", "def write_file(self, data)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a square object based on how good the player is playing.
points: number of points given to the player if the square is hit.
x, y: position of the center of the square.
a: length of the side of the square.
vx, vy: velocity of the square.
color: color of the square.
image: image of the square.
def __init__(self, screen, player_points):
    self.points = random.randint(50, 100) + int((player_points / 2) ** 0.5)
    self.x = random.randint(int(W_WIDTH * 0.1), int(W_WIDTH * 0.9))
    self.y = random.randint(int(W_HEIGHT * 0.1), int(W_HEIGHT * 0.9))
    self.a = int(2 * (W_WIDTH + W_HEIGHT) / s...
[ "def __init__(self, screen, player_points):\n self.points = random.randint(20, 50) + int((player_points / 5))\n\n self.x = random.randint(int(W_WIDTH * 0.1), int(W_WIDTH * 0.9))\n self.y = random.randint(int(W_HEIGHT * 0.1), int(W_HEIGHT * 0.9))\n\n self.r = int((W_WIDTH + W_HEIGHT) / se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a ball object based on how good the player is playing.
points: number of points given to the player if the ball is hit.
x, y: position of the center of the ball.
r: radius of the ball.
vx, vy: velocity of the ball.
color: color of the ball.
image: image of the ball.
def __init__(self, screen, player_points):
    self.points = random.randint(20, 50) + int((player_points / 5))
    self.x = random.randint(int(W_WIDTH * 0.1), int(W_WIDTH * 0.9))
    self.y = random.randint(int(W_HEIGHT * 0.1), int(W_HEIGHT * 0.9))
    self.r = int((W_WIDTH + W_HEIGHT) / self.points)
    ...
[ "def __draw_objects(self, img):\n if self.ball:\n (x, y), radius = self.ball\n cv2.circle(img, (int(x), int(y)), int(radius), (0, 255, 0), 2)\n cv2.putText(img, \"BALL\", (int(x)+15, int(y)-15), cv2.FONT_ITALIC, 0.6, (0, 0, 255, 255), 2)\n for goal in [self.goal_y, sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes points, scored in this game, to file data.txt.
def write_to_file(points):
    output = open("data.txt", 'a')
    print(points, file=output)
    output.close()
[ "def write_score(self):\r\n file = open('scores.txt', 'a')\r\n row = \"{}: {}\\n\".format(self.name, self.score)\r\n file.write(row)", "def update_scores(self) -> None:\n with open('highscores.txt', 'w') as f:\n for user, points in self.scores:\n f.write(f'{us...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Flush the endpoint queue into the given backend queues. This method should assign each query in the endpoint_queue to a backend in the backend_queues. Queries are assigned by popping them from the endpoint queue and pushing them onto a backend queue. The method must also return a set of all backend tags so that the cal...
def flush(self, endpoint_queue, backend_queues):
    assigned_backends = set()
    return assigned_backends
[ "def flush_signal_queue(self, apps, options):\r\n from django.conf import settings\r\n from signalqueue import SQ_RUNMODES as runmodes\r\n from signalqueue.worker import backends\r\n \r\n queue_name = options.get('queue_name')\r\n queues = backends.ConnectionHandler(setting...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to retrieve information about the closest ndt server
def get_ndt_server():
    mlabns = urllib2.urlopen('http://mlab-ns.appspot.com/ndt').read()  # returns a JSON object referring to the closest mlab server
    server = json.loads(mlabns)['fqdn'].encode('ascii')
    return server
[ "def find_best_server(self):\n pass", "def server_information(self):", "def find_next_server(self,server):\n index = self.server_table.index(server)\n for server_index in range(index,len(self.server_table)):\n if(self.server_table[server_index].ip != '-1'):\n retur...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that takes in NDT output and returns whether or not that run was successfully executed.
def ndt_success(ndt_output):
    lower_output = ndt_output.lower()
    if "fail" in lower_output or "done" not in lower_output:
        return False
    return True
[ "def command_successful(args):\n return_code, _ = run_command(args)\n return return_code == 0", "def check_data_on_vm(vm_name, command_to_run, expected_output):\n _, out, _ = get_vm_data(vm_name, command_to_run)\n return expected_output in out", "def check_run_script_stdout(self, tc):\n isdif...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that runs ndt on the client. Creates a log file 'client.log' and appends to the testID log file for today.
def run_ndt():
    print "Running NDT test."
    ndt_server = get_ndt_server()
    ndt_testID = create_testID()
    print "Client " + str(clientID) + ": Running ndt test at " + time.strftime("%x,%H:%M:%S")
    print "Test id: " + ndt_testID
    web100path = configPath + "web100clt"
    test_output = subprocess.Popen([ web100pa...
[ "def start_new_testLog():\n\n open(clientPath+\"yesterdays_testIDs.log\", 'w').close()\n shutil.copyfile(clientPath+\"todays_testIDs.log\", clientPath+\"yesterdays_testIDs.log\")\n \n today= open(clientPath+\"todays_testIDs.log\", 'w')\n today.write(time.strftime(\"%m/%d/%Y\")+\"\\n\")\n today.close()",...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that enters one test time into the scheduler. Checks to make sure that the event is in the future
def schedule_one_task(start_time, function):
    now = time.localtime()
    if start_time > now:
        governor.enterabs(time.mktime(start_time), 1, function, ())
[ "def _run(self, upper, updating_until):\n \n # this is the main event loop of the simulator!\n while len(self._eventlist) > 0:\n t = self._eventlist.get_min()\n if t >= upper: break\n self._process_one_event()\n\n # after all the events, make sure we don'...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that enters all test times for one day into the scheduler
def scheduleTests(schedule):
    for task in schedule:
        schedule_one_task(task, run_ndt)
[ "def run(self):\n schedule.every().day.at(\"13:02\").do(self.fn)\n while True:\n schedule.run_pending()\n time.sleep(1)", "def test_get_schedule(self):\n pass", "def scheduled(self, scheduler):", "def test_create_schedule(self):\n pass", "def test_50_calenda...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }