query: string (length 9 to 9.05k)
document: string (length 10 to 222k)
negatives: list (length 19 to 20)
metadata: dict
Marshal functions for a python primitive. This is the base case for our recursive function.
def primitive_marshal_funs(type_ref):
    assert type_ref.type_ in python_primitives
    return ('identity', 'identity')
[ "def addPrimitive(primitive):", "def native(self) -> Structure:", "def RuntimeType(self) -> _n_0_t_0:", "def encode_primitive(self, validator, value):\n # type: (bv.Primitive, typing.Any) -> typing.Any\n raise NotImplementedError", "def get_pack_py_def(self, indent=4, level=0):\n self.c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the marshal functions for a map type_ref. These may contain many layers of nested function calls,
def map_marshal_funs(type_ref): assert type_ref.type_ == 'Map' type_params_dict = dict(type_ref.type_params) key_type_ref = type_params_dict['Key'] #key_marshal, key_unmarshal = type_ref_marshal_funs(key_type_ref) # SPECIAL TREATMENTFOR KEYS assert key_type_ref.type_ == 'string' key_marsha...
[ "def primitive_marshal_funs(type_ref):\n assert type_ref.type_ in python_primitives\n return ('identity', 'identity')", "def list_marshal_funs(type_ref):\n assert type_ref.type_ == 'List'\n \n item_type_ref = dict(type_ref.type_params)['Item']\n item_marshal, item_unmarshal = type_ref_marshal_fu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the marshal functions for a list data type.
def list_marshal_funs(type_ref): assert type_ref.type_ == 'List' item_type_ref = dict(type_ref.type_params)['Item'] item_marshal, item_unmarshal = type_ref_marshal_funs(item_type_ref) template = 'transform_list(%s)' marshal_fun = template % item_marshal unmarshal_fun = template % item_unma...
[ "def get_function_record_types(self):\n return # osid.type.TypeList", "def get_function_search_record_types(self):\n return # osid.type.TypeList", "def list_multiple_data_types():\n return [93, 77, 'fiftyfive', 54, 44, 31, 26, 20, 17, 3]", "def getListItemTypes(self, *args) -> \"SoTypeList ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
inserting numbers into the cells neighboring the insects in the raw_array, which initially contains only insect data
def populateWithNumbers(self, raw_array, scaling): for y in range(self.height): for x in range(self.width): if raw_array[y][x] == -1: for i in range(-1, 2): for j in range(-1, 2): # checking bounds ...
[ "def contiguous_pack2(arr, startAt=0):\n unqItms = _N.unique(arr) # 5 uniq items\n nUnqItms= unqItms.shape[0] # \n\n contg = _N.arange(0, len(unqItms)) + unqItms[0]\n nei = _N.where(unqItms > contg)[0]\n for i in xrange(len(nei)):\n arr[_N.where(arr == unqItms[nei[i]])[0]] = contg[ne...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
inserting canvases into the tiles from the final raw_array data
def addCanvasToTiles(self, raw_array, scaling): self.tile_array = [] self.insects_left = 0 for y in range(self.height): tmp = [] # generator returning random insect name from insect_list used for inserting appropriate canvas insect_generator = (random.choice(s...
[ "def tile_canvas(self):\r\n if not self._tile_canvas:\r\n\r\n # make blank tile_canvas\r\n self._tile_canvas = Image.new(\"RGBA\", (\r\n (np.ptp(self._X) + 1) * TILE_SIZE,\r\n (np.ptp(self._Y) + 1) * TILE_SIZE)) # (x,y) peak to peak = number of tiles * TIL...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
binding events to every tile on the board
def bindBoardEvents(self): assert self.tile_array for x in self.tile_array: for y in x: y.tile.bind("<Button-1>", lambda _, y=y: self.uncoverCheck(y)) if DEBUG_FLAG: y.tile.bind("<Button-2>", lambda _, y=y: self.uncoverAll()) ...
[ "def handleClick(self, event):\n print(str(event.x) + ' ' + str(event.y))\n for listener in self.listeners:\n listener.handleClick(self.coordsToGrid(event.x, event.y))", "def _handle_tile_state_click(self, x, y, button):\n add = (button == sdl2.SDL_BUTTON_LEFT)\n tile = self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
checking if there are any adjacent clear tiles and uncovering them
def uncoverClear(self, tile, insect_check=False): stack = [tile] visited = set() while len(stack) > 0: tmp_tile = stack.pop() if tmp_tile.tile_name == "tile_clear" and tmp_tile not in visited: for i in range(-1, 2): for j in range(-1, 2...
[ "def find_clearing_to_land():\n # Find a place on the lower half of the screen where there is no identifiable objects\n # Move closer... check again... repeat till height is near 0\n # land and power down\n pass", "def get_adjacent_tiles(self):\n return list(set(self.corner_right.get_tiles()) & set...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make an example for training and testing. Outputs a tuple (label, features) where label is +1 if capital letters are the majority, and -1 otherwise; and features is a list of letters.
def get_example(): features = random.sample(string.ascii_letters, NUM_SAMPLES) num_capitalized = len([ letter for letter in features if letter in string.ascii_uppercase ]) num_lowercase = len([ letter for letter in features if letter in string.ascii_lowercase ]) if num_capitalized > num_lowercase: ...
[ "def make_example(sequence, label):\n ex = tf.train.SequenceExample()\n # Context: sequence length and label\n ex.context.feature[LEN_FEAT_NAME].int64_list.value.append(len(sequence))\n ex.context.feature[LABEL_FEAT_NAME].float_list.value.append(label)\n\n # Feature lists: words\n fl_tokens = ex.f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Resample dataset so that the result contains the same number of lines per category in categ_column.
def rebalance_by_categorical(dataset: pd.DataFrame, categ_column: str, max_lines_by_categ: int = None, seed: int = 1) -> pd.DataFrame: categs = dataset[categ_column].value_counts().to_dict() max_lines_by_categ = max_lines_by_categ if max_lines_by_categ else min(categs.values()) ...
[ "def preprocess(df, freq, horiz):\n\n # resample demand to `freq`\n if \"category\" not in df:\n df.rename({\"family\": \"category\"}, axis=1, inplace=True)\n\n df.loc[:,\"timestamp\"] = pd.DatetimeIndex(df[\"timestamp\"])\n\n df2 = resample(df, freq, horiz)\n\n df2[\"t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Makes two brackets, one from (x11, y1) to (x12, y1) and the second from (x21, y2) to (x22, y2), and connects them with a line (with given arrow style)
def make_range_connection_bracket(x11: float, x12: float, x21: float, x22: float, y1: float, y2: float, arrow_style: m_patches.ArrowStyle, color: str, opacity: float = 1., linewidth: float = 1.): middle_1 = (x11 + x21) / 2 middle_2 = (x12 + x22) / 2 y11 = y1 + 1 y21 = y...
[ "def arrow(x0,x1,y0,y1,col=\"Black\"):\n ob = lines([(x0,y0),(x1,y1)],col)\n ob[\"arrows\"] = quoteString(\"to\")\n return ob", "def _draw_arrow(self, x1, y1, x2, y2, tag=None):\n line = self.canvas.create_line((x1, y1), (x2, y2))\n poly = self.canvas.create_polygon((x2-13, y2+5),\\\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
this tests that, no matter what the input is, the result matches the dns1123 validation regex
def test_clean_name_dns1123(self, input_value, func, max_size):
    result = func(input_value)
    # this is a regex used by k8s to validate the right name for dns1123
    assert re.match(r"(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?", result)
    assert len(result) <= max_size
[ "def test_ip_adress(result):\n\n assert re.match(r'^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.)'\n r'{3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$',\n result.json()['query']), \\\n \"The value of a 'query' field is not correct IP address.\"", "def test_sanitize_j...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Override to ensure that the ``choices`` argument is a ``Choices`` object.
def __init__(self, choices, *args, **kwargs):
    super(NamedExtendedChoiceFormField, self).__init__(*args, **kwargs)
    if not isinstance(choices, Choices):
        raise ValueError("`choices` must be an instance of `extended_choices.Choices`.")
    self.choices = choices
[ "def assert_choices(self, choices):\n self.assert_in_help('choices: %s' % choices)", "def _validate_choices(self, value):\n # Check choices if passed\n if self.choices:\n if value not in self.choices:\n raise ValidationError(\n 'Value {} is restric...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
(int) -> int

Return the number of happy numbers that are in the range of n

>>> count_happy_numbers(20002)
5
>>> count_happy_numbers(120003)
729
def count_happy_numbers(n):
    all_happy = 0
    if len(str(n)) > 4:
        for i in range(1, n + 1):
            if happy_number(i) is True:
                all_happy += 1
    return all_happy
[ "def happy_numbers(m, n):\n lst_of_happy_in_range = []\n lst_of_happy_in_range = [int(i) for i in range(m, n + 1)\n if happy_number(i) == True]\n return lst_of_happy_in_range", "def find_happy(N):\n return [i for i in range(N) if is_happy(i)]", "def uglyNumbers(n):\n\tdef ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
(int, int) -> list

Return all happy numbers from range m to n

>>> happy_numbers(100, 20002)
[10001, 10010, 10100, 11000, 20002]
>>> happy_numbers(20002, 29002)
[20002, 20011, 20020, 20101, 20110, 20200, 21001, 21010, 21100, 22000]
def happy_numbers(m, n):
    lst_of_happy_in_range = []
    lst_of_happy_in_range = [int(i) for i in range(m, n + 1)
                             if happy_number(i) == True]
    return lst_of_happy_in_range
[ "def generate_integers(m, n):\n return list(range(m,n+1))", "def find_happy(N):\n return [i for i in range(N) if is_happy(i)]", "def getNumberList(n):\n\tresult = []\n\ti = 0\n\twhile i < n:\n\t\tresult.append(i)\n\t\ti += 1\n\treturn result", "def intlist(n):\n list1 = []\n while n:\n list...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fetch a sensitive value from different sources.
def convert_sensitive(value): if value is None: # Not found return None # Short-circuit in case the value starts with value:// (ie, it is enforced) if value.startswith('value://'): return value[8:] if value.startswith('env://'): envvar = value[6:] LOG.debug('Loading va...
[ "def get_value(self, **kwargs):\n return self.source_from(self, **kwargs)", "def test_core_get_stored_value_v1(self):\n pass", "def _retrieve_value(self, data, value):\n logging.info('Getting value for {}'.format(value))\n retrieve_data = []\n m_data = DotMap(data)\n if...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Try to load `filename` as configuration file for logging.
def _load_log(self, filename): assert(filename) _here = Path(__file__).parent # Try first if it is a default logger _logger = _here / f'loggers/{filename}.yaml' if _logger.exists(): with open(_logger, 'r') as stream: dictConfig(yaml.load(stream, Loade...
[ "def load_config_file():\n\n try:\n return try_load_config()\n except FileNotFoundError:\n logging.error(\"Config file does not exist\")\n raise", "def logging_file_config(self, config_file):\n parser = configparser.ConfigParser()\n parser.read([config_file])\n if p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reverses an index, if needed
def _rev(self, idx: int) -> int:
    if self.order_reversed:
        if idx < 0:
            idx = -idx - 1
        else:
            idx = len(self._view) - idx - 1
            if idx < 0:
                raise IndexError
    return idx
[ "def reverse_idx(self):\n self.reverseIdx = {i : word for word, i in self.wordIdx.items()}\n return True", "def reverse(self, in_place=False):\n pass", "def reverse_dataframe(df):\n return df[::-1].reset_index(drop=True)", "def fix_reverse_index(self):\n tree = etree.HTML(self....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Go to a specified offset. Positive offsets are from the beginning of the view, negative from the end of the view, so that 0 is the first flow, -1 is the last flow.
def go(self, offset: int) -> None:
    if len(self) == 0:
        return
    if offset < 0:
        offset = len(self) + offset
    if offset < 0:
        offset = 0
    if offset > len(self) - 1:
        offset = len(self) - 1
    self.focus.flow = self[offset]
[ "def seek(self, seek_pos=0.0):\n self.sendmessage('JUMP ' + str(seek_pos) + 's')", "def jmp(self, offset):\n self.ip += int(offset)", "def seek(self, offset):\n self._filelike.seek(offset)", "def jump_to_page(self, page: int) -> None:\n overlap = 1 if self.column >= 2 else 0\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set focus to the next flow.
def focus_next(self) -> None:
    if self.focus.index is not None:
        idx = self.focus.index + 1
        if self.inbounds(idx):
            self.focus.flow = self[idx]
    else:
        pass
[ "def focus_next(self, window: Optional[wrappers.Window] = None) -> None:\n pass", "def focus(self):\n self.node.focus()", "def setFocus():\n pass", "def set_focus(self):\n self.logger.info(f\"Set focus on element: {self.selectors}\")\n self.element.set_focus()", "def set_focus...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set focus to the previous flow.
def focus_prev(self) -> None:
    if self.focus.index is not None:
        idx = self.focus.index - 1
        if self.inbounds(idx):
            self.focus.flow = self[idx]
    else:
        pass
[ "def focus_prev(self, window: Optional[wrappers.Window] = None) -> None:\n pass", "def setFocus():\n pass", "def focus(self):\n self.node.focus()", "def restore_previous_tab(self):\n\n if self._previous_tab:\n if not self.set_current_tab(self._previous_tab):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the current view order.
def set_order(self, order_key: str) -> None: if order_key not in self.orders: raise exceptions.CommandError( "Unknown flow order: %s" % order_key ) order_key = self.orders[order_key] self.order_key = order_key newview = sortedcontainers.SortedListW...
[ "def set_render_order(self, order):\n self._set_render_order(order)", "def set_module_order(self, order):\n with self.order_lock:\n self.module_order.set(order)\n self._listeners.notify(\"order\")\n self._listeners.notify(\"dependency\")", "def defineSlideOrder(sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Clears both the store and view.
def clear(self) -> None:
    self._store.clear()
    self._view.clear()
    self.sig_view_refresh.send(self)
    self.sig_store_refresh.send(self)
[ "def clear(self):\n self.collection.clear()", "def clear(self):\n self.models = {}\n self.model_ids = []", "def clear (self):\n for object in self._objects[:]:\n object.destroy ()\n self._objects = []", "def clear(self):\n self.msg_store = ''", "def clear...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Clears only the unmarked flows.
def clear_not_marked(self) -> None:
    for flow in self._store.copy().values():
        if not flow.marked:
            self._store.pop(flow.id)
    self._refilter()
    self.sig_store_refresh.send(self)
[ "def remove(self, flows: typing.Sequence[mitmproxy.flow.Flow]) -> None:\n for f in flows:\n if f.id in self._store:\n if f.killable:\n f.kill()\n if f in self._view:\n # We manually pass the index here because multiple flows may h...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a value from the settings store for the specified flow.
def getvalue(self, flow: mitmproxy.flow.Flow, key: str, default: str) -> str: return self.settings[flow].get(key, default)
[ "def __get_value_from_datastore(name):\n # type: (str) -> str\n setting = GaeEnvSettings.query(\n GaeEnvSettings.name == str(name)).get() # type: GaeEnvSettings\n if not setting:\n return None\n return setting.value # type: str", "def get_value(self, key):\n if self.settings.has...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set a value in the settings store for the specified flows.
def setvalue(
        self, flows: typing.Sequence[mitmproxy.flow.Flow], key: str, value: str
) -> None:
    updated = []
    for f in flows:
        self.settings[f][key] = value
        updated.append(f)
    ctx.master.addons.trigger("update", updated)
[ "def set(ctx, setting, value):\n ctx.obj.config.set(setting, value)\n ctx.obj.config.save()", "def set(name, value):\n Configuration.settings[name] = value", "def set(\n ctx: Context, type: str, encrypted: bool, setting: str, value: str = None\n ):\n cli_context: CliCon...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Duplicates the specified flows, and sets the focus to the first duplicate.
def duplicate(self, flows: typing.Sequence[mitmproxy.flow.Flow]) -> None:
    dups = [f.copy() for f in flows]
    if dups:
        self.add(dups)
        self.focus.flow = dups[0]
        ctx.log.alert("Duplicated %s flows" % len(dups))
[ "def update(self, flows: typing.Sequence[mitmproxy.flow.Flow]) -> None:\n for f in flows:\n if f.id in self._store:\n if self.filter(f):\n if f not in self._view:\n self._base_add(f)\n if self.focus_follow:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes the flow from the underlying store and the view.
def remove(self, flows: typing.Sequence[mitmproxy.flow.Flow]) -> None: for f in flows: if f.id in self._store: if f.killable: f.kill() if f in self._view: # We manually pass the index here because multiple flows may have the sam...
[ "def clear(self) -> None:\n self._store.clear()\n self._view.clear()\n self.sig_view_refresh.send(self)\n self.sig_store_refresh.send(self)", "def teardown_with(self, flow):\n from x2py.flow import Flow\n backup = Flow.thread_local.current\n Flow.thread_local.curre...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Resolve a flow list specification to an actual list of flows.
def resolve(self, flow_spec: str) -> typing.Sequence[mitmproxy.flow.Flow]: if flow_spec == "@all": return [i for i in self._store.values()] if flow_spec == "@focus": return [self.focus.flow] if self.focus.flow else [] elif flow_spec == "@shown": return [i for ...
[ "def get_flow_list(self):\n if (self.host_url != None) and (self.flow_id != None):\n url = self.host_url + 'flow/' + str(self.flow_id)\n\n # get flow list\n try:\n result = requests.get(url) # GET\n\n except Exception as err:\n self.f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get flow with the given id from the store. Returns None if the flow is not found.
def get_by_id(self, flow_id: str) -> typing.Optional[mitmproxy.flow.Flow]: return self._store.get(flow_id)
[ "def getFlow(self, **kwargs):\n\n allParams = ['id']\n\n params = locals()\n for (key, val) in list(params['kwargs'].items()):\n if key not in allParams:\n raise TypeError(\"Got an unexpected keyword argument '%s' to method getFlow\" % key)\n params[key] = v...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns true if view is in marked mode.
def get_marked(self) -> bool: return self.show_marked
[ "def is_marked(markable):\n return bool(getattr(markable, _ATTR_DATA_MARKINGS, False))", "def has_mark(self):", "def is_marked(self,flag=None): \n if flag is None:\n # No flag -> check whether set is empty\n if self._flags:\n return True\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Toggle whether to show marked views only.
def toggle_marked(self) -> None:
    self.show_marked = not self.show_marked
    self._refilter()
[ "def toggle_view(self):\n self.emit(SIGNAL(\"toggle_view_div\"))", "def get_marked(self) -> bool:\n return self.show_marked", "def mark(self):\n\n self.is_marked = True\n self.show()", "def toggle(self):", "def toggle_unread_only(self):\n was_unread_only = self.unread_only...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Is this 0 <= index < len(self)?
def inbounds(self, index: int) -> bool: return 0 <= index < len(self)
[ "def verify_index(self, index, a_list):\n return 0 <= index < len(a_list)", "def indexists(list_input, index: int) -> bool:\r\n return index <= len(list_input) - 1", "def contains_index(self, index):\n return self.point_def.index <= index <= self.point_def.array_last_index", "def is_on_bounda...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates a list of flows. If flow is not in the state, it's ignored.
def update(self, flows: typing.Sequence[mitmproxy.flow.Flow]) -> None: for f in flows: if f.id in self._store: if self.filter(f): if f not in self._view: self._base_add(f) if self.focus_follow: ...
[ "def _update_flows(self):\n ts = self.asbce.lastflows_timestamp\n fd = self.asbce.flows()\n for tag in (k for k, v in self.tracker.db.iteritems()\n if v.status in (\"ONCALL\", \"ONHOLD\")):\n try:\n d = {}\n for k in self.tracker.db[ta...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Show the main page with all the posts. The index uses pagination, 5 posts per page. request: the request from the client. Returns the base.html page if the page exists, otherwise returns a 400 error page.
def index(request): tmp_posts = Post.objects.order_by("-pub_date") posts_page = Paginator(tmp_posts, 5) # Default to page one when none is given page = request.GET.get("page", 1) try: posts = posts_page.page(page) except EmptyPage: return render( request, ...
[ "def get(self):\n posts = Post.query()\n self.render('post-index.html',\n posts=posts,\n user=self.user)", "def home(): #changed from redirect to 5 most recent posts. \n posts = Post.query.order_by(Post.created_at.desc()).limit(5).all() #pulls posts from DB, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Search for posts according to what was given. The search matches posts whose header contains some or all of the given text. The search uses pagination, 5 posts per page. If the given page parameter raises an EmptyPage exception, an error page will be returned. request: the request from the client. Return the base...
def search(request): tmp_posts = Post.objects.order_by("-pub_date").filter(header__contains=request.GET["header"]) posts_page = Paginator(tmp_posts, 5) # Default to page one when none is given page = request.GET.get("page", 1) try: posts = posts_page.page(page) except EmptyPage: ...
[ "def search():\n if not g.search_form.validate():\n return redirect(url_for('main.explore'))\n \n page = request.args.get('page', 1, type=int)\n posts, total = Post.search(g.search_form.q.data, page, \n current_app.config['POSTS_PER_PAGE'])\n \n next_url = url_for...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Show the post content when the request is a GET. If a POST is given, it will try to save a comment for the post, according to the values passed. request: the request from the client. str: the post header. The return will always be the template file "detail.html".
def show_post(request, str): # It's guaranteed to always have a unique header. post = get_object_or_404(Post, header=str) comments = post.comment_set.order_by("-comment_date") # Save a comment to given post. if (request.method == "POST"): # Because we add a few things at the comment creati...
[ "def get(self, request, pk, *args, **kwargs):\n post = Post.objects.get(pk=pk)\n form = CommentForm()\n comments = Comment.objects.filter(post=post).order_by('-created_on')\n\n context = {\n 'post': post,\n 'form': form,\n 'comments': comments,\n }...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Subtract trajectory from current instance.
def __sub__(self, other_traj): return Trajectory(self.modes - other_traj.modes)
[ "def __sub__(self, delta):\n copy = self.__copy__(self)\n copy -= delta\n return copy", "def __sub__(self, other):\n return Wrench(\n self.force - other.force,\n self.torque - other.torque)", "def __sub__(self, other: Vector) -> Vector:\n return Vector(se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Evaluate (approximate) equality of trajectory and current instance.
def __eq__(self, other_traj, rtol = 1e-5, atol = 1e-8): return np.allclose(self.modes, other_traj.modes, rtol = rtol, atol = atol)
[ "def is_approx_equal(self, other):\n raise NotImplementedError", "def __eq__(self, other):\n if isinstance(other, Solution):\n equalities = np.isclose(self.x, other.x, rtol=Solution.eps, atol=Solution.eps)\n return np.all(equalities)\n else:\n raise InvalidCom...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the element(s) of the modes indexed by the given key.
def __getitem__(self, key): return self.modes[key]
[ "def find(self, key):\n return list(self.iterate(key))", "def elements_in_set(self, key) -> List:\n root = self.find(key)\n return [r for r in self.data if self.find(r) == root]", "def get_list(self, key):\n return self.__settings[key]", "def search(self, key):\n\n symbols=[...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the value(s) of the modes indexed by the given key.
def __setitem__(self, key, value): self.modes[key] = value
[ "def csi_set_modes(self, modes, qmark, reset=False):\n flag = not reset\n\n for mode in modes:\n self.set_mode(mode, flag, qmark, reset)", "def __getitem__(self, key):\r\n return self.modes[key]", "def ctx_set(flags: Flags, fdict: FlagDict, key: str, value: Any):\n key = ctx_f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a new trajectory with rounded modes.
def __round__(self, decimals = 6): return Trajectory(np.around(self.modes, decimals = decimals))
[ "def get_trajectory(self):\n start_point = [0.3, 0, 0.295]\n end_point = [0.5, -0.2, 0.295] \n\n milestones = np.array([start_point, end_point])\n self.num_waypoints = np.size(milestones, 0)\n\n return trajectory.Trajectory(milestones=milestones)", "def __round__(self, ndigits...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Question function: after the user's input, this function calls the response function and prints the returned result, then calls the replay function
def question():
    input('Ask your question and press the [Enter] button.')
    answer = response()
    print('\nAsking the spirits...')
    for thought in range(3):
        print('.', end='')
        time.sleep(1)
    print("\n{}\n".format(answer))
    replay()
[ "def question(self, question):\n if self.use_STT:\n self.say(question)\n response = STT.wait_for_voice()\n else:\n naoqiutils.speak(question)\n response = raw_input(question + \"\\n> \")\n return response", "def ask_question(question) :\n logger....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Response function, which holds a list of possible answers. When called, the function uses the random module to randomly choose an answer from the list and returns the choice
def response(): response_list = ['Yes', 'No', 'My sources point to yes', 'Maybe', 'The outcome does not look good', "I can't say for sure", "Perhaps", "Don't count on it", "Everything is blurry... Ask again...", "The spirits say... Yes", "The spirits say... No", "Chances ar...
[ "def generateResponse(self, questionText):\n\n if self.randomly:\n return random.choice(self.responses)\n else:\n return self.responses[0]", "def get_sadge_response():\n return random.choice(sadge_list)", "def make_multiple_choice(question, choices, which=1, randomize=True...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Confirm that xz is installed
def test_xz():
    lexed = shlex.split("command -v xz")
    proc = Popen(lexed, stdout=PIPE, stderr=PIPE, shell=True)
    proc.wait()
    return bool(proc.returncode == 0)
[ "def test_xrt_installed(host):\n _xrt_installed_only(host)\n assert host.file(\"/opt/xilinx/xrt/bin/xbutil\").exists", "def test_xrt_aws_installed(host):\n _aws_only(host)\n _xrt_installed_only(host)\n assert host.file(\"/opt/xilinx/xrt/bin/awssak\").exists", "def test_haproxy_is_installed(host):...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decompress an xz resource
def decompress_xz(file_name):
    lexed = shlex.split("xz -d \"%s\"" % file_name)
    proc = Popen(lexed, stdout=PIPE, stderr=PIPE, shell=False)
    proc.wait()
    return proc.returncode
[ "def RunDecompress(args):\n compressed_apex_fp = args.input\n decompressed_apex_fp = args.output\n return decompress(compressed_apex_fp, decompressed_apex_fp)", "def __decompress_archive(self):\n self.decompress_path = self.cwd.joinpath(PathVariables.SRC__DECOMPRESSED)\n self.log.debug(\"decompre...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return list of files in ZIP matching pattern
def get_files_in_zip(zip_file, pattern): file_list = list() lexed = shlex.split("unzip -t \"%s\" \"%s\"" % (zip_file, pattern)) proc = Popen(lexed, stdout=PIPE, stderr=PIPE, shell=False) proc.wait() if proc.returncode != 0: return None for line in proc.stdout.read().split("\n"): ...
[ "def search_zip(fname, pattern):\n matches = []\n zipf = zipfile.ZipFile(fname, 'r')\n for name in zipf.namelist():\n with zipf.open(name) as f:\n for line in f.readlines():\n if match := pattern.search(line):\n matches.append((fname, name, line, match.gr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get BIP39 English wordlist
def get_wordlist():
    with open(WORDLIST_FILE) as english:
        wordlist = english.readlines()
    return [word.strip() for word in wordlist]
[ "def get_word_list(self)->list:\n return self.word_list", "def get_wordle_list(filename: string) -> string:\n with open(filename, \"r\", encoding=\"utf-8\") as file_handler:\n return file_handler.read()", "def get_possible_words(self) -> list:\n raise NotImplemented", "def get_wordlist...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert binary string to hex string. If the binstring provided is not a multiple of 4 in length, zero left-padding is assumed.
def bin2hex(binstring): if not isinstance(binstring, basestring): raise ValueError #return n_bits / 8 if n_bits % 8 == 0 else (n_bits / 8) + 1 n_bits = len(binstring) hexlen = n_bits / 4 if n_bits % 4 == 0 else (n_bits / 4) + 1 hex_str = hex(int(binstring, 2))[2:].zfill(hexlen) #remove leadi...
[ "def binary_string_to_hex(self,binary_string):\n length = len(binary_string)\n if length%4 != 0:\n raise Exception('Length of binary ({}) not divisible by 4. Cannot convert to hex.'.format(length))\n else:\n hex_string = ''\n for i in range(0,length-1,4): ## beg...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute BIP39 checksum from entropy expressed as binary string
def checksum(entropy_binstring):
    hasher = hashlib.sha256()
    data = decode_binary_string(entropy_binstring)
    hasher.update(data)
    checksum_hex = hasher.hexdigest()
    checksum_bin = hex2bin(checksum_hex)
    ent = len(entropy_binstring) / ENT_MOD
    return checksum_bin[0:ent]
[ "def bin_checksum(s):\n return bin_sha256(bin_sha256(s))[:4]", "def compute_checksum(bin_msg):\n assert len(bin_msg) > 0\n cksum = 0\n for b in bin_msg:\n cksum += b\n return cksum % 256", "def Checksum(cls, string):\n # Get the last 10 bits\n c = crc32(string.encode('utf-8')) & (2 *...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Obtain indices in wordlist from binary string
def binstring2word_index(binstring):
    indices = [int(  # interpret chunk as binary string and convert to int
        binstring[i*WORDLIST_PIECE_BITS:  # take chunk of 11 bits
                  (i+1)*WORDLIST_PIECE_BITS], 2)
        for i in range(len(binstring)//WORDLIST_PIECE_BITS)]
    return indices
[ "def get_indices(mnemonic):\n if len(mnemonic) == 0:\n raise ValueError\n return [get_index_from_word(word) for word in mnemonic.split()]", "def words_to_indices(self, sentence):\n\t\tindices = []\n\t\tif self.bos:\n\t\t\tindices.append(2)\n\t\tfor word in sentence:\n\t\t\tif word in self.worddict:\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Obtain 11-bit string from word index in [0, 2047]
def word_index2binstring(index):
    if index < 0 or index > 2047:
        raise WordNotDefinedAtIndexError()
    return dec2bin(index, zero_padding=11)
[ "def findId(n):\n if type(n) != str:\n return None\n indexes = {}\n for i in xrange(len(n)):\n if n[i].isalpha():\n indexes[i] = n[i]\n size = len(indexes.keys())\n numPossible = 2**size\n wordBuilder = [i for i in n]\n \n for i in xrange(numPossible):\n binar...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the BIP39 word from the English wordlist at the specified 0-based index
def get_word_from_index(index):
    if index < 0 or index > 2047:
        raise WordNotDefinedAtIndexError()
    return get_wordlist()[index]
[ "def word_index2binstring(index):\n if index < 0 or index > 2047:\n raise WordNotDefinedAtIndexError()\n return dec2bin(index, zero_padding=11)", "def get_word(self):\n # Todo get a list of words fron somewhere\n pass", "def getindex(wordletter):\n newindexvalue = (alphabet.index(w...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the 0-based index of a word in the English wordlist
def get_index_from_word(word, wordlist=None):
    if wordlist is None:
        wordlist = get_wordlist()
    for index, word_comp in enumerate(wordlist):
        if word_comp == word:
            return index
    raise InvalidWordError()
[ "def getindex(wordletter):\n newindexvalue = (alphabet.index(wordletter))\n return newindexvalue", "def listPosition(word):\n return word_order(word)", "def get_word_index(self, word):\n if self.contain(word):\n return self.dict[word]\n else:\n raise ValueError('Cann...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a list of word indices, get full mnemonic from English wordlist
def get_mnemonic(indices):
    if len(indices) == 0:
        raise ValueError
    return " ".join([get_word_from_index(index) for index in indices])
[ "def get_indices(mnemonic):\n if len(mnemonic) == 0:\n raise ValueError\n return [get_index_from_word(word) for word in mnemonic.split()]", "def decode_indices(indices, vocabulary):\n\n decoded_tokens = [vocabulary[index] for index in indices]\n return \" \".join(decoded_tokens)", "def idx2wo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a mnemonic sentence, get the word indices for the English wordlist
def get_indices(mnemonic):
    if len(mnemonic) == 0:
        raise ValueError
    return [get_index_from_word(word) for word in mnemonic.split()]
[ "def words_to_indices(self, sentence):\n\t\tindices = []\n\t\tif self.bos:\n\t\t\tindices.append(2)\n\t\tfor word in sentence:\n\t\t\tif word in self.worddict:\n\t\t\t\tindices.append(self.worddict[word])\n\t\t\telse:\n\t\t\t\tindices.append(1)\n\t\tif self.eos:\n\t\t\tindices.append(3)\n\t\treturn indices", "def...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert complete mnemonic sentence to binstring and verify checksum. The returned value will not include the checksum.
def mnemonic2binstring(mnemonic, print_warning=True): if mnemonic == '': raise ValueError binstring = '' wordlist = get_wordlist() for word in mnemonic.split(): index = get_index_from_word(word, wordlist=wordlist) binstring += word_index2binstring(index) if len(binstring) % ...
[ "def mne2bin(mnemonic):\n val = 0\n for ch in mnemonic:\n val = (val << 6) | M2B[ch]\n s = \"\"\n while val > 0:\n s += chr(val & 0xFF)\n val >>= 8\n r = \"'\"\n for ch in s:\n r += \"\\\\x%02X\" % ord(ch)\n r += \"'\"\n return r", "def test_scl_bcc_with_correct...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert raw entropy as binary string (sans checksum) to bip39 mnemonic
def binstring2mnemonic(entropy_bin):
    checksum_bin = checksum(entropy_bin)
    combined_bin = "{0}{1}".format(entropy_bin, checksum_bin)
    indices = binstring2word_index(combined_bin)
    mnemonic = get_mnemonic(indices)
    return mnemonic
[ "def mnemonic2binstring(mnemonic, print_warning=True):\n if mnemonic == '':\n raise ValueError\n binstring = ''\n wordlist = get_wordlist()\n for word in mnemonic.split():\n index = get_index_from_word(word, wordlist=wordlist)\n binstring += word_index2binstring(index)\n\n if len...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
take the array of numbers stored in arr and return the string true if any combination of numbers in the array can be added up to equal the largest number
def array_addition(lst): greatest = max(lst) sorted_nums = sorted(lst) without_greatest = sorted_nums[:-1] total_sums = [] idx = 1 while idx < len(without_greatest): perms = list(itertools.permutations(without_greatest, idx)) for perm in perms: if sum(perm) == greatest: return True idx += ...
[ "def descending(array):\n for index in range(array.size() - 1):\n if arr[index] <= arr[index + 1]:\n return False\n\n return True", "def _comb_long(c, nmax):\n if nmax == 0:\n return []\n c = np.asanyarray(c)\n return np.concatenate([c >= o +...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Saves and loads results of function to json.
def save_load_results_to_json(func, print=True): def wrapper(filename, *args, **kwargs): full_path = os.path.join(_path, filename) if os.path.exists(full_path): if print: logging.info("Loading results for %s from %s." % (func.__name__, filename)) with open(fu...
[ "def saveResults(self):\n fname = join(self.seriesOutputDir, 'results.json')\n with open(fname, 'w') as outputFile:\n outputFile.write(json.dumps(self.results))", "def save_rms_data(filename, results):\n with open(filename, \"w\") as f:\n json.dump(results, f)", "def __export_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Send a syslog to the server. Make sure the port is open though
def send_syslog(string): global SYSLOGSOCK if not SYSLOGSOCK: print("Creating socket to", HOST, PORT) SYSLOGSOCK = socket.socket(socket.AF_INET, socket.SOCK_STREAM) SYSLOGSOCK.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) SYSLOGSOCK.connect((HOST, PORT)) string = stri...
[ "def test3_output_syslog_enable(self):\n cmd = 'python3 -c \"from dnstap_receiver.receiver import start_receiver; start_receiver()\" -c ./tests/dnstap_syslog.conf'\n o = execute_dnstap(cmd)\n \n self.assertRegex(o, b\"Output handler: syslog\")", "def handle_syslog_request(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Send a bunch of data every few seconds to simulate actual callback stuff
def send_timed(count, interval=30): print("Sending data", count, "times at an interval of", interval, "seconds") for i in range(count): # 50% chance to send 2-5 creds if random.random() < 0.50: for j in range(random.randint(2, 5)): cred = gen_cred() pr...
[ "def main():\n initData()\n\n # Loop to simulate data connections\n # Current set up\n # Once an hour send hsk\n # Every 10 min send hsk\n # Every 5 min send spec and nrbd\n # This can be changed for different cycles\n while 1:\n connection([\"time\",\"spec\",\"nrbd\",\"hsk\"])\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Overall Allocation Cost Metric
def overall_cost(system, control_input, environment_input):
    costs = _calc_resource_allocation_cost(system, control_input, environment_input)
    return sum(costs) if len(costs) > 0 else 0.0
[ "def cost_perf_index(self):\n \n ev = self.apc * self.budget\n \n return ev / self.ac", "def administration_overhead_cost(self, *args, **kwargs):\n result = 0\n for overhead in self.overhead_list:\n result += overhead.administration_overhead_rate\n retur...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Maximum Allocation Cost Metric
def max_cost(system, control_input, environment_input):
    costs = _calc_resource_allocation_cost(system, control_input, environment_input)
    return max(costs) if len(costs) > 0 else 0.0
[ "def _GetMaximalMetrics(self):\n metrics = list(self._hpc.free_metrics)\n metrics += list(self._hpc.non_free_metrics)[0:self._hpc.max_counters]\n return metrics", "def max_capacity(self) -> jsii.Number:\n return self._values.get(\"max_capacity\")", "def compute_optimum(self):\n assert sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Average Allocation Cost Metric
def avg_cost(system, control_input, environment_input):
    costs = _calc_resource_allocation_cost(system, control_input, environment_input)
    return mean(costs) if len(costs) > 0 else 0.0
[ "def compute_average_macs_cost(self):\n\n for m in self.modules():\n m.accumulate_macs = accumulate_macs.__get__(m)\n\n macs_sum = self.accumulate_macs()\n\n for m in self.modules():\n if hasattr(m, 'accumulate_macs'):\n del m.accumulate_macs\n\n return macs_sum / self.__batch_c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate resource allocation cost for every application instance placed on nodes
def _calc_resource_allocation_cost(system, control_input, environment_input): costs = [] for app in system.apps: # TODO: calculate only for internal nodes? for node in system.nodes: if not control_input.app_placement[app.id][node.id]: continue nb_instances...
[ "def calculate_capacity_for(m_name, m_pods, m_cpu, m_mem, node_map):\n # print(\n # f\"Checking capacity of metric: {m_name}\\n\"\n # f\" CPU: {m_cpu}\\n\"\n # f\" memory: {m_mem}\\n\"\n # f\" pods: {m_pods}\"\n # )\n\n metric_capacity = 0\n for node in node_map.valu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
set default value for location based on selected warehouse
def default_get(self, fields):
    result = super(SaleOrder, self).default_get(fields)
    if 'warehouse_id' in result:
        warehouse_obj = self.env['stock.warehouse']
        result['location_id'] = warehouse_obj.browse(result['warehouse_id']).lot_stock_id.id
    return result
[ "def _onchange_warehouse_location_domain(self):\n\n location_obj = self.env['stock.location']\n location_id = self.warehouse_id.lot_stock_id # main warehouse location\n location_parent = location_id.location_id # location id is parent location n model stock.location\n\n self.location_i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
in case the warehouse changes, we need to change the location to the default location of the newly selected warehouse, and also set the domain to the children of the newly selected warehouse
def _onchange_warehouse_location_domain(self): location_obj = self.env['stock.location'] location_id = self.warehouse_id.lot_stock_id # main warehouse location location_parent = location_id.location_id # location id is parent location n model stock.location self.location_id = locatio...
[ "def choose_new_location(self, new_locations):\n\t\tpass", "def set_adm_location(self):\n match = None\n if self.has_non_empty_attribute(\"municipio\"):\n try_match = utils.q_from_first_wikilink(\"es\", self.municipio)\n link_match = utils.get_item_from_dict_by_key(\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read configuration file from config.ini
def read_config():
    config = cp.ConfigParser()
    config.read("config.ini")
    return config
[ "def _read_config():\n\n import configparser\n import os\n\n basepath = os.getcwd()\n prev = None\n while basepath != prev:\n prev = basepath\n path = os.path.join(basepath, 'uriconfig.ini')\n if os.path.exists(path):\n break\n basepath = os.path.split(basepath)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read command line arguments
def read_args():
    parser = argparse.ArgumentParser(description='Taiko data analysis toolkit')
    parser.add_argument('-f', help='Write frames', action='store_true')
    return vars(parser.parse_args())
[ "def command_line_arguments():\n return sys.argv", "def main():\n # set up the program to take in arguments from the command line", "def read_args(self):\n cmd = []\n for index in sys.argv:\n cmd = cmd + index.split(\"=\")\n cmd.pop(0)\n\n\n for index , item in enume...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the next auction task if a task with auction time == current time step exists in the unallocated tasks
def _next_auction_task(self, time_step: int) -> Optional[Task]: assert time_step >= 0 if self._unallocated_tasks: assert self._unallocated_tasks[0].auction_time >= time_step, \ f'Top unallocated task auction time {self._unallocated_tasks[0].auction_time} at time step: {time_s...
[ "def get_next_task(self, tasks):\n if len(tasks) == 0:\n return None\n else:\n schedulable =[]\n for task in tasks:\n if task[\"state\"] == \"init\":\n schedulable.append(task)\n elif task[\"state\"] == \"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
reads in a CSV file containing 40kb bins. for now, assuming there is only one type of binlist per file
def readBins(filename):
    reader = csv.reader(open(filename, 'rU'))
    chr_num = 0
    bins = {}
    for row in reader:
        if len(row) == 1:
            if row[0][0:3] == 'chr':
                chr_num = int(row[0].lstrip('chr'))
        if len(row) > 1:
            assert chr_num not in bins.keys()
            bins[chr_num] = [int(x) for x in row]
    return bins
[ "def _get_bin_count(self, file):\n bins = 0\n\n with open(file, 'r', encoding = self.encoding) as f:\n for line in f:\n try:\n if float(line.split(',')[0]):\n bins = bins + 1\n except: pass\n\n return bins", "d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validate FHIR conformance resources and validate example FHIR resources against the conformance resources by running the HL7 FHIR implementation guide publisher.
def validate(ig_control_filepath, clear_output, publisher_opts): try: app.validate(ig_control_filepath, clear_output, publisher_opts) except Exception as e: logger.exception(str(e)) logger.info('❌ Validation failed!') exit(1) else: logger.info('✅ Validation succeeded!...
[ "def test_validate_schema_2(self):\n\n # prepare\n validator = EsdlValidator()\n\n # execute, validate against 1 schema\n result = validator.validate(self.esdlHybrid, [self.schemaTwo])\n validationProducer = result.schemas[0].validations[0]\n validationStorage = result.sche...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convenience method to add the necessary configuration for the resource(s) to the IG configuration so that the resource is included in the generated IG site. NOTE The resource file, `data_path`, must already be in the IG site root. This CLI command does not move the file into the site root. \b
def add(data_path, ig_control_filepath):
    try:
        app.update_ig_config(data_path, ig_control_filepath)
    except Exception as e:
        logger.exception(str(e))
        logger.info(f'❌ Add {data_path} to IG failed!')
        exit(1)
    else:
        logger.info(f'✅ Add {data_path} to IG succeeded!')
[ "def add_resources(resources_dir, site_config, referable_test=is_common_media_file):\n item_config = site_config.get(\"item_config\")\n for file in Path(resources_dir).glob(\"**/*\"):\n if file.is_file():\n if referable_test(file):\n id = file.name\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse string template and/or copy dictionary template.
def _get_template_dict(template):
    if isinstance(template, str):
        return parse_template(template)
    if isinstance(template, dict):
        return copy.deepcopy(template)
    raise ValueError("Input template should be a string or dictionary")
[ "def parse_template(tmplt, type_dict, all_permutations=False, codes={}, read_only_codes=False,\n c=1, w='', must_choose_ind=None):\n if tmplt.startswith('$'):\n if ':' in tmplt:\n _cv = tmplt[1:].partition(':')\n if _cv[0] in codes:\n return parse_tem...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Construct the request body to create application.
def _create_application_request(app_metadata, template): app_metadata.validate(["author", "description", "name"]) request = { "Author": app_metadata.author, "Description": app_metadata.description, "HomePageUrl": app_metadata.home_page_url, "Labels": app_metadata.labels, ...
[ "def _create_application(\n self,\n name,\n client_type=None,\n grant_type=None,\n capability=None,\n user=None,\n data_access_type=None,\n end_date=None,\n **kwargs\n ):\n client_type = client_type or Application.CLIENT_PUBLIC\n grant_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Construct the request body to update application.
def _update_application_request(app_metadata, application_id): request = { "ApplicationId": application_id, "Author": app_metadata.author, "Description": app_metadata.description, "HomePageUrl": app_metadata.home_page_url, "Labels": app_metadata.labels, "ReadmeBody": ...
[ "def _create_application_request(app_metadata, template):\n app_metadata.validate([\"author\", \"description\", \"name\"])\n request = {\n \"Author\": app_metadata.author,\n \"Description\": app_metadata.description,\n \"HomePageUrl\": app_metadata.home_page_url,\n \"Labels\": app_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Construct the request body to create application version.
def _create_application_version_request(app_metadata, application_id, template): app_metadata.validate(["semantic_version"]) request = { "ApplicationId": application_id, "SemanticVersion": app_metadata.semantic_version, "SourceCodeUrl": app_metadata.source_code_url, "TemplateBody...
[ "def create_application_version(self, version_name, version_date, application_id):\n params = {'versionName' : version_name, 'versionDate' : version_date}\n return self._request('POST', 'rest/applications/' + str(application_id) + '/version', params)", "def _create_application_request(app_metadata, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check whether the botocore ClientError is ConflictException.
def _is_conflict_exception(e):
    error_code = e.response["Error"]["Code"]
    return error_code == "ConflictException"
[ "def has_conflict(self,local_path):\n if self.verbose:\n self.log.info(\"(%s)\\n%s\" % (inspect.stack()[0][3],local_path))\n try:\n info = self.client.info2(local_path, recurse=False)\n if not info[0][1]['wc_info']['conflict_work']:\n self.log.error(\"co...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Wrap botocore ClientError exception into ServerlessRepoClientError.
def _wrap_client_error(e): error_code = e.response["Error"]["Code"] message = e.response["Error"]["Message"] if error_code == "BadRequestException": if "Failed to copy S3 object. Access denied:" in message: match = re.search("bucket=(.+?), key=(.+?)$", message) if match: ...
[ "def _mock_boto3_kwargs_exception(*args, **kwargs):\n raise ClientError(operation_name=\"\", error_response={})", "def wrap_keystone_exception(func):\n @functools.wraps(func)\n def wrapped(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except keystone_exceptions.Author...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Backup the MESSENGERuvvs database tables. Dump the MESSENGERuvvs data into SQL files that can be restored if
def databasebackups(): # Read in current config file if it exists configfile = os.path.join(os.environ['HOME'], '.nexoclom') datapath = None if os.path.isfile(configfile): for line in open(configfile, 'r').readlines(): key, value = line.split('=') if key.strip() == '...
[ "def backup_database():\n backup_filename = syt.make_project_path(\"/resources/database_backups/\"+syt.add_timestamp_to_filename(db.database))\n syt.log_info(\"Backing up the database\")\n syt.copy_file(db.database, backup_filename)\n syt.log_info(\"Backedup to {}\".format(backup_filename))", "def per...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get Kerberos details in a cluster.
def cluster_kerberos_info(self, cluster_name): try: r = None if self.version == 7: r = requests.get( "{}://{}:{}/api/v40/clusters/{}/kerberosInfo".format( self.http, self.cloudera_manager_host_ip, ...
[ "def kerberos_http_auth(self):\n\n try:\n r = None\n if self.version == 7:\n r = requests.get(\n \"{}://{}:{}/api/v40/cm/kerberosPrincipals\".format(\n self.http,\n self.cloudera_manager_host_ip,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get SSL status of various services.
def ssl_status(self): try: path_status = path.exists("{}".format(self.config_path["hdfs"])) if path_status == True: xml_data = subprocess.Popen( "cat {} | grep HTTPS_ONLY".format(self.config_path["hdfs"]), shell=True, ...
[ "def ssl(self):\r\n return self.sslobj", "def get_ssl_certs():\n try:\n aws_cfg\n except NameError:\n try:\n aws_cfg = load_aws_cfg()\n except Exception, error:\n print(_red(\"error loading config. please provide an AWS conifguration based on aws.cfg-dist to...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get kerberos status of various services.
def kerberos_http_auth(self): try: r = None if self.version == 7: r = requests.get( "{}://{}:{}/api/v40/cm/kerberosPrincipals".format( self.http, self.cloudera_manager_host_ip, se...
[ "def smb_service_status(mnode):\n g.log.info(\"Getting SMB Service status on %s\", mnode)\n return g.run(mnode, \"service smb status\")", "def get_kerberos_servers(self, ad=None):\n if ad is None:\n ad = self.middleware.call_sync('activedirectory.config')\n AD_DNS = ActiveDirectory_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get list of encryption zones in the cluster.
def encryption_zone(self): try: enc_zoneList = pd.DataFrame() xml_data = subprocess.Popen( "sudo hdfs crypto -listZones", shell=True, stdout=subprocess.PIPE, encoding="utf-8", ) xml_data.wait(10) ...
[ "def ex_list_zones(self):\r\n list_zones = []\r\n request = '/zones'\r\n response = self.connection.request(request, method='GET').object\r\n list_zones = [self._to_zone(z) for z in response['items']]\r\n return list_zones", "def list_zones(self):\r\n return list(self.ite...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set bill payment id.
def set_bill_payment_id(self, bill_payment_id): self.bill_payment_id = bill_payment_id
[ "def get_bill_payment_id(self):\n return self.bill_payment_id", "def bill_number(self, bill_number):\n self._bill_number = bill_number", "def setPayment(self, payment):\n self.payment = payment", "def bitpay_invoice_id(self, bitpay_invoice_id):\n \n self._bitpay_invoice_id =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get bill payment id.
def get_bill_payment_id(self): return self.bill_payment_id
[ "def payment_id(self):\n return numbers.PaymentID(hexlify(self._decoded[65:-4]).decode())", "def bill_number(self):\n return self._bill_number", "def bitpay_invoice_id(self):\n return self._bitpay_invoice_id", "def set_bill_payment_id(self, bill_payment_id):\n self.bill_payment_id ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set paid through account id.
def set_paid_through_account_id(self, paid_through_account_id): self.paid_through_account_id = paid_through_account_id
[ "def save_account_id(self, account_id):\n self.wepay_account_id = account_id\n self.save()", "def set_paid(self):\n\n self.paid = True", "def payBooking(self, selectedBooking):\n selectedBooking.setPaid(True)", "def paypal_id(self, paypal_id):\n\n self._paypal_id = paypal_id...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get paid through account id.
def get_paid_through_account_id(self): return self.paid_through_account_id
[ "def get_account_id(self):\n return self.wepay_account_id", "def account_id(self): # DG: renamed\n pass", "def get_user_account_id(self):\n return self.response_json[\"account\"][\"id\"]", "def get_paid_through_account_name(self, paid_through_acount_name):\n return self.paid_thro...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set paid through account name.
def set_paid_through_account_name(self, paid_through_account_name): self.paid_through_account_name = paid_through_account_name
[ "def get_paid_through_account_name(self, paid_through_acount_name):\n return self.paid_through_account_name", "def autoname(self):\n\t\tif not self.email_account_name:\n\t\t\tself.email_account_name = (\n\t\t\t\tself.email_id.split(\"@\", 1)[0].replace(\"_\", \" \").replace(\".\", \" \").replace(\"-\", \" ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get paid through account name.
def get_paid_through_account_name(self, paid_through_acount_name): return self.paid_through_account_name
[ "def account_name(self) -> str:\n return self['accountName']", "def get_account_by_name(self, name: str): \r\n return self.accounts[name] if name in self.accounts else None", "def account_name(session):\r\n iam = session.client('iam')\r\n account_name = \"Null\"\r\n response = iam.list_ac...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set whether it is a single payment bill.
def set_is_single_bill_payment(self, is_single_bill_payment): self.is_single_bill_payment = is_single_bill_payment
[ "def get_is_single_bill_payment(self):\n return self.is_single_bill_payment", "def is_bill(self, is_bill):\n\n self._is_bill = is_bill", "def set_paid(self):\n\n self.paid = True", "def mark_paid(self):\n\t\tlogger.info('Marking bill %s as paid' % self.name)\n\t\tself.amount_due = 0.0\n\t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get whether it is a single payment bill.
def get_is_single_bill_payment(self): return self.is_single_bill_payment
[ "def set_is_single_bill_payment(self, is_single_bill_payment):\n self.is_single_bill_payment = is_single_bill_payment", "def billable(self):\n\t\treturn self.status in self.BILL_STATUSES", "def get_bill_payment_id(self):\n return self.bill_payment_id", "def has_note(payment_id):\n return paym...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Saving gaphor.UML model elements.
def test_save_uml(self): self.element_factory.create(UML.Package) self.element_factory.create(UML.Diagram) self.element_factory.create(UML.Comment) self.element_factory.create(UML.Class) out = PseudoFile() storage.save(XMLWriter(out), factory=self.element_factory) ...
[ "def save_model(self):\n f1 = open(self.name + '_' + 'words', 'w')\n f2 = open(self.name + '_' + 'word_lengths', 'w')\n f3 = open(self.name + '_' + 'stems', 'w')\n f4 = open(self.name + '_' + 'sentence_lengths', 'w')\n f5 = open(self.name + '_' + 'word_pair', 'w')\n f1.writ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save a diagram item too.
def test_save_item(self): diagram = self.element_factory.create(UML.Diagram) diagram.create(CommentItem, subject=self.element_factory.create(UML.Comment)) out = PseudoFile() storage.save(XMLWriter(out), factory=self.element_factory) out.close() assert "<Diagram " in out...
[ "def _save_safe(self, cislo_lv, item):\n\n id_lv = self.saved_items.get(cislo_lv)\n if id_lv:\n save_whole_item(item, id_lv, logger=self.logger, cislo_lv=cislo_lv)\n else:\n id_lv = save_whole_item(item, logger=self.logger,\n cislo_lv=cis...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test connection loading of an association and two classes. (Should count for all line-like objects alike if this works.)
def test_connection(self): c1 = self.create(ClassItem, UML.Class) c2 = self.create(ClassItem, UML.Class) c2.matrix.translate(200, 200) self.diagram.canvas.update_matrix(c2) assert tuple(self.diagram.canvas.get_matrix_i2c(c2)) == (1, 0, 0, 1, 200, 200) a = self.create(Ass...
[ "def test_set_associations_2_adjacent(self):\n test_object = self.test.adjacent_association6\n actual = test_object._set_associations()\n expected_count = 4\n self.assertEqual(expected_count, len(actual))", "def test_set_associations_1_adjacent(self):\n test_object = self.test.a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Listen for requests on the named channel and dispatch them to the given handler. This method can be called multiple times.
def listen(self, name, handler):
[ "def handle_channel(self, input_channel=None):\n # type: (InputChannel) -> None\n input_channel.start_sync_listening(self.handle_message, self.tracker_store)", "def handle(self) -> None:\n while True:\n raw_command = self.request.recv(1024)\n if not raw_command:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve a clone of an existing basket
def clone(self): position_set = deepcopy(self.position_set) return Basket(position_set=position_set, clone_parent_id=self.id, parent_basket=self.ticker)
[ "def test_copyBasket(self):\n basket1 = self.createBasket()\n basket1.addItem(\"beans\")\n basket1.addItem(\"spaghetti hoops\")\n\n basket2 = self.createBasket()\n basket2.copyFrom(basket1)\n\n self.assertEqual(basket1.total(), basket2.total())\n self.assertEqual(bas...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Upload basket composition history
def upload_position_history(self, position_sets: List[PositionSet]) -> Dict: if self.default_backcast: raise MqValueError('Unable to upload position history: option must be set during basket creation') historical_position_sets = [] for position_set in position_sets: self....
[ "def process_basket(self, basket: BaseBasket, request: HttpRequest) -> None:", "def save_history(cube, field, filename): \n\n try:\n history.append(cube.attributes['history'])\n except KeyError:\n pass", "def uploadtoDB(date, item, place, cost):", "def create_basket():\n basket = {\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve the most recent rebalance data for a basket. Usage: Retrieve the most recent rebalance data for a basket. Examples: >>> from gs_quant.markets.baskets import Basket >>> basket = Basket.get("GSMBXXXX") >>> basket.get_latest_rebalance_data() See also
def get_latest_rebalance_data(self) -> Dict: return GsIndexApi.last_rebalance_data(self.id)
[ "def get_latest_rebalance_date(self) -> dt.date:\n last_rebalance = GsIndexApi.last_rebalance_data(self.id)\n return dt.datetime.strptime(last_rebalance['date'], '%Y-%m-%d').date()", "def get_latest_bar(self, exchange, symbol):\n try:\n bars_list = self.latest_symbol_data[exchange]...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve the most recent rebalance date for a basket
def get_latest_rebalance_date(self) -> dt.date: last_rebalance = GsIndexApi.last_rebalance_data(self.id) return dt.datetime.strptime(last_rebalance['date'], '%Y-%m-%d').date()
[ "def get_latest_rebalance_data(self) -> Dict:\n return GsIndexApi.last_rebalance_data(self.id)", "def get_bank_latest_rates(bank_id):\n r = aliased(Rate)\n max_dates = Rate.query.with_entities(func.max(r.update_time).label('maxdate')). \\\n filter(and_(Rate.bank_id == r.bank_id, Rate.bank_id =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }