Columns:
query: string, 9 to 9.05k characters
document: string, 10 to 222k characters
negatives: list, 19 to 20 items
metadata: dict, identical for every row: { "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Each row below lists query, document, and negatives in that order.
Builds URLs for the Directions and Distance Matrix APIs.
def build_url(start, end, transit_mode): transit = "" traffic = "best_guess" depart = "now" if transit_mode: transit = transit_mode direc_url = g_api_base_url + dir_url + "origin=" + start + "&destination=" + end + trans_url \ + transit + goog_dir_key dist_url = g_api_base_url + ...
[ "def build_urls(self):\n self.build_equity_quote_url()\n self.build_equity_chart_url()", "def create_url(_origin_details, travel_start_date, travel_start_time, destination_list):\n prefix = 'https://timetable.search.ch/api/route.json?one_to_many=1'\n\n origin_body = f'&from={_origin_details}&d...
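For illustration, a minimal sketch of the URL building the truncated document performs, using urllib.parse.urlencode so parameters are escaped properly; the constant names mirror those visible in the snippet and are assumptions, as is the endpoint layout:

from urllib.parse import urlencode

# Hypothetical constants standing in for g_api_base_url, dir_url and goog_dir_key.
G_API_BASE_URL = "https://maps.googleapis.com/maps/api/"
DIR_PATH = "directions/json?"
KEY_SUFFIX = "&key=YOUR_API_KEY"

def build_directions_url(start, end, transit_mode=None):
    # Encode query parameters instead of concatenating raw strings,
    # so origins and destinations containing spaces are escaped.
    params = {"origin": start, "destination": end,
              "traffic_model": "best_guess", "departure_time": "now"}
    if transit_mode:
        params["transit_mode"] = transit_mode
    return G_API_BASE_URL + DIR_PATH + urlencode(params) + KEY_SUFFIX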
Defines the way to parse the magic command ``%%maml``.
def maml_parser():
    parser = MagicCommandParser(prog="maml", description='Runs a maml script.')
    parser.add_argument('-q', '--quiet', action='store_true', default=False, help='hide output')
    return parser
[ "def maml(self, line, cell):\n parser = self.get_parser(CsMLMagics.maml_parser, \"maml\")\n args = self.get_args(line, parser)\n\n if args is not None:\n quiet = args.quiet\n out, err = maml(cell, not quiet)\n if out:\n print(out)\n if ...
Defines magic command ``%%maml``.
def maml(self, line, cell): parser = self.get_parser(CsMLMagics.maml_parser, "maml") args = self.get_args(line, parser) if args is not None: quiet = args.quiet out, err = maml(cell, not quiet) if out: print(out) if err: ...
[ "def maml_parser():\n parser = MagicCommandParser(prog=\"maml\",\n description='Runs a maml script.')\n parser.add_argument('-q', '--quiet', action='store_true', default=False,\n help='hide output')\n return parser", "def mlflow_co...
Defines the way to parse the magic command ``%%mlnet``.
def mlnet_parser(): parser = MagicCommandParser(prog="mlnet", description='Compiles and wrap a C# function into a Python function.\n' 'Automatically adds ML.net dependencies.') parser.add_argument('name', type=str, help=...
[ "def mlnet(self, line, cell):\n line, cell = CsMagics._preprocess_line_cell_maml( # pylint: disable=W0212\n line, cell)\n\n parser = self.get_parser(CsMagics.CS_parser, \"CS\")\n args = self.get_args(line, parser)\n\n if args is not None:\n name = args.name\n ...
Defines magic command ``%%mlnet``.
def mlnet(self, line, cell): line, cell = CsMagics._preprocess_line_cell_maml( # pylint: disable=W0212 line, cell) parser = self.get_parser(CsMagics.CS_parser, "CS") args = self.get_args(line, parser) if args is not None: name = args.name dep = CsMa...
[ "def mlnet_parser():\n parser = MagicCommandParser(prog=\"mlnet\",\n description='Compiles and wrap a C# function into a Python function.\\n'\n 'Automatically adds ML.net dependencies.')\n parser.add_argument('name', typ...
Create a pyplot plot and save to buffer.
def gen_plot(fpr, tpr):
    plt.figure()
    plt.xlabel("FPR", fontsize=14)
    plt.ylabel("TPR", fontsize=14)
    plt.title("ROC Curve", fontsize=14)
    plot = plt.plot(fpr, tpr, linewidth=2)
    buf = io.BytesIO()
    plt.savefig(buf, format='jpeg')
    buf.seek(0)
    plt.close()
    # plt.show()
    return buf
[ "def plot(values, plot_filename):\n matplotlib.use('Agg')\n fig = plt.figure()\n plt.subplot(111)\n plt.plot(values)\n fig.canvas.draw()\n fig.savefig(plot_filename)", "def save_plot(self):\r\n\t\t# Generate the plot\r\n\t\tself.generate_plot()\r\n\t\t# Create save directory\r\n\t\tdirectory = s...
Deletes the local configuration for a container.
async def delete_local_configuration_routine(self, name: str):
    plat = get_local_platform_routines()
    user = LocalUserRoutines(plat)
    manager = LocalContainerConfigurationManager(user)
    cont = self.GetItemByName(name)
    manager.DeleteByID(cont.GetID())
[ "def remove_local_config(self):\n with ignored(OSError):\n os.remove(os.path.join(self.rundir, const.LOCAL_CONFIG_FILE))", "def delete_configuration_set(ConfigurationSetName=None):\n pass", "def delete_container(self, container: Container):", "def delete_container(ContainerName=None):\n ...
Returns a drop table or database SQL statement.
def drop_statement(self, object_type, object_name):
    drop_statement = "DROP %s %s" % (object_type, object_name)
    return drop_statement
[ "def get_table_delete_stmt(cls):\r\n table_name = get_table_name(cls)\r\n exec_stmt = [f\"DROP TABLE {table_name}\"]\r\n return ''.join(exec_stmt)", "def to_sql_statement_drop(self) -> str:\n return sql_text(f\"DROP FUNCTION {self.schema}.{self.signature}\")", "def drop(name):\n\t\treturn \"DROP...
This function connects to the device provided when called (dev) in the instantiated testbed (testbed_obj) and executes the provided show command (if none was provided, 'show version' is executed by default). If the Save option = True (s in the command line) was provided, then the output will be saved to a JSON file in th...
def device_info(dev, testbed_obj, showcmd='show version', save_to_json=False, logstdout=True): device = testbed_obj.devices[dev] device.connect(log_stdout=logstdout) response = device.parse(showcmd) print(f"Response from {dev} is of type {type(response)} and length {len(response)}") print(f"RAW res...
[ "def executeShow(self,\n rsrcType,\n showAdditionalParams=[],\n rsrcAdditionalParams=[]):\n\n args = [\"show\",\n \"--wavefrontHost\", util.wavefrontHostName,\n \"--apiToken\", util.wavefrontApiToken] \\\n + sho...
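A hedged sketch of the save-to-JSON branch the docstring describes, assuming the parsed response is a plain dict (as device.parse() returns) and a hypothetical file-naming convention:

import json

def save_response_to_json(response: dict, dev: str, showcmd: str) -> str:
    # Hypothetical convention: 'sw01_show_version.json'.
    fname = f"{dev}_{showcmd.replace(' ', '_')}.json"
    with open(fname, "w") as f:
        json.dump(response, f, indent=2)
    return fname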
Account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.
def expected_bucket_owner(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "expected_bucket_owner")
[ "def expected_bucket_owner(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"expected_bucket_owner\")", "def owner(self) -> pulumi.Output['outputs.BucketOwnerResponse']:\n return pulumi.get(self, \"owner\")", "def owner(self) -> Optional[pulumi.Input['BucketOwnerArgs']]:\n ...
Get an existing BucketLifecycleConfigurationV2 resource's state with the given name, id, and optional extra properties used to qualify the lookup.
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, bucket: Optional[pulumi.Input[str]] = None, expected_bucket_owner: Optional[pulumi.Input[str]] = None, rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.I...
[ "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n minimal_action: Optional[pulumi.Input[str]] = None,\n most_disruptive_allowed_action: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]...
Account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.
def expected_bucket_owner(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "expected_bucket_owner")
[ "def expected_bucket_owner(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"expected_bucket_owner\")", "def owner(self) -> pulumi.Output['outputs.BucketOwnerResponse']:\n return pulumi.get(self, \"owner\")", "def owner(self) -> Optional[pulumi.Input['BucketOwnerArgs']]:\n r...
Get original model if the input model is a model wrapper.
def get_ori_model(model: nn.Module) -> nn.Module:
    if is_model_wrapper(model):
        return model.module
    else:
        return model
[ "def underlying_model(self):\n return self.model_chain[-1] if self.model_chain else None", "def _get_internal_model(self):\n return self.internal_model", "def unwrap_model(model: Union[nn.Module, nn.DataParallel, DistributedDataParallel]) -> nn.Module:\n if is_model_wrapped(model):\n ret...
Load annotations from the annotations.json file.
def _load_annotations(self):
    annotation_file = self._filepath(self.ANNOTATION_FILE)
    with open(annotation_file) as f:
        json_data = json.load(f)
    return json_data
[ "def _load_annotation(json_path):\n # Open the file containing the annotation\n with open(json_path) as annotation_file:\n\n # Parse the AI2D annotation from the JSON file into a dictionary\n annotation = json.load(annotation_file)\n\n # Return the annotation\n retu...
Load the data indices txt file.
def _load_split_indices(self):
    split_file = self.SPLITS.get(self.split)
    indices_file = self._filepath(split_file)
    with open(indices_file) as txt_file:
        idx_data = [int(i) for i in txt_file.readline().split()]
    return idx_data
[ "def read_index(self):\n temp_index_data = np.genfromtxt(\n self.file_name, skip_header=MesaProfileIndex.index_start_line - 1,\n dtype=None)\n self.model_number_string = MesaProfileIndex.index_names[0]\n self.profile_number_string = MesaProfileIndex.index_names[-1]\n ...
Convert the bbox record to BBox2D objects.
def _convert_to_bbox2d(single_bbox):
    label = single_bbox["label_id"]
    bbox = single_bbox["bbox"]
    canonical_bbox = BBox2D(
        x=bbox[0], y=bbox[1], w=bbox[2], h=bbox[3], label=label
    )
    return canonical_bbox
[ "def _decode_bbox(self, normalized_bbox):\n #apply the inverse of transformation\n y1,x1,y2,x2 = preprocess.apply_transformation(normalized_bbox,\n np.linalg.inv(self.transformation))\n\n w,h = self.image_size\n y1,x1,y2,x2 = y1*h,x1*w,y2*h,x2*w\n return vot.Rectang...
Download dataset from GCS
def download(self): cloud_path = f"gs://{const.GCS_BUCKET}/{self.GCS_PATH}" # download label file label_zip = download_file_from_gcs( cloud_path, self.root, self.LABEL_ZIP ) with zipfile.ZipFile(label_zip, "r") as zip_dir: zip_dir.extractall(self.root) ...
[ "def download_data():\n # Load the Dataset from the public GCS bucket\n bucket = storage.Client().bucket('cloud-samples-data')\n # Path to the data inside the public bucket\n blob = bucket.blob('ml-engine/sonar/sonar.all-data')\n # Download the data\n blob.download_to_filename('sonar.all-data')", ...
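For comparison, the same single-object fetch written against the official google-cloud-storage client, the pattern the first negative also uses; bucket and blob names are placeholders:

from google.cloud import storage

def download_from_gcs(bucket_name: str, blob_path: str, dest: str) -> None:
    # Fetch one object from GCS into a local file.
    bucket = storage.Client().bucket(bucket_name)
    bucket.blob(blob_path).download_to_filename(dest)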
Finds number of documents in the Tweet collection matching a given search_term (and location, if provided).
def count_tweets(search_term, location=None):
    if location:
        return len(Tweet.objects(Q(keyword_search_term=search_term) & Q(location_address=location)))
    else:
        return len(Tweet.objects(keyword_search_term=search_term))
[ "def count_term_in_document(self, term, document):\n doc = self.get_document(document)\n for docterm, value in doc.get_terms():\n if docterm == term:\n return value\n return 0", "def count_term(self, term):\n term_entry = self.get_term(term)\n if term_e...
Calculates a keyword's historical sentiment (restricted within a location, if provided).
def get_historical_sentiment(search_term, location=None): if location: positive = len(Tweet.objects(Q(keyword_search_term=search_term) & Q(location_address=location) & Q(sentiment_type="positive"))) negative = len(Tweet.objects(Q(keyword_search_term=search_term) & Q(location_address=location) &...
[ "def get_historical_sentiment_avg(search_term, location=None):\r\n\r\n total = 0\r\n\r\n if location:\r\n tweets = Tweet.objects(Q(keyword_search_term=search_term) & Q(location_address=location))\r\n count = len(tweets)\r\n else:\r\n tweets = Tweet.objects(Q(keyword_search_term=search_...
Calculates the average sentiment score for a given keyword (restricted within a location, if provided).
def get_historical_sentiment_avg(search_term, location=None): total = 0 if location: tweets = Tweet.objects(Q(keyword_search_term=search_term) & Q(location_address=location)) count = len(tweets) else: tweets = Tweet.objects(Q(keyword_search_term=search_term)) count...
[ "def get_avg_sentiment(self, search_phrase):\n # get relevant documents\n self.relevant_documents = self.get_relevant_documents(search_phrase)\n\n # return average polarity for phrase\n average_polarity = 0\n for i in self.relevant_documents['hits']['hits']:\n average_p...
Calculates the average sentiment score in a given query set of Tweets.
def get_query_sentiment_avg(tweets):
    total = 0
    count = len(tweets)
    for tweet in tweets:
        total += tweet.sentiment_score
    # Calculate average
    avg = total / count
    avg = float("{0:.2f}".format(float(avg)))
    return avg
[ "def insight(user_dict):\n users_sent = user_sentiment(user_dict)\n tweet_sent = tweet_sentiment(user_dict)\n return np.mean(users_sent), np.mean(tweet_sent)", "def get_query_statistics(tweets, sentiment_aggregate_list):\r\n\r\n total = len(tweets)\r\n positive_percentage = float(\"{0:.2f}\".format...
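Note that an empty query set makes count zero, so the division raises ZeroDivisionError. A guarded variant (a sketch, not the author's code; returning 0.0 for an empty set is an assumption):

def get_query_sentiment_avg_safe(tweets):
    count = len(tweets)
    if count == 0:
        return 0.0  # assumption: define the average over no tweets as 0.0
    total = sum(tweet.sentiment_score for tweet in tweets)
    return round(total / count, 2)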
Generates basic statistics for a given query set of Tweets.
def get_query_statistics(tweets, sentiment_aggregate_list): total = len(tweets) positive_percentage = float("{0:.2f}".format((float(sentiment_aggregate_list[0][1]/total*100)))) neutral_percentage = float("{0:.2f}".format((float(sentiment_aggregate_list[1][1]/total*100)))) negative_percentage = flo...
[ "def analyze_tweets():", "def query_all_tweets(query):\n year = 2006\n month = 3\n\n limits = []\n while date(year=year, month=month, day=1) < date.today():\n nextmonth = month + 1 if month < 12 else 1\n nextyear = year + 1 if nextmonth == 1 else year\n\n limits.append(\n ...
Aggregates sentiment types for a given tweet collection.
def aggregate_sentiment(tweets): positive = 0 negative = 0 neutral = 0 for tweet in tweets: if tweet.sentiment_type == "positive": positive += 1 elif tweet.sentiment_type == "negative": negative += 1 else: neutral += 1 resu...
[ "def collect(self, collect_types: List[str]) -> None:\n valid_types = [x for x in collect_types if x in self._valid_types.keys()]\n for ctype in valid_types:\n self._collect_tweets(ctype)", "def do_sentiment_analysis(self):\n\n tweets_sentiment = []\n\n for tweet in self.twe...
Gets the predominant sentiment type from a list of sentiments (e.g. [[positive, 3], [neutral, 10], [negative, 15]]).
def predominant_sentiment(sentiment_aggregate_list): positive = int(sentiment_aggregate_list[0][1]) neutral = int(sentiment_aggregate_list[1][1]) negative = int(sentiment_aggregate_list[2][1]) if positive > neutral and positive > negative: return "positive" elif neutral > positive ...
[ "def classify_sentiment(sent_index):\n\n\tif sent_index < -0.5:\n\t\treturn 'negative'\n\tif sent_index <= 0.5 and sent_index >= -0.5:\n\t\treturn 'neutral'\n\tif sent_index >= 0.5:\n\t\treturn 'positive'", "def classify(tweets, positives, negatives):\n sentiment_list = makelist(tweets, positives, negatives)\n...
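The truncated comparison chain can be expressed with max(); resolving ties by list order is an assumption here, since the original's tie handling is cut off:

def predominant_sentiment_compact(sentiment_aggregate_list):
    # Input like [["positive", 3], ["neutral", 10], ["negative", 15]].
    label, _count = max(sentiment_aggregate_list, key=lambda pair: int(pair[1]))
    return label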
Gets statistics on the average sentiment for a given keyword (and location, if specified) over the past 10 days.
def get_sentiment_overtime(keyword, location=None): # Get date 10 days ago ten_days_ago = datetime.now() - timedelta(days=10) # Get raw PyMongo collection collection = Tweet._get_collection() if location: match = { "$match": { "k...
[ "def get_historical_sentiment_avg(search_term, location=None):\r\n\r\n total = 0\r\n\r\n if location:\r\n tweets = Tweet.objects(Q(keyword_search_term=search_term) & Q(location_address=location))\r\n count = len(tweets)\r\n else:\r\n tweets = Tweet.objects(Q(keyword_search_term=search_...
Gets the top 10 most positive / negative sentiment triggers from the past 7 days.
def get_sentiment_trends(order): # Get date seven days ago seven_days_ago = datetime.now() - timedelta(days=7) # Get raw PyMongo collection collection = Tweet._get_collection() # Perform aggregate query result = collection.aggregate([ { "$match": ...
[ "def recent_documents(self, count=10):\n recent_docs = heapq.nlargest(count, self.documents)\n return recent_docs", "def get_top_tweets():\n Tweet.top_tweets = [(k, v) for k, v in sorted(Tweet.hashtag_counter.items(), key=lambda item: item[1], reverse=True)]\n top_10_tweets = {}\n ...
Load the feed url into self.entries using the feedparser module.
def __init__(self, url=URL): self.entries = feedparser.parse(url).entries
[ "def __init__(self, url):\n\n self.url = url\n self.feed, self.keys, self.entries = self.parse_rss_feed()", "def parse_feed(self):\n parsed_feed = feedparser.parse(self.rss_url)\n # Check for malformed feed\n if parsed_feed['bozo']:\n raise Exception('malformed rss fe...
Return a list of episode IDs (itunes_episode attribute) of the episodes the passed-in domain was mentioned in.
def get_episode_numbers_for_mentioned_domain(self, domain: str) -> list: return [ep.itunes_episode for ep in self.entries if domain.lower() in ep.summary.lower()]
[ "def get_episodes(self) -> List[Episode]:", "def episode_information(self):\n return [info.get('episode') for info in self.environment_information if 'episode' in info]", "def episodes(self):\n episodes = []\n for series in self.series:\n episodes.extend(series.episodes)\n ...
Return the number of episodes that had one or more special guests featured (use SPECIAL_GUEST).
def number_episodes_with_special_guest(self) -> int: return len([ep for ep in self.entries if SPECIAL_GUEST in ep.summary])
[ "def get_num_episodes(self) -> int:\n return len(self.episodes)", "def get_count_episodes(self):\n return self.sess.run(self.count_episodes)", "def get_num_episodes(self):\n return self.num_evaluation_episodes", "def guestCount(self):\n return len( self.guests )", "def num_episod...
Return the average duration in seconds of a Python Bytes episode, as a NamedTuple.
def get_average_duration_episode_in_seconds(self) -> NamedTuple: times = [ep.itunes_duration for ep in self.entries] format_times = [] for time in times: if not time.startswith('00'): time = '0' + time format_times.append(time) dts = [datetime.st...
[ "def get_wav_duration(wav_bytes: bytes) -> float:\n with io.BytesIO(wav_bytes) as wav_buffer:\n wav_file: wave.Wave_read = wave.open(wav_buffer, \"rb\")\n with wav_file:\n frames = wav_file.getnframes()\n rate = wav_file.getframerate()\n return frames / float(rate)"...
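A sketch of the duration normalization the snippet starts: itunes_duration values like '59:51' are padded to hours:minutes:seconds and converted to seconds. This uses plain string parsing rather than the datetime.strptime route the original takes:

def duration_to_seconds(duration: str) -> int:
    # Pad '59:51' to '0:59:51' so every value has three fields.
    parts = duration.split(":")
    while len(parts) < 3:
        parts.insert(0, "0")
    hours, minutes, seconds = (int(p) for p in parts)
    return hours * 3600 + minutes * 60 + seconds

For example, duration_to_seconds('1:10:42') returns 4242.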
Build an index from word to set of document indexes. This does the exact same thing as create_index() except that it uses your htable. As the number of htable buckets, use 4011. Returns a list-of-buckets hashtable representation.
def myhtable_create_index(files): res_buckets = htable(4011) for id, file in enumerate(files): if file[-4:] == '.txt': word_list = words(get_text(file)) for word in word_list: value = htable_get(res_buckets, word) if value == None: ...
[ "def myhtable_create_index(files):\n\n # dct_index = defaultdict() # Create an empty dict\n # for file in files: # Iterate through every given file names\n # s_content = get_text(file) # Turn each file name into a string content\n # lst_word = words(s_content) # Turn the string content into a li...
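To make the list-of-buckets representation concrete, illustrative stand-ins for the course-provided helpers the exercise assumes (htable, htable_get, htable_put); the real assignment supplies its own versions:

def htable(nbuckets):
    # A hashtable is just a list of empty bucket lists.
    return [[] for _ in range(nbuckets)]

def htable_get(table, key):
    bucket = table[hash(key) % len(table)]
    for k, v in bucket:
        if k == key:
            return v
    return None

def htable_put(table, key, value):
    bucket = table[hash(key) % len(table)]
    for i, (k, _) in enumerate(bucket):
        if k == key:
            bucket[i] = (key, value)  # overwrite existing association
            return
    bucket.append((key, value))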
This does the exact same thing as index_search() except that it uses your htable. I.e., use htable_get(index, w) not index[w].
def myhtable_index_search(files, index, terms): res_file = [] count = 0 if len(terms) == 0: print('empty terms') return for term in terms: term = term.lower() count += 1 if count == 1: s = htable_get(index, term) if s == None: ...
[ "def test_search_index_get(self):\n pass", "def test_search_indices_get(self):\n pass", "def myhtable_index_search(files, index, terms):\n # set_file = set(files)\n # for term in terms:\n # set_file = set_file.intersection(index[term]) # The set will getting smaller under each loop\n ...
Tests whether building a Dirichlet ensemble runs without problems.
def test_dirichletensemble(): np.random.seed(seed=2) X, y = make_blobs(n_samples=200, centers=2, n_features=2, cluster_std=4, random_state=2) n_train = 100 trainX, testX = X[:n_train, :], X[n_train:, :] trainy, testy = y[:n_train], y[n_train:] n_members = 5 stack = Dir...
[ "def should_build_ensemble(self, num_subnetworks):", "def test_convergence(self):\n try:\n return self._test_convergence\n except:\n return False", "def test_training(self):\n\t\tpass", "def test_full_workflow():\n from libensemble.ensemble import Ensemble\n from libe...
Map s_new to t_new based on the known mapping of s (source) to t (target), with s in original/intrinsic coordinates and t in intrinsic/original coordinates.
def mapping(s, t, s_new, k,c): n, s_dim = s.shape t_dim = t.shape[1] n_new = s_new.shape[0] # 1. determine nearest neighbors dist = np.sum((s[np.newaxis] - s_new[:,np.newaxis])**2,-1) nn_ids = np.argsort(dist)[:,:k] # change to [:,:k] nns = np.row_stack([s[nn_ids[:,ki]] for ki in range(k)]) ...
[ "def find_mapping(target: np.ndarray, source_color: np.ndarray, n_col=5, n_row=5, offset=0)-> np.ndarray:\n results = []\n target = target.astype(np.uint8)\n #create RGBA image to overlay target and source segments\n overlay = cv2.cvtColor(target, cv2.COLOR_GRAY2RGBA).astype(np.uint16)\n source_gray ...
Reads the source and creates a new brace token.
def create_token(self):
    token = Token(PAREN.get(self.current_char), "brace")
    self.current_char = self.source.read(1)
    return token
[ "def _parse_till_closing_brace(stream):\n rv = \"\"\n in_braces = 1\n while True:\n if EscapeCharToken.starts_here(stream, '{}'):\n rv += stream.next() + stream.next()\n else:\n c = stream.next()\n if c == '{': in_braces += 1\n elif c == '}': in_bra...
Receives a char and returns whether it is a left or right brace.
def should_lex(cls, char): return char == '{' or char == '}'
[ "def __isDelimitadores(self, char):\n return char in r\"(){},\"", "def check_for_open_bracket(char_in):\n global bracket_count\n if char_in is \"[\" or char_in is \"{\":\n bracket_count += 1\n return True\n else:\n return False", "def check_for_close_bracket(char_in):\n g...
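A usage sketch tying the two methods together: a driver asks should_lex whether the current char is a brace and, if so, emits a token. Token, PAREN, and the one-char read protocol are assumptions based on the snippets:

import io

PAREN = {"{": "L_BRACE", "}": "R_BRACE"}  # assumed mapping

class Token:
    def __init__(self, kind, group):
        self.kind, self.group = kind, group

def lex_braces(source):
    # Read one char at a time, emitting a token per brace.
    tokens = []
    char = source.read(1)
    while char:
        if char == '{' or char == '}':  # the should_lex() check
            tokens.append(Token(PAREN[char], "brace"))
        char = source.read(1)
    return tokens

# lex_braces(io.StringIO("{a}")) yields two brace tokens and skips 'a'.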
Crop graph image. Crops the desired image by its type.
def crop_image(self):
    image_data = Image.open(self.img_path)
    return image_data.crop(self.data_type)
[ "def crop_image(self, image):\n\n pass", "def crop_image(self, img):\n img.crop_image(self._center, 1.1 * self._radius)", "def crop_pic(self):\n\n if self._isSmall:\n self._allind = {'chart': [(740,240),(2400,3250)]}\n\n self._cut_chart = self.pic[740:2400,240:3250,:]\...
Transform image into array. Transforms the cropped image into a numpy multidimensional array.
def np_image_matrix(self): return np.array(self.crop_image())
[ "def image_to_array(self, img):\n x = np.asarray(img, dtype=self.dtype)\n if len(x.shape) == 3:\n if self.channels_first:\n x = x.transpose(2, 0, 1)\n elif len(x.shape) == 2:\n if self.channels_first:\n x = x.reshape((1, x.shape[0], x.shape[1]...
Find blue pixels. Finds all blue pixels inside the graph area, which represent the desired points of the graph. The method generates a numpy 2d array with these pixels' relative positions.
def blue_matrix(self): return np.vstack(np.where(self.np_image_matrix() == 2))
[ "def getBlue(self):\n return self.pixels[self.x, self.y][2]", "def get_blue(x, y, slot = 0):\r\n return __g[slot].pixels_rgb[__g[slot].width * 3 * y + x * 3 + 2]", "def get_blue(self, x, y):\n self.__check_dimensions(x, y)\n return self.pixels[(x, y)].get_blue()", "def blue_channel(im...
Clean repeated j pixels. Finds the first item of each row and gets the pixels with the lowest j value, which represents the biggest real value on the y axis of the graph, crossed with the x axis.
def clean_double_values(self): trans_blue = self.blue_matrix().transpose() b_array = [] for i in trans_blue: min_col = [i[0], i[1]] for j in trans_blue[0:]: if j[1] == min_col[1]: if j[0] < min_col[0]: min_col[0]...
[ "def smallestReact(im):\r\n b , c , h , w = im.size()\r\n m = np.count_nonzero(im == 1 , axis = 1)\r\n C = np.zeros((b,4))\r\n for i in range(b):\r\n if m.sum(axis = 1)[i].sum() == 0:\r\n #there is no mask predicted\r\n max_x = w\r\n min_x = 0\r\n max_y...
Saves csv file into image folder. Saves the data generated by the class into a csv file named after the image plus the type of data. This method keeps track of whether the file was already generated, and replaces it with a new one.
def save_values(self): f_name = self.img_path.split('.')[0] + '_{}_'.\ format(self.data_type_name) + '.csv' dir_name = os.path.join(self.base_dir, f_name) if not os.path.exists(dir_name): for data_list in self.converted_values(): with open(f_name, 'a') as ...
[ "def save_to_file(self):\n file_name = 'data/out/' + self.type + '.csv'\n logger.info('Saving pre-processed data to ' + file_name)\n\n self.data.to_csv(file_name, index=False)", "def save_csv(self):\n if not self.__is_csv():\n # creates the csv file if it did not exist.\n ...
Run an across-subject classification. Decode responses on each hand separately from CPRO data. Limit to ROIs within the SMN network.
def conditionDecodings(data, rois, ncvs=100, effects=False, motorOutput=False,confusion=False, decoder='similarity', nproc=5): ncond = data.shape[1] # two motor outputs nSubjs = data.shape[2] nsamples = nSubjs * ncond stats = np.zeros((len(rois),nsamples)) rmatches = np.zeros((len(rois),)) ...
[ "def execute(self, requests):\n responses = []\n for request in requests:\n infer_outputs = pb_utils.get_input_tensor_by_name(\n request, self.input_names[0])\n im_infos = pb_utils.get_input_tensor_by_name(request,\n ...
Returns a dictionary containing all diagnostic light curves. The dictionary will provide a light curve for each matrix in the design matrix collection.
def _create_diagnostic_lightcurves(self): if self.coefficients is None: raise ValueError("you need to call `correct()` first") lcs = {} for idx, submatrix in enumerate(self.dmc.matrices): # What is the index of the first column for the submatrix? firstcol_idx...
[ "def generate_materials_dict(self):\n c = 299792458.0\n w_mat = 2 * np.pi * c / self.l_mat - self.w0\n l2_mat = (self.l_mat * 1e6) ** 2\n\n n_air = 1 + 0.05792105 * l2_mat / (238.0185 * l2_mat - 1) + 0.00167917 * l2_mat / (57.362 * l2_mat - 1)\n air_ip = interp1d(w_mat, n_air, bou...
Produce diagnostic plots to assess the effectiveness of the correction.
def _diagnostic_plot(self): if not hasattr(self, "corrected_lc"): raise ValueError( "Please call the `correct()` method before trying to diagnose." ) with plt.style.context(MPLSTYLE): _, axs = plt.subplots(2, figsize=(10, 6), sharex=True) ...
[ "def galactic_correction_summary(self):\n t = self.GalacticCorrection(self).x\n dd = dict()\n for i,c in enumerate(t.T):\n dd[i] = dict(offset=c.mean()-1,std=c.std())\n df = pd.DataFrame(dd).T\n fig, ax = plt.subplots()\n ax.errorbar(x=range(8), y=df.offset.value...
Takes a discrete point in time and puts the position, velocity, and acceleration into a ROS JointTrajectoryPoint() to be put into a RobotTrajectory.
def trajectory_point(self, t, jointspace): point = JointTrajectoryPoint() delta_t = .01 if jointspace: x_t, x_t_1, x_t_2 = None, None, None ik_attempts = 0 theta_t_2 = self.get_ik(self.target_position(t-2*delta_t)) theta_t_1 = self.get_ik(self.targ...
[ "def trajectory_point(self, t, jointspace):\n point = JointTrajectoryPoint()\n delta_t = .01\n if jointspace:\n x_t, x_t_1, x_t_2 = None, None, None\n ik_attempts = 0\n theta_t = theta_t_1 = theta_t_2 = None\n while theta_t_2 is None:\n ...
Remember to call the constructor of MotionPath
def __init__(self, total_time, kin, limb, ar_tag_pos): # raise NotImplementedError self.r = .1 MotionPath.__init__(self, limb, kin, total_time) self.ar_tag_pos = np.array([ar_tag_pos[0],ar_tag_pos[1],ar_tag_pos[2]]) self.ar_tag_pos[2] = 0.282 self.start_pos = [ar_tag_po...
[ "def __init__(self, limb, kin, total_time, goal_pos, num_way, start_pos=None):\n MotionPath.__init__(self, limb, kin, total_time)\n self.start_pos = start_pos\n self.goal_pos = goal_pos\n self.num_way = num_way\n self.base_frame = 'base'\n self.tool_frame = 'left_hand_camer...
Returns the arm's desired velocity in workspace coordinates at time t. You should NOT simply take a finite difference of self.target_position()
def target_velocity(self, time):
    x_v = self.w*self.r*cos(self.w*time)
    y_v = -self.w*self.r*sin(self.w*time)
    z_v = 0
    # raise NotImplementedError
    return np.array([x_v, y_v, z_v])
[ "def getVel(self, t):\n tt = t - self.__startTime\n return Vec3(self.__startVel[0],\n self.__startVel[1],\n self.__startVel[2] + (self.__zAcc * tt))", "def velocity(self):\n multirotor_state = self.sim_handler.client_state\n airsim_lin_vel = multir...
Returns the arm's desired x,y,z acceleration in workspace coordinates at time t. You should NOT simply take a finite difference of self.target_velocity()
def target_acceleration(self, time):
    x_a = -self.w**2*self.r*sin(self.w*time)
    y_a = -self.w**2*self.r*cos(self.w*time)
    z_a = 0
    # raise NotImplementedError
    return np.array([x_a, y_a, z_a])
[ "def atTime(self, t):\n return self.position + self.velocity * t + self.acceleration * t**2", "def _acceleration(self, t, r, v):\n real_accel = -G * (\n M_SUN * (r - self._r_sun) / np.linalg.norm(r - self._r_sun) ** 3\n + self._M_P * (r - self._r_p) / np.linalg.norm(r - self._r...
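As a sanity check that these are analytic derivatives rather than finite differences: the implied position is (r·sin(wt), r·cos(wt)), and sympy confirms the velocity and acceleration formulas above (w and r stand in for self.w and self.r):

import sympy as sp

t, w, r = sp.symbols("t w r")
x, y = r * sp.sin(w * t), r * sp.cos(w * t)  # implied circular position

vx, vy = sp.diff(x, t), sp.diff(y, t)
assert sp.simplify(vx - w * r * sp.cos(w * t)) == 0   # matches x_v
assert sp.simplify(vy + w * r * sp.sin(w * t)) == 0   # matches y_v

ax, ay = sp.diff(vx, t), sp.diff(vy, t)
assert sp.simplify(ax + w**2 * r * sp.sin(w * t)) == 0  # matches x_a
assert sp.simplify(ay + w**2 * r * sp.cos(w * t)) == 0  # matches y_a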
Build UPDATE SQL statement.
def sql(self): if not self._table_names: raise ValueError('UPDATE requires at least one table') if not self._values and not self._values_raw: raise ValueError('UPDATE requires at least one value') table_refs = [', '.join(self._table_names)] param_values = [] ...
[ "def generate_update_sql(self, fieldupdate, condition):\n return \"UPDATE %s SET %s WHERE %s\" % (self.tablename, fieldupdate, condition)", "def getSQL_update(table, **kwargs):\n kvs = ''\n kvs_where = ''\n for k, v in kwargs.items():\n if k.startswith('where'):\n kvs_where += k[...
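A minimal sketch of the same UPDATE-building idea with parameter placeholders; _table_names and the values mapping mirror names in the snippet, everything else is illustrative:

def build_update_sql(table_names, values):
    # values: mapping of column name -> parameter value.
    if not table_names:
        raise ValueError('UPDATE requires at least one table')
    if not values:
        raise ValueError('UPDATE requires at least one value')
    assignments = ', '.join(f'{col} = %s' for col in values)
    sql = f"UPDATE {', '.join(table_names)} SET {assignments}"
    return sql, list(values.values())

# build_update_sql(['t'], {'a': 1}) -> ("UPDATE t SET a = %s", [1])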
Reduce this Dataset's data by applying ``count`` along some dimension(s).
def count(
    self,
    dim: Dims = None,
    *,
    keep_attrs: bool | None = None,
    **kwargs: Any,
) -> Dataset:
    return self.reduce(
        duck_array_ops.count,
        dim=dim,
        numeric_only=False,
        keep_attrs=keep_attrs,
        **kwargs,
    )
[ "def count(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.count,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def...
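These generated reductions all delegate to .reduce() with the matching duck_array_ops function. In user code the call is the standard xarray API:

import numpy as np
import xarray as xr

ds = xr.Dataset({"da": ("time", [1.0, np.nan, 3.0])})
print(ds.count(dim="time"))  # counts non-NaN values: da == 2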
Reduce this Dataset's data by applying ``prod`` along some dimension(s).
def prod( self, dim: Dims = None, *, skipna: bool | None = None, min_count: int | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: return self.reduce( duck_array_ops.prod, dim=dim, skipna=ski...
[ "def prod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.prod,\n dim=dim,\n ...
Reduce this Dataset's data by applying ``sum`` along some dimension(s).
def sum( self, dim: Dims = None, *, skipna: bool | None = None, min_count: int | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: return self.reduce( duck_array_ops.sum, dim=dim, skipna=skipn...
[ "def _sum_remaining_dims(data: sc.DataArray, dim: str) -> sc.DataArray:\n to_be_summed = set(data.dims) - set([dim])\n summed = data\n for dim_ in to_be_summed:\n summed = sc.sum(summed, dim_)\n return summed", "def sum(self, dim=None, keepdim=False):\n return array_funcs.sum(self, dim, keep...
Reduce this Dataset's data by applying ``std`` along some dimension(s).
def std( self, dim: Dims = None, *, skipna: bool | None = None, ddof: int = 0, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: return self.reduce( duck_array_ops.std, dim=dim, skipna=skipna, ...
[ "def std(self, axis=None):\n return self.map(lambda group, imgs: imgs.std(axis=axis))", "def nanstd(self, axis=0, **kwargs) -> \"Dataset\":\n return self.aggregate(axis=axis, func=np.nanstd, **kwargs)", "def divide_by_temporal_std(self):\n if not hasattr(self,'mean_removed_data'):\n ...
Reduce this Dataset's data by applying ``var`` along some dimension(s).
def var( self, dim: Dims = None, *, skipna: bool | None = None, ddof: int = 0, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: return self.reduce( duck_array_ops.var, dim=dim, skipna=skipna, ...
[ "def var(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n ddof: int = 0,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_...
Reduce this Dataset's data by applying ``median`` along some dimension(s).
def median( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: return self.reduce( duck_array_ops.median, dim=dim, skipna=skipna, numeric_only=True...
[ "def median(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.median,\n dim=dim,\n skipna=skipna,\n ...
Reduce this Dataset's data by applying ``cumsum`` along some dimension(s).
def cumsum( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: return self.reduce( duck_array_ops.cumsum, dim=dim, skipna=skipna, numeric_only=True...
[ "def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n ...
Reduce this Dataset's data by applying ``cumprod`` along some dimension(s).
def cumprod( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: return self.reduce( duck_array_ops.cumprod, dim=dim, skipna=skipna, numeric_only=Tr...
[ "def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n ...
Reduce this DataArray's data by applying ``count`` along some dimension(s).
def count(
    self,
    dim: Dims = None,
    *,
    keep_attrs: bool | None = None,
    **kwargs: Any,
) -> DataArray:
    return self.reduce(
        duck_array_ops.count,
        dim=dim,
        keep_attrs=keep_attrs,
        **kwargs,
    )
[ "def count(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n ...
Reduce this DataArray's data by applying ``mean`` along some dimension(s).
def mean( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: return self.reduce( duck_array_ops.mean, dim=dim, skipna=skipna, keep_attrs=keep_att...
[ "def mean(self, dim=None, keepdim=False):\n return array_funcs.mean(self, dim, keepdim)", "def reduce_mean(input_tensor, axis=None):\n\n return pd.mean(input_tensor, axis)", "def mean(self, axis=None):\n return self.map(lambda group, imgs: imgs.mean(axis=axis))", "def _mean(input, reduction_indic...
Reduce this DataArray's data by applying ``prod`` along some dimension(s).
def prod( self, dim: Dims = None, *, skipna: bool | None = None, min_count: int | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: return self.reduce( duck_array_ops.prod, dim=dim, skipna=s...
[ "def prod(self, dataArray, index=None, keep=None):\n dims = self._getDimsToOperate(dataArray, index, keep)\n return dataArray.prod(dims)", "def _prod(input, reduction_indices, keep_dims=None, name=None):\n result = _op_def_lib.apply_op(\"Prod\", input=input,\n reducti...
Reduce this DataArray's data by applying ``var`` along some dimension(s).
def var( self, dim: Dims = None, *, skipna: bool | None = None, ddof: int = 0, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: return self.reduce( duck_array_ops.var, dim=dim, skipna=skipna, ...
[ "def var(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n ddof: int = 0,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contain...
Reduce this DataArray's data by applying ``median`` along some dimension(s).
def median( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: return self.reduce( duck_array_ops.median, dim=dim, skipna=skipna, keep_attrs=keep...
[ "def median(self, axis=None):\n return self.map(lambda group, imgs: imgs.median(axis=axis))", "def median(array: ulab.numpy.ndarray, *, axis: int = -1) -> ulab.numpy.ndarray:\n ...", "def median(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_a...
Reduce this DataArray's data by applying ``cumsum`` along some dimension(s).
def cumsum( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: return self.reduce( duck_array_ops.cumsum, dim=dim, skipna=skipna, keep_attrs=keep...
[ "def cumsum(self, dim):\n return array_funcs.cumsum(self, dim)", "def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.c...
Reduce this DataArray's data by applying ``cumprod`` along some dimension(s).
def cumprod( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: return self.reduce( duck_array_ops.cumprod, dim=dim, skipna=skipna, keep_attrs=ke...
[ "def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n ...
Reduce this Dataset's data by applying ``all`` along some dimension(s).
def all( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_redu...
[ "def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n r...
Reduce this Dataset's data by applying ``any`` along some dimension(s).
def any( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_redu...
[ "def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n r...
Reduce this Dataset's data by applying ``all`` along some dimension(s).
def all( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_redu...
[ "def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n r...
Reduce this Dataset's data by applying ``any`` along some dimension(s).
def any( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_redu...
[ "def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n r...
Reduce this Dataset's data by applying ``var`` along some dimension(s).
def var( self, dim: Dims = None, *, skipna: bool | None = None, ddof: int = 0, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_num...
[ "def var(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n ddof: int = 0,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.var,\n dim=dim,\n skipna=ski...
Reduce this Dataset's data by applying ``cumsum`` along some dimension(s).
def cumsum( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: return self.reduce( duck_array_ops.cumsum, dim=dim, skipna=skipna, numeric_only=True...
[ "def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n ...
Reduce this Dataset's data by applying ``cumprod`` along some dimension(s).
def cumprod( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: return self.reduce( duck_array_ops.cumprod, dim=dim, skipna=skipna, numeric_only=Tr...
[ "def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n ...
Reduce this DataArray's data by applying ``count`` along some dimension(s).
def count( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_...
[ "def count(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.count,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def...
Reduce this DataArray's data by applying ``all`` along some dimension(s).
def all( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_re...
[ "def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n ret...
Reduce this DataArray's data by applying ``any`` along some dimension(s).
def any( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_re...
[ "def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n ret...
Reduce this DataArray's data by applying ``var`` along some dimension(s).
def var( self, dim: Dims = None, *, skipna: bool | None = None, ddof: int = 0, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_n...
[ "def var(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n ddof: int = 0,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.var,\n dim=dim,\n skipna=s...
Reduce this DataArray's data by applying ``cumsum`` along some dimension(s).
def cumsum( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: return self.reduce( duck_array_ops.cumsum, dim=dim, skipna=skipna, keep_attrs=keep...
[ "def cumsum(self, dim):\n return array_funcs.cumsum(self, dim)", "def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.c...
Reduce this DataArray's data by applying ``cumprod`` along some dimension(s).
def cumprod( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: return self.reduce( duck_array_ops.cumprod, dim=dim, skipna=skipna, keep_attrs=ke...
[ "def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n ...
Reduce this DataArray's data by applying ``all`` along some dimension(s).
def all( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_re...
[ "def all(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n ret...
Reduce this DataArray's data by applying ``any`` along some dimension(s).
def any( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_re...
[ "def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n ret...
Promote a member's XP role to the next XP role (admin only).
async def promote(self, ctx, *, member = None): # Only allow admins to change server stats if not await self._can_run(ctx): return em = discord.Embed(color = 0XFF8C00, description = "Menaikan jabatan role xp member ke role xp selanjutnya\n\n" ...
[ "async def add_xp(self, ctx, number: int, member: discord.Member = None):\n if member.bot:\n return\n if member is None:\n member = ctx.author\n if os.path.isfile(f'assets/json/server/{str(ctx.guild.id)}/level.json'):\n with open(f'assets/json/server/{str(ctx.gu...
Authenticates a user based on the OIDC code flow.
def authenticate(self, request, **kwargs): self.request = request if not self.request: return None state = self.request.GET.get('state') code = self.request.GET.get('code') nonce = kwargs.pop('nonce', None) if not code or not state: return None ...
[ "def get_authenticated_user(self, callback, http_client=None):\r\n # Verify the OpenID response via direct request to the OP\r\n args = dict((k, v[-1]) for k, v in self.request.arguments.items())\r\n args[\"openid.mode\"] = u(\"check_authentication\")\r\n url = self._OPENID_ENDPOINT\r\n ...
Size the core of the level shifter given k_ratio, the ratio of the NMOS to the PMOS.
def _design_lvl_shift_core_size(cload: float, k_ratio: float, inv_input_cap: float, fanout: float, is_ctrl: bool) -> Tuple[int, int, int]: out_inv_input_cap = cload / fanout print(f'cload = {cload}') inv_m = int(round(out_inv_input_cap / inv_input_cap)) ...
[ "def _SizeCalculator(partition_size):\n # Max image size grows less than partition size, which means\n # footer size grows faster than partition size.\n return int(math.pow(partition_size, 0.95))", "def calc_size(self):\n\t\t# m = - (n * ln(p)) / (ln(2))**2\n\t\treturn int(ceil(- (float(self.capaci...
Given the NMOS segments and the PMOS segments ratio for the core, this function designs the internal inverter. For a control level shifter, we don't care about matching rise/fall delay, so we just size for fanout.
async def _design_lvl_shift_internal_inv(self, pseg: int, nseg: int, out_inv_m: int, fanout: float, pinfo: Any, tbm_specs: Dict[str, Any], is_ctrl: bool, has_rst: bool, dual_output: boo...
[ "def erf_inverter(sp_path, \n\t\t\t\t inv_name, \n\t\t\t\t inv_drive_strength, \n\t\t\t\t parameter_dict,\n\t\t\t\t fpga_inst, \n\t\t\t\t spice_interface): \n\t\n\tif ERF_MONITOR_VERBOSE:\n\t\tprint(\"ERF MONITOR: \" + inv_name) \n\t \n\tpmos_name = inv_name + \"_pmos\"\n\tnmos_name = inv_name + \"_nmos\"\n\...
Given all other sizes and total output inverter segments, this function will optimize the output inverter to minimize rise/fall mismatch.
async def _design_output_inverter(self, inv_in_pseg: int, inv_in_nseg: int, pseg: int, nseg: int, inv_nseg: int, inv_pseg: int, out_inv_m: int, fanout: float, pinfo: Any, tbm_specs: Dict[str, Any], has_rst,...
[ "async def _design_lvl_shift_internal_inv(self, pseg: int, nseg: int, out_inv_m: int,\n fanout: float,\n pinfo: Any, tbm_specs: Dict[str, Any], is_ctrl: bool,\n has_rst: bool, dual_out...
Creates a dictionary of parameters for the layout class LevelShifter
def _get_lvl_shift_params_dict(pinfo: Any, seg_p: int, seg_n: int, seg_inv_p: int, seg_inv_n: int, seg_in_inv_p: int, seg_in_inv_n: int, out_inv_m: int, has_rst: bool, dual_output: bool, is_ctrl: bool = False, ...
[ "def _build_param_dict(self):\n \n \"\"\"\n DHE Trying this new model with menu_path and then final submenu for\n both read and write operations\n \"\"\"\n self._param_dict.add(Parameter.BAUDRATE,\n r'Baudrate:\\s+(\\d+)',\n ...
Handle mocked API request for repo existence check.
def callback_repo_check(self, request, uri, headers, status_code=404): self.assertEqual( request.headers['Authorization'], 'token {0}'.format(self.OAUTH2_TOKEN) ) # Handle the new "rerun" repo differently if self.TEST_RERUN_REPO in uri: status_code = 4...
[ "def test_get_github_repos_info_positive(self):\n self.assertIsNotNone(app.get_github_repos_info(\"dhh\")[\"repo_info\"])", "def test_api_repo_status_get(self):\n default_api = DefaultApi(api_client=self.api_client)\n params = dlrnapi_client.Params2()\n path, method = default_api.api_r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Manage both add and delete of team membership. ``action_list`` is a list of tuples with (``username``, ``added (bool)``) to track state of membership since this will get called multiple times in one library call.
def callback_team_membership( request, uri, headers, success=True, action_list=None ): # pylint: disable=too-many-arguments username = uri.rsplit('/', 1)[1] if not success: status_code = 500 if request.method == 'DELETE': if success: ...
[ "def actions(self, request, action_list, group):\n return action_list", "def update_member_list(request, **kwargs):\n data = request.DATA\n loadbalancer_id = data.get('loadbalancer_id')\n pool_id = kwargs.get('pool_id')\n existing_members = kwargs.get('existing_members')\n members_to_add = k...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Register repo check URL and method.
def register_repo_check(self, body): httpretty.register_uri( httpretty.GET, re.compile( '^{url}repos/{org}/({repo}|{repo_rerun})$'.format( url=self.URL, org=self.ORG, repo=re.escape(self.TEST_REPO), ...
[ "def addRepository(self, uri):\n pass", "def repository_create_hosted():\n pass", "def repository_create_proxy():\n pass", "def addRepository(self, name, url):\n sslVerify = \"yes\" if url.startswith(\"https\") else \"no\"\n self.manager.addKickstartRepository(self.currentProject, b...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Register url for repo create.
def register_repo_create(self, body): httpretty.register_uri( httpretty.POST, '{url}orgs/{org}/repos'.format( url=self.URL, org=self.ORG, ), body=body )
[ "def create(cls, repo, name, url, **kwargs):\r\n repo.git.remote( \"add\", name, url, **kwargs )\r\n return cls(repo, name)", "def repository_create_hosted():\n pass", "def repo_add(self, name, url, **kwargs):\n\n self.helm_client.repo_add(name, url, **kwargs)", "def addRepository(self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Simple hook list URL.
def register_hook_list(self, body=None, status=200): if body is None: body = json.dumps( [{ 'url': '{url}repos/{org}/{repo}/hooks/1'.format( url=self.URL, org=self.ORG, repo=self.TEST_REPO ) }] ...
[ "def get_list_link(self):", "def test_webhooks_list(self):\n pass", "def url(self):\n return reverse('snippet-list')", "def url_list(path):\n match = re.match(r'^.*(/wa/[A-Za-z0-9/-]+)([A-Za-z-]+)/([0-9]+/)?$', path)\n return u'%s%s%s/' % (match.group(1), match.group(2), \n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Register team repo addition.
def register_team_repo_add(self, body): httpretty.register_uri( httpretty.PUT, re.compile( r'^{url}teams/\d+/repos/{org}/({repo}|{rerun_repo})$'.format( url=self.URL, org=self.ORG, repo=re.escape(self.TEST_REPO),...
[ "def add_collaborator_team_to_repo(self, name_team, name_repo, permission):\n\n if name_repo not in self.repos:\n print_status('FAIL', 'Team (%s) cannot be added to repository (%s) as it cannot be found.' % (name_team, name_repo))\n return False\n else:\n obj_team = se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return tables of cells in the population named neuronPop that are connected to the mitrals specified in args, via the projection named neuronProj; if args is not specified, get all.
def exportTable(network, neuronProj, neuronPop, colours, \ args={}, spikes=True, allcells=True): exportDict = {'spikes':spikes,'data_tables':[]} if array(colours).shape == (3,): coloursList = False else: coloursList = True ## get cells connected to mitrals specified in args. if not specified...
[ "def get_grid_search(*args):\n import itertools\n grid = []\n for e in itertools.product(*args):\n grid.append({'lr': e[0],\n 'epochs': e[1],\n 'alpha': e[2],\n 'beta': e[3],\n 'lambda': e[4],\n 'nhid...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
We record the position in S of the first occurrence of each letter. If we encounter the letter a second time, we check the spacing.
def well_spaced(S, D): seen = [None] * 26 for i, c in enumerate(S): if seen[ord(c) - ord("a")] is None: seen[ord(c) - ord("a")] = i else: if i - seen[ord(c) - ord("a")] != D[ord(c) - ord("a")] + 1: return False return True
[ "def alphabet_position(letter):\n letter = letter.lower()\n alpha = string.ascii_lowercase #\"abcdefghijklmnopqustuvwxyz\"\n return alpha.find(letter)", "def alphabet_position(char):\n if type(char) != type(''):\n return -1\n if len(char) != 1:\n return -1\n if char.isalpha():\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Register publisher on nameserver. This works for PUBSUB only
def register_publisher(self, hostname, expire=-1):
[ "def register_publisher(self, publisher):\n topic = publisher.topic\n self.send(json.dumps({\n 'op': 'advertise',\n 'id': publisher.advertise_id,\n 'topic': publisher.topic,\n 'type': publisher.message_type,\n 'latch': publisher.latch,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Unregister publisher on nameserver. This works for PUBSUB only
def unregister_publisher(self, hostname):
[ "def unregister(self):\n return self._engine.exec(\"subscription-manager unregister\")", "def unregister(self):\n if self.hub.is_connected:\n self._is_registered = False\n self.hub.unregister(self._private_key)\n self._hub_id = None\n self._public_id = Non...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Register router on the nameserver. This works for ROUTER proxy only
def register_router(self, hostname, expire=-1):
[ "def registerOnWebServer(self):\n proxyName = \"PYRONAME:\" + self._webServer\n webServerProxy = Pyro4.Proxy(proxyName)\n print(\"Registering {} with Webserver: {} and WebserverProxy: {}\".format(self.processName, proxyName, webServerProxy))\n webServerProxy.registerOnWebServer(self.proc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Unregister router on the nameserver. This works for ROUTER proxy only
def unregister_router(self, hostname):
[ "def unregister(self, pollster):\n pollster.unregister(self._router_socket)", "def unregister_server():\n (code, message) = rest_api.unregister_server(request)\n if (code == 200):\n return message\n else:\n abort(code)", "def delete_router(self, router):\r\n return self.dele...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Register target on nameserver. If the record already exists and has an expiration timeout, it will be updated. Existing records without a timeout will stay untouched.
def register(self, target, hostname, listener_type, expire=-1):
[ "def register_router(self, hostname, expire=-1):", "def register_publisher(self, hostname, expire=-1):", "def _set_target_info_by_name(self, targets, port, target_name, iqn):\n host_iqn_registered_in_target = (\n self._get_host_iqn_registered_in_target_by_name(\n port, target_na...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Unregister target from nameserver.
def unregister(self, target, hostname, listener_type):
[ "def unregister_target(self, target_name, f):\n return self.targets.pop(target_name)", "def _delServer(self, name):\r\n if name in self.servers:\r\n server = self.servers[name]\r\n del self.servers[name]\r\n delattr(self, server._py_name)", "def unregister_router(s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all hosts from nameserver by target.
def get_hosts(self, target, listener_type):
[ "def _get_hosts(self, headers):\n return self.client().get(HOST_URL, headers=headers)", "def get_allhosts():\n connection, tablename = HomeNetwork.get_connection_info()\n query = 'SELECT hostname from {}'.format(tablename)\n output = pandas.read_sql_query(query, connection).to_json(ori...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }