query — string (length 9 to 9.05k)
document — string (length 10 to 222k)
negatives — list (length 19 to 20)
metadata — dict
Fill all pixels of the surface with the given color, preserving per-pixel transparency.
def fill(surface, color):
    w, h = surface.get_size()
    r, g, b, _ = color
    for x in range(w):
        for y in range(h):
            a = surface.get_at((x, y))[3]
            surface.set_at((x, y), pygame.Color(r, g, b, a))
[ "def fill_color(self, _col):\n for x in xrange(0, self.__resolution[0], 1):\n for y in xrange(0, self.__resolution[1], 1):\n self.__framebuffer[(x, y)] = _col", "def recolor_surface(surface, color):\n w, h = surface.get_size()\n r, g, b = color\n for x in range(w):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the absolute URL for this plan_proposal, for preview purposes.
def get_absolute_url(self):
    return reverse('plan_proposal', kwargs={'project_name': self.project.slug,
                                            'proposal_name': self.slug})
[ "def get_absolute_url(self) -> str:\n return self.proposition.get_absolute_url()", "def get_sprint_plan_url(self):\n try:\n plan_issue = self.config.get('sprint', 'plan_issue')\n project_url = self.get_project_url()\n url = '{project_url}/issues/{plan_issue}'.format(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sent by clients when they enter a room. A status message is broadcast to all people in the room.
def joined(message): #room = session.get('room') room='abc' join_room(room) #emit('status', {'msg': session.get('name') + ' has entered the room.' + message['msg']}, room=room) emit('status', {'msg': 'Yao has entered the room.'}, room=room) #emit('status', {'msg': 'Yao has entered the room.'}, r...
[ "def joined(message):\n room = 'general_room'\n join_room(room)\n emit('status', {'msg': current_user.name + ' has entered the room.'}, room=room)", "def joined(message):\n room = message.get('room')\n join_room(room)\n emit('status', {'msg': message.get('name') + ' has entered the room: '+room}...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print all customers, with the current time and id, in CSV format.
def print_customers(self): output = '' for i in range(len(self.customers)): output += f'Customer no. {self.customers[i].id} is in {self.customers[i].state[0]} section\n' #print(output) with open('oneday.csv','a') as outfile: for i in range(len(self.customers)): ...
[ "def print_customers(self):\n for one_customer in self.customers:\n print(f'{self.minutes}, {one_customer.name}, {one_customer.state}')", "def print_customers(self):\n self.current_time = self.get_time()\n return f'Supermarket(\"{self.customers}\", \"{self.current_time}\")'", "de...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes every customer that is no longer active.
def remove_existing_customers(self):
    for i in range(len(self.customers)):
        if not self.customers[i].is_active():
            self.customers[i] = 'out'
    self.customers = [item for item in self.customers if item != 'out']
[ "def remove_exiting_customers(self):\n self.customers=[c for c in self.customers if c.is_active()]", "def remove_existing_customers(self):\n # remove the customers which are not active (.is_active )\n self.to_move = False\n #for cust in self.customers:\n # print(cust.state)\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper function for creating new `Item` Elements. This is used until we get to InstanceElement, at which point we use that class for all of the elements instead.
def _new_item(class_name=None):
    class_name = class_name or "Folder"
    return ElementTree.Element("Item", attrib={"class": class_name})
[ "def _create_item_element(\n parent: str,\n topic: str,\n item: str,\n idl_type: str,\n unit: str,\n description_text: str,\n element_count: int,\n) -> None:\n it = etree.SubElement(parent, \"item\")\n efdb_name = etree.SubElement(it, \"EFDB_Name\")\n efdb_name.text = item\n descrip...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Right now, comment matching is only done for inline comments, for simplicity. If a more sophisticated pattern is implemented to pick up block comments, this test can be removed.
def test_does_not_match_block_comments(self):
    comment = dedent("""\
        --[[
        Hello, World!
        --]]""")
    script = rbxmx.ScriptElement(source=comment)
    first_comment = script.get_first_comment()
    assert first_comment is None
[ "def test_inline_comment(self):\n self.assert_okay(\"inline-comment\")", "def test_mixed_comments(self):\n self.assert_okay(\"mixed-comments\")", "def test_comments(self):\n\n text = \"\"\"\n This is a text% with some inline comment\n More text.\n % A free comment\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a dict with validation rules for a field. Used directly in widget templates.
def get_validators_for_field(field): # TODO: Add more validation methods validators = {} if v.validation_includes(field.attr.validator, v.Email): validators['email'] = True if v.validation_includes(field.attr.validator, v.Number): validators['number'] = True if v.validation_inclu...
[ "def validation_rules(self):\n return self._validation_rules", "def getValidators():", "def validate(self):\n for rule in self.get_rules():\n rule.validate(self.get_val())", "def validation_runner(val_funk, field, value, requires):\n if hasattr(requires, '__iter__') and not isinsta...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns selected="selected" if the option value matches the field's default. Used directly in widget templates.
def is_option_selected(option, field):
    if field.attr.default and option[0] == field.attr.default:
        # and option[0] != self.empty:
        return ' selected="selected"'
    else:
        return ''
[ "def first_selected_option(self):\n option = super(Select, self).first_selected_option\n return extend_webelement(option)", "def selection_field_value(context, widget, data):\n return context.selection_field", "def selected_value(self):\n option = self.selected_option\n return opt...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper function to plot results from sampling (neighborhood radii or iterations)
def plot_sampling(fname, df, of="r_neighbor", show=True): xlabel = r"Neighborhood $r_{c}$" logx = False if of == "n_iter": xlabel = "#Cycles" logx = True fig, ax = plt.subplots(figsize=(15, 5)) gb = df.groupby([of]) aggregation = {"stress": [np.mean, np.std], "correlation": [n...
[ "def plot_selected( self , *args , **kwargs ):\n\n\t\tprojection = kwargs.get('projection', self.projection)\n\t\tsave_fig = kwargs.get( 'save_fig', None )\n\t\ttype_colors = kwargs.get('type_colors', self.type_colors)\n\t\ttitle = ' Locations of Genomes at time of final sampling lineage depth: ' + str( kwargs.get(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Post an answer to the given tweets
def answer_to_tweets(api, tweets): try: last_tweet_id = 0 for tweet in tweets: print("Sending an answer to tweet {}: '{}'".format(tweet["id"], tweet["text"])) api.statuses.update(status=TARGET_TWEET_ANSWER, ...
[ "def reply(api, tweet):\n for t in tweet:\n # to create a reply you simply @ and mark the tweet id\n api.update_status(\".@\"+t[0]+\" have you heard of tourzan.com its a good travel resource. \"\n \"the travel tips and guides are top notch.\")", "def post_tweet(status_text, t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve the id of the last tweet we answered to
def get_last_tweet_id():
    if not os.path.exists(LAST_TWEET_FILE):
        return 0
    try:
        with open(LAST_TWEET_FILE, 'rb') as last_tweet_file:
            return pickle.load(last_tweet_file)
    except pickle.UnpicklingError:
        return 0
[ "def most_recent_id(self):\n # http://stackoverflow.com/questions/14432862/how-to-get-the-document-with-max-value-for-a-field-with-map-reduce-in-pymongo\n mrt = self.db.tweets.find_one(sort=[(\"_id\", -1)])\n\n # on first execution there'll be no tweets, so mrt is null\n # None is twarc'...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update the id of the last tweet the bot considered
def update_last_tweet_id(last_tweet_id):
    if last_tweet_id:
        with open(LAST_TWEET_FILE, 'wb') as last_tweet_file:
            pickle.dump(last_tweet_id, last_tweet_file)
[ "def get_my_last_tweet_id(api):\n for _ in range(3):\n try:\n last_tweet = api.user_timeline(id=api.me().id, count=1)[0]\n except IndexError:\n time.sleep(3)\n continue\n else:\n break\n else:\n raise Exception(\"The last tweet was not fo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the global position of every local obstacle. Returns a list.
def calcGlobalObstaclePosition(self, obstacles):
    global_obstacle_list = []
    for obstacle in obstacles:
        # Convert angle data for the global calculation: -90 to +90 and +90 to -90, 0 = 0
        # ScanList[i][0] = degrees(asin(sin(radians(ScanList[i][0]) + radians(180))))
        ...
[ "def find_obstacle_loc(self, obstacle_list):\n\n x_obst = []\n y_obst = []\n #x_obst_append = x_obst.append\n #y_obst_append = y_obst.append\n locs = []\n\n for x in obstacle_list:\n if x < self.width:\n x_obst.append(x*self.resolution + self.resol...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Pickles the robot's path every X seconds.
def saveRoboPath(self):
    if time.time() - self.timeold > 2:
        self.RoboPath.append([round(self.RoboPosX, 1), round(self.RoboPosY, 1)])
        self.timeold = time.time()
[ "def run(every=45):\n print(f\"Scheduling refuel time for every {every} minutes.\")\n seconds = every * 60\n pic = Path.joinpath(Path(__file__).parent, \"pic.png\")\n try:\n img = Image.open(pic)\n while(True):\n for i in tqdm.trange(seconds):\n time.sleep(1)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the robot position, e.g. at startup.
def setRoboPos(self, x, y):
    self.RoboPosX = x
    self.RoboPosY = y
[ "def setPosition(self):\n # determine posX, posY for battle\n (x1,y1) = globals.battlemapQuadrants[self.systemGrid]\n self.posX = x1+self.setX\n self.posY = y1+self.setY", "def move_to_origin(self) -> None:\n\n _bb = self.bb()\n if _bb.x < 0:\n self.translate(a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the latest obstacles and clears the obstacle list.
def getObstacles(self):
    ausgabeObstacle = self.globalObstaclesList + self.globalHardObstaclesList
    self.globalObstaclesList = []
    return ausgabeObstacle
[ "def clearObstacles(self):\r\n \r\n self.lock.acquire()\r\n self._obstacles = []\r\n self.updated.set()\r\n self.changes.append('obstacles')\r\n self.lock.release()", "def reset_obstacles(self):\n self.obstacles = np.array([])", "def get_obstacles(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get an existing SSLCertificate resource's state with the given name, id, and optional extra properties used to qualify the lookup.
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, certificate: Optional[pulumi.Input[str]] = None, certificate_id: Optional[pulumi.Input[int]] = None, creation_timestamp: Optional[pulumi.Input[str]] = None, ...
[ "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n active: Optional[pulumi.Input[bool]] = None,\n arn: Optional[pulumi.Input[str]] = None,\n ca_pem: Optional[pulumi.Input[str]] = None,\n certifica...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks whether the user wants to finish. Performs some verification of the input.
def check_if_user_has_finished(): ok_to_finish = True user_input_accepted = False while not user_input_accepted: user_input = input("Do you want to finish (y/n): ").lower() if user_input == 'y': user_input_accepted = True elif user_input == 'n': ok_to_finish =...
[ "def do_done(self, arg):\n self.action.exit_judge()\n self.prompt = \"main > \"", "def ask_user(self):\n if self.wait:\n msg = 'Press <ENTER> for next time step. '\n msg += 'Type \"end\" to run to end: '\n user_input = input(msg)\n if user_input.low...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate image from Blender scene file (.blend)
def generate_blenderimage(scene_file, output=None, script_file=None, frame=1): cmd = [BLENDER, "-b", scene_file, "-y"] previous_wd = os.getcwd() os.chdir(os.path.dirname(os.path.abspath(scene_file))) if script_file: cmd.append("-P") cmd.append(script_file) if output: outbase,...
[ "def main(args):\n parser = argparse.ArgumentParser(\"Converts lots of blend files\")\n parser.add_argument('--resolution', help=\"Resolution Multiplier\", type=float)\n config = parser.parse_args(args)\n\n\n prefix = bpy.data.filepath.split('.')[0]\n outimage = prefix + '.png'\n export_png(outima...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate image from Blender scene file (.blend) with changed parameters.
def generate_img_with_params(scene_file, script_name="tmp.py", xres=800, yres=600, crop=None, use_compositing=False, output=None, frame=1): if crop is None: crop = [0, 1, 0, 1] crop_file_src = generate_blender_crop_file([xres, yres], [crop[0], ...
[ "def generate_blenderimage(scene_file, output=None, script_file=None, frame=1):\n cmd = [BLENDER, \"-b\", scene_file, \"-y\"]\n previous_wd = os.getcwd()\n os.chdir(os.path.dirname(os.path.abspath(scene_file)))\n if script_file:\n cmd.append(\"-P\")\n cmd.append(script_file)\n if output...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This is a bigram collocation finder.
def collocationFinder(document,nbest=4): chain = lambda x : list(itertools.chain(*pos.tokenize_words(pos.tokenize_sents(x)))) stopset = set(stopwords.words('english')) filter_stops = lambda w: len(w) < 3 or w in stopset bcf = BigramCollocationFinder.from_words(chain(document)) bcf.apply_word_filter(filter_stops) ...
[ "def bigram_finder(self):\n return BigramCollocationFinder(self.word_fd, self.bigram_fd)", "def bigram_collocation_feats(self, documents, top_n=..., min_freq=..., assoc_measure=...):\n ...", "def extract_collocations(records, num_collocations, collocation_window, compare_collocations = False):\r\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a list of tagger classes and CoNLL-2000 training sentences, this function returns a good backoff POS tagger.
def backoff_tagger(train_sents, tagger_classes, backoff=None):
    for cls in tagger_classes:
        backoff = cls(train_sents, backoff=backoff)
    return backoff
[ "def make_backoff_tagger():\n\n\treturn backoff_tagger(treebank.tagged_sents(), \n\t\t[UnigramTagger, BigramTagger, TrigramTagger],\n\t\tbackoff=DefaultTagger('NN'))", "def nltk_brill_pos_tagger(input_dict):\n chunk = input_dict['training_corpus']['chunk']\n corpus = input_dict['training_corpus']['corpus']\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a backoff tagger that uses a UnigramTagger, BigramTagger, TrigramTagger, and a DefaultTagger that returns NN.
def make_backoff_tagger():
    return backoff_tagger(treebank.tagged_sents(),
                          [UnigramTagger, BigramTagger, TrigramTagger],
                          backoff=DefaultTagger('NN'))
[ "def backoff_tagger(train_sents, tagger_classes, backoff=None):\n\tfor cls in tagger_classes:\n\t\tbackoff = cls(train_sents,backoff=backoff)\n\treturn backoff", "def create_tagger():\n train_sents = brown.tagged_sents()\n\n # These regexes were lifted from the NLTK book tagger chapter.\n t0 = nltk.Regex...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
should accept a dict or callable as first argument
def test_takes_dict_or_callable(self):
    scope1 = Scope({'where': 'foo'})
    self.assertEqual(scope1.finder_options, {'where': 'foo'})
    call = lambda cls: cls.where('foo')
    scope2 = Scope(call)
    self.assertEqual(scope2.callable, call)
[ "def callableize(f_or_d):\n return f_or_d.get if isinstance(f_or_d,dict) else f_or_d", "def callFuncBasedOnDict(func, argdict, **kwargs):\n if argdict is None:\n argdict = {}\n seldict = selectArgsFromDict(func, argdict)\n if kwargs is not None:\n seldict.update(kwargs)\n return func(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
should raise exception on bad arguments
def test_errors_on_bad_argument(self):
    self.assertRaises(Exception, Scope, 'foo')
    self.assertRaises(Exception, Scope, 1)
    self.assertRaises(Exception, Scope, [])
    self.assertRaises(Exception, Scope, tuple())
[ "def test_parse_argument_invalid():\n assert_raises(ValueError, _parse_argument, \"foo\")", "def _validate_arguments():\n exobj = (putil.exh.get_exh_obj()\n if putil.exh.get_exh_obj() else\n putil.exh.ExHandle())\n exobj.add_exception(\n exname='illegal_argument',\n ex...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
should set `model` to owner when instance is None
def test_sets_model_to_owner(self):
    self.assertEqual(self.Test.foo.model, self.Test)
[ "def owner(self, owner):\n self._owner = owner", "def setParent(self, model):\n self.parent = model", "def owner(self, owner):\n \n self._owner = owner", "def set_owner(self, owner: Owner):\n ...", "def pre_save(self, obj):\n obj.owner = self.request.user\n c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
should delegate to `callable` when present
def test_delegates_callable(self):
    foo = self.Test.foo
    self.assertEqual(foo(), 123)
    foo(1, 2, 3, foo='bar')
    self.assertEqual(((1, 2, 3), dict(foo='bar')), self.last_call)
[ "def deferred_call(self, callable):\n def _proxy(*args, **kwargs):\n if self.get_token():\n return callable(*args, **kwargs)\n else:\n log.info(\"Did not call %r as a token was not available\", callable)\n\n return _proxy", "def getCallable():", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function performs a grid search over a set of different learning rates and a number of hidden layer neurons.
def grid_search(verbose): # Load Ising data. Ising_Data = prepare_Ising_DNN() # Perform grid search over learning rate and number of hidden neurons. N_neurons=np.logspace(0,3,4).astype("int") # Check number of neurons over multiple decades. learning_rates=np.logspace(-6,-1,6) # Pre-allocate v...
[ "def grid_search():\n best_acc = 0\n best_hyper_params = (None, None, None)\n size_arr = [10, 20]\n rate_arr = [.1, .2, .3]\n reg_arr = [3, 4, 5]\n\n for size in size_arr:\n for rate in rate_arr:\n for reg in reg_arr:\n curr_acc = get_avg_acc(3, size, rate, reg)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the next trash day for a given date
def next_regular_trash_day(date: str) -> str: parsed_date = parser.parse(date) day_of_week = parsed_date.weekday() if day_of_week < TRASH_DAY: delta = TRASH_DAY - day_of_week elif day_of_week == TRASH_DAY: delta = 0 else: delta = 7 - (day_of_week - TRASH_DAY) next_trash...
[ "def get_next_day(self):\n self.date += timedelta(days=1)\n return self.date", "def _next_trading_day(self, day):\n next_day = self._trading_days.shift(-1)[day]\n return next_day if not pd.isnull(next_day) else None", "def get_next_trading_day(self) -> CalendarDate:\n\n return...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the next trash day, taking holidays into consideration.
def next_trash_day(date: str, holidays: list) -> dict: next_regular = next_regular_trash_day(date) weekdays = get_weekdays(next_regular) default_trash_day = {'type': 'default', 'schedule': calendar.day_name[TRASH_DAY]} if holiday.contains_holiday(weekdays): holiday_name = holiday.get_holiday(wee...
[ "def next_regular_trash_day(date: str) -> str:\n parsed_date = parser.parse(date)\n day_of_week = parsed_date.weekday()\n\n if day_of_week < TRASH_DAY:\n delta = TRASH_DAY - day_of_week\n elif day_of_week == TRASH_DAY:\n delta = 0\n else:\n delta = 7 - (day_of_week - TRASH_DAY)\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get `Tokenizer` and `Model` for a model name.
def get_tokenizer_and_model(model_name: str):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModel.from_pretrained(model_name)
    model.output_hidden_states = True
    return tokenizer, model
[ "def get_model(model):\n all_models = cmd.get_object_list()\n\n if len(all_models) == 0:\n logging.parser_error('No models are opened.')\n return\n\n model = model.lower()\n\n if model and (model in all_models):\n return model\n\n if len(all_models) > 1:\n logging.parser_e...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of all the activation norms.
def get_activation_norms( parameters: List[Parameter], normalize: bool = True ) -> List[float]: with torch.no_grad(): all_norms = [] for param in parameters: if len(param.size()) != 2: continue norms = param.norm(dim=1, p=2) if normali...
[ "def get_weight_norms(self, sess, matrix_norm_fxn = lambda x: np.linalg.norm(x, ord = 1)):\n model_norms = []\n weights_list = self.get_weights_np(sess)\n for weights in weights_list:\n norm = matrix_norm_fxn(weights)\n model_norms.append(norm)\n return model_norms"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get an array of all weight magnitudes in the parameter list.
def get_weight_norms(parameters: List[Parameter]) -> np.ndarray:
    with torch.no_grad():
        norms = torch.cat([param.abs().flatten() for param in parameters])
        return norms.numpy()
[ "def get_magnitudes(self):\n return [w.Magnitude for w in self.Waves]", "def getMagnitudes(self):\n return self._bmag, self._vmag, self._jmag, self._hmag, self._kmag", "def magnitudes(atoms):\n results = []\n for atom in atoms.values():\n results.append(magnitude(atom))\n r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a list of parameters tied to the embedding layer.
def get_embed_params(model) -> List:
    return [param for name, param in model.named_parameters() if "embed" in name]
[ "def param(self):\n parameters = []\n for layer in self.layers:\n parameters.extend(layer.param)\n return parameters", "def parameters(self) -> List[torch.tensor]:\n return [\n tensor\n for layer in self.layers if layer.has_trainable_params\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get dictionary of parameters partitioned by layer number.
def get_params_by_layer(model) -> Dict:
    layers = defaultdict(list)
    for name, param in model.named_parameters():
        pieces = name.split(".")
        if pieces[0] == "encoder" and pieces[1] == "block":
            layer = int(pieces[2])
            layers[layer].append(param)
    return layers
[ "def _get_params_for_layer(layer_type):\n return {\n layers.Conv1D: ([4, 2], (3, 6)),\n layers.Conv2D: ([4, (2, 2)], (4, 6, 1)),\n layers.Conv2DTranspose: ([2, (3, 3)], (7, 6, 3)),\n layers.Conv3D: ([2, (3, 3, 3)], (5, 7, 6, 3)),\n layers.Conv3DTranspose: ([2, (3, 3, 3)], (5, 7...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Look for new photos on Google Drive.
def main(): credentials = get_credentials() http = credentials.authorize(httplib2.Http()) service = discovery.build('drive', 'v3', http=http) i = 0 total = 0 nextPageToken=None while True: results = service.files().list( pageSize=30, fields="nextPageToken, fi...
[ "def retrieve_pics(drive_service):\n pic_q = Queue()\n page_token = None\n while True:\n response = drive_service.files().list(q=\"mimeType='image/jpeg'\",\n spaces='drive',\n fields='nextPageToken, files(id, n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Linearly interpolate two setpoints.
def interpolate_setpoints(base_setpoint, other_setpoint, parameter): time = parameter * base_setpoint.time + (1 - parameter) * other_setpoint.time position = parameter * base_setpoint.position + (1 - parameter) * other_setpoint.position velocity = parameter * base_setpoint.velocity + (1 - parame...
[ "def _interpolate( p1, p2, x ):\n return p1[ 1 ] + _slope( p1, p2 ) * ( x - p1[ 0 ] )", "def _interpolate(x, x1, x2, y1, y2):\n m = (y1 - y2) / (x1 - x2)\n y = (x - x2) * m + y2\n return y", "def test_interpolate(p1: Tensor, p2: Tensor, size: int) -> None:\n points = torch.stack((p1, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate overlap between two sets. The sets are acquired from data1 and data2 respectively.
def calc_overlap(data1, data2, label1=None, label2=None, index='dice'): if label1 is not None: positions1 = np.where(data1 == label1) data1 = list(zip(*positions1)) if label2 is not None: positions2 = np.where(data2 == label2) data2 = list(zip(*positions2)) # calculate over...
[ "def overlap(x, y):\r\n return set(x).intersection(set(y))", "def calc_overlap_of_graphs(edges1, edges2):\n stats = statstools.calculate_overlap_scores(edges1, edges2)\n stats['interactions1'] = stats['set1_size']\n stats['interactions2'] = stats['set2_size']\n del stats['set1_size']\n del stats...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate overlaps for leave-one-out cross validation. Each sample has its own region of interest (ROI). For each iteration, the overlap between the ROI of the left-out sample and the ROI of the remaining samples is computed.
def loocv_overlap(X, prob, metric='dice'): assert X.ndim == 2, 'The input X must be a 2D array!' assert X.dtype == np.bool, "The input X's data type must be bool!" n_samp, _ = X.shape remain_idx_arr = np.ones((n_samp,), dtype=np.bool) overlaps = np.zeros((n_samp,)) for left_idx in range(n_samp)...
[ "def cal_overlaps(boxes1, boxes2):\n area1 = (boxes1[:, 0] - boxes1[:, 2]) * (boxes1[:, 1] - boxes1[:, 3]) # (Nsample, 1)\n area2 = (boxes2[:, 0] - boxes2[:, 2]) * (boxes2[:, 1] - boxes2[:, 3]) # (Msample, 1)\n\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0])) # (Nsample, Msample)\n\n # calcu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the elbow score for a partition specified by labels.
def elbow_score(X, labels, metric='euclidean', type=('inner', 'standard')): if type == ('inner', 'standard'): score = 0 for label in set(labels): sub_samples = np.atleast_2d(X[labels == label]) dists = cdist(sub_samples, sub_samples, metric=metric) tmp_score = np....
[ "def purity_score(label, pred):\n \n df = pd.concat([label, pd.DataFrame(pred)], axis=1)\n df.set_axis(['label', 'pred'], axis=1, inplace=True)\n \n s = 0\n\n for x, cluster in df.groupby('pred'):\n s += cluster['label'].value_counts().iloc[0] # adding the most occuring class in a cluster\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure that a vertex will appear in at least one of the possible positions
def vertex_at_least_once(self, vertex):
    clauses = []
    for position in range(0, self.graph.num_vertices):
        clauses.append(ClauseVariable(False, position, vertex))
    return clauses
[ "def verify_vertex_values(self):\n for (line, row) in [(ln, rw) for ln in range(9) for rw in range(9)]:\n if self.grid[line][row] not in range(1, 10) and self.grid[line][row] is not None:\n raise VertexValueError()", "def contains_several_vertices(self, currentState):\n\t\treturn ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure that a vertex will appear at most once. This is done by considering all combinations of positions and ensuring that, for each pair of positions, the vertex can only ever appear at one of them.
def vertex_at_most_once(self, vertex):
    clauses = []
    for (p1, p2) in itertools.combinations(range(0, self.graph.num_vertices), 2):
        clause = [ClauseVariable(True, vertex, p1),
                  ClauseVariable(True, vertex, p2)]
        clauses.append(clause)
    return clauses
[ "def creates_vertex_conflict(self, new_op):\n for agent, position in self.curr_positions.items():\n if agent != new_op.agent and \\\n position['location'] == new_op.edge[1] and \\\n self.overlapping(position['time'], new_op.time):\n return True ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decodes a MiniSat output instance back into a clause variable.
def minisat_decode(clause_str): factor = ClauseVariable.encoding_factor() int_value = int(clause_str) compliment = (int_value < 0) int_value = abs(int_value) position = (int_value % factor) -1 vertex = math.ceil(int_value/factor)-1 return ClauseVariable(complimen...
[ "def decode_result(found):\n ...", "def decode_results(self, outputs):\n ...", "def decode(self):\n instr = self.fetch()", "def question_to_statement(self, question, answer):\n qid = question[1]\n pos_statement = self.statements[qid]\n if answer:\n return pos_s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes out the MiniSat input in the format expected by the MiniSat command runner.
def write_minisat(self): num_variables = len(self.label_encodings) num_clauses = self.num_clauses clauses = self.clauses outfile = MinisatRunner.temp_in out = open(outfile,"w") try: out.write("p cnf %3d %3d\n" % (num_variables,num_clauses)) for cla...
[ "def write_output(self):", "def _write_start():\n from ._common import header\n\n out = \"{:5}{}\\n\".format(\"----*\", header)\n return [out[:11] + \"MOP: 123456789*123456789*1234\" + out[40:]]", "def writeMets( self, output=sys.stdout ):\n output.write( self.mets.getXml() )", "def writeOutpu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Choose the form of payment.
def choice_payment(payment=None):
    if payment is None:
        return 3  # Alipay
    else:
        return payment
[ "def test_individual_ACH(self):\n form_data = self.form_data()\n form_data['payment_type'] = 'DirectDebit'\n form = DonationPaymentForm(data=form_data)\n self.assertTrue(form.is_valid())", "def payment(self, **post):\n cr, uid, context = request.cr, request.uid, request.context\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a Lorentzian profile: (1/pi) * (gamma/2) / ((gamma/2)^2 + x^2).
def lorentz(x, gamma):
    # Note: (0.5 * gamma) must be squared as a whole; the original squared gamma only.
    return 1 / cs.pi * 0.5 * gamma / ((0.5 * gamma) ** 2 + x ** 2)
[ "def lorentzian_func(x, gamma):\n return gamma / np.pi / (x**2 + gamma**2)", "def lorentzian(x, gamma, x0=0):\n return gamma / np.pi / ((x-x0)**2 + gamma**2)", "def profile_plaw(r_range, rho_x, r_x, gamma, **kwargs):\n profile = rho_x * (r_range / r_x)**(-gamma)\n profile[(r_range < r_x)] = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a Gaussian profile: 1/(sqrt(2*pi) * gamma) * exp(-(x/gamma)^2 / 2).
def gauss(x, gamma):
    return 1 / np.sqrt(2 * np.pi) / gamma * np.exp(-(x / gamma) ** 2 / 2)
[ "def gaussian(x,mu,sigma):\n \n return ( (1.0/math.sqrt(2*math.pi*sigma)) * (math.e**(-((x-mu)**2)/(2*sigma**2))))", "def gaussian(x, mu, sig):\n\treturn 1 / (sig * np.sqrt(2 * np.pi)) * np.exp((-1 / 2) * ((x - mu) / sig)**2)", "def gamma(x):\n return exp(gammaln(x))", "def gaussian_likelihood(x, mu,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Callback for receiving a TwistStamped message on the /twist_cmd topic.
def cb_twist_cmd(self, msg):
    # log message
    # rospy.logdebug('DBWNode::twist_cmd_cb %s', msg)
    # store message
    self.twist = msg.twist
    self.velocity = msg.twist.linear.x
    self.yaw = msg.twist.angular.z
[ "def twist_callback(self, msg):\n self.compute_commands(msg)", "def on_tweet(self, tweet):\n pass", "def twist_callback(msg):\n global TWIST\n if hypot(msg.twist.linear.x, msg.twist.linear.y) > VELOCITY_MAGNITUDE_THRESHOLD:\n rospy.loginfo(\"Received valid Velocity\")\n TWIST =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Callback for receiving a TwistStamped message on the /current_velocity topic.
def cb_current_velocity(self, msg):
    # log message
    # rospy.logdebug('DBWNode::velocity_cb %s', msg)
    # store message
    self.current_twist = msg.twist
    self.current_velocity = msg.twist.linear.x
[ "def vector_callback(msg):\n global TWIST\n if hypot(msg.vector.x, msg.vector.y) > VELOCITY_MAGNITUDE_THRESHOLD:\n rospy.loginfo(\"Received valid Velocity\")\n TWIST = TwistStamped(header=msg.header)\n TWIST.twist.linear = msg.vector", "def twist_callback(msg):\n global TWIST\n if...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Callback for receiving a Bool message on the /vehicle/dbw_enabled topic.
def cb_vehicle_dbw_enabled(self, msg):
    # log message
    # rospy.logdebug('DBWNode::dbw_enabled_cb %s', msg)
    # store message
    self.dbw = bool(msg.data)
[ "def Enabled(self) -> bool:", "def external_power(self):\n check = self.attrs.get('vehicleEmanager', {}).get('rbc', {}).get('status', {}).get('pluginState', {})\n if check == 'CONNECTED':\n return True\n else:\n return False", "def logbool(tag, value):\n pub = rospy...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plot an image grid of the dataset. If the dataset is labeled it will generate one row of images per class, and if it is unlabeled it will generate a two-dimensional (almost square) grid of images sampled from the dataset.
def show_imagegrid_dataset(dataset, num=10, shuffle=True, classes='auto', figsize=None, fontsize=20, image_attr={'cmap': plt.cm.Greys_r}): sample = datase...
[ "def Mkgrid():\n\tinputs, classes = next(iter(dataset_loader_t['train']))\n\tout = torchvision.utils.make_grid(inputs)\n\thalf = round(len(classes)/2) -1\n\tprint(half)\n\ttitles = [class_names[x].split(\"_\")[1][0:3] for x in classes]\n\ttitles[half] = \"\".join([titles[half], \"\\n\"])\n\ttitle_joined = \", \".jo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets up follow through (jiggle planes) on each selected transform object. Jiggle deformers are returned to allow users to animate the deformer attributes before baking.
def setup_follow_through(): # -- Grab the selected transforms node_list = pm.selected(type='transform') # -- Validate node list if not node_list: pm.warning( 'Select at least 1 transform object!' ) return None # -- Grab the current start and end f...
[ "def bake_follow_through():\r\n # -- Find all selected transforms that have a jiggle setup\r\n node_list = [\r\n node\r\n for node in pm.selected(type='transform')\r\n if any(pc.hasAttr('ld_jiggle_node')\r\n for pc in node.getChildren(type='parentConstraint'))\r\n ]\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Bake and clean follow through (jiggle planes) on selected transform objects.
def bake_follow_through(): # -- Find all selected transforms that have a jiggle setup node_list = [ node for node in pm.selected(type='transform') if any(pc.hasAttr('ld_jiggle_node') for pc in node.getChildren(type='parentConstraint')) ] # -- Validate...
[ "def setup_follow_through():\r\n # -- Grab the selected transforms\r\n node_list = pm.selected(type='transform')\r\n \r\n # -- Validate node list\r\n if not node_list:\r\n pm.warning(\r\n 'Select at least 1 transform object!'\r\n )\r\n return None\r\n\r\n # -- Grab ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DSSAT model water uptake
def water_uptake_dssat(self, soil): CONV1 = 1e-4 # convert m/m3 to cm/cm3 CONV2 = 100 # convert m to cm CONV3 = 10 # convert cm to mm daily_ref_evap_transp = soil.daily_ref_evap_transp transp_pot = daily_ref_evap_transp * self.light_intercpt root_dens = self.root_...
[ "def water_uptake_apsim(self, soil):\r\n soil_wat_avail = np.zeros(soil.total_layers)\r\n soil_wat_supply = np.zeros(soil.total_layers)\r\n daily_ref_evap_transp = soil.daily_ref_evap_transp\r\n transp_pot = daily_ref_evap_transp * self.light_intercpt\r\n # Water available in each...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
APSIM model water uptake
def water_uptake_apsim(self, soil): soil_wat_avail = np.zeros(soil.total_layers) soil_wat_supply = np.zeros(soil.total_layers) daily_ref_evap_transp = soil.daily_ref_evap_transp transp_pot = daily_ref_evap_transp * self.light_intercpt # Water available in each layer [mm] ...
[ "def find_min_sucrose_uptake(uptake_kinetics_type = 'conditional', output_filename = '', stdout_msgs = True):\n from tools.ancillary.plot import plot, axis\n\n #--- Metabolic models ---\n # Model path\n model_path = '/usr2/postdoc/alizom/work/models/Saccharomyces_cerevisiae/iAZ900/'\n\n # Create the ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WOFOST model water uptake
def water_uptake_wofost(self, soil): daily_ref_evap_transp = soil.daily_ref_evap_transp transp_pot = daily_ref_evap_transp * self.light_intercpt DROUGHT_CAT = 4 p_value = p_wofost(transp_pot, DROUGHT_CAT) # WOFOST does not account for different layers for lyr in soi...
[ "def water_uptake_apsim(self, soil):\r\n soil_wat_avail = np.zeros(soil.total_layers)\r\n soil_wat_supply = np.zeros(soil.total_layers)\r\n daily_ref_evap_transp = soil.daily_ref_evap_transp\r\n transp_pot = daily_ref_evap_transp * self.light_intercpt\r\n # Water available in each...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
CropSyst/Campbell model daily water uptake
def water_uptake_campbell(self, soil): daily_ref_evap_transp = soil.daily_ref_evap_transp root_hydr_cond = np.zeros(soil.total_layers) shoot_hydr_cond = np.zeros(soil.total_layers) plant_hydr_cond = np.zeros(soil.total_layers) root_activity = np.zeros(soil.total_layers) ...
[ "def water_uptake_dssat(self, soil):\r\n CONV1 = 1e-4 # convert m/m3 to cm/cm3\r\n CONV2 = 100 # convert m to cm\r\n CONV3 = 10 # convert cm to mm\r\n\r\n daily_ref_evap_transp = soil.daily_ref_evap_transp\r\n transp_pot = daily_ref_evap_transp * self.light_intercpt\r\n roo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a list of pysam.cbcf.VariantRecord objects, split it into multiple lists, one for each chromosome copy.
def split_copies(region, vl): # Sniff out the ploidy if len(vl) == 0: logger.warning('Empty region ({}), assuming diploid'.format(region)) ploidy = 2 else: ploidy = len(vl[0].samples[0]['GT']) logger.debug('Region: {}, ploidy: {}'.format(region, ploidy)) # cpy_l = [ # (cpy, '|'.join(['0'] *...
[ "def _group_by_chromosome(self):\n\t\trecords = []\n\t\tprev_chromosome = None\n\t\tfor record in self._vcf_reader:\n\t\t\tif record.CHROM != prev_chromosome:\n\t\t\t\tif prev_chromosome is not None:\n\t\t\t\t\tyield (prev_chromosome, records)\n\t\t\t\tprev_chromosome = record.CHROM\n\t\t\t\trecords = []\n\t\t\trec...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Take a pysam.cbcf.VariantRecord and convert it into a Variant object
def parse(v, cpy): if v.samples[0]['GT'][cpy] == 0: # Not present in this copy return None alt = v.samples[0].alleles[cpy] l_r, l_a = len(v.ref), len(alt) if l_r == 1: if l_a == 1: op, op_len = 'X', 0 else: op, op_len = 'I', l_a - l_r elif l_a == 1: op, op_len = 'D', l_r - l_a e...
[ "def convertVariant(self, record, callSetIds):\n variant = self._createGaVariant()\n variant.referenceName = record.contig\n if record.id is not None:\n variant.names = record.id.split(';')\n variant.start = record.start # 0-based inclusive\n variant.end = reco...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prepare a variant file with only the given sample, complex variant calls filtered out, and restricted to the given bed file
def prepare_variant_file(fname_in, sample, bed_fname, fname_out, write_mode='w'): def _complex_variant(_v): var = _v.samples.values()[0] for alt in var.alleles: if _v.rlen > 1 and len(alt) > 1: if _v.ref != alt: logger.debug('Filtered out {}:{} {} -> {}'.format(_v.contig, _v.pos, _v.re...
[ "def variant_case(store, case_obj, variant_obj):\n case_obj['bam_files'] = []\n case_obj['mt_bams'] = []\n case_obj['bai_files'] = []\n case_obj['mt_bais'] = []\n case_obj['sample_names'] = []\n for individual in case_obj['individuals']:\n bam_path = individual.get('bam_file')\n mt_b...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Registers a file format from a recipe dictionary
def register_format(recipe): afr = AFMFormatRecipe(recipe) formats_available.append(afr) # suffix if afr.suffix not in formats_by_suffix: formats_by_suffix[afr.suffix] = [] formats_by_suffix[afr.suffix].append(afr) # mode if afr.mode not in formats_by_mode: formats_by_mode[af...
[ "def register(file_format, extensions, reader, writer=None):\n register_format(\n fmt=file_format,\n ext_to_fmt=_extension_to_filetype,\n reader_map=_reader_map,\n writer_map=_writer_map,\n extensions=extensions,\n reader=reader,\n writer=writer,\n )", "def r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Shortcut method for getting random start and end points in a file
def get_random_start_and_end_points_in_file(self, file_data):
    start_point = random.randint(2500, len(file_data))
    end_point = start_point + random.randint(0, len(file_data) - start_point)
    return start_point, end_point
[ "def createRandomRange(self, start, end) :\n\t\ttime = random.randint(1, end-start)\n\t\treturn (start, start+time)", "def random_line(filename):\n return random.choice(list(open(filename)))", "def read_file(filename, start):", "def getRandomCoordinates( self, size ):\n if not self.mIsLoaded: self....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Splice a chunk in a file. Picks out a random chunk of the file, duplicates it several times, and then inserts that chunk at some other random position in the file.
def splice_a_chunk_in_a_file(self, file_data, glitch_num): start_point, end_point = self.get_random_start_and_end_points_in_file(file_data) section = file_data[start_point:end_point] repeated = '' for i in range(1, glitch_num): repeated += section new_start_point, n...
[ "def delete_chunk(self, chunk_x, chunk_z):\n\n # Load extents\n extents = [(0, 2)] # Ignore the extent and timestamp tables.\n self.fd.seek(0)\n buff = Buffer(self.fd.read(4096))\n for entry_index in range(1024):\n z, x = divmod(entry_index, 32)\n entry = buf...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Glitch! Opens the original image file, reads its contents, and stores them as 'file_data'. Calls the 'splice_a_chunk_in_a_file()' method on the data a random number of times between 1 and 5. Writes the new glitched image out to a file.
def glitch_an_image(self, local_image): file_handler = open(local_image, 'r') file_data = file_handler.read() file_handler.close() ##YOUR NEW NUMBER## search_num = tumblr.top_hit_num # divide by 5,000,000 to normalize the range from [0,5000000] to [0,1] # multiply...
[ "def splice_a_chunk_in_a_file(self, file_data, glitch_num):\n start_point, end_point = self.get_random_start_and_end_points_in_file(file_data)\n section = file_data[start_point:end_point]\n repeated = ''\n\n for i in range(1, glitch_num):\n repeated += section\n\n new_s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the list of items generated by expanding nonterminal B, where B is the "next" terminal/nonterminal. Assumes the 'dot' is not at the end of the list of items, and therefore B exists, and B is a symbol. Cache the result, as it does not change over time.
def items_generated_by_next(self): def lookup(rule): return self.grammar.rules[rule.content] if rule.is_symbol_name() else rule if self.the_items_generated_by_next is None: self.the_items_generated_by_next = [] rhs = lookup(self.grammar.rules[self.the_next.content]) ...
[ "def nonterminals(items):\n return [Nonterminal(item) for item in items]", "def closure(items, ruleSet, terminals):\n I = copy.deepcopy(items)\n\n added = 1\n while added:\n added = 0\n\n #for each item [A -> alpha . B Beta, a] in I (result)\n for item in I:\n if item.p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True when this item represents having accepted a valid sentence in the language
def is_accepting(self):
    return (self.position == 1) and (self.lhs.content == LANGUAGE)
[ "def isQuestion(self):\n i = 0\n while i < len(self.sentence):\n if \"?\" in self.sentence[i].getWord():\n return True\n i += 1\n return False", "def negation_check(self,sentence):", "def __contains__(self, sentence):\n return sentence in self._se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the set of names of symbols in the "externals" section of the Treesitter JSON grammar. Data looks like this, for section "externals". {
def json_externals(json):
    return set([x["name"] for x in json.get("externals", [])])
[ "def external_terminologies(self):\n terms = set()\n for node_record in self.graph.run(\"MATCH (n) RETURN (n)\"):\n node = node_record[\"n\"]\n if \"links_to\" in node:\n terms.add(node[\"links_to\"])\n return terms", "def getSchemataNames():", "def _get...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Translates a JSON dictionary into a corresponding grammar node, based on the 'type' entry. Returns 'dct' itself when 'dct' has no type entry or has an unrecognized type entry. We use Treesitter's conventions for representing a grammar in JSON form.
def json_hook(grammar,memo,tokens_only,dct): def memoize(memo,name,node): if name in memo: return memo[name] memo[name] = node return node result = dct if "type" in dct: type_entry = dct["type"] if isinstance(type_entry,str): if type_entry =...
[ "def _resolve_dict_type(dict_type):\n if dict_type in [dict, col.OrderedDict, NestedDict]:\n dict_class = dict_type\n elif dict_type is None or dict_type == \"ndict\":\n dict_class = NestedDict\n elif dict_type == \"dict\":\n dict_class = dict\n elif dict...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the Canonical Form of a GrammarDict
def canonicalize_grammar(grammar,empty): rules = grammar.rules # First ensure right-hand sides of containers are Choice nodes. result = {} for key, value in rules.items(): if isinstance(value,ContainerRule): if isinstance(value,Choice): # Choice nodes are unchanged ...
[ "def canonicalize(self):\n self.rules = canonicalize_grammar(self,self.empty)\n self.is_canonical = True", "def build_canonical_codebook(codebook):\n if not codebook:\n return {}\n canonical = ([(key, codebook[key][LENGTH]) for key in sorted(\n codebook, key=lambda key: (codebook...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Records a new rule with the given key and value.
def add_rule(key, *values):
    rhs = grammar.MakeChoice(list(values))
    result[key.content] = rhs
    return key
[ "def __setitem__(self, key, value):\n self.rules[key] = value", "def add_rule(self, rule):\n self.rules.append(rule)", "def add_rule(self, rule):\n \n self.rules.append(rule)", "def add_rule(rule):\n global RULE_DICT\n\n if rule[0] not in RULE_DICT:\n RULE_DICT[rule[0]...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the First set for each node in the grammar. Populates the `first` attribute of each node.
def compute_first_sets(grammar,rules): grammar.reset_first_follow() names_of_non_terminals = [] grammar.end_of_text.first_data = set({grammar.end_of_text}) grammar.empty.first_data = set({grammar.empty}) for key, rule in rules.items(): if rule.is_terminal() or rule.is_empty(): #...
[ "def compute_first(self):\n compute_first_sets(self, self.rules)", "def calculate_first(terminals, nonterminals, grammar, nullable):\n first = dict()\n for t in terminals:\n first[t] = {t}\n for a in nonterminals:\n first[a] = set()\n changing = True\n while changing:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a copy of list L without Empty nodes.
def list_without_empty(L):
    return [i for i in L if not i.is_empty()]
[ "def compact_list(self):\n return [ele for ele in self if ele is not None]", "def empty_list():\n return list()", "def copy_list(self,list_):\r\n return list_[:]", "def prune_empty(lst: List[T]) -> List[T]:\n return [elem for elem in lst if elem]", "def copy(self):\n return List(*...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the First set for a Phrase, in the given grammar
def first(grammar,phrase): def lookup(rule): return grammar.rules[rule.content] if isinstance(rule,SymbolName) else rule # Map names of nonterminals to the nonterminals themselves phrase = [lookup(i) for i in phrase] result = set() for item in phrase: we = without_empty(item.first(...
[ "def compute_first_sets(grammar,rules):\n grammar.reset_first_follow()\n\n names_of_non_terminals = []\n grammar.end_of_text.first_data = set({grammar.end_of_text})\n grammar.empty.first_data = set({grammar.empty})\n for key, rule in rules.items():\n if rule.is_terminal() or rule.is_empty():\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the Follow set for each node in the grammar. Assumes First sets have been computed. Populates the `follow` attribute of each node.
def compute_follow_sets(grammar): # 1. Place $ in FOLLOW(S), where S is the start symbol and $ is the input # right end marker. grammar.rules[grammar.start_symbol].follow = set({grammar.end_of_text}) def lookup(rule): return grammar.rules[rule.content] if isinstance(rule,SymbolName) else rule ...
[ "def compute_follow(self):\n compute_follow_sets(self)", "def get_follow():\n for s in NON_TERMINAL_SET:\n sym = symbol_for_str(s)\n sym.follow_set = set()\n\n symbol_for_str('<s>').follow_set.update(set(['#']))\n\n while True:\n follow_set_is_stable = True\n for p in P...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Walk a JSON structure, yielding a new copy of the object. But for any dictionary 'd', first walk its contents, and then yield the result of calling dict_fn(d).
def walk(obj, dict_fn):
    if isinstance(obj, dict):
        result = dict()
        for key, value in obj.items():
            result[key] = walk(value, dict_fn)
        return dict_fn(result)
    if isinstance(obj, list):
        return [walk(i, dict_fn) for i in obj]
    return obj
[ "def iterate(d, fun): # type: (Dict, Callable[[Any, Any], None]) -> None\n for key, value in d.items():\n if isinstance(value, dict):\n DictUtil.iterate(value, fun)\n else:\n fun(key, value)", "def walk_dict(dct):\n for k, v in dct.items():\n y...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Recomputes self.str and self.hash
def rehash(self):
    self.str = "{}{}{}".format(LBRACE, " ".join(sorted([str(i) for i in self])), RBRACE)
    self.hash = self.str.__hash__()
    self.has_end_of_text = any([isinstance(i, EndOfText) for i in self])
[ "def __hash__(self):\n return hash(self.text)", "def hash_string(self):\n return self._hash_string", "def update_hash(self, h):\n # Generate a sequence of fragments that add up to the canonical\n # version of the expression.\n fragments = []\n self.collect_str_fragments...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Lazily creates an unclosed ItemSet out of the next_items tracked by this edge. If by_index_memo is not None, then find and return the previously saved ItemSet with the same core items, if one exists there. Returns a pair (bool,ItemSet) True if the ItemSet was newly created the destination ItemSet when following this ed...
def NextItemSet(self,grammar,by_index_memo=None): changed = False if self.next_item_set_cache is None: # Create the item set from the "next" items and associated lookaheads. d = dict() for item_id, next_and_lookahead in self.next.items(): ...
[ "def makeSet(self, item):\n if not self.find(item):\n # add the item as an independant set and make root as itself\n self.__items[item] = item", "def __is_duplicated(self, _entry_set, _href_item):\n if (_href_item in _entry_set):\n return True\n return False",...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds an item-to-lookahead mapping.
def internal_add(self,item,lookahead): assert isinstance(item, Item) assert isinstance(lookahead, LookaheadSet) index = item.reg_info.index assert isinstance(index,int) assert index not in self.id_to_item self.id_to_item[index] = item self.id_to_lookahead[index] =...
[ "def addItem(self, item):\n\n # create new ItemScore and dictionary entries from ItemScore\n newEntry = self.ItemScore(item)\n newDictEntry = corpora.Dictionary([self.preprocess(str(newEntry))])\n\n # add new entries to items and dictionary, update flags\n self.itemScores.append(n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a short string, based only on core_index. Assumes core_index has been computed.
def short_str(self):
    return "#{}".format(self.core_index)
[ "def index_as_string(self):\n return self.index().to_string() if self.index() else None", "def format_core(self, core):\n error_msg = (\n \"Internal Error: ASP Result.control not populated. Please report to the spack\"\n \" maintainers\"\n )\n assert self.control,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if the parser action for this item set should be 'accept'.
def is_accepting(self):
    for item_id, lookahead in self.id_to_lookahead.items():
        if lookahead.includesEndOfText():
            item = self.id_to_item[item_id]
            if item.is_accepting():
                return True
    return False
[ "def isAccepting(self) -> bool:\n return self.accepting", "def is_accepted(self):\n return self._accepted", "def accepted(self) -> bool:\n return pulumi.get(self, \"accepted\")", "def isAccepted(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Loads a grammar from text. The text format is assumed to be JSON object representing a Treesitter grammar.
def Load(json_text, start_symbol, ignore='_reserved'):
    g = Grammar(json_text, start_symbol, ignore=ignore)
    g.canonicalize()
    g.compute_first()
    g.compute_follow()
    return g
[ "def load_grammar(path):\n return SCFG(iterrules(smart_ropen(path)))", "def _init_parse_json_grammar_specification(self, path_to_json_grammar_specification):\n # Load in the JSON spec\n try:\n grammar_dictionary = json.loads(open(path_to_json_grammar_specification).read())\n exc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds a Rule by its Python string name.
def find(self, rule_name):
    return self.rules[rule_name]
[ "def find_rule(self, name):\n for rule in self._rules:\n if rule.name == name:\n return rule\n return None", "def get_rule_from_name(self, name):\n if name not in self.rule_names:\n raise GrammarError(\"'%s' is not a rule in Grammar '%s'\" % (name, self))\n\n return self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds a registered object by its index. Registered objects are either Item or Rule (including Token)
def findByIndex(self, obj_index):
    return self.registry.findByIndex(obj_index)
[ "def find_index(self, obj):\n return self.model.indexlist[obj]", "def get_objtype_by_idx(self, *args, **kwargs): # real signature unknown\n pass", "def _object_find(self, oname, namespaces=None) -> OInfo:\n return self._ofind(oname, namespaces)", "def getObject(name, index=-1):\n\n nam...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a new Fixed object, unique up to equivalence of its string text.
def MakeFixed(self,content):
    return self.register(Fixed(content,reg=self))
[ "def test_fixed():\n data = b\"foobar\"\n assert data[:3] == fastparquet.encoding.read_plain(\n data, parquet_thrift.Type.FIXED_LEN_BYTE_ARRAY, -1, 3)[0]\n assert data[3:] == fastparquet.encoding.read_plain(\n data, parquet_thrift.Type.FIXED_LEN_BYTE_ARRAY, -1, 3)[1]", "def fix(self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a new Pattern object, unique up to equivalence of its pattern text.
def MakePattern(self,content):
    return self.register(Pattern(content,reg=self))
[ "def __create_dummy_subpattern(pattern: Pattern, arg: PatternStructure):\n return Pattern(arg, None, pattern.window)", "def pattern(self):\n patternString = self.defn['pattern']\n pattrn = pattern.Pattern(patternString)\n return pattrn", "def produce_patterned_text_slot(cls, name, qu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a new Repeat1 object, unique up to equivalence of its member.
def MakeRepeat1(self,content):
    return self.register(Repeat1(content,reg=self))
[ "def repeat(self, count):\n return self.Sequence((self,) * count)", "def make_unique(self) -> \"UniqueMolecule\":\n return UniqueMolecule(rd_mol=self.rd_mol)", "def make_unique(self):\n return UniqueMolecule(rd_mol=self.rd_mol)", "def ipset_x_repeating():\n x = np.linspace(0, 10, 11)\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a new Item, unique up to equivalence of its left-hand side nonterminal, right-hand side production rule, and its position within that right-hand side.
def MakeItem(self,lhs,rule,position):
    # Upconvert a lhs to a SymbolName if it's a Python string.
    lhs = lhs if isinstance(lhs,SymbolName) else self.MakeSymbolName(lhs)
    candidate = Item(lhs,rule,position,reg=self)
    # Avoid double-registering.
    result = self.register(candidate)
    return result
[ "def newResidue(self):\n\n\t\tres = NMRresidue()\n\t\tself.addResidue(res)\n\t\treturn res", "def construct(self, rule):\n reads = set(self.reads) - set(rule.keys()) | set(rule.values())\n rhs = self.rhs.xreplace(rule)\n return Temporary(self.lhs, rhs, reads=reads, readby=self.readby)", "de...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Rewrites this Grammar's rules so they are in Canonical Form.
def canonicalize(self):
    self.rules = canonicalize_grammar(self,self.empty)
    self.is_canonical = True
[ "def replace_rules(self):", "def canonicalize_grammar(grammar,empty):\n\n rules = grammar.rules\n\n # First ensure right-hand sides of containers are Choice nodes.\n result = {}\n for key, value in rules.items():\n if isinstance(value,ContainerRule):\n if isinstance(value,Choice):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the First set for each rule, saving the result on each rule node. Also computes .derives_empty.
def compute_first(self):
    compute_first_sets(self, self.rules)
[ "def compute_first_sets(grammar,rules):\n grammar.reset_first_follow()\n\n names_of_non_terminals = []\n grammar.end_of_text.first_data = set({grammar.end_of_text})\n grammar.empty.first_data = set({grammar.empty})\n for key, rule in rules.items():\n if rule.is_terminal() or rule.is_empty():\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the Follow set for each rule, saving the result on each rule node. Assumes First sets have been computed.
def compute_follow(self):
    compute_follow_sets(self)
[ "def compute_follow_sets(grammar):\n\n # 1. Place $ in FOLLOW(S), where S is the start symbol and $ is the input\n # right end marker.\n grammar.rules[grammar.start_symbol].follow = set({grammar.end_of_text})\n\n def lookup(rule):\n return grammar.rules[rule.content] if isinstance(rule,SymbolName...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Emits the internal representation of the grammar to stdout.
def dump(self):
    dump_grammar(self.rules)
    print(self.registry)
[ "def __str__(self):\n rep = \"***GRAMMAR***\"\n for symbol in self.grammar_env:\n rep += \"\\n{} -> {}\".format(symbol, self.grammar_env[symbol])\n rep += \"\\n*** ------ ***\"\n return rep\n #return str(self.grammar_env)", "def grammar(self):\n raise NotImplem...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Registers an item set, returning an index such that any item set with the same core maps to the same index. Indices start at 0 and go up by 1.
def register_item_set(self,item_set):
    assert isinstance(item_set,ItemSet)
    core = item_set.kernel_item_ids
    if core in self.item_set_core_index:
        return self.item_set_core_index[core]
    # Register it
    result = len(self.item_set_core_index)
    self.item_set_core_index[core] = result
    return result
[ "def __increment_support_count(itemset, hash_tree):\n item = itemset[0]\n if len(itemset) == 1:\n if item in hash_tree:\n assert type(hash_tree[item]) is int\n hash_tree[item] = hash_tree[item] + 1\n else:\n if item in hash_tree:\n __increment_support_count(it...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the names of rules, in order, based on a preorder traversal starting from the LANGUAGE start node. Assumes the grammar is in canonical form.
def preorder(self):
    assert self.is_canonical
    # Names of visited nodes
    visited = set()
    # Names of nodes to visit
    worklist = [LANGUAGE]
    result = []
    while len(worklist) > 0:
        successors = []
        for rule_name in worklist:
            if rule_n...
[ "def get_rule_names(self):\n return self.rules.keys()", "def get_rule_names(self):\n\n return self.rules.keys()", "def extract_rule_names(self):\n if self.scanner == YARA:\n return sorted({result['rule'] for result in self.results})\n if self.scanner == CUSTOMS and 'matche...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Refactor the grammar, shifting uses of 'target_rule_name' in the first position out to the invoking context. That is, when 'target_rule_name' names nonterminal X, and 'A' is not in 'stop_at_set',
def left_refactor(self,target_rule_name,stop_at_set):
    name_suffix = ".post.{}".format(target_rule_name)
    # Map a rule name X to a set of rules Y where X appears
    # as a first nonterminal in one of Y's options.
    appears_first_in = defaultdict(set)
    for name, rule in self.rules.items(...
[ "def hoist_until(self,target_rule_name,stop_at_set):\n assert self.is_canonical\n\n\n def expand_first(grammar,rule):\n \"\"\"\n When rule is\n Seq(A rest)\n and A -> A1 | ... | An\n Return [ A1 rest | ... | An rest ]\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If two nonterminals have the same right-hand side, combine them. Don't combine any rules named in inline_stop.
def dedup_rhs(self,inline_stop=set(),verbose=False):
    # Map an object index to the nonterminal that first defines it.
    index_to_name = dict()
    # Map a rule name to the rule name it should be replaced by.
    replacement = dict()
    def process_replacement(grammar,name,replacement_dict):
        ...
[ "def combine(self, other: 'Symbol') -> 'Symbol':\n\n def resolve_strings(old: Optional[str], new: Optional[str]) -> Optional[str]:\n resolved = old\n\n if old is not None and new is not None:\n if old != new:\n raise ParserError(\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Inline a rule when it only has one option, and at least one of the symbols is a symbol name. Don't inline any symbol named by excepting_set.
def inline_single_choice_with_nonterminal(self,excepting_set=set()):
    # Map a rule name to the phrase it should be replaced with.
    replacement = dict()
    # Needed for computing follow sets
    excepting_set = set(excepting_set) | {self.start_symbol}
    # Process descendants first
    ...
[ "def ifind_symbols(self, name=\"any\", **kw):\n for sym in self.itersymbols():\n if (name==\"any\" or name==sym.sym.name) and \\\n sym.sym.k==kw:\n yield sym.sym", "def mark_used(self, symbol):\n assert isinstance(symbol, str)\n if (symbol in self._relatio...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If there are rules X and X.post.POST, then set X -> POST X.post.POST.
def refactor_post(self,post_name):
    for name in list(self.rules):
        related_post = "{}.post.{}".format(name,post_name)
        if related_post in self.rules:
            parts = [self.MakeSymbolName(x) for x in [post_name, related_post]]
            self.rules[name] = self.MakeChoice([self.MakeSeq(parts)])
[ "def passivize(rule):\n rule[\"mother\"][\"subcat\"] = {\n \"obj\": None,\n \"preps\": {\n \"by\": [[\"*Subj\"]]}}\n\n rule[\"mother\"][\"hooks\"] = {\n \"head\": [\"*Obj\"]}\n\n rule[\"dtrs\"][0][\"subcat\"] = {\n \"obj\": [\"*Obj\"]}\n\n rule[\"dtrs\"][0][\"hooks...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Hoists the rules for a nonterminal into its ancestors. When target_rule_name holds the name for nonterminal X, and ...
def hoist_until(self,target_rule_name,stop_at_set):
    assert self.is_canonical

    def expand_first(grammar,rule):
        """
        When rule is
            Seq(A rest)
        and A -> A1 | ... | An
        Return [ A1 rest | ... | An rest ]

        If Ai is ...
[ "def left_refactor(self,target_rule_name,stop_at_set):\n name_suffix = \".post.{}\".format(target_rule_name)\n\n # Map a rule name X to a set of rules Y where X appears\n # as a first nonterminal in one of Y's options.\n appears_first_in = defaultdict(set)\n for name, rule in self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Constructs an LL(1) parser table and associated conflicts (if any).
def LL1(self):
    conflicts = []
    table = dict()
    def add(lhs,terminal,action):
        action_key = (lhs,terminal)
        if action_key in table:
            # Record the conflict, and only keep the original.
            prev = table[action_key]
            conflicts.append((lh...
[ "def testLR0ParseTable(self):\r\n from pydsl.Parser.LR0 import _slr_build_parser_table, build_states_sets\r\n state_sets = build_states_sets(productionset0)\r\n self.assertEqual(len(state_sets), 5)\r\n #0 . EI: : . exp $ , \r\n # exp : .SR\r\n # transitions: S -> 2,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }