Columns:
  query: string, lengths 9 to 9.05k
  document: string, lengths 10 to 222k
  negatives: list, lengths 19 to 20
  metadata: dict
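Each row pairs a natural-language query with the code document it describes, plus a list of hard-negative code snippets. Below is a minimal sketch of iterating the triplets with the Hugging Face datasets library, assuming the data is published on the Hub; the repository id "org/code-search-triplets" is a hypothetical placeholder, not the actual name:

    from datasets import load_dataset

    # Hypothetical repository id; substitute the real one.
    ds = load_dataset("org/code-search-triplets", split="train")

    for row in ds.select(range(3)):
        query = row["query"]          # natural-language description
        positive = row["document"]    # matching code snippet
        negatives = row["negatives"]  # 19-20 non-matching snippets
        print(query[:60], "->", positive[:60])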
Plot shear profiles. This function can be called either by passing in an instance of `GalaxyCluster` or as an attribute of a `GalaxyCluster` instance, assuming that instance has had a shear profile computed and saved as a `.profile` attribute. This function can also be called by passing in `rbins` along...
def plot_profiles(cluster=None, rbins=None, tangential_component=None, tangential_component_error=None, cross_component=None, cross_component_error=None, r_units=None, table_name='profile', xscale='linear', yscale='linear'): # If a cluster object was passed, use these arrays ...
[ "def applyShear(self, *args, **kwargs):\n if len(args) == 1:\n if kwargs:\n raise TypeError(\"Error, gave both unnamed and named arguments to applyShear!\")\n if not isinstance(args[0], galsim.Shear):\n raise TypeError(\"Error, unnamed argument to applyShea...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
names/quotes are lists of tuples of indices of names & quotes
def removeNamesInQuotes(names,quotes): for quote in Expand(quotes): for name in Expand(names): for i in name: if i in quote: names.remove((name[0],name[-1])) break return names
[ "def quote_names(self):\n return \"\"\"--quote-names\"\"\"", "def quoted_terms(term_list):\n return [MULTIWORD_PAT.sub(r'\"\\1 \\2\"', t) for t in term_list]", "def process_names(raw_names):\n names = []\n for name in raw_names:\n name = name.lower()\n if ' ' in name:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case verifies that the task add form page loads with expected fields if the user is authenticated
def test_add_task_form(self): # Issue a GET request logged_out_response = self.client.get(reverse("check_mate:task_add")) # Confirm that the response does not have any content self.assertFalse(logged_out_response.content) # Confirm that the user is redirected to the login page...
[ "def test_add_task(self):\n\n # Log the user in\n self.client.login(username=\"test_user\", password=\"secret\")\n\n # Issue a POST request with a new task\n response = self.client.post(reverse(\"check_mate:task_add\"), {\"task_name\": \"New Task\", \"task_description\": \"Test task\", \...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case verifies that you can add a new task and it successfully saves
def test_add_task(self): # Log the user in self.client.login(username="test_user", password="secret") # Issue a POST request with a new task response = self.client.post(reverse("check_mate:task_add"), {"task_name": "New Task", "task_description": "Test task", "task_status": "Not Starte...
[ "def test_add_task(self):\n\n\t\texisting = Task({'id': 1})\n\t\tnew = Task({'name': 'new'})\n\n\t\tproject = Project({'id': 2, 'workspace': {'id': 3}})\n\n\t\tproject.add_task(existing)\n\n\t\tself.assertIn(\n\t\t\t('post', 'tasks/1/addProject', {'data': {'project': 2}}),\n\t\t\tself.api.requests\n\t\t)\n\n\t\tpro...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case verifies that the task edit form page loads with expected fields if the user is authenticated
def test_edit_task_form(self): # Issue a GET request logged_out_response = self.client.get(reverse("check_mate:task_edit", args=(1,))) # Confirm that the response does not have associated content self.assertFalse(logged_out_response.content) # Confirm that the user is redirect...
[ "def test_add_task_form(self):\n\n # Issue a GET request\n logged_out_response = self.client.get(reverse(\"check_mate:task_add\"))\n\n # Confirm that the response does not have any content\n self.assertFalse(logged_out_response.content)\n\n # Confirm that the user is redirected to...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case verifies that you can delete a task if the user is authenticated and certain conditions are met
def test_delete_task(self): # Issue a GET request logged_out_response = self.client.get(reverse("check_mate:task_delete", args=(1,))) # Confirm that the response does not have any content self.assertFalse(logged_out_response.content) # Confirm that the user is redirected to th...
[ "def test_api_can_delete_task(self):\n\n task = self.template_task\n task.save()\n\n token = self.get_auth_token()\n self.client.credentials(HTTP_AUTHORIZATION='Token '+token)\n\n response = self.client.delete(\n reverse('select_template_api:select_template_status',\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Testing the heap after items are inserted
def test_insert(items, pqueue): bhm = BinaryHeapMax() for i in items: bhm.insert(i) assert str(bhm) == str(pqueue)
[ "def _check_heap(self):\n for i in self.heap.walk():\n parent = self.heap.get_parent(i)\n if self.heap.is_max():\n self.assertLessEqual(i.get_item(), parent.get_item())\n else:\n self.assertGreaterEqual(i.get_item(), parent.get_item())", "def t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Testing the heap after items are removed
def test_remove(items, pqueue): bhm = BinaryHeapMax() bhm.heapify(items) bhm.remove() assert str(bhm) == str(pqueue)
[ "def test_random_deletion_same_insatnce(self):\n random.seed(self._seed)\n for run in range(20):\n list_to_test = util.get_random_list()\n heap_instance = HeapQueue(list_to_test[:])\n util.check_heap(heap_instance, self)\n for deletion in range(random.randra...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extracts the operands and operators and removes the parentheses
def test_extraer_operandos_y_operadores_en_expr_sin_ptsis(self): self.assertEqual({'Operandos': [5, 4, 2, 2], 'Operadores': ['+', '*', '/']}, self.expresion.parse("5 + 4 * 2 / 2"))
[ "def __calculate_operators__(self):\n self.operator_str = \"\"\n for lit in self.p_seq_lits:\n ops = lit.get_operators()\n self.operator_str += ''.join(x for x in ops)", "def operacao(*args):\r\n if '+' in args:\r\n return num[0] + num[1]\r\n elif '-' in args:\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prompt the user to get their lottery numbers
def get_numbers(): number_of_lotto_numbers = 3 user_lotto_numbers = [] for i in range(number_of_lotto_numbers): is_powerball = False if i == number_of_lotto_numbers - 1: is_powerball = True number = int(input(f"Select a powerball number: ")) else: ...
[ "def ask_numbers():", "def lottery_game():\n user_numbers = get_numbers()\n number_of_drawings = 0\n while user_numbers != get_lotto_numbers():\n number_of_drawings += 1\n years = round(((number_of_drawings / 2) / 52), 2)\n print(f\"Full match took {years} years\")", "def user_nums():\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate list of random lottery numbers
def get_lotto_numbers(): number_of_lotto_numbers = 3 lotto_numbers = [] for i in range(number_of_lotto_numbers): while True: if i == number_of_lotto_numbers - 1: lotto_numbers.append(randrange(1, 26)) break else: new_number = ra...
[ "def sure_lottery():\n return [0, 1, 0]", "def lottery():\n drawing_list = []\n i = 0\n while i < 6:\n pick = randint(1, 50)\n if pick not in drawing_list:\n drawing_list.append(pick)\n i += 1\n drawing_list.sort()\n return drawing_list", "def get_lotto_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Starts the lottery game. Prints the number of years it took to complete
def lottery_game(): user_numbers = get_numbers() number_of_drawings = 0 while user_numbers != get_lotto_numbers(): number_of_drawings += 1 years = round(((number_of_drawings / 2) / 52), 2) print(f"Full match took {years} years")
[ "def execute(self):\n self.yearlyStepsPerTile = self.model.sim(self.pedsPerHourOn, self.pedsPerHourOff)", "def main():\n\t\n\t# start running trials\n\t# save outcomes of trials\n\t\n\tsuccesses = 0\n\t\n\tfor trial in range(1000):\n\t\tavailable_seats = list(range(0, 100))\n\t\tsuccesses += simulate(avail...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Recon enumeration should always produce valid recons
def test_enum_recon(self): expected_recons = [ {'a': 'a', 1: 1, 'c': 'c', 3: 3, 'd': 'd', 2: 2, 'b': 'b'}, {'a': 'a', 1: 1, 'c': 'c', 3: 3, 'd': 'd', 2: 2, 'b': 'b'}, {'a': 'a', 1: 1, 'c': 'c', 3: 3, 'd': 'd', 2: 2, 'b': 'b'}, {'a': 'a', 1: 1, 'c': 'c', 3: 3, 'd'...
[ "def re_to_nfa(self, re):\n\t alphabet = CHARSET + \"\";\n\t if re.is_simple_type():\n\t if re.type == SYMBOL_RE:\n\t # pt un sg. simbol\n\t return NFA(alphabet, {0, 1}, 0, {1}, {(0, re.symbol) : frozenset({1})});\n\t elif re.type == EMPTY_STRING_RE:\n\t # pt. epsilon\n\t ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a design matrix with features given by radial basis functions. `n_centers` Gaussian kernels are placed along each data dimension, equidistant between the minimum and the maximum along that dimension. The result then contains one column for each of the kernels.
def rbf(X, n_centers): mn = X.min(axis=0) mx = X.max(axis=0) pivots = [] for i, j in itertools.izip(mn, mx): _tmp = np.linspace(i, j, n_centers + 2) pivots.append(_tmp[1:-1]) Y = [] for row in X: _row = [] for r, cs in itertools.izip(row, pivots): widt...
[ "def get_indices_clusters(self, centers):\n\n assert self.get_size() > 0, \"set is empty\"\n centers_size = centers.get_size()\n assert centers_size > 0, \"no centers given\"\n\n self_size = self.get_size()\n dim = self.dim\n self_displacements = self.displacements\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate label based on length
def gen_label(self, length): characters = string.ascii_lowercase + string.digits selected_charalist = random.choices(characters, k=length) return "".join(selected_charalist)
[ "def label_generator(self) -> str:\n label = LABEL + str(self.label_counter)\n self.label_counter += 1\n return label", "def augment_label(label, n):\n return [label]*n", "def ln(label, char='-', width=70):\r\n label_len = len(label) + 2\r\n chunk = (width - label_len) // 2\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Always get the same 0...1 random number based on an arbitrary string
def get_random_by_string(s): sum = reduce(lambda x, y: x+(y*37), [ord(c) for c in s]) return float(sum % 360) / 360 # Initialize random gen by server name hash #random.seed(s) #return random.random()
[ "def get_random_sensor_id():\n return \"\".join(random.choice(\"0123456789abcdef\") for i in range(12))", "def get_random_number():\n\n return random.randint(0, 100000)", "def generate_number_to_be_guessed():\n\n digits = [str(digit) for digit in range(10)]\n random.shuffle(digits)\n\n return ''....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set terminal tab / decoration color. Please note that iTerm 2 / Konsole have different control codes for this. Not sure what other terminals support this behavior.
def decorate_terminal(color): if color is None: # Reset tab color sys.stdout.write("\033]6;1;bg;*;default\a") sys.stdout.flush() else: r, g, b = color # iTerm 2 # http://www.iterm2.com/#/section/documentation/escape_codes" sys.stdout.write("\033]6;1;bg;r...
[ "def set_iterm_tab_color(color):\n return \"\"\"\n \\033]6;1;bg;red;brightness;%s\\a\n \\033]6;1;bg;green;brightness;%s\\a\n \\033]6;1;bg;blue;brightness;%s\\a\n \"\"\" % (*util.hex_to_rgb(color),)", "def DefaultColorCoding():\n print((\"\\033[49m \\033[39m \"), end=' ') #set to default color co...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Train MLP model, using random search to choose hyperparameters. The general structure is to first do the random search and get the best performing hyperparameters on the validation set, then retrain the best model on the train/valid split to generate a learning curve. sklearn RandomizedSearch doesn't provide a way to s...
def train_mlp(X_train, X_test, y_train, y_test, seed, search_hparams={}, batch_size=50, n_folds=3, max_iter=100, search_n_iter=20): import torch.optim from skorch import NeuralNetClassi...
[ "def random_search(learner, params = {}, rnn_type=\"\", seed=0, attempts_per_param=2):\n print(\"RNN type:\", rnn_type)\n print(\"===\")\n print(\"full parameter range:\")\n print(params)\n print(\"===\")\n\n shuffle_seed=0\n random.seed(shuffle_seed)\n params_subrange = {}\n\n best_accuracy = 0.0\n best_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Runs all filters on the word. If none of them return False, then returns True
def _run_filters(self, word): if len(self._filters) > 0: for f in self._filters: f.run(word) # print( 'running filter \n filtername: %s \n word: %s' % (f.__name__, word) ) # if f.run(word) is False: # print( 'filter %s failed: %s' %...
[ "def __call__(self, buf):\n return all(filter_(buf) for filter_ in self.filters)", "def tweet_filter(self, tweet):\n for rule in self.tf:\n if not self.tf[rule](tweet):\n return False\n return True", "def match(self,filter):\n\n\n return filter in self.memo ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
HTML-escape the text in `t`. We first hide real common entities to avoid double escaping
def escape(t): return (t .replace("&quot;", '@quot;') .replace("&amp;", "@amp;").replace("&lt;", "@lt;").replace("&gt;", "@gt;") .replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;") .replace("'", "&#39;").replace('"', "&quot;") .replace("\\", "&#92;") .r...
[ "def encode_html( self, text):\n\t\thtml_escape_table = {\n\t\t\t\"&\": \"&amp;\",\n\t\t\t'\"': \"&quot;\",\n\t\t\t\"'\": \"&apos;\",\n\t\t\t\">\": \"&gt;\",\n\t\t\t\"<\": \"&lt;\",\n\t\t\t}\n\t\t\n\t\tdef html_escape(text):\n\t\t\t\"\"\"Produce entities within text.\"\"\"\n\t\t\tL=[]\n\t\t\tfor c in text:\n\t\t\t\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
See logging.Handler.emit(self, record) docs.
def emit(self, record): pass # do not emit the record. Other handlers can do that.
[ "def emit(self, record):\n try:\n if self.shouldRollover(record):\n self.doRollover()\n if self.header_msg is not None:\n for msg in self.header_msg:\n header_record = logging.LogRecord(\"\", 20, \"\", 0, msg, (), None, None)\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds a new switchPorts resource on the server and adds it to the container. Args
def add( self, Enabled=None, EthernetAddress=None, NumberOfPorts=None, PortName=None, PortNumber=None, ): # type: (bool, str, int, str, str) -> SwitchPorts return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
[ "def streaming_ports_add(ports: str):\n return _run_speedify_cmd([\"streaming\", \"ports\", \"add\", ports])", "def add_switch(session, **kwargs) -> Switch:\n\n # Check if switch already exists\n if 'name' not in kwargs:\n raise KeyError(\"Missing necessary argument 'name' for adding switch\")\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds and retrieves switchPorts resources from the server. All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve switchPorts resources from the server. To retrieve an exact match, ensure the parameter value starts with ^ and ends with $. By default the find...
def find( self, Enabled=None, EthernetAddress=None, NumberOfPorts=None, PortName=None, PortNumber=None, ): # type: (bool, str, int, str, str) -> SwitchPorts return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
[ "def discover(self, pump_id = None):\n from serial import Serial\n\n from sys import version_info\n if pump_id is None:\n pump_id = self.pump_id\n\n available_ports = self.available_ports\n port = None\n for port_name in self.available_ports:\n try:\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Executes the simulatePortUpDown operation on the server. Exec to simulate port up and down. simulatePortUpDown(async_operation=bool) -> bool
def SimulatePortUpDown(self, *args, **kwargs): # type: (*Any, **Any) -> Union[bool, None] payload = {"Arg1": self.href} for i in range(len(args)): payload["Arg%s" % (i + 2)] = args[i] for item in kwargs.items(): payload[item[0]] = item[1] return self._exec...
[ "def test_port_up_down_events(self):\n self.mech_driver.set_port_status_up = mock.Mock()\n self.mech_driver.set_port_status_down = mock.Mock()\n with self.port(name='port') as p:\n p = p['port']\n # using the monitor IDL connection to the NB DB, set the\n # Logi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Increasing the change probability should be reflected in the probability.
def test_increasing_change_probability(self): initial_probability = self.default_sim.probability('A', 'T', 1.0) sim = GeneralizedReversibleSimulator(frac_a=0.25, frac_c=0.25, frac_g=0.25, a_c=0.25, a_g=0.25, a_t=0.4, c_g=0.25, c_t=0.25, g_t=0.25) self...
[ "def mutate_prob(self) -> float:\n return self._mutate_prob", "def uniform_probability(self, args = []):\n\t\tself.probability = 1", "def update_prob(self):\n size = len(self.options)\n\n # Complete the ranking\n improvements = np.array(self.improvements.values())\n totals = n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Increasing the proportion of a character should be reflected in the probability.
def test_increasing_proportion(self): for char in ['A', 'C', 'G']: params = self.default_params.copy() params[self.char_to_frac_param[char]] = 0.4 other_chars = [c for c in ['A', 'C', 'G'] if c != char] for other_char in other_chars: params[self.ch...
[ "def mutate_prob(self) -> float:\n return self._mutate_prob", "async def joseprob(self, ctx, jccount: float, amount: float):\n prob = (1 + (Decimal(jccount) / Decimal(amount))) * Decimal(0.42)\n prob = round(prob, 2)\n await ctx.send(f\"Probability: `{prob}`\")", "def probability(sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the probability from a character to all characters sums to 1.0.
def test_probability_sums_to_1(self, sim, char, distance): assume(distance > 0) total_probability = sim.probability(char, 'A', distance) + sim.probability(char, 'C', distance) + sim.probability(char, 'G', distance) + sim.probability(char, 'T', distance) self.assertAlmostEqual(total_probability, ...
[ "def test_increasing_proportion(self):\n for char in ['A', 'C', 'G']:\n params = self.default_params.copy()\n params[self.char_to_frac_param[char]] = 0.4\n other_chars = [c for c in ['A', 'C', 'G'] if c != char]\n for other_char in other_chars:\n par...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Represent data in DAS plain view for queries with filters.
def plainview(self, head, data): dasquery = head['dasquery'] fields = dasquery.mongo_query.get('fields', []) filters = dasquery.filters results = "" status = head.get('status', None) if status == 'fail': reason = head.get('reason', '') if r...
[ "def show(self, **kwargs):\n if _is_dataframe(self.encrypt):\n viz_data = self.encrypt\n\n elif _is_dataframe(self.redact):\n viz_data = self.redact\n\n elif self.infotypes:\n viz_data = self.infotypes\n\n return viz_data", "def show(table,**kwargs):\n\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts a sentence of text into sound
def convertir(phrase): global indice phrase_convertie = [] while indice < len(phrase): if debug: print("[{}] '{}' -> ".format(indice,phrase[indice]),end="") son,increment = transcrire(indice) phrase_convertie.append(son) if debug: print("'{}'...
[ "def convert_word(self,word,font='preeti'):\n converted = word\n if font == 'preeti':\n converted = self.convert_preeti(word)\n return converted", "def translation(text):\n\tinput_text = TextBlob(text)\n\tclick.secho(\"Text Translation\",fg='black',bg='white')\n\tclick.secho(\"Orig...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
convert patch embedding weight from manual patchify + linear proj to conv
def _conv_filter(state_dict, patch_size=16): out_dict = {} for k, v in state_dict.items(): if 'patch_embed.proj.weight' in k: v = v.reshape((v.shape[0], 3, patch_size, patch_size)) out_dict[k] = v return out_dict
[ "def _conv_filter(state_dict, patch_size=16):\n out_dict = {}\n for k, v in state_dict.items():\n if 'patch_embed.proj.weight' in k and len(v.shape) < 4:\n v = v.reshape((v.shape[0], 3, patch_size, patch_size))\n out_dict[k] = v\n return out_dict", "def __interpolate_positional_e...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete license from cluster
def license_v2_delete(self): license_delete = netapp_utils.zapi.NaElement.create_node_with_children( 'license-v2-delete', **{'package': self.license_package, 'serial-number': self.node_serial_number}) try: self.server.invoke_successfully(licens...
[ "def delete_license(self, license_key):\n return self._xjtrans(\"/settings/licenses/%s\" % license_key, \"DELETE\", None, True, APITimestampFormat.NANOSECOND)", "def delete(self, license_key):\n\n try:\n lic = urllib.parse.quote(license_key) # python 2\n except:\n lic =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a CSV file of every team's unique ID
def get_team_id(): team_abb = [] for team in Teams(): team_abb.append(team.abbreviation) with open('Data/team_abb.csv','w',newline='') as result_file: wr = csv.writer(result_file, quoting=csv.QUOTE_ALL) wr.writerow(team_abb)
[ "def generate_TeamImportData(session, output_directory):\n with open(\n output_directory / Path(\"TeamImportData.csv\"), \"w\", newline=\"\"\n ) as csvfile:\n csvwriter = csv.writer(csvfile)\n\n for school in session.query(School).order_by(School.SchoolID):\n row = [\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a CSV with all player_ids from [X] school
def get_player_id(school,start,end): player_id = [] for i in range(start,end): for player in Roster(school,i).players: player_id.append(player.player_id) # player_id = set(player_id) # Output list of player_id to a csv file with open('sportsref_Data/%s_player_id.cs...
[ "def writePlayerCSV(self) -> None:\r\n with open(self.csv_location, \"w\", encoding=\"utf-16\") as file:\r\n for extracted_player in self._extracted_players:\r\n player_name = extracted_player\r\n print(self._extracted_players[extracted_player])\r\n ass...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calls the get_player_id() function in a loop
def start_script(team_id,start,end): for i in team_id: try: get_player_id(i,start,end) print(f'Successful : {i}') except: print(f'ERROR : {i}')
[ "def next_player(self):\n self.current_player = self.players[(self.current_player.identity.id + 1) % len(self.players)]", "def get_id(self):\n return self.__player_id", "def test_get_player(self):\n player23 = self.player_manager.get_player(3)\n self.assertEqual(3, player23.get_playe...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Automatically populates the WorkerList object with the worker URLs for the GPUdb server to support multihead ingest. (If the specified GPUdb instance has multihead ingest disabled, the worker list will be empty and multihead ingest will not be used.) Note that in some cases, workers may be configured to use more than o...
def __init__( self, gpudb, ip_regex = "" ): # Check the input parameter type assert isinstance(gpudb, GPUdb), ("Parameter 'gpudb' must be of " "type GPUdb; given %s" % type(gpudb) ) self.work...
[ "def request_register_default_worker_servers(self, req):\n for idx in range(8):\n self._server_pool.add(\"apscn{:02d}.mpifr-be.mkat.karoo.kat.ac.za\".format(idx), 6000)\n return (\"ok\",)", "def AddWorkerpoolArgs(parser, release_track, update=False):\n verb = 'update' if update else 'cre...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of the URLs for the GPUdb workers.
def get_worker_urls( self ): return self.worker_urls
[ "def workers(self):\n return self._wrap_get('/workers')", "def get_workers(self):\n with self._engine.begin() as conn:\n worker_rows = conn.execute(\n select([cl_worker, cl_worker_dependency.c.dependencies]).select_from(\n cl_worker.outerjoin(\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds some characters to the record key byte array.
def add_char( self, value ): self.buffer_value.append( bytearray( value ) )
[ "def encode(message, key):\n encoded_message = ''\n print('encoding message...')\n\n for letter in message:\n letter_num = (ord(letter) * key - 32) % Cipher.alphabet_size\n encoded_message += Cipher.alphabet[letter_num]\n return encoded_message", "def extend_key(m...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds numeric value to the record key byte array.
def add_number( self, value ): self.buffer_value.append( bytearray( value ) )
[ "def add(self, key, value):\n\n assert isinstance(key, bytes_type)\n assert isinstance(value, bytes_type)\n\n dbfile = self.dbfile\n pos = dbfile.tell()\n dbfile.write(_lengths.pack(len(key), len(value)))\n dbfile.write(key)\n dbfile.write(value)\n\n # Get has...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds a string to the record key byte array.
def add_string( self, value ): string_hash = mmh3.hash_bytes( value ) self.buffer_value.append( bytearray( string_hash ) )
[ "def bytes_key(string):\n return key_to_bytes(key(string))", "def AddKey(self, *args):\n return _snap.TIntStrH_AddKey(self, *args)", "def add(self, key: int, assyrec: AssembleRecord):\n self.entries[key] = assyrec", "async def add_key_for_did(self, did: str, key: str):\n record = Stora...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a routing table, return the rank of the GPUdb server that this record key should be routed to. routing_table: A list of integers... Returns: the rank of the GPUdb server that this record key should be routed to.
def route( self, routing_table ): routing_index = ((self.routing % len(routing_table) ) - 1) return routing_table[ routing_index ]
[ "def convert_routing_table_entry_to_spinnaker_route(routing_table_entry):\n route_entry = 0\n for processor_id in routing_table_entry.processor_ids:\n if processor_id >= Router.MAX_CORES_PER_ROUTER or processor_id < 0:\n raise SpinnMachineInvalidParameterException(\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Builds a RecordKey object based on the input data and returns it. record An object of the given type to make the record key out of.
def build( self, record ): # Nothing to do if the key size is zero! if (self.key_buffer_size == 0): return None # Check that the given record is a dict of the given table # type if not isinstance( record, dict ): raise ValueErr...
[ "def get_key_from_record(type_: type) -> type:\n if not typing_inspect.is_generic_type(type_):\n raise Exception(f'Cannot get associated key from not generic type {type_.__name__}')\n\n from datacentric.types.record import TypedKey, TypedRecord, RootRecord\n from typing import Forwar...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks whether this record has any key associated with it.
def has_key( self ): return (len( self.key_columns_names ) > 0)
[ "def has_key(self, key: str) -> bool:\n return key in self._collection", "def __contains__(self, key):\n return key in self._keys", "def is_key_registered(self, key):\n return key in self._objects", "def exists(cls, key):\n # Check the store first.\n if cls._store and cls._s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if the given record key builder is equivalent to this one.
def has_same_key( self, other_record_key_builder ): return (self.key_schema_str == other_record_key_builder.key_schema_str)
[ "def __eq__(self, other: Any) -> bool:\n if not isinstance(other, OrdinalEncoder):\n return False\n if (\n self.columns == other.columns\n and self.derived_columns == other.derived_columns\n ):\n return True\n\n return False", "def __eq__(sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the current (old) record queue and create a new empty one.
def flush( self ): old_queue = self.record_queue # Create a fresh new queue self.record_queue = [] # if a key->record_queue_index map exists, clear it if self.primary_key_to_queue_index_map: self.primary_key_to_queue_index_map = {} ...
[ "def new_empty_q():\n from queue_ds import Queue\n this_empty_q = Queue()\n return this_empty_q", "def queue_fixture():\n new_queue = our_queue()\n return new_queue", "def create_queue(self, queue):", "def refresh_queue(self):\n #print(\"REF Q\")\n now_s = time.time()\n sta...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializes the GPUdbIngestor instance. Parameters: gpudb, table_name, batch_size, options, workers
def __init__( self, gpudb, table_name, batch_size, options = None, workers = None ): # Validate input parameter 'gpudb' assert isinstance(gpudb, GPUdb), ("Parameter 'gpudb' must be of " ...
[ "def init():\n\n # Check if metric already present in the metric_map\n if gpu_count not in metric_map: \n\n # Create metric and add it to metric_map\n metric_map[gpu_count] = Gauge(gpu_count, \"Number of GPUs\")\n\n if not created:\n metric_map[gpu_healthrollup] = Gauge(gpu_healthrollu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the number of records inserted thus far.
def get_count_inserted( self ): return self.count_inserted
[ "def rowcount(self):\n self._check_that_read_query_was_issued()\n return self._delegate.rowcount", "def getAffectedRowsCount(self): \n return self.affectedRows", "def get_records_count(conn):\n\n if not conn:\n return 0\n\n raw_iterator = conn.raw_iterator() # in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Internal method to flush (actually insert) the records to GPUdb. queue: List of records to insert. url: The URL to which to send the records.
def __flush( self, queue, worker_gpudb ): if not queue: return # nothing to do try: print "Flushing to %s with %d objects" % (worker_gpudb.get_url(), len(queue)) # debug~~~~~~~~~ # Insert the records insert_rsp = worker_gpudb.insert_records( table_name = ...
[ "def insert_record(self,list_holding_record):", "def _insert_helper(list_of_docs: List[Dict], api: MongoAPI) -> None:\n api.batch_insert(list_of_docs)\n list_of_docs.clear()", "def bulk_insert_to_es(self, bulk_list_fp, debug = True): \n bulkstr_list = bulk_list_fp.readlines()\n bulkstr = \"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a new Image object from source file.
def open_file_as_pil_image(source_file): return Image.open(source_file)
[ "def load_image(file):\n return Image.open(os.path.abspath(file))", "def load(image_path, access='random'):\n\n return pyvips.Image.new_from_file(image_path, access=access)", "def from_file(klass, filename):\n surface = pygame.image.load(filename).convert_alpha()\n return Texture(surface)", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create an empty PIL Image.
def create_empty_pil_image(pil_image): return Image.new('RGB', (pil_image.size[0], pil_image.size[1]))
[ "def createImage(size, blank=True):\n if blank:\n img = np.zeros(size)\n else:\n img = np.ones(size) * 255\n img = Image.fromarray(img, mode='L')\n return img", "def empty_image(request):\n channels = request.param\n data_shape = (4, 8, 12, channels)\n return np.zeros(data_shape...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Resize PIL image object, fixing the largest dimension to 810px.
def resize_pil_image(image): width = image.size[0] height = image.size[1] ratio = width / height if ratio >= 1: width = 810 height = int(width / ratio) else: height = 810 width = int(height * ratio) return image.resize((width, height))
[ "def resize_image(image, size):\n image.thumbnail(size)\n return image", "def setImagesize(self, W, H) -> None:\n ...", "def resizeImage(path):\n temp_pic = Image.open(path,mode='r')\n temp_pic = temp_pic.resize((200,200))\n temp_pic.save(path)", "def resize_image(img, new_width, new_hei...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert RGB three-tuple and sort newly converted HLS data.
def refactor_and_sort_data(color_data): return sorted(color_data)
[ "def actual_pixel_sort(color_data):\n color_data = [tuple(rgb_shift(i) for i in toop) for toop in color_data]\n return color_data", "def rgb_sort(colours, reverse=False):\n sorted_col = sorted(colours, reverse=reverse)\n return sorted_col", "def BGRtoRGBHLS(colours):\n # put them in RGB order\n co...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
some kind of actual pixel sorting to make art or at least try
def actual_pixel_sort(color_data): color_data = [tuple(rgb_shift(i) for i in toop) for toop in color_data] return color_data
[ "def paintSortBuf( self, nFigID, center ):\n #print(dir(cv2))\n #cv2.floodFill( self.sortbuf, nFigID, center, 255 ) # todo: find right command\n cv2.circle( self.sortbuf, (center[0],center[1]), 40, (nFigID), 100 ) # temp sprout\n print( self.sortbuf[0,0] )\n print( self.sortbuf[ce...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the is_password_set of this StorageRemoteKeySettingAllOf.
def is_password_set(self, is_password_set): self._is_password_set = is_password_set
[ "def _set_isPassword(self, *args) -> \"bool\" :\n return _core.StringValueCommandInput__set_isPassword(self, *args)", "def is_encryption_key_set(self, is_encryption_key_set):\n\n self._is_encryption_key_set = is_encryption_key_set", "def _setSavePassword(self, checked):\r\n\r\n self.savePas...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the primary_server of this StorageRemoteKeySettingAllOf.
def primary_server(self, primary_server): self._primary_server = primary_server
[ "def _set_server_mode_primary(server, mode):\n allowed_mode = \\\n (_server.MySQLServer.WRITE_ONLY, _server.MySQLServer.READ_WRITE)\n _do_set_server_mode(server, mode, allowed_mode)", "def _set_server_status_primary(server, update_only):\n raise _errors.ServerError(\n \"If you want to make ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the secondary_server of this StorageRemoteKeySettingAllOf.
def secondary_server(self, secondary_server): self._secondary_server = secondary_server
[ "def fpolicy_set_secondary_servers(self, secondary_servers, policy_name):\n return self.request( \"fpolicy-set-secondary-servers\", {\n 'secondary_servers': [ secondary_servers, 'secondary-servers', [ SecondaryServerInfo, 'None' ], True ],\n 'policy_name': [ policy_name, 'policy-name', ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the server_certificate of this StorageRemoteKeySettingAllOf.
def server_certificate(self, server_certificate): self._server_certificate = server_certificate
[ "def set_ServerCertificateName(self, value):\n super(UploadServerCertificateInputSet, self)._set_input('ServerCertificateName', value)", "def set_keys(self, client_private_key, server_public_key):\n d_client_private_key = Data(client_private_key)\n d_server_public_key = Data(server_public_key...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns [Candle] of the last year (if excluding_last = 252 days)
def last_remaining(self): df = pd.read_csv('^GSPC.csv', index_col='Date', parse_dates=True) df = df[-1 * self.excluding_last:] # for example, past 252 days # for 2017 # years_to_chop_off = 10 # 1: chop off 2018 to get just 2017 # df = df[:-252*years_to_chop_off] # print...
[ "def last_year(self):\n return self._years[-1]", "def last_close_date(country=\"US\"):\r\n return yyyymmdd(n_trading_days_before(today(), 1, country=country))", "def iso_year(self) -> Series:", "def cve_last_year():\n current_time = datetime.now()\n\n cves = CVE.query.filter(\n current_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get image data and cleanup.
def get_image_data(img): fpath = 'ocr_images/{}'.format(img) ref_name = fpath.replace('/', '__').replace('.', '___') + '.txt' @cached(ref_name, directory='ocr_images') def get(): img = Image.open(fpath) res = pytesseract.image_to_string(img) ppr(res) return res retur...
[ "def get_raw_data(self):\n raise OSError(\"GXPARM.XDS does not support image data!\")", "def getImgData(inputImg, outputImg):\n copy = ' '.join(['3dcopy', inputImg, outputImg])\n\n if os.path.exists(outputImg):\n print outputImg.split('/')[-1], 'Already exists, removing... '\n os.remove...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Simply splits text into paragraphs by splitting on the string '\n\n'
def split_into_paras(text): paras = string.split(text, "\n\n") return paras
[ "def _split_paragraphs(self, text):\n\n import re\n import textwrap\n\n text = textwrap.dedent(text).strip()\n text = re.sub('\\n\\n[\\n]+', '\\n\\n', text)\n\n last_sub_indent = None\n paragraphs = list()\n for line in text.splitlines():\n (indent, sub_in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get the stem word for 'word'
def get_stem(word): #stub #PLACEHOLDER ps = PorterStemmer() return word
[ "def stem(word):\n global _stemmer\n if _stemmer is None:\n _stemmer = nltk.stem.porter.PorterStemmer()\n return _stemmer.stem(word)", "def getStemmer():\n return nltk.stem.PorterStemmer().stem", "def find_term(self, question):\n\n term = None\n\n for regex in self.regexes:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
gets the lemma for 'word' (similar to stem, but guaranteed to be a real word); pos is an optional part-of-speech tag
def get_lemmas(word, pos=None): #stub #PLACEHOLDER Lemmatizer = WordNetLemmatizer() lemma = None if pos != None: try: lemma = Lemmatizer.lemmatize(word, pos=pos) except KeyError: lemma = Lemmatizer.lemmatize(word) return lemma
[ "def lemmatize(word, pos='n'):\n if not pos.strip():\n return word\n\n return wordnet_lemmatizer.lemmatize(word, pos=pos)", "def lemmatize(doc):\n lemma = [token.lemma_ for token in doc\n if not token.is_punct and not token.is_space\n and (token.text == \"US\"\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Use Audio3DManager.loadSfx to load a sound with 3D positioning enabled
def loadSfx(self, name): sound = None if name: sound = self.audio_manager.getSound(name, 1) return sound
[ "def load_audio(self, path):\n pass", "def import_sounds(self):\n pygame.mixer.pre_init(buffer=1024)\n self.troll_sound = pygame.mixer.Sound('sounds/troll_music.wav')", "def load_sounds(self):\n try:\n pygame.mixer.init()\n except:\n print 'Cannot load so...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Control the presence of the Doppler effect. Default is 1.0. For exaggerated Doppler, use >1.0. For diminished Doppler, use <1.0.
def setDopplerFactor(self, factor): self.audio_manager.audio3dSetDopplerFactor(factor)
[ "def setDopplerFactor(self, factor: 'float') -> \"void\":\n return _coin.SoVRMLSound_setDopplerFactor(self, factor)", "def getDopplerFactor(self) -> \"float\":\n return _coin.SoVRMLSound_getDopplerFactor(self)", "def ford(self):\n chance = random.randint(0,100)\n percentChance = 5 * ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Controls the maximum distance (in units) at which this sound stops falling off. The sound does not stop at that point, it just doesn't get any quieter. You should rarely need to adjust this. Default is 1000000000.0
def setSoundMaxDistance(self, sound, dist): sound.set3dMaxDistance(dist)
[ "def max_velocity(self):\n return 10 * self.velocity_scale", "def max_speed(self, value):\n\n pass", "def max_speed(self):\n raise NotImplementedError", "def limit_speed(self):\n\n if self.x_speed > 0:\n self.x_speed = max(0, self.x_speed - 0.3)\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the velocity of the sound.
def getSoundVelocity(self, sound): if sound in self.vel_dict: vel = self.vel_dict[sound] if vel is not None: return vel for known_object in list(self.sound_dict.keys()): if self.sound_dict[known_object].count(sound): node_p...
[ "def velocity(self):\r\n if self.sprint:\r\n return self._absDirection * self.sprintSpeed\r\n else:\r\n return self._absDirection * self.baseSpeed", "def getDopplerVelocity(self) -> \"float\":\n return _coin.SoVRMLSound_getDopplerVelocity(self)", "def get_velocity(self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If velocity is set to auto, the velocity will be determined by the previous position of the object the listener is attached to and the frame dt. Make sure if you use this method that you remember to clear the previous transformation between frames.
def setListenerVelocityAuto(self): self.listener_vel = None
[ "def velocity(self, t):\n pass", "def object_velocity(self, object_id, object_type, local_frame=False):\n if not isinstance(object_type, int):\n object_type = _str2type(object_type)\n velocity = np.empty(6, dtype=np.float64)\n if not isinstance(object_id, int):\n object_id = self.model.n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the velocity of the listener.
def getListenerVelocity(self): if self.listener_vel is not None: return self.listener_vel elif self.listener_target is not None: clock = ClockObject.getGlobalClock() return self.listener_target.getPosDelta(self.root) / clock.getDt() else: return VB...
[ "def get_velocity(self):\n return self.vr.simxGetObjectVelocity(self.car_handle, \n vrep.simx_opmode_buffer)[0]", "def get_velocity(self):\n linear, angular = self._physics_client.getBaseVelocity(self.uid)\n return np.asarray(linear), np.asarray(angular)", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sound will come from the location of the object it is attached to. If the object is deleted, the sound will automatically be removed.
def attachSoundToObject(self, sound, object): # sound is an AudioSound # object is any Panda object with coordinates for known_object in list(self.sound_dict.keys()): if self.sound_dict[known_object].count(sound): # This sound is already attached to something ...
[ "def play_sound(sound_object):\n sound_object.play()\n time.sleep(0.5)\n sound_object.stop()", "def BACKGROUND_MUSIC(self): \n musicSound = Sound(source = 'ninja.wav')\n musicSound.play()", "def hit_sound(self):\n self.alien_explosion_sound.play()", "def PlaySound(sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns a list of sounds attached to an object
def getSoundsOnObject(self, object): if object not in self.sound_dict: return [] sound_list = [] sound_list.extend(self.sound_dict[object]) return sound_list
[ "def get_all_audio(self):\n return [x.file for x in self.audio_data.values()]", "def _get_sounds(directory):\n dirlist = os.listdir(directory)\n sound = {}\n for fx in dirlist:\n if fx[-3:] == \"wav\":\n sound[fx[:-4]] = pg.mixer.Sound(os.path.join(directory,fx))\n return soun...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates position of sounds in the 3D audio system. Will be called automatically in a task.
def update(self, task=None): # Update the positions of all sounds based on the objects # to which they are attached # The audio manager is not active so do nothing if hasattr(self.audio_manager, "getActive"): if self.audio_manager.getActive()==0: return Task....
[ "def set_player_position(self, position):", "def position(self, position):\n self._position = position\n\n # Pyglet uses 3d coordinates, convert 2d to a 3d tuple\n self._player.position = (position[0], position[1], 0)", "def update(self):\r\n self.updateVelocities()\r\n self.u...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Detaches any existing sounds and removes the update task
def disable(self): taskMgr.remove("Audio3DManager-updateTask") self.detachListener() for object in list(self.sound_dict.keys()): for sound in self.sound_dict[object]: self.detachSound(sound)
[ "def destroyPlayTimers(self):\n try:\n gobject.source_remove(self.timSec)\n except:\n pass", "def purge():\n common.debug('Purging library: {}'.format(g.library()))\n for library_item in g.library().values():\n execute_library_tasks(library_item['videoid'], remove_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Runs the vhdeps CLI. The commandline arguments are taken from `args` when specified, or `sys.argv` by default. The return value is the exit code for the process. If the backtrace option is passed, exceptions will not be caught.
def run_cli(args=None): parser = argparse.ArgumentParser( usage='vhdeps <target> [entities...] [flags...] [--] [target-flags...]', description='This script is a VHDL dependency analyzer. Given a list ' 'of VHDL files and/or directories containing VHDL files, it can ' 'generate a com...
[ "def main():\n backup_args = None\n try:\n\n freezer_config.config(args=sys.argv[1:])\n freezer_config.setup_logging()\n backup_args = freezer_config.get_backup_args()\n if backup_args.config:\n # reload logging configuration to force oslo use the new log path\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
orders a teardrop trace by identifying the streptavidin molecule.
def order_teardrop_trace(td_indices, strep_ind): td_indices = np.append([strep_ind], td_indices, axis=0) # append strep td_indices = order_trace(td_indices, 0) # start ordering with strep td_indices = np.append(td_indices, [strep_ind], axis=0) # close loop return td_indices
[ "def local_tetrahedral_order(dumpfile, filetype = 'lammps', moltypes = '', ppp = [1,1,1], outputfile = ''):\r\n\r\n d = readdump(dumpfile, 3, filetype, moltypes)\r\n d.read_onefile()\r\n num_nearest = 4 #number of selected nearest neighbors\r\n results = np.zeros((max(d.ParticleNumber), d.SnapshotNumbe...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
method to remove faulty short skeleton branches.
def prune_skeleton(skel, skel_ep, prune_length=0.15): pruned_skel = np.copy(skel) prune_indices = np.transpose(np.nonzero(skel)).tolist() # Set pruning length length_of_trace = len(prune_indices) max_branch_length = int(length_of_trace * prune_length) # short branch limit # Identify al...
[ "def prune_branches(skeleton):\n for he in skeleton.half_edges.values():\n assert he.face.id is not None, he.id\n assert he.twin.face.id is not None, he.id\n # remove edges that have the same face on both sides\n remove = set()\n for he in skeleton.half_edges.values():\n if he.face ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Main function to get the list of URLs from a CSV file, collect metrics from PageSpeed, and push the metrics to Prometheus
def process_request(): with open('inlist.csv', 'r') as csvfile: file_read_lines = csv.reader(csvfile, delimiter=',') for row in file_read_lines: page = ', '.join(row[:1]) # getting first row from file logging.info(f'Take URL: {page}') try: respo...
[ "def run_parser(machine, output, report):\n dict_rows = csv.DictReader(report, delimiter='|')\n pattern = re.compile(r'^(http|https|ftp)://(aix.software.ibm.com|public.dhe.ibm.com)/(aix/ifixes/.*?/|aix/efixes/security/.*?.tar)$')\n rows = [row['Download URL'] for row in dict_rows]\n rows = [row for row ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Replace the property `target` on `obj` with `replacement`. This allows you to change the behavior of an existing unrelated class.
def replace_property(cls, obj, target, replacement): Mock.__recall__[(obj,target)]=getattr(obj,target) obj.__dict__[target]=property(replacement)
[ "def prop_proxy(self, prop):\n return self", "def _override(self, name, obj):\n path = name.split('.')\n assert len(path) > 1, 'module name not provided'\n obj_name = path[-1]\n\n objs = self._resolvePath(path[:-1])\n container = objs[-1]\n try:\n origin...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reset all back to original methods.
def reset_all(cls): for key in Mock.__recall__.keys(): cls.replace(key[0],key[1],Mock.__recall__[key]) Mock.__recall__ = {}
[ "def reset(self):\n\n for test in self._tests:\n test.reset()", "def _reset(self) -> None:\n self._reset_source()", "def reset_method(self, method_name, run):\n \n run_path = os.path.join(self.root_dir, \n str(run))\n method_path = os....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test record to see that points are applied to the right area. This only works for non-free point tests.
def test_area_rec(self,rec,area,points): print "Testing %s" % rec.rule.name self.assertEqual(points,rec.points,"Error: Wrong number of points for %s. Should have been %s, was %s" % (rec.rule.name,points,rec.points))
[ "def test_property_points(self):\n self.assertEqual(self.tr1.points,((7, 1), (1, 9), (1, 1)), 'Property did not work')\n self.assertEqual(self.tr2.points,((3.0, 0.0), (0.0, 4.0), (0.0, 0.0)), 'Property did not work')\n self.assertEqual(self.floattest.points, ((3.3, 0.0), (0.2, 4.2), (0.0, 0.0))...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
computes how the error rate evolves with the initial number of clusters
def evolution_error_cluster(n_point, dimension, nb_rect_min, nb_rect_max, nb_rect_step): nbrs, fits1, fits2 = [], [], [] for nb_rect in range(nb_rect_min, nb_rect_max, nb_rect_step): print("calcul pour nb_rect = ", nb_rect) fit1, fit2 = experience_between_theoritical_and_learned_cluster(nb_...
[ "def calculate_errors(dataset,cluster_indices,predict_energies):\n \n print(\"Calculating errors for each cluster...\")\n\n \n #helping variables\n n_clusters=len(cluster_indices)\n R,F,E=dataset[\"R\"],dataset[\"F\"],dataset[\"E\"]\n mse=[]\n sample_errors=[]\n\n #loop through clusters\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
shows the curse of dimensionality, i.e., computation time increasing with dimension
def explosion_dimension(dim_mini, dim_max, nb_point, nb_carre): print('lacement calcul') tms, dims = [], [] for dim in range(dim_mini, dim_max): print('dimension de calcul : ', dim) set_point = creation_point_rectangles_2(nb_point, nb_carre, dim) t1 = clock() #ht = mv1_a...
[ "def timespans_dq():\n #date point gets colored value based on average standard deviation of scenes\n #using that date. NOTE: Mask out regions of know signal or else large timespan\n #scenes show high variance.\n print('Not Implemented')", "def show_covid(alpha, delta_t):\n real_t = np.arange(0, 5....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
shows the evolution of the cost depending on the number of merged rectangles in the cluster at each step
def evolution_nb_rectangle_cost(nb_point, nb_rectangle, dimension): set_point = creation_point_rectangles(nb_point, nb_rectangle, dimension) Y, X = evolution_cost(set_point, 0.05) plt.plot(X, Y) plt.xlabel('number of learned rectangles') plt.ylabel('cost function') plt.title('evolution of a cost...
[ "def evolution_error_cluster(n_point, dimension, nb_rect_min, nb_rect_max, nb_rect_step):\n \n nbrs, fits1, fits2 = [], [], []\n for nb_rect in range(nb_rect_min, nb_rect_max, nb_rect_step):\n print(\"calcul pour nb_rect = \", nb_rect)\n fit1, fit2 = experience_between_theoritical_and_learned...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
test if two rectangles merge well
def test_merge(): R = ([0.29, 0.17], [0.38, 0.41]) S = ([0.51, 0.00], [0.96, 0.47]) RUS = merge_rectangle((R,S), [R, S]) #print(RUS) afficher_plsr_pts_rect([R, S, RUS[0] ], None)
[ "def are_comparable(r1: OriginRectangle, r2: OriginRectangle) -> bool:\n for a in [r1, r1.rot90()]:\n for b in [r2, r2.rot90()]:\n if is_inside(a, b) or is_inside(b, a):\n return True", "def check_overlap(l1_x, l1_y, r1_x, r1_y, l2_x, l2_y, r2_x, r2_y):\r\n# If one rectang...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
find start of the code object
def find_start(lst): i=0 for i in range(len(lst)): if opcodes.m_type.get(lst[i])=='TYPE_CODE': return i
[ "def __find_block_start(self):\n try:\n return self.__find_token(self.__block_head)\n except RouteParserError:\n raise StartTokenNotFoundError(_('No match for entry block start'))", "def get_doc_start():\n start = \"*** START OF THIS PROJECT GUTENBERG EBOOK THE ADVENTURES OF...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Uses stored client credentials to get an OAuth access token from Spotify. Requires accepting app access via a Spotify browser redirect, and the user needs to copy and paste the redirect URL into the console
def get_oauth(): # Get Spotify client credentials from a .gitignore'd config file with open('config.json', 'r') as data_file: config = json.load(data_file) spotify_client_id = config['spotify_client_id'] spotify_client_secret = config['spotify_client_secret'] url_redirect = 'https:/...
[ "def authSpotify():\n scopes = [\"streaming\", \"user-read-birthdate\", \"user-read-email\", \"user-read-private\"]\n scope_string = (\" \").join(scopes)\n params = {\n \"response_type\": \"code\",\n \"client_id\": SPOTIFY_CLIENT_ID,\n \"scope\": scope_string,\n \"redirect_uri\"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets bearer token through an existing token.txt file or requests one from Spotify through the get_oauth function
def get_bearer(): try: # get existing bearer token and test it through bearer_test method with open('token.txt', 'r') as token_file: token_data = token_file.read() token_dict = json.loads(token_data) bearer_token_str = token_dict['access_token'] except FileNo...
[ "def get_oauth():\n\n # Get Spotify client credentials from a .gitignore'd config file\n with open('config.json', 'r') as data_file:\n config = json.load(data_file)\n spotify_client_id = config['spotify_client_id']\n spotify_client_secret = config['spotify_client_secret']\n url_redirec...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Search gnip for ``q``, return results directly from gnip.
def search(self, q, **kw): return self.gnip.search(q, **kw)
[ "def search():\n\n # parses querie into key word array\n q = request.args.get(\"q\")\n\n # parases query into an array\n q_array = q.split(\" \")\n\n # remove any commas (if any)\n query = []\n for item in q_array:\n if item[len(item) - 1] == \",\":\n item = item.replace(\",\"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializes a graph object with an empty dictionary. self.vert_dict -> dictionary of vertices keyed by name; self.num_verticies -> number of vertices
def __init__(self):
    self.vert_dict = {}
    self.num_verticies = 0
    self.num_edges = 0
[ "def __init__(self, number_of_vertices: int):\n super().__init__(number_of_vertices)\n\n self.__graph: Dict[int: List[int]] = {}\n\n for i in range(number_of_vertices):\n self.__graph[i] = []", "def __init__(self, vertex_num=0):\n self._adj_lists = [{} for i in range(vertex_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a new vertex object to the graph with the given key and return the vertex.
def add_vertex(self, key):
    self.num_verticies += 1
    new_vertex = Vertex(key)
    self.vert_dict[key] = new_vertex
    return new_vertex
[ "def add_vertex(self, key):\n\n if key in self.vert_dict:\n print(f'Vertex {key} already exists')\n return\n\n # create a new vertex\n new_vertex = Vertex(key)\n self.vert_dict[key] = new_vertex\n self.num_vertices += 1\n\n return self.vert_dict[key]",...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
add an edge from vertex f to vertex t with a cost
def add_edge(self, f, t, cost=0):
    if f not in self.vert_dict:
        self.add_vertex(f)
    if t not in self.vert_dict:
        self.add_vertex(t)
    self.vert_dict[f].add_neighbor(self.vert_dict[t], cost)
    self.vert_dict[t].add_neighbor(self.vert_dict[f], cost)
    self.num_edges +...
[ "def add_edge(self, f_key, t_key, f_data=None, t_data=None, cost=0):\n if f_key not in self:\n self.add_node(f_key, data=f_data)\n if t_key not in self:\n self.add_node(t_key, data=t_data)\n f_node, t_node = self[f_key], self[t_key]\n self.nodes[f_node][t_node] = co...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns all the edges of a given vertex
def get_edges(self, vertex):
    dict_edges = self.vert_dict[vertex].neighbors
    return dict_edges
[ "def get_edges(graph, vertex):\n try:\n vertex = vertex.index\n except AttributeError:\n pass\n pairs = (\n (vertex, cur_vertex)\n for cur_vertex in range(graph.vcount())\n if graph[vertex, cur_vertex] > 0\n )\n return tuple(graph.es[idx] for idx in graph.get_eids(t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
a sequence of 2-tuples (x, y) from the given string, which is a path as represented by an SVG <path> element's 'd' attribute.
def parseSVGPath(pathstring):
    actions = [Action(part) for part in PARTFINDER.findall(pathstring)]
    # assert (len(parts) % 2) == 0, "Path string does not contain even number of x's and y's"
    r = []
    for a in [a for a in actions if a.type in 'ML']:
        x, y = a.parts
        r.append((x, y))
    return r
[ "def tuplesPath(path):\n\n\treturn map(lambda n: (n.location.x, n.location.y), path)", "def string_to_coordinates(s):\n\t\n\tx, y = map(float, s.split(maxsplit = 2))\n\t\n\treturn x, y", "def _ParseD(self, d_str):\n #PATH_IDENTIFIERS = \"ML\"\n chars_to_remove = \"z|Z\"\n\n self.d_str = d_s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for email_subscriptions_mailing_lists_add
def test_email_subscriptions_mailing_lists_add(self):
    pass
[ "def test_email_subscriptions_mailing_lists_list(self):\n pass", "def test001MethodsForEmaillist(self):\n \n user_name = 'YujiMatsuo-' + self.postfix\n family_name = 'Matsuo'\n given_name = 'Yuji'\n password = '123$$abc'\n suspended = 'false'\n\n try:\n user_yuji = self.apps_clien...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for email_subscriptions_mailing_lists_list
def test_email_subscriptions_mailing_lists_list(self):
    pass
[ "def test_email_subscriptions_mailing_lists_add(self):\n pass", "def test_get_list_unsubscribe_recipients(self):\n pass", "def test_user_list_subscriptions(self):\n pass", "def test_user_current_list_subscriptions(self):\n pass", "def testListSubscriptionIDs(self, mock_list):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Log parameters and/or response of the wrapped/decorated function using logging package
def log(parameters=False, response=False):
    def decorator(func):
        def wrapper(*args, **kwargs):
            if parameters:
                LOGGER.info(PARAM_LOG_MESSAGE, func.__name__, args)
            func_response = func(*args, **kwargs)
            if response:
                LOGGER.info(RESPONSE_LOG_MES...
[ "def log_call(func):\n @wraps(func)\n def logged(*args, **kawrgs):\n header = \"-\" * len(func.__name__)\n print(green(\"\\n\".join([header, func.__name__, header]), bold=True))\n return func(*args, **kawrgs)\n return logged", "def log_func(function):\n @wraps(function)\n def d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handles all exceptions thrown by the wrapped/decorated function.
def handle_all_exceptions():
    def decorator(func):
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception as ex:  # noqa: pylint - catching-non-exception
                LOGGER.error(str(ex))
                return failure(str(ex))
        ret...
[ "def exception_handler(function: Any) -> Any:\n\n @functools.wraps(function)\n def wrapper(*args: Any, **kwargs: Any) -> Any:\n try:\n return function(*args, **kwargs)\n\n except pexpect.EOF as exception:\n error_message = exception.value.split(\"\\n\")[5]\n\n if...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds HSTS header to the response of the decorated function
def hsts(max_age: int = None):
    def decorator(func):
        def wrapper(*args, **kwargs):
            response = func(*args, **kwargs)
            if isinstance(response, dict):
                headers_key = find_key_case_insensitive("headers", response)
                resp_headers = response[headers_key]
                if hea...
[ "def _patch_header(response: HttpResponse, status: Status) -> None:\n # Patch cache-control with no-cache if it is not already set.\n if status == Status.SKIP and not response.get(\"Cache-Control\", None):\n response[\"Cache-Control\"] = CacheControl.NOCACHE.value\n # Add our custom header.\n if ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plot a heatmap for the given matrix A.
def plot_matrix(A, ax=None, vmin=-3, vmax=3, formt="%0.2f", thresh=1e-16, block=False):
    if ax is None:
        plt.figure()
        ax = plt.gca()
    ax.imshow(A, vmin=vmin, vmax=vmax, cmap='bwr')
    for i in range(len(A)):
        for j in range(len(A)):
            if A[i, j] != 0:
                ax.text(j, i,...
[ "def heatmap(self):\n plt.imshow(self.M)\n plt.yticks([])\n plt.xticks(np.arange(self.size[1]))\n plt.show()", "def show_heatmap(self):\n plt.show()", "def plot_heat_map(\n data_matrix, x_free_variable, y_free_variable, matrix_range=(None, None),\n cmap=None, cba...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a dictionary of anti-dependencies for design units
def compute_anti_dependences(self):
    res = {}
    lib = libraries.Get_Libraries_Chain()
    while lib != nodes.Null_Iir:
        files = nodes.Get_Design_File_Chain(lib)
        while files != nodes.Null_Iir:
            units = nodes.Get_First_Design_Unit(files)
            while units !...
[ "def test_cfg_exclude_component_dict(self):\n # create the top level externals file\n desc = self.setup_dict_config()\n # Test an excluded repo\n external = create_externals_description(desc, model_format='dict',\n exclude=['simp_tag',\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the currently running Bot instance.
def bot(cls):
    return cls._cur_bot
[ "def current_app(self):\n return self.app", "def bot_id(self):\n return self._bot_id", "def get_instance(self):\n if not self.is_server_active():\n self._log('The TCPServer instance is not running!')\n return self._process", "def get_instance():\n if Overworld.__insta...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse and do whatever work necessary for the message. If the message necessitates a reply, save it to self.reply_msg.
def _parse(self):
    self.reply_msg = MessageHandler.fire_handlers(self)
[ "def process(self, message):\n assert self._state.connected\n try:\n prefix, command, params = parsing.parse(message)\n three_digits = re.compile('[0-9][0-9][0-9]')\n if three_digits.match(command):\n numeric_reply = int(command)\n if 0 <=...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }