query
stringlengths
9
9.05k
document
stringlengths
10
222k
negatives
listlengths
19
20
metadata
dict
Reads feature vectors and labels from a file and prints information about their clustering properties. Here, we think of the space of feature vectors, and consider a vector v_i to be in cluster j if j is one of the labels for example i.
def analyze_feature_vector_clusters(features_file_path, distance=utils.L2_distance):
    """Read feature vectors and labels from a file and log clustering statistics.

    We think of the space of feature vectors and consider a vector v_i to be
    in cluster j if j is one of the labels for example i.  Logs the global
    average pairwise distance and the average pairwise distance within each
    cluster.

    :param features_file_path: path of the file holding vectors and labels.
    :param distance: pairwise distance function (defaults to L2).
    """
    feature_vectors, label_vectors = utils.read_feature_and_label_vectors(features_file_path)
    logging.info('Building clusters...')
    # Map from (integer j) --> (list of indices i such that feature_vectors[i]
    # is in cluster j).  Cluster 0 indicates no disease.
    indices_for_label = map_labels_to_example_indices(label_vectors)
    logging.info('...done.')
    logging.info('Computing global and within-cluster average distances')
    # Compute average distance between vectors overall.
    global_average_distance = average_distance_between_vectors(feature_vectors, distance)
    # Lazy %-style arguments: the message is only formatted if the record
    # is actually emitted (the original built the strings eagerly with +).
    logging.info('Global average %s between vectors: %s',
                 distance.__name__, global_average_distance)
    # Compute average distance within each cluster.
    for j, vector_indices in indices_for_label.items():
        vectors_in_cluster = [feature_vectors[index] for index in vector_indices]
        average_cluster_distance = average_distance_between_vectors(vectors_in_cluster, distance)
        logging.info('Average %s between vectors in cluster %s: %s',
                     distance.__name__, j, average_cluster_distance)
[ "def load_vectors (file_extension = None):\n \n feat_file_name = 'output/' + file_extension + '.feature'\n label_file_name = 'output/' + file_extension + '.label'\n \n prettyPrint( \"Loading feature vectors and labels from disk ... \", color.CYAN)\n if not os.path.isfile(feat_file_name) or not os....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test if the user's favourite restaurant is added to the DB
def test_add_to_fav_(self):
    """Verify that posting a favourite restaurant persists it to the DB."""
    payload = {
        "yelp_biz_id": "JA_V9TqDCrkgknqrcUndIQ",
        "yelp_rest_name": "Siam",
        "yelp_rating": "4",
        "yelp_category": "Thai",
        "yelp_price": "$$",
        "yelp_image_url": "https://s3-media2.fl.yelpcdn.com/bphoto/1SkZwZrRZkQSzRMn_Trs3w/o.jpg",
    }
    result = self.client.post("/add_to_fav", data=payload)
    saved = Restaurant_details.query.filter_by(biz_id="JA_V9TqDCrkgknqrcUndIQ").first()
    # The row must exist and carry the submitted restaurant name.
    self.assertIsNotNone(saved)
    self.assertEqual(saved.restaurant_name, 'Siam')
    # The response confirms the save to the user.
    self.assertIn(b"Your Favourite has been saved", result.data)
[ "def test_add_favorites_add_favorite_to_favorites_of_user(self):\n p3 = Product.objects.create(barcode=\"123456\",\n product_name=\"Lait3\",\n brand=\"gandia +\",\n url_page=\"www.test.com\",\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the eol mode map
def EOLModeMap():
    """Return a map of EOL mode ids to human-readable labels.

    Maintenance Note: ints must be kept in sync with EDSTC_EOL_* in edstc.
    """
    # Fixed user-facing typo: "Machintosh" -> "Macintosh".
    return {EOL_MODE_CR: _("Old Macintosh (\\r)"),
            EOL_MODE_LF: _("Unix (\\n)"),
            EOL_MODE_CRLF: _("Windows (\\r\\n)")}
[ "def get_eol_for_open(self) -> str:\n map = {\n EOLTypes.CRLF: WINDOWS_EOL,\n EOLTypes.LF: UNIX_EOL,\n EOLTypes.NATIVE: linesep,\n }\n\n return map[self]", "def get_modes(self):\n return [i for i, j in enumerate(self._modemap._map) if j is not None]", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Loops through each page within a single PDB and sums up the stats of each page to arrive at the overall total
def analyze(directory, pdf_file, doc_type):
    """Loop through each page within a single PDB and sum up the stats of
    each page to arrive at the overall total.

    Splits the PDB (a PDF file) into one jpg per page, accumulates the
    per-page redaction statistics, then appends one summary row for the
    document to output.csv.
    """
    total_redaction_count = 0
    total_redacted_text_area = 0
    total_estimated_text_area = 0
    total_estimated_num_words_redacted = 0
    # Split the pdb (which is a pdf file) into individual jpgs.
    redaction_module.pdf_to_jpg(directory, pdf_file)
    os.chdir(directory)
    for jpg_file in os.listdir(directory):
        # Iterating through each page of the PDB.
        if jpg_file.endswith(".jpg"):
            [redaction_count, redacted_text_area, estimated_text_area,
             estimated_num_words_redacted, potential, text_potential,
             type1, type2, type3] = redaction_module.image_processing(jpg_file, doc_type)
            total_redaction_count += redaction_count
            total_redacted_text_area += redacted_text_area
            total_estimated_text_area += estimated_text_area
            total_estimated_num_words_redacted += estimated_num_words_redacted
            # Crucial clean-up of jpg files (Note: If files are not removed,
            # code will NOT work properly).
            os.remove(jpg_file)
    # Now that we've gone through each page, calculate the document stats.
    # Convert to float BEFORE dividing: the original wrapped float() around
    # an already-computed quotient, which is a no-op (and truncates under
    # Python 2 integer division).
    if total_estimated_text_area != 0:
        total_percent_text_redacted = total_redacted_text_area / float(total_estimated_text_area)
    else:
        total_percent_text_redacted = 0
    data = []
    # Open csv file and write the stats in a single row representing the
    # document.  The `with` block closes the file on exit; the original's
    # explicit output.close() inside the block was redundant.
    with open('output.csv', mode='a+') as output:
        output_writer = csv.writer(output, delimiter=',')
        row = [pdf_file, total_redaction_count, total_percent_text_redacted,
               total_estimated_num_words_redacted]
        data.append(row)
        print(tabulate(data, headers=[" ", " ", " ", " ", " "]))
        output_writer.writerow(row)
[ "def total_db_hits(profile):\r\n nb = 0\r\n for child in profile.children:\r\n nb += total_db_hits(child)\r\n nb += profile.db_hits\r\n return nb", "def __get_totals(self, soup):\n pnum_span = soup.select_one('.pagenum')\n if pnum_span.text != 'no results':\n range_1 = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new text input instance. colorNames: a sequence of strings (each color must start with a different letter)
def __init__(self, colorNames):
    """Create a new text input instance.

    colorNames: a sequence of strings; each color must start with a
    different letter, since the first letters form the palette of
    single-character choices (e.g. 'R' for red).
    """
    self._lengthOfPattern = 0  # will later be queried from the user
    # Uppercased first letter of every color name, in order.
    self._palette = ''.join(color[0].upper() for color in colorNames)
[ "def mkColor(self, name):\n known_attrs = [ 'font-family', 'font-style', 'font-weight', 'font-size', 'text-decoration', 'color', 'background-color' ]\n stack = []\n color = Color(name)\n for token in self.tokenizer:\n if token.text == \";\":\n stack[0].assert_sy...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Robustly prompt the user for an integer from small to large.
def _readInt(self, prompt, small, large):
    # Robustly prompt the user for an integer from small to large
    # (inclusive).  Re-prompts on non-integer input and on out-of-range
    # values.  NOTE: Python 2 code (print statement, raw_input).
    prompt = prompt + ' (from ' + str(small) + ' to ' + str(large) + ')? '
    answer = small - 1  # intentionally invalid so the loop runs at least once
    while not small <= answer <= large:
        try:
            answer = int(raw_input(prompt))
            if not small <= answer <= large:
                print 'Integer must be from '+str(small)+' to '+str(large)+'.'
        except ValueError:
            # Non-numeric input: keep the invalid answer and loop again.
            print 'That is not a valid integer.'
    return answer
[ "def enforceInt(prompt, minValue = None, maxValue = None):\n testInput = input(prompt)\n try:\n testInput = int(testInput)\n return sizeCheck(testInput, minValue, maxValue)\n except:\n return f'Input \"{testInput}\" cannot be converted into an integer'", "def prompt_int(prompt):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ask the user how many pegs in the secret pattern.
def queryLengthOfPattern(self):
    """Ask the user how many pegs are in the secret pattern (1 to 10)."""
    length = self._readInt('How many pegs are in the secret', 1, 10)
    self._lengthOfPattern = length
    return length
[ "def guessing(guess, count):", "def numplayers_ask(context):\n response = 0\n while response == 0:\n response = int(raw_input(\"How many players? (2-6):\"))\n if response in [2,3,4,5,6]:\n return response\n else: \n print \"Bad Input\"\n response = 0", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a guess from the user and return it as a Pattern instance.
def enterGuess(self):
    """Get a guess from the user and return it as a Pattern instance.

    Loops until the entered string has the configured length and every
    character is one of the colors currently in use.
    NOTE: Python 2 code (print statement, raw_input).
    """
    validPattern = False
    while not validPattern:
        print  # intentional blank line
        prompt = 'Enter a guess (colors are '
        prompt += self._palette[:self._numColorsInUse] + '): '
        patternString = raw_input(prompt)
        # Assume valid until one of the checks below proves otherwise.
        validPattern = True
        if len(patternString) != self._lengthOfPattern:
            print 'The pattern must have', self._lengthOfPattern, 'pegs'
            validPattern = False
        else:
            # Every peg must be drawn from the in-play palette letters.
            for i in range(self._lengthOfPattern):
                if patternString[i].upper() not in self._palette[:self._numColorsInUse]:
                    validPattern = False
            if not validPattern:
                print 'The color options are', self._palette[:self._numColorsInUse]
        if validPattern:
            # Translate each letter back to its palette index.
            pattern = Pattern(self._lengthOfPattern)
            for i in range(self._lengthOfPattern):
                pattern.setPegColor(i, self._palette.index(patternString[i].upper()))
    return pattern
[ "def get_guess(self):\n return self._guess", "def get_guess(self):\n new_guess = \"\"\n try:\n new_guess = input(\"Enter a letter: \").lower()\n if len(new_guess) > 1:\n new_guess = \"INVALID\"\n raise ValueError(\"The guess you entered was ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Restarts the timer and closes any existing progress bar.
def restart(self):
    """Restart the timer and close any existing progress bar."""
    # Tear down the current bar first, then reset progress and clock.
    self.done()
    self.counter = 0
    self.start_time = time.time()
[ "def restart_timer(self):\n self.log.info(\"{} timer restarted ({} seconds)\".format(self.name, self.interval))\n self.count = self.interval / self.sleep_chunk\n if not self.defer and self.interval > 0:\n self._callback()\n if self.start_event.is_set():\n self.reset...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Advances the progress bar. If visible, shows progress, otherwise updates in the background. If the time threshold has passed and the progress bar should appear, this method creates it.
def next(self):
    """Advance the progress bar by one step.

    If visible, shows progress; otherwise updates in the background.  The
    bar is created lazily, only once the elapsed time passes the threshold,
    and is initialized with the progress accumulated so far.
    """
    if self.skip:
        return
    self.counter += 1
    if self.pbar is not None:
        self.pbar.update(1)
    elif (time.time() - self.start_time) > self.threshold:
        # Create the bar pre-advanced to the background progress.
        self.pbar = tqdm(total=self.n, desc=self.title, initial=self.counter)
[ "def updateProgressBar(self):\n while not self.abortProgressBar:\n time.sleep(0.05)\n cur = self.progressTracker\n if cur == None or cur.startTime == None:\n continue\n remaining = self.getEstimatedTime(cur)-(datetime.now()-cur.startTime)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The worker function, invoked in a thread. 'nums' is a list of numbers to factor. The results are placed in outdict.
def worker(nums, outdict):
    """The worker function, invoked in a thread.

    'nums' is a list of numbers to factor; the result for each number is
    stored in outdict, keyed by the number itself.
    """
    print(threading.current_thread().name)
    print("pid:", os.getpid())
    # Factor each number and record the result under its own key.
    for number in nums:
        outdict[number] = factorize_naive(number)
[ "def worker(nums, out_q):\n outdict = {}\n print(threading.current_thread().name)\n print (\"pid:\", os.getpid())\n print (\"data size:\", nums)\n for n in nums:\n outdict[n] = factorize_naive(n)\n out_q.put(outdict)", "def find_num_factors(number, pause=0, num...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The worker function, invoked in a process. 'nums' is a list of numbers to factor. The results are placed in a dictionary that's pushed to a queue.
def worker(nums, out_q):
    """The worker function, invoked in a process.

    'nums' is a list of numbers to factor.  The results are collected in a
    local dict that is pushed onto the queue once all numbers are done.
    """
    results = {}
    print(threading.current_thread().name)
    print("pid:", os.getpid())
    print("data size:", nums)
    for number in nums:
        results[number] = factorize_naive(number)
    # Hand the whole batch back to the parent via the queue.
    out_q.put(results)
[ "def worker(nums, outdict):\n print(threading.current_thread().name)\n print (\"pid:\", os.getpid())\n for n in nums:\n outdict[n] = factorize_naive(n)", "def find_num_factors(number, pause=0, num_procs=1, num_threads=1):\n def find_factors(number, queue, pause=0, interval=1, st...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Merge a sequence of operations into a crossproduct tree.
def merge(from_args):
    """Merge a sequence of operations into a cross-product tree.

    Folds the operations in from_args (a mapping) left-to-right into nested
    CrossProduct nodes and returns (tree, offsets).
    """
    assert len(from_args) > 0
    # Pairwise-reduce the operations into a chain of cross products.
    op = reduce(lambda left, right: algebra.CrossProduct(left, right),
                from_args.values())
    return (op, __calculate_offsets(from_args))
[ "def cross(self, *args):\n tmp = self\n for arg in args:\n tmp = _SetProduct(tmp, arg)\n return tmp", "def build_operations(self):\n pipeline_steps = []\n\n del_operation = self.build_delete_step(self.configs_.get('delete', []))\n pipeline_steps.append(('delete...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test getting the agent name.
def test_get_agent_name(self):
    """Test getting the agent name."""
    args = [*CLI_LOG_OPTION, "config", "get", "agent.agent_name"]
    result = self.runner.invoke(
        cli, args, standalone_mode=False, catch_exceptions=False
    )
    # The command succeeds and prints the configured name.
    assert result.exit_code == 0
    assert result.output == "Agent0\n"
[ "def get_name(self):\n return self.agent_name", "def test_get_agents_names(self):\n pass", "def agent_name(self) -> str:\n return self.identity.name", "def test_get_agent(self):\n pass", "def agentName(self):\n return self.__class__.__name__", "def test_get_name(self):\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test getting the 'dummy' skill name.
def test_get_skill_name(self):
    """Test getting the 'dummy' skill name."""
    args = [*CLI_LOG_OPTION, "config", "get", "skills.dummy.name"]
    result = self.runner.invoke(cli, args, standalone_mode=False)
    # The command succeeds and prints the skill name.
    assert result.exit_code == 0
    assert result.output == "dummy\n"
[ "def test_set_skill_name_should_fail(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"skills.dummy.name\", \"new_dummy_name\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1", "def sample_name(data):\n re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the 'get' fails because the path is too short but the root is correct.
def test_too_short_path_but_root_correct(self):
    """Test that 'get' fails because the path is too short but the root is correct.

    Both a bare root ("agent") and a root plus component ("skills.dummy")
    must produce the same error.  The original duplicated the whole
    invoke/assert block; a loop removes the duplication.
    """
    expected = (
        "The path is too short. Please specify a path up to an attribute name."
    )
    for path in ("agent", "skills.dummy"):
        result = self.runner.invoke(
            cli,
            [*CLI_LOG_OPTION, "config", "get", path],
            standalone_mode=False,
        )
        assert result.exit_code == 1
        assert result.exception.message == expected
[ "def test_get_invalid_path(self):\n yield self.start_server()\n url = \"%s/%s\" % (self.url, 'bad_path',)\n resp = yield http_request_full(method='GET', url=url)\n\n self.assertEqual(resp.code, http.NOT_FOUND)", "def test_root_get(self):\n pass", "def testInvalidPath(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that getting a nested object in 'dummy' skill fails because path is not valid.
def test_get_fails_when_getting_nested_object(self):
    """Getting a nested object in the 'dummy' skill fails: path is not valid."""
    args = [
        *CLI_LOG_OPTION,
        "config",
        "get",
        "skills.dummy.non_existing_attribute.dummy",
    ]
    # The CLI surfaces the missing attribute as a ClickException.
    with pytest.raises(
        ClickException, match=r"Attribute `.* for .* config does not exist"
    ):
        self.runner.invoke(
            cli, args, standalone_mode=False, catch_exceptions=False
        )
[ "def test_get_object_nested_dotted(basic_object, basic_object_value):\n acc = Accessor(getter=\"value.key.key\")\n assert acc.get(basic_object) == \"value\"", "def test_contextpath_getattr_readable():\n assert ContextPath() == ContextPath(\"$$\")\n assert ContextPath().Execution == ContextPath(\"$$.Ex...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that getting a vendor component with wrong component type raises error.
def test_get_fails_when_getting_vendor_dependency_with_wrong_component_type(self):
    """Getting a vendor component with a wrong component type raises an error."""
    bad_path = "vendor.fetchai.component_type_not_correct.error.non_existing_attribute"
    result = self.runner.invoke(
        cli,
        [*CLI_LOG_OPTION, "config", "get", bad_path],
        standalone_mode=False,
    )
    assert result.exit_code == 1
    expected = "'component_type_not_correct' is not a valid component type. Please use one of ['protocols', 'connections', 'skills', 'contracts']."
    assert result.exception.message == expected
[ "def test_register_component_with_invalid_type():\n\n with pytest.raises(InvalidComponentTypeError):\n component = CoreObject()\n application_services.register_component(component)", "def test_get_component_doesnt_exist(composite):\n ct = composite()\n with pytest.raises(SarasvatiException)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test setting the agent name.
def test_set_agent_incorrect_value(self):
    """Setting a disallowed agent attribute raises a ClickException."""
    args = [*CLI_LOG_OPTION, "config", "set", "agent.not_agent_name", "new_name"]
    with pytest.raises(
        ClickException,
        match="Attribute `not_agent_name` is not allowed to be updated!",
    ):
        self.runner.invoke(
            cli, args, standalone_mode=False, catch_exceptions=False
        )
[ "def test_get_agent_name(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"agent.agent_name\"],\n standalone_mode=False,\n catch_exceptions=False,\n )\n assert result.exit_code == 0\n assert result.output =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test setting the 'dummy' skill name.
def test_set_skill_name_should_fail(self):
    """Setting the 'dummy' skill name is rejected with a non-zero exit code."""
    args = [*CLI_LOG_OPTION, "config", "set", "skills.dummy.name", "new_dummy_name"]
    result = self.runner.invoke(cli, args, standalone_mode=False)
    assert result.exit_code == 1
[ "def test_get_skill_name(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"skills.dummy.name\"],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n assert result.output == \"dummy\\n\"", "def test_empty_n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test setting a nested attribute.
def test_set_nested_attribute(self):
    """Test setting a nested attribute, then reading the new value back."""
    path = "skills.dummy.behaviours.dummy.args.behaviour_arg_1"
    new_value = "10"  # cause old value is int
    common = dict(standalone_mode=False, catch_exceptions=False)
    set_result = self.runner.invoke(
        cli, [*CLI_LOG_OPTION, "config", "set", path, new_value], **common
    )
    assert set_result.exit_code == 0
    # Reading the attribute back must show the value we just wrote.
    get_result = self.runner.invoke(
        cli, [*CLI_LOG_OPTION, "config", "get", path], **common
    )
    assert get_result.exit_code == 0
    assert new_value in get_result.output
[ "def test_get_fails_when_setting_nested_object(self):\n with pytest.raises(\n ClickException,\n match=r\"Attribute `non_existing_attribute.dummy` is not allowed to be updated!\",\n ):\n self.runner.invoke(\n cli,\n [\n *...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that setting the 'dummy' skill behaviours fails because not a primitive type.
def test_set_fails_when_setting_non_primitive_type(self):
    """Setting the 'dummy' skill behaviours fails: not a primitive type."""
    args = [*CLI_LOG_OPTION, "config", "set", "skills.dummy.behaviours", "value"]
    with pytest.raises(
        ClickException, match="Attribute `behaviours` is not allowed to be updated!"
    ):
        self.runner.invoke(
            cli, args, standalone_mode=False, catch_exceptions=False
        )
[ "def _dummy(self):\n pass", "def test_categorical_disallow_special_values() -> None:\n json_config = \"\"\"\n {\n \"type\": \"categorical\",\n \"values\": [\"foo\", \"bar\", \"foo\"],\n \"special\": [\"baz\"],\n \"default\": \"foo\"\n }\n \"\"\"\n config = json.lo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that setting a nested object in 'dummy' skill fails because path is not valid.
def test_get_fails_when_setting_nested_object(self):
    """Setting a nested object in the 'dummy' skill fails: path is not valid."""
    args = [
        *CLI_LOG_OPTION,
        "config",
        "set",
        "skills.dummy.non_existing_attribute.dummy",
        "new_value",
    ]
    with pytest.raises(
        ClickException,
        match=r"Attribute `non_existing_attribute.dummy` is not allowed to be updated!",
    ):
        self.runner.invoke(
            cli, args, standalone_mode=False, catch_exceptions=False
        )
[ "def test_utils_set_dict_value_from_path_creating_new_fields():\n dictionary = {}\n ralph_utils.set_dict_value_from_path(dictionary, [\"foo\", \"bar\"], \"baz\")\n assert dictionary == {\"foo\": {\"bar\": \"baz\"}}", "def test_utils_set_dict_value_from_path_updating_fields():\n dictionary = {\"foo\": ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test component value updated in agent config not in component config.
def test_set_get_correct_path(self):
    """Component value is updated in the agent config, not the component config."""
    def invoke(*cli_args):
        # All invocations share the same flags; only the subcommand varies.
        return self.runner.invoke(
            cli,
            [*CLI_LOG_OPTION, "config", *cli_args],
            standalone_mode=False,
            catch_exceptions=False,
        )

    # Initially no overrides are recorded in the agent configuration.
    agent_config = self.load_agent_config()
    assert not agent_config.component_configurations
    assert self.get_component_config_value() == self.INITIAL_VALUE

    result = invoke("get", self.PATH)
    assert result.exit_code == 0
    assert str(self.INITIAL_VALUE) in result.output

    result = invoke("set", self.PATH, str(self.NEW_VALUE))
    assert result.exit_code == 0

    # The component's own config file is untouched by the set ...
    assert self.get_component_config_value() == self.INITIAL_VALUE

    # ... but 'get' now reports the new value ...
    result = invoke("get", self.PATH)
    assert result.exit_code == 0
    assert str(self.NEW_VALUE) in result.output

    # ... because the override landed in the agent configuration.
    agent_config = self.load_agent_config()
    assert agent_config.component_configurations
[ "def test_component_configuration_removed_from_agent_config(self):\n with cd(self._get_cwd()):\n self.run_cli_command(\n \"add\", \"--local\", self.ITEM_TYPE, str(self.ITEM_PUBLIC_ID)\n )\n self.run_cli_command(\"add\", \"--local\", \"connection\", \"fetchai/ht...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test agent config manager get_overridables.
def test_AgentConfigManager_get_overridables():
    """Test agent config manager get_overridables."""
    path = Path(CUR_PATH, "data", "dummy_aea")
    agent_config = AEABuilder.try_to_load_agent_configuration_file(path)
    manager = AgentConfigManager(agent_config, path)
    agent_overridables, component_overridables = manager.get_overridables()
    # Agent-level overridables include the description field.
    assert "description" in agent_overridables
    # The first component exposes at least its 'is_abstract' flag.
    first_component = next(iter(component_overridables.values()))
    assert "is_abstract" in first_component
[ "def get_overridables(self) -> Tuple[Dict, List[Dict]]:\n (\n agent_overridables,\n components_overridables,\n ) = self.agent_config_manager.get_overridables()\n components_configurations = []\n for component_id, obj in components_overridables.items():\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Iterate prior to posterior distribution using input data.
def iterate(self, data):
    """Iterate prior to posterior distribution using the input data.

    Each datum multiplies the current distribution by its likelihood and
    renormalizes; the resulting distribution after each datum is stacked
    onto self.posterior (one row per datum).
    """
    # Keep a record of everything seen so far.
    self.data = np.append(self.data, data)
    for datum in data:
        self.current = self._normalize(self.current * self.likelihood(datum))
        self.posterior = np.concatenate((self.posterior, [self.current]))
    print("%d iterations completed!" % len(data))
    return None
[ "def posterior_sample(self):\n pass", "def sample_from_prior(self, *args, **kwargs):\n pass", "def priorLikelihood(self, step):\n # grab the portion of the sample that's mine\n θ = self.restrict(theta=step.theta)\n # and the storage for the prior likelihoods\n likelihoo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates credible interval for any probability distribution given input interval for cdf.
def credible_interval(self, distType='current', interval=(0.025, 0.975)):
    """Calculate the credible interval for a distribution from its cdf.

    distType: 'current', 'prior', or anything else for the full posterior
    history (one interval per iteration).  interval: (low, high) cdf cut
    points.  Returns a list of (min, max) hypothesis tuples.
    """
    # Cumulative distribution used to locate the interval bounds.
    distCred = self.cumulative_distribution(dist=distType)

    def bounds(cdf_row):
        # First hypothesis where the cdf exceeds each cut point.
        lo = self.hypotheses[np.where((cdf_row - interval[0]) > 0)[0].min()]
        hi = self.hypotheses[np.where((cdf_row - interval[1]) > 0)[0].min()]
        return (lo, hi)

    if distType == 'current' or distType == 'prior':
        # Single distribution: one credible interval.
        return [bounds(distCred)]
    # Posterior: one credible interval per iteration row.
    return [bounds(row) for row in distCred]
[ "def get_probabilty_in_closed_interval(h2_values, cdf, interval):\n p = get_approx_cdf(h2_values, cdf, interval[1]) - get_approx_cdf(h2_values, cdf, interval[0])\n if interval[0] == 0.0:\n p += cdf[0]\n return p", "def credible_interval(samples, ci=.9):\n # number of intervals to compute\n n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Normalize the product of likelihood and prior.
def _normalize(self, inp):
    """Normalize the product of likelihood and prior so it sums to one."""
    total = inp.sum()
    return inp / total
[ "def _normalize(self, probs):\n probs = np.array(probs)\n probs = probs / probs.sum()\n return probs", "def normalize(self):\n var = self.a_variable()\n return self.scale(1 / self.coefficient(var))", "def _normalize(self, distribution):\n #print distribution\n no...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Longest run testcases with more than one target
def test_longest_run_mult(self):
    """Longest-run testcases with more than one target character.

    Uses assertEqual instead of assertTrue(a == b): on failure it reports
    the actual and expected values instead of just 'False is not true'.
    """
    self.assertEqual(geneutil.longestRun('QQQQN', 'QN'), 5)
    self.assertEqual(geneutil.longestRun('QQANNQ', 'QN', 1), 6)
    self.assertEqual(geneutil.longestRun('QQNPPQ', 'QN', 1), 3)
    self.assertEqual(geneutil.longestRun('QQQAANN', 'QN', 2), 7)
    self.assertEqual(geneutil.longestRun('ANQNQAN', 'QN', 1), 6)
    self.assertEqual(geneutil.longestRun('ANQNQANP', 'QN', 1), 6)
[ "def test_returns_number_of_ways_to_reach_target(self):\n result = find_target_sum_ways([1,1,1,1,1], 3)\n self.assertEqual(result, 5)", "def num_targets(self) -> int:", "def test_max_run_start():\n state = np.array([1, 1, 1, 1, 1, 1, 0, 2, 1, 1, 1, 1, 1, 4, 6, 1, 1])\n assert max_run...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Max Sliding Count testcases
def test_max_sliding_count(self):
    """Max sliding count testcases.

    Uses assertEqual instead of assertTrue(a == b): on failure it reports
    the actual and expected values instead of just 'False is not true'.
    """
    self.assertEqual(geneutil.maxSlidingCount('AAAAA', 'A'), 5)
    self.assertEqual(geneutil.maxSlidingCount('AAAAA', 'Q'), 0)
    self.assertEqual(geneutil.maxSlidingCount('AAATAA', 'A'), 4)
    self.assertEqual(geneutil.maxSlidingCount('AAATTAA', 'A'), 3)
    self.assertEqual(geneutil.maxSlidingCount('MMMMMMMMMMABCABCABCDM', 'M', 10), 10)
    self.assertEqual(geneutil.maxSlidingCount('MMMMMMMMMMABCABCABCDM', 'C', 10), 3)
[ "def test_max_run_end():\n state = np.array([1, 1, 1, 1, 0, 1, 0, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1])\n assert max_run(1, state) == 9", "def test_max_run_start():\n state = np.array([1, 1, 1, 1, 1, 1, 0, 2, 1, 1, 1, 1, 1, 4, 6, 1, 1])\n assert max_run(1, state) == 6", "def get_highest(sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine what moves are safe for a player to make. Returns a list of valid actions that player p can make in the given state.
def safe_moves(p, state):
    """Determine what moves are safe for player p to make.

    Returns a list of direction names whose target cell is not present in
    state['cells'] (cells are keyed by stringified coordinates).
    """
    player = state['players'][p]
    x, y = player['x'], player['y']
    offsets = {
        'east': (1, 0),
        'west': (-1, 0),
        'north': (0, -1),
        'south': (0, 1),
    }
    safe = []
    for move, (dx, dy) in offsets.items():
        tx, ty = str(x + dx), str(y + dy)
        # A move is safe when the destination column or cell is empty.
        if tx not in state['cells'] or ty not in state['cells'][tx]:
            safe.append(move)
    return safe
[ "def _get_valid_actions(self, player, state):\n # create an empty list to hold all the available actions for a give player\n available_actions = []\n # 0 is sys_player and 1 is env player\n if player == 0:\n t = 1\n else:\n t = 0\n for k, v in self.tra...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Start the client listening to the game. Pass in a function that accepts the available actions and the current state of the game, and returns the action to take. The SDK will handle the rest. Checks if any commandline arguments are passed when running, if there are any, they are assumed to be client keys that are sent to the server for connecting.
def start(turn_handler):
    """Start the client listening to the game.

    turn_handler: function(player, actions, state) -> action; the SDK
    handles the rest.  The client secret is taken from the BOTBOX_SECRET
    environment variable, then from the first command-line argument, else
    no authentication is used.  The server address comes from the
    BOTBOX_SERVER environment variable, defaulting to localhost.
    """
    # Pick the authorization header: env var beats CLI argument beats nothing.
    if os.environ.get('BOTBOX_SECRET'):
        print('Using env secret:', os.environ['BOTBOX_SECRET'])
        headers = {'Authorization': os.environ['BOTBOX_SECRET']}
    elif len(sys.argv) > 1:
        print('Using cli secret:', sys.argv[1])
        headers = {'Authorization': sys.argv[1]}
    else:
        print('Using no authentication')
        headers = []

    # get the URL for the server from an environment variable if it is set,
    # otherwise use the default localhost
    host = os.environ.get('BOTBOX_SERVER')
    if host:
        url = WS_SERVER_SCHEME + '://' + host + ':' + WS_SERVER_PORT
    else:
        url = WS_SERVER_SCHEME + '://' + WS_SERVER_URL + ':' + WS_SERVER_PORT
    print("Connecting to:", url)

    app = websocket.WebSocketApp(
        url,
        on_open=_on_open,
        on_message=lambda ws, msg: _on_message(ws, msg, turn_handler),
        on_error=_on_error,
        on_close=_on_close,
        header=headers,
    )
    app.run_forever()
[ "def start(self):\n if self._callable:\n self._is_running = True\n self._run_client()", "def run_chat_client():\r\n while must_run:\r\n print_menu()\r\n action = select_user_action()\r\n perform_user_action(action)\r\n print(\"Thanks for watching. Like and s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This is a private method that handles incoming messages from the websocket, passes the turn information to an agent's turn handler, and then passes the result back to the server.
def _on_message(ws, msg, turn_handler):
    """Handle an incoming websocket message on a background thread.

    Private helper: parses the turn payload, delegates to the agent's
    turn_handler, and sends the chosen action back to the server.
    """
    def handle():
        parsed = json.loads(msg)
        # Ask the agent what to do with the current turn information.
        action = turn_handler(parsed['player'], parsed['actions'], parsed['state'])
        ws.send(json.dumps({"action": action}))

    # Run on a fresh thread so the websocket loop is never blocked.
    _thread.start_new_thread(handle, ())
[ "def on_message(self, wsobj, message):\n\n message = json.loads(message)\n\n # If needed, complete the websocket handshake\n if message[\"op\"] == \"C\":\n self.on_open(wsobj, message=message)\n\n # The next few lines ensure only gameplay related event for the\n # speci...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tile an image to a given width and height.
def tile_image(
    im: Image.Image, width: int, height: int, mode: str = "RGB", **kwargs: Any
) -> Image.Image:
    """Tile an image to a given width and height.

    The source image is pasted repeatedly, left-to-right and top-to-bottom,
    until the target canvas is covered.  Extra kwargs are forwarded to
    Image.new.

    Note: `mode` was annotated Optional[str], but None is not a valid PIL
    mode — Image.new requires a real mode string — so the annotation is
    now plain `str`.
    """
    im_out = Image.new(mode, (width, height), **kwargs)
    # Number of tiles needed to cover each axis (round up).
    h_tiles = ceil(width / im.width)
    v_tiles = ceil(height / im.height)
    for i in range(v_tiles):
        y = im.height * i
        for j in range(h_tiles):
            x = im.width * j
            im_out.paste(im, box=(x, y))
    return im_out
[ "def tile(img):\n rows, cols, res = img.rows, img.cols, img.res\n pixels, pixsize = img.pixels, channels[img.pixtype] # assumes 8-bit channels\n width, height = cols/res, rows/res\n\n def tiled(x, y):\n h = (x + width/2.0) % width # horz, vert offset from top left\n v = (height/2.0 - y) %...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fetch an image from a given URL.
def fetch_image(url: str) -> Image.Image:
    """Fetch an image from a given URL.

    Raises HTTPException carrying the upstream status code and reason if
    the request does not succeed.
    """
    response = httpx.get(url)
    if response.status_code != httpx.codes.OK:
        raise HTTPException(response.status_code, detail=response.reason_phrase)
    # Wrap the payload in a file-like object for the image loader.
    return handle_image_file(BytesIO(response.content))
[ "def fetch_image(img_url):\n\n r = requests.get(img_url)\n return r.content", "def _download_img_from_url(self, img_url):\r\n response = requests.get(img_url)\r\n img = Image.open(BytesIO(response.content))\r\n print(\"Downloaded image from url\")\r\n return img", "def get_imag...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for command_trigger_webhook_post Launch a command via a Trigger
def test_command_trigger_webhook_post(self):
    """Test case for command_trigger_webhook_post.

    Launch a command via a Trigger.
    """
    # TODO: implement — currently a placeholder that always passes.
    pass
[ "async def test_webhook_endpoint_generates_telegram_command_event(\n hass: HomeAssistant,\n webhook_platform,\n hass_client: ClientSessionGenerator,\n update_message_command,\n) -> None:\n client = await hass_client()\n events = async_capture_events(hass, \"telegram_command\")\n\n response = aw...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if specified operation is allowed on the resource.
def _operation_allowed(self, headers_dict, operation):
    """Check whether the specified operation is allowed on the resource.

    headers_dict: response headers; the 'allow' entry lists the permitted
    operations.  Returns True only when the entry exists and contains the
    operation.
    """
    # dict.get collapses the original nested membership checks; an absent
    # 'allow' header means nothing is allowed.
    return operation in headers_dict.get('allow', ())
[ "def supports_operation(self, operation: str) -> bool:\n return operation in OPERATION_SUPPORT_BY_TYPE[self.backing_type]", "def validate_operation(self, operation: 'cirq.Operation') -> None:", "def is_valid_operation(self):\n\n if self.operation_value < 100:\n return True\n else...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse the ExtendedError object and returns the message. Build a list of decoded messages from the extended_error using the message registries. An ExtendedError JSON object is a response from the server with its own schema. This function knows how to parse the ExtendedError object and, using any loaded message registries, render an array of plain-language strings that represent the response.
def _render_extended_error_message_list(self, extended_error): messages = [] if isinstance(extended_error, dict): if ('Type' in extended_error and extended_error['Type'].startswith('ExtendedError.')): for msg in extended_error['Messages']: message_id = msg['MessageID'] x = message_id.split('.') registry = x[0] msgkey = x[len(x) - 1] # if the correct message registry is loaded, # do string resolution if (registry in self.message_registries and msgkey in self.message_registries[registry]['Messages']): rmsgs = self.message_registries[registry]['Messages'] msg_dict = rmsgs[msgkey] msg_str = message_id + ': ' + msg_dict['Message'] for argn in range(0, msg_dict['NumberOfArgs']): subst = '%' + str(argn+1) m = str(msg['MessageArgs'][argn]) msg_str = msg_str.replace(subst, m) if ('Resolution' in msg_dict and msg_dict['Resolution'] != 'None'): msg_str += ' ' + msg_dict['Resolution'] messages.append(msg_str) else: # no message registry, simply return the msg object # in string form messages.append(str(message_id)) return messages
[ "def _get_extended_error(self, extended_error):\n return self._render_extended_error_message_list(extended_error)", "def _get_resp_body_errors(self):\n\n if self._resp_body_errors and len(self._resp_body_errors) > 0:\n return self._resp_body_errors\n\n errors = []\n warnings...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the list of decoded messages from the extended_error.
def _get_extended_error(self, extended_error): return self._render_extended_error_message_list(extended_error)
[ "def _render_extended_error_message_list(self, extended_error):\n messages = []\n if isinstance(extended_error, dict):\n if ('Type' in extended_error and\n extended_error['Type'].startswith('ExtendedError.')):\n for msg in extended_error['Messages']:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the PCI devices.
def _get_pci_devices(self): system = self._get_host_details() if ('links' in system['Oem']['Hp'] and 'PCIDevices' in system['Oem']['Hp']['links']): # Get the PCI URI and Settings pci_uri = system['Oem']['Hp']['links']['PCIDevices']['href'] status, headers, pci_device_list = self._rest_get(pci_uri) if status >= 300: msg = self._get_extended_error(pci_device_list) raise exception.IloError(msg) return pci_device_list else: msg = ('links/PCIDevices section in ComputerSystem/Oem/Hp' ' does not exist') raise exception.IloCommandNotSupportedError(msg)
[ "def test_get_pci_device_list(self):\n pass", "def get_devices():\n devices = []\n for device_id in range(pm.lib.Pm_CountDevices()):\n devices.append(DeviceInfo(device_id))\n\n return devices", "def get_all_devices(self):\n\t\treturn handle_to_object(call_sdk_function('PrlVmCfg_GetAllDevi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the BIOS settings resource.
def _get_bios_settings_resource(self, data): try: bios_settings_uri = data['links']['Settings']['href'] except KeyError: msg = ('BIOS Settings resource not found.') raise exception.IloError(msg) status, headers, bios_settings = self._rest_get(bios_settings_uri) if status != 200: msg = self._get_extended_error(bios_settings) raise exception.IloError(msg) return headers, bios_settings_uri, bios_settings
[ "def get_system_bios_settings( context, bios, system_id ):\n\n if \"SettingsObject\" in bios.dict[\"@Redfish.Settings\"]:\n bios_settings = context.get( bios.dict[\"@Redfish.Settings\"][\"SettingsObject\"][\"@odata.id\"] )\n else:\n if config.__workarounds__:\n warnings.warn( \"System...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if the PATCH Operation is allowed on the resource.
def _validate_if_patch_supported(self, headers, uri): if not self._operation_allowed(headers, 'PATCH'): msg = ('PATCH Operation not supported on the resource ' '"%s"' % uri) raise exception.IloError(msg)
[ "def test_PATCH(self):\n if not self.url:\n return\n response = self.client.patch(self.url, {}, format='json')\n self.assertIn(response.status_code, [status.HTTP_405_METHOD_NOT_ALLOWED,\n status.HTTP_401_UNAUTHORIZED])", "def test_client_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieves bios settings of the server.
def _get_bios_setting(self, bios_property): headers, bios_uri, bios_settings = self._check_bios_resource([ bios_property]) return bios_settings[bios_property]
[ "def get_system_bios_settings( context, bios, system_id ):\n\n if \"SettingsObject\" in bios.dict[\"@Redfish.Settings\"]:\n bios_settings = context.get( bios.dict[\"@Redfish.Settings\"][\"SettingsObject\"][\"@odata.id\"] )\n else:\n if config.__workarounds__:\n warnings.warn( \"System...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the iscsi settings resoure.
def _get_iscsi_settings_resource(self, data): try: iscsi_settings_uri = data['links']['Settings']['href'] except KeyError: msg = ('iscsi settings resource not found.') raise exception.IloCommandNotSupportedError(msg) status, headers, iscsi_settings = self._rest_get(iscsi_settings_uri) if status != 200: msg = self._get_extended_error(iscsi_settings) raise exception.IloError(msg) return headers, iscsi_settings_uri, iscsi_settings
[ "def get_settings(self):\n return self.request({\n \"path\": \"/\" + UUID + \"/setting\"\n })", "def settings(self):\n if self._settings is None:\n self._settings = ServerSettings(\"%s/settings\" % self._href, self.rsapi)\n return self._settings", "def get_settings(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the Boot resource like BootSources.
def _get_bios_boot_resource(self, data): try: boot_uri = data['links']['Boot']['href'] except KeyError: msg = ('Boot resource not found.') raise exception.IloCommandNotSupportedError(msg) status, headers, boot_settings = self._rest_get(boot_uri) if status != 200: msg = self._get_extended_error(boot_settings) raise exception.IloError(msg) return boot_settings
[ "def scan_source_boot_loader_configuration():\n\n boot_loader_configuration = SourceBootLoaderConfiguration(\n entries=scan_boot_entries()\n )\n\n api.produce(boot_loader_configuration)", "def _get_persistent_boot_devices(self):\n # Check if the BIOS resource if exists.\n headers_bio...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the Mappings resource.
def _get_bios_mappings_resource(self, data): try: map_uri = data['links']['Mappings']['href'] except KeyError: msg = ('Mappings resource not found.') raise exception.IloCommandNotSupportedError(msg) status, headers, map_settings = self._rest_get(map_uri) if status != 200: msg = self._get_extended_error(map_settings) raise exception.IloError(msg) return map_settings
[ "def get_mapping(self, ksf: str) -> InfoResMapping:\n irm = self.InfoResMapping(self, ksf)\n return irm", "def MappingApi(self):\n return self.__MappingApi", "def mappings(self):\n return self._mappings", "def get_mapping(self):\n if self.role:\n return self.role....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if patch is supported on iscsi.
def _check_iscsi_rest_patch_allowed(self): headers, bios_uri, bios_settings = self._check_bios_resource() # Check if the bios resource exists. if('links' in bios_settings and 'iScsi' in bios_settings['links']): iscsi_uri = bios_settings['links']['iScsi']['href'] status, headers, settings = self._rest_get(iscsi_uri) if status != 200: msg = self._get_extended_error(settings) raise exception.IloError(msg) if not self._operation_allowed(headers, 'PATCH'): headers, iscsi_uri, settings = ( self._get_iscsi_settings_resource(settings)) self._validate_if_patch_supported(headers, iscsi_uri) return iscsi_uri else: msg = ('"links/iScsi" section in bios' ' does not exist') raise exception.IloCommandNotSupportedError(msg)
[ "def _validate_if_patch_supported(self, headers, uri):\n if not self._operation_allowed(headers, 'PATCH'):\n msg = ('PATCH Operation not supported on the resource '\n '\"%s\"' % uri)\n raise exception.IloError(msg)", "def _check_supported(kernel_info):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Change secure boot settings on the server.
def _change_secure_boot_settings(self, property, value): system = self._get_host_details() # find the BIOS URI if ('links' not in system['Oem']['Hp'] or 'SecureBoot' not in system['Oem']['Hp']['links']): msg = (' "SecureBoot" resource or feature is not ' 'supported on this system') raise exception.IloCommandNotSupportedError(msg) secure_boot_uri = system['Oem']['Hp']['links']['SecureBoot']['href'] # Change the property required new_secure_boot_settings = {} new_secure_boot_settings[property] = value # perform the patch status, headers, response = self._rest_patch( secure_boot_uri, None, new_secure_boot_settings) if status >= 300: msg = self._get_extended_error(response) raise exception.IloError(msg) # Change the bios setting as a workaround to enable secure boot # Can be removed when fixed for Gen9 snap2 val = self._get_bios_setting('CustomPostMessage') val = val.rstrip() if val.endswith(" ") else val+" " self._change_bios_setting({'CustomPostMessage': val})
[ "def set_secure_boot_mode(self, secure_boot_enable):\n sushy_system = self._get_sushy_system()\n try:\n sushy_system.secure_boot.enable_secure_boot(secure_boot_enable)\n except exception.InvalidInputError as e:\n msg = (self._('Invalid input. Error %(error)s')\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if the system is in uefi boot mode.
def _is_boot_mode_uefi(self): boot_mode = self.get_current_boot_mode() if boot_mode == 'UEFI': return True else: return False
[ "def is_bootable(self):\n return self.bootable_flag == 0x80", "def get_boot_mode():\n boot_mode = 'Legacy'\n try:\n reg_key = winreg.OpenKey(\n winreg.HKEY_LOCAL_MACHINE, r'System\\CurrentControlSet\\Control')\n reg_value = winreg.QueryValueEx(reg_key, 'PEFirmwareType')[0]\n if reg_value ==...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the status of secure boot.
def get_secure_boot_mode(self): system = self._get_host_details() if ('links' not in system['Oem']['Hp'] or 'SecureBoot' not in system['Oem']['Hp']['links']): msg = ('"SecureBoot" resource or feature is not supported' ' on this system') raise exception.IloCommandNotSupportedError(msg) secure_boot_uri = system['Oem']['Hp']['links']['SecureBoot']['href'] # get the Secure Boot object status, headers, secure_boot_settings = self._rest_get(secure_boot_uri) if status >= 300: msg = self._get_extended_error(secure_boot_settings) raise exception.IloError(msg) return secure_boot_settings['SecureBootCurrentState']
[ "def get_secure_boot_mode(self):\n sushy_system = self._get_sushy_system()\n try:\n secure_boot_enabled = GET_SECUREBOOT_CURRENT_BOOT_MAP.get(\n sushy_system.secure_boot.current_boot)\n except sushy.exceptions.SushyError as e:\n msg = (self._('The Redfish co...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reset secure boot keys to manufacturing defaults.
def reset_secure_boot_keys(self): if self._is_boot_mode_uefi(): self._change_secure_boot_settings('ResetToDefaultKeys', True) else: msg = ('System is not in UEFI boot mode. "SecureBoot" related ' 'resources cannot be changed.') raise exception.IloCommandNotSupportedInBiosError(msg)
[ "def reset(ctx):\n\n controller = ctx.obj['controller']\n click.echo('Resetting OATH data...')\n old_id = controller.id\n controller.reset()\n\n settings = ctx.obj['settings']\n keys = settings.setdefault('keys', {})\n if old_id in keys:\n del keys[old_id]\n settings.write()\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Perform requested power operation.
def _perform_power_op(self, oper): power_settings = {"Action": "Reset", "ResetType": oper} systems_uri = "/rest/v1/Systems/1" status, headers, response = self._rest_post(systems_uri, None, power_settings) if status >= 300: msg = self._get_extended_error(response) raise exception.IloError(msg)
[ "def power_on(self):\n print(\"Cpu.power_on()\")", "def get_setPower(self):\n self.read(\":POW?\")", "def powerIP(self,power):\n np.power(self.t, power, out=self.t)\n return self", "def calculate_power(self):\n # Convert Command Speed, Current Speed, and Setpoint Speed into m/s\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Simulates a physical press of the server power button.
def press_pwr_btn(self): self._press_pwr_btn()
[ "def press_power_button():\r\n Android.send_adb_command(command='adb shell input keyevent 26')\r\n logging.info('ADB command to press power button has been sent')", "def press_pwr_btn(self):\n data = self._execute_command('PRESS_PWR_BTN', 'SERVER_INFO', 'write')\n return data", "def ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Request the http boot url from system in uefi boot mode.
def get_http_boot_url(self): if(self._is_boot_mode_uefi() is True): return self._get_bios_setting('UefiShellStartupUrl') else: msg = 'get_http_boot_url is not supported in the BIOS boot mode' raise exception.IloCommandNotSupportedInBiosError(msg)
[ "def set_http_boot_url(self, url):\n if(self._is_boot_mode_uefi() is True):\n self._change_bios_setting({'UefiShellStartupUrl': url})\n else:\n msg = 'set_http_boot_url is not supported in the BIOS boot mode'\n raise exception.IloCommandNotSupportedInBiosError(msg)", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set url to the UefiShellStartupUrl to the system in uefi boot mode.
def set_http_boot_url(self, url): if(self._is_boot_mode_uefi() is True): self._change_bios_setting({'UefiShellStartupUrl': url}) else: msg = 'set_http_boot_url is not supported in the BIOS boot mode' raise exception.IloCommandNotSupportedInBiosError(msg)
[ "def set_http_boot_uri(self, url):\n try:\n sushy_system = self._get_sushy_system()\n sushy_system.http_boot_uri.set_http_boot_uri(url)\n except sushy.exceptions.SushyError as e:\n msg = (self._('Unable to set HTTP Boot URI. Error '\n '%(error)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set iscsi details of the system in uefi boot mode. The iSCSI initiator is identified by the MAC provided. The initiator system is set with the target details like IQN, LUN, IP, Port etc.
def set_iscsi_boot_info(self, mac, target_name, lun, ip_address, port='3260', auth_method=None, username=None, password=None): if(self._is_boot_mode_uefi() is True): iscsi_info = {} iscsi_info['iSCSITargetName'] = target_name iscsi_info['iSCSIBootLUN'] = lun iscsi_info['iSCSITargetIpAddress'] = ip_address iscsi_info['iSCSITargetTcpPort'] = int(port) iscsi_info['iSCSITargetInfoViaDHCP'] = False iscsi_info['iSCSIBootEnable'] = 'Enabled' if (auth_method == 'CHAP'): iscsi_info['iSCSIAuthenticationMethod'] = 'Chap' iscsi_info['iSCSIChapUsername'] = username iscsi_info['iSCSIChapSecret'] = password self._change_iscsi_settings(mac.upper(), iscsi_info) else: msg = 'iscsi boot is not supported in the BIOS boot mode' raise exception.IloCommandNotSupportedInBiosError(msg)
[ "def unset_iscsi_boot_info(self, mac):\n if(self._is_boot_mode_uefi() is True):\n iscsi_info = {'iSCSIBootEnable': 'Disabled'}\n self._change_iscsi_settings(mac.upper(), iscsi_info)\n else:\n msg = 'iscsi boot is not supported in the BIOS boot mode'\n raise ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Disable iscsi boot option in uefi boot mode.
def unset_iscsi_boot_info(self, mac): if(self._is_boot_mode_uefi() is True): iscsi_info = {'iSCSIBootEnable': 'Disabled'} self._change_iscsi_settings(mac.upper(), iscsi_info) else: msg = 'iscsi boot is not supported in the BIOS boot mode' raise exception.IloCommandNotSupportedInBiosError(msg)
[ "def disable_start_on_boot(host,guest):\n run(['xec-vm', '-n', guest, 'set', 'start-on-boot', 'false'], host=host)", "def reset_secure_boot_keys(self):\n if self._is_boot_mode_uefi():\n self._change_secure_boot_settings('ResetToDefaultKeys', True)\n else:\n msg = ('System is...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieves the current boot mode of the server.
def get_current_boot_mode(self): boot_mode = self._get_bios_setting('BootMode') if boot_mode == 'LegacyBios': boot_mode = 'legacy' return boot_mode.upper()
[ "def get_current_boot_mode(self):\n data = self._execute_command(\n 'GET_CURRENT_BOOT_MODE', 'SERVER_INFO', 'read')\n return data['GET_CURRENT_BOOT_MODE']['BOOT_MODE']['VALUE']", "def get_supported_boot_mode(self):\n data = self._execute_command(\n 'GET_SUPPORTED_BOOT_MO...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieves the pending boot mode of the server. Gets the boot mode to be set on next reset.
def get_pending_boot_mode(self): headers, uri, bios_settings = self._check_bios_resource(['BootMode']) _, _, settings = self._get_bios_settings_resource(bios_settings) boot_mode = settings.get('BootMode') if boot_mode == 'LegacyBios': boot_mode = 'legacy' return boot_mode.upper()
[ "def get_pending_boot_mode(self):\n data = self._execute_command(\n 'GET_PENDING_BOOT_MODE', 'SERVER_INFO', 'read')\n return data['GET_PENDING_BOOT_MODE']['BOOT_MODE']['VALUE']", "def get_current_boot_mode(self):\n data = self._execute_command(\n 'GET_CURRENT_BOOT_MODE',...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the boot mode of the system for next boot.
def set_pending_boot_mode(self, boot_mode): boot_mode = boot_mode.lower() if boot_mode not in ['uefi', 'legacy']: msg = 'Invalid Boot mode specified' raise exception.IloInvalidInputError(msg) boot_properties = {'BootMode': boot_mode} if boot_mode == 'legacy': boot_properties['BootMode'] = 'LegacyBios' else: # If Boot Mode is 'Uefi' set the UEFIOptimizedBoot first. boot_properties['UefiOptimizedBoot'] = "Enabled" # Change the Boot Mode self._change_bios_setting(boot_properties)
[ "def set_boot_mode(self, task, mode):\n raise exception.UnsupportedDriverExtension(\n driver=task.node.driver, extension='set_boot_mode')", "def set_bootloader_mode(self, mode):\n self.check_validity()\n\n mode = int(mode)\n\n return self.ipcon.send_request(self, BrickletInd...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Resets the iLO password.
def reset_ilo_credential(self, password): acc_uri = '/rest/v1/AccountService/Accounts' for status, hds, account, memberuri in self._get_collection(acc_uri): if account['UserName'] == self.login: mod_user = {} mod_user['Password'] = password status, headers, response = self._rest_patch(memberuri, None, mod_user) if status != 200: msg = self._get_extended_error(response) raise exception.IloError(msg) return msg = "iLO Account with specified username is not found." raise exception.IloError(msg)
[ "def reset_ilo_credential(self, password):\n\n dic = {'USER_LOGIN': self.login}\n root = self._create_dynamic_xml(\n 'MOD_USER', 'USER_INFO', 'write', dic)\n\n element = root.find('LOGIN/USER_INFO/MOD_USER')\n etree.SubElement(element, 'PASSWORD', VALUE=password)\n d = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Resets the BIOS settings to default values.
def reset_bios_to_default(self): # Check if the BIOS resource if exists. headers_bios, bios_uri, bios_settings = self._check_bios_resource() # Get the BaseConfig resource. try: base_config_uri = bios_settings['links']['BaseConfigs']['href'] except KeyError: msg = ("BaseConfigs resource not found. Couldn't apply the BIOS " "Settings.") raise exception.IloCommandNotSupportedError(msg) # Check if BIOS resource supports patch, else get the settings if not self._operation_allowed(headers_bios, 'PATCH'): headers, bios_uri, _ = self._get_bios_settings_resource( bios_settings) self._validate_if_patch_supported(headers, bios_uri) status, headers, config = self._rest_get(base_config_uri) if status != 200: msg = self._get_extended_error(config) raise exception.IloError(msg) new_bios_settings = {} for cfg in config['BaseConfigs']: default_settings = cfg.get('default', None) if default_settings is not None: new_bios_settings = default_settings break else: msg = ("Default Settings not found in 'BaseConfigs' resource.") raise exception.IloCommandNotSupportedError(msg) request_headers = self._get_bios_hash_password(self.bios_password) status, headers, response = self._rest_patch(bios_uri, request_headers, new_bios_settings) if status >= 300: msg = self._get_extended_error(response) raise exception.IloError(msg)
[ "def set_bios_default(self):\n result = {}\n try:\n system_url = self._find_system_resource()\n result = self._get_url(system_url + '/Bios')\n if result['ret'] == False:\n return result\n\n reset_bios_url = result['entries']['Actions']['#Bios....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the ilo firmware version for server capabilities
def _get_ilo_firmware_version(self): manager, reset_uri = self._get_ilo_details() ilo_firmware_version = manager['Firmware']['Current']['VersionString'] return {'ilo_firmware_version': ilo_firmware_version}
[ "def firmware_version(self):\n return self._get_system_status()[\"firmware\"]", "def firmware_version(self):\n try:\n return self._fw_version\n except AttributeError:\n self._fw_version = self.conn.cmd( # pylint: disable-msg=W0201\n \"lsb_release -r | awk...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return sriov enabled or not
def _is_sriov_enabled(self): return (self._get_bios_setting('Sriov') == 'Enabled')
[ "def swo_enabled(self):\n return self._swo_enabled", "def _get_redist_enabled(self):\n return self.__redist_enabled", "def ms_get_rstp_enabled(self):\n self.open_route('/configure/switch_settings', \"Switch\")\n dropdown_value = page_utils.get_dropdown_value(\n self.get_page(),\n v...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the given virtual media device status and device URI
def _get_vm_device_status(self, device='FLOPPY'): valid_devices = {'FLOPPY': 'floppy', 'CDROM': 'cd'} # Check if the input is valid if device not in valid_devices: raise exception.IloInvalidInputError( "Invalid device. Valid devices: FLOPPY or CDROM.") manager, uri = self._get_ilo_details() try: vmedia_uri = manager['links']['VirtualMedia']['href'] except KeyError: msg = ('"VirtualMedia" section in Manager/links does not exist') raise exception.IloCommandNotSupportedError(msg) for status, hds, vmed, memberuri in self._get_collection(vmedia_uri): status, headers, response = self._rest_get(memberuri) if status != 200: msg = self._get_extended_error(response) raise exception.IloError(msg) if (valid_devices[device] in [item.lower() for item in response['MediaTypes']]): vm_device_uri = response['links']['self']['href'] return response, vm_device_uri # Requested device not found msg = ('Virtualmedia device "' + device + '" is not' ' found on this system.') raise exception.IloError(msg)
[ "def get_vmedia_device_uri(self, device):\n\n try:\n sushy_system = self._get_sushy_system()\n uri = utils.get_subresource_path_by(sushy_system, 'VirtualMedia')\n resp = sushy_system._conn.get(uri)\n vmedia_resp = json.loads(resp.text)\n for val in vmedi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the Virtual Media drive status It sets the boot option for virtual media device.
def set_vm_status(self, device='FLOPPY', boot_option='BOOT_ONCE', write_protect='YES'): # CONNECT is a RIBCL call. There is no such property to set in RIS. if boot_option == 'CONNECT': return boot_option_map = {'BOOT_ONCE': True, 'BOOT_ALWAYS': False, 'NO_BOOT': False } if boot_option not in boot_option_map: msg = ('Virtualmedia boot option "' + boot_option + '" is ' 'invalid.') raise exception.IloInvalidInputError(msg) response, vm_device_uri = self._get_vm_device_status(device) # Update required property vm_settings = {} vm_settings['Oem'] = ( {'Hp': {'BootOnNextServerReset': boot_option_map[boot_option]}}) # perform the patch operation status, headers, response = self._rest_patch( vm_device_uri, None, vm_settings) if status >= 300: msg = self._get_extended_error(response) raise exception.IloError(msg)
[ "def set_volume_bootable_status(self, volume, bootable):\n volume = self._get_resource(_volume.Volume, volume)\n volume.set_bootable_status(self, bootable)", "def set_vm_status(self, device='FLOPPY',\n boot_option='BOOT_ONCE', write_protect='YES'):\n dic = {'DEVICE': devi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Notifies iLO of the location of a virtual media diskette image.
def insert_virtual_media(self, url, device='FLOPPY'): response, vm_device_uri = self._get_vm_device_status(device) # Eject media if there is one. RIBCL was tolerant enough to overwrite # existing media, RIS is not. This check is to take care of that # assumption. if response.get('Inserted', False): self.eject_virtual_media(device) # Update required property vm_settings = {} vm_settings['Image'] = url # Perform the patch operation status, headers, response = self._rest_patch( vm_device_uri, None, vm_settings) if status >= 300: msg = self._get_extended_error(response) raise exception.IloError(msg)
[ "def insert_virtual_media(self, url, device='FLOPPY'):\n dic = {\n 'DEVICE': device.upper(),\n 'IMAGE_URL': url,\n }\n data = self._execute_command(\n 'INSERT_VIRTUAL_MEDIA', 'RIB_INFO', 'write', dic)\n return data", "def update_volume_after_attached_to...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ejects the Virtual Media image if one is inserted.
def eject_virtual_media(self, device='FLOPPY'): response, vm_device_uri = self._get_vm_device_status(device) # Check if virtual media is connected. if response.get('Inserted') is False: return # Update required property vm_settings = {} vm_settings['Image'] = None # perform the patch operation status, headers, response = self._rest_patch( vm_device_uri, None, vm_settings) if status >= 300: msg = self._get_extended_error(response) raise exception.IloError(msg)
[ "def _handle_removed_media(self):\r\n if self.has_media():\r\n try:\r\n image = str(self.image)\r\n os.remove(image)\r\n except OSError:\r\n raise('Failure trying to remove image from filesystem.')\r\n return True", "def eject_image(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get details of persistent boot devices, its order
def _get_persistent_boot_devices(self): # Check if the BIOS resource if exists. headers_bios, bios_uri, bios_settings = self._check_bios_resource() # Get the Boot resource. boot_settings = self._get_bios_boot_resource(bios_settings) # Get the BootSources resource try: boot_sources = boot_settings['BootSources'] except KeyError: msg = ("BootSources resource not found.") raise exception.IloError(msg) try: boot_order = boot_settings['PersistentBootConfigOrder'] except KeyError: msg = ("PersistentBootConfigOrder resource not found.") raise exception.IloCommandNotSupportedError(msg) return boot_sources, boot_order
[ "def list_devices():\r\n return sd.query_devices()", "def getbootinfo(self):\n self.mount()\n kernel = None\n inits = []\n for line in self.xlist(\"get-bootinfo\", IBASE)[1]:\n if line.startswith('+++'):\n kernel = line.split()[1]\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get current persistent boot device set for the host
def get_persistent_boot_device(self): system = self._get_host_details() try: # Return boot device if it is persistent. if system['Boot']['BootSourceOverrideEnabled'] == 'Continuous': device = system['Boot']['BootSourceOverrideTarget'] if device in DEVICE_RIS_TO_COMMON: return DEVICE_RIS_TO_COMMON[device] return device except KeyError as e: msg = "get_persistent_boot_device failed with the KeyError:%s" raise exception.IloError((msg) % e) # Check if we are in BIOS boot mode. # There is no resource to fetch boot device order for BIOS boot mode if not self._is_boot_mode_uefi(): return None # Get persistent boot device order for UEFI boot_sources, boot_devices = self._get_persistent_boot_devices() boot_string = "" try: for source in boot_sources: if (source["StructuredBootString"] == boot_devices[0]): boot_string = source["BootString"] break except KeyError as e: msg = "get_persistent_boot_device failed with the KeyError:%s" raise exception.IloError((msg) % e) if 'HP iLO Virtual USB CD' in boot_string: return 'CDROM' elif ('NIC' in boot_string or 'PXE' in boot_string or "iSCSI" in boot_string): return 'NETWORK' elif common.isDisk(boot_string): return 'HDD' else: return None
[ "def _get_persistent_boot_devices(self):\n # Check if the BIOS resource if exists.\n headers_bios, bios_uri, bios_settings = self._check_bios_resource()\n\n # Get the Boot resource.\n boot_settings = self._get_bios_boot_resource(bios_settings)\n\n # Get the BootSources resource\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieves the current setting for the one time boot.
def get_one_time_boot(self): system = self._get_host_details() try: if system['Boot']['BootSourceOverrideEnabled'] == 'Once': device = system['Boot']['BootSourceOverrideTarget'] if device in DEVICE_RIS_TO_COMMON: return DEVICE_RIS_TO_COMMON[device] return device else: # value returned by RIBCL if one-time boot setting are absent return 'Normal' except KeyError as e: msg = "get_one_time_boot failed with the KeyError:%s" raise exception.IloError((msg) % e)
[ "def get_bootvar(self):\n module = 'bootimage/oper'\n method = 'GET'\n response = self.axapi_call(module, method)\n bootdefault = response.json()['bootimage']['oper']['hd-default']\n print(self.device + ' The device is set to boot from: ' + bootdefault + ' in the future')\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the firmware update service uri.
def _get_firmware_update_service_resource(self): manager, uri = self._get_ilo_details() try: fw_uri = manager['Oem']['Hp']['links']['UpdateService']['href'] except KeyError: msg = ("Firmware Update Service resource not found.") raise exception.IloCommandNotSupportedError(msg) return fw_uri
[ "def _get_uri(plex_server):\n return plex_server.url(\n \"/:/websockets/notifications\", includeToken=True\n ).replace(\"http\", \"ws\")", "def get_http_boot_uri(self):\n try:\n sushy_system = self._get_sushy_system()\n http_boot_uri = sushy_system.http_boot_u...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates the given firmware on the server for the given component.
def update_firmware(self, file_url, component_type):
    """Updates the given firmware on the server for the given component.

    :param file_url: URL of the firmware image to flash.
    :param component_type: type of the component being updated; not
        referenced in this body — presumably kept for interface
        compatibility (TODO confirm against callers).
    :raises: IloError if the POST fails or the update ends in ERROR state.
    """
    fw_update_uri = self._get_firmware_update_service_resource()
    action_data = {
        'Action': 'InstallFromURI',
        'FirmwareURI': file_url,
    }

    # perform the POST
    LOG.debug(self._('Flashing firmware file: %s ...'), file_url)
    status, headers, response = self._rest_post(
        fw_update_uri, None, action_data)
    if status != 200:
        msg = self._get_extended_error(response)
        raise exception.IloError(msg)

    # wait till the firmware update completes.
    common.wait_for_ris_firmware_update_to_complete(self)

    try:
        state, percent = self.get_firmware_update_progress()
    except exception.IloError:
        # Best effort only: if progress can't be read, log and return.
        msg = 'Status of firmware update not known'
        LOG.debug(self._(msg))  # noqa
        return

    if state == "ERROR":
        msg = 'Unable to update firmware'
        LOG.debug(self._(msg))  # noqa
        raise exception.IloError(msg)
    elif state == "UNKNOWN":
        msg = 'Status of firmware update not known'
        LOG.debug(self._(msg))  # noqa
    else:  # "COMPLETED" | "IDLE"
        LOG.info(self._('Flashing firmware file: %s ... done'), file_url)
[ "def update_firmware(self):\n self.execute_command(CMD_UPDATE_FIRMWARE)", "def update_firmware(self) -> str:", "def update_firmware(self, node, port):\n return hpsum_controller.update_firmware(node)", "def updateFirmware(self, device=None, version=\"latest\"):\n if device in ('sender', 'm...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the progress of the firmware update.
def get_firmware_update_progress(self):
    """Get the progress of the firmware update.

    :returns: tuple (state, progress_percent); ("UNKNOWN", "UNKNOWN")
        when the firmware update service resource cannot be retrieved.
    :raises: IloError if the GET on the update service URI fails.
    """
    try:
        fw_update_uri = self._get_firmware_update_service_resource()
    except exception.IloError as e:
        LOG.debug(self._('Progress of firmware update not known: %s'),
                  str(e))
        return "UNKNOWN", "UNKNOWN"

    # perform the GET
    status, headers, response = self._rest_get(fw_update_uri)
    if status != 200:
        msg = self._get_extended_error(response)
        raise exception.IloError(msg)

    fw_update_state = response.get('State')
    fw_update_progress_percent = response.get('ProgressPercent')
    # NOTE(review): the %d format assumes ProgressPercent is present and
    # numeric — a missing key would make this .get() return None and the
    # log call fail; confirm the service always reports it.
    LOG.debug(self._('Flashing firmware file ... in progress %d%%'),
              fw_update_progress_percent)
    return fw_update_state, fw_update_progress_percent
[ "def get_firmware_update_status(self):\n\n response = self.execute_command(CMD_GET_FIRMWARE_UPDATE_STATUS)[0]\n inprogress = (response & 0x80) == 0x80\n return {\n \"inprogress\": inprogress,\n \"error\": response & 0x7f,\n }", "def progress(self):\n return...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieves whether the server is TPM capable.
def _get_tpm_capability(self): tpm_values = {"NotPresent": False, "PresentDisabled": True, "PresentEnabled": True} try: tpm_state = self._get_bios_setting('TpmState') except exception.IloCommandNotSupportedError: tpm_state = "NotPresent" tpm_result = tpm_values[tpm_state] return tpm_result
[ "def is_available():", "def has_tpu():\n def _check():\n with session.Session() as sess:\n sess.run(tpu.initialize_system())\n sess.run(tpu.shutdown_system())\n\n try:\n _check()\n return True\n except errors.OpError as _:\n return False", "def is_vtd_supported(self):\n\t\treturn bool(c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get cpu virtualization status.
def _get_cpu_virtualization(self): try: cpu_vt = self._get_bios_setting('ProcVirtualization') except exception.IloCommandNotSupportedError: return False if cpu_vt == 'Enabled': vt_status = True else: vt_status = False return vt_status
[ "def _vm_get_cpu(self, vm_instance):\n pass", "def vCPU_calculator(self):\r\n\r\n return self.vm_obj.config.hardware.numCPU", "def getVirtualStatus(self,node,vmid):\n data = self.connect('get','nodes/%s/qemu/%s/status/current' % (node,vmid),None)\n return data", "def get_cpu_mode(s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get status of NVDIMM_N.
def _get_nvdimm_n_status(self): try: nvdimm_n_status = self._get_bios_setting('NvDimmNMemFunctionality') if nvdimm_n_status == 'Enabled': nvn_status = True else: nvn_status = False except exception.IloCommandNotSupportedError: nvn_status = False return nvn_status
[ "def get_thinet_status_n(self):\r\n return self.ask(\"MOTTE:STATUS?\",\"int\")", "def get_brf_status_n(self):\r\n return self.ask(\"MOTBI:STATUS?\",\"int\")", "def avail_status(self):\n return self._nfvi_image.avail_status # assume one-to-one mapping", "def get_status(self):\n return ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determina numere divizibile cu k dintro lista
def get_longest_div_k(lst, k):
    """Return the elements of *lst* that are divisible by *k*, in order.

    :param lst: list of integers to filter.
    :param k: divisor to test against.
    :returns: new list with only the multiples of k.
    """
    return [value for value in lst if value % k == 0]
[ "def divides(k, n):\n return n % k == 0", "def subarraysDivByK(A: List[int], K: int) -> int:\n from collections import defaultdict\n s = defaultdict(int)\n s[0] += 1\n res, x = 0, 0\n for a in A:\n x = (x + a) % K\n res += s[x]\n s[x] += 1\n return res", "def getDivisor...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Linac phasing Note that these overlays override individual klystron phases.
def bmad_linac_phasing_lines(epics):
    """Linac phasing.

    Note that these overlays override individual klystron phases.

    :param epics: EPICS client (or proxy) providing ``caget``.
    :returns: list of Bmad overlay lines for the overall linac phasing.
    """
    # Read the two phase PVs in the same order the lines are emitted.
    l2_phase = epics.caget('SIOC:SYS0:ML00:CALC204')
    l3_phase = epics.caget('SIOC:SYS0:ML00:AO499')
    return [
        '! Linac overall phasing',
        'O_L1[phase_deg] = 0 ! K21_1 sets this directly. This is a delta on top of that.',
        'O_L2[phase_deg] = ' + str(l2_phase),
        'O_L3[phase_deg] = ' + str(l3_phase),
    ]
[ "def set_display_from_lines(self):\n y = 1\n maxlin = CA_World.ca_display_size - 1\n limy = len(self.ca_lines) + maxlin\n for i in self.ca_lines:\n x = 1\n if limy >= maxlin:\n if SimEngine.gui_get('init') == \"Right\": # Right\n l...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes linac phasing lines to a Bmad file. Requires epics (or proxy object).
def write_bmad_linac_phasing_lines(filePath='linac_settings.bmad', epics=None, verbose=False):
    """Writes linac phasing lines to a Bmad file.

    Requires epics (or a proxy object providing ``caget``).

    :param filePath: destination file path.
    :param epics: EPICS client passed through to bmad_linac_phasing_lines.
    :param verbose: when True, print the written file path.
    """
    lines = bmad_linac_phasing_lines(epics)
    with open(filePath, 'w') as out:
        for line in lines:
            out.write(line + '\n')
    if verbose:
        print('Written:', filePath)
[ "def write_tao_BC_and_LEM_lines(filePath='LEM_settings.tao', epics=None, verbose=False):\n lines = tao_BC_and_LEM_lines(epics)\n with open(filePath, 'w') as f:\n for l in lines:\n f.write(l+'\\n')\n if verbose:\n print('Written:', filePath)\n\n \n \n return lines",...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes tao LEM lines to a .tao file. Requires epics (or proxy object).
def write_tao_BC_and_LEM_lines(filePath='LEM_settings.tao', epics=None, verbose=False):
    """Writes tao LEM lines to a .tao file.

    Requires epics (or a proxy object).

    :param filePath: destination file path.
    :param epics: EPICS client passed through to tao_BC_and_LEM_lines.
    :param verbose: when True, print the written file path.
    :returns: the list of lines that were written.
    """
    lines = tao_BC_and_LEM_lines(epics)
    with open(filePath, 'w') as out:
        for line in lines:
            out.write(line + '\n')
    if verbose:
        print('Written:', filePath)
    return lines
[ "def write_bmad_linac_phasing_lines(filePath='linac_settings.bmad', epics=None, verbose=False):\n lines = bmad_linac_phasing_lines(epics)\n with open(filePath, 'w') as f:\n for l in lines:\n f.write(l+'\\n')\n if verbose:\n print('Written:', filePath)", "def write_ead(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method to get the credentials from ~/.mofplusrc
def credentials_from_rc(self):
    """Get the credentials from ~/.mofplusrc.

    The file holds the username on the first line and the password on the
    second; only the first whitespace-separated token of each line is used.

    :returns: tuple (username, password).
    """
    rc_path = os.environ["HOME"] + '/.mofplusrc'
    with open(rc_path, 'r') as rc_file:
        username = rc_file.readline().split()[0]
        password = rc_file.readline().split()[0]
    return username, password
[ "def load_credentials():\n import os\n from dotenv import load_dotenv\n load_dotenv()\n return (\n os.getenv('MARKLOGIC_URL'),\n os.getenv('MARKLOGIC_USERNAME'),\n os.getenv('MARKLOGIC_PASSWORD')\n )", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credent...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method to get the credentials from the command line
def credentials_from_cmd(self):
    """Get the credentials interactively from the command line.

    :returns: tuple (username, password) as entered by the user.

    NOTE(review): uses ``raw_input`` — Python 2 only; under Python 3 this
    raises NameError unless aliased to ``input``.
    """
    username = raw_input("Email:")
    # getpass hides the password while it is typed.
    pw = getpass.getpass()
    return username, pw
[ "def request_credentials_from_console():\n username = raw_input('Username: ')\n password = raw_input('Password: ')\n return username, password", "def credentials(args):\n\n if args.helper_command != 'get':\n # From api-credentials.txt:\n # For a `store` or `erase` operation, the helper's...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prints the MFP banner
def print_banner(self):
    """Prints the MFP ASCII-art banner to stdout (Python 2 print statement)."""
    print ":##::::'##::'#######::'########:::::::::::::::'###::::'########::'####:\n\
:###::'###:'##.... ##: ##.....::::'##::::::::'## ##::: ##.... ##:. ##::\n\
:####'####: ##:::: ##: ##::::::::: ##:::::::'##:. ##:: ##:::: ##:: ##::\n\
:## ### ##: ##:::: ##: ######:::'######::::'##:::. ##: ########::: ##::\n\
:##. #: ##: ##:::: ##: ##...::::.. ##.::::: #########: ##.....:::: ##::\n\
:##:.:: ##: ##:::: ##: ##::::::::: ##:::::: ##.... ##: ##::::::::: ##::\n\
:##:::: ##:. #######:: ##:::::::::..::::::: ##:::: ##: ##::::::::'####:\n\
:..:::::..:::.......:::..:::::::::::::::::::..:::::..::..:::::::::....:"
[ "def banner():\n\n print(\n r\"\"\"\n ____ ____ ____ ____\n/ \\ / \\ / \\ / \\\n| ################################# |\n\\__#_/ \\____/ \\____/ \\_#__/\n # _ _______ _____ #\n # (_) |_ __ \\|_ _| # -*- v 2.0 ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of all BBS in the db
def get_list_of_bbs(self):
    """Returns a list of all BBS in the db (delegates to the MFP client)."""
    bbs = self.mfp.get_list_of_bbs()
    return bbs
[ "def get_all(session):\n try:\n vbds = session.xenapi.VBD.get_all()\n vbd_list = []\n\n for vbd in vbds:\n vbd_list.append(VBD(session, vbd))\n\n return vbd_list\n except Exception as e:\n print(\"VBD.get_all Exception\", e)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the scaled topo file for a given id supercell id.
def get_scaledtopo(self, id):
    """Gets the scaled topo file for a given supercell id.

    :param id: supercell id, passed through to the MFP client.
    :returns: the lines returned by the MFP client.
    """
    return self.mfp.get_scaledtopo(id)
[ "def path(self, id):\n return self._inode_to_path.get(id)", "def serve_gridfs_file_for_id(self, id):\n return File(id).serve(self.request)", "def get_coord_file(cls, modelid):\n if not PDBMapSwiss._modelid2info:\n raise Exception(\"PDBMapSwiss.load_swiss_INDEX_JSON must be called...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the orients file for a given id supercell id.
def get_orients(self, id):
    """Gets the orients file for a given supercell id.

    :param id: supercell id, passed through to the MFP client.
    :returns: the lines returned by the MFP client.
    """
    return self.mfp.get_orients(id)
[ "def _get_orientations(self):\n for atom in self.invarioms:\n atom.get_orientation()", "def path(self, id):\n return self._inode_to_path.get(id)", "def id_to_index(self, cell_id):\n\n # TODO: Need __getitem__\n raise NotImplementedError", "def get_volume(self, id):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
替换传入的logger中的所有file_handler的stream。 替换方法为: 将stream使用的文件如filename, 更改为 filename.进程名 目的: 避免多个进程写同一个文件导致错误,已知错误有:切日志异常、日志会丢失、日志会混乱等
def trans_logger(logger):
    """Redirect every FileHandler of *logger* to a per-process file.

    Each handler's target is renamed from ``<filename>`` to
    ``<filename>.<process name>`` so that multiple processes never write
    to the same log file (per the original author, shared files caused
    rotation errors and lost/interleaved log lines).

    Non-Logger arguments are ignored.
    """
    if not isinstance(logger, logging.Logger):
        return
    for handler in logger.handlers:
        if isinstance(handler, logging.FileHandler):
            # Point the handler at a process-specific file name.
            handler.baseFilename = handler.baseFilename + '.' + multiprocessing.current_process().name
            old_stream = handler.stream
            if old_stream:
                # Flush pending records, then close the old stream even
                # if flushing raises.
                try:
                    old_stream.flush()
                finally:
                    if hasattr(old_stream, "close"):
                        old_stream.close()
            # Reopen the stream using the updated baseFilename.
            handler.stream = handler._open()
[ "def processTempLog(file_name):", "def logger_file(self, value):\n self.__logger_file = value\n if self.__logger_file:\n # If set logging file,\n # then add file handler and remove stream handler.\n self.logger_file_handler = logging.FileHandler(self.__logger_file)\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve the list of films in which a character appears
def getFilms(character):
    """Retrieve the list of films in which a character appears.

    :param character: dict whose 'films' entry is a list of film URLs.
    :returns: list of film titles, fetched over HTTP and memoized in the
        module-level ``cache`` keyed by the film's numeric id.
    """
    ret = []
    for film in character.get('films'):
        # The film id is the last path component of the URL.
        number = int(film.rstrip('/').rpartition('/')[2])
        if number not in cache:
            # Cache miss: fetch the film resource and store its title.
            response = requests.get(film)
            response = response.json()
            title = response.get('title')
            cache[number] = title
        ret.append(cache.get(number))
    return ret
[ "def castFilmography (movies, minAppearances):\n actors = {}\n for (k,v) in movies.items():\n for a in v[2:7]:\n actors[a] = actors.get(a, []) + [k]\n return sorted([ [k] + v for (k,v) in actors.items() if len(v) >= minAppearances ])", "def get_films():\n\n # TODO: Get films from DB\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Registers a backward hook on the embedding layer, used to save the gradients of the embeddings for use in get_gradients(). When there are multiple inputs (e.g., a passage and a question), the hook will be called multiple times. We append all the embedding gradients to a list.
def _register_embedding_gradient_hooks(self, embedding_gradients):
    """Registers a backward hook on the embedding layer to save the
    gradients of the embeddings for use in get_gradients().

    When there are multiple inputs (e.g., a passage and a question), the
    hook is called multiple times, so every gradient tensor is appended
    to *embedding_gradients*.

    :param embedding_gradients: list the hook appends gradient tensors to.
    :returns: list of registered hook handles (keep them to remove the
        hooks later).
    """
    def hook_layers(module, grad_in, grad_out):
        # grad_out[0] is the gradient w.r.t. the layer's output.
        embedding_gradients.append(grad_out[0])

    backward_hooks = []
    embedding_layer = self.get_embeddings_layer()
    backward_hooks.append(embedding_layer.register_backward_hook(hook_layers))
    return backward_hooks
[ "def _register_post_backward_hooks(self) -> None:\n if not torch.is_grad_enabled():\n return # don't register grad hooks if grad isn't enabled\n for p in self.full_params:\n if p.requires_grad:\n if hasattr(p, \"_shard_bwd_hook\"):\n continue\n # Register a hook on the first ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
some tokenizers don't have 'eos_token' and 'bos_token' attributes. Thus, we need some trick to get them.
def special_tokens(self, ):
    """Return the wrapped tokenizer's special tokens.

    Some tokenizers don't have 'eos_token' and 'bos_token' attributes;
    in that case they are recovered by building the special-token
    template for an empty sequence and converting the resulting ids back
    to token strings, caching them on the tokenizer.

    NOTE(review): the recovered pair is unpacked as (bos, eos) but the
    function returns (eos, bos) — confirm the intended order against the
    callers.
    """
    if self.tokenizer.bos_token is None or self.tokenizer.eos_token is None:
        special_tokens = self.tokenizer.build_inputs_with_special_tokens([])
        special_tokens_ids = self.tokenizer.convert_ids_to_tokens(special_tokens)
        self.tokenizer.bos_token, self.tokenizer.eos_token = special_tokens_ids

    special_tokens = self.tokenizer.eos_token, self.tokenizer.bos_token
    return special_tokens
[ "def _generate_tokenizers(self):\n self.input_tokenizer = \"\"\n self.output_tokenizer = \"\"", "def synth_tokens(self):\n if self.lliagraph:\n return self.lliagraph.synth_tokens.items()\n else:\n return []", "def _construct_tokenizer(self, model):\n retu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the euclidean distance between each word in the vocab and each word in the source.
def _pairwise_distance(self, src_embeds, vocab_embeds, squared=False):
    """Compute the euclidean distance between each word in the vocab and
    each word in the source.

    Uses the expansion ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b so the
    full pairwise difference tensor is never materialized.

    :param src_embeds: source embeddings — broadcasting below implies
        shape (batch, seq length, dim).
    :param vocab_embeds: vocabulary embeddings — shape (vocab size, dim).
    :param squared: if True, return squared distances (skip the sqrt).
    :returns: tensor of pairwise distances, shape
        (batch, seq length, vocab size).
    """
    # compute square norm to avoid compute all the directions
    vocab_sq_norm = vocab_embeds.norm(p=2, dim=-1) ** 2
    src_sq_norm = src_embeds.norm(p=2, dim=-1) ** 2

    # dot product
    dot_product = self._pairwise_dot_product(src_embeds, vocab_embeds)

    # reshape for broadcasting
    vocab_sq_norm = vocab_sq_norm.unsqueeze(0).unsqueeze(0)  # 1, 1, vocab size
    src_sq_norm = src_sq_norm.unsqueeze(2)  # batch, seq length, 1

    # compute squared difference
    sq_norm = vocab_sq_norm + src_sq_norm - 2 * dot_product
    if squared:
        return sq_norm
    else:
        # relu + epsilon for numerical stability (clamps tiny negative
        # values caused by floating-point cancellation before sqrt)
        sq_norm = F.relu(sq_norm) + 1e-20
        # take the square root
        return sq_norm.sqrt()
[ "def cal_distances(embeddings):\n # calculate\n dist = np.zeros([len(embeddings), len(embeddings)], dtype=float)\n for ii in xrange(len(embeddings)):\n for jj in xrange(ii + 1, len(embeddings)):\n dist[ii, jj] = np.linalg.norm(embeddings[ii] - embeddings[jj])\n dist[jj, ii] = d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If TASK_USE_PATH is set rely on PATH to look for task binaries. Otherwise ../src/ is used by default.
def task_binary_location(cmd="task"):
    """Return the location of a task binary.

    If TASK_USE_PATH is set, rely on PATH to look for task binaries;
    otherwise ../src/ is used by default (see binary_location).
    """
    return binary_location(cmd, TASK_USE_PATH)
[ "def test_task_dir(self):\n return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tasks')", "def get_celery_path():\n\n return get_executable_path('celery')", "def TaskRelativeName(cls, task):\n if not task: return None\n return os.path.relpath(cls.TaskNormalizedName(task),\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If USE_PATH is True rely on PATH to look for binaries. Otherwise ../src/ is used by default.
def binary_location(cmd, USE_PATH=False):
    """Resolve *cmd* to a binary location.

    If USE_PATH is True, rely on PATH (the bare command is returned);
    otherwise the command is joined onto BIN_PREFIX (../src/ by default).
    """
    return cmd if USE_PATH else os.path.join(BIN_PREFIX, cmd)
[ "def linkpath(srcdir, pkg):\n home = os.getenv('HOME')\n if srcdir:\n rval = '{}/{}'.format(srcdir, pkg)\n else:\n rval = '{}/bin/{}'.format(home, pkg)\n return rval", "def pythonpath_init():\n # Get this file's directory path\n my_dir = os.path.dirname(os.path.abspath(__file__))\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a command, mode, and a PATH string, return the path which conforms to the given mode on the PATH, or None if there is no such file. `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result of os.environ.get("PATH"), or can be overridden with a custom search path.
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
    """Given a command, mode, and a PATH string, return the path which
    conforms to the given mode on the PATH, or None if there is no such
    file.

    `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
    of os.environ.get("PATH"), or can be overridden with a custom search
    path.  (NOTE(review): this mirrors the stdlib ``shutil.which``
    backport — confirm whether it can be replaced by the stdlib version.)
    """
    # Check that a given file can be accessed with the correct mode.
    # Additionally check that `file` is not a directory, as on Windows
    # directories pass the os.access check.
    def _access_check(fn, mode):
        return (os.path.exists(fn) and os.access(fn, mode) and
                not os.path.isdir(fn))

    # If we're given a path with a directory part, look it up directly
    # rather than referring to PATH directories. This includes checking
    # relative to the current directory, e.g. ./script
    if os.path.dirname(cmd):
        if _access_check(cmd, mode):
            return cmd
        return None

    if path is None:
        path = os.environ.get("PATH", os.defpath)
    if not path:
        return None
    path = path.split(os.pathsep)

    if sys.platform == "win32":
        # The current directory takes precedence on Windows.
        if os.curdir not in path:
            path.insert(0, os.curdir)

        # PATHEXT is necessary to check on Windows.
        pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
        # See if the given file matches any of the expected path
        # extensions. This will allow us to short circuit when given
        # "python.exe". If it does match, only test that one, otherwise we
        # have to try others.
        if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
            files = [cmd]
        else:
            files = [cmd + ext for ext in pathext]
    else:
        # On other platforms you don't have things like PATHEXT to tell you
        # what file suffixes are executable, so just pass on cmd as-is.
        files = [cmd]

    # De-duplicate PATH entries (case-normalized) so each directory is
    # only scanned once.
    seen = set()
    for dir in path:
        normdir = os.path.normcase(dir)
        if normdir not in seen:
            seen.add(normdir)
            for thefile in files:
                name = os.path.join(dir, thefile)
                if _access_check(name, mode):
                    return name
    return None
[ "def which(cmd, mode=os.F_OK | os.X_OK, path=None):\n # Check that a given file can be accessed with the correct mode.\n # Additionally check that `file` is not a directory, as on Windows\n # directories pass the os.access check.\n def _access_check(fn, mode):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper function to compute values for the fields debit/credit/amount_currency based on an amount and the currencies given as parameters
def compute_amount_fields(self, amount, src_currency, company_currency, invoice_currency=False):
    """Helper function to compute values for the fields
    debit/credit/amount_currency based on an amount and the currencies
    given as parameters.

    :param amount: signed amount in *src_currency* (positive -> debit,
        negative -> credit).
    :param src_currency: currency record the amount is expressed in.
    :param company_currency: the company's currency record.
    :param invoice_currency: optional invoice currency record used to
        compute amount_currency when it differs from company_currency.
    :returns: tuple (debit, credit, amount_currency, currency_id).
    """
    amount_currency = False
    currency_id = False
    if src_currency and src_currency != company_currency:
        # Keep the original amount in its own currency and convert the
        # booked amount into the company currency.
        amount_currency = amount
        amount = src_currency.with_context(self._context).compute(amount, company_currency)
        currency_id = src_currency.id
    debit = amount > 0 and amount or 0.0
    credit = amount < 0 and -amount or 0.0
    if invoice_currency and invoice_currency != company_currency and not amount_currency:
        # No amount_currency set above: express it in the invoice currency.
        amount_currency = src_currency.with_context(self._context).compute(amount, invoice_currency)
        currency_id = invoice_currency.id
    return debit, credit, amount_currency, currency_id
[ "def _compute_amount_fields(self, amount, src_currency, company_currency):\n amount_currency = False\n currency_id = False\n date = self.env.context.get('date') or fields.Date.today()\n company = self.env.context.get('company_id')\n company = self.env['res.company'].browse(company...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
1. Create a bucket with no max_ttl 2. Upload 1000 docs with exp = 100s 3. Set maxTTL on bucket as 60s 4. After 60s, run expiry pager, get item count, must be 1000 5. After 40s, run expiry pager again and get item count, must be 0 6. Now load another set of docs with exp = 100s 7. Run expiry pager after 60s and get item count, must be 0
def test_set_maxttl_on_existing_bucket(self):
    """Setting maxTTL on an existing bucket must not shorten the TTL of
    docs created before the update.

    Steps:
    1. Load docs with exp=100s into buckets (no maxTTL yet).
    2. Set bucket maxTTL=60s.
    3. After 60s + expiry pager: item count must still be num_items
       (pre-existing docs keep their own 100s expiry).
    4. After a further 40s (100s total) + expiry pager: count must be 0.
    5. Load a fresh batch with exp=100s; after 60s + expiry pager the
       new docs must be gone (maxTTL=60s now caps their expiry).
    """
    for bucket in self.buckets:
        self._load_json(bucket, self.num_items, exp=100)
    self._update_bucket_maxTTL(maxttl=60)

    self.sleep(60, "waiting before running expiry pager...")
    self.expire_pager(self.servers)
    self.sleep(20, "waiting for item count to come down...")
    for bucket in self.buckets:
        items = RestConnection(self.master).get_active_key_count(bucket)
        self.log.info("Doc expiry set to = 100s, maxTTL = 60s"
                      "(set after doc creation), after 60s, item count = {0}".format(items))
        if items != self.num_items:
            self.fail("FAIL: Items with larger expiry before maxTTL updation deleted!")

    self.sleep(40, "waiting before running expiry pager...")
    self.expire_pager(self.servers)
    self.sleep(20, "waiting for item count to come down...")
    for bucket in self.buckets:
        items = RestConnection(self.master).get_active_key_count(bucket)
        self.log.info("Doc expiry set to = 100s, maxTTL = 60s"
                      "(set after doc creation), after 100s,"
                      " item count = {0}".format(items))
        if items != 0:
            self.fail("FAIL: Items with not greater expiry set before maxTTL "
                      "updation not deleted after elapsed TTL!")

    # New docs loaded after the maxTTL update are capped at 60s.
    for bucket in self.buckets:
        self._load_json(bucket, self.num_items, exp=100)
    self.sleep(60, "waiting before running expiry pager...")
    self.expire_pager(self.servers)
    self.sleep(20, "waiting for item count to come down...")
    for bucket in self.buckets:
        items = RestConnection(self.master).get_active_key_count(bucket)
        self.log.info("Doc expiry set to = 100s, maxTTL = 60s, after 100s,"
                      " item count = {0}".format(items))
        if items != 0:
            self.fail("FAIL: Items with not greater expiry not "
                      "deleted after elapsed maxTTL!")
[ "def test_maxttl_with_doc_updates(self):\n rest = RestConnection(self.master)\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=40)\n\n self.sleep(20, \"waiting to update docs with exp=60s...\")\n\n for bucket in self.buckets:\n self._load_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
1. Create a bucket with ttl = 60s 2. Upload 1000 docs with exp = 40s 3. After 20s, Update docs with exp = 60s 4. After 40s, run expiry pager again and get item count, must be 1000 5. After 20s, run expiry pager again and get item count, must be 0
def test_maxttl_with_doc_updates(self):
    """Updating a doc must reset its TTL.

    Steps:
    1. Load docs with exp=40s.
    2. After 20s, re-load (update) the same docs with exp=60s.
    3. 40s later (old TTL elapsed) + expiry pager: all docs must remain.
    4. 20s later (new TTL elapsed) + expiry pager: all docs must be gone.
    """
    rest = RestConnection(self.master)
    for bucket in self.buckets:
        self._load_json(bucket, self.num_items, exp=40)

    self.sleep(20, "waiting to update docs with exp=60s...")

    for bucket in self.buckets:
        self._load_json(bucket, self.num_items, exp=60)

    self.sleep(40, "waiting before running expiry pager...")
    self.expire_pager(self.servers)
    for bucket in self.buckets:
        items = rest.get_active_key_count(bucket)
        self.log.info("Items: {0}".format(items))
        if items != self.num_items:
            self.fail("FAIL: Docs with updated expiry deleted unexpectedly!")

    self.sleep(20, "waiting before running expiry pager...")
    self.expire_pager(self.servers)
    self.sleep(20, "waiting for item count to come down...")
    for bucket in self.buckets:
        items = rest.get_active_key_count(bucket)
        self.log.info("Items: {0}".format(items))
        if items != 0:
            self.fail("FAIL: Docs with updated expiry not deleted after new exp has elapsed!")
[ "def test_set_maxttl_on_existing_bucket(self):\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=100)\n self._update_bucket_maxTTL(maxttl=60)\n\n self.sleep(60, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Use active_ids from the context to fetch the leads
def default_get(self, cr, uid, fields, context=None):
    """Use active_ids from the context to prefill the wizard's lead_ids.

    Old-API Odoo override: browses the crm.lead records pointed to by
    context['active_ids'] and, when 'lead_ids' is among the requested
    fields, adds their ids to the returned defaults.
    """
    if context is None:
        context = {}
    record_ids = context.get('active_ids', False)
    res = super(crm_lead_stage, self).default_get(cr, uid, fields, context=context)
    if record_ids:
        opp_ids = []
        opps = self.pool.get('crm.lead').browse(cr, uid, record_ids, context=context)
        for opp in opps:
            opp_ids.append(opp.id)
        if 'lead_ids' in fields:
            res.update({'lead_ids': opp_ids})
    return res
[ "def get_list_ids():", "def get_all_activities(self):\n return map(lambda x: self.__activity_controller.find_by_id(x.activity_id),list(filter(lambda x: x.activity_id in [e.entity_id for e in self.__activity_controller.get_all()],self.get_all())))", "def _get_objs_for_report(self, docids, data):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Use lead_ids from the wizard and set to new stage
def action_multi_lead_stage(self, cr, uid, ids, context=None):
    """Use lead_ids from the wizard and set every lead to the new stage.

    Old-API Odoo action: writes wizard.stage_id on each crm.lead in
    wizard.lead_ids, then closes the wizard window.
    """
    if context is None:
        context = {}
    wizard = self.browse(cr, uid, ids[0], context=context)
    lead_ids = wizard.lead_ids
    if lead_ids:
        for lead in lead_ids:
            # One write per lead; the stage comes from the wizard form.
            self.pool.get('crm.lead').write(cr, uid, [lead.id], {'stage_id': wizard.stage_id.id}, context)
    return {'type': 'ir.actions.act_window_close'}
[ "def _onchange_stage_id_values(self, stage_id):\n if not stage_id:\n return {}\n print('1111')\n\n call_attempt = len(self.env['call.attempt'].browse(self.call_attempt_ids))\n call_pitch = len(self.env['call.pitch'].browse(self.call_pitch_ids))\n contact_meeting = len(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }