code
stringlengths 4
4.48k
| docstring
stringlengths 1
6.45k
| _id
stringlengths 24
24
|
|---|---|---|
def find_task(self, name: str) -> Optional['Task']:
    """Return the first task whose ``name`` matches, or ``None`` if absent."""
    for task in self._tasks:
        if task.name == name:
            return task
    return None
|
find task with given name.
|
625941bcf8510a7c17cf95d7
|
def fullJustify(self, words, maxWidth):
    """Fully justify *words* into lines of exactly *maxWidth* characters.

    Greedy packing: each line holds as many words as fit with one space
    between them.  Extra spaces are spread across the gaps, leftmost gaps
    getting one more.  A single-word line and the last line are
    left-justified and padded with trailing spaces.

    Fixes: the original used Python-2-only ``xrange`` and re-derived the
    gap sizes with fragile manual arithmetic; this version uses
    ``divmod`` and works on Python 3.

    :type words: List[str]
    :type maxWidth: int
    :rtype: List[str]
    """
    if not words:
        return []
    res = []
    line = []       # words collected for the current output line
    line_len = 0    # characters in `line`, excluding spaces
    for word in words:
        # +len(line) accounts for one mandatory space before each word.
        if line and line_len + len(line) + len(word) > maxWidth:
            spaces = maxWidth - line_len
            gaps = len(line) - 1
            if gaps == 0:
                # Single word: left-justify.
                res.append(line[0] + ' ' * spaces)
            else:
                gap, extra = divmod(spaces, gaps)
                parts = []
                for i, w in enumerate(line[:-1]):
                    parts.append(w)
                    # Leftmost `extra` gaps get one additional space.
                    parts.append(' ' * (gap + (1 if i < extra else 0)))
                parts.append(line[-1])
                res.append(''.join(parts))
            line, line_len = [], 0
        line.append(word)
        line_len += len(word)
    # Last line is left-justified with trailing padding.
    last = ' '.join(line)
    res.append(last + ' ' * (maxWidth - len(last)))
    return res
|
:type words: List[str]
:type maxWidth: int
:rtype: List[str]
|
625941bccb5e8a47e48b798a
|
def make_link(cfgdict, nodea, nodeb):
    """Create a standard link from *nodea* to *nodeb*, append it to
    ``cfgdict["links"]`` and return the new number of links."""
    link = {
        "delay": "43ms",
        "capacity": "1Gb",
        "weight": 10,
        # Dict literals evaluate in order, so nodea is resolved first.
        "source": get_nodeindex(cfgdict, nodea),
        "target": get_nodeindex(cfgdict, nodeb),
    }
    cfgdict["links"].append(link)
    return len(cfgdict["links"])
|
Make a new link from nodea to nodeb and add it to configuration
|
625941bc0c0af96317bb80c5
|
def test_LeafBondSelector_constructor(self):
    """LeafBondSelector() constructs an instance of exactly that class."""
    leafBndSel = LeafBondSelector()
    self.assertEqual(leafBndSel.__class__, LeafBondSelector)
|
test LeafBond constructor
|
625941bc10dbd63aa1bd2a83
|
def GetReplicationControllers(self, namespace=None):
    """List replication controllers on this cluster.

    :param namespace: optional namespace to scope the query; when falsy,
        the cluster-wide endpoint is queried instead.
    :return: a ReplicationControllerList parsed from the API response.
    """
    if namespace:
        url = ('%(base_url)s/namespaces/%(ns)s/replicationcontrollers' %
               {"base_url": self.base_url, "ns": namespace})
    else:
        url = '%s/replicationcontrollers' % self.base_url
    # NOTE(review): local name `json` shadows the stdlib module here.
    json = self._RequestUrl(url, 'GET')
    data = self._ParseAndCheckKubernetes(json.content)
    return ReplicationControllerList.NewFromJsonDict(data)
|
List all replicationcontrollers on this cluster
|
625941bc56b00c62f0f14534
|
def check_interrupt(self):
    """Raise TaskInterruption if this task was marked interrupted.

    Runs ``before_interrupt()`` first so the task can clean up.  A
    well-behaved task calls this frequently to avoid looping forever
    after an interrupt request.
    """
    if not self._interrupted:
        return
    self.before_interrupt()
    raise TaskInterruption
|
Checks if this task was marked interrupted. If yes, raise an Exception
to terminate the thread.
A good task should call this function frequently to avoid infinite
loop.
|
625941bc4f88993c3716bf47
|
def cal_snr_weight(used_misfit_window, value1, value2):
    """Derive a weight from the window's ``snr_energy``.

    value1/value2 are passed through to ``cal_cos_weight`` as the taper
    bounds of the cosine weighting.
    """
    value = used_misfit_window.snr_energy
    weight = cal_cos_weight(value, value1, value2)
    return weight
|
cal_snr_weight: use snr_energy to get a weight.
|
625941bc5fc7496912cc385a
|
def test_tags_can_be_added_to_photos(self):
    """POSTing the edit form with a tag makes that tag appear on the photo page."""
    user = User.objects.all()[0]
    self.client.force_login(user)
    photo_id = Photo.objects.all()[0].id
    response = self.client.get(reverse(
        'photo_edit', kwargs={'photo_id': photo_id})
    )
    html = soup(response.rendered_content, "html.parser")
    # Pull the CSRF token out of the rendered form so the POST is accepted.
    token = html.findAll('input', {'name': "csrfmiddlewaretoken"})
    info = {
        'title': 'photo name',
        'description': 'a description',
        'tags': 'basketball',
        'published': 'PU',
        'csrfmiddlewaretoken': token[0]['value']
    }
    response = self.client.post(
        reverse('photo_edit', kwargs={'photo_id': photo_id}),
        info,
        follow=True
    )
    html = soup(response.content, "html.parser")
    self.assertTrue(b'basketball' in response.content)
|
Test tags can be added to photos.
|
625941bcfff4ab517eb2f316
|
def delete_inner_session(self):
    """Delete the in-band session by discarding the stored token.

    :return: always ``True``.
    """
    self.token = None
    return True
|
#====================================================================================
# @Method: 删除带内Session
# @Param:
# @Return:
# @author:
#====================================================================================
|
625941bc3346ee7daa2b2c46
|
def cb_alternative(self):
    """Delegate to the controller's buffer-list action.

    NOTE(review): the accompanying description says this is the
    detach-button handler, but the body fetches the buffer list —
    confirm which is intended.
    """
    self.cb.action_getbufferlist()
|
Called when the detach button is clicked.
|
625941bcd10714528d5ffbbd
|
def hidden_layer(pooled, input_size, output_size):
    """Create a hidden feedforward layer with leaky-ReLU activation.

    :param pooled: input tensor; multiplied by a [input_size, output_size]
        weight matrix.
    :returns: tuple (activations, weights) so callers can regularize weights.
    """
    with tf.name_scope("hidden"):
        # Weight init scaled by 1/sqrt(fan_in); biases use sqrt(2/fan_in).
        weights = tf.Variable(
            tf.truncated_normal(
                [input_size, output_size],
                stddev=1.0 / math.sqrt(input_size)
            ),
            name='weights'
        )
        init = tf.truncated_normal([output_size, ], stddev=math.sqrt(2.0 / input_size))
        biases = tf.Variable(init, name='biases')
        # Leaky ReLU with slope 0.01 on the negative side.
        return lrelu(tf.matmul(pooled, weights) + biases, 0.01), weights
|
Create a hidden feedforward layer.
|
625941bc71ff763f4b549563
|
def test_list_ints(self):
    """list_ints wraps a scalar in a list and passes lists through unchanged."""
    assert duck.list_ints(1) == [1]
    assert duck.list_ints([1]) == [1]
    assert duck.list_ints([1, 2, 3]) == [1, 2, 3]
|
Testing list_ints.
|
625941bc3eb6a72ae02ec3b1
|
def stop(self, req):
    """Stop playback, clear the list, and redirect to the root page."""
    self.log_play()
    self._stop()
    # Disable auto-advance after an explicit stop.
    req.auto = False
    return self.redirect(req, "")
|
stop and clear the list
|
625941bc26068e7796caebb6
|
def test_simple(self):
    """A one-column table renders as header, separator and the single row."""
    tbl = TextTable("%one")
    tbl.append({'one': 'foo'})
    self.assertEqual(str(tbl), "ONE\n---\nfoo")
|
simple row
|
625941bcadb09d7d5db6c66e
|
def Size(self):
    """Size(self) -> size_t

    SWIG wrapper: delegates to the native ITK ``itkSampleVF3_Size`` call.
    """
    return _itkSamplePython.itkSampleVF3_Size(self)
|
Size(self) -> size_t
|
625941bc85dfad0860c3ad36
|
def bootstrap_steady_perf(steady_segments_all_pexecs, confidence_level=CONFIDENCE_LEVEL, quality='HIGH'):
    """Bootstrap the median and confidence interval of steady-state means.

    This is not a general bootstrapping function.  Input is a list with
    one entry per pexec, each a list of segments of iteration times.

    :param confidence_level: must NOT be a float — a Decimal-compatible
        value (e.g. str) is required so quantile indices are exact.
    :param quality: 'HIGH' or 'LOW' bootstrap quality; any other value
        writes an error and exits the process.
    :returns: (median, ci) where ci is the mean half-width of the interval.
    """
    if quality.lower() == "high":
        means = _bootstrap_means_highq(steady_segments_all_pexecs)
    elif quality.lower() == "low":
        means = _bootstrap_means_lowq(steady_segments_all_pexecs)
    else:
        sys.stderr.write("Unknown quality level '%s'" % quality)
        sys.exit(1)
    means.sort()
    # Binary floats would introduce rounding error in the index arithmetic.
    assert not isinstance(confidence_level, float)
    confidence_level = Decimal(confidence_level)
    assert isinstance(confidence_level, Decimal)
    exclude = (1 - confidence_level) / 2
    length = len(means)
    if length % 2 == 0:
        # Even count: median is the mean of the two central elements.
        median_indices = (length // 2 - 1, length // 2)
    else:
        median_indices = (length // 2, )
    # Round outward so the interval never shrinks below the requested level.
    lower_index = int((exclude * length).quantize(Decimal('1.0'), rounding=ROUND_DOWN))
    upper_index = int(((1 - exclude) * length).quantize(Decimal('1.0'), rounding=ROUND_UP))
    lower, upper = means[lower_index], means[upper_index - 1]
    median = _mean([means[i] for i in median_indices])
    # Report a symmetric CI as the mean of the two half-widths.
    ci = _mean([upper - median, median - lower])
    return median, ci
|
This is not a general bootstrapping function.
Input is a list containing a list for each pexec, containing a list of
segments with iteration times.
|
625941bcc4546d3d9de7290e
|
def enable_internal_db(self, region=0, key_address=None, db_password=None, ssh_password=None, log_callback=None):
    """Enable the internal database on this appliance.

    Args:
        region: Region number of the CFME appliance.
        key_address: Address of an appliance to fetch the encryption key
            from; when None a new key is generated (``--force-key``).
        db_password: Database password; defaults to configured credentials.
        ssh_password: SSH password; defaults to configured credentials.
        log_callback: Optional logging callable; defaults to self.log.info.

    Returns:
        Tuple ``(status, out)`` from the remote configuration command.
    """
    (log_callback or self.log.info)(
        'Enabling internal DB (region {}) on {}.'.format(region, self.address))
    self.db_address = self.address
    # Drop the cached db attribute so it is re-created against the new address.
    del(self.db)
    client = self.ssh_client
    db_password = db_password or conf.credentials['database']['password']
    ssh_password = ssh_password or conf.credentials['ssh']['password']
    if self.has_cli:
        if key_address:
            status, out = client.run_command(
                'appliance_console_cli --region {} --internal --fetch-key {} -p {} -a {}'
                .format(region, key_address, db_password, ssh_password)
            )
        else:
            status, out = client.run_command(
                'appliance_console_cli --region {} --internal --force-key -p {}'
                .format(region, db_password)
            )
    else:
        # No CLI available: render a ruby template, upload and execute it.
        rbt_repl = {
            'miq_lib': '/var/www/miq/lib',
            'region': region,
            'scl_name': db.scl_name()
        }
        rbt = datafile.data_path_for_filename('enable-internal-db.rbt', scripts_path.strpath)
        rb = datafile.load_data_file(rbt, rbt_repl)
        remote_file = '/tmp/%s' % fauxfactory.gen_alphanumeric()
        client.put_file(rb.name, remote_file)
        status, out = client.run_command('ruby %s' % remote_file)
        client.run_command('rm %s' % remote_file)
    return status, out
|
Enables internal database
Args:
region: Region number of the CFME appliance.
key_address: Address of CFME appliance where key can be fetched.
Note:
If key_address is None, a new encryption key is generated for the appliance.
|
625941bc26238365f5f0ed47
|
def rerank(self, hypotheses: Dict[str, Any], reference: str) -> Dict[str, Any]:
    """Rerank nbest hypotheses of one reference translation (stable sort).

    :param hypotheses: Nbest translations (dict with a 'translations' list).
    :param reference: A single string with the actual reference translation.
    :return: Nbest translations sorted by descending reranking score;
        scores are attached under 'scores' when ``self.return_score`` is set.
    """
    scores = [self.scoring_function(hypothesis, reference).score for hypothesis in hypotheses['translations']]
    # Stable ascending argsort, then reversed to get descending scores.
    ranking = list(np.argsort(scores, kind='mergesort')[::-1])
    reranked_hypotheses = self._sort_by_ranking(hypotheses, ranking)
    if self.return_score:
        reranked_hypotheses['scores'] = [scores[i] for i in ranking]
    return reranked_hypotheses
|
Reranks a set of hypotheses that belong to one single reference
translation. Uses stable sorting.
:param hypotheses: Nbest translations.
:param reference: A single string with the actual reference translation.
:return: Nbest translations sorted by reranking scores.
|
625941bccdde0d52a9e52f0c
|
def context_before(string, index, contextnum):
    """Return up to *contextnum* characters of *string* preceding *index*.

    Stops at the beginning of the string when fewer characters are
    available.

    :raises ValueError: if *contextnum* is negative.
    """
    if contextnum < 0:
        raise ValueError("Context amount cannot be negative")
    start = max(0, index - contextnum)
    return string[start:index]
|
gets the specified number of characters before the given index,
or until the beginning of the string.
|
625941bca934411ee3751577
|
def _run_length_encoding(x):
    """Run-length encode a 1D array (assumed boolean-ish values).

    :param x: 1D array-like.
    :returns: (indices where runs start, run lengths, per-run flags).
        NOTE(review): the first element of ``changes_from`` is seeded with
        ``not x[0]``, so the returned ``changes_to`` flags appear inverted
        for runs after the first — verify intended semantics against callers.
    """
    x = np.asarray(x)
    assert x.ndim == 1, "run_length_encoding currently only supports 1D arrays"
    # True at i where x[i] != x[i+1]; +1 converts to the position of the change.
    changes = x[:-1] != x[1:]
    changes_ix = np.where(changes)[0] + 1
    changes_from = np.concatenate(([int(not x[0])], x[changes_ix]))
    changes_ix = np.concatenate(([0], changes_ix))
    changes_to = np.logical_not(changes_from).astype(int)
    # Run lengths are the gaps between consecutive change positions.
    lengths = np.diff(np.concatenate((changes_ix, [x.size])))
    return changes_ix, lengths, changes_to
|
from https://github.com/ornithos/pyalexutil/manipulate.py
For 1D array x, turn [0,0,0,0,1,1,1,1,0,1,1,1] into [0, 3, 7, 8],
[3, 4, 1, 4], [0, 1, 0, 1]
:param x:
:return: (indices of changes, length of runs, new number (assuming boolean).
|
625941bc01c39578d7e74d18
|
def echange_banque(player_id=None, liste_carte=None):
    """Exchange cards with the bank.

    Card order: ['wood', 'clay', 'sheep', 'wheat', 'stone'].  For example,
    to give 4 wood and receive 1 clay: [-4, 1, 0, 0, 0].

    Fixes: the original evaluated ``Player.current_player`` and a shared
    mutable list as defaults at definition time; both are now resolved at
    call time.  Also fixes the misspelled 'échante incorrect' message and
    removes the duplicated resource-name lists.

    :param player_id: defaults to the current player at call time.
    :param liste_carte: 5-element trade vector; defaults to no-op trade.
    """
    if player_id is None:
        player_id = Player.current_player
    if liste_carte is None:
        liste_carte = [0, 0, 0, 0, 0]
    cartes = ['wood', 'clay', 'sheep', 'wheat', 'stone']

    def _appliquer():
        # Apply the trade vector to the player's resources.
        for i, carte in enumerate(cartes):
            modif_ressource(player_id, carte, liste_carte[i])

    if not (isinstance(liste_carte, list) and len(liste_carte) == 5):
        print('échange incorrect')
        return
    if min(liste_carte) == -4 and max(liste_carte) == 1 and sum(liste_carte) == -3:
        # Standard 4:1 bank trade.
        _appliquer()
    elif len(Player.player_list[player_id].list_port) > 0:
        player = Player.player_list[player_id]
        if '3:1' in player.list_port:
            # Generic 3:1 port trade.
            if (min(liste_carte) == -3 and player.port3_1
                    and max(liste_carte) == 1 and sum(liste_carte) == -2):
                _appliquer()
        elif cartes[np.argmin(liste_carte)] in player.list_port:
            # Resource-specific 2:1 port trade.
            print('Utilisation du port {}'.format(cartes[np.argmin(liste_carte)]))
            if max(liste_carte) == 1 and min(liste_carte) == -2 and sum(liste_carte) == -1:
                _appliquer()
        else:
            print('échange incorrect')
|
Permet d'échanger ses cartes avec la banque
['wood','clay','sheep','wheat','stone']
Par exemple, si le joueur souhaite donner 4 bois et
recevoir 1 argile à la banque : [-4,1,0,0,0]
|
625941bcd6c5a10208143f25
|
def getColumns(self, query):
    """Return the text of the 'columns' child element of *query*."""
    return query.find('columns').text
|
Get columns
|
625941bc1f037a2d8b9460db
|
def test_pyquest_interactive_density_matrix():
    """Run a two-qubit circuit on the PyQuEST backend and check readouts."""
    circuit = Circuit()
    circuit += ops.DefinitionBit(name='out', length=2, is_output=True)
    circuit += ops.DefinitionBit(name='out2', length=2, is_output=True)
    circuit += ops.DefinitionBit(name='notout', length=2, is_output=False)
    circuit += ops.PauliX(qubit=0)
    circuit += ops.MeasureQubit(qubit=0, readout='out', readout_index=0)
    circuit += ops.MeasureQubit(qubit=1, readout='out', readout_index=1)
    pyquest = PyQuestBackend(number_qubits=2)
    (output_bit_register_dict, output_float_register_dict,
     output_complex_register_dict) = pyquest.run_circuit(circuit)
    # Qubit 0 was flipped by PauliX; qubit 1 remains 0.
    assert output_bit_register_dict['out'] == [[True, False]]
    assert len(output_bit_register_dict['out2']) == 1
    # Registers declared with is_output=False must not appear in results.
    assert 'notout' not in output_bit_register_dict.keys()
|
Test with PyQuEST density matrix
|
625941bc1b99ca400220a98d
|
def getMarkerIcon():
    """Return the name of the marker icon, e.g. 'Red Marker'.

    Allowed values are defined in the maps_properties/map_markers
    property.  Abstract stub — implementations must override.
    """
    pass
|
Returns name of marker icon. Allowed values are defined
in maps_properties/map_markers property.
Method should return 'Red Marker' for example.
|
625941bc4a966d76dd550ee9
|
def _next_character(self):
    """Pop and return the next character from the file stream.

    NOTE(review): list.pop(0) is O(n) per call; a collections.deque
    would make this O(1) if the stream can be large.
    """
    return self._file_stream.pop(0)
|
Get the next character in the file stream.
|
625941bc4e4d5625662d42b9
|
def store(self):
    """Store the current input/output samples at the circular time index.

    NOTE(review): ``out``, ``datax`` and ``win`` are computed but never
    used — looks like leftover or unfinished code; confirm before removal.
    """
    # Wrap the global step into the storage window.
    tt = self.t % self.STIME
    self.data[self.l_inp][:, tt] = self.inp
    self.data[self.l_out][:, tt] = self.out
    out = self.out_raw * 2
    datax = self.data[self.l_out_raw]
    win = self.idx
|
storage
|
625941bc6fb2d068a760ef77
|
def setUp(self):
    """Create a reusable user and three sample events for the tests."""
    self.user = User("Fellow1", "fellow1@andela.com", "bootcampertofellow")
    self.event1 = Event("Bootcamp", "Learning", "Uganda", "Andela",
                        "Learning event for aspiring Andelans")
    self.event2 = Event("Blaze", "Entrepreneurial", "Kenya", "Safariom",
                        "This is is a great opportunity for budding young entrepreneurs")
    self.event3 = Event("Blankets and wines", "Social", "Kenya", "B&W",
                        "Chance for everyone to meet and socialise")
|
Instantiating reusable variables
|
625941bc1f037a2d8b9460dc
|
def button_2_click(self, **event_args):
    """Handle the button click: log out, clear temp order state, open Query."""
    anvil.users.logout()
    pagestack.clear()
    app_tables.order_tmp2.delete_all_rows()
    # Refresh the panel so it reflects the now-empty table.
    self.repeating_panel_1.items = app_tables.order_tmp2.search()
    open_form('Query')
|
This method is called when the button is clicked
|
625941bc6e29344779a624f1
|
def get_config(self):
    """Return the storage-backend section of the MCVirt configuration."""
    return MCVirtConfig().get_config()[Factory.GROUP_CONFIG_KEY]
|
Return the configs for storage backends.
|
625941bc2c8b7c6e89b3569f
|
def test(self):
    """Drive one harness test case through the web UI.

    Skips when invoked on HarnessCase itself (only subclasses supply a
    concrete role/case).  Polls the browser through the setup pages until
    the execution page loads, selects the case, handles dialogs, collects
    results and asserts the reported status contains 'Pass'.
    """
    if self.__class__ is HarnessCase:
        logger.warning('Skip this harness itself')
        return
    logger.info('Testing role[%d] case[%s]', self.role, self.case)
    try:
        self._init_browser()
        # Page state machine: advance per current URL until execution page.
        while True:
            url = self._browser.current_url
            if url.endswith('SetupPage.html'):
                self._setup_page()
            elif url.endswith('TestBed.html'):
                self._test_bed()
            elif url.endswith('TestExecution.html'):
                logger.info('Ready to handle dialogs')
                break
            time.sleep(2)
    except UnexpectedAlertPresentException:
        logger.exception('Failed to connect to harness server')
        raise SystemExit()
    except FatalError:
        logger.exception('Test stopped for fatal error')
        raise SystemExit()
    except FailError:
        logger.exception('Test failed')
        raise
    except:  # noqa: E722 -- deliberately broad: log and continue to case selection
        logger.exception('Something wrong')
    self._select_case(self.role, self.case)
    self._wait_dialog()
    try:
        self._collect_result()
    except:  # noqa: E722 -- re-raised, so failures still surface
        logger.exception('Failed to collect results')
        raise
    status = self._browser.find_element_by_class_name('title-test').text
    logger.info(status)
    success = 'Pass' in status
    self.assertTrue(success)
|
This method will only start test case in child class
|
625941bc23849d37ff7b2f6e
|
def isValid(self, s):
    """Return True iff the bracket string *s* is well-formed.

    :type s: str
    :rtype: bool
    """
    if not s:
        return True
    # Map each closer to the opener it must match.
    opener_for = {')': '(', ']': '[', '}': '{'}
    pending = []
    for ch in s:
        if ch in '([{':
            pending.append(ch)
        elif not pending or opener_for.get(ch) != pending.pop():
            return False
    return not pending
|
:type s: str
:rtype: bool
|
625941bc3617ad0b5ed67dd5
|
def test_sentence():
    """Exercise Sentence metadata access, mutation and serialisation."""
    _sentence = None
    for sentence in CoNLL.iter_string(CONLL_SENTENCE):
        _sentence = sentence  # keep the last parsed sentence
    sentence_obj = Sentence(_sentence)
    assert sentence_obj.meta_present("text") is True
    assert sentence_obj.meta_value("text") == "والدین معمولی زخمی ہوئے ہےں۔"
    sentence_obj.set_meta('number_check', "100")
    assert sentence_obj.meta_value("number_check") == "100"
    for word in sentence_obj.words:
        assert isinstance(word, Word)
    for token in sentence_obj.tokens:
        assert isinstance(token, Token)
    assert isinstance(sentence_obj.to_dict(), list)
    assert isinstance(sentence_obj.conll(), str)
|
Test case
|
625941bc462c4b4f79d1d5ad
|
def update(version=None):
    """Update the salt minion from the URL in opts['update_url'].

    Requires an Esky (bdist_esky) build of the minion and a configured
    ``update_url``.  The version defaults to the most recent available.
    Restarts the services listed in opts['update_restart_services'].

    :returns: dict describing the transaction; errors are reported under
        the '_error' key rather than raised.
    """
    ret = {}
    if not HAS_ESKY:
        ret["_error"] = "Esky not available as import"
        return ret
    if not getattr(sys, "frozen", False):
        ret["_error"] = "Minion is not running an Esky build"
        return ret
    if not __salt__["config.option"]("update_url"):
        ret["_error"] = '"update_url" not configured on this minion'
        return ret
    app = esky.Esky(sys.executable, __opts__["update_url"])
    oldversion = __grains__["saltversion"]
    if not version:
        try:
            version = app.find_update()
        except urllib.error.URLError as exc:
            ret["_error"] = "Could not connect to update_url. Error: {}".format(exc)
            return ret
    if not version:
        ret["_error"] = "No updates available"
        return ret
    try:
        app.fetch_version(version)
    except EskyVersionError as exc:
        ret["_error"] = "Unable to fetch version {}. Error: {}".format(version, exc)
        return ret
    try:
        app.install_version(version)
    except EskyVersionError as exc:
        ret["_error"] = "Unable to install version {}. Error: {}".format(version, exc)
        return ret
    try:
        app.cleanup()
    except Exception as exc:
        # Cleanup failure is non-fatal: record it but finish the update.
        ret["_error"] = "Unable to cleanup. \nError: {}".format(exc)
    restarted = {}
    for service in __opts__["update_restart_services"]:
        restarted[service] = __salt__["service.restart"](service)
    ret["comment"] = "Updated from {} to {}".format(oldversion, version)
    ret["restarted"] = restarted
    return ret
|
Update the salt minion from the URL defined in opts['update_url']
SaltStack, Inc provides the latest builds here:
update_url: https://repo.saltproject.io/windows/
Be aware that as of 2014-8-11 there's a bug in esky such that only the
latest version available in the update_url can be downloaded and installed.
This feature requires the minion to be running a bdist_esky build.
The version number is optional and will default to the most recent version
available at opts['update_url'].
Returns details about the transaction upon completion.
CLI Examples:
.. code-block:: bash
salt '*' saltutil.update
salt '*' saltutil.update 0.10.3
|
625941bccad5886f8bd26ebf
|
def _process_layer_uuid(self, uuid):
    """Validate *uuid* as a Layer UUID and add its ELTs to self._elt_loss_sets.

    :raises ValueError: when the UUID does not resolve to a Layer.
    """
    self._validate_uuid(uuid)
    layer = None
    try:
        layer = Layer.retrieve(uuid)
    except InvalidRequestError:
        # Re-raise as ValueError so callers get a uniform validation error.
        raise ValueError(
            "UUID '{}' is not a Layer.".format(uuid))
    self._add_layer_elts(layer)
|
Validates uuid as a Layer UUID, and adds ELTs for that UUID to
self._elt_loss_sets
|
625941bc50812a4eaa59c201
|
def transfer(self, from_acct: Account, b58_to_address: str, value: int, payer_acct: Account, gas_limit: int, gas_price: int) -> str:
    """Call the OEP4 Transfer method: move tokens between two accounts.

    :param from_acct: Account that sends the oep4 token (also signs).
    :param b58_to_address: base58-encoded address that receives the token.
    :param value: non-negative int amount of oep4 token to transfer.
    :param payer_acct: Account that pays for the transaction.
    :param gas_limit: gas limit for the transaction.
    :param gas_price: gas price for the transaction.
    :return: hexadecimal transaction hash.
    :raises SDKException: on invalid argument types/values or bad address.
    """
    func = self.__abi_info.get_function('transfer')
    if not isinstance(value, int):
        raise SDKException(ErrorCode.param_err('the data type of value should be int.'))
    if value < 0:
        raise SDKException(ErrorCode.param_err('the value should be equal or great than 0.'))
    if not isinstance(from_acct, Account):
        raise SDKException(ErrorCode.param_err('the data type of from_acct should be Account.'))
    Oep4.__b58_address_check(b58_to_address)
    from_address = from_acct.get_address().to_array()
    to_address = Address.b58decode(b58_to_address).to_array()
    params = (from_address, to_address, value)
    func.set_params_value(params)
    # Final False: do not use a pre-executed (read-only) invocation.
    tx_hash = self.__sdk.neo_vm().send_transaction(self.__contract_address, from_acct, payer_acct, gas_limit,
                                                   gas_price, func, False)
    return tx_hash
|
This interface is used to call the Transfer method in ope4
that transfer an amount of tokens from one account to another account.
:param from_acct: an Account class that send the oep4 token.
:param b58_to_address: a base58 encode address that receive the oep4 token.
:param value: an int value that indicate the amount oep4 token that will be transferred in this transaction.
:param payer_acct: an Account class that used to pay for the transaction.
:param gas_limit: an int value that indicate the gas limit.
:param gas_price: an int value that indicate the gas price.
:return: the hexadecimal transaction hash value.
|
625941bcbe8e80087fb20b24
|
def __init__(self, k):
    """Store the evaluation cut-off.

    :param k: top-k evaluation depth.
    """
    self.k = k
|
Class to manage all the evaluation methods and operation
:param data: dataset object
:param k: top-k evaluation
|
625941bc8c0ade5d55d3e89c
|
def infer(self, features, *args, **kwargs):
    """Produce predictions from the model by running it.

    When 'targets' is missing, a zero tensor is synthesized — shaped like
    'infer_targets' if present, otherwise like 'inputs' with the second
    dimension replaced by the configured number of target frames.
    """
    del args, kwargs
    if "targets" not in features:
        if "infer_targets" in features:
            targets_shape = common_layers.shape_list(features["infer_targets"])
        elif "inputs" in features:
            targets_shape = common_layers.shape_list(features["inputs"])
            targets_shape[1] = self.hparams.video_num_target_frames
        else:
            raise ValueError("no inputs are given.")
        features["targets"] = tf.zeros(targets_shape, dtype=tf.float32)
    output, _ = self(features)
    # Collapse the trailing singleton dim; rewards come back as logits.
    output["targets"] = tf.squeeze(output["targets"], axis=-1)
    output["target_reward"] = tf.argmax(output["target_reward"], axis=-1)
    output["outputs"] = output["targets"]
    output["scores"] = output["targets"]
    return output
|
Produce predictions from the model by running it.
|
625941bc3617ad0b5ed67dd6
|
def conv_gauss(img, kernel):
    """Convolve *img* with a Gaussian *kernel* built by build_gauss_kernel.

    Replicate padding keeps the spatial size unchanged, and grouped
    convolution filters each channel independently.
    """
    n_channels, _, kw, kh = kernel.shape
    img = fnn.pad(img, (kw // 2, kh // 2, kw // 2, kh // 2), mode='replicate')
    return fnn.conv2d(img, kernel, groups=n_channels)
|
convolve img with a gaussian kernel that has been built with build_gauss_kernel
|
625941bc30dc7b7665901847
|
def _get_province(self):
    """Return the province of the main address, or '' when it is unset."""
    return self.address.province or ""
|
Return province of main address
|
625941bc090684286d50ebbf
|
def delete_data_source(self, data_source, ignore_missing=True):
    """Delete a data_source.

    :param data_source: value can be the ID of a data_source or an instance
        of :class:`~openstack.map_reduce.v1.data_source.DataSource`.
    :param bool ignore_missing: When set to ``False``,
        :class:`~openstack.exceptions.ResourceNotFound` will be raised when
        the data_source does not exist.  When ``True``, deletion of a
        nonexistent data_source is silently ignored.
    :returns: the deleted DataSource.
    :rtype: :class:`~openstack.map_reduce.v1.data_source.DataSource`
    """
    return self._delete(_ds.DataSource, data_source,
                        ignore_missing=ignore_missing)
|
Delete a data_source
:param data_source: value can be the ID of a data_source or an instance
of :class:`~openstack.map_reduce.v1.data_source.DataSource`
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be raised when
the data_source does not exist.
When set to ``True``, no exception will be set when attempting to
delete a nonexistent data_source.
:returns: DataSource been deleted
:rtype: :class:`~openstack.map_reduce.v1.data_source.DataSource`
|
625941bca17c0f6771cbdf30
|
def __ne__(self, other):
    """Make ``P1 != P2`` work by negating equality.

    NOTE(review): calls ``__eq__`` directly, so a NotImplemented result
    is treated as truthy instead of triggering the reflected comparison.
    """
    return not self.__eq__(other)
|
Rend le P1 != P2 fonctionnel
|
625941bcad47b63b2c509e5e
|
def __init__(self):
    """Configure the Issuu platform wrapper (usufy mode only)."""
    self.platformName = "Issuu"
    self.tags = ["tools"]
    # Modes supported by this platform.
    self.isValidMode = {}
    self.isValidMode["phonefy"] = False
    self.isValidMode["usufy"] = True
    self.isValidMode["searchfy"] = False
    # Profile URL template; <usufy> is replaced with the nickname.
    self.url = {}
    self.url["usufy"] = "http://www.issuu.com/" + "<usufy>"
    self.needsCredentials = {}
    self.needsCredentials["usufy"] = False
    # Any non-empty query is accepted.
    self.validQuery = {}
    self.validQuery["usufy"] = ".+"
    # Page markers that indicate the profile does not exist.
    self.notFoundText = {}
    self.notFoundText["usufy"] = ["We can't find what you're looking for."]
    self.fieldsRegExp = {}
    self.fieldsRegExp["usufy"] = {}
    self.foundFields = {}
|
Constructor...
|
625941bc283ffb24f3c557e9
|
def test_makefastqs_standard_protocol_bcl2fastq_2_17(self):
    """MakeFastqs standard protocol with mock bcl2fastq v2.17.

    Builds a mock MiSeq run, sample sheet and bcl2fastq 2.17.1.14
    executable, runs the MakeFastqs pipeline, and checks status, reported
    metadata, and the expected output directories/files.
    """
    # Mock sequencer output to act as primary data.
    illumina_run = MockIlluminaRun(
        "171020_M00879_00002_AHGXXXX",
        "miseq",
        top_dir=self.wd)
    illumina_run.create()
    run_dir = illumina_run.dirn
    sample_sheet = os.path.join(self.wd, "SampleSheet.csv")
    with open(sample_sheet, 'wt') as fp:
        fp.write(SampleSheets.miseq)
    # Mock bcl2fastq pinned to version 2.17.1.14, placed first on PATH.
    MockBcl2fastq2Exe.create(os.path.join(self.bin, "bcl2fastq"),
                             version='2.17.1.14')
    os.environ['PATH'] = "%s:%s" % (self.bin, os.environ['PATH'])
    analysis_dir = os.path.join(self.wd, "analysis")
    os.mkdir(analysis_dir)
    # Run the pipeline and verify it completes successfully.
    p = MakeFastqs(run_dir, sample_sheet)
    status = p.run(analysis_dir, poll_interval=0.5)
    self.assertEqual(status, 0)
    self.assertEqual(p.output.platform, "miseq")
    self.assertEqual(p.output.primary_data_dir,
                     os.path.join(analysis_dir, "primary_data"))
    self.assertEqual(p.output.bcl2fastq_info,
                     (os.path.join(self.bin, "bcl2fastq"),
                      "bcl2fastq",
                      "2.17.1.14"))
    self.assertEqual(p.output.cellranger_info, None)
    self.assertTrue(p.output.acquired_primary_data)
    self.assertEqual(p.output.stats_file,
                     os.path.join(analysis_dir, "statistics.info"))
    self.assertEqual(p.output.stats_full,
                     os.path.join(analysis_dir, "statistics_full.info"))
    self.assertEqual(p.output.per_lane_stats,
                     os.path.join(analysis_dir, "per_lane_statistics.info"))
    self.assertEqual(p.output.per_lane_sample_stats,
                     os.path.join(analysis_dir, "per_lane_sample_stats.info"))
    self.assertEqual(p.output.missing_fastqs, [])
    # Expected output directory structure.
    for subdir in (os.path.join("primary_data", "171020_M00879_00002_AHGXXXX"),
                   "bcl2fastq",
                   "barcode_analysis",):
        self.assertTrue(os.path.isdir(os.path.join(analysis_dir, subdir)),
                        "Missing subdir: %s" % subdir)
    # Primary data is linked, not copied.
    self.assertTrue(os.path.islink(
        os.path.join(analysis_dir, "primary_data",
                     "171020_M00879_00002_AHGXXXX")))
    for filen in ("statistics.info",
                  "statistics_full.info",
                  "per_lane_statistics.info",
                  "per_lane_sample_stats.info",
                  "processing_qc.html"):
        self.assertTrue(os.path.isfile(os.path.join(analysis_dir, filen)),
                        "Missing file: %s" % filen)
|
MakeFastqs: standard protocol/bcl2fastq: use v2.17
|
625941bc01c39578d7e74d19
|
def say(self, verbosity, msg, channel=1):
    """Write *msg* (prefixed with the formatted preamble and newline) to
    system channel {1, 2} => (stdout, stderr) when the verbosity threshold
    is met, flushing immediately.

    Returns self so calls can be chained.
    """
    if self._verbosity >= verbosity:
        line = self._preamble.format(**self.about) + msg + '\n'
        self.write(line, channel).flush()
    return self
|
if verbosity level met, write and flush message string msg to system channel {1,2} => (stdout, stderr).
Returns self.
|
625941bc56ac1b37e62640b2
|
def to_python(self, value):
    """Validate that the input can be converted to a list of decimals.

    Each element is converted in place via the parent field's to_python;
    falsy input yields None.
    """
    if not value:
        return None
    if isinstance(value, list):
        for index, position_val in enumerate(value):
            # Delegate per-element conversion/validation to DecimalField.
            val = super(MultipleDecimalField, self).to_python(position_val)
            value[index] = val
    return value
|
Validates that the input can be converted to a list of decimals.
|
625941bc31939e2706e4cd4c
|
def run_command(container, command):
    """Execute *command* inside *container*.

    Wraps the command in ``bash -c "..."`` and runs it with a TTY in
    privileged mode, returning whatever ``container.exec_run`` returns.
    """
    wrapped = 'bash -c "' + command + '"'
    return container.exec_run(cmd=wrapped, tty=True, privileged=True)
|
Function that executes a command inside a container.
|
625941bc2c8b7c6e89b356a0
|
def fetch(self, filter, **kwargs):
    """Fetch data matching *filter*; to be implemented in final models.

    :param filter: selection criteria for the items to fetch.
    :return: list with data
    :raises: DataModelFetchError
    """
    pass
|
To be implemented in final models
:param item:
:return: list with data
:raises: DataModelFetchError
|
625941bce64d504609d7471e
|
def start(self): <NEW_LINE> <INDENT> pygame.display.init() <NEW_LINE> self.screen = pygame.display.set_mode((640,400),0,8) <NEW_LINE> pygame.display.set_caption('Simple CPU Simulator framebuffer') <NEW_LINE> self.vga = vgaconsole.VGAConsole(self.screen) <NEW_LINE> self.cpu.mem.add_map(0xc, Framebuffer(self.vga.vgabuf)) <NEW_LINE> self.vga.foreground = 7 <NEW_LINE> self.vga.background = 0 <NEW_LINE> self.vga.draw() <NEW_LINE> pygame.display.update()
|
This will initialize the actual framebuffer device.
|
625941bc7b180e01f3dc46e2
|
def __sub__(self, other): <NEW_LINE> <INDENT> if isinstance(other, Point): <NEW_LINE> <INDENT> return Point(self.x - other.x, self.y - other.y) <NEW_LINE> <DEDENT> return Point(self.x - other, self.y - other)
|
getter: (point1 , point2) subtract 2 points and return the result
|
625941bc4428ac0f6e5ba6cf
|
def optionsStringWasSet(option, text, default=None): <NEW_LINE> <INDENT> if default is None: <NEW_LINE> <INDENT> return _to_tuple(PISM.cpp.OptionString(option, text, ""), False) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return _to_tuple(PISM.cpp.OptionString(option, text, default), True)
|
Determines if a string-valued command line option was set.
:param option: Name of command line option.
:param text: Description of option.
:param default: Default value if option was not set.
:returns: Tuple ``(value, wasSet)`` where ``value`` is the value that was set (or the ``default`` value if it was not)
and ``wasSet`` is a boolean that is ``True`` if the command line option was set explicitly.
|
625941bc0a50d4780f666d6d
|
def create_attention_mask_from_input_mask(from_tensor, to_mask): <NEW_LINE> <INDENT> from_shape = get_shape_list(from_tensor, expected_rank=[2, 3],name='') <NEW_LINE> batch_size = from_shape[0] <NEW_LINE> from_seq_length = from_shape[1] <NEW_LINE> to_shape = get_shape_list(to_mask, expected_rank=2,name='') <NEW_LINE> to_seq_length = to_shape[1] <NEW_LINE> to_mask = tf.cast( tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32) <NEW_LINE> broadcast_ones = tf.ones( shape=[batch_size, from_seq_length, 1], dtype=tf.float32) <NEW_LINE> mask = broadcast_ones * to_mask <NEW_LINE> return mask
|
Create 3D attention mask from a 2D tensor mask.
Args:
from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].
to_mask: int32 Tensor of shape [batch_size, to_seq_length].
Returns:
float Tensor of shape [batch_size, from_seq_length, to_seq_length].
|
625941bc97e22403b379ce76
|
def get_current_moon_phase(): <NEW_LINE> <INDENT> moon_observer.date = datetime.now() <NEW_LINE> phase_end = ephem.next_new_moon(moon_observer.date) <NEW_LINE> phase_start = ephem.previous_new_moon(moon_observer.date) <NEW_LINE> phase = (moon_observer.date - phase_start) / (phase_end - phase_start) <NEW_LINE> phase *= 8 <NEW_LINE> phase = round(phase) % 8 <NEW_LINE> return "The current moon phase is {0}".format(moon_phases[int(phase)])
|
Get the current moon phase
|
625941bc851cf427c661a3f0
|
def validate_layout(self, layout, expected_widgets, optional_widgets = set()): <NEW_LINE> <INDENT> next_visits = deque([layout]) <NEW_LINE> widget_seen = set() <NEW_LINE> while next_visits: <NEW_LINE> <INDENT> w_desc = next_visits.popleft() <NEW_LINE> if type(w_desc) is str: <NEW_LINE> <INDENT> if w_desc not in expected_widgets and w_desc not in optional_widgets: <NEW_LINE> <INDENT> raise ValueError('Unrecognized widget "{}", pick one of: {}'.format(w_desc, ', '.join(expected_widgets))) <NEW_LINE> <DEDENT> elif w_desc in widget_seen: <NEW_LINE> <INDENT> raise ValueError('Duplicate widget "{}", all expected_widgets can only appear once'.format(w_desc)) <NEW_LINE> <DEDENT> widget_seen.add(w_desc) <NEW_LINE> <DEDENT> elif type(w_desc) is dict: <NEW_LINE> <INDENT> if 'orientation' not in w_desc or w_desc['orientation'] not in ['horizontal', 'vertical']: <NEW_LINE> <INDENT> raise ValueError('"orientation" is mandatory and must be "horizontal" or "vertical" at node {}'.format(w_desc)) <NEW_LINE> <DEDENT> elif 'children' not in w_desc or type(w_desc['children']) is not list or len(w_desc['children']) < 2: <NEW_LINE> <INDENT> raise ValueError('"children" is mandatory and must be a list of 2+ items at node {}'.format(w_desc)) <NEW_LINE> <DEDENT> elif 'resizeable' in w_desc and type(w_desc['resizeable']) is not bool: <NEW_LINE> <INDENT> raise ValueError('"resizeable" must be boolean at node {}'.format(w_desc)) <NEW_LINE> <DEDENT> elif 'proportions' in w_desc: <NEW_LINE> <INDENT> if 'resizeable' not in w_desc or not w_desc['resizeable']: <NEW_LINE> <INDENT> raise ValueError('"proportions" is only valid for resizeable widgets at node {}'.format(w_desc)) <NEW_LINE> <DEDENT> elif type(w_desc['proportions']) is not list or any(type(n) is not float for n in w_desc['proportions']) or len(w_desc['proportions']) != len(w_desc['children']) or abs(sum(w_desc['proportions']) - 1) > 1e-10: <NEW_LINE> <INDENT> raise ValueError('"proportions" must be a list of floats (one per separator), 
between 0 and 1, at node {}'.format(w_desc)) <NEW_LINE> <DEDENT> <DEDENT> next_visits.extend(w_desc['children']) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError('Unexpected type {}, nodes must be dicts or strings, at node {}'.format(type(w_desc), w_desc)) <NEW_LINE> <DEDENT> <DEDENT> widget_missing = expected_widgets - widget_seen <NEW_LINE> if widget_missing: <NEW_LINE> <INDENT> raise ValueError('Following placeable_widgets were not specified: {}'.format(', '.join(widget_missing)))
|
Validate layout: check whether the layout of widgets built from the config string is valid.
Args:
layout (`dict`): the json-parsed config string
expected_widgets (`set`): strings with the names of widgets that have to be used in this layout
optional_widgets (`set`): strings with the names of widgets that may or may not be used in this layout
Layout must have all self.placeable_widgets (leaves of the tree, as `str`) and only allowed properties
on the nodes of the tree (as `dict`).
Constraints on the only allowed properties of the nodes are:
- resizeable: `bool` (optional, defaults to no),
- orientation: `str`, either "vertical" or "horizontal" (mandatory)
- children: `list` of size >= 2, containing `str`s or `dict`s (mandatory)
- proportions: `list` of `float` with sum = 1, length == len(children), representing the relative sizes
of all the resizeable items (if and only if resizeable).
|
625941bcd4950a0f3b08c22f
|
def create_schema(config=None, engine=None): <NEW_LINE> <INDENT> if engine is None: <NEW_LINE> <INDENT> engine = sqla_api.get_engine() <NEW_LINE> <DEDENT> if version(engine=engine) is not None: <NEW_LINE> <INDENT> raise db_exc.DBMigrationError( _("Watcher database schema is already under version control; " "use upgrade() instead")) <NEW_LINE> <DEDENT> models.Base.metadata.create_all(engine) <NEW_LINE> stamp('head', config=config)
|
Create database schema from models description.
Can be used for initial installation instead of upgrade('head').
|
625941bc091ae35668666e42
|
def _get_journal(self, cr, uid, context=None): <NEW_LINE> <INDENT> journal_id = False <NEW_LINE> journal_pool = self.pool.get('account.journal') <NEW_LINE> if context.get('journal_type', False): <NEW_LINE> <INDENT> jids = journal_pool.search(cr, uid, [('type','=', context.get('journal_type'))]) <NEW_LINE> if not jids: <NEW_LINE> <INDENT> raise osv.except_osv(_('Configuration Error !'), _('Can\'t find any account journal of %s type for this company.\n\nYou can create one in the menu: \nConfiguration/Financial Accounting/Accounts/Journals.') % context.get('journal_type')) <NEW_LINE> <DEDENT> journal_id = jids[0] <NEW_LINE> <DEDENT> return journal_id
|
Return journal based on the journal type
|
625941bcf7d966606f6a9edf
|
def get_config_query_kwargs(self, **kwargs): <NEW_LINE> <INDENT> return {}
|
Return query arguments for fetching an integration configuration.
This can be subclassed to return additional arguments used when
fetching configurations, based on the needs of the application. For
example, limiting it by user or organization.
By default, this doesn't return any additional query arguments.
Args:
**kwargs (dict):
Any arguments captured in the URL.
Returns:
dict:
Additional query arguments as a dictionary. This will be turned
into keyword arguments for a filter query.
|
625941bc097d151d1a222d3a
|
def removeLeaves(self, accessibles): <NEW_LINE> <INDENT> nonleaves = [] <NEW_LINE> for acc in accessibles: <NEW_LINE> <INDENT> if acc.childCount > 0: <NEW_LINE> <INDENT> nonleaves.append(acc) <NEW_LINE> <DEDENT> <DEDENT> return nonleaves
|
Removes accessibles with no children (leaves) from a list
of accessibles.
@param accessibles: List of accessibles to be examined
@type accessibles: list
@return: The accessibles list without leaves
@rtype: list
|
625941bc462c4b4f79d1d5ae
|
def cmdCallback(self, cmd): <NEW_LINE> <INDENT> if not cmd.isDone: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> log.info("%s %s" % (self, cmd)) <NEW_LINE> msgCode, msgStr = cmd.getKeyValMsg() <NEW_LINE> self.writeToUsers(msgCode, msgStr, cmd=cmd)
|
!Called when a user command changes state; report completion or failure
|
625941bca8370b771705277f
|
def make_random_path(): <NEW_LINE> <INDENT> return "%s" % (User.objects.make_random_password(20))
|
This function creates a random path for storing a
JS file.
|
625941bca79ad161976cc023
|
def get_node(num): <NEW_LINE> <INDENT> this_node, tmp = root, num <NEW_LINE> for i in xrange(depth-1): <NEW_LINE> <INDENT> if tmp&self.filter: <NEW_LINE> <INDENT> this_node = this_node.right <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> this_node = this_node.left <NEW_LINE> <DEDENT> tmp = tmp<<1 <NEW_LINE> <DEDENT> return this_node
|
Given the node's index, return the node
|
625941bc63b5f9789fde6fc3
|
def __getitem__(self, remaining): <NEW_LINE> <INDENT> if not remaining: <NEW_LINE> <INDENT> return self <NEW_LINE> <DEDENT> elif self.children is None: <NEW_LINE> <INDENT> raise KeyError("%r is not a container" % self.name) <NEW_LINE> <DEDENT> for child in self.children: <NEW_LINE> <INDENT> if child.name == remaining[0]: <NEW_LINE> <INDENT> return child[remaining[1:]] <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> raise KeyError("%r not found" % remaining[0])
|
Look up a child atom, potentially recursively.
e.g. atom['udta', 'meta'] => <Atom name='meta' ...>
|
625941bc56b00c62f0f14535
|
def enable(self): <NEW_LINE> <INDENT> self.write("MO")
|
Enables motion for the axis.
|
625941bc5fdd1c0f98dc0110
|
def log_ec2_limit_error(exception): <NEW_LINE> <INDENT> if isinstance(exception, BotoServerError) and "RequestLimitExceeded" in utils.safe_stringify(exception): <NEW_LINE> <INDENT> stack_str = traceback.format_exc() <NEW_LINE> if "add_tag" in stack_str: <NEW_LINE> <INDENT> op_name = "CreateTag" <NEW_LINE> <DEDENT> elif "create_snapshot" in stack_str: <NEW_LINE> <INDENT> op_name = "CreateSnapshot" <NEW_LINE> <DEDENT> elif "delete_snapshot" in stack_str: <NEW_LINE> <INDENT> op_name = "DeleteSnapshot" <NEW_LINE> <DEDENT> elif "get_all_snapshots" in stack_str: <NEW_LINE> <INDENT> op_name = "DescribeSnapshots" <NEW_LINE> <DEDENT> elif "get_all_volumes" in stack_str: <NEW_LINE> <INDENT> op_name = "DescribeVolumes" <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> op_name = "UNKNOWN" <NEW_LINE> <DEDENT> logger.info("EC2_THROTTLE: Got a RequestLimitExceeded on op '%s', Error body: %s, Trace: %s" % (op_name, exception.body, stack_str))
|
Logs more details if the exception is RequestLimitExceeded. Not implemented in the best way/right place but
should work just fine
:param exception:
:return:
|
625941bc23849d37ff7b2f6f
|
def test_generate_freeway_routes_flow(self): <NEW_LINE> <INDENT> routes_file = os.path.join(self._output_dir, 'freeway_routes_demands.xml') <NEW_LINE> token = '<routes>\n' <NEW_LINE> file_util.append_line_to_file(routes_file, token) <NEW_LINE> token = (' <vType id="Car" accel="0.8" decel="4.5" sigma="0.5" ' 'length="5" minGap="2.5" maxSpeed="38" guiShape="passenger"/>\n') <NEW_LINE> file_util.append_line_to_file(routes_file, token) <NEW_LINE> input_output = self._random_traffic_generator.get_freeway_input_output() <NEW_LINE> token = ' <!-- freeway routes -->' <NEW_LINE> file_util.append_line_to_file(routes_file, token) <NEW_LINE> freeway_routes = self._random_traffic_generator.setup_shortest_routes( input_output, edge_type_list=random_traffic_generator.FREEWAY_EDGE_TYPES, routes_file=routes_file, figures_folder=self._output_dir) <NEW_LINE> token = ' <!-- freeway demands -->' <NEW_LINE> file_util.append_line_to_file(routes_file, token) <NEW_LINE> time_step_size = 100 <NEW_LINE> for time_point in range(0, 1200, time_step_size): <NEW_LINE> <INDENT> freeway_routes_demands = [(0, 0.3), (1, 0.3)] <NEW_LINE> self._random_traffic_generator.generate_routes_flow( time_point, time_step_size, freeway_routes, freeway_routes_demands, routes_file) <NEW_LINE> <DEDENT> token = '\n</routes>' <NEW_LINE> file_util.append_line_to_file(routes_file, token) <NEW_LINE> with file_util.f_open(routes_file, 'r') as f: <NEW_LINE> <INDENT> self.assertLen(f.readlines(), 36)
|
Test for the freeway demands generation workflow.
All the unit tests have been done above, and there is no calculation in this
test. So this one just verifies nothing is blocked in the workflow.
|
625941bcd10714528d5ffbbe
|
def kill_cursors(self, cursor_ids): <NEW_LINE> <INDENT> if not isinstance(cursor_ids, list): <NEW_LINE> <INDENT> raise TypeError("cursor_ids must be a list") <NEW_LINE> <DEDENT> return self._send_message(message.kill_cursors(cursor_ids))
|
Send a kill cursors message with the given ids.
Raises :class:`TypeError` if `cursor_ids` is not an instance of
``list``.
:Parameters:
- `cursor_ids`: list of cursor ids to kill
|
625941bcd99f1b3c44c67473
|
def test_set_output_volume(): <NEW_LINE> <INDENT> mock = MagicMock(return_value={"retcode": 0}) <NEW_LINE> with patch.dict(mac_desktop.__salt__, {"cmd.run_all": mock}), patch( "salt.modules.mac_desktop.get_output_volume", MagicMock(return_value="25") ): <NEW_LINE> <INDENT> assert mac_desktop.set_output_volume("25")
|
Test if it sets the volume of sound (range 0 to 100)
|
625941bc50485f2cf553cc77
|
def get_similarities(tags=None): <NEW_LINE> <INDENT> tags = tags or _get_tags()
|
Should return a list of similar tag pairs (tuples)
|
625941bc1d351010ab8559fb
|
def setup_platform(hass, config, add_devices, discovery_info=None): <NEW_LINE> <INDENT> from pysabnzbd import SabnzbdApi, SabnzbdApiException <NEW_LINE> host = config.get(CONF_HOST) <NEW_LINE> port = config.get(CONF_PORT) <NEW_LINE> name = config.get(CONF_NAME) <NEW_LINE> api_key = config.get(CONF_API_KEY) <NEW_LINE> monitored_types = config.get(CONF_MONITORED_VARIABLES) <NEW_LINE> use_ssl = config.get(CONF_SSL) <NEW_LINE> if use_ssl: <NEW_LINE> <INDENT> uri_scheme = 'https://' <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> uri_scheme = 'http://' <NEW_LINE> <DEDENT> base_url = "{}{}:{}/".format(uri_scheme, host, port) <NEW_LINE> sab_api = SabnzbdApi(base_url, api_key) <NEW_LINE> try: <NEW_LINE> <INDENT> sab_api.check_available() <NEW_LINE> <DEDENT> except SabnzbdApiException: <NEW_LINE> <INDENT> _LOGGER.error("Connection to SABnzbd API failed") <NEW_LINE> return False <NEW_LINE> <DEDENT> global _THROTTLED_REFRESH <NEW_LINE> _THROTTLED_REFRESH = Throttle( MIN_TIME_BETWEEN_UPDATES)(sab_api.refresh_queue) <NEW_LINE> devices = [] <NEW_LINE> for variable in monitored_types: <NEW_LINE> <INDENT> devices.append(SabnzbdSensor(variable, sab_api, name)) <NEW_LINE> <DEDENT> add_devices(devices)
|
Setup the SABnzbd sensors.
|
625941bc4c3428357757c208
|
def parse_coaches(self): <NEW_LINE> <INDENT> lx_doc = self.html_doc() <NEW_LINE> tr = lx_doc.xpath('//tr[@id="HeadCoaches"]')[0] <NEW_LINE> for i, td in enumerate(tr): <NEW_LINE> <INDENT> txt = td.xpath('.//text()') <NEW_LINE> txt = ex_junk(txt, ['\n','\r']) <NEW_LINE> team = 'away' if i == 0 else 'home' <NEW_LINE> self.coaches[team] = txt[0] <NEW_LINE> <DEDENT> return self if self.coaches else None
|
Parse the home and away coaches
:returns: ``self`` on success, ``None`` otherwise
|
625941bc3346ee7daa2b2c48
|
def testResourcesGroupAccount(self): <NEW_LINE> <INDENT> pass
|
Test ResourcesGroupAccount
|
625941bc925a0f43d2549d52
|
def SetText(self, *args, **kw): <NEW_LINE> <INDENT> self.setText(self.text)
|
set the text of the Label
|
625941bc71ff763f4b549565
|
def setup_interrupt( self, pin: PinType, edge: InterruptEdge, in_conf: ConfigType, ) -> None: <NEW_LINE> <INDENT> pass
|
Configure a pin as an interrupt.
This is used on modules which don't supply software callbacks, but use some other
way of representing interrupts, such as connecting a dedicated interrupt output
pin to another module that does supply software callbacks.
|
625941bc66673b3332b91f6f
|
def get_template_id(kwargs=None, call=None): <NEW_LINE> <INDENT> if call == 'action': <NEW_LINE> <INDENT> raise SaltCloudSystemExit( 'The list_nodes_full function must be called with -f or --function.' ) <NEW_LINE> <DEDENT> if kwargs is None: <NEW_LINE> <INDENT> kwargs = {} <NEW_LINE> <DEDENT> name = kwargs.get('name', None) <NEW_LINE> if name is None: <NEW_LINE> <INDENT> raise SaltCloudSystemExit( 'The get_template_id function requires a \'name\'.' ) <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> ret = list_templates()[name]['id'] <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> raise SaltCloudSystemExit( 'The template \'{0}\' could not be foound.'.format(name) ) <NEW_LINE> <DEDENT> return ret
|
Returns a template's ID from the given template name.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt-cloud -f get_template_id opennebula name=my-template-name
|
625941bc090684286d50ebc0
|
def pdf(self, X, y): <NEW_LINE> <INDENT> return np.exp(self.logpdf(X, y))
|
GMLR probability density function.
|
625941bc3eb6a72ae02ec3b3
|
def take_screenshot(filename='screenshot.png', add_timestamp=True): <NEW_LINE> <INDENT> logger.debug('Capturing Screenshot') <NEW_LINE> _make_results_dir() <NEW_LINE> if add_timestamp: <NEW_LINE> <INDENT> filename = _add_time_stamp(filename) <NEW_LINE> <DEDENT> screenshot_file = os.path.join(config.results_directory, filename) <NEW_LINE> browser.get_screenshot_as_file(screenshot_file) <NEW_LINE> return screenshot_file
|
Take a screenshot of the browser window. Called automatically on failures
when running in `-s` mode.
Return the path to the saved screenshot.
|
625941bc26068e7796caebb8
|
def single_loop(self, inlet_mass_flow=None, update_channels=True): <NEW_LINE> <INDENT> if inlet_mass_flow is not None: <NEW_LINE> <INDENT> self.mass_flow_in = inlet_mass_flow <NEW_LINE> <DEDENT> if update_channels: <NEW_LINE> <INDENT> self.update_channels() <NEW_LINE> self.dp_channel[:] = np.array([channel.pressure[channel.id_in] - channel.pressure[channel.id_out] for channel in self.channels]) <NEW_LINE> self.channel_vol_flow[:] = np.array([np.average(channel.vol_flow) for channel in self.channels]) <NEW_LINE> self.visc_channel[:] = np.array([np.average(channel.fluid.viscosity) for channel in self.channels]) <NEW_LINE> <DEDENT> p_in = ip.interpolate_1d(self.manifolds[0].pressure) <NEW_LINE> p_out = ip.interpolate_1d(self.manifolds[1].pressure) <NEW_LINE> if np.any(self.channel_vol_flow == 0.0): <NEW_LINE> <INDENT> raise ValueError('zero flow rates detected, ' 'check boundary conditions') <NEW_LINE> <DEDENT> if self.initialize: <NEW_LINE> <INDENT> self.k_perm[:] = self.channel_vol_flow / self.dp_channel * self.visc_channel * self.l_by_a <NEW_LINE> <DEDENT> self.dp_ref = np.maximum(self.dp_channel[-1], 1e-3) <NEW_LINE> self.alpha[:] = (p_in - p_out) / self.dp_ref <NEW_LINE> self.dp_ref = self.vol_flow_in / np.sum(self.alpha) * self.l_by_a * self.visc_channel[-1] / self.k_perm[-1] / self.n_subchannels <NEW_LINE> p_in += self.dp_ref + self.manifolds[1].pressure[self.manifolds[1].id_out] - self.manifolds[0].p_out <NEW_LINE> self.alpha[:] = (p_in - p_out) / self.dp_ref <NEW_LINE> self.channel_vol_flow[:] = (p_in - p_out) * self.k_perm / self.l_by_a * self.n_subchannels / self.visc_channel <NEW_LINE> density = np.array([channel.fluid.density[channel.id_in] for channel in self.channels]) <NEW_LINE> self.channel_mass_flow[:] = self.channel_vol_flow * density <NEW_LINE> mass_flow_correction = self.mass_flow_in / np.sum(self.channel_mass_flow) <NEW_LINE> self.channel_mass_flow[:] *= mass_flow_correction
|
Update the flow circuit
|
625941bc460517430c39406b
|
def get_user_menu(auths): <NEW_LINE> <INDENT> menus = SysMenuDao.get_all_menu() <NEW_LINE> self_menus = [] <NEW_LINE> [self_menus.append(m) for m in menus if not m["authority"] or m["authority"] in auths] <NEW_LINE> ids = [] <NEW_LINE> [ids.append(menu["parent_id"]) for menu in self_menus if menu["parent_id"] not in ids] <NEW_LINE> menus = [] <NEW_LINE> [menus.append(m) for m in self_menus if m["menu_url"] or (not m["menu_url"] and m["menu_id"] in ids)] <NEW_LINE> tree_menus = _build_tree_menu(menus, -1) <NEW_LINE> return tree_menus
|
根据权限获取主菜单
:param auths:
:return:
|
625941bcdc8b845886cb5412
|
def __init__(self) -> None: <NEW_LINE> <INDENT> self.coqtop: Optional[CoqtopProcess] = None <NEW_LINE> self.xml: Optional[XMLInterfaceBase] = None <NEW_LINE> self.states: List[int] = [] <NEW_LINE> self.state_id = -1 <NEW_LINE> self.root_state = -1 <NEW_LINE> self.out_q: BytesQueue = Queue() <NEW_LINE> self.err_q: BytesQueue = Queue() <NEW_LINE> self.stopping = False <NEW_LINE> self.log: Optional[IO[str]] = None <NEW_LINE> self.handler: logging.Handler = logging.NullHandler() <NEW_LINE> self.logger = logging.getLogger(str(id(self))) <NEW_LINE> self.logger.addHandler(self.handler) <NEW_LINE> self.logger.setLevel(logging.INFO)
|
Initialize Coqtop state.
coqtop - The Coqtop process
states - A stack of previous state_ids (grows to the right)
state_id - The current (tip) state_id
root_state - The starting state_id
out_q - A thread-safe queue of data read from Coqtop
err_q - A thread-safe queue of error messages read from Coqtop
xml - The XML interface for the given version
|
625941bc01c39578d7e74d1a
|
def CalcScore(): <NEW_LINE> <INDENT> Correctness = eval(input("Out of ten points, enter the score for correctness of code depending on the specifications met: ")) <NEW_LINE> Elegence = eval(input("Out of ten points, enter the score for the elegence of the code: ")) <NEW_LINE> Hygiene = eval(input("Out of ten points, enter the score for code hygiene: ")) <NEW_LINE> VideoQuality = eval(input("Out of ten points, enter the score based on the quality of discussion in youtube video: ")) <NEW_LINE> Total_Score = Correctness + Elegence + Hygiene + VideoQuality <NEW_LINE> Late(Total_Score)
|
This function calculates the total score of the assignment depending on whether the assignment was submitted on time
|
625941bc4c3428357757c209
|
def to_file(self, filename, mode='P'): <NEW_LINE> <INDENT> format = os.path.splitext(filename)[1][1:].upper() <NEW_LINE> if format == 'LMP': writefile(filename, self.data) <NEW_LINE> elif format == 'RAW': writefile(filename, self.to_raw()) <NEW_LINE> else: <NEW_LINE> <INDENT> im = self.to_Image(mode) <NEW_LINE> if format: <NEW_LINE> <INDENT> im.save(filename) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> im.save(filename, "PNG")
|
Save the graphic to an image file.
The output format is selected based on the filename extension.
For example, "file.jpg" saves to JPEG format. If the file has
no extension, PNG format is used.
Special cases: ".lmp" saves the raw lump data, and ".raw" saves
the raw pixel data.
`mode` may be 'P', 'RGB', or 'RGBA' for palette or 24/32 bit
output, respectively. However, .raw ignores this parameter and
always writes in palette mode.
|
625941bc2eb69b55b151c78a
|
def test_json_conversion(self): <NEW_LINE> <INDENT> metric1 = BaseMetrics() <NEW_LINE> jsobj = metric1.to_json_object() <NEW_LINE> metric2 = BaseMetrics.from_json_object(json.loads(json.dumps(jsobj))) <NEW_LINE> assert metric1 == metric2
|
Test json conversion
|
625941bcd486a94d0b98e024
|
def __init__(self): <NEW_LINE> <INDENT> pass
|
Initialize FEMOutput object
|
625941bc1f037a2d8b9460dd
|
def commit_item(): <NEW_LINE> <INDENT> return s3_rest_controller()
|
REST Controller
|
625941bc4e4d5625662d42bb
|
def set_athena_lambda_enable(self): <NEW_LINE> <INDENT> if 'athena_partition_refresh_config' not in self.config['lambda']: <NEW_LINE> <INDENT> LOGGER_CLI.error('No configuration found for Athena Partition Refresh. ' 'Please run: $ python manage.py athena init') <NEW_LINE> return <NEW_LINE> <DEDENT> self.config['lambda']['athena_partition_refresh_config']['enabled'] = True <NEW_LINE> self.write() <NEW_LINE> LOGGER_CLI.info('Athena configuration successfully enabled')
|
Enable athena partition refreshes
|
625941bcb57a9660fec3375f
|
def pmtu_discovery(self): <NEW_LINE> <INDENT> assert self.automatic_pmtu <NEW_LINE> if self.pmtu_probe_size is not None and self.pmtu_probe_size <= self.pmtu_probe_acked_mtu: <NEW_LINE> <INDENT> self.pmtu_probe_iteration = 0 <NEW_LINE> self.pmtu_probe_size = None <NEW_LINE> self.pmtu_probe_acked_mtu = 0 <NEW_LINE> self.create_timer(self.pmtu_discovery, timeout=random.randrange(500, 700)) <NEW_LINE> return <NEW_LINE> <DEDENT> self.pmtu_probe_size = PMTU_PROBE_SIZES[int(self.pmtu_probe_iteration / PMTU_PROBE_REPEATS)] <NEW_LINE> self.pmtu_probe_iteration = (self.pmtu_probe_iteration + 1) % PMTU_PROBE_COMBINATIONS <NEW_LINE> probe = b'\x80\x73\xA7\x01\x06\x00' <NEW_LINE> probe += b'\x00' * (self.pmtu_probe_size - IPV4_HDR_OVERHEAD - len(probe)) <NEW_LINE> self.write(self.endpoint, probe) <NEW_LINE> self.create_timer(self.pmtu_discovery, timeout=random.randrange(2, 5))
|
Handle periodic PMTU discovery.
|
625941bc1f037a2d8b9460de
|
def test_search_find_one_result_by_ref(self): <NEW_LINE> <INDENT> make_recipe( "legalaid.case", personal_details__full_name="abc", personal_details__postcode="123", **self.get_extra_search_make_recipe_kwargs() ) <NEW_LINE> response = self.client.get( self.list_url, data={"search": self.resource.reference}, HTTP_AUTHORIZATION=self.get_http_authorization() ) <NEW_LINE> self.assertEqual(response.status_code, status.HTTP_200_OK) <NEW_LINE> self.assertEqual(1, len(response.data["results"])) <NEW_LINE> self.assertCaseEqual(response.data["results"][0], self.resource)
|
GET search by name should work
|
625941bccc0a2c11143dcd6f
|
def regulartype(prompt_template="default"): <NEW_LINE> <INDENT> echo_prompt(prompt_template) <NEW_LINE> command_string = "" <NEW_LINE> cursor_position = 0 <NEW_LINE> with raw_mode(): <NEW_LINE> <INDENT> while True: <NEW_LINE> <INDENT> in_char = getchar() <NEW_LINE> if in_char in {ESC, CTRLC}: <NEW_LINE> <INDENT> echo(carriage_return=True) <NEW_LINE> raise click.Abort() <NEW_LINE> <DEDENT> elif in_char == TAB: <NEW_LINE> <INDENT> echo("\r", nl=True) <NEW_LINE> return in_char <NEW_LINE> <DEDENT> elif in_char == BACKSPACE: <NEW_LINE> <INDENT> if cursor_position > 0: <NEW_LINE> <INDENT> echo("\b \b", nl=False) <NEW_LINE> command_string = command_string[:-1] <NEW_LINE> cursor_position -= 1 <NEW_LINE> <DEDENT> <DEDENT> elif in_char in RETURNS: <NEW_LINE> <INDENT> echo("\r", nl=True) <NEW_LINE> return command_string <NEW_LINE> <DEDENT> elif in_char == CTRLZ and hasattr(signal, "SIGTSTP"): <NEW_LINE> <INDENT> os.kill(0, signal.SIGTSTP) <NEW_LINE> click.clear() <NEW_LINE> echo_prompt(prompt_template) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> echo(in_char, nl=False) <NEW_LINE> command_string += in_char <NEW_LINE> cursor_position += 1
|
Echo each character typed. Unlike magictype, this echoes the characters the
user is pressing.
Returns: command_string | The command to be passed to the shell to run. This is
| typed by the user.
|
625941bc76d4e153a657ea0f
|
def tofile(f,txt): <NEW_LINE> <INDENT> f.write(txt + "\n")
|
write text to file with trailing line break
|
625941bc38b623060ff0accd
|
def setup(bot): <NEW_LINE> <INDENT> bot.add_cog(CoreCommands())
|
DecoraterBot's Core Commands Plugin.
|
625941bc67a9b606de4a7d9b
|
def _subwidget_names(self): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> x = self.tk.call(self._w, 'subwidgets', '-all') <NEW_LINE> return self.tk.split(x) <NEW_LINE> <DEDENT> except TclError: <NEW_LINE> <INDENT> return None
|
Return the name of all subwidgets.
|
625941bc3617ad0b5ed67dd7
|
def bounds_scale(X, bounds, scale=(-1., 1.)): <NEW_LINE> <INDENT> if X.ndim == 1: X = X.reshape(1, -1) <NEW_LINE> _X = (X - bounds[:,0]) * (scale[1] - scale[0]) <NEW_LINE> _X = _X / (bounds[:,1] - bounds[:,0]) + scale[0] <NEW_LINE> if X.ndim == 1: return _X.ravel() <NEW_LINE> return _X
|
Scale the features X from the range given by `bounds` into the range given by `scale`
|
625941bc851cf427c661a3f1
|
def optimize_costs(self): <NEW_LINE> <INDENT> while self.cost_occurence != 1500: <NEW_LINE> <INDENT> new_solution = deepcopy(self.solution) <NEW_LINE> new_grid = new_solution.grid <NEW_LINE> netlist = new_grid.netlist <NEW_LINE> net = choice(netlist) <NEW_LINE> new_grid.delete_net(net, -1) <NEW_LINE> new_astar_solution = astar.Astar(new_grid) <NEW_LINE> new_astar_solution.run() <NEW_LINE> self.check_solution(new_astar_solution) <NEW_LINE> <DEDENT> self.solution.grid.get_output(self.costs) <NEW_LINE> return self.solution
|
Optimize a given solution on base of the total costs. Remove a randomly chosen net and let
the A* algorithm plot it again. If the costs of the new solution are lower than or equal to
the old costs, remember the new solution. Else, continue with the loop.
|
625941bc7b25080760e39339
|
def get_arc_cost_sav( arc ): <NEW_LINE> <INDENT> __moveData["arc_stat"] = get_arc_status( arc ); <NEW_LINE> if __moveData["arc_stat"] == __BASIC: return( 0.0 ) <NEW_LINE> in_arc = __int( arc ) <NEW_LINE> apex = __int(0) <NEW_LINE> out_arc = __int( 0 ) <NEW_LINE> flow_chg = __int( 0 ) <NEW_LINE> i_path = __int(0) <NEW_LINE> to_upper = __int(0) <NEW_LINE> ref = ctypes.byref <NEW_LINE> __FCTP_Lib.FCTPgetcstsav.restype = __double <NEW_LINE> cstsav = __FCTP_Lib.FCTPgetcstsav( in_arc, ref(apex), ref(out_arc), ref(flow_chg), ref(i_path), ref(to_upper ) ) <NEW_LINE> __moveData["in_arc"] = in_arc <NEW_LINE> __moveData["apex"] = apex <NEW_LINE> __moveData["out_arc"] = out_arc <NEW_LINE> __moveData["flow_chg"] = flow_chg <NEW_LINE> __moveData["i_path"] = i_path <NEW_LINE> __moveData["to_upper"] = to_upper <NEW_LINE> __moveData["saving"] = __double(cstsav) <NEW_LINE> return cstsav
|
Compute and return the cost savings that can be achieved by
introducing the arc "arc" into the basis.
|
625941bca79ad161976cc024
|
def assert_body(self, body, body_msg, expected_msg): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> msg = body[body_msg] <NEW_LINE> assert msg == expected_msg <NEW_LINE> self.logger.info( "Response body msg == expected_msg, expected_msg is %s, body_msg is %s" % (expected_msg, body[body_msg])) <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> self.logger.error( "Response body msg != expected_msg, expected_msg is %s, body_msg is %s" % (expected_msg, body[body_msg])) <NEW_LINE> Consts.RESULT_LIST.append('fail') <NEW_LINE> raise
|
验证response body中任意属性的值
:param body:
:param body_msg:
:param expected_msg:
:return:
|
625941bc45492302aab5e19f
|
def history(d): <NEW_LINE> <INDENT> if not hasattr(d, 'cut_history'): <NEW_LINE> <INDENT> raise ValueError("Cut history for this data not available.") <NEW_LINE> <DEDENT> hist = pd.DataFrame(d.cut_history, columns=['selection_desc', 'n_before', 'n_after']) <NEW_LINE> hist['n_removed'] = hist.n_before - hist.n_after <NEW_LINE> hist['fraction_passed'] = hist.n_after / hist.n_before <NEW_LINE> if len(hist): <NEW_LINE> <INDENT> n_orig = hist.iloc[0].n_before <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> n_orig = len(d) <NEW_LINE> <DEDENT> hist['cumulative_fraction_left'] = hist.n_after / n_orig <NEW_LINE> return hist
|
Return pandas dataframe describing cuts history on dataframe.
|
625941bcd53ae8145f87a154
|
def main(): <NEW_LINE> <INDENT> parser = argparse.ArgumentParser(description='Renders an occupancy grid (BINVOX file).') <NEW_LINE> parser.add_argument('--binvox', type=str, help='Path to OFF file.') <NEW_LINE> parser.add_argument('--output', type=str, default='output.png', help='Path to output PNG image.') <NEW_LINE> try: <NEW_LINE> <INDENT> argv = sys.argv[sys.argv.index("--") + 1:] <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> argv = "" <NEW_LINE> <DEDENT> args = parser.parse_args(argv) <NEW_LINE> if not os.path.exists(args.binvox): <NEW_LINE> <INDENT> log('BINVOX file not found.', LogLevel.ERROR) <NEW_LINE> exit() <NEW_LINE> <DEDENT> camera_target = initialize() <NEW_LINE> binvox_material = make_material('BRC_Material_Occupancy', (0.66, 0.45, 0.23), 0.8, True) <NEW_LINE> load_binvox(args.binvox, 0.0125, binvox_material, (0, 0, 0), (1, 1, 1), 'zxy') <NEW_LINE> rotation = (5, 0, -55) <NEW_LINE> distance = 0.5 <NEW_LINE> render(camera_target, args.output, rotation, distance)
|
Main function for rendering a specific binvox file.
|
625941bc9f2886367277a76f
|
def __rollback(self): <NEW_LINE> <INDENT> for tile_contents in self.tile_create_list: <NEW_LINE> <INDENT> tile_contents.remove() <NEW_LINE> <DEDENT> self.db.rollback()
|
Roll back the transaction while handling tile files.
|
625941bcd8ef3951e324341c
|
def fibonacci(n): <NEW_LINE> <INDENT> if n==0 or n==1: <NEW_LINE> <INDENT> return 1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return fibonacci(n-1) + fibonacci(n-2)
|
Regresa el termino $a_n$ de la sucesion
de Fibonacci.
|
625941bcbe8e80087fb20b26
|
def __init__(self, url: str, user: str, password: str, *, verify: bool = True, timeout: Optional[float] = None, retry: Optional[dict] = None, auth_update_callback: Optional[Callable[[], Tuple[str, str]]] = None ): <NEW_LINE> <INDENT> super().__init__() <NEW_LINE> self.host, self.version = self._get_host_and_api_version(url) <NEW_LINE> self.session = Session() <NEW_LINE> self.session.auth = (user, password) <NEW_LINE> self.timeout = timeout <NEW_LINE> self.verify = verify <NEW_LINE> self.auth_update_callback = auth_update_callback <NEW_LINE> if not retry: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> self._validate_retry_argument(retry) <NEW_LINE> adapter = HTTPAdapter(max_retries=Retry( total=retry['total'], backoff_factor=retry.get('factor', 1), status_forcelist=retry.get('statuses', []), method_whitelist=['GET', 'POST', 'PATCH'], )) <NEW_LINE> self.session.mount('http://', adapter) <NEW_LINE> self.session.mount('https://', adapter)
|
Swarm client class.
* url: ``str``
Url of Swarm server, must include API version.
* user: ``str``
User name, login.
* password: ``str``
Password for user.
* verify: ``bool`` (optional)
Verify SSL (default: true).
* timeout: ``float`` (optional)
HTTP request timeout.
* retry: ``dict`` (optional)
Retry options to prevent failures if server restarting or temporary
network problem. Disabled by default use total > 0 to enable.
- total: ``int`` Total retries count.
- factor: ``int`` Sleep factor between retries (default 1)
{factor} * (2 ** ({number of total retries} - 1))
- statuses: ``List[int]`` HTTP statues retries on. (default [])
Example:
.. code-block:: python
retry = dict(
total=10,
factor=1,
statuses=[500]
)
With factor = 1
============ =============
Retry number Sleep
============ =============
1 0.5 seconds
2 1.0 seconds
3 2.0 seconds
4 4.0 seconds
5 8.0 seconds
6 16.0 seconds
7 32.0 seconds
8 1.1 minutes
9 2.1 minutes
10 4.3 minutes
11 8.5 minutes
12 17.1 minutes
13 34.1 minutes
14 1.1 hours
15 2.3 hours
16 4.6 hours
17 9.1 hours
18 18.2 hours
19 36.4 hours
20 72.8 hours
============ =============
* auth_update_callback: ``Callable[[], Tuple[str, str]]`` (optional)
Callback function which will be called on SwarmUnauthorizedError
to update login and password and retry request again.
:returns: ``SwarmClient instance``
:raises: ``SwarmError``
|
625941bc3539df3088e2e22a
|
def save_clustering_results(_output_file, _centers, _scores, _feature_names): <NEW_LINE> <INDENT> with open(_output_file, 'w') as f: <NEW_LINE> <INDENT> writer = csv.writer(f) <NEW_LINE> writer.writerow(['k'] + ['score'] + list(_feature_names)) <NEW_LINE> for k, v in _centers.items(): <NEW_LINE> <INDENT> centers_list = [c.tolist() for c in v] <NEW_LINE> for center in centers_list: <NEW_LINE> <INDENT> writer.writerow([k] + [_scores[k]] + center)
|
Save the cluster centers and Silhuette scores for the different k.
These data will be use to choose the optimal k.
Input:
- output file name where the data are saved. It will be overwritten if it exists.
- dictionary containing the cluster centers with k as key.
- dictionary containing the Silhuette scores with l as key
- a list with the name of the features
|
625941bc8e71fb1e9831d68c
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.